diff --git a/.cargo/config.toml b/.cargo/config.toml index f658f146c9..c5b6fcd9d4 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -4,6 +4,12 @@ # binaries and the test suite. There's no need for typical library # documentation of public interfaces.) # +# NOTE: If you change this, also change the `RUSTDOCFLAGS` values in the various +# CI scripts: +# - .github/buildomat/build-and-test.sh +# - .github/buildomat/jobs/clippy.sh +# - .github/workflows/rust.yml +# [build] rustdocflags = "--document-private-items" diff --git a/.github/buildomat/build-and-test.sh b/.github/buildomat/build-and-test.sh index cc344522db..1e4b655cb9 100755 --- a/.github/buildomat/build-and-test.sh +++ b/.github/buildomat/build-and-test.sh @@ -54,7 +54,7 @@ ptime -m bash ./tools/install_builder_prerequisites.sh -y # banner build export RUSTFLAGS="-D warnings" -export RUSTDOCFLAGS="-D warnings" +export RUSTDOCFLAGS="--document-private-items -D warnings" # When running on illumos we need to pass an additional runpath that is # usually configured via ".cargo/config" but the `RUSTFLAGS` env variable # takes precedence. This path contains oxide specific libraries such as diff --git a/.github/buildomat/jobs/clippy.sh b/.github/buildomat/jobs/clippy.sh index cff9c45a1b..71aa04c907 100755 --- a/.github/buildomat/jobs/clippy.sh +++ b/.github/buildomat/jobs/clippy.sh @@ -30,4 +30,4 @@ ptime -m bash ./tools/install_builder_prerequisites.sh -y banner clippy export CARGO_INCREMENTAL=0 ptime -m cargo xtask clippy -ptime -m cargo doc +RUSTDOCFLAGS="--document-private-items -D warnings" ptime -m cargo doc --workspace --no-deps diff --git a/.github/buildomat/jobs/deploy.sh b/.github/buildomat/jobs/deploy.sh index b14486b651..e4f59aff5f 100755 --- a/.github/buildomat/jobs/deploy.sh +++ b/.github/buildomat/jobs/deploy.sh @@ -2,7 +2,7 @@ #: #: name = "helios / deploy" #: variety = "basic" -#: target = "lab-2.0-opte-0.31" +#: target = "lab-2.0-opte-0.32" #: output_rules = [ #: "%/var/svc/log/oxide-sled-agent:default.log*", #: "%/zone/oxz_*/root/var/svc/log/oxide-*.log*", diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index ed2615f655..7616656742 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -17,14 +17,14 @@ jobs: env: RUSTFLAGS: -D warnings steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 with: ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # v1 with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@51b8ba088c63d8750c618764ff2030742da0ec19 # v2 + uses: taiki-e/install-action@331a600f1b10a3fed8dc56f925012bede91ae51f # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 724f88e7a3..2ef2783108 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -108,4 +108,4 @@ jobs: - name: Install Pre-Requisites run: ./tools/install_builder_prerequisites.sh -y - name: Test build documentation - run: RUSTDOCFLAGS="-Dwarnings" cargo doc + run: RUSTDOCFLAGS="--document-private-items -D warnings" cargo doc --workspace --no-deps diff --git a/Cargo.lock b/Cargo.lock index 5949144ab5..dfd08dcd61 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -166,7 +166,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.64", + "syn 
2.0.68", ] [[package]] @@ -273,7 +273,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -295,7 +295,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -306,7 +306,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -359,7 +359,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -464,9 +464,9 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" +source = "git+https://github.com/oxidecomputer/propolis?rev=59868677c70f3cd03f03e12584ad1056da8b5459#59868677c70f3cd03f03e12584ad1056da8b5459" dependencies = [ - "bhyve_api_sys 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", + "bhyve_api_sys 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=59868677c70f3cd03f03e12584ad1056da8b5459)", "libc", "strum", ] @@ -484,7 +484,7 @@ dependencies = [ [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" +source = "git+https://github.com/oxidecomputer/propolis?rev=59868677c70f3cd03f03e12584ad1056da8b5459#59868677c70f3cd03f03e12584ad1056da8b5459" dependencies = [ "libc", "strum", @@ -518,7 +518,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.64", + "syn 2.0.68", "which", ] @@ -543,6 +543,17 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d7e60934ceec538daadb9d8432424ed043a904d8e0243f3c6446bce549a46ac" +[[package]] +name = "bitfield-struct" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1657dce144574f921af10a92876a96f0ca05dd830900598d21d91c8e4cf78f74" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -843,9 +854,9 @@ dependencies = [ [[package]] name = "cargo_toml" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8cb1d556b8b8f36e5ca74938008be3ac102f5dcb5b68a0477e4249ae2291cd3" +checksum = "4895c018bb228aa6b3ba1a0285543fcb4b704734c3fb1f72afaa75aa769500c1" dependencies = [ "serde", "toml 0.8.13", @@ -1038,7 +1049,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -1387,7 +1398,7 @@ dependencies = [ [[package]] name = "crucible-agent-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=8c6d485110ecfae5409575246b986a145c386dc4#8c6d485110ecfae5409575246b986a145c386dc4" +source = "git+https://github.com/oxidecomputer/crucible?rev=64e28cea69b427b05064defaf8800a4d678b4612#64e28cea69b427b05064defaf8800a4d678b4612" dependencies = [ "anyhow", "chrono", @@ -1403,7 +1414,7 @@ dependencies = [ [[package]] name = "crucible-pantry-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=8c6d485110ecfae5409575246b986a145c386dc4#8c6d485110ecfae5409575246b986a145c386dc4" +source = 
"git+https://github.com/oxidecomputer/crucible?rev=64e28cea69b427b05064defaf8800a4d678b4612#64e28cea69b427b05064defaf8800a4d678b4612" dependencies = [ "anyhow", "chrono", @@ -1420,7 +1431,7 @@ dependencies = [ [[package]] name = "crucible-smf" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=8c6d485110ecfae5409575246b986a145c386dc4#8c6d485110ecfae5409575246b986a145c386dc4" +source = "git+https://github.com/oxidecomputer/crucible?rev=64e28cea69b427b05064defaf8800a4d678b4612#64e28cea69b427b05064defaf8800a4d678b4612" dependencies = [ "crucible-workspace-hack", "libc", @@ -1506,16 +1517,15 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.2" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest", "fiat-crypto", - "platforms", "rand_core 0.6.4", "rustc_version 0.4.0", "subtle", @@ -1530,7 +1540,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -1554,7 +1564,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -1565,7 +1575,7 @@ checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ "darling_core", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -1599,14 +1609,15 @@ dependencies = [ "quote", "serde", "serde_tokenstream", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] name = "ddm-admin-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/maghemite?rev=5630887d0373857f77cb264f84aa19bdec720ce3#5630887d0373857f77cb264f84aa19bdec720ce3" +source = "git+https://github.com/oxidecomputer/maghemite?rev=3c3fa8482fe09a01da62fbd35efe124ea9cac9e7#3c3fa8482fe09a01da62fbd35efe124ea9cac9e7" dependencies = [ + "oxnet", "percent-encoding", "progenitor", "reqwest", @@ -1642,7 +1653,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -1675,7 +1686,7 @@ checksum = "5fe87ce4529967e0ba1dcf8450bab64d97dfd5010a6256187ffe2e43e6f0e049" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -1696,7 +1707,7 @@ checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -1717,7 +1728,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -1727,7 +1738,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b" dependencies = [ "derive_builder_core", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -1804,7 +1815,7 @@ dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -1813,7 +1824,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -1998,7 +2009,6 @@ dependencies = [ "chrono", "futures", "http 0.2.12", - "ipnetwork", "omicron-workspace-hack", "omicron-zone-package", "progenitor", @@ -2071,7 
+2081,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -2435,6 +2445,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "float-ord" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce81f49ae8a0482e4c55ea62ebbd7e5a686af544c00b9d090bba3ff9be97b3d" + [[package]] name = "flume" version = "0.11.0" @@ -2480,7 +2496,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -2592,7 +2608,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -3239,9 +3255,10 @@ dependencies = [ [[package]] name = "hubtools" -version = "0.4.1" -source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#73cd5a84689d59ecce9da66ad4389c540d315168" +version = "0.4.6" +source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#943c4bbe6b50d1ab635d085d6204895fb4154e79" dependencies = [ + "hex", "lpc55_areas", "lpc55_sign", "object 0.30.4", @@ -3463,7 +3480,7 @@ dependencies = [ [[package]] name = "illumos-sys-hdrs" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=417f74e94978c23f3892ac328c3387f3ecd9bb29#417f74e94978c23f3892ac328c3387f3ecd9bb29" +source = "git+https://github.com/oxidecomputer/opte?rev=915975f6d1729db95619f752148974016912412f#915975f6d1729db95619f752148974016912412f" [[package]] name = "illumos-utils" @@ -3471,7 +3488,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "bhyve_api 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", + "bhyve_api 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=59868677c70f3cd03f03e12584ad1056da8b5459)", "byteorder", "camino", "camino-tempfile", @@ -3877,17 +3894,17 @@ dependencies = [ [[package]] name = "kstat-macro" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=417f74e94978c23f3892ac328c3387f3ecd9bb29#417f74e94978c23f3892ac328c3387f3ecd9bb29" +source = "git+https://github.com/oxidecomputer/opte?rev=915975f6d1729db95619f752148974016912412f#915975f6d1729db95619f752148974016912412f" dependencies = [ "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] name = "kstat-rs" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc713c7902f757cf0c04012dbad3864ea505f2660467b704847ea7ea2ff6d67" +checksum = "27964e4632377753acb0898ce6f28770d50cbca1339200ae63d700cff97b5c2b" dependencies = [ "libc", "thiserror", @@ -4006,7 +4023,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.5", + "windows-targets 0.48.5", ] [[package]] @@ -4055,17 +4072,21 @@ dependencies = [ [[package]] name = "libnvme" -version = "0.1.0" -source = "git+https://github.com/oxidecomputer/libnvme?rev=6fffcc81d2c423ed2d2e6c5c2827485554c4ecbe#6fffcc81d2c423ed2d2e6c5c2827485554c4ecbe" +version = "0.1.1" +source = "git+https://github.com/oxidecomputer/libnvme?rev=dd5bb221d327a1bc9287961718c3c10d6bd37da0#dd5bb221d327a1bc9287961718c3c10d6bd37da0" dependencies = [ "libnvme-sys", + "nvme", "thiserror", ] [[package]] name = "libnvme-sys" version = "0.0.0" -source = 
"git+https://github.com/oxidecomputer/libnvme?rev=6fffcc81d2c423ed2d2e6c5c2827485554c4ecbe#6fffcc81d2c423ed2d2e6c5c2827485554c4ecbe" +source = "git+https://github.com/oxidecomputer/libnvme?rev=dd5bb221d327a1bc9287961718c3c10d6bd37da0#dd5bb221d327a1bc9287961718c3c10d6bd37da0" +dependencies = [ + "bitfield-struct", +] [[package]] name = "libredox" @@ -4145,8 +4166,8 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "lpc55_areas" -version = "0.2.4" -source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" +version = "0.2.5" +source = "git+https://github.com/oxidecomputer/lpc55_support#131520fc913ecce9b80557e854751953f743a7d2" dependencies = [ "bitfield", "clap", @@ -4156,8 +4177,8 @@ dependencies = [ [[package]] name = "lpc55_sign" -version = "0.3.3" -source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" +version = "0.3.4" +source = "git+https://github.com/oxidecomputer/lpc55_support#131520fc913ecce9b80557e854751953f743a7d2" dependencies = [ "byteorder", "const-oid", @@ -4289,7 +4310,7 @@ dependencies = [ [[package]] name = "mg-admin-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/maghemite?rev=5630887d0373857f77cb264f84aa19bdec720ce3#5630887d0373857f77cb264f84aa19bdec720ce3" +source = "git+https://github.com/oxidecomputer/maghemite?rev=3c3fa8482fe09a01da62fbd35efe124ea9cac9e7#3c3fa8482fe09a01da62fbd35efe124ea9cac9e7" dependencies = [ "anyhow", "chrono", @@ -4369,7 +4390,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -4732,7 +4753,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -4911,6 +4932,7 @@ dependencies = [ "oximeter", "oximeter-collector", "oximeter-producer", + "oxnet", "serde", "serde_json", "serde_urlencoded", @@ -4928,7 +4950,7 @@ version = "0.1.0" dependencies = [ "omicron-workspace-hack", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -5099,7 +5121,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -5188,6 +5210,11 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" +[[package]] +name = "nvme" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/libnvme?rev=dd5bb221d327a1bc9287961718c3c10d6bd37da0#dd5bb221d327a1bc9287961718c3c10d6bd37da0" + [[package]] name = "nvpair" version = "0.5.0" @@ -5526,7 +5553,7 @@ dependencies = [ "pq-sys", "pretty_assertions", "progenitor-client", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=59868677c70f3cd03f03e12584ad1056da8b5459)", "rand 0.8.5", "rcgen", "ref-cast", @@ -5631,6 +5658,7 @@ version = "0.1.0" dependencies = [ "anyhow", "camino", + "cargo_metadata", "clap", "expectorate", "futures", @@ -5652,7 +5680,6 @@ dependencies = [ "slog-term", "smf", "strum", - "swrite", "tar", "thiserror", "tokio", @@ -5776,7 +5803,7 @@ dependencies = [ "oximeter-producer", "oxnet", "pretty_assertions", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", + "propolis-client 0.1.0 
(git+https://github.com/oxidecomputer/propolis?rev=59868677c70f3cd03f03e12584ad1056da8b5459)", "propolis-mock-server", "rand 0.8.5", "rcgen", @@ -5955,7 +5982,7 @@ dependencies = [ "string_cache", "subtle", "syn 1.0.109", - "syn 2.0.64", + "syn 2.0.68", "time", "time-macros", "tokio", @@ -6075,7 +6102,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -6099,7 +6126,7 @@ dependencies = [ [[package]] name = "opte" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=417f74e94978c23f3892ac328c3387f3ecd9bb29#417f74e94978c23f3892ac328c3387f3ecd9bb29" +source = "git+https://github.com/oxidecomputer/opte?rev=915975f6d1729db95619f752148974016912412f#915975f6d1729db95619f752148974016912412f" dependencies = [ "cfg-if", "dyn-clone", @@ -6116,7 +6143,7 @@ dependencies = [ [[package]] name = "opte-api" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=417f74e94978c23f3892ac328c3387f3ecd9bb29#417f74e94978c23f3892ac328c3387f3ecd9bb29" +source = "git+https://github.com/oxidecomputer/opte?rev=915975f6d1729db95619f752148974016912412f#915975f6d1729db95619f752148974016912412f" dependencies = [ "illumos-sys-hdrs", "ipnetwork", @@ -6128,7 +6155,7 @@ dependencies = [ [[package]] name = "opte-ioctl" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=417f74e94978c23f3892ac328c3387f3ecd9bb29#417f74e94978c23f3892ac328c3387f3ecd9bb29" +source = "git+https://github.com/oxidecomputer/opte?rev=915975f6d1729db95619f752148974016912412f#915975f6d1729db95619f752148974016912412f" dependencies = [ "libc", "libnet 0.1.0 (git+https://github.com/oxidecomputer/netadm-sys)", @@ -6202,7 +6229,7 @@ dependencies = [ [[package]] name = "oxide-vpc" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=417f74e94978c23f3892ac328c3387f3ecd9bb29#417f74e94978c23f3892ac328c3387f3ecd9bb29" +source = "git+https://github.com/oxidecomputer/opte?rev=915975f6d1729db95619f752148974016912412f#915975f6d1729db95619f752148974016912412f" dependencies = [ "cfg-if", "illumos-sys-hdrs", @@ -6218,21 +6245,16 @@ dependencies = [ name = "oximeter" version = "0.1.0" dependencies = [ - "approx", - "bytes", + "anyhow", "chrono", - "num", - "omicron-common", + "clap", "omicron-workspace-hack", + "oximeter-impl", "oximeter-macro-impl", - "regex", - "rstest", - "schemars", - "serde", - "serde_json", - "strum", - "thiserror", - "trybuild", + "oximeter-timeseries-macro", + "prettyplease", + "syn 2.0.68", + "toml 0.8.13", "uuid", ] @@ -6338,6 +6360,38 @@ dependencies = [ "uuid", ] +[[package]] +name = "oximeter-impl" +version = "0.1.0" +dependencies = [ + "approx", + "bytes", + "chrono", + "float-ord", + "heck 0.5.0", + "num", + "omicron-common", + "omicron-workspace-hack", + "oximeter-macro-impl", + "prettyplease", + "proc-macro2", + "quote", + "rand 0.8.5", + "rand_distr", + "regex", + "rstest", + "schemars", + "serde", + "serde_json", + "slog-error-chain", + "strum", + "syn 2.0.68", + "thiserror", + "toml 0.8.13", + "trybuild", + "uuid", +] + [[package]] name = "oximeter-instruments" version = "0.1.0" @@ -6348,6 +6402,7 @@ dependencies = [ "futures", "http 0.2.12", "kstat-rs", + "libc", "omicron-workspace-hack", "oximeter", "rand 0.8.5", @@ -6366,7 +6421,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -6394,6 +6449,17 @@ dependencies = [ "uuid", ] +[[package]] +name = 
"oximeter-timeseries-macro" +version = "0.1.0" +dependencies = [ + "omicron-workspace-hack", + "oximeter-impl", + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "oxlog" version = "0.1.0" @@ -6410,7 +6476,7 @@ dependencies = [ [[package]] name = "oxnet" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/oxnet?branch=main#42b4d3c77c7f5f2636cd6c4bbf37ac3eada047e0" +source = "git+https://github.com/oxidecomputer/oxnet#2612d2203effcfdcbf83778a77f1bfd03fe6ed24" dependencies = [ "ipnetwork", "schemars", @@ -6547,7 +6613,7 @@ dependencies = [ "regex", "regex-syntax 0.8.3", "structmeta 0.3.0", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -6715,7 +6781,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -6785,7 +6851,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -6833,12 +6899,6 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" -[[package]] -name = "platforms" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" - [[package]] name = "plotters" version = "0.3.5" @@ -7055,7 +7115,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -7103,9 +7163,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.82" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -7151,7 +7211,7 @@ dependencies = [ "schemars", "serde", "serde_json", - "syn 2.0.64", + "syn 2.0.68", "thiserror", "typify", "unicode-ident", @@ -7171,7 +7231,7 @@ dependencies = [ "serde_json", "serde_tokenstream", "serde_yaml", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -7207,7 +7267,7 @@ dependencies = [ [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" +source = "git+https://github.com/oxidecomputer/propolis?rev=59868677c70f3cd03f03e12584ad1056da8b5459#59868677c70f3cd03f03e12584ad1056da8b5459" dependencies = [ "async-trait", "base64 0.21.7", @@ -7221,7 +7281,7 @@ dependencies = [ "slog", "thiserror", "tokio", - "tokio-tungstenite 0.20.1", + "tokio-tungstenite 0.21.0", "uuid", ] @@ -7249,7 +7309,7 @@ dependencies = [ [[package]] name = "propolis-mock-server" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" +source = "git+https://github.com/oxidecomputer/propolis?rev=59868677c70f3cd03f03e12584ad1056da8b5459#59868677c70f3cd03f03e12584ad1056da8b5459" dependencies = [ "anyhow", "atty", @@ -7259,7 +7319,7 @@ dependencies = [ "futures", "hyper 0.14.28", "progenitor", - "propolis_types 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", + "propolis_types 0.0.0 
(git+https://github.com/oxidecomputer/propolis?rev=59868677c70f3cd03f03e12584ad1056da8b5459)", "rand 0.8.5", "reqwest", "schemars", @@ -7272,7 +7332,7 @@ dependencies = [ "slog-term", "thiserror", "tokio", - "tokio-tungstenite 0.20.1", + "tokio-tungstenite 0.21.0", "uuid", ] @@ -7291,7 +7351,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" +source = "git+https://github.com/oxidecomputer/propolis?rev=59868677c70f3cd03f03e12584ad1056da8b5459#59868677c70f3cd03f03e12584ad1056da8b5459" dependencies = [ "schemars", "serde", @@ -7456,6 +7516,16 @@ dependencies = [ "getrandom 0.2.14", ] +[[package]] +name = "rand_distr" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + [[package]] name = "rand_hc" version = "0.2.0" @@ -7651,7 +7721,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -7905,7 +7975,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.64", + "syn 2.0.68", "unicode-ident", ] @@ -8286,9 +8356,9 @@ dependencies = [ [[package]] name = "schemars" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0218ceea14babe24a4a5836f86ade86c1effbc198164e619194cb5069187e29" +checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" dependencies = [ "bytes", "chrono", @@ -8301,14 +8371,14 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed5a1ccce8ff962e31a165d41f6e2a2dd1245099dc4d594f5574a86cd90f4d3" +checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -8334,7 +8404,7 @@ checksum = "7f81c2fde025af7e69b1d1420531c8a8811ca898919db177141a85313b1cb932" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -8463,7 +8533,7 @@ checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -8474,7 +8544,7 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -8488,9 +8558,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.117" +version = "1.0.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "d947f6b3163d8857ea16c4fa0dd4840d52f3041039a85decd46867eb1abef2e4" dependencies = [ "itoa", "ryu", @@ -8524,7 +8594,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -8545,7 +8615,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -8587,7 +8657,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] 
[[package]] @@ -8935,7 +9005,7 @@ source = "git+https://github.com/oxidecomputer/slog-error-chain?branch=main#15f6 dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9062,7 +9132,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9189,7 +9259,7 @@ checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9199,7 +9269,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9285,7 +9355,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive 0.2.0", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9297,7 +9367,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive 0.3.0", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9308,7 +9378,7 @@ checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9319,7 +9389,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9354,7 +9424,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9367,7 +9437,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9414,9 +9484,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.64" +version = "2.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ad3dee41f36859875573074334c200d1add8e4a87bb37113ebd31d926b7b11f" +checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" dependencies = [ "proc-macro2", "quote", @@ -9590,7 +9660,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta 0.2.0", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9621,7 +9691,7 @@ checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9758,7 +9828,7 @@ checksum = "8d9ef545650e79f30233c0003bcc2504d7efac6dad25fca40744de773fe2049c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -9829,7 +9899,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -10106,7 +10176,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -10383,7 +10453,7 @@ dependencies = [ "semver 1.0.23", "serde", "serde_json", - "syn 2.0.64", + "syn 2.0.68", "thiserror", "unicode-ident", ] @@ -10400,7 +10470,7 @@ dependencies = [ "serde", "serde_json", "serde_tokenstream", - "syn 2.0.64", + "syn 2.0.68", "typify-impl", ] @@ -10607,7 +10677,7 @@ dependencies = [ "proc-macro2", "quote", "serde_tokenstream", - "syn 2.0.64", + "syn 2.0.68", "usdt-impl", ] @@ -10625,7 +10695,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.64", + "syn 2.0.68", "thiserror", "thread-id", "version_check", @@ -10641,7 +10711,7 @@ dependencies = [ "proc-macro2", "quote", "serde_tokenstream", - "syn 2.0.64", + 
"syn 2.0.68", "usdt-impl", ] @@ -10820,7 +10890,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", "wasm-bindgen-shared", ] @@ -10854,7 +10924,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11374,7 +11444,6 @@ dependencies = [ "fs-err", "futures", "macaddr", - "md5", "reqwest", "serde", "sha2", @@ -11436,7 +11505,7 @@ checksum = "125139de3f6b9d625c39e2efdd73d41bdac468ccd556556440e322be0e1bbd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -11447,7 +11516,7 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] @@ -11467,7 +11536,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.68", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d461e0585a..e6b41c66a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,10 +59,12 @@ members = [ "nexus/types", "oximeter/collector", "oximeter/db", + "oximeter/impl", "oximeter/instruments", "oximeter/oximeter-macro-impl", "oximeter/oximeter", "oximeter/producer", + "oximeter/timeseries-macro", "package", "passwords", "rpaths", @@ -87,6 +89,7 @@ members = [ ] default-members = [ + "api_identity", "bootstore", "certificates", "clients/bootstrap-agent-client", @@ -113,6 +116,8 @@ default-members = [ # hakari to not work as well and build times to be longer. # See omicron#4392. "dns-server", + # Do not include end-to-end-tests in the list of default members, as its + # tests only work on a deployed control plane. 
"gateway-cli", "gateway-test-utils", "gateway", @@ -128,25 +133,30 @@ default-members = [ "nexus-config", "nexus/authz-macros", "nexus/auth", - "nexus/macros-common", - "nexus/metrics-producer-gc", - "nexus/networking", "nexus/db-fixed-data", "nexus/db-macros", "nexus/db-model", "nexus/db-queries", "nexus/defaults", "nexus/inventory", + "nexus/macros-common", + "nexus/metrics-producer-gc", + "nexus/networking", "nexus/reconfigurator/execution", "nexus/reconfigurator/planning", "nexus/reconfigurator/preparation", + "nexus/test-interface", + "nexus/test-utils-macros", + "nexus/test-utils", "nexus/types", "oximeter/collector", "oximeter/db", + "oximeter/impl", "oximeter/instruments", "oximeter/oximeter-macro-impl", "oximeter/oximeter", "oximeter/producer", + "oximeter/timeseries-macro", "package", "passwords", "rpaths", @@ -166,6 +176,7 @@ default-members = [ "wicket-dbg", "wicket", "wicketd", + "workspace-hack", "zone-setup", ] resolver = "2" @@ -249,9 +260,9 @@ cookie = "0.18" criterion = { version = "0.5.1", features = [ "async_tokio" ] } crossbeam = "0.8" crossterm = { version = "0.27.0", features = ["event-stream"] } -crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "8c6d485110ecfae5409575246b986a145c386dc4" } -crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "8c6d485110ecfae5409575246b986a145c386dc4" } -crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "8c6d485110ecfae5409575246b986a145c386dc4" } +crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "64e28cea69b427b05064defaf8800a4d678b4612" } +crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "64e28cea69b427b05064defaf8800a4d678b4612" } +crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "64e28cea69b427b05064defaf8800a4d678b4612" } csv = "1.3.0" curve25519-dalek = "4" datatest-stable = "0.2.9" @@ -273,6 +284,7 @@ expectorate = "1.1.0" fatfs = "0.3.6" filetime = "0.2.23" flate2 = "1.0.30" +float-ord = "0.3.2" flume = "0.11.0" foreign-types = "0.3.2" fs-err = "2.11.0" @@ -312,17 +324,17 @@ internet-checksum = "0.2" ipnetwork = { version = "0.20", features = ["schemars"] } ispf = { git = "https://github.com/oxidecomputer/ispf" } key-manager = { path = "key-manager" } -kstat-rs = "0.2.3" +kstat-rs = "0.2.4" libc = "0.2.155" libfalcon = { git = "https://github.com/oxidecomputer/falcon", rev = "e69694a1f7cc9fe31fab27f321017280531fb5f7" } -libnvme = { git = "https://github.com/oxidecomputer/libnvme", rev = "6fffcc81d2c423ed2d2e6c5c2827485554c4ecbe" } +libnvme = { git = "https://github.com/oxidecomputer/libnvme", rev = "dd5bb221d327a1bc9287961718c3c10d6bd37da0" } linear-map = "1.2.0" macaddr = { version = "1.0.1", features = ["serde_std"] } maplit = "1.0.2" mockall = "0.12" newtype_derive = "0.1.6" -mg-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "5630887d0373857f77cb264f84aa19bdec720ce3" } -ddm-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "5630887d0373857f77cb264f84aa19bdec720ce3" } +mg-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "3c3fa8482fe09a01da62fbd35efe124ea9cac9e7" } +ddm-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "3c3fa8482fe09a01da62fbd35efe124ea9cac9e7" } multimap = "0.10.0" nexus-auth = { path = "nexus/auth" } nexus-client = { path = "clients/nexus-client" } @@ -342,7 +354,7 @@ omicron-certificates = { path = "certificates" } 
omicron-passwords = { path = "passwords" } omicron-workspace-hack = "0.1.0" oxlog = { path = "dev-tools/oxlog" } -oxnet = { git = "https://github.com/oxidecomputer/oxnet", branch = "main" } +oxnet = { git = "https://github.com/oxidecomputer/oxnet" } nexus-test-interface = { path = "nexus/test-interface" } nexus-test-utils-macros = { path = "nexus/test-utils-macros" } nexus-test-utils = { path = "nexus/test-utils" } @@ -360,23 +372,25 @@ omicron-sled-agent = { path = "sled-agent" } omicron-test-utils = { path = "test-utils" } omicron-zone-package = "0.11.0" oxide-client = { path = "clients/oxide-client" } -oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "417f74e94978c23f3892ac328c3387f3ecd9bb29", features = [ "api", "std" ] } +oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "915975f6d1729db95619f752148974016912412f", features = [ "api", "std" ] } once_cell = "1.19.0" openapi-lint = { git = "https://github.com/oxidecomputer/openapi-lint", branch = "main" } openapiv3 = "2.0.0" # must match samael's crate! openssl = "0.10" openssl-sys = "0.9" -opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "417f74e94978c23f3892ac328c3387f3ecd9bb29" } +opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "915975f6d1729db95619f752148974016912412f" } oso = "0.27" owo-colors = "4.0.0" oximeter = { path = "oximeter/oximeter" } oximeter-client = { path = "clients/oximeter-client" } oximeter-db = { path = "oximeter/db/" } oximeter-collector = { path = "oximeter/collector" } +oximeter-impl = { path = "oximeter/impl" } oximeter-instruments = { path = "oximeter/instruments" } oximeter-macro-impl = { path = "oximeter/oximeter-macro-impl" } oximeter-producer = { path = "oximeter/producer" } +oximeter-timeseries-macro = { path = "oximeter/timeseries-macro" } p256 = "0.13" parse-display = "0.9.0" partial-io = { version = "0.5.4", features = ["proptest1", "tokio1"] } @@ -394,13 +408,14 @@ prettyplease = { version = "0.2.20", features = ["verbatim"] } proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "6d7ed9a033babc054db9eff5b59dee978d2b0d76" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "6d7ed9a033babc054db9eff5b59dee978d2b0d76" } -propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "6d7ed9a033babc054db9eff5b59dee978d2b0d76" } +bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "59868677c70f3cd03f03e12584ad1056da8b5459" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "59868677c70f3cd03f03e12584ad1056da8b5459" } +propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "59868677c70f3cd03f03e12584ad1056da8b5459" } proptest = "1.4.0" quote = "1.0" rand = "0.8.5" rand_core = "0.6.4" +rand_distr = "0.4.3" rand_seeder = "0.2.3" ratatui = "0.26.2" rayon = "1.10" @@ -435,7 +450,12 @@ signal-hook = "0.3" signal-hook-tokio = { version = "0.3", features = [ "futures-v0_3" ] } sigpipe = "0.1.3" similar-asserts = "1.5.0" -sled = "0.34" +# Don't change sled's version on accident; sled's on-disk format is not yet +# stable and requires manual migrations. 
In the limit this won't matter because +# the upgrade system will replace the DNS server zones entirely, but while we +# are still doing mupdate a change to the on-disk format will break existing DNS +# server zones. +sled = "=0.34.7" sled-agent-client = { path = "clients/sled-agent-client" } sled-hardware = { path = "sled-hardware" } sled-hardware-types = { path = "sled-hardware/types" } diff --git a/api_identity/src/lib.rs b/api_identity/src/lib.rs index 4d933ed3c0..f90b1e3a89 100644 --- a/api_identity/src/lib.rs +++ b/api_identity/src/lib.rs @@ -60,12 +60,9 @@ mod test { #[test] fn test_identity() { - let ret = do_object_identity( - quote! { - struct Foo { identity: IdentityMetadata } - } - .into(), - ); + let ret = do_object_identity(quote! { + struct Foo { identity: IdentityMetadata } + }); let expected = quote! { impl ObjectIdentity for Foo { @@ -80,12 +77,9 @@ mod test { #[test] fn test_identity_no_field() { - let ret = do_object_identity( - quote! { - struct Foo {} - } - .into(), - ); + let ret = do_object_identity(quote! { + struct Foo {} + }); let error = ret.unwrap_err(); assert!(error.to_string().starts_with("deriving ObjectIdentity")); diff --git a/clients/ddm-admin-client/src/lib.rs b/clients/ddm-admin-client/src/lib.rs index b926ee2971..8cd9781e1d 100644 --- a/clients/ddm-admin-client/src/lib.rs +++ b/clients/ddm-admin-client/src/lib.rs @@ -12,7 +12,7 @@ pub use ddm_admin_client::types; pub use ddm_admin_client::Error; -use ddm_admin_client::types::{Ipv6Prefix, TunnelOrigin}; +use ddm_admin_client::types::TunnelOrigin; use ddm_admin_client::Client as InnerClient; use either::Either; use omicron_common::address::Ipv6Subnet; @@ -81,8 +81,7 @@ impl Client { pub fn advertise_prefix(&self, address: Ipv6Subnet) { let me = self.clone(); tokio::spawn(async move { - let prefix = - Ipv6Prefix { addr: address.net().prefix(), len: SLED_PREFIX }; + let prefix = address.net(); retry_notify(retry_policy_internal_service_aggressive(), || async { info!( me.log, "Sending prefix to ddmd for advertisement"; @@ -130,8 +129,8 @@ impl Client { let prefixes = self.inner.get_prefixes().await?.into_inner(); Ok(prefixes.into_iter().flat_map(|(_, prefixes)| { prefixes.into_iter().flat_map(|prefix| { - let mut segments = prefix.destination.addr.segments(); - if prefix.destination.len == BOOTSTRAP_MASK + let mut segments = prefix.destination.addr().segments(); + if prefix.destination.width() == BOOTSTRAP_MASK && segments[0] == BOOTSTRAP_PREFIX { Either::Left(interfaces.iter().map(move |interface| { diff --git a/clients/dpd-client/Cargo.toml b/clients/dpd-client/Cargo.toml index 477435d8bb..04240ea5bb 100644 --- a/clients/dpd-client/Cargo.toml +++ b/clients/dpd-client/Cargo.toml @@ -16,7 +16,6 @@ slog.workspace = true regress.workspace = true uuid.workspace = true chrono.workspace = true -ipnetwork.workspace = true http.workspace = true schemars.workspace = true rand.workspace = true diff --git a/clients/dpd-client/build.rs b/clients/dpd-client/build.rs index 952a7ddee6..02a685632c 100644 --- a/clients/dpd-client/build.rs +++ b/clients/dpd-client/build.rs @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-// Copyright 2022 Oxide Computer Company +// Copyright 2024 Oxide Computer Company // // TODO: remove // This code is only required at the moment because the source repo @@ -15,6 +15,7 @@ use anyhow::Context; use anyhow::Result; use omicron_zone_package::config::Config; use omicron_zone_package::package::PackageSource; +use progenitor::TypePatch; use quote::quote; use std::env; use std::fs; @@ -73,7 +74,7 @@ fn main() -> Result<()> { let code = progenitor::Generator::new( progenitor::GenerationSettings::new() - .with_inner_type(quote!(ClientState)) + .with_inner_type(quote!{ ClientState }) .with_pre_hook(quote! { |state: &crate::ClientState, request: &reqwest::Request| { slog::debug!(state.log, "client request"; @@ -88,9 +89,22 @@ fn main() -> Result<()> { slog::debug!(state.log, "client response"; "result" => ?result); } }) - .with_replacement("Ipv4Cidr", "crate::Ipv4Cidr", std::iter::empty()) - .with_replacement("Ipv6Cidr", "crate::Ipv6Cidr", std::iter::empty()) - .with_replacement("Cidr", "crate::Cidr", std::iter::empty()), + .with_patch("LinkId", &TypePatch::default() + .with_derive("Eq") + .with_derive("PartialEq") + ) + .with_patch("LinkCreate", &TypePatch::default() + .with_derive("Eq") + .with_derive("PartialEq") + ) + .with_patch("LinkSettings", &TypePatch::default() + .with_derive("Eq") + .with_derive("PartialEq") + ) + .with_patch("PortSettings", &TypePatch::default() + .with_derive("Eq") + .with_derive("PartialEq") + ) ) .generate_tokens(&spec) .with_context(|| { diff --git a/clients/dpd-client/src/lib.rs b/clients/dpd-client/src/lib.rs index 556a8493d7..a0608a5c7d 100644 --- a/clients/dpd-client/src/lib.rs +++ b/clients/dpd-client/src/lib.rs @@ -2,25 +2,18 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -// Copyright 2023 Oxide Computer Company +// Copyright 2024 Oxide Computer Company -#![allow(clippy::redundant_closure_call)] -#![allow(clippy::needless_lifetimes)] -#![allow(clippy::match_single_binding)] -#![allow(clippy::clone_on_copy)] -#![allow(clippy::unnecessary_to_owned)] // The progenitor-generated API for dpd currently incorporates a type from // oximeter, which includes a docstring that has a doc-test in it. // That test passes for code that lives in omicron, but fails for code imported // by omicron. 
#![allow(rustdoc::broken_intra_doc_links)] +use std::net::IpAddr; + use slog::info; use slog::Logger; -use types::LinkCreate; -use types::LinkId; -use types::LinkSettings; -use types::PortSettings; include!(concat!(env!("OUT_DIR"), "/dpd-client.rs")); @@ -44,20 +37,16 @@ impl Client { pub async fn ensure_nat_entry( &self, log: &Logger, - target_ip: ipnetwork::IpNetwork, + target_ip: IpAddr, target_mac: types::MacAddr, target_first_port: u16, target_last_port: u16, target_vni: u32, sled_ip_address: &std::net::Ipv6Addr, ) -> Result<(), progenitor_client::Error> { - let existing_nat = match target_ip { - ipnetwork::IpNetwork::V4(network) => { - self.nat_ipv4_get(&network.ip(), target_first_port).await - } - ipnetwork::IpNetwork::V6(network) => { - self.nat_ipv6_get(&network.ip(), target_first_port).await - } + let existing_nat = match &target_ip { + IpAddr::V4(ip) => self.nat_ipv4_get(ip, target_first_port).await, + IpAddr::V6(ip) => self.nat_ipv6_get(ip, target_first_port).await, }; // If a NAT entry already exists, but has the wrong internal @@ -72,20 +61,12 @@ impl Client { info!(log, "deleting old nat entry"; "target_ip" => ?target_ip); - match target_ip { - ipnetwork::IpNetwork::V4(network) => { - self.nat_ipv4_delete( - &network.ip(), - target_first_port, - ) - .await + match &target_ip { + IpAddr::V4(ip) => { + self.nat_ipv4_delete(ip, target_first_port).await } - ipnetwork::IpNetwork::V6(network) => { - self.nat_ipv6_delete( - &network.ip(), - target_first_port, - ) - .await + IpAddr::V6(ip) => { + self.nat_ipv6_delete(ip, target_first_port).await } }?; } else { @@ -113,19 +94,19 @@ impl Client { vni: target_vni.into(), }; - match target_ip { - ipnetwork::IpNetwork::V4(network) => { + match &target_ip { + IpAddr::V4(ip) => { self.nat_ipv4_create( - &network.ip(), + ip, target_first_port, target_last_port, &nat_target, ) .await } - ipnetwork::IpNetwork::V6(network) => { + IpAddr::V6(ip) => { self.nat_ipv6_create( - &network.ip(), + ip, target_first_port, target_last_port, &nat_target, @@ -139,42 +120,6 @@ impl Client { Ok(()) } - /// Ensure that a NAT entry is deleted. - /// - /// nat_ipv[46]_delete are not idempotent (see oxidecomputer/dendrite#343), - /// but this wrapper function is. Call this from sagas instead. - pub async fn ensure_nat_entry_deleted( - &self, - log: &Logger, - target_ip: ipnetwork::IpNetwork, - target_first_port: u16, - ) -> Result<(), progenitor_client::Error> { - let result = match target_ip { - ipnetwork::IpNetwork::V4(network) => { - self.nat_ipv4_delete(&network.ip(), target_first_port).await - } - ipnetwork::IpNetwork::V6(network) => { - self.nat_ipv6_delete(&network.ip(), target_first_port).await - } - }; - - match result { - Ok(_) => { - info!(log, "deleted old nat entry"; "target_ip" => ?target_ip); - } - - Err(e) => { - if e.status() == Some(http::StatusCode::NOT_FOUND) { - info!(log, "no nat entry found for: {target_ip:#?}"); - } else { - return Err(e); - } - } - } - - Ok(()) - } - /// Ensure that a loopback address is created. /// /// loopback_ipv[46]_create are not idempotent (see @@ -261,569 +206,3 @@ impl Client { } } } - -// XXX delete everything below once we use the real dpd-client crate. 
-// https://github.com/oxidecomputer/omicron/issues/2775 - -use std::convert::TryFrom; -use std::fmt; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; -use std::str::FromStr; - -use schemars::JsonSchema; -use serde::Deserialize; -use serde::Serialize; - -use rand::prelude::*; - -// Given an IPv6 multicast address, generate the associated synthetic mac -// address -pub fn multicast_mac_addr(ip: Ipv6Addr) -> MacAddr { - let o = ip.octets(); - MacAddr::new(0x33, 0x33, o[12], o[13], o[14], o[15]) -} - -/// Generate an IPv6 adddress within the provided `cidr`, using the EUI-64 -/// transfrom of `mac`. -pub fn generate_ipv6_addr(cidr: Ipv6Cidr, mac: MacAddr) -> Ipv6Addr { - let prefix: u128 = cidr.prefix.into(); - let mac = u128::from(u64::from_be_bytes(mac.to_eui64())); - let mask = ((1u128 << cidr.prefix_len) - 1) << (128 - cidr.prefix_len); - let ipv6 = (prefix & mask) | (mac & !mask); - ipv6.into() -} - -/// Generate a link-local IPv6 address using the EUI-64 transform of `mac`. -pub fn generate_ipv6_link_local(mac: MacAddr) -> Ipv6Addr { - const LINK_LOCAL_PREFIX: Ipv6Cidr = Ipv6Cidr { - prefix: Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 0), - prefix_len: 64, - }; - - generate_ipv6_addr(LINK_LOCAL_PREFIX, mac) -} - -/// An IP subnet with a network prefix and prefix length. -#[derive(Debug, Eq, PartialEq, Copy, Deserialize, Serialize, Clone)] -#[serde(untagged, rename_all = "snake_case")] -pub enum Cidr { - V4(Ipv4Cidr), - V6(Ipv6Cidr), -} - -// NOTE: We don't derive JsonSchema. That's intended so that we can use an -// untagged enum for `Cidr`, and use this method to annotate schemars output -// for client-generators (e.g., progenitor) to use in generating a better -// client. -impl JsonSchema for Cidr { - fn schema_name() -> String { - "Cidr".to_string() - } - - fn json_schema( - gen: &mut schemars::gen::SchemaGenerator, - ) -> schemars::schema::Schema { - schemars::schema::SchemaObject { - subschemas: Some(Box::new(schemars::schema::SubschemaValidation { - one_of: Some(vec![ - label_schema("v4", gen.subschema_for::()), - label_schema("v6", gen.subschema_for::()), - ]), - ..Default::default() - })), - ..Default::default() - } - .into() - } -} - -// Insert another level of schema indirection in order to provide an -// additional title for a subschema. This allows generators to infer a better -// variant name for an "untagged" enum. -fn label_schema( - label: &str, - schema: schemars::schema::Schema, -) -> schemars::schema::Schema { - schemars::schema::SchemaObject { - metadata: Some( - schemars::schema::Metadata { - title: Some(label.to_string()), - ..Default::default() - } - .into(), - ), - subschemas: Some( - schemars::schema::SubschemaValidation { - all_of: Some(vec![schema]), - ..Default::default() - } - .into(), - ), - ..Default::default() - } - .into() -} - -impl fmt::Display for Cidr { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Cidr::V4(c) => write!(f, "{c}"), - Cidr::V6(c) => write!(f, "{c}"), - } - } -} - -impl FromStr for Cidr { - type Err = String; - - fn from_str(s: &str) -> Result { - if let Ok(cidr) = s.parse() { - Ok(Cidr::V4(cidr)) - } else if let Ok(cidr) = s.parse() { - Ok(Cidr::V6(cidr)) - } else { - Err(format!("Invalid CIDR: '{s}'")) - } - } -} - -/// An IPv4 subnet with prefix and prefix length. -#[derive(Debug, Eq, PartialEq, Clone, Copy)] -pub struct Ipv4Cidr { - pub prefix: Ipv4Addr, - pub prefix_len: u8, -} - -// NOTE -// -// We implement the serde and JsonSchema traits manually. 
This emitted schema is -// never actually used to generate the client, because we instead ask -// `progenitor` to use the "real" `common::network::Ipv4Cidr` in its place. We -// do however include _some_ schema for this type so that it shows up in the -// document. Rather than provide a regular expression for the format of an IPv4 -// or v6 CIDR block, which is complicated, we just provide a human-friendly -// format name of "ipv4cidr" or "ipv6cidr". -impl<'de> serde::Deserialize<'de> for Ipv4Cidr { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - String::deserialize(deserializer)?.as_str().parse().map_err( - |e: ::Err| { - ::custom(e) - }, - ) - } -} - -impl Serialize for Ipv4Cidr { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_str(&format!("{self}")) - } -} - -impl JsonSchema for Ipv4Cidr { - fn schema_name() -> String { - String::from("Ipv4Cidr") - } - - fn json_schema( - _: &mut schemars::gen::SchemaGenerator, - ) -> schemars::schema::Schema { - schemars::schema::SchemaObject { - metadata: Some(Box::new(schemars::schema::Metadata { - title: Some("An IPv4 subnet".to_string()), - description: Some( - "An IPv4 subnet, including prefix and subnet mask" - .to_string(), - ), - examples: vec!["192.168.1.0/24".into()], - ..Default::default() - })), - format: Some(String::from("ipv4cidr")), - instance_type: Some(schemars::schema::InstanceType::String.into()), - ..Default::default() - } - .into() - } -} - -impl Ipv4Cidr { - /// Return `true` if the IP address is within the network. - pub fn contains(&self, ipv4: Ipv4Addr) -> bool { - let prefix: u32 = self.prefix.into(); - let mask = ((1u32 << self.prefix_len) - 1) << (32 - self.prefix_len); - let addr: u32 = ipv4.into(); - - (addr & mask) == prefix - } -} - -impl fmt::Display for Ipv4Cidr { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}/{}", self.prefix, self.prefix_len) - } -} - -impl From for Ipv4Cidr { - fn from(x: u64) -> Self { - let prefix: u32 = (x >> 32) as u32; - let prefix_len: u8 = (x & 0xff) as u8; - Ipv4Cidr { prefix: prefix.into(), prefix_len } - } -} - -impl From for u64 { - fn from(x: Ipv4Cidr) -> Self { - let prefix: u32 = x.prefix.into(); - (u64::from(prefix) << 32) | u64::from(x.prefix_len) - } -} - -impl From<&Ipv4Cidr> for u64 { - fn from(x: &Ipv4Cidr) -> Self { - (*x).into() - } -} - -impl FromStr for Ipv4Cidr { - type Err = String; - - fn from_str(s: &str) -> Result { - let err = || Err(format!("Invalid IPv4 CIDR: '{s}'")); - let Some((maybe_prefix, maybe_prefix_len)) = s.split_once('/') else { - return err(); - }; - let Ok(prefix) = maybe_prefix.parse() else { - return err(); - }; - let Ok(prefix_len) = maybe_prefix_len.parse() else { - return err(); - }; - if prefix_len <= 32 { - Ok(Ipv4Cidr { prefix, prefix_len }) - } else { - err() - } - } -} - -impl From for Cidr { - fn from(cidr: Ipv4Cidr) -> Self { - Cidr::V4(cidr) - } -} - -impl TryFrom for Ipv4Cidr { - type Error = &'static str; - - fn try_from(cidr: Cidr) -> Result { - match cidr { - Cidr::V4(c) => Ok(c), - _ => Err("not a v4 CIDR"), - } - } -} - -/// An IPv6 subnet with prefix and prefix length. -#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] -pub struct Ipv6Cidr { - pub prefix: Ipv6Addr, - pub prefix_len: u8, -} - -// NOTE: See above about why we manually implement serialization and JsonSchema. 
-impl<'de> serde::Deserialize<'de> for Ipv6Cidr { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - String::deserialize(deserializer)?.parse().map_err( - |e: ::Err| { - ::custom(e) - }, - ) - } -} - -impl Serialize for Ipv6Cidr { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_str(&format!("{self}")) - } -} -impl JsonSchema for Ipv6Cidr { - fn schema_name() -> String { - String::from("Ipv6Cidr") - } - - fn json_schema( - _: &mut schemars::gen::SchemaGenerator, - ) -> schemars::schema::Schema { - schemars::schema::SchemaObject { - metadata: Some(Box::new(schemars::schema::Metadata { - title: Some("An IPv6 subnet".to_string()), - description: Some( - "An IPv6 subnet, including prefix and subnet mask" - .to_string(), - ), - examples: vec!["fe80::/10".into()], - ..Default::default() - })), - format: Some(String::from("ipv6cidr")), - instance_type: Some(schemars::schema::InstanceType::String.into()), - ..Default::default() - } - .into() - } -} - -impl Ipv6Cidr { - /// Return `true` if the address is within the subnet. - pub fn contains(&self, ipv6: Ipv6Addr) -> bool { - let prefix: u128 = self.prefix.into(); - let mask = ((1u128 << self.prefix_len) - 1) << (128 - self.prefix_len); - let addr: u128 = ipv6.into(); - - (addr & mask) == prefix - } -} - -impl Ord for Ipv6Cidr { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - match self.prefix.cmp(&other.prefix) { - std::cmp::Ordering::Equal => self.prefix_len.cmp(&other.prefix_len), - o => o, - } - } -} - -impl PartialOrd for Ipv6Cidr { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl fmt::Display for Ipv6Cidr { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}/{}", self.prefix, self.prefix_len) - } -} - -impl FromStr for Ipv6Cidr { - type Err = String; - - fn from_str(s: &str) -> Result { - let err = || Err(format!("Invalid IPv6 CIDR: '{s}'")); - let Some((maybe_prefix, maybe_prefix_len)) = s.split_once('/') else { - return err(); - }; - let Ok(prefix) = maybe_prefix.parse() else { - return err(); - }; - let Ok(prefix_len) = maybe_prefix_len.parse() else { - return err(); - }; - if prefix_len <= 128 { - Ok(Ipv6Cidr { prefix, prefix_len }) - } else { - err() - } - } -} - -impl TryFrom for Ipv6Cidr { - type Error = &'static str; - - fn try_from(cidr: Cidr) -> Result { - match cidr { - Cidr::V6(c) => Ok(c), - _ => Err("not a v6 CIDR"), - } - } -} - -impl From for Cidr { - fn from(cidr: Ipv6Cidr) -> Self { - Cidr::V6(cidr) - } -} - -/// An EUI-48 MAC address, used for layer-2 addressing. -#[derive(Copy, Deserialize, Serialize, JsonSchema, Clone, Eq, PartialEq)] -pub struct MacAddr { - a: [u8; 6], -} - -impl MacAddr { - /// Create a new MAC address from octets in network byte order. - pub fn new(o0: u8, o1: u8, o2: u8, o3: u8, o4: u8, o5: u8) -> MacAddr { - MacAddr { a: [o0, o1, o2, o3, o4, o5] } - } - - /// Create a new MAC address from a slice of bytes in network byte order. - /// - /// # Panics - /// - /// Panics if the slice is fewer than 6 octets. - /// - /// Note that any further octets are ignored. - pub fn from_slice(s: &[u8]) -> MacAddr { - MacAddr::new(s[0], s[1], s[2], s[3], s[4], s[5]) - } - - /// Convert `self` to an array of bytes in network byte order. - pub fn to_vec(self) -> Vec { - vec![self.a[0], self.a[1], self.a[2], self.a[3], self.a[4], self.a[5]] - } - - /// Return `true` if `self` is the null MAC address, all zeros. 
- pub fn is_null(self) -> bool { - const EMPTY: MacAddr = MacAddr { a: [0, 0, 0, 0, 0, 0] }; - - self == EMPTY - } - - /// Generate a random MAC address. - pub fn random() -> MacAddr { - let mut rng = rand::thread_rng(); - let mut m = MacAddr { a: [0; 6] }; - for octet in m.a.iter_mut() { - *octet = rng.gen(); - } - m - } - - /// Generate an EUI-64 ID from the mac address, following the process - /// desribed in RFC 2464, section 4. - pub fn to_eui64(self) -> [u8; 8] { - [ - self.a[0] ^ 0x2, - self.a[1], - self.a[2], - 0xff, - 0xfe, - self.a[3], - self.a[4], - self.a[5], - ] - } -} - -impl FromStr for MacAddr { - type Err = String; - - fn from_str(s: &str) -> Result { - let v: Vec<&str> = s.split(':').collect(); - - if v.len() != 6 { - return Err(format!("invalid mac address: {} octets", v.len())); - } - - let mut m = MacAddr { a: [0u8; 6] }; - for (i, octet) in v.iter().enumerate() { - match u8::from_str_radix(octet, 16) { - Ok(b) => m.a[i] = b, - Err(_) => { - return Err(format!( - "invalid mac address: bad octet '{octet}'", - )) - } - } - } - Ok(m) - } -} - -impl fmt::Display for MacAddr { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}", - self.a[0], self.a[1], self.a[2], self.a[3], self.a[4], self.a[5] - ) - } -} - -impl fmt::Debug for MacAddr { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}", - self.a[0], self.a[1], self.a[2], self.a[3], self.a[4], self.a[5] - ) - } -} - -impl From for u64 { - fn from(mac: MacAddr) -> u64 { - (u64::from(mac.a[0]) << 40) - | (u64::from(mac.a[1]) << 32) - | (u64::from(mac.a[2]) << 24) - | (u64::from(mac.a[3]) << 16) - | (u64::from(mac.a[4]) << 8) - | u64::from(mac.a[5]) - } -} - -impl From<&MacAddr> for u64 { - fn from(mac: &MacAddr) -> u64 { - From::from(*mac) - } -} - -impl From for MacAddr { - fn from(x: u64) -> Self { - MacAddr { - a: [ - ((x >> 40) & 0xff) as u8, - ((x >> 32) & 0xff) as u8, - ((x >> 24) & 0xff) as u8, - ((x >> 16) & 0xff) as u8, - ((x >> 8) & 0xff) as u8, - (x & 0xff) as u8, - ], - } - } -} - -impl Eq for PortSettings {} - -impl PartialEq for PortSettings { - fn eq(&self, other: &Self) -> bool { - self.links == other.links - } -} - -impl Eq for LinkSettings {} - -impl PartialEq for LinkSettings { - fn eq(&self, other: &Self) -> bool { - self.addrs == other.addrs && self.params == other.params - } -} - -impl Eq for LinkCreate {} - -impl PartialEq for LinkCreate { - fn eq(&self, other: &Self) -> bool { - self.autoneg == other.autoneg - && self.fec == other.fec - && self.kr == other.kr - && self.lane == other.lane - && self.speed == other.speed - } -} - -impl Eq for LinkId {} - -impl PartialEq for LinkId { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 - } -} diff --git a/clients/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs index e4a407ef3d..42eefaf8b5 100644 --- a/clients/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -51,6 +51,11 @@ progenitor::generate_api!( NetworkInterface = omicron_common::api::internal::shared::NetworkInterface, PortFec = omicron_common::api::internal::shared::PortFec, PortSpeed = omicron_common::api::internal::shared::PortSpeed, + RouterId = omicron_common::api::internal::shared::RouterId, + ResolvedVpcRoute = omicron_common::api::internal::shared::ResolvedVpcRoute, + ResolvedVpcRouteSet = omicron_common::api::internal::shared::ResolvedVpcRouteSet, + RouterTarget = 
omicron_common::api::internal::shared::RouterTarget, + RouterVersion = omicron_common::api::internal::shared::RouterVersion, SourceNatConfig = omicron_common::api::internal::shared::SourceNatConfig, SwitchLocation = omicron_common::api::external::SwitchLocation, TypedUuidForInstanceKind = omicron_uuid_kinds::InstanceUuid, @@ -484,6 +489,15 @@ impl From use omicron_common::api::internal::nexus::KnownArtifactKind; match s { + KnownArtifactKind::GimletRotBootloader => { + types::KnownArtifactKind::GimletRotBootloader + } + KnownArtifactKind::PscRotBootloader => { + types::KnownArtifactKind::PscRotBootloader + } + KnownArtifactKind::SwitchRotBootloader => { + types::KnownArtifactKind::SwitchRotBootloader + } KnownArtifactKind::GimletSp => types::KnownArtifactKind::GimletSp, KnownArtifactKind::GimletRot => types::KnownArtifactKind::GimletRot, KnownArtifactKind::Host => types::KnownArtifactKind::Host, diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 0af437bd99..2397cd15f8 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -305,8 +305,8 @@ impl JsonSchema for Name { "Names must begin with a lower case ASCII letter, be \ composed exclusively of lowercase ASCII, uppercase \ ASCII, numbers, and '-', and may not end with a '-'. \ - Names cannot be a UUID though they may contain a UUID." - .to_string(), + Names cannot be a UUID, but they may contain a UUID. \ + They can be at most 63 characters long.".to_string(), ), ..Default::default() })), @@ -1331,6 +1331,9 @@ pub enum RouteTarget { #[display("inetgw:{0}")] /// Forward traffic to an internet gateway InternetGateway(Name), + #[display("drop")] + /// Drop matching traffic + Drop, } /// A `RouteDestination` is used to match traffic with a routing rule, on the @@ -1404,14 +1407,13 @@ pub struct RouterRoute { /// common identifying metadata #[serde(flatten)] pub identity: IdentityMetadata, - /// The ID of the VPC Router to which the route belongs pub vpc_router_id: Uuid, - /// Describes the kind of router. Set at creation. `read-only` pub kind: RouterRouteKind, - + /// The location that matched packets should be forwarded to. pub target: RouteTarget, + /// Selects which traffic this routing rule will apply to. pub destination: RouteDestination, } @@ -1976,6 +1978,11 @@ pub struct InstanceNetworkInterface { /// True if this interface is the primary for the instance to which it's /// attached. pub primary: bool, + + /// A set of additional networks that this interface may send and + /// receive traffic on. + #[serde(default)] + pub transit_ips: Vec, } #[derive( diff --git a/common/src/api/internal/nexus.rs b/common/src/api/internal/nexus.rs index b4a73f4168..d82d9a980a 100644 --- a/common/src/api/internal/nexus.rs +++ b/common/src/api/internal/nexus.rs @@ -273,16 +273,11 @@ pub struct UpdateArtifactId { // // 1. Add it here. // -// 2. Add the new kind to /{nexus-client,sled-agent-client}/lib.rs. +// 2. Add the new kind to /clients/src/lib.rs. // The mapping from `UpdateArtifactKind::*` to `types::UpdateArtifactKind::*` // must be left as a `todo!()` for now; `types::UpdateArtifactKind` will not // be updated with the new variant until step 5 below. // -// 3. Add it to the sql database schema under (CREATE TYPE -// omicron.public.update_artifact_kind). -// -// TODO: After omicron ships this would likely involve a DB migration. -// // 4. 
Add the new kind and the mapping to its `update_artifact_kind` to // /nexus/db-model/src/update_artifact.rs // @@ -324,6 +319,7 @@ pub enum KnownArtifactKind { // Sled Artifacts GimletSp, GimletRot, + GimletRotBootloader, Host, Trampoline, ControlPlane, @@ -331,10 +327,12 @@ pub enum KnownArtifactKind { // PSC Artifacts PscSp, PscRot, + PscRotBootloader, // Switch Artifacts SwitchSp, SwitchRot, + SwitchRotBootloader, } impl KnownArtifactKind { diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index 3d710fc952..884b4dc165 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -6,13 +6,13 @@ use crate::{ address::NUM_SOURCE_NAT_PORTS, - api::external::{self, BfdMode, ImportExportPolicy, Name}, + api::external::{self, BfdMode, ImportExportPolicy, Name, Vni}, }; use oxnet::{IpNet, Ipv4Net, Ipv6Net}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, fmt, net::{IpAddr, Ipv4Addr, Ipv6Addr}, str::FromStr, @@ -50,13 +50,15 @@ pub enum NetworkInterfaceKind { pub struct NetworkInterface { pub id: Uuid, pub kind: NetworkInterfaceKind, - pub name: external::Name, + pub name: Name, pub ip: IpAddr, pub mac: external::MacAddr, pub subnet: IpNet, - pub vni: external::Vni, + pub vni: Vni, pub primary: bool, pub slot: u8, + #[serde(default)] + pub transit_ips: Vec, } /// An IP address and port range used for source NAT, i.e., making @@ -624,6 +626,82 @@ impl TryFrom<&[ipnetwork::IpNetwork]> for IpAllowList { } } +/// A VPC route resolved into a concrete target. +#[derive( + Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, +)] +pub struct ResolvedVpcRoute { + pub dest: IpNet, + pub target: RouterTarget, +} + +/// The target for a given router entry. +#[derive( + Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, +)] +#[serde(tag = "type", rename_all = "snake_case", content = "value")] +pub enum RouterTarget { + Drop, + InternetGateway, + Ip(IpAddr), + VpcSubnet(IpNet), +} + +/// Information on the current parent router (and version) of a route set +/// according to the control plane. +#[derive( + Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, +)] +pub struct RouterVersion { + pub router_id: Uuid, + pub version: u64, +} + +impl RouterVersion { + /// Return whether a new route set should be applied over the current + /// values. + /// + /// This will occur when seeing a new version and a matching parent, + /// or a new parent router on the control plane. + pub fn is_replaced_by(&self, other: &Self) -> bool { + (self.router_id != other.router_id) || self.version < other.version + } +} + +/// Identifier for a VPC and/or subnet. +#[derive( + Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, +)] +pub struct RouterId { + pub vni: Vni, + pub kind: RouterKind, +} + +/// The scope of a set of VPC router rules. +#[derive( + Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, +)] +#[serde(tag = "type", rename_all = "snake_case", content = "subnet")] +pub enum RouterKind { + System, + Custom(IpNet), +} + +/// Version information for routes on a given VPC subnet. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq)] +pub struct ResolvedVpcRouteState { + pub id: RouterId, + pub version: Option, +} + +/// An updated set of routes for a given VPC and/or subnet. 
+#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq)] +pub struct ResolvedVpcRouteSet { + pub id: RouterId, + pub version: Option, + pub routes: HashSet, +} + #[cfg(test)] mod tests { use crate::api::internal::shared::AllowedSourceIps; diff --git a/common/src/update.rs b/common/src/update.rs index 9feff1f868..fc747cf16d 100644 --- a/common/src/update.rs +++ b/common/src/update.rs @@ -177,6 +177,12 @@ impl ArtifactKind { /// These artifact kinds are not stored anywhere, but are derived from stored /// kinds and used as internal identifiers. impl ArtifactKind { + /// Gimlet root of trust bootloader slot image identifier. + /// + /// Derived from [`KnownArtifactKind::GimletRotBootloader`]. + pub const GIMLET_ROT_STAGE0: Self = + Self::from_static("gimlet_rot_bootloader"); + /// Gimlet root of trust A slot image identifier. /// /// Derived from [`KnownArtifactKind::GimletRot`]. @@ -189,6 +195,11 @@ impl ArtifactKind { pub const GIMLET_ROT_IMAGE_B: Self = Self::from_static("gimlet_rot_image_b"); + /// PSC root of trust stage0 image identifier. + /// + /// Derived from [`KnownArtifactKind::PscRotBootloader`]. + pub const PSC_ROT_STAGE0: Self = Self::from_static("psc_rot_bootloader"); + /// PSC root of trust A slot image identifier. /// /// Derived from [`KnownArtifactKind::PscRot`]. @@ -199,6 +210,12 @@ impl ArtifactKind { /// Derived from [`KnownArtifactKind::PscRot`]. pub const PSC_ROT_IMAGE_B: Self = Self::from_static("psc_rot_image_b"); + /// Switch root of trust A slot image identifier. + /// + /// Derived from [`KnownArtifactKind::SwitchRotBootloader`]. + pub const SWITCH_ROT_STAGE0: Self = + Self::from_static("switch_rot_bootloader"); + /// Switch root of trust A slot image identifier. /// /// Derived from [`KnownArtifactKind::SwitchRot`]. diff --git a/common/tests/output/pagination-schema.txt b/common/tests/output/pagination-schema.txt index 436e614994..9da2fffc26 100644 --- a/common/tests/output/pagination-schema.txt +++ b/common/tests/output/pagination-schema.txt @@ -135,7 +135,7 @@ schema for pagination parameters: page selector, scan by name only "definitions": { "Name": { "title": "A name unique within the parent collection", - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.", "type": "string", "maxLength": 63, "minLength": 1, @@ -224,7 +224,7 @@ schema for pagination parameters: page selector, scan by name or id "definitions": { "Name": { "title": "A name unique within the parent collection", - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. 
They can be at most 63 characters long.", "type": "string", "maxLength": 63, "minLength": 1, diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index 09ae82b5d9..0b1c6c77f4 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -28,6 +28,7 @@ use nexus_client::types::SledSelector; use nexus_client::types::UninitializedSledId; use nexus_db_queries::db::lookup::LookupPath; use nexus_types::deployment::Blueprint; +use nexus_types::internal_api::background::RegionReplacementDriverStatus; use nexus_types::inventory::BaseboardId; use omicron_uuid_kinds::CollectionUuid; use omicron_uuid_kinds::GenericUuid; @@ -1049,6 +1050,38 @@ fn print_task_details(bgtask: &BackgroundTask, details: &serde_json::Value) { ); } }; + } else if name == "region_replacement_driver" { + match serde_json::from_value::( + details.clone(), + ) { + Err(error) => eprintln!( + "warning: failed to interpret task details: {:?}: {:?}", + error, details + ), + + Ok(status) => { + println!( + " number of region replacement drive sagas started ok: {}", + status.drive_invoked_ok.len() + ); + for line in &status.drive_invoked_ok { + println!(" > {line}"); + } + + println!( + " number of region replacement finish sagas started ok: {}", + status.finish_invoked_ok.len() + ); + for line in &status.finish_invoked_ok { + println!(" > {line}"); + } + + println!(" number of errors: {}", status.errors.len()); + for line in &status.errors { + println!(" > {line}"); + } + } + }; } else { println!( "warning: unknown background task: {:?} \ diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index 174ffe5e3e..252313e6c8 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -110,6 +110,10 @@ task: "region_replacement" detects if a region requires replacing and begins the process +task: "region_replacement_driver" + drive region replacements forward to completion + + task: "service_firewall_rule_propagation" propagates VPC firewall rules for Omicron services with external network connectivity @@ -127,6 +131,10 @@ task: "v2p_manager" manages opte v2p mappings for vpc networking +task: "vpc_route_manager" + propagates updated VPC routes to all OPTE ports + + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT @@ -234,6 +242,10 @@ task: "region_replacement" detects if a region requires replacing and begins the process +task: "region_replacement_driver" + drive region replacements forward to completion + + task: "service_firewall_rule_propagation" propagates VPC firewall rules for Omicron services with external network connectivity @@ -251,6 +263,10 @@ task: "v2p_manager" manages opte v2p mappings for vpc networking +task: "vpc_route_manager" + propagates updated VPC routes to all OPTE ports + + --------------------------------------------- stderr: note: Nexus URL not specified. Will pick one from DNS. @@ -345,6 +361,10 @@ task: "region_replacement" detects if a region requires replacing and begins the process +task: "region_replacement_driver" + drive region replacements forward to completion + + task: "service_firewall_rule_propagation" propagates VPC firewall rules for Omicron services with external network connectivity @@ -362,6 +382,10 @@ task: "v2p_manager" manages opte v2p mappings for vpc networking +task: "vpc_route_manager" + propagates updated VPC routes to all OPTE ports + + --------------------------------------------- stderr: note: Nexus URL not specified. 
Will pick one from DNS. diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 9f16c6026c..032a574c8e 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -311,6 +311,10 @@ task: "region_replacement" detects if a region requires replacing and begins the process +task: "region_replacement_driver" + drive region replacements forward to completion + + task: "service_firewall_rule_propagation" propagates VPC firewall rules for Omicron services with external network connectivity @@ -328,6 +332,10 @@ task: "v2p_manager" manages opte v2p mappings for vpc networking +task: "vpc_route_manager" + propagates updated VPC routes to all OPTE ports + + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ @@ -505,6 +513,15 @@ task: "region_replacement" number of region replacements started ok: 0 number of region replacement start errors: 0 +task: "region_replacement_driver" + configured period: every 30s + currently executing: no + last completed activation: , triggered by an explicit signal + started at (s ago) and ran for ms + number of region replacement drive sagas started ok: 0 + number of region replacement finish sagas started ok: 0 + number of errors: 0 + task: "service_firewall_rule_propagation" configured period: every 5m currently executing: no @@ -532,6 +549,13 @@ task: "v2p_manager" started at (s ago) and ran for ms warning: unknown background task: "v2p_manager" (don't know how to interpret details: Object {}) +task: "vpc_route_manager" + configured period: every 30s + currently executing: no + last completed activation: , triggered by an explicit signal + started at (s ago) and ran for ms +warning: unknown background task: "vpc_route_manager" (don't know how to interpret details: Object {}) + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index 563d23d6f3..b7fe7bdf7f 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -290,6 +290,7 @@ Options: for discretionary services) - query-during-inventory: Sleds whose sled agents should be queried for inventory - reservation-create: Sleds on which reservations can be created + - v2p-mapping: Sleds which should be sent OPTE V2P mappings - vpc-firewall: Sleds which should be sent VPC firewall rules --log-level diff --git a/dev-tools/releng/src/hubris.rs b/dev-tools/releng/src/hubris.rs index 685a729a9f..f46af4bfaf 100644 --- a/dev-tools/releng/src/hubris.rs +++ b/dev-tools/releng/src/hubris.rs @@ -14,12 +14,17 @@ use omicron_common::api::external::SemverVersion; use omicron_common::api::internal::nexus::KnownArtifactKind; use semver::Version; use serde::Deserialize; +use slog::warn; +use slog::Logger; use tufaceous_lib::assemble::DeserializedArtifactData; use tufaceous_lib::assemble::DeserializedArtifactSource; use tufaceous_lib::assemble::DeserializedFileArtifactSource; use tufaceous_lib::assemble::DeserializedManifest; +use crate::RETRY_ATTEMPTS; + pub(crate) async fn fetch_hubris_artifacts( + logger: Logger, base_url: &'static str, client: reqwest::Client, manifest_list: Utf8PathBuf, @@ -43,7 +48,7 @@ pub(crate) async fn fetch_hubris_artifacts( for line in fs::read_to_string(manifest_list).await?.lines() { if let Some(hash) = line.split_whitespace().next() { - let data = fetch_hash(base_url, &client, hash).await?; + let data = 
fetch_hash(&logger, base_url, &client, hash).await?; let str = String::from_utf8(data).with_context(|| { format!("hubris artifact manifest {} was not UTF-8", hash) })?; @@ -85,7 +90,9 @@ pub(crate) async fn fetch_hubris_artifacts( }, ); for hash in hashes { - let data = fetch_hash(base_url, &client, &hash).await?; + let data = + fetch_hash(&logger, base_url, &client, &hash) + .await?; fs::write(output_dir.join(zip!(hash)), data).await?; } } @@ -102,21 +109,39 @@ pub(crate) async fn fetch_hubris_artifacts( } async fn fetch_hash( + logger: &Logger, base_url: &'static str, client: &reqwest::Client, hash: &str, ) -> Result> { - client - .get(format!("{}/artifact/{}", base_url, hash)) - .send() - .and_then(|response| response.json()) - .await - .with_context(|| { - format!( - "failed to fetch hubris artifact {} from {}", - hash, base_url - ) - }) + let url = format!("{}/artifact/{}", base_url, hash); + for attempt in 1..=RETRY_ATTEMPTS { + let result = client + .get(&url) + .send() + .and_then(|response| { + futures::future::ready(response.error_for_status()) + }) + .and_then(|response| response.json()) + .await + .with_context(|| { + format!( + "failed to fetch hubris artifact {} from {}", + hash, base_url + ) + }); + match result { + Ok(data) => return Ok(data), + Err(err) => { + if attempt == RETRY_ATTEMPTS { + return Err(err); + } else { + warn!(logger, "fetching {} failed, retrying: {}", url, err); + } + } + } + } + unreachable!(); } // These structs are similar to `DeserializeManifest` and friends from diff --git a/dev-tools/releng/src/main.rs b/dev-tools/releng/src/main.rs index 9bb0cd33bb..1bd3b69ac9 100644 --- a/dev-tools/releng/src/main.rs +++ b/dev-tools/releng/src/main.rs @@ -43,6 +43,8 @@ use crate::job::Jobs; /// the future. const BASE_VERSION: Version = Version::new(9, 0, 0); +const RETRY_ATTEMPTS: usize = 3; + #[derive(Debug, Clone, Copy)] enum InstallMethod { /// Unpack the tarball to `/opt/oxide/`, and install @@ -54,13 +56,14 @@ enum InstallMethod { } /// Packages to install or bundle in the host OS image. -const HOST_IMAGE_PACKAGES: [(&str, InstallMethod); 7] = [ +const HOST_IMAGE_PACKAGES: [(&str, InstallMethod); 8] = [ ("mg-ddm-gz", InstallMethod::Install), ("omicron-sled-agent", InstallMethod::Install), ("overlay", InstallMethod::Bundle), ("oxlog", InstallMethod::Install), ("propolis-server", InstallMethod::Bundle), ("pumpkind-gz", InstallMethod::Install), + ("crucible-dtrace", InstallMethod::Install), ("switch-asic", InstallMethod::Bundle), ]; /// Packages to install or bundle in the recovery (trampoline) OS image. 
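// The fetch_hash rewrite above wraps each artifact download in a bounded
// retry loop: every failure before the last attempt is logged and retried,
// and only the final error propagates. A generic, synchronous sketch of
// that shape (the helper name and the use of eprintln! instead of slog are
// illustrative assumptions, not code from this change):
fn with_retries<T, E: std::fmt::Display>(
    attempts: usize,
    mut op: impl FnMut() -> Result<T, E>,
) -> Result<T, E> {
    for attempt in 1..=attempts {
        match op() {
            Ok(value) => return Ok(value),
            // Give up only once the final attempt has failed.
            Err(err) if attempt == attempts => return Err(err),
            Err(err) => eprintln!("attempt {attempt} failed, retrying: {err}"),
        }
    }
    unreachable!("with_retries requires at least one attempt")
}

fn main() {
    let mut calls = 0;
    let result: Result<u32, &str> = with_retries(3, || {
        calls += 1;
        if calls < 3 {
            Err("transient failure")
        } else {
            Ok(calls)
        }
    });
    assert_eq!(result, Ok(3));
}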
@@ -234,7 +237,8 @@ async fn main() -> Result<()> { let client = reqwest::ClientBuilder::new() .connect_timeout(Duration::from_secs(15)) - .timeout(Duration::from_secs(15)) + .timeout(Duration::from_secs(120)) + .tcp_keepalive(Duration::from_secs(60)) .build() .context("failed to build reqwest client")?; @@ -565,6 +569,7 @@ async fn main() -> Result<()> { jobs.push( format!("hubris-{}", name), hubris::fetch_hubris_artifacts( + logger.clone(), base_url, client.clone(), WORKSPACE_DIR.join(format!("tools/permslip_{}", name)), diff --git a/dev-tools/xtask/Cargo.toml b/dev-tools/xtask/Cargo.toml index 9268eafc3e..acf225cd9d 100644 --- a/dev-tools/xtask/Cargo.toml +++ b/dev-tools/xtask/Cargo.toml @@ -17,7 +17,6 @@ flate2.workspace = true futures.workspace = true fs-err.workspace = true macaddr.workspace = true -md5 = "0.7.0" reqwest = { workspace = true, features = [ "default-tls" ] } serde.workspace = true sha2.workspace = true diff --git a/dev-tools/xtask/src/check_workspace_deps.rs b/dev-tools/xtask/src/check_workspace_deps.rs index 76e405ce1a..73d5643ffb 100644 --- a/dev-tools/xtask/src/check_workspace_deps.rs +++ b/dev-tools/xtask/src/check_workspace_deps.rs @@ -8,7 +8,7 @@ use anyhow::{bail, Context, Result}; use camino::Utf8Path; use cargo_toml::{Dependency, Manifest}; use fs_err as fs; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; const WORKSPACE_HACK_PACKAGE_NAME: &str = "omicron-workspace-hack"; @@ -116,6 +116,53 @@ pub fn run_cmd() -> Result<()> { nerrors += 1; } + // Check that `default-members` is configured correctly. + let non_default = workspace + .packages + .iter() + .filter_map(|package| { + [ + // Including xtask causes hakari to not work as well and build + // times to be longer (omicron#4392). + "xtask", + // The tests here should not be run by default, as they require + // a running control plane. + "end-to-end-tests", + ] + .contains(&package.name.as_str()) + .then_some(&package.id) + }) + .collect::>(); + let members = workspace.workspace_members.iter().collect::>(); + let default_members = + workspace.workspace_default_members.iter().collect::>(); + for package in members.difference(&default_members) { + if !non_default.contains(package) { + eprintln!( + "error: package {:?} not in default-members", + package.repr + ); + nerrors += 1; + } + } + + let mut seen_bins = BTreeSet::new(); + for package in &workspace.packages { + if workspace.workspace_members.contains(&package.id) { + for target in &package.targets { + if target.is_bin() { + if !seen_bins.insert(&target.name) { + eprintln!( + "error: bin target {:?} seen multiple times", + target.name + ); + nerrors += 1; + } + } + } + } + } + eprintln!( "check-workspace-deps: errors: {}, warnings: {}", nerrors, nwarnings diff --git a/dev-tools/xtask/src/clippy.rs b/dev-tools/xtask/src/clippy.rs index 8c454fdebf..7924a05574 100644 --- a/dev-tools/xtask/src/clippy.rs +++ b/dev-tools/xtask/src/clippy.rs @@ -42,6 +42,7 @@ pub fn run_cmd(args: ClippyArgs) -> Result<()> { command // Make sure we check everything. .arg("--all-targets") + .arg("--workspace") .arg("--") // For a list of lints, see // https://rust-lang.github.io/rust-clippy/master. 
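// Both HTTP call sites touched in this change (the releng client above and
// the xtask downloader in the next file) move from bare reqwest::get to a
// single client with explicit connect/read timeouts and TCP keepalive,
// cached so every download reuses the same connection pool. A standalone
// sketch of that shared-client pattern; the values follow the releng hunk
// above and assume reqwest is available as a dependency.
use std::sync::OnceLock;
use std::time::Duration;

fn shared_client() -> &'static reqwest::Client {
    static CLIENT: OnceLock<reqwest::Client> = OnceLock::new();
    CLIENT.get_or_init(|| {
        reqwest::ClientBuilder::new()
            .connect_timeout(Duration::from_secs(15))
            .timeout(Duration::from_secs(120))
            .tcp_keepalive(Duration::from_secs(60))
            .build()
            .expect("failed to build reqwest client")
    })
}

fn main() {
    // Repeated lookups hand back the same cached client (and its pool).
    assert!(std::ptr::eq(shared_client(), shared_client()));
}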
diff --git a/dev-tools/xtask/src/download.rs b/dev-tools/xtask/src/download.rs index ce227b7c4d..2790a638a7 100644 --- a/dev-tools/xtask/src/download.rs +++ b/dev-tools/xtask/src/download.rs @@ -15,6 +15,8 @@ use slog::{info, o, warn, Drain, Logger}; use std::collections::{BTreeSet, HashMap}; use std::io::Write; use std::os::unix::fs::PermissionsExt; +use std::sync::OnceLock; +use std::time::Duration; use strum::EnumIter; use strum::IntoEnumIterator; use tar::Archive; @@ -23,6 +25,7 @@ use tokio::process::Command; const BUILDOMAT_URL: &'static str = "https://buildomat.eng.oxide.computer/public/file"; +const RETRY_ATTEMPTS: usize = 3; /// What is being downloaded? #[derive( @@ -218,7 +221,7 @@ async fn get_values_from_file( .context("Failed to read {path}")?; for line in content.lines() { let line = line.trim(); - let Some((key, value)) = line.split_once("=") else { + let Some((key, value)) = line.split_once('=') else { continue; }; let value = value.trim_matches('"'); @@ -236,7 +239,17 @@ async fn get_values_from_file( /// /// Writes the response to the file as it is received. async fn streaming_download(url: &str, path: &Utf8Path) -> Result<()> { - let mut response = reqwest::get(url).await?; + static CLIENT: OnceLock = OnceLock::new(); + + let client = CLIENT.get_or_init(|| { + reqwest::ClientBuilder::new() + .timeout(Duration::from_secs(3600)) + .tcp_keepalive(Duration::from_secs(60)) + .connect_timeout(Duration::from_secs(15)) + .build() + .unwrap() + }); + let mut response = client.get(url).send().await?.error_for_status()?; let mut tarball = tokio::fs::File::create(&path).await?; while let Some(chunk) = response.chunk().await? { tarball.write_all(chunk.as_ref()).await?; @@ -244,23 +257,6 @@ async fn streaming_download(url: &str, path: &Utf8Path) -> Result<()> { Ok(()) } -/// Returns the hex, lowercase md5 checksum of a file at `path`. -async fn md5_checksum(path: &Utf8Path) -> Result { - let mut buf = vec![0u8; 65536]; - let mut file = tokio::fs::File::open(path).await?; - let mut ctx = md5::Context::new(); - loop { - let n = file.read(&mut buf).await?; - if n == 0 { - break; - } - ctx.write_all(&buf[0..n])?; - } - - let digest = ctx.compute(); - Ok(format!("{digest:x}")) -} - /// Returns the hex, lowercase sha2 checksum of a file at `path`. 
async fn sha2_checksum(path: &Utf8Path) -> Result { let mut buf = vec![0u8; 65536]; @@ -369,14 +365,12 @@ async fn set_permissions(path: &Utf8Path, mode: u32) -> Result<()> { } enum ChecksumAlgorithm { - Md5, Sha2, } impl ChecksumAlgorithm { async fn checksum(&self, path: &Utf8Path) -> Result { match self { - ChecksumAlgorithm::Md5 => md5_checksum(path).await, ChecksumAlgorithm::Sha2 => sha2_checksum(path).await, } } @@ -410,8 +404,22 @@ async fn download_file_and_verify( }; if do_download { - info!(log, "Downloading {path}"); - streaming_download(&url, &path).await?; + for attempt in 1..=RETRY_ATTEMPTS { + info!( + log, + "Downloading {path} (attempt {attempt}/{RETRY_ATTEMPTS})" + ); + match streaming_download(&url, &path).await { + Ok(()) => break, + Err(err) => { + if attempt == RETRY_ATTEMPTS { + return Err(err); + } else { + warn!(log, "Download failed, retrying: {err}"); + } + } + } + } } let observed_checksum = algorithm.checksum(&path).await?; @@ -432,7 +440,7 @@ impl<'a> Downloader<'a> { let checksums_path = self.versions_dir.join("clickhouse_checksums"); let [checksum] = get_values_from_file( - [&format!("CIDL_MD5_{}", os.env_name())], + [&format!("CIDL_SHA256_{}", os.env_name())], &checksums_path, ) .await?; @@ -464,7 +472,7 @@ impl<'a> Downloader<'a> { &self.log, &tarball_path, &tarball_url, - ChecksumAlgorithm::Md5, + ChecksumAlgorithm::Sha2, &checksum, ) .await?; @@ -787,19 +795,15 @@ impl<'a> Downloader<'a> { let destination_dir = self.output_dir.join("npuzone"); tokio::fs::create_dir_all(&destination_dir).await?; - let repo = "oxidecomputer/softnpu"; + let checksums_path = self.versions_dir.join("softnpu_version"); + let [commit, sha2] = + get_values_from_file(["COMMIT", "SHA2"], &checksums_path).await?; - // TODO: This should probably live in a separate file, but - // at the moment we're just building parity with - // "ci_download_softnpu_machinery". - let commit = "3203c51cf4473d30991b522062ac0df2e045c2f2"; + let repo = "oxidecomputer/softnpu"; let filename = "npuzone"; let base_url = format!("{BUILDOMAT_URL}/{repo}/image/{commit}"); let artifact_url = format!("{base_url}/{filename}"); - let sha2_url = format!("{base_url}/{filename}.sha256.txt"); - let sha2 = reqwest::get(sha2_url).await?.text().await?; - let sha2 = sha2.trim(); let path = destination_dir.join(filename); download_file_and_verify( diff --git a/dev-tools/xtask/src/virtual_hardware.rs b/dev-tools/xtask/src/virtual_hardware.rs index 5384433f55..d28c3d9037 100644 --- a/dev-tools/xtask/src/virtual_hardware.rs +++ b/dev-tools/xtask/src/virtual_hardware.rs @@ -235,9 +235,7 @@ fn unload_xde_driver() -> Result<()> { .context("Invalid modinfo output")? .lines() .find_map(|line| { - let mut cols = line.trim().splitn(2, ' '); - let id = cols.next()?; - let desc = cols.next()?; + let (id, desc) = line.trim().split_once(' ')?; if !desc.contains("xde") { return None; } @@ -419,7 +417,7 @@ fn run_scadm_command(args: Vec<&str>) -> Result { for arg in &args { cmd.arg(arg); } - Ok(execute(cmd)?) + execute(cmd) } fn default_gateway_ip() -> Result { @@ -497,8 +495,8 @@ struct SledAgentConfig { impl SledAgentConfig { fn read(path: &Utf8Path) -> Result { let config = std::fs::read_to_string(path)?; - Ok(toml::from_str(&config) - .context("Could not parse sled agent config as toml")?) 
+ toml::from_str(&config) + .context("Could not parse sled agent config as toml") } } @@ -605,7 +603,7 @@ fn swap_list() -> Result> { let mut cmd = Command::new(SWAP); cmd.arg("-l"); - let output = cmd.output().context(format!("Could not start swap"))?; + let output = cmd.output().context("Could not start swap")?; if !output.status.success() { if let Ok(stderr) = String::from_utf8(output.stderr) { // This is an exceptional case - if there are no swap devices, diff --git a/docs/how-to-run.adoc b/docs/how-to-run.adoc index 097467ef04..9bd99c23d3 100644 --- a/docs/how-to-run.adoc +++ b/docs/how-to-run.adoc @@ -173,7 +173,7 @@ Then install prerequisite software with the following script: [source,text] ---- -$ pfexec ./tools/install_prerequisites.sh +$ ./tools/install_prerequisites.sh ---- You need to do this step once per workspace and potentially again each time you fetch new changes. If the script reports any PATH problems, you'll need to correct those before proceeding. @@ -410,9 +410,9 @@ $ pfexec ./target/release/omicron-package install [WARNING] ==== -**Do not use `pfexec cargo run` directly**; it will cause files in `~/.cargo` and `target/` to be owned by root, which will cause problems down the road. +**Do not use `pfexec cargo run` directly**; it will cause files in `~/.cargo`, `out/`, and `target/` to be owned by root, which will cause problems down the road. -If you've done this already, and you wish to recover, run from the root of this repository `pfexec chown -R $USER:$(id -ng $USER) target ${CARGO_HOME:-~/.cargo}`. +If you've done this already, and you wish to recover, run from the root of this repository `pfexec chown -R $USER:$(id -ng $USER) out target ${CARGO_HOME:-~/.cargo}`. ==== This command installs an SMF service called `svc:/oxide/sled-agent:default`, which itself starts the other required services. This will take a few minutes. 
You can watch the progress by looking at the Sled Agent log: diff --git a/end-to-end-tests/src/helpers/icmp.rs b/end-to-end-tests/src/helpers/icmp.rs index 36b9bc5675..40256db8d9 100644 --- a/end-to-end-tests/src/helpers/icmp.rs +++ b/end-to-end-tests/src/helpers/icmp.rs @@ -150,7 +150,7 @@ impl Pinger4 { "{:.3}", (t.sum.as_micros() as f32 / 1000.0 - / t.rx_count as f32) + / f32::from(t.rx_count)) ) }, format!("{:.3}", (t.high.as_micros() as f32 / 1000.0)), diff --git a/env.sh b/env.sh index 74f3d1caf4..6a84c35902 100644 --- a/env.sh +++ b/env.sh @@ -4,11 +4,22 @@ # # See also: ./.envrc +OLD_SHELL_OPTS=$- set -o xtrace -OMICRON_WS="$(readlink -f $(dirname "${BASH_SOURCE[0]}"))" + +OMICRON_WS=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")") export PATH="$OMICRON_WS/out/cockroachdb/bin:$PATH" export PATH="$OMICRON_WS/out/clickhouse:$PATH" export PATH="$OMICRON_WS/out/dendrite-stub/bin:$PATH" export PATH="$OMICRON_WS/out/mgd/root/opt/oxide/mgd/bin:$PATH" -unset OMICRON_WS -set +o xtrace + +# if xtrace was set previously, do not unset it +case $OLD_SHELL_OPTS in + *x*) + unset OLD_SHELL_OPTS OMICRON_WS + ;; + *) + unset OLD_SHELL_OPTS OMICRON_WS + set +o xtrace + ;; +esac diff --git a/flake.lock b/flake.lock index 7c6acc0815..5a70a42881 100644 --- a/flake.lock +++ b/flake.lock @@ -1,23 +1,5 @@ { "nodes": { - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1705309234, - "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, "nixpkgs": { "locked": { "lastModified": 1712791164, @@ -42,17 +24,16 @@ }, "rust-overlay": { "inputs": { - "flake-utils": "flake-utils", "nixpkgs": [ "nixpkgs" ] }, "locked": { - "lastModified": 1712888034, - "narHash": "sha256-SmBeT3oxdwOzheSfxZmk+3xmv98Z3zlzjlnl9nBdOIE=", + "lastModified": 1719368303, + "narHash": "sha256-vhkKOUs9eOZgcPrA6wMw7a7J48pEjVuhzQfitVwVv1g=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "96fbdc73dec8eaa5a9d4a9b307b75c9a856e5dec", + "rev": "32415b22fd3b454e4a1385af64aa5cef9766ff4c", "type": "github" }, "original": { @@ -60,21 +41,6 @@ "repo": "rust-overlay", "type": "github" } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } } }, "root": "root", diff --git a/flake.nix b/flake.nix index 6828577403..831a0aaea2 100644 --- a/flake.nix +++ b/flake.nix @@ -255,12 +255,12 @@ let name = "clickhouse"; version = readVersionFile "${name}_version"; - # N.B. that unlike maghemite and dendrite, the Clickhouse hashes - # in `tools/clickhouse_checksums` are MD5 rather than SHA256, so we - # can't give Nix those hashes and must instead determine it ourselves. - # this means that we will have to update this SHA if the clickhouse - # version changes. 
- sha256 = "0wx8w9sdms5hsc9f835ivsissf15wjzdb9cvxr65xdi384i9pkzx"; + sha256 = + let + shaFile = builtins.readFile ./tools/${name}_checksums; + shas = lib.strings.splitString "\n" shaFile; + in + findSha shas "CIDL_SHA256_LINUX"; src = builtins.fetchurl { inherit sha256; diff --git a/illumos-utils/src/opte/firewall_rules.rs b/illumos-utils/src/opte/firewall_rules.rs index 1df0e7421a..4dcb390e9e 100644 --- a/illumos-utils/src/opte/firewall_rules.rs +++ b/illumos-utils/src/opte/firewall_rules.rs @@ -4,6 +4,7 @@ //! Convert Omicron VPC firewall rules to OPTE firewall rules. +use super::net_to_cidr; use crate::opte::params::VpcFirewallRule; use crate::opte::Vni; use macaddr::MacAddr6; @@ -18,11 +19,6 @@ use oxide_vpc::api::Filters; use oxide_vpc::api::FirewallAction; use oxide_vpc::api::FirewallRule; use oxide_vpc::api::IpAddr; -use oxide_vpc::api::IpCidr; -use oxide_vpc::api::Ipv4Cidr; -use oxide_vpc::api::Ipv4PrefixLen; -use oxide_vpc::api::Ipv6Cidr; -use oxide_vpc::api::Ipv6PrefixLen; use oxide_vpc::api::Ports; use oxide_vpc::api::ProtoFilter; use oxide_vpc::api::Protocol; @@ -68,21 +64,10 @@ impl FromVpcFirewallRule for VpcFirewallRule { HostIdentifier::Ip(IpNet::V4(net)) if net.is_host_net() => { Address::Ip(IpAddr::Ip4(net.addr().into())) } - HostIdentifier::Ip(IpNet::V4(net)) => { - Address::Subnet(IpCidr::Ip4(Ipv4Cidr::new( - net.addr().into(), - Ipv4PrefixLen::new(net.width()).unwrap(), - ))) - } HostIdentifier::Ip(IpNet::V6(net)) if net.is_host_net() => { Address::Ip(IpAddr::Ip6(net.addr().into())) } - HostIdentifier::Ip(IpNet::V6(net)) => { - Address::Subnet(IpCidr::Ip6(Ipv6Cidr::new( - net.addr().into(), - Ipv6PrefixLen::new(net.width()).unwrap(), - ))) - } + HostIdentifier::Ip(ip) => Address::Subnet(net_to_cidr(*ip)), HostIdentifier::Vpc(vni) => { Address::Vni(Vni::new(u32::from(*vni)).unwrap()) } diff --git a/illumos-utils/src/opte/mod.rs b/illumos-utils/src/opte/mod.rs index d06b6b26e5..d7fd96b0c0 100644 --- a/illumos-utils/src/opte/mod.rs +++ b/illumos-utils/src/opte/mod.rs @@ -18,15 +18,23 @@ mod port; mod port_manager; pub use firewall_rules::opte_firewall_rules; -pub use port::Port; -pub use port_manager::PortManager; -pub use port_manager::PortTicket; - use ipnetwork::IpNetwork; use macaddr::MacAddr6; +use omicron_common::api::internal::shared; pub use oxide_vpc::api::BoundaryServices; pub use oxide_vpc::api::DhcpCfg; +use oxide_vpc::api::IpCidr; +use oxide_vpc::api::Ipv4Cidr; +use oxide_vpc::api::Ipv4PrefixLen; +use oxide_vpc::api::Ipv6Cidr; +use oxide_vpc::api::Ipv6PrefixLen; +use oxide_vpc::api::RouterTarget; pub use oxide_vpc::api::Vni; +use oxnet::IpNet; +pub use port::Port; +pub use port_manager::PortCreateParams; +pub use port_manager::PortManager; +pub use port_manager::PortTicket; use std::net::IpAddr; /// Information about the gateway for an OPTE port @@ -63,3 +71,28 @@ impl Gateway { &self.ip } } + +/// Convert a nexus `IpNet` to an OPTE `IpCidr`. +fn net_to_cidr(net: IpNet) -> IpCidr { + match net { + IpNet::V4(net) => IpCidr::Ip4(Ipv4Cidr::new( + net.addr().into(), + Ipv4PrefixLen::new(net.width()).unwrap(), + )), + IpNet::V6(net) => IpCidr::Ip6(Ipv6Cidr::new( + net.addr().into(), + Ipv6PrefixLen::new(net.width()).unwrap(), + )), + } +} + +/// Convert a nexus `RouterTarget` to an OPTE `RouterTarget`. 
+fn router_target_opte(target: &shared::RouterTarget) -> RouterTarget { + use shared::RouterTarget::*; + match target { + Drop => RouterTarget::Drop, + InternetGateway => RouterTarget::InternetGateway, + Ip(ip) => RouterTarget::Ip((*ip).into()), + VpcSubnet(net) => RouterTarget::VpcSubnet(net_to_cidr(*net)), + } +} diff --git a/illumos-utils/src/opte/port.rs b/illumos-utils/src/opte/port.rs index 6fbb89c450..a692a02304 100644 --- a/illumos-utils/src/opte/port.rs +++ b/illumos-utils/src/opte/port.rs @@ -7,23 +7,30 @@ use crate::opte::Gateway; use crate::opte::Vni; use macaddr::MacAddr6; +use omicron_common::api::external; +use omicron_common::api::internal::shared::RouterId; +use omicron_common::api::internal::shared::RouterKind; +use oxnet::IpNet; use std::net::IpAddr; use std::sync::Arc; #[derive(Debug)] -struct PortInner { - // Name of the port as identified by OPTE - name: String, - // IP address within the VPC Subnet - ip: IpAddr, - // VPC-private MAC address - mac: MacAddr6, - // Emulated PCI slot for the guest NIC, passed to Propolis - slot: u8, - // Geneve VNI for the VPC - vni: Vni, - // Information about the virtual gateway, aka OPTE - gateway: Gateway, +pub struct PortData { + /// Name of the port as identified by OPTE + pub(crate) name: String, + /// IP address within the VPC Subnet + pub(crate) ip: IpAddr, + /// VPC-private MAC address + pub(crate) mac: MacAddr6, + /// Emulated PCI slot for the guest NIC, passed to Propolis + pub(crate) slot: u8, + /// Geneve VNI for the VPC + pub(crate) vni: Vni, + /// Subnet the port belong to within the VPC. + pub(crate) subnet: IpNet, + /// Information about the virtual gateway, aka OPTE + pub(crate) gateway: Gateway, + /// Name of the VNIC the OPTE port is bound to. // TODO-remove(#2932): Remove this once we can put Viona directly on top of an // OPTE port device. // @@ -33,7 +40,18 @@ struct PortInner { // https://github.com/oxidecomputer/opte/issues/178 for more details. This // can be changed back to a real VNIC when that is resolved, and the Drop // impl below can simplify to just call `drop(self.vnic)`. - vnic: String, + pub(crate) vnic: String, +} + +#[derive(Debug)] +struct PortInner(PortData); + +impl core::ops::Deref for PortInner { + type Target = PortData; + + fn deref(&self) -> &Self::Target { + &self.0 + } } #[cfg(target_os = "illumos")] @@ -83,26 +101,8 @@ pub struct Port { } impl Port { - pub fn new( - name: String, - ip: IpAddr, - mac: MacAddr6, - slot: u8, - vni: Vni, - gateway: Gateway, - vnic: String, - ) -> Self { - Self { - inner: Arc::new(PortInner { - name, - ip, - mac, - slot, - vni, - gateway, - vnic, - }), - } + pub fn new(data: PortData) -> Self { + Self { inner: Arc::new(PortInner(data)) } } pub fn ip(&self) -> &IpAddr { @@ -126,6 +126,10 @@ impl Port { &self.inner.vni } + pub fn subnet(&self) -> &IpNet { + &self.inner.subnet + } + pub fn vnic_name(&self) -> &str { &self.inner.vnic } @@ -133,4 +137,17 @@ impl Port { pub fn slot(&self) -> u8 { self.inner.slot } + + pub fn system_router_key(&self) -> RouterId { + // Unwrap safety: both of these VNI types represent validated u24s. 
+ let vni = external::Vni::try_from(self.vni().as_u32()).unwrap(); + RouterId { vni, kind: RouterKind::System } + } + + pub fn custom_router_key(&self) -> RouterId { + RouterId { + kind: RouterKind::Custom(*self.subnet()), + ..self.system_router_key() + } + } } diff --git a/illumos-utils/src/opte/port_manager.rs b/illumos-utils/src/opte/port_manager.rs index 726aa01a2a..984e3c55fa 100644 --- a/illumos-utils/src/opte/port_manager.rs +++ b/illumos-utils/src/opte/port_manager.rs @@ -7,6 +7,7 @@ use crate::opte::opte_firewall_rules; use crate::opte::params::VirtualNetworkInterfaceHost; use crate::opte::params::VpcFirewallRule; +use crate::opte::port::PortData; use crate::opte::Error; use crate::opte::Gateway; use crate::opte::Port; @@ -15,8 +16,15 @@ use ipnetwork::IpNetwork; use omicron_common::api::external; use omicron_common::api::internal::shared::NetworkInterface; use omicron_common::api::internal::shared::NetworkInterfaceKind; +use omicron_common::api::internal::shared::ResolvedVpcRoute; +use omicron_common::api::internal::shared::ResolvedVpcRouteSet; +use omicron_common::api::internal::shared::ResolvedVpcRouteState; +use omicron_common::api::internal::shared::RouterId; +use omicron_common::api::internal::shared::RouterTarget as ApiRouterTarget; +use omicron_common::api::internal::shared::RouterVersion; use omicron_common::api::internal::shared::SourceNatConfig; use oxide_vpc::api::AddRouterEntryReq; +use oxide_vpc::api::DelRouterEntryReq; use oxide_vpc::api::DhcpCfg; use oxide_vpc::api::ExternalIpCfg; use oxide_vpc::api::IpCfg; @@ -24,7 +32,7 @@ use oxide_vpc::api::IpCidr; use oxide_vpc::api::Ipv4Cfg; use oxide_vpc::api::Ipv6Cfg; use oxide_vpc::api::MacAddr; -use oxide_vpc::api::RouterTarget; +use oxide_vpc::api::RouterClass; use oxide_vpc::api::SNat4Cfg; use oxide_vpc::api::SNat6Cfg; use oxide_vpc::api::SetExternalIpsReq; @@ -34,6 +42,8 @@ use slog::error; use slog::info; use slog::Logger; use std::collections::BTreeMap; +use std::collections::HashMap; +use std::collections::HashSet; use std::net::IpAddr; use std::net::Ipv6Addr; use std::sync::atomic::AtomicU64; @@ -45,19 +55,30 @@ use uuid::Uuid; // Prefix used to identify xde data links. const XDE_LINK_PREFIX: &str = "opte"; +/// Stored routes (and usage count) for a given VPC/subnet. +#[derive(Debug, Clone)] +struct RouteSet { + version: Option, + routes: HashSet, + active_ports: usize, +} + #[derive(Debug)] struct PortManagerInner { log: Logger, - // Sequential identifier for each port on the system. + /// Sequential identifier for each port on the system. next_port_id: AtomicU64, - // IP address of the hosting sled on the underlay. + /// IP address of the hosting sled on the underlay. underlay_ip: Ipv6Addr, - // Map of all ports, keyed on the interface Uuid and its kind - // (which includes the Uuid of the parent instance or service) + /// Map of all ports, keyed on the interface Uuid and its kind + /// (which includes the Uuid of the parent instance or service) ports: Mutex>, + + /// Map of all current resolved routes. + routes: Mutex>, } impl PortManagerInner { @@ -70,6 +91,18 @@ impl PortManagerInner { } } +#[derive(Debug)] +/// Parameters needed to create and configure an OPTE port. +pub struct PortCreateParams<'a> { + pub nic: &'a NetworkInterface, + pub source_nat: Option, + pub ephemeral_ip: Option, + pub floating_ips: &'a [IpAddr], + pub firewall_rules: &'a [VpcFirewallRule], + pub dhcp_config: DhcpCfg, + pub is_service: bool, +} + /// The port manager controls all OPTE ports on a single host. 
#[derive(Debug, Clone)] pub struct PortManager { @@ -84,6 +117,7 @@ impl PortManager { next_port_id: AtomicU64::new(0), underlay_ip, ports: Mutex::new(BTreeMap::new()), + routes: Mutex::new(Default::default()), }); Self { inner } @@ -97,13 +131,18 @@ impl PortManager { #[cfg_attr(not(target_os = "illumos"), allow(unused_variables))] pub fn create_port( &self, - nic: &NetworkInterface, - source_nat: Option, - ephemeral_ip: Option, - floating_ips: &[IpAddr], - firewall_rules: &[VpcFirewallRule], - dhcp_config: DhcpCfg, + params: PortCreateParams, ) -> Result<(Port, PortTicket), Error> { + let PortCreateParams { + nic, + source_nat, + ephemeral_ip, + floating_ips, + firewall_rules, + dhcp_config, + is_service, + } = params; + let mac = *nic.mac; let vni = Vni::new(nic.vni).unwrap(); let subnet = IpNetwork::from(nic.subnet); @@ -319,15 +358,16 @@ impl PortManager { let (port, ticket) = { let mut ports = self.inner.ports.lock().unwrap(); let ticket = PortTicket::new(nic.id, nic.kind, self.inner.clone()); - let port = Port::new( - port_name.clone(), - nic.ip, + let port = Port::new(PortData { + name: port_name.clone(), + ip: nic.ip, mac, - nic.slot, + slot: nic.slot, vni, + subnet: nic.subnet, gateway, vnic, - ); + }); let old = ports.insert((nic.id, nic.kind), port.clone()); assert!( old.is_none(), @@ -338,57 +378,102 @@ impl PortManager { (port, ticket) }; - // Add a router entry for this interface's subnet, directing traffic to the - // VPC subnet. - let route = AddRouterEntryReq { - port_name: port_name.clone(), - dest: vpc_subnet, - target: RouterTarget::VpcSubnet(vpc_subnet), - }; - #[cfg(target_os = "illumos")] - hdl.add_router_entry(&route)?; - debug!( - self.inner.log, - "Added VPC Subnet router entry"; - "port_name" => &port_name, - "route" => ?route, - ); + // Check locally to see whether we have any routes from the + // control plane for this port already installed. If not, + // create a record to show that we're interested in receiving + // those routes. + let mut routes = self.inner.routes.lock().unwrap(); + let system_routes = + routes.entry(port.system_router_key()).or_insert_with(|| { + let mut routes = HashSet::new(); + + // Services do not talk to one another via OPTE, but do need + // to reach out over the Internet *before* nexus is up to give + // us real rules. The easiest bet is to instantiate these here. + if is_service { + routes.insert(ResolvedVpcRoute { + dest: "0.0.0.0/0".parse().unwrap(), + target: ApiRouterTarget::InternetGateway, + }); + routes.insert(ResolvedVpcRoute { + dest: "::/0".parse().unwrap(), + target: ApiRouterTarget::InternetGateway, + }); + } - // TODO-remove - // - // See https://github.com/oxidecomputer/omicron/issues/1336 - // - // This is another part of the workaround, allowing reply traffic from - // the guest back out. Normally, OPTE would drop such traffic at the - // router layer, as it has no route for that external IP address. This - // allows such traffic through. - // - // Note that this exact rule will eventually be included, since it's one - // of the default routing rules in the VPC System Router. However, that - // will likely be communicated in a different way, or could be modified, - // and this specific call should be removed in favor of sending the - // routing rules the control plane provides. - // - // This rule sends all traffic that has no better match to the gateway. 
- let dest = match vpc_subnet { - IpCidr::Ip4(_) => "0.0.0.0/0", - IpCidr::Ip6(_) => "::/0", + RouteSet { version: None, routes, active_ports: 0 } + }); + system_routes.active_ports += 1; + // Clone is needed to get borrowck on our side, sadly. + let system_routes = system_routes.clone(); + + let custom_routes = routes + .entry(port.custom_router_key()) + .or_insert_with(|| RouteSet { + version: None, + routes: HashSet::default(), + active_ports: 0, + }); + custom_routes.active_ports += 1; + + for (class, routes) in [ + (RouterClass::System, &system_routes), + (RouterClass::Custom, custom_routes), + ] { + for route in &routes.routes { + let route = AddRouterEntryReq { + class, + port_name: port_name.clone(), + dest: super::net_to_cidr(route.dest), + target: super::router_target_opte(&route.target), + }; + + #[cfg(target_os = "illumos")] + hdl.add_router_entry(&route)?; + + debug!( + self.inner.log, + "Added router entry"; + "port_name" => &port_name, + "route" => ?route, + ); + } + } + + // If there are any transit IPs set, allow them through. + // TODO: Currently set only in initial state. + // This, external IPs, and cfg'able state + // (DHCP?) are probably worth being managed by an RPW. + for block in &nic.transit_ips { + #[cfg(target_os = "illumos")] + { + use oxide_vpc::api::Direction; + + // In principle if this were an operation on an existing + // port, we would explicitly undo the In addition if the + // Out addition fails. + // However, failure here will just destroy the port + // outright -- this should only happen if an excessive + // number of rules are specified. + hdl.allow_cidr( + &port_name, + super::net_to_cidr(*block), + Direction::In, + )?; + hdl.allow_cidr( + &port_name, + super::net_to_cidr(*block), + Direction::Out, + )?; + } + + debug!( + self.inner.log, + "Added CIDR to in/out allowlist"; + "port_name" => &port_name, + "cidr" => ?block, + ); } - .parse() - .unwrap(); - let route = AddRouterEntryReq { - port_name: port_name.clone(), - dest, - target: RouterTarget::InternetGateway, - }; - #[cfg(target_os = "illumos")] - hdl.add_router_entry(&route)?; - debug!( - self.inner.log, - "Added default internet gateway route entry"; - "port_name" => &port_name, - "route" => ?route, - ); info!( self.inner.log, @@ -398,6 +483,122 @@ impl PortManager { Ok((port, ticket)) } + pub fn vpc_routes_list(&self) -> Vec { + let routes = self.inner.routes.lock().unwrap(); + routes + .iter() + .map(|(k, v)| ResolvedVpcRouteState { id: *k, version: v.version }) + .collect() + } + + pub fn vpc_routes_ensure( + &self, + new_routes: Vec, + ) -> Result<(), Error> { + let mut routes = self.inner.routes.lock().unwrap(); + let mut deltas = HashMap::new(); + for new in new_routes { + // Disregard any route information for a subnet we don't have. + let Some(old) = routes.get(&new.id) else { + continue; + }; + + // We have to handle subnet router changes, as well as + // spurious updates from multiple Nexus instances. + // If there's a UUID match, only update if vers increased, + // otherwise take the update verbatim (including loss of version). 
+ let (to_add, to_delete): (HashSet<_>, HashSet<_>) = + match (old.version, new.version) { + (Some(old_vers), Some(new_vers)) + if !old_vers.is_replaced_by(&new_vers) => + { + continue; + } + _ => ( + new.routes.difference(&old.routes).cloned().collect(), + old.routes.difference(&new.routes).cloned().collect(), + ), + }; + deltas.insert(new.id, (to_add, to_delete)); + + let active_ports = old.active_ports; + routes.insert( + new.id, + RouteSet { + version: new.version, + routes: new.routes, + active_ports, + }, + ); + } + + // Note: We're deliberately holding both locks here + // to prevent several nexuses computng and applying deltas + // out of order. + let ports = self.inner.ports.lock().unwrap(); + #[cfg(target_os = "illumos")] + let hdl = opte_ioctl::OpteHdl::open(opte_ioctl::OpteHdl::XDE_CTL)?; + + // Propagate deltas out to all ports. + for port in ports.values() { + let system_id = port.system_router_key(); + let system_delta = deltas.get(&system_id); + + let custom_id = port.custom_router_key(); + let custom_delta = deltas.get(&custom_id); + + #[cfg_attr(not(target_os = "illumos"), allow(unused_variables))] + for (class, delta) in [ + (RouterClass::System, system_delta), + (RouterClass::Custom, custom_delta), + ] { + let Some((to_add, to_delete)) = delta else { + continue; + }; + + for route in to_delete { + let route = DelRouterEntryReq { + class, + port_name: port.name().into(), + dest: super::net_to_cidr(route.dest), + target: super::router_target_opte(&route.target), + }; + + #[cfg(target_os = "illumos")] + hdl.del_router_entry(&route)?; + + debug!( + self.inner.log, + "Removed router entry"; + "port_name" => &port.name(), + "route" => ?route, + ); + } + + for route in to_add { + let route = AddRouterEntryReq { + class, + port_name: port.name().into(), + dest: super::net_to_cidr(route.dest), + target: super::router_target_opte(&route.target), + }; + + #[cfg(target_os = "illumos")] + hdl.add_router_entry(&route)?; + + debug!( + self.inner.log, + "Added router entry"; + "port_name" => &port.name(), + "route" => ?route, + ); + } + } + } + + Ok(()) + } + /// Ensure external IPs for an OPTE port are up to date. #[cfg_attr(not(target_os = "illumos"), allow(unused_variables))] pub fn external_ips_ensure( @@ -739,6 +940,29 @@ impl PortTicket { ); return Err(Error::ReleaseMissingPort(self.id, self.kind)); }; + drop(ports); + + // Cleanup the set of subnets we want to receive routes for. 
+ let mut routes = self.manager.routes.lock().unwrap(); + for key in [port.system_router_key(), port.custom_router_key()] { + let should_remove = routes + .get_mut(&key) + .map(|v| { + v.active_ports = v.active_ports.saturating_sub(1); + v.active_ports == 0 + }) + .unwrap_or_default(); + + if should_remove { + routes.remove(&key); + info!( + self.manager.log, + "Removed route set for subnet"; + "id" => ?&key, + ); + } + } + debug!( self.manager.log, "Removed OPTE port from manager"; diff --git a/installinator/Cargo.toml b/installinator/Cargo.toml index ebdb6269b7..c21c3f2ee2 100644 --- a/installinator/Cargo.toml +++ b/installinator/Cargo.toml @@ -57,5 +57,3 @@ tokio-stream.workspace = true [features] image-standard = [] -image-trampoline = [] -rack-topology-single-sled = [] diff --git a/installinator/src/hardware.rs b/installinator/src/hardware.rs index a48d816dc8..cc71cea5ee 100644 --- a/installinator/src/hardware.rs +++ b/installinator/src/hardware.rs @@ -31,7 +31,7 @@ impl Hardware { })?; let disks: Vec = - hardware.disks().into_iter().map(|disk| disk.into()).collect(); + hardware.disks().into_values().map(|disk| disk.into()).collect(); info!( log, "found gimlet hardware"; diff --git a/nexus-config/src/nexus_config.rs b/nexus-config/src/nexus_config.rs index 67acb5ec1b..a8c863298e 100644 --- a/nexus-config/src/nexus_config.rs +++ b/nexus-config/src/nexus_config.rs @@ -373,8 +373,10 @@ pub struct BackgroundTaskConfig { pub bfd_manager: BfdManagerConfig, /// configuration for the switch port settings manager task pub switch_port_settings_manager: SwitchPortSettingsManagerConfig, - /// configuration for region replacement task + /// configuration for region replacement starter task pub region_replacement: RegionReplacementConfig, + /// configuration for region replacement driver task + pub region_replacement_driver: RegionReplacementDriverConfig, /// configuration for instance watcher task pub instance_watcher: InstanceWatcherConfig, /// configuration for service VPC firewall propagation task @@ -564,6 +566,14 @@ pub struct AbandonedVmmReaperConfig { pub period_secs: Duration, } +#[serde_as] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct RegionReplacementDriverConfig { + /// period (in seconds) for periodic activations of this background task + #[serde_as(as = "DurationSeconds")] + pub period_secs: Duration, +} + /// Configuration for a nexus server #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct PackageConfig { @@ -801,6 +811,7 @@ mod test { sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 + region_replacement_driver.period_secs = 30 instance_watcher.period_secs = 30 service_firewall_propagation.period_secs = 300 v2p_mapping_propagation.period_secs = 30 @@ -935,6 +946,10 @@ mod test { region_replacement: RegionReplacementConfig { period_secs: Duration::from_secs(30), }, + region_replacement_driver: + RegionReplacementDriverConfig { + period_secs: Duration::from_secs(30), + }, instance_watcher: InstanceWatcherConfig { period_secs: Duration::from_secs(30), }, @@ -1015,6 +1030,7 @@ mod test { sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 + region_replacement_driver.period_secs = 30 instance_watcher.period_secs = 30 service_firewall_propagation.period_secs = 300 v2p_mapping_propagation.period_secs = 30 diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 81cf6499b2..4d55a134c1 100644 --- 
a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -135,7 +135,7 @@ rcgen.workspace = true regex.workspace = true similar-asserts.workspace = true sp-sim.workspace = true -rustls = { workspace = true } +rustls.workspace = true subprocess.workspace = true term.workspace = true trust-dns-resolver.workspace = true diff --git a/nexus/auth/src/authz/api_resources.rs b/nexus/auth/src/authz/api_resources.rs index 98a24b68b5..f4c91dc544 100644 --- a/nexus/auth/src/authz/api_resources.rs +++ b/nexus/auth/src/authz/api_resources.rs @@ -5,11 +5,11 @@ //! Authz types for resources in the API hierarchy //! //! The general pattern in Nexus for working with an object is to look it up -//! (see [`crate::db::lookup::LookupPath`]) and get back a so-called `authz` -//! type. This type uniquely identifies the resource regardless of any other -//! changes (e.g., name change or moving it to a different parent collection). -//! The various datastore functions that modify API resources accept these -//! `authz` types. +//! (see `nexus_db_queries::db::lookup::LookupPath`) and get back a so-called +//! `authz` type. This type uniquely identifies the resource regardless of +//! any other changes (e.g., name change or moving it to a different parent +//! collection). The various datastore functions that modify API resources +//! accept these `authz` types. //! //! The `authz` types can be passed to //! [`crate::context::OpContext::authorize()`] to do an authorization check -- diff --git a/nexus/db-fixed-data/src/vpc.rs b/nexus/db-fixed-data/src/vpc.rs index 25628a83b5..d5940a976e 100644 --- a/nexus/db-fixed-data/src/vpc.rs +++ b/nexus/db-fixed-data/src/vpc.rs @@ -23,12 +23,21 @@ pub static SERVICES_VPC_ROUTER_ID: Lazy = Lazy::new(|| { .expect("invalid uuid for builtin services vpc router id") }); -/// UUID of default route for built-in Services VPC. -pub static SERVICES_VPC_DEFAULT_ROUTE_ID: Lazy = Lazy::new(|| { - "001de000-074c-4000-8000-000000000002" - .parse() - .expect("invalid uuid for builtin services vpc default route id") -}); +/// UUID of default IPv4 route for built-in Services VPC. +pub static SERVICES_VPC_DEFAULT_V4_ROUTE_ID: Lazy = + Lazy::new(|| { + "001de000-074c-4000-8000-000000000002" + .parse() + .expect("invalid uuid for builtin services vpc default route id") + }); + +/// UUID of default IPv6 route for built-in Services VPC. +pub static SERVICES_VPC_DEFAULT_V6_ROUTE_ID: Lazy = + Lazy::new(|| { + "001de000-074c-4000-8000-000000000003" + .parse() + .expect("invalid uuid for builtin services vpc default route id") + }); /// Built-in VPC for internal services on the rack. pub static SERVICES_VPC: Lazy = Lazy::new(|| { diff --git a/nexus/db-fixed-data/src/vpc_subnet.rs b/nexus/db-fixed-data/src/vpc_subnet.rs index 622799b000..c91581ac13 100644 --- a/nexus/db-fixed-data/src/vpc_subnet.rs +++ b/nexus/db-fixed-data/src/vpc_subnet.rs @@ -31,6 +31,27 @@ pub static NTP_VPC_SUBNET_ID: Lazy = Lazy::new(|| { .expect("invalid uuid for builtin boundary ntp vpc subnet id") }); +/// UUID of built-in subnet route VPC Subnet route for External DNS. +pub static DNS_VPC_SUBNET_ROUTE_ID: Lazy = Lazy::new(|| { + "001de000-c470-4000-8000-000000000004" + .parse() + .expect("invalid uuid for builtin services vpc default route id") +}); + +/// UUID of built-in subnet route VPC Subnet route for Nexus. 
+pub static NEXUS_VPC_SUBNET_ROUTE_ID: Lazy = Lazy::new(|| { + "001de000-c470-4000-8000-000000000005" + .parse() + .expect("invalid uuid for builtin services vpc default route id") +}); + +/// UUID of built-in subnet route VPC Subnet route for Boundary NTP. +pub static NTP_VPC_SUBNET_ROUTE_ID: Lazy = Lazy::new(|| { + "001de000-c470-4000-8000-000000000006" + .parse() + .expect("invalid uuid for builtin services vpc default route id") +}); + /// Built-in VPC Subnet for External DNS. pub static DNS_VPC_SUBNET: Lazy = Lazy::new(|| { VpcSubnet::new( diff --git a/nexus/db-macros/src/lib.rs b/nexus/db-macros/src/lib.rs index fd9aae4b0a..52ada2ac88 100644 --- a/nexus/db-macros/src/lib.rs +++ b/nexus/db-macros/src/lib.rs @@ -21,7 +21,6 @@ use syn::spanned::Spanned; use syn::{parse_quote, Data, DataStruct, DeriveInput, Error, Fields, Ident}; mod lookup; -mod subquery; #[cfg(test)] mod test_helpers; @@ -134,35 +133,6 @@ fn get_field_with_name<'a>( } } -/// Implements the [`Subquery`] trait. -/// -/// Additionally, implements -/// [`diesel::query_builder::QueryFragment`](https://docs.diesel.rs/master/diesel/query_builder/trait.QueryFragment.html), -/// which refers to the subquery by the name supplied as input. -/// -/// Callers should also derive -/// [`diesel::query_builder::QueryId`](https://docs.diesel.rs/master/diesel/query_builder/trait.QueryId.html), -/// as it should be implemented for structures which implement -/// [`diesel::query_builder::QueryFragment`](https://docs.diesel.rs/master/diesel/query_builder/trait.QueryFragment.html). -/// -/// Example usage: -/// -/// ```ignore -/// #[derive(Subquery, QueryId)] -/// #[subquery(name = my_table)] -/// struct MyQuery { -/// query: Box> -/// } -/// ``` -#[proc_macro_derive(Subquery, attributes(subquery))] -pub fn subquery_target( - input: proc_macro::TokenStream, -) -> proc_macro::TokenStream { - subquery::derive_impl(input.into()) - .unwrap_or_else(|e| e.to_compile_error()) - .into() -} - // Describes which derive macro is being used; allows sharing common code. #[derive(Clone, Copy, Debug)] enum IdentityVariant { diff --git a/nexus/db-macros/src/subquery.rs b/nexus/db-macros/src/subquery.rs deleted file mode 100644 index 3c7fafec7b..0000000000 --- a/nexus/db-macros/src/subquery.rs +++ /dev/null @@ -1,98 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Procedure macro for deriving subquery-related information. - -use super::NameValue; - -use proc_macro2::TokenStream; -use quote::quote; -use syn::spanned::Spanned; -use syn::{DeriveInput, Error}; - -/// Looks for a Meta-style attribute with a particular identifier. -/// -/// As an example, for an attribute like `#[subquery(foo = bar)]`, we can find this -/// attribute by calling `get_subquery_attr(&item.attrs, "foo")`. 
-fn get_subquery_attr( - attrs: &[syn::Attribute], - name: &str, -) -> Option { - attrs - .iter() - .filter(|attr| attr.path().is_ident("subquery")) - .filter_map(|attr| attr.parse_args::().ok()) - .find(|nv| nv.name.is_ident(name)) -} - -// Implementation of `#[derive(Subquery)]` -pub(crate) fn derive_impl(tokens: TokenStream) -> syn::Result { - let item = syn::parse2::(tokens)?; - let name = &item.ident; - - let subquery_nv = get_subquery_attr(&item.attrs, "name").ok_or_else(|| { - Error::new( - item.span(), - format!( - "Resource needs 'name' attribute.\n\ - Try adding #[subquery(name = your_subquery_module)] to {}.", - name - ), - ) - })?; - - // TODO: We should ensure that a field named "query" exists within this - // struct. We currently rely on it existing. - // - // Don't bother parsing type, but we use it when impl'ing Subquery. - - let as_query_source_impl = - build_query_source_impl(name, &subquery_nv.value); - let subquery_impl = build_subquery_impl(name, &subquery_nv.value); - - Ok(quote! { - #as_query_source_impl - #subquery_impl - }) -} - -// TODO: Should we use diesel's "QuerySource" and "AsQuery" here? -// -// I think that could work for most "select" queries, but might break joins. -fn build_query_source_impl( - name: &syn::Ident, - subquery_module: &syn::Path, -) -> TokenStream { - quote! { - impl crate::db::subquery::AsQuerySource for #name { - type QuerySource = #subquery_module::table; - fn query_source(&self) -> Self::QuerySource { - #subquery_module::table - } - } - } -} - -fn build_subquery_impl( - name: &syn::Ident, - subquery_module: &syn::Path, -) -> TokenStream { - quote! { - impl ::diesel::query_builder::QueryFragment<::diesel::pg::Pg> for #name { - fn walk_ast<'a>( - &'a self, - mut out: ::diesel::query_builder::AstPass<'_, 'a, ::diesel::pg::Pg> - ) -> ::diesel::QueryResult<()> { - #subquery_module::table.walk_ast(out)?; - Ok(()) - } - } - - impl crate::db::subquery::Subquery for #name { - fn query(&self) -> &dyn ::diesel::query_builder::QueryFragment<::diesel::pg::Pg> { - &self.query - } - } - } -} diff --git a/nexus/db-model/src/certificate.rs b/nexus/db-model/src/certificate.rs index 2cd1bcf08a..0e221b7c46 100644 --- a/nexus/db-model/src/certificate.rs +++ b/nexus/db-model/src/certificate.rs @@ -82,6 +82,13 @@ impl TryFrom for views::Certificate { Ok(Self { identity: cert.identity(), service: cert.service.try_into()?, + // This is expected to succeed in normal circumstances. Certificates are stored in the + // database with PEM encoding which are essentially bundles of Base64 encoded text. + // The only cases in which this conversion should fail is when our internal database + // representation of the certificate is invalid. 
+ cert: String::from_utf8(cert.cert).map_err(|_| { + Error::internal_error("Certificate is not valid UTF-8") + })?, }) } } diff --git a/nexus/db-model/src/collection.rs b/nexus/db-model/src/collection.rs index b86e35d407..964aaad248 100644 --- a/nexus/db-model/src/collection.rs +++ b/nexus/db-model/src/collection.rs @@ -152,4 +152,8 @@ pub trait DatastoreAttachTargetConfig: type ResourceTimeDeletedColumn: Column::Table> + Default + ExpressionMethods; + + /// Controls whether a resource may be attached to a new collection without + /// first being explicitly detached from the previous one + const ALLOW_FROM_ATTACHED: bool = false; } diff --git a/nexus/db-model/src/network_interface.rs b/nexus/db-model/src/network_interface.rs index 6d347ecd37..79b16b5658 100644 --- a/nexus/db-model/src/network_interface.rs +++ b/nexus/db-model/src/network_interface.rs @@ -13,6 +13,7 @@ use chrono::DateTime; use chrono::Utc; use db_macros::Resource; use diesel::AsChangeset; +use ipnetwork::IpNetwork; use ipnetwork::NetworkSize; use nexus_types::external_api::params; use nexus_types::identity::Resource; @@ -64,11 +65,13 @@ pub struct NetworkInterface { // // If user requests an address of either kind, give exactly that and not the other. // If neither is specified, auto-assign one of each? - pub ip: ipnetwork::IpNetwork, + pub ip: IpNetwork, pub slot: SqlU8, #[diesel(column_name = is_primary)] pub primary: bool, + + pub transit_ips: Vec, } impl NetworkInterface { @@ -102,6 +105,7 @@ impl NetworkInterface { vni: external::Vni::try_from(0).unwrap(), primary: self.primary, slot: *self.slot, + transit_ips: self.transit_ips.into_iter().map(Into::into).collect(), } } } @@ -122,11 +126,13 @@ pub struct InstanceNetworkInterface { pub subnet_id: Uuid, pub mac: MacAddr, - pub ip: ipnetwork::IpNetwork, + pub ip: IpNetwork, pub slot: SqlU8, #[diesel(column_name = is_primary)] pub primary: bool, + + pub transit_ips: Vec, } /// Service Network Interface DB model. 
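The surrounding hunks thread the new `transit_ips` column through the NIC models: each interface now carries a list of extra CIDR prefixes, with IPv4 and IPv6 entries allowed side by side. As a rough, illustrative sketch (not part of the patch; the helper name is hypothetical, and only the `ipnetwork` crate these models already use is assumed), such a value is just a mixed list of parsed CIDRs:

    use ipnetwork::IpNetwork;

    // Hypothetical prefixes an interface might be permitted to carry traffic
    // for; `IpNetwork` parses both IPv4 and IPv6 CIDR literals.
    fn example_transit_ips() -> Vec<IpNetwork> {
        ["192.168.0.0/16", "fd00:1122:3344::/48"]
            .into_iter()
            .map(|s| s.parse().expect("literal CIDRs are valid"))
            .collect()
    }

The conversions added throughout this file then map that list element-wise into the external API representation.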
@@ -145,7 +151,7 @@ pub struct ServiceNetworkInterface { pub subnet_id: Uuid, pub mac: MacAddr, - pub ip: ipnetwork::IpNetwork, + pub ip: IpNetwork, pub slot: SqlU8, #[diesel(column_name = is_primary)] @@ -242,6 +248,7 @@ impl NetworkInterface { ip: self.ip, slot: self.slot, primary: self.primary, + transit_ips: self.transit_ips, } } @@ -290,6 +297,7 @@ impl From for NetworkInterface { ip: iface.ip, slot: iface.slot, primary: iface.primary, + transit_ips: iface.transit_ips, } } } @@ -313,6 +321,7 @@ impl From for NetworkInterface { ip: iface.ip, slot: iface.slot, primary: iface.primary, + transit_ips: vec![], } } } @@ -460,6 +469,7 @@ pub struct NetworkInterfaceUpdate { pub time_modified: DateTime, #[diesel(column_name = is_primary)] pub primary: Option, + pub transit_ips: Vec, } impl From for external::InstanceNetworkInterface { @@ -472,6 +482,11 @@ impl From for external::InstanceNetworkInterface { ip: iface.ip.ip(), mac: *iface.mac, primary: iface.primary, + transit_ips: iface + .transit_ips + .into_iter() + .map(Into::into) + .collect(), } } } @@ -484,6 +499,11 @@ impl From for NetworkInterfaceUpdate { description: params.identity.description, time_modified: Utc::now(), primary, + transit_ips: params + .transit_ips + .into_iter() + .map(Into::into) + .collect(), } } } diff --git a/nexus/db-model/src/omicron_zone_config.rs b/nexus/db-model/src/omicron_zone_config.rs index c2258dba6c..3b18a749a7 100644 --- a/nexus/db-model/src/omicron_zone_config.rs +++ b/nexus/db-model/src/omicron_zone_config.rs @@ -659,6 +659,7 @@ impl OmicronZoneNic { vni: omicron_common::api::external::Vni::try_from(*self.vni) .context("parsing VNI")?, subnet: self.subnet.into(), + transit_ips: vec![], }) } } diff --git a/nexus/db-model/src/saga_types.rs b/nexus/db-model/src/saga_types.rs index 3ad3e2603c..010c717356 100644 --- a/nexus/db-model/src/saga_types.rs +++ b/nexus/db-model/src/saga_types.rs @@ -76,7 +76,7 @@ impl From<&SecId> for Uuid { /// This exists because Omicron cannot implement foreign traits /// for foreign types. #[derive( - AsExpression, Copy, Clone, Debug, FromSqlRow, PartialEq, PartialOrd, + AsExpression, Copy, Clone, Debug, FromSqlRow, PartialEq, PartialOrd, Ord, Eq, )] #[diesel(sql_type = sql_types::Uuid)] pub struct SagaId(pub steno::SagaId); @@ -110,7 +110,7 @@ where /// This exists because Omicron cannot implement foreign traits /// for foreign types. #[derive( - AsExpression, Copy, Clone, Debug, FromSqlRow, PartialEq, PartialOrd, + AsExpression, Copy, Clone, Debug, FromSqlRow, PartialEq, PartialOrd, Ord, Eq, )] #[diesel(sql_type = sql_types::BigInt)] pub struct SagaNodeId(pub steno::SagaNodeId); @@ -181,7 +181,7 @@ impl FromSql for SagaCachedState { } /// Represents a row in the "Saga" table -#[derive(Queryable, Insertable, Clone, Debug, Selectable)] +#[derive(Queryable, Insertable, Clone, Debug, Selectable, PartialEq)] #[diesel(table_name = saga)] pub struct Saga { pub id: SagaId, @@ -222,7 +222,7 @@ impl Saga { } /// Represents a row in the "SagaNodeEvent" table -#[derive(Queryable, Insertable, Clone, Debug, Selectable)] +#[derive(Queryable, Insertable, Clone, Debug, Selectable, PartialEq)] #[diesel(table_name = saga_node_event)] pub struct SagaNodeEvent { pub saga_id: SagaId, diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 045c78232b..7fa07aa131 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -285,17 +285,6 @@ table! { } } -table! 
{ - v2p_mapping_view (nic_id) { - nic_id -> Uuid, - sled_id -> Uuid, - sled_ip -> Inet, - vni -> Int4, - mac -> Int8, - ip -> Inet, - } -} - table! { bgp_announce_set (id) { id -> Uuid, @@ -522,6 +511,7 @@ table! { ip -> Inet, slot -> Int2, is_primary -> Bool, + transit_ips -> Array, } } @@ -540,6 +530,7 @@ table! { ip -> Inet, slot -> Int2, is_primary -> Bool, + transit_ips -> Array, } } joinable!(instance_network_interface -> instance (instance_id)); @@ -1117,6 +1108,7 @@ table! { rcgen -> Int8, ipv4_block -> Inet, ipv6_block -> Inet, + custom_router_id -> Nullable, } } @@ -1131,6 +1123,7 @@ table! { kind -> crate::VpcRouterKindEnum, vpc_id -> Uuid, rcgen -> Int8, + resolved_version -> Int8, } } @@ -1843,6 +1836,7 @@ allow_tables_to_appear_in_same_query!( user_builtin, role_builtin, role_assignment, + probe, ); allow_tables_to_appear_in_same_query!(dns_zone, dns_version, dns_name); @@ -1871,3 +1865,5 @@ joinable!(instance_ssh_key -> ssh_key (ssh_key_id)); joinable!(instance_ssh_key -> instance (instance_id)); allow_tables_to_appear_in_same_query!(sled, sled_instance); + +joinable!(network_interface -> probe (parent_id)); diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index f6eda53074..6f537bb522 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -17,8 +17,7 @@ use std::collections::BTreeMap; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(77, 0, 0); - +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(80, 0, 0); /// List of all past database schema versions, in *reverse* order /// /// If you want to change the Omicron database schema, you must update this. @@ -29,7 +28,10 @@ static KNOWN_VERSIONS: Lazy> = Lazy::new(|| { // | leaving the first copy as an example for the next person. 
// v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), - KnownVersion::new(77, "add-instance-id-to-migrations"), + KnownVersion::new(80, "add-instance-id-to-migrations"), + KnownVersion::new(79, "nic-spoof-allow"), + KnownVersion::new(78, "vpc-subnet-routing"), + KnownVersion::new(77, "remove-view-for-v2p-mappings"), KnownVersion::new(76, "lookup-region-snapshot-by-snapshot-id"), KnownVersion::new(75, "add-cockroach-zone-id-to-node-id"), KnownVersion::new(74, "add-migration-table"), diff --git a/nexus/db-model/src/sled.rs b/nexus/db-model/src/sled.rs index 5019366733..c177650991 100644 --- a/nexus/db-model/src/sled.rs +++ b/nexus/db-model/src/sled.rs @@ -44,7 +44,7 @@ pub struct SledSystemHardware { #[diesel(table_name = sled)] pub struct Sled { #[diesel(embed)] - identity: SledIdentity, + pub identity: SledIdentity, time_deleted: Option>, pub rcgen: Generation, diff --git a/nexus/db-model/src/v2p_mapping.rs b/nexus/db-model/src/v2p_mapping.rs index 43831f7503..bb7c74b83f 100644 --- a/nexus/db-model/src/v2p_mapping.rs +++ b/nexus/db-model/src/v2p_mapping.rs @@ -1,15 +1,17 @@ -use crate::schema::v2p_mapping_view; -use crate::{MacAddr, Vni}; +use crate::{Ipv6Addr, MacAddr, Vni}; use ipnetwork::IpNetwork; use serde::{Deserialize, Serialize}; use uuid::Uuid; -#[derive(Queryable, Selectable, Clone, Debug, Serialize, Deserialize)] -#[diesel(table_name = v2p_mapping_view)] +/// This is not backed by an actual database view, +/// but it is still essentially a "view" in the sense +/// that it is a read-only data model derived from +/// multiple db tables +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct V2PMappingView { pub nic_id: Uuid, pub sled_id: Uuid, - pub sled_ip: IpNetwork, + pub sled_ip: Ipv6Addr, pub vni: Vni, pub mac: MacAddr, pub ip: IpNetwork, diff --git a/nexus/db-model/src/vpc_firewall_rule.rs b/nexus/db-model/src/vpc_firewall_rule.rs index 2d19796524..120d3e1473 100644 --- a/nexus/db-model/src/vpc_firewall_rule.rs +++ b/nexus/db-model/src/vpc_firewall_rule.rs @@ -14,6 +14,7 @@ use nexus_types::identity::Resource; use omicron_common::api::external; use serde::Deserialize; use serde::Serialize; +use std::collections::HashSet; use std::io::Write; use uuid::Uuid; @@ -253,15 +254,42 @@ impl VpcFirewallRule { pub fn vec_from_params( vpc_id: Uuid, params: external::VpcFirewallRuleUpdateParams, - ) -> Vec { - params + ) -> Result, external::Error> { + ensure_no_duplicates(¶ms)?; + Ok(params .rules .iter() .map(|rule| VpcFirewallRule::new(Uuid::new_v4(), vpc_id, rule)) - .collect() + .collect()) } } +fn ensure_no_duplicates( + params: &external::VpcFirewallRuleUpdateParams, +) -> Result<(), external::Error> { + // we could do this by comparing set(names).len() to names.len(), but this + // way we can say what the duplicate names are, and that's nice! + let mut names = HashSet::new(); + let mut dupes = HashSet::new(); + for r in params.rules.iter() { + if !names.insert(r.name.clone()) { + // insert returns false if already present + dupes.insert(r.name.clone()); + } + } + + if dupes.is_empty() { + return Ok(()); + } + + let dupes_str = + dupes.iter().map(|d| format!("\"{d}\"")).collect::>().join(", "); + return Err(external::Error::invalid_value( + "rules", + format!("Rule names must be unique. 
Duplicates: [{}]", dupes_str), + )); +} + impl Into for VpcFirewallRule { fn into(self) -> external::VpcFirewallRule { external::VpcFirewallRule { diff --git a/nexus/db-model/src/vpc_route.rs b/nexus/db-model/src/vpc_route.rs index 168ed41cef..3015df691f 100644 --- a/nexus/db-model/src/vpc_route.rs +++ b/nexus/db-model/src/vpc_route.rs @@ -18,7 +18,7 @@ use std::io::Write; use uuid::Uuid; impl_enum_wrapper!( - #[derive(SqlType, Debug)] + #[derive(SqlType, Debug, QueryId)] #[diesel(postgres_type(name = "router_route_kind", schema = "public"))] pub struct RouterRouteKindEnum; @@ -127,6 +127,46 @@ impl RouterRoute { destination: RouteDestination::new(params.destination), } } + + /// Create a subnet routing rule for a VPC's system router. + /// + /// This defaults to use the same name as the subnet. If this would conflict + /// with the internet gateway rules, then the UUID is used instead (alongside + /// notice that a name conflict has occurred). + pub fn for_subnet( + route_id: Uuid, + system_router_id: Uuid, + subnet: Name, + ) -> Self { + let forbidden_names = ["default-v4", "default-v6"]; + + let name = if forbidden_names.contains(&subnet.as_str()) { + // unwrap safety: a uuid is not by itself a valid name + // so prepend it with another string. + // - length constraint is <63 chars, + // - a UUID is 36 chars including hyphens, + // - "{subnet}-" is 11 chars + // - "conflict-" is 9 chars + // = 56 chars + format!("conflict-{subnet}-{route_id}").parse().unwrap() + } else { + subnet.0.clone() + }; + + Self::new( + route_id, + system_router_id, + external::RouterRouteKind::VpcSubnet, + params::RouterRouteCreate { + identity: external::IdentityMetadataCreateParams { + name, + description: format!("VPC Subnet route for '{subnet}'"), + }, + target: external::RouteTarget::Subnet(subnet.0.clone()), + destination: external::RouteDestination::Subnet(subnet.0), + }, + ) + } } impl Into for RouterRoute { diff --git a/nexus/db-model/src/vpc_router.rs b/nexus/db-model/src/vpc_router.rs index 71c753e6aa..ee8988ae69 100644 --- a/nexus/db-model/src/vpc_router.rs +++ b/nexus/db-model/src/vpc_router.rs @@ -4,7 +4,8 @@ use super::{impl_enum_type, Generation, Name, RouterRoute}; use crate::collection::DatastoreCollectionConfig; -use crate::schema::{router_route, vpc_router}; +use crate::schema::{router_route, vpc_router, vpc_subnet}; +use crate::{DatastoreAttachTargetConfig, VpcSubnet}; use chrono::{DateTime, Utc}; use db_macros::Resource; use nexus_types::external_api::params; @@ -41,9 +42,10 @@ pub struct VpcRouter { #[diesel(embed)] identity: VpcRouterIdentity, - pub vpc_id: Uuid, pub kind: VpcRouterKind, + pub vpc_id: Uuid, pub rcgen: Generation, + pub resolved_version: i64, } impl VpcRouter { @@ -54,7 +56,13 @@ impl VpcRouter { params: params::VpcRouterCreate, ) -> Self { let identity = VpcRouterIdentity::new(router_id, params.identity); - Self { identity, vpc_id, kind, rcgen: Generation::new() } + Self { + identity, + vpc_id, + kind, + rcgen: Generation::new(), + resolved_version: 0, + } } } @@ -92,3 +100,16 @@ impl From for VpcRouterUpdate { } } } + +impl DatastoreAttachTargetConfig for VpcRouter { + type Id = Uuid; + + type CollectionIdColumn = vpc_router::dsl::id; + type CollectionTimeDeletedColumn = vpc_router::dsl::time_deleted; + + type ResourceIdColumn = vpc_subnet::dsl::id; + type ResourceCollectionIdColumn = vpc_subnet::dsl::custom_router_id; + type ResourceTimeDeletedColumn = vpc_subnet::dsl::time_deleted; + + const ALLOW_FROM_ATTACHED: bool = true; +} diff --git 
a/nexus/db-model/src/vpc_subnet.rs b/nexus/db-model/src/vpc_subnet.rs index f3c90a908e..eaa7c6e87d 100644 --- a/nexus/db-model/src/vpc_subnet.rs +++ b/nexus/db-model/src/vpc_subnet.rs @@ -39,6 +39,7 @@ pub struct VpcSubnet { pub rcgen: Generation, pub ipv4_block: Ipv4Net, pub ipv6_block: Ipv6Net, + pub custom_router_id: Option, } impl VpcSubnet { @@ -60,6 +61,7 @@ impl VpcSubnet { rcgen: Generation::new(), ipv4_block: Ipv4Net(ipv4_block), ipv6_block: Ipv6Net(ipv6_block), + custom_router_id: None, } } @@ -102,6 +104,7 @@ impl From for views::VpcSubnet { vpc_id: subnet.vpc_id, ipv4_block: subnet.ipv4_block.0, ipv6_block: subnet.ipv6_block.0, + custom_router_id: subnet.custom_router_id, } } } diff --git a/nexus/db-queries/src/db/collection_attach.rs b/nexus/db-queries/src/db/collection_attach.rs index fccc1aa324..95e6afeb4b 100644 --- a/nexus/db-queries/src/db/collection_attach.rs +++ b/nexus/db-queries/src/db/collection_attach.rs @@ -232,12 +232,26 @@ pub trait DatastoreAttachTarget: .filter(collection_table().primary_key().eq(collection_id)) .filter(Self::CollectionTimeDeletedColumn::default().is_null()), ); - let resource_query = Box::new( - resource_query - .filter(resource_table().primary_key().eq(resource_id)) - .filter(Self::ResourceTimeDeletedColumn::default().is_null()) - .filter(Self::ResourceCollectionIdColumn::default().is_null()), - ); + let resource_query = if Self::ALLOW_FROM_ATTACHED { + Box::new( + resource_query + .filter(resource_table().primary_key().eq(resource_id)) + .filter( + Self::ResourceTimeDeletedColumn::default().is_null(), + ), + ) + } else { + Box::new( + resource_query + .filter(resource_table().primary_key().eq(resource_id)) + .filter( + Self::ResourceTimeDeletedColumn::default().is_null(), + ) + .filter( + Self::ResourceCollectionIdColumn::default().is_null(), + ), + ) + }; let update_resource_statement = update .into_boxed() diff --git a/nexus/db-queries/src/db/datastore/network_interface.rs b/nexus/db-queries/src/db/datastore/network_interface.rs index 3076afa39f..c5a8992cd2 100644 --- a/nexus/db-queries/src/db/datastore/network_interface.rs +++ b/nexus/db-queries/src/db/datastore/network_interface.rs @@ -60,6 +60,7 @@ struct NicInfo { vni: db::model::Vni, primary: bool, slot: i16, + transit_ips: Vec, } impl From for omicron_common::api::internal::shared::NetworkInterface { @@ -92,6 +93,7 @@ impl From for omicron_common::api::internal::shared::NetworkInterface { vni: nic.vni.0, primary: nic.primary, slot: u8::try_from(nic.slot).unwrap(), + transit_ips: nic.transit_ips.iter().map(|v| (*v).into()).collect(), } } } @@ -136,11 +138,27 @@ impl DataStore { ), )); } - self.create_network_interface_raw(opctx, interface) + + let out = self + .create_network_interface_raw(opctx, interface) .await // Convert to `InstanceNetworkInterface` before returning; we know // this is valid as we've checked the condition on-entry. - .map(NetworkInterface::as_instance) + .map(NetworkInterface::as_instance)?; + + // `instance:xxx` targets in router rules resolve to the primary + // NIC of that instance. Accordingly, NIC create may cause dangling + // entries to re-resolve to a valid instance (even if it is not yet + // started). + // This will not trigger the route RPW directly, we still need to do + // so in e.g. the instance watcher task. + if out.primary { + self.vpc_increment_rpw_version(opctx, out.vpc_id) + .await + .map_err(|e| network_interface::InsertError::External(e))?; + } + + Ok(out) } /// List network interfaces associated with a given service. 
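The comment in the hunk above is the key design point: creating a primary NIC does not reprogram any routes inline; it only bumps a version number that the route RPW compares against on its next pass. Below is a minimal, self-contained sketch of that "bump a version, let the reconciler catch up" pattern, using std types and hypothetical names only (it is not the RPW implementation itself):

    use std::sync::atomic::{AtomicU64, Ordering};

    /// Hypothetical stand-in for a router whose rules are resolved lazily.
    struct Router {
        resolved_version: AtomicU64, // bumped by CRUD paths
        last_applied: AtomicU64,     // version the reconciler last pushed out
    }

    impl Router {
        /// Called from CRUD paths (e.g. NIC create): cheap, no I/O.
        fn bump(&self) {
            self.resolved_version.fetch_add(1, Ordering::SeqCst);
        }

        /// Called periodically by the reconciler task.
        fn needs_reconcile(&self) -> bool {
            self.resolved_version.load(Ordering::SeqCst)
                > self.last_applied.load(Ordering::SeqCst)
        }
    }

    fn main() {
        let r = Router {
            resolved_version: AtomicU64::new(0),
            last_applied: AtomicU64::new(0),
        };
        r.bump(); // e.g. a primary NIC was just created
        assert!(r.needs_reconcile());
    }

The benefit is that the CRUD path stays cheap and idempotent, while a raced or interrupted update is repaired by the reconciler's next pass.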
@@ -486,6 +504,7 @@ impl DataStore { vpc::vni, network_interface::is_primary, network_interface::slot, + network_interface::transit_ips, )) .get_results_async::( &*self.pool_connection_authorized(opctx).await?, @@ -608,6 +627,28 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + /// Retrieve the primary network interface for a given instance. + pub async fn instance_get_primary_network_interface( + &self, + opctx: &OpContext, + authz_instance: &authz::Instance, + ) -> LookupResult { + opctx.authorize(authz::Action::ListChildren, authz_instance).await?; + + use db::schema::instance_network_interface::dsl; + dsl::instance_network_interface + .filter(dsl::time_deleted.is_null()) + .filter(dsl::instance_id.eq(authz_instance.id())) + .filter(dsl::is_primary.eq(true)) + .select(InstanceNetworkInterface::as_select()) + .limit(1) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + /// Get network interface associated with a given probe. pub async fn probe_get_network_interface( &self, diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 4af6bf7263..627f1f60ab 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -1390,6 +1390,7 @@ mod test { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }, }, ), @@ -1416,6 +1417,7 @@ mod test { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }, external_ip: OmicronZoneExternalSnatIp { id: ExternalIpUuid::new_v4(), @@ -1462,6 +1464,7 @@ mod test { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }, }, ), @@ -1488,6 +1491,7 @@ mod test { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }, external_ip: OmicronZoneExternalSnatIp { id: ExternalIpUuid::new_v4(), @@ -1715,6 +1719,7 @@ mod test { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }, }, ), @@ -1746,6 +1751,7 @@ mod test { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }, }, ), @@ -1984,6 +1990,7 @@ mod test { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }, }, ), @@ -2089,6 +2096,7 @@ mod test { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }, }, ), @@ -2120,6 +2128,7 @@ mod test { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }, }, ), diff --git a/nexus/db-queries/src/db/datastore/saga.rs b/nexus/db-queries/src/db/datastore/saga.rs index 1cd41a9806..c42d14d0d7 100644 --- a/nexus/db-queries/src/db/datastore/saga.rs +++ b/nexus/db-queries/src/db/datastore/saga.rs @@ -9,17 +9,13 @@ use crate::db; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::Generation; -use crate::db::pagination::paginated; use crate::db::update_and_check::UpdateAndCheck; use crate::db::update_and_check::UpdateStatus; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::prelude::*; -use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; -use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; -use uuid::Uuid; impl DataStore { pub async fn saga_create( @@ -103,54 +99,4 @@ impl DataStore { )), } } - - pub async fn saga_list_unfinished_by_id( - &self, - sec_id: &db::SecId, - pagparams: &DataPageParams<'_, Uuid>, - 
) -> ListResultVec { - use db::schema::saga::dsl; - paginated(dsl::saga, dsl::id, &pagparams) - .filter(dsl::saga_state.ne(db::saga_types::SagaCachedState( - steno::SagaCachedState::Done, - ))) - .filter(dsl::current_sec.eq(*sec_id)) - .load_async(&*self.pool_connection_unauthorized().await?) - .await - .map_err(|e| { - public_error_from_diesel( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::SagaDbg, - LookupType::ById(sec_id.0), - ), - ) - }) - } - - pub async fn saga_node_event_list_by_id( - &self, - id: db::saga_types::SagaId, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - use db::schema::saga_node_event::dsl; - paginated(dsl::saga_node_event, dsl::saga_id, &pagparams) - .filter(dsl::saga_id.eq(id)) - .load_async::( - &*self.pool_connection_unauthorized().await?, - ) - .await - .map_err(|e| { - public_error_from_diesel( - e, - ErrorHandler::NotFoundByLookup( - ResourceType::SagaDbg, - LookupType::ById(id.0 .0), - ), - ) - })? - .into_iter() - .map(|db_event| steno::SagaNodeEvent::try_from(db_event)) - .collect::>() - } } diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs index bf43b9182d..381b25dc17 100644 --- a/nexus/db-queries/src/db/datastore/sled.rs +++ b/nexus/db-queries/src/db/datastore/sled.rs @@ -695,7 +695,7 @@ impl SledTransition { /// (which is always considered valid). /// /// For a more descriptive listing of valid transitions, see - /// [`test_sled_transitions`]. + /// `test_sled_transitions`. fn valid_old_policies(&self) -> Vec { use SledPolicy::*; use SledProvisionPolicy::*; @@ -731,7 +731,7 @@ impl SledTransition { /// (which is always considered valid). /// /// For a more descriptive listing of valid transitions, see - /// [`test_sled_transitions`]. + /// `test_sled_transitions`. fn valid_old_states(&self) -> Vec { use SledState::*; diff --git a/nexus/db-queries/src/db/datastore/update.rs b/nexus/db-queries/src/db/datastore/update.rs index d73a3d903f..37339beb62 100644 --- a/nexus/db-queries/src/db/datastore/update.rs +++ b/nexus/db-queries/src/db/datastore/update.rs @@ -25,7 +25,7 @@ use omicron_uuid_kinds::TufRepoKind; use omicron_uuid_kinds::TypedUuid; use swrite::{swrite, SWrite}; -/// The return value of [`DataStore::update_tuf_repo_description_insert`]. +/// The return value of [`DataStore::update_tuf_repo_insert`]. /// /// This is similar to [`external::TufRepoInsertResponse`], but uses /// nexus-db-model's types instead of external types. 
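The doc-comment fixes just above (in sled.rs and update.rs), like the api_resources.rs hunk earlier in this diff, are all about keeping intra-doc links resolvable: a bracketed [`path`] reference must resolve when documentation is built, whereas plain backticks are only formatting. A small illustration with placeholder names, not taken from the codebase:

    /// See [`some_missing_item`] for details. If rustdoc cannot resolve the
    /// path, this triggers the `broken_intra_doc_links` lint, which becomes a
    /// hard error in builds that deny warnings.
    pub fn linked_docs() {}

    /// See `some_missing_item` for details. Plain code font renders without a
    /// hyperlink and never needs to resolve.
    pub fn unlinked_docs() {}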
diff --git a/nexus/db-queries/src/db/datastore/v2p_mapping.rs b/nexus/db-queries/src/db/datastore/v2p_mapping.rs index 6c00957e7d..2f54dfb9be 100644 --- a/nexus/db-queries/src/db/datastore/v2p_mapping.rs +++ b/nexus/db-queries/src/db/datastore/v2p_mapping.rs @@ -7,11 +7,15 @@ use crate::context::OpContext; use crate::db; use crate::db::datastore::SQL_BATCH_SIZE; use crate::db::error::{public_error_from_diesel, ErrorHandler}; +use crate::db::model::ApplySledFilterExt; use crate::db::model::V2PMappingView; use crate::db::pagination::paginated; use crate::db::pagination::Paginator; use async_bb8_diesel::AsyncRunQueryDsl; -use diesel::{QueryDsl, SelectableHelper}; +use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; +use diesel::{JoinOnDsl, NullableExpressionMethods}; +use nexus_db_model::{NetworkInterface, NetworkInterfaceKind, Sled, Vpc}; +use nexus_types::deployment::SledFilter; use omicron_common::api::external::ListResultVec; impl DataStore { @@ -19,22 +23,120 @@ impl DataStore { &self, opctx: &OpContext, ) -> ListResultVec { - use db::schema::v2p_mapping_view::dsl; + use db::schema::instance::dsl as instance_dsl; + use db::schema::network_interface::dsl as network_interface_dsl; + use db::schema::probe::dsl as probe_dsl; + use db::schema::sled::dsl as sled_dsl; + use db::schema::vmm::dsl as vmm_dsl; + use db::schema::vpc::dsl as vpc_dsl; + use db::schema::vpc_subnet::dsl as vpc_subnet_dsl; + + use db::schema::network_interface; opctx.check_complex_operations_allowed()?; let mut mappings = Vec::new(); let mut paginator = Paginator::new(SQL_BATCH_SIZE); while let Some(p) = paginator.next() { - let batch = paginated( - dsl::v2p_mapping_view, - dsl::nic_id, - &p.current_pagparams(), - ) - .select(V2PMappingView::as_select()) - .load_async(&*self.pool_connection_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + let batch: Vec<_> = + paginated( + network_interface_dsl::network_interface, + network_interface_dsl::id, + &p.current_pagparams(), + ) + .inner_join( + instance_dsl::instance + .on(network_interface_dsl::parent_id + .eq(instance_dsl::id)), + ) + .inner_join(vmm_dsl::vmm.on( + vmm_dsl::id.nullable().eq(instance_dsl::active_propolis_id), + )) + .inner_join(vpc_subnet_dsl::vpc_subnet.on( + vpc_subnet_dsl::id.eq(network_interface_dsl::subnet_id), + )) + .inner_join( + vpc_dsl::vpc + .on(vpc_dsl::id.eq(network_interface_dsl::vpc_id)), + ) + .inner_join( + sled_dsl::sled.on(sled_dsl::id.eq(vmm_dsl::sled_id)), + ) + .filter(network_interface::time_deleted.is_null()) + .filter( + network_interface::kind.eq(NetworkInterfaceKind::Instance), + ) + .sled_filter(SledFilter::V2PMapping) + .select(( + NetworkInterface::as_select(), + Sled::as_select(), + Vpc::as_select(), + )) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? 
+ .into_iter() + .map(|(nic, sled, vpc): (NetworkInterface, Sled, Vpc)| { + V2PMappingView { + nic_id: nic.identity.id, + sled_id: sled.identity.id, + sled_ip: sled.ip, + vni: vpc.vni, + mac: nic.mac, + ip: nic.ip, + } + }) + .collect(); + + paginator = p.found_batch(&batch, &|mapping| mapping.nic_id); + mappings.extend(batch); + } + + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch: Vec<_> = + paginated( + network_interface_dsl::network_interface, + network_interface_dsl::id, + &p.current_pagparams(), + ) + .inner_join( + probe_dsl::probe + .on(probe_dsl::id.eq(network_interface_dsl::parent_id)), + ) + .inner_join(vpc_subnet_dsl::vpc_subnet.on( + vpc_subnet_dsl::id.eq(network_interface_dsl::subnet_id), + )) + .inner_join( + vpc_dsl::vpc + .on(vpc_dsl::id.eq(network_interface_dsl::vpc_id)), + ) + .inner_join(sled_dsl::sled.on(sled_dsl::id.eq(probe_dsl::sled))) + .filter(network_interface::time_deleted.is_null()) + .filter( + network_interface::kind.eq(NetworkInterfaceKind::Instance), + ) + .sled_filter(SledFilter::V2PMapping) + .select(( + NetworkInterface::as_select(), + Sled::as_select(), + Vpc::as_select(), + )) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? + .into_iter() + .map(|(nic, sled, vpc): (NetworkInterface, Sled, Vpc)| { + V2PMappingView { + nic_id: nic.identity.id, + sled_id: sled.identity.id, + sled_ip: sled.ip, + vni: vpc.vni, + mac: nic.mac, + ip: nic.ip, + } + }) + .collect(); paginator = p.found_batch(&batch, &|mapping| mapping.nic_id); mappings.extend(batch); diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index 5322e20dbf..fdb9c82fb5 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++ b/nexus/db-queries/src/db/datastore/vpc.rs @@ -5,9 +5,12 @@ //! [`DataStore`] methods on [`Vpc`]s. 
use super::DataStore; +use super::SQL_BATCH_SIZE; use crate::authz; use crate::context::OpContext; use crate::db; +use crate::db::collection_attach::AttachError; +use crate::db::collection_attach::DatastoreAttachTarget; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel; @@ -20,6 +23,7 @@ use crate::db::model::InstanceNetworkInterface; use crate::db::model::Name; use crate::db::model::Project; use crate::db::model::RouterRoute; +use crate::db::model::RouterRouteKind; use crate::db::model::RouterRouteUpdate; use crate::db::model::Sled; use crate::db::model::Vni; @@ -33,6 +37,7 @@ use crate::db::model::VpcSubnetUpdate; use crate::db::model::VpcUpdate; use crate::db::model::{Ipv4Net, Ipv6Net}; use crate::db::pagination::paginated; +use crate::db::pagination::Paginator; use crate::db::queries::vpc::InsertVpcQuery; use crate::db::queries::vpc::VniSearchIter; use crate::db::queries::vpc_subnet::FilterConflictingVpcSubnetRangesQuery; @@ -43,6 +48,7 @@ use chrono::Utc; use diesel::prelude::*; use diesel::result::DatabaseErrorKind; use diesel::result::Error as DieselError; +use futures::stream::{self, StreamExt}; use ipnetwork::IpNetwork; use nexus_db_fixed_data::vpc::SERVICES_VPC_ID; use nexus_types::deployment::BlueprintZoneFilter; @@ -59,11 +65,16 @@ use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::RouteDestination; use omicron_common::api::external::RouteTarget; -use omicron_common::api::external::RouterRouteKind; +use omicron_common::api::external::RouterRouteKind as ExternalRouteKind; use omicron_common::api::external::UpdateResult; use omicron_common::api::external::Vni as ExternalVni; +use omicron_common::api::internal::shared::RouterTarget; +use oxnet::IpNet; use ref_cast::RefCast; use std::collections::BTreeMap; +use std::collections::HashMap; +use std::collections::HashSet; +use std::net::IpAddr; use uuid::Uuid; impl DataStore { @@ -74,7 +85,8 @@ impl DataStore { ) -> Result<(), Error> { use nexus_db_fixed_data::project::SERVICES_PROJECT_ID; use nexus_db_fixed_data::vpc::SERVICES_VPC; - use nexus_db_fixed_data::vpc::SERVICES_VPC_DEFAULT_ROUTE_ID; + use nexus_db_fixed_data::vpc::SERVICES_VPC_DEFAULT_V4_ROUTE_ID; + use nexus_db_fixed_data::vpc::SERVICES_VPC_DEFAULT_V6_ROUTE_ID; opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?; @@ -135,35 +147,49 @@ impl DataStore { .map(|(authz_router, _)| authz_router)? }; - let route = RouterRoute::new( - *SERVICES_VPC_DEFAULT_ROUTE_ID, - SERVICES_VPC.system_router_id, - RouterRouteKind::Default, - nexus_types::external_api::params::RouterRouteCreate { - identity: IdentityMetadataCreateParams { - name: "default".parse().unwrap(), - description: - "Default internet gateway route for Oxide Services" - .to_string(), + // Unwrap safety: these are known valid CIDR blocks. 
+ let default_ips = [ + ( + "default-v4", + "0.0.0.0/0".parse().unwrap(), + *SERVICES_VPC_DEFAULT_V4_ROUTE_ID, + ), + ( + "default-v6", + "::/0".parse().unwrap(), + *SERVICES_VPC_DEFAULT_V6_ROUTE_ID, + ), + ]; + + for (name, default, uuid) in default_ips { + let route = RouterRoute::new( + uuid, + SERVICES_VPC.system_router_id, + ExternalRouteKind::Default, + nexus_types::external_api::params::RouterRouteCreate { + identity: IdentityMetadataCreateParams { + name: name.parse().unwrap(), + description: + "Default internet gateway route for Oxide Services" + .to_string(), + }, + target: RouteTarget::InternetGateway( + "outbound".parse().unwrap(), + ), + destination: RouteDestination::IpNet(default), }, - target: RouteTarget::InternetGateway( - "outbound".parse().unwrap(), - ), - destination: RouteDestination::Vpc( - SERVICES_VPC.identity.name.clone().into(), - ), - }, - ); - self.router_create_route(opctx, &authz_router, route) - .await - .map(|_| ()) - .or_else(|e| match e { - Error::ObjectAlreadyExists { .. } => Ok(()), - _ => Err(e), - })?; + ); + self.router_create_route(opctx, &authz_router, route) + .await + .map(|_| ()) + .or_else(|e| match e { + Error::ObjectAlreadyExists { .. } => Ok(()), + _ => Err(e), + })?; + } self.load_builtin_vpc_fw_rules(opctx).await?; - self.load_builtin_vpc_subnets(opctx).await?; + self.load_builtin_vpc_subnets(opctx, &authz_router).await?; info!(opctx.log, "created built-in services vpc"); @@ -228,10 +254,15 @@ impl DataStore { async fn load_builtin_vpc_subnets( &self, opctx: &OpContext, + authz_router: &authz::VpcRouter, ) -> Result<(), Error> { + use nexus_db_fixed_data::vpc::SERVICES_VPC; use nexus_db_fixed_data::vpc_subnet::DNS_VPC_SUBNET; + use nexus_db_fixed_data::vpc_subnet::DNS_VPC_SUBNET_ROUTE_ID; use nexus_db_fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; + use nexus_db_fixed_data::vpc_subnet::NEXUS_VPC_SUBNET_ROUTE_ID; use nexus_db_fixed_data::vpc_subnet::NTP_VPC_SUBNET; + use nexus_db_fixed_data::vpc_subnet::NTP_VPC_SUBNET_ROUTE_ID; debug!(opctx.log, "attempting to create built-in VPC Subnets"); @@ -242,9 +273,11 @@ impl DataStore { .lookup_for(authz::Action::CreateChild) .await .internal_context("lookup built-in services vpc")?; - for vpc_subnet in - [&*DNS_VPC_SUBNET, &*NEXUS_VPC_SUBNET, &*NTP_VPC_SUBNET] - { + for (vpc_subnet, route_id) in [ + (&*DNS_VPC_SUBNET, *DNS_VPC_SUBNET_ROUTE_ID), + (&*NEXUS_VPC_SUBNET, *NEXUS_VPC_SUBNET_ROUTE_ID), + (&*NTP_VPC_SUBNET, *NTP_VPC_SUBNET_ROUTE_ID), + ] { if let Ok(_) = db::lookup::LookupPath::new(opctx, self) .vpc_subnet_id(vpc_subnet.id()) .fetch() @@ -260,6 +293,20 @@ impl DataStore { Error::ObjectAlreadyExists { .. } => Ok(()), _ => Err(e), })?; + + let route = RouterRoute::for_subnet( + route_id, + SERVICES_VPC.system_router_id, + vpc_subnet.name().clone().into(), + ); + + self.router_create_route(opctx, &authz_router, route) + .await + .map(|_| ()) + .or_else(|e| match e { + Error::ObjectAlreadyExists { .. 
} => Ok(()), + _ => Err(e), + })?; } info!(opctx.log, "created built-in services vpc subnets"); @@ -770,6 +817,9 @@ impl DataStore { assert_eq!(authz_vpc.id(), subnet.vpc_id); let db_subnet = self.vpc_create_subnet_raw(subnet).await?; + self.vpc_system_router_ensure_subnet_routes(opctx, authz_vpc.id()) + .await + .map_err(SubnetError::External)?; Ok(( authz::VpcSubnet::new( authz_vpc.clone(), @@ -850,6 +900,12 @@ impl DataStore { "deletion failed due to concurrent modification", )); } else { + self.vpc_system_router_ensure_subnet_routes( + opctx, + db_subnet.vpc_id, + ) + .await?; + Ok(()) } } @@ -863,13 +919,92 @@ impl DataStore { opctx.authorize(authz::Action::Modify, authz_subnet).await?; use db::schema::vpc_subnet::dsl; - diesel::update(dsl::vpc_subnet) + let out = diesel::update(dsl::vpc_subnet) .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_subnet.id())) .set(updates) .returning(VpcSubnet::as_returning()) .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_subnet), + ) + })?; + + self.vpc_system_router_ensure_subnet_routes(opctx, out.vpc_id).await?; + + Ok(out) + } + + pub async fn vpc_subnet_set_custom_router( + &self, + opctx: &OpContext, + authz_subnet: &authz::VpcSubnet, + authz_router: &authz::VpcRouter, + ) -> Result { + opctx.authorize(authz::Action::Modify, authz_subnet).await?; + opctx.authorize(authz::Action::Read, authz_router).await?; + + use db::schema::vpc_router::dsl as router_dsl; + use db::schema::vpc_subnet::dsl as subnet_dsl; + + let query = VpcRouter::attach_resource( + authz_router.id(), + authz_subnet.id(), + router_dsl::vpc_router + .into_boxed() + .filter(router_dsl::kind.eq(VpcRouterKind::Custom)), + subnet_dsl::vpc_subnet.into_boxed(), + u32::MAX, + diesel::update(subnet_dsl::vpc_subnet).set(( + subnet_dsl::time_modified.eq(Utc::now()), + subnet_dsl::custom_router_id.eq(authz_router.id()), + )), + ); + + query + .attach_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map(|(_, resource)| resource) + .map_err(|e| match e { + AttachError::CollectionNotFound => Error::not_found_by_id( + ResourceType::VpcRouter, + &authz_router.id(), + ), + AttachError::ResourceNotFound => Error::not_found_by_id( + ResourceType::VpcSubnet, + &authz_subnet.id(), + ), + // The only other failure reason can be an attempt to use a system router. + AttachError::NoUpdate { .. } => Error::invalid_request( + "cannot attach a system router to a VPC subnet", + ), + AttachError::DatabaseError(e) => { + public_error_from_diesel(e, ErrorHandler::Server) + } + }) + } + + pub async fn vpc_subnet_unset_custom_router( + &self, + opctx: &OpContext, + authz_subnet: &authz::VpcSubnet, + ) -> Result { + opctx.authorize(authz::Action::Modify, authz_subnet).await?; + + use db::schema::vpc_subnet::dsl; + + diesel::update(dsl::vpc_subnet) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(authz_subnet.id())) + .set(dsl::custom_router_id.eq(Option::::None)) + .returning(VpcSubnet::as_returning()) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) + .await .map_err(|e| { public_error_from_diesel( e, @@ -994,6 +1129,32 @@ impl DataStore { ErrorHandler::NotFoundByResource(authz_router), ) })?; + + // All child routes are deleted. 
+ use db::schema::router_route::dsl as rr; + let now = Utc::now(); + diesel::update(rr::router_route) + .filter(rr::time_deleted.is_null()) + .filter(rr::vpc_router_id.eq(authz_router.id())) + .set(rr::time_deleted.eq(now)) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + // Unlink all subnets from this router. + // This will temporarily leave some hanging subnet attachments. + // `vpc_get_active_custom_routers` will join and then filter, + // so such rows will be treated as though they have no custom router + // by the RPW. + use db::schema::vpc_subnet::dsl as vpc; + diesel::update(vpc::vpc_subnet) + .filter(vpc::time_deleted.is_null()) + .filter(vpc::custom_router_id.eq(authz_router.id())) + .set(vpc::custom_router_id.eq(Option::::None)) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + Ok(()) } @@ -1059,6 +1220,17 @@ impl DataStore { assert_eq!(authz_router.id(), route.vpc_router_id); opctx.authorize(authz::Action::CreateChild, authz_router).await?; + Self::router_create_route_on_connection( + route, + &*self.pool_connection_authorized(opctx).await?, + ) + .await + } + + pub async fn router_create_route_on_connection( + route: RouterRoute, + conn: &async_bb8_diesel::Connection, + ) -> CreateResult { use db::schema::router_route::dsl; let router_id = route.vpc_router_id; let name = route.name().clone(); @@ -1067,9 +1239,7 @@ impl DataStore { router_id, diesel::insert_into(dsl::router_route).values(route), ) - .insert_and_get_result_async( - &*self.pool_connection_authorized(opctx).await?, - ) + .insert_and_get_result_async(conn) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { @@ -1221,6 +1391,487 @@ impl DataStore { ) }) } + + /// Ensure the system router for a VPC has the correct set of subnet + /// routing rules, after any changes to a subnet. + pub async fn vpc_system_router_ensure_subnet_routes( + &self, + opctx: &OpContext, + vpc_id: Uuid, + ) -> Result<(), Error> { + // These rules are immutable from a user's perspective, and + // aren't something which they can meaningfully interact with, + // so uuid stability on e.g. VPC rename is not a primary concern. + // We make sure only to alter VPC subnet rules here: users may + // modify other system routes like internet gateways (which are + // `RouteKind::Default`). + let conn = self.pool_connection_authorized(opctx).await?; + self.transaction_retry_wrapper("vpc_subnet_route_reconcile") + .transaction(&conn, |conn| async move { + use db::schema::router_route::dsl; + use db::schema::vpc::dsl as vpc; + use db::schema::vpc_subnet::dsl as subnet; + + let system_router_id = vpc::vpc + .filter(vpc::id.eq(vpc_id)) + .filter(vpc::time_deleted.is_null()) + .select(vpc::system_router_id) + .limit(1) + .get_result_async(&conn) + .await?; + + let valid_subnets: Vec = subnet::vpc_subnet + .filter(subnet::vpc_id.eq(vpc_id)) + .filter(subnet::time_deleted.is_null()) + .select(VpcSubnet::as_select()) + .load_async(&conn) + .await?; + + let current_rules: Vec = dsl::router_route + .filter( + dsl::kind + .eq(RouterRouteKind(ExternalRouteKind::VpcSubnet)), + ) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::vpc_router_id.eq(system_router_id)) + .select(RouterRoute::as_select()) + .load_async(&conn) + .await?; + + // Build the add/delete sets. 
+ let expected_names: HashSet = valid_subnets + .iter() + .map(|v| v.identity.name.clone()) + .collect(); + + // This checks that we have rules which *point to* the named + // subnets, rather than working with rule names (even if these + // are set to match the subnet where possible). + // Rule names are effectively randomised when someone, e.g., + // names a subnet "default-v4"/"-v6", and this prevents us + // from repeatedly adding/deleting that route. + let mut found_names = HashSet::new(); + let mut invalid = Vec::new(); + for rule in current_rules { + let id = rule.id(); + match (rule.kind.0, rule.target.0) { + ( + ExternalRouteKind::VpcSubnet, + RouteTarget::Subnet(n), + ) if expected_names.contains(Name::ref_cast(&n)) => { + let _ = found_names.insert(n.into()); + } + _ => invalid.push(id), + } + } + + // Add/Remove routes. Retry if number is incorrect due to + // concurrent modification. + let now = Utc::now(); + let to_update = invalid.len(); + let updated_rows = diesel::update(dsl::router_route) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq_any(invalid)) + .set(dsl::time_deleted.eq(now)) + .execute_async(&conn) + .await?; + + if updated_rows != to_update { + return Err(DieselError::RollbackTransaction); + } + + // Duplicate rules are caught here using the UNIQUE constraint + // on names in a router. Only nexus can alter the system router, + // so there is no risk of collision with user-specified names. + // + // Subnets named "default-v4" or "default-v6" have their rules renamed + // to include the rule UUID. + for subnet in expected_names.difference(&found_names) { + let route_id = Uuid::new_v4(); + let route = db::model::RouterRoute::for_subnet( + route_id, + system_router_id, + subnet.clone(), + ); + + match Self::router_create_route_on_connection(route, &conn) + .await + { + Err(Error::Conflict { .. }) => { + return Err(DieselError::RollbackTransaction) + } + Err(_) => return Err(DieselError::NotFound), + _ => {} + } + } + + // Verify that route set is exactly as intended, and rollback otherwise. + let current_rules: Vec = dsl::router_route + .filter( + dsl::kind + .eq(RouterRouteKind(ExternalRouteKind::VpcSubnet)), + ) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::vpc_router_id.eq(system_router_id)) + .select(RouterRoute::as_select()) + .load_async(&conn) + .await?; + + if current_rules.len() != expected_names.len() { + return Err(DieselError::RollbackTransaction); + } + + for rule in current_rules { + match (rule.kind.0, rule.target.0) { + ( + ExternalRouteKind::VpcSubnet, + RouteTarget::Subnet(n), + ) if expected_names.contains(Name::ref_cast(&n)) => {} + _ => return Err(DieselError::RollbackTransaction), + } + } + + Ok(()) + }) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + self.vpc_increment_rpw_version(opctx, vpc_id).await + } + + /// Look up a VPC by VNI. + pub async fn vpc_get_system_router( + &self, + opctx: &OpContext, + vpc_id: Uuid, + ) -> LookupResult { + use db::schema::vpc::dsl as vpc_dsl; + use db::schema::vpc_router::dsl as router_dsl; + + vpc_dsl::vpc + .inner_join( + router_dsl::vpc_router + .on(router_dsl::id.eq(vpc_dsl::system_router_id)), + ) + .filter(vpc_dsl::time_deleted.is_null()) + .filter(vpc_dsl::id.eq(vpc_id)) + .filter(router_dsl::time_deleted.is_null()) + .filter(router_dsl::vpc_id.eq(vpc_id)) + .select(VpcRouter::as_select()) + .limit(1) + .first_async(&*self.pool_connection_authorized(opctx).await?) 
+ .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::Vpc, + LookupType::ById(vpc_id), + ), + ) + }) + } + + /// Fetch all active custom routers (and their parent subnets) + /// in a VPC. + pub async fn vpc_get_active_custom_routers( + &self, + opctx: &OpContext, + vpc_id: Uuid, + ) -> ListResultVec<(VpcSubnet, VpcRouter)> { + use db::schema::vpc_router::dsl as router_dsl; + use db::schema::vpc_subnet::dsl as subnet_dsl; + + subnet_dsl::vpc_subnet + .inner_join( + router_dsl::vpc_router.on(router_dsl::id + .nullable() + .eq(subnet_dsl::custom_router_id)), + ) + .filter(subnet_dsl::time_deleted.is_null()) + .filter(subnet_dsl::vpc_id.eq(vpc_id)) + .filter(router_dsl::time_deleted.is_null()) + .filter(router_dsl::vpc_id.eq(vpc_id)) + .select((VpcSubnet::as_select(), VpcRouter::as_select())) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::Vpc, + LookupType::ById(vpc_id), + ), + ) + }) + } + + /// Resolve all targets in a router into concrete details. + pub async fn vpc_resolve_router_rules( + &self, + opctx: &OpContext, + vpc_router_id: Uuid, + ) -> Result, Error> { + // Get all rules in target router. + opctx.check_complex_operations_allowed()?; + + let (.., authz_project, authz_vpc, authz_router) = + db::lookup::LookupPath::new(opctx, self) + .vpc_router_id(vpc_router_id) + .lookup_for(authz::Action::Read) + .await + .internal_context("lookup router by id for rules")?; + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + let mut all_rules = vec![]; + while let Some(p) = paginator.next() { + let batch = self + .vpc_router_route_list( + opctx, + &authz_router, + &PaginatedBy::Id(p.current_pagparams()), + ) + .await?; + paginator = p + .found_batch(&batch, &|s: &nexus_db_model::RouterRoute| s.id()); + all_rules.extend(batch); + } + + // This is not in a transaction, because... + // We're not necessarily too concerned about getting partially + // updated state when resolving these names. See the header discussion + // in `nexus/src/app/background/vpc_routes.rs`: any state updates + // are followed by a version bump/notify, so we will be eventually + // consistent with route resolution. + let mut subnet_names = HashSet::new(); + let mut vpc_names = HashSet::new(); + let mut inetgw_names = HashSet::new(); + let mut instance_names = HashSet::new(); + for rule in &all_rules { + match &rule.target.0 { + RouteTarget::Vpc(n) => { + vpc_names.insert(n.clone()); + } + RouteTarget::Subnet(n) => { + subnet_names.insert(n.clone()); + } + RouteTarget::Instance(n) => { + instance_names.insert(n.clone()); + } + RouteTarget::InternetGateway(n) => { + inetgw_names.insert(n.clone()); + } + _ => {} + } + + match &rule.destination.0 { + RouteDestination::Vpc(n) => { + vpc_names.insert(n.clone()); + } + RouteDestination::Subnet(n) => { + subnet_names.insert(n.clone()); + } + _ => {} + } + } + + // TODO: This would be nice to solve in fewer queries. + let subnets = stream::iter(subnet_names) + .filter_map(|name| async { + db::lookup::LookupPath::new(opctx, self) + .vpc_id(authz_vpc.id()) + .vpc_subnet_name(Name::ref_cast(&name)) + .fetch() + .await + .ok() + .map(|(.., subnet)| (name, subnet)) + }) + .collect::>() + .await; + + // TODO: unused until VPC peering. 
+ let _vpcs = stream::iter(vpc_names) + .filter_map(|name| async { + db::lookup::LookupPath::new(opctx, self) + .project_id(authz_project.id()) + .vpc_name(Name::ref_cast(&name)) + .fetch() + .await + .ok() + .map(|(.., vpc)| (name, vpc)) + }) + .collect::>() + .await; + + let instances = stream::iter(instance_names) + .filter_map(|name| async { + db::lookup::LookupPath::new(opctx, self) + .project_id(authz_project.id()) + .instance_name(Name::ref_cast(&name)) + .fetch() + .await + .ok() + .map(|(.., auth, inst)| (name, auth, inst)) + }) + .filter_map(|(name, authz_instance, instance)| async move { + // XXX: currently an instance can have one primary NIC, + // and it is not dual-stack (v4 + v6). We need + // to clarify what should be resolved in the v6 case. + self.instance_get_primary_network_interface( + opctx, + &authz_instance, + ) + .await + .ok() + .map(|primary_nic| (name, (instance, primary_nic))) + }) + .collect::>() + .await; + + // TODO: validate names of Internet Gateways. + + // See the discussion in `resolve_firewall_rules_for_sled_agent` on + // how we should resolve name misses in route resolution. + // This method adopts the same strategy: a lookup failure corresponds + // to a NO-OP rule. + let mut out = HashMap::new(); + for rule in all_rules { + // Some dests/targets (e.g., subnet) resolve to *several* specifiers + // to handle both v4 and v6. The user-facing API will prevent severe + // mistakes on naked IPs/CIDRs (mixed v4/6), but we need to be smarter + // around named entities here. + let (v4_dest, v6_dest) = match rule.destination.0 { + RouteDestination::Ip(ip @ IpAddr::V4(_)) => { + (Some(IpNet::host_net(ip)), None) + } + RouteDestination::Ip(ip @ IpAddr::V6(_)) => { + (None, Some(IpNet::host_net(ip))) + } + RouteDestination::IpNet(ip @ IpNet::V4(_)) => (Some(ip), None), + RouteDestination::IpNet(ip @ IpNet::V6(_)) => (None, Some(ip)), + RouteDestination::Subnet(n) => subnets + .get(&n) + .map(|s| { + ( + Some(s.ipv4_block.0.into()), + Some(s.ipv6_block.0.into()), + ) + }) + .unwrap_or_default(), + + // TODO: VPC peering. + RouteDestination::Vpc(_) => (None, None), + }; + + let (v4_target, v6_target) = match rule.target.0 { + RouteTarget::Ip(ip @ IpAddr::V4(_)) => { + (Some(RouterTarget::Ip(ip)), None) + } + RouteTarget::Ip(ip @ IpAddr::V6(_)) => { + (None, Some(RouterTarget::Ip(ip))) + } + RouteTarget::Subnet(n) => subnets + .get(&n) + .map(|s| { + ( + Some(RouterTarget::VpcSubnet( + s.ipv4_block.0.into(), + )), + Some(RouterTarget::VpcSubnet( + s.ipv6_block.0.into(), + )), + ) + }) + .unwrap_or_default(), + RouteTarget::Instance(n) => instances + .get(&n) + .map(|i| match i.1.ip { + // TODO: update for dual-stack v4/6. + ip @ IpNetwork::V4(_) => { + (Some(RouterTarget::Ip(ip.ip())), None) + } + ip @ IpNetwork::V6(_) => { + (None, Some(RouterTarget::Ip(ip.ip()))) + } + }) + .unwrap_or_default(), + RouteTarget::Drop => { + (Some(RouterTarget::Drop), Some(RouterTarget::Drop)) + } + + // TODO: Internet Gateways. + // The semantic here is 'name match => allow', + // as the other aspect they will control is SNAT + // IP allocation. Today, presence of this rule + // allows upstream regardless of name. + RouteTarget::InternetGateway(_n) => ( + Some(RouterTarget::InternetGateway), + Some(RouterTarget::InternetGateway), + ), + + // TODO: VPC Peering. + RouteTarget::Vpc(_) => (None, None), + }; + + // XXX: Is there another way we should be handling destination + // collisions within a router? 
'first/last wins' is fairly + // arbitrary when lookups are sorted on UUID, but it's + // unpredictable. + // It would be really useful to raise collisions and + // misses to users, somehow. + if let (Some(dest), Some(target)) = (v4_dest, v4_target) { + out.insert(dest, target); + } + + if let (Some(dest), Some(target)) = (v6_dest, v6_target) { + out.insert(dest, target); + } + } + + Ok(out) + } + + /// Trigger an RPW version bump on a single VPC router in response + /// to CRUD operations on individual routes. + pub async fn vpc_router_increment_rpw_version( + &self, + opctx: &OpContext, + router_id: Uuid, + ) -> UpdateResult<()> { + // NOTE: this operation and `vpc_increment_rpw_version` do not + // have auth checks, as these can occur in connection with unrelated + // resources -- the current user may have access to those, but be unable + // to modify the entire set of VPC routers in a project. + + use db::schema::vpc_router::dsl; + diesel::update(dsl::vpc_router) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(router_id)) + .set(dsl::resolved_version.eq(dsl::resolved_version + 1)) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(()) + } + + /// Trigger an RPW version bump on *all* routers within a VPC in + /// response to changes to named entities (e.g., subnets, instances). + pub async fn vpc_increment_rpw_version( + &self, + opctx: &OpContext, + vpc_id: Uuid, + ) -> UpdateResult<()> { + use db::schema::vpc_router::dsl; + diesel::update(dsl::vpc_router) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::vpc_id.eq(vpc_id)) + .set(dsl::resolved_version.eq(dsl::resolved_version + 1)) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(()) + } } #[cfg(test)] @@ -1232,6 +1883,7 @@ mod tests { use crate::db::datastore::test_utils::IneligibleSleds; use crate::db::model::Project; use crate::db::queries::vpc::MAX_VNI_SEARCH_RANGE_SIZE; + use nexus_db_fixed_data::silo::DEFAULT_SILO; use nexus_db_fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; use nexus_db_model::IncompleteNetworkInterface; use nexus_db_model::SledUpdate; @@ -1249,7 +1901,10 @@ mod tests { use omicron_common::api::external::Generation; use omicron_test_utils::dev; use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::InstanceUuid; use omicron_uuid_kinds::SledUuid; + use oxnet::IpNet; + use oxnet::Ipv4Net; use slog::info; // Test that we detect the right error condition and return None when we @@ -1748,4 +2403,487 @@ mod tests { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + async fn create_initial_vpc( + log: &slog::Logger, + opctx: &OpContext, + datastore: &DataStore, + ) -> (authz::Project, authz::Vpc, Vpc, authz::VpcRouter, VpcRouter) { + // Create a project and VPC. 
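The map assembled above holds at most one IPv4 and one IPv6 entry per destination prefix: a named subnet contributes a v4-to-v4 and a v6-to-v6 pair, and a later rule with the same destination simply overwrites the earlier one. A small self-contained sketch of that shape, using stand-in `Prefix` and `Target` types in place of the real `oxnet::IpNet` and sled-agent `RouterTarget`:

use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

// Stand-ins for the IpNet / RouterTarget types used by the datastore above.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct Prefix {
    addr: IpAddr,
    len: u8,
}

#[derive(Debug, Clone, Copy, PartialEq)]
enum Target {
    VpcSubnet(Prefix),
    Ip(IpAddr),
}

fn main() {
    // A subnet route ("s0") resolves to a v4->v4 and a v6->v6 mapping.
    let v4 = Prefix { addr: IpAddr::V4(Ipv4Addr::new(172, 30, 0, 0)), len: 22 };
    let v6 = Prefix {
        addr: IpAddr::V6(Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 0)),
        len: 64,
    };

    let mut resolved: HashMap<Prefix, Target> = HashMap::new();
    resolved.insert(v4, Target::VpcSubnet(v4));
    resolved.insert(v6, Target::VpcSubnet(v6));

    // A later rule with the same v4 destination silently replaces the subnet
    // target: the collision case flagged in the XXX comment above.
    resolved.insert(v4, Target::Ip(IpAddr::V4(Ipv4Addr::new(172, 30, 0, 5))));

    assert_eq!(resolved.len(), 2);
    assert!(resolved.keys().all(|p| p.len >= 22));
    assert_eq!(
        resolved[&v4],
        Target::Ip(IpAddr::V4(Ipv4Addr::new(172, 30, 0, 5)))
    );
}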
+ let project_params = params::ProjectCreate { + identity: IdentityMetadataCreateParams { + name: "project".parse().unwrap(), + description: String::from("test project"), + }, + }; + let project = Project::new(DEFAULT_SILO.id(), project_params); + let (authz_project, _) = datastore + .project_create(&opctx, project) + .await + .expect("failed to create project"); + + let vpc_name: external::Name = "my-vpc".parse().unwrap(); + let description = String::from("test vpc"); + let mut incomplete_vpc = IncompleteVpc::new( + Uuid::new_v4(), + authz_project.id(), + Uuid::new_v4(), + params::VpcCreate { + identity: IdentityMetadataCreateParams { + name: vpc_name.clone(), + description: description.clone(), + }, + ipv6_prefix: None, + dns_name: vpc_name.clone(), + }, + ) + .expect("failed to create incomplete VPC"); + let this_vni = Vni(external::Vni::try_from(2048).unwrap()); + incomplete_vpc.vni = this_vni; + info!( + log, + "creating initial VPC"; + "vni" => ?this_vni, + ); + let query = InsertVpcQuery::new(incomplete_vpc); + let (authz_vpc, db_vpc) = datastore + .project_create_vpc_raw(&opctx, &authz_project, query) + .await + .expect("failed to create initial set of VPCs") + .expect("expected an actual VPC"); + info!( + log, + "created VPC"; + "vpc" => ?db_vpc, + ); + + // Now create the system router for this VPC. Subnet CRUD + // operations need this defined to succeed. + let router = VpcRouter::new( + db_vpc.system_router_id, + db_vpc.id(), + VpcRouterKind::System, + nexus_types::external_api::params::VpcRouterCreate { + identity: IdentityMetadataCreateParams { + name: "system".parse().unwrap(), + description: description.clone(), + }, + }, + ); + + let (authz_router, db_router) = datastore + .vpc_create_router(&opctx, &authz_vpc, router) + .await + .unwrap(); + + (authz_project, authz_vpc, db_vpc, authz_router, db_router) + } + + async fn new_subnet_ez( + opctx: &OpContext, + datastore: &DataStore, + db_vpc: &Vpc, + authz_vpc: &authz::Vpc, + name: &str, + ip: [u8; 4], + prefix_len: u8, + ) -> (authz::VpcSubnet, VpcSubnet) { + let ipv6_block = db_vpc + .ipv6_prefix + .random_subnet( + omicron_common::address::VPC_SUBNET_IPV6_PREFIX_LENGTH, + ) + .map(|block| block.0) + .unwrap(); + + datastore + .vpc_create_subnet( + &opctx, + &authz_vpc, + db::model::VpcSubnet::new( + Uuid::new_v4(), + db_vpc.id(), + IdentityMetadataCreateParams { + name: name.parse().unwrap(), + description: "A subnet...".into(), + }, + Ipv4Net::new(core::net::Ipv4Addr::from(ip), prefix_len) + .unwrap(), + ipv6_block, + ), + ) + .await + .unwrap() + } + + // Test to verify that subnet CRUD operations are correctly + // reflected in the nexus-managed system router attached to a VPC, + // and that these resolve to the v4/6 subnets of each. + #[tokio::test] + async fn test_vpc_system_router_sync_to_subnets() { + usdt::register_probes().unwrap(); + let logctx = + dev::test_setup_log("test_vpc_system_router_sync_to_subnets"); + let log = &logctx.log; + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let (_, authz_vpc, db_vpc, _, db_router) = + create_initial_vpc(log, &opctx, &datastore).await; + + // InternetGateway route creation is handled by the saga proper, + // so we'll only have subnet routes here. Initially, we start with none: + verify_all_subnet_routes_in_router( + &opctx, + &datastore, + db_router.id(), + &[], + ) + .await; + + // Add a new subnet and we should get a new route. 
+ let (authz_sub0, sub0) = new_subnet_ez( + &opctx, + &datastore, + &db_vpc, + &authz_vpc, + "s0", + [172, 30, 0, 0], + 22, + ) + .await; + + verify_all_subnet_routes_in_router( + &opctx, + &datastore, + db_router.id(), + &[&sub0], + ) + .await; + + // Add another, and get another route. + let (authz_sub1, sub1) = new_subnet_ez( + &opctx, + &datastore, + &db_vpc, + &authz_vpc, + "s1", + [172, 31, 0, 0], + 22, + ) + .await; + + verify_all_subnet_routes_in_router( + &opctx, + &datastore, + db_router.id(), + &[&sub0, &sub1], + ) + .await; + + // Rename one subnet, and our invariants should hold. + let sub0 = datastore + .vpc_update_subnet( + &opctx, + &authz_sub0, + VpcSubnetUpdate { + name: Some( + "a-new-name".parse::().unwrap().into(), + ), + description: None, + time_modified: Utc::now(), + }, + ) + .await + .unwrap(); + + verify_all_subnet_routes_in_router( + &opctx, + &datastore, + db_router.id(), + &[&sub0, &sub1], + ) + .await; + + // Delete one, and routes should stay in sync. + datastore.vpc_delete_subnet(&opctx, &sub0, &authz_sub0).await.unwrap(); + + verify_all_subnet_routes_in_router( + &opctx, + &datastore, + db_router.id(), + &[&sub1], + ) + .await; + + // If we use a reserved name, we should be able to update the table. + let sub1 = datastore + .vpc_update_subnet( + &opctx, + &authz_sub1, + VpcSubnetUpdate { + name: Some( + "default-v4".parse::().unwrap().into(), + ), + description: None, + time_modified: Utc::now(), + }, + ) + .await + .unwrap(); + + verify_all_subnet_routes_in_router( + &opctx, + &datastore, + db_router.id(), + &[&sub1], + ) + .await; + + // Ditto for adding such a route. + let (_, sub0) = new_subnet_ez( + &opctx, + &datastore, + &db_vpc, + &authz_vpc, + "default-v6", + [172, 30, 0, 0], + 22, + ) + .await; + + verify_all_subnet_routes_in_router( + &opctx, + &datastore, + db_router.id(), + &[&sub0, &sub1], + ) + .await; + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + async fn verify_all_subnet_routes_in_router( + opctx: &OpContext, + datastore: &DataStore, + router_id: Uuid, + subnets: &[&VpcSubnet], + ) -> Vec { + let conn = datastore.pool_connection_authorized(opctx).await.unwrap(); + + use db::schema::router_route::dsl; + let routes = dsl::router_route + .filter(dsl::time_deleted.is_null()) + .filter(dsl::vpc_router_id.eq(router_id)) + .filter(dsl::kind.eq(RouterRouteKind(ExternalRouteKind::VpcSubnet))) + .select(RouterRoute::as_select()) + .load_async(&*conn) + .await + .unwrap(); + + // We should have exactly as many subnet routes as subnets. + assert_eq!(routes.len(), subnets.len()); + + let mut names: HashMap<_, _> = + subnets.iter().map(|s| (s.name().clone(), 0usize)).collect(); + + // Each should have a target+dest bound to a subnet by name. + for route in &routes { + let found_name = match &route.target.0 { + RouteTarget::Subnet(name) => name, + e => panic!("found target {e:?} instead of Subnet({{name}})"), + }; + + match &route.destination.0 { + RouteDestination::Subnet(name) => assert_eq!(name, found_name), + e => panic!("found dest {e:?} instead of Subnet({{name}})"), + } + + *names.get_mut(found_name).unwrap() += 1; + } + + // Each name should be used exactly once. + for (name, count) in names { + assert_eq!(count, 1, "subnet {name} should appear exactly once") + } + + // Resolve the routes: we should have two for each entry: + let resolved = datastore + .vpc_resolve_router_rules(&opctx, router_id) + .await + .unwrap(); + assert_eq!(resolved.len(), 2 * subnets.len()); + + // And each subnet generates a v4->v4 and v6->v6. 
+ for subnet in subnets { + assert!(resolved.iter().any(|(k, v)| { + *k == subnet.ipv4_block.0.into() + && match v { + RouterTarget::VpcSubnet(ip) => { + *ip == subnet.ipv4_block.0.into() + } + _ => false, + } + })); + assert!(resolved.iter().any(|(k, v)| { + *k == subnet.ipv6_block.0.into() + && match v { + RouterTarget::VpcSubnet(ip) => { + *ip == subnet.ipv6_block.0.into() + } + _ => false, + } + })); + } + + routes + } + + // Test to verify that VPC routers resolve to the primary addr + // of an instance NIC. + #[tokio::test] + async fn test_vpc_router_rule_instance_resolve() { + usdt::register_probes().unwrap(); + let logctx = + dev::test_setup_log("test_vpc_router_rule_instance_resolve"); + let log = &logctx.log; + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let (authz_project, authz_vpc, db_vpc, authz_router, _) = + create_initial_vpc(log, &opctx, &datastore).await; + + // Create a subnet for an instance to live in. + let (authz_sub0, sub0) = new_subnet_ez( + &opctx, + &datastore, + &db_vpc, + &authz_vpc, + "s0", + [172, 30, 0, 0], + 22, + ) + .await; + + // Add a rule pointing to the instance before it is created. + // We're commiting some minor data integrity sins by putting + // these into a system router, but that's irrelevant to resolution. + let inst_name = "insty".parse::().unwrap(); + let _ = datastore + .router_create_route( + &opctx, + &authz_router, + RouterRoute::new( + Uuid::new_v4(), + authz_router.id(), + external::RouterRouteKind::Custom, + params::RouterRouteCreate { + identity: IdentityMetadataCreateParams { + name: "to-vpn".parse().unwrap(), + description: "A rule...".into(), + }, + target: external::RouteTarget::Instance( + inst_name.clone(), + ), + destination: external::RouteDestination::IpNet( + "192.168.0.0/16".parse().unwrap(), + ), + }, + ), + ) + .await + .unwrap(); + + // Resolve the rules: we will have two entries generated by the + // VPC subnet (v4, v6). + let routes = datastore + .vpc_resolve_router_rules(&opctx, authz_router.id()) + .await + .unwrap(); + + assert_eq!(routes.len(), 2); + + // Create an instance, this will have no effect for now as + // the instance lacks a NIC. + let db_inst = datastore + .project_create_instance( + &opctx, + &authz_project, + db::model::Instance::new( + InstanceUuid::new_v4(), + authz_project.id(), + ¶ms::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: inst_name.clone(), + description: "An instance...".into(), + }, + ncpus: external::InstanceCpuCount(1), + memory: 10.into(), + hostname: "insty".parse().unwrap(), + user_data: vec![], + network_interfaces: + params::InstanceNetworkInterfaceAttachment::None, + external_ips: vec![], + disks: vec![], + ssh_public_keys: None, + start: false, + }, + ), + ) + .await + .unwrap(); + let (.., authz_instance) = + db::lookup::LookupPath::new(&opctx, &datastore) + .instance_id(db_inst.id()) + .lookup_for(authz::Action::CreateChild) + .await + .unwrap(); + + let routes = datastore + .vpc_resolve_router_rules(&opctx, authz_router.id()) + .await + .unwrap(); + + assert_eq!(routes.len(), 2); + + // Create a primary NIC on the instance; the route can now resolve + // to the instance's IP. 
+ let nic = datastore + .instance_create_network_interface( + &opctx, + &authz_sub0, + &authz_instance, + IncompleteNetworkInterface::new_instance( + Uuid::new_v4(), + InstanceUuid::from_untyped_uuid(db_inst.id()), + sub0, + IdentityMetadataCreateParams { + name: "nic".parse().unwrap(), + description: "A NIC...".into(), + }, + None, + ) + .unwrap(), + ) + .await + .unwrap(); + + let routes = datastore + .vpc_resolve_router_rules(&opctx, authz_router.id()) + .await + .unwrap(); + + // Verify we now have a route pointing at this instance. + assert_eq!(routes.len(), 3); + assert!(routes.iter().any(|(k, v)| (*k + == "192.168.0.0/16".parse::().unwrap()) + && match v { + RouterTarget::Ip(ip) => *ip == nic.ip.ip(), + _ => false, + })); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } } diff --git a/nexus/db-queries/src/db/mod.rs b/nexus/db-queries/src/db/mod.rs index 7bd1bbec61..c8c8860901 100644 --- a/nexus/db-queries/src/db/mod.rs +++ b/nexus/db-queries/src/db/mod.rs @@ -29,7 +29,6 @@ pub mod queries; mod raw_query_builder; mod saga_recovery; mod sec_store; -pub mod subquery; pub(crate) mod true_or_cast_error; mod update_and_check; diff --git a/nexus/db-queries/src/db/saga_recovery.rs b/nexus/db-queries/src/db/saga_recovery.rs index 25f8ff788d..e85011f60f 100644 --- a/nexus/db-queries/src/db/saga_recovery.rs +++ b/nexus/db-queries/src/db/saga_recovery.rs @@ -6,9 +6,18 @@ use crate::context::OpContext; use crate::db; +use crate::db::datastore::SQL_BATCH_SIZE; +use crate::db::error::public_error_from_diesel; +use crate::db::error::ErrorHandler; +use crate::db::pagination::{paginated, paginated_multicolumn, Paginator}; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::prelude::*; +use diesel::ExpressionMethods; +use diesel::SelectableHelper; use futures::{future::BoxFuture, TryFutureExt}; -use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; +use omicron_common::api::external::LookupType; +use omicron_common::api::external::ResourceType; use omicron_common::backoff::retry_notify; use omicron_common::backoff::retry_policy_internal_service; use omicron_common::backoff::BackoffError; @@ -162,17 +171,6 @@ where })) } -// Creates new page params for querying sagas. -fn new_page_params( - marker: Option<&uuid::Uuid>, -) -> DataPageParams<'_, uuid::Uuid> { - DataPageParams { - marker, - direction: dropshot::PaginationOrder::Ascending, - limit: std::num::NonZeroU32::new(100).unwrap(), - } -} - /// Queries the database to return a list of uncompleted sagas assigned to SEC /// `sec_id` // For now, we do the simplest thing: we fetch all the sagas that the @@ -195,17 +193,32 @@ async fn list_unfinished_sagas( // Although we could read them all into memory simultaneously, this // risks blocking the DB for an unreasonable amount of time. Instead, // we paginate to avoid cutting off availability to the DB. 
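The reworked saga listing below uses the same keyset-pagination idiom as the route listing above: fetch one batch at a time in key order, remember the last key seen, and stop once a short batch comes back. A self-contained sketch of that loop, with a hypothetical `fetch_page` standing in for the `paginated()` Diesel query:

// Hypothetical page fetch: rows with ids strictly greater than `after`, at
// most `batch_size` of them, in key order. The real code builds this with
// `paginated()` / `Paginator` against the database instead.
fn fetch_page(after: Option<u32>, batch_size: usize, rows: &[u32]) -> Vec<u32> {
    rows.iter()
        .copied()
        .filter(|id| after.map_or(true, |a| *id > a))
        .take(batch_size)
        .collect()
}

fn main() {
    let rows: Vec<u32> = (0..250).collect(); // stand-in for a table sorted by id
    let batch_size = 100;

    let mut marker = None;
    let mut all = Vec::new();
    loop {
        let batch = fetch_page(marker, batch_size, &rows);
        // A short (or empty) batch means every remaining row has been seen.
        let done = batch.len() < batch_size;
        marker = batch.last().copied();
        all.extend(batch);
        if done {
            break;
        }
    }
    assert_eq!(all, rows);
}

The multicolumn variant used for saga node events works the same way, except the marker is a tuple such as `(node_id, event_type)` so that rows sharing a node id still have a total order.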
- let mut last_id = None; let mut sagas = vec![]; - loop { - let pagparams = new_page_params(last_id.as_ref()); - let mut some_sagas = - datastore.saga_list_unfinished_by_id(sec_id, &pagparams).await?; - if some_sagas.is_empty() { - break; - } - sagas.append(&mut some_sagas); - last_id = Some(sagas.last().as_ref().unwrap().id.0 .0); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + let conn = datastore.pool_connection_authorized(opctx).await?; + while let Some(p) = paginator.next() { + use db::schema::saga::dsl; + + let mut batch = paginated(dsl::saga, dsl::id, &p.current_pagparams()) + .filter(dsl::saga_state.ne(db::saga_types::SagaCachedState( + steno::SagaCachedState::Done, + ))) + .filter(dsl::current_sec.eq(*sec_id)) + .select(db::saga_types::Saga::as_select()) + .load_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::SagaDbg, + LookupType::ById(sec_id.0), + ), + ) + })?; + + paginator = p.found_batch(&batch, &|row| row.id); + sagas.append(&mut batch); } Ok(sagas) } @@ -239,7 +252,7 @@ where "saga_name" => saga_name.clone(), ); - let log_events = load_saga_log(datastore, &saga).await?; + let log_events = load_saga_log(&opctx, datastore, &saga).await?; trace!( opctx.log, "recovering saga: loaded log"; @@ -277,6 +290,7 @@ where /// Queries the database to load the full log for the specified saga async fn load_saga_log( + opctx: &OpContext, datastore: &db::DataStore, saga: &db::saga_types::Saga, ) -> Result, Error> { @@ -285,17 +299,30 @@ async fn load_saga_log( // Although we could read them all into memory simultaneously, this // risks blocking the DB for an unreasonable amount of time. Instead, // we paginate to avoid cutting off availability. - let mut last_id = None; let mut events = vec![]; - loop { - let pagparams = new_page_params(last_id.as_ref()); - let mut some_events = - datastore.saga_node_event_list_by_id(saga.id, &pagparams).await?; - if some_events.is_empty() { - break; - } - events.append(&mut some_events); - last_id = Some(events.last().as_ref().unwrap().saga_id.0); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + let conn = datastore.pool_connection_authorized(opctx).await?; + while let Some(p) = paginator.next() { + use db::schema::saga_node_event::dsl; + let batch = paginated_multicolumn( + dsl::saga_node_event, + (dsl::node_id, dsl::event_type), + &p.current_pagparams(), + ) + .filter(dsl::saga_id.eq(saga.id)) + .select(db::saga_types::SagaNodeEvent::as_select()) + .load_async(&*conn) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + .await?; + paginator = + p.found_batch(&batch, &|row| (row.node_id, row.event_type.clone())); + + let mut batch = batch + .into_iter() + .map(|event| steno::SagaNodeEvent::try_from(event)) + .collect::, Error>>()?; + + events.append(&mut batch); } Ok(events) } @@ -308,6 +335,8 @@ mod test { use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; use once_cell::sync::Lazy; + use pretty_assertions::assert_eq; + use rand::seq::SliceRandom; use std::sync::atomic::{AtomicBool, AtomicU32, Ordering}; use steno::{ new_action_noop_undo, Action, ActionContext, ActionError, @@ -575,4 +604,202 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + #[tokio::test] + async fn test_list_unfinished_sagas() { + // Test setup + let logctx = dev::test_setup_log("test_list_unfinished_sagas"); + let log = logctx.log.new(o!()); + let (mut db, db_datastore) = new_db(&log).await; + let sec_id = 
db::SecId(uuid::Uuid::new_v4()); + let opctx = OpContext::for_tests( + log, + Arc::clone(&db_datastore) as Arc, + ); + + // Create a couple batches of sagas. + let new_running_db_saga = || { + let params = steno::SagaCreateParams { + id: steno::SagaId(Uuid::new_v4()), + name: steno::SagaName::new("test saga"), + dag: serde_json::value::Value::Null, + state: steno::SagaCachedState::Running, + }; + + db::model::saga_types::Saga::new(sec_id, params) + }; + let mut inserted_sagas = (0..SQL_BATCH_SIZE.get() * 2) + .map(|_| new_running_db_saga()) + .collect::>(); + + // Shuffle these sagas into a random order to check that the pagination + // order is working as intended on the read path, which we'll do later + // in this test. + inserted_sagas.shuffle(&mut rand::thread_rng()); + + // Insert the batches of unfinished sagas into the database + let conn = db_datastore + .pool_connection_unauthorized() + .await + .expect("Failed to access db connection"); + diesel::insert_into(db::schema::saga::dsl::saga) + .values(inserted_sagas.clone()) + .execute_async(&*conn) + .await + .expect("Failed to insert test setup data"); + + // List them, expect to see them all in order by ID. + let mut observed_sagas = + list_unfinished_sagas(&opctx, &db_datastore, &sec_id) + .await + .expect("Failed to list unfinished sagas"); + inserted_sagas.sort_by_key(|a| a.id); + + // Timestamps can change slightly when we insert them. + // + // Sanitize them to make input/output equality checks easier. + let sanitize_timestamps = |sagas: &mut Vec| { + for saga in sagas { + saga.time_created = chrono::DateTime::UNIX_EPOCH; + saga.adopt_time = chrono::DateTime::UNIX_EPOCH; + } + }; + sanitize_timestamps(&mut observed_sagas); + sanitize_timestamps(&mut inserted_sagas); + + assert_eq!( + inserted_sagas, observed_sagas, + "Observed sagas did not match inserted sagas" + ); + + // Test cleanup + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_list_unfinished_nodes() { + // Test setup + let logctx = dev::test_setup_log("test_list_unfinished_nodes"); + let log = logctx.log.new(o!()); + let (mut db, db_datastore) = new_db(&log).await; + let sec_id = db::SecId(uuid::Uuid::new_v4()); + let opctx = OpContext::for_tests( + log, + Arc::clone(&db_datastore) as Arc, + ); + let saga_id = steno::SagaId(Uuid::new_v4()); + + // Create a couple batches of saga events + let new_db_saga_nodes = + |node_id: u32, event_type: steno::SagaNodeEventType| { + let event = steno::SagaNodeEvent { + saga_id, + node_id: steno::SagaNodeId::from(node_id), + event_type, + }; + + db::model::saga_types::SagaNodeEvent::new(event, sec_id) + }; + let mut inserted_nodes = (0..SQL_BATCH_SIZE.get() * 2) + .flat_map(|i| { + // This isn't an exhaustive list of event types, but gives us a few + // options to pick from. Since this is a pagination key, it's + // important to include a variety here. + use steno::SagaNodeEventType::*; + [ + new_db_saga_nodes(i, Started), + new_db_saga_nodes(i, UndoStarted), + new_db_saga_nodes(i, UndoFinished), + ] + }) + .collect::>(); + + // Shuffle these nodes into a random order to check that the pagination + // order is working as intended on the read path, which we'll do later + // in this test. 
+ inserted_nodes.shuffle(&mut rand::thread_rng()); + + // Insert them into the database + let conn = db_datastore + .pool_connection_unauthorized() + .await + .expect("Failed to access db connection"); + diesel::insert_into(db::schema::saga_node_event::dsl::saga_node_event) + .values(inserted_nodes.clone()) + .execute_async(&*conn) + .await + .expect("Failed to insert test setup data"); + + // List them, expect to see them all in order by ID. + // + // Note that we need to make up a saga to see this, but the + // part of it that actually matters is the ID. + let params = steno::SagaCreateParams { + id: saga_id, + name: steno::SagaName::new("test saga"), + dag: serde_json::value::Value::Null, + state: steno::SagaCachedState::Running, + }; + let saga = db::model::saga_types::Saga::new(sec_id, params); + let observed_nodes = load_saga_log(&opctx, &db_datastore, &saga) + .await + .expect("Failed to list unfinished nodes"); + inserted_nodes.sort_by_key(|a| (a.node_id, a.event_type.clone())); + + let inserted_nodes = inserted_nodes + .into_iter() + .map(|node| steno::SagaNodeEvent::try_from(node)) + .collect::, _>>() + .expect("Couldn't convert DB nodes to steno nodes"); + + // The steno::SagaNodeEvent type doesn't implement PartialEq, so we need to do this + // a little manually. + assert_eq!(inserted_nodes.len(), observed_nodes.len()); + for i in 0..inserted_nodes.len() { + assert_eq!(inserted_nodes[i].saga_id, observed_nodes[i].saga_id); + assert_eq!(inserted_nodes[i].node_id, observed_nodes[i].node_id); + assert_eq!( + inserted_nodes[i].event_type.label(), + observed_nodes[i].event_type.label() + ); + } + + // Test cleanup + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_list_no_unfinished_nodes() { + // Test setup + let logctx = dev::test_setup_log("test_list_no_unfinished_nodes"); + let log = logctx.log.new(o!()); + let (mut db, db_datastore) = new_db(&log).await; + let sec_id = db::SecId(uuid::Uuid::new_v4()); + let opctx = OpContext::for_tests( + log, + Arc::clone(&db_datastore) as Arc, + ); + let saga_id = steno::SagaId(Uuid::new_v4()); + + let params = steno::SagaCreateParams { + id: saga_id, + name: steno::SagaName::new("test saga"), + dag: serde_json::value::Value::Null, + state: steno::SagaCachedState::Running, + }; + let saga = db::model::saga_types::Saga::new(sec_id, params); + + // Test that this returns "no nodes" rather than throwing some "not + // found" error. + let observed_nodes = load_saga_log(&opctx, &db_datastore, &saga) + .await + .expect("Failed to list unfinished nodes"); + assert_eq!(observed_nodes.len(), 0); + + // Test cleanup + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } } diff --git a/nexus/db-queries/src/db/sec_store.rs b/nexus/db-queries/src/db/sec_store.rs index f8fd4ab86d..72de02ff54 100644 --- a/nexus/db-queries/src/db/sec_store.rs +++ b/nexus/db-queries/src/db/sec_store.rs @@ -70,8 +70,6 @@ impl steno::SecStore for CockroachDbSecStore { // This is an internal service query to CockroachDB. backoff::retry_policy_internal_service(), || { - // An interesting question is how to handle errors. - // // In general, there are some kinds of database errors that are // temporary/server errors (e.g. network failures), and some // that are permanent/client errors (e.g. conflict during @@ -85,10 +83,9 @@ impl steno::SecStore for CockroachDbSecStore { // errors that likely require operator intervention.) 
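The behaviour these comments describe, retrying transient failures indefinitely with capped exponential backoff while permanent failures stop the loop, can be illustrated with a small self-contained sketch; `StoreError`, `retry_forever`, and the closures below are hypothetical stand-ins rather than the real `omicron_common::backoff` API:

use std::thread::sleep;
use std::time::Duration;

// Stand-in error classification: the real code maps database errors into
// transient (retry) vs. permanent (give up) via `BackoffError::transient`.
enum StoreError {
    Transient(String),
    Permanent(String),
}

// Simplified mirror of "retry transient errors forever with capped
// exponential backoff"; the closure stands in for `saga_create_event`.
fn retry_forever<F>(mut attempt_write: F) -> Result<(), String>
where
    F: FnMut() -> Result<(), StoreError>,
{
    let mut delay = Duration::from_millis(50);
    let max_delay = Duration::from_secs(5);
    loop {
        match attempt_write() {
            Ok(()) => return Ok(()),
            Err(StoreError::Permanent(msg)) => return Err(msg),
            Err(StoreError::Transient(_)) => {
                // Keep trying for as long as the dependency is down; callers
                // have to plan for this loop never terminating.
                sleep(delay);
                delay = (delay * 2).min(max_delay);
            }
        }
    }
}

fn main() {
    let mut failures_left = 3;
    let result = retry_forever(|| {
        if failures_left > 0 {
            failures_left -= 1;
            Err(StoreError::Transient("database unavailable".into()))
        } else {
            Ok(())
        }
    });
    assert!(result.is_ok());

    // Permanent errors are surfaced to the caller instead of being retried.
    let denied = retry_forever(|| {
        Err(StoreError::Permanent("constraint violation".into()))
    });
    assert!(denied.is_err());
}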
// // At a higher level, callers should plan for the fact that - // record_event could potentially loop forever. See - // https://github.com/oxidecomputer/omicron/issues/5406 and the - // note in `nexus/src/app/saga.rs`'s `execute_saga` for more - // details. + // record_event (and, so, saga execution) could potentially loop + // indefinitely while the datastore (or other dependent + // services) are down. self.datastore .saga_create_event(&our_event) .map_err(backoff::BackoffError::transient) diff --git a/nexus/db-queries/src/db/subquery.rs b/nexus/db-queries/src/db/subquery.rs deleted file mode 100644 index 2ccd8d3e14..0000000000 --- a/nexus/db-queries/src/db/subquery.rs +++ /dev/null @@ -1,135 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Subquery-related traits which may be derived for DB structures. - -use diesel::pg::Pg; -use diesel::query_builder::AstPass; -use diesel::query_builder::Query; -use diesel::query_builder::QueryFragment; -use diesel::query_builder::QueryId; - -/// Represents a named subquery within a CTE. -/// -/// For an expression like: -/// -/// ```sql -/// WITH -/// foo as ..., -/// bar as ..., -/// SELECT * FROM bar; -/// ``` -/// -/// This trait represents one of the sub-query arms, such as "foo as ..." or -/// "bar as ...". -// This trait intentionally is agnostic to the SQL type of the subquery, -// meaning that it can be used by the [`CteBuilder`] within a [`Vec`]. -pub trait Subquery: QueryFragment + Send { - /// Returns the underlying query fragment. - /// - /// For ` as `, this refers to the "QUERY" portion - /// of SQL. - fn query(&self) -> &dyn QueryFragment; -} - -/// Trait which implies that the associated query may be used -/// as a query source. -/// -/// For example, given the subquery: -/// -/// ```sql -/// user_ids as (SELECT id FROM user) -/// ``` -/// -/// It should be possible to "SELECT" from `user_ids`. This trait -/// surfaces that underlying query source. -// TODO: Take a much closer look at "AliasSource". It doesn't solve -// the problem of grabbing the query fragment for you, but it might -// help for referencing the "origin" object (table in upstream, but -// plausibly a subquery too). -pub trait AsQuerySource { - type QuerySource; - fn query_source(&self) -> Self::QuerySource; -} - -/// Describes the requirements to be subquery within a CTE: -/// - (Query) It must be a complete SQL query with a specific return type -/// - (QueryFragment) It must be capable of emitting a SQL string -// TODO: In the future, we may force this subquery to have named columns. -pub trait CteQuery: Query + QueryFragment + Send {} - -impl CteQuery for T where T: Query + QueryFragment + Send {} - -/// A thin wrapper around a [`Subquery`]. -/// -/// Used to avoid orphan rules while creating blanket implementations. 
-pub struct CteSubquery(Box); - -impl QueryId for CteSubquery { - type QueryId = (); - const HAS_STATIC_QUERY_ID: bool = false; -} - -impl QueryFragment for CteSubquery { - fn walk_ast<'a>( - &'a self, - mut out: AstPass<'_, 'a, Pg>, - ) -> diesel::QueryResult<()> { - out.unsafe_to_cache_prepared(); - - self.0.walk_ast(out.reborrow())?; - out.push_sql(" AS ("); - self.0.query().walk_ast(out.reborrow())?; - out.push_sql(")"); - Ok(()) - } -} - -pub struct CteBuilder { - subqueries: Vec, -} - -impl CteBuilder { - pub fn new() -> Self { - Self { subqueries: vec![] } - } - - pub fn add_subquery(mut self, subquery: Q) -> Self { - self.subqueries.push(CteSubquery(Box::new(subquery))); - self - } - - // TODO: It would be nice if this could be typed? - // It's not necessarily a Subquery, but it's probably a "Query" object - // with a particular SQL type. - pub fn build(self, statement: Box + Send>) -> Cte { - Cte { subqueries: self.subqueries, statement } - } -} - -pub struct Cte { - subqueries: Vec, - statement: Box + Send>, -} - -impl QueryFragment for Cte { - fn walk_ast<'a>( - &'a self, - mut out: AstPass<'_, 'a, Pg>, - ) -> diesel::QueryResult<()> { - out.unsafe_to_cache_prepared(); - - out.push_sql("WITH "); - for (pos, query) in self.subqueries.iter().enumerate() { - query.walk_ast(out.reborrow())?; - if pos == self.subqueries.len() - 1 { - out.push_sql(" "); - } else { - out.push_sql(", "); - } - } - self.statement.walk_ast(out.reborrow())?; - Ok(()) - } -} diff --git a/nexus/examples/config.toml b/nexus/examples/config.toml index c282232ef8..407f5479d5 100644 --- a/nexus/examples/config.toml +++ b/nexus/examples/config.toml @@ -5,7 +5,7 @@ [console] # Directory for static assets. Absolute path or relative to CWD. static_dir = "out/console-assets" -session_idle_timeout_minutes = 480 # 6 hours +session_idle_timeout_minutes = 480 # 8 hours session_absolute_timeout_minutes = 1440 # 24 hours # List of authentication schemes to support. @@ -114,6 +114,7 @@ blueprints.period_secs_collect_crdb_node_ids = 180 sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 +region_replacement_driver.period_secs = 10 # How frequently to query the status of active instances. 
instance_watcher.period_secs = 30 service_firewall_propagation.period_secs = 300 diff --git a/nexus/reconfigurator/execution/src/external_networking.rs b/nexus/reconfigurator/execution/src/external_networking.rs index 13cf601135..3ac1de96d5 100644 --- a/nexus/reconfigurator/execution/src/external_networking.rs +++ b/nexus/reconfigurator/execution/src/external_networking.rs @@ -499,6 +499,7 @@ mod tests { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }; let dns_id = OmicronZoneUuid::new_v4(); @@ -524,6 +525,7 @@ mod tests { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }; // Boundary NTP: @@ -552,6 +554,7 @@ mod tests { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }; Self { diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 9d7c542eda..1609a6c0cd 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -828,6 +828,7 @@ impl<'a> BlueprintBuilder<'a> { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], } }; @@ -1164,12 +1165,13 @@ impl<'a> BlueprintZonesBuilder<'a> { /// Helper for working with sets of disks on each sled /// -/// Tracking the set of disks is slightly non-trivial because we need to bump -/// the per-sled generation number iff the disks are changed. So we need to -/// keep track of whether we've changed the disks relative to the parent -/// blueprint. We do this by keeping a copy of any [`BlueprintDisksConfig`] -/// that we've changed and a _reference_ to the parent blueprint's disks. This -/// struct makes it easy for callers iterate over the right set of disks. +/// Tracking the set of disks is slightly non-trivial because we need to +/// bump the per-sled generation number iff the disks are changed. So +/// we need to keep track of whether we've changed the disks relative +/// to the parent blueprint. We do this by keeping a copy of any +/// [`BlueprintPhysicalDisksConfig`] that we've changed and a _reference_ to +/// the parent blueprint's disks. This struct makes it easy for callers iterate +/// over the right set of disks. struct BlueprintDisksBuilder<'a> { changed_disks: BTreeMap, parent_disks: &'a BTreeMap, diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt index 4d366f849c..837cc56553 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt @@ -206,6 +206,7 @@ ERRORS: ), primary: true, slot: 0, + transit_ips: [], }, external_tls: false, external_dns_servers: [], diff --git a/nexus/src/app/background/common.rs b/nexus/src/app/background/driver.rs similarity index 76% rename from nexus/src/app/background/common.rs rename to nexus/src/app/background/driver.rs index da595dc4e1..e620f5d7bc 100644 --- a/nexus/src/app/background/common.rs +++ b/nexus/src/app/background/driver.rs @@ -2,132 +2,10 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! # Nexus Background Tasks -//! -//! A **background task** in Nexus is any operation that can be activated both -//! periodically and by an explicit signal. This is aimed at RFD 373-style -//! 
"reliable persistent workflows", also called "reconcilers" or "controllers". -//! These are a kind of automation that examines some _current_ state, compares -//! it to some _intended_ state, and potentially takes action to try to bring -//! the current state in sync with the intended state. Our canonical example is -//! that we want to have Nexus monitor the intended DNS configuration. When it -//! changes, we want to propagate the new configuration to all DNS servers. We -//! implement this with three different background tasks: -//! -//! 1. `DnsConfigWatcher` reads the DNS configuration from the database, stores -//! it in memory, and makes it available via a `tokio::sync::watch` channel. -//! 2. `DnsServersWatcher` reads the list of DNS servers from the database, -//! stores it in memory, and makes it available via a `tokio::sync::watch` -//! channel. -//! 3. `DnsPropagator` uses the the watch channels provided by the other two -//! background tasks to notice when either the DNS configuration or the list -//! of DNS servers has changed. It uses the latest values to make a request -//! to each server to update its configuration. -//! -//! When Nexus changes the DNS configuration, it will update the database with -//! the new configuration and then explicitly activate the `DnsConfigWatcher`. -//! When it reads the new config, it will send it to its watch channel, and that -//! will activate the `DnsPropagator`. If any of this fails, or if Nexus -//! crashes at any point, then the periodic activation of every background task -//! will eventually cause the latest config to be propagated to all of the -//! current servers. -//! -//! The background task framework here is pretty minimal: essentially what it -//! gives you is that you just write an idempotent function that you want to -//! happen periodically or on-demand, wrap it in an impl of `BackgroundTask`, -//! register that with the `Driver`, and you're done. The framework will take -//! care of: -//! -//! * providing a way for Nexus at-large to activate your task -//! * activating your task periodically -//! * ensuring that the task is activated only once at a time in this Nexus -//! (but note that it may always be running concurrently in other Nexus -//! instances) -//! * providing basic visibility into whether the task is running, when the task -//! last ran, etc. -//! -//! We may well want to extend the framework as we build more tasks in general -//! and reconcilers specifically. But we should be mindful not to create -//! footguns for ourselves! See "Design notes" below. -//! -//! ## Notes for background task implementors -//! -//! Background tasks are not necessarily just for reconcilers. That's just the -//! design center. The first two DNS background tasks above aren't reconcilers -//! in any non-trivial sense. -//! -//! Background task activations do not accept input, by design. See "Design -//! notes" below. -//! -//! Generally, you probably don't want to have your background task do retries. -//! If things fail, you rely on the periodic reactivation to try again. -//! -//! ## Design notes -//! -//! The underlying design for RFD 373-style reconcilers is inspired by a few -//! related principles: -//! -//! * the principle in distributed systems of having exactly one code path to -//! achieve a thing, and then always using that path to do that thing (as -//! opposed to having separate paths for, say, the happy path vs. failover, -//! and having one of those paths rarely used) -//! 
* the [constant-work pattern][1], which basically suggests that a system can -//! be more robust and scalable if it's constructed in a way that always does -//! the same amount of work. Imagine if we made requests to the DNS servers -//! to incrementally update their config every time the DNS data changed. -//! This system does more work as users make more requests. During overloads, -//! things can fall over. Compare with a system whose frontend merely updates -//! the DNS configuration that _should_ exist and whose backend periodically -//! scans the complete intended state and then sets its own state accordingly. -//! The backend does the same amount of work no matter how many requests were -//! made, making it more resistant to overload. A big downside of this -//! approach is increased latency from the user making a request to seeing it -//! applied. This can be mitigated (sacrificing some, but not all, of the -//! "constant work" property) by triggering a backend scan operation when user -//! requests complete. -//! * the design pattern in distributed systems of keeping two copies of data in -//! sync using both event notifications (like a changelog) _and_ periodic full -//! scans. The hope is that a full scan never finds a change that wasn't -//! correctly sync'd, but incorporating an occasional full scan into the -//! design ensures that such bugs are found and their impact repaired -//! automatically. -//! -//! [1]: https://aws.amazon.com/builders-library/reliability-and-constant-work/ -//! -//! Combining these, we get a design pattern for a "reconciler" where: -//! -//! * The reconciler is activated by explicit request (when we know it has work -//! to do) _and_ periodically (to deal with all manner of transient failures) -//! * The reconciler's activity is idempotent: given the same underlying state -//! (e.g., database state), it always attempts to do the same thing. -//! * Each activation of the reconciler accepts no input. That is, even when we -//! think we know what changed, we do not use that information. This ensures -//! that the reconciler really is idempotent and its actions are based solely -//! on the state that it's watching. Put differently: having reconcilers -//! accept an explicit hint about what changed (and then doing something -//! differently based on that) bifurcates the code: there's the common case -//! where that hint is available and the rarely-exercised case when it's not -//! (e.g., because Nexus crashed and it's the subsequent periodic activation -//! that's propagating this change). This is what we're trying to avoid. -//! * We do allow reconcilers to be triggered by a `tokio::sync::watch` channel -//! -- but again, not using the _data_ from that channel. There are two big -//! advantages here: (1) reduced latency from when a change is made to when -//! the reconciler applies it, and (2) (arguably another way to say the same -//! thing) we can space out the periodic activations much further, knowing -//! that most of the time we're not increasing latency by doing this. This -//! compromises the "constant-work" pattern a bit: we might wind up running -//! the reconciler more often during busy times than during idle times, and we -//! could find that overloads something. However, the _operation_ of the -//! reconciler can still be constant work, and there's no more than that -//! amount of work going on at any given time. -//! -//! `watch` channels are a convenient primitive here because they only store -//! one value. 
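A minimal, self-contained sketch of the activation pattern described here, a periodic timer plus a `tokio::sync::watch` trigger where every activation reads only the latest value, using plain tokio primitives rather than the real `BackgroundTask`/`Driver` machinery (assumes the `tokio` crate with its `full` feature set):

use std::time::Duration;
use tokio::sync::watch;
use tokio::time::{interval, MissedTickBehavior};

// Stand-in for a reconciler activation loop: run the (idempotent) body on a
// timer *and* whenever the watched value changes, always reading the latest
// state rather than consuming a queue of events.
async fn run_reconciler(mut rx: watch::Receiver<u64>) {
    let mut ticker = interval(Duration::from_millis(50));
    ticker.set_missed_tick_behavior(MissedTickBehavior::Delay);
    for _ in 0..4 {
        tokio::select! {
            _ = ticker.tick() => {}
            changed = rx.changed() => {
                if changed.is_err() {
                    // Sender dropped; nothing left to reconcile against.
                    return;
                }
            }
        }
        // The "activation": inspect the latest intended state and act on it.
        let intended = *rx.borrow_and_update();
        println!("reconciling against generation {intended}");
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = watch::channel(0u64);
    let task = tokio::spawn(run_reconciler(rx));
    // Bump the intended state a few times; intermediate values may be skipped
    // entirely, which is fine for a latest-state reconciler.
    for generation in 1..=3u64 {
        let _ = tx.send(generation);
        tokio::time::sleep(Duration::from_millis(20)).await;
    }
    task.await.unwrap();
}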
With a little care, we can ensure that the writer never blocks -//! and the readers can all see the latest value. (By design, reconcilers -//! generally only care about the latest state of something, not any -//! intermediate states.) We don't have to worry about an unbounded queue, or -//! handling a full queue, or other forms of backpressure. +//! Manages execution of background tasks +use super::BackgroundTask; +use super::TaskHandle; use assert_matches::assert_matches; use chrono::Utc; use futures::future::BoxFuture; @@ -149,16 +27,6 @@ use tokio::sync::watch; use tokio::sync::Notify; use tokio::time::MissedTickBehavior; -/// An operation activated both periodically and by an explicit signal -/// -/// See module-level documentation for details. -pub trait BackgroundTask: Send + Sync { - fn activate<'a>( - &'a mut self, - opctx: &'a OpContext, - ) -> BoxFuture<'a, serde_json::Value>; -} - /// Drives the execution of background tasks /// /// Nexus has only one Driver. All background tasks are registered with the @@ -170,21 +38,6 @@ pub struct Driver { tasks: BTreeMap, } -/// Identifies a background task -/// -/// This is returned by [`Driver::register()`] to identify the corresponding -/// background task. It's then accepted by functions like -/// [`Driver::activate()`] and [`Driver::task_status()`] to identify the task. -#[derive(Clone, Debug, Ord, PartialOrd, PartialEq, Eq)] -pub struct TaskHandle(String); - -impl TaskHandle { - /// Returns the unique name of this background task - pub fn name(&self) -> &str { - &self.0 - } -} - /// Driver-side state of a background task struct Task { /// what this task does (for developers) @@ -303,7 +156,7 @@ impl Driver { /// /// If the task is currently running, it will be activated again when it /// finishes. - pub fn activate(&self, task: &TaskHandle) { + pub(super) fn activate(&self, task: &TaskHandle) { self.task_required(task).notify.notify_one(); } @@ -466,7 +319,6 @@ impl GenericWatcher for watch::Receiver { mod test { use super::BackgroundTask; use super::Driver; - use crate::app::background::common::ActivationReason; use crate::app::sagas::SagaRequest; use assert_matches::assert_matches; use chrono::Utc; @@ -474,6 +326,7 @@ mod test { use futures::FutureExt; use nexus_db_queries::context::OpContext; use nexus_test_utils_macros::nexus_test; + use nexus_types::internal_api::views::ActivationReason; use std::time::Duration; use std::time::Instant; use tokio::sync::mpsc; diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index f78cb69d76..c5bef1f517 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -2,29 +2,32 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Background task initialization - -use super::abandoned_vmm_reaper; -use super::bfd; -use super::blueprint_execution; -use super::blueprint_load; -use super::common; -use super::crdb_node_id_collector; -use super::dns_config; -use super::dns_propagation; -use super::dns_servers; -use super::external_endpoints; -use super::instance_watcher; -use super::inventory_collection; -use super::metrics_producer_gc; -use super::nat_cleanup; -use super::phantom_disks; -use super::physical_disk_adoption; -use super::region_replacement; -use super::service_firewall_rules; -use super::sync_service_zone_nat::ServiceZoneNatTracker; -use super::sync_switch_configuration::SwitchPortSettingsManager; -use super::v2p_mappings::V2PManager; +//! 
Specific background task initialization + +use super::tasks::abandoned_vmm_reaper; +use super::tasks::bfd; +use super::tasks::blueprint_execution; +use super::tasks::blueprint_load; +use super::tasks::crdb_node_id_collector; +use super::tasks::dns_config; +use super::tasks::dns_propagation; +use super::tasks::dns_servers; +use super::tasks::external_endpoints; +use super::tasks::instance_watcher; +use super::tasks::inventory_collection; +use super::tasks::metrics_producer_gc; +use super::tasks::nat_cleanup; +use super::tasks::phantom_disks; +use super::tasks::physical_disk_adoption; +use super::tasks::region_replacement; +use super::tasks::region_replacement_driver; +use super::tasks::service_firewall_rules; +use super::tasks::sync_service_zone_nat::ServiceZoneNatTracker; +use super::tasks::sync_switch_configuration::SwitchPortSettingsManager; +use super::tasks::v2p_mappings::V2PManager; +use super::tasks::vpc_routes; +use super::Driver; +use super::TaskHandle; use crate::app::oximeter::PRODUCER_LEASE_DURATION; use crate::app::sagas::SagaRequest; use nexus_config::BackgroundTaskConfig; @@ -46,73 +49,79 @@ use uuid::Uuid; pub struct BackgroundTasks { /// interface for working with background tasks (activation, checking /// status, etc.) - pub driver: common::Driver, + pub driver: Driver, /// task handle for the internal DNS config background task - pub task_internal_dns_config: common::TaskHandle, + pub task_internal_dns_config: TaskHandle, /// task handle for the internal DNS servers background task - pub task_internal_dns_servers: common::TaskHandle, + pub task_internal_dns_servers: TaskHandle, /// task handle for the external DNS config background task - pub task_external_dns_config: common::TaskHandle, + pub task_external_dns_config: TaskHandle, /// task handle for the external DNS servers background task - pub task_external_dns_servers: common::TaskHandle, + pub task_external_dns_servers: TaskHandle, /// task handle for pruning metrics producers with expired leases - pub task_metrics_producer_gc: common::TaskHandle, + pub task_metrics_producer_gc: TaskHandle, /// task handle for the task that keeps track of external endpoints - pub task_external_endpoints: common::TaskHandle, + pub task_external_endpoints: TaskHandle, /// external endpoints read by the background task pub external_endpoints: tokio::sync::watch::Receiver< Option, >, /// task handle for the ipv4 nat entry garbage collector - pub nat_cleanup: common::TaskHandle, + pub task_nat_cleanup: TaskHandle, /// task handle for the switch bfd manager - pub bfd_manager: common::TaskHandle, + pub task_bfd_manager: TaskHandle, /// task handle for the task that collects inventory - pub task_inventory_collection: common::TaskHandle, + pub task_inventory_collection: TaskHandle, /// task handle for the task that collects inventory - pub task_physical_disk_adoption: common::TaskHandle, + pub task_physical_disk_adoption: TaskHandle, /// task handle for the task that detects phantom disks - pub task_phantom_disks: common::TaskHandle, + pub task_phantom_disks: TaskHandle, /// task handle for blueprint target loader - pub task_blueprint_loader: common::TaskHandle, + pub task_blueprint_loader: TaskHandle, /// task handle for blueprint execution background task - pub task_blueprint_executor: common::TaskHandle, + pub task_blueprint_executor: TaskHandle, /// task handle for collecting CockroachDB node IDs - pub task_crdb_node_id_collector: common::TaskHandle, + pub task_crdb_node_id_collector: TaskHandle, /// task handle for the service zone nat 
tracker - pub task_service_zone_nat_tracker: common::TaskHandle, + pub task_service_zone_nat_tracker: TaskHandle, /// task handle for the switch port settings manager - pub task_switch_port_settings_manager: common::TaskHandle, + pub task_switch_port_settings_manager: TaskHandle, /// task handle for the opte v2p manager - pub task_v2p_manager: common::TaskHandle, + pub task_v2p_manager: TaskHandle, /// task handle for the task that detects if regions need replacement and /// begins the process - pub task_region_replacement: common::TaskHandle, + pub task_region_replacement: TaskHandle, + + /// task handle for the task that drives region replacements forward + pub task_region_replacement_driver: TaskHandle, /// task handle for the task that polls sled agents for instance states. - pub task_instance_watcher: common::TaskHandle, + pub task_instance_watcher: TaskHandle, /// task handle for propagation of VPC firewall rules for Omicron services /// with external network connectivity, - pub task_service_firewall_propagation: common::TaskHandle, + pub task_service_firewall_propagation: TaskHandle, /// task handle for deletion of database records for VMMs abandoned by their /// instances. - pub task_abandoned_vmm_reaper: common::TaskHandle, + pub task_abandoned_vmm_reaper: TaskHandle, + + /// task handle for propagation of VPC router rules to all OPTE ports + pub task_vpc_route_manager: TaskHandle, } impl BackgroundTasks { @@ -132,7 +141,7 @@ impl BackgroundTasks { ), producer_registry: &ProducerRegistry, ) -> BackgroundTasks { - let mut driver = common::Driver::new(); + let mut driver = Driver::new(); let (task_internal_dns_config, task_internal_dns_servers) = init_dns( &mut driver, @@ -160,7 +169,7 @@ impl BackgroundTasks { String::from("metrics_producer_gc"), String::from( "unregisters Oximeter metrics producers that have not \ - renewed their lease", + renewed their lease", ), config.metrics_producer_gc.period_secs, Box::new(gc), @@ -179,8 +188,8 @@ impl BackgroundTasks { String::from("external_endpoints"), String::from( "reads config for silos and TLS certificates to determine \ - the right set of HTTP endpoints, their HTTP server names, \ - and which TLS certificates to use on each one", + the right set of HTTP endpoints, their HTTP server \ + names, and which TLS certificates to use on each one", ), config.external_endpoints.period_secs, Box::new(watcher), @@ -190,29 +199,29 @@ impl BackgroundTasks { (task, watcher_channel) }; - let nat_cleanup = { + let task_nat_cleanup = { driver.register( "nat_v4_garbage_collector".to_string(), String::from( - "prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table \ - based on a predetermined retention policy", + "prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry \ + table based on a predetermined retention policy", ), config.nat_cleanup.period_secs, Box::new(nat_cleanup::Ipv4NatGarbageCollector::new( datastore.clone(), - resolver.clone() + resolver.clone(), )), opctx.child(BTreeMap::new()), vec![], ) }; - let bfd_manager = { + let task_bfd_manager = { driver.register( "bfd_manager".to_string(), String::from( "Manages bidirectional fowarding detection (BFD) \ - configuration on rack switches", + configuration on rack switches", ), config.bfd_manager.period_secs, Box::new(bfd::BfdManager::new( @@ -306,7 +315,7 @@ impl BackgroundTasks { String::from("inventory_collection"), String::from( "collects hardware and software inventory data from the \ - whole system", + whole system", ), config.inventory.period_secs, Box::new(collector), @@ -338,7 
+347,8 @@ impl BackgroundTasks { driver.register( "service_zone_nat_tracker".to_string(), String::from( - "ensures service zone nat records are recorded in NAT RPW table", + "ensures service zone nat records are recorded in NAT RPW \ + table", ), config.sync_service_zone_nat.period_secs, Box::new(ServiceZoneNatTracker::new( @@ -385,7 +395,10 @@ impl BackgroundTasks { let task = driver.register( String::from("region_replacement"), - String::from("detects if a region requires replacing and begins the process"), + String::from( + "detects if a region requires replacing and begins the \ + process", + ), config.region_replacement.period_secs, Box::new(detector), opctx.child(BTreeMap::new()), @@ -395,6 +408,26 @@ impl BackgroundTasks { task }; + // Background task: drive region replacements forward to completion + let task_region_replacement_driver = { + let detector = + region_replacement_driver::RegionReplacementDriver::new( + datastore.clone(), + saga_request.clone(), + ); + + let task = driver.register( + String::from("region_replacement_driver"), + String::from("drive region replacements forward to completion"), + config.region_replacement_driver.period_secs, + Box::new(detector), + opctx.child(BTreeMap::new()), + vec![], + ); + + task + }; + let task_instance_watcher = { let watcher = instance_watcher::InstanceWatcher::new( datastore.clone(), @@ -412,12 +445,13 @@ impl BackgroundTasks { vec![], ) }; + // Background task: service firewall rule propagation let task_service_firewall_propagation = driver.register( String::from("service_firewall_rule_propagation"), String::from( - "propagates VPC firewall rules for Omicron \ - services with external network connectivity", + "propagates VPC firewall rules for Omicron services with \ + external network connectivity", ), config.service_firewall_propagation.period_secs, Box::new(service_firewall_rules::ServiceRulePropagator::new( @@ -427,19 +461,31 @@ impl BackgroundTasks { vec![], ); + // Background task: OPTE port route propagation + let task_vpc_route_manager = { + let watcher = vpc_routes::VpcRouteManager::new(datastore.clone()); + driver.register( + "vpc_route_manager".to_string(), + "propagates updated VPC routes to all OPTE ports".into(), + config.switch_port_settings_manager.period_secs, + Box::new(watcher), + opctx.child(BTreeMap::new()), + vec![], + ) + }; + // Background task: abandoned VMM reaping let task_abandoned_vmm_reaper = driver.register( - String::from("abandoned_vmm_reaper"), - String::from( - "deletes sled reservations for VMMs that have been abandoned by their instances", - ), - config.abandoned_vmm_reaper.period_secs, - Box::new(abandoned_vmm_reaper::AbandonedVmmReaper::new( - datastore, - )), - opctx.child(BTreeMap::new()), - vec![], - ); + String::from("abandoned_vmm_reaper"), + String::from( + "deletes sled reservations for VMMs that have been abandoned \ + by their instances", + ), + config.abandoned_vmm_reaper.period_secs, + Box::new(abandoned_vmm_reaper::AbandonedVmmReaper::new(datastore)), + opctx.child(BTreeMap::new()), + vec![], + ); BackgroundTasks { driver, @@ -450,8 +496,8 @@ impl BackgroundTasks { task_metrics_producer_gc, task_external_endpoints, external_endpoints, - nat_cleanup, - bfd_manager, + task_nat_cleanup, + task_bfd_manager, task_inventory_collection, task_physical_disk_adoption, task_phantom_disks, @@ -462,25 +508,31 @@ impl BackgroundTasks { task_switch_port_settings_manager, task_v2p_manager, task_region_replacement, + task_region_replacement_driver, task_instance_watcher, 
task_service_firewall_propagation, task_abandoned_vmm_reaper, + task_vpc_route_manager, } } - pub fn activate(&self, task: &common::TaskHandle) { + /// Activate the specified background task + /// + /// If the task is currently running, it will be activated again when it + /// finishes. + pub fn activate(&self, task: &TaskHandle) { self.driver.activate(task); } } fn init_dns( - driver: &mut common::Driver, + driver: &mut Driver, opctx: &OpContext, datastore: Arc, dns_group: DnsGroup, resolver: internal_dns::resolver::Resolver, config: &DnsTasksConfig, -) -> (common::TaskHandle, common::TaskHandle) { +) -> (TaskHandle, TaskHandle) { let dns_group_name = dns_group.to_string(); let metadata = BTreeMap::from([("dns_group".to_string(), dns_group_name)]); @@ -524,8 +576,8 @@ fn init_dns( format!("dns_propagation_{}", dns_group), format!( "propagates latest {} DNS configuration (from {:?} background \ - task) to the latest list of DNS servers (from {:?} background \ - task)", + task) to the latest list of DNS servers (from {:?} background \ + task)", dns_group, task_name_config, task_name_servers, ), config.period_secs_propagation, @@ -607,7 +659,10 @@ pub mod test { }; match record.get(0) { Some(dns_service_client::types::DnsRecord::Srv(srv)) => srv, - record => panic!("expected a SRV record for {internal_dns_srv_name}, found {record:?}"), + record => panic!( + "expected a SRV record for {internal_dns_srv_name}, found \ + {record:?}" + ), } }; @@ -754,7 +809,7 @@ pub mod test { ) { println!( "waiting for propagation of generation {generation} to {label} \ - DNS server ({addr})", + DNS server ({addr})", ); let client = dns_service_client::Client::new( @@ -785,13 +840,13 @@ pub mod test { .await; if let Err(err) = result { panic!( - "DNS generation {generation} not propagated to \ - {label} DNS server ({addr}) within {poll_max:?}: {err}" + "DNS generation {generation} not propagated to {label} DNS \ + server ({addr}) within {poll_max:?}: {err}" ); } else { println!( - "DNS generation {generation} propagated to {label} \ - DNS server ({addr}) successfully." + "DNS generation {generation} propagated to {label} DNS server \ + ({addr}) successfully." ); } } diff --git a/nexus/src/app/background/mod.rs b/nexus/src/app/background/mod.rs index 7d1fc43d69..40716aa036 100644 --- a/nexus/src/app/background/mod.rs +++ b/nexus/src/app/background/mod.rs @@ -2,31 +2,164 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Background tasks +//! # Nexus Background Tasks +//! +//! A **background task** in Nexus is any operation that can be activated both +//! periodically and by an explicit signal. This is aimed at RFD 373-style +//! "reliable persistent workflows", also called "reconcilers" or "controllers". +//! These are a kind of automation that examines some _current_ state, compares +//! it to some _intended_ state, and potentially takes action to try to bring +//! the current state in sync with the intended state. Our canonical example is +//! that we want to have Nexus monitor the intended DNS configuration. When it +//! changes, we want to propagate the new configuration to all DNS servers. We +//! implement this with three different background tasks: +//! +//! 1. `DnsConfigWatcher` reads the DNS configuration from the database, stores +//! it in memory, and makes it available via a `tokio::sync::watch` channel. +//! 2. `DnsServersWatcher` reads the list of DNS servers from the database, +//! 
stores it in memory, and makes it available via a `tokio::sync::watch` +//! channel. +//! 3. `DnsPropagator` uses the watch channels provided by the other two +//! background tasks to notice when either the DNS configuration or the list +//! of DNS servers has changed. It uses the latest values to make a request +//! to each server to update its configuration. +//! +//! When Nexus changes the DNS configuration, it will update the database with +//! the new configuration and then explicitly activate the `DnsConfigWatcher`. +//! When it reads the new config, it will send it to its watch channel, and that +//! will activate the `DnsPropagator`. If any of this fails, or if Nexus +//! crashes at any point, then the periodic activation of every background task +//! will eventually cause the latest config to be propagated to all of the +//! current servers. +//! +//! The background task framework here is pretty minimal: essentially what it +//! gives you is that you just write an idempotent function that you want to +//! happen periodically or on-demand, wrap it in an impl of `BackgroundTask`, +//! register that with the `Driver`, and you're done. The framework will take +//! care of: +//! +//! * providing a way for Nexus at-large to activate your task +//! * activating your task periodically +//! * ensuring that the task is activated only once at a time in this Nexus +//! (but note that it may always be running concurrently in other Nexus +//! instances) +//! * providing basic visibility into whether the task is running, when the task +//! last ran, etc. +//! +//! We may well want to extend the framework as we build more tasks in general +//! and reconcilers specifically. But we should be mindful not to create +//! footguns for ourselves! See "Design notes" below. +//! +//! ## Notes for background task implementors +//! +//! Background tasks are not necessarily just for reconcilers. That's just the +//! design center. The first two DNS background tasks above aren't reconcilers +//! in any non-trivial sense. +//! +//! Background task activations do not accept input, by design. See "Design +//! notes" below. +//! +//! Generally, you probably don't want to have your background task do retries. +//! If things fail, you rely on the periodic reactivation to try again. +//! +//! ## Design notes +//! +//! The underlying design for RFD 373-style reconcilers is inspired by a few +//! related principles: +//! +//! * the principle in distributed systems of having exactly one code path to +//! achieve a thing, and then always using that path to do that thing (as +//! opposed to having separate paths for, say, the happy path vs. failover, +//! and having one of those paths rarely used) +//! * the [constant-work pattern][1], which basically suggests that a system can +//! be more robust and scalable if it's constructed in a way that always does +//! the same amount of work. Imagine if we made requests to the DNS servers +//! to incrementally update their config every time the DNS data changed. +//! This system does more work as users make more requests. During overloads, +//! things can fall over. Compare with a system whose frontend merely updates +//! the DNS configuration that _should_ exist and whose backend periodically +//! scans the complete intended state and then sets its own state accordingly. +//! The backend does the same amount of work no matter how many requests were +//! made, making it more resistant to overload. A big downside of this
approach is increased latency from the user making a request to seeing it +//! applied. This can be mitigated (sacrificing some, but not all, of the +//! "constant work" property) by triggering a backend scan operation when user +//! requests complete. +//! * the design pattern in distributed systems of keeping two copies of data in +//! sync using both event notifications (like a changelog) _and_ periodic full +//! scans. The hope is that a full scan never finds a change that wasn't +//! correctly sync'd, but incorporating an occasional full scan into the +//! design ensures that such bugs are found and their impact repaired +//! automatically. +//! +//! [1]: https://aws.amazon.com/builders-library/reliability-and-constant-work/ +//! +//! Combining these, we get a design pattern for a "reconciler" where: +//! +//! * The reconciler is activated by explicit request (when we know it has work +//! to do) _and_ periodically (to deal with all manner of transient failures) +//! * The reconciler's activity is idempotent: given the same underlying state +//! (e.g., database state), it always attempts to do the same thing. +//! * Each activation of the reconciler accepts no input. That is, even when we +//! think we know what changed, we do not use that information. This ensures +//! that the reconciler really is idempotent and its actions are based solely +//! on the state that it's watching. Put differently: having reconcilers +//! accept an explicit hint about what changed (and then doing something +//! differently based on that) bifurcates the code: there's the common case +//! where that hint is available and the rarely-exercised case when it's not +//! (e.g., because Nexus crashed and it's the subsequent periodic activation +//! that's propagating this change). This is what we're trying to avoid. +//! * We do allow reconcilers to be triggered by a `tokio::sync::watch` channel +//! -- but again, not using the _data_ from that channel. There are two big +//! advantages here: (1) reduced latency from when a change is made to when +//! the reconciler applies it, and (2) (arguably another way to say the same +//! thing) we can space out the periodic activations much further, knowing +//! that most of the time we're not increasing latency by doing this. This +//! compromises the "constant-work" pattern a bit: we might wind up running +//! the reconciler more often during busy times than during idle times, and we +//! could find that overloads something. However, the _operation_ of the +//! reconciler can still be constant work, and there's no more than that +//! amount of work going on at any given time. +//! +//! `watch` channels are a convenient primitive here because they only store +//! one value. With a little care, we can ensure that the writer never blocks +//! and the readers can all see the latest value. (By design, reconcilers +//! generally only care about the latest state of something, not any +//! intermediate states.) We don't have to worry about an unbounded queue, or +//! handling a full queue, or other forms of backpressure. 
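To make the framework described above concrete, here is a minimal sketch of what a task and its registration look like. It is illustrative only and not part of this change: `ExampleCounter`, the `example` config entry, and the surrounding `driver`, `config`, and `opctx` bindings are assumptions modeled on the `BackgroundTask` trait and the `Driver::register()` call sites that appear elsewhere in this diff.

```rust
// Sketch only: a task that does nothing but count its own activations,
// following the trait defined in this module.
use crate::app::background::BackgroundTask;
use futures::future::BoxFuture;
use futures::FutureExt;
use nexus_db_queries::context::OpContext;
use serde_json::json;

pub struct ExampleCounter {
    activations: usize,
}

impl BackgroundTask for ExampleCounter {
    fn activate<'a>(
        &'a mut self,
        _opctx: &'a OpContext,
    ) -> BoxFuture<'a, serde_json::Value> {
        async {
            // A real task re-reads its authoritative state here and acts
            // idempotently; the driver guarantees at most one concurrent
            // activation within this Nexus process.
            self.activations += 1;
            json!({ "activations": self.activations })
        }
        .boxed()
    }
}

// Registration and activation, mirroring the call sites in init.rs above
// (`config.example` is a hypothetical config entry):
//
//     let task_example = driver.register(
//         "example_counter".to_string(),
//         String::from("counts activations of an example task"),
//         config.example.period_secs,
//         Box::new(ExampleCounter { activations: 0 }),
//         opctx.child(BTreeMap::new()),
//         vec![],
//     );
//
//     // Elsewhere in Nexus, whenever the relevant state changes:
//     background_tasks.activate(&task_example);
```

The JSON value returned from `activate()` is what the framework records for that activation, presumably what `Driver::task_status()` reports back to callers.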
-mod abandoned_vmm_reaper; -mod bfd; -mod blueprint_execution; -mod blueprint_load; -mod common; -mod crdb_node_id_collector; -mod dns_config; -mod dns_propagation; -mod dns_servers; -mod external_endpoints; +mod driver; mod init; -mod instance_watcher; -mod inventory_collection; -mod metrics_producer_gc; -mod nat_cleanup; -mod networking; -mod phantom_disks; -mod physical_disk_adoption; -mod region_replacement; -mod service_firewall_rules; mod status; -mod sync_service_zone_nat; -mod sync_switch_configuration; -mod v2p_mappings; +mod tasks; +pub use driver::Driver; pub use init::BackgroundTasks; + +use futures::future::BoxFuture; +use nexus_auth::context::OpContext; + +/// An operation activated both periodically and by an explicit signal +/// +/// See module-level documentation for details. +pub trait BackgroundTask: Send + Sync { + fn activate<'a>( + &'a mut self, + opctx: &'a OpContext, + ) -> BoxFuture<'a, serde_json::Value>; +} + +/// Identifies a background task +/// +/// This is returned by [`Driver::register()`] to identify the corresponding +/// background task. It's then accepted by functions like +/// [`Driver::activate()`] and [`Driver::task_status()`] to identify the task. +#[derive(Clone, Debug, Ord, PartialOrd, PartialEq, Eq)] +pub struct TaskHandle(String); + +impl TaskHandle { + /// Returns the unique name of this background task + pub fn name(&self) -> &str { + &self.0 + } +} diff --git a/nexus/src/app/background/abandoned_vmm_reaper.rs b/nexus/src/app/background/tasks/abandoned_vmm_reaper.rs similarity index 99% rename from nexus/src/app/background/abandoned_vmm_reaper.rs rename to nexus/src/app/background/tasks/abandoned_vmm_reaper.rs index 3883185d9f..a81080ec75 100644 --- a/nexus/src/app/background/abandoned_vmm_reaper.rs +++ b/nexus/src/app/background/tasks/abandoned_vmm_reaper.rs @@ -31,7 +31,7 @@ //! is handled elsewhere, by `notify_instance_updated` and (eventually) the //! `instance-update` saga. -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use anyhow::Context; use futures::future::BoxFuture; use futures::FutureExt; @@ -135,7 +135,8 @@ impl AbandonedVmmReaper { results.error_count += 1; *last_err = Err(e).with_context(|| { format!( - "failed to delete sled reservation for VMM {vmm_id}" + "failed to delete sled reservation for VMM \ + {vmm_id}" ) }); } diff --git a/nexus/src/app/background/bfd.rs b/nexus/src/app/background/tasks/bfd.rs similarity index 98% rename from nexus/src/app/background/bfd.rs rename to nexus/src/app/background/tasks/bfd.rs index 39b3c8f661..67b15ee3d3 100644 --- a/nexus/src/app/background/bfd.rs +++ b/nexus/src/app/background/tasks/bfd.rs @@ -6,10 +6,10 @@ //! (BFD) sessions. use crate::app::{ - background::networking::build_mgd_clients, map_switch_zone_addrs, + background::tasks::networking::build_mgd_clients, map_switch_zone_addrs, }; -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use futures::future::BoxFuture; use futures::FutureExt; use internal_dns::{resolver::Resolver, ServiceName}; diff --git a/nexus/src/app/background/blueprint_execution.rs b/nexus/src/app/background/tasks/blueprint_execution.rs similarity index 99% rename from nexus/src/app/background/blueprint_execution.rs rename to nexus/src/app/background/tasks/blueprint_execution.rs index b01d1213de..253a89a18d 100644 --- a/nexus/src/app/background/blueprint_execution.rs +++ b/nexus/src/app/background/tasks/blueprint_execution.rs @@ -4,7 +4,7 @@ //! 
Background task for realizing a plan blueprint -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use futures::future::BoxFuture; use futures::FutureExt; use nexus_db_queries::context::OpContext; @@ -111,7 +111,7 @@ impl BackgroundTask for BlueprintExecutor { #[cfg(test)] mod test { use super::BlueprintExecutor; - use crate::app::background::common::BackgroundTask; + use crate::app::background::BackgroundTask; use httptest::matchers::{all_of, request}; use httptest::responders::status_code; use httptest::Expectation; diff --git a/nexus/src/app/background/blueprint_load.rs b/nexus/src/app/background/tasks/blueprint_load.rs similarity index 98% rename from nexus/src/app/background/blueprint_load.rs rename to nexus/src/app/background/tasks/blueprint_load.rs index baf86d655f..31bc00441d 100644 --- a/nexus/src/app/background/blueprint_load.rs +++ b/nexus/src/app/background/tasks/blueprint_load.rs @@ -7,7 +7,7 @@ //! This task triggers the `blueprint_execution` background task when the //! blueprint changes. -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use futures::future::BoxFuture; use futures::FutureExt; use nexus_db_queries::context::OpContext; @@ -127,8 +127,8 @@ impl BackgroundTask for TargetBlueprintLoader { // bugs further up the stack. if *old_blueprint != new_blueprint { let message = format!( - "blueprint for id {} changed. \ - Blueprints are supposed to be immutable.", + "blueprint for id {} changed. Blueprints are supposed \ + to be immutable.", target_id ); error!(&log, "{}", message); @@ -185,7 +185,7 @@ impl BackgroundTask for TargetBlueprintLoader { #[cfg(test)] mod test { use super::*; - use crate::app::background::common::BackgroundTask; + use crate::app::background::BackgroundTask; use nexus_inventory::now_db_precision; use nexus_test_utils_macros::nexus_test; use nexus_types::deployment::{ diff --git a/nexus/src/app/background/crdb_node_id_collector.rs b/nexus/src/app/background/tasks/crdb_node_id_collector.rs similarity index 99% rename from nexus/src/app/background/crdb_node_id_collector.rs rename to nexus/src/app/background/tasks/crdb_node_id_collector.rs index 2736514021..2a0e1c6d3d 100644 --- a/nexus/src/app/background/crdb_node_id_collector.rs +++ b/nexus/src/app/background/tasks/crdb_node_id_collector.rs @@ -23,7 +23,7 @@ //! the status of all nodes and looking for orphans, perhaps) to determine //! whether a zone without a known node ID ever existed. -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use anyhow::ensure; use anyhow::Context; use futures::future::BoxFuture; diff --git a/nexus/src/app/background/dns_config.rs b/nexus/src/app/background/tasks/dns_config.rs similarity index 96% rename from nexus/src/app/background/dns_config.rs rename to nexus/src/app/background/tasks/dns_config.rs index 71e0a812a7..1b0f627870 100644 --- a/nexus/src/app/background/dns_config.rs +++ b/nexus/src/app/background/tasks/dns_config.rs @@ -4,7 +4,7 @@ //! Background task for keeping track of DNS configuration -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use dns_service_client::types::DnsConfigParams; use futures::future::BoxFuture; use futures::FutureExt; @@ -100,8 +100,8 @@ impl BackgroundTask for DnsConfigWatcher { // we just read. This should never happen because we // never remove the latest generation. 
let message = format!( - "found latest DNS generation ({}) is older \ - than the one we already know about ({})", + "found latest DNS generation ({}) is older than \ + the one we already know about ({})", new.generation, old.generation ); @@ -115,8 +115,8 @@ impl BackgroundTask for DnsConfigWatcher { // immutable once created. let message = format!( "found DNS config at generation {} that does \ - not match the config that we already have for \ - the same generation", + not match the config that we already have \ + for the same generation", new.generation ); error!(&log, "{}", message); @@ -157,9 +157,9 @@ impl BackgroundTask for DnsConfigWatcher { #[cfg(test)] mod test { - use crate::app::background::common::BackgroundTask; - use crate::app::background::dns_config::DnsConfigWatcher; + use super::DnsConfigWatcher; use crate::app::background::init::test::write_test_dns_generation; + use crate::app::background::BackgroundTask; use assert_matches::assert_matches; use async_bb8_diesel::AsyncRunQueryDsl; use async_bb8_diesel::AsyncSimpleConnection; diff --git a/nexus/src/app/background/dns_propagation.rs b/nexus/src/app/background/tasks/dns_propagation.rs similarity index 98% rename from nexus/src/app/background/dns_propagation.rs rename to nexus/src/app/background/tasks/dns_propagation.rs index 7d650f6f27..c680a6f010 100644 --- a/nexus/src/app/background/dns_propagation.rs +++ b/nexus/src/app/background/tasks/dns_propagation.rs @@ -4,8 +4,8 @@ //! Background task for propagating DNS configuration to all DNS servers -use super::common::BackgroundTask; use super::dns_servers::DnsServersList; +use crate::app::background::BackgroundTask; use anyhow::Context; use dns_service_client::types::DnsConfigParams; use futures::future::BoxFuture; @@ -177,9 +177,9 @@ async fn dns_propagate_one( #[cfg(test)] mod test { - use crate::app::background::common::BackgroundTask; - use crate::app::background::dns_propagation::DnsPropagator; - use crate::app::background::dns_servers::DnsServersList; + use super::DnsPropagator; + use crate::app::background::tasks::dns_servers::DnsServersList; + use crate::app::background::BackgroundTask; use dns_service_client::types::DnsConfigParams; use httptest::matchers::request; use httptest::responders::status_code; diff --git a/nexus/src/app/background/dns_servers.rs b/nexus/src/app/background/tasks/dns_servers.rs similarity index 99% rename from nexus/src/app/background/dns_servers.rs rename to nexus/src/app/background/tasks/dns_servers.rs index 8f4cce4ee0..9d99460917 100644 --- a/nexus/src/app/background/dns_servers.rs +++ b/nexus/src/app/background/tasks/dns_servers.rs @@ -4,7 +4,7 @@ //! Background task for keeping track of DNS servers -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use futures::future::BoxFuture; use futures::FutureExt; use internal_dns::names::ServiceName; diff --git a/nexus/src/app/background/external_endpoints.rs b/nexus/src/app/background/tasks/external_endpoints.rs similarity index 97% rename from nexus/src/app/background/external_endpoints.rs rename to nexus/src/app/background/tasks/external_endpoints.rs index 1a587298d5..0ff1e06a46 100644 --- a/nexus/src/app/background/external_endpoints.rs +++ b/nexus/src/app/background/tasks/external_endpoints.rs @@ -6,7 +6,7 @@ //! all Silos, their externally-visible DNS names, and the TLS certificates //! 
associated with those names -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use crate::app::external_endpoints::read_all_endpoints; pub use crate::app::external_endpoints::ExternalEndpoints; use futures::future::BoxFuture; @@ -117,8 +117,8 @@ impl BackgroundTask for ExternalEndpointsWatcher { #[cfg(test)] mod test { - use crate::app::background::common::BackgroundTask; - use crate::app::background::external_endpoints::ExternalEndpointsWatcher; + use super::ExternalEndpointsWatcher; + use crate::app::background::BackgroundTask; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_test_utils::resource_helpers::create_silo; diff --git a/nexus/src/app/background/instance_watcher.rs b/nexus/src/app/background/tasks/instance_watcher.rs similarity index 99% rename from nexus/src/app/background/instance_watcher.rs rename to nexus/src/app/background/tasks/instance_watcher.rs index 1b10605c5e..a6e579eb8a 100644 --- a/nexus/src/app/background/instance_watcher.rs +++ b/nexus/src/app/background/tasks/instance_watcher.rs @@ -4,7 +4,7 @@ //! Background task for pulling instance state from sled-agents. -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use futures::{future::BoxFuture, FutureExt}; use http::StatusCode; use nexus_db_model::Instance; diff --git a/nexus/src/app/background/inventory_collection.rs b/nexus/src/app/background/tasks/inventory_collection.rs similarity index 96% rename from nexus/src/app/background/inventory_collection.rs rename to nexus/src/app/background/tasks/inventory_collection.rs index 52ee8f6e13..1e2d3bda1f 100644 --- a/nexus/src/app/background/inventory_collection.rs +++ b/nexus/src/app/background/tasks/inventory_collection.rs @@ -4,7 +4,7 @@ //! Background task for reading inventory for the rack -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use anyhow::ensure; use anyhow::Context; use futures::future::BoxFuture; @@ -186,10 +186,10 @@ impl<'a> nexus_inventory::SledAgentEnumerator for DbSledAgentEnumerator<'a> { #[cfg(test)] mod test { + use super::DbSledAgentEnumerator; + use super::InventoryCollector; use crate::app::authz; - use crate::app::background::common::BackgroundTask; - use crate::app::background::inventory_collection::DbSledAgentEnumerator; - use crate::app::background::inventory_collection::InventoryCollector; + use crate::app::background::BackgroundTask; use nexus_db_model::Generation; use nexus_db_model::SledBaseboard; use nexus_db_model::SledSystemHardware; @@ -270,8 +270,8 @@ mod test { // has pushed us out. if our_collections.is_empty() { println!( - "iter {i}: no test collections \ - ({num_collections} Nexus collections)", + "iter {i}: no test collections ({num_collections} Nexus \ + collections)", ); continue; } @@ -285,8 +285,8 @@ mod test { // tail of all IDs we've seen matches the ones we saw in this // iteration (i.e., we're pushing out old collections in order). 
println!( - "iter {i}: saw {our_collections:?}; \ - should match tail of {all_our_collection_ids:?}" + "iter {i}: saw {our_collections:?}; should match tail of \ + {all_our_collection_ids:?}" ); assert_eq!( all_our_collection_ids @@ -398,8 +398,8 @@ mod test { assert_eq!( removed_urls.len(), 1, - "expected to find exactly one sled URL matching our \ - expunged sled's URL" + "expected to find exactly one sled URL matching our expunged \ + sled's URL" ); let mut found_urls = db_enum.list_sled_agents().await.unwrap(); found_urls.sort(); diff --git a/nexus/src/app/background/metrics_producer_gc.rs b/nexus/src/app/background/tasks/metrics_producer_gc.rs similarity index 99% rename from nexus/src/app/background/metrics_producer_gc.rs rename to nexus/src/app/background/tasks/metrics_producer_gc.rs index 2a8464b80f..1df0afb7ed 100644 --- a/nexus/src/app/background/metrics_producer_gc.rs +++ b/nexus/src/app/background/tasks/metrics_producer_gc.rs @@ -5,7 +5,7 @@ //! Background task for garbage collecting metrics producers that have not //! renewed their lease -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use chrono::TimeDelta; use chrono::Utc; use futures::future::BoxFuture; @@ -144,7 +144,7 @@ mod tests { { panic!( "failed to update time_modified for producer {producer_id}: \ - {err}" + {err}" ); } } diff --git a/nexus/src/app/background/tasks/mod.rs b/nexus/src/app/background/tasks/mod.rs new file mode 100644 index 0000000000..cb2ab46c2a --- /dev/null +++ b/nexus/src/app/background/tasks/mod.rs @@ -0,0 +1,29 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Implementations of specific background tasks + +pub mod abandoned_vmm_reaper; +pub mod bfd; +pub mod blueprint_execution; +pub mod blueprint_load; +pub mod crdb_node_id_collector; +pub mod dns_config; +pub mod dns_propagation; +pub mod dns_servers; +pub mod external_endpoints; +pub mod instance_watcher; +pub mod inventory_collection; +pub mod metrics_producer_gc; +pub mod nat_cleanup; +pub mod networking; +pub mod phantom_disks; +pub mod physical_disk_adoption; +pub mod region_replacement; +pub mod region_replacement_driver; +pub mod service_firewall_rules; +pub mod sync_service_zone_nat; +pub mod sync_switch_configuration; +pub mod v2p_mappings; +pub mod vpc_routes; diff --git a/nexus/src/app/background/nat_cleanup.rs b/nexus/src/app/background/tasks/nat_cleanup.rs similarity index 99% rename from nexus/src/app/background/nat_cleanup.rs rename to nexus/src/app/background/tasks/nat_cleanup.rs index 844dbffefe..675f4fc809 100644 --- a/nexus/src/app/background/nat_cleanup.rs +++ b/nexus/src/app/background/tasks/nat_cleanup.rs @@ -8,8 +8,8 @@ use crate::app::map_switch_zone_addrs; -use super::common::BackgroundTask; use super::networking::build_dpd_clients; +use crate::app::background::BackgroundTask; use chrono::{Duration, Utc}; use futures::future::BoxFuture; use futures::FutureExt; diff --git a/nexus/src/app/background/networking.rs b/nexus/src/app/background/tasks/networking.rs similarity index 100% rename from nexus/src/app/background/networking.rs rename to nexus/src/app/background/tasks/networking.rs diff --git a/nexus/src/app/background/phantom_disks.rs b/nexus/src/app/background/tasks/phantom_disks.rs similarity index 97% rename from nexus/src/app/background/phantom_disks.rs rename to nexus/src/app/background/tasks/phantom_disks.rs index 
48688838e5..4b0d8bec38 100644 --- a/nexus/src/app/background/phantom_disks.rs +++ b/nexus/src/app/background/tasks/phantom_disks.rs @@ -18,7 +18,7 @@ //! this background task is required to apply the same fix for disks that are //! already in this phantom state. -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use futures::future::BoxFuture; use futures::FutureExt; use nexus_db_queries::context::OpContext; @@ -74,7 +74,8 @@ impl BackgroundTask for PhantomDiskDetector { if let Err(e) = result { error!( &log, - "error un-deleting disk {} and setting to faulted: {:#}", + "error un-deleting disk {} and setting to faulted: \ + {:#}", disk.id(), e, ); diff --git a/nexus/src/app/background/physical_disk_adoption.rs b/nexus/src/app/background/tasks/physical_disk_adoption.rs similarity index 99% rename from nexus/src/app/background/physical_disk_adoption.rs rename to nexus/src/app/background/tasks/physical_disk_adoption.rs index 05c53963de..f3b9e8ac62 100644 --- a/nexus/src/app/background/physical_disk_adoption.rs +++ b/nexus/src/app/background/tasks/physical_disk_adoption.rs @@ -11,7 +11,7 @@ //! //! In the future, this may become more explicitly operator-controlled. -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use futures::future::BoxFuture; use futures::FutureExt; use nexus_db_model::PhysicalDisk; diff --git a/nexus/src/app/background/region_replacement.rs b/nexus/src/app/background/tasks/region_replacement.rs similarity index 95% rename from nexus/src/app/background/region_replacement.rs rename to nexus/src/app/background/tasks/region_replacement.rs index 02ae548d75..9e14c294ba 100644 --- a/nexus/src/app/background/region_replacement.rs +++ b/nexus/src/app/background/tasks/region_replacement.rs @@ -10,8 +10,8 @@ //! for any requests that are in state "Requested". See the documentation there //! for more information. 
-use super::common::BackgroundTask; use crate::app::authn; +use crate::app::background::BackgroundTask; use crate::app::sagas; use crate::app::RegionAllocationStrategy; use futures::future::BoxFuture; @@ -82,8 +82,7 @@ impl BackgroundTask for RegionReplacementDetector { Err(e) => { error!( &log, - "find_regions_on_expunged_physical_disks failed: \ - {e}" + "find_regions_on_expunged_physical_disks failed: {e}" ); err += 1; @@ -110,8 +109,8 @@ impl BackgroundTask for RegionReplacementDetector { Err(e) => { error!( &log, - "error looking for existing region \ - replacement requests for {}: {e}", + "error looking for existing region replacement \ + requests for {}: {e}", region.id(), ); continue; @@ -130,7 +129,7 @@ impl BackgroundTask for RegionReplacementDetector { info!( &log, "added region replacement request \ - {request_id} for {} volume {}", + {request_id} for {} volume {}", region.id(), region.volume_id(), ); @@ -140,7 +139,7 @@ impl BackgroundTask for RegionReplacementDetector { error!( &log, "error adding region replacement request for \ - region {} volume id {}: {e}", + region {} volume id {}: {e}", region.id(), region.volume_id(), ); @@ -172,7 +171,7 @@ impl BackgroundTask for RegionReplacementDetector { error!( &log, "sending region replacement start request \ - failed: {e}", + failed: {e}", ); err += 1; } diff --git a/nexus/src/app/background/tasks/region_replacement_driver.rs b/nexus/src/app/background/tasks/region_replacement_driver.rs new file mode 100644 index 0000000000..06155ffa24 --- /dev/null +++ b/nexus/src/app/background/tasks/region_replacement_driver.rs @@ -0,0 +1,736 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Background task for driving region replacement progress +//! +//! Region replacements will have been requested by the +//! `region_replacement_start` saga, but that will not trigger the necessary +//! live repair or reconciliation required on its own: the Volume is left in a +//! degraded state (less than a three-way mirror) until either of those complete +//! successfully. +//! +//! For each region replacement request that is in state `Running`, this +//! background task will call a saga that drives that forward: namely, get an +//! Upstairs working on either the repair or reconciliation. If an Upstairs *was* +//! running one of these and for some reason was stopped, start it again. +//! +//! Basically, keep starting either repair or reconciliation until they complete +//! successfully, then "finish" the region replacement.
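Since the new file below runs to several hundred lines, here is the per-request decision it implements, compressed into an illustrative sketch. Only the `Running` and `ReplacementDone` variants of `RegionReplacementState` are visible in this diff; the catch-all arm stands in for whatever other states the enum defines, and `next_step` itself is a hypothetical helper, not code from this change.

```rust
// Illustrative sketch of RegionReplacementDriver's per-request decision,
// as implemented by drive_running_replacements_forward() and
// complete_done_replacements() below.
use nexus_db_model::RegionReplacementState;

fn next_step(state: &RegionReplacementState) -> &'static str {
    match state {
        // The start saga has run: keep invoking the drive saga until an
        // Upstairs reports that live repair or reconciliation succeeded.
        RegionReplacementState::Running => {
            "send SagaRequest::RegionReplacementDrive"
        }
        // A successful finish notification was seen (or the drive saga
        // observed completion): invoke the finish saga to clean up.
        RegionReplacementState::ReplacementDone => {
            "send SagaRequest::RegionReplacementFinish"
        }
        // Any other state is handled by other tasks and sagas.
        _ => "no action from this background task",
    }
}
```

The drive saga is what actually works with an Upstairs; this task only decides which saga to request and sends that request over its `saga_request` channel.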
+ +use crate::app::authn; +use crate::app::background::BackgroundTask; +use crate::app::sagas; +use futures::future::BoxFuture; +use futures::FutureExt; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; +use nexus_types::internal_api::background::RegionReplacementDriverStatus; +use serde_json::json; +use std::sync::Arc; +use tokio::sync::mpsc::Sender; + +pub struct RegionReplacementDriver { + datastore: Arc, + saga_request: Sender, +} + +impl RegionReplacementDriver { + pub fn new( + datastore: Arc, + saga_request: Sender, + ) -> Self { + RegionReplacementDriver { datastore, saga_request } + } + + /// Drive running region replacements forward + pub async fn drive_running_replacements_forward( + &self, + opctx: &OpContext, + status: &mut RegionReplacementDriverStatus, + ) { + let log = &opctx.log; + + let running_replacements = + match self.datastore.get_running_region_replacements(opctx).await { + Ok(requests) => requests, + + Err(e) => { + let s = format!( + "query for running region replacement requests \ + failed: {e}" + ); + + error!(&log, "{s}"); + status.errors.push(s); + + return; + } + }; + + for request in running_replacements { + // If a successful finish notification was received, change the + // state here: don't drive requests forward where the replacement is + // done. + + let has_matching_finish_notification = match self + .datastore + .request_has_matching_successful_finish_notification( + opctx, &request, + ) + .await + { + Ok(has_matching_finish_notification) => { + has_matching_finish_notification + } + + Err(e) => { + let s = format!( + "checking for a finish notification for {} failed: {e}", + request.id + ); + + error!(&log, "{s}"); + status.errors.push(s); + + // Nexus may determine the request is `ReplacementDone` via + // the drive saga polling an Upstairs, so return false here + // to invoke that saga. + false + } + }; + + if has_matching_finish_notification { + if let Err(e) = self + .datastore + .mark_region_replacement_as_done(opctx, request.id) + .await + { + let s = format!( + "error marking {} as ReplacementDone: {e}", + request.id + ); + + error!(&log, "{s}"); + status.errors.push(s); + } + } else { + // Otherwise attempt to drive the replacement's progress forward + // (or determine if it is complete). + + let request_id = request.id; + + let result = self + .saga_request + .send(sagas::SagaRequest::RegionReplacementDrive { + params: sagas::region_replacement_drive::Params { + serialized_authn: + authn::saga::Serialized::for_opctx(opctx), + request, + }, + }) + .await; + + match result { + Ok(()) => { + let s = format!("{request_id}: drive invoked ok"); + + info!(&log, "{s}"); + status.drive_invoked_ok.push(s); + } + + Err(e) => { + let s = format!( + "sending region replacement drive request for \ + {request_id} failed: {e}", + ); + + error!(&log, "{s}"); + status.errors.push(s); + } + }; + } + } + } + + /// Complete region replacements that are done + pub async fn complete_done_replacements( + &self, + opctx: &OpContext, + status: &mut RegionReplacementDriverStatus, + ) { + let log = &opctx.log; + + let done_replacements = + match self.datastore.get_done_region_replacements(opctx).await { + Ok(requests) => requests, + + Err(e) => { + let s = format!( + "query for done region replacement requests failed: {e}" + ); + + error!(&log, "{s}"); + status.errors.push(s); + + return; + } + }; + + for request in done_replacements { + let Some(old_region_volume_id) = request.old_region_volume_id + else { + // This state is illegal! 
+ let s = format!( + "request {} old region volume id is None!", + request.id, + ); + + error!(&log, "{s}"); + status.errors.push(s); + + continue; + }; + + let request_id = request.id; + + let result = + self.saga_request + .send(sagas::SagaRequest::RegionReplacementFinish { + params: sagas::region_replacement_finish::Params { + serialized_authn: + authn::saga::Serialized::for_opctx(opctx), + region_volume_id: old_region_volume_id, + request, + }, + }) + .await; + + match result { + Ok(()) => { + let s = format!("{request_id}: finish invoked ok"); + + info!(&log, "{s}"); + status.finish_invoked_ok.push(s); + } + + Err(e) => { + let s = format!( + "sending region replacement finish request for \ + {request_id} failed: {e}" + ); + + error!(&log, "{s}"); + status.errors.push(s); + } + }; + } + } +} + +impl BackgroundTask for RegionReplacementDriver { + fn activate<'a>( + &'a mut self, + opctx: &'a OpContext, + ) -> BoxFuture<'a, serde_json::Value> { + async { + let log = &opctx.log; + info!(&log, "region replacement driver task started"); + + let mut status = RegionReplacementDriverStatus::default(); + + self.drive_running_replacements_forward(opctx, &mut status).await; + self.complete_done_replacements(opctx, &mut status).await; + + info!(&log, "region replacement driver task done"); + + json!(status) + } + .boxed() + } +} + +#[cfg(test)] +mod test { + use super::*; + use async_bb8_diesel::AsyncRunQueryDsl; + use chrono::Utc; + use nexus_db_model::Region; + use nexus_db_model::RegionReplacement; + use nexus_db_model::RegionReplacementState; + use nexus_db_model::UpstairsRepairNotification; + use nexus_db_model::UpstairsRepairNotificationType; + use nexus_db_model::UpstairsRepairType; + use nexus_test_utils_macros::nexus_test; + use omicron_uuid_kinds::DownstairsRegionKind; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::TypedUuid; + use omicron_uuid_kinds::UpstairsKind; + use omicron_uuid_kinds::UpstairsRepairKind; + use omicron_uuid_kinds::UpstairsSessionKind; + use tokio::sync::mpsc; + use uuid::Uuid; + + type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + + #[nexus_test(server = crate::Server)] + async fn test_running_region_replacement_causes_drive( + cptestctx: &ControlPlaneTestContext, + ) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + + let (saga_request_tx, mut saga_request_rx) = mpsc::channel(1); + let mut task = + RegionReplacementDriver::new(datastore.clone(), saga_request_tx); + + // Noop test + let result = task.activate(&opctx).await; + assert_eq!(result, json!(RegionReplacementDriverStatus::default())); + + // Add a region replacement request for a fake region, and change it to + // state Running. 
+ let region_id = Uuid::new_v4(); + let new_region_id = Uuid::new_v4(); + let volume_id = Uuid::new_v4(); + + let request = { + let mut request = RegionReplacement::new(region_id, volume_id); + request.replacement_state = RegionReplacementState::Running; + request.new_region_id = Some(new_region_id); + request + }; + + let request_id = request.id; + + datastore + .insert_region_replacement_request(&opctx, request) + .await + .unwrap(); + + // Activate the task - it should pick that up and try to run the region + // replacement drive saga + let result: RegionReplacementDriverStatus = + serde_json::from_value(task.activate(&opctx).await).unwrap(); + + assert_eq!( + result.drive_invoked_ok, + vec![format!("{request_id}: drive invoked ok")] + ); + assert!(result.finish_invoked_ok.is_empty()); + assert!(result.errors.is_empty()); + + let request = saga_request_rx.try_recv().unwrap(); + + assert!(matches!( + request, + sagas::SagaRequest::RegionReplacementDrive { .. } + )); + } + + #[nexus_test(server = crate::Server)] + async fn test_done_region_replacement_causes_finish( + cptestctx: &ControlPlaneTestContext, + ) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + + let (saga_request_tx, mut saga_request_rx) = mpsc::channel(1); + let mut task = + RegionReplacementDriver::new(datastore.clone(), saga_request_tx); + + // Noop test + let result = task.activate(&opctx).await; + assert_eq!(result, json!(RegionReplacementDriverStatus::default())); + + // Insert some region records + let old_region = { + let dataset_id = Uuid::new_v4(); + let volume_id = Uuid::new_v4(); + Region::new( + dataset_id, + volume_id, + 512_i64.try_into().unwrap(), + 10, + 10, + ) + }; + + let new_region = { + let dataset_id = Uuid::new_v4(); + let volume_id = Uuid::new_v4(); + Region::new( + dataset_id, + volume_id, + 512_i64.try_into().unwrap(), + 10, + 10, + ) + }; + + { + let conn = datastore.pool_connection_for_tests().await.unwrap(); + + use nexus_db_model::schema::region::dsl; + diesel::insert_into(dsl::region) + .values(old_region.clone()) + .execute_async(&*conn) + .await + .unwrap(); + + diesel::insert_into(dsl::region) + .values(new_region.clone()) + .execute_async(&*conn) + .await + .unwrap(); + } + + // Add a region replacement request for that region, and change it to + // state ReplacementDone. Set the new_region_id to the region created + // above. + let request = { + let mut request = + RegionReplacement::new(old_region.id(), old_region.volume_id()); + request.replacement_state = RegionReplacementState::ReplacementDone; + request.new_region_id = Some(new_region.id()); + request.old_region_volume_id = Some(Uuid::new_v4()); + request + }; + + let request_id = request.id; + + datastore + .insert_region_replacement_request(&opctx, request) + .await + .unwrap(); + + // Activate the task - it should pick that up and try to run the region + // replacement finish saga + let result: RegionReplacementDriverStatus = + serde_json::from_value(task.activate(&opctx).await).unwrap(); + + assert!(result.drive_invoked_ok.is_empty()); + assert_eq!( + result.finish_invoked_ok, + vec![format!("{request_id}: finish invoked ok")] + ); + assert!(result.errors.is_empty()); + + let request = saga_request_rx.try_recv().unwrap(); + + assert!(matches!( + request, + sagas::SagaRequest::RegionReplacementFinish { .. 
} + )); + } + + #[nexus_test(server = crate::Server)] + async fn test_mark_region_replacement_done_after_notification( + cptestctx: &ControlPlaneTestContext, + ) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + + let (saga_request_tx, mut saga_request_rx) = mpsc::channel(1); + let mut task = + RegionReplacementDriver::new(datastore.clone(), saga_request_tx); + + // Noop test + let result = task.activate(&opctx).await; + assert_eq!(result, json!(RegionReplacementDriverStatus::default())); + + // Insert some region records + let old_region = { + let dataset_id = Uuid::new_v4(); + let volume_id = Uuid::new_v4(); + Region::new( + dataset_id, + volume_id, + 512_i64.try_into().unwrap(), + 10, + 10, + ) + }; + + let new_region = { + let dataset_id = Uuid::new_v4(); + let volume_id = Uuid::new_v4(); + Region::new( + dataset_id, + volume_id, + 512_i64.try_into().unwrap(), + 10, + 10, + ) + }; + + { + let conn = datastore.pool_connection_for_tests().await.unwrap(); + + use nexus_db_model::schema::region::dsl; + diesel::insert_into(dsl::region) + .values(old_region.clone()) + .execute_async(&*conn) + .await + .unwrap(); + + diesel::insert_into(dsl::region) + .values(new_region.clone()) + .execute_async(&*conn) + .await + .unwrap(); + } + + // Add a region replacement request for that region, and change it to + // state Running. Set the new_region_id to the region created above. + let request = { + let mut request = + RegionReplacement::new(old_region.id(), old_region.volume_id()); + request.replacement_state = RegionReplacementState::Running; + request.new_region_id = Some(new_region.id()); + request.old_region_volume_id = Some(Uuid::new_v4()); + request + }; + + let request_id = request.id; + + datastore + .insert_region_replacement_request(&opctx, request.clone()) + .await + .unwrap(); + + // Activate the task - it should pick that up and try to run the region + // replacement drive saga + let result: RegionReplacementDriverStatus = + serde_json::from_value(task.activate(&opctx).await).unwrap(); + + assert_eq!( + result.drive_invoked_ok, + vec![format!("{request_id}: drive invoked ok")] + ); + assert!(result.finish_invoked_ok.is_empty()); + assert!(result.errors.is_empty()); + + let saga_request = saga_request_rx.try_recv().unwrap(); + + assert!(matches!( + saga_request, + sagas::SagaRequest::RegionReplacementDrive { .. 
} + )); + + // Now, pretend that an Upstairs sent a notification that it + // successfully finished a repair + + { + datastore + .upstairs_repair_notification( + &opctx, + UpstairsRepairNotification::new( + Utc::now(), // client time + TypedUuid::::from_untyped_uuid( + Uuid::new_v4(), + ), + UpstairsRepairType::Live, + TypedUuid::::from_untyped_uuid( + Uuid::new_v4(), + ), + TypedUuid::::from_untyped_uuid( + Uuid::new_v4(), + ), + TypedUuid::::from_untyped_uuid( + new_region.id(), + ), // downstairs that was repaired + "[fd00:1122:3344:101::2]:12345".parse().unwrap(), + UpstairsRepairNotificationType::Succeeded, + ), + ) + .await + .unwrap(); + } + + // Activating the task now should + // 1) switch the state to ReplacementDone + // 2) start the finish saga + let result: RegionReplacementDriverStatus = + serde_json::from_value(task.activate(&opctx).await).unwrap(); + + assert_eq!(result.finish_invoked_ok.len(), 1); + + { + let request_in_db = datastore + .get_region_replacement_request_by_id(&opctx, request.id) + .await + .unwrap(); + assert_eq!( + request_in_db.replacement_state, + RegionReplacementState::ReplacementDone + ); + } + + let saga_request = saga_request_rx.try_recv().unwrap(); + + assert!(matches!( + saga_request, + sagas::SagaRequest::RegionReplacementFinish { .. } + )); + } + + #[nexus_test(server = crate::Server)] + async fn test_no_mark_region_replacement_done_after_failed_notification( + cptestctx: &ControlPlaneTestContext, + ) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + + let (saga_request_tx, mut saga_request_rx) = mpsc::channel(1); + let mut task = + RegionReplacementDriver::new(datastore.clone(), saga_request_tx); + + // Noop test + let result = task.activate(&opctx).await; + assert_eq!(result, json!(RegionReplacementDriverStatus::default())); + + // Insert some region records + let old_region = { + let dataset_id = Uuid::new_v4(); + let volume_id = Uuid::new_v4(); + Region::new( + dataset_id, + volume_id, + 512_i64.try_into().unwrap(), + 10, + 10, + ) + }; + + let new_region = { + let dataset_id = Uuid::new_v4(); + let volume_id = Uuid::new_v4(); + Region::new( + dataset_id, + volume_id, + 512_i64.try_into().unwrap(), + 10, + 10, + ) + }; + + { + let conn = datastore.pool_connection_for_tests().await.unwrap(); + + use nexus_db_model::schema::region::dsl; + diesel::insert_into(dsl::region) + .values(old_region.clone()) + .execute_async(&*conn) + .await + .unwrap(); + + diesel::insert_into(dsl::region) + .values(new_region.clone()) + .execute_async(&*conn) + .await + .unwrap(); + } + + // Add a region replacement request for that region, and change it to + // state Running. Set the new_region_id to the region created above. 
+ let request = { + let mut request = + RegionReplacement::new(old_region.id(), old_region.volume_id()); + request.replacement_state = RegionReplacementState::Running; + request.new_region_id = Some(new_region.id()); + request + }; + + let request_id = request.id; + + datastore + .insert_region_replacement_request(&opctx, request.clone()) + .await + .unwrap(); + + // Activate the task - it should pick that up and try to run the region + // replacement drive saga + let result: RegionReplacementDriverStatus = + serde_json::from_value(task.activate(&opctx).await).unwrap(); + + assert_eq!( + result.drive_invoked_ok, + vec![format!("{request_id}: drive invoked ok")] + ); + assert!(result.finish_invoked_ok.is_empty()); + assert!(result.errors.is_empty()); + + let saga_request = saga_request_rx.try_recv().unwrap(); + + assert!(matches!( + saga_request, + sagas::SagaRequest::RegionReplacementDrive { .. } + )); + + // Now, pretend that an Upstairs sent a notification that it failed to + // finish a repair + + { + datastore + .upstairs_repair_notification( + &opctx, + UpstairsRepairNotification::new( + Utc::now(), // client time + TypedUuid::::from_untyped_uuid( + Uuid::new_v4(), + ), + UpstairsRepairType::Live, + TypedUuid::::from_untyped_uuid( + Uuid::new_v4(), + ), + TypedUuid::::from_untyped_uuid( + Uuid::new_v4(), + ), + TypedUuid::::from_untyped_uuid( + new_region.id(), + ), // downstairs that was repaired + "[fd00:1122:3344:101::2]:12345".parse().unwrap(), + UpstairsRepairNotificationType::Failed, + ), + ) + .await + .unwrap(); + } + + // Activating the task now should start the drive saga + let result: RegionReplacementDriverStatus = + serde_json::from_value(task.activate(&opctx).await).unwrap(); + + assert_eq!( + result.drive_invoked_ok, + vec![format!("{request_id}: drive invoked ok")] + ); + assert!(result.finish_invoked_ok.is_empty()); + assert!(result.errors.is_empty()); + + let saga_request = saga_request_rx.try_recv().unwrap(); + + assert!(matches!( + saga_request, + sagas::SagaRequest::RegionReplacementDrive { .. } + )); + } +} diff --git a/nexus/src/app/background/service_firewall_rules.rs b/nexus/src/app/background/tasks/service_firewall_rules.rs similarity index 94% rename from nexus/src/app/background/service_firewall_rules.rs rename to nexus/src/app/background/tasks/service_firewall_rules.rs index 1a705d1fae..4004de42c8 100644 --- a/nexus/src/app/background/service_firewall_rules.rs +++ b/nexus/src/app/background/tasks/service_firewall_rules.rs @@ -10,7 +10,7 @@ //! handle general changes to customer-visible VPC firewalls, and is mostly in //! place to propagate changes in the IP allowlist for user-facing services. 
-use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use futures::future::BoxFuture; use futures::FutureExt; use nexus_db_queries::context::OpContext; @@ -38,8 +38,8 @@ impl BackgroundTask for ServiceRulePropagator { .new(slog::o!("component" => "service-firewall-rule-progator")); debug!( log, - "starting background task for service \ - firewall rule propagation" + "starting background task for service firewall rule \ + propagation" ); let start = std::time::Instant::now(); let res = nexus_networking::plumb_service_firewall_rules( diff --git a/nexus/src/app/background/sync_service_zone_nat.rs b/nexus/src/app/background/tasks/sync_service_zone_nat.rs similarity index 99% rename from nexus/src/app/background/sync_service_zone_nat.rs rename to nexus/src/app/background/tasks/sync_service_zone_nat.rs index b0a4c8cef2..59cd6a6a79 100644 --- a/nexus/src/app/background/sync_service_zone_nat.rs +++ b/nexus/src/app/background/tasks/sync_service_zone_nat.rs @@ -7,8 +7,8 @@ use crate::app::map_switch_zone_addrs; -use super::common::BackgroundTask; use super::networking::build_dpd_clients; +use crate::app::background::BackgroundTask; use anyhow::Context; use futures::future::BoxFuture; use futures::FutureExt; diff --git a/nexus/src/app/background/sync_switch_configuration.rs b/nexus/src/app/background/tasks/sync_switch_configuration.rs similarity index 96% rename from nexus/src/app/background/sync_switch_configuration.rs rename to nexus/src/app/background/tasks/sync_switch_configuration.rs index 8552d62988..0351c9542a 100644 --- a/nexus/src/app/background/sync_switch_configuration.rs +++ b/nexus/src/app/background/tasks/sync_switch_configuration.rs @@ -6,7 +6,7 @@ //! to relevant management daemons (dendrite, mgd, sled-agent, etc.) use crate::app::{ - background::networking::{ + background::tasks::networking::{ api_to_dpd_port_settings, build_dpd_clients, build_mgd_clients, }, map_switch_zone_addrs, @@ -23,7 +23,7 @@ use nexus_db_model::{ }; use uuid::Uuid; -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; use display_error_chain::DisplayErrorChain; use dpd_client::types::PortId; use futures::future::BoxFuture; @@ -235,14 +235,19 @@ impl SwitchPortSettingsManager { let config = sled_agent_client::types::BfdPeerConfig { local: spec.local.map(|x| x.ip()), remote: spec.remote.ip(), - detection_threshold: spec.detection_threshold.0.try_into().map_err(|_| { - omicron_common::api::external::Error::InternalError { - internal_message: format!( - "db_bfd_peer_configs: detection threshold overflow: {}", - spec.detection_threshold.0, - ), - } - })?, + detection_threshold: spec + .detection_threshold + .0 + .try_into() + .map_err(|_| { + omicron_common::api::external::Error::InternalError { + internal_message: format!( + "db_bfd_peer_configs: detection threshold \ + overflow: {}", + spec.detection_threshold.0, + ), + } + })?, required_rx: spec.required_rx.0.into(), mode: match spec.mode { nexus_db_model::BfdMode::SingleHop => { @@ -252,15 +257,17 @@ impl SwitchPortSettingsManager { sled_agent_client::types::BfdMode::MultiHop } }, - switch: spec.switch.parse().map_err(|e: ParseSwitchLocationError| { - omicron_common::api::external::Error::InternalError { - internal_message: format!( - "db_bfd_peer_configs: failed to parse switch name: {}: {:?}", - spec.switch, - e, - ), - } - })?, + switch: spec.switch.parse().map_err( + |e: ParseSwitchLocationError| { + omicron_common::api::external::Error::InternalError { + internal_message: format!( + 
"db_bfd_peer_configs: failed to parse switch \ + name: {}: {:?}", + spec.switch, e, + ), + } + }, + )?, }; result.push(config); } @@ -1760,45 +1767,44 @@ async fn static_routes_on_switch<'a>( let mut routes_on_switch = HashMap::new(); for (location, client) in mgd_clients { - let static_routes: SwitchStaticRoutes = match client - .static_list_v4_routes() - .await - { - Ok(routes) => { - let mut flattened = HashSet::new(); - for (destination, paths) in routes.iter() { - let Ok(dst) = destination.parse() else { - error!( + let static_routes: SwitchStaticRoutes = + match client.static_list_v4_routes().await { + Ok(routes) => { + let mut flattened = HashSet::new(); + for (destination, paths) in routes.iter() { + let Ok(dst) = destination.parse() else { + error!( log, - "failed to parse static route destination: {destination}" + "failed to parse static route destination: \ + {destination}" ); - continue; - }; - for p in paths.iter() { - let nh = match p.nexthop { - IpAddr::V4(addr) => addr, - IpAddr::V6(addr) => { - error!( - log, - "ipv6 nexthops not supported: {addr}" - ); - continue; - } + continue; }; - flattened.insert((nh, dst, p.vlan_id)); + for p in paths.iter() { + let nh = match p.nexthop { + IpAddr::V4(addr) => addr, + IpAddr::V6(addr) => { + error!( + log, + "ipv6 nexthops not supported: {addr}" + ); + continue; + } + }; + flattened.insert((nh, dst, p.vlan_id)); + } } + flattened } - flattened - } - Err(_) => { - error!( - &log, - "unable to retrieve routes from switch"; - "switch_location" => ?location, - ); - continue; - } - }; + Err(_) => { + error!( + &log, + "unable to retrieve routes from switch"; + "switch_location" => ?location, + ); + continue; + } + }; routes_on_switch.insert(*location, static_routes); } routes_on_switch diff --git a/nexus/src/app/background/v2p_mappings.rs b/nexus/src/app/background/tasks/v2p_mappings.rs similarity index 87% rename from nexus/src/app/background/v2p_mappings.rs rename to nexus/src/app/background/tasks/v2p_mappings.rs index a53ac3442f..26ce131e9a 100644 --- a/nexus/src/app/background/v2p_mappings.rs +++ b/nexus/src/app/background/tasks/v2p_mappings.rs @@ -12,7 +12,7 @@ use omicron_common::api::external::Vni; use serde_json::json; use sled_agent_client::types::VirtualNetworkInterfaceHost; -use super::common::BackgroundTask; +use crate::app::background::BackgroundTask; pub struct V2PManager { datastore: Arc, @@ -74,28 +74,13 @@ impl BackgroundTask for V2PManager { // create a set of updates from the v2p mappings let desired_v2p: HashSet<_> = v2p_mappings .into_iter() - .filter_map(|mapping| { - let physical_host_ip = match mapping.sled_ip.ip() { - std::net::IpAddr::V4(v) => { - // sled ip should never be ipv4 - error!( - &log, - "sled ip should be ipv6 but is ipv4: {v}" - ); - return None; - } - std::net::IpAddr::V6(v) => v, - }; - - let vni = mapping.vni.0; - - let mapping = VirtualNetworkInterfaceHost { + .map(|mapping| { + VirtualNetworkInterfaceHost { virtual_ip: mapping.ip.ip(), virtual_mac: *mapping.mac, - physical_host_ip, - vni, - }; - Some(mapping) + physical_host_ip: *mapping.sled_ip, + vni: mapping.vni.0, + } }) .collect(); diff --git a/nexus/src/app/background/tasks/vpc_routes.rs b/nexus/src/app/background/tasks/vpc_routes.rs new file mode 100644 index 0000000000..5ba428308b --- /dev/null +++ b/nexus/src/app/background/tasks/vpc_routes.rs @@ -0,0 +1,283 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Background task for propagating VPC routes (system and custom) to sleds. + +use crate::app::background::BackgroundTask; +use futures::future::BoxFuture; +use futures::FutureExt; +use nexus_db_model::{Sled, SledState, Vni}; +use nexus_db_queries::{context::OpContext, db::DataStore}; +use nexus_networking::sled_client_from_address; +use nexus_types::{ + deployment::SledFilter, external_api::views::SledPolicy, identity::Asset, + identity::Resource, +}; +use omicron_common::api::internal::shared::{ + ResolvedVpcRoute, ResolvedVpcRouteSet, RouterId, RouterKind, RouterVersion, +}; +use serde_json::json; +use std::collections::hash_map::Entry; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; +use uuid::Uuid; + +pub struct VpcRouteManager { + datastore: Arc, +} + +impl VpcRouteManager { + pub fn new(datastore: Arc) -> Self { + Self { datastore } + } +} + +// This RPW doesn't concern itself overly much with resolved router targets +// and destinations being partial wrt. the current generation, in the same +// vein as how firewall rules are handled. Gating *pushing* this update on a +// generation number can be a bit more risky, but there's a sort of eventual +// consistency happening here that keeps this safe. +// +// Any location which updates name-resolvable state follows the pattern: +// * Update state. +// * Update (VPC-wide) router generation numbers. +// * Awaken this task. This might happen indirectly via e.g. instance start. +// +// As a result, any update which accidentally sees partial state will be followed +// by re-triggering this RPW with a higher generation number, giving us a re-resolved +// route set and pushing to any relevant sleds. +impl BackgroundTask for VpcRouteManager { + fn activate<'a>( + &'a mut self, + opctx: &'a OpContext, + ) -> BoxFuture<'a, serde_json::Value> { + async { + let log = &opctx.log; + + let sleds = match self + .datastore + .sled_list_all_batched(opctx, SledFilter::InService) + .await + { + Ok(v) => v, + Err(e) => { + let msg = format!("failed to enumerate sleds: {:#}", e); + error!(&log, "{msg}"); + return json!({"error": msg}); + } + } + .into_iter() + .filter(|sled| { + matches!(sled.state(), SledState::Active) + && matches!(sled.policy(), SledPolicy::InService { .. }) + }); + + // Map sled db records to sled-agent clients + let sled_clients: Vec<(Sled, sled_agent_client::Client)> = sleds + .map(|sled| { + let client = sled_client_from_address( + sled.id(), + sled.address(), + &log, + ); + (sled, client) + }) + .collect(); + + let mut known_rules: HashMap> = + HashMap::new(); + let mut db_routers = HashMap::new(); + let mut vni_to_vpc = HashMap::new(); + + for (sled, client) in sled_clients { + let Ok(route_sets) = client.list_vpc_routes().await else { + warn!( + log, + "failed to fetch current VPC route state from sled"; + "sled" => sled.serial_number(), + ); + continue; + }; + + let route_sets = route_sets.into_inner(); + + // Lookup all VPC<->Subnet<->Router associations we might need, + // based on the set of VNIs reported by this sled. + // These provide the versions we'll stick with -- in the worst + // case we push newer state to a sled with an older generation + // number, which will be fixed up on the next activation. 
+ for set in &route_sets { + let db_vni = Vni(set.id.vni); + let maybe_vpc = vni_to_vpc.entry(set.id.vni); + let vpc = match maybe_vpc { + Entry::Occupied(_) => { + continue; + } + Entry::Vacant(v) => { + let Ok(vpc) = self + .datastore + .resolve_vni_to_vpc(opctx, db_vni) + .await + else { + error!( + log, + "failed to fetch VPC from VNI"; + "sled" => sled.serial_number(), + "vni" => ?db_vni + ); + continue; + }; + + v.insert(vpc) + } + }; + + let vpc_id = vpc.identity().id; + + let Ok(system_router) = self + .datastore + .vpc_get_system_router(opctx, vpc_id) + .await + else { + error!( + log, + "failed to fetch system router for VPC"; + "vpc" => vpc_id.to_string() + ); + continue; + }; + + let Ok(custom_routers) = self + .datastore + .vpc_get_active_custom_routers(opctx, vpc_id) + .await + else { + error!( + log, + "failed to fetch custom routers for VPC"; + "vpc" => vpc_id.to_string() + ); + continue; + }; + + db_routers.insert( + RouterId { vni: set.id.vni, kind: RouterKind::System }, + system_router, + ); + db_routers.extend(custom_routers.iter().map( + |(subnet, router)| { + ( + RouterId { + vni: set.id.vni, + kind: RouterKind::Custom( + subnet.ipv4_block.0.into(), + ), + }, + router.clone(), + ) + }, + )); + db_routers.extend(custom_routers.into_iter().map( + |(subnet, router)| { + ( + RouterId { + vni: set.id.vni, + kind: RouterKind::Custom( + subnet.ipv6_block.0.into(), + ), + }, + router, + ) + }, + )); + } + + let mut to_push = Vec::new(); + let mut set_rules = |id, version, routes| { + to_push.push(ResolvedVpcRouteSet { id, routes, version }); + }; + + // resolve into known_rules on an as-needed basis. + for set in &route_sets { + let Some(db_router) = db_routers.get(&set.id) else { + // The sled wants to know about rules for a VPC + // subnet with no custom router set. Send them + // the empty list, and unset its table version. + set_rules(set.id, None, HashSet::new()); + continue; + }; + + let router_id = db_router.id(); + let version = RouterVersion { + version: db_router.resolved_version as u64, + router_id, + }; + + // Only attempt to resolve/push a ruleset if we have a + // different router ID than the sled, or a higher version + // number. + match &set.version { + Some(v) if !v.is_replaced_by(&version) => { + continue; + } + _ => {} + } + + // We may have already resolved the rules for this + // router in a previous iteration. 
+ if let Some(rules) = known_rules.get(&router_id) { + set_rules(set.id, Some(version), rules.clone()); + continue; + } + + match self + .datastore + .vpc_resolve_router_rules( + opctx, + db_router.identity().id, + ) + .await + { + Ok(rules) => { + let collapsed: HashSet<_> = rules + .into_iter() + .map(|(dest, target)| ResolvedVpcRoute { + dest, + target, + }) + .collect(); + set_rules(set.id, Some(version), collapsed.clone()); + known_rules.insert(router_id, collapsed); + } + Err(e) => { + error!( + &log, + "failed to compute subnet routes"; + "router" => router_id.to_string(), + "err" => e.to_string() + ); + } + } + } + + if !to_push.is_empty() { + if let Err(e) = client.set_vpc_routes(&to_push).await { + error!( + log, + "failed to push new VPC route state from sled"; + "sled" => sled.serial_number(), + "err" => ?e + ); + continue; + }; + } + } + + json!({}) + } + .boxed() + } +} diff --git a/nexus/src/app/bfd.rs b/nexus/src/app/bfd.rs index 0afa238ee3..1ae958c20d 100644 --- a/nexus/src/app/bfd.rs +++ b/nexus/src/app/bfd.rs @@ -39,12 +39,9 @@ impl super::Nexus { // add the bfd session to the db and trigger the bfd manager to handle // the reset self.datastore().bfd_session_create(opctx, &session).await?; - self.background_tasks - .driver - .activate(&self.background_tasks.bfd_manager); + self.background_tasks.activate(&self.background_tasks.task_bfd_manager); // for timely propagation to bootstore self.background_tasks - .driver .activate(&self.background_tasks.task_switch_port_settings_manager); Ok(()) } @@ -57,12 +54,9 @@ impl super::Nexus { // remove the bfd session from the db and trigger the bfd manager to // handle the reset self.datastore().bfd_session_delete(opctx, &session).await?; - self.background_tasks - .driver - .activate(&self.background_tasks.bfd_manager); + self.background_tasks.activate(&self.background_tasks.task_bfd_manager); // for timely propagation to bootstore self.background_tasks - .driver .activate(&self.background_tasks.task_switch_port_settings_manager); Ok(()) } diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 78ae002dd3..adb0010e9a 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -203,7 +203,8 @@ impl super::Nexus { create_params: params.clone(), }; let saga_outputs = self - .execute_saga::(saga_params) + .sagas + .saga_execute::(saga_params) .await?; let disk_created = saga_outputs .lookup_node_output::("created_disk") @@ -342,7 +343,8 @@ impl super::Nexus { disk_id: authz_disk.id(), volume_id: db_disk.volume_id, }; - self.execute_saga::(saga_params) + self.sagas + .saga_execute::(saga_params) .await?; Ok(()) } @@ -585,10 +587,9 @@ impl super::Nexus { snapshot_name: finalize_params.snapshot_name.clone(), }; - self.execute_saga::( - saga_params, - ) - .await?; + self.sagas + .saga_execute::(saga_params) + .await?; Ok(()) } diff --git a/nexus/src/app/image.rs b/nexus/src/app/image.rs index 03c9c9d6a4..a3fa722d36 100644 --- a/nexus/src/app/image.rs +++ b/nexus/src/app/image.rs @@ -270,7 +270,8 @@ impl super::Nexus { image_param, }; - self.execute_saga::(saga_params) + self.sagas + .saga_execute::(saga_params) .await?; Ok(()) diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index fe3b4cabf9..a61cceda8b 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -20,6 +20,7 @@ use futures::future::Fuse; use futures::{FutureExt, SinkExt, StreamExt}; use nexus_db_model::IpAttachState; use nexus_db_model::IpKind; +use nexus_db_model::Vmm; use nexus_db_model::VmmState as DbVmmState; use 
nexus_db_queries::authn; use nexus_db_queries::authz; @@ -378,7 +379,8 @@ impl super::Nexus { }; let saga_outputs = self - .execute_saga::( + .sagas + .saga_execute::( saga_params, ) .await?; @@ -461,10 +463,11 @@ impl super::Nexus { instance, boundary_switches, }; - self.execute_saga::( - saga_params, - ) - .await?; + self.sagas + .saga_execute::( + saga_params, + ) + .await?; Ok(()) } @@ -509,10 +512,11 @@ impl super::Nexus { src_vmm: vmm.clone(), migrate_params: params, }; - self.execute_saga::( - saga_params, - ) - .await?; + self.sagas + .saga_execute::( + saga_params, + ) + .await?; // TODO correctness TODO robustness TODO design // Should we lookup the instance again here? @@ -756,10 +760,11 @@ impl super::Nexus { db_instance: instance.clone(), }; - self.execute_saga::( - saga_params, - ) - .await?; + self.sagas + .saga_execute::( + saga_params, + ) + .await?; self.db_datastore.instance_fetch_with_vmm(opctx, &authz_instance).await } @@ -1550,6 +1555,7 @@ impl super::Nexus { self.v2p_notification_tx.clone(), ) .await?; + self.vpc_needed_notify_sleds(); Ok(()) } @@ -1561,7 +1567,7 @@ impl super::Nexus { instance_lookup: &lookup::Instance<'_>, params: ¶ms::InstanceSerialConsoleRequest, ) -> Result { - let client = self + let (_, client) = self .propolis_client_for_instance( opctx, instance_lookup, @@ -1602,7 +1608,7 @@ impl super::Nexus { instance_lookup: &lookup::Instance<'_>, params: ¶ms::InstanceSerialConsoleStreamRequest, ) -> Result<(), Error> { - let client_addr = match self + let (_, client_addr) = match self .propolis_addr_for_instance( opctx, instance_lookup, @@ -1657,12 +1663,14 @@ impl super::Nexus { } } + /// Return a propolis address for the instance, along with the VMM identity + /// that it's for. async fn propolis_addr_for_instance( &self, opctx: &OpContext, instance_lookup: &lookup::Instance<'_>, action: authz::Action, - ) -> Result { + ) -> Result<(Vmm, SocketAddr), Error> { let (.., authz_instance) = instance_lookup.lookup_for(action).await?; let state = self @@ -1676,8 +1684,9 @@ impl super::Nexus { DbVmmState::Running | DbVmmState::Rebooting | DbVmmState::Migrating => { - Ok(SocketAddr::new(vmm.propolis_ip.ip(), vmm.propolis_port.into())) + Ok((vmm.clone(), SocketAddr::new(vmm.propolis_ip.ip(), vmm.propolis_port.into()))) } + DbVmmState::Starting | DbVmmState::Stopping | DbVmmState::Stopped @@ -1687,6 +1696,7 @@ impl super::Nexus { vmm.runtime.state, ))) } + DbVmmState::Destroyed | DbVmmState::SagaUnwound => Err(Error::invalid_request( "cannot connect to serial console of instance in state \"Stopped\"", )), @@ -1700,16 +1710,21 @@ impl super::Nexus { } } - async fn propolis_client_for_instance( + /// Return a propolis client for the instance, along with the VMM identity + /// that it's for. 
+ pub(crate) async fn propolis_client_for_instance( &self, opctx: &OpContext, instance_lookup: &lookup::Instance<'_>, action: authz::Action, - ) -> Result { - let client_addr = self + ) -> Result<(Vmm, propolis_client::Client), Error> { + let (vmm, client_addr) = self .propolis_addr_for_instance(opctx, instance_lookup, action) .await?; - Ok(propolis_client::Client::new(&format!("http://{}", client_addr))) + Ok(( + vmm, + propolis_client::Client::new(&format!("http://{}", client_addr)), + )) } async fn proxy_instance_serial_ws( @@ -1927,7 +1942,8 @@ impl super::Nexus { }; let saga_outputs = self - .execute_saga::( + .sagas + .saga_execute::( saga_params, ) .await?; @@ -1956,7 +1972,8 @@ impl super::Nexus { }; let saga_outputs = self - .execute_saga::( + .sagas + .saga_execute::( saga_params, ) .await?; diff --git a/nexus/src/app/instance_network.rs b/nexus/src/app/instance_network.rs index b1ad2bb6fc..c2d46c5499 100644 --- a/nexus/src/app/instance_network.rs +++ b/nexus/src/app/instance_network.rs @@ -29,8 +29,9 @@ use std::str::FromStr; use uuid::Uuid; use super::background::BackgroundTasks; +use super::Nexus; -impl super::Nexus { +impl Nexus { /// Returns the set of switches with uplinks configured and boundary /// services enabled. pub(crate) async fn boundary_switches( diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 8e4a795a95..707a807d3d 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -5,6 +5,7 @@ //! Nexus, the service that operates much of the control plane in an Oxide fleet use self::external_endpoints::NexusCertResolver; +use self::saga::SagaExecutor; use crate::app::oximeter::LazyTimeseriesClient; use crate::app::sagas::SagaRequest; use crate::populate::populate_start; @@ -132,7 +133,7 @@ pub struct Nexus { authz: Arc, /// saga execution coordinator - sec_client: Arc, + sagas: SagaExecutor, /// Task representing completion of recovered Sagas recovery_task: std::sync::Mutex>, @@ -238,6 +239,7 @@ impl Nexus { Arc::clone(&db_datastore), log.new(o!("component" => "SecStore")), )) as Arc; + let sec_client = Arc::new(steno::sec( log.new(o!( "component" => "SEC", @@ -246,6 +248,11 @@ impl Nexus { sec_store, )); + let sagas = SagaExecutor::new( + Arc::clone(&sec_client), + log.new(o!("component" => "SagaExecutor")), + ); + let client_state = dpd_client::ClientState { tag: String::from("nexus"), log: log.new(o!( @@ -425,7 +432,7 @@ impl Nexus { log: log.new(o!()), db_datastore: Arc::clone(&db_datastore), authz: Arc::clone(&authz), - sec_client: Arc::clone(&sec_client), + sagas, recovery_task: std::sync::Mutex::new(None), external_server: std::sync::Mutex::new(None), techport_external_server: std::sync::Mutex::new(None), @@ -467,6 +474,7 @@ impl Nexus { // TODO-cleanup all the extra Arcs here seems wrong let nexus = Arc::new(nexus); + nexus.sagas.set_nexus(nexus.clone()); let opctx = OpContext::for_background( log.new(o!("component" => "SagaRecoverer")), Arc::clone(&authz), @@ -480,7 +488,6 @@ impl Nexus { Arc::new(Arc::new(SagaContext::new( Arc::clone(&nexus), saga_logger, - Arc::clone(&authz), ))), db_datastore, Arc::clone(&sec_client), @@ -504,7 +511,7 @@ impl Nexus { "populate complete; activating background tasks" ); for task in task_nexus.background_tasks.driver.tasks() { - task_nexus.background_tasks.driver.activate(task); + task_nexus.background_tasks.activate(task); } } Err(_) => { @@ -554,6 +561,10 @@ impl Nexus { &self.tunables } + pub fn authz(&self) -> &Arc { + &self.authz + } + pub(crate) async fn wait_for_populate(&self) -> Result<(), 
anyhow::Error> { let mut my_rx = self.populate_status.clone(); loop { @@ -934,7 +945,8 @@ impl Nexus { let nexus = self.clone(); tokio::spawn(async move { let saga_result = nexus - .execute_saga::( + .sagas + .saga_execute::( params, ) .await; @@ -943,7 +955,7 @@ impl Nexus { Ok(_) => { info!( nexus.log, - "region replacement drive saga completed ok" + "region replacement start saga completed ok" ); } @@ -953,6 +965,56 @@ impl Nexus { } }); } + + SagaRequest::RegionReplacementDrive { params } => { + let nexus = self.clone(); + tokio::spawn(async move { + let saga_result = nexus + .sagas + .saga_execute::( + params, + ) + .await; + + match saga_result { + Ok(_) => { + info!( + nexus.log, + "region replacement drive saga completed ok" + ); + } + + Err(e) => { + warn!(nexus.log, "region replacement drive saga returned an error: {e}"); + } + } + }); + } + + SagaRequest::RegionReplacementFinish { params } => { + let nexus = self.clone(); + tokio::spawn(async move { + let saga_result = nexus + .sagas + .saga_execute::( + params, + ) + .await; + + match saga_result { + Ok(_) => { + info!( + nexus.log, + "region replacement finish saga completed ok" + ); + } + + Err(e) => { + warn!(nexus.log, "region replacement finish saga returned an error: {e}"); + } + } + }); + } } } diff --git a/nexus/src/app/project.rs b/nexus/src/app/project.rs index 2e852ba2d3..8f31e74728 100644 --- a/nexus/src/app/project.rs +++ b/nexus/src/app/project.rs @@ -59,7 +59,8 @@ impl super::Nexus { authz_silo, }; let saga_outputs = self - .execute_saga::( + .sagas + .saga_execute::( saga_params, ) .await?; diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 780cb85f3f..ee3818f40c 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -360,9 +360,9 @@ impl super::Nexus { // TODO // configure rack networking / boundary services here - // Currently calling some of the apis directly, but should we be using sagas - // going forward via self.run_saga()? Note that self.create_runnable_saga and - // self.execute_saga are currently not available within this scope. + // Currently calling some of the apis directly, but should we be using + // sagas going forward via self.sagas.saga_execute()? Note that + // this may not be available within this scope. info!(log, "Recording Rack Network Configuration"); let address_lot_name = Name::from_str(INFRA_LOT).map_err(|e| { Error::internal_error(&format!( diff --git a/nexus/src/app/saga.rs b/nexus/src/app/saga.rs index 8a717839f0..70118fb620 100644 --- a/nexus/src/app/saga.rs +++ b/nexus/src/app/saga.rs @@ -2,12 +2,57 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Saga management and execution +//! Nexus-level saga management and execution +//! +//! Steno provides its own interfaces for managing sagas. The interface here is +//! a thin wrapper aimed at the mini framework we've built at the Nexus level +//! that makes it easier to define and manage sagas in a uniform way. +//! +//! The basic lifecycle at the Nexus level is: +//! +//! ```text +//! input: saga type (impls [`NexusSaga`]), +//! saga parameters (specific to the saga's type) +//! | +//! | [`create_saga_dag()`] +//! v +//! SagaDag +//! | +//! | [`SagaExecutor::saga_prepare()`] +//! v +//! RunnableSaga +//! | +//! | [`RunnableSaga::start()`] +//! v +//! RunningSaga +//! | +//! | [`RunningSaga::wait_until_stopped()`] +//! v +//! StoppedSaga +//! ``` +//! +//! 
At the end, you can use [`StoppedSaga::into_omicron_result()`] to get at the +//! success output of the saga or convert any saga failure along the way to an +//! Omicron [`Error`]. +//! +//! This interface allows a few different use cases: +//! +//! * A common case is that some code in Nexus wants to do all of this: create +//! the saga DAG, run it, wait for it to finish, and get the result. +//! [`SagaExecutor::saga_execute()`] does all this using these lower-level +//! interfaces. +//! * An expected use case is that some code in Nexus wants to kick off a saga +//! but not wait for it to finish. In this case, they can just stop after +//! calling [`RunnableSaga::start()`]. The saga will continue running; they +//! just won't be able to directly wait for it to finish or get the result. +//! * Tests can use any of the lower-level pieces to examine intermediate state +//! or inject errors. use super::sagas::NexusSaga; use super::sagas::SagaInitError; use super::sagas::ACTION_REGISTRY; use crate::saga_interface::SagaContext; +use crate::Nexus; use anyhow::Context; use futures::future::BoxFuture; use futures::StreamExt; @@ -20,6 +65,7 @@ use omicron_common::api::external::LookupResult; use omicron_common::api::external::ResourceType; use omicron_common::bail_unless; use std::sync::Arc; +use std::sync::OnceLock; use steno::DagBuilder; use steno::SagaDag; use steno::SagaId; @@ -28,23 +74,8 @@ use steno::SagaResult; use steno::SagaResultOk; use uuid::Uuid; -/// Encapsulates a saga to be run before we actually start running it -/// -/// At this point, we've built the DAG, loaded it into the SEC, etc. but haven't -/// started it running. This is a useful point to inject errors, inspect the -/// DAG, etc. -pub(crate) struct RunnableSaga { - id: SagaId, - fut: BoxFuture<'static, SagaResult>, -} - -impl RunnableSaga { - #[cfg(test)] - pub(crate) fn id(&self) -> SagaId { - self.id - } -} - +/// Given a particular kind of Nexus saga (the type parameter `N`) and +/// parameters for that saga, construct a [`SagaDag`] for it pub(crate) fn create_saga_dag( params: N::Params, ) -> Result { @@ -56,70 +87,95 @@ pub(crate) fn create_saga_dag( Ok(SagaDag::new(dag, params)) } -impl super::Nexus { - pub(crate) async fn sagas_list( - &self, - opctx: &OpContext, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResult { - // The endpoint we're serving only supports `ScanById`, which only - // supports an ascending scan. - bail_unless!( - pagparams.direction == dropshot::PaginationOrder::Ascending - ); - opctx.authorize(authz::Action::Read, &authz::FLEET).await?; - let marker = pagparams.marker.map(|s| SagaId::from(*s)); - let saga_list = self - .sec_client - .saga_list(marker, pagparams.limit) - .await - .into_iter() - .map(nexus_types::internal_api::views::Saga::from) - .map(Ok); - Ok(futures::stream::iter(saga_list).boxed()) - } +/// Handle to a self-contained subsystem for kicking off sagas +/// +/// See the module-level documentation for details. +pub(crate) struct SagaExecutor { + sec_client: Arc, + log: slog::Logger, + nexus: OnceLock>, +} - pub(crate) async fn saga_get( - &self, - opctx: &OpContext, - id: Uuid, - ) -> LookupResult { - opctx.authorize(authz::Action::Read, &authz::FLEET).await?; - self.sec_client - .saga_get(SagaId::from(id)) - .await - .map(nexus_types::internal_api::views::Saga::from) - .map(Ok) - .map_err(|_: ()| { - Error::not_found_by_id(ResourceType::SagaDbg, &id) - })? 
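To make the second use case from the module documentation concrete (kick off a saga without waiting for it), a minimal sketch against the interfaces defined in this module might look like the following. The helper name `kick_off_without_waiting` and the generic caller context are illustrative, and the sketch assumes the same imports this module already uses (`Nexus`, `NexusSaga`, `create_saga_dag`, `Error`).

```rust
// Hedged sketch of the "start it and walk away" pattern described above.
async fn kick_off_without_waiting<N: NexusSaga>(
    nexus: &Nexus,
    params: N::Params,
) -> Result<(), Error> {
    let dag = create_saga_dag::<N>(params)?;
    let runnable = nexus.sagas.saga_prepare(dag).await?;
    // Once `start()` returns, the saga runs to completion even if the
    // returned handle is dropped; nothing here waits on the result.
    let _running = runnable.start().await?;
    Ok(())
}
```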
+impl SagaExecutor { + pub(crate) fn new( + sec_client: Arc, + log: slog::Logger, + ) -> SagaExecutor { + SagaExecutor { sec_client, log, nexus: OnceLock::new() } } - pub(crate) async fn create_runnable_saga( - self: &Arc, - dag: SagaDag, - ) -> Result { - // Construct the context necessary to execute this saga. - let saga_id = SagaId(Uuid::new_v4()); + // This is a little gross. We want to hang the SagaExecutor off of Nexus, + // but we also need to refer to Nexus, which thus can't exist when + // SagaExecutor is constructed. So we have the caller hand it to us after + // initialization. + // + // This isn't as statically verifiable as we'd normally like. But it's only + // one call site, it does fail cleanly if someone tries to use + // `SagaExecutor` before this has been set, and the result is much cleaner + // for all the other users of `SagaExecutor`. + // + // # Panics + // + // This function should be called exactly once in the lifetime of any + // `SagaExecutor` object. If it gets called more than once, concurrently or + // not, it panics. + pub(crate) fn set_nexus(&self, nexus: Arc) { + self.nexus.set(nexus).unwrap_or_else(|_| { + panic!("multiple initialization of SagaExecutor") + }) + } - self.create_runnable_saga_with_id(dag, saga_id).await + fn nexus(&self) -> Result<&Arc, Error> { + self.nexus + .get() + .ok_or_else(|| Error::unavail("saga are not available yet")) } - pub(crate) async fn create_runnable_saga_with_id( - self: &Arc, + // Low-level interface + // + // The low-level interface for running sagas starts with `saga_prepare()` + // and then uses the `RunnableSaga`, `RunningSaga`, and `StoppedSaga` types + // to drive execution forward. + + /// Given a DAG (which has generally been specifically created for a + /// particular saga and includes the saga's parameters), prepare to start + /// running the saga. This does not actually start the saga running. + /// + /// ## Async cancellation + /// + /// The Future returned by this function is basically not cancellation-safe, + /// in that if this Future is cancelled, one of a few things might be true: + /// + /// * Nothing has happened; it's as though this function was never called. + /// * The saga has been created, but not started. If this happens, the saga + /// will likely start running the next time saga recovery happens (e.g., + /// the next time Nexus starts up) and then run to completion. + /// + /// It's not clear what the caller would _want_ if they cancelled this + /// future, but whatever it is, clearly it's not guaranteed to be true. + /// You're better off avoiding cancellation. Fortunately, we currently + /// execute sagas either from API calls and background tasks, neither of + /// which can be cancelled. **This function should not be used in a + /// `tokio::select!` with a `timeout` or the like.** + pub(crate) async fn saga_prepare( + &self, dag: SagaDag, - saga_id: SagaId, ) -> Result { + // Construct the context necessary to execute this saga. + let nexus = self.nexus()?; + let saga_id = SagaId(Uuid::new_v4()); let saga_logger = self.log.new(o!( "saga_name" => dag.saga_name().to_string(), "saga_id" => saga_id.to_string() )); let saga_context = Arc::new(Arc::new(SagaContext::new( - self.clone(), - saga_logger, - Arc::clone(&self.authz), + nexus.clone(), + saga_logger.clone(), ))); - let future = self + + // Tell Steno about it. This does not start it running yet. 
+ info!(saga_logger, "preparing saga"); + let saga_completion_future = self .sec_client .saga_create( saga_id, @@ -135,22 +191,156 @@ impl super::Nexus { // Steno. Error::internal_error(&format!("{:#}", error)) })?; - Ok(RunnableSaga { id: saga_id, fut: future }) + Ok(RunnableSaga { + id: saga_id, + saga_completion_future, + log: saga_logger, + sec_client: self.sec_client.clone(), + }) } - pub(crate) async fn run_saga( + // Convenience functions + + /// Create a new saga (of type `N` with parameters `params`), start it + /// running, wait for it to finish, and report the result + /// + /// Note that this can take a long time and may not complete while parts of + /// the system are not functioning. Care should be taken when waiting on + /// this in a latency-sensitive context. + /// + /// + /// ## Async cancellation + /// + /// This function isn't really cancel-safe, in that if the Future returned + /// by this function is cancelled, one of three things may be true: + /// + /// * Nothing has happened; it's as though this function was never called. + /// * The saga has been created, but not started. If this happens, the saga + /// will likely start running the next time saga recovery happens (e.g., + /// the next time Nexus starts up) and then run to completion. + /// * The saga has already been started and will eventually run to + /// completion (even though this Future has been cancelled). + /// + /// It's not clear what the caller would _want_ if they cancelled this + /// future, but whatever it is, clearly it's not guaranteed to be true. + /// You're better off avoiding cancellation. Fortunately, we currently + /// execute sagas either from API calls and background tasks, neither of + /// which can be cancelled. **This function should not be used in a + /// `tokio::select!` with a `timeout` or the like.** + /// + /// Say you _do_ want to kick off a saga and wait only a little while before + /// it completes. In that case, you can use the lower-level interface to + /// first create the saga (a process which still should not be cancelled, + /// but would generally be quick) and then wait for it to finish. The + /// waiting part is cancellable. + /// + /// Note that none of this affects _crash safety_. In terms of a crash: the + /// crash will either happen before the saga has been created (in which + /// case it's as though we didn't even call this function) or after (in + /// which case the saga will run to completion). + pub(crate) async fn saga_execute( &self, - runnable_saga: RunnableSaga, + params: N::Params, ) -> Result { - let log = &self.log; + // Construct the DAG specific to this saga. + let dag = create_saga_dag::(params)?; + let runnable_saga = self.saga_prepare(dag).await?; + let running_saga = runnable_saga.start().await?; + let stopped_saga = running_saga.wait_until_stopped().await; + stopped_saga.into_omicron_result() + } +} + +/// Encapsulates a saga to be run before we actually start running it +/// +/// At this point, we've built the DAG, loaded it into the SEC, etc. but haven't +/// started it running. This is a useful point to inject errors, inspect the +/// DAG, etc. +pub(crate) struct RunnableSaga { + id: SagaId, + saga_completion_future: BoxFuture<'static, SagaResult>, + log: slog::Logger, + sec_client: Arc, +} + +impl RunnableSaga { + #[cfg(test)] + pub(crate) fn id(&self) -> SagaId { + self.id + } + + /// Start this saga running. + /// + /// Once this completes, even if you drop the returned `RunningSaga`, the + /// saga will still run to completion. 
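The "wait only a little while" pattern mentioned in the `saga_execute` docs can be spelled out roughly as below. This is a sketch only: the helper name, the 30-second budget, and the surrounding function are illustrative, and it assumes the same imports as this module. Preparation and start are left unbounded (those steps should not be cancelled); only the wait is placed under a timeout.

```rust
// Rough sketch: bound only the wait. The saga keeps running to completion
// (or unwinds) on its own even if the timeout fires.
async fn execute_with_bounded_wait<N: NexusSaga>(
    nexus: &Nexus,
    params: N::Params,
) -> Result<(), Error> {
    let dag = create_saga_dag::<N>(params)?;
    let running = nexus.sagas.saga_prepare(dag).await?.start().await?;
    match tokio::time::timeout(
        std::time::Duration::from_secs(30),
        running.wait_until_stopped(),
    )
    .await
    {
        Ok(stopped) => stopped.into_omicron_result().map(|_| ()),
        // Timed out: stop waiting, but the saga is still running and will
        // finish on its own; a caller could check back on it later.
        Err(_elapsed) => Ok(()),
    }
}
```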
+ pub(crate) async fn start(self) -> Result { + info!(self.log, "starting saga"); self.sec_client - .saga_start(runnable_saga.id) + .saga_start(self.id) .await .context("starting saga") .map_err(|error| Error::internal_error(&format!("{:#}", error)))?; - let result = runnable_saga.fut.await; - result.kind.map_err(|saga_error| { + Ok(RunningSaga { + id: self.id, + saga_completion_future: self.saga_completion_future, + log: self.log, + }) + } + + /// Start the saga running and wait for it to complete. + /// + /// This is a shorthand for `start().await?.wait_until_stopped().await`. + // There is no reason this needs to be limited to tests, but it's only used + // by the tests today. + #[cfg(test)] + pub(crate) async fn run_to_completion(self) -> Result { + Ok(self.start().await?.wait_until_stopped().await) + } +} + +/// Describes a saga that's started running +pub(crate) struct RunningSaga { + id: SagaId, + saga_completion_future: BoxFuture<'static, SagaResult>, + log: slog::Logger, +} + +impl RunningSaga { + /// Waits until this saga stops executing + /// + /// Normally, the saga will have finished successfully or failed and unwound + /// completely. If unwinding fails, it will be _stuck_ instead. + pub(crate) async fn wait_until_stopped(self) -> StoppedSaga { + let result = self.saga_completion_future.await; + info!(self.log, "saga finished"; "saga_result" => ?result); + StoppedSaga { id: self.id, result, log: self.log } + } +} + +/// Describes a saga that's finished +pub(crate) struct StoppedSaga { + id: SagaId, + result: SagaResult, + log: slog::Logger, +} + +impl StoppedSaga { + /// Fetches the raw Steno result for the saga's execution + /// + /// This is a test-only routine meant for use in tests that need to examine + /// the details of a saga's final state (e.g., examining the exact point at + /// which it failed). Non-test callers should use `into_omicron_result` + /// instead. + #[cfg(test)] + pub(crate) fn into_raw_result(self) -> SagaResult { + self.result + } + + /// Interprets the result of saga execution as a `Result` whose error type + /// is `Error`. + pub(crate) fn into_omicron_result(self) -> Result { + self.result.kind.map_err(|saga_error| { let mut error = saga_error .error_source .convert::() @@ -165,8 +355,12 @@ impl super::Nexus { undo_node, undo_error )); - error!(log, "saga stuck"; - "saga_id" => runnable_saga.id.to_string(), + // TODO this log message does not belong here because if the + // caller isn't checking this then we won't log it. We should + // probably make Steno log this since there may be no place in + // Nexus that's waiting for a given saga to finish. + error!(self.log, "saga stuck"; + "saga_id" => self.id.to_string(), "error" => #%error, ); } @@ -174,57 +368,55 @@ impl super::Nexus { error }) } +} - /// Starts the supplied `runnable_saga` and, if that succeeded, awaits its - /// completion and returns the raw `SagaResult`. - /// - /// This is a test-only routine meant for use in tests that need to examine - /// the details of a saga's final state (e.g., examining the exact point at - /// which it failed). Non-test callers should use `run_saga` instead (it - /// logs messages on error conditions and has a standard mechanism for - /// converting saga errors to generic Omicron errors). 
- #[cfg(test)] - pub(crate) async fn run_saga_raw_result( +impl super::Nexus { + /// Lists sagas currently managed by this Nexus instance + pub(crate) async fn sagas_list( &self, - runnable_saga: RunnableSaga, - ) -> Result { - self.sec_client - .saga_start(runnable_saga.id) + opctx: &OpContext, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResult { + // The endpoint we're serving only supports `ScanById`, which only + // supports an ascending scan. + bail_unless!( + pagparams.direction == dropshot::PaginationOrder::Ascending + ); + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + let marker = pagparams.marker.map(|s| SagaId::from(*s)); + let saga_list = self + .sagas + .sec_client + .saga_list(marker, pagparams.limit) .await - .context("starting saga") - .map_err(|error| Error::internal_error(&format!("{:#}", error)))?; - - Ok(runnable_saga.fut.await) + .into_iter() + .map(nexus_types::internal_api::views::Saga::from) + .map(Ok); + Ok(futures::stream::iter(saga_list).boxed()) } - pub fn sec(&self) -> &steno::SecClient { - &self.sec_client + /// Fetch information about a saga currently managed by this Nexus instance + pub(crate) async fn saga_get( + &self, + opctx: &OpContext, + id: Uuid, + ) -> LookupResult { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + self.sagas + .sec_client + .saga_get(SagaId::from(id)) + .await + .map(nexus_types::internal_api::views::Saga::from) + .map(Ok) + .map_err(|_: ()| { + Error::not_found_by_id(ResourceType::SagaDbg, &id) + })? } - /// Given a saga type and parameters, create a new saga and execute it. - pub(crate) async fn execute_saga( - self: &Arc, - params: N::Params, - ) -> Result { - // Construct the DAG specific to this saga. - let dag = create_saga_dag::(params)?; - - // Register the saga with the saga executor. - let runnable_saga = self.create_runnable_saga(dag).await?; - - // Actually run the saga to completion. - // - // XXX: This may loop forever in case `SecStore::record_event` fails. - // Ideally, `run_saga` wouldn't both start the saga and wait for it to - // be finished -- instead, it would start off the saga, and then return - // a notification channel that the caller could use to decide: - // - // - either to .await until completion - // - or to stop waiting after a certain period, while still letting the - // saga run in the background. - // - // For more, see https://github.com/oxidecomputer/omicron/issues/5406 - // and the note in `sec_store.rs`'s `record_event`. - self.run_saga(runnable_saga).await + /// For testing only: provides direct access to the underlying SecClient so + /// that tests can inject errors + #[cfg(test)] + pub(crate) fn sec(&self) -> &steno::SecClient { + &self.sagas.sec_client } } diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index ff0cc63d00..204d004938 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -883,15 +883,11 @@ pub(crate) mod test { let project_id = create_project(&client, PROJECT_NAME).await.identity.id; - // Build the saga DAG with the provided test parameters + // Build the saga DAG with the provided test parameters and run it. 
let opctx = test_opctx(cptestctx); let params = new_test_params(&opctx, project_id); - let dag = create_saga_dag::(params).unwrap(); - let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); - - // Actually run the saga - let output = nexus.run_saga(runnable_saga).await.unwrap(); - + let output = + nexus.sagas.saga_execute::(params).await.unwrap(); let disk = output .lookup_node_output::( "created_disk", diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index 24cf331a34..aab361eced 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -236,7 +236,7 @@ pub(crate) mod test { let project_id = create_project(client, PROJECT_NAME).await.identity.id; let disk = create_disk(&cptestctx).await; - // Build the saga DAG with the provided test parameters + // Build the saga DAG with the provided test parameters and run it. let opctx = test_opctx(&cptestctx); let params = Params { serialized_authn: Serialized::for_opctx(&opctx), @@ -244,11 +244,7 @@ pub(crate) mod test { disk_id: disk.id(), volume_id: disk.volume_id, }; - let dag = create_saga_dag::(params).unwrap(); - let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); - - // Actually run the saga - nexus.run_saga(runnable_saga).await.unwrap(); + nexus.sagas.saga_execute::(params).await.unwrap(); } #[nexus_test(server = crate::Server)] diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index ffbd5ff2f5..4f0ec7c0c6 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -1140,15 +1140,12 @@ pub mod test { let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_project_and_disk(&client).await; - // Build the saga DAG with the provided test parameters + // Build the saga DAG with the provided test parameters and run it let opctx = test_helpers::test_opctx(&cptestctx); let params = new_test_params(&opctx, project_id); - let dag = create_saga_dag::(params).unwrap(); - let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); - - // Actually run the saga nexus - .run_saga(runnable_saga) + .sagas + .saga_execute::(params) .await .expect("Saga should have succeeded"); } diff --git a/nexus/src/app/sagas/instance_delete.rs b/nexus/src/app/sagas/instance_delete.rs index b6fedc175d..2168657ef4 100644 --- a/nexus/src/app/sagas/instance_delete.rs +++ b/nexus/src/app/sagas/instance_delete.rs @@ -268,22 +268,17 @@ mod test { let nexus = &cptestctx.server.server_context().nexus; create_org_project_and_disk(&client).await; - // Build the saga DAG with the provided test parameters - let dag = create_saga_dag::( - new_test_params( - &cptestctx, - create_instance(&cptestctx, new_instance_create_params()) - .await - .id(), - ) - .await, + // Build the saga DAG with the provided test parameters and run it. 
+ let params = new_test_params( + &cptestctx, + create_instance(&cptestctx, new_instance_create_params()) + .await + .id(), ) - .unwrap(); - let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); - - // Actually run the saga + .await; nexus - .run_saga(runnable_saga) + .sagas + .saga_execute::(params) .await .expect("Saga should have succeeded"); } diff --git a/nexus/src/app/sagas/instance_ip_attach.rs b/nexus/src/app/sagas/instance_ip_attach.rs index c4e209dccd..b18ac3109f 100644 --- a/nexus/src/app/sagas/instance_ip_attach.rs +++ b/nexus/src/app/sagas/instance_ip_attach.rs @@ -427,10 +427,11 @@ pub(crate) mod test { for use_float in [false, true] { let params = new_test_params(&opctx, datastore, use_float).await; - - let dag = create_saga_dag::(params).unwrap(); - let saga = nexus.create_runnable_saga(dag).await.unwrap(); - nexus.run_saga(saga).await.expect("Attach saga should succeed"); + nexus + .sagas + .saga_execute::(params) + .await + .expect("Attach saga should succeed"); } // Sled agent has a record of the new external IPs. diff --git a/nexus/src/app/sagas/instance_ip_detach.rs b/nexus/src/app/sagas/instance_ip_detach.rs index 474cfb18a6..a5b51ce375 100644 --- a/nexus/src/app/sagas/instance_ip_detach.rs +++ b/nexus/src/app/sagas/instance_ip_detach.rs @@ -402,10 +402,11 @@ pub(crate) mod test { for use_float in [false, true] { let params = new_test_params(&opctx, datastore, use_float).await; - - let dag = create_saga_dag::(params).unwrap(); - let saga = nexus.create_runnable_saga(dag).await.unwrap(); - nexus.run_saga(saga).await.expect("Detach saga should succeed"); + nexus + .sagas + .saga_execute::(params) + .await + .expect("Detach saga should succeed"); } // Sled agent has removed its records of the external IPs. diff --git a/nexus/src/app/sagas/instance_migrate.rs b/nexus/src/app/sagas/instance_migrate.rs index e9ffaa194d..b8599feb04 100644 --- a/nexus/src/app/sagas/instance_migrate.rs +++ b/nexus/src/app/sagas/instance_migrate.rs @@ -575,7 +575,7 @@ async fn sim_instance_migrate( #[cfg(test)] mod tests { - use crate::app::{saga::create_saga_dag, sagas::test_helpers}; + use crate::app::sagas::test_helpers; use camino::Utf8Path; use dropshot::test_util::ClientTestContext; use nexus_test_interface::NexusServer; @@ -708,9 +708,11 @@ mod tests { }, }; - let dag = create_saga_dag::(params).unwrap(); - let saga = nexus.create_runnable_saga(dag).await.unwrap(); - nexus.run_saga(saga).await.expect("Migration saga should succeed"); + nexus + .sagas + .saga_execute::(params) + .await + .expect("Migration saga should succeed"); // Merely running the migration saga (without simulating any completion // steps in the simulated agents) should not change where the instance diff --git a/nexus/src/app/sagas/instance_start.rs b/nexus/src/app/sagas/instance_start.rs index 9730025099..adde040a77 100644 --- a/nexus/src/app/sagas/instance_start.rs +++ b/nexus/src/app/sagas/instance_start.rs @@ -756,9 +756,11 @@ mod test { db_instance, }; - let dag = create_saga_dag::(params).unwrap(); - let saga = nexus.create_runnable_saga(dag).await.unwrap(); - nexus.run_saga(saga).await.expect("Start saga should succeed"); + nexus + .sagas + .saga_execute::(params) + .await + .expect("Start saga should succeed"); test_helpers::instance_simulate(cptestctx, &instance_id).await; let vmm_state = test_helpers::instance_fetch(cptestctx, instance_id) @@ -918,11 +920,13 @@ mod test { ))) .await; - let saga = nexus.create_runnable_saga(dag).await.unwrap(); - let saga_error = nexus - 
.run_saga_raw_result(saga) + let runnable_saga = nexus.sagas.saga_prepare(dag).await.unwrap(); + let saga_result = runnable_saga + .run_to_completion() .await .expect("saga execution should have started") + .into_raw_result(); + let saga_error = saga_result .kind .expect_err("saga should fail due to injected error"); diff --git a/nexus/src/app/sagas/mod.rs b/nexus/src/app/sagas/mod.rs index ac9a30dd98..1604d6013d 100644 --- a/nexus/src/app/sagas/mod.rs +++ b/nexus/src/app/sagas/mod.rs @@ -32,6 +32,8 @@ pub mod instance_ip_detach; pub mod instance_migrate; pub mod instance_start; pub mod project_create; +pub mod region_replacement_drive; +pub mod region_replacement_finish; pub mod region_replacement_start; pub mod snapshot_create; pub mod snapshot_delete; @@ -163,6 +165,12 @@ fn make_action_registry() -> ActionRegistry { ::register_actions( &mut registry, ); + ::register_actions( + &mut registry, + ); + ::register_actions( + &mut registry, + ); #[cfg(test)] ::register_actions(&mut registry); @@ -320,6 +328,14 @@ pub enum SagaRequest { RegionReplacementStart { params: region_replacement_start::Params, }, + + RegionReplacementDrive { + params: region_replacement_drive::Params, + }, + + RegionReplacementFinish { + params: region_replacement_finish::Params, + }, } impl SagaRequest { diff --git a/nexus/src/app/sagas/project_create.rs b/nexus/src/app/sagas/project_create.rs index 6893590519..33f150ec32 100644 --- a/nexus/src/app/sagas/project_create.rs +++ b/nexus/src/app/sagas/project_create.rs @@ -154,7 +154,7 @@ async fn spc_create_vpc_params( #[cfg(test)] mod test { use crate::{ - app::saga::create_saga_dag, app::sagas::project_create::Params, + app::sagas::project_create::Params, app::sagas::project_create::SagaProjectCreate, external_api::params, }; use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; @@ -263,15 +263,11 @@ mod test { // Before running the test, confirm we have no records of any projects. verify_clean_slate(nexus.datastore()).await; - // Build the saga DAG with the provided test parameters + // Build the saga DAG with the provided test parameters and run it. let opctx = test_opctx(&cptestctx); let authz_silo = opctx.authn.silo_required().unwrap(); let params = new_test_params(&opctx, authz_silo); - let dag = create_saga_dag::(params).unwrap(); - let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); - - // Actually run the saga - nexus.run_saga(runnable_saga).await.unwrap(); + nexus.sagas.saga_execute::(params).await.unwrap(); } #[nexus_test(server = crate::Server)] diff --git a/nexus/src/app/sagas/region_replacement_drive.rs b/nexus/src/app/sagas/region_replacement_drive.rs new file mode 100644 index 0000000000..e2f7620178 --- /dev/null +++ b/nexus/src/app/sagas/region_replacement_drive.rs @@ -0,0 +1,1754 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! # first, some Crucible background # +//! +//! Crucible's Upstairs has two methods of swapping in a new downstairs to a +//! region set: +//! +//! - A running Upstairs that is currently activated can be sent a request to +//! replace a downstairs with a new one - this can be done while accepting all +//! the usual IO requests. This is called _Live Repair_. +//! +//! - Prior to activation, an Upstairs will perform _Reconciliation_ to ensure +//! that all the downstairs are consistent. Activation is held back until this +//! is true. +//! 
+//! Each of these operations will ensure that each member of the three-way +//! mirror that is a region set is the same. +//! +//! Usually, each running Volume will have been constructed from a Volume +//! Construction Request (VCR) that Nexus created as part of a +//! `volume_checkout`. This VCR is sent to a service (for example, a Propolis +//! or Pantry) and ultimately passed to `Volume::construct` to create a running +//! Volume. This is then activated, and then IO can proceed. +//! +//! # how did we get here? # +//! +//! The process of region replacement begins with a region replacement request. +//! Today this is created either manually with omdb, or as a result of a +//! physical disk being expunged. Affected VCRs are modified first by the region +//! replacement start saga, which includes allocating a new replacement region. +//! This then places the region replacement request into the state "Running". +//! See that saga's documentation for more information. +//! +//! # why does the drive saga exist? # +//! +//! Region replacement is similar to instance migration in that it is initiated +//! by Nexus but not directly controlled by it. Instance migration requires a +//! source and destination Propolis to exist, and then Nexus waits for a +//! callback to occur. For region replacement, it's Nexus' job to trigger +//! either the Live Repair or Reconciliation operations via some Upstairs. Nexus +//! then either receives a notification of success, or sees that the Volume is +//! no longer in a degraded state as the result of some polling operation. +//! +//! Note: _it's very important that only_ the Upstairs can make the +//! determination that a Volume is no longer degraded. Nexus should not be +//! assuming anything. This is the _golden rule_ that this saga must follow. +//! +//! Volumes are in this degraded state the moment one or more targets in a +//! region set is no longer functional. An Upstairs can still accept reads, +//! writes, and flushes with only two out of three present in the set, but it's +//! operating with a reduced redundancy. +//! +//! Through disk expungement, an operator has told Nexus that failure is not +//! transient. The region replacement start saga then modifies them: a blank +//! region is swapped in to replace one of the regions that are gone. Then this +//! saga triggers either Live Repair or Reconciliation, and that's it right? +//! +//! Volumes back higher level objects that users interact with: disks, +//! snapshots, images, etc. Users can start and stop Upstairs by starting and +//! stopping Instances. This interrupts any current operation on the Volume! +//! This is ok: both operations were designed so that interruptions are not a +//! problem, but it does stop progress. +//! +//! Say an Instance is running, and that Instance's propolis is performing a +//! Live Repair. If a user stops that Instance, the propolis is torn down, and +//! the Volume remains degraded. The next time that Volume is constructed and +//! activated, the Upstairs will check each downstairs in the region set, see +//! that there's a difference, and perform Reconciliation. If the user stops an +//! Instance and does not start it again, that difference will remain. +//! +//! Nexus can at that point send the Volume to a Pantry and activate it, causing +//! Reconciliation. At any time, the user can come along and start the Instance +//! in question, which would take over the activation from the Pantry - this +//! 
would cause that Reconciliation to fail, and the new propolis server would +//! start its own Reconciliation. Again, the user may then stop the Instance, +//! halting progress. +//! +//! This saga is responsible for driving forward the Volume repair process, by +//! initiating repair operations. One invocation of this saga is most likely not +//! enough to repair a Volume: Nexus must continuously monitor the degraded +//! Volumes and initiate the necessary operation (LR or Reconciliation) until +//! those Volumes are no longer degraded. Those operations can fail or be +//! interrupted at any time due to user actions. +//! +//! # what does the saga do? # +//! +//! A background task will look at all region replacement requests in the +//! "Running" state, and call this saga for each one. This saga then does what's +//! required to fix these degraded Volumes. +//! +//! This saga handles the following region replacement request state +//! transitions: +//! +//! ```text +//! Running <-- +//! | +//! | | +//! v | +//! | +//! Driving -- +//! +//! | +//! v +//! +//! ReplacementDone +//! ``` +//! +//! The first thing this saga does is set itself as the "operating saga" for the +//! request, and change the state to "Driving". Then, it performs the following +//! (generic) steps: +//! +//! 1. If there was a previous repair step, check what the status of the +//! Volume's repair is. Determine if there is action required by Nexus, if +//! Nexus should wait, or if Nexus saw that some response that indicated the +//! repair was done (don't forget the golden rule!). +//! +//! If there was no previous repair step, then some action is required. +//! +//! 2. If there is action required, prepare an action that will initiate either +//! Live Repair or Reconciliation, based on the current state of the world +//! (noting that it's entirely possible that state will change before +//! executing that action, and invalidate the action!). +//! +//! 3. If there is one, execute the action. +//! +//! 4. If an action was executed without error, then commit it to CRDB as a +//! repair step. +//! +//! Recording the steps that were taken as part of repairing this Volume helps +//! this saga determine what to do, and can be helpful for Oxide support staff +//! if there's a problem. +//! +//! TODO: Cases not handled yet: +//! - a disk attached to a pantry for bulk imports +//! + +use super::{ + ActionRegistry, NexusActionContext, NexusSaga, SagaInitError, + ACTION_GENERATE_ID, +}; +use crate::app::db::datastore::InstanceAndActiveVmm; +use crate::app::db::lookup::LookupPath; +use crate::app::sagas::common_storage::get_pantry_address; +use crate::app::sagas::declare_saga_actions; +use crate::app::{authn, authz, db}; +use chrono::DateTime; +use chrono::Utc; +use nexus_db_model::VmmState; +use nexus_types::identity::Resource; +use omicron_common::api::external::Error; +use propolis_client::types::ReplaceResult; +use serde::Deserialize; +use serde::Serialize; +use slog::Logger; +use std::net::SocketAddrV6; +use steno::ActionError; +use steno::Node; +use uuid::Uuid; + +// region replacement drive saga: input parameters + +#[derive(Debug, Deserialize, Serialize)] +pub(crate) struct Params { + pub serialized_authn: authn::saga::Serialized, + pub request: db::model::RegionReplacement, +} + +// region replacement drive saga: actions + +declare_saga_actions! 
{ + region_replacement_drive; + SET_SAGA_ID -> "unused_1" { + + srrd_set_saga_id + - srrd_set_saga_id_undo + } + DRIVE_REGION_REPLACEMENT_CHECK -> "check" { + + srrd_drive_region_replacement_check + } + DRIVE_REGION_REPLACEMENT_PREPARE -> "prepare" { + + srrd_drive_region_replacement_prepare + } + DRIVE_REGION_REPLACEMENT_EXECUTE -> "execute" { + + srrd_drive_region_replacement_execute + } + DRIVE_REGION_REPLACEMENT_COMMIT -> "commit" { + + srrd_drive_region_replacement_commit + - srrd_drive_region_replacement_commit_undo + } + FINISH_SAGA -> "unused_2" { + + srrd_finish_saga + } +} + +// region replacement drive saga: definition + +#[derive(Debug)] +pub(crate) struct SagaRegionReplacementDrive; +impl NexusSaga for SagaRegionReplacementDrive { + const NAME: &'static str = "region-replacement-drive"; + type Params = Params; + + fn register_actions(registry: &mut ActionRegistry) { + region_replacement_drive_register_actions(registry); + } + + fn make_saga_dag( + _params: &Self::Params, + mut builder: steno::DagBuilder, + ) -> Result { + builder.append(Node::action( + "saga_id", + "GenerateSagaId", + ACTION_GENERATE_ID.as_ref(), + )); + + builder.append(Node::action( + "job_id", + "GenerateJobId", + ACTION_GENERATE_ID.as_ref(), + )); + + builder.append(set_saga_id_action()); + + builder.append(drive_region_replacement_check_action()); + builder.append(drive_region_replacement_prepare_action()); + builder.append(drive_region_replacement_execute_action()); + builder.append(drive_region_replacement_commit_action()); + + builder.append(finish_saga_action()); + + Ok(builder.build()?) + } +} + +// region replacement drive saga: action implementations + +async fn srrd_set_saga_id( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let saga_id = sagactx.lookup::("saga_id")?; + + // Change the request record here to an intermediate "driving" state to + // block out other sagas that will be triggered for the same request. + osagactx + .datastore() + .set_region_replacement_driving(&opctx, params.request.id, saga_id) + .await + .map_err(ActionError::action_failed)?; + + Ok(()) +} + +async fn srrd_set_saga_id_undo( + sagactx: NexusActionContext, +) -> Result<(), anyhow::Error> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let saga_id = sagactx.lookup::("saga_id")?; + + osagactx + .datastore() + .undo_set_region_replacement_driving(&opctx, params.request.id, saga_id) + .await?; + + Ok(()) +} + +/// What is the status of the repair? +#[derive(Debug, Serialize, Deserialize)] +enum DriveCheck { + /// The last step is still running, so don't do anything + LastStepStillRunning, + + /// The last step is not still running, but all we can do is wait. + Wait, + + /// We got some status that indicates that the region has been replaced! + Done, + + /// Some action is required. Either the last step is no longer running, or + /// the repair needs to be unstuck. 
+ ActionRequired, +} + +async fn srrd_drive_region_replacement_check( + sagactx: NexusActionContext, +) -> Result { + let log = sagactx.user_data().log(); + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let last_request_step = osagactx + .datastore() + .current_region_replacement_request_step(&opctx, params.request.id) + .await + .map_err(ActionError::action_failed)?; + + let Some(last_request_step) = last_request_step else { + // This is the first time this saga was invoked for this particular + // replacement request, so some action is required + info!( + log, + "no steps taken yet"; + "region replacement id" => %params.request.id, + ); + + return Ok(DriveCheck::ActionRequired); + }; + + // If the last request step is still "running", then check on it, and + // determine if any action is required. + + match last_request_step.step_type { + db::model::RegionReplacementStepType::Propolis => { + let Some((step_instance_id, step_vmm_id)) = + last_request_step.instance_and_vmm_ids() + else { + // This record is invalid, but we can still attempt to drive the + // repair forward. + error!( + log, + "step at {} has no associated ids", last_request_step.step_time; + "region replacement id" => ?params.request.id, + "last replacement drive time" => ?last_request_step.step_time, + "last replacement drive step" => "propolis", + ); + + return Ok(DriveCheck::ActionRequired); + }; + + let (.., authz_instance) = + LookupPath::new(&opctx, &osagactx.datastore()) + .instance_id(step_instance_id) + .lookup_for(authz::Action::Read) + .await + .map_err(ActionError::action_failed)?; + + let instance_and_vmm = osagactx + .datastore() + .instance_fetch_with_vmm(&opctx, &authz_instance) + .await + .map_err(ActionError::action_failed)?; + + check_from_previous_propolis_step( + log, + params.request.id, + last_request_step.step_time, + step_instance_id, + step_vmm_id, + instance_and_vmm, + ) + .await + } + + db::model::RegionReplacementStepType::Pantry => { + // Check if the Pantry is still trying to activate the Volume + + let Some(pantry_address) = last_request_step.pantry_address() + else { + // This record is invalid, but we can still attempt to drive the + // repair forward. + + error!( + log, + "step has no associated pantry address"; + "region replacement id" => %params.request.id, + "last replacement drive time" => ?last_request_step.step_time, + "last replacement drive step" => "pantry", + ); + + return Ok(DriveCheck::ActionRequired); + }; + + let Some(job_id) = last_request_step.step_associated_pantry_job_id + else { + // This record is invalid, but we can still attempt to drive the + // repair forward. 
+ + error!( + log, + "step has no associated pantry job id"; + "region replacement id" => %params.request.id, + "last replacement drive time" => ?last_request_step.step_time, + "last replacement drive step" => "pantry", + "pantry address" => ?pantry_address, + ); + + return Ok(DriveCheck::ActionRequired); + }; + + let Some(new_region_id) = params.request.new_region_id else { + return Err(ActionError::action_failed(format!( + "region replacement request {} has new_region_id = None", + params.request.id, + ))); + }; + + let new_region: db::model::Region = osagactx + .datastore() + .get_region(new_region_id) + .await + .map_err(ActionError::action_failed)?; + + let volume_id = new_region.volume_id().to_string(); + + check_from_previous_pantry_step( + log, + params.request.id, + last_request_step.step_time, + pantry_address, + job_id, + &volume_id.to_string(), + ) + .await + } + } +} + +/// Generate a DriveCheck if the previous step was a Propolis step +async fn check_from_previous_propolis_step( + log: &Logger, + request_id: Uuid, + step_time: DateTime, + step_instance_id: Uuid, + step_vmm_id: Uuid, + instance_and_vmm: InstanceAndActiveVmm, +) -> Result { + // When this saga recorded a Propolis replacement step, an instance existed + // and had a running vmm. Is this true now? + + let Some(current_vmm) = instance_and_vmm.vmm() else { + // There is no current VMM, but if the current repair step was + // `Propolis` then there was previously one. Some action is required: + // namely, attach disk to the pantry and let it perform reconcilation. + + info!( + log, + "instance from last step no longer has vmm"; + "region replacement id" => ?request_id, + "last replacement drive time" => ?step_time, + "last replacement drive step" => "propolis", + "instance id" => ?step_instance_id, + ); + + return Ok(DriveCheck::ActionRequired); + }; + + // `migration_id` is set at the beginning of an instance migration (before + // anything has happened), and is cleared at the end (after the migration is + // finished but before the migration target activates disk Volumes). For + // now, return `DriveCheck::Wait`, and pick up driving the region + // replacement forward after the migration has completed. + // + // If this saga does not wait, it will interleave with the instance + // migration saga. Depending on Nexus' view of what stage the migration is + // in, volume replacement requests could be sent to the source propolis or + // destination propolis. This is because any call to + // `instance_fetch_with_vmm` will always return a VMM that is either a + // migration source or not migrating. If this saga calls + // `instance_fetch_with_vmm` multiple times during a migration, it will + // return the source propolis until the migration is done, where then it + // will return the destination propolis. + // + // Processing a replacement request does _not_ cause an activation, so + // sending a replacement request to the source propolis will not cause the + // destination to be unable to activate (even though the destination _could_ + // be using a VCR with a lower generation number than what the replacement + // request has!). It will probably cause live repair to start on the source, + // which is alright because it can be cancelled at any time (and will be + // when the destination propolis activates the Volume). + // + // Until crucible#871 is addressed, sending the replacement request to the + // destination propolis could cause a panic if activation hasn't occurred + // yet. 
Even if this saga does wait, this same potential exists because the + // migration is considered complete before propolis activates disk Volumes. + // + // If the destination propolis' Volume activated, the Upstairs will return a + // `ReplacementResult`: either `VcrMatches` (if the destination is using the + // updated VCR) or `Started` (if the destination is using the pre-update VCR + // and the replacement result triggers live repair). + // + // Also note: if the migration target was sent a Volume that refers to a + // region that is no longer responding, it will hang trying to activate, but + // the migration itself will succeed (clearing the migration ID!). This is + // especially bad because it's easy to hit: if a region goes away and a + // migration is triggered before the region replacement start saga can swap + // out the region that's gone, the migration saga will checkout the + // pre-update Volume and the destination propolis will hit this scenario. + + if instance_and_vmm.instance().runtime().migration_id.is_some() { + info!( + log, + "instance is undergoing migration, wait for it to finish"; + "region replacement id" => ?request_id, + "last replacement drive time" => ?step_time, + "last replacement drive step" => "propolis", + "instance id" => ?step_instance_id, + ); + + return Ok(DriveCheck::Wait); + } + + // Check if the VMM has changed. + + if current_vmm.id != step_vmm_id { + // The VMM has changed! This can be due to a stop and start of the + // instance, or a migration. If this is the case, then the new VMM + // (propolis server) could be performing reconcilation as part of the + // Volume activation. Nexus should be receiving notifications from the + // Upstairs there. + // + // If this is the result of a stop/start, then the new vmm will be using + // the updated VCR. If the new vmm is in the right state, this drive + // saga can re-send the target replacement request to poll if the + // replacement is done yet. + + info!( + log, + "vmm has changed from last step"; + "region replacement id" => ?request_id, + "last replacement drive time" => ?step_time, + "last replacement drive step" => "propolis", + "instance id" => ?step_instance_id, + "old vmm id" => ?step_vmm_id, + "new vmm id" => ?current_vmm.id, + ); + + Ok(DriveCheck::ActionRequired) + } else { + // The VMM has not changed: check if the VMM is still active. + + let state = current_vmm.runtime.state; + + info!( + log, + "vmm from last step in state {}", state; + "region replacement id" => ?request_id, + "last replacement drive time" => ?step_time, + "last replacement drive step" => "propolis", + "instance id" => ?step_instance_id, + "vmm id" => ?step_vmm_id, + ); + + match &state { + // If propolis is running, or rebooting, then it is likely that the + // Upstairs that was previously sent the volume replacement request + // is still running the live repair (note: rebooting does not affect + // the running volume). + VmmState::Running | VmmState::Rebooting => { + // Until crucible#1277 is merged, choose to _not_ poll Propolis + // (which would happen if ActionRequired was returned here). + // + // TODO Nexus needs to poll, as it could miss receiving the + // "Finished" notification that would complete this region + // replacement. Most of the time it will receive that ok though. + + Ok(DriveCheck::LastStepStillRunning) + } + + VmmState::Starting => { + // This state is unexpected, considering Nexus previously sent a + // target replacement request to this propolis! 
+ + return Err(ActionError::action_failed(format!( + "vmm {} propolis is Starting", + step_vmm_id, + ))); + } + + VmmState::Migrating => { + // This state is unexpected because we should have already + // returned `DriveCheck::Wait` above. + + return Err(ActionError::action_failed(format!( + "vmm {} propolis is Migrating!", + step_vmm_id, + ))); + } + + VmmState::Stopping + | VmmState::Stopped + | VmmState::Failed + | VmmState::Destroyed + | VmmState::SagaUnwound => { + // The VMM we sent the replacement request to is probably not + // operating on the request anymore. Wait to see where to send + // the next action: if the instance is migrating, eventually + // that will be a new propolis. If the instance is stopping, + // then that will be a Pantry. Otherwise, the saga will wait: + // propolis should only receive target replacement requests when + // in a good state. + + Ok(DriveCheck::Wait) + } + } + } +} + +/// Generate a DriveCheck if the previous step was a Pantry step +async fn check_from_previous_pantry_step( + log: &Logger, + request_id: Uuid, + step_time: DateTime, + pantry_address: SocketAddrV6, + job_id: Uuid, + volume_id: &str, +) -> Result { + // If there is a committed step, Nexus attached this Volume to a Pantry, and + // requested activation in a background job. Is it finished? + + let endpoint = format!("http://{}", pantry_address); + let client = crucible_pantry_client::Client::new(&endpoint); + + match client.is_job_finished(&job_id.to_string()).await { + Ok(status) => { + if status.job_is_finished { + // The job could be done because it failed: check the volume + // status to query if it is active or gone. + + match client.volume_status(volume_id).await { + Ok(volume_status) => { + info!( + log, + "pantry job finished, saw status {volume_status:?}"; + "region replacement id" => %request_id, + "last replacement drive time" => ?step_time, + "last replacement drive step" => "pantry", + "pantry address" => ?pantry_address, + ); + + if volume_status.seen_active { + // It may not be active now if a Propolis activated + // the volume, but if the Pantry's ever seen this + // Volume active before, then the reconciliation + // completed ok. + + Ok(DriveCheck::Done) + } else { + // The Pantry has never seen this active before, and + // the job finished - some action is required, the + // job failed. + + Ok(DriveCheck::ActionRequired) + } + } + + Err(e) => { + // Seeing 410 Gone here may mean that the pantry + // performed reconciliation successfully, but had a + // propolis activation take over from the pantry's. If + // this occurred before a "reconciliation successful" + // notification occurred, and the propolis activation + // does not require a reconcilation (because the pantry + // did it already), then another notification will not + // be resent by propolis. + // + // Return ActionRequired here so that this saga will + // re-send the target replacement request to the + // propolis the did the take over: if the above race + // occurred, that request will return + // ReplaceResult::VcrMatches. 
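The Propolis branches of the check above condense into a small decision table over the instance's migration state, the identity of the current VMM, and that VMM's state. The sketch below follows the same ordering of checks, but with stand-in types (plain integers for VMM ids, a local `VmmState` that only mirrors the variant names) rather than the real Nexus models:

```rust
#[derive(Debug, PartialEq)]
enum DriveCheck {
    ActionRequired,
    Wait,
    LastStepStillRunning,
}

/// Stand-in for the VMM states consulted above.
#[derive(Clone, Copy)]
enum VmmState {
    Starting,
    Running,
    Rebooting,
    Migrating,
    Stopping,
    Stopped,
    Failed,
    Destroyed,
    SagaUnwound,
}

/// Condensed decision table for "the last step was a Propolis step".
/// `current_vmm` is the VMM currently attached to the instance, if any, as
/// (vmm id, state); `step_vmm_id` is the VMM the step was recorded against.
fn check_previous_propolis_step(
    migration_in_progress: bool,
    step_vmm_id: u64,
    current_vmm: Option<(u64, VmmState)>,
) -> Result<DriveCheck, String> {
    let Some((vmm_id, state)) = current_vmm else {
        // The VMM went away: the volume should be handed to a Pantry next.
        return Ok(DriveCheck::ActionRequired);
    };

    if migration_in_progress {
        // Don't interleave with the instance migration saga.
        return Ok(DriveCheck::Wait);
    }

    if vmm_id != step_vmm_id {
        // A stop/start or migration produced a new propolis; poll it.
        return Ok(DriveCheck::ActionRequired);
    }

    match state {
        // Same propolis, still up: assume live repair is proceeding.
        VmmState::Running | VmmState::Rebooting => {
            Ok(DriveCheck::LastStepStillRunning)
        }
        // Unexpected for a VMM that was already sent a replacement request
        // (Migrating should have been caught by the check above).
        VmmState::Starting | VmmState::Migrating => {
            Err(format!("vmm {vmm_id} in unexpected state"))
        }
        // Tearing down: wait to see where the volume lands next.
        _ => Ok(DriveCheck::Wait),
    }
}

fn main() {
    assert_eq!(
        check_previous_propolis_step(false, 1, Some((1, VmmState::Running))),
        Ok(DriveCheck::LastStepStillRunning),
    );
    assert_eq!(
        check_previous_propolis_step(true, 1, Some((2, VmmState::Running))),
        Ok(DriveCheck::Wait),
    );
}
```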
+ + error!( + log, + "pantry job finished, saw error {e}"; + "region replacement id" => %request_id, + "last replacement drive time" => ?step_time, + "last replacement drive step" => "pantry", + "pantry address" => ?pantry_address, + ); + + Ok(DriveCheck::ActionRequired) + } + } + } else { + info!( + log, + "pantry is still performing reconcilation"; + "region replacement id" => %request_id, + "last replacement drive time" => ?step_time, + "last replacement drive step" => "pantry", + "pantry address" => ?pantry_address, + ); + + Ok(DriveCheck::LastStepStillRunning) + } + } + + Err(e) => { + // If there was some problem accessing the Pantry. It may be because + // that Pantry is now gone, so check on it. + + error!( + log, + "pantry returned an error checking job {job_id}: {e}"; + "region replacement id" => %request_id, + "last replacement drive time" => ?step_time, + "last replacement drive step" => "pantry", + "pantry address" => ?pantry_address, + ); + + match client.pantry_status().await { + Ok(_) => { + // The pantry responded, so it's still there. It may be that + // the volume is no longer attached because a Propolis + // activation took over from the Pantry. + + match client.volume_status(&volume_id).await { + Ok(_) => { + // The volume is still there as an entry, but the + // job isn't? Action is required: this saga should + // delete the attached volume, then re-attach it. + + info!( + log, + "pantry still has active volume"; + "region replacement id" => %request_id, + "last replacement drive time" => ?step_time, + "last replacement drive step" => "pantry", + "pantry address" => ?pantry_address, + "volume id" => volume_id, + ); + + Ok(DriveCheck::ActionRequired) + } + + Err(e) => { + // The volume is gone: it's likely been activated by + // a Propolis, but this could also be because the + // Pantry bounced. Some further action is required: + // either poll the propolis that stole the + // activation or send the volume to a new Pantry. + + error!( + log, + "pantry returned an error checking on volume: {e}"; + "region replacement id" => %request_id, + "last replacement drive time" => ?step_time, + "last replacement drive step" => "pantry", + "pantry address" => ?pantry_address, + "volume id" => volume_id, + ); + + Ok(DriveCheck::ActionRequired) + } + } + } + + Err(e) => { + // The pantry is not responding on its status endpoint. + // Further action is required to drive the repair, which may + // be attaching to another Pantry. + + error!( + log, + "pantry returned an error checking on status: {e}"; + "region replacement id" => %request_id, + "last replacement drive time" => ?step_time, + "last replacement drive step" => "pantry", + "pantry address" => ?pantry_address, + ); + + Ok(DriveCheck::ActionRequired) + } + } + } + } +} + +/// What action does this saga invocation need to take? +#[allow(clippy::large_enum_variant)] +#[derive(Debug, Serialize, Deserialize)] +enum DriveAction { + /// Do nothing - the repair is proceeding from the last drive step, or is + /// done. + Noop { replacement_done: bool }, + + /// If there is no active Propolis that is running the Volume, attach the + /// associated Volume to a Pantry. + Pantry { step: db::model::RegionReplacementStep, volume_id: Uuid }, + + /// If the Volume is currently running in a Propolis server, then send the + /// volume replacement request there. 
+ Propolis { step: db::model::RegionReplacementStep, disk: db::model::Disk }, +} + +async fn srrd_drive_region_replacement_prepare( + sagactx: NexusActionContext, +) -> Result { + let log = sagactx.user_data().log(); + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + // If the previous saga step did _not_ require an action, then return Noop + // here. + + let check_result = sagactx.lookup::("check")?; + + if !matches!(check_result, DriveCheck::ActionRequired) { + return Ok(DriveAction::Noop { + replacement_done: matches!(check_result, DriveCheck::Done), + }); + } + + // Otherwise, take a look at the state of the world, and prepare an action + // to execute. + + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let nexus = osagactx.nexus(); + + let Some(new_region_id) = params.request.new_region_id else { + return Err(ActionError::action_failed(format!( + "region replacement request {} has new_region_id = None", + params.request.id, + ))); + }; + + let new_region: db::model::Region = osagactx + .datastore() + .get_region(new_region_id) + .await + .map_err(ActionError::action_failed)?; + + let maybe_disk = osagactx + .datastore() + .disk_for_volume_id(new_region.volume_id()) + .await + .map_err(ActionError::action_failed)?; + + // Does this volume back a disk? + let drive_action = if let Some(disk) = maybe_disk { + match &disk.runtime().attach_instance_id { + Some(instance_id) => { + // The region's volume is attached to an instance + let (.., authz_instance) = + LookupPath::new(&opctx, &osagactx.datastore()) + .instance_id(*instance_id) + .lookup_for(authz::Action::Read) + .await + .map_err(ActionError::action_failed)?; + + let instance_and_vmm = osagactx + .datastore() + .instance_fetch_with_vmm(&opctx, &authz_instance) + .await + .map_err(ActionError::action_failed)?; + + if let Some(migration_id) = + instance_and_vmm.instance().runtime().migration_id + { + // If the check node did not observe migration_id as Some, + // it will not have returned `Wait`, but here in the prepare + // node we are observing that migration_id is Some: this + // means an instance migration was triggered in the middle + // of the region replacement. + // + // Log a message and bail out. + + info!( + log, + "instance migration_id is {migration_id}"; + "region replacement id" => %params.request.id, + "disk id" => ?disk.id(), + "instance id" => ?instance_id, + ); + + return Err(ActionError::action_failed( + "instance is undergoing migration".to_string(), + )); + } + + match instance_and_vmm.vmm() { + Some(vmm) => { + // The disk is attached to an instance and there's an + // active propolis server. Send the volume replacement + // request to the running Volume there if the runtime + // state is either running or rebooting. + + let state = vmm.runtime.state; + + info!( + log, + "disk attached to instance with vmm in state {state}"; + "region replacement id" => %params.request.id, + "disk id" => ?disk.id(), + "instance id" => ?instance_id, + "vmm id" => ?vmm.id, + ); + + match &state { + VmmState::Running | VmmState::Rebooting => { + // Propolis server is ok to receive the volume + // replacement request. 
+ } + + VmmState::Starting + | VmmState::Stopping + | VmmState::Stopped + | VmmState::Migrating + | VmmState::Failed + | VmmState::Destroyed + | VmmState::SagaUnwound => { + // Propolis server is not ok to receive volume + // replacement requests, bail out + return Err(ActionError::action_failed(format!( + "vmm {} propolis not in a state to receive request", + vmm.id, + ))); + } + } + + DriveAction::Propolis { + step: db::model::RegionReplacementStep { + replacement_id: params.request.id, + step_time: Utc::now(), + step_type: db::model::RegionReplacementStepType::Propolis, + + step_associated_instance_id: Some(*instance_id), + step_associated_vmm_id: Some(vmm.id), + + step_associated_pantry_ip: None, + step_associated_pantry_port: None, + step_associated_pantry_job_id: None, + }, + + disk, + } + } + + None => { + // The disk is attached to an instance but there's no + // active propolis server. Attach to a pantry. + + let state = + &instance_and_vmm.instance().runtime().nexus_state; + + info!( + log, + "disk attached to instance in state {state} with no vmm"; + "region replacement id" => %params.request.id, + "disk id" => ?disk.id(), + "instance id" => ?instance_id, + ); + + let pantry_address = + get_pantry_address(osagactx.nexus()).await?; + + DriveAction::Pantry { + step: db::model::RegionReplacementStep { + replacement_id: params.request.id, + step_time: Utc::now(), + step_type: + db::model::RegionReplacementStepType::Pantry, + + step_associated_instance_id: None, + step_associated_vmm_id: None, + + step_associated_pantry_ip: Some( + pantry_address.ip().into(), + ), + step_associated_pantry_port: Some( + pantry_address.port().into(), + ), + step_associated_pantry_job_id: Some( + sagactx.lookup::("job_id")?, + ), + }, + + volume_id: new_region.volume_id(), + } + } + } + } + + None => { + // The disk is not attached to an instance. Is it attached to a + // Pantry right now (aka performing bulk import)? + + if let Some(address) = &disk.pantry_address { + // TODO currently unsupported + return Err(ActionError::action_failed(format!( + "disk {} attached to {address}, not supported", + disk.id(), + ))); + } + + // Attach to a pantry. + + info!( + log, + "disk not attached to instance"; + "region replacement id" => %params.request.id, + "disk id" => ?disk.id(), + ); + + // XXX: internal-dns does not randomize the order of addresses + // in its responses: if the first Pantry in the list of + // addresses returned by DNS isn't responding, the drive saga + // will still continually try to use it. + + let pantry_address = get_pantry_address(nexus).await?; + + DriveAction::Pantry { + step: db::model::RegionReplacementStep { + replacement_id: params.request.id, + step_time: Utc::now(), + step_type: db::model::RegionReplacementStepType::Pantry, + + step_associated_instance_id: None, + step_associated_vmm_id: None, + + step_associated_pantry_ip: Some( + pantry_address.ip().into(), + ), + step_associated_pantry_port: Some( + pantry_address.port().into(), + ), + step_associated_pantry_job_id: Some( + sagactx.lookup::("job_id")?, + ), + }, + + volume_id: new_region.volume_id(), + } + } + } + } else { + // Is this volume the destination volume for a snapshot? + + let maybe_snapshot = osagactx + .datastore() + .find_snapshot_by_destination_volume_id( + &opctx, + new_region.volume_id(), + ) + .await + .map_err(ActionError::action_failed)?; + + if maybe_snapshot.is_some() { + // Volume is the destination that snapshot blocks should be scrubbed + // into. 
The scrubber is not written yet, so nothing should be using + // this volume yet. We can attach it to the Pantry. + + info!( + log, + "volume is for a snapshot destination"; + "region replacement id" => %params.request.id, + ); + + let pantry_address = get_pantry_address(nexus).await?; + + DriveAction::Pantry { + step: db::model::RegionReplacementStep { + replacement_id: params.request.id, + step_time: Utc::now(), + step_type: db::model::RegionReplacementStepType::Pantry, + + step_associated_instance_id: None, + step_associated_vmm_id: None, + + step_associated_pantry_ip: Some(pantry_address.ip().into()), + step_associated_pantry_port: Some( + pantry_address.port().into(), + ), + step_associated_pantry_job_id: Some( + sagactx.lookup::("job_id")?, + ), + }, + + volume_id: new_region.volume_id(), + } + } else { + // XXX what other volumes are created? + return Err(ActionError::action_failed(format!( + "don't know what to do with volume {}", + new_region.volume_id(), + ))); + } + }; + + Ok(drive_action) +} + +#[derive(Debug, Serialize, Deserialize)] +struct ExecuteResult { + step_to_commit: Option, + replacement_done: bool, +} + +/// Attempt to execute the prepared step. If it was successful, return the step +/// to commit to the database. +async fn srrd_drive_region_replacement_execute( + sagactx: NexusActionContext, +) -> Result { + let log = sagactx.user_data().log(); + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + // Look up the prepared action, and execute it. If something has changed + // between when the action was determined and now, then bail out - the next + // drive saga invocation will pick up the new state of the world and act + // accordingly. + + let action = sagactx.lookup::("prepare")?; + + let result = match action { + DriveAction::Noop { replacement_done } => { + // *slaps knees and stands up* welp + ExecuteResult { step_to_commit: None, replacement_done } + } + + DriveAction::Pantry { step, volume_id } => { + let Some(pantry_address) = step.pantry_address() else { + return Err(ActionError::action_failed(String::from( + "pantry step does not have an address", + ))); + }; + + let job_id = sagactx.lookup::("job_id")?; + + execute_pantry_drive_action( + log, + osagactx.datastore(), + params.request.id, + pantry_address, + volume_id, + job_id, + ) + .await?; + + ExecuteResult { + step_to_commit: Some(step), + replacement_done: false, + } + } + + DriveAction::Propolis { step, disk } => { + let Some((instance_id, vmm_id)) = step.instance_and_vmm_ids() + else { + return Err(ActionError::action_failed(Error::internal_error( + "propolis step does not have instance and vmm ids", + ))); + }; + + let (.., authz_instance) = + LookupPath::new(&opctx, &osagactx.datastore()) + .instance_id(instance_id) + .lookup_for(authz::Action::Read) + .await + .map_err(ActionError::action_failed)?; + + let instance_and_vmm = osagactx + .datastore() + .instance_fetch_with_vmm(&opctx, &authz_instance) + .await + .map_err(ActionError::action_failed)?; + + if let Some(migration_id) = + instance_and_vmm.instance().runtime().migration_id + { + // An indefinite amount of time can occur between saga nodes: if + // both the check node and prepare node both observed + // `migration_id` as None, but this node observes Some, this + // still means an instance migration was triggered in the middle + // of the region replacement. + // + // Log a message and bail out. 
This is still best effort: a + // migration could be triggered after this check! + + info!( + log, + "instance migration_id is {migration_id}"; + "region replacement id" => %params.request.id, + "disk id" => ?disk.id(), + "instance id" => ?instance_id, + ); + + return Err(ActionError::action_failed( + "instance is undergoing migration".to_string(), + )); + } + + // The disk is attached to an instance and there's an active + // propolis server. Send a volume replacement request to the running + // Volume there - either it will start a live repair, or be ignored + // because there is no difference in the volume construction + // request. + + let disk_new_volume_vcr = match osagactx + .datastore() + .volume_get(disk.volume_id) + .await + .map_err(ActionError::action_failed)? + { + Some(volume) => volume.data().to_string(), + + None => { + return Err(ActionError::action_failed( + Error::internal_error("new volume is gone!"), + )); + } + }; + + let instance_lookup = + LookupPath::new(&opctx, &osagactx.datastore()) + .instance_id(instance_id); + + let (vmm, client) = osagactx + .nexus() + .propolis_client_for_instance( + &opctx, + &instance_lookup, + authz::Action::Modify, + ) + .await + .map_err(ActionError::action_failed)?; + + let replacement_done = execute_propolis_drive_action( + log, + params.request.id, + vmm_id, + vmm, + client, + disk, + disk_new_volume_vcr, + ) + .await?; + + ExecuteResult { step_to_commit: Some(step), replacement_done } + } + }; + + Ok(result) +} + +/// Execute a prepared Pantry step +async fn execute_pantry_drive_action( + log: &Logger, + datastore: &db::DataStore, + request_id: Uuid, + pantry_address: SocketAddrV6, + volume_id: Uuid, + job_id: Uuid, +) -> Result<(), ActionError> { + // Importantly, _do not use `call_pantry_attach_for_disk`_! That uses + // `retry_until_known_result`, which we _do not want here_. The Pantry + // attach can fail if there's a racing Volume checkout to be sent to + // Propolis. Additionally, that call uses `attach` instead of + // `attach_activate_background`, which means it will hang on the activation. + + let endpoint = format!("http://{}", pantry_address); + let client = crucible_pantry_client::Client::new(&endpoint); + + // Check pantry first, to see if this volume is attached already. This can + // occur if: + // + // - the volume is attached to the target pantry, but it can't be reliably + // determined if reconcilation finished. + // + // - a previous repair operated on another region in the same Volume, and + // that attachment was not garbage collected. + // + // Try to get the volume's status in order to check. + + let detach_required = + match client.volume_status(&volume_id.to_string()).await { + Ok(volume_status) => { + info!( + log, + "volume is already attached with status {volume_status:?}"; + "region replacement id" => %request_id, + "volume id" => ?volume_id, + "endpoint" => endpoint.clone(), + ); + + // In the case where this forward action is being rerun, + // detaching the volume would mean that the reconciliation would + // be interrupted. This is ok, as that operation can be + // interrupted at any time. + + // Detach this volume so we can reattach with this saga's job id. + true + } + + Err(e) => { + match e { + crucible_pantry_client::Error::ErrorResponse(ref rv) => { + match rv.status() { + http::StatusCode::NOT_FOUND => { + // No detach required, this Volume isn't attached to + // this Pantry. 
+ false + } + + http::StatusCode::GONE => { + // 410 Gone means detach is required - it was + // previously attached and may have been activated + true + } + + _ => { + error!( + log, + "error checking volume status: {e}"; + "region replacement id" => %request_id, + "volume id" => ?volume_id, + "endpoint" => endpoint.clone(), + ); + + return Err(ActionError::action_failed( + Error::internal_error(&format!( + "unexpected error from volume_status: {e}" + )), + )); + } + } + } + + _ => { + error!( + log, + "error checking volume status: {e}"; + "region replacement id" => %request_id, + "volume id" => ?volume_id, + "endpoint" => endpoint.clone(), + ); + + return Err(ActionError::action_failed( + Error::internal_error(&format!( + "unexpected error from volume_status: {e}" + )), + )); + } + } + } + }; + + if detach_required { + info!( + log, + "detach required"; + "region replacement id" => %request_id, + "volume id" => ?volume_id, + "endpoint" => endpoint.clone(), + ); + + match client.detach(&volume_id.to_string()).await { + Ok(_) => { + info!( + log, + "detached volume"; + "region replacement id" => %request_id, + "volume id" => ?volume_id, + "endpoint" => endpoint.clone(), + ); + } + + Err(e) => { + error!( + log, + "error detaching volume: {e}"; + "region replacement id" => %request_id, + "volume id" => ?volume_id, + "endpoint" => endpoint.clone(), + ); + + // Cannot continue: the Pantry will return an error unless the + // volume construction request matches what was originally + // attached, and the job id matches what was originally sent. + // Even if the VCR is the same, this saga does not have the same + // job id. Bail out here: hopefully the next time this saga + // runs, it will select a different Pantry. + + return Err(ActionError::action_failed( + Error::invalid_request(String::from( + "cannot proceed, pantry will reject our request", + )), + )); + } + } + } else { + info!( + log, + "no detach required"; + "region replacement id" => %request_id, + "volume id" => ?volume_id, + "endpoint" => endpoint.clone(), + ); + } + + // Attach the volume to the pantry, and let reconciliation occur. + + info!( + log, + "sending attach for volume"; + "region replacement id" => %request_id, + "volume id" => ?volume_id, + "endpoint" => endpoint.clone(), + ); + + let disk_volume = datastore + .volume_checkout(volume_id, db::datastore::VolumeCheckoutReason::Pantry) + .await + .map_err(ActionError::action_failed)?; + + let volume_construction_request: + crucible_pantry_client::types::VolumeConstructionRequest = + serde_json::from_str(&disk_volume.data()).map_err(|e| { + ActionError::action_failed(Error::internal_error(&format!( + "failed to deserialize volume {volume_id} data: {e}", + ))) + })?; + + let attach_request = + crucible_pantry_client::types::AttachBackgroundRequest { + volume_construction_request, + job_id: job_id.to_string(), + }; + + client + .attach_activate_background(&volume_id.to_string(), &attach_request) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "pantry attach failed with {:?}", + e, + )) + })?; + + Ok(()) +} + +/// Execute a prepared Propolis step +async fn execute_propolis_drive_action( + log: &Logger, + request_id: Uuid, + step_vmm_id: Uuid, + vmm: db::model::Vmm, + client: propolis_client::Client, + disk: db::model::Disk, + disk_new_volume_vcr: String, +) -> Result { + // This client could be for a different VMM than the step was + // prepared for. 
Bail out if this is true + if vmm.id != step_vmm_id { + return Err(ActionError::action_failed(format!( + "propolis client vmm {} does not match step vmm {}", + vmm.id, step_vmm_id, + ))); + } + + info!( + log, + "sending replacement request for disk volume to propolis {step_vmm_id}"; + "region replacement id" => %request_id, + "disk id" => ?disk.id(), + "volume id" => ?disk.volume_id, + ); + + // Start (or poll) the replacement + let result = client + .instance_issue_crucible_vcr_request() + .id(disk.id()) + .body(propolis_client::types::InstanceVcrReplace { + name: disk.name().to_string(), + vcr_json: disk_new_volume_vcr, + }) + .send() + .await + .map_err(|e| match e { + propolis_client::Error::ErrorResponse(rv) => { + ActionError::action_failed(rv.message.clone()) + } + + _ => ActionError::action_failed(format!( + "unexpected failure during \ + `instance_issue_crucible_vcr_request`: {e}", + )), + })?; + + let replace_result = result.into_inner(); + + info!( + log, + "saw replace result {replace_result:?}"; + "region replacement id" => %request_id, + "disk id" => ?disk.id(), + "volume id" => ?disk.volume_id, + ); + + let replacement_done = match &replace_result { + ReplaceResult::Started => { + // This drive saga's call just started the replacement + false + } + + ReplaceResult::StartedAlready => { + // A previous drive saga's call started the replacement, but it's + // not done yet. + false + } + + ReplaceResult::CompletedAlready => { + // It's done! We see this if the same propolis that received the + // original replace request started and finished the live repair. + true + } + + ReplaceResult::VcrMatches => { + // If this propolis booted after the volume construction request was + // modified but before all the regions were reconciled, then + // `VcrMatches` will be seen as a result of `target_replace`: the + // new propolis will have received the updated VCR when it was + // created. + // + // The upstairs will be performing reconciliation (or have + // previously performed it), not live repair, and will have no + // record of a previous replace request (sent to a different + // propolis!) starting a live repair. + // + // If the Volume is active, that means reconcilation completed ok, + // and therefore Nexus can consider this repair complete. This is + // only true if one repair occurs at a time per volume (which is + // true due to the presence of volume_repair records), and if this + // saga locks the region replacement request record as part of it + // executing (which it does through the SET_SAGA_ID forward action). + // If either of those conditions are not held, then multiple + // replacement calls and activation checks can interleave and + // confuse this saga. + // + // Check if the Volume activated. + + let result = client + .disk_volume_status() + .id(disk.id()) + .send() + .await + .map_err(|e| match e { + propolis_client::Error::ErrorResponse(rv) => { + ActionError::action_failed(rv.message.clone()) + } + + _ => ActionError::action_failed(format!( + "unexpected failure during \ + `disk_volume_status`: {e}", + )), + })?; + + // If the Volume is active, then reconciliation finished + // successfully. 
+ // + // There's a few reasons it may not be active yet: + // + // - Propolis could be shutting down, and tearing down the Upstairs + // in the process (which deactivates the Volume) + // + // - reconciliation could still be going on + // + // - reconciliation could have failed + // + // If it's not active, wait until the next invocation of this saga + // to decide what to do next. + + result.into_inner().active + } + + ReplaceResult::Missing => { + // The disk's volume does not contain the region to be replaced. + // This is an error! + + return Err(ActionError::action_failed(String::from( + "saw ReplaceResult::Missing", + ))); + } + }; + + Ok(replacement_done) +} + +async fn srrd_drive_region_replacement_commit( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let log = sagactx.user_data().log(); + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + // If there was an executed step, record it! + + let execute_result = sagactx.lookup::("execute")?; + + if let Some(step) = execute_result.step_to_commit { + info!( + log, + "committing step {}", step.step_time; + "region replacement id" => %params.request.id, + ); + + osagactx + .datastore() + .add_region_replacement_request_step(&opctx, step) + .await + .map_err(ActionError::action_failed)?; + } else { + info!( + log, + "no step to commit"; + "region replacement id" => %params.request.id, + ); + } + + Ok(()) +} + +async fn srrd_drive_region_replacement_commit_undo( + _sagactx: NexusActionContext, +) -> Result<(), anyhow::Error> { + // If this saga unwinds at the last step, do we have to remove the committed + // step from db? The problem is that we did execute the step, and it's not + // something we can roll back. Leave the last step in the DB so it can be + // referenced during the check step the next time this saga is invoked. + // + // If the saga unwinds at the last step because it didn't commit the + // executed step to the database, this is ok! This would mean that the next + // invocation of the drive saga would be executing without the knowledge of + // what the previous one did - however, this author believes that this is ok + // due to the fact that this saga's forward actions are idempotent. + // + // If the final forward action fails to commit a step to the database, here + // are the cases where this saga could potentially repeat its action: + // + // 1. a propolis action was executed (read: a running propolis was sent a + // replace request) + // 2. a pantry action was executed (read: the volume was attached + // (activating in the background) to a pantry) + // + // # case 1 # + // + // In the case of the next invocation of the drive saga choosing a propolis + // action: + // + // - if the replace request is sent to the same propolis that originally + // received it, the upstairs would respond with `StartedAlready`. The + // drive saga would then consider the replacement not done and wait. + // + // - if the replace request is sent to a different propolis, that propolis + // would have constructed the disk's volume with the replacement VCR, so + // the upstairs would respond with `ReplaceResult::VcrMatches`. The drive + // saga would then consider the replacement done only if propolis observed + // that the volume activated ok. 
+ // + // # case 2 # + // + // In the case of the next invocation of the drive saga choosing a pantry + // action, Nexus first checks if the volume was already attached to the + // selected Pantry, and if so, will detach it before sending a "attach in + // the background with this job id" request. + // + // - if Nexus chose same Pantry as the original drive saga, this would + // cancel any existing reconciliation and start it up again from the + // beginning. This is ok - reconciliation can be interrupted at any time. + // If this repeatedly happened it would cause progress to be very slow, + // but progress would be made. + // + // - if Nexus chose a different Pantry, the newly checked-out Volume would + // steal the activation from the original Pantry, cancelling the + // reconcilation only to start it up again on the different Pantry. + // + // # also! + // + // As well, both of these cases are equivalent to if Nexus chose to always + // attempt some sort of action, instead of choosing no-ops or waiting for + // operations driven by any previous steps to complete, aka if Nexus + // _always_ polled, instead of the behaviour it has now (wait or poll or + // receive push notifications). Polling all the time would be functionally + // correct but unnecessary (and in the case of crucible#1277, a problem!). + + Ok(()) +} + +async fn srrd_finish_saga( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let saga_id = sagactx.lookup::("saga_id")?; + + let execute_result = sagactx.lookup::("execute")?; + + // Use the same undo function to exit the saga. If it was determined that + // the region replacement is done, transition to ReplacementDone, else + // transition back to Running. + if execute_result.replacement_done { + osagactx + .datastore() + .set_region_replacement_from_driving_to_done( + &opctx, + params.request.id, + saga_id, + ) + .await + .map_err(ActionError::action_failed)?; + } else { + osagactx + .datastore() + .undo_set_region_replacement_driving( + &opctx, + params.request.id, + saga_id, + ) + .await + .map_err(ActionError::action_failed)?; + } + + Ok(()) +} diff --git a/nexus/src/app/sagas/region_replacement_finish.rs b/nexus/src/app/sagas/region_replacement_finish.rs new file mode 100644 index 0000000000..f200156ce6 --- /dev/null +++ b/nexus/src/app/sagas/region_replacement_finish.rs @@ -0,0 +1,343 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Compared to the rest of the region replacement process, finishing the +//! process is straight forward. This saga is responsible for the following +//! region replacement request state transitions: +//! +//! ```text +//! ReplacementDone <-- +//! | +//! | | +//! v | +//! | +//! Completing -- +//! +//! | +//! v +//! +//! Completed +//! ``` +//! +//! It will set itself as the "operating saga" for a region replacement request, +//! change the state to "Completing", and: +//! +//! 1. Call the Volume delete saga for the fake Volume that points to the old +//! region. +//! +//! 2. Clear the operating saga id from the request record, and change the state +//! to Completed. +//! 
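Because the transition diagram in the module comment above is hard to read in plain text, here is a small self-contained sketch of the transitions this saga performs: `SET_SAGA_ID` moves the request from `ReplacementDone` to `Completing` and takes the operating-saga lock (its undo reverses this), and `UPDATE_REQUEST_RECORD` moves it to `Completed` and releases the lock after the volume-delete subsaga runs. The types are simplified stand-ins (integer saga ids, an in-memory record) rather than the real datastore API:

```rust
/// Stand-in for the replacement request states this saga touches.
#[derive(Debug, Clone, Copy, PartialEq)]
enum State {
    ReplacementDone,
    Completing,
    Completed,
}

/// Stand-in for the request record: its state plus the lock held by the
/// saga currently operating on it.
struct Request {
    state: State,
    operating_saga_id: Option<u64>,
}

/// SET_SAGA_ID: ReplacementDone -> Completing, taking the lock.
fn set_completing(r: &mut Request, saga_id: u64) -> Result<(), &'static str> {
    match (r.state, r.operating_saga_id) {
        (State::ReplacementDone, None) => {
            r.state = State::Completing;
            r.operating_saga_id = Some(saga_id);
            Ok(())
        }
        // Re-running the same saga node is a no-op (idempotent).
        (State::Completing, Some(id)) if id == saga_id => Ok(()),
        _ => Err("request is operated on by another saga"),
    }
}

/// UPDATE_REQUEST_RECORD: Completing -> Completed, releasing the lock
/// (runs after the volume-delete subsaga).
fn set_complete(r: &mut Request, saga_id: u64) -> Result<(), &'static str> {
    match (r.state, r.operating_saga_id) {
        (State::Completing, Some(id)) if id == saga_id => {
            r.state = State::Completed;
            r.operating_saga_id = None;
            Ok(())
        }
        // Idempotent re-run after the transition already happened.
        (State::Completed, None) => Ok(()),
        _ => Err("request is not Completing for this saga"),
    }
}

fn main() {
    let mut r =
        Request { state: State::ReplacementDone, operating_saga_id: None };
    set_completing(&mut r, 7).unwrap();
    set_complete(&mut r, 7).unwrap();
    assert_eq!(r.state, State::Completed);
    assert!(r.operating_saga_id.is_none());
}
```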
+ +use super::{ + ActionRegistry, NexusActionContext, NexusSaga, SagaInitError, + ACTION_GENERATE_ID, +}; +use crate::app::sagas::declare_saga_actions; +use crate::app::sagas::volume_delete; +use crate::app::{authn, db}; +use serde::Deserialize; +use serde::Serialize; +use steno::ActionError; +use steno::Node; +use uuid::Uuid; + +// region replacement finish saga: input parameters + +#[derive(Debug, Deserialize, Serialize)] +pub(crate) struct Params { + pub serialized_authn: authn::saga::Serialized, + /// The fake volume created for the region that was replaced + // Note: this is only required in the params to build the volume-delete sub + // saga + pub region_volume_id: Uuid, + pub request: db::model::RegionReplacement, +} + +// region replacement finish saga: actions + +declare_saga_actions! { + region_replacement_finish; + SET_SAGA_ID -> "unused_1" { + + srrf_set_saga_id + - srrf_set_saga_id_undo + } + UPDATE_REQUEST_RECORD -> "unused_2" { + + srrf_update_request_record + } +} + +// region replacement finish saga: definition + +#[derive(Debug)] +pub(crate) struct SagaRegionReplacementFinish; +impl NexusSaga for SagaRegionReplacementFinish { + const NAME: &'static str = "region-replacement-finish"; + type Params = Params; + + fn register_actions(registry: &mut ActionRegistry) { + region_replacement_finish_register_actions(registry); + } + + fn make_saga_dag( + params: &Self::Params, + mut builder: steno::DagBuilder, + ) -> Result { + builder.append(Node::action( + "saga_id", + "GenerateSagaId", + ACTION_GENERATE_ID.as_ref(), + )); + + builder.append(set_saga_id_action()); + + let subsaga_params = volume_delete::Params { + serialized_authn: params.serialized_authn.clone(), + volume_id: params.region_volume_id, + }; + + let subsaga_dag = { + let subsaga_builder = steno::DagBuilder::new(steno::SagaName::new( + volume_delete::SagaVolumeDelete::NAME, + )); + volume_delete::SagaVolumeDelete::make_saga_dag( + &subsaga_params, + subsaga_builder, + )? + }; + + builder.append(Node::constant( + "params_for_volume_delete_subsaga", + serde_json::to_value(&subsaga_params).map_err(|e| { + SagaInitError::SerializeError( + "params_for_volume_delete_subsaga".to_string(), + e, + ) + })?, + )); + + builder.append(Node::subsaga( + "volume_delete_subsaga_no_result", + subsaga_dag, + "params_for_volume_delete_subsaga", + )); + + builder.append(update_request_record_action()); + + Ok(builder.build()?) + } +} + +// region replacement finish saga: action implementations + +async fn srrf_set_saga_id( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let saga_id = sagactx.lookup::("saga_id")?; + + // Change the request record here to an intermediate "completing" state to + // block out other sagas that will be triggered for the same request. 
+ osagactx + .datastore() + .set_region_replacement_completing(&opctx, params.request.id, saga_id) + .await + .map_err(ActionError::action_failed)?; + + Ok(()) +} + +async fn srrf_set_saga_id_undo( + sagactx: NexusActionContext, +) -> Result<(), anyhow::Error> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let saga_id = sagactx.lookup::("saga_id")?; + + osagactx + .datastore() + .undo_set_region_replacement_completing( + &opctx, + params.request.id, + saga_id, + ) + .await?; + + Ok(()) +} + +async fn srrf_update_request_record( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let params = sagactx.saga_params::()?; + let osagactx = sagactx.user_data(); + let datastore = osagactx.datastore(); + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let saga_id = sagactx.lookup::("saga_id")?; + + // Now that the region has been deleted, update the replacement request + // record to 'Complete' and clear the operating saga id. There is no undo + // step for this, it should succeed idempotently. + datastore + .set_region_replacement_complete(&opctx, params.request.id, saga_id) + .await + .map_err(ActionError::action_failed)?; + + Ok(()) +} + +#[cfg(test)] +pub(crate) mod test { + use crate::{ + app::sagas::region_replacement_finish::Params, + app::sagas::region_replacement_finish::SagaRegionReplacementFinish, + }; + use async_bb8_diesel::AsyncRunQueryDsl; + use chrono::Utc; + use nexus_db_model::Region; + use nexus_db_model::RegionReplacement; + use nexus_db_model::RegionReplacementState; + use nexus_db_model::Volume; + use nexus_db_queries::authn::saga::Serialized; + use nexus_db_queries::context::OpContext; + use nexus_test_utils_macros::nexus_test; + use sled_agent_client::types::CrucibleOpts; + use sled_agent_client::types::VolumeConstructionRequest; + use uuid::Uuid; + + type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + + #[nexus_test(server = crate::Server)] + async fn test_region_replacement_finish_saga( + cptestctx: &ControlPlaneTestContext, + ) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + + // Manually insert required records + let old_region_volume_id = Uuid::new_v4(); + let new_volume_id = Uuid::new_v4(); + + let replaced_region = { + let dataset_id = Uuid::new_v4(); + Region::new( + dataset_id, + old_region_volume_id, + 512_i64.try_into().unwrap(), + 10, + 10, + ) + }; + + { + let conn = datastore.pool_connection_for_tests().await.unwrap(); + + use nexus_db_model::schema::region::dsl; + diesel::insert_into(dsl::region) + .values(replaced_region.clone()) + .execute_async(&*conn) + .await + .unwrap(); + } + + let volume_construction_request = VolumeConstructionRequest::Volume { + id: old_region_volume_id, + block_size: 0, + sub_volumes: vec![VolumeConstructionRequest::Region { + block_size: 0, + blocks_per_extent: 0, + extent_count: 0, + gen: 0, + opts: CrucibleOpts { + id: old_region_volume_id, + target: vec![ + // XXX if you put something here, you'll need a + // synthetic dataset record + ], + lossy: false, + flush_timeout: None, + key: None, + cert_pem: None, + key_pem: None, + root_cert_pem: None, + control: None, + read_only: false, + }, + }], + read_only_parent: None, + }; + + let volume_data = + 
serde_json::to_string(&volume_construction_request).unwrap(); + + datastore + .volume_create(Volume::new(old_region_volume_id, volume_data)) + .await + .unwrap(); + + let request = RegionReplacement { + id: Uuid::new_v4(), + request_time: Utc::now(), + old_region_id: replaced_region.id(), + volume_id: new_volume_id, + old_region_volume_id: Some(old_region_volume_id), + new_region_id: None, // no value needed here + replacement_state: RegionReplacementState::ReplacementDone, + operating_saga_id: None, + }; + + datastore + .insert_region_replacement_request(&opctx, request.clone()) + .await + .unwrap(); + + // Run the region replacement finish saga + let params = Params { + serialized_authn: Serialized::for_opctx(&opctx), + region_volume_id: old_region_volume_id, + request: request.clone(), + }; + let _output = nexus + .sagas + .saga_execute::(params) + .await + .unwrap(); + + // Validate the state transition + let result = datastore + .get_region_replacement_request_by_id(&opctx, request.id) + .await + .unwrap(); + assert_eq!(result.replacement_state, RegionReplacementState::Complete); + assert!(result.operating_saga_id.is_none()); + + // Validate the Volume was deleted + assert!(datastore + .volume_get(old_region_volume_id) + .await + .unwrap() + .is_none()); + } +} diff --git a/nexus/src/app/sagas/region_replacement_start.rs b/nexus/src/app/sagas/region_replacement_start.rs index c983716b4f..b71944a460 100644 --- a/nexus/src/app/sagas/region_replacement_start.rs +++ b/nexus/src/app/sagas/region_replacement_start.rs @@ -864,19 +864,18 @@ pub(crate) mod test { .unwrap(); // Run the region replacement start saga - let dag = create_saga_dag::(Params { + let params = Params { serialized_authn: Serialized::for_opctx(&opctx), request: request.clone(), allocation_strategy: RegionAllocationStrategy::Random { seed: None, }, - }) - .unwrap(); - - let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); - - // Actually run the saga - let output = nexus.run_saga(runnable_saga).await.unwrap(); + }; + let output = nexus + .sagas + .saga_execute::(params) + .await + .unwrap(); // Validate the state transition let result = datastore diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 41e1793fab..a16ec6932e 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1908,11 +1908,12 @@ mod test { None, // not attached to an instance true, // use the pantry ); - let dag = create_saga_dag::(params).unwrap(); - let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); - // Actually run the saga - let output = nexus.run_saga(runnable_saga).await.unwrap(); + let output = nexus + .sagas + .saga_execute::(params) + .await + .unwrap(); let snapshot = output .lookup_node_output::( @@ -2237,7 +2238,7 @@ mod test { ); let dag = create_saga_dag::(params).unwrap(); - let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); + let runnable_saga = nexus.sagas.saga_prepare(dag).await.unwrap(); // Before running the saga, attach the disk to an instance! 
let _instance_and_vmm = setup_test_instance( @@ -2252,7 +2253,11 @@ mod test { .await; // Actually run the saga - let output = nexus.run_saga(runnable_saga).await; + let output = runnable_saga + .run_to_completion() + .await + .unwrap() + .into_omicron_result(); // Expect to see 409 match output { @@ -2295,9 +2300,8 @@ mod test { true, // use the pantry ); - let dag = create_saga_dag::(params).unwrap(); - let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); - let output = nexus.run_saga(runnable_saga).await; + let output = + nexus.sagas.saga_execute::(params).await; // Expect 200 assert!(output.is_ok()); @@ -2349,7 +2353,7 @@ mod test { ); let dag = create_saga_dag::(params).unwrap(); - let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); + let runnable_saga = nexus.sagas.saga_prepare(dag).await.unwrap(); // Before running the saga, detach the disk! let (.., authz_disk, db_disk) = @@ -2370,7 +2374,11 @@ mod test { .expect("failed to detach disk")); // Actually run the saga. This should fail. - let output = nexus.run_saga(runnable_saga).await; + let output = runnable_saga + .run_to_completion() + .await + .unwrap() + .into_omicron_result(); assert!(output.is_err()); @@ -2397,9 +2405,8 @@ mod test { false, // use the pantry ); - let dag = create_saga_dag::(params).unwrap(); - let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); - let output = nexus.run_saga(runnable_saga).await; + let output = + nexus.sagas.saga_execute::(params).await; // Expect 200 assert!(output.is_ok()); diff --git a/nexus/src/app/sagas/test_helpers.rs b/nexus/src/app/sagas/test_helpers.rs index 684b84dd07..a5d9d0a843 100644 --- a/nexus/src/app/sagas/test_helpers.rs +++ b/nexus/src/app/sagas/test_helpers.rs @@ -261,7 +261,7 @@ pub(crate) async fn actions_succeed_idempotently( nexus: &Arc, dag: SagaDag, ) { - let runnable_saga = nexus.create_runnable_saga(dag.clone()).await.unwrap(); + let runnable_saga = nexus.sagas.saga_prepare(dag.clone()).await.unwrap(); for node in dag.get_nodes() { nexus .sec() @@ -277,7 +277,12 @@ pub(crate) async fn actions_succeed_idempotently( .unwrap(); } - nexus.run_saga(runnable_saga).await.expect("Saga should have succeeded"); + runnable_saga + .run_to_completion() + .await + .expect("Saga should have started") + .into_omicron_result() + .expect("Saga should have succeeded"); } /// Tests that a saga `S` functions properly when any of its nodes fails and @@ -346,7 +351,7 @@ pub(crate) async fn action_failure_can_unwind<'a, S, B, A>( ); let runnable_saga = - nexus.create_runnable_saga(dag.clone()).await.unwrap(); + nexus.sagas.saga_prepare(dag.clone()).await.unwrap(); nexus .sec() @@ -354,12 +359,14 @@ pub(crate) async fn action_failure_can_unwind<'a, S, B, A>( .await .unwrap(); - let saga_error = nexus - .run_saga_raw_result(runnable_saga) + let saga_result = runnable_saga + .run_to_completion() .await .expect("saga should have started successfully") - .kind - .expect_err("saga execution should have failed"); + .into_raw_result(); + + let saga_error = + saga_result.kind.expect_err("saga execution should have failed"); assert_eq!(saga_error.error_node_name, *node.name()); @@ -447,7 +454,7 @@ pub(crate) async fn action_failure_can_unwind_idempotently<'a, S, B, A>( ); let runnable_saga = - nexus.create_runnable_saga(dag.clone()).await.unwrap(); + nexus.sagas.saga_prepare(dag.clone()).await.unwrap(); nexus .sec() @@ -468,10 +475,11 @@ pub(crate) async fn action_failure_can_unwind_idempotently<'a, S, B, A>( .await .unwrap(); - let saga_error = nexus - 
.run_saga_raw_result(runnable_saga) + let saga_error = runnable_saga + .run_to_completion() .await .expect("saga should have started successfully") + .into_raw_result() .kind .expect_err("saga execution should have failed"); diff --git a/nexus/src/app/sagas/test_saga.rs b/nexus/src/app/sagas/test_saga.rs index 9ccdc4aebc..c872cca67f 100644 --- a/nexus/src/app/sagas/test_saga.rs +++ b/nexus/src/app/sagas/test_saga.rs @@ -78,7 +78,7 @@ async fn test_saga_stuck(cptestctx: &ControlPlaneTestContext) { let nexus = &cptestctx.server.server_context().nexus; let params = Params {}; let dag = create_saga_dag::(params).unwrap(); - let runnable_saga = nexus.create_runnable_saga(dag.clone()).await.unwrap(); + let runnable_saga = nexus.sagas.saga_prepare(dag.clone()).await.unwrap(); let saga_id = runnable_saga.id(); // Inject an error into the second node's action and the first node's undo @@ -87,9 +87,11 @@ async fn test_saga_stuck(cptestctx: &ControlPlaneTestContext) { let n2 = dag.get_index("n2").unwrap(); nexus.sec().saga_inject_error(saga_id, n2).await.unwrap(); nexus.sec().saga_inject_error_undo(saga_id, n1).await.unwrap(); - let result = nexus - .run_saga(runnable_saga) + let result = runnable_saga + .run_to_completion() .await + .expect("expected saga to start") + .into_omicron_result() .expect_err("expected saga to finish stuck"); match result { diff --git a/nexus/src/app/sagas/volume_delete.rs b/nexus/src/app/sagas/volume_delete.rs index bfd8e6616c..b9e02538b2 100644 --- a/nexus/src/app/sagas/volume_delete.rs +++ b/nexus/src/app/sagas/volume_delete.rs @@ -332,8 +332,9 @@ async fn svd_delete_crucible_snapshot_records( /// It's insufficient to rely on the struct of CrucibleResources to clean up /// that is returned as part of svd_decrease_crucible_resource_count. Imagine a /// disk that is composed of three regions (a subset of -/// [`VolumeConstructionRequest`] is shown here): +/// [`sled_agent_client::types::VolumeConstructionRequest`] is shown here): /// +/// ```json /// { /// "type": "volume", /// "id": "6b353c87-afac-4ee2-b71a-6fe35fcf9e46", @@ -352,9 +353,11 @@ async fn svd_delete_crucible_snapshot_records( /// ], /// "read_only_parent": null, /// } +/// ``` /// /// Taking a snapshot of this will produce the following volume: /// +/// ```json /// { /// "type": "volume", /// "id": "1ef7282e-a3fb-4222-85a8-b16d3fbfd738", <-- new UUID @@ -373,6 +376,7 @@ async fn svd_delete_crucible_snapshot_records( /// ], /// "read_only_parent": null, /// } +/// ``` /// /// The snapshot targets will use the same IP but different port: snapshots are /// initially located on the same filesystem as their region. diff --git a/nexus/src/app/sagas/vpc_create.rs b/nexus/src/app/sagas/vpc_create.rs index cc40a8d43a..a34b25ceb7 100644 --- a/nexus/src/app/sagas/vpc_create.rs +++ b/nexus/src/app/sagas/vpc_create.rs @@ -17,6 +17,7 @@ use omicron_common::api::external::LookupType; use omicron_common::api::external::RouteDestination; use omicron_common::api::external::RouteTarget; use omicron_common::api::external::RouterRouteKind; +use oxnet::IpNet; use serde::Deserialize; use serde::Serialize; use steno::ActionError; @@ -44,9 +45,13 @@ declare_saga_actions! 
{ + svc_create_router - svc_create_router_undo } - VPC_CREATE_ROUTE -> "route" { - + svc_create_route - - svc_create_route_undo + VPC_CREATE_V4_ROUTE -> "route4" { + + svc_create_v4_route + - svc_create_v4_route_undo + } + VPC_CREATE_V6_ROUTE -> "route6" { + + svc_create_v6_route + - svc_create_v6_route_undo } VPC_CREATE_SUBNET -> "subnet" { + svc_create_subnet @@ -79,8 +84,13 @@ pub fn create_dag( ACTION_GENERATE_ID.as_ref(), )); builder.append(Node::action( - "default_route_id", - "GenerateDefaultRouteId", + "default_v4_route_id", + "GenerateDefaultV4RouteId", + ACTION_GENERATE_ID.as_ref(), + )); + builder.append(Node::action( + "default_v6_route_id", + "GenerateDefaultV6RouteId", ACTION_GENERATE_ID.as_ref(), )); builder.append(Node::action( @@ -90,7 +100,8 @@ pub fn create_dag( )); builder.append(vpc_create_vpc_action()); builder.append(vpc_create_router_action()); - builder.append(vpc_create_route_action()); + builder.append(vpc_create_v4_route_action()); + builder.append(vpc_create_v6_route_action()); builder.append(vpc_create_subnet_action()); builder.append(vpc_update_firewall_action()); builder.append(vpc_notify_sleds_action()); @@ -217,8 +228,45 @@ async fn svc_create_router_undo( Ok(()) } +async fn svc_create_v4_route( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let default_route_id = sagactx.lookup::("default_v4_route_id")?; + let default_route = + "0.0.0.0/0".parse().expect("known-valid specifier for a default route"); + svc_create_route(sagactx, default_route_id, default_route, "default-v4") + .await +} + +async fn svc_create_v4_route_undo( + sagactx: NexusActionContext, +) -> Result<(), anyhow::Error> { + let route_id = sagactx.lookup::("default_v4_route_id")?; + svc_create_route_undo(sagactx, route_id).await +} + +async fn svc_create_v6_route( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let default_route_id = sagactx.lookup::("default_v6_route_id")?; + let default_route = + "::/0".parse().expect("known-valid specifier for a default route"); + svc_create_route(sagactx, default_route_id, default_route, "default-v6") + .await +} + +async fn svc_create_v6_route_undo( + sagactx: NexusActionContext, +) -> Result<(), anyhow::Error> { + let route_id = sagactx.lookup::("default_v6_route_id")?; + svc_create_route_undo(sagactx, route_id).await +} + async fn svc_create_route( sagactx: NexusActionContext, + route_id: Uuid, + default_net: IpNet, + name: &str, ) -> Result<(), ActionError> { let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; @@ -226,23 +274,20 @@ async fn svc_create_route( &sagactx, ¶ms.serialized_authn, ); - let default_route_id = sagactx.lookup::("default_route_id")?; let system_router_id = sagactx.lookup::("system_router_id")?; let authz_router = sagactx.lookup::("router")?; let route = db::model::RouterRoute::new( - default_route_id, + route_id, system_router_id, RouterRouteKind::Default, params::RouterRouteCreate { identity: IdentityMetadataCreateParams { - name: "default".parse().unwrap(), + name: name.parse().unwrap(), description: "The default route of a vpc".to_string(), }, target: RouteTarget::InternetGateway("outbound".parse().unwrap()), - destination: RouteDestination::Vpc( - params.vpc_create.identity.name.clone(), - ), + destination: RouteDestination::IpNet(default_net), }, ); @@ -256,6 +301,7 @@ async fn svc_create_route( async fn svc_create_route_undo( sagactx: NexusActionContext, + route_id: Uuid, ) -> Result<(), anyhow::Error> { let osagactx = sagactx.user_data(); let params = 
sagactx.saga_params::()?; @@ -264,7 +310,6 @@ async fn svc_create_route_undo( ¶ms.serialized_authn, ); let authz_router = sagactx.lookup::("router")?; - let route_id = sagactx.lookup::("default_route_id")?; let authz_route = authz::RouterRoute::new( authz_router, route_id, @@ -440,8 +485,8 @@ async fn svc_notify_sleds( #[cfg(test)] pub(crate) mod test { use crate::{ - app::saga::create_saga_dag, app::sagas::vpc_create::Params, - app::sagas::vpc_create::SagaVpcCreate, external_api::params, + app::sagas::vpc_create::Params, app::sagas::vpc_create::SagaVpcCreate, + external_api::params, }; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::{ @@ -538,12 +583,25 @@ pub(crate) mod test { .await .expect("Failed to delete default Subnet"); - // Default route + // Default gateway routes + let (.., authz_route, _route) = LookupPath::new(&opctx, &datastore) + .project_id(project_id) + .vpc_name(&default_name.clone().into()) + .vpc_router_name(&system_name.clone().into()) + .router_route_name(&"default-v4".parse::().unwrap().into()) + .fetch() + .await + .expect("Failed to fetch default route"); + datastore + .router_delete_route(&opctx, &authz_route) + .await + .expect("Failed to delete default route"); + let (.., authz_route, _route) = LookupPath::new(&opctx, &datastore) .project_id(project_id) .vpc_name(&default_name.clone().into()) .vpc_router_name(&system_name.clone().into()) - .router_route_name(&default_name.clone().into()) + .router_route_name(&"default-v6".parse::().unwrap().into()) .fetch() .await .expect("Failed to fetch default route"); @@ -724,11 +782,7 @@ pub(crate) mod test { ) .await; let params = new_test_params(&opctx, authz_project); - let dag = create_saga_dag::(params).unwrap(); - let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); - - // Actually run the saga - nexus.run_saga(runnable_saga).await.unwrap(); + nexus.sagas.saga_execute::(params).await.unwrap(); } #[nexus_test(server = crate::Server)] diff --git a/nexus/src/app/snapshot.rs b/nexus/src/app/snapshot.rs index c28d180d3c..2b3f59fbe3 100644 --- a/nexus/src/app/snapshot.rs +++ b/nexus/src/app/snapshot.rs @@ -125,7 +125,8 @@ impl super::Nexus { }; let saga_outputs = self - .execute_saga::( + .sagas + .saga_execute::( saga_params, ) .await?; @@ -165,10 +166,11 @@ impl super::Nexus { snapshot: db_snapshot, }; - self.execute_saga::( - saga_params, - ) - .await?; + self.sagas + .saga_execute::( + saga_params, + ) + .await?; Ok(()) } diff --git a/nexus/src/app/switch_interface.rs b/nexus/src/app/switch_interface.rs index bb4cba4c7b..c4e69d1e3e 100644 --- a/nexus/src/app/switch_interface.rs +++ b/nexus/src/app/switch_interface.rs @@ -57,7 +57,6 @@ impl super::Nexus { // eagerly propagate changes via rpw self.background_tasks - .driver .activate(&self.background_tasks.task_switch_port_settings_manager); Ok(value) @@ -86,7 +85,6 @@ impl super::Nexus { // eagerly propagate changes via rpw self.background_tasks - .driver .activate(&self.background_tasks.task_switch_port_settings_manager); Ok(()) diff --git a/nexus/src/app/switch_port.rs b/nexus/src/app/switch_port.rs index 7a6d56252a..bb35b6939e 100644 --- a/nexus/src/app/switch_port.rs +++ b/nexus/src/app/switch_port.rs @@ -100,7 +100,6 @@ impl super::Nexus { // eagerly propagate changes via rpw self.background_tasks - .driver .activate(&self.background_tasks.task_switch_port_settings_manager); Ok(result) @@ -214,7 +213,6 @@ impl super::Nexus { // eagerly propagate changes via rpw self.background_tasks - .driver 
.activate(&self.background_tasks.task_switch_port_settings_manager); Ok(()) @@ -248,7 +246,6 @@ impl super::Nexus { // eagerly propagate changes via rpw self.background_tasks - .driver .activate(&self.background_tasks.task_switch_port_settings_manager); Ok(()) diff --git a/nexus/src/app/volume.rs b/nexus/src/app/volume.rs index 8cfffdb686..b4aa1dee7f 100644 --- a/nexus/src/app/volume.rs +++ b/nexus/src/app/volume.rs @@ -34,10 +34,11 @@ impl super::Nexus { volume_id, }; - self.execute_saga::( - saga_params, - ) - .await?; + self.sagas + .saga_execute::( + saga_params, + ) + .await?; Ok(()) } @@ -90,6 +91,8 @@ impl super::Nexus { ); for repaired_downstairs in repair_finish_info.repairs { + // First, record this notification + self.db_datastore .upstairs_repair_notification( opctx, @@ -110,10 +113,27 @@ impl super::Nexus { ) .await?; + // If the live repair or reconciliation was successfully completed, + // check if the repaired downstairs is part of a region + // replacement request. + if !repair_finish_info.aborted { - // TODO-followup if there's an active region replacement - // occurring, a successfully completed live repair can trigger a - // saga to destroy the original region. + let maybe_region_replacement = self + .datastore() + .lookup_in_progress_region_replacement_request_by_new_region_id( + opctx, + repaired_downstairs.region_uuid, + ) + .await?; + + if maybe_region_replacement.is_none() { + // A live repair or reconciliation completed successfully, + // but there is no in-progress region replacement request + // for that region, so it wasn't initated by Nexus. + // + // TODO-followup if there are too many repairs to the same + // downstairs, do something with that information. + } } } diff --git a/nexus/src/app/vpc.rs b/nexus/src/app/vpc.rs index 0950e65b83..b3605945d3 100644 --- a/nexus/src/app/vpc.rs +++ b/nexus/src/app/vpc.rs @@ -81,7 +81,8 @@ impl super::Nexus { }; let saga_outputs = self - .execute_saga::(saga_params) + .sagas + .saga_execute::(saga_params) .await?; let (_, db_vpc) = saga_outputs @@ -179,7 +180,8 @@ impl super::Nexus { let rules = db::model::VpcFirewallRule::vec_from_params( authz_vpc.id(), params.clone(), - ); + )?; + let rules = self .db_datastore .vpc_update_firewall_rules(opctx, &authz_vpc, rules) @@ -199,7 +201,7 @@ impl super::Nexus { let mut rules = db::model::VpcFirewallRule::vec_from_params( vpc_id, defaults::DEFAULT_FIREWALL_RULES.clone(), - ); + )?; for rule in rules.iter_mut() { for target in rule.targets.iter_mut() { match target.0 { diff --git a/nexus/src/app/vpc_router.rs b/nexus/src/app/vpc_router.rs index 523a450bbd..fdc834a14c 100644 --- a/nexus/src/app/vpc_router.rs +++ b/nexus/src/app/vpc_router.rs @@ -20,8 +20,12 @@ use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::NameOrId; +use omicron_common::api::external::RouteDestination; +use omicron_common::api::external::RouteTarget; use omicron_common::api::external::RouterRouteKind; use omicron_common::api::external::UpdateResult; +use oxnet::IpNet; +use std::net::IpAddr; use uuid::Uuid; impl super::Nexus { @@ -83,6 +87,10 @@ impl super::Nexus { .db_datastore .vpc_create_router(&opctx, &authz_vpc, router) .await?; + + // Note: we don't trigger the route RPW here as it's impossible + // for the router to be bound to a subnet at this point. 
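The `// eagerly propagate changes via rpw` calls above (and the `vpc_needed_notify_sleds` trigger introduced later in this change) follow a common reconciler pattern: the background task fires on a periodic timer, but writers can wake it immediately after committing a change. A minimal sketch of that pattern using plain tokio primitives, with hypothetical names rather than Nexus's actual background-task driver types, might look like this:

```rust
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Notify;

/// Hypothetical stand-in for a background-task handle.
struct Rpw {
    notify: Arc<Notify>,
}

impl Rpw {
    fn new() -> Self {
        Rpw { notify: Arc::new(Notify::new()) }
    }

    /// Wake the reconciler now instead of waiting for its periodic timer.
    fn activate(&self) {
        self.notify.notify_one();
    }

    /// Reconciliation loop: runs on a period *or* when explicitly activated.
    fn spawn(&self) -> tokio::task::JoinHandle<()> {
        let notify = Arc::clone(&self.notify);
        tokio::spawn(async move {
            loop {
                tokio::select! {
                    _ = tokio::time::sleep(Duration::from_secs(30)) => {}
                    _ = notify.notified() => {}
                }
                // Reconcile here: read intended state from the database and
                // push it out (e.g., switch port settings, VPC routes).
            }
        })
    }
}

#[tokio::main]
async fn main() {
    let rpw = Rpw::new();
    let _task = rpw.spawn();
    // After committing a configuration change, propagate it eagerly:
    rpw.activate();
}
```

Because `Notify::notify_one` stores a permit when no task is waiting, an activation that races ahead of the loop's `notified().await` is not lost.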
+ Ok(router) } @@ -114,9 +122,6 @@ impl super::Nexus { .await } - // TODO: When a router is deleted all its routes should be deleted - // TODO: When a router is deleted it should be unassociated w/ any subnets it may be associated with - // or trigger an error pub(crate) async fn vpc_delete_router( &self, opctx: &OpContext, @@ -129,9 +134,14 @@ impl super::Nexus { // router kind cannot be changed, but it might be able to save us a // database round-trip. if db_router.kind == VpcRouterKind::System { - return Err(Error::invalid_request("Cannot delete system router")); + return Err(Error::invalid_request("cannot delete system router")); } - self.db_datastore.vpc_delete_router(opctx, &authz_router).await + let out = + self.db_datastore.vpc_delete_router(opctx, &authz_router).await?; + + self.vpc_needed_notify_sleds(); + + Ok(out) } // Routes @@ -185,8 +195,47 @@ impl super::Nexus { kind: &RouterRouteKind, params: ¶ms::RouterRouteCreate, ) -> CreateResult { - let (.., authz_router) = - router_lookup.lookup_for(authz::Action::CreateChild).await?; + let (.., authz_router, db_router) = + router_lookup.fetch_for(authz::Action::CreateChild).await?; + + if db_router.kind == VpcRouterKind::System { + return Err(Error::invalid_request( + "user-provided routes cannot be added to a system router", + )); + } + + // Validate route destinations/targets at this stage: + // - mixed explicit v4 and v6 are disallowed. + // - users cannot specify 'Vpc' as a custom router dest/target. + // - users cannot specify 'Subnet' as a custom router target. + // - the only internet gateway we support today is 'outbound'. + match (¶ms.destination, ¶ms.target) { + (RouteDestination::Ip(IpAddr::V4(_)), RouteTarget::Ip(IpAddr::V4(_))) + | (RouteDestination::Ip(IpAddr::V6(_)), RouteTarget::Ip(IpAddr::V6(_))) + | (RouteDestination::IpNet(IpNet::V4(_)), RouteTarget::Ip(IpAddr::V4(_))) + | (RouteDestination::IpNet(IpNet::V6(_)), RouteTarget::Ip(IpAddr::V6(_))) => {}, + + (RouteDestination::Ip(_), RouteTarget::Ip(_)) + | (RouteDestination::IpNet(_), RouteTarget::Ip(_)) + => return Err(Error::invalid_request( + "cannot mix explicit IPv4 and IPv6 addresses between destination and target" + )), + + (RouteDestination::Vpc(_), _) | (_, RouteTarget::Vpc(_)) => return Err(Error::invalid_request( + "VPCs cannot be used as a destination or target in custom routers" + )), + + (_, RouteTarget::Subnet(_)) => return Err(Error::invalid_request( + "subnets cannot be used as a target in custom routers" + )), + + (_, RouteTarget::InternetGateway(n)) if n.as_str() != "outbound" => return Err(Error::invalid_request( + "'outbound' is currently the only valid internet gateway" + )), + + _ => {}, + }; + let id = Uuid::new_v4(); let route = db::model::RouterRoute::new( id, @@ -198,6 +247,9 @@ impl super::Nexus { .db_datastore .router_create_route(&opctx, &authz_router, route) .await?; + + self.vpc_router_increment_rpw_version(opctx, &authz_router).await?; + Ok(route) } @@ -220,24 +272,39 @@ impl super::Nexus { route_lookup: &lookup::RouterRoute<'_>, params: ¶ms::RouterRouteUpdate, ) -> UpdateResult { - let (.., vpc, _, authz_route, db_route) = + let (.., authz_router, authz_route, db_route) = route_lookup.fetch_for(authz::Action::Modify).await?; - // TODO: Write a test for this once there's a way to test it (i.e. - // subnets automatically register to the system router table) + match db_route.kind.0 { - RouterRouteKind::Custom | RouterRouteKind::Default => (), + // Default routes allow a constrained form of modification: + // only the target may change. 
+ RouterRouteKind::Default if + params.identity.name.is_some() + || params.identity.description.is_some() + || params.destination != db_route.destination.0 => { + return Err(Error::invalid_request( + "the destination and metadata of a Default route cannot be changed", + ))}, + + RouterRouteKind::Custom | RouterRouteKind::Default => {}, + _ => { return Err(Error::invalid_request(format!( - "routes of type {} from the system table of VPC {:?} \ + "routes of type {} within the system router \ are not modifiable", db_route.kind.0, - vpc.id() ))); } } - self.db_datastore + + let out = self + .db_datastore .router_update_route(&opctx, &authz_route, params.clone().into()) - .await + .await?; + + self.vpc_router_increment_rpw_version(opctx, &authz_router).await?; + + Ok(out) } pub(crate) async fn router_delete_route( @@ -245,7 +312,7 @@ impl super::Nexus { opctx: &OpContext, route_lookup: &lookup::RouterRoute<'_>, ) -> DeleteResult { - let (.., authz_route, db_route) = + let (.., authz_router, authz_route, db_route) = route_lookup.fetch_for(authz::Action::Delete).await?; // Only custom routes can be deleted @@ -255,6 +322,37 @@ impl super::Nexus { "DELETE not allowed on system routes", )); } - self.db_datastore.router_delete_route(opctx, &authz_route).await + let out = + self.db_datastore.router_delete_route(opctx, &authz_route).await?; + + self.vpc_router_increment_rpw_version(opctx, &authz_router).await?; + + Ok(out) + } + + /// Trigger the VPC routing RPW in repsonse to a state change + /// or a new possible listener (e.g., instance/probe start, NIC + /// create). + pub(crate) fn vpc_needed_notify_sleds(&self) { + self.background_tasks + .activate(&self.background_tasks.task_vpc_route_manager) + } + + /// Trigger an RPW version bump on a single VPC router in response + /// to CRUD operations on individual routes. + /// + /// This will also awaken the VPC Router RPW. + pub(crate) async fn vpc_router_increment_rpw_version( + &self, + opctx: &OpContext, + authz_router: &authz::VpcRouter, + ) -> UpdateResult<()> { + self.datastore() + .vpc_router_increment_rpw_version(opctx, authz_router.id()) + .await?; + + self.vpc_needed_notify_sleds(); + + Ok(()) } } diff --git a/nexus/src/app/vpc_subnet.rs b/nexus/src/app/vpc_subnet.rs index f081f351db..ce0cd423f4 100644 --- a/nexus/src/app/vpc_subnet.rs +++ b/nexus/src/app/vpc_subnet.rs @@ -64,8 +64,7 @@ impl super::Nexus { )), } } - // TODO: When a subnet is created it should add a route entry into the VPC's - // system router + pub(crate) async fn vpc_create_subnet( &self, opctx: &OpContext, @@ -109,7 +108,7 @@ impl super::Nexus { // See for // details. let subnet_id = Uuid::new_v4(); - match params.ipv6_block { + let mut out = match params.ipv6_block { None => { const NUM_RETRIES: usize = 2; let mut retry = 0; @@ -213,7 +212,28 @@ impl super::Nexus { .map(|(.., subnet)| subnet) .map_err(SubnetError::into_external) } + }?; + + // XX: rollback the creation if this fails? 
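The version bump performed by `vpc_router_increment_rpw_version` above gives the route-manager RPW a cheap way to tell whether a router's resolved routes are stale. A rough in-memory sketch of that bookkeeping, using a hypothetical `RouterVersion` type (the real version number is state updated through the datastore call shown above), could be:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

/// Hypothetical in-memory stand-in for the per-router version tracking.
struct RouterVersion {
    generation: AtomicU64,  // bumped on every route CRUD operation
    last_pushed: AtomicU64, // generation most recently pushed to sleds
}

impl RouterVersion {
    /// Record that the router's routes changed.
    fn bump(&self) -> u64 {
        self.generation.fetch_add(1, Ordering::SeqCst) + 1
    }

    /// What the reconciler asks: is there unpushed work, and at which version?
    fn needs_push(&self) -> Option<u64> {
        let current = self.generation.load(Ordering::SeqCst);
        (current > self.last_pushed.load(Ordering::SeqCst)).then_some(current)
    }

    /// Record a successful push of `version` to the sled agents.
    fn mark_pushed(&self, version: u64) {
        self.last_pushed.fetch_max(version, Ordering::SeqCst);
    }
}

fn main() {
    let state = RouterVersion {
        generation: AtomicU64::new(0),
        last_pushed: AtomicU64::new(0),
    };
    let version = state.bump(); // a route was created, updated, or deleted
    assert_eq!(state.needs_push(), Some(version));
    // ... resolve and push routes to the relevant sled agents here ...
    state.mark_pushed(version);
    assert_eq!(state.needs_push(), None);
}
```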
+ if let Some(custom_router) = ¶ms.custom_router { + let (.., authz_subnet) = LookupPath::new(opctx, &self.db_datastore) + .vpc_subnet_id(out.id()) + .lookup_for(authz::Action::Modify) + .await?; + + out = self + .vpc_subnet_update_custom_router( + opctx, + &authz_vpc, + &authz_subnet, + Some(custom_router), + ) + .await?; } + + self.vpc_needed_notify_sleds(); + + Ok(out) } pub(crate) async fn vpc_subnet_list( @@ -233,15 +253,90 @@ impl super::Nexus { vpc_subnet_lookup: &lookup::VpcSubnet<'_>, params: ¶ms::VpcSubnetUpdate, ) -> UpdateResult { - let (.., authz_subnet) = + let (.., authz_vpc, authz_subnet) = vpc_subnet_lookup.lookup_for(authz::Action::Modify).await?; - self.db_datastore + + // Updating the custom router is a separate action. + self.vpc_subnet_update_custom_router( + opctx, + &authz_vpc, + &authz_subnet, + params.custom_router.as_ref(), + ) + .await?; + + let out = self + .db_datastore .vpc_update_subnet(&opctx, &authz_subnet, params.clone().into()) - .await + .await?; + + self.vpc_needed_notify_sleds(); + + Ok(out) + } + + async fn vpc_subnet_update_custom_router( + &self, + opctx: &OpContext, + authz_vpc: &authz::Vpc, + authz_subnet: &authz::VpcSubnet, + custom_router: Option<&NameOrId>, + ) -> UpdateResult { + // Resolve the VPC router, if specified. + let router_lookup = match custom_router { + Some(key @ NameOrId::Name(_)) => self + .vpc_router_lookup( + opctx, + params::RouterSelector { + project: None, + vpc: Some(NameOrId::Id(authz_vpc.id())), + router: key.clone(), + }, + ) + .map(Some), + Some(key @ NameOrId::Id(_)) => self + .vpc_router_lookup( + opctx, + params::RouterSelector { + project: None, + vpc: None, + router: key.clone(), + }, + ) + .map(Some), + None => Ok(None), + }?; + + let router_lookup = if let Some(l) = router_lookup { + let (.., rtr_authz_vpc, authz_router) = + l.lookup_for(authz::Action::Read).await?; + + if authz_vpc.id() != rtr_authz_vpc.id() { + return Err(Error::invalid_request( + "router and subnet must belong to the same VPC", + )); + } + + Some(authz_router) + } else { + None + }; + + if let Some(authz_router) = router_lookup { + self.db_datastore + .vpc_subnet_set_custom_router( + opctx, + &authz_subnet, + &authz_router, + ) + .await + } else { + self.db_datastore + .vpc_subnet_unset_custom_router(opctx, &authz_subnet) + .await + } } - // TODO: When a subnet is deleted it should remove its entry from the VPC's - // system router. 
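The custom-router attachment added above hinges on one invariant: the resolved router must belong to the same VPC as the subnet being updated. Stripped of the lookup machinery, the check amounts to the following sketch (stand-in ID types, not the real `authz` handles):

```rust
/// Stand-in identifier; the real code compares the VPC resolved for the
/// router lookup against the VPC of the subnet being modified.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct VpcId(u64);

struct Router {
    vpc: VpcId,
}

/// A custom router may only be attached to a subnet in its own VPC.
fn check_attach(subnet_vpc: VpcId, router: &Router) -> Result<(), &'static str> {
    if router.vpc != subnet_vpc {
        return Err("router and subnet must belong to the same VPC");
    }
    Ok(())
}

fn main() {
    let router = Router { vpc: VpcId(1) };
    assert!(check_attach(VpcId(1), &router).is_ok());
    assert!(check_attach(VpcId(2), &router).is_err());
}
```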
pub(crate) async fn vpc_delete_subnet( &self, opctx: &OpContext, @@ -249,9 +344,14 @@ impl super::Nexus { ) -> DeleteResult { let (.., authz_subnet, db_subnet) = vpc_subnet_lookup.fetch_for(authz::Action::Delete).await?; - self.db_datastore + let out = self + .db_datastore .vpc_delete_subnet(opctx, &db_subnet, &authz_subnet) - .await + .await?; + + self.vpc_needed_notify_sleds(); + + Ok(out) } pub(crate) async fn subnet_list_instance_network_interfaces( diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 350836441e..2678768b48 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -5446,7 +5446,6 @@ async fn vpc_firewall_rules_update( method = GET, path = "/v1/vpc-routers", tags = ["vpcs"], - unpublished = true, }] async fn vpc_router_list( rqctx: RequestContext, @@ -5486,7 +5485,6 @@ async fn vpc_router_list( method = GET, path = "/v1/vpc-routers/{router}", tags = ["vpcs"], - unpublished = true, }] async fn vpc_router_view( rqctx: RequestContext, @@ -5520,7 +5518,6 @@ async fn vpc_router_view( method = POST, path = "/v1/vpc-routers", tags = ["vpcs"], - unpublished = true, }] async fn vpc_router_create( rqctx: RequestContext, @@ -5556,7 +5553,6 @@ async fn vpc_router_create( method = DELETE, path = "/v1/vpc-routers/{router}", tags = ["vpcs"], - unpublished = true, }] async fn vpc_router_delete( rqctx: RequestContext, @@ -5590,7 +5586,6 @@ async fn vpc_router_delete( method = PUT, path = "/v1/vpc-routers/{router}", tags = ["vpcs"], - unpublished = true, }] async fn vpc_router_update( rqctx: RequestContext, @@ -5630,7 +5625,6 @@ async fn vpc_router_update( method = GET, path = "/v1/vpc-router-routes", tags = ["vpcs"], - unpublished = true, }] async fn vpc_router_route_list( rqctx: RequestContext, @@ -5672,7 +5666,6 @@ async fn vpc_router_route_list( method = GET, path = "/v1/vpc-router-routes/{route}", tags = ["vpcs"], - unpublished = true, }] async fn vpc_router_route_view( rqctx: RequestContext, @@ -5704,12 +5697,11 @@ async fn vpc_router_route_view( .await } -/// Create router +/// Create route #[endpoint { method = POST, path = "/v1/vpc-router-routes", tags = ["vpcs"], - unpublished = true, }] async fn vpc_router_route_create( rqctx: RequestContext, @@ -5745,7 +5737,6 @@ async fn vpc_router_route_create( method = DELETE, path = "/v1/vpc-router-routes/{route}", tags = ["vpcs"], - unpublished = true, }] async fn vpc_router_route_delete( rqctx: RequestContext, @@ -5781,7 +5772,6 @@ async fn vpc_router_route_delete( method = PUT, path = "/v1/vpc-router-routes/{route}", tags = ["vpcs"], - unpublished = true, }] async fn vpc_router_route_update( rqctx: RequestContext, diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index 6a23048693..e48ec83d98 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -57,7 +57,7 @@ extern crate slog; /// to stdout. 
pub fn run_openapi_external() -> Result<(), String> { external_api() - .openapi("Oxide Region API", "20240502.0") + .openapi("Oxide Region API", "20240710.0") .description("API for interacting with the Oxide control plane") .contact_url("https://oxide.computer") .contact_email("api@oxide.computer") diff --git a/nexus/src/saga_interface.rs b/nexus/src/saga_interface.rs index e76ea893e7..5a828ff0ec 100644 --- a/nexus/src/saga_interface.rs +++ b/nexus/src/saga_interface.rs @@ -16,7 +16,6 @@ use std::sync::Arc; pub(crate) struct SagaContext { nexus: Arc, log: Logger, - authz: Arc, } impl fmt::Debug for SagaContext { @@ -26,12 +25,8 @@ impl fmt::Debug for SagaContext { } impl SagaContext { - pub(crate) fn new( - nexus: Arc, - log: Logger, - authz: Arc, - ) -> SagaContext { - SagaContext { authz, nexus, log } + pub(crate) fn new(nexus: Arc, log: Logger) -> SagaContext { + SagaContext { nexus, log } } pub(crate) fn log(&self) -> &Logger { @@ -39,7 +34,7 @@ impl SagaContext { } pub(crate) fn authz(&self) -> &Arc { - &self.authz + &self.nexus.authz() } pub(crate) fn nexus(&self) -> &Arc { diff --git a/nexus/test-utils/Cargo.toml b/nexus/test-utils/Cargo.toml index 0eab038f91..7732e00d70 100644 --- a/nexus/test-utils/Cargo.toml +++ b/nexus/test-utils/Cargo.toml @@ -37,6 +37,7 @@ omicron-uuid-kinds.workspace = true oximeter.workspace = true oximeter-collector.workspace = true oximeter-producer.workspace = true +oxnet.workspace = true serde.workspace = true serde_json.workspace = true serde_urlencoded.workspace = true diff --git a/nexus/test-utils/src/http_testing.rs b/nexus/test-utils/src/http_testing.rs index 90c7dd43bc..1a85d7094c 100644 --- a/nexus/test-utils/src/http_testing.rs +++ b/nexus/test-utils/src/http_testing.rs @@ -216,8 +216,8 @@ impl<'a> RequestBuilder<'a> { /// Add header and value to check for at execution time /// - /// Behaves like header() rather than expect_allowed_headers() in that it - /// takes one header at a time rather than a whole set. + /// Behaves like header() in that it takes one header at a time rather than + /// a whole set. pub fn expect_response_header( mut self, name: K, @@ -291,8 +291,9 @@ impl<'a> RequestBuilder<'a> { /// response, and make the response available to the caller /// /// This function checks the returned status code (if [`Self::expect_status()`] - /// was used), allowed headers (if [`Self::expect_allowed_headers()`] was used), and - /// various other properties of the response. + /// was used), allowed headers (if [`Self::expect_websocket_handshake()`] or + /// [`Self::expect_console_asset()`] was used), and various other properties + /// of the response. 
pub async fn execute(self) -> Result { if let Some(error) = self.error { return Err(error); diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 97fd66f949..7d69e6b3b0 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -688,6 +688,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { slot: 0, subnet: (*NEXUS_OPTE_IPV4_SUBNET).into(), vni: Vni::SERVICES_VNI, + transit_ips: vec![], }, }), }); @@ -1048,6 +1049,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { slot: 0, subnet: (*DNS_OPTE_IPV4_SUBNET).into(), vni: Vni::SERVICES_VNI, + transit_ips: vec![], }, }, ), diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 2aef32d37c..ccebffd197 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -26,6 +26,7 @@ use nexus_types::external_api::views::FloatingIp; use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; use nexus_types::external_api::views::User; +use nexus_types::external_api::views::VpcSubnet; use nexus_types::external_api::views::{Project, Silo, Vpc, VpcRouter}; use nexus_types::identity::Resource; use nexus_types::internal_api::params as internal_params; @@ -36,12 +37,17 @@ use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceCpuCount; use omicron_common::api::external::NameOrId; +use omicron_common::api::external::RouteDestination; +use omicron_common::api::external::RouteTarget; +use omicron_common::api::external::RouterRoute; use omicron_common::disk::DiskIdentity; use omicron_sled_agent::sim::SledAgent; use omicron_test_utils::dev::poll::wait_for_condition; use omicron_test_utils::dev::poll::CondCheckError; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::ZpoolUuid; +use oxnet::Ipv4Net; +use oxnet::Ipv6Net; use slog::debug; use std::net::IpAddr; use std::sync::Arc; @@ -559,6 +565,32 @@ pub async fn create_vpc_with_error( .unwrap() } +pub async fn create_vpc_subnet( + client: &ClientTestContext, + project_name: &str, + vpc_name: &str, + subnet_name: &str, + ipv4_block: Ipv4Net, + ipv6_block: Option, + custom_router: Option<&str>, +) -> VpcSubnet { + object_create( + &client, + &format!("/v1/vpc-subnets?project={project_name}&vpc={vpc_name}"), + ¶ms::VpcSubnetCreate { + identity: IdentityMetadataCreateParams { + name: subnet_name.parse().unwrap(), + description: "vpc description".to_string(), + }, + ipv4_block, + ipv6_block, + custom_router: custom_router + .map(|n| NameOrId::Name(n.parse().unwrap())), + }, + ) + .await +} + pub async fn create_router( client: &ClientTestContext, project_name: &str, @@ -584,6 +616,78 @@ pub async fn create_router( .unwrap() } +pub async fn create_route( + client: &ClientTestContext, + project_name: &str, + vpc_name: &str, + router_name: &str, + route_name: &str, + destination: RouteDestination, + target: RouteTarget, +) -> RouterRoute { + NexusRequest::objects_post( + &client, + format!( + "/v1/vpc-router-routes?project={}&vpc={}&router={}", + &project_name, &vpc_name, &router_name + ) + .as_str(), + ¶ms::RouterRouteCreate { + identity: IdentityMetadataCreateParams { + name: route_name.parse().unwrap(), + description: String::from("route description"), + }, + target, + destination, + }, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap() +} + 
+#[allow(clippy::too_many_arguments)] +pub async fn create_route_with_error( + client: &ClientTestContext, + project_name: &str, + vpc_name: &str, + router_name: &str, + route_name: &str, + destination: RouteDestination, + target: RouteTarget, + status: StatusCode, +) -> HttpErrorResponseBody { + NexusRequest::new( + RequestBuilder::new( + client, + Method::POST, + format!( + "/v1/vpc-router-routes?project={}&vpc={}&router={}", + &project_name, &vpc_name, &router_name + ) + .as_str(), + ) + .body(Some(¶ms::RouterRouteCreate { + identity: IdentityMetadataCreateParams { + name: route_name.parse().unwrap(), + description: String::from("route description"), + }, + target, + destination, + })) + .expect_status(Some(status)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap() +} + pub async fn assert_ip_pool_utilization( client: &ClientTestContext, pool_name: &str, diff --git a/nexus/tests/config.test.toml b/nexus/tests/config.test.toml index 9aed5bcb69..f90a035de6 100644 --- a/nexus/tests/config.test.toml +++ b/nexus/tests/config.test.toml @@ -5,8 +5,8 @@ [console] # Directory for static assets. Absolute path or relative to CWD. static_dir = "tests/static" -session_idle_timeout_minutes = 60 -session_absolute_timeout_minutes = 480 +session_idle_timeout_minutes = 480 # 8 hours +session_absolute_timeout_minutes = 1440 # 24 hours # List of authentication schemes to support. [authn] @@ -110,6 +110,11 @@ blueprints.period_secs_collect_crdb_node_ids = 600 sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 +# The driver task should wake up frequently, something like every 10 seconds. +# however, if it's this low it affects the test_omdb_success_cases test output. +# keep this 30 seconds, so that the test shows "triggered by an explicit +# signal" instead of "triggered by a periodic timer firing" +region_replacement_driver.period_secs = 30 instance_watcher.period_secs = 30 service_firewall_propagation.period_secs = 300 v2p_mapping_propagation.period_secs = 30 diff --git a/nexus/tests/integration_tests/commands.rs b/nexus/tests/integration_tests/commands.rs index 2eaf24d907..1a6e717345 100644 --- a/nexus/tests/integration_tests/commands.rs +++ b/nexus/tests/integration_tests/commands.rs @@ -109,7 +109,7 @@ fn test_nexus_openapi() { .expect("stdout was not valid OpenAPI"); assert_eq!(spec.openapi, "3.0.3"); assert_eq!(spec.info.title, "Oxide Region API"); - assert_eq!(spec.info.version, "20240502.0"); + assert_eq!(spec.info.version, "20240710.0"); // Spot check a couple of items. 
assert!(!spec.paths.paths.is_empty()); diff --git a/nexus/tests/integration_tests/console_api.rs b/nexus/tests/integration_tests/console_api.rs index 8daaf44733..479baf2fec 100644 --- a/nexus/tests/integration_tests/console_api.rs +++ b/nexus/tests/integration_tests/console_api.rs @@ -892,7 +892,7 @@ async fn log_in_and_extract_token( let (session_token, rest) = session_cookie.split_once("; ").unwrap(); assert!(session_token.starts_with("session=")); - assert_eq!(rest, "Path=/; HttpOnly; SameSite=Lax; Max-Age=28800"); + assert_eq!(rest, "Path=/; HttpOnly; SameSite=Lax; Max-Age=86400"); session_token.to_string() } diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index ca46a8bf06..a8e12ae5d9 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -202,6 +202,7 @@ pub static DEMO_VPC_SUBNET_CREATE: Lazy = }, ipv4_block: "10.1.2.3/8".parse().unwrap(), ipv6_block: None, + custom_router: None, }); // VPC Router used for testing @@ -461,6 +462,7 @@ pub static DEMO_INSTANCE_NIC_PUT: Lazy = description: Some(String::from("an updated description")), }, primary: false, + transit_ips: vec![], }); pub static DEMO_CERTIFICATE_NAME: Lazy = @@ -1513,6 +1515,7 @@ pub static VERIFY_ENDPOINTS: Lazy> = Lazy::new(|| { name: None, description: Some("different".to_string()) }, + custom_router: None, }).unwrap() ), AllowedMethod::Delete, diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 2cc9f77c69..9c965ccf8a 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -18,6 +18,7 @@ use nexus_db_queries::context::OpContext; use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO_ID; use nexus_db_queries::db::lookup::LookupPath; +use nexus_db_queries::db::DataStore; use nexus_test_interface::NexusServer; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; @@ -59,6 +60,9 @@ use omicron_common::api::external::InstanceState; use omicron_common::api::external::Name; use omicron_common::api::external::NameOrId; use omicron_common::api::external::Vni; +use omicron_common::api::internal::shared::ResolvedVpcRoute; +use omicron_common::api::internal::shared::RouterId; +use omicron_common::api::internal::shared::RouterKind; use omicron_nexus::app::MAX_MEMORY_BYTES_PER_INSTANCE; use omicron_nexus::app::MAX_VCPU_PER_INSTANCE; use omicron_nexus::app::MIN_MEMORY_BYTES_PER_INSTANCE; @@ -70,6 +74,7 @@ use omicron_uuid_kinds::PropolisUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::{GenericUuid, InstanceUuid}; use sled_agent_client::TestInterfaces as _; +use std::collections::HashSet; use std::convert::TryFrom; use std::net::Ipv4Addr; use std::sync::Arc; @@ -672,6 +677,30 @@ async fn test_instance_start_creates_networking_state( for agent in &sled_agents { assert_sled_v2p_mappings(agent, &nics[0], guest_nics[0].vni).await; } + + // Ensure that the target sled agent for our instance has received + // up-to-date VPC routes. 
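The assertion helper used for this (see `assert_sled_vpc_routes` later in this diff) has to poll, because route propagation to the simulated sled agent is asynchronous. A generic poll-until-timeout helper in that spirit, written here as a self-contained sketch rather than the `omicron_test_utils::dev::poll::wait_for_condition` call the tests actually use, looks like:

```rust
use std::time::Duration;

/// Repeatedly evaluate `check` until it returns true or `max_wait` elapses.
async fn wait_until<F, Fut>(
    mut check: F,
    period: Duration,
    max_wait: Duration,
) -> Result<(), &'static str>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = bool>,
{
    let deadline = tokio::time::Instant::now() + max_wait;
    loop {
        if check().await {
            return Ok(());
        }
        if tokio::time::Instant::now() >= deadline {
            return Err("condition not met before timeout");
        }
        tokio::time::sleep(period).await;
    }
}

#[tokio::main]
async fn main() {
    // Trivially true condition; a real caller would inspect sled-agent state
    // (e.g., compare its route sets against the expected resolved routes).
    wait_until(|| async { true }, Duration::from_secs(1), Duration::from_secs(30))
        .await
        .expect("condition should be met");
}
```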
+ let with_vmm = datastore + .instance_fetch_with_vmm(&opctx, &authz_instance) + .await + .unwrap(); + + let mut checked = false; + for agent in &sled_agents { + if Some(agent.id) == with_vmm.sled_id().map(SledUuid::into_untyped_uuid) + { + assert_sled_vpc_routes( + agent, + &opctx, + datastore, + nics[0].subnet_id, + guest_nics[0].vni, + ) + .await; + checked = true; + } + } + assert!(checked); } #[nexus_test] @@ -835,7 +864,9 @@ async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { } #[nexus_test] -async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { +async fn test_instance_migrate_v2p_and_routes( + cptestctx: &ControlPlaneTestContext, +) { let client = &cptestctx.external_client; let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; @@ -964,6 +995,15 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { if sled_agent.id != dst_sled_id.into_untyped_uuid() { assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni) .await; + } else { + assert_sled_vpc_routes( + sled_agent, + &opctx, + datastore, + nics[0].subnet_id, + guest_nics[0].vni, + ) + .await; } } } @@ -1760,6 +1800,7 @@ async fn test_instance_with_new_custom_network_interfaces( }, ipv4_block: "172.31.0.0/24".parse().unwrap(), ipv6_block: None, + custom_router: None, }; let _response = NexusRequest::objects_post( client, @@ -1906,6 +1947,7 @@ async fn test_instance_create_delete_network_interface( }, ipv4_block: "172.31.0.0/24".parse().unwrap(), ipv6_block: None, + custom_router: None, }; let _response = NexusRequest::objects_post( client, @@ -2147,6 +2189,7 @@ async fn test_instance_update_network_interfaces( }, ipv4_block: "172.31.0.0/24".parse().unwrap(), ipv6_block: None, + custom_router: None, }; let _response = NexusRequest::objects_post( client, @@ -2246,6 +2289,7 @@ async fn test_instance_update_network_interfaces( description: Some(new_description.clone()), }, primary: false, + transit_ips: vec![], }; // Verify we fail to update the NIC when the instance is running @@ -2322,6 +2366,7 @@ async fn test_instance_update_network_interfaces( description: None, }, primary: true, + transit_ips: vec![], }; let updated_primary_iface1 = NexusRequest::object_put( client, @@ -2415,6 +2460,7 @@ async fn test_instance_update_network_interfaces( description: None, }, primary: true, + transit_ips: vec![], }; let new_primary_iface = NexusRequest::object_put( client, @@ -4763,6 +4809,80 @@ async fn assert_sled_v2p_mappings( .expect("matching v2p mapping should be present"); } +/// Asserts that supplied sled agent's most recent VPC route sets +/// contain up-to-date routes for a known subnet. 
+pub async fn assert_sled_vpc_routes( + sled_agent: &Arc, + opctx: &OpContext, + datastore: &DataStore, + subnet_id: Uuid, + vni: Vni, +) -> (HashSet, HashSet) { + let (.., authz_vpc, _, db_subnet) = LookupPath::new(opctx, datastore) + .vpc_subnet_id(subnet_id) + .fetch() + .await + .unwrap(); + + let custom_routes: HashSet<_> = + if let Some(router_id) = db_subnet.custom_router_id { + datastore + .vpc_resolve_router_rules(opctx, router_id) + .await + .unwrap() + .into_iter() + .map(|(dest, target)| ResolvedVpcRoute { dest, target }) + .collect() + } else { + Default::default() + }; + + let (.., vpc) = LookupPath::new(opctx, datastore) + .vpc_id(authz_vpc.id()) + .fetch() + .await + .unwrap(); + + let system_routes: HashSet<_> = datastore + .vpc_resolve_router_rules(opctx, vpc.system_router_id) + .await + .unwrap() + .into_iter() + .map(|(dest, target)| ResolvedVpcRoute { dest, target }) + .collect(); + + assert!(!system_routes.is_empty()); + + let condition = || async { + let vpc_routes = sled_agent.vpc_routes.lock().await; + let sys_routes_found = vpc_routes.iter().any(|(id, set)| { + *id == RouterId { vni, kind: RouterKind::System } + && set.routes == system_routes + }); + let custom_routes_found = vpc_routes.iter().any(|(id, set)| { + *id == RouterId { + vni, + kind: RouterKind::Custom(db_subnet.ipv4_block.0.into()), + } && set.routes == custom_routes + }); + + if sys_routes_found && custom_routes_found { + Ok(()) + } else { + Err(CondCheckError::NotYet::<()>) + } + }; + wait_for_condition( + condition, + &Duration::from_secs(1), + &Duration::from_secs(30), + ) + .await + .expect("matching vpc routes should be present"); + + (system_routes, custom_routes) +} + /// Simulate completion of an ongoing instance state transition. To do this, we /// have to look up the instance, then get the sled agent associated with that /// instance, and then tell it to finish simulating whatever async transition is diff --git a/nexus/tests/integration_tests/password_login.rs b/nexus/tests/integration_tests/password_login.rs index a340a804e2..a7b0b627b9 100644 --- a/nexus/tests/integration_tests/password_login.rs +++ b/nexus/tests/integration_tests/password_login.rs @@ -447,7 +447,7 @@ async fn expect_login_success( .split_once("; ") .expect("session cookie: bad cookie header value (missing semicolon)"); assert!(token_cookie.starts_with("session=")); - assert_eq!(rest, "Path=/; HttpOnly; SameSite=Lax; Max-Age=28800"); + assert_eq!(rest, "Path=/; HttpOnly; SameSite=Lax; Max-Age=86400"); let (_, session_token) = token_cookie .split_once('=') .expect("session cookie: bad cookie header value (missing 'session=')"); diff --git a/nexus/tests/integration_tests/router_routes.rs b/nexus/tests/integration_tests/router_routes.rs index 10c594bba9..38f4ecec9a 100644 --- a/nexus/tests/integration_tests/router_routes.rs +++ b/nexus/tests/integration_tests/router_routes.rs @@ -2,18 +2,27 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+use dropshot::test_util::ClientTestContext; use dropshot::Method; use http::StatusCode; +use itertools::Itertools; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::identity_eq; +use nexus_test_utils::resource_helpers::create_route; +use nexus_test_utils::resource_helpers::create_route_with_error; +use nexus_test_utils::resource_helpers::object_put; +use nexus_test_utils::resource_helpers::object_put_error; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; +use nexus_types::external_api::params::RouterRouteUpdate; +use omicron_common::api::external::SimpleIdentity; use omicron_common::api::external::{ IdentityMetadataCreateParams, IdentityMetadataUpdateParams, RouteDestination, RouteTarget, RouterRoute, RouterRouteKind, }; +use oxnet::IpNet; use std::net::IpAddr; use std::net::Ipv4Addr; @@ -21,73 +30,115 @@ use nexus_test_utils::resource_helpers::{ create_project, create_router, create_vpc, }; +use crate::integration_tests::vpc_routers::PROJECT_NAME; +use crate::integration_tests::vpc_routers::ROUTER_NAMES; +use crate::integration_tests::vpc_routers::VPC_NAME; + type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; -#[nexus_test] -async fn test_router_routes(cptestctx: &ControlPlaneTestContext) { - let client = &cptestctx.external_client; - - let project_name = "springfield-squidport"; - let vpc_name = "vpc1"; - let router_name = "router1"; - - let get_routes_url = |router_name: &str| -> String { - format!( - "/v1/vpc-router-routes?project={}&vpc={}&router={}", - project_name, vpc_name, router_name - ) - }; - - let get_route_url = |router_name: &str, route_name: &str| -> String { - format!( - "/v1/vpc-router-routes/{}?project={}&vpc={}&router={}", - route_name, project_name, vpc_name, router_name - ) - }; - - let _ = create_project(&client, project_name).await; +fn get_routes_url(vpc_name: &str, router_name: &str) -> String { + format!( + "/v1/vpc-router-routes?project={}&vpc={}&router={}", + PROJECT_NAME, vpc_name, router_name + ) +} - // Create a vpc - create_vpc(&client, project_name, vpc_name).await; +fn get_route_url( + vpc_name: &str, + router_name: &str, + route_name: &str, +) -> String { + format!( + "/v1/vpc-router-routes/{}?project={}&vpc={}&router={}", + route_name, PROJECT_NAME, vpc_name, router_name + ) +} +async fn get_system_routes( + client: &ClientTestContext, + vpc_name: &str, +) -> [RouterRoute; 3] { // Get the system router's routes let system_router_routes = objects_list_page_authz::( client, - get_routes_url("system").as_str(), + get_routes_url(vpc_name, "system").as_str(), ) .await .items; - // The system should start with a single, pre-configured route - assert_eq!(system_router_routes.len(), 1); + // The system should start with three preconfigured routes: + // - a default v4 gateway route + // - a default v6 gateway route + // - a managed subnet route for the 'default' subnet + assert_eq!(system_router_routes.len(), 3); - // That route should be the default route - let default_route = &system_router_routes[0]; - assert_eq!(default_route.kind, RouterRouteKind::Default); + let mut v4_route = None; + let mut v6_route = None; + let mut subnet_route = None; + for route in system_router_routes { + match (&route.kind, &route.destination, &route.target) { + (RouterRouteKind::Default, RouteDestination::IpNet(IpNet::V4(_)), RouteTarget::InternetGateway(_)) => {v4_route = Some(route);}, + 
(RouterRouteKind::Default, RouteDestination::IpNet(IpNet::V6(_)), RouteTarget::InternetGateway(_)) => {v6_route = Some(route);}, + (RouterRouteKind::VpcSubnet, RouteDestination::Subnet(n0), RouteTarget::Subnet(n1)) if n0 == n1 && n0.as_str() == "default" => {subnet_route = Some(route);}, + _ => panic!("unexpected system route {route:?} -- wanted gateway and subnet"), + } + } - // It errors if you try to delete the default route - let error: dropshot::HttpErrorResponseBody = NexusRequest::expect_failure( - client, - StatusCode::BAD_REQUEST, - Method::DELETE, - get_route_url("system", "default").as_str(), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body() - .unwrap(); - assert_eq!(error.message, "DELETE not allowed on system routes"); + let v4_route = + v4_route.expect("no v4 gateway route found in system router"); + let v6_route = + v6_route.expect("no v6 gateway route found in system router"); + let subnet_route = + subnet_route.expect("no default subnet route found in system router"); + + [v4_route, v6_route, subnet_route] +} + +#[nexus_test] +async fn test_router_routes_crud_operations( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + let vpc_name = "vpc1"; + let router_name = "router1"; + + let _ = create_project(&client, PROJECT_NAME).await; + + // Create a vpc + create_vpc(&client, PROJECT_NAME, vpc_name).await; + + // Get the system router's routes + let [v4_route, v6_route, subnet_route] = + get_system_routes(client, vpc_name).await; + + // Deleting any default system route is disallowed. + for route in &[&v4_route, &v6_route, &subnet_route] { + let error: dropshot::HttpErrorResponseBody = + NexusRequest::expect_failure( + client, + StatusCode::BAD_REQUEST, + Method::DELETE, + get_route_url(vpc_name, "system", route.name().as_str()) + .as_str(), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + assert_eq!(error.message, "DELETE not allowed on system routes"); + } // Create a custom router - create_router(&client, project_name, vpc_name, router_name).await; + create_router(&client, PROJECT_NAME, vpc_name, router_name).await; // Get routes list for custom router let routes = objects_list_page_authz::( client, - get_routes_url(router_name).as_str(), + get_routes_url(vpc_name, router_name).as_str(), ) .await .items; @@ -95,12 +146,12 @@ async fn test_router_routes(cptestctx: &ControlPlaneTestContext) { assert_eq!(routes.len(), 0); let route_name = "custom-route"; - let route_url = get_route_url(router_name, route_name); + let route_url = get_route_url(vpc_name, router_name, route_name); // Create a new custom route let route_created: RouterRoute = NexusRequest::objects_post( client, - get_routes_url(router_name).as_str(), + get_routes_url(vpc_name, router_name).as_str(), ¶ms::RouterRouteCreate { identity: IdentityMetadataCreateParams { name: route_name.parse().unwrap(), @@ -182,10 +233,307 @@ async fn test_router_routes(cptestctx: &ControlPlaneTestContext) { client, StatusCode::NOT_FOUND, Method::GET, - get_route_url(router_name, route_name).as_str(), + get_route_url(vpc_name, router_name, route_name).as_str(), ) .authn_as(AuthnMode::PrivilegedUser) .execute() .await .unwrap(); } + +#[nexus_test] +async fn test_router_routes_disallow_mixed_v4_v6( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let _ = create_project(&client, PROJECT_NAME).await; + let _ = create_vpc(&client, PROJECT_NAME, VPC_NAME).await; 
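This test exercises the new create-time validation added in `nexus/src/app/vpc_router.rs`. The rules it checks (no mixing of explicit IPv4 and IPv6 between destination and target, no VPCs on either side, no subnet targets, and `outbound` as the only accepted internet gateway) can be restated as a small standalone function; the enums below are simplified stand-ins for `RouteDestination`, `RouteTarget`, and `oxnet::IpNet`, not the real API types:

```rust
use std::net::IpAddr;

#[allow(dead_code)]
enum Dest {
    Ip(IpAddr),
    IpNet(IpAddr, u8), // network address plus prefix length
    Subnet(String),
    Vpc(String),
}

#[allow(dead_code)]
enum Target {
    Ip(IpAddr),
    Instance(String),
    InternetGateway(String),
    Vpc(String),
    Subnet(String),
    Drop,
}

fn validate(dest: &Dest, target: &Target) -> Result<(), &'static str> {
    match (dest, target) {
        // Explicit IP destinations and targets must agree on address family.
        (Dest::Ip(d), Target::Ip(t)) | (Dest::IpNet(d, _), Target::Ip(t)) => {
            if d.is_ipv4() == t.is_ipv4() {
                Ok(())
            } else {
                Err("cannot mix explicit IPv4 and IPv6 addresses")
            }
        }
        // VPCs cannot appear on either side of a custom route.
        (Dest::Vpc(_), _) | (_, Target::Vpc(_)) => {
            Err("VPCs cannot be used as a destination or target")
        }
        // Subnets are valid destinations but not valid targets.
        (_, Target::Subnet(_)) => Err("subnets cannot be used as a target"),
        // Only the 'outbound' internet gateway exists today.
        (_, Target::InternetGateway(name)) if name.as_str() != "outbound" => {
            Err("'outbound' is currently the only valid internet gateway")
        }
        _ => Ok(()),
    }
}

fn main() {
    let v4: IpAddr = "4.4.4.4".parse().unwrap();
    let v6: IpAddr = "2001:4860:4860::8888".parse().unwrap();
    assert!(validate(&Dest::Ip(v4), &Target::Ip(v4)).is_ok());
    assert!(validate(&Dest::Ip(v4), &Target::Ip(v6)).is_err());
    assert!(validate(&Dest::Subnet("default".into()), &Target::Vpc("v".into())).is_err());
    assert!(validate(&Dest::IpNet(v4, 24), &Target::InternetGateway("outbound".into())).is_ok());
}
```

Destinations and targets that are not family-specific (subnets, instances, `drop`) pass through unchecked, which matches the cartesian-product expectations built in the test body that follows.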
+ + let router_name = ROUTER_NAMES[0]; + let _router = + create_router(&client, PROJECT_NAME, VPC_NAME, router_name).await; + + // Some targets/strings refer to a mixed v4/v6 entity, e.g., + // subnet or instance. Others refer to one kind only (ipnet, ip). + // Users should not be able to mix v4 and v6 in these latter routes + // -- route resolution will ignore them, but a helpful error message + // is more useful. + let dest_set: [RouteDestination; 5] = [ + "ip:4.4.4.4".parse().unwrap(), + "ipnet:4.4.4.0/24".parse().unwrap(), + "ip:2001:4860:4860::8888".parse().unwrap(), + "ipnet:2001:4860:4860::/64".parse().unwrap(), + "subnet:named-subnet".parse().unwrap(), + ]; + + let target_set: [RouteTarget; 5] = [ + "ip:172.30.0.5".parse().unwrap(), + "ip:fd37:faf4:cc25::5".parse().unwrap(), + "instance:named-instance".parse().unwrap(), + "inetgw:outbound".parse().unwrap(), + "drop".parse().unwrap(), + ]; + + for (i, (dest, target)) in dest_set + .into_iter() + .cartesian_product(target_set.into_iter()) + .enumerate() + { + use RouteDestination as Rd; + use RouteTarget as Rt; + let allowed = match (&dest, &target) { + (Rd::Ip(IpAddr::V4(_)), Rt::Ip(IpAddr::V4(_))) + | (Rd::Ip(IpAddr::V6(_)), Rt::Ip(IpAddr::V6(_))) + | (Rd::IpNet(IpNet::V4(_)), Rt::Ip(IpAddr::V4(_))) + | (Rd::IpNet(IpNet::V6(_)), Rt::Ip(IpAddr::V6(_))) => true, + (Rd::Ip(_), Rt::Ip(_)) | (Rd::IpNet(_), Rt::Ip(_)) => false, + _ => true, + }; + + let route_name = format!("test-route-{i}"); + + if allowed { + create_route( + client, + PROJECT_NAME, + VPC_NAME, + router_name, + &route_name, + dest, + target, + ) + .await; + } else { + let err = create_route_with_error( + client, + PROJECT_NAME, + VPC_NAME, + router_name, + &route_name, + dest, + target, + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!( + err.message, + "cannot mix explicit IPv4 and IPv6 addresses between destination and target" + ); + } + } +} + +#[nexus_test] +async fn test_router_routes_modify_system_routes( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let _ = create_project(&client, PROJECT_NAME).await; + let _ = create_vpc(&client, PROJECT_NAME, VPC_NAME).await; + + // Attempting to add a new route to a system router should fail. + let err = create_route_with_error( + client, + PROJECT_NAME, + VPC_NAME, + "system", + "bad-route", + "ipnet:240.0.0.0/8".parse().unwrap(), + "inetgw:outbound".parse().unwrap(), + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!( + err.message, + "user-provided routes cannot be added to a system router" + ); + + // Get the system router's routes + let [v4_route, v6_route, subnet_route] = + get_system_routes(client, VPC_NAME).await; + + // Attempting to modify a VPC subnet route should fail. + // Deletes are tested above. + let err = object_put_error( + client, + &get_route_url(VPC_NAME, "system", subnet_route.name().as_str()) + .as_str(), + &RouterRouteUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: None, + }, + target: "drop".parse().unwrap(), + destination: "subnet:default".parse().unwrap(), + }, + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!( + err.message, + "routes of type VpcSubnet within the system router are not modifiable" + ); + + // Modifying the target of a Default (gateway) route should succeed. 
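The expected behavior here mirrors the update policy added to `router_update_route`: routes of kind `VpcSubnet` are immutable, and a `Default` route accepts a new target but not a new name, description, or destination. A distilled sketch of that policy over stand-in types (not the real `RouterRouteKind` or params structs):

```rust
enum Kind {
    Default,
    VpcSubnet,
    Custom,
}

struct Update {
    new_name: Option<String>,
    new_description: Option<String>,
    destination_changed: bool,
}

fn check_system_route_update(kind: Kind, update: &Update) -> Result<(), &'static str> {
    match kind {
        // Custom routes remain fully modifiable.
        Kind::Custom => Ok(()),
        // Default routes: only the target may change.
        Kind::Default
            if update.new_name.is_some()
                || update.new_description.is_some()
                || update.destination_changed =>
        {
            Err("the destination and metadata of a Default route cannot be changed")
        }
        Kind::Default => Ok(()),
        // Subnet routes are managed entirely by the system.
        Kind::VpcSubnet => {
            Err("routes of type VpcSubnet within the system router are not modifiable")
        }
    }
}

fn main() {
    let target_only =
        Update { new_name: None, new_description: None, destination_changed: false };
    let renamed =
        Update { new_name: Some("gw".to_string()), new_description: None, destination_changed: false };
    assert!(check_system_route_update(Kind::Default, &target_only).is_ok());
    assert!(check_system_route_update(Kind::Default, &renamed).is_err());
    assert!(check_system_route_update(Kind::VpcSubnet, &target_only).is_err());
    assert!(check_system_route_update(Kind::Custom, &renamed).is_ok());
}
```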
+ let v4_route: RouterRoute = object_put( + client, + &get_route_url(VPC_NAME, "system", v4_route.name().as_str()).as_str(), + &RouterRouteUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: None, + }, + destination: v4_route.destination, + target: "drop".parse().unwrap(), + }, + ) + .await; + assert_eq!(v4_route.target, RouteTarget::Drop); + + let v6_route: RouterRoute = object_put( + client, + &get_route_url(VPC_NAME, "system", v6_route.name().as_str()).as_str(), + &RouterRouteUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: None, + }, + destination: v6_route.destination, + target: "drop".parse().unwrap(), + }, + ) + .await; + assert_eq!(v6_route.target, RouteTarget::Drop); + + // Modifying the *destination* should not. + let err = object_put_error( + client, + &get_route_url(VPC_NAME, "system", v4_route.name().as_str()).as_str(), + &RouterRouteUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: None, + }, + destination: "ipnet:10.0.0.0/8".parse().unwrap(), + target: "drop".parse().unwrap(), + }, + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!( + err.message, + "the destination and metadata of a Default route cannot be changed", + ); +} + +#[nexus_test] +async fn test_router_routes_internet_gateway_target( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let _ = create_project(&client, PROJECT_NAME).await; + let _ = create_vpc(&client, PROJECT_NAME, VPC_NAME).await; + let router_name = ROUTER_NAMES[0]; + let _router = + create_router(&client, PROJECT_NAME, VPC_NAME, router_name).await; + + // Internet gateways are not fully supported: only 'inetgw:outbound' + // is a valid choice. + let dest: RouteDestination = "ipnet:240.0.0.0/8".parse().unwrap(); + + let err = create_route_with_error( + client, + PROJECT_NAME, + VPC_NAME, + &router_name, + "bad-route", + dest.clone(), + "inetgw:not-a-real-gw".parse().unwrap(), + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!( + err.message, + "'outbound' is currently the only valid internet gateway" + ); + + // This can be used in a custom router, in addition + // to its default system spot. + let target: RouteTarget = "inetgw:outbound".parse().unwrap(); + let route = create_route( + client, + PROJECT_NAME, + VPC_NAME, + router_name, + "good-route", + dest.clone(), + target.clone(), + ) + .await; + assert_eq!(route.destination, dest); + assert_eq!(route.target, target); +} + +#[nexus_test] +async fn test_router_routes_disallow_custom_targets( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let _ = create_project(&client, PROJECT_NAME).await; + let _ = create_vpc(&client, PROJECT_NAME, VPC_NAME).await; + let router_name = ROUTER_NAMES[0]; + let _router = + create_router(&client, PROJECT_NAME, VPC_NAME, router_name).await; + + // Neither 'vpc:xxx' nor 'subnet:xxx' can be specified as route targets + // in custom routers. 
+ let dest: RouteDestination = "ipnet:240.0.0.0/8".parse().unwrap(); + + let err = create_route_with_error( + client, + PROJECT_NAME, + VPC_NAME, + &router_name, + "bad-route", + dest.clone(), + "vpc:a-vpc-name-unknown".parse().unwrap(), + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!( + err.message, + "VPCs cannot be used as a destination or target in custom routers" + ); + + let err = create_route_with_error( + client, + PROJECT_NAME, + VPC_NAME, + &router_name, + "bad-route", + "vpc:a-vpc-name-unknown".parse().unwrap(), + "drop".parse().unwrap(), + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!( + err.message, + "VPCs cannot be used as a destination or target in custom routers" + ); + + let err = create_route_with_error( + client, + PROJECT_NAME, + VPC_NAME, + &router_name, + "bad-route", + dest.clone(), + "subnet:a-vpc-name-unknown".parse().unwrap(), + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!( + err.message, + "subnets cannot be used as a target in custom routers" + ); +} diff --git a/nexus/tests/integration_tests/subnet_allocation.rs b/nexus/tests/integration_tests/subnet_allocation.rs index 794c769da4..8e1f5834c5 100644 --- a/nexus/tests/integration_tests/subnet_allocation.rs +++ b/nexus/tests/integration_tests/subnet_allocation.rs @@ -111,6 +111,7 @@ async fn test_subnet_allocation(cptestctx: &ControlPlaneTestContext) { // Use the minimum subnet size ipv4_block: subnet, ipv6_block: None, + custom_router: None, }; NexusRequest::objects_post(client, &subnets_url, &Some(&subnet_create)) .authn_as(AuthnMode::PrivilegedUser) diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index ae348e775d..73322e518f 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -356,7 +356,7 @@ async fn test_snapshot_prevents_other_disk( // The Crucible snapshots still remain assert!(!disk_test.crucible_resources_deleted().await); - // Attempt disk allocation, which will fail - the presense of the snapshot + // Attempt disk allocation, which will fail - the presence of the snapshot // means the region wasn't deleted. 
let disk_size = ByteCount::from_gibibytes_u32(10); let next_disk_name: Name = "next-disk".parse().unwrap(); diff --git a/nexus/tests/integration_tests/vpc_firewall.rs b/nexus/tests/integration_tests/vpc_firewall.rs index a62019288d..83379bef88 100644 --- a/nexus/tests/integration_tests/vpc_firewall.rs +++ b/nexus/tests/integration_tests/vpc_firewall.rs @@ -5,7 +5,9 @@ use http::method::Method; use http::StatusCode; use nexus_test_utils::http_testing::{AuthnMode, NexusRequest}; -use nexus_test_utils::resource_helpers::{create_project, create_vpc}; +use nexus_test_utils::resource_helpers::{ + create_project, create_vpc, object_get, object_put_error, +}; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::views::Vpc; use omicron_common::api::external::{ @@ -42,7 +44,9 @@ async fn test_vpc_firewall(cptestctx: &ControlPlaneTestContext) { let default_vpc_firewall = format!("/v1/vpc-firewall-rules?vpc=default&{}", project_selector,); - let rules = get_rules(client, &default_vpc_firewall).await; + let rules = object_get::(client, &default_vpc_firewall) + .await + .rules; assert!(rules.iter().all(|r| r.vpc_id == default_vpc.identity.id)); assert!(is_default_firewall_rules("default", &rules)); @@ -52,7 +56,8 @@ async fn test_vpc_firewall(cptestctx: &ControlPlaneTestContext) { let other_vpc_firewall = format!("/v1/vpc-firewall-rules?{}", other_vpc_selector); let vpc2 = create_vpc(&client, &project_name, &other_vpc).await; - let rules = get_rules(client, &other_vpc_firewall).await; + let rules = + object_get::(client, &other_vpc_firewall).await.rules; assert!(rules.iter().all(|r| r.vpc_id == vpc2.identity.id)); assert!(is_default_firewall_rules(other_vpc, &rules)); @@ -111,14 +116,17 @@ async fn test_vpc_firewall(cptestctx: &ControlPlaneTestContext) { assert_eq!(updated_rules[1].identity.name, "deny-all-incoming"); // Make sure the firewall is changed - let rules = get_rules(client, &default_vpc_firewall).await; + let rules = object_get::(client, &default_vpc_firewall) + .await + .rules; assert!(!is_default_firewall_rules("default", &rules)); assert_eq!(rules.len(), new_rules.len()); assert_eq!(rules[0].identity.name, "allow-icmp"); assert_eq!(rules[1].identity.name, "deny-all-incoming"); // Make sure the other firewall is unchanged - let rules = get_rules(client, &other_vpc_firewall).await; + let rules = + object_get::(client, &other_vpc_firewall).await.rules; assert!(is_default_firewall_rules(other_vpc, &rules)); // DELETE is unsupported @@ -162,20 +170,6 @@ async fn test_vpc_firewall(cptestctx: &ControlPlaneTestContext) { .unwrap(); } -async fn get_rules( - client: &dropshot::test_util::ClientTestContext, - url: &str, -) -> Vec { - NexusRequest::object_get(client, url) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap() - .parsed_body::() - .unwrap() - .rules -} - fn is_default_firewall_rules( vpc_name: &str, rules: &Vec, @@ -292,3 +286,38 @@ fn is_default_firewall_rules( } true } + +#[nexus_test] +async fn test_firewall_rules_same_name(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + let project_name = "my-project"; + create_project(&client, &project_name).await; + + let rule = VpcFirewallRuleUpdate { + name: "dupe".parse().unwrap(), + description: "".to_string(), + status: VpcFirewallRuleStatus::Enabled, + direction: VpcFirewallRuleDirection::Inbound, + targets: vec![], + filters: VpcFirewallRuleFilter { + hosts: None, + protocols: None, + ports: None, + }, + action: VpcFirewallRuleAction::Allow, + priority: 
VpcFirewallRulePriority(65534), + }; + + let error = object_put_error( + client, + &format!("/v1/vpc-firewall-rules?vpc=default&project={}", project_name), + &VpcFirewallRuleUpdateParams { + rules: vec![rule.clone(), rule.clone()], + }, + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!(error.error_code, Some("InvalidValue".to_string())); + assert_eq!(error.message, "unsupported value for \"rules\": Rule names must be unique. Duplicates: [\"dupe\"]"); +} diff --git a/nexus/tests/integration_tests/vpc_routers.rs b/nexus/tests/integration_tests/vpc_routers.rs index 0b931efbd7..d85a8cba8e 100644 --- a/nexus/tests/integration_tests/vpc_routers.rs +++ b/nexus/tests/integration_tests/vpc_routers.rs @@ -2,50 +2,91 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +use crate::integration_tests::instances::assert_sled_vpc_routes; +use crate::integration_tests::instances::instance_simulate; +use dropshot::test_util::ClientTestContext; use http::method::Method; use http::StatusCode; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::lookup::LookupPath; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::identity_eq; +use nexus_test_utils::resource_helpers::create_default_ip_pool; +use nexus_test_utils::resource_helpers::create_instance_with; +use nexus_test_utils::resource_helpers::create_route; use nexus_test_utils::resource_helpers::create_router; +use nexus_test_utils::resource_helpers::create_vpc_subnet; +use nexus_test_utils::resource_helpers::object_delete; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils::resource_helpers::{create_project, create_vpc}; +use nexus_test_utils::resource_helpers::{object_put, object_put_error}; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; +use nexus_types::external_api::params::InstanceNetworkInterfaceAttachment; +use nexus_types::external_api::params::InstanceNetworkInterfaceCreate; +use nexus_types::external_api::params::VpcSubnetUpdate; use nexus_types::external_api::views::VpcRouter; use nexus_types::external_api::views::VpcRouterKind; +use nexus_types::external_api::views::VpcSubnet; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::IdentityMetadataUpdateParams; +use omicron_common::api::external::NameOrId; +use omicron_common::api::external::SimpleIdentity; +use omicron_common::api::internal::shared::ResolvedVpcRoute; +use omicron_common::api::internal::shared::RouterTarget; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::InstanceUuid; +use std::collections::HashMap; + +pub const PROJECT_NAME: &str = "cartographer"; +pub const VPC_NAME: &str = "the-isles"; +pub const SUBNET_NAMES: &[&str] = &["scotia", "albion", "eire"]; +const INSTANCE_NAMES: &[&str] = &["glaschu", "londinium"]; +pub const ROUTER_NAMES: &[&str] = &["cycle-network", "motorways"]; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; #[nexus_test] -async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) { +async fn test_vpc_routers_crud_operations(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; // Create a project that we'll use for testing. 
- let project_name = "springfield-squidport"; - let _ = create_project(&client, project_name).await; + let _ = create_project(&client, PROJECT_NAME).await; // Create a VPC. - let vpc_name = "vpc1"; - let vpc = create_vpc(&client, project_name, vpc_name).await; + let vpc = create_vpc(&client, PROJECT_NAME, VPC_NAME).await; let routers_url = - format!("/v1/vpc-routers?project={}&vpc={}", project_name, vpc_name); + format!("/v1/vpc-routers?project={}&vpc={}", PROJECT_NAME, VPC_NAME); // get routers should have only the system router created w/ the VPC - let routers = - objects_list_page_authz::(client, &routers_url).await.items; + let routers = list_routers(client, &VPC_NAME).await; assert_eq!(routers.len(), 1); assert_eq!(routers[0].kind, VpcRouterKind::System); - let router_name = "router1"; + // This router should not be deletable. + let system_router_url = format!("/v1/vpc-routers/{}", routers[0].id()); + let error: dropshot::HttpErrorResponseBody = NexusRequest::expect_failure( + client, + StatusCode::BAD_REQUEST, + Method::DELETE, + &system_router_url, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + assert_eq!(error.message, "cannot delete system router"); + + let router_name = ROUTER_NAMES[0]; let router_url = format!( "/v1/vpc-routers/{}?project={}&vpc={}", - router_name, project_name, vpc_name + router_name, PROJECT_NAME, VPC_NAME ); // fetching a particular router should 404 @@ -61,11 +102,14 @@ async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) { .unwrap() .parsed_body() .unwrap(); - assert_eq!(error.message, "not found: vpc-router with name \"router1\""); + assert_eq!( + error.message, + format!("not found: vpc-router with name \"{router_name}\"") + ); // Create a VPC Router. 
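The `create_router` call that follows comes from `nexus_test_utils::resource_helpers`; its body is not part of this diff, but based on the endpoints and assertions used in this test it amounts to a POST of `params::VpcRouterCreate` to `/v1/vpc-routers`. A hedged sketch follows; the struct shape and the "router description" default are assumptions inferred from the assertions below, not a quote of the helper.

use dropshot::test_util::ClientTestContext;
use nexus_test_utils::http_testing::{AuthnMode, NexusRequest};
use nexus_types::external_api::{params, views::VpcRouter};
use omicron_common::api::external::IdentityMetadataCreateParams;

// Rough equivalent of `create_router(client, project, vpc, name)`: POST the
// create params to the collection URL and parse the resulting view.
async fn create_router_sketch(
    client: &ClientTestContext,
    project: &str,
    vpc: &str,
    name: &str,
) -> VpcRouter {
    let url = format!("/v1/vpc-routers?project={project}&vpc={vpc}");
    let body = params::VpcRouterCreate {
        // Assumed shape: the create params carry only the identity metadata.
        identity: IdentityMetadataCreateParams {
            name: name.parse().unwrap(),
            description: "router description".to_string(),
        },
    };
    NexusRequest::objects_post(client, &url, &body)
        .authn_as(AuthnMode::PrivilegedUser)
        .execute()
        .await
        .unwrap()
        .parsed_body::<VpcRouter>()
        .unwrap()
}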
let router = - create_router(&client, project_name, vpc_name, router_name).await; + create_router(&client, PROJECT_NAME, VPC_NAME, router_name).await; assert_eq!(router.identity.name, router_name); assert_eq!(router.identity.description, "router description"); assert_eq!(router.vpc_id, vpc.identity.id); @@ -82,7 +126,7 @@ async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) { routers_eq(&router, &same_router); // routers list should now have the one in it - let routers = objects_list_page_authz(client, &routers_url).await.items; + let routers = list_routers(client, &VPC_NAME).await; assert_eq!(routers.len(), 2); routers_eq(&routers[0], &router); @@ -103,12 +147,15 @@ async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) { .unwrap() .parsed_body() .unwrap(); - assert_eq!(error.message, "already exists: vpc-router \"router1\""); + assert_eq!( + error.message, + format!("already exists: vpc-router \"{router_name}\"") + ); - let router2_name = "router2"; + let router2_name = ROUTER_NAMES[1]; let router2_url = format!( "/v1/vpc-routers/{}?project={}&vpc={}", - router2_name, project_name, vpc_name + router2_name, PROJECT_NAME, VPC_NAME ); // second router 404s before it's created @@ -124,18 +171,20 @@ async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) { .unwrap() .parsed_body() .unwrap(); - assert_eq!(error.message, "not found: vpc-router with name \"router2\""); + assert_eq!( + error.message, + format!("not found: vpc-router with name \"{router2_name}\"") + ); // create second custom router let router2 = - create_router(client, project_name, vpc_name, router2_name).await; + create_router(client, PROJECT_NAME, VPC_NAME, router2_name).await; assert_eq!(router2.identity.name, router2_name); assert_eq!(router2.vpc_id, vpc.identity.id); assert_eq!(router2.kind, VpcRouterKind::Custom); // routers list should now have two custom and one system - let routers = - objects_list_page_authz::(client, &routers_url).await.items; + let routers = list_routers(client, &VPC_NAME).await; assert_eq!(routers.len(), 3); routers_eq(&routers[0], &router); routers_eq(&routers[1], &router2); @@ -175,11 +224,14 @@ async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) { .unwrap() .parsed_body() .unwrap(); - assert_eq!(error.message, "not found: vpc-router with name \"router1\""); + assert_eq!( + error.message, + format!("not found: vpc-router with name \"{router_name}\"") + ); let router_url = format!( "/v1/vpc-routers/new-name?project={}&vpc={}", - project_name, vpc_name + PROJECT_NAME, VPC_NAME ); // fetching by new name works @@ -191,14 +243,17 @@ async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) { .unwrap() .parsed_body() .unwrap(); + routers_eq(&update, &updated_router); assert_eq!(&updated_router.identity.description, "another description"); // fetching list should show updated one - let routers = - objects_list_page_authz::(client, &routers_url).await.items; + let routers = list_routers(client, &VPC_NAME).await; assert_eq!(routers.len(), 3); - routers_eq(&routers[0], &updated_router); + routers_eq( + &routers.iter().find(|v| v.name().as_str() == "new-name").unwrap(), + &updated_router, + ); // delete first router NexusRequest::object_delete(&client, &router_url) @@ -208,8 +263,7 @@ async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) { .unwrap(); // routers list should now have two again, one system and one custom - let routers = - objects_list_page_authz::(client, &routers_url).await.items; + let routers = list_routers(client, &VPC_NAME).await; 
assert_eq!(routers.len(), 2); routers_eq(&routers[0], &router2); @@ -245,14 +299,411 @@ async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) { // Creating a router with the same name in a different VPC is allowed let vpc2_name = "vpc2"; - let vpc2 = create_vpc(&client, project_name, vpc2_name).await; + let vpc2 = create_vpc(&client, PROJECT_NAME, vpc2_name).await; let router_same_name = - create_router(&client, project_name, vpc2_name, router2_name).await; + create_router(&client, PROJECT_NAME, vpc2_name, router2_name).await; assert_eq!(router_same_name.identity.name, router2_name); assert_eq!(router_same_name.vpc_id, vpc2.identity.id); } +#[nexus_test] +async fn test_vpc_routers_attach_to_subnet( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Create a project that we'll use for testing. + let _ = create_project(&client, PROJECT_NAME).await; + let _ = create_vpc(&client, PROJECT_NAME, VPC_NAME).await; + + let subnet_name = "default"; + + let subnets_url = + format!("/v1/vpc-subnets?project={}&vpc={}", PROJECT_NAME, VPC_NAME); + + // get routers should have only the system router created w/ the VPC + let routers = list_routers(client, VPC_NAME).await; + assert_eq!(routers.len(), 1); + assert_eq!(routers[0].kind, VpcRouterKind::System); + + // Create a custom router for later use. + let router_name = ROUTER_NAMES[0]; + let router = + create_router(&client, PROJECT_NAME, VPC_NAME, router_name).await; + assert_eq!(router.kind, VpcRouterKind::Custom); + + // Attaching a system router should fail. + let err = object_put_error( + client, + &format!( + "/v1/vpc-subnets/{subnet_name}?project={PROJECT_NAME}&vpc={VPC_NAME}" + ), + &VpcSubnetUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: None, + }, + custom_router: Some(routers[0].identity.id.into()), + }, + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!(err.message, "cannot attach a system router to a VPC subnet"); + + // Attaching a new custom router should succeed. + let default_subnet = set_custom_router( + client, + "default", + VPC_NAME, + Some(router.identity.id.into()), + ) + .await; + assert_eq!(default_subnet.custom_router_id, Some(router.identity.id)); + + // Attaching a custom router to another subnet (same VPC) should succeed: + // ... at create time. + let subnet2_name = SUBNET_NAMES[0]; + let subnet2 = create_vpc_subnet( + &client, + &PROJECT_NAME, + &VPC_NAME, + &subnet2_name, + "192.168.0.0/24".parse().unwrap(), + None, + Some(router_name), + ) + .await; + assert_eq!(subnet2.custom_router_id, Some(router.identity.id)); + + // ... and via update. + let subnet3_name = SUBNET_NAMES[1]; + let _ = create_vpc_subnet( + &client, + &PROJECT_NAME, + &VPC_NAME, + &subnet3_name, + "192.168.1.0/24".parse().unwrap(), + None, + None, + ) + .await; + + let subnet3 = set_custom_router( + client, + subnet3_name, + VPC_NAME, + Some(router.identity.id.into()), + ) + .await; + assert_eq!(subnet3.custom_router_id, Some(router.identity.id)); + + // Attaching a custom router to another VPC's subnet should fail. 
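Both the attach and detach cases above reduce to the same PUT performed by the `set_custom_router` helper added at the bottom of this file: a `VpcSubnetUpdate` whose `custom_router` field either names a router or clears the association. A minimal sketch of the detach case, mirroring that helper's URL and payload:

use dropshot::test_util::ClientTestContext;
use nexus_test_utils::resource_helpers::object_put;
use nexus_types::external_api::{params::VpcSubnetUpdate, views::VpcSubnet};
use omicron_common::api::external::IdentityMetadataUpdateParams;

// Detach a subnet's custom router by PUTting an update with `custom_router: None`.
async fn detach_custom_router(
    client: &ClientTestContext,
    project: &str,
    vpc: &str,
    subnet: &str,
) -> VpcSubnet {
    object_put(
        client,
        &format!("/v1/vpc-subnets/{subnet}?project={project}&vpc={vpc}"),
        &VpcSubnetUpdate {
            identity: IdentityMetadataUpdateParams { name: None, description: None },
            // As exercised below, `None` clears any existing association.
            custom_router: None,
        },
    )
    .await
}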
+ create_vpc(&client, PROJECT_NAME, "vpc1").await; + let err = object_put_error( + client, + &format!("/v1/vpc-subnets/default?project={PROJECT_NAME}&vpc=vpc1"), + &VpcSubnetUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: None, + }, + custom_router: Some(router.identity.id.into()), + }, + StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!(err.message, "router and subnet must belong to the same VPC"); + + // Detach (and double detach) should succeed without issue. + let subnet3 = set_custom_router(client, subnet3_name, VPC_NAME, None).await; + assert_eq!(subnet3.custom_router_id, None); + let subnet3 = set_custom_router(client, subnet3_name, VPC_NAME, None).await; + assert_eq!(subnet3.custom_router_id, None); + + // Assigning a new router should not require that we first detach the old one. + let router2_name = ROUTER_NAMES[1]; + let router2 = + create_router(&client, PROJECT_NAME, VPC_NAME, router2_name).await; + let subnet2 = set_custom_router( + client, + subnet2_name, + VPC_NAME, + Some(router2.identity.id.into()), + ) + .await; + assert_eq!(subnet2.custom_router_id, Some(router2.identity.id)); + + // Reset subnet2 back to our first router. + let subnet2 = set_custom_router( + client, + subnet2_name, + VPC_NAME, + Some(router.identity.id.into()), + ) + .await; + assert_eq!(subnet2.custom_router_id, Some(router.identity.id)); + + // Deleting a custom router should detach from remaining subnets. + object_delete( + &client, + &format!( + "/v1/vpc-routers/{router_name}?vpc={VPC_NAME}&project={PROJECT_NAME}", + ), + ) + .await; + + for subnet in + objects_list_page_authz::(client, &subnets_url).await.items + { + assert!(subnet.custom_router_id.is_none(), "{subnet:?}"); + } +} + +#[nexus_test] +async fn test_vpc_routers_custom_delivered_to_instance( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let apictx = &cptestctx.server.server_context(); + let nexus = &apictx.nexus; + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + // Create some instances, one per subnet, and a default pool etc. 
+ create_default_ip_pool(client).await; + create_project(client, PROJECT_NAME).await; + + let vpc = create_vpc(&client, PROJECT_NAME, VPC_NAME).await; + + let mut subnets = vec![]; + let mut instances = vec![]; + let mut instance_nics = HashMap::new(); + for (i, (subnet_name, instance_name)) in + SUBNET_NAMES.iter().zip(INSTANCE_NAMES.iter()).enumerate() + { + let subnet = create_vpc_subnet( + &client, + PROJECT_NAME, + VPC_NAME, + subnet_name, + format!("192.168.{i}.0/24").parse().unwrap(), + None, + None, + ) + .await; + + let instance = create_instance_with( + client, + PROJECT_NAME, + instance_name, + &InstanceNetworkInterfaceAttachment::Create(vec![ + InstanceNetworkInterfaceCreate { + identity: IdentityMetadataCreateParams { + name: format!("nic-{i}").parse().unwrap(), + description: "".into(), + }, + vpc_name: vpc.name().clone(), + subnet_name: subnet_name.parse().unwrap(), + ip: Some(format!("192.168.{i}.10").parse().unwrap()), + }, + ]), + vec![], + vec![], + true, + ) + .await; + instance_simulate( + nexus, + &InstanceUuid::from_untyped_uuid(instance.identity.id), + ) + .await; + + let (.., authz_instance) = LookupPath::new(&opctx, &datastore) + .instance_id(instance.identity.id) + .lookup_for(nexus_db_queries::authz::Action::Read) + .await + .unwrap(); + + let guest_nics = datastore + .derive_guest_network_interface_info(&opctx, &authz_instance) + .await + .unwrap(); + + instance_nics.insert(*instance_name, guest_nics); + subnets.push(subnet); + instances.push(instance); + } + + let sled_agent = &cptestctx.sled_agent.sled_agent; + + // Create some routers! + let mut routers = vec![]; + for router_name in ROUTER_NAMES { + let router = + create_router(&client, PROJECT_NAME, VPC_NAME, router_name).await; + + routers.push(router); + } + + let vni = instance_nics[INSTANCE_NAMES[0]][0].vni; + + // Installing a custom router onto a subnet with a live instance + // should install routes at that sled. We should only have one sled. + // First, assert the default state. + for subnet in &subnets { + let (_system, custom) = assert_sled_vpc_routes( + &sled_agent, + &opctx, + &datastore, + subnet.id(), + vni, + ) + .await; + + assert!(custom.is_empty()); + } + + // Push a distinct route into each router and attach to each subnet. + for i in 0..2 { + create_route( + &client, + PROJECT_NAME, + VPC_NAME, + ROUTER_NAMES[i], + "a-sharp-drop", + format!("ipnet:24{i}.0.0.0/8").parse().unwrap(), + "drop".parse().unwrap(), + ) + .await; + + set_custom_router( + &client, + SUBNET_NAMES[i], + VPC_NAME, + Some(NameOrId::Name(ROUTER_NAMES[i].parse().unwrap())), + ) + .await; + } + + // Re-verify, assert that new routes are resolved correctly. + // Vec<(System, Custom)>. + let mut last_routes = vec![]; + for subnet in &subnets { + last_routes.push( + assert_sled_vpc_routes( + &sled_agent, + &opctx, + &datastore, + subnet.id(), + vni, + ) + .await, + ); + } + + assert!(last_routes[0].1.contains(&ResolvedVpcRoute { + dest: "240.0.0.0/8".parse().unwrap(), + target: RouterTarget::Drop + })); + assert!(last_routes[1].1.contains(&ResolvedVpcRoute { + dest: "241.0.0.0/8".parse().unwrap(), + target: RouterTarget::Drop + })); + + // Adding a new route should propagate that out to sleds. 
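The string literals fed to `create_route` above are parsed into the external API route types, matching the `target`/`destination` pairing of `params::RouterRouteCreate` documented in this change. A small sketch of the parse forms this test relies on, assuming (as the call sites suggest) that `create_route` takes `RouteDestination` and `RouteTarget` values from `omicron_common::api::external`:

use omicron_common::api::external::{RouteDestination, RouteTarget};

// The same route specs this test passes to `create_route`, parsed explicitly.
fn example_route_specs() -> (RouteDestination, RouteTarget, RouteTarget) {
    // "ipnet:<cidr>" selects traffic by destination prefix.
    let dest: RouteDestination = "ipnet:240.0.0.0/8".parse().unwrap();
    // "drop" black-holes matching packets ...
    let drop_target: RouteTarget = "drop".parse().unwrap();
    // ... while "instance:<name>" forwards them to a named instance.
    let to_instance: RouteTarget = "instance:londinium".parse().unwrap();
    (dest, drop_target, to_instance)
}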
+ create_route( + &client, + PROJECT_NAME, + VPC_NAME, + ROUTER_NAMES[0], + "ncn-74", + "ipnet:2.0.7.0/24".parse().unwrap(), + format!("instance:{}", INSTANCE_NAMES[1]).parse().unwrap(), + ) + .await; + + let (new_system, new_custom) = assert_sled_vpc_routes( + &sled_agent, + &opctx, + &datastore, + subnets[0].id(), + vni, + ) + .await; + + assert_eq!(last_routes[0].0, new_system); + assert!(new_custom.contains(&ResolvedVpcRoute { + dest: "2.0.7.0/24".parse().unwrap(), + target: RouterTarget::Ip(instance_nics[INSTANCE_NAMES[1]][0].ip) + })); + + // Swapping router should change the installed routes at that sled. + set_custom_router( + &client, + SUBNET_NAMES[0], + VPC_NAME, + Some(NameOrId::Name(ROUTER_NAMES[1].parse().unwrap())), + ) + .await; + let (new_system, new_custom) = assert_sled_vpc_routes( + &sled_agent, + &opctx, + &datastore, + subnets[0].id(), + vni, + ) + .await; + assert_eq!(last_routes[0].0, new_system); + assert_eq!(last_routes[1].1, new_custom); + + // Unsetting a router should remove affected non-system routes. + set_custom_router(&client, SUBNET_NAMES[0], VPC_NAME, None).await; + let (new_system, new_custom) = assert_sled_vpc_routes( + &sled_agent, + &opctx, + &datastore, + subnets[0].id(), + vni, + ) + .await; + assert_eq!(last_routes[0].0, new_system); + assert!(new_custom.is_empty()); +} + +async fn set_custom_router( + client: &ClientTestContext, + subnet_name: &str, + vpc_name: &str, + custom_router: Option, +) -> VpcSubnet { + object_put( + client, + &format!( + "/v1/vpc-subnets/{subnet_name}?project={PROJECT_NAME}&vpc={vpc_name}" + ), + &VpcSubnetUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: None, + }, + custom_router, + }, + ) + .await +} + +async fn list_routers( + client: &ClientTestContext, + vpc_name: &str, +) -> Vec { + let routers_url = + format!("/v1/vpc-routers?project={}&vpc={}", PROJECT_NAME, vpc_name); + let out = objects_list_page_authz::(client, &routers_url).await; + out.items +} + fn routers_eq(sn1: &VpcRouter, sn2: &VpcRouter) { identity_eq(&sn1.identity, &sn2.identity); assert_eq!(sn1.vpc_id, sn2.vpc_id); diff --git a/nexus/tests/integration_tests/vpc_subnets.rs b/nexus/tests/integration_tests/vpc_subnets.rs index 81e7156e8e..b12c43aecc 100644 --- a/nexus/tests/integration_tests/vpc_subnets.rs +++ b/nexus/tests/integration_tests/vpc_subnets.rs @@ -179,6 +179,7 @@ async fn test_vpc_subnets(cptestctx: &ControlPlaneTestContext) { }, ipv4_block, ipv6_block: Some(ipv6_block), + custom_router: None, }; let subnet: VpcSubnet = NexusRequest::objects_post(client, &subnets_url, &new_subnet) @@ -230,6 +231,7 @@ async fn test_vpc_subnets(cptestctx: &ControlPlaneTestContext) { }, ipv4_block, ipv6_block: Some(ipv6_block), + custom_router: None, }; let expected_error = format!( "IP address range '{}' conflicts with an existing subnet", @@ -257,6 +259,7 @@ async fn test_vpc_subnets(cptestctx: &ControlPlaneTestContext) { }, ipv4_block: other_ipv4_block, ipv6_block: other_ipv6_block, + custom_router: None, }; let error: dropshot::HttpErrorResponseBody = NexusRequest::new( RequestBuilder::new(client, Method::POST, &subnets_url) @@ -301,6 +304,7 @@ async fn test_vpc_subnets(cptestctx: &ControlPlaneTestContext) { }, ipv4_block, ipv6_block: None, + custom_router: None, }; let subnet2: VpcSubnet = NexusRequest::objects_post(client, &subnets_url, &new_subnet) @@ -329,6 +333,7 @@ async fn test_vpc_subnets(cptestctx: &ControlPlaneTestContext) { name: Some("new-name".parse().unwrap()), description: Some("another 
description".to_string()), }, + custom_router: None, }; NexusRequest::object_put(client, &subnet_url, Some(&update_params)) .authn_as(AuthnMode::PrivilegedUser) diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index a32fe5c4b9..35d8c32561 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -232,6 +232,16 @@ vpc_delete DELETE /v1/vpcs/{vpc} vpc_firewall_rules_update PUT /v1/vpc-firewall-rules vpc_firewall_rules_view GET /v1/vpc-firewall-rules vpc_list GET /v1/vpcs +vpc_router_create POST /v1/vpc-routers +vpc_router_delete DELETE /v1/vpc-routers/{router} +vpc_router_list GET /v1/vpc-routers +vpc_router_route_create POST /v1/vpc-router-routes +vpc_router_route_delete DELETE /v1/vpc-router-routes/{route} +vpc_router_route_list GET /v1/vpc-router-routes +vpc_router_route_update PUT /v1/vpc-router-routes/{route} +vpc_router_route_view GET /v1/vpc-router-routes/{route} +vpc_router_update PUT /v1/vpc-routers/{router} +vpc_router_view GET /v1/vpc-routers/{router} vpc_subnet_create POST /v1/vpc-subnets vpc_subnet_delete DELETE /v1/vpc-subnets/{subnet} vpc_subnet_list GET /v1/vpc-subnets diff --git a/nexus/tests/output/unexpected-authz-endpoints.txt b/nexus/tests/output/unexpected-authz-endpoints.txt index e8bb60224a..cd05058762 100644 --- a/nexus/tests/output/unexpected-authz-endpoints.txt +++ b/nexus/tests/output/unexpected-authz-endpoints.txt @@ -1,13 +1,3 @@ API endpoints tested by unauthorized.rs but not found in the OpenAPI spec: -GET "/v1/vpc-routers?project=demo-project&vpc=demo-vpc" -POST "/v1/vpc-routers?project=demo-project&vpc=demo-vpc" -GET "/v1/vpc-routers/demo-vpc-router?project=demo-project&vpc=demo-vpc" -PUT "/v1/vpc-routers/demo-vpc-router?project=demo-project&vpc=demo-vpc" -DELETE "/v1/vpc-routers/demo-vpc-router?project=demo-project&vpc=demo-vpc" -GET "/v1/vpc-router-routes?project=demo-project&vpc=demo-vpc&router=demo-vpc-router" -POST "/v1/vpc-router-routes?project=demo-project&vpc=demo-vpc&router=demo-vpc-router" -GET "/v1/vpc-router-routes/demo-router-route?project=demo-project&vpc=demo-vpc&router=demo-vpc-router" -PUT "/v1/vpc-router-routes/demo-router-route?project=demo-project&vpc=demo-vpc&router=demo-vpc-router" -DELETE "/v1/vpc-router-routes/demo-router-route?project=demo-project&vpc=demo-vpc&router=demo-vpc-router" PUT "/v1/system/update/repository?file_name=demo-repo.zip" GET "/v1/system/update/repository/1.0.0" diff --git a/nexus/types/src/deployment/blueprint_diff.rs b/nexus/types/src/deployment/blueprint_diff.rs index 17631e692d..4f2ee9ed3b 100644 --- a/nexus/types/src/deployment/blueprint_diff.rs +++ b/nexus/types/src/deployment/blueprint_diff.rs @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Types helpful for diffing [`Blueprints`]. +//! Types helpful for diffing blueprints. use super::blueprint_display::{ constants::*, linear_table_modified, linear_table_unchanged, BpDiffState, diff --git a/nexus/types/src/deployment/blueprint_display.rs b/nexus/types/src/deployment/blueprint_display.rs index 5d106b6ef3..2b0e4cab6c 100644 --- a/nexus/types/src/deployment/blueprint_display.rs +++ b/nexus/types/src/deployment/blueprint_display.rs @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Types helpful for rendering [`Blueprints`]. +//! Types helpful for rendering blueprints. 
use omicron_common::api::external::Generation; use std::fmt; @@ -176,7 +176,6 @@ pub trait BpSledSubtableData { } /// A table specific to a sled resource, such as a zone or disk. -/// `BpSledSubtable`s are always nested under [`BpSledTable`]s. pub struct BpSledSubtable { table_name: &'static str, column_names: &'static [&'static str], diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index 10d528bbfd..028f2301ba 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -482,6 +482,9 @@ pub enum SledFilter { /// Sleds on which reservations can be created. ReservationCreate, + /// Sleds which should be sent OPTE V2P mappings. + V2PMapping, + /// Sleds which should be sent VPC firewall rules. VpcFirewall, } @@ -536,6 +539,7 @@ impl SledPolicy { SledFilter::InService => true, SledFilter::QueryDuringInventory => true, SledFilter::ReservationCreate => true, + SledFilter::V2PMapping => true, SledFilter::VpcFirewall => true, }, SledPolicy::InService { @@ -547,6 +551,7 @@ impl SledPolicy { SledFilter::InService => true, SledFilter::QueryDuringInventory => true, SledFilter::ReservationCreate => false, + SledFilter::V2PMapping => true, SledFilter::VpcFirewall => true, }, SledPolicy::Expunged => match filter { @@ -556,6 +561,7 @@ impl SledPolicy { SledFilter::InService => false, SledFilter::QueryDuringInventory => false, SledFilter::ReservationCreate => false, + SledFilter::V2PMapping => false, SledFilter::VpcFirewall => false, }, } @@ -587,6 +593,7 @@ impl SledState { SledFilter::InService => true, SledFilter::QueryDuringInventory => true, SledFilter::ReservationCreate => true, + SledFilter::V2PMapping => true, SledFilter::VpcFirewall => true, }, SledState::Decommissioned => match filter { @@ -596,6 +603,7 @@ impl SledState { SledFilter::InService => false, SledFilter::QueryDuringInventory => false, SledFilter::ReservationCreate => false, + SledFilter::V2PMapping => false, SledFilter::VpcFirewall => false, }, } diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index ac169a35ee..6d92f2b1ba 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -808,6 +808,11 @@ pub struct InstanceNetworkInterfaceUpdate { // for the instance, though not the name. #[serde(default)] pub primary: bool, + + /// A set of additional networks that this interface may send and + /// receive traffic on. + #[serde(default)] + pub transit_ips: Vec, } // CERTIFICATES @@ -1220,6 +1225,14 @@ pub struct VpcSubnetCreate { /// be assigned if one is not provided. It must not overlap with any /// existing subnet in the VPC. pub ipv6_block: Option, + + /// An optional router, used to direct packets sent from hosts in this subnet + /// to any destination address. + /// + /// Custom routers apply in addition to the VPC-wide *system* router, and have + /// higher priority than the system router for an otherwise + /// equal-prefix-length match. + pub custom_router: Option, } /// Updateable properties of a `VpcSubnet` @@ -1227,6 +1240,10 @@ pub struct VpcSubnetCreate { pub struct VpcSubnetUpdate { #[serde(flatten)] pub identity: IdentityMetadataUpdateParams, + + /// An optional router, used to direct packets sent from hosts in this subnet + /// to any destination address. 
+ pub custom_router: Option, } // VPC ROUTERS @@ -1252,7 +1269,9 @@ pub struct VpcRouterUpdate { pub struct RouterRouteCreate { #[serde(flatten)] pub identity: IdentityMetadataCreateParams, + /// The location that matched packets should be forwarded to. pub target: RouteTarget, + /// Selects which traffic this routing rule will apply to. pub destination: RouteDestination, } @@ -1261,7 +1280,9 @@ pub struct RouterRouteCreate { pub struct RouterRouteUpdate { #[serde(flatten)] pub identity: IdentityMetadataUpdateParams, + /// The location that matched packets should be forwarded to. pub target: RouteTarget, + /// Selects which traffic this routing rule will apply to. pub destination: RouteDestination, } diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 2fa94b0e80..8e2ee39c21 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -177,7 +177,10 @@ pub struct Project { pub struct Certificate { #[serde(flatten)] pub identity: IdentityMetadata, + /// The service using this certificate pub service: ServiceUsingCertificate, + /// PEM-formatted string containing public certificate chain + pub cert: String, } // IMAGES @@ -259,7 +262,7 @@ pub struct Vpc { } /// A VPC subnet represents a logical grouping for instances that allows network traffic between -/// them, within a IPv4 subnetwork or optionall an IPv6 subnetwork. +/// them, within a IPv4 subnetwork or optionally an IPv6 subnetwork. #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct VpcSubnet { /// common identifying metadata @@ -274,6 +277,9 @@ pub struct VpcSubnet { /// The IPv6 subnet CIDR block. pub ipv6_block: Ipv6Net, + + /// ID for an attached custom router. + pub custom_router_id: Option, } #[derive(Clone, Copy, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] diff --git a/nexus/types/src/internal_api/background.rs b/nexus/types/src/internal_api/background.rs new file mode 100644 index 0000000000..be4c2ec9c0 --- /dev/null +++ b/nexus/types/src/internal_api/background.rs @@ -0,0 +1,14 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use serde::Deserialize; +use serde::Serialize; + +/// The status of a `region_replacement_drive` background task activation +#[derive(Serialize, Deserialize, Default)] +pub struct RegionReplacementDriverStatus { + pub drive_invoked_ok: Vec, + pub finish_invoked_ok: Vec, + pub errors: Vec, +} diff --git a/nexus/types/src/internal_api/mod.rs b/nexus/types/src/internal_api/mod.rs index 9c029d5072..66a5d21a23 100644 --- a/nexus/types/src/internal_api/mod.rs +++ b/nexus/types/src/internal_api/mod.rs @@ -2,5 +2,6 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+pub mod background; pub mod params; pub mod views; diff --git a/openapi/bootstrap-agent.json b/openapi/bootstrap-agent.json index 5d175e7b09..370f0fb404 100644 --- a/openapi/bootstrap-agent.json +++ b/openapi/bootstrap-agent.json @@ -328,6 +328,7 @@ "checker": { "nullable": true, "description": "Checker to apply to incoming messages.", + "default": null, "type": "string" }, "originate": { @@ -340,6 +341,7 @@ "shaper": { "nullable": true, "description": "Shaper to apply to outgoing messages.", + "default": null, "type": "string" } }, @@ -437,6 +439,7 @@ "local_pref": { "nullable": true, "description": "Apply a local preference to routes received from this peer.", + "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -444,11 +447,13 @@ "md5_auth_key": { "nullable": true, "description": "Use the given key for TCP-MD5 authentication with the peer.", + "default": null, "type": "string" }, "min_ttl": { "nullable": true, "description": "Require messages from a peer have a minimum IP time to live field.", + "default": null, "type": "integer", "format": "uint8", "minimum": 0 @@ -456,6 +461,7 @@ "multi_exit_discriminator": { "nullable": true, "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -467,6 +473,7 @@ "remote_asn": { "nullable": true, "description": "Require that a peer has a specified ASN.", + "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -474,6 +481,7 @@ "vlan_id": { "nullable": true, "description": "Associate a VLAN ID with a BGP peer session.", + "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -726,7 +734,7 @@ }, "Name": { "title": "A name unique within the parent collection", - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.", "type": "string", "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", "minLength": 1, @@ -1192,6 +1200,7 @@ "vlan_id": { "nullable": true, "description": "The VLAN id associated with this route.", + "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -1234,6 +1243,7 @@ "vlan_id": { "nullable": true, "description": "The VLAN id (if any) associated with this address.", + "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -1244,7 +1254,7 @@ ] }, "UserId": { - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.\n\n
JSON schema\n\n```json { \"title\": \"A name unique within the parent collection\", \"description\": \"Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.\", \"type\": \"string\", \"maxLength\": 63, \"minLength\": 1, \"pattern\": \"^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$\" } ```
", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.\n\n
JSON schema\n\n```json { \"title\": \"A name unique within the parent collection\", \"description\": \"Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.\", \"type\": \"string\", \"maxLength\": 63, \"minLength\": 1, \"pattern\": \"^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$\" } ```
", "type": "string" } }, diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index c3cc3c059d..9d495a726c 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -1567,6 +1567,7 @@ "checker": { "nullable": true, "description": "Checker to apply to incoming messages.", + "default": null, "type": "string" }, "originate": { @@ -1579,6 +1580,7 @@ "shaper": { "nullable": true, "description": "Shaper to apply to outgoing messages.", + "default": null, "type": "string" } }, @@ -1676,6 +1678,7 @@ "local_pref": { "nullable": true, "description": "Apply a local preference to routes received from this peer.", + "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -1683,11 +1686,13 @@ "md5_auth_key": { "nullable": true, "description": "Use the given key for TCP-MD5 authentication with the peer.", + "default": null, "type": "string" }, "min_ttl": { "nullable": true, "description": "Require messages from a peer have a minimum IP time to live field.", + "default": null, "type": "integer", "format": "uint8", "minimum": 0 @@ -1695,6 +1700,7 @@ "multi_exit_discriminator": { "nullable": true, "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -1706,6 +1712,7 @@ "remote_asn": { "nullable": true, "description": "Require that a peer has a specified ASN.", + "default": null, "type": "integer", "format": "uint32", "minimum": 0 @@ -1713,6 +1720,7 @@ "vlan_id": { "nullable": true, "description": "Associate a VLAN ID with a BGP peer session.", + "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -3518,7 +3526,7 @@ }, "Name": { "title": "A name unique within the parent collection", - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.", "type": "string", "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", "minLength": 1, @@ -3556,6 +3564,13 @@ "subnet": { "$ref": "#/components/schemas/IpNet" }, + "transit_ips": { + "default": [], + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + }, "vni": { "$ref": "#/components/schemas/Vni" } @@ -4345,6 +4360,7 @@ "vlan_id": { "nullable": true, "description": "The VLAN id associated with this route.", + "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -5003,6 +5019,7 @@ "vlan_id": { "nullable": true, "description": "The VLAN id (if any) associated with this address.", + "default": null, "type": "integer", "format": "uint16", "minimum": 0 @@ -5021,7 +5038,7 @@ }, "UserId": { "title": "A name unique within the parent collection", - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. 
Names cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.", "type": "string", "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", "minLength": 1, diff --git a/openapi/nexus.json b/openapi/nexus.json index 01ec9aeb56..339dd35fbe 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -7,7 +7,7 @@ "url": "https://oxide.computer", "email": "api@oxide.computer" }, - "version": "20240502.0" + "version": "20240710.0" }, "paths": { "/device/auth": { @@ -8346,13 +8346,14 @@ } } }, - "/v1/vpc-subnets": { + "/v1/vpc-router-routes": { "get": { "tags": [ "vpcs" ], - "summary": "List subnets", - "operationId": "vpc_subnet_list", + "summary": "List routes", + "description": "List the routes associated with a router in a particular VPC.", + "operationId": "vpc_router_route_list", "parameters": [ { "in": "query", @@ -8382,6 +8383,14 @@ "$ref": "#/components/schemas/NameOrId" } }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, { "in": "query", "name": "sort_by", @@ -8392,7 +8401,7 @@ { "in": "query", "name": "vpc", - "description": "Name or ID of the VPC", + "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -8404,7 +8413,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcSubnetResultsPage" + "$ref": "#/components/schemas/RouterRouteResultsPage" } } } @@ -8418,7 +8427,7 @@ }, "x-dropshot-pagination": { "required": [ - "vpc" + "router" ] } }, @@ -8426,8 +8435,8 @@ "tags": [ "vpcs" ], - "summary": "Create subnet", - "operationId": "vpc_subnet_create", + "summary": "Create route", + "operationId": "vpc_router_route_create", "parameters": [ { "in": "query", @@ -8439,19 +8448,27 @@ }, { "in": "query", - "name": "vpc", - "description": "Name or ID of the VPC", + "name": "router", + "description": "Name or ID of the router", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } } ], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcSubnetCreate" + "$ref": "#/components/schemas/RouterRouteCreate" } } }, @@ -8463,7 +8480,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcSubnet" + "$ref": "#/components/schemas/RouterRoute" } } } @@ -8477,18 +8494,18 @@ } } }, - "/v1/vpc-subnets/{subnet}": { + "/v1/vpc-router-routes/{route}": { "get": { "tags": [ "vpcs" ], - "summary": "Fetch subnet", - "operationId": "vpc_subnet_view", + "summary": "Fetch route", + "operationId": "vpc_router_route_view", "parameters": [ { "in": "path", - "name": "subnet", - "description": "Name or ID of the subnet", + "name": "route", + "description": "Name or ID of the route", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -8502,10 +8519,19 @@ "$ref": "#/components/schemas/NameOrId" } }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, { "in": "query", "name": "vpc", - "description": "Name or ID of the VPC", + "description": "Name or ID of the VPC, only 
required if `subnet` is provided as a `Name`", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -8517,7 +8543,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcSubnet" + "$ref": "#/components/schemas/RouterRoute" } } } @@ -8534,13 +8560,13 @@ "tags": [ "vpcs" ], - "summary": "Update subnet", - "operationId": "vpc_subnet_update", + "summary": "Update route", + "operationId": "vpc_router_route_update", "parameters": [ { "in": "path", - "name": "subnet", - "description": "Name or ID of the subnet", + "name": "route", + "description": "Name or ID of the route", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -8554,10 +8580,18 @@ "$ref": "#/components/schemas/NameOrId" } }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, { "in": "query", "name": "vpc", - "description": "Name or ID of the VPC", + "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -8567,7 +8601,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcSubnetUpdate" + "$ref": "#/components/schemas/RouterRouteUpdate" } } }, @@ -8579,7 +8613,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcSubnet" + "$ref": "#/components/schemas/RouterRoute" } } } @@ -8596,13 +8630,13 @@ "tags": [ "vpcs" ], - "summary": "Delete subnet", - "operationId": "vpc_subnet_delete", + "summary": "Delete route", + "operationId": "vpc_router_route_delete", "parameters": [ { "in": "path", - "name": "subnet", - "description": "Name or ID of the subnet", + "name": "route", + "description": "Name or ID of the route", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -8616,10 +8650,18 @@ "$ref": "#/components/schemas/NameOrId" } }, + { + "in": "query", + "name": "router", + "description": "Name or ID of the router", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, { "in": "query", "name": "vpc", - "description": "Name or ID of the VPC", + "description": "Name or ID of the VPC, only required if `subnet` is provided as a `Name`", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -8638,23 +8680,14 @@ } } }, - "/v1/vpc-subnets/{subnet}/network-interfaces": { + "/v1/vpc-routers": { "get": { "tags": [ "vpcs" ], - "summary": "List network interfaces", - "operationId": "vpc_subnet_list_network_interfaces", + "summary": "List routers", + "operationId": "vpc_router_list", "parameters": [ - { - "in": "path", - "name": "subnet", - "description": "Name or ID of the subnet", - "required": true, - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - }, { "in": "query", "name": "limit", @@ -8705,7 +8738,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/InstanceNetworkInterfaceResultsPage" + "$ref": "#/components/schemas/VpcRouterResultsPage" } } } @@ -8718,89 +8751,30 @@ } }, "x-dropshot-pagination": { - "required": [] + "required": [ + "vpc" + ] } - } - }, - "/v1/vpcs": { - "get": { + }, + "post": { "tags": [ "vpcs" ], - "summary": "List VPCs", - "operationId": "vpc_list", + "summary": "Create VPC router", + "operationId": "vpc_router_create", "parameters": [ - { - "in": "query", - "name": "limit", - "description": "Maximum number of items returned by a single call", - "schema": { - "nullable": true, - "type": "integer", - "format": "uint32", - "minimum": 1 - } - 
}, - { - "in": "query", - "name": "page_token", - "description": "Token returned by previous call to retrieve the subsequent page", - "schema": { - "nullable": true, - "type": "string" - } - }, { "in": "query", "name": "project", - "description": "Name or ID of the project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", "schema": { "$ref": "#/components/schemas/NameOrId" } }, { "in": "query", - "name": "sort_by", - "schema": { - "$ref": "#/components/schemas/NameOrIdSortMode" - } - } - ], - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/VpcResultsPage" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - }, - "x-dropshot-pagination": { - "required": [ - "project" - ] - } - }, - "post": { - "tags": [ - "vpcs" - ], - "summary": "Create VPC", - "operationId": "vpc_create", - "parameters": [ - { - "in": "query", - "name": "project", - "description": "Name or ID of the project", + "name": "vpc", + "description": "Name or ID of the VPC", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -8811,7 +8785,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcCreate" + "$ref": "#/components/schemas/VpcRouterCreate" } } }, @@ -8823,7 +8797,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Vpc" + "$ref": "#/components/schemas/VpcRouter" } } } @@ -8837,18 +8811,18 @@ } } }, - "/v1/vpcs/{vpc}": { + "/v1/vpc-routers/{router}": { "get": { "tags": [ "vpcs" ], - "summary": "Fetch VPC", - "operationId": "vpc_view", + "summary": "Fetch router", + "operationId": "vpc_router_view", "parameters": [ { "in": "path", - "name": "vpc", - "description": "Name or ID of the VPC", + "name": "router", + "description": "Name or ID of the router", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -8857,7 +8831,15 @@ { "in": "query", "name": "project", - "description": "Name or ID of the project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -8869,7 +8851,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Vpc" + "$ref": "#/components/schemas/VpcRouter" } } } @@ -8886,13 +8868,13 @@ "tags": [ "vpcs" ], - "summary": "Update a VPC", - "operationId": "vpc_update", + "summary": "Update router", + "operationId": "vpc_router_update", "parameters": [ { "in": "path", - "name": "vpc", - "description": "Name or ID of the VPC", + "name": "router", + "description": "Name or ID of the router", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -8901,7 +8883,15 @@ { "in": "query", "name": "project", - "description": "Name or ID of the project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -8911,7 +8901,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VpcUpdate" + "$ref": "#/components/schemas/VpcRouterUpdate" } } }, @@ 
-8923,7 +8913,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Vpc" + "$ref": "#/components/schemas/VpcRouter" } } } @@ -8940,13 +8930,13 @@ "tags": [ "vpcs" ], - "summary": "Delete VPC", - "operationId": "vpc_delete", + "summary": "Delete router", + "operationId": "vpc_router_delete", "parameters": [ { "in": "path", - "name": "vpc", - "description": "Name or ID of the VPC", + "name": "router", + "description": "Name or ID of the router", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -8955,7 +8945,15 @@ { "in": "query", "name": "project", - "description": "Name or ID of the project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", "schema": { "$ref": "#/components/schemas/NameOrId" } @@ -8973,683 +8971,691 @@ } } } - } - }, - "components": { - "schemas": { - "Address": { - "description": "An address tied to an address lot.", - "type": "object", - "properties": { - "address": { - "description": "The address and prefix length of this address.", - "allOf": [ - { - "$ref": "#/components/schemas/IpNet" - } - ] + }, + "/v1/vpc-subnets": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List subnets", + "operationId": "vpc_subnet_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } }, - "address_lot": { - "description": "The address lot this address is drawn from.", - "allOf": [ - { - "$ref": "#/components/schemas/NameOrId" - } - ] + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } }, - "vlan_id": { - "nullable": true, - "description": "Optional VLAN ID for this address", - "type": "integer", - "format": "uint16", - "minimum": 0 - } - }, - "required": [ - "address", - "address_lot" - ] - }, - "AddressConfig": { - "description": "A set of addresses associated with a port configuration.", - "type": "object", - "properties": { - "addresses": { - "description": "The set of addresses assigned to the port configuration.", - "type": "array", - "items": { - "$ref": "#/components/schemas/Address" + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" } - } - }, - "required": [ - "addresses" - ] - }, - "AddressLot": { - "description": "Represents an address lot object, containing the id of the lot that can be used in other API calls.", - "type": "object", - "properties": { - "description": { - "description": "human-readable free-form text about a resource", - "type": "string" - }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" }, - "kind": { - "description": "Desired use of `AddressLot`", - "allOf": [ - { - "$ref": "#/components/schemas/AddressLotKind" - } - ] + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", - "allOf": [ - { - "$ref": "#/components/schemas/Name" + { + 
"in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcSubnetResultsPage" + } } - ] + } }, - "time_created": { - "description": "timestamp when this resource was created", - "type": "string", - "format": "date-time" + "4XX": { + "$ref": "#/components/responses/Error" }, - "time_modified": { - "description": "timestamp when this resource was last modified", - "type": "string", - "format": "date-time" + "5XX": { + "$ref": "#/components/responses/Error" } }, - "required": [ - "description", - "id", - "kind", - "name", - "time_created", - "time_modified" - ] + "x-dropshot-pagination": { + "required": [ + "vpc" + ] + } }, - "AddressLotBlock": { - "description": "An address lot block is a part of an address lot and contains a range of addresses. The range is inclusive.", - "type": "object", - "properties": { - "first_address": { - "description": "The first address of the block (inclusive).", - "type": "string", - "format": "ip" - }, - "id": { - "description": "The id of the address lot block.", - "type": "string", - "format": "uuid" + "post": { + "tags": [ + "vpcs" + ], + "summary": "Create subnet", + "operationId": "vpc_subnet_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } }, - "last_address": { - "description": "The last address of the block (inclusive).", - "type": "string", - "format": "ip" + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } } - }, - "required": [ - "first_address", - "id", - "last_address" - ] - }, - "AddressLotBlockCreate": { - "description": "Parameters for creating an address lot block. 
Fist and last addresses are inclusive.", - "type": "object", - "properties": { - "first_address": { - "description": "The first address in the lot (inclusive).", - "type": "string", - "format": "ip" + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcSubnetCreate" + } + } }, - "last_address": { - "description": "The last address in the lot (inclusive).", - "type": "string", - "format": "ip" - } + "required": true }, - "required": [ - "first_address", - "last_address" - ] - }, - "AddressLotBlockResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/AddressLotBlock" + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcSubnet" + } + } } }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" } - }, - "required": [ - "items" - ] - }, - "AddressLotCreate": { - "description": "Parameters for creating an address lot.", - "type": "object", - "properties": { - "blocks": { - "description": "The blocks to add along with the new address lot.", - "type": "array", - "items": { - "$ref": "#/components/schemas/AddressLotBlockCreate" + } + } + }, + "/v1/vpc-subnets/{subnet}": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "Fetch subnet", + "operationId": "vpc_subnet_view", + "parameters": [ + { + "in": "path", + "name": "subnet", + "description": "Name or ID of the subnet", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" } }, - "description": { - "type": "string" - }, - "kind": { - "description": "The kind of address lot to create.", - "allOf": [ - { - "$ref": "#/components/schemas/AddressLotKind" - } - ] + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } }, - "name": { - "$ref": "#/components/schemas/Name" - } - }, - "required": [ - "blocks", - "description", - "kind", - "name" - ] - }, - "AddressLotCreateResponse": { - "description": "An address lot and associated blocks resulting from creating an address lot.", - "type": "object", - "properties": { - "blocks": { - "description": "The address lot blocks that were created.", - "type": "array", - "items": { - "$ref": "#/components/schemas/AddressLotBlock" + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" } - }, - "lot": { - "description": "The address lot that was created.", - "allOf": [ - { - "$ref": "#/components/schemas/AddressLot" + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcSubnet" + } } - ] + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" } - }, - "required": [ - "blocks", - "lot" - ] + } }, - "AddressLotKind": { - "description": "The kind associated with an address lot.", - "oneOf": [ + "put": { + "tags": [ + "vpcs" + ], + "summary": "Update subnet", + "operationId": 
"vpc_subnet_update", + "parameters": [ { - "description": "Infrastructure address lots are used for network infrastructure like addresses assigned to rack switches.", - "type": "string", - "enum": [ - "infra" - ] + "in": "path", + "name": "subnet", + "description": "Name or ID of the subnet", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } }, { - "description": "Pool address lots are used by IP pools.", - "type": "string", - "enum": [ - "pool" - ] - } - ] - }, - "AddressLotResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/AddressLot" + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" } }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" - } - }, - "required": [ - "items" - ] - }, - "AggregateBgpMessageHistory": { - "description": "BGP message history for rack switches.", - "type": "object", - "properties": { - "switch_histories": { - "description": "BGP history organized by switch.", - "type": "array", - "items": { - "$ref": "#/components/schemas/SwitchBgpHistory" + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" } } - }, - "required": [ - "switch_histories" - ] - }, - "AllowList": { - "description": "Allowlist of IPs or subnets that can make requests to user-facing services.", - "type": "object", - "properties": { - "allowed_ips": { - "description": "The allowlist of IPs or subnets.", - "allOf": [ - { - "$ref": "#/components/schemas/AllowedSourceIps" + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcSubnetUpdate" } - ] - }, - "time_created": { - "description": "Time the list was created.", - "type": "string", - "format": "date-time" + } }, - "time_modified": { - "description": "Time the list was last modified.", - "type": "string", - "format": "date-time" - } + "required": true }, - "required": [ - "allowed_ips", - "time_created", - "time_modified" - ] - }, - "AllowListUpdate": { - "description": "Parameters for updating allowed source IPs", - "type": "object", - "properties": { - "allowed_ips": { - "description": "The new list of allowed source IPs.", - "allOf": [ - { - "$ref": "#/components/schemas/AllowedSourceIps" + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcSubnet" + } } - ] + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" } - }, - "required": [ - "allowed_ips" - ] + } }, - "AllowedSourceIps": { - "description": "Description of source IPs allowed to reach rack services.", - "oneOf": [ + "delete": { + "tags": [ + "vpcs" + ], + "summary": "Delete subnet", + "operationId": "vpc_subnet_delete", + "parameters": [ { - "description": "Allow traffic from any external IP address.", - "type": "object", - "properties": { - "allow": { - "type": "string", - "enum": [ - "any" - ] - } - }, - "required": [ - "allow" - ] + "in": "path", + "name": "subnet", + "description": "Name or ID of the subnet", + "required": true, + "schema": { + "$ref": 
"#/components/schemas/NameOrId" + } }, { - "description": "Restrict access to a specific set of source IP addresses or subnets.\n\nAll others are prevented from reaching rack services.", - "type": "object", - "properties": { - "allow": { - "type": "string", - "enum": [ - "list" - ] - }, - "ips": { - "type": "array", - "items": { - "$ref": "#/components/schemas/IpNet" - } - } - }, - "required": [ - "allow", - "ips" - ] - } - ] - }, - "Baseboard": { - "description": "Properties that uniquely identify an Oxide hardware component", - "type": "object", - "properties": { - "part": { - "type": "string" + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } }, - "revision": { - "type": "integer", - "format": "int64" - }, - "serial": { - "type": "string" + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } } - }, - "required": [ - "part", - "revision", - "serial" - ] - }, - "BfdMode": { - "description": "BFD connection mode.", - "type": "string", - "enum": [ - "single_hop", - "multi_hop" - ] - }, - "BfdSessionDisable": { - "description": "Information needed to disable a BFD session", - "type": "object", - "properties": { - "remote": { - "description": "Address of the remote peer to disable a BFD session for.", - "type": "string", - "format": "ip" + ], + "responses": { + "204": { + "description": "successful deletion" }, - "switch": { - "description": "The switch to enable this session on. Must be `switch0` or `switch1`.", - "allOf": [ - { - "$ref": "#/components/schemas/Name" - } - ] + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" } - }, - "required": [ - "remote", - "switch" - ] - }, - "BfdSessionEnable": { - "description": "Information about a bidirectional forwarding detection (BFD) session.", - "type": "object", - "properties": { - "detection_threshold": { - "description": "The negotiated Control packet transmission interval, multiplied by this variable, will be the Detection Time for this session (as seen by the remote system)", - "type": "integer", - "format": "uint8", - "minimum": 0 + } + } + }, + "/v1/vpc-subnets/{subnet}/network-interfaces": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List network interfaces", + "operationId": "vpc_subnet_list_network_interfaces", + "parameters": [ + { + "in": "path", + "name": "subnet", + "description": "Name or ID of the subnet", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } }, - "local": { - "nullable": true, - "description": "Address the Oxide switch will listen on for BFD traffic. 
If `None` then the unspecified address (0.0.0.0 or ::) is used.", - "type": "string", - "format": "ip" + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } }, - "mode": { - "description": "Select either single-hop (RFC 5881) or multi-hop (RFC 5883)", - "allOf": [ - { - "$ref": "#/components/schemas/BfdMode" - } - ] + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } }, - "remote": { - "description": "Address of the remote peer to establish a BFD session with.", - "type": "string", - "format": "ip" + { + "in": "query", + "name": "project", + "description": "Name or ID of the project, only required if `vpc` is provided as a `Name`", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } }, - "required_rx": { - "description": "The minimum interval, in microseconds, between received BFD Control packets that this system requires", - "type": "integer", - "format": "uint64", - "minimum": 0 + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } }, - "switch": { - "description": "The switch to enable this session on. Must be `switch0` or `switch1`.", - "allOf": [ - { - "$ref": "#/components/schemas/Name" + { + "in": "query", + "name": "vpc", + "description": "Name or ID of the VPC", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstanceNetworkInterfaceResultsPage" + } } - ] + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" } }, - "required": [ - "detection_threshold", - "mode", - "remote", - "required_rx", - "switch" - ] - }, - "BfdState": { - "oneOf": [ + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/vpcs": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "List VPCs", + "operationId": "vpc_list", + "parameters": [ { - "description": "A stable down state. 
Non-responsive to incoming messages.", - "type": "string", - "enum": [ - "admin_down" - ] + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } }, { - "description": "The initial state.", - "type": "string", - "enum": [ - "down" - ] + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } }, { - "description": "The peer has detected a remote peer in the down state.", - "type": "string", - "enum": [ - "init" - ] + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } }, { - "description": "The peer has detected a remote peer in the up or init state while in the init state.", - "type": "string", - "enum": [ - "up" - ] + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } } - ] - }, - "BfdStatus": { - "type": "object", - "properties": { - "detection_threshold": { - "type": "integer", - "format": "uint8", - "minimum": 0 + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcResultsPage" + } + } + } }, - "local": { - "nullable": true, - "type": "string", - "format": "ip" - }, - "mode": { - "$ref": "#/components/schemas/BfdMode" - }, - "peer": { - "type": "string", - "format": "ip" - }, - "required_rx": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "state": { - "$ref": "#/components/schemas/BfdState" + "4XX": { + "$ref": "#/components/responses/Error" }, - "switch": { - "$ref": "#/components/schemas/Name" + "5XX": { + "$ref": "#/components/responses/Error" } }, - "required": [ - "detection_threshold", - "mode", - "peer", - "required_rx", - "state", - "switch" - ] + "x-dropshot-pagination": { + "required": [ + "project" + ] + } }, - "BgpAnnounceSet": { - "description": "Represents a BGP announce set by id. 
The id can be used with other API calls to view and manage the announce set.", - "type": "object", - "properties": { - "description": { - "description": "human-readable free-form text about a resource", - "type": "string" - }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" + "post": { + "tags": [ + "vpcs" + ], + "summary": "Create VPC", + "operationId": "vpc_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcCreate" + } + } }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", - "allOf": [ - { - "$ref": "#/components/schemas/Name" + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vpc" + } } - ] + } }, - "time_created": { - "description": "timestamp when this resource was created", - "type": "string", - "format": "date-time" + "4XX": { + "$ref": "#/components/responses/Error" }, - "time_modified": { - "description": "timestamp when this resource was last modified", - "type": "string", - "format": "date-time" + "5XX": { + "$ref": "#/components/responses/Error" } - }, - "required": [ - "description", - "id", - "name", - "time_created", - "time_modified" - ] - }, - "BgpAnnounceSetCreate": { - "description": "Parameters for creating a named set of BGP announcements.", - "type": "object", - "properties": { - "announcement": { - "description": "The announcements in this set.", - "type": "array", - "items": { - "$ref": "#/components/schemas/BgpAnnouncementCreate" + } + } + }, + "/v1/vpcs/{vpc}": { + "get": { + "tags": [ + "vpcs" + ], + "summary": "Fetch VPC", + "operationId": "vpc_view", + "parameters": [ + { + "in": "path", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" } }, - "description": { - "type": "string" - }, - "name": { - "$ref": "#/components/schemas/Name" + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } } - }, - "required": [ - "announcement", - "description", - "name" - ] - }, - "BgpAnnouncement": { - "description": "A BGP announcement tied to an address lot block.", - "type": "object", - "properties": { - "address_lot_block_id": { - "description": "The address block the IP network being announced is drawn from.", - "type": "string", - "format": "uuid" + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vpc" + } + } + } }, - "announce_set_id": { - "description": "The id of the set this announcement is a part of.", - "type": "string", - "format": "uuid" + "4XX": { + "$ref": "#/components/responses/Error" }, - "network": { - "description": "The IP network being announced.", - "allOf": [ - { - "$ref": "#/components/schemas/IpNet" - } - ] + "5XX": { + "$ref": "#/components/responses/Error" } - }, - "required": [ - "address_lot_block_id", - "announce_set_id", - "network" - ] + } }, - "BgpAnnouncementCreate": { - "description": "A BGP announcement tied to a particular address lot block.", - 
"type": "object", - "properties": { - "address_lot_block": { - "description": "Address lot this announcement is drawn from.", - "allOf": [ - { - "$ref": "#/components/schemas/NameOrId" - } - ] + "put": { + "tags": [ + "vpcs" + ], + "summary": "Update a VPC", + "operationId": "vpc_update", + "parameters": [ + { + "in": "path", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } }, - "network": { - "description": "The network being announced.", - "allOf": [ - { - "$ref": "#/components/schemas/IpNet" - } - ] + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VpcUpdate" + } + } + }, + "required": true }, - "required": [ - "address_lot_block", - "network" - ] - }, - "BgpConfig": { - "description": "A base BGP configuration.", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vpc" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "vpcs" + ], + "summary": "Delete VPC", + "operationId": "vpc_delete", + "parameters": [ + { + "in": "path", + "name": "vpc", + "description": "Name or ID of the VPC", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + } + }, + "components": { + "schemas": { + "Address": { + "description": "An address tied to an address lot.", "type": "object", "properties": { - "asn": { - "description": "The autonomous system number of this BGP configuration.", + "address": { + "description": "The address and prefix length of this address.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] + }, + "address_lot": { + "description": "The address lot this address is drawn from.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "vlan_id": { + "nullable": true, + "description": "Optional VLAN ID for this address", "type": "integer", - "format": "uint32", + "format": "uint16", "minimum": 0 - }, + } + }, + "required": [ + "address", + "address_lot" + ] + }, + "AddressConfig": { + "description": "A set of addresses associated with a port configuration.", + "type": "object", + "properties": { + "addresses": { + "description": "The set of addresses assigned to the port configuration.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Address" + } + } + }, + "required": [ + "addresses" + ] + }, + "AddressLot": { + "description": "Represents an address lot object, containing the id of the lot that can be used in other API calls.", + "type": "object", + "properties": { "description": { "description": "human-readable free-form text about a resource", "type": "string" @@ -9659,6 +9665,14 @@ "type": "string", "format": "uuid" }, + "kind": { + "description": "Desired use of `AddressLot`", + "allOf": [ + { + "$ref": "#/components/schemas/AddressLotKind" + } + ] + }, 
"name": { "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ @@ -9676,59 +9690,64 @@ "description": "timestamp when this resource was last modified", "type": "string", "format": "date-time" - }, - "vrf": { - "nullable": true, - "description": "Optional virtual routing and forwarding identifier for this BGP configuration.", - "type": "string" } }, "required": [ - "asn", "description", "id", + "kind", "name", "time_created", "time_modified" ] }, - "BgpConfigCreate": { - "description": "Parameters for creating a BGP configuration. This includes and autonomous system number (ASN) and a virtual routing and forwarding (VRF) identifier.", + "AddressLotBlock": { + "description": "An address lot block is a part of an address lot and contains a range of addresses. The range is inclusive.", "type": "object", "properties": { - "asn": { - "description": "The autonomous system number of this BGP configuration.", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "bgp_announce_set_id": { - "$ref": "#/components/schemas/NameOrId" + "first_address": { + "description": "The first address of the block (inclusive).", + "type": "string", + "format": "ip" }, - "description": { - "type": "string" + "id": { + "description": "The id of the address lot block.", + "type": "string", + "format": "uuid" }, - "name": { - "$ref": "#/components/schemas/Name" + "last_address": { + "description": "The last address of the block (inclusive).", + "type": "string", + "format": "ip" + } + }, + "required": [ + "first_address", + "id", + "last_address" + ] + }, + "AddressLotBlockCreate": { + "description": "Parameters for creating an address lot block. Fist and last addresses are inclusive.", + "type": "object", + "properties": { + "first_address": { + "description": "The first address in the lot (inclusive).", + "type": "string", + "format": "ip" }, - "vrf": { - "nullable": true, - "description": "Optional virtual routing and forwarding identifier for this BGP configuration.", - "allOf": [ - { - "$ref": "#/components/schemas/Name" - } - ] + "last_address": { + "description": "The last address in the lot (inclusive).", + "type": "string", + "format": "ip" } }, "required": [ - "asn", - "bgp_announce_set_id", - "description", - "name" + "first_address", + "last_address" ] }, - "BgpConfigResultsPage": { + "AddressLotBlockResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -9736,7 +9755,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/BgpConfig" + "$ref": "#/components/schemas/AddressLotBlock" } }, "next_page": { @@ -9749,846 +9768,989 @@ "items" ] }, - "BgpImportedRouteIpv4": { - "description": "A route imported from a BGP peer.", + "AddressLotCreate": { + "description": "Parameters for creating an address lot.", "type": "object", "properties": { - "id": { - "description": "BGP identifier of the originating router.", - "type": "integer", - "format": "uint32", - "minimum": 0 + "blocks": { + "description": "The blocks to add along with the new address lot.", + "type": "array", + "items": { + "$ref": "#/components/schemas/AddressLotBlockCreate" + } }, - "nexthop": { - "description": "The nexthop the prefix is reachable through.", - "type": "string", - "format": "ipv4" + "description": { + "type": "string" }, - "prefix": { - "description": "The destination network prefix.", + "kind": { + "description": "The kind of address lot to create.", "allOf": [ { - "$ref": 
"#/components/schemas/Ipv4Net" + "$ref": "#/components/schemas/AddressLotKind" } ] }, - "switch": { - "description": "Switch the route is imported into.", + "name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "blocks", + "description", + "kind", + "name" + ] + }, + "AddressLotCreateResponse": { + "description": "An address lot and associated blocks resulting from creating an address lot.", + "type": "object", + "properties": { + "blocks": { + "description": "The address lot blocks that were created.", + "type": "array", + "items": { + "$ref": "#/components/schemas/AddressLotBlock" + } + }, + "lot": { + "description": "The address lot that was created.", "allOf": [ { - "$ref": "#/components/schemas/SwitchLocation" + "$ref": "#/components/schemas/AddressLot" } ] } }, "required": [ - "id", - "nexthop", - "prefix", - "switch" + "blocks", + "lot" ] }, - "BgpMessageHistory": {}, - "BgpPeer": { - "description": "A BGP peer configuration for an interface. Includes the set of announcements that will be advertised to the peer identified by `addr`. The `bgp_config` parameter is a reference to global BGP parameters. The `interface_name` indicates what interface the peer should be contacted on.", - "type": "object", - "properties": { - "addr": { - "description": "The address of the host to peer with.", + "AddressLotKind": { + "description": "The kind associated with an address lot.", + "oneOf": [ + { + "description": "Infrastructure address lots are used for network infrastructure like addresses assigned to rack switches.", "type": "string", - "format": "ip" - }, - "allowed_export": { - "description": "Define export policy for a peer.", - "allOf": [ - { - "$ref": "#/components/schemas/ImportExportPolicy" - } - ] - }, - "allowed_import": { - "description": "Define import policy for a peer.", - "allOf": [ - { - "$ref": "#/components/schemas/ImportExportPolicy" - } + "enum": [ + "infra" ] }, - "bgp_config": { - "description": "The global BGP configuration used for establishing a session with this peer.", - "allOf": [ - { - "$ref": "#/components/schemas/NameOrId" - } + { + "description": "Pool address lots are used by IP pools.", + "type": "string", + "enum": [ + "pool" ] - }, - "communities": { - "description": "Include the provided communities in updates sent to the peer.", + } + ] + }, + "AddressLotResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", "type": "array", "items": { - "type": "integer", - "format": "uint32", - "minimum": 0 + "$ref": "#/components/schemas/AddressLot" } }, - "connect_retry": { - "description": "How long to to wait between TCP connection retries (seconds).", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "delay_open": { - "description": "How long to delay sending an open request after establishing a TCP session (seconds).", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "enforce_first_as": { - "description": "Enforce that the first AS in paths received from this peer is the peer's AS.", - "type": "boolean" - }, - "hold_time": { - "description": "How long to hold peer connections between keepalives (seconds).", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "idle_hold_time": { - "description": "How long to hold a peer in idle before attempting a new session (seconds).", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "interface_name": { - "description": "The name of 
interface to peer on. This is relative to the port configuration this BGP peer configuration is a part of. For example this value could be phy0 to refer to a primary physical interface. Or it could be vlan47 to refer to a VLAN interface.", - "type": "string" - }, - "keepalive": { - "description": "How often to send keepalive requests (seconds).", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "local_pref": { - "nullable": true, - "description": "Apply a local preference to routes received from this peer.", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "md5_auth_key": { + "next_page": { "nullable": true, - "description": "Use the given key for TCP-MD5 authentication with the peer.", + "description": "token used to fetch the next page of results (if any)", "type": "string" - }, - "min_ttl": { - "nullable": true, - "description": "Require messages from a peer have a minimum IP time to live field.", - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "multi_exit_discriminator": { - "nullable": true, - "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "remote_asn": { - "nullable": true, - "description": "Require that a peer has a specified ASN.", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "vlan_id": { - "nullable": true, - "description": "Associate a VLAN ID with a peer.", - "type": "integer", - "format": "uint16", - "minimum": 0 } }, "required": [ - "addr", - "allowed_export", - "allowed_import", - "bgp_config", - "communities", - "connect_retry", - "delay_open", - "enforce_first_as", - "hold_time", - "idle_hold_time", - "interface_name", - "keepalive" + "items" ] }, - "BgpPeerConfig": { + "AggregateBgpMessageHistory": { + "description": "BGP message history for rack switches.", "type": "object", "properties": { - "peers": { + "switch_histories": { + "description": "BGP history organized by switch.", "type": "array", "items": { - "$ref": "#/components/schemas/BgpPeer" + "$ref": "#/components/schemas/SwitchBgpHistory" } } }, "required": [ - "peers" + "switch_histories" ] }, - "BgpPeerState": { - "description": "The current state of a BGP peer.", + "AllowList": { + "description": "Allowlist of IPs or subnets that can make requests to user-facing services.", + "type": "object", + "properties": { + "allowed_ips": { + "description": "The allowlist of IPs or subnets.", + "allOf": [ + { + "$ref": "#/components/schemas/AllowedSourceIps" + } + ] + }, + "time_created": { + "description": "Time the list was created.", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "Time the list was last modified.", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "allowed_ips", + "time_created", + "time_modified" + ] + }, + "AllowListUpdate": { + "description": "Parameters for updating allowed source IPs", + "type": "object", + "properties": { + "allowed_ips": { + "description": "The new list of allowed source IPs.", + "allOf": [ + { + "$ref": "#/components/schemas/AllowedSourceIps" + } + ] + } + }, + "required": [ + "allowed_ips" + ] + }, + "AllowedSourceIps": { + "description": "Description of source IPs allowed to reach rack services.", "oneOf": [ { - "description": "Initial state. Refuse all incoming BGP connections. 
No resources allocated to peer.", - "type": "string", - "enum": [ - "idle" + "description": "Allow traffic from any external IP address.", + "type": "object", + "properties": { + "allow": { + "type": "string", + "enum": [ + "any" + ] + } + }, + "required": [ + "allow" ] }, { - "description": "Waiting for the TCP connection to be completed.", + "description": "Restrict access to a specific set of source IP addresses or subnets.\n\nAll others are prevented from reaching rack services.", + "type": "object", + "properties": { + "allow": { + "type": "string", + "enum": [ + "list" + ] + }, + "ips": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + } + }, + "required": [ + "allow", + "ips" + ] + } + ] + }, + "AuthzScope": { + "description": "Authorization scope for a timeseries.\n\nThis describes the level at which a user must be authorized to read data from a timeseries. For example, fleet-scoping means the data is only visible to an operator or fleet reader. Project-scoped, on the other hand, indicates that a user will see data limited to the projects on which they have read permissions.", + "oneOf": [ + { + "description": "Timeseries data is limited to fleet readers.", "type": "string", "enum": [ - "connect" + "fleet" ] }, { - "description": "Trying to acquire peer by listening for and accepting a TCP connection.", + "description": "Timeseries data is limited to the authorized silo for a user.", "type": "string", "enum": [ - "active" + "silo" ] }, { - "description": "Waiting for open message from peer.", + "description": "Timeseries data is limited to the authorized projects for a user.", "type": "string", "enum": [ - "open_sent" + "project" ] }, { - "description": "Waiting for keepaliave or notification from peer.", + "description": "The timeseries is viewable to all without limitation.", "type": "string", "enum": [ - "open_confirm" - ] - }, - { - "description": "Synchronizing with peer.", - "type": "string", - "enum": [ - "session_setup" + "viewable_to_all" ] + } + ] + }, + "Baseboard": { + "description": "Properties that uniquely identify an Oxide hardware component", + "type": "object", + "properties": { + "part": { + "type": "string" }, - { - "description": "Session established. Able to exchange update, notification and keepalive messages with peers.", - "type": "string", - "enum": [ - "established" - ] + "revision": { + "type": "integer", + "format": "int64" + }, + "serial": { + "type": "string" } + }, + "required": [ + "part", + "revision", + "serial" ] }, - "BgpPeerStatus": { - "description": "The current status of a BGP peer.", + "BfdMode": { + "description": "BFD connection mode.", + "type": "string", + "enum": [ + "single_hop", + "multi_hop" + ] + }, + "BfdSessionDisable": { + "description": "Information needed to disable a BFD session", "type": "object", "properties": { - "addr": { - "description": "IP address of the peer.", + "remote": { + "description": "Address of the remote peer to disable a BFD session for.", "type": "string", "format": "ip" }, - "local_asn": { - "description": "Local autonomous system number.", + "switch": { + "description": "The switch to enable this session on. 
Must be `switch0` or `switch1`.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + }, + "required": [ + "remote", + "switch" + ] + }, + "BfdSessionEnable": { + "description": "Information about a bidirectional forwarding detection (BFD) session.", + "type": "object", + "properties": { + "detection_threshold": { + "description": "The negotiated Control packet transmission interval, multiplied by this variable, will be the Detection Time for this session (as seen by the remote system)", "type": "integer", - "format": "uint32", + "format": "uint8", "minimum": 0 }, - "remote_asn": { - "description": "Remote autonomous system number.", - "type": "integer", - "format": "uint32", - "minimum": 0 + "local": { + "nullable": true, + "description": "Address the Oxide switch will listen on for BFD traffic. If `None` then the unspecified address (0.0.0.0 or ::) is used.", + "type": "string", + "format": "ip" }, - "state": { - "description": "State of the peer.", + "mode": { + "description": "Select either single-hop (RFC 5881) or multi-hop (RFC 5883)", "allOf": [ { - "$ref": "#/components/schemas/BgpPeerState" + "$ref": "#/components/schemas/BfdMode" } ] }, - "state_duration_millis": { - "description": "Time of last state change.", + "remote": { + "description": "Address of the remote peer to establish a BFD session with.", + "type": "string", + "format": "ip" + }, + "required_rx": { + "description": "The minimum interval, in microseconds, between received BFD Control packets that this system requires", "type": "integer", "format": "uint64", "minimum": 0 }, "switch": { - "description": "Switch with the peer session.", + "description": "The switch to enable this session on. Must be `switch0` or `switch1`.", "allOf": [ { - "$ref": "#/components/schemas/SwitchLocation" + "$ref": "#/components/schemas/Name" } ] } }, "required": [ - "addr", - "local_asn", - "remote_asn", - "state", - "state_duration_millis", + "detection_threshold", + "mode", + "remote", + "required_rx", "switch" ] }, - "BinRangedouble": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "BfdState": { "oneOf": [ { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "number", - "format": "double" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] - }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "number", - "format": "double" - }, - "start": { - "type": "number", - "format": "double" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" + "description": "A stable down state. 
Non-responsive to incoming messages.", + "type": "string", + "enum": [ + "admin_down" ] }, { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "number", - "format": "double" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" - ] - } - ] - }, - "BinRangefloat": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "number", - "format": "float" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" + "description": "The initial state.", + "type": "string", + "enum": [ + "down" ] }, { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "number", - "format": "float" - }, - "start": { - "type": "number", - "format": "float" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" + "description": "The peer has detected a remote peer in the down state.", + "type": "string", + "enum": [ + "init" ] }, { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "number", - "format": "float" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" + "description": "The peer has detected a remote peer in the up or init state while in the init state.", + "type": "string", + "enum": [ + "up" ] } ] }, - "BinRangeint16": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int16" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] + "BfdStatus": { + "type": "object", + "properties": { + "detection_threshold": { + "type": "integer", + "format": "uint8", + "minimum": 0 }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int16" - }, - "start": { - "type": "integer", - "format": "int16" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] + "local": { + "nullable": true, + "type": "string", + "format": "ip" + }, + "mode": { + "$ref": "#/components/schemas/BfdMode" + }, + "peer": { + "type": "string", + "format": "ip" + }, + "required_rx": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "state": { + "$ref": "#/components/schemas/BfdState" + }, + "switch": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "detection_threshold", + "mode", + "peer", + "required_rx", + "state", + "switch" + ] + }, + "BgpAnnounceSet": { + "description": "Represents a BGP announce set by id. 
The id can be used with other API calls to view and manage the announce set.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" } - }, - "required": [ - "end", - "start", - "type" ] }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "int16" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "name", + "time_created", + "time_modified" + ] + }, + "BgpAnnounceSetCreate": { + "description": "Parameters for creating a named set of BGP announcements.", + "type": "object", + "properties": { + "announcement": { + "description": "The announcements in this set.", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpAnnouncementCreate" + } + }, + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "announcement", + "description", + "name" + ] + }, + "BgpAnnouncement": { + "description": "A BGP announcement tied to an address lot block.", + "type": "object", + "properties": { + "address_lot_block_id": { + "description": "The address block the IP network being announced is drawn from.", + "type": "string", + "format": "uuid" + }, + "announce_set_id": { + "description": "The id of the set this announcement is a part of.", + "type": "string", + "format": "uuid" + }, + "network": { + "description": "The IP network being announced.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" } - }, - "required": [ - "start", - "type" ] } + }, + "required": [ + "address_lot_block_id", + "announce_set_id", + "network" ] }, - "BinRangeint32": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int32" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] + "BgpAnnouncementCreate": { + "description": "A BGP announcement tied to a particular address lot block.", + "type": "object", + "properties": { + "address_lot_block": { + "description": "Address lot this announcement is drawn from.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" } - }, - "required": [ - "end", - "type" ] }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int32" - }, - "start": { - "type": "integer", - "format": "int32" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] + "network": { + "description": "The network being announced.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" } - }, - "required": [ - "end", - "start", - "type" ] + } + }, + "required": [ + "address_lot_block", + "network" + ] + }, + "BgpConfig": { + "description": "A base BGP configuration.", + "type": "object", + "properties": { + "asn": { + "description": "The autonomous system number of this BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0 }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "int32" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" } - }, - "required": [ - "start", - "type" ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "vrf": { + "nullable": true, + "description": "Optional virtual routing and forwarding identifier for this BGP configuration.", + "type": "string" } + }, + "required": [ + "asn", + "description", + "id", + "name", + "time_created", + "time_modified" ] }, - "BinRangeint64": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int64" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] + "BgpConfigCreate": { + "description": "Parameters for creating a BGP configuration. 
This includes and autonomous system number (ASN) and a virtual routing and forwarding (VRF) identifier.", + "type": "object", + "properties": { + "asn": { + "description": "The autonomous system number of this BGP configuration.", + "type": "integer", + "format": "uint32", + "minimum": 0 }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int64" - }, - "start": { - "type": "integer", - "format": "int64" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" - ] + "bgp_announce_set_id": { + "$ref": "#/components/schemas/NameOrId" }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "int64" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "vrf": { + "nullable": true, + "description": "Optional virtual routing and forwarding identifier for this BGP configuration.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" } - }, - "required": [ - "start", - "type" ] } + }, + "required": [ + "asn", + "bgp_announce_set_id", + "description", + "name" ] }, - "BinRangeint8": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int8" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] + "BgpConfigResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpConfig" + } }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int8" - }, - "start": { - "type": "integer", - "format": "int8" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "BgpImportedRouteIpv4": { + "description": "A route imported from a BGP peer.", + "type": "object", + "properties": { + "id": { + "description": "BGP identifier of the originating router.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "nexthop": { + "description": "The nexthop the prefix is reachable through.", + "type": "string", + "format": "ipv4" + }, + "prefix": { + "description": "The destination network prefix.", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Net" } - }, - "required": [ - "end", - "start", - "type" ] }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "int8" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] + "switch": { + "description": "Switch the route is imported 
into.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" } - }, - "required": [ - "start", - "type" ] } + }, + "required": [ + "id", + "nexthop", + "prefix", + "switch" ] }, - "BinRangeuint16": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] + "BgpMessageHistory": {}, + "BgpPeer": { + "description": "A BGP peer configuration for an interface. Includes the set of announcements that will be advertised to the peer identified by `addr`. The `bgp_config` parameter is a reference to global BGP parameters. The `interface_name` indicates what interface the peer should be contacted on.", + "type": "object", + "properties": { + "addr": { + "description": "The address of the host to peer with.", + "type": "string", + "format": "ip" + }, + "allowed_export": { + "description": "Define export policy for a peer.", + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" } - }, - "required": [ - "end", - "type" ] }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "start": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range" - ] + "allowed_import": { + "description": "Define import policy for a peer.", + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" } - }, - "required": [ - "end", - "start", - "type" ] }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] + "bgp_config": { + "description": "The global BGP configuration used for establishing a session with this peer.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" } - }, - "required": [ - "start", - "type" ] + }, + "communities": { + "description": "Include the provided communities in updates sent to the peer.", + "type": "array", + "items": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "connect_retry": { + "description": "How long to to wait between TCP connection retries (seconds).", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "delay_open": { + "description": "How long to delay sending an open request after establishing a TCP session (seconds).", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "enforce_first_as": { + "description": "Enforce that the first AS in paths received from this peer is the peer's AS.", + "type": "boolean" + }, + "hold_time": { + "description": "How long to hold peer connections between keepalives (seconds).", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "idle_hold_time": { + "description": "How long to hold a peer in idle before attempting a new session (seconds).", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "interface_name": { + "description": "The name of interface to peer on. 
This is relative to the port configuration this BGP peer configuration is a part of. For example this value could be phy0 to refer to a primary physical interface. Or it could be vlan47 to refer to a VLAN interface.", + "type": "string" + }, + "keepalive": { + "description": "How often to send keepalive requests (seconds).", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "local_pref": { + "nullable": true, + "description": "Apply a local preference to routes received from this peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "md5_auth_key": { + "nullable": true, + "description": "Use the given key for TCP-MD5 authentication with the peer.", + "type": "string" + }, + "min_ttl": { + "nullable": true, + "description": "Require messages from a peer have a minimum IP time to live field.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "multi_exit_discriminator": { + "nullable": true, + "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "remote_asn": { + "nullable": true, + "description": "Require that a peer has a specified ASN.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "Associate a VLAN ID with a peer.", + "type": "integer", + "format": "uint16", + "minimum": 0 } + }, + "required": [ + "addr", + "allowed_export", + "allowed_import", + "bgp_config", + "communities", + "connect_retry", + "delay_open", + "enforce_first_as", + "hold_time", + "idle_hold_time", + "interface_name", + "keepalive" ] }, - "BinRangeuint32": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "BgpPeerConfig": { + "type": "object", + "properties": { + "peers": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeer" + } + } + }, + "required": [ + "peers" + ] + }, + "BgpPeerState": { + "description": "The current state of a BGP peer.", "oneOf": [ { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] - }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "start": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" + "description": "Initial state. Refuse all incoming BGP connections. 
No resources allocated to peer.", + "type": "string", + "enum": [ + "idle" + ] + }, + { + "description": "Waiting for the TCP connection to be completed.", + "type": "string", + "enum": [ + "connect" + ] + }, + { + "description": "Trying to acquire peer by listening for and accepting a TCP connection.", + "type": "string", + "enum": [ + "active" + ] + }, + { + "description": "Waiting for open message from peer.", + "type": "string", + "enum": [ + "open_sent" + ] + }, + { + "description": "Waiting for keepaliave or notification from peer.", + "type": "string", + "enum": [ + "open_confirm" + ] + }, + { + "description": "Synchronizing with peer.", + "type": "string", + "enum": [ + "session_setup" + ] + }, + { + "description": "Session established. Able to exchange update, notification and keepalive messages with peers.", + "type": "string", + "enum": [ + "established" + ] + } + ] + }, + "BgpPeerStatus": { + "description": "The current status of a BGP peer.", + "type": "object", + "properties": { + "addr": { + "description": "IP address of the peer.", + "type": "string", + "format": "ip" + }, + "local_asn": { + "description": "Local autonomous system number.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "remote_asn": { + "description": "Remote autonomous system number.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "state": { + "description": "State of the peer.", + "allOf": [ + { + "$ref": "#/components/schemas/BgpPeerState" + } + ] + }, + "state_duration_millis": { + "description": "Time of last state change.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "switch": { + "description": "Switch with the peer session.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" + } + ] + } + }, + "required": [ + "addr", + "local_asn", + "remote_asn", + "state", + "state_duration_millis", + "switch" + ] + }, + "BinRangedouble": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "number", + "format": "double" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "number", + "format": "double" + }, + "start": { + "type": "number", + "format": "double" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" ] }, { @@ -10596,9 +10758,8 @@ "type": "object", "properties": { "start": { - "type": "integer", - "format": "uint32", - "minimum": 0 + "type": "number", + "format": "double" }, "type": { "type": "string", @@ -10614,7 +10775,77 @@ } ] }, - "BinRangeuint64": { + "BinRangefloat": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "number", + "format": "float" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "number", + "format": "float" + }, + "start": { + "type": "number", + "format": "float" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "number", + "format": "float" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "BinRangeint16": { "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", "oneOf": [ { @@ -10623,8 +10854,7 @@ "properties": { "end": { "type": "integer", - "format": "uint64", - "minimum": 0 + "format": "int16" }, "type": { "type": "string", @@ -10644,13 +10874,11 @@ "properties": { "end": { "type": "integer", - "format": "uint64", - "minimum": 0 + "format": "int16" }, "start": { "type": "integer", - "format": "uint64", - "minimum": 0 + "format": "int16" }, "type": { "type": "string", @@ -10671,8 +10899,7 @@ "properties": { "start": { "type": "integer", - "format": "uint64", - "minimum": 0 + "format": "int16" }, "type": { "type": "string", @@ -10688,7 +10915,7 @@ } ] }, - "BinRangeuint8": { + "BinRangeint32": { "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", "oneOf": [ { @@ -10697,8 +10924,7 @@ "properties": { "end": { "type": "integer", - "format": "uint8", - "minimum": 0 + "format": "int32" }, "type": { "type": "string", @@ -10718,13 +10944,11 @@ "properties": { "end": { "type": "integer", - "format": "uint8", - "minimum": 0 + "format": "int32" }, "start": { "type": "integer", - "format": "uint8", - "minimum": 0 + "format": "int32" }, "type": { "type": "string", @@ -10745,8 +10969,7 @@ "properties": { "start": { "type": "integer", - "format": "uint8", - "minimum": 0 + "format": "int32" }, "type": { "type": "string", @@ -10762,480 +10985,1269 @@ } ] }, - "Bindouble": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangedouble" - } - ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binfloat": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangefloat" + "BinRangeint64": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] } + }, + "required": [ + "end", + "type" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binint16": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeint16" + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int64" + }, + "start": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] } + }, + "required": [ + "end", + "start", + "type" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binint32": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeint32" + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": 
"integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] } + }, + "required": [ + "start", + "type" ] } - }, - "required": [ - "count", - "range" ] }, - "Binint64": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeint64" + "BinRangeint8": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] } + }, + "required": [ + "end", + "type" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binint8": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeint8" + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int8" + }, + "start": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] } + }, + "required": [ + "end", + "start", + "type" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binuint16": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeuint16" + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] } + }, + "required": [ + "start", + "type" ] } - }, - "required": [ - "count", - "range" ] }, - "Binuint32": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeuint32" + "BinRangeuint16": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] } + }, + "required": [ + "end", + "type" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binuint64": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeuint64" + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range" + ] } + }, + "required": [ + "end", + "start", + "type" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binuint8": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeuint8" + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] } + }, + "required": [ + "start", + "type" ] } - }, - "required": [ - "count", - "range" - ] - }, - "BlockSize": { - "title": "disk block size in bytes", - "type": "integer", - "enum": [ - 512, - 2048, - 4096 ] }, - "ByteCount": { - "description": "Byte count to express memory or storage capacity.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "Certificate": { - "description": "View of a Certificate", - "type": "object", - "properties": { - "description": { - "description": "human-readable free-form text about a resource", - "type": "string" - }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" - }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", - "allOf": [ - { - "$ref": "#/components/schemas/Name" + "BinRangeuint32": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] } + }, + "required": [ + "end", + "type" ] }, - "service": { - "$ref": "#/components/schemas/ServiceUsingCertificate" - }, - "time_created": { - "description": "timestamp when this resource was created", - "type": "string", - "format": "date-time" + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] }, - "time_modified": { - "description": "timestamp when this resource was last modified", - "type": "string", - "format": "date-time" + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] } - }, - "required": [ - "description", - "id", - "name", - "service", - "time_created", - "time_modified" ] }, - "CertificateCreate": { - "description": "Create-time parameters for a `Certificate`", - "type": "object", - "properties": { - "cert": { - "description": "PEM-formatted string containing public certificate chain", - "type": "string" - }, - "description": { - "type": "string" - }, - "key": { - "description": "PEM-formatted string containing private key", - "type": "string" + "BinRangeuint64": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] }, - "name": { - "$ref": "#/components/schemas/Name" + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] }, - "service": { - "description": "The service using this certificate", - "allOf": [ - { - "$ref": "#/components/schemas/ServiceUsingCertificate" + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] } + }, + "required": [ + "start", + "type" ] } - }, - "required": [ - "cert", - "description", - "key", - "name", - "service" ] }, - "CertificateResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/Certificate" - } + "BinRangeuint8": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] } - }, - "required": [ - "items" ] }, - "Cumulativedouble": { - "description": "A cumulative or counter data type.", + "Bindouble": { + "description": "Type storing bin edges and a count of samples within it.", "type": "object", "properties": { - "start_time": { - "type": "string", - "format": "date-time" + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 }, - "value": { - "type": "number", - "format": "double" + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangedouble" + } + ] } }, "required": [ - "start_time", - "value" + "count", + "range" ] }, - "Cumulativefloat": { - "description": "A cumulative or counter data type.", + "Binfloat": { + "description": "Type storing bin edges and a count of samples within it.", "type": "object", "properties": { - "start_time": { - "type": "string", - "format": "date-time" + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 }, - "value": { - "type": "number", - "format": "float" + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangefloat" + } + ] } }, "required": [ - "start_time", - "value" + "count", + "range" ] }, - "Cumulativeint64": { - "description": "A cumulative or counter data type.", + "Binint16": { + "description": "Type storing bin edges and a count of samples within it.", "type": "object", "properties": { - "start_time": { - "type": "string", - "format": "date-time" - }, - "value": { + "count": { + "description": "The total count of samples in this bin.", "type": "integer", - "format": "int64" + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint16" + } + ] } }, "required": [ - "start_time", - "value" + "count", + "range" ] }, - "Cumulativeuint64": { - "description": "A cumulative or counter data type.", + "Binint32": { + "description": "Type storing bin edges and a count of samples within it.", "type": "object", "properties": { - "start_time": { - "type": "string", - "format": 
"date-time" - }, - "value": { - "type": "integer", + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", "format": "uint64", "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint32" + } + ] } }, "required": [ - "start_time", - "value" + "count", + "range" ] }, - "CurrentUser": { - "description": "Info about the current user", + "Binint64": { + "description": "Type storing bin edges and a count of samples within it.", "type": "object", "properties": { - "display_name": { - "description": "Human-readable name that can identify the user", + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint64" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binint8": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint8" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint16": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint16" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint32": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint32" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint64": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint64" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint8": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint8" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "BlockSize": { + "title": "disk block size in bytes", + "type": "integer", + "enum": [ + 512, + 2048, + 4096 + ] + }, + "ByteCount": { + "description": 
"Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "Certificate": { + "description": "View of a Certificate", + "type": "object", + "properties": { + "cert": { + "description": "PEM-formatted string containing public certificate chain", "type": "string" }, - "id": { - "type": "string", - "format": "uuid" + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" }, - "silo_id": { - "description": "Uuid of the silo to which this user belongs", + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", "type": "string", "format": "uuid" }, - "silo_name": { - "description": "Name of the silo to which this user belongs.", + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ { "$ref": "#/components/schemas/Name" } ] + }, + "service": { + "description": "The service using this certificate", + "allOf": [ + { + "$ref": "#/components/schemas/ServiceUsingCertificate" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" } }, "required": [ - "display_name", + "cert", + "description", "id", - "silo_id", - "silo_name" + "name", + "service", + "time_created", + "time_modified" ] }, - "Datum": { - "description": "A `Datum` is a single sampled data point from a metric.", - "oneOf": [ + "CertificateCreate": { + "description": "Create-time parameters for a `Certificate`", + "type": "object", + "properties": { + "cert": { + "description": "PEM-formatted string containing public certificate chain", + "type": "string" + }, + "description": { + "type": "string" + }, + "key": { + "description": "PEM-formatted string containing private key", + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "service": { + "description": "The service using this certificate", + "allOf": [ + { + "$ref": "#/components/schemas/ServiceUsingCertificate" + } + ] + } + }, + "required": [ + "cert", + "description", + "key", + "name", + "service" + ] + }, + "CertificateResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Certificate" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Cumulativedouble": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "number", + "format": "double" + } + }, + "required": [ + "start_time", + "value" + ] + }, + "Cumulativefloat": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "number", + "format": "float" + } + }, + "required": [ + "start_time", + "value" + ] + }, + "Cumulativeint64": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "integer", + 
"format": "int64" + } + }, + "required": [ + "start_time", + "value" + ] + }, + "Cumulativeuint64": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "start_time", + "value" + ] + }, + "CurrentUser": { + "description": "Info about the current user", + "type": "object", + "properties": { + "display_name": { + "description": "Human-readable name that can identify the user", + "type": "string" + }, + "id": { + "type": "string", + "format": "uuid" + }, + "silo_id": { + "description": "Uuid of the silo to which this user belongs", + "type": "string", + "format": "uuid" + }, + "silo_name": { + "description": "Name of the silo to which this user belongs.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + }, + "required": [ + "display_name", + "id", + "silo_id", + "silo_name" + ] + }, + "Datum": { + "description": "A `Datum` is a single sampled data point from a metric.", + "oneOf": [ + { + "type": "object", + "properties": { + "datum": { + "type": "boolean" + }, + "type": { + "type": "string", + "enum": [ + "bool" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "i8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "i16" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u16" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string", + "enum": [ + "i32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "i64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "number", + "format": "float" + }, + "type": { + "type": "string", + "enum": [ + "f32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "number", + "format": "double" + }, + "type": { + "type": "string", + "enum": [ + "f64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": 
"string" + }, + "type": { + "type": "string", + "enum": [ + "string" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "type": { + "type": "string", + "enum": [ + "bytes" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativeint64" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_i64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativeuint64" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_u64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativefloat" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_f32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativedouble" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_f64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramint8" + }, + "type": { + "type": "string", + "enum": [ + "histogram_i8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, { "type": "object", "properties": { "datum": { - "type": "boolean" + "$ref": "#/components/schemas/Histogramuint8" }, "type": { "type": "string", "enum": [ - "bool" + "histogram_u8" ] } }, @@ -11248,13 +12260,12 @@ "type": "object", "properties": { "datum": { - "type": "integer", - "format": "int8" + "$ref": "#/components/schemas/Histogramint16" }, "type": { "type": "string", "enum": [ - "i8" + "histogram_i16" ] } }, @@ -11267,14 +12278,12 @@ "type": "object", "properties": { "datum": { - "type": "integer", - "format": "uint8", - "minimum": 0 + "$ref": "#/components/schemas/Histogramuint16" }, "type": { "type": "string", "enum": [ - "u8" + "histogram_u16" ] } }, @@ -11287,13 +12296,12 @@ "type": "object", "properties": { "datum": { - "type": "integer", - "format": "int16" + "$ref": "#/components/schemas/Histogramint32" }, "type": { "type": "string", "enum": [ - "i16" + "histogram_i32" ] } }, @@ -11306,14 +12314,12 @@ "type": "object", "properties": { "datum": { - "type": "integer", - "format": "uint16", - "minimum": 0 + "$ref": "#/components/schemas/Histogramuint32" }, "type": { "type": "string", "enum": [ - "u16" + "histogram_u32" ] } }, @@ -11326,13 +12332,12 @@ "type": "object", "properties": { "datum": { - "type": "integer", - "format": "int32" + "$ref": "#/components/schemas/Histogramint64" }, "type": { "type": "string", "enum": [ - "i32" + "histogram_i64" ] } }, @@ -11345,14 +12350,12 @@ "type": "object", "properties": { "datum": { - "type": "integer", - "format": "uint32", - "minimum": 0 + "$ref": "#/components/schemas/Histogramuint64" }, "type": { "type": "string", "enum": [ - "u32" + "histogram_u64" ] } }, @@ -11365,13 +12368,12 @@ "type": "object", "properties": { "datum": { - "type": "integer", - "format": "int64" + "$ref": "#/components/schemas/Histogramfloat" }, "type": { "type": "string", "enum": [ - "i64" + "histogram_f32" ] } }, @@ -11384,14 +12386,12 @@ "type": "object", "properties": { "datum": { - "type": "integer", - "format": "uint64", - "minimum": 0 + 
"$ref": "#/components/schemas/Histogramdouble" }, "type": { "type": "string", "enum": [ - "u64" + "histogram_f64" ] } }, @@ -11404,13 +12404,12 @@ "type": "object", "properties": { "datum": { - "type": "number", - "format": "float" + "$ref": "#/components/schemas/MissingDatum" }, "type": { "type": "string", "enum": [ - "f32" + "missing" ] } }, @@ -11418,578 +12417,870 @@ "datum", "type" ] + } + ] + }, + "DatumType": { + "description": "The type of an individual datum of a metric.", + "type": "string", + "enum": [ + "bool", + "i8", + "u8", + "i16", + "u16", + "i32", + "u32", + "i64", + "u64", + "f32", + "f64", + "string", + "bytes", + "cumulative_i64", + "cumulative_u64", + "cumulative_f32", + "cumulative_f64", + "histogram_i8", + "histogram_u8", + "histogram_i16", + "histogram_u16", + "histogram_i32", + "histogram_u32", + "histogram_i64", + "histogram_u64", + "histogram_f32", + "histogram_f64" + ] + }, + "DerEncodedKeyPair": { + "type": "object", + "properties": { + "private_key": { + "description": "request signing private key (base64 encoded der file)", + "type": "string" + }, + "public_cert": { + "description": "request signing public certificate (base64 encoded der file)", + "type": "string" + } + }, + "required": [ + "private_key", + "public_cert" + ] + }, + "DeviceAccessTokenRequest": { + "type": "object", + "properties": { + "client_id": { + "type": "string", + "format": "uuid" + }, + "device_code": { + "type": "string" + }, + "grant_type": { + "type": "string" + } + }, + "required": [ + "client_id", + "device_code", + "grant_type" + ] + }, + "DeviceAuthRequest": { + "type": "object", + "properties": { + "client_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "client_id" + ] + }, + "DeviceAuthVerify": { + "type": "object", + "properties": { + "user_code": { + "type": "string" + } + }, + "required": [ + "user_code" + ] + }, + "Digest": { + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "sha256" + ] + }, + "value": { + "type": "string" + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "Disk": { + "description": "View of a Disk", + "type": "object", + "properties": { + "block_size": { + "$ref": "#/components/schemas/ByteCount" + }, + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "device_path": { + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "image_id": { + "nullable": true, + "description": "ID of image from which disk was created, if any", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "project_id": { + "type": "string", + "format": "uuid" + }, + "size": { + "$ref": "#/components/schemas/ByteCount" + }, + "snapshot_id": { + "nullable": true, + "description": "ID of snapshot from which disk was created, if any", + "type": "string", + "format": "uuid" + }, + "state": { + "$ref": "#/components/schemas/DiskState" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "block_size", + "description", + "device_path", + "id", + 
"name", + "project_id", + "size", + "state", + "time_created", + "time_modified" + ] + }, + "DiskCreate": { + "description": "Create-time parameters for a `Disk`", + "type": "object", + "properties": { + "description": { + "type": "string" }, - { - "type": "object", - "properties": { - "datum": { - "type": "number", - "format": "double" - }, - "type": { - "type": "string", - "enum": [ - "f64" - ] + "disk_source": { + "description": "initial source for this disk", + "allOf": [ + { + "$ref": "#/components/schemas/DiskSource" } - }, - "required": [ - "datum", - "type" ] }, - { - "type": "object", - "properties": { - "datum": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "string" - ] + "name": { + "$ref": "#/components/schemas/Name" + }, + "size": { + "description": "total size of the Disk in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" } - }, - "required": [ - "datum", - "type" ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "array", - "items": { - "type": "integer", - "format": "uint8", - "minimum": 0 - } - }, - "type": { - "type": "string", - "enum": [ - "bytes" - ] + } + }, + "required": [ + "description", + "disk_source", + "name", + "size" + ] + }, + "DiskPath": { + "type": "object", + "properties": { + "disk": { + "description": "Name or ID of the disk", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" } - }, - "required": [ - "datum", - "type" ] + } + }, + "required": [ + "disk" + ] + }, + "DiskResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Disk" + } }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "DiskSource": { + "description": "Different sources for a disk", + "oneOf": [ { + "description": "Create a blank disk", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativeint64" + "block_size": { + "description": "size of blocks for this Disk. 
valid values are: 512, 2048, or 4096", + "allOf": [ + { + "$ref": "#/components/schemas/BlockSize" + } + ] }, "type": { "type": "string", "enum": [ - "cumulative_i64" + "blank" ] } }, "required": [ - "datum", + "block_size", "type" ] }, { + "description": "Create a disk from a disk snapshot", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativeuint64" + "snapshot_id": { + "type": "string", + "format": "uuid" }, "type": { "type": "string", "enum": [ - "cumulative_u64" + "snapshot" ] } }, "required": [ - "datum", + "snapshot_id", "type" ] }, { + "description": "Create a disk from an image", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativefloat" + "image_id": { + "type": "string", + "format": "uuid" }, "type": { "type": "string", "enum": [ - "cumulative_f32" + "image" ] } }, "required": [ - "datum", + "image_id", "type" ] }, { + "description": "Create a blank disk that will accept bulk writes or pull blocks from an external source.", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativedouble" + "block_size": { + "$ref": "#/components/schemas/BlockSize" }, "type": { "type": "string", "enum": [ - "cumulative_f64" + "importing_blocks" ] } }, "required": [ - "datum", + "block_size", "type" ] - }, + } + ] + }, + "DiskState": { + "description": "State of a Disk", + "oneOf": [ { + "description": "Disk is being initialized", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint8" - }, - "type": { + "state": { "type": "string", "enum": [ - "histogram_i8" + "creating" ] } }, "required": [ - "datum", - "type" + "state" ] }, { + "description": "Disk is ready but detached from any Instance", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramuint8" - }, - "type": { + "state": { "type": "string", "enum": [ - "histogram_u8" + "detached" ] } }, "required": [ - "datum", - "type" + "state" ] }, { + "description": "Disk is ready to receive blocks from an external source", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint16" - }, - "type": { + "state": { "type": "string", "enum": [ - "histogram_i16" + "import_ready" ] } }, "required": [ - "datum", - "type" + "state" ] }, { + "description": "Disk is importing blocks from a URL", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramuint16" - }, - "type": { + "state": { "type": "string", "enum": [ - "histogram_u16" + "importing_from_url" ] } }, "required": [ - "datum", - "type" + "state" ] }, { + "description": "Disk is importing blocks from bulk writes", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint32" - }, - "type": { + "state": { "type": "string", "enum": [ - "histogram_i32" + "importing_from_bulk_writes" ] } }, "required": [ - "datum", - "type" + "state" ] }, { + "description": "Disk is being finalized to state Detached", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramuint32" - }, - "type": { + "state": { "type": "string", "enum": [ - "histogram_u32" + "finalizing" ] } }, "required": [ - "datum", - "type" + "state" ] }, { + "description": "Disk is undergoing maintenance", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint64" - }, - "type": { + "state": { "type": "string", "enum": [ - "histogram_i64" + "maintenance" ] } }, "required": [ - "datum", - "type" + "state" ] }, { + 
"description": "Disk is being attached to the given Instance", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramuint64" + "instance": { + "type": "string", + "format": "uuid" }, - "type": { + "state": { "type": "string", "enum": [ - "histogram_u64" + "attaching" ] } }, - "required": [ - "datum", - "type" + "required": [ + "instance", + "state" ] }, { + "description": "Disk is attached to the given Instance", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramfloat" + "instance": { + "type": "string", + "format": "uuid" }, - "type": { + "state": { "type": "string", "enum": [ - "histogram_f32" + "attached" ] } }, "required": [ - "datum", - "type" + "instance", + "state" ] }, { + "description": "Disk is being detached from the given Instance", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramdouble" + "instance": { + "type": "string", + "format": "uuid" }, - "type": { + "state": { "type": "string", "enum": [ - "histogram_f64" + "detaching" ] } }, "required": [ - "datum", - "type" + "instance", + "state" ] }, { + "description": "Disk has been destroyed", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/MissingDatum" - }, - "type": { + "state": { "type": "string", "enum": [ - "missing" + "destroyed" ] } }, "required": [ - "datum", - "type" + "state" ] - } - ] - }, - "DatumType": { - "description": "The type of an individual datum of a metric.", - "type": "string", - "enum": [ - "bool", - "i8", - "u8", - "i16", - "u16", - "i32", - "u32", - "i64", - "u64", - "f32", - "f64", - "string", - "bytes", - "cumulative_i64", - "cumulative_u64", - "cumulative_f32", - "cumulative_f64", - "histogram_i8", - "histogram_u8", - "histogram_i16", - "histogram_u16", - "histogram_i32", - "histogram_u32", - "histogram_i64", - "histogram_u64", - "histogram_f32", - "histogram_f64" - ] - }, - "DerEncodedKeyPair": { - "type": "object", - "properties": { - "private_key": { - "description": "request signing private key (base64 encoded der file)", - "type": "string" - }, - "public_cert": { - "description": "request signing public certificate (base64 encoded der file)", - "type": "string" - } - }, - "required": [ - "private_key", - "public_cert" - ] - }, - "DeviceAccessTokenRequest": { - "type": "object", - "properties": { - "client_id": { - "type": "string", - "format": "uuid" - }, - "device_code": { - "type": "string" }, - "grant_type": { - "type": "string" - } - }, - "required": [ - "client_id", - "device_code", - "grant_type" - ] - }, - "DeviceAuthRequest": { - "type": "object", - "properties": { - "client_id": { - "type": "string", - "format": "uuid" - } - }, - "required": [ - "client_id" - ] - }, - "DeviceAuthVerify": { - "type": "object", - "properties": { - "user_code": { - "type": "string" - } - }, - "required": [ - "user_code" - ] - }, - "Digest": { - "oneOf": [ { + "description": "Disk is unavailable", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "sha256" + "faulted" ] - }, - "value": { - "type": "string" } }, "required": [ - "type", - "value" + "state" ] } ] }, - "Disk": { - "description": "View of a Disk", + "Distributiondouble": { + "description": "A distribution is a sequence of bins and counts in those bins, and some statistical information tracked to compute the mean, standard deviation, and quantile estimates.\n\nMin, max, and the p-* quantiles are treated as optional due to the possibility of distribution operations, 
like subtraction.", "type": "object", "properties": { - "block_size": { - "$ref": "#/components/schemas/ByteCount" - }, - "description": { - "description": "human-readable free-form text about a resource", - "type": "string" + "bins": { + "type": "array", + "items": { + "type": "number", + "format": "double" + } }, - "device_path": { - "type": "string" + "counts": { + "type": "array", + "items": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" + "max": { + "nullable": true, + "type": "number", + "format": "double" }, - "image_id": { + "min": { "nullable": true, - "description": "ID of image from which disk was created, if any", - "type": "string", - "format": "uuid" + "type": "number", + "format": "double" }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", + "p50": { + "nullable": true, "allOf": [ { - "$ref": "#/components/schemas/Name" + "$ref": "#/components/schemas/Quantile" } ] }, - "project_id": { - "type": "string", - "format": "uuid" - }, - "size": { - "$ref": "#/components/schemas/ByteCount" - }, - "snapshot_id": { + "p90": { "nullable": true, - "description": "ID of snapshot from which disk was created, if any", - "type": "string", - "format": "uuid" + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] }, - "state": { - "$ref": "#/components/schemas/DiskState" + "p99": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] }, - "time_created": { - "description": "timestamp when this resource was created", - "type": "string", - "format": "date-time" + "squared_mean": { + "type": "number", + "format": "double" }, - "time_modified": { - "description": "timestamp when this resource was last modified", - "type": "string", - "format": "date-time" + "sum_of_samples": { + "type": "number", + "format": "double" } }, "required": [ - "block_size", - "description", - "device_path", - "id", - "name", - "project_id", - "size", - "state", - "time_created", - "time_modified" + "bins", + "counts", + "squared_mean", + "sum_of_samples" ] }, - "DiskCreate": { - "description": "Create-time parameters for a `Disk`", + "Distributionint64": { + "description": "A distribution is a sequence of bins and counts in those bins, and some statistical information tracked to compute the mean, standard deviation, and quantile estimates.\n\nMin, max, and the p-* quantiles are treated as optional due to the possibility of distribution operations, like subtraction.", "type": "object", "properties": { - "description": { - "type": "string" + "bins": { + "type": "array", + "items": { + "type": "integer", + "format": "int64" + } + }, + "counts": { + "type": "array", + "items": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "max": { + "nullable": true, + "type": "integer", + "format": "int64" }, - "disk_source": { - "description": "initial source for this disk", + "min": { + "nullable": true, + "type": "integer", + "format": "int64" + }, + "p50": { + "nullable": true, "allOf": [ { - "$ref": "#/components/schemas/DiskSource" + "$ref": "#/components/schemas/Quantile" } ] }, - "name": { - "$ref": "#/components/schemas/Name" + "p90": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] }, - "size": { - "description": "total size of the Disk in bytes", + "p99": { + "nullable": true, "allOf": [ { - "$ref": 
"#/components/schemas/ByteCount" + "$ref": "#/components/schemas/Quantile" } ] + }, + "squared_mean": { + "type": "number", + "format": "double" + }, + "sum_of_samples": { + "type": "integer", + "format": "int64" } }, "required": [ - "description", - "disk_source", - "name", - "size" + "bins", + "counts", + "squared_mean", + "sum_of_samples" ] }, - "DiskPath": { + "EphemeralIpCreate": { + "description": "Parameters for creating an ephemeral IP address for an instance.", "type": "object", "properties": { - "disk": { - "description": "Name or ID of the disk", + "pool": { + "nullable": true, + "description": "Name or ID of the IP pool used to allocate an address", "allOf": [ { "$ref": "#/components/schemas/NameOrId" } ] } + } + }, + "Error": { + "description": "Error information from a response.", + "type": "object", + "properties": { + "error_code": { + "type": "string" + }, + "message": { + "type": "string" + }, + "request_id": { + "type": "string" + } }, "required": [ - "disk" + "message", + "request_id" ] }, - "DiskResultsPage": { + "ExternalIp": { + "oneOf": [ + { + "type": "object", + "properties": { + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "type": "string", + "enum": [ + "ephemeral" + ] + } + }, + "required": [ + "ip", + "kind" + ] + }, + { + "description": "A Floating IP is a well-known IP address which can be attached and detached from instances.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "instance_id": { + "nullable": true, + "description": "The ID of the instance that this Floating IP is attached to, if it is presently in use.", + "type": "string", + "format": "uuid" + }, + "ip": { + "description": "The IP address held by this resource.", + "type": "string", + "format": "ip" + }, + "ip_pool_id": { + "description": "The ID of the IP pool this resource belongs to.", + "type": "string", + "format": "uuid" + }, + "kind": { + "type": "string", + "enum": [ + "floating" + ] + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "project_id": { + "description": "The project this resource exists within.", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "ip", + "ip_pool_id", + "kind", + "name", + "project_id", + "time_created", + "time_modified" + ] + } + ] + }, + "ExternalIpCreate": { + "description": "Parameters for creating an external IP address for instances.", + "oneOf": [ + { + "description": "An IP address providing both inbound and outbound access. The address is automatically-assigned from the provided IP Pool, or the current silo's default pool if not specified.", + "type": "object", + "properties": { + "pool": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "ephemeral" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "An IP address providing both inbound and outbound access. 
The address is an existing floating IP object assigned to the current project.\n\nThe floating IP must not be in use by another instance or service.", + "type": "object", + "properties": { + "floating_ip": { + "$ref": "#/components/schemas/NameOrId" + }, + "type": { + "type": "string", + "enum": [ + "floating" + ] + } + }, + "required": [ + "floating_ip", + "type" + ] + } + ] + }, + "ExternalIpResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -11997,7 +13288,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/Disk" + "$ref": "#/components/schemas/ExternalIp" } }, "next_page": { @@ -12010,520 +13301,480 @@ "items" ] }, - "DiskSource": { - "description": "Different sources for a disk", + "FieldSchema": { + "description": "The name and type information for a field of a timeseries schema.", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "field_type": { + "$ref": "#/components/schemas/FieldType" + }, + "name": { + "type": "string" + }, + "source": { + "$ref": "#/components/schemas/FieldSource" + } + }, + "required": [ + "description", + "field_type", + "name", + "source" + ] + }, + "FieldSource": { + "description": "The source from which a field is derived, the target or metric.", + "type": "string", + "enum": [ + "target", + "metric" + ] + }, + "FieldType": { + "description": "The `FieldType` identifies the data type of a target or metric field.", + "type": "string", + "enum": [ + "string", + "i8", + "u8", + "i16", + "u16", + "i32", + "u32", + "i64", + "u64", + "ip_addr", + "uuid", + "bool" + ] + }, + "FieldValue": { + "description": "The `FieldValue` contains the value of a target or metric field.", "oneOf": [ { - "description": "Create a blank disk", "type": "object", "properties": { - "block_size": { - "description": "size of blocks for this Disk. 
valid values are: 512, 2048, or 4096", - "allOf": [ - { - "$ref": "#/components/schemas/BlockSize" - } - ] - }, "type": { "type": "string", "enum": [ - "blank" + "string" ] + }, + "value": { + "type": "string" } }, "required": [ - "block_size", - "type" + "type", + "value" ] }, { - "description": "Create a disk from a disk snapshot", "type": "object", "properties": { - "snapshot_id": { - "type": "string", - "format": "uuid" - }, "type": { "type": "string", "enum": [ - "snapshot" + "i8" ] + }, + "value": { + "type": "integer", + "format": "int8" } }, "required": [ - "snapshot_id", - "type" + "type", + "value" ] }, { - "description": "Create a disk from an image", "type": "object", "properties": { - "image_id": { - "type": "string", - "format": "uuid" - }, "type": { "type": "string", "enum": [ - "image" + "u8" ] + }, + "value": { + "type": "integer", + "format": "uint8", + "minimum": 0 } }, "required": [ - "image_id", - "type" + "type", + "value" ] }, { - "description": "Create a blank disk that will accept bulk writes or pull blocks from an external source.", "type": "object", "properties": { - "block_size": { - "$ref": "#/components/schemas/BlockSize" - }, "type": { "type": "string", "enum": [ - "importing_blocks" - ] - } - }, - "required": [ - "block_size", - "type" - ] - } - ] - }, - "DiskState": { - "description": "State of a Disk", - "oneOf": [ - { - "description": "Disk is being initialized", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "creating" + "i16" ] + }, + "value": { + "type": "integer", + "format": "int16" } }, "required": [ - "state" + "type", + "value" ] }, { - "description": "Disk is ready but detached from any Instance", "type": "object", "properties": { - "state": { + "type": { "type": "string", "enum": [ - "detached" + "u16" ] + }, + "value": { + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ - "state" + "type", + "value" ] }, { - "description": "Disk is ready to receive blocks from an external source", "type": "object", "properties": { - "state": { + "type": { "type": "string", "enum": [ - "import_ready" + "i32" ] + }, + "value": { + "type": "integer", + "format": "int32" } }, "required": [ - "state" + "type", + "value" ] }, { - "description": "Disk is importing blocks from a URL", "type": "object", "properties": { - "state": { + "type": { "type": "string", "enum": [ - "importing_from_url" + "u32" ] + }, + "value": { + "type": "integer", + "format": "uint32", + "minimum": 0 } }, "required": [ - "state" + "type", + "value" ] }, { - "description": "Disk is importing blocks from bulk writes", "type": "object", "properties": { - "state": { + "type": { "type": "string", "enum": [ - "importing_from_bulk_writes" + "i64" ] + }, + "value": { + "type": "integer", + "format": "int64" } }, "required": [ - "state" + "type", + "value" ] }, { - "description": "Disk is being finalized to state Detached", "type": "object", "properties": { - "state": { + "type": { "type": "string", "enum": [ - "finalizing" + "u64" ] + }, + "value": { + "type": "integer", + "format": "uint64", + "minimum": 0 } }, "required": [ - "state" + "type", + "value" ] }, { - "description": "Disk is undergoing maintenance", "type": "object", "properties": { - "state": { + "type": { "type": "string", "enum": [ - "maintenance" + "ip_addr" ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is being attached to the given Instance", - "type": "object", - "properties": { - "instance": { - "type": "string", - "format": "uuid" }, - 
"state": { + "value": { "type": "string", - "enum": [ - "attaching" - ] + "format": "ip" } }, "required": [ - "instance", - "state" + "type", + "value" ] }, { - "description": "Disk is attached to the given Instance", "type": "object", "properties": { - "instance": { - "type": "string", - "format": "uuid" - }, - "state": { + "type": { "type": "string", "enum": [ - "attached" + "uuid" ] - } - }, - "required": [ - "instance", - "state" - ] - }, - { - "description": "Disk is being detached from the given Instance", - "type": "object", - "properties": { - "instance": { - "type": "string", - "format": "uuid" }, - "state": { + "value": { "type": "string", - "enum": [ - "detaching" - ] + "format": "uuid" } }, "required": [ - "instance", - "state" + "type", + "value" ] }, { - "description": "Disk has been destroyed", "type": "object", "properties": { - "state": { + "type": { "type": "string", "enum": [ - "destroyed" + "bool" ] + }, + "value": { + "type": "boolean" } }, "required": [ - "state" + "type", + "value" ] - }, - { - "description": "Disk is unavailable", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "faulted" - ] + } + ] + }, + "FinalizeDisk": { + "description": "Parameters for finalizing a disk", + "type": "object", + "properties": { + "snapshot_name": { + "nullable": true, + "description": "If specified a snapshot of the disk will be created with the given name during finalization. If not specified, a snapshot for the disk will _not_ be created. A snapshot can be manually created once the disk transitions into the `Detached` state.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" } - }, - "required": [ - "state" ] } + } + }, + "FleetRole": { + "type": "string", + "enum": [ + "admin", + "collaborator", + "viewer" ] }, - "Distributiondouble": { - "description": "A distribution is a sequence of bins and counts in those bins.", + "FleetRolePolicy": { + "description": "Policy for a particular resource\n\nNote that the Policy only describes access granted explicitly for this resource. The policies of parent resources can also cause a user to have access to this resource.", "type": "object", "properties": { - "bins": { - "type": "array", - "items": { - "type": "number", - "format": "double" - } - }, - "counts": { + "role_assignments": { + "description": "Roles directly assigned on this resource", "type": "array", "items": { - "type": "integer", - "format": "uint64", - "minimum": 0 + "$ref": "#/components/schemas/FleetRoleRoleAssignment" } } }, "required": [ - "bins", - "counts" + "role_assignments" ] }, - "Distributionint64": { - "description": "A distribution is a sequence of bins and counts in those bins.", + "FleetRoleRoleAssignment": { + "description": "Describes the assignment of a particular role on a particular resource to a particular identity (user, group, etc.)\n\nThe resource is not part of this structure. 
Rather, `RoleAssignment`s are put into a `Policy` and that Policy is applied to a particular resource.", "type": "object", "properties": { - "bins": { - "type": "array", - "items": { - "type": "integer", - "format": "int64" - } + "identity_id": { + "type": "string", + "format": "uuid" }, - "counts": { - "type": "array", - "items": { - "type": "integer", - "format": "uint64", - "minimum": 0 - } + "identity_type": { + "$ref": "#/components/schemas/IdentityType" + }, + "role_name": { + "$ref": "#/components/schemas/FleetRole" } }, "required": [ - "bins", - "counts" + "identity_id", + "identity_type", + "role_name" ] }, - "EphemeralIpCreate": { - "description": "Parameters for creating an ephemeral IP address for an instance.", + "FloatingIp": { + "description": "A Floating IP is a well-known IP address which can be attached and detached from instances.", "type": "object", "properties": { - "pool": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "instance_id": { "nullable": true, - "description": "Name or ID of the IP pool used to allocate an address", + "description": "The ID of the instance that this Floating IP is attached to, if it is presently in use.", + "type": "string", + "format": "uuid" + }, + "ip": { + "description": "The IP address held by this resource.", + "type": "string", + "format": "ip" + }, + "ip_pool_id": { + "description": "The ID of the IP pool this resource belongs to.", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ { - "$ref": "#/components/schemas/NameOrId" + "$ref": "#/components/schemas/Name" } ] - } - } - }, - "Error": { - "description": "Error information from a response.", - "type": "object", - "properties": { - "error_code": { - "type": "string" }, - "message": { - "type": "string" + "project_id": { + "description": "The project this resource exists within.", + "type": "string", + "format": "uuid" }, - "request_id": { - "type": "string" + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" } }, "required": [ - "message", - "request_id" + "description", + "id", + "ip", + "ip_pool_id", + "name", + "project_id", + "time_created", + "time_modified" ] }, - "ExternalIp": { - "oneOf": [ - { - "type": "object", - "properties": { - "ip": { - "type": "string", - "format": "ip" - }, - "kind": { - "type": "string", - "enum": [ - "ephemeral" - ] + "FloatingIpAttach": { + "description": "Parameters for attaching a floating IP address to another resource", + "type": "object", + "properties": { + "kind": { + "description": "The type of `parent`'s resource", + "allOf": [ + { + "$ref": "#/components/schemas/FloatingIpParentKind" } - }, - "required": [ - "ip", - "kind" ] }, - { - "description": "A Floating IP is a well-known IP address which can be attached and detached from instances.", - "type": "object", - "properties": { - "description": { - "description": "human-readable free-form text about a resource", - "type": "string" - }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" - }, - 
"instance_id": { - "nullable": true, - "description": "The ID of the instance that this Floating IP is attached to, if it is presently in use.", - "type": "string", - "format": "uuid" - }, - "ip": { - "description": "The IP address held by this resource.", - "type": "string", - "format": "ip" - }, - "ip_pool_id": { - "description": "The ID of the IP pool this resource belongs to.", - "type": "string", - "format": "uuid" - }, - "kind": { - "type": "string", - "enum": [ - "floating" - ] - }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", - "allOf": [ - { - "$ref": "#/components/schemas/Name" - } - ] - }, - "project_id": { - "description": "The project this resource exists within.", - "type": "string", - "format": "uuid" - }, - "time_created": { - "description": "timestamp when this resource was created", - "type": "string", - "format": "date-time" - }, - "time_modified": { - "description": "timestamp when this resource was last modified", - "type": "string", - "format": "date-time" + "parent": { + "description": "Name or ID of the resource that this IP address should be attached to", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" } - }, - "required": [ - "description", - "id", - "ip", - "ip_pool_id", - "kind", - "name", - "project_id", - "time_created", - "time_modified" ] } + }, + "required": [ + "kind", + "parent" ] }, - "ExternalIpCreate": { - "description": "Parameters for creating an external IP address for instances.", - "oneOf": [ - { - "description": "An IP address providing both inbound and outbound access. The address is automatically-assigned from the provided IP Pool, or the current silo's default pool if not specified.", - "type": "object", - "properties": { - "pool": { - "nullable": true, - "allOf": [ - { - "$ref": "#/components/schemas/NameOrId" - } - ] - }, - "type": { - "type": "string", - "enum": [ - "ephemeral" - ] - } - }, - "required": [ - "type" - ] + "FloatingIpCreate": { + "description": "Parameters for creating a new floating IP address for instances.", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "ip": { + "nullable": true, + "description": "An IP address to reserve for use as a floating IP. This field is optional: when not set, an address will be automatically chosen from `pool`. If set, then the IP must be available in the resolved `pool`.", + "type": "string", + "format": "ip" }, - { - "description": "An IP address providing both inbound and outbound access. The address is an existing floating IP object assigned to the current project.\n\nThe floating IP must not be in use by another instance or service.", - "type": "object", - "properties": { - "floating_ip": { + "name": { + "$ref": "#/components/schemas/Name" + }, + "pool": { + "nullable": true, + "description": "The parent IP pool that a floating IP is pulled from. 
If unset, the default pool is selected.", + "allOf": [ + { "$ref": "#/components/schemas/NameOrId" - }, - "type": { - "type": "string", - "enum": [ - "floating" - ] } - }, - "required": [ - "floating_ip", - "type" ] } + }, + "required": [ + "description", + "name" ] }, - "ExternalIpResultsPage": { + "FloatingIpParentKind": { + "description": "The type of resource that a floating IP is attached to", + "type": "string", + "enum": [ + "instance" + ] + }, + "FloatingIpResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -12531,7 +13782,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/ExternalIp" + "$ref": "#/components/schemas/FloatingIp" } }, "next_page": { @@ -12544,539 +13795,935 @@ "items" ] }, - "FieldSchema": { - "description": "The name and type information for a field of a timeseries schema.", + "FloatingIpUpdate": { + "description": "Updateable identity-related parameters", "type": "object", "properties": { - "field_type": { - "$ref": "#/components/schemas/FieldType" + "description": { + "nullable": true, + "type": "string" }, "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, + "Group": { + "description": "View of a Group", + "type": "object", + "properties": { + "display_name": { + "description": "Human-readable name that can identify the group", "type": "string" }, - "source": { - "$ref": "#/components/schemas/FieldSource" + "id": { + "type": "string", + "format": "uuid" + }, + "silo_id": { + "description": "Uuid of the silo to which this group belongs", + "type": "string", + "format": "uuid" } }, "required": [ - "field_type", - "name", - "source" - ] - }, - "FieldSource": { - "description": "The source from which a field is derived, the target or metric.", - "type": "string", - "enum": [ - "target", - "metric" + "display_name", + "id", + "silo_id" ] }, - "FieldType": { - "description": "The `FieldType` identifies the data type of a target or metric field.", - "type": "string", - "enum": [ - "string", - "i8", - "u8", - "i16", - "u16", - "i32", - "u32", - "i64", - "u64", - "ip_addr", - "uuid", - "bool" + "GroupResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Group" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" ] }, - "FieldValue": { - "description": "The `FieldValue` contains the value of a target or metric field.", - "oneOf": [ - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "string" - ] - }, - "value": { - "type": "string" + "Histogramdouble": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Bindouble" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "number", + "format": "double" + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "number", + "format": "double" + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } - }, - "required": [ - "type", - "value" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "i8" - ] - }, - "value": { - "type": "integer", - "format": "int8" + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } - }, - "required": [ - "type", - "value" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "u8" - ] - }, - "value": { - "type": "integer", - "format": "uint8", - "minimum": 0 + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } - }, - "required": [ - "type", - "value" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "i16" - ] - }, - "value": { - "type": "integer", - "format": "int16" + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "number", + "format": "double" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramfloat": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binfloat" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "number", + "format": "float" + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "number", + "format": "float" + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } - }, - "required": [ - "type", - "value" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "u16" - ] - }, - "value": { - "type": "integer", - "format": "uint16", - "minimum": 0 + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } - }, - "required": [ - "type", - "value" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "i32" - ] - }, - "value": { - "type": "integer", - "format": "int32" + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "number", + "format": "double" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramint16": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint16" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "int16" + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "int16" + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } - }, - "required": [ - "type", - "value" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "u32" - ] - }, - "value": { - "type": "integer", - "format": "uint32", - "minimum": 0 + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } - }, - "required": [ - "type", - "value" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "i64" - ] - }, - "value": { - "type": "integer", - "format": "int64" + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } - }, - "required": [ - "type", - "value" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "u64" - ] - }, - "value": { - "type": "integer", - "format": "uint64", - "minimum": 0 + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramint32": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint32" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "int32" + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "int32" + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } - }, - "required": [ - "type", - "value" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "ip_addr" - ] - }, - "value": { - "type": "string", - "format": "ip" + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } - }, - "required": [ - "type", - "value" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "uuid" - ] - }, - "value": { - "type": "string", - "format": "uuid" + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } - }, - "required": [ - "type", - "value" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "bool" - ] - }, - "value": { - "type": "boolean" + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" + } + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" + ] + }, + "Histogramint64": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint64" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "int64" + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "int64" + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } - }, - "required": [ - "type", - "value" ] - } - ] - }, - "FinalizeDisk": { - "description": "Parameters for finalizing a disk", - "type": "object", - "properties": { - "snapshot_name": { - "nullable": true, - "description": "If specified a snapshot of the disk will be created with the given name during finalization. If not specified, a snapshot for the disk will _not_ be created. A snapshot can be manually created once the disk transitions into the `Detached` state.", + }, + "p90": { + "description": "p95 Quantile", "allOf": [ { - "$ref": "#/components/schemas/Name" + "$ref": "#/components/schemas/Quantile" } ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" } - } - }, - "FleetRole": { - "type": "string", - "enum": [ - "admin", - "collaborator", - "viewer" + }, + "required": [ + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" ] }, - "FleetRolePolicy": { - "description": "Policy for a particular resource\n\nNote that the Policy only describes access granted explicitly for this resource. The policies of parent resources can also cause a user to have access to this resource.", + "Histogramint8": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", "type": "object", "properties": { - "role_assignments": { - "description": "Roles directly assigned on this resource", + "bins": { + "description": "The bins of the histogram.", "type": "array", "items": { - "$ref": "#/components/schemas/FleetRoleRoleAssignment" + "$ref": "#/components/schemas/Binint8" } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "int8" + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "int8" + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" } }, "required": [ - "role_assignments" + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" ] }, - "FleetRoleRoleAssignment": { - "description": "Describes the assignment of a particular role on a particular resource to a particular identity (user, group, etc.)\n\nThe resource is not part of this structure. Rather, `RoleAssignment`s are put into a `Policy` and that Policy is applied to a particular resource.", + "Histogramuint16": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", "type": "object", "properties": { - "identity_id": { - "type": "string", - "format": "uuid" + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint16" + } }, - "identity_type": { - "$ref": "#/components/schemas/IdentityType" + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "uint16", + "minimum": 0 }, - "role_name": { - "$ref": "#/components/schemas/FleetRole" + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" } }, "required": [ - "identity_id", - "identity_type", - "role_name" + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" ] }, - "FloatingIp": { - "description": "A Floating IP is a well-known IP address which can be attached and detached from instances.", + "Histogramuint32": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", "type": "object", "properties": { - "description": { - "description": "human-readable free-form text about a resource", - "type": "string" + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint32" + } }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "uint32", + "minimum": 0 }, - "instance_id": { - "nullable": true, - "description": "The ID of the instance that this Floating IP is attached to, if it is presently in use.", - "type": "string", - "format": "uuid" + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "uint32", + "minimum": 0 }, - "ip": { - "description": "The IP address held by this resource.", - "type": "string", - "format": "ip" + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 }, - "ip_pool_id": { - "description": "The ID of the IP pool this resource belongs to.", - "type": "string", - "format": "uuid" + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", + "p90": { + "description": "p95 Quantile", "allOf": [ { - "$ref": "#/components/schemas/Name" + "$ref": "#/components/schemas/Quantile" } ] }, - "project_id": { - "description": "The project this resource exists within.", - "type": "string", - "format": "uuid" + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] }, - "time_created": { - "description": "timestamp when this resource was created", - "type": "string", - "format": "date-time" + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" }, - "time_modified": { - "description": "timestamp when this resource was last modified", + "start_time": { + "description": "The start time of the histogram.", "type": "string", "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" } }, "required": [ - "description", - "id", - "ip", - "ip_pool_id", - "name", - "project_id", - "time_created", - "time_modified" + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" ] }, - "FloatingIpAttach": { - "description": "Parameters for attaching a floating IP address to another resource", + "Histogramuint64": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", "type": "object", "properties": { - "kind": { - "description": "The type of `parent`'s resource", + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint64" + } + }, + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", "allOf": [ { - "$ref": "#/components/schemas/FloatingIpParentKind" + "$ref": "#/components/schemas/Quantile" } ] }, - "parent": { - "description": "Name or ID of the resource that this IP address should be attached to", + "p99": { + "description": "p99 Quantile", "allOf": [ { - "$ref": "#/components/schemas/NameOrId" + "$ref": "#/components/schemas/Quantile" } ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" } }, "required": [ - "kind", - "parent" + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" ] }, - "FloatingIpCreate": { - "description": "Parameters for creating a new floating IP address for instances.", + "Histogramuint8": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", "type": "object", "properties": { - "description": { - "type": "string" + "bins": { + "description": "The bins of the histogram.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint8" + } }, - "ip": { - "nullable": true, - "description": "An IP address to reserve for use as a floating IP. This field is optional: when not set, an address will be automatically chosen from `pool`. 
If set, then the IP must be available in the resolved `pool`.", - "type": "string", - "format": "ip" + "max": { + "description": "The maximum value of all samples in the histogram.", + "type": "integer", + "format": "uint8", + "minimum": 0 }, - "name": { - "$ref": "#/components/schemas/Name" + "min": { + "description": "The minimum value of all samples in the histogram.", + "type": "integer", + "format": "uint8", + "minimum": 0 }, - "pool": { - "nullable": true, - "description": "The parent IP pool that a floating IP is pulled from. If unset, the default pool is selected.", + "n_samples": { + "description": "The total number of samples in the histogram.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "p50": { + "description": "p50 Quantile", "allOf": [ { - "$ref": "#/components/schemas/NameOrId" + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p90": { + "description": "p95 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" + } + ] + }, + "p99": { + "description": "p99 Quantile", + "allOf": [ + { + "$ref": "#/components/schemas/Quantile" } ] + }, + "squared_mean": { + "description": "M2 for Welford's algorithm for variance calculation.\n\nRead about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) for more information on the algorithm.", + "type": "number", + "format": "double" + }, + "start_time": { + "description": "The start time of the histogram.", + "type": "string", + "format": "date-time" + }, + "sum_of_samples": { + "description": "The sum of all samples in the histogram.", + "type": "integer", + "format": "int64" } }, "required": [ - "description", - "name" + "bins", + "max", + "min", + "n_samples", + "p50", + "p90", + "p99", + "squared_mean", + "start_time", + "sum_of_samples" ] }, - "FloatingIpParentKind": { - "description": "The type of resource that a floating IP is attached to", + "Hostname": { + "title": "An RFC-1035-compliant hostname", + "description": "A hostname identifies a host on a network, and is usually a dot-delimited sequence of labels, where each label contains only letters, digits, or the hyphen. See RFCs 1035 and 952 for more details.", "type": "string", - "enum": [ - "instance" - ] - }, - "FloatingIpResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/FloatingIp" - } - }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" - } - }, - "required": [ - "items" - ] + "pattern": "^([a-zA-Z0-9]+[a-zA-Z0-9\\-]*(? anyhow::Result<()> { } } -// Describe a single timeseries. +/// Prepare the columns for a timeseries or virtual table. 
+pub(crate) fn prepare_columns( + schema: &TimeseriesSchema, +) -> (Vec, Vec) { + let mut cols = Vec::with_capacity(schema.field_schema.len() + 2); + let mut types = cols.clone(); + + for field in schema.field_schema.iter() { + cols.push(field.name.clone()); + types.push(field.field_type.to_string()); + } + + cols.push(special_idents::TIMESTAMP.into()); + types.push(special_idents::DATETIME64.into()); + + if schema.datum_type.is_histogram() { + cols.push(special_idents::START_TIME.into()); + types.push(special_idents::DATETIME64.into()); + + cols.push(special_idents::BINS.into()); + types.push( + special_idents::array_type_name_from_histogram_type( + schema.datum_type, + ) + .unwrap(), + ); + + cols.push(special_idents::COUNTS.into()); + types.push(special_idents::ARRAYU64.into()); + + cols.push(special_idents::MIN.into()); + types.push(special_idents::FLOAT64.into()); + + cols.push(special_idents::MAX.into()); + types.push(special_idents::FLOAT64.into()); + + cols.push(special_idents::SUM_OF_SAMPLES.into()); + types.push(special_idents::UINT64.into()); + + cols.push(special_idents::SQUARED_MEAN.into()); + types.push(special_idents::UINT64.into()); + + for quantile in ["P50", "P90", "P99"].iter() { + cols.push(format!("{}_MARKER_HEIGHTS", quantile)); + types.push(special_idents::ARRAYFLOAT64.into()); + cols.push(format!("{}_MARKER_POSITIONS", quantile)); + types.push(special_idents::ARRAYINT64.into()); + cols.push(format!("{}_DESIRED_MARKER_POSITIONS", quantile)); + types.push(special_idents::ARRAYFLOAT64.into()); + } + } else if schema.datum_type.is_cumulative() { + cols.push(special_idents::START_TIME.into()); + types.push(special_idents::DATETIME64.into()); + cols.push(special_idents::DATUM.into()); + types.push(schema.datum_type.to_string()); + } else { + cols.push(special_idents::DATUM.into()); + types.push(schema.datum_type.to_string()); + } + + (cols, types) +} + +/// Describe a single timeseries. async fn describe_timeseries( client: &Client, timeseries: &str, @@ -158,40 +222,7 @@ async fn describe_timeseries( ), Ok(name) => { if let Some(schema) = client.schema_for_timeseries(&name).await? 
{ - let mut cols = - Vec::with_capacity(schema.field_schema.len() + 2); - let mut types = cols.clone(); - for field in schema.field_schema.iter() { - cols.push(field.name.clone()); - types.push(field.field_type.to_string()); - } - cols.push(special_idents::TIMESTAMP.into()); - types.push(special_idents::DATETIME64.into()); - - if schema.datum_type.is_histogram() { - cols.push(special_idents::START_TIME.into()); - types.push(special_idents::DATETIME64.into()); - - cols.push(special_idents::BINS.into()); - types.push( - special_idents::array_type_name_from_histogram_type( - schema.datum_type, - ) - .unwrap(), - ); - - cols.push(special_idents::COUNTS.into()); - types.push(special_idents::ARRAYU64.into()); - } else if schema.datum_type.is_cumulative() { - cols.push(special_idents::START_TIME.into()); - types.push(special_idents::DATETIME64.into()); - cols.push(special_idents::DATUM.into()); - types.push(schema.datum_type.to_string()); - } else { - cols.push(special_idents::DATUM.into()); - types.push(schema.datum_type.to_string()); - } - + let (cols, types) = prepare_columns(&schema); let mut builder = tabled::builder::Builder::default(); builder.push_record(cols); // first record is the header builder.push_record(types); diff --git a/oximeter/db/src/bin/oxdb/sql.rs b/oximeter/db/src/bin/oxdb/sql.rs index d50a60f4d7..44780592fc 100644 --- a/oximeter/db/src/bin/oxdb/sql.rs +++ b/oximeter/db/src/bin/oxdb/sql.rs @@ -6,6 +6,7 @@ // Copyright 2024 Oxide Computer Company +use super::oxql; use crate::make_client; use clap::Args; use dropshot::EmptyScanParams; @@ -63,43 +64,7 @@ async fn describe_virtual_table( Err(_) => println!("Invalid timeseries name: {table}"), Ok(name) => { if let Some(schema) = client.schema_for_timeseries(&name).await? { - let mut cols = - Vec::with_capacity(schema.field_schema.len() + 2); - let mut types = cols.clone(); - for field in schema.field_schema.iter() { - cols.push(field.name.clone()); - types.push(field.field_type.to_string()); - } - cols.push("timestamp".into()); - types.push("DateTime64".into()); - - if schema.datum_type.is_histogram() { - cols.push("start_time".into()); - types.push("DateTime64".into()); - - cols.push("bins".into()); - types.push(format!( - "Array[{}]", - schema - .datum_type - .to_string() - .strip_prefix("Histogram") - .unwrap() - .to_lowercase(), - )); - - cols.push("counts".into()); - types.push("Array[u64]".into()); - } else if schema.datum_type.is_cumulative() { - cols.push("start_time".into()); - types.push("DateTime64".into()); - cols.push("datum".into()); - types.push(schema.datum_type.to_string()); - } else { - cols.push("datum".into()); - types.push(schema.datum_type.to_string()); - } - + let (cols, types) = oxql::prepare_columns(&schema); let mut builder = tabled::builder::Builder::default(); builder.push_record(cols); // first record is the header builder.push_record(types); diff --git a/oximeter/db/src/client/mod.rs b/oximeter/db/src/client/mod.rs index 9a2b7b1bd3..2d6212971e 100644 --- a/oximeter/db/src/client/mod.rs +++ b/oximeter/db/src/client/mod.rs @@ -4185,4 +4185,242 @@ mod tests { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + // The schema directory, used in tests. The actual updater uses the + // zone-image files copied in during construction. 
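Before the schema-upgrade tests that follow, here is a minimal sketch of a third consumer of the `prepare_columns` helper introduced above. It assumes the same items the two call sites already use (`Client`, `TimeseriesName`, `tabled`, and `prepare_columns` being in scope, e.g. via `use super::oxql` as in `sql.rs`); the function name `print_virtual_table_columns` is hypothetical and not part of this change.

```rust
/// Hypothetical consumer of `prepare_columns`: the helper returns parallel
/// vectors of column names and column types, which callers render however
/// they like (here with `tabled`, exactly as the call sites above do).
async fn print_virtual_table_columns(
    client: &Client,
    name: &TimeseriesName,
) -> anyhow::Result<()> {
    if let Some(schema) = client.schema_for_timeseries(name).await? {
        // For histogram timeseries the columns now include min, max,
        // sum_of_samples, squared_mean, and the p50/p90/p99 marker arrays,
        // in addition to bins and counts.
        let (cols, types) = prepare_columns(&schema);
        let mut builder = tabled::builder::Builder::default();
        builder.push_record(cols); // first record is the header
        builder.push_record(types);
        println!("{}", builder.build());
    }
    Ok(())
}
```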
+ const SCHEMA_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/schema"); + + #[tokio::test] + async fn check_actual_schema_upgrades_are_valid_single_node() { + check_actual_schema_upgrades_are_valid_impl(false).await; + } + + #[tokio::test] + async fn check_actual_schema_upgrades_are_valid_replicated() { + check_actual_schema_upgrades_are_valid_impl(true).await; + } + + // NOTE: This does not actually run the upgrades, only checks them for + // validity. + async fn check_actual_schema_upgrades_are_valid_impl(replicated: bool) { + let name = format!( + "check_actual_schema_upgrades_are_valid_{}", + if replicated { "replicated" } else { "single_node" } + ); + let logctx = test_setup_log(&name); + let log = &logctx.log; + + // We really started tracking the database version in 2. However, that + // set of files is not valid by construction, since we were just + // creating the full database from scratch as an "upgrade". So we'll + // start by applying version 3, and then do all later ones. + const FIRST_VERSION: u64 = 3; + for version in FIRST_VERSION..=OXIMETER_VERSION { + let upgrade_file_contents = Client::read_schema_upgrade_sql_files( + log, replicated, version, SCHEMA_DIR, + ) + .await + .expect("failed to read schema upgrade files"); + + if let Err(e) = + Client::verify_schema_upgrades(&upgrade_file_contents) + { + panic!( + "Schema update files for version {version} \ + are not valid: {e:?}" + ); + } + } + logctx.cleanup_successful(); + } + + // Either a cluster or single node. + // + // How does this not already exist... + enum ClickHouse { + Cluster(ClickHouseCluster), + Single(ClickHouseInstance), + } + + impl ClickHouse { + fn port(&self) -> u16 { + match self { + ClickHouse::Cluster(cluster) => cluster.replica_1.port(), + ClickHouse::Single(node) => node.port(), + } + } + + async fn cleanup(mut self) { + match &mut self { + ClickHouse::Cluster(cluster) => { + cluster + .keeper_1 + .cleanup() + .await + .expect("Failed to cleanup ClickHouse keeper 1"); + cluster + .keeper_2 + .cleanup() + .await + .expect("Failed to cleanup ClickHouse keeper 2"); + cluster + .keeper_3 + .cleanup() + .await + .expect("Failed to cleanup ClickHouse keeper 3"); + cluster + .replica_1 + .cleanup() + .await + .expect("Failed to cleanup ClickHouse server 1"); + cluster + .replica_2 + .cleanup() + .await + .expect("Failed to cleanup ClickHouse server 2"); + } + ClickHouse::Single(node) => node + .cleanup() + .await + .expect("Failed to cleanup single node ClickHouse"), + } + } + } + + #[tokio::test] + async fn check_db_init_is_sum_of_all_up_single_node() { + check_db_init_is_sum_of_all_up_impl(false).await; + } + + #[tokio::test] + async fn check_db_init_is_sum_of_all_up_replicated() { + check_db_init_is_sum_of_all_up_impl(true).await; + } + + // Check that the set of tables we arrive at through upgrades equals those + // we get by creating the latest version directly. 
+ async fn check_db_init_is_sum_of_all_up_impl(replicated: bool) { + let name = format!( + "check_db_init_is_sum_of_all_up_{}", + if replicated { "replicated" } else { "single_node" } + ); + let logctx = test_setup_log(&name); + let log = &logctx.log; + let db = if replicated { + ClickHouse::Cluster(create_cluster(&logctx).await) + } else { + ClickHouse::Single( + ClickHouseInstance::new_single_node(&logctx, 0) + .await + .expect("Failed to start ClickHouse"), + ) + }; + let address = SocketAddr::new(Ipv6Addr::LOCALHOST.into(), db.port()); + let client = Client::new(address, &log); + + // Let's start with version 2, which is the first tracked and contains + // the full SQL files we need to populate the DB. + client + .initialize_db_with_version(replicated, 2) + .await + .expect("Failed to initialize timeseries database"); + + // Now let's apply all the SQL updates from here to the latest. + for version in 3..=OXIMETER_VERSION { + client + .ensure_schema(replicated, version, SCHEMA_DIR) + .await + .expect("Failed to ensure schema"); + } + + // Fetch all the tables as a JSON blob. + let tables_through_upgrades = + fetch_oximeter_table_details(&client).await; + + // We'll completely re-init the DB with the real version now. + if replicated { + client.wipe_replicated_db().await.unwrap() + } else { + client.wipe_single_node_db().await.unwrap() + } + client + .initialize_db_with_version(replicated, OXIMETER_VERSION) + .await + .expect("Failed to initialize timeseries database"); + + // Fetch the tables again and compare. + let tables = fetch_oximeter_table_details(&client).await; + + // This is an annoying comparison. Since the tables are quite + // complicated, we want to really be careful about what errors we show. + // Iterate through all the expected tables (from the direct creation), + // and check each expected field matches. Then we also check that the + // tables from the upgrade path don't have anything else in them. + for (name, json) in tables.iter() { + let upgrade_table = + tables_through_upgrades.get(name).unwrap_or_else(|| { + panic!("The tables via upgrade are missing table '{name}'") + }); + for (key, value) in json.iter() { + let other_value = upgrade_table.get(key).unwrap_or_else(|| { + panic!("Upgrade table is missing key '{key}'") + }); + assert_eq!( + value, + other_value, + "{} database table {name} disagree on the value \ + of the column {key} between the direct table creation \ + and the upgrade path.\nDirect:\n\n{value} \ + \n\nUpgrade:\n\n{other_value}", + if replicated { "Replicated" } else { "Single-node" }, + ); + } + } + + // Check there are zero keys in the upgrade path that don't appear in + // the direct path. + let extra_keys: Vec<_> = tables_through_upgrades + .keys() + .filter(|k| !tables.contains_key(k.as_str())) + .cloned() + .collect(); + assert!( + extra_keys.is_empty(), + "The oximeter database contains tables in the upgrade path \ + that are not in the direct path: {extra_keys:?}" + ); + + db.cleanup().await; + logctx.cleanup_successful(); + } + + // Read the relevant table details from the `oximeter` database, and return + // it keyed on the table name. 
+ async fn fetch_oximeter_table_details( + client: &Client, + ) -> BTreeMap> { + let out = client + .execute_with_body( + "SELECT \ + name, + engine_full, + create_table_query, + sorting_key, + primary_key + FROM system.tables \ + WHERE database = 'oximeter'\ + FORMAT JSONEachRow;", + ) + .await + .unwrap() + .1; + out.lines() + .map(|line| { + let json: serde_json::Map = + serde_json::from_str(&line).unwrap(); + let name = json.get("name").unwrap().to_string(); + (name, json) + }) + .collect() + } } diff --git a/oximeter/db/src/client/oxql.rs b/oximeter/db/src/client/oxql.rs index d1ce131581..29586b8189 100644 --- a/oximeter/db/src/client/oxql.rs +++ b/oximeter/db/src/client/oxql.rs @@ -825,7 +825,13 @@ impl Client { datum_type: oximeter::DatumType, ) -> String { let value_columns = if datum_type.is_histogram() { - "timeseries_key, start_time, timestamp, bins, counts" + concat!( + "timeseries_key, start_time, timestamp, bins, counts, min, max, ", + "sum_of_samples, squared_mean, p50_marker_heights, p50_marker_positions, ", + "p50_desired_marker_positions, p90_marker_heights, p90_marker_positions, ", + "p90_desired_marker_positions, p99_marker_heights, p99_marker_positions, ", + "p99_desired_marker_positions" + ) } else if datum_type.is_cumulative() { "timeseries_key, start_time, timestamp, datum" } else { @@ -1203,7 +1209,7 @@ mod tests { // Create the first metric, starting from a count of 0. let mut metric = SomeMetric { foo: *foo, datum }; - // Create all the samples,, incrementing the datum and sample + // Create all the samples, incrementing the datum and sample // time. for i in 0..N_SAMPLES_PER_TIMESERIES { let sample_time = diff --git a/oximeter/db/src/lib.rs b/oximeter/db/src/lib.rs index e1570ee0c3..c471a837ea 100644 --- a/oximeter/db/src/lib.rs +++ b/oximeter/db/src/lib.rs @@ -160,6 +160,12 @@ impl From for TimeseriesSchema { schema.timeseries_name.as_str(), ) .expect("Invalid timeseries name in database"), + // TODO-cleanup: Fill these in from the values in the database. See + // https://github.com/oxidecomputer/omicron/issues/5942. + description: Default::default(), + version: oximeter::schema::default_schema_version(), + authz_scope: oximeter::schema::AuthzScope::Fleet, + units: oximeter::schema::Units::Count, field_schema: schema.field_schema.into(), datum_type: schema.datum_type.into(), created: schema.created, @@ -236,9 +242,14 @@ pub struct TimeseriesPageSelector { pub(crate) type TimeseriesKey = u64; +// TODO-cleanup: Add the timeseries version in to the computation of the key. +// This will require a full drop of the database, since we're changing the +// sorting key and the timeseries key on each past sample. See +// https://github.com/oxidecomputer/omicron/issues/5942 for more details. 
pub(crate) fn timeseries_key(sample: &Sample) -> TimeseriesKey { timeseries_key_for( &sample.timeseries_name, + // sample.timeseries_version sample.sorted_target_fields(), sample.sorted_metric_fields(), sample.measurement.datum_type(), @@ -389,11 +400,13 @@ mod tests { name: String::from("later"), field_type: FieldType::U64, source: FieldSource::Target, + description: String::new(), }; let metric_field = FieldSchema { name: String::from("earlier"), field_type: FieldType::U64, source: FieldSource::Metric, + description: String::new(), }; let timeseries_name: TimeseriesName = "foo:bar".parse().unwrap(); let datum_type = DatumType::U64; @@ -401,6 +414,10 @@ mod tests { [target_field.clone(), metric_field.clone()].into_iter().collect(); let expected_schema = TimeseriesSchema { timeseries_name: timeseries_name.clone(), + description: Default::default(), + version: oximeter::schema::default_schema_version(), + authz_scope: oximeter::schema::AuthzScope::Fleet, + units: oximeter::schema::Units::Count, field_schema, datum_type, created: Utc::now(), diff --git a/oximeter/db/src/model.rs b/oximeter/db/src/model.rs index 106c347ef6..810463d250 100644 --- a/oximeter/db/src/model.rs +++ b/oximeter/db/src/model.rs @@ -4,7 +4,7 @@ //! Models for timeseries data in ClickHouse -// Copyright 2023 Oxide Computer Company +// Copyright 2024 Oxide Computer Company use crate::DbFieldSource; use crate::FieldSchema; @@ -16,6 +16,7 @@ use crate::TimeseriesSchema; use bytes::Bytes; use chrono::DateTime; use chrono::Utc; +use num::traits::Zero; use oximeter::histogram::Histogram; use oximeter::traits; use oximeter::types::Cumulative; @@ -27,6 +28,7 @@ use oximeter::types::FieldValue; use oximeter::types::Measurement; use oximeter::types::MissingDatum; use oximeter::types::Sample; +use oximeter::Quantile; use serde::Deserialize; use serde::Serialize; use std::collections::BTreeMap; @@ -43,7 +45,7 @@ use uuid::Uuid; /// - [`crate::Client::initialize_db_with_version`] /// - [`crate::Client::ensure_schema`] /// - The `clickhouse-schema-updater` binary in this crate -pub const OXIMETER_VERSION: u64 = 4; +pub const OXIMETER_VERSION: u64 = 5; // Wrapper type to represent a boolean in the database. // @@ -107,6 +109,10 @@ pub(crate) struct DbFieldList { pub types: Vec, #[serde(rename = "fields.source")] pub sources: Vec, + // TODO-completeness: Populate the description from the database here. See + // https://github.com/oxidecomputer/omicron/issues/5942 for more details. + //#[serde(rename = "fields.description")] + //pub descriptions: Vec, } impl From for BTreeSet { @@ -119,6 +125,7 @@ impl From for BTreeSet { name, field_type: ty.into(), source: source.into(), + description: String::new(), }) .collect() } @@ -147,6 +154,9 @@ pub(crate) struct DbTimeseriesSchema { pub datum_type: DbDatumType, #[serde(with = "serde_timestamp")] pub created: DateTime, + // TODO-completeness: Add the authorization scope, version, and units once + // they are tracked in the database. See + // https://github.com/oxidecomputer/omicron/issues/5942 for more details. } impl From for DbTimeseriesSchema { @@ -446,15 +456,83 @@ declare_cumulative_measurement_row! { CumulativeU64MeasurementRow, u64, "cumulat declare_cumulative_measurement_row! { CumulativeF32MeasurementRow, f32, "cumulativef32" } declare_cumulative_measurement_row! { CumulativeF64MeasurementRow, f64, "cumulativef64" } +/// A representation of all quantiles for a histogram. 
+#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq)] +struct AllQuantiles { + p50_marker_heights: [f64; 5], + p50_marker_positions: [u64; 5], + p50_desired_marker_positions: [f64; 5], + + p90_marker_heights: [f64; 5], + p90_marker_positions: [u64; 5], + p90_desired_marker_positions: [f64; 5], + + p99_marker_heights: [f64; 5], + p99_marker_positions: [u64; 5], + p99_desired_marker_positions: [f64; 5], +} + +impl AllQuantiles { + /// Create a flat `AllQuantiles` struct from the given quantiles. + fn flatten(q50: Quantile, q90: Quantile, q99: Quantile) -> Self { + Self { + p50_marker_heights: q50.marker_heights(), + p50_marker_positions: q50.marker_positions(), + p50_desired_marker_positions: q50.desired_marker_positions(), + + p90_marker_heights: q90.marker_heights(), + p90_marker_positions: q90.marker_positions(), + p90_desired_marker_positions: q90.desired_marker_positions(), + + p99_marker_heights: q99.marker_heights(), + p99_marker_positions: q99.marker_positions(), + p99_desired_marker_positions: q99.desired_marker_positions(), + } + } + + /// Split the quantiles into separate `Quantile` structs in order of P. + fn split(&self) -> (Quantile, Quantile, Quantile) { + ( + Quantile::from_parts( + 0.5, + self.p50_marker_heights, + self.p50_marker_positions, + self.p50_desired_marker_positions, + ), + Quantile::from_parts( + 0.9, + self.p90_marker_heights, + self.p90_marker_positions, + self.p90_desired_marker_positions, + ), + Quantile::from_parts( + 0.99, + self.p99_marker_heights, + self.p99_marker_positions, + self.p99_desired_marker_positions, + ), + ) + } +} + // Representation of a histogram in ClickHouse. // -// The tables storing measurements of a histogram metric use a pair of arrays to represent them, -// for the bins and counts, respectively. This handles conversion between the type used to -// represent histograms in Rust, [`Histogram`], and this in-database representation. +// The tables storing measurements of a histogram metric use a set of arrays to +// represent them. This handles conversion between the type used to represent +// histograms in Rust, [`Histogram`], and this in-database representation. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] -struct DbHistogram { +struct DbHistogram +where + T: traits::HistogramSupport, +{ pub bins: Vec, pub counts: Vec, + pub min: T, + pub max: T, + pub sum_of_samples: T::Width, + pub squared_mean: f64, + #[serde(flatten)] + pub quantiles: AllQuantiles, } // We use an empty histogram to indicate a missing sample. @@ -467,9 +545,24 @@ struct DbHistogram { // // That means we can currently use an empty array from the database as a // sentinel for a missing sample. 
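The `flatten`/`split` pair on `AllQuantiles` above is meant to be lossless. A quick illustrative check, mirroring the assertions added to the `model.rs` tests later in this diff; the test name is made up, and the snippet assumes only the `oximeter::Quantile` constructors already used in this change.

```rust
#[test]
fn all_quantiles_round_trip() {
    // Flatten the three default estimators into the wide row layout and
    // split them back out; nothing should be lost in either direction.
    let flat = AllQuantiles::flatten(
        Quantile::p50(),
        Quantile::p90(),
        Quantile::p99(),
    );
    let (p50, p90, p99) = flat.split();
    assert_eq!(p50, Quantile::p50());
    assert_eq!(p90, Quantile::p90());
    assert_eq!(p99, Quantile::p99());
}
```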
-impl DbHistogram { +impl DbHistogram +where + T: traits::HistogramSupport, +{ fn null() -> Self { - Self { bins: vec![], counts: vec![] } + let p50 = Quantile::p50(); + let p90 = Quantile::p90(); + let p99 = Quantile::p99(); + + Self { + bins: vec![], + counts: vec![], + min: T::zero(), + max: T::zero(), + sum_of_samples: T::Width::zero(), + squared_mean: 0.0, + quantiles: AllQuantiles::flatten(p50, p90, p99), + } } } @@ -478,8 +571,20 @@ where T: traits::HistogramSupport, { fn from(hist: &Histogram) -> Self { - let (bins, counts) = hist.to_arrays(); - Self { bins, counts } + let (bins, counts) = hist.bins_and_counts(); + Self { + bins, + counts, + min: hist.min(), + max: hist.max(), + sum_of_samples: hist.sum_of_samples(), + squared_mean: hist.squared_mean(), + quantiles: AllQuantiles::flatten( + hist.p50q(), + hist.p90q(), + hist.p99q(), + ), + } } } @@ -516,7 +621,7 @@ declare_histogram_measurement_row! { HistogramF64MeasurementRow, DbHistogram BTreeMap> { let mut out = BTreeMap::new(); for field in sample.fields() { - let timeseries_name = sample.timeseries_name.clone(); + let timeseries_name = sample.timeseries_name.to_string(); let timeseries_key = crate::timeseries_key(sample); let field_name = field.name.clone(); let (table_name, row_string) = match &field.value { @@ -664,7 +769,11 @@ pub(crate) fn unroll_measurement_row(sample: &Sample) -> (String, String) { let timeseries_name = sample.timeseries_name.clone(); let timeseries_key = crate::timeseries_key(sample); let measurement = &sample.measurement; - unroll_measurement_row_impl(timeseries_name, timeseries_key, measurement) + unroll_measurement_row_impl( + timeseries_name.to_string(), + timeseries_key, + measurement, + ) } /// Given a sample's measurement, return a table name and row to insert. @@ -1255,7 +1364,10 @@ struct DbTimeseriesScalarCumulativeSample { // A histogram timestamped sample from a timeseries, as extracted from a query to the database. 
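The `#[serde(flatten)]` on `quantiles` is what turns the nested `AllQuantiles` into the flat per-column layout the measurement tables expect. A small sketch of the serialized shape of the insert-side row, assuming only the types above plus `serde_json` (already a dependency of this crate); the test name is illustrative.

```rust
#[test]
fn db_histogram_row_is_flat() {
    let mut hist = Histogram::new(&[0i64, 10, 20]).unwrap();
    hist.sample(1).unwrap();
    hist.sample(11).unwrap();
    let row = DbHistogram::from(&hist);
    let json = serde_json::to_value(&row).unwrap();
    // The new summary statistics are ordinary top-level columns...
    assert!(json.get("bins").is_some());
    assert!(json.get("min").is_some());
    assert!(json.get("sum_of_samples").is_some());
    // ...and the quantile state is hoisted to top-level keys rather than
    // being nested under a `quantiles` object.
    assert!(json.get("p50_marker_heights").is_some());
    assert!(json.get("quantiles").is_none());
}
```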
#[derive(Debug, Clone, Deserialize)] -struct DbTimeseriesHistogramSample { +struct DbTimeseriesHistogramSample +where + T: traits::HistogramSupport, +{ timeseries_key: TimeseriesKey, #[serde(with = "serde_timestamp")] start_time: DateTime, @@ -1263,6 +1375,12 @@ struct DbTimeseriesHistogramSample { timestamp: DateTime, bins: Vec, counts: Vec, + min: T, + max: T, + sum_of_samples: T::Width, + squared_mean: f64, + #[serde(flatten)] + quantiles: AllQuantiles, } impl From> for Measurement @@ -1314,14 +1432,30 @@ where .unwrap(), ) } else { - Datum::from( - Histogram::from_arrays( - sample.start_time, - sample.bins, - sample.counts, - ) - .unwrap(), + if sample.bins.len() != sample.counts.len() { + panic!( + "Array size mismatch: bins: {}, counts: {}", + sample.bins.len(), + sample.counts.len() + ); + } + + let (p50, p90, p99) = sample.quantiles.split(); + let hist = Histogram::from_parts( + sample.start_time, + sample.bins, + sample.counts, + sample.min, + sample.max, + sample.sum_of_samples, + sample.squared_mean, + p50, + p90, + p99, ) + .unwrap(); + + Datum::from(hist) }; Measurement::new(sample.timestamp, datum) } @@ -1475,12 +1609,16 @@ where (sample.timeseries_key, sample.into()) } -fn parse_timeseries_histogram_measurement( - line: &str, +fn parse_timeseries_histogram_measurement<'a, T>( + line: &'a str, ) -> (TimeseriesKey, Measurement) where - T: Into + traits::HistogramSupport + FromDbHistogram, + T: Into + + traits::HistogramSupport + + FromDbHistogram + + Deserialize<'a>, Datum: From>, + ::Width: Deserialize<'a>, { let sample = serde_json::from_str::>(line).unwrap(); @@ -1741,6 +1879,7 @@ pub(crate) fn parse_field_select_row( mod tests { use super::*; use chrono::Timelike; + use oximeter::histogram::Record; use oximeter::test_util; use oximeter::Datum; @@ -1803,11 +1942,13 @@ mod tests { name: String::from("field0"), field_type: FieldType::I64, source: FieldSource::Target, + description: String::new(), }, FieldSchema { name: String::from("field1"), field_type: FieldType::IpAddr, source: FieldSource::Metric, + description: String::new(), }, ] .into_iter() @@ -1826,9 +1967,18 @@ mod tests { hist.sample(1).unwrap(); hist.sample(10).unwrap(); let dbhist = DbHistogram::from(&hist); - let (bins, counts) = hist.to_arrays(); + let (bins, counts) = hist.bins_and_counts(); assert_eq!(dbhist.bins, bins); assert_eq!(dbhist.counts, counts); + assert_eq!(dbhist.min, hist.min()); + assert_eq!(dbhist.max, hist.max()); + assert_eq!(dbhist.sum_of_samples, hist.sum_of_samples()); + assert_eq!(dbhist.squared_mean, hist.squared_mean()); + + let (p50, p90, p99) = dbhist.quantiles.split(); + assert_eq!(p50, hist.p50q()); + assert_eq!(p90, hist.p90q()); + assert_eq!(p99, hist.p99q()); } #[test] @@ -1839,7 +1989,7 @@ mod tests { assert_eq!(out["oximeter.fields_i64"].len(), 1); let unpacked: StringFieldRow = serde_json::from_str(&out["oximeter.fields_string"][0]).unwrap(); - assert_eq!(unpacked.timeseries_name, sample.timeseries_name); + assert_eq!(sample.timeseries_name, unpacked.timeseries_name); let field = sample.target_fields().next().unwrap(); assert_eq!(unpacked.field_name, field.name); if let FieldValue::String(v) = &field.value { @@ -1877,10 +2027,20 @@ mod tests { assert_eq!(table_name, "oximeter.measurements_histogramf64"); let unpacked: HistogramF64MeasurementRow = serde_json::from_str(&row).unwrap(); - let unpacked_hist = Histogram::from_arrays( + let (unpacked_p50, unpacked_p90, unpacked_p99) = + unpacked.datum.quantiles.split(); + + let unpacked_hist = Histogram::from_parts( 
unpacked.start_time, unpacked.datum.bins, unpacked.datum.counts, + unpacked.datum.min, + unpacked.datum.max, + unpacked.datum.sum_of_samples, + unpacked.datum.squared_mean, + unpacked_p50, + unpacked_p90, + unpacked_p99, ) .unwrap(); let measurement = &sample.measurement; @@ -1986,7 +2146,27 @@ mod tests { .with_nanosecond(123_456_789) .unwrap(); - let line = r#"{"timeseries_key": 12, "start_time": "2021-01-01 00:00:00.123456789", "timestamp": "2021-01-01 01:00:00.123456789", "bins": [0, 1], "counts": [1, 1] }"#; + let line = r#" + { + "timeseries_key": 12, + "start_time": "2021-01-01 00:00:00.123456789", + "timestamp": "2021-01-01 01:00:00.123456789", + "bins": [0, 1], + "counts": [1, 1], + "min": 0, + "max": 1, + "sum_of_samples": 2, + "squared_mean": 2.0, + "p50_marker_heights": [0.0, 0.0, 0.0, 0.0, 1.0], + "p50_marker_positions": [1, 2, 3, 4, 2], + "p50_desired_marker_positions": [1.0, 3.0, 5.0, 5.0, 5.0], + "p90_marker_heights": [0.0, 0.0, 0.0, 0.0, 1.0], + "p90_marker_positions": [1, 2, 3, 4, 2], + "p90_desired_marker_positions": [1.0, 3.0, 5.0, 5.0, 5.0], + "p99_marker_heights": [0.0, 0.0, 0.0, 0.0, 1.0], + "p99_marker_positions": [1, 2, 3, 4, 2], + "p99_desired_marker_positions": [1.0, 3.0, 5.0, 5.0, 5.0] + }"#; let (key, measurement) = parse_measurement_from_row(line, DatumType::HistogramI64); assert_eq!(key, 12); @@ -1997,6 +2177,38 @@ mod tests { }; assert_eq!(hist.n_bins(), 3); assert_eq!(hist.n_samples(), 2); + assert_eq!(hist.min(), 0); + assert_eq!(hist.max(), 1); + assert_eq!(hist.sum_of_samples(), 2); + assert_eq!(hist.squared_mean(), 2.); + assert_eq!( + hist.p50q(), + Quantile::from_parts( + 0.5, + [0.0, 0.0, 0.0, 0.0, 1.0], + [1, 2, 3, 4, 2], + [1.0, 3.0, 5.0, 5.0, 5.0], + ) + ); + assert_eq!( + hist.p90q(), + Quantile::from_parts( + 0.9, + [0.0, 0.0, 0.0, 0.0, 1.0], + [1, 2, 3, 4, 2], + [1.0, 3.0, 5.0, 5.0, 5.0], + ) + ); + + assert_eq!( + hist.p99q(), + Quantile::from_parts( + 0.99, + [0.0, 0.0, 0.0, 0.0, 1.0], + [1, 2, 3, 4, 2], + [1.0, 3.0, 5.0, 5.0, 5.0], + ) + ); } #[test] @@ -2007,32 +2219,6 @@ mod tests { assert_eq!(measurement.datum(), &Datum::from("/some/path")); } - #[test] - fn test_histogram_to_arrays() { - let mut hist = Histogram::new(&[0, 10, 20]).unwrap(); - hist.sample(1).unwrap(); - hist.sample(11).unwrap(); - - let (bins, counts) = hist.to_arrays(); - assert_eq!( - bins.len(), - counts.len(), - "Bins and counts should have the same size" - ); - assert_eq!( - bins.len(), - hist.n_bins(), - "Paired-array bins should be of the same length as the histogram" - ); - assert_eq!(counts, &[0, 1, 1, 0], "Paired-array counts are incorrect"); - - let rebuilt = - Histogram::from_arrays(hist.start_time(), bins, counts).unwrap(); - assert_eq!( - hist, rebuilt, - "Histogram reconstructed from paired arrays is not correct" - ); - } #[test] fn test_parse_bytes_measurement() { let s = r#"{"timeseries_key": 101, "timestamp": "2023-11-21 18:25:21.963714255", "datum": "\u0001\u0002\u0003"}"#; diff --git a/oximeter/db/src/oxql/ast/grammar.rs b/oximeter/db/src/oxql/ast/grammar.rs index a644dff41d..a7585402b6 100644 --- a/oximeter/db/src/oxql/ast/grammar.rs +++ b/oximeter/db/src/oxql/ast/grammar.rs @@ -257,7 +257,7 @@ peg::parser! { /// /// We support the following common escape sequences: /// - /// ```ignore + /// ```text /// \n /// \r /// \t @@ -271,7 +271,7 @@ peg::parser! { /// styles if required, by writing them as their Unicode escape /// sequences. 
For example, this string: /// - /// ```ignore + /// ```text /// "this string has \u{22} in it" /// ``` /// diff --git a/oximeter/db/src/oxql/ast/table_ops/filter.rs b/oximeter/db/src/oxql/ast/table_ops/filter.rs index 4e838f3388..9e796bc730 100644 --- a/oximeter/db/src/oxql/ast/table_ops/filter.rs +++ b/oximeter/db/src/oxql/ast/table_ops/filter.rs @@ -518,8 +518,9 @@ fn implicit_field_names( MetricType::Gauge, DataType::IntegerDistribution | DataType::DoubleDistribution, ) => { - out.insert(special_idents::BINS); - out.insert(special_idents::COUNTS); + special_idents::DISTRIBUTION_IDENTS.iter().for_each(|ident| { + out.insert(ident); + }); } // Scalars, either delta or cumulatives. ( @@ -534,8 +535,9 @@ fn implicit_field_names( MetricType::Delta | MetricType::Cumulative, DataType::IntegerDistribution | DataType::DoubleDistribution, ) => { - out.insert(special_idents::BINS); - out.insert(special_idents::COUNTS); + special_idents::DISTRIBUTION_IDENTS.iter().for_each(|ident| { + out.insert(ident); + }); out.insert(special_idents::START_TIME); } // Impossible combinations diff --git a/oximeter/db/src/oxql/point.rs b/oximeter/db/src/oxql/point.rs index e12214aaf0..7805ec64be 100644 --- a/oximeter/db/src/oxql/point.rs +++ b/oximeter/db/src/oxql/point.rs @@ -11,12 +11,15 @@ use anyhow::Context; use chrono::DateTime; use chrono::Utc; use num::ToPrimitive; +use oximeter::traits::HistogramSupport; use oximeter::DatumType; use oximeter::Measurement; +use oximeter::Quantile; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use std::fmt; +use std::ops::Sub; /// The type of each individual data point's value in a timeseries. #[derive( @@ -1428,7 +1431,7 @@ impl ValueArray { CumulativeDatum::DoubleDistribution(last), oximeter::Datum::HistogramF32(new), ) => { - let new = Distribution::from(new); + let new = Distribution::::from(new); self.as_double_distribution_mut()? .push(Some(new.checked_sub(&last)?)); } @@ -1436,7 +1439,7 @@ impl ValueArray { CumulativeDatum::DoubleDistribution(last), oximeter::Datum::HistogramF64(new), ) => { - let new = Distribution::from(new); + let new = Distribution::::from(new); self.as_double_distribution_mut()? .push(Some(new.checked_sub(&last)?)); } @@ -1517,15 +1520,30 @@ pub trait DistributionSupport: impl DistributionSupport for i64 {} impl DistributionSupport for f64 {} -/// A distribution is a sequence of bins and counts in those bins. +/// A distribution is a sequence of bins and counts in those bins, and some +/// statistical information tracked to compute the mean, standard deviation, and +/// quantile estimates. +/// +/// Min, max, and the p-* quantiles are treated as optional due to the +/// possibility of distribution operations, like subtraction. 
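// Minimal standalone sketch (illustrative only; not the crate's actual type) of how
// the summary statistics carried by the new `Distribution` fields fit together: the
// mean is recovered from `sum_of_samples / n_samples`, and the standard deviation
// from the Welford M2 accumulator stored in `squared_mean`, without keeping raw
// samples. In the real type, min/max and the p50/p90/p99 quantiles are `Option`s
// because subtraction invalidates them, as the doc comment above notes.
struct DistSketch {
    counts: Vec<u64>,
    sum_of_samples: f64,
    squared_mean: f64, // Welford's M2
}

impl DistSketch {
    fn n_samples(&self) -> u64 {
        self.counts.iter().sum()
    }

    // Mean of all recorded samples; zero when the distribution is empty.
    fn mean(&self) -> f64 {
        let n = self.n_samples();
        if n > 0 {
            self.sum_of_samples / n as f64
        } else {
            0.0
        }
    }

    // Population standard deviation, defined only for two or more samples.
    fn std_dev(&self) -> Option<f64> {
        let n = self.n_samples();
        (n > 1).then(|| (self.squared_mean / n as f64).sqrt())
    }
}

fn main() {
    // Two samples, 5.0 and 10.0: sum = 15.0, Welford M2 = 12.5.
    let d = DistSketch {
        counts: vec![0, 2, 0],
        sum_of_samples: 15.0,
        squared_mean: 12.5,
    };
    assert_eq!(d.mean(), 7.5);
    assert_eq!(d.std_dev(), Some(2.5));
}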
#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] #[schemars(rename = "Distribution{T}")] pub struct Distribution { bins: Vec, counts: Vec, + min: Option, + max: Option, + sum_of_samples: T, + squared_mean: f64, + p50: Option, + p90: Option, + p99: Option, } -impl fmt::Display for Distribution { +impl fmt::Display for Distribution +where + T: DistributionSupport + HistogramSupport + Sub, +{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let elems = self .bins @@ -1534,12 +1552,52 @@ impl fmt::Display for Distribution { .map(|(bin, count)| format!("{bin}: {count}")) .collect::>() .join(", "); - write!(f, "{}", elems) + + let unwrap_estimate = |opt: Option| { + opt.map_or("None".to_string(), |v| match v.estimate() { + Ok(v) => v.to_string(), + Err(err) => err.to_string(), + }) + }; + + let p50_estimate = unwrap_estimate(self.p50); + let p90_estimate = unwrap_estimate(self.p90); + let p99_estimate = unwrap_estimate(self.p99); + + write!( + f, + "{}, min: {}, max: {}, mean: {}, std_dev: {}, p50: {}, p90: {}, p99: {}", + elems, + self.min.map_or("none".to_string(), |m| m.to_string()), + self.max.unwrap_or_default(), + self.mean(), + self.std_dev().unwrap_or_default(), + p50_estimate, + p90_estimate, + p99_estimate + ) } } -impl Distribution { - // Subtract two distributions, checking that they have the same bins. +impl Distribution +where + T: DistributionSupport + HistogramSupport + Sub, +{ + /// Subtract two distributions, checking that they have the same bins. + /// + /// Min and max values are returned as None, as they lose meaning + /// when subtracting distributions. The same is true for p50, p90, and p99 + /// quantiles. + /// + /// TODO: It's not really clear how to compute the "difference" of two + /// histograms for items like min, max, p*'s. It's certainly not linear, and + /// although we might be able to make some estimates in the case of min and + /// max, we'll defer it for now. Instead, we'll store None for all these + /// values when computing the diff. They will be very useful later, when we + /// start generating distributions in OxQL itself, from a sequence of + /// scalars (similar to a DTrace aggregation). We'll wait to put that in + /// place until we have more data that we want to start aggregating that + /// way. fn checked_sub( &self, rhs: &Distribution, @@ -1548,14 +1606,34 @@ impl Distribution { self.bins == rhs.bins, "Cannot subtract distributions with different bins", ); - let counts = self + let counts: Vec<_> = self .counts .iter() - .zip(rhs.counts.iter().copied()) - .map(|(x, y)| x.checked_sub(y)) + .zip(rhs.counts.iter()) + .map(|(x, y)| x.checked_sub(*y)) .collect::>() .context("Underflow subtracting distributions values")?; - Ok(Self { bins: self.bins.clone(), counts }) + + // Subtract sum_of_samples. + // This can be negative as T is either i64 or f64. + let sum_of_samples = self.sum_of_samples - rhs.sum_of_samples; + + // Squared means are not linear, so we subtract the means and then + // square that number. + let sub_means = self.mean() - rhs.mean(); + let squared_mean = sub_means.powi(2); + + Ok(Self { + bins: self.bins.clone(), + counts, + min: None, + max: None, + sum_of_samples, + squared_mean, + p50: None, + p90: None, + p99: None, + }) } /// Return the slice of bins. @@ -1568,6 +1646,85 @@ impl Distribution { &self.counts } + /// Return the number of samples in the distribution. + pub fn n_samples(&self) -> u64 { + self.counts.iter().sum() + } + + /// Return the minimum value in the distribution. 
+ pub fn min(&self) -> Option { + self.min + } + + /// Return the maximum value in the distribution. + pub fn max(&self) -> Option { + self.max + } + + /// Return the mean of the distribution. + pub fn mean(&self) -> f64 { + if self.n_samples() > 0 { + // We can unwrap here because we know n_samples() > 0, + // so the sum_of_samples should convert to f64 without issue. + self.sum_of_samples + .to_f64() + .map(|sum| sum / (self.n_samples() as f64)) + .unwrap() + } else { + 0. + } + } + + /// Return the variance for inputs to the histogram based on the Welford's + /// algorithm, using the squared mean (M2). + /// + /// Returns `None` if there are fewer than two samples. + pub fn variance(&self) -> Option { + (self.n_samples() > 1) + .then(|| self.squared_mean / (self.n_samples() as f64)) + } + + /// Return the sample variance for inputs to the histogram based on the + /// Welford's algorithm, using the squared mean (M2). + /// + /// Returns `None` if there are fewer than two samples. + pub fn sample_variance(&self) -> Option { + (self.n_samples() > 1) + .then(|| self.squared_mean / ((self.n_samples() - 1) as f64)) + } + + /// Return the standard deviation for inputs to the histogram. + /// + /// This is a biased (as a consequence of Jensen’s inequality), estimate of + /// the population deviation that returns the standard deviation of the + /// samples seen by the histogram. + /// + /// Returns `None` if the variance is `None`, i.e., if there are fewer than + /// two samples. + pub fn std_dev(&self) -> Option { + match self.variance() { + Some(variance) => Some(variance.sqrt()), + None => None, + } + } + + /// Return the "corrected" sample standard deviation for inputs to the + /// histogram. + /// + /// This is an unbiased estimate of the population deviation, applying + /// Bessel's correction, which corrects the bias in the estimation of the + /// population variance, and some, but not all of the bias in the estimation + /// of the population standard deviation. + /// + /// Returns `None` if the variance is `None`, i.e., if there are fewer than + /// two samples. + pub fn sample_std_dev(&self) -> Option { + match self.sample_variance() { + Some(variance) => Some(variance.sqrt()), + None => None, + } + } + /// Return an iterator over each bin and count. pub fn iter(&self) -> impl ExactSizeIterator + '_ { self.bins.iter().zip(self.counts.iter()) @@ -1578,8 +1735,18 @@ macro_rules! 
i64_dist_from { ($t:ty) => { impl From<&oximeter::histogram::Histogram<$t>> for Distribution { fn from(hist: &oximeter::histogram::Histogram<$t>) -> Self { - let (bins, counts) = hist.to_arrays(); - Self { bins: bins.into_iter().map(i64::from).collect(), counts } + let (bins, counts) = hist.bins_and_counts(); + Self { + bins: bins.into_iter().map(i64::from).collect(), + counts, + min: Some(hist.min() as i64), + max: Some(hist.max() as i64), + sum_of_samples: hist.sum_of_samples(), + squared_mean: hist.squared_mean(), + p50: Some(hist.p50q()), + p90: Some(hist.p90q()), + p99: Some(hist.p99q()), + } } } @@ -1604,13 +1771,23 @@ impl TryFrom<&oximeter::histogram::Histogram> for Distribution { fn try_from( hist: &oximeter::histogram::Histogram, ) -> Result { - let (bins, counts) = hist.to_arrays(); + let (bins, counts) = hist.bins_and_counts(); let bins = bins .into_iter() .map(i64::try_from) .collect::>() .context("Overflow converting u64 to i64")?; - Ok(Self { bins, counts }) + Ok(Self { + bins, + counts, + min: Some(hist.min() as i64), + max: Some(hist.max() as i64), + sum_of_samples: hist.sum_of_samples(), + squared_mean: hist.squared_mean(), + p50: Some(hist.p50q()), + p90: Some(hist.p90q()), + p99: Some(hist.p99q()), + }) } } @@ -1627,8 +1804,18 @@ macro_rules! f64_dist_from { ($t:ty) => { impl From<&oximeter::histogram::Histogram<$t>> for Distribution { fn from(hist: &oximeter::histogram::Histogram<$t>) -> Self { - let (bins, counts) = hist.to_arrays(); - Self { bins: bins.into_iter().map(f64::from).collect(), counts } + let (bins, counts) = hist.bins_and_counts(); + Self { + bins: bins.into_iter().map(f64::from).collect(), + counts, + min: Some(hist.min() as f64), + max: Some(hist.max() as f64), + sum_of_samples: hist.sum_of_samples() as f64, + squared_mean: hist.squared_mean(), + p50: Some(hist.p50q()), + p90: Some(hist.p90q()), + p99: Some(hist.p99q()), + } } } @@ -1645,12 +1832,12 @@ f64_dist_from!(f64); #[cfg(test)] mod tests { - use crate::oxql::point::{DataType, ValueArray}; - use super::{Distribution, MetricType, Points, Values}; + use crate::oxql::point::{DataType, ValueArray}; use chrono::{DateTime, Utc}; - use oximeter::types::Cumulative; - use oximeter::Measurement; + use oximeter::{ + histogram::Record, types::Cumulative, Measurement, Quantile, + }; use std::time::Duration; #[test] @@ -1747,6 +1934,38 @@ mod tests { ); } + #[test] + fn test_sub_between_histogram_distributions() { + let now = Utc::now(); + let current1 = now + Duration::from_secs(1); + let mut hist1 = + oximeter::histogram::Histogram::new(&[0i64, 10, 20]).unwrap(); + hist1.sample(1).unwrap(); + hist1.set_start_time(current1); + let current2 = now + Duration::from_secs(2); + let mut hist2 = + oximeter::histogram::Histogram::new(&[0i64, 10, 20]).unwrap(); + hist2.sample(5).unwrap(); + hist2.sample(10).unwrap(); + hist2.sample(15).unwrap(); + hist2.set_start_time(current2); + let dist1 = Distribution::from(&hist1); + let dist2 = Distribution::from(&hist2); + + let diff = dist2.checked_sub(&dist1).unwrap(); + assert_eq!(diff.bins(), &[i64::MIN, 0, 10, 20]); + assert_eq!(diff.counts(), &[0, 0, 2, 0]); + assert_eq!(diff.n_samples(), 2); + assert!(diff.min().is_none()); + assert!(diff.max().is_none()); + assert_eq!(diff.mean(), 14.5); + assert_eq!(diff.std_dev(), Some(6.363961030678928)); + assert_eq!(diff.sample_std_dev(), Some(9.0)); + assert!(diff.p50.is_none()); + assert!(diff.p90.is_none()); + assert!(diff.p99.is_none()); + } + fn timestamps(n: usize) -> Vec> { let now = Utc::now(); let mut out = 
Vec::with_capacity(n); @@ -1972,7 +2191,17 @@ mod tests { timestamps: timestamps(1), values: vec![Values { values: ValueArray::IntegerDistribution(vec![Some( - Distribution { bins: vec![0, 1, 2], counts: vec![0; 3] }, + Distribution { + bins: vec![0, 1, 2], + counts: vec![0; 3], + min: Some(0), + max: Some(2), + sum_of_samples: 0, + squared_mean: 0.0, + p50: Some(Quantile::p50()), + p90: Some(Quantile::p90()), + p99: Some(Quantile::p99()), + }, )]), metric_type: MetricType::Gauge, }], @@ -2012,6 +2241,13 @@ mod tests { Distribution { bins: vec![0.0, 1.0, 2.0], counts: vec![0; 3], + min: Some(0.0), + max: Some(2.0), + sum_of_samples: 0.0, + squared_mean: 0.0, + p50: Some(Quantile::p50()), + p90: Some(Quantile::p90()), + p99: Some(Quantile::p99()), }, )]), metric_type: MetricType::Gauge, diff --git a/oximeter/db/src/oxql/query/mod.rs b/oximeter/db/src/oxql/query/mod.rs index 1c4383d68d..40a6c82f93 100644 --- a/oximeter/db/src/oxql/query/mod.rs +++ b/oximeter/db/src/oxql/query/mod.rs @@ -29,13 +29,45 @@ use std::time::Duration; pub mod special_idents { use oximeter::DatumType; + macro_rules! gen_marker { + ($p:expr, $field:expr) => { + concat!("p", $p, "_", $field) + }; + } + pub const TIMESTAMP: &str = "timestamp"; pub const START_TIME: &str = "start_time"; pub const DATUM: &str = "datum"; pub const BINS: &str = "bins"; pub const COUNTS: &str = "counts"; + pub const MIN: &str = "min"; + pub const MAX: &str = "max"; + pub const SUM_OF_SAMPLES: &str = "sum_of_samples"; + pub const SQUARED_MEAN: &str = "squared_mean"; pub const DATETIME64: &str = "DateTime64"; pub const ARRAYU64: &str = "Array[u64]"; + pub const ARRAYFLOAT64: &str = "Array[f64]"; + pub const ARRAYINT64: &str = "Array[i64]"; + pub const FLOAT64: &str = "f64"; + pub const UINT64: &str = "u64"; + + pub const DISTRIBUTION_IDENTS: [&str; 15] = [ + "bins", + "counts", + "min", + "max", + "sum_of_samples", + "squared_mean", + gen_marker!("50", "marker_heights"), + gen_marker!("50", "marker_positions"), + gen_marker!("50", "desired_marker_positions"), + gen_marker!("90", "marker_heights"), + gen_marker!("90", "marker_positions"), + gen_marker!("90", "desired_marker_positions"), + gen_marker!("99", "marker_heights"), + gen_marker!("99", "marker_positions"), + gen_marker!("99", "desired_marker_positions"), + ]; pub fn array_type_name_from_histogram_type( type_: DatumType, diff --git a/oximeter/db/src/query.rs b/oximeter/db/src/query.rs index e14dfbbc55..7b622920ff 100644 --- a/oximeter/db/src/query.rs +++ b/oximeter/db/src/query.rs @@ -249,7 +249,7 @@ impl SelectQueryBuilder { T: Target, M: Metric, { - let schema = TimeseriesSchema::new(target, metric); + let schema = TimeseriesSchema::new(target, metric)?; let mut builder = Self::new(&schema); let target_fields = target.field_names().iter().zip(target.field_values()); @@ -777,16 +777,22 @@ mod tests { fn test_select_query_builder_filter_raw() { let schema = TimeseriesSchema { timeseries_name: TimeseriesName::try_from("foo:bar").unwrap(), + description: Default::default(), + version: oximeter::schema::default_schema_version(), + authz_scope: oximeter::schema::AuthzScope::Fleet, + units: oximeter::schema::Units::Count, field_schema: [ FieldSchema { name: "f0".to_string(), field_type: FieldType::I64, source: FieldSource::Target, + description: String::new(), }, FieldSchema { name: "f1".to_string(), field_type: FieldType::Bool, source: FieldSource::Target, + description: String::new(), }, ] .into_iter() @@ -910,6 +916,10 @@ mod tests { fn test_select_query_builder_no_fields() { let schema = 
TimeseriesSchema { timeseries_name: TimeseriesName::try_from("foo:bar").unwrap(), + description: Default::default(), + version: oximeter::schema::default_schema_version(), + authz_scope: oximeter::schema::AuthzScope::Fleet, + units: oximeter::schema::Units::Count, field_schema: BTreeSet::new(), datum_type: DatumType::I64, created: Utc::now(), @@ -932,6 +942,10 @@ mod tests { fn test_select_query_builder_limit_offset() { let schema = TimeseriesSchema { timeseries_name: TimeseriesName::try_from("foo:bar").unwrap(), + description: Default::default(), + version: oximeter::schema::default_schema_version(), + authz_scope: oximeter::schema::AuthzScope::Fleet, + units: oximeter::schema::Units::Count, field_schema: BTreeSet::new(), datum_type: DatumType::I64, created: Utc::now(), @@ -1002,16 +1016,22 @@ mod tests { fn test_select_query_builder_no_selectors() { let schema = TimeseriesSchema { timeseries_name: TimeseriesName::try_from("foo:bar").unwrap(), + description: Default::default(), + version: oximeter::schema::default_schema_version(), + authz_scope: oximeter::schema::AuthzScope::Fleet, + units: oximeter::schema::Units::Count, field_schema: [ FieldSchema { name: "f0".to_string(), field_type: FieldType::I64, source: FieldSource::Target, + description: String::new(), }, FieldSchema { name: "f1".to_string(), field_type: FieldType::Bool, source: FieldSource::Target, + description: String::new(), }, ] .into_iter() @@ -1065,16 +1085,22 @@ mod tests { fn test_select_query_builder_field_selectors() { let schema = TimeseriesSchema { timeseries_name: TimeseriesName::try_from("foo:bar").unwrap(), + description: Default::default(), + version: oximeter::schema::default_schema_version(), + authz_scope: oximeter::schema::AuthzScope::Fleet, + units: oximeter::schema::Units::Count, field_schema: [ FieldSchema { name: "f0".to_string(), field_type: FieldType::I64, source: FieldSource::Target, + description: String::new(), }, FieldSchema { name: "f1".to_string(), field_type: FieldType::Bool, source: FieldSource::Target, + description: String::new(), }, ] .into_iter() @@ -1116,16 +1142,22 @@ mod tests { fn test_select_query_builder_full() { let schema = TimeseriesSchema { timeseries_name: TimeseriesName::try_from("foo:bar").unwrap(), + description: Default::default(), + version: oximeter::schema::default_schema_version(), + authz_scope: oximeter::schema::AuthzScope::Fleet, + units: oximeter::schema::Units::Count, field_schema: [ FieldSchema { name: "f0".to_string(), field_type: FieldType::I64, source: FieldSource::Target, + description: String::new(), }, FieldSchema { name: "f1".to_string(), field_type: FieldType::Bool, source: FieldSource::Target, + description: String::new(), }, ] .into_iter() diff --git a/oximeter/db/src/sql/mod.rs b/oximeter/db/src/sql/mod.rs index f3082dcaa5..e434608b1c 100644 --- a/oximeter/db/src/sql/mod.rs +++ b/oximeter/db/src/sql/mod.rs @@ -610,12 +610,31 @@ impl RestrictedQuery { // Return the required measurement columns for a specific datum type. // // Scalar measurements have only a timestamp and datum. Cumulative counters - // have those plus a start_time. And histograms have those plus the bins. + // have those plus a start_time. And histograms have those plus the bins, + // counts, min, max, sum of samples, sum of squares, and quantile arrays. 
fn datum_type_to_columns( datum_type: &DatumType, ) -> &'static [&'static str] { if datum_type.is_histogram() { - &["start_time", "timestamp", "bins", "counts"] + &[ + "start_time", + "timestamp", + "bins", + "counts", + "min", + "max", + "sum_of_samples", + "squared_mean", + "p50_marker_heights", + "p50_marker_positions", + "p50_desired_marker_positions", + "p90_marker_heights", + "p90_marker_positions", + "p90_desired_marker_positions", + "p99_marker_heights", + "p99_marker_positions", + "p99_desired_marker_positions", + ] } else if datum_type.is_cumulative() { &["start_time", "timestamp", "datum"] } else { diff --git a/oximeter/impl/Cargo.toml b/oximeter/impl/Cargo.toml new file mode 100644 index 0000000000..a8b42d41cd --- /dev/null +++ b/oximeter/impl/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "oximeter-impl" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[lints] +workspace = true + +[dependencies] +bytes = { workspace = true, features = [ "serde" ] } +chrono.workspace = true +float-ord.workspace = true +heck.workspace = true +num.workspace = true +omicron-common.workspace = true +omicron-workspace-hack.workspace = true +oximeter-macro-impl.workspace = true +prettyplease.workspace = true +proc-macro2.workspace = true +quote.workspace = true +regex.workspace = true +schemars = { workspace = true, features = [ "uuid1", "bytes", "chrono" ] } +serde.workspace = true +serde_json.workspace = true +slog-error-chain.workspace = true +strum.workspace = true +syn.workspace = true +toml.workspace = true +thiserror.workspace = true +uuid.workspace = true + +[dev-dependencies] +approx.workspace = true +rand = { workspace = true, features = ["std_rng"] } +rand_distr.workspace = true +rstest.workspace = true +serde_json.workspace = true +trybuild.workspace = true diff --git a/oximeter/oximeter/src/histogram.rs b/oximeter/impl/src/histogram.rs similarity index 75% rename from oximeter/oximeter/src/histogram.rs rename to oximeter/impl/src/histogram.rs index 82b9916153..0fb175555e 100644 --- a/oximeter/oximeter/src/histogram.rs +++ b/oximeter/impl/src/histogram.rs @@ -4,23 +4,28 @@ //! Types for managing metrics that are histograms. -// Copyright 2023 Oxide Computer Company +// Copyright 2024 Oxide Computer Company +use super::Quantile; +use super::QuantileError; use chrono::DateTime; use chrono::Utc; use num::traits::Bounded; use num::traits::FromPrimitive; use num::traits::Num; use num::traits::ToPrimitive; +use num::traits::Zero; +use num::CheckedAdd; +use num::CheckedMul; use num::Float; use num::Integer; use num::NumCast; use schemars::JsonSchema; -use serde::de::DeserializeOwned; use serde::Deserialize; use serde::Serialize; use std::cmp::Ordering; use std::num::NonZeroUsize; +use std::ops::AddAssign; use std::ops::Bound; use std::ops::Range; use std::ops::RangeBounds; @@ -37,24 +42,34 @@ pub trait HistogramSupport: + Bounded + JsonSchema + Serialize - + DeserializeOwned + Clone + Num + + Zero + FromPrimitive + ToPrimitive + + AddAssign + NumCast + 'static { type Power; + type Width: HistogramAdditiveWidth; /// Return true if `self` is a finite number, not NAN or infinite. fn is_finite(&self) -> bool; } +/// Used for designating the subset of types that can be used as the width for +/// summing up values in a histogram. +pub trait HistogramAdditiveWidth: HistogramSupport {} + +impl HistogramAdditiveWidth for i64 {} +impl HistogramAdditiveWidth for f64 {} + macro_rules! 
impl_int_histogram_support { ($($type:ty),+) => { $( impl HistogramSupport for $type { type Power = u16; + type Width = i64; fn is_finite(&self) -> bool { true } @@ -70,6 +85,7 @@ macro_rules! impl_float_histogram_support { $( impl HistogramSupport for $type { type Power = i16; + type Width = f64; fn is_finite(&self) -> bool { <$type>::is_finite(*self) } @@ -93,8 +109,10 @@ pub enum HistogramError { NonmonotonicBins, /// A non-finite was encountered, either as a bin edge or a sample. - #[error("Bin edges and samples must be finite values, found: {0:?}")] - NonFiniteValue(String), + #[error( + "Bin edges and samples must be finite values, not Infinity or NaN" + )] + NonFiniteValue, /// Error returned when two neighboring bins are not adjoining (there's space between them) #[error("Neigboring bins {left} and {right} are not adjoining")] @@ -104,8 +122,13 @@ pub enum HistogramError { #[error("Bin and count arrays must have the same size, found {n_bins} and {n_counts}")] ArraySizeMismatch { n_bins: usize, n_counts: usize }, + /// Error returned when a quantization error occurs. #[error("Quantization error")] Quantization(#[from] QuantizationError), + + /// Error returned when a quantile error occurs. + #[error("Quantile error")] + Quantile(#[from] QuantileError), } /// Errors occurring during quantizated bin generation. @@ -272,6 +295,10 @@ pub struct Bin { pub count: u64, } +/// Internal, creation-specific newtype wrapper around `Vec>` to +/// implement conversion(s). +struct Bins(Vec>); + /// Histogram metric /// /// A histogram maintains the count of any number of samples, over a set of bins. Bins are @@ -333,12 +360,139 @@ pub struct Bin { // `Histogram::with_log_linear_bins()` are exactly the ones expected. #[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema)] #[schemars(rename = "Histogram{T}")] -pub struct Histogram { +pub struct Histogram +where + T: HistogramSupport, +{ + /// The start time of the histogram. start_time: DateTime, + /// The bins of the histogram. bins: Vec>, + /// The total number of samples in the histogram. n_samples: u64, + /// The minimum value of all samples in the histogram. + min: T, + /// The maximum value of all samples in the histogram. + max: T, + /// The sum of all samples in the histogram. + sum_of_samples: T::Width, + /// M2 for Welford's algorithm for variance calculation. + /// + /// Read about [Welford's algorithm](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm) + /// for more information on the algorithm. + squared_mean: f64, + /// p50 Quantile + p50: Quantile, + /// p95 Quantile + p90: Quantile, + /// p99 Quantile + p99: Quantile, } +/// A trait for recording samples into a histogram. +pub trait Record { + /// Add a new sample into the histogram. + /// + /// This bumps the internal counter at the bin containing `value`. An `Err` is returned if the + /// sample is not within the distribution's support (non-finite). + fn sample(&mut self, value: T) -> Result<(), HistogramError>; +} + +macro_rules! impl_int_sample { + ($($type:ty),+) => { + $( + impl Record<$type> for Histogram<$type> where $type: HistogramSupport + Integer + CheckedAdd + CheckedMul { + fn sample(&mut self, value: $type) -> Result<(), HistogramError> { + ensure_finite(value)?; + + if self.n_samples == 0 { + self.min = <$type>::max_value(); + self.max = <$type>::min_value(); + } + + // For squared mean (M2) calculation, before we update the + // count. 
+ let value_f = value as f64; + let current_mean = self.mean(); + + let index = self + .bins + .binary_search_by(|bin| bin.range.cmp(&value).reverse()) + .unwrap(); // The `ensure_finite` call above catches values that don't end up in a bin + self.bins[index].count += 1; + self.n_samples += 1; + self.min = self.min.min(value); + self.max = self.max.max(value); + self.sum_of_samples = self.sum_of_samples.saturating_add(value as i64); + + let delta = value_f - current_mean; + let updated_mean = current_mean + delta / (self.n_samples as f64); + let delta2 = value_f - updated_mean; + self.squared_mean += (delta * delta2); + + self.p50.append(value)?; + self.p90.append(value)?; + self.p99.append(value)?; + Ok(()) + } + } + )+ + } +} + +impl_int_sample! { i8, u8, i16, u16, i32, u32, i64, u64 } + +macro_rules! impl_float_sample { + ($($type:ty),+) => { + $( + impl Record<$type> for Histogram<$type> where $type: HistogramSupport + Float { + fn sample(&mut self, value: $type) -> Result<(), HistogramError> { + ensure_finite(value)?; + + if self.n_samples == 0 { + self.min = <$type as num::Bounded>::max_value(); + self.max = <$type as num::Bounded>::min_value(); + } + + // For squared mean (M2) calculation, before we update the + // count. + let value_f = value as f64; + let current_mean = self.mean(); + + let index = self + .bins + .binary_search_by(|bin| bin.range.cmp(&value).reverse()) + .unwrap(); // The `ensure_finite` call above catches values that don't end up in a bin + self.bins[index].count += 1; + self.n_samples += 1; + + if value < self.min { + self.min = value; + } + if value > self.max { + self.max = value; + } + + self.sum_of_samples += value_f; + + let delta = value_f - current_mean; + let updated_mean = current_mean + delta / (self.n_samples as f64); + let delta2 = value_f - updated_mean; + self.squared_mean += (delta * delta2); + + self.p50.append(value)?; + self.p90.append(value)?; + self.p99.append(value)?; + + Ok(()) + } + } + )+ + } +} + +impl_float_sample! { f32, f64 } + impl Histogram where T: HistogramSupport, @@ -355,6 +509,9 @@ where /// Example /// ------- /// ```rust + /// # // Rename the impl crate so the doctests can refer to the public + /// # // `oximeter` crate, not the private impl. + /// # use oximeter_impl as oximeter; /// use oximeter::histogram::Histogram; /// /// let hist = Histogram::with_bins(&[(0..10).into(), (10..100).into()]).unwrap(); @@ -435,67 +592,82 @@ where if let Bound::Excluded(end) = bins_.last().unwrap().range.end_bound() { ensure_finite(*end)?; } - Ok(Self { start_time: Utc::now(), bins: bins_, n_samples: 0 }) + Ok(Self { + start_time: Utc::now(), + bins: bins_, + n_samples: 0, + min: T::zero(), + max: T::zero(), + sum_of_samples: T::Width::zero(), + squared_mean: 0.0, + p50: Quantile::p50(), + p90: Quantile::p90(), + p99: Quantile::p99(), + }) } /// Construct a new histogram from left bin edges. /// - /// The left edges of the bins must be specified as a non-empty, monotonically increasing - /// slice. An `Err` is returned if either constraint is violated. + /// The left edges of the bins must be specified as a non-empty, + /// monotonically increasing slice. An `Err` is returned if either + /// constraint is violated. 
pub fn new(left_edges: &[T]) -> Result { - let mut items = left_edges.iter(); - let mut bins = Vec::with_capacity(left_edges.len() + 1); - let mut current = *items.next().ok_or(HistogramError::EmptyBins)?; - ensure_finite(current)?; - let min = ::min_value(); - if current > min { - // Bin greater than the minimum was specified, insert a new one from `MIN..current`. - bins.push(Bin { range: BinRange::range(min, current), count: 0 }); - } else if current == min { - // An edge *at* the minimum was specified. Consume it, and insert a bin from - // `MIN..next`, if one exists. If one does not, or if this is the last item, the - // following loop will not be entered. - let next = - items.next().cloned().unwrap_or_else(::max_value); - bins.push(Bin { range: BinRange::range(min, next), count: 0 }); - current = next; - } - for &next in items { - if current < next { - ensure_finite(next)?; - bins.push(Bin { - range: BinRange::range(current, next), - count: 0, - }); - current = next; - } else if current >= next { - return Err(HistogramError::NonmonotonicBins); - } else { - return Err(HistogramError::NonFiniteValue(format!( - "{:?}", - current - ))); - } + let bins = Bins::try_from(left_edges)?; + Ok(Self { + start_time: Utc::now(), + bins: bins.0, + n_samples: 0, + min: T::zero(), + max: T::zero(), + sum_of_samples: T::Width::zero(), + squared_mean: 0.0, + p50: Quantile::p50(), + p90: Quantile::p90(), + p99: Quantile::p99(), + }) + } + + /// Construct a new histogram with the given struct information, including + /// bins, counts, and quantiles. + #[allow(clippy::too_many_arguments)] + pub fn from_parts( + start_time: DateTime, + bins: Vec, + counts: Vec, + min: T, + max: T, + sum_of_samples: T::Width, + squared_mean: f64, + p50: Quantile, + p90: Quantile, + p99: Quantile, + ) -> Result { + if bins.len() != counts.len() { + return Err(HistogramError::ArraySizeMismatch { + n_bins: bins.len(), + n_counts: counts.len(), + }); } - if current < ::max_value() { - bins.push(Bin { range: BinRange::from(current), count: 0 }); + + let mut bins = Bins::try_from(bins.as_slice())?.0; + let mut n_samples = 0; + for (bin, count) in bins.iter_mut().zip(counts.into_iter()) { + bin.count = count; + n_samples += count; } - Ok(Self { start_time: Utc::now(), bins, n_samples: 0 }) - } - /// Add a new sample into the histogram. - /// - /// This bumps the internal counter at the bin containing `value`. An `Err` is returned if the - /// sample is not within the distribution's support (non-finite). - pub fn sample(&mut self, value: T) -> Result<(), HistogramError> { - ensure_finite(value)?; - let index = self - .bins - .binary_search_by(|bin| bin.range.cmp(&value).reverse()) - .unwrap(); // The `ensure_finite` call above catches values that don't end up in a bin - self.bins[index].count += 1; - self.n_samples += 1; - Ok(()) + Ok(Self { + start_time, + bins, + n_samples, + min, + max, + sum_of_samples, + squared_mean, + p50, + p90, + p99, + }) } /// Return the total number of samples contained in the histogram. @@ -508,32 +680,18 @@ where self.bins.len() } - /// Iterate over the bins of the histogram. - pub fn iter(&self) -> impl Iterator> { - self.bins.iter() - } - - /// Get the bin at the given index. - pub fn get(&self, index: usize) -> Option<&Bin> { - self.bins.get(index) - } - - /// Generate paired arrays with the left bin edges and the counts, for each bin. - /// - /// The returned edges are always left-inclusive, by construction of the histogram. 
- pub fn to_arrays(&self) -> (Vec, Vec) { + /// Return the bins of the histogram. + pub fn bins_and_counts(&self) -> (Vec, Vec) { let mut bins = Vec::with_capacity(self.n_bins()); let mut counts = Vec::with_capacity(self.n_bins()); - - // The first bin may either be BinRange::To or BinRange::Range. for bin in self.bins.iter() { match bin.range { BinRange::Range { start, .. } => { bins.push(start); - }, - BinRange::RangeFrom{start} => { + } + BinRange::RangeFrom { start} => { bins.push(start); - }, + } _ => unreachable!("No bins in a constructed histogram should be of type RangeTo"), } counts.push(bin.count); @@ -541,33 +699,183 @@ where (bins, counts) } - /// Construct a histogram from a start time and paired arrays with the left bin-edge and counts. - pub fn from_arrays( - start_time: DateTime, - bins: Vec, - counts: Vec, - ) -> Result { - if bins.len() != counts.len() { - return Err(HistogramError::ArraySizeMismatch { - n_bins: bins.len(), - n_counts: counts.len(), - }); + /// Return the minimum value of inputs to the histogram. + pub fn min(&self) -> T { + self.min + } + + /// Return the maximum value of all inputs to the histogram. + pub fn max(&self) -> T { + self.max + } + + /// Return the sum of all inputs to the histogram. + pub fn sum_of_samples(&self) -> T::Width { + self.sum_of_samples + } + + /// Return the squared mean (M2) of all inputs to the histogram. + pub fn squared_mean(&self) -> f64 { + self.squared_mean + } + + /// Return the mean of all inputs/samples in the histogram. + pub fn mean(&self) -> f64 { + if self.n_samples() > 0 { + self.sum_of_samples + .to_f64() + .map(|sum| sum / (self.n_samples() as f64)) + .unwrap() + } else { + 0. } - let mut hist = Self::new(&bins)?; - hist.start_time = start_time; - let mut n_samples = 0; - for (bin, count) in hist.bins.iter_mut().zip(counts.into_iter()) { - bin.count = count; - n_samples += count; + } + + /// Return the variance for inputs to the histogram based on the Welford's + /// algorithm, using the squared mean (M2). + /// + /// Returns `None` if there are fewer than two samples. + pub fn variance(&self) -> Option { + (self.n_samples() > 1) + .then(|| self.squared_mean / (self.n_samples() as f64)) + } + + /// Return the sample variance for inputs to the histogram based on the + /// Welford's algorithm, using the squared mean (M2). + /// + /// Returns `None` if there are fewer than two samples. + pub fn sample_variance(&self) -> Option { + (self.n_samples() > 1) + .then(|| self.squared_mean / ((self.n_samples() - 1) as f64)) + } + + /// Return the standard deviation for inputs to the histogram. + /// + /// This is a biased (as a consequence of Jensen’s inequality), estimate of + /// the population deviation that returns the standard deviation of the + /// samples seen by the histogram. + /// + /// Returns `None` if the variance is `None`, i.e., if there are fewer than + /// two samples. + pub fn std_dev(&self) -> Option { + match self.variance() { + Some(variance) => Some(variance.sqrt()), + None => None, } - hist.n_samples = n_samples; - Ok(hist) } - /// Return the start time for this histogram + /// Return the "corrected" sample standard deviation for inputs to the + /// histogram. + /// + /// This is an unbiased estimate of the population deviation, applying + /// Bessel's correction, which corrects the bias in the estimation of the + /// population variance, and some, but not all of the bias in the estimation + /// of the population standard deviation. 
+ /// + /// Returns `None` if the variance is `None`, i.e., if there are fewer than + /// two samples. + pub fn sample_std_dev(&self) -> Option { + match self.sample_variance() { + Some(variance) => Some(variance.sqrt()), + None => None, + } + } + + /// Iterate over the bins of the histogram. + pub fn iter(&self) -> impl Iterator> { + self.bins.iter() + } + + /// Get the bin at the given index. + pub fn get(&self, index: usize) -> Option<&Bin> { + self.bins.get(index) + } + + /// Return the start time for this histogram. pub fn start_time(&self) -> DateTime { self.start_time } + + /// Set the start time for this histogram. + pub fn set_start_time(&mut self, start_time: DateTime) { + self.start_time = start_time; + } + + /// Return the p50 quantile for the histogram. + pub fn p50q(&self) -> Quantile { + self.p50 + } + + /// Return the p90 quantile for the histogram. + pub fn p90q(&self) -> Quantile { + self.p90 + } + + /// Return the p99 quantile for the histogram. + pub fn p99q(&self) -> Quantile { + self.p99 + } + + /// Return the p50 estimate for the histogram. + pub fn p50(&self) -> Result { + self.p50.estimate() + } + + /// Return the p90 estimate for the histogram. + pub fn p90(&self) -> Result { + self.p90.estimate() + } + + /// Return the p99 estimate for the histogram. + pub fn p99(&self) -> Result { + self.p99.estimate() + } +} + +impl TryFrom<&[T]> for Bins +where + T: HistogramSupport, +{ + type Error = HistogramError; + + fn try_from(left_edges: &[T]) -> Result { + let mut items = left_edges.iter(); + let mut bins: Vec> = Vec::with_capacity(left_edges.len() + 1); + let mut current: T = *items.next().ok_or(HistogramError::EmptyBins)?; + ensure_finite(current)?; + let min: T = ::min_value(); + if current > min { + // Bin greater than the minimum was specified, insert a new one from `MIN..current`. + bins.push(Bin { range: BinRange::range(min, current), count: 0 }); + } else if current == min { + // An edge *at* the minimum was specified. Consume it, and insert a bin from + // `MIN..next`, if one exists. If one does not, or if this is the last item, the + // following loop will not be entered. + let next: T = + items.next().cloned().unwrap_or_else(::max_value); + bins.push(Bin { range: BinRange::range(min, next), count: 0 }); + current = next; + } + for &next in items { + if current < next { + ensure_finite(next)?; + bins.push(Bin { + range: BinRange::range(current, next), + count: 0, + }); + current = next; + } else if current >= next { + return Err(HistogramError::NonmonotonicBins); + } else { + return Err(HistogramError::NonFiniteValue); + } + } + if current < ::max_value() { + bins.push(Bin { range: BinRange::from(current), count: 0 }); + } + + Ok(Bins(bins)) + } } impl Histogram @@ -600,6 +908,9 @@ where /// ------- /// /// ```rust + /// # // Rename the impl crate so the doctests can refer to the public + /// # // `oximeter` crate, not the private impl. 
+ /// # use oximeter_impl as oximeter; /// use oximeter::histogram::{Histogram, BinRange}; /// use std::ops::{RangeBounds, Bound}; /// @@ -871,7 +1182,7 @@ where if value.is_finite() { Ok(()) } else { - Err(HistogramError::NonFiniteValue(format!("{:?}", value))) + Err(HistogramError::NonFiniteValue) } } @@ -938,20 +1249,77 @@ mod tests { "Histogram should have 1 more bin than bin edges specified" ); assert_eq!(hist.n_samples(), 0, "Histogram should init with 0 samples"); - - let samples = [-10i64, 0, 1, 10, 50]; + let max_sample = 100; + let min_sample = -10i64; + let samples = [min_sample, 0, 1, 10, max_sample]; let expected_counts = [1u64, 2, 1, 1]; for (i, sample) in samples.iter().enumerate() { hist.sample(*sample).unwrap(); let count = i as u64 + 1; + let current_sum = samples[..=i].iter().sum::() as f64; + let current_mean = current_sum / count as f64; + let current_std_dev = (samples[..=i] + .iter() + .map(|x| (*x as f64 - current_mean).powi(2)) + .sum::() + / count as f64) + .sqrt(); + let current_sample_std_dev = (samples[..=i] + .iter() + .map(|x| (*x as f64 - current_mean).powi(2)) + .sum::() + / (count - 1) as f64) + .sqrt(); assert_eq!( hist.n_samples(), count, "Histogram should have {} sample(s)", count ); + + if count > 0 { + assert_eq!( + hist.mean(), + current_mean, + "Histogram should have a mean of {}", + current_mean + ); + } else { + assert!(hist.mean().is_zero()); + } + + if count > 1 { + assert_eq!( + hist.std_dev().unwrap(), + current_std_dev, + "Histogram should have a sample standard deviation of {}", + current_std_dev + ); + assert_eq!( + hist.sample_std_dev().unwrap(), + current_sample_std_dev, + "Histogram should have a sample standard deviation of {}", + current_sample_std_dev + ); + } else { + assert!(hist.std_dev().is_none()); + assert!(hist.sample_std_dev().is_none()); + } } + assert_eq!( + hist.min(), + min_sample, + "Histogram should have a minimum value of {}", + min_sample + ); + assert_eq!( + hist.max(), + max_sample, + "Histogram should have a maximum value of {}", + max_sample + ); + for (bin, &expected_count) in hist.iter().zip(expected_counts.iter()) { assert_eq!( bin.count, expected_count, @@ -959,6 +1327,15 @@ mod tests { bin.range, expected_count, bin.count ); } + + let p50 = hist.p50().unwrap(); + assert_eq!(p50, 1.0, "P50 should be 1.0, but found {}", p50); + + let p90 = hist.p90().unwrap(); + assert_eq!(p90, 100.0, "P90 should be 100.0, but found {}", p90); + + let p99 = hist.p99().unwrap(); + assert_eq!(p99, 100.0, "P99 should be 100.0, but found {}", p99); } #[test] @@ -972,6 +1349,45 @@ mod tests { assert_eq!(data[2].range, BinRange::from(10)); } + #[test] + fn test_histogram_construct_with() { + let mut hist = Histogram::new(&[0, 10, 20]).unwrap(); + hist.sample(1).unwrap(); + hist.sample(11).unwrap(); + + let (bins, counts) = hist.bins_and_counts(); + assert_eq!( + bins.len(), + counts.len(), + "Bins and counts should have the same size" + ); + assert_eq!( + bins.len(), + hist.n_bins(), + "Paired-array bins should be of the same length as the histogram" + ); + assert_eq!(counts, &[0, 1, 1, 0], "Paired-array counts are incorrect"); + assert_eq!(hist.n_samples(), 2); + + let rebuilt = Histogram::from_parts( + hist.start_time(), + bins, + counts, + hist.min(), + hist.max(), + hist.sum_of_samples(), + hist.squared_mean(), + hist.p50, + hist.p90, + hist.p99, + ) + .unwrap(); + assert_eq!( + hist, rebuilt, + "Histogram reconstructed from paired arrays is not correct" + ); + } + #[test] fn test_histogram_with_overlapping_bins() { let bins = 
&[(..1_u64).into(), (0..10).into()]; @@ -1082,33 +1498,6 @@ mod tests { ); } - #[test] - fn test_histogram_to_arrays() { - let mut hist = Histogram::new(&[0, 10, 20]).unwrap(); - hist.sample(1).unwrap(); - hist.sample(11).unwrap(); - - let (bins, counts) = hist.to_arrays(); - assert_eq!( - bins.len(), - counts.len(), - "Bins and counts should have the same size" - ); - assert_eq!( - bins.len(), - hist.n_bins(), - "Paired-array bins should be of the same length as the histogram" - ); - assert_eq!(counts, &[0, 1, 1, 0], "Paired-array counts are incorrect"); - - let rebuilt = - Histogram::from_arrays(hist.start_time(), bins, counts).unwrap(); - assert_eq!( - hist, rebuilt, - "Histogram reconstructed from paired arrays is not correct" - ); - } - #[test] fn test_span_decades() { let hist = Histogram::::span_decades(0, 3).unwrap(); diff --git a/oximeter/impl/src/lib.rs b/oximeter/impl/src/lib.rs new file mode 100644 index 0000000000..5acbeb9422 --- /dev/null +++ b/oximeter/impl/src/lib.rs @@ -0,0 +1,51 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Copyright 2024 Oxide Computer Company + +pub use oximeter_macro_impl::*; + +// Export the current crate as `oximeter`. The macros defined in `oximeter-macro-impl` generate +// code referring to symbols like `oximeter::traits::Target`. In consumers of this crate, that's +// fine, but internally there _is_ no crate named `oximeter`, it's just `self` or `crate`. +// +// See https://github.com/rust-lang/rust/pull/55275 for the PR introducing this fix, which links to +// lots of related issues and discussion. +extern crate self as oximeter; + +pub mod histogram; +pub mod quantile; +pub mod schema; +pub mod test_util; +pub mod traits; +pub mod types; + +pub use quantile::Quantile; +pub use quantile::QuantileError; +pub use schema::FieldSchema; +pub use schema::TimeseriesName; +pub use schema::TimeseriesSchema; +pub use traits::Metric; +pub use traits::Producer; +pub use traits::Target; +pub use types::Datum; +pub use types::DatumType; +pub use types::Field; +pub use types::FieldType; +pub use types::FieldValue; +pub use types::Measurement; +pub use types::MetricsError; +pub use types::Sample; + +/// Construct the timeseries name for a Target and Metric. +pub fn timeseries_name( + target: &T, + metric: &M, +) -> Result +where + T: Target, + M: Metric, +{ + TimeseriesName::try_from(format!("{}:{}", target.name(), metric.name())) +} diff --git a/oximeter/impl/src/quantile.rs b/oximeter/impl/src/quantile.rs new file mode 100644 index 0000000000..3e070cc302 --- /dev/null +++ b/oximeter/impl/src/quantile.rs @@ -0,0 +1,604 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Data structure for expressing quantile estimation. +//! This is based on the P² heuristic algorithm for dynamic +//! calculation of the median and other quantiles. The estimates +//! are produced dynamically as the observations are generated. +//! The observations are not stored; therefore, the algorithm has +//! a very small and fixed storage requirement regardless of the +//! number of observations. +//! +//! Read the [paper](https://www.cs.wustl.edu/~jain/papers/ftp/psqr.pdf) +//! for more specifics. 
+ +// Copyright 2024 Oxide Computer Company + +use crate::traits::HistogramSupport; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use thiserror::Error; + +const FILLED_MARKER_LEN: usize = 5; + +/// Errors related to constructing a `Quantile` instance or estimating the +/// p-quantile. +#[derive( + Debug, Clone, Error, JsonSchema, Serialize, Deserialize, PartialEq, +)] +#[serde(tag = "type", content = "content", rename_all = "snake_case")] +pub enum QuantileError { + /// The p value must be in the range [0, 1]. + #[error("The p value must be in the range [0, 1].")] + InvalidPValue, + /// Quantile estimation is not possible without samples. + #[error("Quantile estimation is not possible without any samples.")] + InsufficientSampleSize, + /// A non-finite was encountered, either as a bin edge or a sample. + #[error("Samples must be finite values, not Infinity or NaN.")] + NonFiniteValue, +} + +/// Structure for estimating the p-quantile of a population. +/// +/// This is based on the P² algorithm for estimating quantiles using +/// constant space. +/// +/// The algorithm consists of maintaining five markers: the +/// minimum, the p/2-, p-, and (1 + p)/2 quantiles, and the maximum. +#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize, JsonSchema)] +pub struct Quantile { + /// The p value for the quantile. + p: f64, + /// The heights of the markers. + marker_heights: [f64; FILLED_MARKER_LEN], + /// The positions of the markers. + /// + /// We track sample size in the 5th position, as useful observations won't + /// start until we've filled the heights at the 6th sample anyway + /// This does deviate from the paper, but it's a more useful representation + /// that works according to the paper's algorithm. + marker_positions: [u64; FILLED_MARKER_LEN], + /// The desired marker positions. + desired_marker_positions: [f64; FILLED_MARKER_LEN], +} + +impl Quantile { + /// Create a new `Quantile` instance. + /// + /// Returns a result containing the `Quantile` instance or an error. + /// + /// # Errors + /// + /// Returns [`QuantileError::InvalidPValue`] if the p value is not in the + /// range [0, 1]. + /// + /// # Examples + /// + /// ``` + /// # // Rename the impl crate so the doctests can refer to the public + /// # // `oximeter` crate, not the private impl. + /// # use oximeter_impl as oximeter; + /// use oximeter::Quantile; + /// let q = Quantile::new(0.5).unwrap(); + /// + /// assert_eq!(q.p(), 0.5); + /// assert_eq!(q.len(), 0); + /// ``` + pub fn new(p: f64) -> Result { + if p < 0. || p > 1. { + return Err(QuantileError::InvalidPValue); + } + + Ok(Self { + p, + marker_heights: [0.; FILLED_MARKER_LEN], + // We start with a sample size of 0. + marker_positions: [1, 2, 3, 4, 0], + // 1-indexed, which is like the paper, but + // used to keep track of the sample size without + // needing to do a separate count, use a Vec, + // or do any other kind of bookkeeping. + desired_marker_positions: [ + 1., + 1. + 2. * p, + 1. + 4. * p, + 3. + 2. * p, + 5., + ], + }) + } + + /// Create a new `Quantile` instance from the given a p-value, marker + /// heights and positions. + /// + /// # Examples + /// ``` + /// # // Rename the impl crate so the doctests can refer to the public + /// # // `oximeter` crate, not the private impl. 
+ /// # use oximeter_impl as oximeter; + /// use oximeter::Quantile; + /// let q = Quantile::from_parts( + /// 0.5, + /// [0., 1., 2., 3., 4.], + /// [1, 2, 3, 4, 5], + /// [1., 3., 5., 7., 9.], + /// ); + /// ``` + pub fn from_parts( + p: f64, + marker_heights: [f64; FILLED_MARKER_LEN], + marker_positions: [u64; FILLED_MARKER_LEN], + desired_marker_positions: [f64; FILLED_MARKER_LEN], + ) -> Self { + Self { p, marker_heights, marker_positions, desired_marker_positions } + } + + /// Construct a `Quantile` instance for the 50th/median percentile. + pub fn p50() -> Self { + Self::new(0.5).unwrap() + } + + /// Construct a `Quantile` instance for the 90th percentile. + pub fn p90() -> Self { + Self::new(0.9).unwrap() + } + + /// Construct a `Quantile` instance for the 95th percentile. + pub fn p95() -> Self { + Self::new(0.95).unwrap() + } + + /// Construct a `Quantile` instance for the 99th percentile. + pub fn p99() -> Self { + Self::new(0.99).unwrap() + } + + /// Get the p value as a float. + pub fn p(&self) -> f64 { + self.p + } + + /// Return the sample size. + pub fn len(&self) -> u64 { + self.marker_positions[4] + } + + /// Determine if the number of samples in the population are empty. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Return the marker heights. + pub fn marker_heights(&self) -> [f64; FILLED_MARKER_LEN] { + self.marker_heights + } + + /// Return the marker positions. + pub fn marker_positions(&self) -> [u64; FILLED_MARKER_LEN] { + self.marker_positions + } + + /// Return the desired marker positions. + pub fn desired_marker_positions(&self) -> [f64; FILLED_MARKER_LEN] { + self.desired_marker_positions + } + + /// Estimate the p-quantile of the population. + /// + /// This is step B.4 in the P² algorithm. + /// + /// Returns a result containing the estimated p-quantile or an error. + /// + /// # Errors + /// + /// Returns [`QuantileError::InsufficientSampleSize`] if the sample size + /// is empty. + /// + /// # Examples + /// + /// ``` + /// # // Rename the impl crate so the doctests can refer to the public + /// # // `oximeter` crate, not the private impl. + /// # use oximeter_impl as oximeter; + /// use oximeter::Quantile; + /// let mut q = Quantile::new(0.5).unwrap(); + /// for o in 1..=100 { + /// q.append(o).unwrap(); + /// } + /// assert_eq!(q.estimate().unwrap(), 50.0); + /// ``` + pub fn estimate(&self) -> Result { + if self.is_empty() { + return Err(QuantileError::InsufficientSampleSize); + } + + if self.len() >= FILLED_MARKER_LEN as u64 { + return Ok(self.marker_heights[2]); + } + + // Try to find an index in heights that is correlated with the p value + // when we have less than 5 samples, but more than 0. + let mut heights = self.marker_heights; + float_ord::sort(&mut heights); + let idx = (heights.len() as f64 - 1.) * self.p(); + return Ok(heights[idx.round() as usize]); + } + + /// Append a value/observation to the population and adjust the heights. + /// + /// This comprises steps B.1, B.2, B.3 (adjust heights) in the P² algorithm, + /// including finding the cell k containing the input value and updating the + /// current and desired marker positions. + /// + /// Returns an empty result or an error. + /// + /// # Errors + /// + /// Returns [`QuantileError::NonFiniteValue`] if the value is not finite + /// when casting to a float. + /// + /// # Examples + /// + /// ``` + /// # // Rename the impl crate so the doctests can refer to the public + /// # // `oximeter` crate, not the private impl. 
+ /// # use oximeter_impl as oximeter; + /// use oximeter::Quantile; + /// let mut q = Quantile::new(0.9).unwrap(); + /// q.append(10).unwrap(); + /// assert_eq!(q.len(), 1); + /// ``` + pub fn append(&mut self, value: T) -> Result<(), QuantileError> + where + T: HistogramSupport, + { + if !value.is_finite() { + return Err(QuantileError::NonFiniteValue); + } + // We've already checked that the value is finite. + let value_f = value.to_f64().unwrap(); + + if self.len() < FILLED_MARKER_LEN as u64 { + self.marker_heights[self.len() as usize] = value_f; + self.marker_positions[4] += 1; + if self.len() == FILLED_MARKER_LEN as u64 { + float_ord::sort(&mut self.marker_heights); + self.adaptive_init(); + } + return Ok(()); + } + + // Find the cell k containing the new value. + let k = match self.find_cell(value_f) { + Some(4) => { + self.marker_heights[4] = value_f; + 3 + } + Some(i) => i, + None => { + self.marker_heights[0] = value_f; + 0 + } + }; + + // Handle rounding issues as described in + // . + let count = self.len() as f64; + self.desired_marker_positions[1] = count * (self.p() / 2.) + 1.; + self.desired_marker_positions[2] = count * self.p() + 1.; + self.desired_marker_positions[3] = count * ((1. + self.p()) / 2.) + 1.; + self.desired_marker_positions[4] = count + 1.; + + for i in k + 1..FILLED_MARKER_LEN { + self.marker_positions[i] += 1; + } + + // Adjust height of markers adaptively to be more optimal for + // not just higher quantiles, but also lower ones. + // + // This is a deviation from the paper, taken from + // . + if self.p >= 0.5 { + for i in 1..4 { + self.adjust_heights(i) + } + } else { + for i in (1..4).rev() { + self.adjust_heights(i) + } + } + + Ok(()) + } + + /// Find the higher marker cell whose height is lower than the observation. + /// + /// Returns `None` if the value is less than the initial marker height. + fn find_cell(&mut self, value: f64) -> Option { + if value < self.marker_heights[0] { + None + } else { + Some( + self.marker_heights + .partition_point(|&height| height <= value) + .saturating_sub(1), + ) + } + } + + /// Adjust the heights of the markers if necessary. + /// + /// Step B.3 in the P² algorithm. Should be used within a loop + /// after appending a value to the population. + fn adjust_heights(&mut self, i: usize) { + let d = + self.desired_marker_positions[i] - self.marker_positions[i] as f64; + + if (d >= 1. + && self.marker_positions[i + 1] > self.marker_positions[i] + 1) + || (d <= -1. + && self.marker_positions[i - 1] < self.marker_positions[i] - 1) + { + let d_signum = d.signum(); + let q_prime = self.parabolic(i, d_signum); + if self.marker_heights[i - 1] < q_prime + && q_prime < self.marker_heights[i + 1] + { + self.marker_heights[i] = q_prime; + } else { + let q_prime = self.linear(i, d_signum); + self.marker_heights[i] = q_prime; + } + + // Update marker positions based on the sign of d. + if d_signum < 0. { + self.marker_positions[i] -= 1; + } else { + self.marker_positions[i] += 1; + } + } + } + + /// An implementation to adaptively initialize the marker heights and + /// positions, particularly useful for extreme quantiles (e.g., 0.99) + /// when estimating on a small sample size. + /// + /// Read + /// for more. + fn adaptive_init(&mut self) { + self.desired_marker_positions[..FILLED_MARKER_LEN] + .copy_from_slice(&self.marker_heights[..FILLED_MARKER_LEN]); + + self.marker_positions[1] = (1. + 2. * self.p()).round() as u64; + self.marker_positions[2] = (1. + 4. * self.p()).round() as u64; + self.marker_positions[3] = (3. + 2. 
* self.p()).round() as u64; + self.marker_heights[1] = self.desired_marker_positions + [self.marker_positions[1] as usize - 1]; + self.marker_heights[2] = self.desired_marker_positions + [self.marker_positions[2] as usize - 1]; + self.marker_heights[3] = self.desired_marker_positions + [self.marker_positions[3] as usize - 1]; + } + + /// Parabolic prediction for marker height. + fn parabolic(&self, i: usize, d_signum: f64) -> f64 { + let pos_diff1 = (self.marker_positions[i + 1] as i64 + - self.marker_positions[i - 1] as i64) + as f64; + + let pos_diff2 = (self.marker_positions[i + 1] as i64 + - self.marker_positions[i] as i64) as f64; + + let pos_diff3 = (self.marker_positions[i] as i64 + - self.marker_positions[i - 1] as i64) + as f64; + + let term1 = d_signum / pos_diff1; + let term2 = ((self.marker_positions[i] - self.marker_positions[i - 1]) + as f64 + + d_signum) + * (self.marker_heights[i + 1] - self.marker_heights[i]) + / pos_diff2; + let term3 = ((self.marker_positions[i + 1] - self.marker_positions[i]) + as f64 + - d_signum) + * (self.marker_heights[i] - self.marker_heights[i - 1]) + / pos_diff3; + + self.marker_heights[i] + term1 * (term2 + term3) + } + + /// Linear prediction for marker height. + fn linear(&self, i: usize, d_signum: f64) -> f64 { + let idx = if d_signum < 0. { i - 1 } else { i + 1 }; + self.marker_heights[i] + + d_signum * (self.marker_heights[idx] - self.marker_heights[i]) + / (self.marker_positions[idx] as i64 + - self.marker_positions[i] as i64) as f64 + } +} + +#[cfg(test)] +mod tests { + use super::*; + use approx::assert_relative_eq; + use rand::{Rng, SeedableRng}; + use rand_distr::{Distribution, Normal}; + + fn test_quantile_impl( + p: f64, + observations: u64, + assert_on: Option, + ) -> Quantile { + let mut q = Quantile::new(p).unwrap(); + for o in 1..=observations { + q.append(o).unwrap(); + } + assert_eq!(q.p(), p); + assert_eq!(q.estimate().unwrap(), assert_on.unwrap_or(p * 100.)); + q + } + + #[test] + fn test_min_p() { + let observations = [3, 6, 7, 8, 8, 10, 13, 15, 16, 20]; + + let mut q = Quantile::new(0.0).unwrap(); + //assert_eq!(q.p(), 0.1); + for &o in observations.iter() { + q.append(o).unwrap(); + } + assert_eq!(q.estimate().unwrap(), 3.); + } + + /// Compared with C# implementation of P² algorithm. + #[test] + fn test_max_p() { + let observations = [3, 6, 7, 8, 8, 10, 13, 15, 16, 20]; + + let mut q = Quantile::new(1.).unwrap(); + assert_eq!(q.p(), 1.); + + for &o in observations.iter() { + q.append(o).unwrap(); + } + + assert_eq!(q.estimate().unwrap(), 11.66543209876543); + } + + /// Example observations from the P² paper. 
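    // For reference, the marker updates implemented by `append`,
    // `adjust_heights`, `parabolic`, and `linear` above follow the P² paper
    // (writing q_i for `marker_heights[i]` and n_i for `marker_positions[i]`,
    // with `count` samples seen before the new observation):
    //
    //   desired_marker_positions[1..=4] = [ count * p/2 + 1,
    //                                       count * p + 1,
    //                                       count * (1 + p)/2 + 1,
    //                                       count + 1 ]
    //
    // and a marker i moved by d = +/-1 gets the piecewise-parabolic height
    //
    //   q'_i = q_i + d/(n_{i+1} - n_{i-1}) * [ (n_i - n_{i-1} + d)(q_{i+1} - q_i)/(n_{i+1} - n_i)
    //                                        + (n_{i+1} - n_i - d)(q_i - q_{i-1})/(n_i - n_{i-1}) ]
    //
    // falling back to the linear form
    //
    //   q'_i = q_i + d * (q_{i+d} - q_i) / (n_{i+d} - n_i)
    //
    // whenever the parabolic estimate would land outside (q_{i-1}, q_{i+1}).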
+ #[test] + fn test_float_observations() { + let observations = [ + 0.02, 0.5, 0.74, 3.39, 0.83, 22.37, 10.15, 15.43, 38.62, 15.92, + 34.60, 10.28, 1.47, 0.40, 0.05, 11.39, 0.27, 0.42, 0.09, 11.37, + ]; + let mut q = Quantile::p50(); + for &o in observations.iter() { + q.append(o).unwrap(); + } + assert_eq!(q.marker_positions, [1, 6, 10, 16, 20]); + assert_eq!(q.desired_marker_positions, [0.02, 5.75, 10.5, 15.25, 20.0]); + assert_eq!(q.p(), 0.5); + assert_eq!(q.len(), 20); + assert_relative_eq!(q.estimate().unwrap(), 4.2462394088036435,); + } + + #[test] + fn test_rounding() { + let mut rng = rand::rngs::StdRng::seed_from_u64(42); + let mut estimator = Quantile::new(0.6).unwrap(); + + for _ in 0..100 { + let x: f64 = rng.gen(); + estimator.append(x).unwrap(); + } + + assert_relative_eq!( + estimator.estimate().unwrap(), + 0.552428024067269, + epsilon = f64::EPSILON + ); + } + + #[test] + fn test_integer_observations() { + let observations = 1..=100; + let mut q = Quantile::new(0.3).unwrap(); + for o in observations { + q.append(o).unwrap(); + } + assert_eq!(q.marker_positions, [1, 15, 30, 65, 100]); + assert_eq!( + q.desired_marker_positions, + [1.0, 15.85, 30.7, 65.35000000000001, 100.0] + ); + + assert_eq!(q.p(), 0.3); + assert_eq!(q.estimate().unwrap(), 30.0); + } + + #[test] + fn test_empty_observations() { + let q = Quantile::p50(); + assert_eq!( + q.estimate().err().unwrap(), + QuantileError::InsufficientSampleSize + ); + } + + #[test] + fn test_non_filled_observations() { + let mut q = Quantile::p99(); + let observations = [-10., 0., 1., 10.]; + for &o in observations.iter() { + q.append(o).unwrap(); + } + assert_eq!(q.estimate().unwrap(), 10.); + } + + #[test] + fn test_default_percentiles() { + test_quantile_impl(0.5, 100, None); + test_quantile_impl(0.9, 100, None); + test_quantile_impl(0.95, 100, None); + test_quantile_impl(0.99, 100, Some(97.)); + } + + #[test] + fn test_invalid_p_value() { + assert_eq!( + Quantile::new(1.01).err().unwrap(), + QuantileError::InvalidPValue + ); + assert_eq!( + Quantile::new(f64::MAX).err().unwrap(), + QuantileError::InvalidPValue + ); + } + + #[test] + fn test_find_cells() { + let mut q = test_quantile_impl(0.5, 5, Some(3.)); + assert_eq!(q.find_cell(0.), None); + assert_eq!(q.find_cell(7.), Some(4)); + assert_eq!(q.find_cell(4.), Some(3)); + assert_eq!(q.find_cell(3.5), Some(2)); + } + + /// Emulates baseline test in a basic Python implementation of the P² + /// algorithm: + /// . + #[test] + fn test_against_baseline_normal_distribution() { + let mu = 500.; + let sigma = 100.; + let size = 1000; + let p = 0.9; + + let normal = Normal::new(mu, sigma); + let mut observations = (0..size) + .map(|_| normal.unwrap().sample(&mut rand::thread_rng())) + .collect::>(); + float_ord::sort(&mut observations); + let idx = ((f64::from(size) - 1.) * p) as usize; + + let base_p_est = observations[idx]; + + let mut q = Quantile::new(p).unwrap(); + for o in observations.iter() { + q.append(*o).unwrap(); + } + let p_est = q.estimate().unwrap(); + + println!("Base: {}, Est: {}", base_p_est, p_est); + assert!( + (base_p_est - p_est).abs() < 10.0, + "Difference {} is not less than 10", + (base_p_est - p_est).abs() + ); + } +} diff --git a/oximeter/impl/src/schema/codegen.rs b/oximeter/impl/src/schema/codegen.rs new file mode 100644 index 0000000000..4aa09cf136 --- /dev/null +++ b/oximeter/impl/src/schema/codegen.rs @@ -0,0 +1,491 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Copyright 2024 Oxide Computer Company + +//! Generate Rust types and code from oximeter schema definitions. + +use crate::schema::ir::find_schema_version; +use crate::schema::ir::load_schema; +use crate::schema::AuthzScope; +use crate::schema::FieldSource; +use crate::schema::Units; +use crate::DatumType; +use crate::FieldSchema; +use crate::FieldType; +use crate::MetricsError; +use crate::TimeseriesSchema; +use chrono::prelude::DateTime; +use chrono::prelude::Utc; +use proc_macro2::TokenStream; +use quote::quote; + +/// Emit types for using one timeseries definition. +/// +/// Provided with a TOML-formatted schema definition, this emits Rust types for +/// the target and metric from the latest version; and a function that returns +/// the `TimeseriesSchema` for _all_ versions of the timeseries. +/// +/// Both of these items are emitted in a module with the same name as the +/// target. +pub fn use_timeseries(contents: &str) -> Result { + let schema = load_schema(contents)?; + let latest = find_schema_version(schema.iter().cloned(), None); + let mod_name = quote::format_ident!("{}", latest[0].target_name()); + let types = emit_schema_types(latest); + let func = emit_schema_function(schema.into_iter()); + Ok(quote! { + pub mod #mod_name { + #types + #func + } + }) +} + +fn emit_schema_function( + list: impl Iterator, +) -> TokenStream { + quote! { + pub fn timeseries_schema() -> Vec<::oximeter::schema::TimeseriesSchema> { + vec![ + #(#list),* + ] + } + } +} + +fn emit_schema_types(list: Vec) -> TokenStream { + let first_schema = list.first().expect("load_schema ensures non-empty"); + let target_def = emit_target(first_schema); + let metric_defs = emit_metrics(&list); + quote! { + #target_def + #metric_defs + } +} + +fn emit_metrics(schema: &[TimeseriesSchema]) -> TokenStream { + let items = schema.iter().map(|s| emit_one(FieldSource::Metric, s)); + quote! { #(#items)* } +} + +fn emit_target(schema: &TimeseriesSchema) -> TokenStream { + emit_one(FieldSource::Target, schema) +} + +fn emit_one(source: FieldSource, schema: &TimeseriesSchema) -> TokenStream { + let name = match source { + FieldSource::Target => schema.target_name(), + FieldSource::Metric => schema.metric_name(), + }; + let struct_name = + quote::format_ident!("{}", format!("{}", heck::AsPascalCase(name))); + let field_defs: Vec<_> = schema + .field_schema + .iter() + .filter_map(|s| { + if s.source == source { + let name = quote::format_ident!("{}", s.name); + let type_ = emit_rust_type_for_field(s.field_type); + let docstring = s.description.as_str(); + Some(quote! { + #[doc = #docstring] + pub #name: #type_ + }) + } else { + None + } + }) + .collect(); + let (oximeter_trait, maybe_datum, type_docstring) = match source { + FieldSource::Target => ( + quote! {::oximeter::Target }, + quote! {}, + schema.description.target.as_str(), + ), + FieldSource::Metric => { + let datum_type = emit_rust_type_for_datum_type(schema.datum_type); + ( + quote! { ::oximeter::Metric }, + quote! { pub datum: #datum_type, }, + schema.description.metric.as_str(), + ) + } + }; + quote! { + #[doc = #type_docstring] + #[derive(Clone, Debug, PartialEq, #oximeter_trait)] + pub struct #struct_name { + #( #field_defs, )* + #maybe_datum + } + } +} + +// Implement ToTokens for the components of a `TimeseriesSchema`. 
+// +// This is used so that we can emit a function that will return the same data as +// we parse from the TOML file with the timeseries definition, as a way to +// export the definitions without needing that actual file at runtime. +impl quote::ToTokens for DatumType { + fn to_tokens(&self, tokens: &mut TokenStream) { + let toks = match self { + DatumType::Bool => quote! { ::oximeter::DatumType::Bool }, + DatumType::I8 => quote! { ::oximeter::DatumType::I8 }, + DatumType::U8 => quote! { ::oximeter::DatumType::U8 }, + DatumType::I16 => quote! { ::oximeter::DatumType::I16 }, + DatumType::U16 => quote! { ::oximeter::DatumType::U16 }, + DatumType::I32 => quote! { ::oximeter::DatumType::I32 }, + DatumType::U32 => quote! { ::oximeter::DatumType::U32 }, + DatumType::I64 => quote! { ::oximeter::DatumType::I64 }, + DatumType::U64 => quote! { ::oximeter::DatumType::U64 }, + DatumType::F32 => quote! { ::oximeter::DatumType::F32 }, + DatumType::F64 => quote! { ::oximeter::DatumType::F64 }, + DatumType::String => quote! { ::oximeter::DatumType::String }, + DatumType::Bytes => quote! { ::oximeter::DatumType::Bytes }, + DatumType::CumulativeI64 => { + quote! { ::oximeter::DatumType::CumulativeI64 } + } + DatumType::CumulativeU64 => { + quote! { ::oximeter::DatumType::CumulativeU64 } + } + DatumType::CumulativeF32 => { + quote! { ::oximeter::DatumType::CumulativeF32 } + } + DatumType::CumulativeF64 => { + quote! { ::oximeter::DatumType::CumulativeF64 } + } + DatumType::HistogramI8 => { + quote! { ::oximeter::DatumType::HistogramI8 } + } + DatumType::HistogramU8 => { + quote! { ::oximeter::DatumType::HistogramU8 } + } + DatumType::HistogramI16 => { + quote! { ::oximeter::DatumType::HistogramI16 } + } + DatumType::HistogramU16 => { + quote! { ::oximeter::DatumType::HistogramU16 } + } + DatumType::HistogramI32 => { + quote! { ::oximeter::DatumType::HistogramI32 } + } + DatumType::HistogramU32 => { + quote! { ::oximeter::DatumType::HistogramU32 } + } + DatumType::HistogramI64 => { + quote! { ::oximeter::DatumType::HistogramI64 } + } + DatumType::HistogramU64 => { + quote! { ::oximeter::DatumType::HistogramU64 } + } + DatumType::HistogramF32 => { + quote! { ::oximeter::DatumType::HistogramF32 } + } + DatumType::HistogramF64 => { + quote! { ::oximeter::DatumType::HistogramF64 } + } + }; + toks.to_tokens(tokens); + } +} + +// Emit tokens representing the Rust path matching the provided datum type. +fn emit_rust_type_for_datum_type(datum_type: DatumType) -> TokenStream { + match datum_type { + DatumType::Bool => quote! { bool }, + DatumType::I8 => quote! { i8 }, + DatumType::U8 => quote! { u8 }, + DatumType::I16 => quote! { i16 }, + DatumType::U16 => quote! { u16 }, + DatumType::I32 => quote! { i32 }, + DatumType::U32 => quote! { u32 }, + DatumType::I64 => quote! { i64 }, + DatumType::U64 => quote! { u64 }, + DatumType::F32 => quote! { f32 }, + DatumType::F64 => quote! { f64 }, + DatumType::String => quote! { String }, + DatumType::Bytes => quote! { ::bytes::Bytes }, + DatumType::CumulativeI64 => { + quote! { ::oximeter::types::Cumulative } + } + DatumType::CumulativeU64 => { + quote! { ::oximeter::types::Cumulative } + } + DatumType::CumulativeF32 => { + quote! { ::oximeter::types::Cumulative } + } + DatumType::CumulativeF64 => { + quote! { ::oximeter::types::Cumulative } + } + DatumType::HistogramI8 => { + quote! { ::oximeter::histogram::Histogram } + } + DatumType::HistogramU8 => { + quote! { ::oximeter::histogram::Histogram } + } + DatumType::HistogramI16 => { + quote! 
{ ::oximeter::histogram::Histogram } + } + DatumType::HistogramU16 => { + quote! { ::oximeter::histogram::Histogram } + } + DatumType::HistogramI32 => { + quote! { ::oximeter::histogram::Histogram } + } + DatumType::HistogramU32 => { + quote! { ::oximeter::histogram::Histogram } + } + DatumType::HistogramI64 => { + quote! { ::oximeter::histogram::Histogram } + } + DatumType::HistogramU64 => { + quote! { ::oximeter::histogram::Histogram } + } + DatumType::HistogramF32 => { + quote! { ::oximeter::histogram::Histogram } + } + DatumType::HistogramF64 => { + quote! { ::oximeter::histogram::Histogram } + } + } +} + +// Generate the quoted path to the Rust type matching the given field type. +fn emit_rust_type_for_field(field_type: FieldType) -> TokenStream { + match field_type { + FieldType::String => quote! { ::std::borrow::Cow<'static, str> }, + FieldType::I8 => quote! { i8 }, + FieldType::U8 => quote! { u8 }, + FieldType::I16 => quote! { i16 }, + FieldType::U16 => quote! { u16 }, + FieldType::I32 => quote! { i32 }, + FieldType::U32 => quote! { u32 }, + FieldType::I64 => quote! { i64 }, + FieldType::U64 => quote! { u64 }, + FieldType::IpAddr => quote! { ::core::net::IpAddr }, + FieldType::Uuid => quote! { ::uuid::Uuid }, + FieldType::Bool => quote! { bool }, + } +} + +impl quote::ToTokens for FieldSource { + fn to_tokens(&self, tokens: &mut TokenStream) { + let toks = match self { + FieldSource::Target => { + quote! { ::oximeter::schema::FieldSource::Target } + } + FieldSource::Metric => { + quote! { ::oximeter::schema::FieldSource::Metric } + } + }; + toks.to_tokens(tokens); + } +} + +impl quote::ToTokens for FieldType { + fn to_tokens(&self, tokens: &mut TokenStream) { + let toks = match self { + FieldType::String => quote! { ::oximeter::FieldType::String }, + FieldType::I8 => quote! { ::oximeter::FieldType::I8 }, + FieldType::U8 => quote! { ::oximeter::FieldType::U8 }, + FieldType::I16 => quote! { ::oximeter::FieldType::I16 }, + FieldType::U16 => quote! { ::oximeter::FieldType::U16 }, + FieldType::I32 => quote! { ::oximeter::FieldType::I32 }, + FieldType::U32 => quote! { ::oximeter::FieldType::U32 }, + FieldType::I64 => quote! { ::oximeter::FieldType::I64 }, + FieldType::U64 => quote! { ::oximeter::FieldType::U64 }, + FieldType::IpAddr => quote! { ::oximeter::FieldType::IpAddr }, + FieldType::Uuid => quote! { ::oximeter::FieldType::Uuid }, + FieldType::Bool => quote! { ::oximeter::FieldType::Bool }, + }; + toks.to_tokens(tokens); + } +} + +impl quote::ToTokens for AuthzScope { + fn to_tokens(&self, tokens: &mut TokenStream) { + let toks = match self { + AuthzScope::Fleet => { + quote! { ::oximeter::schema::AuthzScope::Fleet } + } + AuthzScope::Silo => quote! { ::oximeter::schema::AuthzScope::Silo }, + AuthzScope::Project => { + quote! { ::oximeter::schema::AuthzScope::Project } + } + AuthzScope::ViewableToAll => { + quote! { ::oximeter::schema::AuthzScope::ViewableToAll } + } + }; + toks.to_tokens(tokens); + } +} + +fn quote_creation_time(created: DateTime) -> TokenStream { + let secs = created.timestamp(); + let nsecs = created.timestamp_subsec_nanos(); + quote! { + ::chrono::DateTime::from_timestamp(#secs, #nsecs).unwrap() + } +} + +impl quote::ToTokens for Units { + fn to_tokens(&self, tokens: &mut TokenStream) { + let toks = match self { + Units::Count => quote! { ::oximeter::schema::Units::Count }, + Units::Bytes => quote! 
{ ::oximeter::schema::Units::Bytes }, + }; + toks.to_tokens(tokens); + } +} + +impl quote::ToTokens for FieldSchema { + fn to_tokens(&self, tokens: &mut TokenStream) { + let name = self.name.as_str(); + let field_type = self.field_type; + let source = self.source; + let description = self.description.as_str(); + let toks = quote! { + ::oximeter::FieldSchema { + name: String::from(#name), + field_type: #field_type, + source: #source, + description: String::from(#description), + } + }; + toks.to_tokens(tokens); + } +} + +impl quote::ToTokens for TimeseriesSchema { + fn to_tokens(&self, tokens: &mut TokenStream) { + let field_schema = &self.field_schema; + let timeseries_name = self.timeseries_name.to_string(); + let target_description = self.description.target.as_str(); + let metric_description = self.description.metric.as_str(); + let authz_scope = self.authz_scope; + let units = self.units; + let datum_type = self.datum_type; + let ver = self.version.get(); + let version = quote! { ::core::num::NonZeroU8::new(#ver).unwrap() }; + let created = quote_creation_time(self.created); + let toks = quote! { + ::oximeter::schema::TimeseriesSchema { + timeseries_name: ::oximeter::TimeseriesName::try_from(#timeseries_name).unwrap(), + description: ::oximeter::schema::TimeseriesDescription { + target: String::from(#target_description), + metric: String::from(#metric_description), + }, + authz_scope: #authz_scope, + units: #units, + field_schema: ::std::collections::BTreeSet::from([ + #(#field_schema),* + ]), + datum_type: #datum_type, + version: #version, + created: #created, + } + }; + toks.to_tokens(tokens); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::schema::TimeseriesDescription; + use std::{collections::BTreeSet, num::NonZeroU8}; + + #[test] + fn emit_schema_types_generates_expected_tokens() { + let schema = TimeseriesSchema { + timeseries_name: "foo:bar".parse().unwrap(), + description: TimeseriesDescription { + target: "a target".into(), + metric: "a metric".into(), + }, + field_schema: BTreeSet::from([ + FieldSchema { + name: "f0".into(), + field_type: FieldType::String, + source: FieldSource::Target, + description: "target field".into(), + }, + FieldSchema { + name: "f1".into(), + field_type: FieldType::Uuid, + source: FieldSource::Metric, + description: "metric field".into(), + }, + ]), + datum_type: DatumType::CumulativeU64, + version: NonZeroU8::new(1).unwrap(), + authz_scope: AuthzScope::Fleet, + units: Units::Bytes, + created: Utc::now(), + }; + + let tokens = emit_schema_types(vec![schema.clone()]); + + let expected = quote! 
{ + #[doc = "a target"] + #[derive(Clone, Debug, PartialEq, ::oximeter::Target)] + pub struct Foo { + #[doc = "target field"] + pub f0: ::std::borrow::Cow<'static, str>, + } + + #[doc = "a metric"] + #[derive(Clone, Debug, PartialEq, ::oximeter::Metric)] + pub struct Bar { + #[doc = "metric field"] + pub f1: ::uuid::Uuid, + pub datum: ::oximeter::types::Cumulative, + } + }; + + assert_eq!(tokens.to_string(), expected.to_string()); + } + + #[test] + fn emit_schema_types_with_no_metric_fields_generates_expected_tokens() { + let schema = TimeseriesSchema { + timeseries_name: "foo:bar".parse().unwrap(), + description: TimeseriesDescription { + target: "a target".into(), + metric: "a metric".into(), + }, + field_schema: BTreeSet::from([FieldSchema { + name: "f0".into(), + field_type: FieldType::String, + source: FieldSource::Target, + description: "target field".into(), + }]), + datum_type: DatumType::CumulativeU64, + version: NonZeroU8::new(1).unwrap(), + authz_scope: AuthzScope::Fleet, + units: Units::Bytes, + created: Utc::now(), + }; + + let tokens = emit_schema_types(vec![schema.clone()]); + + let expected = quote! { + #[doc = "a target"] + #[derive(Clone, Debug, PartialEq, ::oximeter::Target)] + pub struct Foo { + #[doc = "target field"] + pub f0: ::std::borrow::Cow<'static, str>, + } + + #[doc = "a metric"] + #[derive(Clone, Debug, PartialEq, ::oximeter::Metric)] + pub struct Bar { + pub datum: ::oximeter::types::Cumulative, + } + }; + + assert_eq!(tokens.to_string(), expected.to_string()); + } +} diff --git a/oximeter/impl/src/schema/ir.rs b/oximeter/impl/src/schema/ir.rs new file mode 100644 index 0000000000..573af9c2b0 --- /dev/null +++ b/oximeter/impl/src/schema/ir.rs @@ -0,0 +1,1416 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Copyright 2024 Oxide Computer Company + +//! Serialization of timeseries schema definitions. +//! +//! These types are used as an intermediate representation of schema. The schema +//! are written in TOML; deserialized into these types; and then either +//! inspected or used to generate code that contains the equivalent Rust types +//! and trait implementations. 
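//!
//! As a sketch of the format (this example is lifted from the tests at the
//! bottom of this module; the names are illustrative), a definition looks
//! like:
//!
//! ```text
//! format_version = 1
//!
//! [target]
//! name = "target"
//! description = "some target"
//! authz_scope = "fleet"
//! versions = [
//!     { version = 1, fields = [ "foo" ] },
//! ]
//!
//! [[metrics]]
//! name = "metric"
//! description = "some metric"
//! datum_type = "u8"
//! units = "count"
//! versions = [
//!     { added_in = 1, fields = [] },
//! ]
//!
//! [fields.foo]
//! type = "string"
//! description = "a field"
//! ```
//!
//! `load_schema` parses a definition like this into a
//! `Vec<TimeseriesSchema>`, one entry per metric per version.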
+
+use crate::schema::AuthzScope;
+use crate::schema::DatumType;
+use crate::schema::FieldSource;
+use crate::schema::FieldType;
+use crate::schema::TimeseriesDescription;
+use crate::schema::Units;
+use crate::FieldSchema;
+use crate::MetricsError;
+use crate::TimeseriesName;
+use crate::TimeseriesSchema;
+use chrono::Utc;
+use serde::Deserialize;
+use std::collections::btree_map::Entry;
+use std::collections::BTreeMap;
+use std::collections::BTreeSet;
+use std::num::NonZeroU8;
+
+#[derive(Debug, Deserialize)]
+pub struct FieldMetadata {
+    #[serde(rename = "type")]
+    pub type_: FieldType,
+    pub description: String,
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(untagged)]
+pub enum MetricFields {
+    Removed { removed_in: NonZeroU8 },
+    Added { added_in: NonZeroU8, fields: Vec<String> },
+    Versioned(VersionedFields),
+}
+
+#[derive(Debug, Deserialize)]
+pub struct VersionedFields {
+    pub version: NonZeroU8,
+    pub fields: Vec<String>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct TargetDefinition {
+    pub name: String,
+    pub description: String,
+    pub authz_scope: AuthzScope,
+    pub versions: Vec<VersionedFields>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct MetricDefinition {
+    pub name: String,
+    pub description: String,
+    pub units: Units,
+    pub datum_type: DatumType,
+    pub versions: Vec<MetricFields>,
+}
+
+fn checked_version_deser<'de, D>(d: D) -> Result<NonZeroU8, D::Error>
+where
+    D: serde::Deserializer<'de>,
+{
+    let x = NonZeroU8::deserialize(d)?;
+    if x.get() == 1 {
+        Ok(x)
+    } else {
+        Err(serde::de::Error::custom(format!(
+            "Only version 1 of the timeseries definition format \
+            is currently supported, found version {x}",
+        )))
+    }
+}
+
+#[derive(Debug, Deserialize)]
+pub struct TimeseriesDefinition {
+    #[serde(deserialize_with = "checked_version_deser")]
+    pub format_version: NonZeroU8,
+    pub target: TargetDefinition,
+    pub metrics: Vec<MetricDefinition>,
+    pub fields: BTreeMap<String, FieldMetadata>,
+}
+
+impl TimeseriesDefinition {
+    pub fn into_schema_list(
+        self,
+    ) -> Result<Vec<TimeseriesSchema>, MetricsError> {
+        if self.target.versions.is_empty() {
+            return Err(MetricsError::SchemaDefinition(String::from(
+                "At least one target version must be defined",
+            )));
+        }
+        if self.metrics.is_empty() {
+            return Err(MetricsError::SchemaDefinition(String::from(
+                "At least one metric must be defined",
+            )));
+        }
+        let mut timeseries = BTreeMap::new();
+        let target_name = &self.target.name;
+
+        // At this point, we do not support actually _modifying_ schema.
+        // Instead, we're putting in place infrastructure to support multiple
+        // versions, while still requiring all schema to define the first and
+        // only the first version.
+        //
+        // We omit this check in tests, to ensure that the code correctly
+        // handles updates.
+        #[cfg(not(test))]
+        if self.target.versions.len() > 1
+            || self
+                .target
+                .versions
+                .iter()
+                .any(|fields| fields.version.get() > 1)
+        {
+            return Err(MetricsError::SchemaDefinition(String::from(
+                "Exactly one timeseries version, with version number 1, \
+                may currently be specified. Updates will be supported \
+                in future releases.",
+            )));
+        }
+
+        // First create a map from target version to the fields in it.
+        //
+        // This is used to do O(lg n) lookups into the set of target fields when we
+        // iterate through metric versions below, i.e., avoiding quadratic behavior.
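        // For example (field names here are purely illustrative), a target
        // declared with
        //
        //     versions = [
        //         { version = 1, fields = [ "project_id" ] },
        //         { version = 2, fields = [ "project_id", "sled_id" ] },
        //     ]
        //
        // produces the map { 1 => {"project_id"}, 2 => {"project_id", "sled_id"} }.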
+        let mut target_fields_by_version = BTreeMap::new();
+        for (expected_version, target_fields) in
+            (1u8..).zip(self.target.versions.iter())
+        {
+            if expected_version != target_fields.version.get() {
+                return Err(MetricsError::SchemaDefinition(format!(
+                    "Target '{}' versions should be sequential \
+                    and monotonically increasing (expected {}, found {})",
+                    target_name, expected_version, target_fields.version,
+                )));
+            }
+
+            let fields: BTreeSet<_> =
+                target_fields.fields.iter().cloned().collect();
+            if fields.len() != target_fields.fields.len() {
+                return Err(MetricsError::SchemaDefinition(format!(
+                    "Target '{}' version {} lists duplicate field names",
+                    target_name, expected_version,
+                )));
+            }
+            if fields.is_empty() {
+                return Err(MetricsError::SchemaDefinition(format!(
+                    "Target '{}' version {} must have at least one field",
+                    target_name, expected_version,
+                )));
+            }
+
+            if target_fields_by_version
+                .insert(expected_version, fields)
+                .is_some()
+            {
+                return Err(MetricsError::SchemaDefinition(format!(
+                    "Target '{}' version {} is duplicated",
+                    target_name, expected_version,
+                )));
+            }
+        }
+
+        // Start by looping over all the metrics in the definition.
+        //
+        // As we do so, we'll attach the target definition at the corresponding
+        // version, along with running some basic lints and checks.
+        for metric in self.metrics.iter() {
+            let metric_name = &metric.name;
+
+            // Store the current version of the metric. This doesn't need to be
+            // sequential, but they do need to be monotonic and have a matching
+            // target version. We'll fill in any gaps with the last active version
+            // of the metric (if any).
+            let mut current_version: Option<CurrentVersion> = None;
+
+            // Also store the last used version of the target. This lets users omit
+            // an unchanged metric, and we use this value to fill in the implied
+            // version of the metric.
+            let mut last_target_version: u8 = 0;
+
+            // Iterate through each version of this metric.
+            //
+            // In general, we expect metrics to be added in the first version;
+            // modified by adding / removing fields; and possibly removed at the
+            // end. However, they can be added / removed multiple times, and not
+            // added until a later version of the target.
+            for metric_fields in metric.versions.iter() {
+                // Fill in any gaps from the last target version to this next
+                // metric version. This only works once we've filled in at least
+                // one version of the metric, and stored the current version /
+                // fields.
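                // As a concrete illustration (this mirrors the
                // `load_schema_fills_in_implied_metric_versions_when_last_is_modified`
                // test below): with target versions 1, 2, and 3, and a metric
                // declared as `{ added_in = 1 }` then `{ version = 3 }`, the
                // block below synthesizes timeseries version 2 from the
                // version-1 metric fields and the version-2 target fields,
                // before version 3 is handled normally.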
+ if let Some(current) = current_version.as_ref() { + let current_fields = current + .fields() + .expect("Should have some fields if we have any previous version"); + while last_target_version <= current.version().get() { + last_target_version += 1; + let Some(target_fields) = + target_fields_by_version.get(&last_target_version) + else { + return Err(MetricsError::SchemaDefinition( + format!( + "Metric '{}' version {} does not have \ + a matching version in the target '{}'", + metric_name, + last_target_version, + target_name, + ), + )); + }; + let field_schema = construct_field_schema( + &self.fields, + target_name, + target_fields, + metric_name, + current_fields, + )?; + let authz_scope = extract_authz_scope( + metric_name, + self.target.authz_scope, + &field_schema, + )?; + let timeseries_name = TimeseriesName::try_from( + format!("{}:{}", target_name, metric_name), + )?; + let version = + NonZeroU8::new(last_target_version).unwrap(); + let description = TimeseriesDescription { + target: self.target.description.clone(), + metric: metric.description.clone(), + }; + let schema = TimeseriesSchema { + timeseries_name: timeseries_name.clone(), + description, + field_schema, + datum_type: metric.datum_type, + version, + authz_scope, + units: metric.units, + created: Utc::now(), + }; + if let Some(old) = timeseries + .insert((timeseries_name, version), schema) + { + return Err(MetricsError::SchemaDefinition( + format!( + "Timeseries '{}' version {} is duplicated", + old.timeseries_name, old.version, + ), + )); + } + } + } + + // Extract the fields named in this version, checking that they're + // compatible with the last known version, if any. + let new_version = extract_metric_fields( + metric_name, + metric_fields, + ¤t_version, + )?; + let version = current_version.insert(new_version); + let Some(metric_fields) = version.fields() else { + continue; + }; + + // Now, insert the _next_ version of the metric with the + // validated fields we've collected for it. + last_target_version += 1; + let Some(target_fields) = + target_fields_by_version.get(&last_target_version) + else { + return Err(MetricsError::SchemaDefinition(format!( + "Metric '{}' version {} does not have \ + a matching version in the target '{}'", + metric_name, last_target_version, target_name, + ))); + }; + let field_schema = construct_field_schema( + &self.fields, + target_name, + target_fields, + metric_name, + metric_fields, + )?; + let authz_scope = extract_authz_scope( + metric_name, + self.target.authz_scope, + &field_schema, + )?; + let timeseries_name = TimeseriesName::try_from(format!( + "{}:{}", + target_name, metric_name + ))?; + let version = NonZeroU8::new(last_target_version).unwrap(); + let description = TimeseriesDescription { + target: self.target.description.clone(), + metric: metric.description.clone(), + }; + let schema = TimeseriesSchema { + timeseries_name: timeseries_name.clone(), + description, + field_schema, + datum_type: metric.datum_type, + version, + authz_scope, + units: metric.units, + created: Utc::now(), + }; + if let Some(old) = + timeseries.insert((timeseries_name, version), schema) + { + return Err(MetricsError::SchemaDefinition(format!( + "Timeseries '{}' version {} is duplicated", + old.timeseries_name, old.version, + ))); + } + } + + // We also allow omitting later versions of metrics if they are + // unchanged. A target has to specify every version, even if it's the + // same, but the metrics need only specify differences. 
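            //
            // For example (as exercised by the
            // `load_schema_fills_in_late_implied_metric_versions` test), a
            // metric whose only entry is `{ added_in = 1, fields = [] }`
            // against a target with versions 1 and 2 still gets a version-2
            // timeseries emitted here; if the metric's last entry is instead
            // `{ removed_in = 3 }`, this block emits nothing further, so a
            // three-version target yields only timeseries versions 1 and 2.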
+ // + // Here, look for any target version strictly later than the last metric + // version, and create a corresponding target / metric pair for it. + if let Some(last_metric_fields) = metric.versions.last() { + match last_metric_fields { + MetricFields::Removed { .. } => {} + MetricFields::Added { + added_in: last_metric_version, + fields, + } + | MetricFields::Versioned(VersionedFields { + version: last_metric_version, + fields, + }) => { + let metric_field_names: BTreeSet<_> = + fields.iter().cloned().collect(); + let next_version = last_metric_version + .get() + .checked_add(1) + .expect("version < 256"); + for (version, target_fields) in + target_fields_by_version.range(next_version..) + { + let field_schema = construct_field_schema( + &self.fields, + target_name, + target_fields, + metric_name, + &metric_field_names, + )?; + let authz_scope = extract_authz_scope( + metric_name, + self.target.authz_scope, + &field_schema, + )?; + let timeseries_name = TimeseriesName::try_from( + format!("{}:{}", target_name, metric_name), + )?; + let version = NonZeroU8::new(*version).unwrap(); + let description = TimeseriesDescription { + target: self.target.description.clone(), + metric: metric.description.clone(), + }; + let schema = TimeseriesSchema { + timeseries_name: timeseries_name.clone(), + description, + field_schema, + datum_type: metric.datum_type, + version, + authz_scope, + units: metric.units, + created: Utc::now(), + }; + if let Some(old) = timeseries + .insert((timeseries_name, version), schema) + { + return Err(MetricsError::SchemaDefinition( + format!( + "Timeseries '{}' version {} is duplicated", + old.timeseries_name, old.version, + ), + )); + } + } + } + } + } + } + Ok(timeseries.into_values().collect()) + } +} + +#[derive(Clone, Debug)] +enum CurrentVersion { + Active { version: NonZeroU8, fields: BTreeSet }, + Inactive { removed_in: NonZeroU8 }, +} + +impl CurrentVersion { + fn version(&self) -> NonZeroU8 { + match self { + CurrentVersion::Active { version, .. } => *version, + CurrentVersion::Inactive { removed_in } => *removed_in, + } + } + + fn fields(&self) -> Option<&BTreeSet> { + match self { + CurrentVersion::Active { fields, .. } => Some(fields), + CurrentVersion::Inactive { .. } => None, + } + } +} + +/// Load the list of timeseries schema from a schema definition in TOML format. +pub fn load_schema( + contents: &str, +) -> Result, MetricsError> { + toml::from_str::(contents) + .map_err(|e| { + MetricsError::Toml( + slog_error_chain::InlineErrorChain::new(&e).to_string(), + ) + }) + .and_then(TimeseriesDefinition::into_schema_list) +} + +// Find schema of a specified version in an iterator, or the latest. 
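// For example, given schema for `foo:bar` at versions 1 and 2 and for
// `foo:baz` at version 1, passing `None` yields the latest entry for each
// metric (`foo:bar` v2 and `foo:baz` v1), while `Some(v)` simply filters the
// input down to the schema whose version is exactly `v`.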
+pub(super) fn find_schema_version( + list: impl Iterator, + version: Option, +) -> Vec { + match version { + Some(ver) => list.into_iter().filter(|s| s.version == ver).collect(), + None => { + let mut last_version = BTreeMap::new(); + for schema in list { + let metric_name = schema.metric_name().to_string(); + match last_version.entry(metric_name) { + Entry::Vacant(entry) => { + entry.insert((schema.version, schema.clone())); + } + Entry::Occupied(mut entry) => { + let existing_version = entry.get().0; + if existing_version < schema.version { + entry.insert((schema.version, schema.clone())); + } + } + } + } + last_version.into_values().map(|(_ver, schema)| schema).collect() + } + } +} + +fn extract_authz_scope( + metric_name: &str, + authz_scope: AuthzScope, + field_schema: &BTreeSet, +) -> Result { + let check_for_key = |scope: &str| { + let key = format!("{scope}_id"); + if field_schema.iter().any(|field| { + field.name == key && field.field_type == FieldType::Uuid + }) { + Ok(()) + } else { + Err(MetricsError::SchemaDefinition(format!( + "Metric '{}' has '{}' authorization scope, and so must \ + contain a field '{}' of UUID type", + metric_name, scope, key, + ))) + } + }; + match authz_scope { + AuthzScope::Silo => check_for_key("silo")?, + AuthzScope::Project => check_for_key("project")?, + AuthzScope::Fleet | AuthzScope::ViewableToAll => {} + } + Ok(authz_scope) +} + +fn construct_field_schema( + all_fields: &BTreeMap, + target_name: &str, + target_fields: &BTreeSet, + metric_name: &str, + metric_field_names: &BTreeSet, +) -> Result, MetricsError> { + if let Some(dup) = target_fields.intersection(&metric_field_names).next() { + return Err(MetricsError::SchemaDefinition(format!( + "Field '{}' is duplicated between target \ + '{}' and metric '{}'", + dup, target_name, metric_name, + ))); + } + + let mut field_schema = BTreeSet::new(); + for (field_name, source) in + target_fields.iter().zip(std::iter::repeat(FieldSource::Target)).chain( + metric_field_names + .iter() + .zip(std::iter::repeat(FieldSource::Metric)), + ) + { + let Some(metadata) = all_fields.get(field_name.as_str()) else { + let (kind, name) = if source == FieldSource::Target { + ("target", target_name) + } else { + ("metric", metric_name) + }; + return Err(MetricsError::SchemaDefinition(format!( + "Field '{}' is referenced in the {} '{}', but it \ + does not appear in the set of all fields.", + field_name, kind, name, + ))); + }; + validate_field_name(field_name, metadata)?; + field_schema.insert(FieldSchema { + name: field_name.to_string(), + field_type: metadata.type_, + source, + description: metadata.description.clone(), + }); + } + Ok(field_schema) +} + +fn is_snake_case(s: &str) -> bool { + s == format!("{}", heck::AsSnakeCase(s)) +} + +fn is_valid_ident_name(s: &str) -> bool { + syn::parse_str::(s).is_ok() && is_snake_case(s) +} + +fn validate_field_name( + field_name: &str, + metadata: &FieldMetadata, +) -> Result<(), MetricsError> { + if !is_valid_ident_name(field_name) { + return Err(MetricsError::SchemaDefinition(format!( + "Field name '{}' should be a valid identifier in snake_case", + field_name, + ))); + } + if metadata.type_ == FieldType::Uuid + && !(field_name.ends_with("_id") || field_name == "id") + { + return Err(MetricsError::SchemaDefinition(format!( + "Uuid field '{}' should end with '_id' or equal 'id'", + field_name, + ))); + } + Ok(()) +} + +fn extract_metric_fields<'a>( + metric_name: &'a str, + metric_fields: &'a MetricFields, + current_version: &Option, +) -> Result { + let new_version = match 
metric_fields { + MetricFields::Removed { removed_in } => { + match current_version { + Some(CurrentVersion::Active { version, .. }) => { + // This metric was active, and is now being + // removed. Bump the version and mark it active, + // but there are no fields to return here. + if removed_in <= version { + return Err(MetricsError::SchemaDefinition(format!( + "Metric '{}' is removed in version \ + {}, which is not strictly after the \ + current active version, {}", + metric_name, removed_in, version, + ))); + } + CurrentVersion::Inactive { removed_in: *removed_in } + } + Some(CurrentVersion::Inactive { removed_in }) => { + return Err(MetricsError::SchemaDefinition(format!( + "Metric '{}' was already removed in \ + version {}, it cannot be removed again", + metric_name, removed_in, + ))); + } + None => { + return Err(MetricsError::SchemaDefinition(format!( + "Metric {} has no previous version, \ + it cannot be removed.", + metric_name, + ))); + } + } + } + MetricFields::Added { added_in, fields } => { + match current_version { + Some(CurrentVersion::Active { .. }) => { + return Err(MetricsError::SchemaDefinition(format!( + "Metric '{}' is already active, it \ + cannot be added again until it is removed.", + metric_name, + ))); + } + Some(CurrentVersion::Inactive { removed_in }) => { + // The metric is currently inactive, just check + // that the newly-active version is greater. + if added_in <= removed_in { + return Err(MetricsError::SchemaDefinition(format!( + "Re-added metric '{}' must appear in a later \ + version than the one in which it was removed ({})", + metric_name, removed_in, + ))); + } + CurrentVersion::Active { + version: *added_in, + fields: to_unique_field_names(metric_name, fields)?, + } + } + None => { + // There was no previous version for this + // metric, just add it. + CurrentVersion::Active { + version: *added_in, + fields: to_unique_field_names(metric_name, fields)?, + } + } + } + } + MetricFields::Versioned(new_fields) => { + match current_version { + Some(CurrentVersion::Active { version, .. }) => { + // The happy-path, we're stepping the version + // and possibly modifying the fields. + if new_fields.version <= *version { + return Err(MetricsError::SchemaDefinition(format!( + "Metric '{}' version should increment, \ + expected at least {}, found {}", + metric_name, + version.checked_add(1).expect("version < 256"), + new_fields.version, + ))); + } + CurrentVersion::Active { + version: new_fields.version, + fields: to_unique_field_names( + metric_name, + &new_fields.fields, + )?, + } + } + Some(CurrentVersion::Inactive { removed_in }) => { + // The metric has been removed in the past, it + // needs to be added again first. + return Err(MetricsError::SchemaDefinition(format!( + "Metric '{}' was removed in version {}, \ + it must be added again first", + metric_name, removed_in, + ))); + } + None => { + // The metric never existed, it must be added + // first. 
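                    // (Taken together, the arms of this match allow only:
                    // absent -> `added_in`; Active -> a later `version` or a
                    // later `removed_in`; Inactive -> a later `added_in`;
                    // every other combination is rejected with a
                    // SchemaDefinition error, as here.)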
+ return Err(MetricsError::SchemaDefinition(format!( + "Metric '{}' must be added in at its first \ + version, and can then be modified", + metric_name, + ))); + } + } + } + }; + Ok(new_version) +} + +fn to_unique_field_names( + name: &str, + fields: &[String], +) -> Result, MetricsError> { + let set: BTreeSet<_> = fields.iter().cloned().collect(); + if set.len() != fields.len() { + return Err(MetricsError::SchemaDefinition(format!( + "Object '{name}' has duplicate fields" + ))); + } + Ok(set) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn extract_authz_scope_requires_relevant_field() { + assert!( + extract_authz_scope("foo", AuthzScope::Project, &BTreeSet::new()) + .is_err(), + "Project-scoped auth without a project_id field should be an error" + ); + + let schema = std::iter::once(FieldSchema { + name: "project_id".to_string(), + field_type: FieldType::Uuid, + source: FieldSource::Target, + description: String::new(), + }) + .collect(); + extract_authz_scope("foo", AuthzScope::Project, &schema).expect( + "Project-scoped auth with a project_id field should succeed", + ); + + let schema = std::iter::once(FieldSchema { + name: "project_id".to_string(), + field_type: FieldType::String, + source: FieldSource::Target, + description: String::new(), + }) + .collect(); + assert!( + extract_authz_scope("foo", AuthzScope::Project, &schema).is_err(), + "Project-scoped auth with a project_id field \ + that's not a UUID should be an error", + ); + } + + fn all_fields() -> BTreeMap { + let mut out = BTreeMap::new(); + for name in ["foo", "bar"] { + out.insert( + String::from(name), + FieldMetadata { + type_: FieldType::U8, + description: String::new(), + }, + ); + } + out + } + + #[test] + fn construct_field_schema_fails_with_reference_to_unknown_field() { + let all = all_fields(); + let target_fields = BTreeSet::new(); + let bad_name = String::from("baz"); + let metric_fields = std::iter::once(bad_name).collect(); + assert!( + construct_field_schema( + &all, + "target", + &target_fields, + "metric", + &metric_fields, + ) + .is_err(), + "Should return an error when the metric references a field \ + that doesn't exist in the global field list" + ); + } + + #[test] + fn construct_field_schema_fails_with_duplicate_field_names() { + let all = all_fields(); + let name = String::from("bar"); + let target_fields = std::iter::once(name.clone()).collect(); + let metric_fields = std::iter::once(name).collect(); + assert!(construct_field_schema( + &all, + "target", + &target_fields, + "metric", + &metric_fields, + ).is_err(), + "Should return an error when the target and metric share a field name" + ); + } + + #[test] + fn construct_field_schema_picks_up_correct_fields() { + let all = all_fields(); + let all_schema = all + .iter() + .zip([FieldSource::Metric, FieldSource::Target]) + .map(|((name, md), source)| FieldSchema { + name: name.clone(), + field_type: md.type_, + source, + description: String::new(), + }) + .collect(); + let foo = String::from("foo"); + let target_fields = std::iter::once(foo).collect(); + let bar = String::from("bar"); + let metric_fields = std::iter::once(bar).collect(); + assert_eq!( + construct_field_schema( + &all, + "target", + &target_fields, + "metric", + &metric_fields, + ) + .unwrap(), + all_schema, + "Each field is referenced exactly once, so we should return \ + the entire input set of fields" + ); + } + + #[test] + fn validate_field_name_disallows_bad_names() { + for name in ["PascalCase", "with spaces", "12345", "💖"] { + assert!( + validate_field_name( + 
name, + &FieldMetadata { + type_: FieldType::U8, + description: String::new() + } + ) + .is_err(), + "Field named {name} should be invalid" + ); + } + } + + #[test] + fn validate_field_name_verifies_uuid_field_names() { + assert!( + validate_field_name( + "projectid", + &FieldMetadata { + type_: FieldType::Uuid, + description: String::new() + } + ) + .is_err(), + "A Uuid field should be required to end in `_id`", + ); + for name in ["project_id", "id"] { + assert!( + validate_field_name(name, + &FieldMetadata { + type_: FieldType::Uuid, + description: String::new() + } + ).is_ok(), + "A Uuid field should be required to end in '_id' or exactly 'id'", + ); + } + } + + #[test] + fn extract_metric_fields_succeeds_with_gaps_in_versions() { + let metric_fields = MetricFields::Versioned(VersionedFields { + version: NonZeroU8::new(10).unwrap(), + fields: vec![], + }); + let current_version = Some(CurrentVersion::Active { + version: NonZeroU8::new(1).unwrap(), + fields: BTreeSet::new(), + }); + extract_metric_fields("foo", &metric_fields, ¤t_version).expect( + "Extracting metric fields should work with non-sequential \ + but increasing version numbers", + ); + } + + #[test] + fn extract_metric_fields_fails_with_non_increasing_versions() { + let metric_fields = MetricFields::Versioned(VersionedFields { + version: NonZeroU8::new(1).unwrap(), + fields: vec![], + }); + let current_version = Some(CurrentVersion::Active { + version: NonZeroU8::new(1).unwrap(), + fields: BTreeSet::new(), + }); + let res = + extract_metric_fields("foo", &metric_fields, ¤t_version); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!("Expected schema definition error, found: {res:#?}"); + }; + assert!( + msg.contains("should increment"), + "Should fail when version numbers are non-increasing", + ); + } + + #[test] + fn extract_metric_fields_requires_adding_first() { + let metric_fields = MetricFields::Versioned(VersionedFields { + version: NonZeroU8::new(1).unwrap(), + fields: vec![], + }); + let current_version = None; + let res = + extract_metric_fields("foo", &metric_fields, ¤t_version); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!("Expected schema definition error, found: {res:#?}"); + }; + assert!( + msg.contains("must be added in at its first version"), + "Should require that the first version of a metric explicitly \ + adds it in, before modification", + ); + + let metric_fields = MetricFields::Added { + added_in: NonZeroU8::new(1).unwrap(), + fields: vec![], + }; + let current_version = None; + extract_metric_fields("foo", &metric_fields, ¤t_version).expect( + "Should succeed when fields are added_in for their first version", + ); + } + + #[test] + fn extract_metric_fields_fails_to_add_existing_metric() { + let metric_fields = MetricFields::Added { + added_in: NonZeroU8::new(2).unwrap(), + fields: vec![], + }; + let current_version = Some(CurrentVersion::Active { + version: NonZeroU8::new(1).unwrap(), + fields: BTreeSet::new(), + }); + let res = + extract_metric_fields("foo", &metric_fields, ¤t_version); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!("Expected schema definition error, found: {res:#?}"); + }; + assert!( + msg.contains("is already active"), + "Should fail when adding a metric that's already active", + ); + } + + #[test] + fn extract_metric_fields_fails_to_remove_non_existent_metric() { + let metric_fields = + MetricFields::Removed { removed_in: NonZeroU8::new(3).unwrap() }; + let current_version = Some(CurrentVersion::Inactive { + 
removed_in: NonZeroU8::new(1).unwrap(), + }); + let res = + extract_metric_fields("foo", &metric_fields, ¤t_version); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!("Expected schema definition error, found: {res:#?}"); + }; + assert!( + msg.contains("was already removed"), + "Should fail when removing a metric that's already gone", + ); + } + + #[test] + fn load_schema_catches_metric_versions_not_added_in() { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] } + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { version = 1, fields = [] } + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let res = load_schema(contents); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!("Should fail when metrics are not added in, but found: {res:#?}"); + }; + assert!( + msg.contains("must be added in at its first"), + "Error message should indicate that metrics need to be \ + added_in first, then modified" + ); + } + + #[test] + fn into_schema_list_fails_with_zero_metrics() { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] } + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [] } + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let mut def: TimeseriesDefinition = toml::from_str(contents).unwrap(); + def.metrics.clear(); + let res = def.into_schema_list(); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!("Should fail with zero metrics, but found: {res:#?}"); + }; + assert!( + msg.contains("At least one metric must"), + "Error message should indicate that metrics need to be \ + added_in first, then modified" + ); + } + + #[test] + fn load_schema_fails_with_nonexistent_target_version() { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [] }, + { version = 2, fields = [] } + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let res = load_schema(contents); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!( + "Should fail when a metric version refers \ + to a non-existent target version, but found: {res:#?}", + ); + }; + assert!( + msg.contains("does not have a matching version in the target"), + "Error message should indicate that the metric \ + refers to a nonexistent version in the target, found: {msg}", + ); + } + + fn assert_sequential_versions( + first: &TimeseriesSchema, + second: &TimeseriesSchema, + ) { + assert_eq!(first.timeseries_name, second.timeseries_name); + assert_eq!( + first.version.get(), + second.version.get().checked_sub(1).unwrap() + ); + assert_eq!(first.datum_type, second.datum_type); + assert_eq!(first.field_schema, second.field_schema); + } + + #[test] + fn load_schema_fills_in_late_implied_metric_versions() { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + 
versions = [ + { version = 1, fields = [ "foo" ] }, + { version = 2, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [] } + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let schema = load_schema(contents).unwrap(); + assert_eq!( + schema.len(), + 2, + "Should have filled in version 2 of the metric using the \ + corresponding target version", + ); + assert_sequential_versions(&schema[0], &schema[1]); + } + + #[test] + fn load_schema_fills_in_implied_metric_versions_when_last_is_modified() { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] }, + { version = 2, fields = [ "foo" ] }, + { version = 3, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [] }, + { version = 3, fields = [] }, + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let schema = load_schema(contents).unwrap(); + assert_eq!( + schema.len(), + 3, + "Should have filled in version 2 of the metric using the \ + corresponding target version", + ); + assert_sequential_versions(&schema[0], &schema[1]); + assert_sequential_versions(&schema[1], &schema[2]); + } + + #[test] + fn load_schema_fills_in_implied_metric_versions() { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] }, + { version = 2, fields = [ "foo" ] }, + { version = 3, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [] }, + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let schema = load_schema(contents).unwrap(); + assert_eq!( + schema.len(), + 3, + "Should have filled in versions 2 and 3 of the metric using the \ + corresponding target version", + ); + assert_sequential_versions(&schema[0], &schema[1]); + assert_sequential_versions(&schema[1], &schema[2]); + } + + #[test] + fn load_schema_fills_in_implied_metric_versions_when_last_version_is_removed( + ) { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] }, + { version = 2, fields = [ "foo" ] }, + { version = 3, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [] }, + { removed_in = 3 }, + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let schema = load_schema(contents).unwrap(); + dbg!(&schema); + assert_eq!( + schema.len(), + 2, + "Should have filled in version 2 of the metric using the \ + corresponding target version", + ); + assert_sequential_versions(&schema[0], &schema[1]); + } + + #[test] + fn load_schema_skips_versions_until_metric_is_added() { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] }, + { version = 2, fields = [ "foo" ] }, + { version = 3, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "metric" + description = "some 
metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 3, fields = [] }, + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let schema = load_schema(contents).unwrap(); + assert_eq!( + schema.len(), + 1, + "Should have only created the last version of the timeseries" + ); + } + + #[test] + fn load_schema_fails_with_duplicate_timeseries() { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [] }, + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [] }, + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let res = load_schema(contents); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!( + "Expected to fail with duplicated timeseries, but found {res:#?}", + ); + }; + assert!( + msg.ends_with("is duplicated"), + "Message should indicate that a timeseries name / \ + version is duplicated" + ); + } + + #[test] + fn only_support_format_version_1() { + let contents = r#" + format_version = 2 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [] }, + ] + [fields.foo] + type = "string" + description = "a field" + "#; + let res = load_schema(contents); + let Err(MetricsError::Toml(msg)) = &res else { + panic!( + "Expected to fail with bad format version, but found {res:#?}", + ); + }; + assert!( + msg.contains("Only version 1 of"), + "Message should indicate that only format version 1 \ + is supported, but found {msg:?}" + ); + } + + #[test] + fn ensures_target_has_at_least_one_field() { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ ] }, + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [ "foo" ] }, + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let res = load_schema(contents); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!( + "Expected to fail when target has zero fields, but found {res:#?}", + ); + }; + assert_eq!( + msg, "Target 'target' version 1 must have at least one field", + "Message should indicate that all targets must \ + have at least one field, but found {msg:?}", + ); + } +} diff --git a/oximeter/oximeter/src/schema.rs b/oximeter/impl/src/schema/mod.rs similarity index 82% rename from oximeter/oximeter/src/schema.rs rename to oximeter/impl/src/schema/mod.rs index 2a577fc8f1..28dbf38ab8 100644 --- a/oximeter/oximeter/src/schema.rs +++ b/oximeter/impl/src/schema/mod.rs @@ -2,10 +2,13 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -// Copyright 2023 Oxide Computer Company +// Copyright 2024 Oxide Computer Company //! Tools for working with schema for fields and timeseries. 
+pub mod codegen; +pub mod ir; + use crate::types::DatumType; use crate::types::FieldType; use crate::types::MetricsError; @@ -21,8 +24,17 @@ use std::collections::btree_map::Entry; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::fmt::Write; +use std::num::NonZeroU8; use std::path::Path; +/// Full path to the directory containing all schema. +/// +/// This is defined in this crate as the single source of truth, but not +/// re-exported outside implementation crates (e.g., not via `oximeter` or +/// `oximeter-collector`. +pub const SCHEMA_DIRECTORY: &str = + concat!(env!("CARGO_MANIFEST_DIR"), "/../oximeter/schema"); + /// The name and type information for a field of a timeseries schema. #[derive( Clone, @@ -39,6 +51,7 @@ pub struct FieldSchema { pub name: String, pub field_type: FieldType, pub source: FieldSource, + pub description: String, } /// The source from which a field is derived, the target or metric. @@ -68,7 +81,7 @@ pub enum FieldSource { Debug, Clone, PartialEq, PartialOrd, Ord, Eq, Hash, Serialize, Deserialize, )] #[serde(try_from = "&str")] -pub struct TimeseriesName(String); +pub struct TimeseriesName(pub(crate) String); impl JsonSchema for TimeseriesName { fn schema_name() -> String { @@ -153,6 +166,24 @@ fn validate_timeseries_name(s: &str) -> Result<&str, MetricsError> { } } +/// Text descriptions for the target and metric of a timeseries. +#[derive(Clone, Debug, Default, Deserialize, JsonSchema, Serialize)] +pub struct TimeseriesDescription { + pub target: String, + pub metric: String, +} + +/// Measurement units for timeseries samples. +#[derive(Clone, Copy, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +#[serde(rename_all = "snake_case")] +// TODO-completeness: Include more units, such as power / temperature. +// TODO-completeness: Decide whether and how to handle dimensional analysis +// during queries, if needed. +pub enum Units { + Count, + Bytes, +} + /// The schema for a timeseries. /// /// This includes the name of the timeseries, as well as the datum type of its metric and the @@ -160,20 +191,33 @@ fn validate_timeseries_name(s: &str) -> Result<&str, MetricsError> { #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct TimeseriesSchema { pub timeseries_name: TimeseriesName, + pub description: TimeseriesDescription, pub field_schema: BTreeSet, pub datum_type: DatumType, + pub version: NonZeroU8, + pub authz_scope: AuthzScope, + pub units: Units, pub created: DateTime, } +/// Default version for timeseries schema, 1. 
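`FieldSchema` now carries a human-readable `description`, and timeseries schema record their `Units`. A small illustrative sketch of the expanded types; the field name, description text, and the exact re-export paths (`oximeter::schema`, `oximeter::types`) are assumptions:

```rust
// Hedged sketch: one field of a schema with the new description, plus the
// units a byte counter would declare.
use oximeter::schema::{FieldSchema, FieldSource, Units};
use oximeter::types::FieldType;

fn example_field_and_units() -> (FieldSchema, Units) {
    let field = FieldSchema {
        name: "link_name".to_string(),
        field_type: FieldType::String,
        source: FieldSource::Target,
        // Schema built from a Sample leave this empty; TOML-defined schema
        // fill it in from the field's `description` key.
        description: "Name of the physical data link".to_string(),
    };
    (field, Units::Bytes)
}
```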
+pub const fn default_schema_version() -> NonZeroU8 { + unsafe { NonZeroU8::new_unchecked(1) } +} + impl From<&Sample> for TimeseriesSchema { fn from(sample: &Sample) -> Self { - let timeseries_name = sample.timeseries_name.parse().unwrap(); + let timeseries_name = sample + .timeseries_name + .parse() + .expect("expected a legal timeseries name in a sample"); let mut field_schema = BTreeSet::new(); for field in sample.target_fields() { let schema = FieldSchema { name: field.name.clone(), field_type: field.value.field_type(), source: FieldSource::Target, + description: String::new(), }; field_schema.insert(schema); } @@ -182,30 +226,39 @@ impl From<&Sample> for TimeseriesSchema { name: field.name.clone(), field_type: field.value.field_type(), source: FieldSource::Metric, + description: String::new(), }; field_schema.insert(schema); } let datum_type = sample.measurement.datum_type(); - Self { timeseries_name, field_schema, datum_type, created: Utc::now() } + Self { + timeseries_name, + description: Default::default(), + field_schema, + datum_type, + version: default_schema_version(), + authz_scope: AuthzScope::Fleet, + units: Units::Count, + created: Utc::now(), + } } } impl TimeseriesSchema { /// Construct a timeseries schema from a target and metric. - pub fn new(target: &T, metric: &M) -> Self + pub fn new(target: &T, metric: &M) -> Result where T: Target, M: Metric, { - let timeseries_name = - TimeseriesName::try_from(crate::timeseries_name(target, metric)) - .unwrap(); + let timeseries_name = crate::timeseries_name(target, metric)?; let mut field_schema = BTreeSet::new(); for field in target.fields() { let schema = FieldSchema { name: field.name.clone(), field_type: field.value.field_type(), source: FieldSource::Target, + description: String::new(), }; field_schema.insert(schema); } @@ -214,11 +267,21 @@ impl TimeseriesSchema { name: field.name.clone(), field_type: field.value.field_type(), source: FieldSource::Metric, + description: String::new(), }; field_schema.insert(schema); } let datum_type = metric.datum_type(); - Self { timeseries_name, field_schema, datum_type, created: Utc::now() } + Ok(Self { + timeseries_name, + description: Default::default(), + field_schema, + datum_type, + version: default_schema_version(), + authz_scope: AuthzScope::Fleet, + units: Units::Count, + created: Utc::now(), + }) } /// Construct a timeseries schema from a sample @@ -240,11 +303,22 @@ impl TimeseriesSchema { .split_once(':') .expect("Incorrectly formatted timseries name") } + + /// Return the name of the target for this timeseries. + pub fn target_name(&self) -> &str { + self.component_names().0 + } + + /// Return the name of the metric for this timeseries. + pub fn metric_name(&self) -> &str { + self.component_names().1 + } } impl PartialEq for TimeseriesSchema { fn eq(&self, other: &TimeseriesSchema) -> bool { self.timeseries_name == other.timeseries_name + && self.version == other.version && self.datum_type == other.datum_type && self.field_schema == other.field_schema } @@ -263,6 +337,38 @@ impl PartialEq for TimeseriesSchema { const TIMESERIES_NAME_REGEX: &str = "^(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*):(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*)$"; +/// Authorization scope for a timeseries. +/// +/// This describes the level at which a user must be authorized to read data +/// from a timeseries. For example, fleet-scoping means the data is only visible +/// to an operator or fleet reader. 
Project-scoped, on the other hand, indicates +/// that a user will see data limited to the projects on which they have read +/// permissions. +#[derive( + Clone, + Copy, + Debug, + Deserialize, + Eq, + Hash, + JsonSchema, + Ord, + PartialEq, + PartialOrd, + Serialize, +)] +#[serde(rename_all = "snake_case")] +pub enum AuthzScope { + /// Timeseries data is limited to fleet readers. + Fleet, + /// Timeseries data is limited to the authorized silo for a user. + Silo, + /// Timeseries data is limited to the authorized projects for a user. + Project, + /// The timeseries is viewable to all without limitation. + ViewableToAll, +} + /// A set of timeseries schema, useful for testing changes to targets or /// metrics. #[derive(Debug, Default, Deserialize, PartialEq, Serialize)] @@ -284,24 +390,24 @@ impl SchemaSet { &mut self, target: &T, metric: &M, - ) -> Option + ) -> Result, MetricsError> where T: Target, M: Metric, { - let new = TimeseriesSchema::new(target, metric); + let new = TimeseriesSchema::new(target, metric)?; let name = new.timeseries_name.clone(); match self.inner.entry(name) { Entry::Vacant(entry) => { entry.insert(new); - None + Ok(None) } Entry::Occupied(entry) => { let existing = entry.get(); if existing == &new { - None + Ok(None) } else { - Some(existing.clone()) + Ok(Some(existing.clone())) } } } @@ -535,7 +641,7 @@ mod tests { fn test_timeseries_schema_from_parts() { let target = MyTarget::default(); let metric = MyMetric::default(); - let schema = TimeseriesSchema::new(&target, &metric); + let schema = TimeseriesSchema::new(&target, &metric).unwrap(); assert_eq!(schema.timeseries_name, "my_target:my_metric"); let f = schema.schema_for_field("id").unwrap(); @@ -560,7 +666,7 @@ mod tests { let target = MyTarget::default(); let metric = MyMetric::default(); let sample = Sample::new(&target, &metric).unwrap(); - let schema = TimeseriesSchema::new(&target, &metric); + let schema = TimeseriesSchema::new(&target, &metric).unwrap(); let schema_from_sample = TimeseriesSchema::from(&sample); assert_eq!(schema, schema_from_sample); } @@ -586,11 +692,13 @@ mod tests { name: String::from("later"), field_type: FieldType::U64, source: FieldSource::Target, + description: String::new(), }; let metric_field = FieldSchema { name: String::from("earlier"), field_type: FieldType::U64, source: FieldSource::Metric, + description: String::new(), }; let timeseries_name: TimeseriesName = "foo:bar".parse().unwrap(); let datum_type = DatumType::U64; @@ -598,8 +706,12 @@ mod tests { [target_field.clone(), metric_field.clone()].into_iter().collect(); let expected_schema = TimeseriesSchema { timeseries_name, + description: Default::default(), field_schema, datum_type, + version: default_schema_version(), + authz_scope: AuthzScope::Fleet, + units: Units::Count, created: Utc::now(), }; @@ -627,11 +739,13 @@ mod tests { name: String::from("second"), field_type: FieldType::U64, source: FieldSource::Target, + description: String::new(), }); fields.insert(FieldSchema { name: String::from("first"), field_type: FieldType::U64, source: FieldSource::Target, + description: String::new(), }); let mut iter = fields.iter(); assert_eq!(iter.next().unwrap().name, "first"); diff --git a/oximeter/oximeter/src/test_util.rs b/oximeter/impl/src/test_util.rs similarity index 97% rename from oximeter/oximeter/src/test_util.rs rename to oximeter/impl/src/test_util.rs index a9778d03bc..c2ac7b34bd 100644 --- a/oximeter/oximeter/src/test_util.rs +++ b/oximeter/impl/src/test_util.rs @@ -3,10 +3,10 @@ // file, You can obtain one at 
https://mozilla.org/MPL/2.0/. //! Utilities for testing the oximeter crate. -// Copyright 2021 Oxide Computer Company +// Copyright 2024 Oxide Computer Company use crate::histogram; -use crate::histogram::Histogram; +use crate::histogram::{Histogram, Record}; use crate::types::{Cumulative, Sample}; use uuid::Uuid; diff --git a/oximeter/oximeter/src/traits.rs b/oximeter/impl/src/traits.rs similarity index 90% rename from oximeter/oximeter/src/traits.rs rename to oximeter/impl/src/traits.rs index 0934d231e3..16baa4f619 100644 --- a/oximeter/oximeter/src/traits.rs +++ b/oximeter/impl/src/traits.rs @@ -4,7 +4,7 @@ //! Traits used to describe metric data and its sources. -// Copyright 2021 Oxide Computer Company +// Copyright 2024 Oxide Computer Company use crate::histogram::Histogram; use crate::types; @@ -19,6 +19,7 @@ use chrono::DateTime; use chrono::Utc; use num::traits::One; use num::traits::Zero; +use std::num::NonZeroU8; use std::ops::Add; use std::ops::AddAssign; @@ -44,7 +45,11 @@ use std::ops::AddAssign; /// -------- /// /// ```rust -/// use oximeter::{Target, FieldType}; +/// # // Rename the impl crate so the doctests can refer to the public +/// # // `oximeter` crate, not the private impl. +/// # extern crate oximeter_impl as oximeter; +/// # use oximeter_macro_impl::*; +/// use oximeter::{traits::Target, types::FieldType}; /// use uuid::Uuid; /// /// #[derive(Target)] @@ -70,15 +75,29 @@ use std::ops::AddAssign; /// supported types. /// /// ```compile_fail +/// # // Rename the impl crate so the doctests can refer to the public +/// # // `oximeter` crate, not the private impl. +/// # extern crate oximeter_impl as oximeter; +/// # use oximeter_macro_impl::*; /// #[derive(oximeter::Target)] /// struct Bad { /// bad: f64, /// } /// ``` +/// +/// **Important:** Deriving this trait is deprecated, and will be removed in the +/// future. Instead, define your timeseries schema through the TOML format +/// described in [the crate documentation](crate), and use the code +/// generated by the `use_timeseries` macro. pub trait Target { /// Return the name of the target, which is the snake_case form of the struct's name. fn name(&self) -> &'static str; + /// Return the version of the target schema. + fn version(&self) -> NonZeroU8 { + unsafe { NonZeroU8::new_unchecked(1) } + } + /// Return the names of the target's fields, in the order in which they're defined. fn field_names(&self) -> &'static [&'static str]; @@ -141,6 +160,10 @@ pub trait Target { /// Example /// ------- /// ```rust +/// # // Rename the impl crate so the doctests can refer to the public +/// # // `oximeter` crate, not the private impl. +/// # extern crate oximeter_impl as oximeter; +/// # use oximeter_macro_impl::*; /// use chrono::Utc; /// use oximeter::Metric; /// @@ -162,6 +185,10 @@ pub trait Target { /// an unsupported type. /// /// ```compile_fail +/// # // Rename the impl crate so the doctests can refer to the public +/// # // `oximeter` crate, not the private impl. +/// # extern crate oximeter_impl as oximeter; +/// # use oximeter_macro_impl::*; /// #[derive(Metric)] /// pub struct BadType { /// field: f32, @@ -174,6 +201,11 @@ pub trait Metric { /// Return the name of the metric, which is the snake_case form of the struct's name. fn name(&self) -> &'static str; + /// Return the version of the metric schema. + fn version(&self) -> NonZeroU8 { + unsafe { NonZeroU8::new_unchecked(1) } + } + /// Return the names of the metric's fields, in the order in which they're defined. 
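Both `Target` and `Metric` gain a `version()` method defaulting to 1, and, as in the schema changes above, `TimeseriesSchema::new` is now fallible. A hedged sketch tying the two together; the derive invocation mirrors the crate-docs example, the struct and field names are made up, and `uuid` is only an illustrative dependency:

```rust
// Hedged sketch: a derived target/metric pair reports schema version 1 by
// default, and the fallible constructor exposes the split name helpers.
use oximeter::{Metric, Target, TimeseriesSchema};
use std::num::NonZeroU8;

#[derive(oximeter::Target)]
struct MyTarget {
    id: uuid::Uuid,
}

#[derive(oximeter::Metric)]
struct MyMetric {
    #[datum]
    count: u64,
}

fn schema_for(target: &MyTarget, metric: &MyMetric) -> TimeseriesSchema {
    // Without an explicit override, both halves report schema version 1.
    assert_eq!(target.version(), NonZeroU8::new(1).unwrap());
    assert_eq!(target.version(), metric.version());
    let schema = TimeseriesSchema::new(target, metric).expect("valid names");
    // The new helpers split the "target:metric" name back into its parts.
    assert_eq!(schema.target_name(), "my_target");
    assert_eq!(schema.metric_name(), "my_metric");
    schema
}
```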
fn field_names(&self) -> &'static [&'static str]; @@ -332,6 +364,10 @@ pub use crate::histogram::HistogramSupport; /// Example /// ------- /// ```rust +/// # // Rename the impl crate so the doctests can refer to the public +/// # // `oximeter` crate, not the private impl. +/// # extern crate oximeter_impl as oximeter; +/// # use oximeter_macro_impl::*; /// use oximeter::{Datum, MetricsError, Metric, Producer, Target}; /// use oximeter::types::{Measurement, Sample, Cumulative}; /// diff --git a/oximeter/oximeter/src/types.rs b/oximeter/impl/src/types.rs similarity index 95% rename from oximeter/oximeter/src/types.rs rename to oximeter/impl/src/types.rs index 3e6ffc5442..a6518e4ad5 100644 --- a/oximeter/oximeter/src/types.rs +++ b/oximeter/impl/src/types.rs @@ -4,11 +4,12 @@ //! Types used to describe targets, metrics, and measurements. -// Copyright 2023 Oxide Computer Company +// Copyright 2024 Oxide Computer Company use crate::histogram; use crate::traits; use crate::Producer; +use crate::TimeseriesName; use bytes::Bytes; use chrono::DateTime; use chrono::Utc; @@ -24,6 +25,7 @@ use std::fmt; use std::net::IpAddr; use std::net::Ipv4Addr; use std::net::Ipv6Addr; +use std::num::NonZeroU8; use std::ops::Add; use std::ops::AddAssign; use std::sync::Arc; @@ -667,8 +669,21 @@ pub enum MetricsError { #[error("Missing datum of type {datum_type} cannot have a start time")] MissingDatumCannotHaveStartTime { datum_type: DatumType }, + #[error("Invalid timeseries name")] InvalidTimeseriesName, + + #[error("TOML deserialization error: {0}")] + Toml(String), + + #[error("Schema definition error: {0}")] + SchemaDefinition(String), + + #[error("Target version {target} does not match metric version {metric}")] + TargetMetricVersionMismatch { + target: std::num::NonZeroU8, + metric: std::num::NonZeroU8, + }, } impl From for omicron_common::api::external::Error { @@ -795,7 +810,13 @@ pub struct Sample { pub measurement: Measurement, /// The name of the timeseries this sample belongs to - pub timeseries_name: String, + pub timeseries_name: TimeseriesName, + + /// The version of the timeseries this sample belongs to + // + // TODO-cleanup: This should be removed once schema are tracked in CRDB. + #[serde(default = "::oximeter::schema::default_schema_version")] + pub timeseries_version: NonZeroU8, // Target name and fields target: FieldSet, @@ -810,7 +831,8 @@ impl PartialEq for Sample { /// Two samples are considered equal if they have equal targets and metrics, and occur at the /// same time. Importantly, the _data_ is not used during comparison. 
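The new `TargetMetricVersionMismatch` variant is produced by the `Sample` constructors in the hunks that follow. A minimal sketch of what a caller sees when the target and metric disagree about their schema version; the function name is illustrative:

```rust
// Hedged sketch: Sample::new now fails up front on a schema-version mismatch.
use oximeter::{Metric, MetricsError, Sample, Target};

fn try_sample<T: Target, M: Metric>(target: &T, metric: &M) {
    match Sample::new(target, metric) {
        Ok(sample) => println!("built sample for {}", sample.timeseries_name),
        Err(MetricsError::TargetMetricVersionMismatch { target: tv, metric: mv }) => {
            eprintln!("target is at schema version {tv}, metric at {mv}");
        }
        Err(other) => eprintln!("failed to build sample: {other}"),
    }
}
```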
fn eq(&self, other: &Sample) -> bool { - self.target.eq(&other.target) + self.timeseries_version.eq(&other.timeseries_version) + && self.target.eq(&other.target) && self.metric.eq(&other.metric) && self.measurement.start_time().eq(&other.measurement.start_time()) && self.measurement.timestamp().eq(&other.measurement.timestamp()) @@ -831,11 +853,19 @@ impl Sample { T: traits::Target, M: traits::Metric, { + if target.version() != metric.version() { + return Err(MetricsError::TargetMetricVersionMismatch { + target: target.version(), + metric: metric.version(), + }); + } let target_fields = FieldSet::from_target(target); let metric_fields = FieldSet::from_metric(metric); Self::verify_field_names(&target_fields, &metric_fields)?; + let timeseries_name = crate::timeseries_name(target, metric)?; Ok(Self { - timeseries_name: crate::timeseries_name(target, metric), + timeseries_name, + timeseries_version: target.version(), target: target_fields, metric: metric_fields, measurement: metric.measure(timestamp), @@ -853,12 +883,20 @@ impl Sample { T: traits::Target, M: traits::Metric, { + if target.version() != metric.version() { + return Err(MetricsError::TargetMetricVersionMismatch { + target: target.version(), + metric: metric.version(), + }); + } let target_fields = FieldSet::from_target(target); let metric_fields = FieldSet::from_metric(metric); Self::verify_field_names(&target_fields, &metric_fields)?; let datum = Datum::Missing(MissingDatum::from(metric)); + let timeseries_name = crate::timeseries_name(target, metric)?; Ok(Self { - timeseries_name: crate::timeseries_name(target, metric), + timeseries_name, + timeseries_version: target.version(), target: target_fields, metric: metric_fields, measurement: Measurement { timestamp, datum }, diff --git a/oximeter/oximeter/tests/fail/failures.rs b/oximeter/impl/tests/fail/failures.rs similarity index 100% rename from oximeter/oximeter/tests/fail/failures.rs rename to oximeter/impl/tests/fail/failures.rs diff --git a/oximeter/oximeter/tests/fail/failures.stderr b/oximeter/impl/tests/fail/failures.stderr similarity index 100% rename from oximeter/oximeter/tests/fail/failures.stderr rename to oximeter/impl/tests/fail/failures.stderr diff --git a/oximeter/oximeter/tests/test_compilation.rs b/oximeter/impl/tests/test_compilation.rs similarity index 100% rename from oximeter/oximeter/tests/test_compilation.rs rename to oximeter/impl/tests/test_compilation.rs diff --git a/oximeter/instruments/Cargo.toml b/oximeter/instruments/Cargo.toml index a04e26fdaa..f51278f794 100644 --- a/oximeter/instruments/Cargo.toml +++ b/oximeter/instruments/Cargo.toml @@ -13,6 +13,8 @@ chrono = { workspace = true, optional = true } dropshot = { workspace = true, optional = true } futures = { workspace = true, optional = true } http = { workspace = true, optional = true } +kstat-rs = { workspace = true, optional = true } +libc = { workspace = true, optional = true } oximeter = { workspace = true, optional = true } slog = { workspace = true, optional = true } tokio = { workspace = true, optional = true } @@ -35,6 +37,7 @@ kstat = [ "dep:chrono", "dep:futures", "dep:kstat-rs", + "dep:libc", "dep:oximeter", "dep:slog", "dep:tokio", @@ -48,6 +51,3 @@ rand.workspace = true slog-async.workspace = true slog-term.workspace = true oximeter.workspace = true - -[target.'cfg(target_os = "illumos")'.dependencies] -kstat-rs = { workspace = true, optional = true } diff --git a/oximeter/instruments/src/http.rs b/oximeter/instruments/src/http.rs index dcbaf65c06..4bc6cf8677 100644 --- 
a/oximeter/instruments/src/http.rs +++ b/oximeter/instruments/src/http.rs @@ -12,8 +12,10 @@ use dropshot::{ use futures::Future; use http::StatusCode; use http::Uri; -use oximeter::histogram::Histogram; -use oximeter::{Metric, MetricsError, Producer, Sample, Target}; +use oximeter::{ + histogram::Histogram, histogram::Record, Metric, MetricsError, Producer, + Sample, Target, +}; use std::collections::BTreeMap; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; diff --git a/oximeter/instruments/src/kstat/link.rs b/oximeter/instruments/src/kstat/link.rs index 03397c4108..0507594056 100644 --- a/oximeter/instruments/src/kstat/link.rs +++ b/oximeter/instruments/src/kstat/link.rs @@ -15,98 +15,9 @@ use kstat_rs::Data; use kstat_rs::Kstat; use kstat_rs::Named; use oximeter::types::Cumulative; -use oximeter::Metric; use oximeter::Sample; -use oximeter::Target; -use uuid::Uuid; - -/// Information about a single physical Ethernet link on a host. -#[derive(Clone, Debug, Target)] -pub struct PhysicalDataLink { - /// The ID of the rack (cluster) containing this host. - pub rack_id: Uuid, - /// The ID of the sled itself. - pub sled_id: Uuid, - /// The serial number of the hosting sled. - pub serial: String, - /// The name of the host. - pub hostname: String, - /// The name of the link. - pub link_name: String, -} - -/// Information about a virtual Ethernet link on a host. -/// -/// Note that this is specifically for a VNIC in on the host system, not a guest -/// data link. -#[derive(Clone, Debug, Target)] -pub struct VirtualDataLink { - /// The ID of the rack (cluster) containing this host. - pub rack_id: Uuid, - /// The ID of the sled itself. - pub sled_id: Uuid, - /// The serial number of the hosting sled. - pub serial: String, - /// The name of the host, or the zone name for links in a zone. - pub hostname: String, - /// The name of the link. - pub link_name: String, -} - -/// Information about a guest virtual Ethernet link. -#[derive(Clone, Debug, Target)] -pub struct GuestDataLink { - /// The ID of the rack (cluster) containing this host. - pub rack_id: Uuid, - /// The ID of the sled itself. - pub sled_id: Uuid, - /// The serial number of the hosting sled. - pub serial: String, - /// The name of the host, or the zone name for links in a zone. - pub hostname: String, - /// The ID of the project containing the instance. - pub project_id: Uuid, - /// The ID of the instance. - pub instance_id: Uuid, - /// The name of the link. - pub link_name: String, -} - -/// The number of packets received on the link. -#[derive(Clone, Copy, Metric)] -pub struct PacketsReceived { - pub datum: Cumulative, -} - -/// The number of packets sent on the link. -#[derive(Clone, Copy, Metric)] -pub struct PacketsSent { - pub datum: Cumulative, -} - -/// The number of bytes sent on the link. -#[derive(Clone, Copy, Metric)] -pub struct BytesSent { - pub datum: Cumulative, -} - -/// The number of bytes received on the link. -#[derive(Clone, Copy, Metric)] -pub struct BytesReceived { - pub datum: Cumulative, -} -/// The number of errors received on the link. -#[derive(Clone, Copy, Metric)] -pub struct ErrorsReceived { - pub datum: Cumulative, -} - -/// The number of errors sent on the link. -#[derive(Clone, Copy, Metric)] -pub struct ErrorsSent { - pub datum: Cumulative, -} +oximeter::use_timeseries!("physical-data-link.toml"); // Helper function to extract the same kstat metrics from all link targets. 
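The hand-written link targets and metrics are replaced by code generated from `physical-data-link.toml`. A hedged sketch of consuming the generated module: the assumption that the generated string fields accept `.into()` from `String` comes from the updated tests below, and the `uuid` values are placeholders:

```rust
// Hedged sketch: the macro emits a module named after the target, with the
// target struct and one metric struct per [[metrics]] entry in the TOML.
oximeter::use_timeseries!("physical-data-link.toml");

use chrono::Utc;
use oximeter::types::Cumulative;
use oximeter::Sample;

fn link_byte_sample(link_name: String, rbytes: u64) -> Result<Sample, oximeter::MetricsError> {
    let target = physical_data_link::PhysicalDataLink {
        rack_id: uuid::Uuid::new_v4(),
        sled_id: uuid::Uuid::new_v4(),
        serial: String::from("BRM000001").into(),
        hostname: String::from("sled-0").into(),
        link_name: link_name.into(),
    };
    let metric = physical_data_link::BytesReceived {
        datum: Cumulative::with_start_time(Utc::now(), rbytes),
    };
    Sample::new(&target, &metric)
}
```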
fn extract_link_kstats( @@ -121,7 +32,7 @@ where let Named { name, value } = named_data; if *name == "rbytes64" { Some(value.as_u64().and_then(|x| { - let metric = BytesReceived { + let metric = physical_data_link::BytesReceived { datum: Cumulative::with_start_time(creation_time, x), }; Sample::new_with_timestamp(snapshot_time, target, &metric) @@ -129,7 +40,7 @@ where })) } else if *name == "obytes64" { Some(value.as_u64().and_then(|x| { - let metric = BytesSent { + let metric = physical_data_link::BytesSent { datum: Cumulative::with_start_time(creation_time, x), }; Sample::new_with_timestamp(snapshot_time, target, &metric) @@ -137,7 +48,7 @@ where })) } else if *name == "ipackets64" { Some(value.as_u64().and_then(|x| { - let metric = PacketsReceived { + let metric = physical_data_link::PacketsReceived { datum: Cumulative::with_start_time(creation_time, x), }; Sample::new_with_timestamp(snapshot_time, target, &metric) @@ -145,7 +56,7 @@ where })) } else if *name == "opackets64" { Some(value.as_u64().and_then(|x| { - let metric = PacketsSent { + let metric = physical_data_link::PacketsSent { datum: Cumulative::with_start_time(creation_time, x), }; Sample::new_with_timestamp(snapshot_time, target, &metric) @@ -153,7 +64,7 @@ where })) } else if *name == "ierrors" { Some(value.as_u32().and_then(|x| { - let metric = ErrorsReceived { + let metric = physical_data_link::ErrorsReceived { datum: Cumulative::with_start_time(creation_time, x.into()), }; Sample::new_with_timestamp(snapshot_time, target, &metric) @@ -161,7 +72,7 @@ where })) } else if *name == "oerrors" { Some(value.as_u32().and_then(|x| { - let metric = ErrorsSent { + let metric = physical_data_link::ErrorsSent { datum: Cumulative::with_start_time(creation_time, x.into()), }; Sample::new_with_timestamp(snapshot_time, target, &metric) @@ -177,19 +88,7 @@ trait LinkKstatTarget: KstatTarget { fn link_name(&self) -> &str; } -impl LinkKstatTarget for PhysicalDataLink { - fn link_name(&self) -> &str { - &self.link_name - } -} - -impl LinkKstatTarget for VirtualDataLink { - fn link_name(&self) -> &str { - &self.link_name - } -} - -impl LinkKstatTarget for GuestDataLink { +impl LinkKstatTarget for physical_data_link::PhysicalDataLink { fn link_name(&self) -> &str { &self.link_name } @@ -225,7 +124,7 @@ where } } -#[cfg(test)] +#[cfg(all(test, target_os = "illumos"))] mod tests { use super::*; use crate::kstat::sampler::KstatPath; @@ -325,17 +224,17 @@ mod tests { fn test_physical_datalink() { let link = TestEtherstub::new(); let sn = String::from("BRM000001"); - let dl = PhysicalDataLink { + let dl = physical_data_link::PhysicalDataLink { rack_id: RACK_ID, sled_id: SLED_ID, - serial: sn.clone(), - hostname: sn, - link_name: link.name.to_string(), + serial: sn.clone().into(), + hostname: sn.into(), + link_name: link.name.clone().into(), }; let ctl = Ctl::new().unwrap(); let ctl = ctl.update().unwrap(); let mut kstat = ctl - .filter(Some("link"), Some(0), Some(dl.link_name.as_str())) + .filter(Some("link"), Some(0), Some(link.name.as_str())) .next() .unwrap(); let creation_time = hrtime_to_utc(kstat.ks_crtime).unwrap(); @@ -349,12 +248,12 @@ mod tests { let mut sampler = KstatSampler::new(&test_logger()).unwrap(); let sn = String::from("BRM000001"); let link = TestEtherstub::new(); - let dl = PhysicalDataLink { + let dl = physical_data_link::PhysicalDataLink { rack_id: RACK_ID, sled_id: SLED_ID, - serial: sn.clone(), - hostname: sn, - link_name: link.name.to_string(), + serial: sn.clone().into(), + hostname: sn.into(), + link_name: 
link.name.clone().into(), }; let details = CollectionDetails::never(Duration::from_secs(1)); let id = sampler.add_target(dl, details).await.unwrap(); @@ -397,12 +296,12 @@ mod tests { KstatSampler::with_sample_limit(&test_logger(), limit).unwrap(); let sn = String::from("BRM000001"); let link = TestEtherstub::new(); - let dl = PhysicalDataLink { + let dl = physical_data_link::PhysicalDataLink { rack_id: RACK_ID, sled_id: SLED_ID, - serial: sn.clone(), - hostname: sn, - link_name: link.name.to_string(), + serial: sn.clone().into(), + hostname: sn.into(), + link_name: link.name.to_string().into(), }; let details = CollectionDetails::never(Duration::from_secs(1)); sampler.add_target(dl, details).await.unwrap(); @@ -464,12 +363,12 @@ mod tests { let sn = String::from("BRM000001"); let link = TestEtherstub::new(); info!(log, "created test etherstub"; "name" => &link.name); - let dl = PhysicalDataLink { + let dl = physical_data_link::PhysicalDataLink { rack_id: RACK_ID, sled_id: SLED_ID, - serial: sn.clone(), - hostname: sn, - link_name: link.name.to_string(), + serial: sn.clone().into(), + hostname: sn.into(), + link_name: link.name.to_string().into(), }; let collection_interval = Duration::from_secs(1); let expiry = Duration::from_secs(1); @@ -521,12 +420,12 @@ mod tests { let sn = String::from("BRM000001"); let link = TestEtherstub::new(); info!(log, "created test etherstub"; "name" => &link.name); - let dl = PhysicalDataLink { + let dl = physical_data_link::PhysicalDataLink { rack_id: RACK_ID, sled_id: SLED_ID, - serial: sn.clone(), - hostname: sn, - link_name: link.name.to_string(), + serial: sn.clone().into(), + hostname: sn.into(), + link_name: link.name.to_string().into(), }; let collection_interval = Duration::from_secs(1); let expiry = Duration::from_secs(1); @@ -570,12 +469,12 @@ mod tests { name: link.name.clone(), }; info!(log, "created test etherstub"; "name" => &link.name); - let dl = PhysicalDataLink { + let dl = physical_data_link::PhysicalDataLink { rack_id: RACK_ID, sled_id: SLED_ID, - serial: sn.clone(), - hostname: sn, - link_name: link.name.to_string(), + serial: sn.clone().into(), + hostname: sn.into(), + link_name: link.name.to_string().into(), }; let collection_interval = Duration::from_secs(1); let expiry = Duration::from_secs(1); @@ -617,12 +516,12 @@ mod tests { name: link.name.clone(), }; info!(log, "created test etherstub"; "name" => &link.name); - let dl = PhysicalDataLink { + let dl = physical_data_link::PhysicalDataLink { rack_id: RACK_ID, sled_id: SLED_ID, - serial: sn.clone(), - hostname: sn, - link_name: link.name.to_string(), + serial: sn.clone().into(), + hostname: sn.into(), + link_name: link.name.to_string().into(), }; let collection_interval = Duration::from_secs(1); let expiry = Duration::from_secs(1); diff --git a/oximeter/instruments/src/kstat/mod.rs b/oximeter/instruments/src/kstat/mod.rs index c792a51408..a5020b9b61 100644 --- a/oximeter/instruments/src/kstat/mod.rs +++ b/oximeter/instruments/src/kstat/mod.rs @@ -97,6 +97,22 @@ pub use sampler::KstatSampler; pub use sampler::TargetId; pub use sampler::TargetStatus; +cfg_if::cfg_if! { + if #[cfg(all(test, target_os = "illumos"))] { + type Timestamp = tokio::time::Instant; + #[inline(always)] + fn now() -> Timestamp { + tokio::time::Instant::now() + } + } else { + type Timestamp = chrono::DateTime; + #[inline(always)] + fn now() -> Timestamp { + chrono::Utc::now() + } + } +} + /// The reason a kstat target was expired and removed from a sampler. 
#[derive(Clone, Copy, Debug)] pub enum ExpirationReason { @@ -114,10 +130,7 @@ pub struct Expiration { /// The last error before expiration. pub error: Box, /// The time at which the expiration occurred. - #[cfg(test)] - pub expired_at: tokio::time::Instant, - #[cfg(not(test))] - pub expired_at: DateTime, + pub expired_at: Timestamp, } /// Errors resulting from reporting kernel statistics. @@ -191,7 +204,7 @@ pub trait KstatTarget: /// Convert from a high-res timestamp into UTC, if possible. pub fn hrtime_to_utc(hrtime: i64) -> Result, Error> { let utc_now = Utc::now(); - let hrtime_now = unsafe { gethrtime() }; + let hrtime_now = get_hires_time(); match hrtime_now.cmp(&hrtime) { Ordering::Equal => Ok(utc_now), Ordering::Less => { @@ -262,7 +275,27 @@ impl<'a> ConvertNamedData for NamedData<'a> { } } -#[link(name = "c")] -extern "C" { - fn gethrtime() -> i64; +/// Return a high-resolution monotonic timestamp, in nanoseconds since an +/// arbitrary point in the past. +/// +/// This is equivalent to `gethrtime(3C)` on illumos, and `clock_gettime()` with +/// an equivalent clock source on other platforms. +pub fn get_hires_time() -> i64 { + // NOTE: See `man clock_gettime`, but this is an alias for `CLOCK_HIGHRES`, + // and is the same source that underlies `gethrtime()`, which this API is + // intended to emulate on other platforms. + #[cfg(target_os = "illumos")] + const SOURCE: libc::clockid_t = libc::CLOCK_MONOTONIC; + #[cfg(not(target_os = "illumos"))] + const SOURCE: libc::clockid_t = libc::CLOCK_MONOTONIC_RAW; + let mut tp = libc::timespec { tv_sec: 0, tv_nsec: 0 }; + if unsafe { libc::clock_gettime(SOURCE, &mut tp as *mut _) } == 0 { + const NANOS_PER_SEC: i64 = 1_000_000_000; + tp.tv_sec + .checked_mul(NANOS_PER_SEC) + .and_then(|nsec| nsec.checked_add(tp.tv_nsec)) + .unwrap_or(0) + } else { + 0 + } } diff --git a/oximeter/instruments/src/kstat/sampler.rs b/oximeter/instruments/src/kstat/sampler.rs index af1b3ba7cf..74770a6225 100644 --- a/oximeter/instruments/src/kstat/sampler.rs +++ b/oximeter/instruments/src/kstat/sampler.rs @@ -4,6 +4,8 @@ //! Generate oximeter samples from kernel statistics. +use super::now; +use super::Timestamp; use crate::kstat::hrtime_to_utc; use crate::kstat::Error; use crate::kstat::Expiration; @@ -41,9 +43,6 @@ use tokio::time::interval; use tokio::time::sleep; use tokio::time::Sleep; -#[cfg(test)] -use tokio::time::Instant; - // The `KstatSampler` generates some statistics about its own operation, mostly // for surfacing failures to collect and dropped samples. mod self_stats { @@ -165,12 +164,7 @@ pub enum TargetStatus { /// The target is currently being collected from normally. /// /// The timestamp of the last collection is included. - Ok { - #[cfg(test)] - last_collection: Option, - #[cfg(not(test))] - last_collection: Option>, - }, + Ok { last_collection: Option }, /// The target has been expired. /// /// The details about the expiration are included. @@ -178,10 +172,7 @@ pub enum TargetStatus { reason: ExpirationReason, // NOTE: The error is a string, because it's not cloneable. error: String, - #[cfg(test)] - expired_at: Instant, - #[cfg(not(test))] - expired_at: DateTime, + expired_at: Timestamp, }, } @@ -204,7 +195,7 @@ enum Request { /// Remove a target. RemoveTarget { id: TargetId, reply_tx: oneshot::Sender> }, /// Return the creation times of all tracked / extant kstats. 
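`get_hires_time` replaces the direct `gethrtime` FFI call and works off-illumos as well. A hedged usage sketch, assuming the `kstat` feature is enabled and the functions remain public at `oximeter_instruments::kstat`; the function names here are illustrative:

```rust
// Hedged sketch: measure an elapsed interval in nanoseconds with the new
// helper, and convert a kstat creation hrtime to wall-clock time.
use chrono::{DateTime, Utc};
use oximeter_instruments::kstat::{get_hires_time, hrtime_to_utc};

fn elapsed_nanos<F: FnOnce()>(f: F) -> i64 {
    let start = get_hires_time();
    f();
    // A monotonic source on both illumos (CLOCK_MONOTONIC, an alias for
    // CLOCK_HIGHRES) and other platforms (CLOCK_MONOTONIC_RAW).
    get_hires_time() - start
}

fn kstat_creation_time(ks_crtime: i64) -> Option<DateTime<Utc>> {
    // `hrtime_to_utc` anchors the hrtime against the current wall clock.
    hrtime_to_utc(ks_crtime).ok()
}
```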
- #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] CreationTimes { reply_tx: oneshot::Sender>>, }, @@ -218,15 +209,9 @@ struct SampledKstat { /// The details around collection and expiration behavior. details: CollectionDetails, /// The time at which we _added_ this target to the sampler. - #[cfg(test)] - time_added: Instant, - #[cfg(not(test))] - time_added: DateTime, + time_added: Timestamp, /// The last time we successfully collected from the target. - #[cfg(test)] - time_of_last_collection: Option, - #[cfg(not(test))] - time_of_last_collection: Option>, + time_of_last_collection: Option, /// Attempts since we last successfully collected from the target. attempts_since_last_collection: usize, } @@ -384,7 +369,7 @@ fn hostname() -> Option { } /// Stores the number of samples taken, used for testing. -#[cfg(test)] +#[cfg(all(test, target_os = "illumos"))] pub(crate) struct SampleCounts { pub total: usize, pub overflow: usize, @@ -420,7 +405,8 @@ impl KstatSamplerWorker { /// kstats at their intervals. Samples will be pushed onto the queue. async fn run( mut self, - #[cfg(test)] sample_count_tx: mpsc::UnboundedSender, + #[cfg(all(test, target_os = "illumos"))] + sample_count_tx: mpsc::UnboundedSender, ) { let mut sample_timeouts = FuturesUnordered::new(); let mut creation_prune_interval = @@ -487,7 +473,7 @@ impl KstatSamplerWorker { // Send the total number of samples we've actually // taken and the number we've appended over to any // testing code which might be listening. - #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] sample_count_tx.send(SampleCounts { total: n_samples, overflow: n_overflow_samples, @@ -635,7 +621,7 @@ impl KstatSamplerWorker { ), } } - #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] Request::CreationTimes { reply_tx } => { debug!(self.log, "request for creation times"); reply_tx.send(self.creation_times.clone()).unwrap(); @@ -734,13 +720,7 @@ impl KstatSamplerWorker { .collect::, _>>(); match kstats { Ok(k) if !k.is_empty() => { - cfg_if::cfg_if! { - if #[cfg(test)] { - sampled_kstat.time_of_last_collection = Some(Instant::now()); - } else { - sampled_kstat.time_of_last_collection = Some(Utc::now()); - } - } + sampled_kstat.time_of_last_collection = Some(now()); sampled_kstat.attempts_since_last_collection = 0; sampled_kstat.target.to_samples(&k).map(Option::Some) } @@ -769,17 +749,10 @@ impl KstatSamplerWorker { if sampled_kstat.attempts_since_last_collection >= n_attempts { - cfg_if::cfg_if! { - if #[cfg(test)] { - let expired_at = Instant::now(); - } else { - let expired_at = Utc::now(); - } - } return Err(Error::Expired(Expiration { reason: ExpirationReason::Attempts(n_attempts), error: Box::new(e), - expired_at, + expired_at: now(), })); } } @@ -790,18 +763,12 @@ impl KstatSamplerWorker { .time_of_last_collection .unwrap_or(sampled_kstat.time_added); let expire_at = start + duration; - cfg_if::cfg_if! { - if #[cfg(test)] { - let now = Instant::now(); - } else { - let now = Utc::now(); - } - } - if now >= expire_at { + let now_ = now(); + if now_ >= expire_at { return Err(Error::Expired(Expiration { reason: ExpirationReason::Duration(duration), error: Box::new(e), - expired_at: now, + expired_at: now_, })); } } @@ -1019,18 +986,10 @@ impl KstatSamplerWorker { None => {} } self.ensure_creation_times_for_target(&*target)?; - - cfg_if::cfg_if! 
{ - if #[cfg(test)] { - let time_added = Instant::now(); - } else { - let time_added = Utc::now(); - } - } let item = SampledKstat { target, details, - time_added, + time_added: now(), time_of_last_collection: None, attempts_since_last_collection: 0, }; @@ -1076,7 +1035,7 @@ pub struct KstatSampler { outbox: mpsc::Sender, self_stat_rx: Arc>>, _worker_task: Arc>, - #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] sample_count_rx: Arc>>, } @@ -1110,7 +1069,7 @@ impl KstatSampler { samples.clone(), limit, )?; - #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] let (sample_count_rx, _worker_task) = { let (sample_count_tx, sample_count_rx) = mpsc::unbounded_channel(); ( @@ -1118,14 +1077,14 @@ impl KstatSampler { Arc::new(tokio::task::spawn(worker.run(sample_count_tx))), ) }; - #[cfg(not(test))] + #[cfg(not(all(test, target_os = "illumos")))] let _worker_task = Arc::new(tokio::task::spawn(worker.run())); Ok(Self { samples, outbox, self_stat_rx: Arc::new(Mutex::new(self_stat_rx)), _worker_task, - #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] sample_count_rx, }) } @@ -1174,7 +1133,7 @@ impl KstatSampler { } /// Return the number of samples pushed by the sampling task, if any. - #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] pub(crate) fn sample_counts(&self) -> Option { match self.sample_count_rx.lock().unwrap().try_recv() { Ok(c) => Some(c), @@ -1184,7 +1143,7 @@ impl KstatSampler { } /// Return the creation times for all tracked kstats. - #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] pub(crate) async fn creation_times( &self, ) -> BTreeMap> { diff --git a/oximeter/instruments/src/lib.rs b/oximeter/instruments/src/lib.rs index c1f839c85d..521034e423 100644 --- a/oximeter/instruments/src/lib.rs +++ b/oximeter/instruments/src/lib.rs @@ -9,5 +9,5 @@ #[cfg(feature = "http-instruments")] pub mod http; -#[cfg(all(feature = "kstat", target_os = "illumos"))] +#[cfg(feature = "kstat")] pub mod kstat; diff --git a/oximeter/oximeter-macro-impl/src/lib.rs b/oximeter/oximeter-macro-impl/src/lib.rs index f110d00e69..499cd82d0a 100644 --- a/oximeter/oximeter-macro-impl/src/lib.rs +++ b/oximeter/oximeter-macro-impl/src/lib.rs @@ -153,6 +153,10 @@ fn build_shared_methods(item_name: &Ident, fields: &[&Field]) -> TokenStream { #name } + fn version(&self) -> ::std::num::NonZeroU8 { + unsafe { ::std::num::NonZeroU8::new_unchecked(1) } + } + fn field_names(&self) -> &'static [&'static str] { &[#(#names),*] } diff --git a/oximeter/oximeter/Cargo.toml b/oximeter/oximeter/Cargo.toml index 2445e0483a..c04d1bd3ae 100644 --- a/oximeter/oximeter/Cargo.toml +++ b/oximeter/oximeter/Cargo.toml @@ -9,22 +9,14 @@ license = "MPL-2.0" workspace = true [dependencies] -bytes = { workspace = true, features = [ "serde" ] } +anyhow.workspace = true +clap.workspace = true chrono.workspace = true -num.workspace = true -omicron-common.workspace = true +omicron-workspace-hack.workspace = true +oximeter-impl.workspace = true oximeter-macro-impl.workspace = true -regex.workspace = true -schemars = { workspace = true, features = [ "uuid1", "bytes", "chrono" ] } -serde.workspace = true -serde_json.workspace = true -strum.workspace = true -thiserror.workspace = true +oximeter-timeseries-macro.workspace = true +prettyplease.workspace = true +syn.workspace = true +toml.workspace = true uuid.workspace = true -omicron-workspace-hack.workspace = true - -[dev-dependencies] -approx.workspace = true -rstest.workspace = true -serde_json.workspace = true -trybuild.workspace = true diff --git 
a/oximeter/oximeter/schema/physical-data-link.toml b/oximeter/oximeter/schema/physical-data-link.toml new file mode 100644 index 0000000000..d526aa6af1 --- /dev/null +++ b/oximeter/oximeter/schema/physical-data-link.toml @@ -0,0 +1,95 @@ +format_version = 1 + +[target] +name = "physical_data_link" +description = "A physical network link on a compute sled" +authz_scope = "fleet" +versions = [ + { version = 1, fields = [ "rack_id", "sled_id", "hostname", "serial", "link_name" ] }, + # This is the intended next version, but actual schema updates are not yet + # supported. This is left here as an example and breadcrumb to implement + # that update in the future. + #{ version = 2, fields = [ "rack_id", "sled_id", "serial", "model", "revision", "link_name" ] }, +] + +[fields.rack_id] +type = "uuid" +description = "UUID for the link's sled" + +[fields.sled_id] +type = "uuid" +description = "UUID for the link's sled" + +[fields.hostname] +type = "string" +description = "Hostname of the link's sled" + +[fields.model] +type = "string" +description = "Model number of the link's sled" + +[fields.revision] +type = "u32" +description = "Revision number of the sled" + +[fields.serial] +type = "string" +description = "Serial number of the sled" + +[fields.link_name] +type = "string" +description = "Name of the physical data link" + +[[metrics]] +name = "bytes_sent" +description = "Number of bytes sent on the link" +units = "bytes" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [] } +] + +[[metrics]] +name = "bytes_received" +description = "Number of bytes received on the link" +units = "bytes" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [] } +] + +[[metrics]] +name = "packets_sent" +description = "Number of packets sent on the link" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [] } +] + +[[metrics]] +name = "packets_received" +description = "Number of packets received on the link" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [] } +] + +[[metrics]] +name = "errors_sent" +description = "Number of errors encountered when sending on the link" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [] } +] + +[[metrics]] +name = "errors_received" +description = "Number of errors encountered when receiving on the link" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [] } +] diff --git a/oximeter/oximeter/src/bin/oximeter-schema.rs b/oximeter/oximeter/src/bin/oximeter-schema.rs new file mode 100644 index 0000000000..14fb31b1e8 --- /dev/null +++ b/oximeter/oximeter/src/bin/oximeter-schema.rs @@ -0,0 +1,97 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Copyright 2024 Oxide Computer Company + +//! CLI tool to understand timeseries schema + +use anyhow::Context as _; +use clap::Parser; +use clap::Subcommand; +use oximeter::schema::ir::TimeseriesDefinition; +use std::num::NonZeroU8; +use std::path::PathBuf; + +#[derive(Debug, Parser)] +struct Args { + #[command(subcommand)] + cmd: Cmd, + /// The path to the schema definition TOML file. + path: PathBuf, +} + +#[derive(Debug, Subcommand)] +enum Cmd { + /// Print the intermediate representation parsed from the schema file + Ir, + + /// Print the derived timeseries schema. 
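Both the `oximeter-schema` CLI above and the consistency test later in this change read definitions like this one through `load_schema`. A hedged sketch of inspecting this particular file; the count of six assumes one derived schema per metric at the single target version defined above, and the `anyhow` conversion and re-export paths are assumptions:

```rust
// Hedged sketch: load the TOML definition above from the shared schema
// directory and print the derived timeseries schema.
use oximeter::schema::{ir::load_schema, SCHEMA_DIRECTORY};

fn inspect_physical_data_link() -> anyhow::Result<()> {
    let path = format!("{}/physical-data-link.toml", SCHEMA_DIRECTORY);
    let contents = std::fs::read_to_string(&path)?;
    let schema = load_schema(&contents)?;
    // One TimeseriesSchema per (metric, version): six metrics, all at target
    // version 1, since the version-2 entry above is still commented out.
    assert_eq!(schema.len(), 6);
    for s in &schema {
        println!("{} v{}: {} ({:?})", s.timeseries_name, s.version, s.datum_type, s.units);
    }
    Ok(())
}
```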
+ Schema { + /// Show the schema for a specified timeseries by name. + /// + /// If not provided, all timeseries are printed. + #[arg(short, long)] + timeseries: Option, + + /// Show the schema for a specified version. + /// + /// If not provided, all versions are shown. + #[arg(short, long)] + version: Option, + }, + + /// Print the Rust code that would be emitted in the macro format. + Emit, +} + +fn main() -> anyhow::Result<()> { + let args = Args::try_parse()?; + let contents = std::fs::read_to_string(&args.path).with_context(|| { + format!("failed to read from {}", args.path.display()) + })?; + match args.cmd { + Cmd::Ir => { + let def: TimeseriesDefinition = toml::from_str(&contents)?; + println!("{def:#?}"); + } + Cmd::Schema { timeseries, version } => { + let schema = oximeter_impl::schema::ir::load_schema(&contents)?; + match (timeseries, version) { + (None, None) => { + for each in schema.into_iter() { + println!("{each:#?}"); + } + } + (None, Some(version)) => { + for each in + schema.into_iter().filter(|s| s.version == version) + { + println!("{each:#?}"); + } + } + (Some(name), None) => { + for each in + schema.into_iter().filter(|s| s.timeseries_name == name) + { + println!("{each:#?}"); + } + } + (Some(name), Some(version)) => { + for each in schema.into_iter().filter(|s| { + s.timeseries_name == name && s.version == version + }) { + println!("{each:#?}"); + } + } + } + } + Cmd::Emit => { + let code = oximeter::schema::codegen::use_timeseries(&contents)?; + let formatted = + prettyplease::unparse(&syn::parse_file(&format!("{code}"))?); + println!("{formatted}"); + } + } + Ok(()) +} diff --git a/oximeter/oximeter/src/lib.rs b/oximeter/oximeter/src/lib.rs index 1855762abe..9dd8fab47a 100644 --- a/oximeter/oximeter/src/lib.rs +++ b/oximeter/oximeter/src/lib.rs @@ -2,6 +2,8 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +// Copyright 2024 Oxide Computer Company + //! Tools for generating and collecting metric data in the Oxide rack. //! //! Overview @@ -18,33 +20,121 @@ //! sensor. A _target_ is the thing being measured -- the service or the DIMM, in these cases. Both //! targets and metrics may have key-value pairs associated with them, called _fields_. //! -//! Motivating example -//! ------------------ +//! Defining timeseries schema +//! -------------------------- //! -//! ```rust -//! use uuid::Uuid; -//! use oximeter::{types::Cumulative, Metric, Target}; +//! Creating a timeseries starts by defining its schema. This includes the name +//! for the target and metric, as well as other metadata such as descriptions, +//! data types, and version numbers. Let's start by looking at an example: +//! +//! ```text +//! [target] +//! name = "foo" +//! description = "Statistics about the foo server" +//! authz_scope = "fleet" +//! versions = [ +//! { version = 1, fields = ["name", "id"], +//! ] +//! +//! [[metrics]] +//! name = "total_requests" +//! description = "The cumulative number of requests served" +//! datum_type = "cumulative_u64" +//! units = "count" +//! versions = [ +//! { added_in = 1, fields = ["route", "method", "response_code"], +//! ] +//! +//! [fields.name] +//! type = "string" +//! description = "The name of this server" +//! +//! [fields.id] +//! type = "uuid" +//! description = "Unique ID of this server" +//! +//! [fields.route] +//! type = "string" +//! description = "The route used in the HTTP request" +//! +//! [fields.method] +//! type = "string" +//! 
description = "The method used in the HTTP request" +//! +//! [fields.response_code] +//! type = u16 +//! description = "Status code the server responded with" +//! ``` +//! +//! In this case, our target is an HTTP server, which we identify with the +//! fields "name" and "id". Those fields are described in the `fields` TOML key, +//! and referred to by name in the target definition. A target also needs to +//! have a description and an _authorization scope_, which describes the +//! visibility of the timeseries and its data. See +//! [`crate::schema::AuthzScope`] for details. +//! +//! A target can have one or more metrics defined for it. As with the target, a +//! metric also has a name and description, and additionally a datum type and +//! units. It may also have fields, again referred to by name. +//! +//! This file should live in the `oximeter/schema` subdirectory, so that it can +//! be used to generate Rust code for producing data. +//! +//! Versions +//! -------- +//! +//! Both targets and metrics have version numbers associated with them. For a +//! target, these numbers must be the numbers 1, 2, 3, ... (increasing, no +//! gaps). As the target or any of its metrics evolves, these numbers are +//! incremented, and the new fields for that version are specified. +//! +//! The fields on the metrics may be specified a bit more flexibly. The first +//! version they appear in must have the form: `{ added_in = X, fields = [ ... +//! ] }`. After, the versions may be omitted, meaning that the previous version +//! of the metric is unchanged; or the metric may be removed entirely with a +//! version like `{ removed_in = Y }`. +//! +//! In all cases, the TOML definition is checked for consistency. Any existing +//! metric versions must match up with a target version, and they must have +//! distinct field names (targets and metrics cannot share fields). +//! +//! Using these primitives, fields may be added, removed, or renamed, with one +//! caveat: a field may not **change type**. +//! +//! Generated code +//! -------------- //! -//! #[derive(Target)] +//! This TOML definition can be used in a number of ways, but the most relevant +//! is to actually produce data from the resulting timeseries. This can be done +//! with the `[use_timeseries]` proc-macro, like this: +//! +//! ```ignore +//! oximeter::use_timeseries!("http-server.toml"); +//! ``` +//! +//! The macro will first validate the timeseries definition, and then generate +//! Rust code like the following: +//! +//! ```rust +//! #[derive(oximeter::Target)] //! struct HttpServer { //! name: String, -//! id: Uuid, +//! id: uuid::Uuid, //! } //! -//! #[derive(Metric)] +//! #[derive(oximeter::Metric)] //! struct TotalRequests { //! route: String, //! method: String, -//! response_code: i64, +//! response_code: u16, //! #[datum] -//! total: Cumulative, +//! total: oximeter::types::Cumulative, //! } //! ``` //! -//! In this case, our target is some HTTP server, which we identify with the fields "name" and -//! "id". The metric of interest is the total count of requests, by route/method/response code, -//! over time. The [`types::Cumulative`] type keeps track of cumulative scalar values, an integer -//! in this case. +//! This code can be used to create **samples** from this timeseries. The target +//! and metric structs can be filled in with the timeseries's _fields_, and the +//! _datum_ may be populated to generate new samples. //! //! Datum, measurement, and samples //! ------------------------------- @@ -95,44 +185,93 @@ //! 
`Producer`s may be registered with the same `ProducerServer`, each with potentially different //! sampling intervals. -// Copyright 2023 Oxide Computer Company - -pub use oximeter_macro_impl::*; - -// Export the current crate as `oximeter`. The macros defined in `oximeter-macro-impl` generate -// code referring to symbols like `oximeter::traits::Target`. In consumers of this crate, that's -// fine, but internally there _is_ no crate named `oximeter`, it's just `self` or `crate`. -// -// See https://github.com/rust-lang/rust/pull/55275 for the PR introducing this fix, which links to -// lots of related issues and discussion. -extern crate self as oximeter; +pub use oximeter_impl::*; +pub use oximeter_timeseries_macro::use_timeseries; -pub mod histogram; -pub mod schema; -pub mod test_util; -pub mod traits; -pub mod types; +#[cfg(test)] +mod test { + use oximeter_impl::schema::ir::load_schema; + use oximeter_impl::schema::{FieldSource, SCHEMA_DIRECTORY}; + use oximeter_impl::TimeseriesSchema; + use std::collections::BTreeMap; + use std::fs; -pub use schema::FieldSchema; -pub use schema::TimeseriesName; -pub use schema::TimeseriesSchema; -pub use traits::Metric; -pub use traits::Producer; -pub use traits::Target; -pub use types::Datum; -pub use types::DatumType; -pub use types::Field; -pub use types::FieldType; -pub use types::FieldValue; -pub use types::Measurement; -pub use types::MetricsError; -pub use types::Sample; + /// This test checks that changes to timeseries schema are all consistent. + /// + /// Timeseries schema are described in a TOML format that makes it relatively + /// easy to add new versions of the timeseries. Those definitions are ingested + /// at compile-time and checked for self-consistency, but it's still possible + /// for two unrelated definitions to conflict. This test catches those. + #[test] + fn timeseries_schema_consistency() { + let mut all_schema = BTreeMap::new(); + for entry in fs::read_dir(SCHEMA_DIRECTORY).unwrap() { + let entry = entry.unwrap(); + println!( + "examining timeseries schema from: '{}'", + entry.path().canonicalize().unwrap().display() + ); + let contents = fs::read_to_string(entry.path()).unwrap(); + let list = load_schema(&contents).unwrap_or_else(|_| { + panic!( + "Expected a valid timeseries definition in {}", + entry.path().canonicalize().unwrap().display() + ) + }); + println!("found {} schema", list.len()); + for schema in list.into_iter() { + let key = (schema.timeseries_name.clone(), schema.version); + if let Some(dup) = all_schema.insert(key, schema.clone()) { + panic!( + "Timeseries '{}' version {} is duplicated.\ + \noriginal:\n{}\nduplicate:{}\n", + schema.timeseries_name, + schema.version, + pretty_print_schema(&schema), + pretty_print_schema(&dup), + ); + } + } + } + } -/// Construct the timeseries name for a Target and Metric. 
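The crate documentation above shows the code that `use_timeseries!` generates for the `http-server.toml` example. A hedged sketch of turning those types into samples; the struct definitions are repeated from the docs for self-containment, `uuid` is an illustrative dependency, and the `+=` on the cumulative datum assumes the `AddAssign` support imported by the traits module:

```rust
// Hedged sketch: populate the target/metric pair and emit a Sample per
// observation.
use oximeter::{types::Cumulative, Metric, MetricsError, Sample, Target};

#[derive(oximeter::Target)]
struct HttpServer {
    name: String,
    id: uuid::Uuid,
}

#[derive(oximeter::Metric)]
struct TotalRequests {
    route: String,
    method: String,
    response_code: u16,
    #[datum]
    total: Cumulative<u64>,
}

fn record_request(
    server: &HttpServer,
    requests: &mut TotalRequests,
) -> Result<Sample, MetricsError> {
    // Cumulative counters only move forward; bump the total and capture it.
    requests.total += 1;
    Sample::new(server, requests)
}
```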
-pub fn timeseries_name(target: &T, metric: &M) -> String -where - T: Target, - M: Metric, -{ - format!("{}:{}", target.name(), metric.name()) + fn pretty_print_schema(schema: &TimeseriesSchema) -> String { + use std::fmt::Write; + let mut out = String::new(); + writeln!(out, " name: {}", schema.timeseries_name).unwrap(); + writeln!(out, " version: {}", schema.version).unwrap(); + writeln!(out, " target").unwrap(); + writeln!(out, " description: {}", schema.description.target).unwrap(); + writeln!(out, " fields:").unwrap(); + for field in schema + .field_schema + .iter() + .filter(|field| field.source == FieldSource::Target) + { + writeln!( + out, + " {} ({}): {}", + field.name, field.field_type, field.description + ) + .unwrap(); + } + writeln!(out, " metric").unwrap(); + writeln!(out, " description: {}", schema.description.metric).unwrap(); + writeln!(out, " fields:").unwrap(); + for field in schema + .field_schema + .iter() + .filter(|field| field.source == FieldSource::Metric) + { + writeln!( + out, + " {} ({}): {}", + field.name, field.field_type, field.description + ) + .unwrap(); + } + writeln!(out, " datum type: {}", schema.datum_type).unwrap(); + writeln!(out, " units: {:?}", schema.units).unwrap(); + out + } } diff --git a/oximeter/producer/src/lib.rs b/oximeter/producer/src/lib.rs index 6bf8954ae0..36b05d7bb1 100644 --- a/oximeter/producer/src/lib.rs +++ b/oximeter/producer/src/lib.rs @@ -9,7 +9,6 @@ use dropshot::endpoint; use dropshot::ApiDescription; use dropshot::ConfigDropshot; -use dropshot::ConfigLogging; use dropshot::HttpError; use dropshot::HttpResponseOk; use dropshot::HttpServer; @@ -42,6 +41,13 @@ use std::time::Duration; use thiserror::Error; use uuid::Uuid; +// Our public interface depends directly or indirectly on these types; we +// export them so that consumers need not depend on dropshot themselves and +// to simplify how we stage incompatible upgrades. +pub use dropshot::ConfigLogging; +pub use dropshot::ConfigLoggingIfExists; +pub use dropshot::ConfigLoggingLevel; + #[derive(Debug, Clone, Error)] pub enum Error { #[error("Error running producer HTTP server: {0}")] diff --git a/oximeter/timeseries-macro/Cargo.toml b/oximeter/timeseries-macro/Cargo.toml new file mode 100644 index 0000000000..db591aed06 --- /dev/null +++ b/oximeter/timeseries-macro/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "oximeter-timeseries-macro" +version = "0.1.0" +edition = "2021" + +[lib] +proc-macro = true + +[dependencies] +omicron-workspace-hack.workspace = true +oximeter-impl.workspace = true +proc-macro2.workspace = true +quote.workspace = true +syn.workspace = true + +[lints] +workspace = true diff --git a/oximeter/timeseries-macro/src/lib.rs b/oximeter/timeseries-macro/src/lib.rs new file mode 100644 index 0000000000..317a8533a4 --- /dev/null +++ b/oximeter/timeseries-macro/src/lib.rs @@ -0,0 +1,106 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Copyright 2024 Oxide Computer Company + +//! Procedural macro to emi Rust code matching a TOML timeseries definition. + +extern crate proc_macro; + +use oximeter_impl::schema::SCHEMA_DIRECTORY; + +/// Generate code to use the timeseries from one target. +/// +/// This macro accepts a single filename, which should be a file in the +/// `oximeter/schema` subdirectory, containing a valid timeseries definition. 
It +/// attempts parse the file and generate code used to produce samples from those +/// timeseries. It will generate a submodule named by the target in the file, +/// with a Rust struct for the target and each metric defined in the file. +#[proc_macro] +pub fn use_timeseries( + tokens: proc_macro::TokenStream, +) -> proc_macro::TokenStream { + match syn::parse::(tokens) { + Ok(token) => { + let path = match extract_timeseries_path(&token) { + Ok(path) => path, + Err(err) => return err, + }; + + // Now that we have a verified file name only, look up the contents + // within the schema directory; validate it; and generate code from + // it. + let contents = match std::fs::read_to_string(&path) { + Ok(c) => c, + Err(e) => { + let msg = format!( + "Failed to read timeseries schema \ + from file '{}': {:?}", + path.display(), + e, + ); + return syn::Error::new(token.span(), msg) + .into_compile_error() + .into(); + } + }; + match oximeter_impl::schema::codegen::use_timeseries(&contents) { + Ok(toks) => { + let path_ = path.display().to_string(); + return quote::quote! { + /// Include the schema file itself to ensure we recompile + /// when that changes. + const _: &str = include_str!(#path_); + #toks + } + .into(); + } + Err(e) => { + let msg = format!( + "Failed to generate timeseries types \ + from '{}': {:?}", + path.display(), + e, + ); + return syn::Error::new(token.span(), msg) + .into_compile_error() + .into(); + } + } + } + Err(e) => return e.into_compile_error().into(), + } +} + +// Extract the full path to the timeseries definition, from the macro input +// tokens. We currently only allow a filename with no other path components, to +// avoid looking in directories other than the `SCHEMA_DIRECTORY`. +fn extract_timeseries_path( + token: &syn::LitStr, +) -> Result { + let make_err = || { + let path = std::path::Path::new(SCHEMA_DIRECTORY) + .canonicalize() + .map(|p| p.display().to_string()) + .unwrap_or_else(|_| SCHEMA_DIRECTORY.to_string()); + let msg = format!( + "Input must be a valid filename with no \ + path components and directly within the \ + schema directory '{}'", + path, + ); + Err(syn::Error::new(token.span(), msg).into_compile_error().into()) + }; + let value = token.value(); + if value.is_empty() { + return make_err(); + } + let Some(filename) = std::path::Path::new(&value).file_name() else { + return make_err(); + }; + if filename != value.as_str() { + return make_err(); + } + Ok(std::path::Path::new(SCHEMA_DIRECTORY).join(&filename)) +} diff --git a/package-manifest.toml b/package-manifest.toml index 8e27588be3..027ca52b48 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -481,7 +481,6 @@ source.type = "composite" source.packages = [ "crucible.tar.gz", "zone-setup.tar.gz", "zone-network-install.tar.gz" ] output.type = "zone" - [package.crucible-pantry-zone] service_name = "crucible_pantry" only_for_targets.image = "standard" @@ -505,10 +504,10 @@ only_for_targets.image = "standard" # 3. 
Use source.type = "manual" instead of "prebuilt" source.type = "prebuilt" source.repo = "crucible" -source.commit = "8c6d485110ecfae5409575246b986a145c386dc4" +source.commit = "64e28cea69b427b05064defaf8800a4d678b4612" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible.sha256.txt -source.sha256 = "a974c976babbbbe4d126fe324e28093b4f69b689e1cf607ce38323befcfa494e" +source.sha256 = "e9051934c7d6e274158d4afdb4523797c913acd1a1262f973bc0ab7a2a253b5f" output.type = "zone" output.intermediate_only = true @@ -517,13 +516,29 @@ service_name = "crucible_pantry_prebuilt" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "8c6d485110ecfae5409575246b986a145c386dc4" +source.commit = "64e28cea69b427b05064defaf8800a4d678b4612" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-pantry.sha256.txt -source.sha256 = "34418c60ecccade796e604997a11b1fa7f01c364996fa4b57131466e910700a8" +source.sha256 = "a8850bfaf08c11a7baa2e4b14b859613b77d9952dc8d20433ebea8136f8a00d3" output.type = "zone" output.intermediate_only = true +[package.crucible-dtrace] +# This package contains a select set of DTrace script that operate on DTrace +# probes that exist for consumers of the crucible upstairs library. These +# scripts are extracted onto the global zone. The source commit here should +# match a version of Crucible that contain probes used by the upstairs. In most +# cases this means the version of Crucible that Propolis is using. +service_name = "crucible_dtrace" +only_for_targets.image = "standard" +source.type = "prebuilt" +source.repo = "crucible" +source.commit = "64e28cea69b427b05064defaf8800a4d678b4612" +# The SHA256 digest is automatically posted to: +# https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-dtrace.sha256.txt +source.sha256 = "fe51b1c771f990761c4f8bf95aa26febbfa452df97f8da7d2f329dad88f63e1d" +output.type = "tarball" + # Refer to # https://github.com/oxidecomputer/propolis/blob/master/package/README.md # for instructions on building this manually. @@ -532,10 +547,10 @@ service_name = "propolis-server" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "6d7ed9a033babc054db9eff5b59dee978d2b0d76" +source.commit = "59868677c70f3cd03f03e12584ad1056da8b5459" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "f8f41b47bc00811fefe2ba75e0f6f8ab77765776c04021e0b31f09c3b21108a9" +source.sha256 = "4ab62342141c655a2bf088ff608fa353063bc3ac44db459e9d56768aa5f4e3d2" output.type = "zone" [package.mg-ddm-gz] @@ -548,10 +563,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). 
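Note on the prebuilt entries above: for every `source.type = "prebuilt"` package, the packaging tool derives a buildomat URL from the repo name, `source.commit`, and the artifact file name, and the downloaded bytes are expected to hash to `source.sha256` (the reworked `download_prebuilt` later in this diff builds exactly this URL). Below is a minimal, illustrative sketch of that relationship, assuming the `sha2` and `hex` crates; the artifact name and the placeholder values are examples, not taken from the manifest.

    // Sketch only; the packaging tool's real logic lives in download_prebuilt.
    use sha2::{Digest, Sha256};

    fn prebuilt_url(repo: &str, commit: &str, artifact: &str) -> String {
        format!(
            "https://buildomat.eng.oxide.computer/public/file/oxidecomputer/{repo}/image/{commit}/{artifact}"
        )
    }

    fn digest_matches(artifact_bytes: &[u8], expected_sha256_hex: &str) -> bool {
        let mut hasher = Sha256::new();
        hasher.update(artifact_bytes);
        hex::encode(hasher.finalize()) == expected_sha256_hex
    }

    fn main() {
        // Placeholder values; the real ones come from the manifest entry.
        let url = prebuilt_url("crucible", "<source.commit>", "crucible-dtrace.tar.gz");
        println!("would fetch {url} and compare against <source.sha256>");
        assert!(!digest_matches(b"example bytes", "0000"));
    }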
-source.commit = "c67f6ab49e0e8a49bcf84542500fceb6b9417ca4" +source.commit = "3c3fa8482fe09a01da62fbd35efe124ea9cac9e7" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm-gz.sha256.txt -source.sha256 = "33e3b09408551be860debac08de50a840909d4e6c6bed9aecaef63fe8bef2d69" +source.sha256 = "63b6c74584e32f52893730e3a567da29c7f93934c38882614aad59034bdd980d" output.type = "tarball" [package.mg-ddm] @@ -564,10 +579,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "c67f6ab49e0e8a49bcf84542500fceb6b9417ca4" +source.commit = "3c3fa8482fe09a01da62fbd35efe124ea9cac9e7" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "81674afa17873f84bb49a800c8511938d1c2e871026cbb17e5eed2b645b1eb55" +source.sha256 = "b9908b81fee00d71b750f5b9a0f866c807adb0f924ab635295d28753538836f5" output.type = "zone" output.intermediate_only = true @@ -579,10 +594,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "c67f6ab49e0e8a49bcf84542500fceb6b9417ca4" +source.commit = "3c3fa8482fe09a01da62fbd35efe124ea9cac9e7" # The SHA256 digest is automatically posted to: -# https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "5e8bdd6774ef6041189621306577d0e0d174d596d216e53740ce6f035316c5af" +# https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mgd.sha256.txt +source.sha256 = "51f446933f0d8c426b15ea0845b66664da9b9a129893d12b25d7912b52f07362" output.type = "zone" output.intermediate_only = true @@ -628,8 +643,8 @@ only_for_targets.image = "standard" # 2. Copy dendrite.tar.gz from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "861c00bacbdf7a6e22471f0dabd8f926409b5292" -source.sha256 = "1db849892c60b22f600fb081d4b0145d8ecd98acce9fad3094499a5d2159d001" +source.commit = "a262fe770c173f7879cd942c98ab28a829890661" +source.sha256 = "6f991dacd72c63d7fcff734b1f5c406c001e4d509f7b36e68b89d8b07f69ed79" output.type = "zone" output.intermediate_only = true @@ -653,8 +668,8 @@ only_for_targets.image = "standard" # 2. Copy the output zone image from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "861c00bacbdf7a6e22471f0dabd8f926409b5292" -source.sha256 = "00b2b9372145bc8974f3c75ba7a59d8f2a8178c67cc1869086d29c7f3a2deb36" +source.commit = "a262fe770c173f7879cd942c98ab28a829890661" +source.sha256 = "66f38e194d4899a18825ec1a28adc9e63b5c3806696ffe9b210a16071d892013" output.type = "zone" output.intermediate_only = true @@ -671,8 +686,8 @@ only_for_targets.image = "standard" # 2. 
Copy dendrite.tar.gz from dendrite/out to omicron/out/dendrite-softnpu.tar.gz source.type = "prebuilt" source.repo = "dendrite" -source.commit = "861c00bacbdf7a6e22471f0dabd8f926409b5292" -source.sha256 = "b0b62b22c0e781edb0790b8730b99bb6e635c95ad3e83c2afbb2b15956153d66" +source.commit = "a262fe770c173f7879cd942c98ab28a829890661" +source.sha256 = "6d4870275f9119da6bcafd0f57ee3e467aae4ff32c861af9ebf5a81ff304d6ce" output.type = "zone" output.intermediate_only = true diff --git a/package/Cargo.toml b/package/Cargo.toml index 4632e66731..b63a5ed96f 100644 --- a/package/Cargo.toml +++ b/package/Cargo.toml @@ -11,11 +11,13 @@ workspace = true [dependencies] anyhow.workspace = true camino.workspace = true +cargo_metadata.workspace = true clap.workspace = true futures.workspace = true hex.workspace = true illumos-utils.workspace = true indicatif.workspace = true +omicron-workspace-hack.workspace = true omicron-zone-package.workspace = true petgraph.workspace = true rayon.workspace = true @@ -30,13 +32,11 @@ slog-bunyan.workspace = true slog-term.workspace = true smf.workspace = true strum.workspace = true -swrite.workspace = true tar.workspace = true thiserror.workspace = true tokio = { workspace = true, features = [ "full" ] } toml.workspace = true walkdir.workspace = true -omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/package/src/bin/omicron-package.rs b/package/src/bin/omicron-package.rs index 09fa7ab178..6db168c9f8 100644 --- a/package/src/bin/omicron-package.rs +++ b/package/src/bin/omicron-package.rs @@ -4,7 +4,7 @@ //! Utility for bundling target binaries as tarfiles. -use anyhow::{anyhow, bail, Context, Result}; +use anyhow::{anyhow, bail, ensure, Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; use clap::{Parser, Subcommand}; use futures::stream::{self, StreamExt, TryStreamExt}; @@ -12,7 +12,7 @@ use illumos_utils::{zfs, zone}; use indicatif::{MultiProgress, ProgressBar, ProgressStyle}; use omicron_package::target::KnownTarget; use omicron_package::{parse, BuildCommand, DeployCommand, TargetCommand}; -use omicron_zone_package::config::Config as PackageConfig; +use omicron_zone_package::config::{Config as PackageConfig, PackageMap}; use omicron_zone_package::package::{Package, PackageOutput, PackageSource}; use omicron_zone_package::progress::Progress; use omicron_zone_package::target::Target; @@ -24,12 +24,13 @@ use slog::o; use slog::Drain; use slog::Logger; use slog::{info, warn}; +use std::collections::{BTreeMap, BTreeSet}; use std::env; use std::fs::create_dir_all; use std::io::Write; use std::str::FromStr; -use std::sync::Arc; -use swrite::{swrite, SWrite}; +use std::sync::{Arc, OnceLock}; +use std::time::Duration; use tokio::io::{AsyncReadExt, AsyncWriteExt, BufReader}; use tokio::process::Command; @@ -104,82 +105,117 @@ struct Args { subcommand: SubCommand, } -async fn run_cargo_on_packages( - subcmd: &str, - packages: I, +#[derive(Debug, Default)] +struct CargoPlan<'a> { + command: &'a str, + bins: BTreeSet<&'a String>, + features: BTreeSet<&'a String>, release: bool, - features: &str, -) -> Result<()> -where - I: IntoIterator, - S: AsRef, -{ - let mut cmd = Command::new("cargo"); - // We rely on the rust-toolchain.toml file for toolchain information, - // rather than specifying one within the packaging tool. 
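The `run_cargo_on_packages` helper is being replaced by the `CargoPlan` introduced above (its `run()` and the release/debug split follow below): binaries are selected with repeated `--bin` flags, features are passed as a single space-separated `--features` argument, and release and debug packages each get their own plan. A simplified, self-contained sketch of the command line this produces, using owned `String` sets instead of the borrowed ones in the real code; the binary and feature names are only examples.

    use std::collections::BTreeSet;
    use std::process::Command;

    fn cargo_command(
        subcommand: &str,
        bins: &BTreeSet<String>,
        features: &BTreeSet<String>,
        release: bool,
    ) -> Command {
        let mut cmd = Command::new("cargo");
        cmd.arg(subcommand);
        for bin in bins {
            // One --bin flag per requested binary.
            cmd.arg("--bin").arg(bin);
        }
        if !features.is_empty() {
            // Single space-separated feature list, like the fold in the new code.
            let feature_list = features.iter().cloned().collect::<Vec<_>>().join(" ");
            cmd.arg("--features").arg(feature_list);
        }
        if release {
            cmd.arg("--release");
        }
        cmd
    }

    fn main() {
        let bins = BTreeSet::from(["sled-agent".to_string()]);
        let features =
            BTreeSet::from(["image-standard".to_string(), "switch-asic".to_string()]);
        // Roughly: cargo build --bin sled-agent --features "image-standard switch-asic" --release
        println!("{:?}", cargo_command("build", &bins, &features, true));
    }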
- cmd.arg(subcmd); - for package in packages { - cmd.arg("-p").arg(package); - } - cmd.arg("--features").arg(features); - if release { - cmd.arg("--release"); - } - let status = cmd - .status() - .await - .context(format!("Failed to run command: ({:?})", cmd))?; - if !status.success() { - bail!("Failed to build packages"); - } +} - Ok(()) +impl<'a> CargoPlan<'a> { + async fn run(&self, log: &Logger) -> Result<()> { + if self.bins.is_empty() { + return Ok(()); + } + + let mut cmd = Command::new("cargo"); + // We rely on the rust-toolchain.toml file for toolchain information, + // rather than specifying one within the packaging tool. + cmd.arg(self.command); + for bin in &self.bins { + cmd.arg("--bin").arg(bin); + } + if !self.features.is_empty() { + cmd.arg("--features").arg(self.features.iter().fold( + String::new(), + |mut acc, s| { + if !acc.is_empty() { + acc.push(' '); + } + acc.push_str(s); + acc + }, + )); + } + if self.release { + cmd.arg("--release"); + } + info!(log, "running: {:?}", cmd.as_std()); + let status = cmd + .status() + .await + .context(format!("Failed to run command: ({:?})", cmd))?; + if !status.success() { + bail!("Failed to build packages"); + } + + Ok(()) + } } async fn do_for_all_rust_packages( config: &Config, command: &str, ) -> Result<()> { - // First, filter out all Rust packages from the configuration that should be - // built, and partition them into "release" and "debug" categories. - let (release_pkgs, debug_pkgs): (Vec<_>, _) = config - .package_config - .packages_to_build(&config.target) - .0 + // Collect a map of all of the workspace packages + let workspace = cargo_metadata::MetadataCommand::new().no_deps().exec()?; + let workspace_pkgs = workspace + .packages .into_iter() - .filter_map(|(name, pkg)| match &pkg.source { - PackageSource::Local { rust: Some(rust_pkg), .. } => { - Some((name, rust_pkg.release)) - } - _ => None, + .filter_map(|package| { + workspace + .workspace_members + .contains(&package.id) + .then_some((package.name.clone(), package)) }) - .partition(|(_, release)| *release); - - let features = - config.target.0.iter().fold(String::new(), |mut acc, (name, value)| { - swrite!(acc, "{}-{} ", name, value); - acc - }); + .collect::>(); - // Execute all the release / debug packages at the same time. - if !release_pkgs.is_empty() { - run_cargo_on_packages( - command, - release_pkgs.iter().map(|(name, _)| name), - true, - &features, - ) - .await?; - } - if !debug_pkgs.is_empty() { - run_cargo_on_packages( - command, - debug_pkgs.iter().map(|(name, _)| name), - false, - &features, - ) - .await?; + // Generate a list of all features we might want to request + let features = config + .target + .0 + .iter() + .map(|(name, value)| format!("{name}-{value}")) + .collect::>(); + + // We split the packages to be built into "release" and "debug" lists + let mut release = + CargoPlan { command, release: true, ..Default::default() }; + let mut debug = CargoPlan { command, release: false, ..Default::default() }; + + for (name, pkg) in config.packages_to_build().0 { + // If this is a Rust package... + if let PackageSource::Local { rust: Some(rust_pkg), .. 
} = &pkg.source { + let plan = if rust_pkg.release { &mut release } else { &mut debug }; + // Get the package metadata + let metadata = workspace_pkgs.get(name).with_context(|| { + format!("package '{name}' is not a workspace package") + })?; + // Add the binaries we want to build to the plan + let bins = metadata + .targets + .iter() + .filter_map(|target| target.is_bin().then_some(&target.name)) + .collect::>(); + for bin in &rust_pkg.binary_names { + ensure!( + bins.contains(bin), + "bin target '{bin}' does not belong to package '{name}'" + ); + plan.bins.insert(bin); + } + // Add all features we want to request to the plan + plan.features.extend( + features + .iter() + .filter(|feature| metadata.features.contains_key(*feature)), + ); + } } + + release.run(&config.log).await?; + debug.run(&config.log).await?; Ok(()) } @@ -204,9 +240,7 @@ async fn do_list_outputs( output_directory: &Utf8Path, intermediate: bool, ) -> Result<()> { - for (name, package) in - config.package_config.packages_to_build(&config.target).0 - { + for (name, package) in config.packages_to_build().0 { if !intermediate && package.output == (PackageOutput::Zone { intermediate_only: true }) @@ -350,6 +384,8 @@ async fn download_prebuilt( expected_digest: &Vec, path: &Utf8Path, ) -> Result<()> { + static CLIENT: OnceLock = OnceLock::new(); + progress.set_message("downloading prebuilt".into()); let url = format!( "https://buildomat.eng.oxide.computer/public/file/oxidecomputer/{}/image/{}/{}", @@ -357,7 +393,15 @@ async fn download_prebuilt( commit, path.file_name().unwrap(), ); - let response = reqwest::Client::new() + let client = CLIENT.get_or_init(|| { + reqwest::ClientBuilder::new() + .timeout(Duration::from_secs(3600)) + .tcp_keepalive(Duration::from_secs(60)) + .connect_timeout(Duration::from_secs(15)) + .build() + .unwrap() + }); + let response = client .get(&url) .send() .await @@ -497,7 +541,7 @@ async fn do_package( do_build(&config).await?; - let packages = config.package_config.packages_to_build(&config.target); + let packages = config.packages_to_build(); let package_iter = packages.build_order(); for batch in package_iter { @@ -901,6 +945,8 @@ struct Config { package_config: PackageConfig, // Description of the target we're trying to operate on. target: Target, + // The list of packages the user wants us to build (all, if empty) + only: Vec, // True if we should skip confirmations for destructive operations. force: bool, // Number of times to retry failed downloads. @@ -926,6 +972,67 @@ impl Config { _ => bail!("Aborting"), } } + + /// Returns target packages to be assembled on the builder machine, limited + /// to those specified in `only` (if set). + fn packages_to_build(&self) -> PackageMap<'_> { + let packages = self.package_config.packages_to_build(&self.target); + if self.only.is_empty() { + return packages; + } + + let mut filtered_packages = PackageMap(BTreeMap::new()); + let mut to_walk = PackageMap(BTreeMap::new()); + // add the requested packages to `to_walk` + for package_name in &self.only { + to_walk.0.insert( + package_name, + packages.0.get(package_name).unwrap_or_else(|| { + panic!( + "Explicitly-requested package '{}' does not exist", + package_name + ) + }), + ); + } + // dependencies are listed by output name, so create a lookup table to + // get a package by its output name. + let lookup_by_output = packages + .0 + .iter() + .map(|(name, package)| { + (package.get_output_file(name), (*name, *package)) + }) + .collect::>(); + // packages yet to be walked are added to `to_walk`. 
pop each entry and + // add its dependencies to `to_walk`, then add the package we finished + // walking to `filtered_packages`. + while let Some((package_name, package)) = to_walk.0.pop_first() { + if let PackageSource::Composite { packages } = &package.source { + for output in packages { + // find the package by output name + let (dep_name, dep_package) = + lookup_by_output.get(output).unwrap_or_else(|| { + panic!( + "Could not find a package which creates '{}'", + output + ) + }); + if dep_name.as_str() == package_name { + panic!("'{}' depends on itself", package_name); + } + // if we've seen this package already, it will be in + // `filtered_packages`. otherwise, add it to `to_walk`. + if !filtered_packages.0.contains_key(dep_name) { + to_walk.0.insert(dep_name, dep_package); + } + } + } + // we're done looking at this package's deps + filtered_packages.0.insert(package_name, package); + } + filtered_packages + } } #[tokio::main] @@ -978,6 +1085,7 @@ async fn main() -> Result<()> { log: log.clone(), package_config, target, + only: Vec::new(), force: args.force, retry_count: args.retry_count, retry_duration: args.retry_duration, @@ -993,7 +1101,7 @@ async fn main() -> Result<()> { })?; } - match &args.subcommand { + match args.subcommand { SubCommand::Build(BuildCommand::Target { subcommand }) => { do_target(&args.artifact_dir, &args.target, &subcommand).await?; } @@ -1001,16 +1109,22 @@ async fn main() -> Result<()> { do_dot(&get_config()?).await?; } SubCommand::Build(BuildCommand::ListOutputs { intermediate }) => { - do_list_outputs(&get_config()?, &args.artifact_dir, *intermediate) + do_list_outputs(&get_config()?, &args.artifact_dir, intermediate) .await?; } - SubCommand::Build(BuildCommand::Package { disable_cache }) => { - do_package(&get_config()?, &args.artifact_dir, *disable_cache) - .await?; + SubCommand::Build(BuildCommand::Package { disable_cache, only }) => { + let mut config = get_config()?; + config.only = only; + do_package(&config, &args.artifact_dir, disable_cache).await?; } SubCommand::Build(BuildCommand::Stamp { package_name, version }) => { - do_stamp(&get_config()?, &args.artifact_dir, package_name, version) - .await?; + do_stamp( + &get_config()?, + &args.artifact_dir, + &package_name, + &version, + ) + .await?; } SubCommand::Build(BuildCommand::Check) => { do_check(&get_config()?).await? diff --git a/package/src/lib.rs b/package/src/lib.rs index 2b99cfbe07..2009de9dfe 100644 --- a/package/src/lib.rs +++ b/package/src/lib.rs @@ -103,6 +103,9 @@ pub enum BuildCommand { /// By default, the cache is used. 
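The new `--only` flag (repeatable, one package name per occurrence) limits a `package` run to the named packages, and `packages_to_build` above closes that set over composite-package dependencies with a small worklist: pop a package, queue the packages its composite source is built from, and repeat until nothing new appears. A self-contained sketch of that closure over a toy dependency map (the names are made up, not from the manifest):

    use std::collections::{BTreeMap, BTreeSet};

    fn transitive_closure(
        deps: &BTreeMap<&str, Vec<&str>>,
        requested: &[&str],
    ) -> BTreeSet<String> {
        let mut done: BTreeSet<String> = BTreeSet::new();
        let mut to_walk: BTreeSet<&str> = requested.iter().copied().collect();
        while let Some(name) = to_walk.pop_first() {
            // Queue any composite dependencies we have not finished yet.
            for dep in deps.get(name).into_iter().flatten() {
                if !done.contains(*dep) {
                    to_walk.insert(*dep);
                }
            }
            done.insert(name.to_string());
        }
        done
    }

    fn main() {
        let deps = BTreeMap::from([
            ("composite-zone", vec!["inner-a", "inner-b"]),
            ("inner-a", vec![]),
            ("inner-b", vec![]),
        ]);
        // Asking for just the composite zone pulls in the packages it is built from.
        println!("{:?}", transitive_closure(&deps, &["composite-zone"]));
    }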
#[clap(short, long)] disable_cache: bool, + /// Limit to building only these packages + #[clap(long)] + only: Vec, }, /// Stamps semver versions onto packages within a manifest Stamp { diff --git a/schema/all-zone-requests.json b/schema/all-zone-requests.json deleted file mode 100644 index fde6ee18a4..0000000000 --- a/schema/all-zone-requests.json +++ /dev/null @@ -1,801 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "AllZoneRequests", - "description": "A wrapper around `ZoneRequest` that allows it to be serialized to a JSON file.", - "type": "object", - "required": [ - "generation", - "requests" - ], - "properties": { - "generation": { - "description": "ledger generation (not an Omicron-provided generation)", - "allOf": [ - { - "$ref": "#/definitions/Generation" - } - ] - }, - "requests": { - "type": "array", - "items": { - "$ref": "#/definitions/ZoneRequest" - } - } - }, - "definitions": { - "DatasetKind": { - "description": "The type of a dataset, and an auxiliary information necessary to successfully launch a zone managing the associated data.", - "oneOf": [ - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "cockroach_db" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "crucible" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse_keeper" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "external_dns" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "internal_dns" - ] - } - } - } - ] - }, - "DatasetName": { - "type": "object", - "required": [ - "kind", - "pool_name" - ], - "properties": { - "kind": { - "$ref": "#/definitions/DatasetKind" - }, - "pool_name": { - "$ref": "#/definitions/ZpoolName" - } - } - }, - "DatasetRequest": { - "description": "Describes a request to provision a specific dataset", - "type": "object", - "required": [ - "id", - "name", - "service_address" - ], - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "name": { - "$ref": "#/definitions/DatasetName" - }, - "service_address": { - "type": "string" - } - } - }, - "Generation": { - "description": "Generation numbers stored in the database, used for optimistic concurrency control", - "type": "integer", - "format": "uint64", - "minimum": 0.0 - }, - "IpNet": { - "oneOf": [ - { - "title": "v4", - "allOf": [ - { - "$ref": "#/definitions/Ipv4Net" - } - ] - }, - { - "title": "v6", - "allOf": [ - { - "$ref": "#/definitions/Ipv6Net" - } - ] - } - ], - "x-rust-type": { - "crate": "oxnet", - "path": "oxnet::IpNet", - "version": "0.1.0" - } - }, - "Ipv4Net": { - "title": "An IPv4 subnet", - "description": "An IPv4 subnet, including prefix and prefix length", - "examples": [ - "192.168.1.0/24" - ], - "type": "string", - "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$", - "x-rust-type": { - "crate": "oxnet", - "path": "oxnet::Ipv4Net", - "version": "0.1.0" - } - }, - "Ipv6Net": { - "title": "An IPv6 subnet", - 
"description": "An IPv6 subnet, including prefix and subnet mask", - "examples": [ - "fd12:3456::/64" - ], - "type": "string", - "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$", - "x-rust-type": { - "crate": "oxnet", - "path": "oxnet::Ipv6Net", - "version": "0.1.0" - } - }, - "MacAddr": { - "title": "A MAC address", - "description": "A Media Access Control address, in EUI-48 format", - "examples": [ - "ff:ff:ff:ff:ff:ff" - ], - "type": "string", - "maxLength": 17, - "minLength": 5, - "pattern": "^([0-9a-fA-F]{0,2}:){5}[0-9a-fA-F]{0,2}$" - }, - "Name": { - "title": "A name unique within the parent collection", - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", - "type": "string", - "maxLength": 63, - "minLength": 1, - "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$" - }, - "NetworkInterface": { - "description": "Information required to construct a virtual network interface", - "type": "object", - "required": [ - "id", - "ip", - "kind", - "mac", - "name", - "primary", - "slot", - "subnet", - "vni" - ], - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "ip": { - "type": "string", - "format": "ip" - }, - "kind": { - "$ref": "#/definitions/NetworkInterfaceKind" - }, - "mac": { - "$ref": "#/definitions/MacAddr" - }, - "name": { - "$ref": "#/definitions/Name" - }, - "primary": { - "type": "boolean" - }, - "slot": { - "type": "integer", - "format": "uint8", - "minimum": 0.0 - }, - "subnet": { - "$ref": "#/definitions/IpNet" - }, - "vni": { - "$ref": "#/definitions/Vni" - } - } - }, - "NetworkInterfaceKind": { - "description": "The type of network interface", - "oneOf": [ - { - "description": "A vNIC attached to a guest instance", - "type": "object", - "required": [ - "id", - "type" - ], - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "type": { - "type": "string", - "enum": [ - "instance" - ] - } - } - }, - { - "description": "A vNIC associated with an internal service", - "type": "object", - "required": [ - "id", - "type" - ], - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "type": { - "type": "string", - "enum": [ - "service" - ] - } - } - }, - { - "description": "A vNIC associated with a probe", - "type": "object", - "required": [ - "id", - "type" - ], - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "type": { - "type": "string", - "enum": [ - "probe" - ] - } - } - } - ] - }, - "ServiceType": { - "description": "Describes service-specific parameters.", - "oneOf": [ - { - "type": "object", - "required": [ - "external_dns_servers", - "external_ip", - "external_tls", - 
"internal_address", - "nic", - "type" - ], - "properties": { - "external_dns_servers": { - "description": "External DNS servers Nexus can use to resolve external hosts.", - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "external_ip": { - "description": "The address at which the external nexus server is reachable.", - "type": "string", - "format": "ip" - }, - "external_tls": { - "description": "Whether Nexus's external endpoint should use TLS", - "type": "boolean" - }, - "internal_address": { - "description": "The address at which the internal nexus server is reachable.", - "type": "string" - }, - "nic": { - "description": "The service vNIC providing external connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/definitions/NetworkInterface" - } - ] - }, - "type": { - "type": "string", - "enum": [ - "nexus" - ] - } - } - }, - { - "type": "object", - "required": [ - "dns_address", - "http_address", - "nic", - "type" - ], - "properties": { - "dns_address": { - "description": "The address at which the external DNS server is reachable.", - "type": "string" - }, - "http_address": { - "description": "The address at which the external DNS server API is reachable.", - "type": "string" - }, - "nic": { - "description": "The service vNIC providing external connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/definitions/NetworkInterface" - } - ] - }, - "type": { - "type": "string", - "enum": [ - "external_dns" - ] - } - } - }, - { - "type": "object", - "required": [ - "dns_address", - "gz_address", - "gz_address_index", - "http_address", - "type" - ], - "properties": { - "dns_address": { - "type": "string" - }, - "gz_address": { - "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", - "type": "string", - "format": "ipv6" - }, - "gz_address_index": { - "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "http_address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "internal_dns" - ] - } - } - }, - { - "type": "object", - "required": [ - "address", - "type" - ], - "properties": { - "address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "oximeter" - ] - } - } - }, - { - "type": "object", - "required": [ - "address", - "type" - ], - "properties": { - "address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "crucible_pantry" - ] - } - } - }, - { - "type": "object", - "required": [ - "address", - "dns_servers", - "nic", - "ntp_servers", - "snat_cfg", - "type" - ], - "properties": { - "address": { - "type": "string" - }, - "dns_servers": { - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "domain": { - "type": [ - "string", - "null" - ] - }, - "nic": { - "description": "The service vNIC providing outbound connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/definitions/NetworkInterface" - } - ] - }, - "ntp_servers": { - "type": "array", - "items": { - "type": "string" - } - }, - "snat_cfg": { - "description": "The SNAT configuration for outbound connections.", - "allOf": [ - { - "$ref": "#/definitions/SourceNatConfig" - } - ] - }, - "type": { - "type": "string", - "enum": [ - "boundary_ntp" - ] - } - } - }, - { - 
"type": "object", - "required": [ - "address", - "dns_servers", - "ntp_servers", - "type" - ], - "properties": { - "address": { - "type": "string" - }, - "dns_servers": { - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "domain": { - "type": [ - "string", - "null" - ] - }, - "ntp_servers": { - "type": "array", - "items": { - "type": "string" - } - }, - "type": { - "type": "string", - "enum": [ - "internal_ntp" - ] - } - } - }, - { - "type": "object", - "required": [ - "address", - "type" - ], - "properties": { - "address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "clickhouse" - ] - } - } - }, - { - "type": "object", - "required": [ - "address", - "type" - ], - "properties": { - "address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "clickhouse_keeper" - ] - } - } - }, - { - "type": "object", - "required": [ - "address", - "type" - ], - "properties": { - "address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "cockroach_db" - ] - } - } - }, - { - "type": "object", - "required": [ - "address", - "type" - ], - "properties": { - "address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "crucible" - ] - } - } - } - ] - }, - "ServiceZoneRequest": { - "description": "Describes a request to create a zone running one or more services.", - "type": "object", - "required": [ - "addresses", - "id", - "services", - "zone_type" - ], - "properties": { - "addresses": { - "type": "array", - "items": { - "type": "string", - "format": "ipv6" - } - }, - "dataset": { - "anyOf": [ - { - "$ref": "#/definitions/DatasetRequest" - }, - { - "type": "null" - } - ] - }, - "id": { - "type": "string", - "format": "uuid" - }, - "services": { - "type": "array", - "items": { - "$ref": "#/definitions/ServiceZoneService" - } - }, - "zone_type": { - "$ref": "#/definitions/ZoneType" - } - } - }, - "ServiceZoneService": { - "description": "Used to request that the Sled initialize a single service.", - "type": "object", - "required": [ - "details", - "id" - ], - "properties": { - "details": { - "$ref": "#/definitions/ServiceType" - }, - "id": { - "type": "string", - "format": "uuid" - } - } - }, - "SourceNatConfig": { - "description": "An IP address and port range used for source NAT, i.e., making outbound network connections from guests or services.", - "type": "object", - "required": [ - "first_port", - "ip", - "last_port" - ], - "properties": { - "first_port": { - "description": "The first port used for source NAT, inclusive.", - "type": "integer", - "format": "uint16", - "minimum": 0.0 - }, - "ip": { - "description": "The external address provided to the instance or service.", - "type": "string", - "format": "ip" - }, - "last_port": { - "description": "The last port used for source NAT, also inclusive.", - "type": "integer", - "format": "uint16", - "minimum": 0.0 - } - } - }, - "Vni": { - "description": "A Geneve Virtual Network Identifier", - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "ZoneRequest": { - "description": "This struct represents the combo of \"what zone did you ask for\" + \"where did we put it\".", - "type": "object", - "required": [ - "root", - "zone" - ], - "properties": { - "root": { - "type": "string" - }, - "zone": { - "$ref": "#/definitions/ServiceZoneRequest" - } - } - }, - "ZoneType": { - "description": "The type of zone that Sled Agent may run", - "type": "string", - "enum": [ - "clickhouse", - "clickhouse_keeper", - "cockroach_db", - 
"crucible_pantry", - "crucible", - "external_dns", - "internal_dns", - "nexus", - "ntp", - "oximeter", - "switch" - ] - }, - "ZpoolName": { - "title": "The name of a Zpool", - "description": "Zpool names are of the format ox{i,p}_. They are either Internal or External, and should be unique", - "type": "string", - "pattern": "^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - } - } -} \ No newline at end of file diff --git a/schema/all-zones-requests.json b/schema/all-zones-requests.json index 526e41376f..20b99b2064 100644 --- a/schema/all-zones-requests.json +++ b/schema/all-zones-requests.json @@ -105,7 +105,7 @@ }, "Name": { "title": "A name unique within the parent collection", - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.", "type": "string", "maxLength": 63, "minLength": 1, @@ -154,6 +154,13 @@ "subnet": { "$ref": "#/definitions/IpNet" }, + "transit_ips": { + "default": [], + "type": "array", + "items": { + "$ref": "#/definitions/IpNet" + } + }, "vni": { "$ref": "#/definitions/Vni" } diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index e723101242..990b1047a2 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1406,7 +1406,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.vpc_subnet ( /* Child resource creation generation number */ rcgen INT8 NOT NULL, ipv4_block INET NOT NULL, - ipv6_block INET NOT NULL + ipv6_block INET NOT NULL, + /* nullable FK to the `vpc_router` table. */ + custom_router_id UUID ); /* Subnet and network interface names are unique per VPC, not project */ @@ -1471,7 +1473,14 @@ CREATE TABLE IF NOT EXISTS omicron.public.network_interface ( * The primary interface appears in DNS and its address is used for external * connectivity. */ - is_primary BOOL NOT NULL + is_primary BOOL NOT NULL, + + /* + * A supplementary list of addresses/CIDR blocks which a NIC is + * *allowed* to send/receive traffic on, in addition to its + * assigned address. + */ + transit_ips INET[] NOT NULL DEFAULT ARRAY[] ); /* A view of the network_interface table for just instance-kind records. */ @@ -1489,7 +1498,8 @@ SELECT mac, ip, slot, - is_primary + is_primary, + transit_ips FROM omicron.public.network_interface WHERE @@ -1636,7 +1646,13 @@ CREATE TABLE IF NOT EXISTS omicron.public.vpc_router ( time_deleted TIMESTAMPTZ, kind omicron.public.vpc_router_kind NOT NULL, vpc_id UUID NOT NULL, - rcgen INT NOT NULL + rcgen INT NOT NULL, + /* + * version information used to trigger VPC router RPW. + * this is sensitive to CRUD on named resources beyond + * routers e.g. instances, subnets, ... + */ + resolved_version INT NOT NULL DEFAULT 0 ); CREATE UNIQUE INDEX IF NOT EXISTS lookup_router_by_vpc ON omicron.public.vpc_router ( @@ -1662,6 +1678,7 @@ CREATE TABLE IF NOT EXISTS omicron.public.router_route ( /* Indicates that the object has been deleted */ time_deleted TIMESTAMPTZ, + /* FK to the `vpc_router` table. 
*/ vpc_router_id UUID NOT NULL, kind omicron.public.router_route_kind NOT NULL, target STRING(128) NOT NULL, @@ -3891,53 +3908,6 @@ ON omicron.public.switch_port (port_settings_id, port_name) STORING (switch_loca CREATE INDEX IF NOT EXISTS switch_port_name ON omicron.public.switch_port (port_name); -COMMIT; -BEGIN; - --- view for v2p mapping rpw -CREATE VIEW IF NOT EXISTS omicron.public.v2p_mapping_view -AS -WITH VmV2pMappings AS ( - SELECT - n.id as nic_id, - s.id as sled_id, - s.ip as sled_ip, - v.vni, - n.mac, - n.ip - FROM omicron.public.network_interface n - JOIN omicron.public.vpc_subnet vs ON vs.id = n.subnet_id - JOIN omicron.public.vpc v ON v.id = n.vpc_id - JOIN omicron.public.vmm vmm ON n.parent_id = vmm.instance_id - JOIN omicron.public.sled s ON vmm.sled_id = s.id - WHERE n.time_deleted IS NULL - AND n.kind = 'instance' - AND (vmm.state = 'running' OR vmm.state = 'starting') - AND s.sled_policy = 'in_service' - AND s.sled_state = 'active' -), -ProbeV2pMapping AS ( - SELECT - n.id as nic_id, - s.id as sled_id, - s.ip as sled_ip, - v.vni, - n.mac, - n.ip - FROM omicron.public.network_interface n - JOIN omicron.public.vpc_subnet vs ON vs.id = n.subnet_id - JOIN omicron.public.vpc v ON v.id = n.vpc_id - JOIN omicron.public.probe p ON n.parent_id = p.id - JOIN omicron.public.sled s ON p.sled = s.id - WHERE n.time_deleted IS NULL - AND n.kind = 'probe' - AND s.sled_policy = 'in_service' - AND s.sled_state = 'active' -) -SELECT nic_id, sled_id, sled_ip, vni, mac, ip FROM VmV2pMappings -UNION -SELECT nic_id, sled_id, sled_ip, vni, mac, ip FROM ProbeV2pMapping; - CREATE INDEX IF NOT EXISTS network_interface_by_parent ON omicron.public.network_interface (parent_id) STORING (name, kind, vpc_id, subnet_id, mac, ip, slot); @@ -4159,7 +4129,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '77.0.0', NULL) + (TRUE, NOW(), NOW(), '80.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/nic-spoof-allow/up01.sql b/schema/crdb/nic-spoof-allow/up01.sql new file mode 100644 index 0000000000..2ca13e0a38 --- /dev/null +++ b/schema/crdb/nic-spoof-allow/up01.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.network_interface +ADD COLUMN IF NOT EXISTS transit_ips INET[] NOT NULL DEFAULT ARRAY[]; diff --git a/schema/crdb/nic-spoof-allow/up02.sql b/schema/crdb/nic-spoof-allow/up02.sql new file mode 100644 index 0000000000..68ab39567d --- /dev/null +++ b/schema/crdb/nic-spoof-allow/up02.sql @@ -0,0 +1 @@ +DROP VIEW IF EXISTS omicron.public.instance_network_interface; diff --git a/schema/crdb/nic-spoof-allow/up03.sql b/schema/crdb/nic-spoof-allow/up03.sql new file mode 100644 index 0000000000..ac3cfe6b32 --- /dev/null +++ b/schema/crdb/nic-spoof-allow/up03.sql @@ -0,0 +1,20 @@ +CREATE VIEW IF NOT EXISTS omicron.public.instance_network_interface AS +SELECT + id, + name, + description, + time_created, + time_modified, + time_deleted, + parent_id AS instance_id, + vpc_id, + subnet_id, + mac, + ip, + slot, + is_primary, + transit_ips +FROM + omicron.public.network_interface +WHERE + kind = 'instance'; diff --git a/schema/crdb/remove-view-for-v2p-mappings/up01.sql b/schema/crdb/remove-view-for-v2p-mappings/up01.sql new file mode 100644 index 0000000000..aebe0119f5 --- /dev/null +++ b/schema/crdb/remove-view-for-v2p-mappings/up01.sql @@ -0,0 +1 @@ +DROP VIEW IF EXISTS omicron.public.v2p_mapping_view; diff --git a/schema/crdb/vpc-subnet-routing/up01.sql b/schema/crdb/vpc-subnet-routing/up01.sql new file mode 100644 index 0000000000..d1869dd010 --- 
/dev/null +++ b/schema/crdb/vpc-subnet-routing/up01.sql @@ -0,0 +1,3 @@ +-- Each subnet may have a custom router attached. +ALTER TABLE omicron.public.vpc_subnet +ADD COLUMN IF NOT EXISTS custom_router_id UUID; diff --git a/schema/crdb/vpc-subnet-routing/up02.sql b/schema/crdb/vpc-subnet-routing/up02.sql new file mode 100644 index 0000000000..77e72961a3 --- /dev/null +++ b/schema/crdb/vpc-subnet-routing/up02.sql @@ -0,0 +1,7 @@ +/* + * version information used to trigger VPC router RPW. + * this is sensitive to CRUD on named resources beyond + * routers e.g. instances, subnets, ... + */ +ALTER TABLE omicron.public.vpc_router +ADD COLUMN IF NOT EXISTS resolved_version INT NOT NULL DEFAULT 0; diff --git a/schema/crdb/vpc-subnet-routing/up03.sql b/schema/crdb/vpc-subnet-routing/up03.sql new file mode 100644 index 0000000000..fb4fd2324a --- /dev/null +++ b/schema/crdb/vpc-subnet-routing/up03.sql @@ -0,0 +1,96 @@ +set local disallow_full_table_scans = off; + +-- We need to manually rebuild a compliant set of routes. +-- Remove everything that exists today. +DELETE FROM omicron.public.router_route WHERE 1=1; + +-- Insert gateway routes for all VPCs. +INSERT INTO omicron.public.router_route + ( + id, name, + description, + time_created, time_modified, + vpc_router_id, kind, + target, destination + ) +SELECT + gen_random_uuid(), 'default-v4', + 'The default route of a vpc', + now(), now(), + omicron.public.vpc_router.id, 'default', + 'inetgw:outbound', 'ipnet:0.0.0.0/0' +FROM + omicron.public.vpc_router +ON CONFLICT DO NOTHING; + +INSERT INTO omicron.public.router_route + ( + id, name, + description, + time_created, time_modified, + vpc_router_id, kind, + target, destination + ) +SELECT + gen_random_uuid(), 'default-v6', + 'The default route of a vpc', + now(), now(), + omicron.public.vpc_router.id, 'default', + 'inetgw:outbound', 'ipnet:::/0' +FROM + omicron.public.vpc_router +ON CONFLICT DO NOTHING; + +-- Insert subnet routes for every defined VPC subnet. +INSERT INTO omicron.public.router_route + ( + id, name, + description, + time_created, time_modified, + vpc_router_id, kind, + target, destination + ) +SELECT + gen_random_uuid(), vpc_subnet.name, + 'VPC Subnet route for ''' || vpc_subnet.name || '''', + now(), now(), + omicron.public.vpc_router.id, 'vpc_subnet', + 'subnet:' || vpc_subnet.name, 'subnet:' || vpc_subnet.name +FROM + (omicron.public.vpc_subnet JOIN omicron.public.vpc + ON vpc_subnet.vpc_id = vpc.id) JOIN omicron.public.vpc_router + ON vpc_router.vpc_id = vpc.id +ON CONFLICT DO NOTHING; + +-- Replace IDs of fixed_data routes for the services VPC. +-- This is done instead of an insert to match the initial +-- empty state of dbinit.sql. 
+WITH known_ids (new_id, new_name, new_description) AS ( + VALUES + ( + '001de000-074c-4000-8000-000000000002', 'default-v4', + 'Default internet gateway route for Oxide Services' + ), + ( + '001de000-074c-4000-8000-000000000003', 'default-v6', + 'Default internet gateway route for Oxide Services' + ), + ( + '001de000-c470-4000-8000-000000000004', 'external-dns', + 'Built-in VPC Subnet for Oxide service (external-dns)' + ), + ( + '001de000-c470-4000-8000-000000000005', 'nexus', + 'Built-in VPC Subnet for Oxide service (nexus)' + ), + ( + '001de000-c470-4000-8000-000000000006', 'boundary-ntp', + 'Built-in VPC Subnet for Oxide service (boundary-ntp)' + ) +) +UPDATE omicron.public.router_route +SET + id = CAST(new_id AS UUID), + description = new_description +FROM known_ids +WHERE vpc_router_id = '001de000-074c-4000-8000-000000000001' AND new_name = router_route.name; diff --git a/schema/rss-service-plan-v2.json b/schema/rss-service-plan-v2.json index ee0b21af81..e5aba43040 100644 --- a/schema/rss-service-plan-v2.json +++ b/schema/rss-service-plan-v2.json @@ -178,7 +178,7 @@ }, "Name": { "title": "A name unique within the parent collection", - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.", "type": "string", "maxLength": 63, "minLength": 1, diff --git a/schema/rss-service-plan-v3.json b/schema/rss-service-plan-v3.json index d1540ca351..481c92cc36 100644 --- a/schema/rss-service-plan-v3.json +++ b/schema/rss-service-plan-v3.json @@ -219,7 +219,7 @@ }, "Name": { "title": "A name unique within the parent collection", - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. 
They can be at most 63 characters long.", "type": "string", "maxLength": 63, "minLength": 1, @@ -268,6 +268,13 @@ "subnet": { "$ref": "#/definitions/IpNet" }, + "transit_ips": { + "default": [], + "type": "array", + "items": { + "$ref": "#/definitions/IpNet" + } + }, "vni": { "$ref": "#/definitions/Vni" } diff --git a/schema/rss-sled-plan.json b/schema/rss-sled-plan.json index 04ba5d8d31..cb3c5c8eeb 100644 --- a/schema/rss-sled-plan.json +++ b/schema/rss-sled-plan.json @@ -195,6 +195,7 @@ }, "checker": { "description": "Checker to apply to incoming messages.", + "default": null, "type": [ "string", "null" @@ -209,6 +210,7 @@ }, "shaper": { "description": "Shaper to apply to outgoing messages.", + "default": null, "type": [ "string", "null" @@ -319,6 +321,7 @@ }, "local_pref": { "description": "Apply a local preference to routes received from this peer.", + "default": null, "type": [ "integer", "null" @@ -328,6 +331,7 @@ }, "md5_auth_key": { "description": "Use the given key for TCP-MD5 authentication with the peer.", + "default": null, "type": [ "string", "null" @@ -335,6 +339,7 @@ }, "min_ttl": { "description": "Require messages from a peer have a minimum IP time to live field.", + "default": null, "type": [ "integer", "null" @@ -344,6 +349,7 @@ }, "multi_exit_discriminator": { "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "default": null, "type": [ "integer", "null" @@ -357,6 +363,7 @@ }, "remote_asn": { "description": "Require that a peer has a specified ASN.", + "default": null, "type": [ "integer", "null" @@ -366,6 +373,7 @@ }, "vlan_id": { "description": "Associate a VLAN ID with a BGP peer session.", + "default": null, "type": [ "integer", "null" @@ -598,7 +606,7 @@ }, "Name": { "title": "A name unique within the parent collection", - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.", "type": "string", "maxLength": 63, "minLength": 1, @@ -894,6 +902,7 @@ }, "vlan_id": { "description": "The VLAN id associated with this route.", + "default": null, "type": [ "integer", "null" @@ -997,6 +1006,7 @@ }, "vlan_id": { "description": "The VLAN id (if any) associated with this address.", + "default": null, "type": [ "integer", "null" @@ -1007,7 +1017,7 @@ } }, "UserId": { - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.\n\n
JSON schema\n\n```json { \"title\": \"A name unique within the parent collection\", \"description\": \"Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.\", \"type\": \"string\", \"maxLength\": 63, \"minLength\": 1, \"pattern\": \"^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$\" } ```
", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.\n\n
JSON schema\n\n```json { \"title\": \"A name unique within the parent collection\", \"description\": \"Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.\", \"type\": \"string\", \"maxLength\": 63, \"minLength\": 1, \"pattern\": \"^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$\" } ```
", "type": "string" } } diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index 167ac987ca..b798ba783d 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -127,14 +127,7 @@ name = "sled-agent" doc = false [features] -image-standard = [] image-trampoline = [] -machine-gimlet = [] -machine-gimlet-standalone = [] -machine-non-gimlet = [] switch-asic = [] switch-stub = [] switch-softnpu = [] -switch-hypersoftnpu = [] -rack-topology-single-sled = [] -rack-topology-multi-sled = [] diff --git a/sled-agent/src/bin/services-ledger-check-migrate.rs b/sled-agent/src/bin/services-ledger-check-migrate.rs deleted file mode 100644 index 456fdc74b7..0000000000 --- a/sled-agent/src/bin/services-ledger-check-migrate.rs +++ /dev/null @@ -1,80 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Test-migrates one or more old-format services ledger files to new-format -//! Omicron zones ledgers - -use anyhow::Context; -use camino::Utf8PathBuf; -use clap::Args; -use clap::Parser; -use omicron_common::cmd::fatal; -use omicron_common::cmd::CmdError; -use omicron_sled_agent::services::OmicronZonesConfigLocal; -use omicron_sled_agent::services_migration::AllZoneRequests; - -#[tokio::main] -async fn main() { - if let Err(message) = do_run().await { - fatal(CmdError::Failure(message)); - } -} - -#[derive(Debug, Parser)] -#[clap(about = "Test conversion of old-format services ledgers to new-format \ - zones ledgers")] -enum Converter { - /// checks whether one or more ledger file(s) can be converted successfully - Check(CheckArgs), - - /// for a given ledger file, prints the converted form - Show(ShowArgs), -} - -#[derive(Debug, Args)] -struct CheckArgs { - #[clap(action)] - files: Vec, -} - -#[derive(Debug, Args)] -struct ShowArgs { - #[clap(action)] - file: Utf8PathBuf, -} - -async fn do_run() -> Result<(), anyhow::Error> { - let args = Converter::parse(); - - let (files, do_show) = match args { - Converter::Check(CheckArgs { files }) => (files, false), - Converter::Show(ShowArgs { file }) => (vec![file], true), - }; - - for file_path in &files { - let contents = tokio::fs::read_to_string(file_path) - .await - .with_context(|| format!("read {:?}", &file_path))?; - let parsed: AllZoneRequests = serde_json::from_str(&contents) - .with_context(|| format!("parse {:?}", &file_path))?; - let converted = OmicronZonesConfigLocal::try_from(parsed) - .with_context(|| format!("convert contents of {:?}", &file_path))?; - if do_show { - println!( - "{:#}", - serde_json::to_string_pretty(&converted).with_context( - || format!("print contents of {:?}", &file_path) - )? 
- ); - } - eprintln!( - "{}: processed okay (zones: {})", - file_path, - converted.zones.len() - ); - } - - eprintln!("all files processed okay (files: {})", files.len()); - Ok(()) -} diff --git a/sled-agent/src/common/instance.rs b/sled-agent/src/common/instance.rs index 02623455f4..ed0aceff82 100644 --- a/sled-agent/src/common/instance.rs +++ b/sled-agent/src/common/instance.rs @@ -117,50 +117,57 @@ impl ObservedPropolisState { instance_runtime: &InstanceRuntimeState, propolis_state: &InstanceStateMonitorResponse, ) -> Self { - let migration_status = - match (instance_runtime.migration_id, &propolis_state.migration) { - // If the runtime state and Propolis state agree that there's - // a migration in progress, and they agree on its ID, the - // Propolis migration state determines the migration status. - (Some(this_id), Some(propolis_migration)) - if this_id == propolis_migration.migration_id => - { - match propolis_migration.state { - PropolisMigrationState::Finish => { - ObservedMigrationStatus::Succeeded - } - PropolisMigrationState::Error => { - ObservedMigrationStatus::Failed - } - _ => ObservedMigrationStatus::InProgress, - } - } - - // If both sides have a migration ID, but the IDs don't match, - // assume the instance's migration ID is newer. This can happen - // if Propolis was initialized via migration in and has not yet - // been told to migrate out. - (Some(_), Some(_)) => ObservedMigrationStatus::Pending, - - // If only Propolis has a migration ID, assume it was from a - // prior migration in and report that no migration is in - // progress. This could be improved with propolis#508. - (None, Some(_)) => ObservedMigrationStatus::NoMigration, - - // A migration source's migration IDs get set before its - // Propolis actually gets asked to migrate, so it's possible for - // the runtime state to contain an ID while the Propolis has - // none, in which case the migration is pending. - (Some(_), None) => ObservedMigrationStatus::Pending, - - // If neither side has a migration ID, then there's clearly no - // migration. - (None, None) => ObservedMigrationStatus::NoMigration, + // If there's no migration currently registered with this sled, report + // the current state and that no migration is currently in progress, + // even if Propolis has some migration data to share. (This case arises + // when Propolis returns state from a previous migration that sled agent + // has already retired.) + // + // N.B. This needs to be read from the instance runtime state and not + // the migration runtime state to ensure that, once a migration in + // completes, the "completed" observation is reported to + // `InstanceStates::apply_propolis_observation` exactly once. + // Otherwise that routine will try to apply the "inbound migration + // complete" instance state transition twice. + let Some(migration_id) = instance_runtime.migration_id else { + return Self { + vmm_state: PropolisInstanceState(propolis_state.state), + migration_status: ObservedMigrationStatus::NoMigration, + time: Utc::now(), }; + }; + + // Sled agent believes a live migration may be in progress. See if + // either of the Propolis migrations corresponds to it. 
+ let propolis_migration = match ( + &propolis_state.migration.migration_in, + &propolis_state.migration.migration_out, + ) { + (Some(inbound), _) if inbound.id == migration_id => inbound, + (_, Some(outbound)) if outbound.id == migration_id => outbound, + _ => { + // Sled agent believes this instance should be migrating, but + // Propolis isn't reporting a matching migration yet, so assume + // the migration is still pending. + return Self { + vmm_state: PropolisInstanceState(propolis_state.state), + migration_status: ObservedMigrationStatus::Pending, + time: Utc::now(), + }; + } + }; Self { vmm_state: PropolisInstanceState(propolis_state.state), - migration_status, + migration_status: match propolis_migration.state { + PropolisMigrationState::Finish => { + ObservedMigrationStatus::Succeeded + } + PropolisMigrationState::Error => { + ObservedMigrationStatus::Failed + } + _ => ObservedMigrationStatus::InProgress, + }, time: Utc::now(), } } diff --git a/sled-agent/src/hardware_monitor.rs b/sled-agent/src/hardware_monitor.rs index 6dbca89d74..9508a11bfb 100644 --- a/sled-agent/src/hardware_monitor.rs +++ b/sled-agent/src/hardware_monitor.rs @@ -199,6 +199,15 @@ impl HardwareMonitor { .detected_raw_disk_removal(disk.into()) .await; } + HardwareUpdate::DiskUpdated(disk) => { + // We notify the storage manager of the hardware, but do not need to + // wait for the result to be fully processed. + #[allow(clippy::let_underscore_future)] + let _ = self + .storage_manager + .detected_raw_disk_update(disk.into()) + .await; + } }, Err(broadcast::error::RecvError::Lagged(count)) => { warn!(self.log, "Hardware monitor missed {count} messages"); @@ -277,7 +286,7 @@ impl HardwareMonitor { let _ = self .storage_manager .ensure_using_exactly_these_disks( - self.hardware_manager.disks().into_iter().map(RawDisk::from), + self.hardware_manager.disks().into_values().map(RawDisk::from), ) .await; } diff --git a/sled-agent/src/http_entrypoints.rs b/sled-agent/src/http_entrypoints.rs index 6defd18a95..2d41e2860a 100644 --- a/sled-agent/src/http_entrypoints.rs +++ b/sled-agent/src/http_entrypoints.rs @@ -31,7 +31,9 @@ use omicron_common::api::external::Error; use omicron_common::api::internal::nexus::{ DiskRuntimeState, SledInstanceState, UpdateArtifactId, }; -use omicron_common::api::internal::shared::SwitchPorts; +use omicron_common::api::internal::shared::{ + ResolvedVpcRouteSet, ResolvedVpcRouteState, SwitchPorts, +}; use omicron_uuid_kinds::{GenericUuid, InstanceUuid}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -86,6 +88,8 @@ pub fn api() -> SledApiDescription { api.register(host_os_write_status_delete)?; api.register(inventory)?; api.register(bootstore_status)?; + api.register(list_vpc_routes)?; + api.register(set_vpc_routes)?; Ok(()) } @@ -1025,3 +1029,29 @@ async fn bootstore_status( .into(); Ok(HttpResponseOk(status)) } + +/// Get the current versions of VPC routing rules. +#[endpoint { + method = GET, + path = "/vpc-routes", +}] +async fn list_vpc_routes( + request_context: RequestContext, +) -> Result>, HttpError> { + let sa = request_context.context(); + Ok(HttpResponseOk(sa.list_vpc_routes())) +} + +/// Update VPC routing rules. 
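Taken together, the rewritten `ObservedPropolisState::new` above reduces to a three-way decision: no migration ID registered with sled agent means `NoMigration` regardless of what Propolis reports; a registered ID with no matching inbound or outbound Propolis migration means `Pending`; and a matching migration maps `Finish`/`Error`/anything else to `Succeeded`/`Failed`/`InProgress`. A stand-alone model of just that decision, using stand-in types rather than the real sled-agent and Propolis ones:

    #[derive(Debug, PartialEq)]
    enum Status { NoMigration, Pending, InProgress, Succeeded, Failed }

    #[derive(Clone, Copy)]
    enum MigState { Running, Finish, Error }

    #[derive(Clone, Copy)]
    struct Mig { id: u64, state: MigState }

    fn migration_status(
        runtime_migration_id: Option<u64>,
        migration_in: Option<Mig>,
        migration_out: Option<Mig>,
    ) -> Status {
        // No migration registered with sled agent: ignore any Propolis data.
        let Some(id) = runtime_migration_id else {
            return Status::NoMigration;
        };
        // Otherwise, look for the Propolis migration carrying the registered ID.
        let mig = match (migration_in, migration_out) {
            (Some(m), _) if m.id == id => m,
            (_, Some(m)) if m.id == id => m,
            // Propolis has not caught up yet; treat the migration as pending.
            _ => return Status::Pending,
        };
        match mig.state {
            MigState::Finish => Status::Succeeded,
            MigState::Error => Status::Failed,
            _ => Status::InProgress,
        }
    }

    fn main() {
        assert_eq!(migration_status(None, None, None), Status::NoMigration);
        assert_eq!(migration_status(Some(1), None, None), Status::Pending);
        assert_eq!(
            migration_status(Some(1), Some(Mig { id: 1, state: MigState::Finish }), None),
            Status::Succeeded,
        );
        assert_eq!(
            migration_status(Some(2), None, Some(Mig { id: 2, state: MigState::Running })),
            Status::InProgress,
        );
        assert_eq!(
            migration_status(Some(3), Some(Mig { id: 3, state: MigState::Error }), None),
            Status::Failed,
        );
        println!("ok");
    }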
+#[endpoint { + method = PUT, + path = "/vpc-routes", +}] +async fn set_vpc_routes( + request_context: RequestContext, + body: TypedBody>, +) -> Result { + let sa = request_context.context(); + sa.set_vpc_routes(body.into_inner())?; + Ok(HttpResponseUpdatedNoContent()) +} diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs index 04b68ef752..ec4d503e7b 100644 --- a/sled-agent/src/instance.rs +++ b/sled-agent/src/instance.rs @@ -27,7 +27,7 @@ use backoff::BackoffError; use chrono::Utc; use illumos_utils::dladm::Etherstub; use illumos_utils::link::VnicAllocator; -use illumos_utils::opte::{DhcpCfg, PortManager}; +use illumos_utils::opte::{DhcpCfg, PortCreateParams, PortManager}; use illumos_utils::running_zone::{RunningZone, ZoneBuilderFactory}; use illumos_utils::svc::wait_for_service; use illumos_utils::zone::PROPOLIS_ZONE_PREFIX; @@ -1327,14 +1327,15 @@ impl InstanceRunner { } else { (None, None, &[][..]) }; - let port = self.port_manager.create_port( + let port = self.port_manager.create_port(PortCreateParams { nic, - snat, + source_nat: snat, ephemeral_ip, floating_ips, - &self.firewall_rules, - self.dhcp_config.clone(), - )?; + firewall_rules: &self.firewall_rules, + dhcp_config: self.dhcp_config.clone(), + is_service: false, + })?; opte_ports.push(port); } diff --git a/sled-agent/src/lib.rs b/sled-agent/src/lib.rs index 989f011ed8..f7dc23e4d9 100644 --- a/sled-agent/src/lib.rs +++ b/sled-agent/src/lib.rs @@ -33,7 +33,6 @@ mod profile; pub mod rack_setup; pub mod server; pub mod services; -pub mod services_migration; mod sled_agent; mod smf_helper; mod storage_monitor; diff --git a/sled-agent/src/metrics.rs b/sled-agent/src/metrics.rs index 62eaaf6154..fcb260e93a 100644 --- a/sled-agent/src/metrics.rs +++ b/sled-agent/src/metrics.rs @@ -8,30 +8,23 @@ use omicron_common::api::internal::nexus::ProducerEndpoint; use omicron_common::api::internal::nexus::ProducerKind; use oximeter::types::MetricsError; use oximeter::types::ProducerRegistry; +use oximeter_instruments::kstat::link; +use oximeter_instruments::kstat::CollectionDetails; +use oximeter_instruments::kstat::Error as KstatError; +use oximeter_instruments::kstat::KstatSampler; +use oximeter_instruments::kstat::TargetId; use oximeter_producer::LogConfig; use oximeter_producer::Server as ProducerServer; use sled_hardware_types::Baseboard; use slog::Logger; +use std::collections::BTreeMap; use std::net::Ipv6Addr; use std::net::SocketAddr; use std::sync::Arc; +use std::sync::Mutex; use std::time::Duration; use uuid::Uuid; -cfg_if::cfg_if! { - if #[cfg(target_os = "illumos")] { - use oximeter_instruments::kstat::link; - use oximeter_instruments::kstat::CollectionDetails; - use oximeter_instruments::kstat::Error as KstatError; - use oximeter_instruments::kstat::KstatSampler; - use oximeter_instruments::kstat::TargetId; - use std::collections::BTreeMap; - use std::sync::Mutex; - } else { - use anyhow::anyhow; - } -} - /// The interval on which we ask `oximeter` to poll us for metric data. pub(crate) const METRIC_COLLECTION_INTERVAL: Duration = Duration::from_secs(30); @@ -44,14 +37,9 @@ const METRIC_REQUEST_MAX_SIZE: usize = 10 * 1024 * 1024; /// An error during sled-agent metric production. 
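// The instance.rs hunk above switches `PortManager::create_port` from a long
// positional argument list to a single `PortCreateParams` struct. Below is a
// minimal sketch of that named-parameters pattern with hypothetical stand-in
// types; it is not the real illumos_utils API, only an illustration of the
// shape and of why the new `is_service` flag cannot be confused with an
// existing positional argument.
use std::net::IpAddr;

struct Nic;          // stand-in
struct FirewallRule; // stand-in
struct DhcpCfg;      // stand-in

struct PortCreateParams<'a> {
    nic: &'a Nic,
    source_nat: Option<IpAddr>,
    ephemeral_ip: Option<IpAddr>,
    floating_ips: &'a [IpAddr],
    firewall_rules: &'a [FirewallRule],
    dhcp_config: DhcpCfg,
    is_service: bool,
}

struct PortManager;

impl PortManager {
    fn create_port(&self, params: PortCreateParams<'_>) -> Result<(), String> {
        // A real implementation would plumb an OPTE port; here we only show
        // how the struct keeps the call site self-describing.
        let _ = (params.nic, params.is_service);
        Ok(())
    }
}

fn main() {
    let (mgr, nic) = (PortManager, Nic);
    mgr.create_port(PortCreateParams {
        nic: &nic,
        source_nat: None,
        ephemeral_ip: None,
        floating_ips: &[],
        firewall_rules: &[],
        dhcp_config: DhcpCfg,
        is_service: false,
    })
    .unwrap();
}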
#[derive(Debug, thiserror::Error)] pub enum Error { - #[cfg(target_os = "illumos")] #[error("Kstat-based metric failure")] Kstat(#[source] KstatError), - #[cfg(not(target_os = "illumos"))] - #[error("Kstat-based metric failure")] - Kstat(#[source] anyhow::Error), - #[error("Failed to insert metric producer into registry")] Registry(#[source] MetricsError), @@ -70,7 +58,6 @@ pub enum Error { // Basic metadata about the sled agent used when publishing metrics. #[derive(Clone, Debug)] -#[cfg_attr(not(target_os = "illumos"), allow(dead_code))] struct SledIdentifiers { sled_id: Uuid, rack_id: Uuid, @@ -86,13 +73,9 @@ struct SledIdentifiers { // pattern, but until we have more statistics, it's not clear whether that's // worth it right now. #[derive(Clone)] -// NOTE: The ID fields aren't used on non-illumos systems, rather than changing -// the name of fields that are not yet used. -#[cfg_attr(not(target_os = "illumos"), allow(dead_code))] pub struct MetricsManager { metadata: Arc, _log: Logger, - #[cfg(target_os = "illumos")] kstat_sampler: KstatSampler, // TODO-scalability: We may want to generalize this to store any kind of // tracked target, and use a naming scheme that allows us pick out which @@ -103,7 +86,6 @@ pub struct MetricsManager { // for disks or memory. If we wanted to guarantee uniqueness, we could // namespace them internally, e.g., `"datalink:{link_name}"` would be the // real key. - #[cfg(target_os = "illumos")] tracked_links: Arc>>, producer_server: Arc, } @@ -122,23 +104,16 @@ impl MetricsManager { ) -> Result { let producer_server = start_producer_server(&log, sled_id, sled_address)?; - - cfg_if::cfg_if! { - if #[cfg(target_os = "illumos")] { - let kstat_sampler = KstatSampler::new(&log).map_err(Error::Kstat)?; - producer_server - .registry() - .register_producer(kstat_sampler.clone()) - .map_err(Error::Registry)?; - let tracked_links = Arc::new(Mutex::new(BTreeMap::new())); - } - } + let kstat_sampler = KstatSampler::new(&log).map_err(Error::Kstat)?; + producer_server + .registry() + .register_producer(kstat_sampler.clone()) + .map_err(Error::Registry)?; + let tracked_links = Arc::new(Mutex::new(BTreeMap::new())); Ok(Self { metadata: Arc::new(SledIdentifiers { sled_id, rack_id, baseboard }), _log: log, - #[cfg(target_os = "illumos")] kstat_sampler, - #[cfg(target_os = "illumos")] tracked_links, producer_server, }) @@ -178,7 +153,6 @@ fn start_producer_server( ProducerServer::start(&config).map(Arc::new).map_err(Error::ProducerServer) } -#[cfg(target_os = "illumos")] impl MetricsManager { /// Track metrics for a physical datalink. pub async fn track_physical_link( @@ -187,12 +161,12 @@ impl MetricsManager { interval: Duration, ) -> Result<(), Error> { let hostname = hostname()?; - let link = link::PhysicalDataLink { + let link = link::physical_data_link::PhysicalDataLink { rack_id: self.metadata.rack_id, sled_id: self.metadata.sled_id, - serial: self.serial_number(), - hostname, - link_name: link_name.as_ref().to_string(), + serial: self.serial_number().into(), + hostname: hostname.into(), + link_name: link_name.as_ref().to_string().into(), }; let details = CollectionDetails::never(interval); let id = self @@ -224,29 +198,6 @@ impl MetricsManager { } } - /// Track metrics for a virtual datalink. 
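// `MetricsManager` above keeps a `BTreeMap` of link name -> sampler target id
// so a tracked link can later be removed. A small sketch of that bookkeeping
// pattern with stand-in types; `Sampler` and `TargetId` here are assumptions,
// not the real oximeter_instruments kstat API.
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};

#[derive(Clone, Copy, Debug, PartialEq)]
struct TargetId(u64);

#[derive(Default)]
struct Sampler {
    next: u64,
}

impl Sampler {
    fn add_target(&mut self, _name: &str) -> TargetId {
        self.next += 1;
        TargetId(self.next)
    }
    fn remove_target(&mut self, _id: TargetId) {}
}

struct LinkTracker {
    sampler: Mutex<Sampler>,
    tracked: Arc<Mutex<BTreeMap<String, TargetId>>>,
}

impl LinkTracker {
    fn track(&self, link: &str) {
        // Remember the id the sampler handed back so we can stop tracking later.
        let id = self.sampler.lock().unwrap().add_target(link);
        self.tracked.lock().unwrap().insert(link.to_string(), id);
    }
    fn stop_tracking(&self, link: &str) {
        if let Some(id) = self.tracked.lock().unwrap().remove(link) {
            self.sampler.lock().unwrap().remove_target(id);
        }
    }
}

fn main() {
    let t = LinkTracker {
        sampler: Mutex::new(Sampler::default()),
        tracked: Arc::new(Mutex::new(BTreeMap::new())),
    };
    t.track("cxgbe0");
    t.stop_tracking("cxgbe0");
    assert!(t.tracked.lock().unwrap().is_empty());
}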
- #[allow(dead_code)] - pub async fn track_virtual_link( - &self, - link_name: impl AsRef, - hostname: impl AsRef, - interval: Duration, - ) -> Result<(), Error> { - let link = link::VirtualDataLink { - rack_id: self.metadata.rack_id, - sled_id: self.metadata.sled_id, - serial: self.serial_number(), - hostname: hostname.as_ref().to_string(), - link_name: link_name.as_ref().to_string(), - }; - let details = CollectionDetails::never(interval); - self.kstat_sampler - .add_target(link, details) - .await - .map(|_| ()) - .map_err(Error::Kstat) - } - // Return the serial number out of the baseboard, if one exists. fn serial_number(&self) -> String { match &self.metadata.baseboard { @@ -257,48 +208,7 @@ impl MetricsManager { } } -#[cfg(not(target_os = "illumos"))] -impl MetricsManager { - /// Track metrics for a physical datalink. - pub async fn track_physical_link( - &self, - _link_name: impl AsRef, - _interval: Duration, - ) -> Result<(), Error> { - Err(Error::Kstat(anyhow!( - "kstat metrics are not supported on this platform" - ))) - } - - /// Stop tracking metrics for a datalink. - /// - /// This works for both physical and virtual links. - #[allow(dead_code)] - pub async fn stop_tracking_link( - &self, - _link_name: impl AsRef, - ) -> Result<(), Error> { - Err(Error::Kstat(anyhow!( - "kstat metrics are not supported on this platform" - ))) - } - - /// Track metrics for a virtual datalink. - #[allow(dead_code)] - pub async fn track_virtual_link( - &self, - _link_name: impl AsRef, - _hostname: impl AsRef, - _interval: Duration, - ) -> Result<(), Error> { - Err(Error::Kstat(anyhow!( - "kstat metrics are not supported on this platform" - ))) - } -} - // Return the current hostname if possible. -#[cfg(target_os = "illumos")] fn hostname() -> Result { // See netdb.h const MAX_LEN: usize = 256; diff --git a/sled-agent/src/probe_manager.rs b/sled-agent/src/probe_manager.rs index 16559039a2..40af604645 100644 --- a/sled-agent/src/probe_manager.rs +++ b/sled-agent/src/probe_manager.rs @@ -3,10 +3,12 @@ use anyhow::{anyhow, Result}; use illumos_utils::dladm::Etherstub; use illumos_utils::link::VnicAllocator; use illumos_utils::opte::params::VpcFirewallRule; -use illumos_utils::opte::{DhcpCfg, PortManager}; +use illumos_utils::opte::{DhcpCfg, PortCreateParams, PortManager}; use illumos_utils::running_zone::{RunningZone, ZoneBuilderFactory}; use illumos_utils::zone::Zones; -use nexus_client::types::{ProbeExternalIp, ProbeInfo}; +use nexus_client::types::{ + BackgroundTasksActivateRequest, ProbeExternalIp, ProbeInfo, +}; use omicron_common::api::external::{ VpcFirewallRuleAction, VpcFirewallRuleDirection, VpcFirewallRulePriority, VpcFirewallRuleStatus, @@ -179,24 +181,44 @@ impl ProbeManagerInner { } }; - self.add(target.difference(¤t)).await; + let n_added = self.add(target.difference(¤t)).await; self.remove(current.difference(&target)).await; self.check(current.intersection(&target)).await; + + // If we have created some new probes, we may need the control plane + // to provide us with valid routes for the VPC the probe belongs to. + if n_added > 0 { + if let Err(e) = self + .nexus_client + .client() + .bgtask_activate(&BackgroundTasksActivateRequest { + bgtask_names: vec!["vpc_route_manager".into()], + }) + .await + { + error!(self.log, "get routes for probe: {e}"); + } + } } }) } /// Add a set of probes to this sled. - async fn add<'a, I>(self: &Arc, probes: I) + /// + /// Returns the number of inserted probes. 
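// The probe manager above reconciles the probes Nexus wants (`target`)
// against what is currently running (`current`) using set differences, and
// only pokes the `vpc_route_manager` background task when something was
// added. A simplified, standalone sketch of that reconciliation step using
// plain `HashSet`s of probe ids (u64 is a stand-in for the real probe id type).
use std::collections::HashSet;

struct Reconciled {
    to_add: Vec<u64>,
    to_remove: Vec<u64>,
    to_check: Vec<u64>,
}

fn reconcile(current: &HashSet<u64>, target: &HashSet<u64>) -> Reconciled {
    Reconciled {
        // In `target` but not yet running: create these.
        to_add: target.difference(current).copied().collect(),
        // Running but no longer wanted: tear these down.
        to_remove: current.difference(target).copied().collect(),
        // In both: verify they are still healthy.
        to_check: current.intersection(target).copied().collect(),
    }
}

fn main() {
    let current: HashSet<u64> = [1, 2, 3].into_iter().collect();
    let target: HashSet<u64> = [2, 3, 4].into_iter().collect();
    let r = reconcile(&current, &target);
    assert_eq!(r.to_add, vec![4]);
    assert_eq!(r.to_remove, vec![1]);
    // Only newly added probes require fresh VPC routes from the control plane.
    let needs_route_refresh = !r.to_add.is_empty();
    assert!(needs_route_refresh);
}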
+ async fn add<'a, I>(self: &Arc, probes: I) -> usize where I: Iterator, { + let mut i = 0; for probe in probes { info!(self.log, "adding probe {}", probe.id); if let Err(e) = self.add_probe(probe).await { error!(self.log, "add probe: {e}"); } + i += 1; } + i } /// Add a probe to this sled. This sets up resources for the probe zone @@ -223,12 +245,12 @@ impl ProbeManagerInner { .get(0) .ok_or(anyhow!("expected an external ip"))?; - let port = self.port_manager.create_port( - &nic, - None, - Some(eip.ip), - &[], // floating ips - &[VpcFirewallRule { + let port = self.port_manager.create_port(PortCreateParams { + nic, + source_nat: None, + ephemeral_ip: Some(eip.ip), + floating_ips: &[], + firewall_rules: &[VpcFirewallRule { status: VpcFirewallRuleStatus::Enabled, direction: VpcFirewallRuleDirection::Inbound, targets: vec![nic.clone()], @@ -238,8 +260,9 @@ impl ProbeManagerInner { action: VpcFirewallRuleAction::Allow, priority: VpcFirewallRulePriority(100), }], - DhcpCfg::default(), - )?; + dhcp_config: DhcpCfg::default(), + is_service: false, + })?; let installed_zone = ZoneBuilderFactory::default() .builder() diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index 8499a0000c..f13c15723c 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -1042,6 +1042,7 @@ impl ServicePortBuilder { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }; Some((nic, external_ip)) @@ -1082,6 +1083,7 @@ impl ServicePortBuilder { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }; Ok((nic, external_ip)) @@ -1139,6 +1141,7 @@ impl ServicePortBuilder { vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }; Ok((nic, snat_cfg)) diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index 70d68f6a8e..fb57990f1b 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -35,7 +35,6 @@ use crate::params::{ TimeSync, ZoneBundleCause, ZoneBundleMetadata, ZoneType, }; use crate::profile::*; -use crate::services_migration::{AllZoneRequests, SERVICES_LEDGER_FILENAME}; use crate::smf_helper::SmfHelper; use crate::zone_bundle::BundleError; use crate::zone_bundle::ZoneBundler; @@ -49,7 +48,9 @@ use illumos_utils::dladm::{ Dladm, Etherstub, EtherstubVnic, GetSimnetError, PhysicalLink, }; use illumos_utils::link::{Link, VnicAllocator}; -use illumos_utils::opte::{DhcpCfg, Port, PortManager, PortTicket}; +use illumos_utils::opte::{ + DhcpCfg, Port, PortCreateParams, PortManager, PortTicket, +}; use illumos_utils::running_zone::{ EnsureAddressError, InstalledZone, RunCommandError, RunningZone, ZoneBuilderFactory, @@ -759,7 +760,11 @@ impl ServiceManager { self.inner.switch_zone_bootstrap_address } + // TODO: This function refers to an old, deprecated format for storing + // service information. It is not deprecated for cleanup purposes, but + // should otherwise not be called in new code. async fn all_service_ledgers(&self) -> Vec { + pub const SERVICES_LEDGER_FILENAME: &str = "services.json"; if let Some(dir) = self.inner.ledger_directory_override.get() { return vec![dir.join(SERVICES_LEDGER_FILENAME)]; } @@ -785,157 +790,54 @@ impl ServiceManager { // Loads persistent configuration about any Omicron-managed zones that we're // supposed to be running. - // - // For historical reasons, there are two possible places this configuration - // could live, each with its own format. This function first checks the - // newer one. 
If no configuration was found there, it checks the older - // one. If only the older one was found, it is converted into the new form - // so that future calls will only look at the new form. async fn load_ledgered_zones( &self, // This argument attempts to ensure that the caller holds the right // lock. _map: &MutexGuard<'_, ZoneMap>, ) -> Result>, Error> { - // First, try to load the current software's zone ledger. If that - // works, we're done. let log = &self.inner.log; + + // NOTE: This is a function where we used to access zones by "service + // ledgers". This format has since been deprecated, and these files, + // if they exist, should not be used. + // + // We try to clean them up at this spot. Deleting this "removal" code + // in the future should be a safe operation; this is a non-load-bearing + // cleanup. + for path in self.all_service_ledgers().await { + match tokio::fs::remove_file(&path).await { + Ok(_) => (), + Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => (), + Err(e) => { + warn!( + log, + "Failed to delete old service ledger"; + "err" => ?e, + "path" => ?path, + ); + } + } + } + + // Try to load the current software's zone ledger let ledger_paths = self.all_omicron_zone_ledgers().await; info!(log, "Loading Omicron zones from: {ledger_paths:?}"); let maybe_ledger = Ledger::::new(log, ledger_paths.clone()) .await; - if let Some(ledger) = maybe_ledger { - info!( - log, - "Loaded Omicron zones"; - "zones_config" => ?ledger.data() - ); - return Ok(Some(ledger)); - } + let Some(ledger) = maybe_ledger else { + info!(log, "Loading Omicron zones - No zones detected"); + return Ok(None); + }; - // Now look for the ledger used by previous versions. If we find it, - // we'll convert it and write out a new ledger used by the current - // software. - info!( - log, - "Loading Omicron zones - No zones detected \ - (will look for old-format services)" - ); - let services_ledger_paths = self.all_service_ledgers().await; info!( log, - "Loading old-format services from: {services_ledger_paths:?}" + "Loaded Omicron zones"; + "zones_config" => ?ledger.data() ); - - let maybe_ledger = - Ledger::::new(log, services_ledger_paths.clone()) - .await; - let maybe_converted = match maybe_ledger { - None => { - // The ledger ignores all errors attempting to load files. That - // might be fine most of the time. In this case, we want to - // raise a big red flag if we find an old-format ledger that we - // can't process. - if services_ledger_paths.iter().any(|p| p.exists()) { - Err(Error::ServicesMigration(anyhow!( - "failed to read or parse old-format ledger, \ - but one exists" - ))) - } else { - // There was no old-format ledger at all. - return Ok(None); - } - } - Some(ledger) => { - let all_services = ledger.into_inner(); - OmicronZonesConfigLocal::try_from(all_services) - .map_err(Error::ServicesMigration) - } - }; - - match maybe_converted { - Err(error) => { - // We've tried to test thoroughly so that this should never - // happen. If for some reason it does happen, engineering - // intervention is likely to be required to figure out how to - // proceed. The current software does not directly support - // whatever was in the ledger, and it's not safe to just come up - // with no zones when we're supposed to be running stuff. We'll - // need to figure out what's unexpected about what we found in - // the ledger and figure out how to fix the - // conversion. 
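// The new load path above removes any leftover old-format `services.json`
// ledgers best-effort: a missing file counts as success, and any other error
// is only logged, never fatal. A synchronous sketch of that idiom using
// `std::fs` and `eprintln!` (the patch itself uses `tokio::fs::remove_file`
// and slog).
use std::io::ErrorKind;
use std::path::Path;

fn remove_legacy_ledger(path: &Path) {
    match std::fs::remove_file(path) {
        // Removed, or already gone: either way the cleanup goal is met.
        Ok(()) => (),
        Err(ref e) if e.kind() == ErrorKind::NotFound => (),
        // Anything else (permissions, I/O) is worth noting but must not
        // prevent the sled agent from loading its zones ledger.
        Err(e) => eprintln!("failed to delete old service ledger {path:?}: {e}"),
    }
}

fn main() {
    remove_legacy_ledger(Path::new("/tmp/does-not-exist/services.json"));
}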
- error!( - log, - "Loading Omicron zones - found services but failed \ - to convert them (support intervention required): \ - {:#}", - error - ); - return Err(error); - } - Ok(new_config) => { - // We've successfully converted the old ledger. Write a new - // one. - info!( - log, - "Successfully migrated old-format services ledger to \ - zones ledger" - ); - let mut ledger = Ledger::::new_with( - log, - ledger_paths.clone(), - new_config, - ); - - ledger.commit().await?; - - // We could consider removing the old ledger here. That would - // not guarantee that it would be gone, though, because we could - // crash during `ledger.commit()` above having written at least - // one of the new ledgers. In that case, we won't go through - // this code path again on restart. If we wanted to ensure the - // old-format ledger was gone after the migration, we could - // consider unconditionally removing the old ledger paths in the - // caller, after we've got a copy of the new-format ledger. - // - // Should we? In principle, it shouldn't matter either way - // because we will never look at the old-format ledger unless we - // don't have a new-format one, and we should now have a - // new-format one forever now. - // - // When might it matter? Two cases: - // - // (1) If the sled agent is downgraded to a previous version - // that doesn't know about the new-format ledger. Do we - // want that sled agent to use the old-format one? It - // depends. If that downgrade happens immediately because - // the upgrade to the first new-format version was a - // disaster, then we'd probably rather the downgraded sled - // agent _did_ start its zones. If the downgrade happens - // months later, potentially after various additional - // reconfigurations, then that old-format ledger is probably - // out of date and shouldn't be used. There's no way to - // really know which case we're in, but the latter seems - // quite unlikely (why would we downgrade so far back after - // so long?). So that's a reason to keep the old-format - // ledger. - // - // (2) Suppose a developer or Oxide support engineer removes the - // new ledger for some reason, maybe thinking sled agent - // would come up with no zones running. They'll be - // surprised to discover that it actually starts running a - // potentially old set of zones. This probably only matters - // on a production system, and even then, it probably - // shouldn't happen. - // - // Given these cases, we're left ambivalent. We choose to keep - // the old ledger around. If nothing else, if something goes - // wrong, we'll have a copy of its last contents! - Ok(Some(ledger)) - } - } + Ok(Some(ledger)) } // TODO(https://github.com/oxidecomputer/omicron/issues/2973): @@ -1262,11 +1164,19 @@ impl ServiceManager { // Create the OPTE port for the service. // Note we don't plumb any firewall rules at this point, - // Nexus will plumb them down later but the default OPTE + // Nexus will plumb them down later but services' default OPTE // config allows outbound access which is enough for // Boundary NTP which needs to come up before Nexus. 
let port = port_manager - .create_port(nic, snat, None, floating_ips, &[], DhcpCfg::default()) + .create_port(PortCreateParams { + nic, + source_nat: snat, + ephemeral_ip: None, + floating_ips, + firewall_rules: &[], + dhcp_config: DhcpCfg::default(), + is_service: true, + }) .map_err(|err| Error::ServicePortCreation { service: zone_type_str.clone(), err: Box::new(err), @@ -1295,7 +1205,7 @@ impl ServiceManager { dpd_client .ensure_nat_entry( &self.inner.log, - target_ip.into(), + target_ip, dpd_client::types::MacAddr { a: port.0.mac().into_array(), }, @@ -4989,109 +4899,6 @@ mod test { logctx.cleanup_successful(); } - #[tokio::test] - async fn test_old_ledger_migration() { - let logctx = omicron_test_utils::dev::test_setup_log( - "test_old_ledger_migration", - ); - let test_config = TestConfig::new().await; - - // Before we start the service manager, stuff one of our old-format - // service ledgers into place. - let contents = - include_str!("../tests/old-service-ledgers/rack2-sled10.json"); - std::fs::write( - test_config.config_dir.path().join(SERVICES_LEDGER_FILENAME), - contents, - ) - .expect("failed to copy example old-format services ledger into place"); - - // Now start the service manager. - let mut helper = - LedgerTestHelper::new(logctx.log.clone(), &test_config).await; - let mgr = helper.new_service_manager(); - LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); - - // Trigger the migration code. (Yes, it's hokey that we create this - // fake argument.) - let unused = Mutex::new(BTreeMap::new()); - let migrated_ledger = mgr - .load_ledgered_zones(&unused.lock().await) - .await - .expect("failed to load ledgered zones") - .unwrap(); - - // As a quick check, the migrated ledger should have some zones. - let migrated_config = migrated_ledger.data(); - assert!(!migrated_config.zones.is_empty()); - - // The ServiceManager should now report the migrated zones, meaning that - // they've been copied into the new-format ledger. - let found = - mgr.omicron_zones_list().await.expect("failed to list zones"); - assert_eq!(found, migrated_config.clone().to_omicron_zones_config()); - // They should both match the expected converted output. - let expected: OmicronZonesConfigLocal = serde_json::from_str( - include_str!("../tests/output/new-zones-ledgers/rack2-sled10.json"), - ) - .unwrap(); - let expected_config = expected.to_omicron_zones_config(); - assert_eq!(found, expected_config); - - // Just to be sure, shut down the manager and create a new one without - // triggering migration again. It should also report the same zones. - drop_service_manager(mgr); - - let mgr = helper.new_service_manager(); - LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); - - let found = - mgr.omicron_zones_list().await.expect("failed to list zones"); - assert_eq!(found, expected_config); - - drop_service_manager(mgr); - helper.cleanup().await; - logctx.cleanup_successful(); - } - - #[tokio::test] - async fn test_old_ledger_migration_bad() { - let logctx = omicron_test_utils::dev::test_setup_log( - "test_old_ledger_migration_bad", - ); - let test_config = TestConfig::new().await; - let mut helper = - LedgerTestHelper::new(logctx.log.clone(), &test_config).await; - - // Before we start things, stuff a broken ledger into place. For this - // to test what we want, it needs to be a valid ledger that we simply - // failed to convert. 
- std::fs::write( - test_config.config_dir.path().join(SERVICES_LEDGER_FILENAME), - "{", - ) - .expect("failed to copy example old-format services ledger into place"); - - // Start the service manager. - let mgr = helper.new_service_manager(); - LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); - - // Trigger the migration code. - let unused = Mutex::new(BTreeMap::new()); - let error = mgr - .load_ledgered_zones(&unused.lock().await) - .await - .expect_err("succeeded in loading bogus ledgered zones"); - assert_eq!( - "Error migrating old-format services ledger: failed to read or \ - parse old-format ledger, but one exists", - format!("{:#}", error) - ); - - helper.cleanup().await; - logctx.cleanup_successful(); - } - #[test] fn test_bootstrap_addr_to_techport_prefixes() { let ba: Ipv6Addr = "fdb0:1122:3344:5566::".parse().unwrap(); diff --git a/sled-agent/src/services_migration.rs b/sled-agent/src/services_migration.rs deleted file mode 100644 index 511368e2f6..0000000000 --- a/sled-agent/src/services_migration.rs +++ /dev/null @@ -1,623 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Sled Agents are responsible for running zones that make up much of the -//! control plane (Omicron). Configuration for these zones is owned by the -//! control plane, but that configuration must be persisted locally in order to -//! support cold boot of the control plane. (The control plane can't very well -//! tell sled agents what to run if it's not online yet!) -//! -//! Historically, these configurations were represented as an -//! `AllZonesRequests`, which contains a bunch of `ZoneRequest`s, each -//! containing a `ServiceZoneRequest`. This last structure was quite general -//! and made it possible to express a world of configurations that are not -//! actually valid. To avoid spreading extra complexity, these structures were -//! replaced with `OmicronZonesConfigLocal` and `OmicronZonesConfig`, -//! respectively. Upgrading production systems across this change requires -//! migrating any locally-stored configuration in the old format into the new -//! one. -//! -//! This file defines these old-format types and functions to convert them to -//! the new types, solely to perform that migration. We can remove all this -//! when we're satified that all deployed systems that we care about have moved -//! past this change. - -use crate::params::{ - OmicronZoneConfig, OmicronZoneDataset, OmicronZoneType, OmicronZonesConfig, - ZoneType, -}; -use crate::services::{OmicronZoneConfigLocal, OmicronZonesConfigLocal}; -use anyhow::{anyhow, ensure, Context}; -use camino::Utf8PathBuf; -use omicron_common::api::external::Generation; -use omicron_common::api::internal::shared::{ - NetworkInterface, SourceNatConfig, -}; -use omicron_common::ledger::Ledgerable; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use sled_storage::dataset::{DatasetKind, DatasetName}; -use std::fmt::Debug; -use std::net::{IpAddr, Ipv6Addr, SocketAddr, SocketAddrV6}; -use uuid::Uuid; - -/// The filename of the ledger containing this old-format configuration. -pub const SERVICES_LEDGER_FILENAME: &str = "services.json"; - -/// A wrapper around `ZoneRequest` that allows it to be serialized to a JSON -/// file. 
-#[derive(Clone, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] -pub struct AllZoneRequests { - /// ledger generation (not an Omicron-provided generation) - generation: Generation, - requests: Vec, -} - -impl Default for AllZoneRequests { - fn default() -> Self { - Self { generation: Generation::new(), requests: vec![] } - } -} - -impl Ledgerable for AllZoneRequests { - fn is_newer_than(&self, other: &AllZoneRequests) -> bool { - self.generation >= other.generation - } - - fn generation_bump(&mut self) { - self.generation = self.generation.next(); - } -} - -impl TryFrom for OmicronZonesConfigLocal { - type Error = anyhow::Error; - - fn try_from(input: AllZoneRequests) -> Result { - // The Omicron generation number that we choose here (2) deserves some - // explanation. - // - // This is supposed to be the control-plane-issued generation number for - // this configuration. But any configuration that we're converting here - // predates the point where the control plane issued generation numbers - // at all. So what should we assign it? Well, what are the - // constraints? - // - // - It must be newer than generation 1 because generation 1 canonically - // represents the initial state of having no zones deployed. If we - // used generation 1 here, any code could ignore this configuration on - // the grounds that it's no newer than what it already has. (The - // contents of a given generation are supposed to be immutable.) - // - // - It should be older than anything else that the control plane might - // try to send us so that if the control plane wants to change - // anything, we won't ignore its request because we think this - // configuration is newer. But really this has to be the control - // plane's responsibility, not ours. That is: Nexus needs to ask us - // what our generation number is and subsequent configurations should - // use newer generation numbers. It's not a great plan for it to - // assume anything about the generation numbers deployed on sleds - // whose configurations it's never seen. (In practice, newly deployed - // systems currently wind up with generation 5, so it _could_ choose - // something like 6 to start with -- or some larger number to leave - // some buffer.) - // - // In summary, 2 seems fine. - let omicron_generation = OmicronZonesConfig::INITIAL_GENERATION.next(); - - // The ledger generation doesn't really matter. In case it's useful, we - // pick the generation from the ledger that we loaded. - let ledger_generation = input.generation; - - let ndatasets_input = - input.requests.iter().filter(|r| r.zone.dataset.is_some()).count(); - - let zones = input - .requests - .into_iter() - .map(OmicronZoneConfigLocal::try_from) - .collect::, _>>() - .context( - "mapping `AllZoneRequests` to `OmicronZonesConfigLocal`", - )?; - - // As a quick check, the number of datasets in the old and new - // generations ought to be the same. - let ndatasets_output = - zones.iter().filter(|r| r.zone.dataset_name().is_some()).count(); - ensure!( - ndatasets_input == ndatasets_output, - "conversion produced a different number of datasets" - ); - - Ok(OmicronZonesConfigLocal { - omicron_generation, - ledger_generation, - zones, - }) - } -} - -/// This struct represents the combo of "what zone did you ask for" + "where did -/// we put it". 
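// The deleted `AllZoneRequests` above implements `Ledgerable` by comparing
// ledger generations and bumping them on write. A minimal sketch of that
// shape using a plain `u64` generation as a stand-in for omicron's
// `Generation` type; the trait and types here are illustrative, not the real
// `omicron_common::ledger` API.
trait Ledgerable {
    /// Used when several on-disk copies disagree: prefer the newest.
    fn is_newer_than(&self, other: &Self) -> bool;
    /// Bumped before every commit so readers can order copies.
    fn generation_bump(&mut self);
}

#[derive(Default)]
struct ZoneLedger {
    generation: u64,
    zones: Vec<String>,
}

impl Ledgerable for ZoneLedger {
    fn is_newer_than(&self, other: &Self) -> bool {
        // `>=` (not `>`) mirrors the deleted impl: of two copies with equal
        // generations, either may be used.
        self.generation >= other.generation
    }
    fn generation_bump(&mut self) {
        self.generation += 1;
    }
}

fn main() {
    let mut a = ZoneLedger::default();
    let b = ZoneLedger { generation: 3, zones: vec!["ntp".into()] };
    assert!(b.is_newer_than(&a));
    a.generation_bump();
    assert!(!a.is_newer_than(&b));
}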
-#[derive(Clone, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] -struct ZoneRequest { - zone: ServiceZoneRequest, - #[schemars(with = "String")] - root: Utf8PathBuf, -} - -impl TryFrom for OmicronZoneConfigLocal { - type Error = anyhow::Error; - - fn try_from(input: ZoneRequest) -> Result { - Ok(OmicronZoneConfigLocal { - zone: OmicronZoneConfig::try_from(input.zone)?, - root: input.root, - }) - } -} - -/// Describes a request to create a zone running one or more services. -#[derive( - Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, -)] -struct ServiceZoneRequest { - // The UUID of the zone to be initialized. - id: Uuid, - // The type of the zone to be created. - zone_type: ZoneType, - // The addresses on which the service should listen for requests. - addresses: Vec, - // Datasets which should be managed by this service. - #[serde(default)] - dataset: Option, - // Services that should be run in the zone - services: Vec, -} - -impl TryFrom for OmicronZoneConfig { - type Error = anyhow::Error; - - fn try_from(input: ServiceZoneRequest) -> Result { - let error_context = || { - format!( - "zone {} (type {:?})", - input.id, - input.zone_type.to_string() - ) - }; - - // Historically, this type was used to describe two distinct kinds of - // thing: - // - // 1. an "Omicron" zone: Clickhouse, CockroachDb, Nexus, etc. We call - // these Omicron zones because they're managed by the control plane - // (Omicron). Nexus knows about these, stores information in - // CockroachDB about them, and is responsible for using Sled Agent - // APIs to configure these zones. - // - // 2. a "sled-local" zone. The only such zone is the "switch" zone. - // This is not really known to Nexus nor exposed outside Sled Agent. - // It's configured either based on Sled Agent's config file or else - // autodetection of whether this system _is_ a Scrimlet. - // - // All of the types in this file describe the ledgered configuration of - // the Omicron zones. We don't care about the switch zone here. Even - // for Omicron zones, the `ServiceZoneRequest` type is much more general - // than was strictly necessary to represent the kinds of zones we - // defined in practice. The more constrained schema is described by - // `OmicronZoneConfig`. This function verifies that the structures we - // find conform to that more constrained schema. - // - // Many of these properties were determined by code inspection. They - // could be wrong! But we've tried hard to make sure we're not wrong. - - match input.zone_type { - ZoneType::Clickhouse - | ZoneType::ClickhouseKeeper - | ZoneType::CockroachDb - | ZoneType::CruciblePantry - | ZoneType::Crucible - | ZoneType::ExternalDns - | ZoneType::InternalDns - | ZoneType::Nexus - | ZoneType::Ntp - | ZoneType::Oximeter => (), - ZoneType::Switch => { - return Err(anyhow!("unsupported zone type")) - .with_context(error_context) - } - } - - let id = input.id; - - // In production systems, Omicron zones only ever had exactly one - // address here. Multiple addresses were used for the "switch" zone, - // which cannot appear here. - if input.addresses.len() != 1 { - return Err(anyhow!( - "expected exactly one address, found {}", - input.addresses.len() - )) - .with_context(error_context); - } - - let underlay_address = input.addresses[0]; - - // In production systems, Omicron zones only ever had exactly one - // "service" inside them. 
(Multiple services were only supported for - // the "switch" zone and for Omicron zones in pre-release versions of - // Omicron, neither of which we expect to see here.) - if input.services.len() != 1 { - return Err(anyhow!( - "expected exactly one service, found {}", - input.services.len(), - )) - .with_context(error_context); - } - - let service = input.services.into_iter().next().unwrap(); - - // The id for the one service we found must match the overall request - // id. - if service.id != input.id { - return Err(anyhow!( - "expected service id ({}) to match id ({})", - service.id, - input.id, - )) - .with_context(error_context); - } - - // If there's a dataset, its id must match the overall request id. - let dataset_request = input - .dataset - .ok_or_else(|| anyhow!("missing dataset")) - .with_context(error_context); - let has_dataset = dataset_request.is_ok(); - if let Ok(dataset) = &dataset_request { - if dataset.id != input.id { - return Err(anyhow!( - "expected dataset id ({}) to match id ({})", - dataset.id, - input.id, - )) - .with_context(error_context); - } - } - - let zone_type = match service.details { - ServiceType::Nexus { - internal_address, - external_ip, - nic, - external_tls, - external_dns_servers, - } => OmicronZoneType::Nexus { - internal_address, - external_ip, - nic, - external_tls, - external_dns_servers, - }, - ServiceType::ExternalDns { http_address, dns_address, nic } => { - OmicronZoneType::ExternalDns { - dataset: dataset_request?.to_omicron_zone_dataset( - DatasetKind::ExternalDns, - http_address, - )?, - http_address, - dns_address, - nic, - } - } - ServiceType::InternalDns { - http_address, - dns_address, - gz_address, - gz_address_index, - } => OmicronZoneType::InternalDns { - dataset: dataset_request?.to_omicron_zone_dataset( - DatasetKind::InternalDns, - http_address, - )?, - http_address, - dns_address, - gz_address, - gz_address_index, - }, - ServiceType::Oximeter { address } => { - OmicronZoneType::Oximeter { address } - } - ServiceType::CruciblePantry { address } => { - OmicronZoneType::CruciblePantry { address } - } - ServiceType::BoundaryNtp { - address, - ntp_servers, - dns_servers, - domain, - nic, - snat_cfg, - } => OmicronZoneType::BoundaryNtp { - address, - ntp_servers, - dns_servers, - domain, - nic, - snat_cfg, - }, - ServiceType::InternalNtp { - address, - ntp_servers, - dns_servers, - domain, - } => OmicronZoneType::InternalNtp { - address, - ntp_servers, - dns_servers, - domain, - }, - ServiceType::Clickhouse { address } => { - OmicronZoneType::Clickhouse { - address, - dataset: dataset_request?.to_omicron_zone_dataset( - DatasetKind::Clickhouse, - address, - )?, - } - } - ServiceType::ClickhouseKeeper { address } => { - OmicronZoneType::ClickhouseKeeper { - address, - dataset: dataset_request?.to_omicron_zone_dataset( - DatasetKind::ClickhouseKeeper, - address, - )?, - } - } - ServiceType::CockroachDb { address } => { - OmicronZoneType::CockroachDb { - address, - dataset: dataset_request?.to_omicron_zone_dataset( - DatasetKind::CockroachDb, - address, - )?, - } - } - ServiceType::Crucible { address } => OmicronZoneType::Crucible { - address, - dataset: dataset_request? - .to_omicron_zone_dataset(DatasetKind::Crucible, address)?, - }, - }; - - if zone_type.dataset_name().is_none() && has_dataset { - // This indicates that the legacy form specified a dataset for a - // zone type that we do not (today) believe should have one. This - // should be impossible. 
If it happens, we need to re-evaluate our - // assumptions in designing `OmicronZoneType`. - return Err(anyhow!("found dataset that went unused")) - .with_context(error_context); - } - - Ok(OmicronZoneConfig { id, underlay_address, zone_type }) - } -} - -/// Used to request that the Sled initialize a single service. -#[derive( - Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, -)] -struct ServiceZoneService { - id: Uuid, - details: ServiceType, -} - -/// Describes service-specific parameters. -#[derive( - Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, -)] -#[serde(tag = "type", rename_all = "snake_case")] -enum ServiceType { - Nexus { - /// The address at which the internal nexus server is reachable. - internal_address: SocketAddrV6, - /// The address at which the external nexus server is reachable. - external_ip: IpAddr, - /// The service vNIC providing external connectivity using OPTE. - nic: NetworkInterface, - /// Whether Nexus's external endpoint should use TLS - external_tls: bool, - /// External DNS servers Nexus can use to resolve external hosts. - external_dns_servers: Vec, - }, - ExternalDns { - /// The address at which the external DNS server API is reachable. - http_address: SocketAddrV6, - /// The address at which the external DNS server is reachable. - dns_address: SocketAddr, - /// The service vNIC providing external connectivity using OPTE. - nic: NetworkInterface, - }, - InternalDns { - http_address: SocketAddrV6, - dns_address: SocketAddrV6, - /// The addresses in the global zone which should be created - /// - /// For the DNS service, which exists outside the sleds's typical subnet - /// - adding an address in the GZ is necessary to allow inter-zone - /// traffic routing. - gz_address: Ipv6Addr, - - /// The address is also identified with an auxiliary bit of information - /// to ensure that the created global zone address can have a unique - /// name. - gz_address_index: u32, - }, - Oximeter { - address: SocketAddrV6, - }, - CruciblePantry { - address: SocketAddrV6, - }, - BoundaryNtp { - address: SocketAddrV6, - ntp_servers: Vec, - dns_servers: Vec, - domain: Option, - /// The service vNIC providing outbound connectivity using OPTE. - nic: NetworkInterface, - /// The SNAT configuration for outbound connections. 
- snat_cfg: SourceNatConfig, - }, - InternalNtp { - address: SocketAddrV6, - ntp_servers: Vec, - dns_servers: Vec, - domain: Option, - }, - Clickhouse { - address: SocketAddrV6, - }, - ClickhouseKeeper { - address: SocketAddrV6, - }, - CockroachDb { - address: SocketAddrV6, - }, - Crucible { - address: SocketAddrV6, - }, -} - -/// Describes a request to provision a specific dataset -#[derive( - Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, -)] -struct DatasetRequest { - id: Uuid, - name: DatasetName, - service_address: SocketAddrV6, -} - -impl DatasetRequest { - fn to_omicron_zone_dataset( - self, - kind: DatasetKind, - service_address: SocketAddrV6, - ) -> Result { - ensure!( - kind == *self.name.dataset(), - "expected dataset kind {:?}, found {:?}", - kind, - self.name.dataset(), - ); - - ensure!( - self.service_address == service_address, - "expected dataset kind {:?} service address to be {}, found {}", - kind, - service_address, - self.service_address, - ); - - Ok(OmicronZoneDataset { pool_name: self.name.pool().clone() }) - } -} - -#[cfg(test)] -mod test { - use super::AllZoneRequests; - use crate::services::OmicronZonesConfigLocal; - use camino::Utf8PathBuf; - - /// Verifies that our understanding of this old-format ledger has not - /// changed. (If you need to change this for some reason, you must figure - /// out how that affects systems with old-format ledgers and update this - /// test accordingly.) - #[test] - fn test_all_services_requests_schema() { - let schema = schemars::schema_for!(AllZoneRequests); - expectorate::assert_contents( - "../schema/all-zone-requests.json", - &serde_json::to_string_pretty(&schema).unwrap(), - ); - } - - /// Verifies that we can successfully convert a corpus of known old-format - /// ledgers. These came from two racks operated by Oxide. In practice - /// there probably aren't many different configurations represented here but - /// it's easy enough to just check them all. - /// - /// In terms of verifying the output: all we have done by hand in - /// constructing this test is verify that the code successfully converts - /// them. The conversion code does some basic sanity checks as well, like - /// that we produced the same number of zones and datasets. 
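// The deleted test below this point pins the old ledger format's JSON schema
// with `schemars` + `expectorate`, so accidental schema drift shows up as a
// snapshot diff. A sketch of that snapshot-test pattern for a hypothetical
// `ExampleConfig` type and output path (both are assumptions for
// illustration, not part of the patch).
#[cfg(test)]
mod schema_snapshot {
    use schemars::JsonSchema;
    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, JsonSchema)]
    struct ExampleConfig {
        generation: u64,
        zones: Vec<String>,
    }

    #[test]
    fn example_config_schema_is_stable() {
        let schema = schemars::schema_for!(ExampleConfig);
        // Compares against the checked-in snapshot; expectorate conventionally
        // honors EXPECTORATE=overwrite to accept an intentional schema change.
        expectorate::assert_contents(
            "../schema/example-config.json",
            &serde_json::to_string_pretty(&schema).unwrap(),
        );
    }
}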
- #[test] - fn test_convert_known_ledgers() { - let known_ledgers = &[ - /* rack2 */ - "rack2-sled8.json", - "rack2-sled9.json", - "rack2-sled10.json", - "rack2-sled11.json", - "rack2-sled12.json", - "rack2-sled14.json", - "rack2-sled16.json", - "rack2-sled17.json", - "rack2-sled21.json", - "rack2-sled23.json", - "rack2-sled25.json", - /* rack3 (no sled 10) */ - "rack3-sled0.json", - "rack3-sled1.json", - "rack3-sled2.json", - "rack3-sled3.json", - "rack3-sled4.json", - "rack3-sled5.json", - "rack3-sled6.json", - "rack3-sled7.json", - "rack3-sled8.json", - "rack3-sled9.json", - "rack3-sled11.json", - "rack3-sled12.json", - "rack3-sled13.json", - "rack3-sled14.json", - "rack3-sled15.json", - "rack3-sled16.json", - "rack3-sled17.json", - "rack3-sled18.json", - "rack3-sled19.json", - "rack3-sled20.json", - "rack3-sled21.json", - "rack3-sled22.json", - "rack3-sled23.json", - "rack3-sled24.json", - "rack3-sled25.json", - "rack3-sled26.json", - "rack3-sled27.json", - "rack3-sled28.json", - "rack3-sled29.json", - "rack3-sled30.json", - "rack3-sled31.json", - ]; - - let path = Utf8PathBuf::from("tests/old-service-ledgers"); - let out_path = Utf8PathBuf::from("tests/output/new-zones-ledgers"); - for ledger_basename in known_ledgers { - println!("checking {:?}", ledger_basename); - let contents = std::fs::read_to_string(path.join(ledger_basename)) - .expect("failed to read file"); - let parsed: AllZoneRequests = - serde_json::from_str(&contents).expect("failed to parse file"); - let converted = OmicronZonesConfigLocal::try_from(parsed) - .expect("failed to convert file"); - expectorate::assert_contents( - out_path.join(ledger_basename), - &serde_json::to_string_pretty(&converted).unwrap(), - ); - } - } -} diff --git a/sled-agent/src/sim/http_entrypoints.rs b/sled-agent/src/sim/http_entrypoints.rs index 012889c664..cfafaeea22 100644 --- a/sled-agent/src/sim/http_entrypoints.rs +++ b/sled-agent/src/sim/http_entrypoints.rs @@ -24,7 +24,9 @@ use illumos_utils::opte::params::VirtualNetworkInterfaceHost; use omicron_common::api::internal::nexus::DiskRuntimeState; use omicron_common::api::internal::nexus::SledInstanceState; use omicron_common::api::internal::nexus::UpdateArtifactId; -use omicron_common::api::internal::shared::SwitchPorts; +use omicron_common::api::internal::shared::{ + ResolvedVpcRouteSet, ResolvedVpcRouteState, SwitchPorts, +}; use omicron_uuid_kinds::{GenericUuid, InstanceUuid}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -64,6 +66,8 @@ pub fn api() -> SledApiDescription { api.register(omicron_zones_get)?; api.register(omicron_zones_put)?; api.register(sled_add)?; + api.register(list_vpc_routes)?; + api.register(set_vpc_routes)?; Ok(()) } @@ -508,3 +512,27 @@ async fn sled_add( ) -> Result { Ok(HttpResponseUpdatedNoContent()) } + +#[endpoint { + method = GET, + path = "/vpc-routes", +}] +async fn list_vpc_routes( + rqctx: RequestContext>, +) -> Result>, HttpError> { + let sa = rqctx.context(); + Ok(HttpResponseOk(sa.list_vpc_routes().await)) +} + +#[endpoint { + method = PUT, + path = "/vpc-routes", +}] +async fn set_vpc_routes( + rqctx: RequestContext>, + body: TypedBody>, +) -> Result { + let sa = rqctx.context(); + sa.set_vpc_routes(body.into_inner()).await; + Ok(HttpResponseUpdatedNoContent()) +} diff --git a/sled-agent/src/sim/instance.rs b/sled-agent/src/sim/instance.rs index 2ac8618399..be6c63f53a 100644 --- a/sled-agent/src/sim/instance.rs +++ b/sled-agent/src/sim/instance.rs @@ -19,7 +19,8 @@ use omicron_common::api::internal::nexus::{ InstanceRuntimeState, 
MigrationRole, SledInstanceState, VmmState, }; use propolis_client::types::{ - InstanceMigrateStatusResponse as PropolisMigrateStatus, + InstanceMigrateStatusResponse as PropolisMigrateResponse, + InstanceMigrationStatus as PropolisMigrationStatus, InstanceState as PropolisInstanceState, InstanceStateMonitorResponse, }; use std::collections::VecDeque; @@ -32,7 +33,7 @@ use crate::common::instance::{Action as InstanceAction, InstanceStates}; #[derive(Clone, Debug)] enum MonitorChange { PropolisState(PropolisInstanceState), - MigrateStatus(PropolisMigrateStatus), + MigrateStatus(PropolisMigrateResponse), } /// A simulation of an Instance created by the external Oxide API. @@ -42,11 +43,11 @@ enum MonitorChange { /// integration tests. /// /// The simulated instance contains a fake instance state stored as a -/// [`propolis_client::api::InstanceStateMonitorResponse`]. Transition requests -/// enqueue changes to either the instance state or the migration status fields -/// of this response. When poked, the simulated instance applies the next -/// transition, translates this to an observed Propolis state, and sends it -/// off for processing. +/// [`propolis_client::types::InstanceStateMonitorResponse`]. Transition +/// requests enqueue changes to either the instance state or the migration +/// status fields of this response. When poked, the simulated instance applies +/// the next transition, translates this to an observed Propolis state, and +/// sends it off for processing. #[derive(Debug)] struct SimInstanceInner { /// The current simulated instance state. @@ -70,10 +71,10 @@ impl SimInstanceInner { self.queue.push_back(MonitorChange::PropolisState(propolis_state)); } - /// Pushes a Propolis migration status to the state change queue. - fn queue_migration_status( + /// Pushes a Propolis migration update to the state change queue. + fn queue_migration_update( &mut self, - migrate_status: PropolisMigrateStatus, + migrate_status: PropolisMigrateResponse, ) { self.queue.push_back(MonitorChange::MigrateStatus(migrate_status)) } @@ -92,22 +93,42 @@ impl SimInstanceInner { self ) }); - self.queue_migration_status(PropolisMigrateStatus { - migration_id, - state: propolis_client::types::MigrationState::Sync, - }); - self.queue_migration_status(PropolisMigrateStatus { - migration_id, - state: propolis_client::types::MigrationState::Finish, - }); - - // The state we transition to after the migration completes will depend - // on whether we are the source or destination. 
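// The simulated instance above queues monitor changes (Propolis instance
// states plus, after this change, separate inbound/outbound migration
// updates) and applies exactly one change per "poke". A standalone sketch of
// that queue-and-drain pattern; the enums below are simplified stand-ins,
// not the real propolis-client types.
use std::collections::VecDeque;

#[derive(Clone, Copy, Debug, PartialEq, Default)]
enum VmState {
    #[default]
    Starting,
    Migrating,
    Running,
}

#[derive(Clone, Copy, Debug)]
enum MigrationPhase {
    Sync,
    Finish,
}

#[derive(Clone, Copy, Debug)]
enum Change {
    State(VmState),
    // A migration source would queue outbound phases and then a graceful
    // stop instead.
    MigrationIn(MigrationPhase),
}

#[derive(Default)]
struct SimVm {
    queue: VecDeque<Change>,
    state: VmState,
}

impl SimVm {
    // Targets watch the inbound migration progress, then start running.
    fn queue_migration_as_target(&mut self) {
        self.queue.push_back(Change::MigrationIn(MigrationPhase::Sync));
        self.queue.push_back(Change::MigrationIn(MigrationPhase::Finish));
        self.queue.push_back(Change::State(VmState::Running));
    }

    // Apply exactly one queued change, mirroring a single simulated poke.
    fn poke(&mut self) {
        match self.queue.pop_front() {
            Some(Change::State(s)) => self.state = s,
            Some(Change::MigrationIn(_)) => self.state = VmState::Migrating,
            None => (),
        }
    }
}

fn main() {
    let mut vm = SimVm::default();
    vm.queue_migration_as_target();
    vm.poke(); // Sync
    vm.poke(); // Finish
    vm.poke(); // Running
    assert_eq!(vm.state, VmState::Running);
}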
+ match role { + MigrationRole::Source => { + self.queue_migration_update(PropolisMigrateResponse { + migration_in: None, + migration_out: Some(PropolisMigrationStatus { + id: migration_id, + state: propolis_client::types::MigrationState::Sync, + }), + }); + self.queue_migration_update(PropolisMigrateResponse { + migration_in: None, + migration_out: Some(PropolisMigrationStatus { + id: migration_id, + state: propolis_client::types::MigrationState::Finish, + }), + }); + self.queue_graceful_stop(); + } MigrationRole::Target => { + self.queue_migration_update(PropolisMigrateResponse { + migration_in: Some(PropolisMigrationStatus { + id: migration_id, + state: propolis_client::types::MigrationState::Sync, + }), + migration_out: None, + }); + self.queue_migration_update(PropolisMigrateResponse { + migration_in: Some(PropolisMigrationStatus { + id: migration_id, + state: propolis_client::types::MigrationState::Finish, + }), + migration_out: None, + }); self.queue_propolis_state(PropolisInstanceState::Running) } - MigrationRole::Source => self.queue_graceful_stop(), } } @@ -252,12 +273,12 @@ impl SimInstanceInner { self.last_response.state = state; } MonitorChange::MigrateStatus(status) => { - self.last_response.migration = Some(status); + self.last_response.migration = status; } } self.state.apply_propolis_observation(&ObservedPropolisState::new( - &self.state.instance(), + self.state.instance(), &self.last_response, )) } else { @@ -450,7 +471,10 @@ impl Simulatable for SimInstance { last_response: InstanceStateMonitorResponse { gen: 1, state: PropolisInstanceState::Starting, - migration: None, + migration: PropolisMigrateResponse { + migration_in: None, + migration_out: None, + }, }, queue: VecDeque::new(), destroyed: false, diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index 7ce34473e7..5b66342a1a 100644 --- a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -410,6 +410,7 @@ pub async fn run_standalone_server( vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }, external_tls: false, external_dns_servers: vec![], @@ -453,6 +454,7 @@ pub async fn run_standalone_server( vni: Vni::SERVICES_VNI, primary: true, slot: 0, + transit_ips: vec![], }, }, }); diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index f47d8a9100..9cb146531b 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -36,7 +36,10 @@ use omicron_common::api::internal::nexus::{ use omicron_common::api::internal::nexus::{ InstanceRuntimeState, VmmRuntimeState, }; -use omicron_common::api::internal::shared::RackNetworkConfig; +use omicron_common::api::internal::shared::{ + RackNetworkConfig, ResolvedVpcRoute, ResolvedVpcRouteSet, + ResolvedVpcRouteState, RouterId, RouterKind, RouterVersion, +}; use omicron_common::disk::DiskIdentity; use omicron_uuid_kinds::{GenericUuid, InstanceUuid, PropolisUuid, ZpoolUuid}; use oxnet::Ipv6Net; @@ -77,6 +80,7 @@ pub struct SledAgent { Mutex>, PropolisClient)>>, /// lists of external IPs assigned to instances pub external_ips: Mutex>>, + pub vpc_routes: Mutex>, config: Config, fake_zones: Mutex, instance_ensure_state_error: Mutex>, @@ -182,6 +186,7 @@ impl SledAgent { disk_id_to_region_ids: Mutex::new(HashMap::new()), v2p_mappings: Mutex::new(HashSet::new()), external_ips: Mutex::new(HashMap::new()), + vpc_routes: Mutex::new(HashMap::new()), mock_propolis: Mutex::new(None), config: config.clone(), fake_zones: Mutex::new(OmicronZonesConfig { @@ -360,6 +365,18 @@ impl 
SledAgent { self.map_disk_ids_to_region_ids(&vcr).await?; } + let mut routes = self.vpc_routes.lock().await; + for nic in &hardware.nics { + let my_routers = [ + RouterId { vni: nic.vni, kind: RouterKind::System }, + RouterId { vni: nic.vni, kind: RouterKind::Custom(nic.subnet) }, + ]; + + for router in my_routers { + routes.entry(router).or_default(); + } + } + Ok(instance_run_time_state) } @@ -879,4 +896,49 @@ impl SledAgent { pub async fn drop_dataset(&self, zpool_id: ZpoolUuid, dataset_id: Uuid) { self.storage.lock().await.drop_dataset(zpool_id, dataset_id) } + + pub async fn list_vpc_routes(&self) -> Vec { + let routes = self.vpc_routes.lock().await; + routes + .iter() + .map(|(k, v)| ResolvedVpcRouteState { id: *k, version: v.version }) + .collect() + } + + pub async fn set_vpc_routes(&self, new_routes: Vec) { + let mut routes = self.vpc_routes.lock().await; + for new in new_routes { + // Disregard any route information for a subnet we don't have. + let Some(old) = routes.get(&new.id) else { + continue; + }; + + // We have to handle subnet router changes, as well as + // spurious updates from multiple Nexus instances. + // If there's a UUID match, only update if vers increased, + // otherwise take the update verbatim (including loss of version). + match (old.version, new.version) { + (Some(old_vers), Some(new_vers)) + if !old_vers.is_replaced_by(&new_vers) => + { + continue; + } + _ => {} + }; + + routes.insert( + new.id, + RouteSet { version: new.version, routes: new.routes }, + ); + } + } +} + +/// Stored routes (and usage count) for a given VPC/subnet. +// NB: We aren't doing post count tracking here to unsubscribe +// from (VNI, subnet) pairs. +#[derive(Debug, Clone, Default)] +pub struct RouteSet { + pub version: Option, + pub routes: HashSet, } diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 993e5f6a94..82c16b0b8d 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -49,7 +49,8 @@ use omicron_common::api::internal::nexus::{ SledInstanceState, VmmRuntimeState, }; use omicron_common::api::internal::shared::{ - HostPortConfig, RackNetworkConfig, + HostPortConfig, RackNetworkConfig, ResolvedVpcRouteSet, + ResolvedVpcRouteState, }; use omicron_common::api::{ internal::nexus::DiskRuntimeState, internal::nexus::InstanceRuntimeState, @@ -1096,6 +1097,17 @@ impl SledAgent { self.inner.bootstore.clone() } + pub fn list_vpc_routes(&self) -> Vec { + self.inner.port_manager.vpc_routes_list() + } + + pub fn set_vpc_routes( + &self, + routes: Vec, + ) -> Result<(), Error> { + self.inner.port_manager.vpc_routes_ensure(routes).map_err(Error::from) + } + /// Return the metric producer registry. 
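// The simulated `set_vpc_routes` above only replaces a stored route set when
// the incoming version actually supersedes the old one, so stale updates from
// other Nexus instances are ignored while untagged updates are taken
// verbatim. A standalone sketch of that version-gated map update; `Version`
// and its `is_replaced_by` check are simplified stand-ins for the real
// `RouterVersion` semantics.
use std::collections::{HashMap, HashSet};

#[derive(Clone, Copy, Debug, PartialEq)]
struct Version {
    router_id: u64,
    generation: u64,
}

impl Version {
    // A newer generation of the same router replaces us; a different router
    // id replaces us unconditionally (e.g. the subnet was re-parented).
    fn is_replaced_by(&self, other: &Version) -> bool {
        self.router_id != other.router_id || other.generation > self.generation
    }
}

#[derive(Default)]
struct RouteSet {
    version: Option<Version>,
    routes: HashSet<&'static str>,
}

fn apply_update(
    table: &mut HashMap<u64, RouteSet>,
    id: u64,
    version: Option<Version>,
    routes: HashSet<&'static str>,
) {
    // Ignore updates for subnets this sled is not tracking.
    let Some(old) = table.get(&id) else { return };
    // Keep the existing set unless the new version replaces it; updates
    // missing a version on either side are taken verbatim.
    if let (Some(old_v), Some(new_v)) = (old.version, version) {
        if !old_v.is_replaced_by(&new_v) {
            return;
        }
    }
    table.insert(id, RouteSet { version, routes });
}

fn main() {
    let mut table = HashMap::new();
    table.insert(1u64, RouteSet::default());
    let v1 = Version { router_id: 9, generation: 1 };
    apply_update(&mut table, 1, Some(v1), ["10.0.0.0/16 -> vpc"].into());
    let stale = Version { router_id: 9, generation: 0 };
    apply_update(&mut table, 1, Some(stale), HashSet::new());
    assert_eq!(table[&1].routes.len(), 1); // the stale update was ignored
}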
pub fn metrics_registry(&self) -> &ProducerRegistry { self.inner.metrics_manager.registry() @@ -1132,7 +1144,7 @@ impl SledAgent { let mut disks = vec![]; let mut zpools = vec![]; let all_disks = self.storage().get_latest_disks().await; - for (identity, variant, slot) in all_disks.iter_all() { + for (identity, variant, slot, _firmware) in all_disks.iter_all() { disks.push(crate::params::InventoryDisk { identity: identity.clone(), variant, diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled10.json b/sled-agent/tests/old-service-ledgers/rack2-sled10.json deleted file mode 100644 index b92a2bf4a0..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack2-sled10.json +++ /dev/null @@ -1 +0,0 @@ -{"generation":4,"requests":[{"zone":{"id":"04eef8aa-055c-42ab-bdb6-c982f63c9be0","zone_type":"crucible","addresses":["fd00:1122:3344:107::d"],"dataset":{"id":"04eef8aa-055c-42ab-bdb6-c982f63c9be0","name":{"pool_name":"oxp_845ff39a-3205-416f-8bda-e35829107c8a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::d]:32345"},"services":[{"id":"04eef8aa-055c-42ab-bdb6-c982f63c9be0","details":{"type":"crucible","address":"[fd00:1122:3344:107::d]:32345"}}]},"root":"/pool/ext/43efdd6d-7419-437a-a282-fc45bfafd042/crypt/zone"},{"zone":{"id":"8568c997-fbbb-46a8-8549-b78284530ffc","zone_type":"crucible","addresses":["fd00:1122:3344:107::5"],"dataset":{"id":"8568c997-fbbb-46a8-8549-b78284530ffc","name":{"pool_name":"oxp_0e485ad3-04e6-404b-b619-87d4fea9f5ae","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::5]:32345"},"services":[{"id":"8568c997-fbbb-46a8-8549-b78284530ffc","details":{"type":"crucible","address":"[fd00:1122:3344:107::5]:32345"}}]},"root":"/pool/ext/9b61d4b2-66f6-459f-86f4-13d0b8c5d6cf/crypt/zone"},{"zone":{"id":"6cec1d60-5c1a-4c1b-9632-2b4bc76bd37c","zone_type":"crucible","addresses":["fd00:1122:3344:107::e"],"dataset":{"id":"6cec1d60-5c1a-4c1b-9632-2b4bc76bd37c","name":{"pool_name":"oxp_62a4c68a-2073-42d0-8e49-01f5e8b90cd4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::e]:32345"},"services":[{"id":"6cec1d60-5c1a-4c1b-9632-2b4bc76bd37c","details":{"type":"crucible","address":"[fd00:1122:3344:107::e]:32345"}}]},"root":"/pool/ext/845ff39a-3205-416f-8bda-e35829107c8a/crypt/zone"},{"zone":{"id":"aa646c82-c6d7-4d0c-8401-150130927759","zone_type":"clickhouse","addresses":["fd00:1122:3344:107::4"],"dataset":{"id":"aa646c82-c6d7-4d0c-8401-150130927759","name":{"pool_name":"oxp_0e485ad3-04e6-404b-b619-87d4fea9f5ae","kind":{"type":"clickhouse"}},"service_address":"[fd00:1122:3344:107::4]:8123"},"services":[{"id":"aa646c82-c6d7-4d0c-8401-150130927759","details":{"type":"clickhouse","address":"[fd00:1122:3344:107::4]:8123"}}]},"root":"/pool/ext/fd82dcc7-00dd-4d01-826a-937a7d8238fb/crypt/zone"},{"zone":{"id":"2f294ca1-7a4f-468f-8966-2b7915804729","zone_type":"crucible","addresses":["fd00:1122:3344:107::7"],"dataset":{"id":"2f294ca1-7a4f-468f-8966-2b7915804729","name":{"pool_name":"oxp_43efdd6d-7419-437a-a282-fc45bfafd042","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::7]:32345"},"services":[{"id":"2f294ca1-7a4f-468f-8966-2b7915804729","details":{"type":"crucible","address":"[fd00:1122:3344:107::7]:32345"}}]},"root":"/pool/ext/fd82dcc7-00dd-4d01-826a-937a7d8238fb/crypt/zone"},{"zone":{"id":"1a77bd1d-4fd4-4d6c-a105-17f942d94ba6","zone_type":"crucible","addresses":["fd00:1122:3344:107::c"],"dataset":{"id":"1a77bd1d-4fd4-4d6c-a105-17f942d94ba6","name":{"pool_name":"oxp_b6bdfdaf-9c0d-4b74-926c-49ff3ed05562","kind":{"type":"crucible
"}},"service_address":"[fd00:1122:3344:107::c]:32345"},"services":[{"id":"1a77bd1d-4fd4-4d6c-a105-17f942d94ba6","details":{"type":"crucible","address":"[fd00:1122:3344:107::c]:32345"}}]},"root":"/pool/ext/9b61d4b2-66f6-459f-86f4-13d0b8c5d6cf/crypt/zone"},{"zone":{"id":"f65a6668-1aea-4deb-81ed-191fbe469328","zone_type":"crucible","addresses":["fd00:1122:3344:107::9"],"dataset":{"id":"f65a6668-1aea-4deb-81ed-191fbe469328","name":{"pool_name":"oxp_9b61d4b2-66f6-459f-86f4-13d0b8c5d6cf","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::9]:32345"},"services":[{"id":"f65a6668-1aea-4deb-81ed-191fbe469328","details":{"type":"crucible","address":"[fd00:1122:3344:107::9]:32345"}}]},"root":"/pool/ext/d0584f4a-20ba-436d-a75b-7709e80deb79/crypt/zone"},{"zone":{"id":"ee8bce67-8f8e-4221-97b0-85f1860d66d0","zone_type":"crucible","addresses":["fd00:1122:3344:107::8"],"dataset":{"id":"ee8bce67-8f8e-4221-97b0-85f1860d66d0","name":{"pool_name":"oxp_b252b176-3974-436a-915b-60382b21eb76","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::8]:32345"},"services":[{"id":"ee8bce67-8f8e-4221-97b0-85f1860d66d0","details":{"type":"crucible","address":"[fd00:1122:3344:107::8]:32345"}}]},"root":"/pool/ext/b6bdfdaf-9c0d-4b74-926c-49ff3ed05562/crypt/zone"},{"zone":{"id":"cf3b2d54-5e36-4c93-b44f-8bf36ac98071","zone_type":"crucible","addresses":["fd00:1122:3344:107::b"],"dataset":{"id":"cf3b2d54-5e36-4c93-b44f-8bf36ac98071","name":{"pool_name":"oxp_d0584f4a-20ba-436d-a75b-7709e80deb79","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::b]:32345"},"services":[{"id":"cf3b2d54-5e36-4c93-b44f-8bf36ac98071","details":{"type":"crucible","address":"[fd00:1122:3344:107::b]:32345"}}]},"root":"/pool/ext/4c157f35-865d-4310-9d81-c6259cb69293/crypt/zone"},{"zone":{"id":"5c8c244c-00dc-4b16-aa17-6d9eb4827fab","zone_type":"crucible","addresses":["fd00:1122:3344:107::a"],"dataset":{"id":"5c8c244c-00dc-4b16-aa17-6d9eb4827fab","name":{"pool_name":"oxp_4c157f35-865d-4310-9d81-c6259cb69293","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::a]:32345"},"services":[{"id":"5c8c244c-00dc-4b16-aa17-6d9eb4827fab","details":{"type":"crucible","address":"[fd00:1122:3344:107::a]:32345"}}]},"root":"/pool/ext/845ff39a-3205-416f-8bda-e35829107c8a/crypt/zone"},{"zone":{"id":"7d5e942b-926c-442d-937a-76cc4aa72bf3","zone_type":"crucible","addresses":["fd00:1122:3344:107::6"],"dataset":{"id":"7d5e942b-926c-442d-937a-76cc4aa72bf3","name":{"pool_name":"oxp_fd82dcc7-00dd-4d01-826a-937a7d8238fb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::6]:32345"},"services":[{"id":"7d5e942b-926c-442d-937a-76cc4aa72bf3","details":{"type":"crucible","address":"[fd00:1122:3344:107::6]:32345"}}]},"root":"/pool/ext/b252b176-3974-436a-915b-60382b21eb76/crypt/zone"},{"zone":{"id":"a3628a56-6f85-43b5-be50-71d8f0e04877","zone_type":"cockroach_db","addresses":["fd00:1122:3344:107::3"],"dataset":{"id":"a3628a56-6f85-43b5-be50-71d8f0e04877","name":{"pool_name":"oxp_0e485ad3-04e6-404b-b619-87d4fea9f5ae","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:107::3]:32221"},"services":[{"id":"a3628a56-6f85-43b5-be50-71d8f0e04877","details":{"type":"cockroach_db","address":"[fd00:1122:3344:107::3]:32221"}}]},"root":"/pool/ext/4c157f35-865d-4310-9d81-c6259cb69293/crypt/zone"},{"zone":{"id":"7529be1c-ca8b-441a-89aa-37166cc450df","zone_type":"ntp","addresses":["fd00:1122:3344:107::f"],"dataset":null,"services":[{"id":"7529be1c-ca8b-441a-89aa-37166cc450df","details":{"type":"internal_ntp","address
":"[fd00:1122:3344:107::f]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/fd82dcc7-00dd-4d01-826a-937a7d8238fb/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled11.json b/sled-agent/tests/old-service-ledgers/rack2-sled11.json deleted file mode 100644 index 3833bed5c9..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack2-sled11.json +++ /dev/null @@ -1 +0,0 @@ -{"generation":4,"requests":[{"zone":{"id":"605be8b9-c652-4a5f-94ca-068ec7a39472","zone_type":"crucible","addresses":["fd00:1122:3344:106::a"],"dataset":{"id":"605be8b9-c652-4a5f-94ca-068ec7a39472","name":{"pool_name":"oxp_cf14d1b9-b4db-4594-b3ab-a9957e770ce9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::a]:32345"},"services":[{"id":"605be8b9-c652-4a5f-94ca-068ec7a39472","details":{"type":"crucible","address":"[fd00:1122:3344:106::a]:32345"}}]},"root":"/pool/ext/cf5f8849-0c5a-475b-8683-6d17da88d1d1/crypt/zone"},{"zone":{"id":"af8a8712-457c-4ea7-a8b6-aecb04761c1b","zone_type":"crucible","addresses":["fd00:1122:3344:106::9"],"dataset":{"id":"af8a8712-457c-4ea7-a8b6-aecb04761c1b","name":{"pool_name":"oxp_cf5f8849-0c5a-475b-8683-6d17da88d1d1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::9]:32345"},"services":[{"id":"af8a8712-457c-4ea7-a8b6-aecb04761c1b","details":{"type":"crucible","address":"[fd00:1122:3344:106::9]:32345"}}]},"root":"/pool/ext/7f778610-7328-4554-98f6-b17f74f551c7/crypt/zone"},{"zone":{"id":"0022703b-dcfc-44d4-897a-b42f6f53b433","zone_type":"crucible","addresses":["fd00:1122:3344:106::c"],"dataset":{"id":"0022703b-dcfc-44d4-897a-b42f6f53b433","name":{"pool_name":"oxp_025725fa-9e40-4b46-b018-c420408394ef","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::c]:32345"},"services":[{"id":"0022703b-dcfc-44d4-897a-b42f6f53b433","details":{"type":"crucible","address":"[fd00:1122:3344:106::c]:32345"}}]},"root":"/pool/ext/025725fa-9e40-4b46-b018-c420408394ef/crypt/zone"},{"zone":{"id":"fffddf56-10ca-4b62-9be3-5b3764a5f682","zone_type":"crucible","addresses":["fd00:1122:3344:106::d"],"dataset":{"id":"fffddf56-10ca-4b62-9be3-5b3764a5f682","name":{"pool_name":"oxp_4d2f5aaf-eb14-4b1e-aa99-ae38ec844605","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::d]:32345"},"services":[{"id":"fffddf56-10ca-4b62-9be3-5b3764a5f682","details":{"type":"crucible","address":"[fd00:1122:3344:106::d]:32345"}}]},"root":"/pool/ext/834c9aad-c53b-4357-bc3f-f422efa63848/crypt/zone"},{"zone":{"id":"9b8194ee-917d-4abc-a55c-94cea6cdaea1","zone_type":"crucible","addresses":["fd00:1122:3344:106::6"],"dataset":{"id":"9b8194ee-917d-4abc-a55c-94cea6cdaea1","name":{"pool_name":"oxp_d7665e0d-9354-4341-a76f-965d7c49f277","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::6]:32345"},"services":[{"id":"9b8194ee-917d-4abc-a55c-94cea6cdaea1","details":{"type":"crucible","address":"[fd00:1122:3344:106::6]:32345"}}]},"root":"/pool/ext/cf5f8849-0c5a-475b-8683-6d17da88d1d1/crypt/zone"},{"zone":{"id":"b369e133-485c-4d98-8fee-83542d1fd94d","zone_type":"crucible","addresses":["fd00:1122:3344:106::4"],"dataset":{"id":"b369e133-485c-4d98-8fee-83542d1fd94d","name":{"pool_name":"oxp_4366f80d-3902-4b93-8f2d-380008e805fc","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::4]:32345"},
"services":[{"id":"b369e133-485c-4d98-8fee-83542d1fd94d","details":{"type":"crucible","address":"[fd00:1122:3344:106::4]:32345"}}]},"root":"/pool/ext/025725fa-9e40-4b46-b018-c420408394ef/crypt/zone"},{"zone":{"id":"edd99650-5df1-4241-815d-253e4ef2399c","zone_type":"external_dns","addresses":["fd00:1122:3344:106::3"],"dataset":{"id":"edd99650-5df1-4241-815d-253e4ef2399c","name":{"pool_name":"oxp_4366f80d-3902-4b93-8f2d-380008e805fc","kind":{"type":"external_dns"}},"service_address":"[fd00:1122:3344:106::3]:5353"},"services":[{"id":"edd99650-5df1-4241-815d-253e4ef2399c","details":{"type":"external_dns","http_address":"[fd00:1122:3344:106::3]:5353","dns_address":"172.20.26.1:53","nic":{"id":"99b759fc-8e2e-44b7-aca8-93c3b201974d","kind":{"type":"service","id":"edd99650-5df1-4241-815d-253e4ef2399c"},"name":"external-dns-edd99650-5df1-4241-815d-253e4ef2399c","ip":"172.30.1.5","mac":"A8:40:25:FF:B0:9C","subnet":"172.30.1.0/24","vni":100,"primary":true,"slot":0}}}]},"root":"/pool/ext/7f778610-7328-4554-98f6-b17f74f551c7/crypt/zone"},{"zone":{"id":"46d1afcc-cc3f-4b17-aafc-054dd4862d15","zone_type":"crucible","addresses":["fd00:1122:3344:106::5"],"dataset":{"id":"46d1afcc-cc3f-4b17-aafc-054dd4862d15","name":{"pool_name":"oxp_7f778610-7328-4554-98f6-b17f74f551c7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::5]:32345"},"services":[{"id":"46d1afcc-cc3f-4b17-aafc-054dd4862d15","details":{"type":"crucible","address":"[fd00:1122:3344:106::5]:32345"}}]},"root":"/pool/ext/cf5f8849-0c5a-475b-8683-6d17da88d1d1/crypt/zone"},{"zone":{"id":"12afe1c3-bfe6-4278-8240-91d401347d36","zone_type":"crucible","addresses":["fd00:1122:3344:106::8"],"dataset":{"id":"12afe1c3-bfe6-4278-8240-91d401347d36","name":{"pool_name":"oxp_534bcd4b-502f-4109-af6e-4b28a22c20f1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::8]:32345"},"services":[{"id":"12afe1c3-bfe6-4278-8240-91d401347d36","details":{"type":"crucible","address":"[fd00:1122:3344:106::8]:32345"}}]},"root":"/pool/ext/4366f80d-3902-4b93-8f2d-380008e805fc/crypt/zone"},{"zone":{"id":"c33b5912-9985-43ed-98f2-41297e2b796a","zone_type":"crucible","addresses":["fd00:1122:3344:106::b"],"dataset":{"id":"c33b5912-9985-43ed-98f2-41297e2b796a","name":{"pool_name":"oxp_834c9aad-c53b-4357-bc3f-f422efa63848","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::b]:32345"},"services":[{"id":"c33b5912-9985-43ed-98f2-41297e2b796a","details":{"type":"crucible","address":"[fd00:1122:3344:106::b]:32345"}}]},"root":"/pool/ext/d7665e0d-9354-4341-a76f-965d7c49f277/crypt/zone"},{"zone":{"id":"65b3db59-9361-4100-9cee-04e32a8c67d3","zone_type":"crucible","addresses":["fd00:1122:3344:106::7"],"dataset":{"id":"65b3db59-9361-4100-9cee-04e32a8c67d3","name":{"pool_name":"oxp_32b5303f-f667-4345-84d2-c7eec63b91b2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::7]:32345"},"services":[{"id":"65b3db59-9361-4100-9cee-04e32a8c67d3","details":{"type":"crucible","address":"[fd00:1122:3344:106::7]:32345"}}]},"root":"/pool/ext/d7665e0d-9354-4341-a76f-965d7c49f277/crypt/zone"},{"zone":{"id":"82500cc9-f33d-4d59-9e6e-d70ea6133077","zone_type":"ntp","addresses":["fd00:1122:3344:106::e"],"dataset":null,"services":[{"id":"82500cc9-f33d-4d59-9e6e-d70ea6133077","details":{"type":"internal_ntp","address":"[fd00:1122:3344:106::e]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2
::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/cf14d1b9-b4db-4594-b3ab-a9957e770ce9/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled12.json b/sled-agent/tests/old-service-ledgers/rack2-sled12.json deleted file mode 100644 index 5126c007f3..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack2-sled12.json +++ /dev/null @@ -1 +0,0 @@ -{"generation":5,"requests":[{"zone":{"id":"a76b3357-b690-43b8-8352-3300568ffc2b","zone_type":"crucible","addresses":["fd00:1122:3344:104::a"],"dataset":{"id":"a76b3357-b690-43b8-8352-3300568ffc2b","name":{"pool_name":"oxp_05715ad8-59a1-44ab-ad5f-0cdffb46baab","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::a]:32345"},"services":[{"id":"a76b3357-b690-43b8-8352-3300568ffc2b","details":{"type":"crucible","address":"[fd00:1122:3344:104::a]:32345"}}]},"root":"/pool/ext/2ec2a731-3340-4777-b1bb-4a906c598174/crypt/zone"},{"zone":{"id":"8d202759-ca06-4383-b50f-7f3ec4062bf7","zone_type":"crucible","addresses":["fd00:1122:3344:104::4"],"dataset":{"id":"8d202759-ca06-4383-b50f-7f3ec4062bf7","name":{"pool_name":"oxp_56e32a8f-0877-4437-9cab-94a4928b1495","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::4]:32345"},"services":[{"id":"8d202759-ca06-4383-b50f-7f3ec4062bf7","details":{"type":"crucible","address":"[fd00:1122:3344:104::4]:32345"}}]},"root":"/pool/ext/613b58fc-5a80-42dc-a61c-b143cf220fb5/crypt/zone"},{"zone":{"id":"fcdda266-fc6a-4518-89db-aec007a4b682","zone_type":"crucible","addresses":["fd00:1122:3344:104::b"],"dataset":{"id":"fcdda266-fc6a-4518-89db-aec007a4b682","name":{"pool_name":"oxp_7e1293ad-b903-4054-aeae-2182d5e4a785","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::b]:32345"},"services":[{"id":"fcdda266-fc6a-4518-89db-aec007a4b682","details":{"type":"crucible","address":"[fd00:1122:3344:104::b]:32345"}}]},"root":"/pool/ext/416fd29e-d3b5-4fdf-8101-d0d163fa0706/crypt/zone"},{"zone":{"id":"167cf6a2-ec51-4de2-bc6c-7785bbc0e436","zone_type":"crucible","addresses":["fd00:1122:3344:104::c"],"dataset":{"id":"167cf6a2-ec51-4de2-bc6c-7785bbc0e436","name":{"pool_name":"oxp_f96c8d49-fdf7-4bd6-84f6-c282202d1abc","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::c]:32345"},"services":[{"id":"167cf6a2-ec51-4de2-bc6c-7785bbc0e436","details":{"type":"crucible","address":"[fd00:1122:3344:104::c]:32345"}}]},"root":"/pool/ext/56e32a8f-0877-4437-9cab-94a4928b1495/crypt/zone"},{"zone":{"id":"c6fde82d-8dae-4ef0-b557-6c3d094d9454","zone_type":"crucible","addresses":["fd00:1122:3344:104::9"],"dataset":{"id":"c6fde82d-8dae-4ef0-b557-6c3d094d9454","name":{"pool_name":"oxp_416fd29e-d3b5-4fdf-8101-d0d163fa0706","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::9]:32345"},"services":[{"id":"c6fde82d-8dae-4ef0-b557-6c3d094d9454","details":{"type":"crucible","address":"[fd00:1122:3344:104::9]:32345"}}]},"root":"/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone"},{"zone":{"id":"650f5da7-86a0-4ade-af0f-bc96e021ded0","zone_type":"crucible","addresses":["fd00:1122:3344:104::5"],"dataset":{"id":"650f5da7-86a0-4ade-af0f-bc96e021ded0","name":{"pool_name":"oxp_b4a71d3d-1ecd-418a-9a52-8d118f82082b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::5]:32345"},"services":[{"id":"650f5da7-86a0-4ade-af0f-bc96e021ded0","details":{"type":"crucible","address":"[fd00:1122:3344:104::5]:32345"}}]},"root":"/pool/ext/613b58fc-5a80-42dc-a61c-b143cf220fb5/crypt/zone"},{"zone":{"id":"7ce9a2c5-2d37-4188-b7b5-a9db81939
6c3","zone_type":"crucible","addresses":["fd00:1122:3344:104::d"],"dataset":{"id":"7ce9a2c5-2d37-4188-b7b5-a9db819396c3","name":{"pool_name":"oxp_c87d16b8-e814-4159-8562-f8d7fdd19d13","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::d]:32345"},"services":[{"id":"7ce9a2c5-2d37-4188-b7b5-a9db819396c3","details":{"type":"crucible","address":"[fd00:1122:3344:104::d]:32345"}}]},"root":"/pool/ext/416fd29e-d3b5-4fdf-8101-d0d163fa0706/crypt/zone"},{"zone":{"id":"23e1cf01-70ab-422f-997b-6216158965c3","zone_type":"crucible","addresses":["fd00:1122:3344:104::8"],"dataset":{"id":"23e1cf01-70ab-422f-997b-6216158965c3","name":{"pool_name":"oxp_3af01cc4-1f16-47d9-a489-abafcb91c2db","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::8]:32345"},"services":[{"id":"23e1cf01-70ab-422f-997b-6216158965c3","details":{"type":"crucible","address":"[fd00:1122:3344:104::8]:32345"}}]},"root":"/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone"},{"zone":{"id":"50209816-89fb-48ed-9595-16899d114844","zone_type":"crucible","addresses":["fd00:1122:3344:104::6"],"dataset":{"id":"50209816-89fb-48ed-9595-16899d114844","name":{"pool_name":"oxp_2ec2a731-3340-4777-b1bb-4a906c598174","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::6]:32345"},"services":[{"id":"50209816-89fb-48ed-9595-16899d114844","details":{"type":"crucible","address":"[fd00:1122:3344:104::6]:32345"}}]},"root":"/pool/ext/416fd29e-d3b5-4fdf-8101-d0d163fa0706/crypt/zone"},{"zone":{"id":"20b100d0-84c3-4119-aa9b-0c632b0b6a3a","zone_type":"nexus","addresses":["fd00:1122:3344:104::3"],"dataset":null,"services":[{"id":"20b100d0-84c3-4119-aa9b-0c632b0b6a3a","details":{"type":"nexus","internal_address":"[fd00:1122:3344:104::3]:12221","external_ip":"172.20.26.4","nic":{"id":"364b0ecd-bf08-4cac-a993-bbf4a70564c7","kind":{"type":"service","id":"20b100d0-84c3-4119-aa9b-0c632b0b6a3a"},"name":"nexus-20b100d0-84c3-4119-aa9b-0c632b0b6a3a","ip":"172.30.2.6","mac":"A8:40:25:FF:B4:C1","subnet":"172.30.2.0/24","vni":100,"primary":true,"slot":0},"external_tls":true,"external_dns_servers":["1.1.1.1","9.9.9.9"]}}]},"root":"/pool/ext/c87d16b8-e814-4159-8562-f8d7fdd19d13/crypt/zone"},{"zone":{"id":"8bc0f29e-0c20-437e-b8ca-7b9844acda22","zone_type":"crucible","addresses":["fd00:1122:3344:104::7"],"dataset":{"id":"8bc0f29e-0c20-437e-b8ca-7b9844acda22","name":{"pool_name":"oxp_613b58fc-5a80-42dc-a61c-b143cf220fb5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::7]:32345"},"services":[{"id":"8bc0f29e-0c20-437e-b8ca-7b9844acda22","details":{"type":"crucible","address":"[fd00:1122:3344:104::7]:32345"}}]},"root":"/pool/ext/56e32a8f-0877-4437-9cab-94a4928b1495/crypt/zone"},{"zone":{"id":"c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55","zone_type":"ntp","addresses":["fd00:1122:3344:104::e"],"dataset":null,"services":[{"id":"c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55","details":{"type":"boundary_ntp","address":"[fd00:1122:3344:104::e]:123","ntp_servers":["ntp.eng.oxide.computer"],"dns_servers":["1.1.1.1","9.9.9.9"],"domain":null,"nic":{"id":"a4b9bacf-6c04-431a-81ad-9bf0302af96e","kind":{"type":"service","id":"c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55"},"name":"ntp-c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55","ip":"172.30.3.5","mac":"A8:40:25:FF:B2:52","subnet":"172.30.3.0/24","vni":100,"primary":true,"slot":0},"snat_cfg":{"ip":"172.20.26.6","first_port":0,"last_port":16383}}}]},"root":"/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone"},{"zone":{"id":"51c9ad09-7814-4643-8ad4-689ccbe53fbd","zone_type":"internal_dns","addresses":["fd00:1
122:3344:1::1"],"dataset":{"id":"51c9ad09-7814-4643-8ad4-689ccbe53fbd","name":{"pool_name":"oxp_56e32a8f-0877-4437-9cab-94a4928b1495","kind":{"type":"internal_dns"}},"service_address":"[fd00:1122:3344:1::1]:5353"},"services":[{"id":"51c9ad09-7814-4643-8ad4-689ccbe53fbd","details":{"type":"internal_dns","http_address":"[fd00:1122:3344:1::1]:5353","dns_address":"[fd00:1122:3344:1::1]:53","gz_address":"fd00:1122:3344:1::2","gz_address_index":0}}]},"root":"/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled14.json b/sled-agent/tests/old-service-ledgers/rack2-sled14.json deleted file mode 100644 index 421e21d84d..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack2-sled14.json +++ /dev/null @@ -1 +0,0 @@ -{"generation":4,"requests":[{"zone":{"id":"ee8b2cfa-87fe-46a6-98ef-23640b80a968","zone_type":"crucible","addresses":["fd00:1122:3344:10b::d"],"dataset":{"id":"ee8b2cfa-87fe-46a6-98ef-23640b80a968","name":{"pool_name":"oxp_4a624324-003a-4255-98e8-546a90b5b7fa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::d]:32345"},"services":[{"id":"ee8b2cfa-87fe-46a6-98ef-23640b80a968","details":{"type":"crucible","address":"[fd00:1122:3344:10b::d]:32345"}}]},"root":"/pool/ext/6b9ec5f1-859f-459c-9c06-6a51ba87786f/crypt/zone"},{"zone":{"id":"9228f8ca-2a83-439f-9cb7-f2801b5fea27","zone_type":"crucible","addresses":["fd00:1122:3344:10b::6"],"dataset":{"id":"9228f8ca-2a83-439f-9cb7-f2801b5fea27","name":{"pool_name":"oxp_6b9ec5f1-859f-459c-9c06-6a51ba87786f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::6]:32345"},"services":[{"id":"9228f8ca-2a83-439f-9cb7-f2801b5fea27","details":{"type":"crucible","address":"[fd00:1122:3344:10b::6]:32345"}}]},"root":"/pool/ext/6b9ec5f1-859f-459c-9c06-6a51ba87786f/crypt/zone"},{"zone":{"id":"ee44cdde-7ac9-4469-9f1d-e8bcfeb5cc46","zone_type":"crucible","addresses":["fd00:1122:3344:10b::e"],"dataset":{"id":"ee44cdde-7ac9-4469-9f1d-e8bcfeb5cc46","name":{"pool_name":"oxp_11b02ce7-7e50-486f-86c2-de8af9575a45","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::e]:32345"},"services":[{"id":"ee44cdde-7ac9-4469-9f1d-e8bcfeb5cc46","details":{"type":"crucible","address":"[fd00:1122:3344:10b::e]:32345"}}]},"root":"/pool/ext/11b02ce7-7e50-486f-86c2-de8af9575a45/crypt/zone"},{"zone":{"id":"96bac0b1-8b34-4c81-9e76-6404d2c37630","zone_type":"crucible_pantry","addresses":["fd00:1122:3344:10b::4"],"dataset":null,"services":[{"id":"96bac0b1-8b34-4c81-9e76-6404d2c37630","details":{"type":"crucible_pantry","address":"[fd00:1122:3344:10b::4]:17000"}}]},"root":"/pool/ext/350b2814-7b7f-40f1-9bf6-9818a1ef49bb/crypt/zone"},{"zone":{"id":"d4e1e554-7b98-4413-809e-4a42561c3d0c","zone_type":"crucible","addresses":["fd00:1122:3344:10b::a"],"dataset":{"id":"d4e1e554-7b98-4413-809e-4a42561c3d0c","name":{"pool_name":"oxp_e6d2fe1d-c74d-40cd-8fae-bc7d06bdaac8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::a]:32345"},"services":[{"id":"d4e1e554-7b98-4413-809e-4a42561c3d0c","details":{"type":"crucible","address":"[fd00:1122:3344:10b::a]:32345"}}]},"root":"/pool/ext/6b9ec5f1-859f-459c-9c06-6a51ba87786f/crypt/zone"},{"zone":{"id":"1dd69b02-a032-46c3-8e2a-5012e8314455","zone_type":"crucible","addresses":["fd00:1122:3344:10b::b"],"dataset":{"id":"1dd69b02-a032-46c3-8e2a-5012e8314455","name":{"pool_name":"oxp_350b2814-7b7f-40f1-9bf6-9818a1ef49bb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::b]:32345"},"services":[{
"id":"1dd69b02-a032-46c3-8e2a-5012e8314455","details":{"type":"crucible","address":"[fd00:1122:3344:10b::b]:32345"}}]},"root":"/pool/ext/350b2814-7b7f-40f1-9bf6-9818a1ef49bb/crypt/zone"},{"zone":{"id":"921f7752-d2f3-40df-a739-5cb1390abc2c","zone_type":"crucible","addresses":["fd00:1122:3344:10b::8"],"dataset":{"id":"921f7752-d2f3-40df-a739-5cb1390abc2c","name":{"pool_name":"oxp_2d1ebe24-6deb-4f81-8450-6842de28126c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::8]:32345"},"services":[{"id":"921f7752-d2f3-40df-a739-5cb1390abc2c","details":{"type":"crucible","address":"[fd00:1122:3344:10b::8]:32345"}}]},"root":"/pool/ext/91ea7bb6-2be7-4498-9b0d-a0521509ec00/crypt/zone"},{"zone":{"id":"609b25e8-9750-4308-ae6f-7202907a3675","zone_type":"crucible","addresses":["fd00:1122:3344:10b::9"],"dataset":{"id":"609b25e8-9750-4308-ae6f-7202907a3675","name":{"pool_name":"oxp_91ea7bb6-2be7-4498-9b0d-a0521509ec00","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::9]:32345"},"services":[{"id":"609b25e8-9750-4308-ae6f-7202907a3675","details":{"type":"crucible","address":"[fd00:1122:3344:10b::9]:32345"}}]},"root":"/pool/ext/2d1ebe24-6deb-4f81-8450-6842de28126c/crypt/zone"},{"zone":{"id":"a232eba2-e94f-4592-a5a6-ec23f9be3296","zone_type":"crucible","addresses":["fd00:1122:3344:10b::5"],"dataset":{"id":"a232eba2-e94f-4592-a5a6-ec23f9be3296","name":{"pool_name":"oxp_e12f29b8-1ab8-431e-bc96-1c1298947980","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::5]:32345"},"services":[{"id":"a232eba2-e94f-4592-a5a6-ec23f9be3296","details":{"type":"crucible","address":"[fd00:1122:3344:10b::5]:32345"}}]},"root":"/pool/ext/021afd19-2f87-4def-9284-ab7add1dd6ae/crypt/zone"},{"zone":{"id":"800d1758-9312-4b1a-8f02-dc6d644c2a9b","zone_type":"crucible","addresses":["fd00:1122:3344:10b::c"],"dataset":{"id":"800d1758-9312-4b1a-8f02-dc6d644c2a9b","name":{"pool_name":"oxp_b6932bb0-bab8-4876-914a-9c75a600e794","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::c]:32345"},"services":[{"id":"800d1758-9312-4b1a-8f02-dc6d644c2a9b","details":{"type":"crucible","address":"[fd00:1122:3344:10b::c]:32345"}}]},"root":"/pool/ext/b6932bb0-bab8-4876-914a-9c75a600e794/crypt/zone"},{"zone":{"id":"668a4d4a-96dc-4b45-866b-bed3d64c26ec","zone_type":"crucible","addresses":["fd00:1122:3344:10b::7"],"dataset":{"id":"668a4d4a-96dc-4b45-866b-bed3d64c26ec","name":{"pool_name":"oxp_021afd19-2f87-4def-9284-ab7add1dd6ae","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::7]:32345"},"services":[{"id":"668a4d4a-96dc-4b45-866b-bed3d64c26ec","details":{"type":"crucible","address":"[fd00:1122:3344:10b::7]:32345"}}]},"root":"/pool/ext/91ea7bb6-2be7-4498-9b0d-a0521509ec00/crypt/zone"},{"zone":{"id":"8bbea076-ff60-4330-8302-383e18140ef3","zone_type":"cockroach_db","addresses":["fd00:1122:3344:10b::3"],"dataset":{"id":"8bbea076-ff60-4330-8302-383e18140ef3","name":{"pool_name":"oxp_e12f29b8-1ab8-431e-bc96-1c1298947980","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:10b::3]:32221"},"services":[{"id":"8bbea076-ff60-4330-8302-383e18140ef3","details":{"type":"cockroach_db","address":"[fd00:1122:3344:10b::3]:32221"}}]},"root":"/pool/ext/4a624324-003a-4255-98e8-546a90b5b7fa/crypt/zone"},{"zone":{"id":"3ccea933-89f2-4ce5-8367-efb0afeffe97","zone_type":"ntp","addresses":["fd00:1122:3344:10b::f"],"dataset":null,"services":[{"id":"3ccea933-89f2-4ce5-8367-efb0afeffe97","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10b::f]:123","ntp_servers":["c3ec3d1a-3172-4d36-b
fd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/4a624324-003a-4255-98e8-546a90b5b7fa/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled16.json b/sled-agent/tests/old-service-ledgers/rack2-sled16.json deleted file mode 100644 index c928e004b2..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack2-sled16.json +++ /dev/null @@ -1 +0,0 @@ -{"generation":4,"requests":[{"zone":{"id":"b12aa520-a769-4eac-b56b-09960550a831","zone_type":"crucible","addresses":["fd00:1122:3344:108::7"],"dataset":{"id":"b12aa520-a769-4eac-b56b-09960550a831","name":{"pool_name":"oxp_34dadf3f-f60c-4acc-b82b-4b0c82224222","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::7]:32345"},"services":[{"id":"b12aa520-a769-4eac-b56b-09960550a831","details":{"type":"crucible","address":"[fd00:1122:3344:108::7]:32345"}}]},"root":"/pool/ext/8be8c577-23ac-452e-a205-6d9c95088f61/crypt/zone"},{"zone":{"id":"9bdc40ee-ccba-4d18-9efb-a30596e2d290","zone_type":"crucible","addresses":["fd00:1122:3344:108::d"],"dataset":{"id":"9bdc40ee-ccba-4d18-9efb-a30596e2d290","name":{"pool_name":"oxp_eb81728c-3b83-42fb-8133-ac32a0bdf70f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::d]:32345"},"services":[{"id":"9bdc40ee-ccba-4d18-9efb-a30596e2d290","details":{"type":"crucible","address":"[fd00:1122:3344:108::d]:32345"}}]},"root":"/pool/ext/8be8c577-23ac-452e-a205-6d9c95088f61/crypt/zone"},{"zone":{"id":"c9a367c7-64d7-48e4-b484-9ecb4e8faea7","zone_type":"crucible","addresses":["fd00:1122:3344:108::9"],"dataset":{"id":"c9a367c7-64d7-48e4-b484-9ecb4e8faea7","name":{"pool_name":"oxp_76ab5a67-e20f-4bf0-87b3-01fcc4144bd2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::9]:32345"},"services":[{"id":"c9a367c7-64d7-48e4-b484-9ecb4e8faea7","details":{"type":"crucible","address":"[fd00:1122:3344:108::9]:32345"}}]},"root":"/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone"},{"zone":{"id":"bc5124d8-65e8-4879-bfac-64d59003d482","zone_type":"crucible","addresses":["fd00:1122:3344:108::a"],"dataset":{"id":"bc5124d8-65e8-4879-bfac-64d59003d482","name":{"pool_name":"oxp_5fac7a1d-e855-46e1-b8c2-dd848ac4fee6","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::a]:32345"},"services":[{"id":"bc5124d8-65e8-4879-bfac-64d59003d482","details":{"type":"crucible","address":"[fd00:1122:3344:108::a]:32345"}}]},"root":"/pool/ext/0c4ef358-5533-43db-ad38-a8eff716e53a/crypt/zone"},{"zone":{"id":"5cc7c840-8e6b-48c8-ac4b-f4297f8cf61a","zone_type":"crucible","addresses":["fd00:1122:3344:108::c"],"dataset":{"id":"5cc7c840-8e6b-48c8-ac4b-f4297f8cf61a","name":{"pool_name":"oxp_0c4ef358-5533-43db-ad38-a8eff716e53a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::c]:32345"},"services":[{"id":"5cc7c840-8e6b-48c8-ac4b-f4297f8cf61a","details":{"type":"crucible","address":"[fd00:1122:3344:108::c]:32345"}}]},"root":"/pool/ext/6d3e9cc6-f03b-4055-9785-05711d5e4fdc/crypt/zone"},{"zone":{"id":"3b767edf-a72d-4d80-a0fc-65d6801ed0e0","zone_type":"crucible","addresses":["fd00:1122:3344:108::e"],"dataset":{"id":"3b767edf-a72d-4d80-a0fc-65d6801ed0e0","name":{"pool_name":"oxp_f522118c-5dcd-4116-8044-07f0cceec52e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::e]:32345"},"services":[{"id":"3b767edf-a72d-4d80-a0fc-65d6801ed0e0","details":{
"type":"crucible","address":"[fd00:1122:3344:108::e]:32345"}}]},"root":"/pool/ext/5fac7a1d-e855-46e1-b8c2-dd848ac4fee6/crypt/zone"},{"zone":{"id":"f3c02ed6-fbc5-45c3-a030-409f74b450fd","zone_type":"crucible_pantry","addresses":["fd00:1122:3344:108::4"],"dataset":null,"services":[{"id":"f3c02ed6-fbc5-45c3-a030-409f74b450fd","details":{"type":"crucible_pantry","address":"[fd00:1122:3344:108::4]:17000"}}]},"root":"/pool/ext/eb81728c-3b83-42fb-8133-ac32a0bdf70f/crypt/zone"},{"zone":{"id":"85bd9bdb-1ec5-4a8d-badb-8b5d502546a1","zone_type":"crucible","addresses":["fd00:1122:3344:108::5"],"dataset":{"id":"85bd9bdb-1ec5-4a8d-badb-8b5d502546a1","name":{"pool_name":"oxp_416232c1-bc8f-403f-bacb-28403dd8fced","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::5]:32345"},"services":[{"id":"85bd9bdb-1ec5-4a8d-badb-8b5d502546a1","details":{"type":"crucible","address":"[fd00:1122:3344:108::5]:32345"}}]},"root":"/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone"},{"zone":{"id":"d2f1c3df-d4e0-4469-b50e-f1871da86ebf","zone_type":"crucible","addresses":["fd00:1122:3344:108::6"],"dataset":{"id":"d2f1c3df-d4e0-4469-b50e-f1871da86ebf","name":{"pool_name":"oxp_6d3e9cc6-f03b-4055-9785-05711d5e4fdc","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::6]:32345"},"services":[{"id":"d2f1c3df-d4e0-4469-b50e-f1871da86ebf","details":{"type":"crucible","address":"[fd00:1122:3344:108::6]:32345"}}]},"root":"/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone"},{"zone":{"id":"88fe3c12-4c55-47df-b4ee-ed26b795439d","zone_type":"crucible","addresses":["fd00:1122:3344:108::8"],"dataset":{"id":"88fe3c12-4c55-47df-b4ee-ed26b795439d","name":{"pool_name":"oxp_8be8c577-23ac-452e-a205-6d9c95088f61","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::8]:32345"},"services":[{"id":"88fe3c12-4c55-47df-b4ee-ed26b795439d","details":{"type":"crucible","address":"[fd00:1122:3344:108::8]:32345"}}]},"root":"/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone"},{"zone":{"id":"4d20175a-588b-44b8-8b9c-b16c6c3a97a0","zone_type":"crucible","addresses":["fd00:1122:3344:108::b"],"dataset":{"id":"4d20175a-588b-44b8-8b9c-b16c6c3a97a0","name":{"pool_name":"oxp_a726cacd-fa35-4ed2-ade6-31ad928b24cb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::b]:32345"},"services":[{"id":"4d20175a-588b-44b8-8b9c-b16c6c3a97a0","details":{"type":"crucible","address":"[fd00:1122:3344:108::b]:32345"}}]},"root":"/pool/ext/0c4ef358-5533-43db-ad38-a8eff716e53a/crypt/zone"},{"zone":{"id":"e86845b5-eabd-49f5-9a10-6dfef9066209","zone_type":"cockroach_db","addresses":["fd00:1122:3344:108::3"],"dataset":{"id":"e86845b5-eabd-49f5-9a10-6dfef9066209","name":{"pool_name":"oxp_416232c1-bc8f-403f-bacb-28403dd8fced","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:108::3]:32221"},"services":[{"id":"e86845b5-eabd-49f5-9a10-6dfef9066209","details":{"type":"cockroach_db","address":"[fd00:1122:3344:108::3]:32221"}}]},"root":"/pool/ext/416232c1-bc8f-403f-bacb-28403dd8fced/crypt/zone"},{"zone":{"id":"209b6213-588b-43b6-a89b-19ee5c84ffba","zone_type":"ntp","addresses":["fd00:1122:3344:108::f"],"dataset":null,"services":[{"id":"209b6213-588b-43b6-a89b-19ee5c84ffba","details":{"type":"internal_ntp","address":"[fd00:1122:3344:108::f]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}
]},"root":"/pool/ext/416232c1-bc8f-403f-bacb-28403dd8fced/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled17.json b/sled-agent/tests/old-service-ledgers/rack2-sled17.json deleted file mode 100644 index 93872adf13..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack2-sled17.json +++ /dev/null @@ -1 +0,0 @@ -{"generation":4,"requests":[{"zone":{"id":"90b53c3d-42fa-4ca9-bbfc-96fff245b508","zone_type":"crucible","addresses":["fd00:1122:3344:109::4"],"dataset":{"id":"90b53c3d-42fa-4ca9-bbfc-96fff245b508","name":{"pool_name":"oxp_ae56280b-17ce-4266-8573-e1da9db6c6bb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::4]:32345"},"services":[{"id":"90b53c3d-42fa-4ca9-bbfc-96fff245b508","details":{"type":"crucible","address":"[fd00:1122:3344:109::4]:32345"}}]},"root":"/pool/ext/b0e1a261-b932-47c4-81e9-1977275ae9d9/crypt/zone"},{"zone":{"id":"4f9f2e1d-be04-4e8b-a50b-ffb18557a650","zone_type":"crucible","addresses":["fd00:1122:3344:109::5"],"dataset":{"id":"4f9f2e1d-be04-4e8b-a50b-ffb18557a650","name":{"pool_name":"oxp_d5b07362-64db-4b18-a3e9-8d7cbabae2d5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::5]:32345"},"services":[{"id":"4f9f2e1d-be04-4e8b-a50b-ffb18557a650","details":{"type":"crucible","address":"[fd00:1122:3344:109::5]:32345"}}]},"root":"/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone"},{"zone":{"id":"2fa5671d-3109-4f11-ae70-1280f4fa3b89","zone_type":"crucible","addresses":["fd00:1122:3344:109::6"],"dataset":{"id":"2fa5671d-3109-4f11-ae70-1280f4fa3b89","name":{"pool_name":"oxp_9ba7bfbf-b9a2-4237-a142-94c1e68de984","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::6]:32345"},"services":[{"id":"2fa5671d-3109-4f11-ae70-1280f4fa3b89","details":{"type":"crucible","address":"[fd00:1122:3344:109::6]:32345"}}]},"root":"/pool/ext/3cafbb47-c194-4a42-99ff-34dfeab999ed/crypt/zone"},{"zone":{"id":"b63c6882-ca90-4156-b561-4781ab4a0962","zone_type":"crucible","addresses":["fd00:1122:3344:109::7"],"dataset":{"id":"b63c6882-ca90-4156-b561-4781ab4a0962","name":{"pool_name":"oxp_b0e1a261-b932-47c4-81e9-1977275ae9d9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::7]:32345"},"services":[{"id":"b63c6882-ca90-4156-b561-4781ab4a0962","details":{"type":"crucible","address":"[fd00:1122:3344:109::7]:32345"}}]},"root":"/pool/ext/d5b07362-64db-4b18-a3e9-8d7cbabae2d5/crypt/zone"},{"zone":{"id":"f71344eb-f7e2-439d-82a0-9941e6868fb6","zone_type":"crucible","addresses":["fd00:1122:3344:109::9"],"dataset":{"id":"f71344eb-f7e2-439d-82a0-9941e6868fb6","name":{"pool_name":"oxp_027a82e8-daa3-4fa6-8205-ed03445e1086","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::9]:32345"},"services":[{"id":"f71344eb-f7e2-439d-82a0-9941e6868fb6","details":{"type":"crucible","address":"[fd00:1122:3344:109::9]:32345"}}]},"root":"/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone"},{"zone":{"id":"a60cf0d7-12d5-43cb-aa3f-7a9e84de08fb","zone_type":"crucible","addresses":["fd00:1122:3344:109::a"],"dataset":{"id":"a60cf0d7-12d5-43cb-aa3f-7a9e84de08fb","name":{"pool_name":"oxp_8736aaf9-4d72-42b1-8e4f-07644d999c8b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::a]:32345"},"services":[{"id":"a60cf0d7-12d5-43cb-aa3f-7a9e84de08fb","details":{"type":"crucible","address":"[fd00:1122:3344:109::a]:32345"}}]},"root":"/pool/ext/8736aaf9-4d72-42b1-8e4f-07644d999c8b/crypt/zone"},{"zone":{"id":"5d0e03b2-8958-4c43-8851-bf819f102958","zone_type":"crucible","addresses":["f
d00:1122:3344:109::8"],"dataset":{"id":"5d0e03b2-8958-4c43-8851-bf819f102958","name":{"pool_name":"oxp_62426615-7832-49e7-9426-e39ffeb42c69","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::8]:32345"},"services":[{"id":"5d0e03b2-8958-4c43-8851-bf819f102958","details":{"type":"crucible","address":"[fd00:1122:3344:109::8]:32345"}}]},"root":"/pool/ext/07fc8ec9-1216-4d98-be34-c2970b585e61/crypt/zone"},{"zone":{"id":"accc05a2-ec80-4856-a825-ec6b7f700eaa","zone_type":"crucible","addresses":["fd00:1122:3344:109::d"],"dataset":{"id":"accc05a2-ec80-4856-a825-ec6b7f700eaa","name":{"pool_name":"oxp_dc083c53-7014-4482-8a79-f338ba2b0fb4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::d]:32345"},"services":[{"id":"accc05a2-ec80-4856-a825-ec6b7f700eaa","details":{"type":"crucible","address":"[fd00:1122:3344:109::d]:32345"}}]},"root":"/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone"},{"zone":{"id":"2e32fdcc-737a-4430-8290-cb7028ea4d50","zone_type":"crucible","addresses":["fd00:1122:3344:109::b"],"dataset":{"id":"2e32fdcc-737a-4430-8290-cb7028ea4d50","name":{"pool_name":"oxp_3cafbb47-c194-4a42-99ff-34dfeab999ed","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::b]:32345"},"services":[{"id":"2e32fdcc-737a-4430-8290-cb7028ea4d50","details":{"type":"crucible","address":"[fd00:1122:3344:109::b]:32345"}}]},"root":"/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone"},{"zone":{"id":"a97c6ae2-37f6-4d93-a66e-cb5cd3c6aaa2","zone_type":"crucible","addresses":["fd00:1122:3344:109::c"],"dataset":{"id":"a97c6ae2-37f6-4d93-a66e-cb5cd3c6aaa2","name":{"pool_name":"oxp_07fc8ec9-1216-4d98-be34-c2970b585e61","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::c]:32345"},"services":[{"id":"a97c6ae2-37f6-4d93-a66e-cb5cd3c6aaa2","details":{"type":"crucible","address":"[fd00:1122:3344:109::c]:32345"}}]},"root":"/pool/ext/07fc8ec9-1216-4d98-be34-c2970b585e61/crypt/zone"},{"zone":{"id":"3237a532-acaa-4ebe-bf11-dde794fea739","zone_type":"cockroach_db","addresses":["fd00:1122:3344:109::3"],"dataset":{"id":"3237a532-acaa-4ebe-bf11-dde794fea739","name":{"pool_name":"oxp_ae56280b-17ce-4266-8573-e1da9db6c6bb","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:109::3]:32221"},"services":[{"id":"3237a532-acaa-4ebe-bf11-dde794fea739","details":{"type":"cockroach_db","address":"[fd00:1122:3344:109::3]:32221"}}]},"root":"/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone"},{"zone":{"id":"83257100-5590-484a-b72a-a079389d8da6","zone_type":"ntp","addresses":["fd00:1122:3344:109::e"],"dataset":null,"services":[{"id":"83257100-5590-484a-b72a-a079389d8da6","details":{"type":"internal_ntp","address":"[fd00:1122:3344:109::e]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/3cafbb47-c194-4a42-99ff-34dfeab999ed/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled21.json b/sled-agent/tests/old-service-ledgers/rack2-sled21.json deleted file mode 100644 index 78e003f79e..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack2-sled21.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":5,"requests":[{"zone":{"id":"0437b69d-73a8-4231-86f9-6b5556e7e7ef","zone_type":"crucible","addresses":["fd00:1122:3344:102::5"],"dataset":{"id":"0437b69d-73a8-4231-86f9-6b5556e7e7ef","name":{"pool_name":"oxp_aa0ffe35-76db-42ab-adf2-ceb072bdf811","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::5]:32345"},"services":[{"id":"0437b69d-73a8-4231-86f9-6b5556e7e7ef","details":{"type":"crucible","address":"[fd00:1122:3344:102::5]:32345"}}]},"root":"/pool/ext/0d2805da-6d24-4e57-a700-0c3865c05544/crypt/zone"},{"zone":{"id":"47234ca5-305f-436a-9e9a-36bca9667680","zone_type":"crucible","addresses":["fd00:1122:3344:102::b"],"dataset":{"id":"47234ca5-305f-436a-9e9a-36bca9667680","name":{"pool_name":"oxp_0d2805da-6d24-4e57-a700-0c3865c05544","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::b]:32345"},"services":[{"id":"47234ca5-305f-436a-9e9a-36bca9667680","details":{"type":"crucible","address":"[fd00:1122:3344:102::b]:32345"}}]},"root":"/pool/ext/160691d8-33a1-4d7d-a48a-c3fd27d76822/crypt/zone"},{"zone":{"id":"2898657e-4141-4c05-851b-147bffc6bbbd","zone_type":"nexus","addresses":["fd00:1122:3344:102::3"],"dataset":null,"services":[{"id":"2898657e-4141-4c05-851b-147bffc6bbbd","details":{"type":"nexus","internal_address":"[fd00:1122:3344:102::3]:12221","external_ip":"172.20.26.5","nic":{"id":"2e9a412e-c79a-48fe-8fa4-f5a6afed1040","kind":{"type":"service","id":"2898657e-4141-4c05-851b-147bffc6bbbd"},"name":"nexus-2898657e-4141-4c05-851b-147bffc6bbbd","ip":"172.30.2.7","mac":"A8:40:25:FF:C6:59","subnet":"172.30.2.0/24","vni":100,"primary":true,"slot":0},"external_tls":true,"external_dns_servers":["1.1.1.1","9.9.9.9"]}}]},"root":"/pool/ext/c0b4ecc1-a145-443f-90d1-2e8136b007bc/crypt/zone"},{"zone":{"id":"cf98c4d6-4a7b-49c0-9b14-48a8adf52ce9","zone_type":"crucible","addresses":["fd00:1122:3344:102::c"],"dataset":{"id":"cf98c4d6-4a7b-49c0-9b14-48a8adf52ce9","name":{"pool_name":"oxp_c0b4ecc1-a145-443f-90d1-2e8136b007bc","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::c]:32345"},"services":[{"id":"cf98c4d6-4a7b-49c0-9b14-48a8adf52ce9","details":{"type":"crucible","address":"[fd00:1122:3344:102::c]:32345"}}]},"root":"/pool/ext/f6acd70a-d6cb-464d-a460-dd5c60301562/crypt/zone"},{"zone":{"id":"13c1e91e-bfcc-4eea-8185-412fc37fdea3","zone_type":"crucible","addresses":["fd00:1122:3344:102::9"],"dataset":{"id":"13c1e91e-bfcc-4eea-8185-412fc37fdea3","name":{"pool_name":"oxp_e9b0a2e4-8060-41bd-a3b5-d0642246d06d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::9]:32345"},"services":[{"id":"13c1e91e-bfcc-4eea-8185-412fc37fdea3","details":{"type":"crucible","address":"[fd00:1122:3344:102::9]:32345"}}]},"root":"/pool/ext/c0b4ecc1-a145-443f-90d1-2e8136b007bc/crypt/zone"},{"zone":{"id":"c9cb60af-9e0e-4b3b-b971-53138a9b8d27","zone_type":"crucible","addresses":["fd00:1122:3344:102::4"],"dataset":{"id":"c9cb60af-9e0e-4b3b-b971-53138a9b8d27","name":{"pool_name":"oxp_77749ec7-39a9-489d-904b-87f7223c4e3c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::4]:32345"},"services":[{"id":"c9cb60af-9e0e-4b3b-b971-53138a9b8d27","details":{"type":"crucible","address":"[fd00:1122:3344:102::4]:32345"}}]},"root":"/pool/ext/77749ec7-39a9-489d-904b-87f7223c4e3c/crypt/zone"},{"zone":{"id":"32995cfa-47ec-4b84-8514-7c1c8a86c19d","zone_type":"crucible","addresses":["fd00:1122:3344:102::8"],"dataset":{"id":"32995cfa-47ec-4b84-8514-7c1c8a86c19d","name":{"pool_name":"oxp_eac83f81-eb51-4f3e-874e-82f55dd952ba","kind":{"type":"crucible"}},"service_
address":"[fd00:1122:3344:102::8]:32345"},"services":[{"id":"32995cfa-47ec-4b84-8514-7c1c8a86c19d","details":{"type":"crucible","address":"[fd00:1122:3344:102::8]:32345"}}]},"root":"/pool/ext/0d2805da-6d24-4e57-a700-0c3865c05544/crypt/zone"},{"zone":{"id":"b93d2e2d-d54b-4503-85c3-9878e3cee9c7","zone_type":"crucible","addresses":["fd00:1122:3344:102::a"],"dataset":{"id":"b93d2e2d-d54b-4503-85c3-9878e3cee9c7","name":{"pool_name":"oxp_160691d8-33a1-4d7d-a48a-c3fd27d76822","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::a]:32345"},"services":[{"id":"b93d2e2d-d54b-4503-85c3-9878e3cee9c7","details":{"type":"crucible","address":"[fd00:1122:3344:102::a]:32345"}}]},"root":"/pool/ext/138663ad-a382-4595-baf0-08f6b0276a67/crypt/zone"},{"zone":{"id":"2ebbac4f-7b0f-43eb-99fd-dd6ff7f9e097","zone_type":"crucible","addresses":["fd00:1122:3344:102::6"],"dataset":{"id":"2ebbac4f-7b0f-43eb-99fd-dd6ff7f9e097","name":{"pool_name":"oxp_138663ad-a382-4595-baf0-08f6b0276a67","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::6]:32345"},"services":[{"id":"2ebbac4f-7b0f-43eb-99fd-dd6ff7f9e097","details":{"type":"crucible","address":"[fd00:1122:3344:102::6]:32345"}}]},"root":"/pool/ext/e9b0a2e4-8060-41bd-a3b5-d0642246d06d/crypt/zone"},{"zone":{"id":"d0eea3b2-e5ac-42bf-97b7-531b78fa06d1","zone_type":"crucible","addresses":["fd00:1122:3344:102::7"],"dataset":{"id":"d0eea3b2-e5ac-42bf-97b7-531b78fa06d1","name":{"pool_name":"oxp_69f0b863-f73f-42b2-9822-b2cb99f09003","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::7]:32345"},"services":[{"id":"d0eea3b2-e5ac-42bf-97b7-531b78fa06d1","details":{"type":"crucible","address":"[fd00:1122:3344:102::7]:32345"}}]},"root":"/pool/ext/138663ad-a382-4595-baf0-08f6b0276a67/crypt/zone"},{"zone":{"id":"2b34cd1d-ea7d-41a1-82b9-75550fdf6eb0","zone_type":"crucible","addresses":["fd00:1122:3344:102::d"],"dataset":{"id":"2b34cd1d-ea7d-41a1-82b9-75550fdf6eb0","name":{"pool_name":"oxp_f6acd70a-d6cb-464d-a460-dd5c60301562","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::d]:32345"},"services":[{"id":"2b34cd1d-ea7d-41a1-82b9-75550fdf6eb0","details":{"type":"crucible","address":"[fd00:1122:3344:102::d]:32345"}}]},"root":"/pool/ext/c0b4ecc1-a145-443f-90d1-2e8136b007bc/crypt/zone"},{"zone":{"id":"6ea2684c-115e-48a6-8453-ab52d1cecd73","zone_type":"ntp","addresses":["fd00:1122:3344:102::e"],"dataset":null,"services":[{"id":"6ea2684c-115e-48a6-8453-ab52d1cecd73","details":{"type":"boundary_ntp","address":"[fd00:1122:3344:102::e]:123","ntp_servers":["ntp.eng.oxide.computer"],"dns_servers":["1.1.1.1","9.9.9.9"],"domain":null,"nic":{"id":"4effd079-ed4e-4cf6-8545-bb9574f516d2","kind":{"type":"service","id":"6ea2684c-115e-48a6-8453-ab52d1cecd73"},"name":"ntp-6ea2684c-115e-48a6-8453-ab52d1cecd73","ip":"172.30.3.6","mac":"A8:40:25:FF:A0:F9","subnet":"172.30.3.0/24","vni":100,"primary":true,"slot":0},"snat_cfg":{"ip":"172.20.26.7","first_port":16384,"last_port":32767}}}]},"root":"/pool/ext/aa0ffe35-76db-42ab-adf2-ceb072bdf811/crypt/zone"},{"zone":{"id":"3a1ea15f-06a4-4afd-959a-c3a00b2bdd80","zone_type":"internal_dns","addresses":["fd00:1122:3344:2::1"],"dataset":{"id":"3a1ea15f-06a4-4afd-959a-c3a00b2bdd80","name":{"pool_name":"oxp_77749ec7-39a9-489d-904b-87f7223c4e3c","kind":{"type":"internal_dns"}},"service_address":"[fd00:1122:3344:2::1]:5353"},"services":[{"id":"3a1ea15f-06a4-4afd-959a-c3a00b2bdd80","details":{"type":"internal_dns","http_address":"[fd00:1122:3344:2::1]:5353","dns_address":"[fd00:1122:3344:2::1]:53","gz_address":"fd00:
1122:3344:2::2","gz_address_index":1}}]},"root":"/pool/ext/69f0b863-f73f-42b2-9822-b2cb99f09003/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled23.json b/sled-agent/tests/old-service-ledgers/rack2-sled23.json deleted file mode 100644 index 29b8c455d3..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack2-sled23.json +++ /dev/null @@ -1 +0,0 @@ -{"generation":5,"requests":[{"zone":{"id":"1876cdcf-b2e7-4b79-ad2e-67df716e1860","zone_type":"crucible","addresses":["fd00:1122:3344:10a::8"],"dataset":{"id":"1876cdcf-b2e7-4b79-ad2e-67df716e1860","name":{"pool_name":"oxp_d4c6bdc6-5e99-4f6c-b57a-9bfcb9a76be4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::8]:32345"},"services":[{"id":"1876cdcf-b2e7-4b79-ad2e-67df716e1860","details":{"type":"crucible","address":"[fd00:1122:3344:10a::8]:32345"}}]},"root":"/pool/ext/86c58ea3-1413-4af3-9aff-9c0a3d758459/crypt/zone"},{"zone":{"id":"0e708ee3-b7a6-4993-a88a-4489add33e29","zone_type":"crucible","addresses":["fd00:1122:3344:10a::d"],"dataset":{"id":"0e708ee3-b7a6-4993-a88a-4489add33e29","name":{"pool_name":"oxp_718ad834-b415-4abb-934d-9f987cde0a96","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::d]:32345"},"services":[{"id":"0e708ee3-b7a6-4993-a88a-4489add33e29","details":{"type":"crucible","address":"[fd00:1122:3344:10a::d]:32345"}}]},"root":"/pool/ext/30f7d236-c835-46cc-bc27-9099a6826f67/crypt/zone"},{"zone":{"id":"4e1b9a65-848f-4649-b360-1df0d135b44d","zone_type":"crucible","addresses":["fd00:1122:3344:10a::c"],"dataset":{"id":"4e1b9a65-848f-4649-b360-1df0d135b44d","name":{"pool_name":"oxp_88ee08c6-1c0f-44c2-9110-b8d5a7589ebb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::c]:32345"},"services":[{"id":"4e1b9a65-848f-4649-b360-1df0d135b44d","details":{"type":"crucible","address":"[fd00:1122:3344:10a::c]:32345"}}]},"root":"/pool/ext/30f7d236-c835-46cc-bc27-9099a6826f67/crypt/zone"},{"zone":{"id":"da510a57-3af1-4d2b-b2ed-2e8849f27d8b","zone_type":"oximeter","addresses":["fd00:1122:3344:10a::3"],"dataset":null,"services":[{"id":"da510a57-3af1-4d2b-b2ed-2e8849f27d8b","details":{"type":"oximeter","address":"[fd00:1122:3344:10a::3]:12223"}}]},"root":"/pool/ext/718ad834-b415-4abb-934d-9f987cde0a96/crypt/zone"},{"zone":{"id":"d4d9acc8-3e0b-4fab-a0a2-d21920fabd7e","zone_type":"crucible","addresses":["fd00:1122:3344:10a::6"],"dataset":{"id":"d4d9acc8-3e0b-4fab-a0a2-d21920fabd7e","name":{"pool_name":"oxp_9dfe424f-cba6-4bfb-a3dd-e8bd7fdea57d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::6]:32345"},"services":[{"id":"d4d9acc8-3e0b-4fab-a0a2-d21920fabd7e","details":{"type":"crucible","address":"[fd00:1122:3344:10a::6]:32345"}}]},"root":"/pool/ext/30f7d236-c835-46cc-bc27-9099a6826f67/crypt/zone"},{"zone":{"id":"fcb75972-836b-4f55-ba21-9722832cf5c2","zone_type":"crucible","addresses":["fd00:1122:3344:10a::7"],"dataset":{"id":"fcb75972-836b-4f55-ba21-9722832cf5c2","name":{"pool_name":"oxp_9005671f-3d90-4ed1-be15-ad65b9a65bd5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::7]:32345"},"services":[{"id":"fcb75972-836b-4f55-ba21-9722832cf5c2","details":{"type":"crucible","address":"[fd00:1122:3344:10a::7]:32345"}}]},"root":"/pool/ext/d4c6bdc6-5e99-4f6c-b57a-9bfcb9a76be4/crypt/zone"},{"zone":{"id":"624beba0-7dcd-4d55-af05-4670c6fcb1fb","zone_type":"crucible","addresses":["fd00:1122:3344:10a::4"],"dataset":{"id":"624beba0-7dcd-4d55-af05-4670c6fcb1fb","name":{"pool_name":"oxp_93867156-a43d-4c03-a899-1535e566c8bd","ki
nd":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::4]:32345"},"services":[{"id":"624beba0-7dcd-4d55-af05-4670c6fcb1fb","details":{"type":"crucible","address":"[fd00:1122:3344:10a::4]:32345"}}]},"root":"/pool/ext/93867156-a43d-4c03-a899-1535e566c8bd/crypt/zone"},{"zone":{"id":"26fb3830-898e-4086-afaf-8f9654716b8c","zone_type":"crucible","addresses":["fd00:1122:3344:10a::b"],"dataset":{"id":"26fb3830-898e-4086-afaf-8f9654716b8c","name":{"pool_name":"oxp_86c58ea3-1413-4af3-9aff-9c0a3d758459","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::b]:32345"},"services":[{"id":"26fb3830-898e-4086-afaf-8f9654716b8c","details":{"type":"crucible","address":"[fd00:1122:3344:10a::b]:32345"}}]},"root":"/pool/ext/93867156-a43d-4c03-a899-1535e566c8bd/crypt/zone"},{"zone":{"id":"a3ef7eba-c08e-48ef-ae7a-89e2fcb49b66","zone_type":"crucible","addresses":["fd00:1122:3344:10a::a"],"dataset":{"id":"a3ef7eba-c08e-48ef-ae7a-89e2fcb49b66","name":{"pool_name":"oxp_cd3fdbae-a9d9-4db7-866a-bca36f6dd634","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::a]:32345"},"services":[{"id":"a3ef7eba-c08e-48ef-ae7a-89e2fcb49b66","details":{"type":"crucible","address":"[fd00:1122:3344:10a::a]:32345"}}]},"root":"/pool/ext/718ad834-b415-4abb-934d-9f987cde0a96/crypt/zone"},{"zone":{"id":"5c1d4a02-f33b-433a-81f5-5c149e3433bd","zone_type":"crucible","addresses":["fd00:1122:3344:10a::5"],"dataset":{"id":"5c1d4a02-f33b-433a-81f5-5c149e3433bd","name":{"pool_name":"oxp_9adfc865-2eef-4880-a6e3-9d2f88c8efd0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::5]:32345"},"services":[{"id":"5c1d4a02-f33b-433a-81f5-5c149e3433bd","details":{"type":"crucible","address":"[fd00:1122:3344:10a::5]:32345"}}]},"root":"/pool/ext/cd3fdbae-a9d9-4db7-866a-bca36f6dd634/crypt/zone"},{"zone":{"id":"ee77efe9-81d0-4395-a237-15e30c2c2d04","zone_type":"crucible","addresses":["fd00:1122:3344:10a::9"],"dataset":{"id":"ee77efe9-81d0-4395-a237-15e30c2c2d04","name":{"pool_name":"oxp_30f7d236-c835-46cc-bc27-9099a6826f67","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::9]:32345"},"services":[{"id":"ee77efe9-81d0-4395-a237-15e30c2c2d04","details":{"type":"crucible","address":"[fd00:1122:3344:10a::9]:32345"}}]},"root":"/pool/ext/88ee08c6-1c0f-44c2-9110-b8d5a7589ebb/crypt/zone"},{"zone":{"id":"71ab91b7-48d4-4d31-b47e-59f29f419116","zone_type":"ntp","addresses":["fd00:1122:3344:10a::e"],"dataset":null,"services":[{"id":"71ab91b7-48d4-4d31-b47e-59f29f419116","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10a::e]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/cd3fdbae-a9d9-4db7-866a-bca36f6dd634/crypt/zone"},{"zone":{"id":"46ccd8fe-4e3c-4307-97ae-1f7ac505082a","zone_type":"internal_dns","addresses":["fd00:1122:3344:3::1"],"dataset":{"id":"46ccd8fe-4e3c-4307-97ae-1f7ac505082a","name":{"pool_name":"oxp_93867156-a43d-4c03-a899-1535e566c8bd","kind":{"type":"internal_dns"}},"service_address":"[fd00:1122:3344:3::1]:5353"},"services":[{"id":"46ccd8fe-4e3c-4307-97ae-1f7ac505082a","details":{"type":"internal_dns","http_address":"[fd00:1122:3344:3::1]:5353","dns_address":"[fd00:1122:3344:3::1]:53","gz_address":"fd00:1122:3344:3::2","gz_address_index":2}}]},"root":"/pool/ext/9dfe424f-cba6-4bfb-a3dd-e8bd7fdea57d/crypt/zone"}]} \ No newline at end of file 
diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled25.json b/sled-agent/tests/old-service-ledgers/rack2-sled25.json deleted file mode 100644 index e48ef68faa..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack2-sled25.json +++ /dev/null @@ -1 +0,0 @@ -{"generation":4,"requests":[{"zone":{"id":"180d466d-eb36-4546-8922-e52c4c076823","zone_type":"crucible","addresses":["fd00:1122:3344:101::5"],"dataset":{"id":"180d466d-eb36-4546-8922-e52c4c076823","name":{"pool_name":"oxp_ac789935-fa42-4d00-8967-df0d96dbb74e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::5]:32345"},"services":[{"id":"180d466d-eb36-4546-8922-e52c4c076823","details":{"type":"crucible","address":"[fd00:1122:3344:101::5]:32345"}}]},"root":"/pool/ext/d732addc-cfe8-4c2c-8028-72eb4481b04e/crypt/zone"},{"zone":{"id":"b5af0303-bc03-40a3-b733-0396d705dfbf","zone_type":"crucible","addresses":["fd00:1122:3344:101::7"],"dataset":{"id":"b5af0303-bc03-40a3-b733-0396d705dfbf","name":{"pool_name":"oxp_d732addc-cfe8-4c2c-8028-72eb4481b04e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::7]:32345"},"services":[{"id":"b5af0303-bc03-40a3-b733-0396d705dfbf","details":{"type":"crucible","address":"[fd00:1122:3344:101::7]:32345"}}]},"root":"/pool/ext/677b0057-3a80-461b-aca8-c2cb501a7278/crypt/zone"},{"zone":{"id":"9c7c805a-f5ed-4e48-86e3-7aa81a718881","zone_type":"crucible","addresses":["fd00:1122:3344:101::c"],"dataset":{"id":"9c7c805a-f5ed-4e48-86e3-7aa81a718881","name":{"pool_name":"oxp_923c930c-80f8-448d-8321-cebfc6c41760","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::c]:32345"},"services":[{"id":"9c7c805a-f5ed-4e48-86e3-7aa81a718881","details":{"type":"crucible","address":"[fd00:1122:3344:101::c]:32345"}}]},"root":"/pool/ext/ac789935-fa42-4d00-8967-df0d96dbb74e/crypt/zone"},{"zone":{"id":"4e49c83c-2d4a-491a-91ac-4ab022026dcf","zone_type":"crucible","addresses":["fd00:1122:3344:101::4"],"dataset":{"id":"4e49c83c-2d4a-491a-91ac-4ab022026dcf","name":{"pool_name":"oxp_c99e6032-1d4f-47d2-9efe-ae2b2479554e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::4]:32345"},"services":[{"id":"4e49c83c-2d4a-491a-91ac-4ab022026dcf","details":{"type":"crucible","address":"[fd00:1122:3344:101::4]:32345"}}]},"root":"/pool/ext/653065d2-ab70-47c9-b832-34238fdc95ef/crypt/zone"},{"zone":{"id":"0e38475e-b8b2-4813-bf80-3c170081081a","zone_type":"crucible","addresses":["fd00:1122:3344:101::d"],"dataset":{"id":"0e38475e-b8b2-4813-bf80-3c170081081a","name":{"pool_name":"oxp_653065d2-ab70-47c9-b832-34238fdc95ef","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::d]:32345"},"services":[{"id":"0e38475e-b8b2-4813-bf80-3c170081081a","details":{"type":"crucible","address":"[fd00:1122:3344:101::d]:32345"}}]},"root":"/pool/ext/4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca/crypt/zone"},{"zone":{"id":"75123e60-1116-4b8d-a466-7302220127da","zone_type":"crucible","addresses":["fd00:1122:3344:101::8"],"dataset":{"id":"75123e60-1116-4b8d-a466-7302220127da","name":{"pool_name":"oxp_c764a8ae-6862-4eec-9db0-cc6ea478e4a7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::8]:32345"},"services":[{"id":"75123e60-1116-4b8d-a466-7302220127da","details":{"type":"crucible","address":"[fd00:1122:3344:101::8]:32345"}}]},"root":"/pool/ext/c764a8ae-6862-4eec-9db0-cc6ea478e4a7/crypt/zone"},{"zone":{"id":"fbd0379c-97fa-49ea-8980-17ae30ffff3c","zone_type":"crucible","addresses":["fd00:1122:3344:101::b"],"dataset":{"id":"fbd0379c-97fa-49ea-8980-17ae30ffff3c","name":{"pool_name":"ox
p_fcb0e4c7-e046-4cf5-ad35-3ad90e1eb90c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::b]:32345"},"services":[{"id":"fbd0379c-97fa-49ea-8980-17ae30ffff3c","details":{"type":"crucible","address":"[fd00:1122:3344:101::b]:32345"}}]},"root":"/pool/ext/4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca/crypt/zone"},{"zone":{"id":"ec635326-cd1d-4f73-b8e6-c3a36a7020db","zone_type":"crucible","addresses":["fd00:1122:3344:101::a"],"dataset":{"id":"ec635326-cd1d-4f73-b8e6-c3a36a7020db","name":{"pool_name":"oxp_6bfb4120-488d-4f3d-90ef-e9bfa523b388","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::a]:32345"},"services":[{"id":"ec635326-cd1d-4f73-b8e6-c3a36a7020db","details":{"type":"crucible","address":"[fd00:1122:3344:101::a]:32345"}}]},"root":"/pool/ext/c99e6032-1d4f-47d2-9efe-ae2b2479554e/crypt/zone"},{"zone":{"id":"f500d564-c40a-4eca-ac8a-a26b435f2037","zone_type":"external_dns","addresses":["fd00:1122:3344:101::3"],"dataset":{"id":"f500d564-c40a-4eca-ac8a-a26b435f2037","name":{"pool_name":"oxp_c99e6032-1d4f-47d2-9efe-ae2b2479554e","kind":{"type":"external_dns"}},"service_address":"[fd00:1122:3344:101::3]:5353"},"services":[{"id":"f500d564-c40a-4eca-ac8a-a26b435f2037","details":{"type":"external_dns","http_address":"[fd00:1122:3344:101::3]:5353","dns_address":"172.20.26.2:53","nic":{"id":"b0b42776-3914-4a69-889f-4831dc72327c","kind":{"type":"service","id":"f500d564-c40a-4eca-ac8a-a26b435f2037"},"name":"external-dns-f500d564-c40a-4eca-ac8a-a26b435f2037","ip":"172.30.1.6","mac":"A8:40:25:FF:D0:B4","subnet":"172.30.1.0/24","vni":100,"primary":true,"slot":0}}}]},"root":"/pool/ext/ac789935-fa42-4d00-8967-df0d96dbb74e/crypt/zone"},{"zone":{"id":"56d4dbcc-3b4a-4ed0-8795-7734aadcc4c0","zone_type":"crucible","addresses":["fd00:1122:3344:101::9"],"dataset":{"id":"56d4dbcc-3b4a-4ed0-8795-7734aadcc4c0","name":{"pool_name":"oxp_4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::9]:32345"},"services":[{"id":"56d4dbcc-3b4a-4ed0-8795-7734aadcc4c0","details":{"type":"crucible","address":"[fd00:1122:3344:101::9]:32345"}}]},"root":"/pool/ext/4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca/crypt/zone"},{"zone":{"id":"0d3a1bd5-f6fe-49cb-807a-190dabc90103","zone_type":"crucible","addresses":["fd00:1122:3344:101::6"],"dataset":{"id":"0d3a1bd5-f6fe-49cb-807a-190dabc90103","name":{"pool_name":"oxp_677b0057-3a80-461b-aca8-c2cb501a7278","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::6]:32345"},"services":[{"id":"0d3a1bd5-f6fe-49cb-807a-190dabc90103","details":{"type":"crucible","address":"[fd00:1122:3344:101::6]:32345"}}]},"root":"/pool/ext/6bfb4120-488d-4f3d-90ef-e9bfa523b388/crypt/zone"},{"zone":{"id":"d34c7184-5d4e-4cb5-8f91-df74a343ffbc","zone_type":"ntp","addresses":["fd00:1122:3344:101::e"],"dataset":null,"services":[{"id":"d34c7184-5d4e-4cb5-8f91-df74a343ffbc","details":{"type":"internal_ntp","address":"[fd00:1122:3344:101::e]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/ac789935-fa42-4d00-8967-df0d96dbb74e/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled8.json b/sled-agent/tests/old-service-ledgers/rack2-sled8.json deleted file mode 100644 index 7d52980d9f..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack2-sled8.json +++ 
/dev/null @@ -1 +0,0 @@ -{"generation":4,"requests":[{"zone":{"id":"7153983f-8fd7-4fb9-92ac-0f07a07798b4","zone_type":"crucible","addresses":["fd00:1122:3344:103::a"],"dataset":{"id":"7153983f-8fd7-4fb9-92ac-0f07a07798b4","name":{"pool_name":"oxp_bf428719-1b16-4503-99f4-ad95846d916f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::a]:32345"},"services":[{"id":"7153983f-8fd7-4fb9-92ac-0f07a07798b4","details":{"type":"crucible","address":"[fd00:1122:3344:103::a]:32345"}}]},"root":"/pool/ext/26e698bb-006d-4208-94b9-d1bc279111fa/crypt/zone"},{"zone":{"id":"7d44ba36-4a69-490a-bc40-f6f90a4208d4","zone_type":"crucible","addresses":["fd00:1122:3344:103::c"],"dataset":{"id":"7d44ba36-4a69-490a-bc40-f6f90a4208d4","name":{"pool_name":"oxp_414e235b-55c3-4dc1-a568-8adf4ea1a052","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::c]:32345"},"services":[{"id":"7d44ba36-4a69-490a-bc40-f6f90a4208d4","details":{"type":"crucible","address":"[fd00:1122:3344:103::c]:32345"}}]},"root":"/pool/ext/cf940e15-dbc5-481b-866a-4de4b018898e/crypt/zone"},{"zone":{"id":"65a11c18-7f59-41ac-b9e7-680627f996e7","zone_type":"nexus","addresses":["fd00:1122:3344:103::3"],"dataset":null,"services":[{"id":"65a11c18-7f59-41ac-b9e7-680627f996e7","details":{"type":"nexus","internal_address":"[fd00:1122:3344:103::3]:12221","external_ip":"172.20.26.3","nic":{"id":"a3e13dde-a2bc-4170-ad84-aad8085b6034","kind":{"type":"service","id":"65a11c18-7f59-41ac-b9e7-680627f996e7"},"name":"nexus-65a11c18-7f59-41ac-b9e7-680627f996e7","ip":"172.30.2.5","mac":"A8:40:25:FF:A6:83","subnet":"172.30.2.0/24","vni":100,"primary":true,"slot":0},"external_tls":true,"external_dns_servers":["1.1.1.1","9.9.9.9"]}}]},"root":"/pool/ext/e126ddcc-8bee-46ba-8199-2a74df0ba040/crypt/zone"},{"zone":{"id":"072fdae8-2adf-4fd2-94ce-e9b0663b91e7","zone_type":"crucible","addresses":["fd00:1122:3344:103::b"],"dataset":{"id":"072fdae8-2adf-4fd2-94ce-e9b0663b91e7","name":{"pool_name":"oxp_26e698bb-006d-4208-94b9-d1bc279111fa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::b]:32345"},"services":[{"id":"072fdae8-2adf-4fd2-94ce-e9b0663b91e7","details":{"type":"crucible","address":"[fd00:1122:3344:103::b]:32345"}}]},"root":"/pool/ext/bf428719-1b16-4503-99f4-ad95846d916f/crypt/zone"},{"zone":{"id":"01f93020-7e7d-4185-93fb-6ca234056c82","zone_type":"crucible","addresses":["fd00:1122:3344:103::5"],"dataset":{"id":"01f93020-7e7d-4185-93fb-6ca234056c82","name":{"pool_name":"oxp_7b24095a-72df-45e3-984f-2b795e052ac7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::5]:32345"},"services":[{"id":"01f93020-7e7d-4185-93fb-6ca234056c82","details":{"type":"crucible","address":"[fd00:1122:3344:103::5]:32345"}}]},"root":"/pool/ext/7b24095a-72df-45e3-984f-2b795e052ac7/crypt/zone"},{"zone":{"id":"e238116d-e5cc-43d4-9c8a-6f138ae8a15d","zone_type":"crucible","addresses":["fd00:1122:3344:103::6"],"dataset":{"id":"e238116d-e5cc-43d4-9c8a-6f138ae8a15d","name":{"pool_name":"oxp_e126ddcc-8bee-46ba-8199-2a74df0ba040","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::6]:32345"},"services":[{"id":"e238116d-e5cc-43d4-9c8a-6f138ae8a15d","details":{"type":"crucible","address":"[fd00:1122:3344:103::6]:32345"}}]},"root":"/pool/ext/7b24095a-72df-45e3-984f-2b795e052ac7/crypt/zone"},{"zone":{"id":"585cd8c5-c41e-4be4-beb8-bfbef9b53856","zone_type":"crucible","addresses":["fd00:1122:3344:103::7"],"dataset":{"id":"585cd8c5-c41e-4be4-beb8-bfbef9b53856","name":{"pool_name":"oxp_6340805e-c5af-418d-8bd1-fc0085667f33","kind":{"type
":"crucible"}},"service_address":"[fd00:1122:3344:103::7]:32345"},"services":[{"id":"585cd8c5-c41e-4be4-beb8-bfbef9b53856","details":{"type":"crucible","address":"[fd00:1122:3344:103::7]:32345"}}]},"root":"/pool/ext/414e235b-55c3-4dc1-a568-8adf4ea1a052/crypt/zone"},{"zone":{"id":"0b41c560-3b20-42f4-82ad-92f5bb575d6b","zone_type":"crucible","addresses":["fd00:1122:3344:103::9"],"dataset":{"id":"0b41c560-3b20-42f4-82ad-92f5bb575d6b","name":{"pool_name":"oxp_b93f880e-c55b-4d6c-9a16-939d84b628fc","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::9]:32345"},"services":[{"id":"0b41c560-3b20-42f4-82ad-92f5bb575d6b","details":{"type":"crucible","address":"[fd00:1122:3344:103::9]:32345"}}]},"root":"/pool/ext/6340805e-c5af-418d-8bd1-fc0085667f33/crypt/zone"},{"zone":{"id":"0ccf27c0-e32d-4b52-a2c5-6db0c64a26f9","zone_type":"crucible","addresses":["fd00:1122:3344:103::d"],"dataset":{"id":"0ccf27c0-e32d-4b52-a2c5-6db0c64a26f9","name":{"pool_name":"oxp_2115b084-be0f-4fba-941b-33a659798a9e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::d]:32345"},"services":[{"id":"0ccf27c0-e32d-4b52-a2c5-6db0c64a26f9","details":{"type":"crucible","address":"[fd00:1122:3344:103::d]:32345"}}]},"root":"/pool/ext/414e235b-55c3-4dc1-a568-8adf4ea1a052/crypt/zone"},{"zone":{"id":"a6ba8273-0320-4dab-b801-281f041b0c50","zone_type":"crucible","addresses":["fd00:1122:3344:103::4"],"dataset":{"id":"a6ba8273-0320-4dab-b801-281f041b0c50","name":{"pool_name":"oxp_8a199f12-4f5c-483a-8aca-f97856658a35","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::4]:32345"},"services":[{"id":"a6ba8273-0320-4dab-b801-281f041b0c50","details":{"type":"crucible","address":"[fd00:1122:3344:103::4]:32345"}}]},"root":"/pool/ext/b93f880e-c55b-4d6c-9a16-939d84b628fc/crypt/zone"},{"zone":{"id":"b9b7b4c2-284a-4ec1-80ea-75b7a43b71c4","zone_type":"crucible","addresses":["fd00:1122:3344:103::8"],"dataset":{"id":"b9b7b4c2-284a-4ec1-80ea-75b7a43b71c4","name":{"pool_name":"oxp_cf940e15-dbc5-481b-866a-4de4b018898e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::8]:32345"},"services":[{"id":"b9b7b4c2-284a-4ec1-80ea-75b7a43b71c4","details":{"type":"crucible","address":"[fd00:1122:3344:103::8]:32345"}}]},"root":"/pool/ext/cf940e15-dbc5-481b-866a-4de4b018898e/crypt/zone"},{"zone":{"id":"7a85d50e-b524-41c1-a052-118027eb77db","zone_type":"ntp","addresses":["fd00:1122:3344:103::e"],"dataset":null,"services":[{"id":"7a85d50e-b524-41c1-a052-118027eb77db","details":{"type":"internal_ntp","address":"[fd00:1122:3344:103::e]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/b93f880e-c55b-4d6c-9a16-939d84b628fc/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled9.json b/sled-agent/tests/old-service-ledgers/rack2-sled9.json deleted file mode 100644 index 36af68759b..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack2-sled9.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"912346a2-d7e6-427e-b373-e8dcbe4fcea9","zone_type":"crucible","addresses":["fd00:1122:3344:105::5"],"dataset":{"id":"912346a2-d7e6-427e-b373-e8dcbe4fcea9","name":{"pool_name":"oxp_b358fb1e-f52a-4a63-9aab-170225509b37","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::5]:32345"},"services":[{"id":"912346a2-d7e6-427e-b373-e8dcbe4fcea9","details":{"type":"crucible","address":"[fd00:1122:3344:105::5]:32345"}}]},"root":"/pool/ext/0ae29053-29a2-489e-a1e6-6aec0ecd05f8/crypt/zone"},{"zone":{"id":"3d420dff-c616-4c7d-bab1-0f9c2b5396bf","zone_type":"crucible","addresses":["fd00:1122:3344:105::a"],"dataset":{"id":"3d420dff-c616-4c7d-bab1-0f9c2b5396bf","name":{"pool_name":"oxp_4eb2e4eb-41d8-496c-9a5a-687d7e004aa4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::a]:32345"},"services":[{"id":"3d420dff-c616-4c7d-bab1-0f9c2b5396bf","details":{"type":"crucible","address":"[fd00:1122:3344:105::a]:32345"}}]},"root":"/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone"},{"zone":{"id":"9c5d88c9-8ff1-4f23-9438-7b81322eaf68","zone_type":"crucible","addresses":["fd00:1122:3344:105::b"],"dataset":{"id":"9c5d88c9-8ff1-4f23-9438-7b81322eaf68","name":{"pool_name":"oxp_aadf48eb-6ff0-40b5-a092-1fdd06c03e11","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::b]:32345"},"services":[{"id":"9c5d88c9-8ff1-4f23-9438-7b81322eaf68","details":{"type":"crucible","address":"[fd00:1122:3344:105::b]:32345"}}]},"root":"/pool/ext/4358f47f-f21e-4cc8-829e-0c7fc2400a59/crypt/zone"},{"zone":{"id":"f9c1deca-1898-429e-8c93-254c7aa7bae6","zone_type":"crucible","addresses":["fd00:1122:3344:105::8"],"dataset":{"id":"f9c1deca-1898-429e-8c93-254c7aa7bae6","name":{"pool_name":"oxp_d1cb6b7d-2b92-4b7d-8a4d-551987f0277e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::8]:32345"},"services":[{"id":"f9c1deca-1898-429e-8c93-254c7aa7bae6","details":{"type":"crucible","address":"[fd00:1122:3344:105::8]:32345"}}]},"root":"/pool/ext/f8b11629-ced6-412a-9c3f-d169b99ee996/crypt/zone"},{"zone":{"id":"ce8563f3-4a93-45ff-b727-cbfbee6aa413","zone_type":"crucible","addresses":["fd00:1122:3344:105::9"],"dataset":{"id":"ce8563f3-4a93-45ff-b727-cbfbee6aa413","name":{"pool_name":"oxp_4358f47f-f21e-4cc8-829e-0c7fc2400a59","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::9]:32345"},"services":[{"id":"ce8563f3-4a93-45ff-b727-cbfbee6aa413","details":{"type":"crucible","address":"[fd00:1122:3344:105::9]:32345"}}]},"root":"/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone"},{"zone":{"id":"9470ea7d-1920-4b4b-8fca-e7659a1ef733","zone_type":"crucible","addresses":["fd00:1122:3344:105::c"],"dataset":{"id":"9470ea7d-1920-4b4b-8fca-e7659a1ef733","name":{"pool_name":"oxp_17eff217-f0b1-4353-b133-0f68bbd5ceaa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::c]:32345"},"services":[{"id":"9470ea7d-1920-4b4b-8fca-e7659a1ef733","details":{"type":"crucible","address":"[fd00:1122:3344:105::c]:32345"}}]},"root":"/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone"},{"zone":{"id":"375296e5-0a23-466c-b605-4204080f8103","zone_type":"crucible_pantry","addresses":["fd00:1122:3344:105::4"],"dataset":null,"services":[{"id":"375296e5-0a23-466c-b605-4204080f8103","details":{"type":"crucible_pantry","address":"[fd00:1122:3344:105::4]:17000"}}]},"root":"/pool/ext/4eb2e4eb-41d8-496c-9a5a-687d7e004aa4/crypt/zone"},{"zone":{"id":"f9940969-b0e8-4e8c-86c7-4bc49cd15a5f","zone_type":"crucible","addresses":["fd00:1122:3344:105::7"],"da
taset":{"id":"f9940969-b0e8-4e8c-86c7-4bc49cd15a5f","name":{"pool_name":"oxp_f8b11629-ced6-412a-9c3f-d169b99ee996","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::7]:32345"},"services":[{"id":"f9940969-b0e8-4e8c-86c7-4bc49cd15a5f","details":{"type":"crucible","address":"[fd00:1122:3344:105::7]:32345"}}]},"root":"/pool/ext/17eff217-f0b1-4353-b133-0f68bbd5ceaa/crypt/zone"},{"zone":{"id":"23dca27d-c79b-4930-a817-392e8aeaa4c1","zone_type":"crucible","addresses":["fd00:1122:3344:105::e"],"dataset":{"id":"23dca27d-c79b-4930-a817-392e8aeaa4c1","name":{"pool_name":"oxp_57650e05-36ff-4de8-865f-b9562bdb67f5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::e]:32345"},"services":[{"id":"23dca27d-c79b-4930-a817-392e8aeaa4c1","details":{"type":"crucible","address":"[fd00:1122:3344:105::e]:32345"}}]},"root":"/pool/ext/0ae29053-29a2-489e-a1e6-6aec0ecd05f8/crypt/zone"},{"zone":{"id":"92d3e4e9-0768-4772-83c1-23cce52190e9","zone_type":"crucible","addresses":["fd00:1122:3344:105::6"],"dataset":{"id":"92d3e4e9-0768-4772-83c1-23cce52190e9","name":{"pool_name":"oxp_eb1234a5-fdf7-4977-94d5-2eef25ce56a1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::6]:32345"},"services":[{"id":"92d3e4e9-0768-4772-83c1-23cce52190e9","details":{"type":"crucible","address":"[fd00:1122:3344:105::6]:32345"}}]},"root":"/pool/ext/b358fb1e-f52a-4a63-9aab-170225509b37/crypt/zone"},{"zone":{"id":"b3e9fee2-24d2-44e7-8539-a6918e85cf2b","zone_type":"crucible","addresses":["fd00:1122:3344:105::d"],"dataset":{"id":"b3e9fee2-24d2-44e7-8539-a6918e85cf2b","name":{"pool_name":"oxp_0ae29053-29a2-489e-a1e6-6aec0ecd05f8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::d]:32345"},"services":[{"id":"b3e9fee2-24d2-44e7-8539-a6918e85cf2b","details":{"type":"crucible","address":"[fd00:1122:3344:105::d]:32345"}}]},"root":"/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone"},{"zone":{"id":"4c3ef132-ec83-4b1b-9574-7c7d3035f9e9","zone_type":"cockroach_db","addresses":["fd00:1122:3344:105::3"],"dataset":{"id":"4c3ef132-ec83-4b1b-9574-7c7d3035f9e9","name":{"pool_name":"oxp_b358fb1e-f52a-4a63-9aab-170225509b37","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:105::3]:32221"},"services":[{"id":"4c3ef132-ec83-4b1b-9574-7c7d3035f9e9","details":{"type":"cockroach_db","address":"[fd00:1122:3344:105::3]:32221"}}]},"root":"/pool/ext/d1cb6b7d-2b92-4b7d-8a4d-551987f0277e/crypt/zone"},{"zone":{"id":"76b79b96-eaa2-4341-9aba-e77cfc92e0a9","zone_type":"ntp","addresses":["fd00:1122:3344:105::f"],"dataset":null,"services":[{"id":"76b79b96-eaa2-4341-9aba-e77cfc92e0a9","details":{"type":"internal_ntp","address":"[fd00:1122:3344:105::f]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/0ae29053-29a2-489e-a1e6-6aec0ecd05f8/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled0.json b/sled-agent/tests/old-service-ledgers/rack3-sled0.json deleted file mode 100644 index a853a525bc..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled0.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"0710ecea-dbc4-417f-a6f7-1b97c3045db1","zone_type":"crucible","addresses":["fd00:1122:3344:116::6"],"dataset":{"id":"0710ecea-dbc4-417f-a6f7-1b97c3045db1","name":{"pool_name":"oxp_d5313ef5-019c-4c47-bc5e-63794107a1bb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::6]:32345"},"services":[{"id":"0710ecea-dbc4-417f-a6f7-1b97c3045db1","details":{"type":"crucible","address":"[fd00:1122:3344:116::6]:32345"}}]},"root":"/pool/ext/904e93a9-d175-4a20-9006-8c1e847aecf7/crypt/zone"},{"zone":{"id":"28b29d14-d55f-4b55-bbc1-f66e46ae3e70","zone_type":"crucible","addresses":["fd00:1122:3344:116::9"],"dataset":{"id":"28b29d14-d55f-4b55-bbc1-f66e46ae3e70","name":{"pool_name":"oxp_60755ffe-e9ee-4619-a751-8b3ea6405e67","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::9]:32345"},"services":[{"id":"28b29d14-d55f-4b55-bbc1-f66e46ae3e70","details":{"type":"crucible","address":"[fd00:1122:3344:116::9]:32345"}}]},"root":"/pool/ext/d5313ef5-019c-4c47-bc5e-63794107a1bb/crypt/zone"},{"zone":{"id":"6f8f9fd2-b139-4069-a7e2-8d40efd58f6c","zone_type":"crucible","addresses":["fd00:1122:3344:116::d"],"dataset":{"id":"6f8f9fd2-b139-4069-a7e2-8d40efd58f6c","name":{"pool_name":"oxp_ccd2cb0b-782f-4026-a160-6d1192f04ca3","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::d]:32345"},"services":[{"id":"6f8f9fd2-b139-4069-a7e2-8d40efd58f6c","details":{"type":"crucible","address":"[fd00:1122:3344:116::d]:32345"}}]},"root":"/pool/ext/d5313ef5-019c-4c47-bc5e-63794107a1bb/crypt/zone"},{"zone":{"id":"450308ad-bf4d-40ff-ba62-f3290f7fffaf","zone_type":"crucible","addresses":["fd00:1122:3344:116::4"],"dataset":{"id":"450308ad-bf4d-40ff-ba62-f3290f7fffaf","name":{"pool_name":"oxp_46b09442-65ba-4d59-9121-9803fe3b724b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::4]:32345"},"services":[{"id":"450308ad-bf4d-40ff-ba62-f3290f7fffaf","details":{"type":"crucible","address":"[fd00:1122:3344:116::4]:32345"}}]},"root":"/pool/ext/54d901cc-f75e-417d-8a9f-24363136d0ef/crypt/zone"},{"zone":{"id":"9a22bbaa-eab4-4a32-8546-9882dc029483","zone_type":"crucible","addresses":["fd00:1122:3344:116::8"],"dataset":{"id":"9a22bbaa-eab4-4a32-8546-9882dc029483","name":{"pool_name":"oxp_93e3f350-75a0-4af0-bdac-baf9b423926f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::8]:32345"},"services":[{"id":"9a22bbaa-eab4-4a32-8546-9882dc029483","details":{"type":"crucible","address":"[fd00:1122:3344:116::8]:32345"}}]},"root":"/pool/ext/d5313ef5-019c-4c47-bc5e-63794107a1bb/crypt/zone"},{"zone":{"id":"63a9dc49-0b5b-4483-95ed-553b545dc202","zone_type":"crucible","addresses":["fd00:1122:3344:116::a"],"dataset":{"id":"63a9dc49-0b5b-4483-95ed-553b545dc202","name":{"pool_name":"oxp_e3532845-76c0-42a9-903b-a07f7992e937","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::a]:32345"},"services":[{"id":"63a9dc49-0b5b-4483-95ed-553b545dc202","details":{"type":"crucible","address":"[fd00:1122:3344:116::a]:32345"}}]},"root":"/pool/ext/60755ffe-e9ee-4619-a751-8b3ea6405e67/crypt/zone"},{"zone":{"id":"1fef5b6c-78e4-4ad9-9973-9d8c78f1e232","zone_type":"crucible","addresses":["fd00:1122:3344:116::7"],"dataset":{"id":"1fef5b6c-78e4-4ad9-9973-9d8c78f1e232","name":{"pool_name":"oxp_54d901cc-f75e-417d-8a9f-24363136d0ef","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::7]:32345"},"services":[{"id":"1fef5b6c-78e4-4ad9-9973-9d8c78f1e232","details":{"type":"crucible","address":"[fd00:1122:3344:116::7]:32345"}}]},"root":"/pool/ext
/90d7b6f9-3e28-48b0-86ac-0486728075cf/crypt/zone"},{"zone":{"id":"b2aab21a-cccd-4aa9-977f-a32090e6eaa7","zone_type":"crucible","addresses":["fd00:1122:3344:116::5"],"dataset":{"id":"b2aab21a-cccd-4aa9-977f-a32090e6eaa7","name":{"pool_name":"oxp_90d7b6f9-3e28-48b0-86ac-0486728075cf","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::5]:32345"},"services":[{"id":"b2aab21a-cccd-4aa9-977f-a32090e6eaa7","details":{"type":"crucible","address":"[fd00:1122:3344:116::5]:32345"}}]},"root":"/pool/ext/46b09442-65ba-4d59-9121-9803fe3b724b/crypt/zone"},{"zone":{"id":"fc1bbf28-24f3-4c1f-b367-2bc8231eb7d4","zone_type":"crucible","addresses":["fd00:1122:3344:116::b"],"dataset":{"id":"fc1bbf28-24f3-4c1f-b367-2bc8231eb7d4","name":{"pool_name":"oxp_0a7bb0d3-408b-42b1-8846-76cf106a9580","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::b]:32345"},"services":[{"id":"fc1bbf28-24f3-4c1f-b367-2bc8231eb7d4","details":{"type":"crucible","address":"[fd00:1122:3344:116::b]:32345"}}]},"root":"/pool/ext/e3532845-76c0-42a9-903b-a07f7992e937/crypt/zone"},{"zone":{"id":"bcb7617a-f76a-4912-8ccc-802d2a697e3c","zone_type":"crucible","addresses":["fd00:1122:3344:116::c"],"dataset":{"id":"bcb7617a-f76a-4912-8ccc-802d2a697e3c","name":{"pool_name":"oxp_904e93a9-d175-4a20-9006-8c1e847aecf7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::c]:32345"},"services":[{"id":"bcb7617a-f76a-4912-8ccc-802d2a697e3c","details":{"type":"crucible","address":"[fd00:1122:3344:116::c]:32345"}}]},"root":"/pool/ext/ccd2cb0b-782f-4026-a160-6d1192f04ca3/crypt/zone"},{"zone":{"id":"371fba3a-658b-469b-b675-c90cc0d39254","zone_type":"cockroach_db","addresses":["fd00:1122:3344:116::3"],"dataset":{"id":"371fba3a-658b-469b-b675-c90cc0d39254","name":{"pool_name":"oxp_46b09442-65ba-4d59-9121-9803fe3b724b","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:116::3]:32221"},"services":[{"id":"371fba3a-658b-469b-b675-c90cc0d39254","details":{"type":"cockroach_db","address":"[fd00:1122:3344:116::3]:32221"}}]},"root":"/pool/ext/46b09442-65ba-4d59-9121-9803fe3b724b/crypt/zone"},{"zone":{"id":"5a4d89f5-49e0-4566-a99c-342d1bb26b1c","zone_type":"ntp","addresses":["fd00:1122:3344:116::e"],"dataset":null,"services":[{"id":"5a4d89f5-49e0-4566-a99c-342d1bb26b1c","details":{"type":"internal_ntp","address":"[fd00:1122:3344:116::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/60755ffe-e9ee-4619-a751-8b3ea6405e67/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled1.json b/sled-agent/tests/old-service-ledgers/rack3-sled1.json deleted file mode 100644 index bd735e5e64..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled1.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"f401d06c-46fc-42f8-aa51-7515a51355ce","zone_type":"crucible","addresses":["fd00:1122:3344:11c::8"],"dataset":{"id":"f401d06c-46fc-42f8-aa51-7515a51355ce","name":{"pool_name":"oxp_8a88768a-2dd5-43b7-bd40-0db77be4d3a8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::8]:32345"},"services":[{"id":"f401d06c-46fc-42f8-aa51-7515a51355ce","details":{"type":"crucible","address":"[fd00:1122:3344:11c::8]:32345"}}]},"root":"/pool/ext/19d23d27-6a33-4203-b8c1-4b0df4ac791f/crypt/zone"},{"zone":{"id":"721c96ea-08d4-4c89-828f-600e7e344916","zone_type":"crucible","addresses":["fd00:1122:3344:11c::6"],"dataset":{"id":"721c96ea-08d4-4c89-828f-600e7e344916","name":{"pool_name":"oxp_15259003-fb04-4547-b4a9-b4511893c0fd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::6]:32345"},"services":[{"id":"721c96ea-08d4-4c89-828f-600e7e344916","details":{"type":"crucible","address":"[fd00:1122:3344:11c::6]:32345"}}]},"root":"/pool/ext/d2a8ed82-22ef-46d8-ad40-e1cb2cecebee/crypt/zone"},{"zone":{"id":"ca17bdf9-51c5-4e1e-b822-856609070ec6","zone_type":"crucible","addresses":["fd00:1122:3344:11c::5"],"dataset":{"id":"ca17bdf9-51c5-4e1e-b822-856609070ec6","name":{"pool_name":"oxp_d2a8ed82-22ef-46d8-ad40-e1cb2cecebee","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::5]:32345"},"services":[{"id":"ca17bdf9-51c5-4e1e-b822-856609070ec6","details":{"type":"crucible","address":"[fd00:1122:3344:11c::5]:32345"}}]},"root":"/pool/ext/15259003-fb04-4547-b4a9-b4511893c0fd/crypt/zone"},{"zone":{"id":"5825447e-1b5b-4960-b202-e75853d3d250","zone_type":"crucible","addresses":["fd00:1122:3344:11c::9"],"dataset":{"id":"5825447e-1b5b-4960-b202-e75853d3d250","name":{"pool_name":"oxp_04e94454-cbd4-4cee-ad69-42372bcbabd5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::9]:32345"},"services":[{"id":"5825447e-1b5b-4960-b202-e75853d3d250","details":{"type":"crucible","address":"[fd00:1122:3344:11c::9]:32345"}}]},"root":"/pool/ext/542e0fb3-552c-4d3b-b853-da1f13b581a0/crypt/zone"},{"zone":{"id":"b937d3f0-1352-47a2-b9d1-a9ccf9c82b16","zone_type":"crucible","addresses":["fd00:1122:3344:11c::c"],"dataset":{"id":"b937d3f0-1352-47a2-b9d1-a9ccf9c82b16","name":{"pool_name":"oxp_542e0fb3-552c-4d3b-b853-da1f13b581a0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::c]:32345"},"services":[{"id":"b937d3f0-1352-47a2-b9d1-a9ccf9c82b16","details":{"type":"crucible","address":"[fd00:1122:3344:11c::c]:32345"}}]},"root":"/pool/ext/eedd1d58-4892-456f-aaf7-9d650c7921ca/crypt/zone"},{"zone":{"id":"d63a677b-8dac-44ee-89a2-cc4cb151254d","zone_type":"crucible","addresses":["fd00:1122:3344:11c::3"],"dataset":{"id":"d63a677b-8dac-44ee-89a2-cc4cb151254d","name":{"pool_name":"oxp_45b5f1ee-7b66-4d74-8364-54fa0c73775f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::3]:32345"},"services":[{"id":"d63a677b-8dac-44ee-89a2-cc4cb151254d","details":{"type":"crucible","address":"[fd00:1122:3344:11c::3]:32345"}}]},"root":"/pool/ext/8a88768a-2dd5-43b7-bd40-0db77be4d3a8/crypt/zone"},{"zone":{"id":"abcb92ea-9f17-4cd8-897b-9d0d1ef7903a","zone_type":"crucible","addresses":["fd00:1122:3344:11c::4"],"dataset":{"id":"abcb92ea-9f17-4cd8-897b-9d0d1ef7903a","name":{"pool_name":"oxp_341d49db-c06a-416d-90e1-b0a3426ed02e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::4]:32345"},"services":[{"id":"abcb92ea-9f17-4cd8-897b-9d0d1ef7903a","details":{"type":"crucible","address":"[fd00:1122:3344:11c::4]:32345"}}]},"root":"/pool/ext
/eedd1d58-4892-456f-aaf7-9d650c7921ca/crypt/zone"},{"zone":{"id":"000ac89d-db07-47ae-83cf-d9cafef013de","zone_type":"crucible","addresses":["fd00:1122:3344:11c::b"],"dataset":{"id":"000ac89d-db07-47ae-83cf-d9cafef013de","name":{"pool_name":"oxp_eedd1d58-4892-456f-aaf7-9d650c7921ca","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::b]:32345"},"services":[{"id":"000ac89d-db07-47ae-83cf-d9cafef013de","details":{"type":"crucible","address":"[fd00:1122:3344:11c::b]:32345"}}]},"root":"/pool/ext/04e94454-cbd4-4cee-ad69-42372bcbabd5/crypt/zone"},{"zone":{"id":"29e1e2e4-695e-4c05-8f0c-c16a0a61d390","zone_type":"crucible","addresses":["fd00:1122:3344:11c::7"],"dataset":{"id":"29e1e2e4-695e-4c05-8f0c-c16a0a61d390","name":{"pool_name":"oxp_19d23d27-6a33-4203-b8c1-4b0df4ac791f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::7]:32345"},"services":[{"id":"29e1e2e4-695e-4c05-8f0c-c16a0a61d390","details":{"type":"crucible","address":"[fd00:1122:3344:11c::7]:32345"}}]},"root":"/pool/ext/d2a8ed82-22ef-46d8-ad40-e1cb2cecebee/crypt/zone"},{"zone":{"id":"9fa7d7be-a6de-4d36-b56b-d1cc5ca7c82c","zone_type":"crucible","addresses":["fd00:1122:3344:11c::a"],"dataset":{"id":"9fa7d7be-a6de-4d36-b56b-d1cc5ca7c82c","name":{"pool_name":"oxp_0fd7a0b1-ed4b-4dc6-8c44-a49c9628c7e1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::a]:32345"},"services":[{"id":"9fa7d7be-a6de-4d36-b56b-d1cc5ca7c82c","details":{"type":"crucible","address":"[fd00:1122:3344:11c::a]:32345"}}]},"root":"/pool/ext/d2a8ed82-22ef-46d8-ad40-e1cb2cecebee/crypt/zone"},{"zone":{"id":"249db5f1-45e2-4a5c-a91f-cc51dbd87040","zone_type":"ntp","addresses":["fd00:1122:3344:11c::d"],"dataset":null,"services":[{"id":"249db5f1-45e2-4a5c-a91f-cc51dbd87040","details":{"type":"internal_ntp","address":"[fd00:1122:3344:11c::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/542e0fb3-552c-4d3b-b853-da1f13b581a0/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled11.json b/sled-agent/tests/old-service-ledgers/rack3-sled11.json deleted file mode 100644 index 2918c74c4b..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled11.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":5,"requests":[{"zone":{"id":"7ddd0738-59df-4b67-a41e-7f0de9827187","zone_type":"crucible","addresses":["fd00:1122:3344:11e::4"],"dataset":{"id":"7ddd0738-59df-4b67-a41e-7f0de9827187","name":{"pool_name":"oxp_09af632a-6b1b-4a18-8c91-d392da38b02f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::4]:32345"},"services":[{"id":"7ddd0738-59df-4b67-a41e-7f0de9827187","details":{"type":"crucible","address":"[fd00:1122:3344:11e::4]:32345"}}]},"root":"/pool/ext/09af632a-6b1b-4a18-8c91-d392da38b02f/crypt/zone"},{"zone":{"id":"9706189f-713a-4394-b5dc-45dcf67dc46e","zone_type":"crucible","addresses":["fd00:1122:3344:11e::9"],"dataset":{"id":"9706189f-713a-4394-b5dc-45dcf67dc46e","name":{"pool_name":"oxp_4e1837c8-91ab-4d1d-abfd-f5144d88535e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::9]:32345"},"services":[{"id":"9706189f-713a-4394-b5dc-45dcf67dc46e","details":{"type":"crucible","address":"[fd00:1122:3344:11e::9]:32345"}}]},"root":"/pool/ext/2f0d47cb-28d1-4350-8656-60c6121f773b/crypt/zone"},{"zone":{"id":"7bdd841b-5e34-4c19-9066-b12578651446","zone_type":"crucible","addresses":["fd00:1122:3344:11e::a"],"dataset":{"id":"7bdd841b-5e34-4c19-9066-b12578651446","name":{"pool_name":"oxp_78d1e7f7-8d11-4fed-8b1e-be58908aea2f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::a]:32345"},"services":[{"id":"7bdd841b-5e34-4c19-9066-b12578651446","details":{"type":"crucible","address":"[fd00:1122:3344:11e::a]:32345"}}]},"root":"/pool/ext/62c23f4b-8e7b-4cd8-9055-19c1d8bd5ac8/crypt/zone"},{"zone":{"id":"74c0f60b-de5f-4456-a85f-f992a6e10424","zone_type":"crucible","addresses":["fd00:1122:3344:11e::b"],"dataset":{"id":"74c0f60b-de5f-4456-a85f-f992a6e10424","name":{"pool_name":"oxp_3b81d709-bf10-4dd7-a2c0-759d8acc2da0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::b]:32345"},"services":[{"id":"74c0f60b-de5f-4456-a85f-f992a6e10424","details":{"type":"crucible","address":"[fd00:1122:3344:11e::b]:32345"}}]},"root":"/pool/ext/09af632a-6b1b-4a18-8c91-d392da38b02f/crypt/zone"},{"zone":{"id":"da81ce6f-bd38-440e-b966-8a743092fa21","zone_type":"crucible","addresses":["fd00:1122:3344:11e::6"],"dataset":{"id":"da81ce6f-bd38-440e-b966-8a743092fa21","name":{"pool_name":"oxp_62c23f4b-8e7b-4cd8-9055-19c1d8bd5ac8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::6]:32345"},"services":[{"id":"da81ce6f-bd38-440e-b966-8a743092fa21","details":{"type":"crucible","address":"[fd00:1122:3344:11e::6]:32345"}}]},"root":"/pool/ext/215dd02b-0de6-488a-9e65-5e588cd079fb/crypt/zone"},{"zone":{"id":"febbca37-5279-400f-a2e9-6b5271b2d2fc","zone_type":"crucible","addresses":["fd00:1122:3344:11e::7"],"dataset":{"id":"febbca37-5279-400f-a2e9-6b5271b2d2fc","name":{"pool_name":"oxp_fb33e773-fb93-41a0-8078-b653b9078dda","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::7]:32345"},"services":[{"id":"febbca37-5279-400f-a2e9-6b5271b2d2fc","details":{"type":"crucible","address":"[fd00:1122:3344:11e::7]:32345"}}]},"root":"/pool/ext/2f0d47cb-28d1-4350-8656-60c6121f773b/crypt/zone"},{"zone":{"id":"5100e222-5ea4-4e67-9040-679137e666c8","zone_type":"crucible","addresses":["fd00:1122:3344:11e::5"],"dataset":{"id":"5100e222-5ea4-4e67-9040-679137e666c8","name":{"pool_name":"oxp_23767587-2253-431b-8944-18b9bfefcb3d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::5]:32345"},"services":[{"id":"5100e222-5ea4-4e67-9040-679137e666c8","details":{"type":"crucible","address":"[fd00:1122:3344:11e::5]:32345"}}]},"root":"/pool/ext
/3b81d709-bf10-4dd7-a2c0-759d8acc2da0/crypt/zone"},{"zone":{"id":"c7ec3bc8-08ca-4901-a45e-0d68db72c6a7","zone_type":"crucible","addresses":["fd00:1122:3344:11e::3"],"dataset":{"id":"c7ec3bc8-08ca-4901-a45e-0d68db72c6a7","name":{"pool_name":"oxp_2f0d47cb-28d1-4350-8656-60c6121f773b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::3]:32345"},"services":[{"id":"c7ec3bc8-08ca-4901-a45e-0d68db72c6a7","details":{"type":"crucible","address":"[fd00:1122:3344:11e::3]:32345"}}]},"root":"/pool/ext/215dd02b-0de6-488a-9e65-5e588cd079fb/crypt/zone"},{"zone":{"id":"1fc80dd3-0fd9-4403-96bd-5bbf9eb0f15a","zone_type":"crucible","addresses":["fd00:1122:3344:11e::c"],"dataset":{"id":"1fc80dd3-0fd9-4403-96bd-5bbf9eb0f15a","name":{"pool_name":"oxp_2c932d54-41fb-4ffe-a57f-0479b9e5841e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::c]:32345"},"services":[{"id":"1fc80dd3-0fd9-4403-96bd-5bbf9eb0f15a","details":{"type":"crucible","address":"[fd00:1122:3344:11e::c]:32345"}}]},"root":"/pool/ext/3b81d709-bf10-4dd7-a2c0-759d8acc2da0/crypt/zone"},{"zone":{"id":"4eacc68d-5699-440a-ab33-c75f259e4cc3","zone_type":"crucible","addresses":["fd00:1122:3344:11e::8"],"dataset":{"id":"4eacc68d-5699-440a-ab33-c75f259e4cc3","name":{"pool_name":"oxp_215dd02b-0de6-488a-9e65-5e588cd079fb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::8]:32345"},"services":[{"id":"4eacc68d-5699-440a-ab33-c75f259e4cc3","details":{"type":"crucible","address":"[fd00:1122:3344:11e::8]:32345"}}]},"root":"/pool/ext/4e1837c8-91ab-4d1d-abfd-f5144d88535e/crypt/zone"},{"zone":{"id":"cb901d3e-8811-4c4c-a274-a44130501ecf","zone_type":"ntp","addresses":["fd00:1122:3344:11e::d"],"dataset":null,"services":[{"id":"cb901d3e-8811-4c4c-a274-a44130501ecf","details":{"type":"boundary_ntp","address":"[fd00:1122:3344:11e::d]:123","ntp_servers":["time.cloudflare.com"],"dns_servers":["1.1.1.1","8.8.8.8"],"domain":null,"nic":{"id":"bcf9d9eb-b4ba-4fd5-91e0-55a3414ae049","kind":{"type":"service","id":"cb901d3e-8811-4c4c-a274-a44130501ecf"},"name":"ntp-cb901d3e-8811-4c4c-a274-a44130501ecf","ip":"172.30.3.6","mac":"A8:40:25:FF:D5:2F","subnet":"172.30.3.0/24","vni":100,"primary":true,"slot":0},"snat_cfg":{"ip":"45.154.216.39","first_port":16384,"last_port":32767}}}]},"root":"/pool/ext/23767587-2253-431b-8944-18b9bfefcb3d/crypt/zone"},{"zone":{"id":"be4aada9-d160-401d-a630-a0764c039702","zone_type":"internal_dns","addresses":["fd00:1122:3344:2::1"],"dataset":{"id":"be4aada9-d160-401d-a630-a0764c039702","name":{"pool_name":"oxp_2f0d47cb-28d1-4350-8656-60c6121f773b","kind":{"type":"internal_dns"}},"service_address":"[fd00:1122:3344:2::1]:5353"},"services":[{"id":"be4aada9-d160-401d-a630-a0764c039702","details":{"type":"internal_dns","http_address":"[fd00:1122:3344:2::1]:5353","dns_address":"[fd00:1122:3344:2::1]:53","gz_address":"fd00:1122:3344:2::2","gz_address_index":1}}]},"root":"/pool/ext/78d1e7f7-8d11-4fed-8b1e-be58908aea2f/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled12.json b/sled-agent/tests/old-service-ledgers/rack3-sled12.json deleted file mode 100644 index c81f586e01..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled12.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"d8f1b9d2-fa2e-4f03-bbea-2039448d7792","zone_type":"crucible","addresses":["fd00:1122:3344:112::5"],"dataset":{"id":"d8f1b9d2-fa2e-4f03-bbea-2039448d7792","name":{"pool_name":"oxp_7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::5]:32345"},"services":[{"id":"d8f1b9d2-fa2e-4f03-bbea-2039448d7792","details":{"type":"crucible","address":"[fd00:1122:3344:112::5]:32345"}}]},"root":"/pool/ext/78d9f0ae-8e7f-450e-abc2-76b983efa5cd/crypt/zone"},{"zone":{"id":"2074a935-c0b3-4c4f-aae5-a29adae3e1ac","zone_type":"crucible","addresses":["fd00:1122:3344:112::8"],"dataset":{"id":"2074a935-c0b3-4c4f-aae5-a29adae3e1ac","name":{"pool_name":"oxp_ac663368-45fb-447c-811e-561c68e37bdd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::8]:32345"},"services":[{"id":"2074a935-c0b3-4c4f-aae5-a29adae3e1ac","details":{"type":"crucible","address":"[fd00:1122:3344:112::8]:32345"}}]},"root":"/pool/ext/ac663368-45fb-447c-811e-561c68e37bdd/crypt/zone"},{"zone":{"id":"2885d3c7-ad7d-445c-8630-dc6c81f8caa0","zone_type":"crucible","addresses":["fd00:1122:3344:112::a"],"dataset":{"id":"2885d3c7-ad7d-445c-8630-dc6c81f8caa0","name":{"pool_name":"oxp_8e82e8da-e1c5-4867-bc1c-b5441f9c1010","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::a]:32345"},"services":[{"id":"2885d3c7-ad7d-445c-8630-dc6c81f8caa0","details":{"type":"crucible","address":"[fd00:1122:3344:112::a]:32345"}}]},"root":"/pool/ext/8e82e8da-e1c5-4867-bc1c-b5441f9c1010/crypt/zone"},{"zone":{"id":"1eca241b-6868-4c59-876b-58356654f3b5","zone_type":"crucible","addresses":["fd00:1122:3344:112::c"],"dataset":{"id":"1eca241b-6868-4c59-876b-58356654f3b5","name":{"pool_name":"oxp_fde16c69-aa47-4a15-bb3f-3a5861ae45bd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::c]:32345"},"services":[{"id":"1eca241b-6868-4c59-876b-58356654f3b5","details":{"type":"crucible","address":"[fd00:1122:3344:112::c]:32345"}}]},"root":"/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone"},{"zone":{"id":"cc656f2e-8542-4986-8524-2f55984939c1","zone_type":"crucible","addresses":["fd00:1122:3344:112::d"],"dataset":{"id":"cc656f2e-8542-4986-8524-2f55984939c1","name":{"pool_name":"oxp_21e6d0f9-887e-4d6f-9a00-4cd61139eea6","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::d]:32345"},"services":[{"id":"cc656f2e-8542-4986-8524-2f55984939c1","details":{"type":"crucible","address":"[fd00:1122:3344:112::d]:32345"}}]},"root":"/pool/ext/21e6d0f9-887e-4d6f-9a00-4cd61139eea6/crypt/zone"},{"zone":{"id":"dfb1ebce-a4c7-4b50-9435-9a79b884c1af","zone_type":"clickhouse","addresses":["fd00:1122:3344:112::3"],"dataset":{"id":"dfb1ebce-a4c7-4b50-9435-9a79b884c1af","name":{"pool_name":"oxp_4f045315-de51-46ed-a011-16496615278f","kind":{"type":"clickhouse"}},"service_address":"[fd00:1122:3344:112::3]:8123"},"services":[{"id":"dfb1ebce-a4c7-4b50-9435-9a79b884c1af","details":{"type":"clickhouse","address":"[fd00:1122:3344:112::3]:8123"}}]},"root":"/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone"},{"zone":{"id":"a95d90ed-b2b1-4a5d-8d0d-4195b34bc764","zone_type":"crucible","addresses":["fd00:1122:3344:112::6"],"dataset":{"id":"a95d90ed-b2b1-4a5d-8d0d-4195b34bc764","name":{"pool_name":"oxp_d2c77c69-14d7-442e-8b47-a0d7af5a0e7e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::6]:32345"},"services":[{"id":"a95d90ed-b2b1-4a5d-8d0d-4195b34bc764","details":{"type":"crucible","address":"[fd00:1122:3344:112::6]:32345"}}]},"root":"/pool
/ext/fad56ff1-ad9f-4215-b584-522eab18cf7b/crypt/zone"},{"zone":{"id":"1d3ebc90-d5a5-4cb0-ae90-50bb2163ae13","zone_type":"crucible","addresses":["fd00:1122:3344:112::b"],"dataset":{"id":"1d3ebc90-d5a5-4cb0-ae90-50bb2163ae13","name":{"pool_name":"oxp_fad56ff1-ad9f-4215-b584-522eab18cf7b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::b]:32345"},"services":[{"id":"1d3ebc90-d5a5-4cb0-ae90-50bb2163ae13","details":{"type":"crucible","address":"[fd00:1122:3344:112::b]:32345"}}]},"root":"/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone"},{"zone":{"id":"7af9f38b-0c7a-402e-8db3-7c7fb50b4665","zone_type":"crucible","addresses":["fd00:1122:3344:112::9"],"dataset":{"id":"7af9f38b-0c7a-402e-8db3-7c7fb50b4665","name":{"pool_name":"oxp_d0693580-5c5a-449f-803f-ce7188ebc580","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::9]:32345"},"services":[{"id":"7af9f38b-0c7a-402e-8db3-7c7fb50b4665","details":{"type":"crucible","address":"[fd00:1122:3344:112::9]:32345"}}]},"root":"/pool/ext/d2c77c69-14d7-442e-8b47-a0d7af5a0e7e/crypt/zone"},{"zone":{"id":"94d9bb0a-ecd2-4501-b960-60982f55ad12","zone_type":"crucible","addresses":["fd00:1122:3344:112::7"],"dataset":{"id":"94d9bb0a-ecd2-4501-b960-60982f55ad12","name":{"pool_name":"oxp_78d9f0ae-8e7f-450e-abc2-76b983efa5cd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::7]:32345"},"services":[{"id":"94d9bb0a-ecd2-4501-b960-60982f55ad12","details":{"type":"crucible","address":"[fd00:1122:3344:112::7]:32345"}}]},"root":"/pool/ext/ac663368-45fb-447c-811e-561c68e37bdd/crypt/zone"},{"zone":{"id":"277c1105-576e-4ec1-8e2c-cbae2f5ac9f6","zone_type":"crucible","addresses":["fd00:1122:3344:112::4"],"dataset":{"id":"277c1105-576e-4ec1-8e2c-cbae2f5ac9f6","name":{"pool_name":"oxp_4f045315-de51-46ed-a011-16496615278f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::4]:32345"},"services":[{"id":"277c1105-576e-4ec1-8e2c-cbae2f5ac9f6","details":{"type":"crucible","address":"[fd00:1122:3344:112::4]:32345"}}]},"root":"/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone"},{"zone":{"id":"555c3407-a76c-4ea4-a17a-a670d85a59b0","zone_type":"ntp","addresses":["fd00:1122:3344:112::e"],"dataset":null,"services":[{"id":"555c3407-a76c-4ea4-a17a-a670d85a59b0","details":{"type":"internal_ntp","address":"[fd00:1122:3344:112::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/8e82e8da-e1c5-4867-bc1c-b5441f9c1010/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled13.json b/sled-agent/tests/old-service-ledgers/rack3-sled13.json deleted file mode 100644 index ab151a828e..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled13.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":5,"requests":[{"zone":{"id":"fbcf51c9-a732-4a03-8c19-cfb5b819cb7a","zone_type":"crucible","addresses":["fd00:1122:3344:104::5"],"dataset":{"id":"fbcf51c9-a732-4a03-8c19-cfb5b819cb7a","name":{"pool_name":"oxp_382a2961-cd27-4a9c-901d-468a45ff5708","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::5]:32345"},"services":[{"id":"fbcf51c9-a732-4a03-8c19-cfb5b819cb7a","details":{"type":"crucible","address":"[fd00:1122:3344:104::5]:32345"}}]},"root":"/pool/ext/e99994ae-61ca-4742-a02c-eb0a8a5b69ff/crypt/zone"},{"zone":{"id":"7f8a5026-1f1d-4ab3-8c04-077bfda2f815","zone_type":"crucible","addresses":["fd00:1122:3344:104::4"],"dataset":{"id":"7f8a5026-1f1d-4ab3-8c04-077bfda2f815","name":{"pool_name":"oxp_9c99b9b6-8018-455e-a58a-c048ddd3e11b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::4]:32345"},"services":[{"id":"7f8a5026-1f1d-4ab3-8c04-077bfda2f815","details":{"type":"crucible","address":"[fd00:1122:3344:104::4]:32345"}}]},"root":"/pool/ext/22c79e54-37ef-4ad2-a6cb-a7ee3e4f7167/crypt/zone"},{"zone":{"id":"6d45d856-0e49-4eb7-ad76-989a9ae636a2","zone_type":"crucible","addresses":["fd00:1122:3344:104::3"],"dataset":{"id":"6d45d856-0e49-4eb7-ad76-989a9ae636a2","name":{"pool_name":"oxp_b74a84fa-b4c8-4c5f-92f4-f4e62a0a311d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::3]:32345"},"services":[{"id":"6d45d856-0e49-4eb7-ad76-989a9ae636a2","details":{"type":"crucible","address":"[fd00:1122:3344:104::3]:32345"}}]},"root":"/pool/ext/9c99b9b6-8018-455e-a58a-c048ddd3e11b/crypt/zone"},{"zone":{"id":"c8dc7fff-72c8-49eb-a552-d605f8655134","zone_type":"crucible","addresses":["fd00:1122:3344:104::6"],"dataset":{"id":"c8dc7fff-72c8-49eb-a552-d605f8655134","name":{"pool_name":"oxp_22c79e54-37ef-4ad2-a6cb-a7ee3e4f7167","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::6]:32345"},"services":[{"id":"c8dc7fff-72c8-49eb-a552-d605f8655134","details":{"type":"crucible","address":"[fd00:1122:3344:104::6]:32345"}}]},"root":"/pool/ext/22c79e54-37ef-4ad2-a6cb-a7ee3e4f7167/crypt/zone"},{"zone":{"id":"128a90f5-8889-4665-8343-2c7098f2922c","zone_type":"crucible","addresses":["fd00:1122:3344:104::7"],"dataset":{"id":"128a90f5-8889-4665-8343-2c7098f2922c","name":{"pool_name":"oxp_8b3d0b51-c6a5-4d2c-827a-0d0d1471136d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::7]:32345"},"services":[{"id":"128a90f5-8889-4665-8343-2c7098f2922c","details":{"type":"crucible","address":"[fd00:1122:3344:104::7]:32345"}}]},"root":"/pool/ext/29cd042b-e772-4d26-ac85-ef16009950bd/crypt/zone"},{"zone":{"id":"a72f1878-3b03-4267-9024-5df5ebae69de","zone_type":"crucible","addresses":["fd00:1122:3344:104::a"],"dataset":{"id":"a72f1878-3b03-4267-9024-5df5ebae69de","name":{"pool_name":"oxp_e99994ae-61ca-4742-a02c-eb0a8a5b69ff","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::a]:32345"},"services":[{"id":"a72f1878-3b03-4267-9024-5df5ebae69de","details":{"type":"crucible","address":"[fd00:1122:3344:104::a]:32345"}}]},"root":"/pool/ext/8b3d0b51-c6a5-4d2c-827a-0d0d1471136d/crypt/zone"},{"zone":{"id":"6a9165a2-9b66-485a-aaf0-70d89d60bb6c","zone_type":"crucible","addresses":["fd00:1122:3344:104::b"],"dataset":{"id":"6a9165a2-9b66-485a-aaf0-70d89d60bb6c","name":{"pool_name":"oxp_6a02f05f-e400-4c80-8df8-89aaecb6c12b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::b]:32345"},"services":[{"id":"6a9165a2-9b66-485a-aaf0-70d89d60bb6c","details":{"type":"crucible","address":"[fd00:1122:3344:104::b]:32345"}}]},"root":"/pool/ext
/9c99b9b6-8018-455e-a58a-c048ddd3e11b/crypt/zone"},{"zone":{"id":"9677c4ed-96bc-4dcb-ae74-f7a3e9d2b5e2","zone_type":"crucible","addresses":["fd00:1122:3344:104::c"],"dataset":{"id":"9677c4ed-96bc-4dcb-ae74-f7a3e9d2b5e2","name":{"pool_name":"oxp_7c30978f-ee87-4e53-8fdf-3455e5e851b7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::c]:32345"},"services":[{"id":"9677c4ed-96bc-4dcb-ae74-f7a3e9d2b5e2","details":{"type":"crucible","address":"[fd00:1122:3344:104::c]:32345"}}]},"root":"/pool/ext/29cd042b-e772-4d26-ac85-ef16009950bd/crypt/zone"},{"zone":{"id":"179039e7-3ffd-4b76-9379-bef41d42a5ff","zone_type":"crucible","addresses":["fd00:1122:3344:104::8"],"dataset":{"id":"179039e7-3ffd-4b76-9379-bef41d42a5ff","name":{"pool_name":"oxp_4db7e002-e112-4bfc-a41e-8ae26991b01e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::8]:32345"},"services":[{"id":"179039e7-3ffd-4b76-9379-bef41d42a5ff","details":{"type":"crucible","address":"[fd00:1122:3344:104::8]:32345"}}]},"root":"/pool/ext/8b3d0b51-c6a5-4d2c-827a-0d0d1471136d/crypt/zone"},{"zone":{"id":"6067e31e-b6a3-4114-9e49-0296adc8e7af","zone_type":"crucible","addresses":["fd00:1122:3344:104::9"],"dataset":{"id":"6067e31e-b6a3-4114-9e49-0296adc8e7af","name":{"pool_name":"oxp_29cd042b-e772-4d26-ac85-ef16009950bd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::9]:32345"},"services":[{"id":"6067e31e-b6a3-4114-9e49-0296adc8e7af","details":{"type":"crucible","address":"[fd00:1122:3344:104::9]:32345"}}]},"root":"/pool/ext/9c99b9b6-8018-455e-a58a-c048ddd3e11b/crypt/zone"},{"zone":{"id":"440dd615-e11f-4a5d-aeb4-dcf88bb314de","zone_type":"ntp","addresses":["fd00:1122:3344:104::d"],"dataset":null,"services":[{"id":"440dd615-e11f-4a5d-aeb4-dcf88bb314de","details":{"type":"boundary_ntp","address":"[fd00:1122:3344:104::d]:123","ntp_servers":["time.cloudflare.com"],"dns_servers":["1.1.1.1","8.8.8.8"],"domain":null,"nic":{"id":"0b52fe1b-f4cc-43b1-9ac3-4ebb4ab60133","kind":{"type":"service","id":"440dd615-e11f-4a5d-aeb4-dcf88bb314de"},"name":"ntp-440dd615-e11f-4a5d-aeb4-dcf88bb314de","ip":"172.30.3.5","mac":"A8:40:25:FF:85:1E","subnet":"172.30.3.0/24","vni":100,"primary":true,"slot":0},"snat_cfg":{"ip":"45.154.216.38","first_port":0,"last_port":16383}}}]},"root":"/pool/ext/382a2961-cd27-4a9c-901d-468a45ff5708/crypt/zone"},{"zone":{"id":"06e2de03-bd92-404c-a8ea-a13185539d24","zone_type":"internal_dns","addresses":["fd00:1122:3344:1::1"],"dataset":{"id":"06e2de03-bd92-404c-a8ea-a13185539d24","name":{"pool_name":"oxp_b74a84fa-b4c8-4c5f-92f4-f4e62a0a311d","kind":{"type":"internal_dns"}},"service_address":"[fd00:1122:3344:1::1]:5353"},"services":[{"id":"06e2de03-bd92-404c-a8ea-a13185539d24","details":{"type":"internal_dns","http_address":"[fd00:1122:3344:1::1]:5353","dns_address":"[fd00:1122:3344:1::1]:53","gz_address":"fd00:1122:3344:1::2","gz_address_index":0}}]},"root":"/pool/ext/e99994ae-61ca-4742-a02c-eb0a8a5b69ff/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled14.json b/sled-agent/tests/old-service-ledgers/rack3-sled14.json deleted file mode 100644 index 89c12a015f..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled14.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"ac35afab-a312-43c3-a42d-04b8e99fcbde","zone_type":"crucible","addresses":["fd00:1122:3344:111::4"],"dataset":{"id":"ac35afab-a312-43c3-a42d-04b8e99fcbde","name":{"pool_name":"oxp_6601065c-c172-4118-81b4-16adde7e9401","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::4]:32345"},"services":[{"id":"ac35afab-a312-43c3-a42d-04b8e99fcbde","details":{"type":"crucible","address":"[fd00:1122:3344:111::4]:32345"}}]},"root":"/pool/ext/24d7e250-9fc6-459e-8155-30f8e8ccb28c/crypt/zone"},{"zone":{"id":"6cd94da2-35b9-4683-a931-29ad4a5ed0ef","zone_type":"crucible","addresses":["fd00:1122:3344:111::c"],"dataset":{"id":"6cd94da2-35b9-4683-a931-29ad4a5ed0ef","name":{"pool_name":"oxp_58276eba-a53c-4ef3-b374-4cdcde4d6e12","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::c]:32345"},"services":[{"id":"6cd94da2-35b9-4683-a931-29ad4a5ed0ef","details":{"type":"crucible","address":"[fd00:1122:3344:111::c]:32345"}}]},"root":"/pool/ext/24d7e250-9fc6-459e-8155-30f8e8ccb28c/crypt/zone"},{"zone":{"id":"41f07d39-fcc0-4796-8b7c-7cfcd9135f78","zone_type":"crucible","addresses":["fd00:1122:3344:111::9"],"dataset":{"id":"41f07d39-fcc0-4796-8b7c-7cfcd9135f78","name":{"pool_name":"oxp_4b90abdc-3348-4158-bedc-5bcd56e281d8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::9]:32345"},"services":[{"id":"41f07d39-fcc0-4796-8b7c-7cfcd9135f78","details":{"type":"crucible","address":"[fd00:1122:3344:111::9]:32345"}}]},"root":"/pool/ext/8e955f54-fbef-4021-9eec-457825468813/crypt/zone"},{"zone":{"id":"44c35566-dd64-4e4a-896e-c50aaa3df14f","zone_type":"nexus","addresses":["fd00:1122:3344:111::3"],"dataset":null,"services":[{"id":"44c35566-dd64-4e4a-896e-c50aaa3df14f","details":{"type":"nexus","internal_address":"[fd00:1122:3344:111::3]:12221","external_ip":"45.154.216.37","nic":{"id":"6f824d20-6ce0-4e8b-9ce3-b12dd2b59913","kind":{"type":"service","id":"44c35566-dd64-4e4a-896e-c50aaa3df14f"},"name":"nexus-44c35566-dd64-4e4a-896e-c50aaa3df14f","ip":"172.30.2.7","mac":"A8:40:25:FF:E8:5F","subnet":"172.30.2.0/24","vni":100,"primary":true,"slot":0},"external_tls":true,"external_dns_servers":["1.1.1.1","8.8.8.8"]}}]},"root":"/pool/ext/435d7a1b-2865-4d49-903f-a68f464ade4d/crypt/zone"},{"zone":{"id":"e5020d24-8652-456b-bf92-cd7d255a34c5","zone_type":"crucible","addresses":["fd00:1122:3344:111::6"],"dataset":{"id":"e5020d24-8652-456b-bf92-cd7d255a34c5","name":{"pool_name":"oxp_f6925045-363d-4e18-9bde-ee2987b33d21","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::6]:32345"},"services":[{"id":"e5020d24-8652-456b-bf92-cd7d255a34c5","details":{"type":"crucible","address":"[fd00:1122:3344:111::6]:32345"}}]},"root":"/pool/ext/6601065c-c172-4118-81b4-16adde7e9401/crypt/zone"},{"zone":{"id":"8f25f258-afd7-4351-83e4-24220ec0c251","zone_type":"crucible","addresses":["fd00:1122:3344:111::8"],"dataset":{"id":"8f25f258-afd7-4351-83e4-24220ec0c251","name":{"pool_name":"oxp_8e955f54-fbef-4021-9eec-457825468813","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::8]:32345"},"services":[{"id":"8f25f258-afd7-4351-83e4-24220ec0c251","details":{"type":"crucible","address":"[fd00:1122:3344:111::8]:32345"}}]},"root":"/pool/ext/6601065c-c172-4118-81b4-16adde7e9401/crypt/zone"},{"zone":{"id":"26aa50ec-d70a-47ea-85fc-e55c62a2e0c6","zone_type":"crucible","addresses":["fd00:1122:3344:111::5"],"dataset":{"id":"26aa50ec-d70a-47ea-85fc-e55c62a2e0c6","name":{"pool_name":"oxp_24d7e250-9fc6-459e-8155-30f8e8ccb28c","kind":{"type":"crucible"}},"servic
e_address":"[fd00:1122:3344:111::5]:32345"},"services":[{"id":"26aa50ec-d70a-47ea-85fc-e55c62a2e0c6","details":{"type":"crucible","address":"[fd00:1122:3344:111::5]:32345"}}]},"root":"/pool/ext/435d7a1b-2865-4d49-903f-a68f464ade4d/crypt/zone"},{"zone":{"id":"68dc212f-a96a-420f-8334-b11ee5d7cb95","zone_type":"crucible","addresses":["fd00:1122:3344:111::7"],"dataset":{"id":"68dc212f-a96a-420f-8334-b11ee5d7cb95","name":{"pool_name":"oxp_4353b00b-937e-4d07-aea6-014c57b6f12c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::7]:32345"},"services":[{"id":"68dc212f-a96a-420f-8334-b11ee5d7cb95","details":{"type":"crucible","address":"[fd00:1122:3344:111::7]:32345"}}]},"root":"/pool/ext/24d7e250-9fc6-459e-8155-30f8e8ccb28c/crypt/zone"},{"zone":{"id":"475140fa-a5dc-4ec1-876d-751c48adfc37","zone_type":"crucible","addresses":["fd00:1122:3344:111::a"],"dataset":{"id":"475140fa-a5dc-4ec1-876d-751c48adfc37","name":{"pool_name":"oxp_ee55b053-6874-4e20-86b5-2e105e64c068","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::a]:32345"},"services":[{"id":"475140fa-a5dc-4ec1-876d-751c48adfc37","details":{"type":"crucible","address":"[fd00:1122:3344:111::a]:32345"}}]},"root":"/pool/ext/ee55b053-6874-4e20-86b5-2e105e64c068/crypt/zone"},{"zone":{"id":"09d5a8c9-00db-4914-a2c6-7ae3d2da4558","zone_type":"crucible","addresses":["fd00:1122:3344:111::d"],"dataset":{"id":"09d5a8c9-00db-4914-a2c6-7ae3d2da4558","name":{"pool_name":"oxp_9ab5aba5-47dc-4bc4-8f6d-7cbe0f98a9a2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::d]:32345"},"services":[{"id":"09d5a8c9-00db-4914-a2c6-7ae3d2da4558","details":{"type":"crucible","address":"[fd00:1122:3344:111::d]:32345"}}]},"root":"/pool/ext/8e955f54-fbef-4021-9eec-457825468813/crypt/zone"},{"zone":{"id":"014f6a39-ad64-4f0a-9fef-01ca0d184cbf","zone_type":"crucible","addresses":["fd00:1122:3344:111::b"],"dataset":{"id":"014f6a39-ad64-4f0a-9fef-01ca0d184cbf","name":{"pool_name":"oxp_435d7a1b-2865-4d49-903f-a68f464ade4d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::b]:32345"},"services":[{"id":"014f6a39-ad64-4f0a-9fef-01ca0d184cbf","details":{"type":"crucible","address":"[fd00:1122:3344:111::b]:32345"}}]},"root":"/pool/ext/f6925045-363d-4e18-9bde-ee2987b33d21/crypt/zone"},{"zone":{"id":"aceaf348-ba07-4965-a543-63a800826fe8","zone_type":"ntp","addresses":["fd00:1122:3344:111::e"],"dataset":null,"services":[{"id":"aceaf348-ba07-4965-a543-63a800826fe8","details":{"type":"internal_ntp","address":"[fd00:1122:3344:111::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/8e955f54-fbef-4021-9eec-457825468813/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled15.json b/sled-agent/tests/old-service-ledgers/rack3-sled15.json deleted file mode 100644 index 880f29409e..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled15.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"09a9ecee-1e7c-4819-b27a-73bb61099ce7","zone_type":"external_dns","addresses":["fd00:1122:3344:114::3"],"dataset":{"id":"09a9ecee-1e7c-4819-b27a-73bb61099ce7","name":{"pool_name":"oxp_b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e","kind":{"type":"external_dns"}},"service_address":"[fd00:1122:3344:114::3]:5353"},"services":[{"id":"09a9ecee-1e7c-4819-b27a-73bb61099ce7","details":{"type":"external_dns","http_address":"[fd00:1122:3344:114::3]:5353","dns_address":"45.154.216.33:53","nic":{"id":"400ca77b-7fee-47d5-8f17-1f4b9c729f27","kind":{"type":"service","id":"09a9ecee-1e7c-4819-b27a-73bb61099ce7"},"name":"external-dns-09a9ecee-1e7c-4819-b27a-73bb61099ce7","ip":"172.30.1.5","mac":"A8:40:25:FF:B7:C7","subnet":"172.30.1.0/24","vni":100,"primary":true,"slot":0}}}]},"root":"/pool/ext/9e878b1e-bf92-4155-8162-640851c2f5d5/crypt/zone"},{"zone":{"id":"1792e003-55f7-49b8-906c-4160db91bc23","zone_type":"crucible","addresses":["fd00:1122:3344:114::5"],"dataset":{"id":"1792e003-55f7-49b8-906c-4160db91bc23","name":{"pool_name":"oxp_7f3a760f-a4c0-456f-8a22-2d06ecac1022","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::5]:32345"},"services":[{"id":"1792e003-55f7-49b8-906c-4160db91bc23","details":{"type":"crucible","address":"[fd00:1122:3344:114::5]:32345"}}]},"root":"/pool/ext/76f09ad5-c96c-4748-bbe4-71afaea7bc5e/crypt/zone"},{"zone":{"id":"73bc7c0e-1034-449f-8920-4a1f418653ff","zone_type":"crucible","addresses":["fd00:1122:3344:114::8"],"dataset":{"id":"73bc7c0e-1034-449f-8920-4a1f418653ff","name":{"pool_name":"oxp_e87037be-1cdf-4c6e-a8a3-c27b830eaef9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::8]:32345"},"services":[{"id":"73bc7c0e-1034-449f-8920-4a1f418653ff","details":{"type":"crucible","address":"[fd00:1122:3344:114::8]:32345"}}]},"root":"/pool/ext/b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e/crypt/zone"},{"zone":{"id":"06dc6619-6251-4543-9a10-da1698af49d5","zone_type":"crucible","addresses":["fd00:1122:3344:114::9"],"dataset":{"id":"06dc6619-6251-4543-9a10-da1698af49d5","name":{"pool_name":"oxp_ee34c530-ce70-4f1a-8c97-d0ebb77ccfc8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::9]:32345"},"services":[{"id":"06dc6619-6251-4543-9a10-da1698af49d5","details":{"type":"crucible","address":"[fd00:1122:3344:114::9]:32345"}}]},"root":"/pool/ext/9e878b1e-bf92-4155-8162-640851c2f5d5/crypt/zone"},{"zone":{"id":"0d796c52-37ca-490d-b42f-dcc22fe5fd6b","zone_type":"crucible","addresses":["fd00:1122:3344:114::c"],"dataset":{"id":"0d796c52-37ca-490d-b42f-dcc22fe5fd6b","name":{"pool_name":"oxp_9ec2b893-d486-4b24-a077-1a297f9eb15f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::c]:32345"},"services":[{"id":"0d796c52-37ca-490d-b42f-dcc22fe5fd6b","details":{"type":"crucible","address":"[fd00:1122:3344:114::c]:32345"}}]},"root":"/pool/ext/9e72c0e2-4895-4791-b606-2f18e432fb69/crypt/zone"},{"zone":{"id":"91d0011f-de44-4823-bc26-a447affa39bc","zone_type":"crucible","addresses":["fd00:1122:3344:114::a"],"dataset":{"id":"91d0011f-de44-4823-bc26-a447affa39bc","name":{"pool_name":"oxp_85e81a14-031d-4a63-a91f-981c64e91f60","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::a]:32345"},"services":[{"id":"91d0011f-de44-4823-bc26-a447affa39bc","details":{"type":"crucible","address":"[fd00:1122:3344:114::a]:32345"}}]},"root":"/pool/ext/b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e/crypt/zone"},{"zone":{"id":"0c44a2f1-559a-459c-9931-e0e7964d41c6","zone_type":"crucible","addresses":["fd00:1122:3344:114::b"],"dataset":{"id
":"0c44a2f1-559a-459c-9931-e0e7964d41c6","name":{"pool_name":"oxp_76f09ad5-c96c-4748-bbe4-71afaea7bc5e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::b]:32345"},"services":[{"id":"0c44a2f1-559a-459c-9931-e0e7964d41c6","details":{"type":"crucible","address":"[fd00:1122:3344:114::b]:32345"}}]},"root":"/pool/ext/e87037be-1cdf-4c6e-a8a3-c27b830eaef9/crypt/zone"},{"zone":{"id":"ea363819-96f6-4fb6-a203-f18414f1c60e","zone_type":"crucible","addresses":["fd00:1122:3344:114::4"],"dataset":{"id":"ea363819-96f6-4fb6-a203-f18414f1c60e","name":{"pool_name":"oxp_b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::4]:32345"},"services":[{"id":"ea363819-96f6-4fb6-a203-f18414f1c60e","details":{"type":"crucible","address":"[fd00:1122:3344:114::4]:32345"}}]},"root":"/pool/ext/b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e/crypt/zone"},{"zone":{"id":"21592c39-da6b-4527-842e-edeeceffafa1","zone_type":"crucible","addresses":["fd00:1122:3344:114::6"],"dataset":{"id":"21592c39-da6b-4527-842e-edeeceffafa1","name":{"pool_name":"oxp_9e72c0e2-4895-4791-b606-2f18e432fb69","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::6]:32345"},"services":[{"id":"21592c39-da6b-4527-842e-edeeceffafa1","details":{"type":"crucible","address":"[fd00:1122:3344:114::6]:32345"}}]},"root":"/pool/ext/7aff8429-b65d-4a53-a796-7221ac7581a9/crypt/zone"},{"zone":{"id":"f33b1263-f1b2-43a6-a8aa-5f8570dd4e72","zone_type":"crucible","addresses":["fd00:1122:3344:114::7"],"dataset":{"id":"f33b1263-f1b2-43a6-a8aa-5f8570dd4e72","name":{"pool_name":"oxp_9e878b1e-bf92-4155-8162-640851c2f5d5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::7]:32345"},"services":[{"id":"f33b1263-f1b2-43a6-a8aa-5f8570dd4e72","details":{"type":"crucible","address":"[fd00:1122:3344:114::7]:32345"}}]},"root":"/pool/ext/7f3a760f-a4c0-456f-8a22-2d06ecac1022/crypt/zone"},{"zone":{"id":"6f42b469-5a36-4048-a152-e884f7e8a206","zone_type":"crucible","addresses":["fd00:1122:3344:114::d"],"dataset":{"id":"6f42b469-5a36-4048-a152-e884f7e8a206","name":{"pool_name":"oxp_7aff8429-b65d-4a53-a796-7221ac7581a9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::d]:32345"},"services":[{"id":"6f42b469-5a36-4048-a152-e884f7e8a206","details":{"type":"crucible","address":"[fd00:1122:3344:114::d]:32345"}}]},"root":"/pool/ext/9e72c0e2-4895-4791-b606-2f18e432fb69/crypt/zone"},{"zone":{"id":"ad77d594-8f78-4d33-a5e4-59887060178e","zone_type":"ntp","addresses":["fd00:1122:3344:114::e"],"dataset":null,"services":[{"id":"ad77d594-8f78-4d33-a5e4-59887060178e","details":{"type":"internal_ntp","address":"[fd00:1122:3344:114::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/85e81a14-031d-4a63-a91f-981c64e91f60/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled16.json b/sled-agent/tests/old-service-ledgers/rack3-sled16.json deleted file mode 100644 index 3a1cbeb411..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled16.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"dcb9a4ae-2c89-4a74-905b-b7936ff49c19","zone_type":"crucible","addresses":["fd00:1122:3344:11f::9"],"dataset":{"id":"dcb9a4ae-2c89-4a74-905b-b7936ff49c19","name":{"pool_name":"oxp_af509039-d27f-4095-bc9d-cecbc5c606db","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::9]:32345"},"services":[{"id":"dcb9a4ae-2c89-4a74-905b-b7936ff49c19","details":{"type":"crucible","address":"[fd00:1122:3344:11f::9]:32345"}}]},"root":"/pool/ext/44ee0fb4-6034-44e8-b3de-b3a44457ffca/crypt/zone"},{"zone":{"id":"dbd46f71-ec39-4b72-a77d-9d281ccb37e0","zone_type":"crucible","addresses":["fd00:1122:3344:11f::b"],"dataset":{"id":"dbd46f71-ec39-4b72-a77d-9d281ccb37e0","name":{"pool_name":"oxp_44ee0fb4-6034-44e8-b3de-b3a44457ffca","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::b]:32345"},"services":[{"id":"dbd46f71-ec39-4b72-a77d-9d281ccb37e0","details":{"type":"crucible","address":"[fd00:1122:3344:11f::b]:32345"}}]},"root":"/pool/ext/5e32c0a3-1210-402b-91fb-256946eeac2b/crypt/zone"},{"zone":{"id":"a1f30569-a5c6-4a6d-922e-241966aea142","zone_type":"crucible","addresses":["fd00:1122:3344:11f::6"],"dataset":{"id":"a1f30569-a5c6-4a6d-922e-241966aea142","name":{"pool_name":"oxp_d2133e8b-51cc-455e-89d0-5454fd4fe109","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::6]:32345"},"services":[{"id":"a1f30569-a5c6-4a6d-922e-241966aea142","details":{"type":"crucible","address":"[fd00:1122:3344:11f::6]:32345"}}]},"root":"/pool/ext/3f57835b-1469-499a-8757-7cc56acc5d49/crypt/zone"},{"zone":{"id":"a33e25ae-4e41-40f4-843d-3d12f62d8cb6","zone_type":"crucible","addresses":["fd00:1122:3344:11f::8"],"dataset":{"id":"a33e25ae-4e41-40f4-843d-3d12f62d8cb6","name":{"pool_name":"oxp_c8e4a7f4-1ae6-4683-8397-ea53475a53e8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::8]:32345"},"services":[{"id":"a33e25ae-4e41-40f4-843d-3d12f62d8cb6","details":{"type":"crucible","address":"[fd00:1122:3344:11f::8]:32345"}}]},"root":"/pool/ext/5e32c0a3-1210-402b-91fb-256946eeac2b/crypt/zone"},{"zone":{"id":"65ed75c2-2d80-4de5-a6f6-adfa6516c7cf","zone_type":"crucible","addresses":["fd00:1122:3344:11f::c"],"dataset":{"id":"65ed75c2-2d80-4de5-a6f6-adfa6516c7cf","name":{"pool_name":"oxp_3f57835b-1469-499a-8757-7cc56acc5d49","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::c]:32345"},"services":[{"id":"65ed75c2-2d80-4de5-a6f6-adfa6516c7cf","details":{"type":"crucible","address":"[fd00:1122:3344:11f::c]:32345"}}]},"root":"/pool/ext/cd8cd75c-632b-4527-889a-7ca0c080fe2c/crypt/zone"},{"zone":{"id":"bc6ccf18-6b9b-4687-8b70-c7917d972ae0","zone_type":"crucible","addresses":["fd00:1122:3344:11f::a"],"dataset":{"id":"bc6ccf18-6b9b-4687-8b70-c7917d972ae0","name":{"pool_name":"oxp_cd8cd75c-632b-4527-889a-7ca0c080fe2c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::a]:32345"},"services":[{"id":"bc6ccf18-6b9b-4687-8b70-c7917d972ae0","details":{"type":"crucible","address":"[fd00:1122:3344:11f::a]:32345"}}]},"root":"/pool/ext/5e32c0a3-1210-402b-91fb-256946eeac2b/crypt/zone"},{"zone":{"id":"06233bfe-a857-4819-aefe-212af9eeb90f","zone_type":"crucible","addresses":["fd00:1122:3344:11f::5"],"dataset":{"id":"06233bfe-a857-4819-aefe-212af9eeb90f","name":{"pool_name":"oxp_c8a1aaf1-d27c-45fd-9f8d-80ac6bf6865d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::5]:32345"},"services":[{"id":"06233bfe-a857-4819-aefe-212af9eeb90f","details":{"type":"crucible","address":"[fd00:1122:3344:11f::5]:32345"}}]},"root":"/pool/ext
/af509039-d27f-4095-bc9d-cecbc5c606db/crypt/zone"},{"zone":{"id":"0bbfef71-9eae-43b6-b5e7-0060ce9269dd","zone_type":"crucible","addresses":["fd00:1122:3344:11f::4"],"dataset":{"id":"0bbfef71-9eae-43b6-b5e7-0060ce9269dd","name":{"pool_name":"oxp_5e32c0a3-1210-402b-91fb-256946eeac2b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::4]:32345"},"services":[{"id":"0bbfef71-9eae-43b6-b5e7-0060ce9269dd","details":{"type":"crucible","address":"[fd00:1122:3344:11f::4]:32345"}}]},"root":"/pool/ext/af509039-d27f-4095-bc9d-cecbc5c606db/crypt/zone"},{"zone":{"id":"550e10ee-24d1-444f-80be-2744dd321e0f","zone_type":"crucible","addresses":["fd00:1122:3344:11f::7"],"dataset":{"id":"550e10ee-24d1-444f-80be-2744dd321e0f","name":{"pool_name":"oxp_f437ce0e-eb45-4be8-b1fe-33ed2656eb01","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::7]:32345"},"services":[{"id":"550e10ee-24d1-444f-80be-2744dd321e0f","details":{"type":"crucible","address":"[fd00:1122:3344:11f::7]:32345"}}]},"root":"/pool/ext/44ee0fb4-6034-44e8-b3de-b3a44457ffca/crypt/zone"},{"zone":{"id":"86d768f3-ece2-4956-983f-999bdb23a983","zone_type":"cockroach_db","addresses":["fd00:1122:3344:11f::3"],"dataset":{"id":"86d768f3-ece2-4956-983f-999bdb23a983","name":{"pool_name":"oxp_5e32c0a3-1210-402b-91fb-256946eeac2b","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:11f::3]:32221"},"services":[{"id":"86d768f3-ece2-4956-983f-999bdb23a983","details":{"type":"cockroach_db","address":"[fd00:1122:3344:11f::3]:32221"}}]},"root":"/pool/ext/c8a1aaf1-d27c-45fd-9f8d-80ac6bf6865d/crypt/zone"},{"zone":{"id":"2f358812-f72c-4838-a5ea-7d78d0954be0","zone_type":"ntp","addresses":["fd00:1122:3344:11f::d"],"dataset":null,"services":[{"id":"2f358812-f72c-4838-a5ea-7d78d0954be0","details":{"type":"internal_ntp","address":"[fd00:1122:3344:11f::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/f437ce0e-eb45-4be8-b1fe-33ed2656eb01/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled17.json b/sled-agent/tests/old-service-ledgers/rack3-sled17.json deleted file mode 100644 index 4063fed2e2..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled17.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"525a19a2-d4ac-418d-bdcf-2ce26e7abe70","zone_type":"crucible","addresses":["fd00:1122:3344:107::a"],"dataset":{"id":"525a19a2-d4ac-418d-bdcf-2ce26e7abe70","name":{"pool_name":"oxp_cb774d2f-ff86-4fd7-866b-17a6b10e61f0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::a]:32345"},"services":[{"id":"525a19a2-d4ac-418d-bdcf-2ce26e7abe70","details":{"type":"crucible","address":"[fd00:1122:3344:107::a]:32345"}}]},"root":"/pool/ext/e17b68b5-f50c-4fc3-b55a-80d284c6c32d/crypt/zone"},{"zone":{"id":"7af188e1-6175-4769-9e4f-2ca7a98b76f6","zone_type":"crucible","addresses":["fd00:1122:3344:107::4"],"dataset":{"id":"7af188e1-6175-4769-9e4f-2ca7a98b76f6","name":{"pool_name":"oxp_0cbbcf22-770d-4e75-9148-e6109b129093","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::4]:32345"},"services":[{"id":"7af188e1-6175-4769-9e4f-2ca7a98b76f6","details":{"type":"crucible","address":"[fd00:1122:3344:107::4]:32345"}}]},"root":"/pool/ext/b998e8df-ea69-4bdd-84cb-b7f17075b060/crypt/zone"},{"zone":{"id":"2544540f-6ffc-46c0-84bf-f42a110c02d7","zone_type":"crucible","addresses":["fd00:1122:3344:107::6"],"dataset":{"id":"2544540f-6ffc-46c0-84bf-f42a110c02d7","name":{"pool_name":"oxp_e17b68b5-f50c-4fc3-b55a-80d284c6c32d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::6]:32345"},"services":[{"id":"2544540f-6ffc-46c0-84bf-f42a110c02d7","details":{"type":"crucible","address":"[fd00:1122:3344:107::6]:32345"}}]},"root":"/pool/ext/521fa477-4d83-49a8-a5cf-c267b7f0c409/crypt/zone"},{"zone":{"id":"cfc20f72-cac2-4681-a6d8-e5a0accafbb7","zone_type":"crucible","addresses":["fd00:1122:3344:107::7"],"dataset":{"id":"cfc20f72-cac2-4681-a6d8-e5a0accafbb7","name":{"pool_name":"oxp_b998e8df-ea69-4bdd-84cb-b7f17075b060","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::7]:32345"},"services":[{"id":"cfc20f72-cac2-4681-a6d8-e5a0accafbb7","details":{"type":"crucible","address":"[fd00:1122:3344:107::7]:32345"}}]},"root":"/pool/ext/0cbbcf22-770d-4e75-9148-e6109b129093/crypt/zone"},{"zone":{"id":"e24be791-5773-425e-a3df-e35ca81570c7","zone_type":"crucible","addresses":["fd00:1122:3344:107::9"],"dataset":{"id":"e24be791-5773-425e-a3df-e35ca81570c7","name":{"pool_name":"oxp_7849c221-dc7f-43ac-ac47-bc51864e083b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::9]:32345"},"services":[{"id":"e24be791-5773-425e-a3df-e35ca81570c7","details":{"type":"crucible","address":"[fd00:1122:3344:107::9]:32345"}}]},"root":"/pool/ext/7849c221-dc7f-43ac-ac47-bc51864e083b/crypt/zone"},{"zone":{"id":"170856ee-21cf-4780-8903-175d558bc7cc","zone_type":"crucible","addresses":["fd00:1122:3344:107::3"],"dataset":{"id":"170856ee-21cf-4780-8903-175d558bc7cc","name":{"pool_name":"oxp_618e21e5-77d4-40ba-9f8e-7960e9ad92e2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::3]:32345"},"services":[{"id":"170856ee-21cf-4780-8903-175d558bc7cc","details":{"type":"crucible","address":"[fd00:1122:3344:107::3]:32345"}}]},"root":"/pool/ext/aa7a37fb-2f03-4d5c-916b-db3a4fc269ac/crypt/zone"},{"zone":{"id":"604278ff-525a-4d41-82ff-07aef3174d38","zone_type":"crucible","addresses":["fd00:1122:3344:107::5"],"dataset":{"id":"604278ff-525a-4d41-82ff-07aef3174d38","name":{"pool_name":"oxp_521fa477-4d83-49a8-a5cf-c267b7f0c409","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::5]:32345"},"services":[{"id":"604278ff-525a-4d41-82ff-07aef3174d38","details":{"type":"crucible","address":"[fd00:1122:3344:107::5]:32345"}}]},"root":"/pool/ext
/0cbbcf22-770d-4e75-9148-e6109b129093/crypt/zone"},{"zone":{"id":"d0d4fcc0-6ed0-410a-99c7-5daf34014421","zone_type":"crucible","addresses":["fd00:1122:3344:107::b"],"dataset":{"id":"d0d4fcc0-6ed0-410a-99c7-5daf34014421","name":{"pool_name":"oxp_aa7a37fb-2f03-4d5c-916b-db3a4fc269ac","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::b]:32345"},"services":[{"id":"d0d4fcc0-6ed0-410a-99c7-5daf34014421","details":{"type":"crucible","address":"[fd00:1122:3344:107::b]:32345"}}]},"root":"/pool/ext/aa7a37fb-2f03-4d5c-916b-db3a4fc269ac/crypt/zone"},{"zone":{"id":"c935df7b-2629-48ee-bc10-20508301905d","zone_type":"crucible","addresses":["fd00:1122:3344:107::c"],"dataset":{"id":"c935df7b-2629-48ee-bc10-20508301905d","name":{"pool_name":"oxp_793fd018-5fdc-4e54-9c45-f8023fa3ea18","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::c]:32345"},"services":[{"id":"c935df7b-2629-48ee-bc10-20508301905d","details":{"type":"crucible","address":"[fd00:1122:3344:107::c]:32345"}}]},"root":"/pool/ext/7849c221-dc7f-43ac-ac47-bc51864e083b/crypt/zone"},{"zone":{"id":"4ba5f3b6-8be5-4a85-bc57-a5e3b0b867d8","zone_type":"crucible","addresses":["fd00:1122:3344:107::8"],"dataset":{"id":"4ba5f3b6-8be5-4a85-bc57-a5e3b0b867d8","name":{"pool_name":"oxp_e80e7996-c572-481e-8c22-61c16c6e47f4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::8]:32345"},"services":[{"id":"4ba5f3b6-8be5-4a85-bc57-a5e3b0b867d8","details":{"type":"crucible","address":"[fd00:1122:3344:107::8]:32345"}}]},"root":"/pool/ext/e17b68b5-f50c-4fc3-b55a-80d284c6c32d/crypt/zone"},{"zone":{"id":"395c9d6e-3bd0-445e-9269-46c3260edb83","zone_type":"ntp","addresses":["fd00:1122:3344:107::d"],"dataset":null,"services":[{"id":"395c9d6e-3bd0-445e-9269-46c3260edb83","details":{"type":"internal_ntp","address":"[fd00:1122:3344:107::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/0cbbcf22-770d-4e75-9148-e6109b129093/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled18.json b/sled-agent/tests/old-service-ledgers/rack3-sled18.json deleted file mode 100644 index f47e912424..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled18.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"c7096dd4-e429-4a6f-9725-041a77ef2513","zone_type":"crucible","addresses":["fd00:1122:3344:11a::6"],"dataset":{"id":"c7096dd4-e429-4a6f-9725-041a77ef2513","name":{"pool_name":"oxp_dcf62af6-c0f9-4eb5-9b23-9424ef8f3d32","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::6]:32345"},"services":[{"id":"c7096dd4-e429-4a6f-9725-041a77ef2513","details":{"type":"crucible","address":"[fd00:1122:3344:11a::6]:32345"}}]},"root":"/pool/ext/b869e463-c8b9-4c12-a6b9-13175b3896dd/crypt/zone"},{"zone":{"id":"09dd367f-b32f-43f3-aa53-11ccec1cd0c9","zone_type":"crucible","addresses":["fd00:1122:3344:11a::9"],"dataset":{"id":"09dd367f-b32f-43f3-aa53-11ccec1cd0c9","name":{"pool_name":"oxp_d7d00317-42c7-4d1e-a04c-85491fb230cd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::9]:32345"},"services":[{"id":"09dd367f-b32f-43f3-aa53-11ccec1cd0c9","details":{"type":"crucible","address":"[fd00:1122:3344:11a::9]:32345"}}]},"root":"/pool/ext/d7d00317-42c7-4d1e-a04c-85491fb230cd/crypt/zone"},{"zone":{"id":"fb2f85f1-05b3-432f-9bb5-63fb27a762b1","zone_type":"crucible","addresses":["fd00:1122:3344:11a::5"],"dataset":{"id":"fb2f85f1-05b3-432f-9bb5-63fb27a762b1","name":{"pool_name":"oxp_db4a9949-68da-4c1c-9a1c-49083eba14fe","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::5]:32345"},"services":[{"id":"fb2f85f1-05b3-432f-9bb5-63fb27a762b1","details":{"type":"crucible","address":"[fd00:1122:3344:11a::5]:32345"}}]},"root":"/pool/ext/db4a9949-68da-4c1c-9a1c-49083eba14fe/crypt/zone"},{"zone":{"id":"5b89425e-69e4-4305-8f33-dc5768a1849e","zone_type":"crucible","addresses":["fd00:1122:3344:11a::a"],"dataset":{"id":"5b89425e-69e4-4305-8f33-dc5768a1849e","name":{"pool_name":"oxp_64a1bad7-d1b1-4e39-a3f3-9b8d73c4709e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::a]:32345"},"services":[{"id":"5b89425e-69e4-4305-8f33-dc5768a1849e","details":{"type":"crucible","address":"[fd00:1122:3344:11a::a]:32345"}}]},"root":"/pool/ext/64a1bad7-d1b1-4e39-a3f3-9b8d73c4709e/crypt/zone"},{"zone":{"id":"a5156db4-273a-4f8b-b8d8-df77062a6c63","zone_type":"crucible","addresses":["fd00:1122:3344:11a::4"],"dataset":{"id":"a5156db4-273a-4f8b-b8d8-df77062a6c63","name":{"pool_name":"oxp_b869e463-c8b9-4c12-a6b9-13175b3896dd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::4]:32345"},"services":[{"id":"a5156db4-273a-4f8b-b8d8-df77062a6c63","details":{"type":"crucible","address":"[fd00:1122:3344:11a::4]:32345"}}]},"root":"/pool/ext/dcf62af6-c0f9-4eb5-9b23-9424ef8f3d32/crypt/zone"},{"zone":{"id":"1f2d2f86-b69b-4130-bb9b-e62ba0cb6802","zone_type":"crucible","addresses":["fd00:1122:3344:11a::b"],"dataset":{"id":"1f2d2f86-b69b-4130-bb9b-e62ba0cb6802","name":{"pool_name":"oxp_153ffee4-5d7a-4786-ad33-d5567b434fe0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::b]:32345"},"services":[{"id":"1f2d2f86-b69b-4130-bb9b-e62ba0cb6802","details":{"type":"crucible","address":"[fd00:1122:3344:11a::b]:32345"}}]},"root":"/pool/ext/174a067d-1c5a-49f7-a29f-1e62ab1c3796/crypt/zone"},{"zone":{"id":"1e249cc9-52e7-4d66-b713-8ace1392e991","zone_type":"crucible","addresses":["fd00:1122:3344:11a::7"],"dataset":{"id":"1e249cc9-52e7-4d66-b713-8ace1392e991","name":{"pool_name":"oxp_04b6215e-9651-4a3c-ba1b-b8a1e67b3d89","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::7]:32345"},"services":[{"id":"1e249cc9-52e7-4d66-b713-8ace1392e991","details":{"type":"crucible","address":"[fd00:1122:3344:11a::7]:32345"}}]},"root":"/pool/ext
/db4a9949-68da-4c1c-9a1c-49083eba14fe/crypt/zone"},{"zone":{"id":"eb779538-2b1b-4d1d-8c7e-b15f04db6e53","zone_type":"crucible","addresses":["fd00:1122:3344:11a::3"],"dataset":{"id":"eb779538-2b1b-4d1d-8c7e-b15f04db6e53","name":{"pool_name":"oxp_aacb8524-3562-4f97-a616-9023230d6efa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::3]:32345"},"services":[{"id":"eb779538-2b1b-4d1d-8c7e-b15f04db6e53","details":{"type":"crucible","address":"[fd00:1122:3344:11a::3]:32345"}}]},"root":"/pool/ext/174a067d-1c5a-49f7-a29f-1e62ab1c3796/crypt/zone"},{"zone":{"id":"b575d52d-be7d-46af-814b-91e6d18f3464","zone_type":"crucible","addresses":["fd00:1122:3344:11a::8"],"dataset":{"id":"b575d52d-be7d-46af-814b-91e6d18f3464","name":{"pool_name":"oxp_174a067d-1c5a-49f7-a29f-1e62ab1c3796","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::8]:32345"},"services":[{"id":"b575d52d-be7d-46af-814b-91e6d18f3464","details":{"type":"crucible","address":"[fd00:1122:3344:11a::8]:32345"}}]},"root":"/pool/ext/64a1bad7-d1b1-4e39-a3f3-9b8d73c4709e/crypt/zone"},{"zone":{"id":"274200bc-eac7-47d7-8a57-4b7be794caba","zone_type":"crucible","addresses":["fd00:1122:3344:11a::c"],"dataset":{"id":"274200bc-eac7-47d7-8a57-4b7be794caba","name":{"pool_name":"oxp_2e7644e4-7d46-42bf-8e7a-9c3f39085b3f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::c]:32345"},"services":[{"id":"274200bc-eac7-47d7-8a57-4b7be794caba","details":{"type":"crucible","address":"[fd00:1122:3344:11a::c]:32345"}}]},"root":"/pool/ext/2e7644e4-7d46-42bf-8e7a-9c3f39085b3f/crypt/zone"},{"zone":{"id":"bc20ba3a-df62-4a62-97c2-75b5653f84b4","zone_type":"ntp","addresses":["fd00:1122:3344:11a::d"],"dataset":null,"services":[{"id":"bc20ba3a-df62-4a62-97c2-75b5653f84b4","details":{"type":"internal_ntp","address":"[fd00:1122:3344:11a::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/04b6215e-9651-4a3c-ba1b-b8a1e67b3d89/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled19.json b/sled-agent/tests/old-service-ledgers/rack3-sled19.json deleted file mode 100644 index c450320a73..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled19.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"9c73abb9-edb8-4aa2-835b-c25ebe4466d9","zone_type":"crucible","addresses":["fd00:1122:3344:109::7"],"dataset":{"id":"9c73abb9-edb8-4aa2-835b-c25ebe4466d9","name":{"pool_name":"oxp_b7a3032f-7b8c-4a6a-9fa2-e5773bfdbc94","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::7]:32345"},"services":[{"id":"9c73abb9-edb8-4aa2-835b-c25ebe4466d9","details":{"type":"crucible","address":"[fd00:1122:3344:109::7]:32345"}}]},"root":"/pool/ext/46d21f3d-23be-4361-b5c5-9d0f6ece5b8c/crypt/zone"},{"zone":{"id":"ca576bda-cbdd-4bb9-9d75-ce06d569e926","zone_type":"crucible","addresses":["fd00:1122:3344:109::a"],"dataset":{"id":"ca576bda-cbdd-4bb9-9d75-ce06d569e926","name":{"pool_name":"oxp_863c4bc4-9c7e-453c-99d8-a3d509f49f3e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::a]:32345"},"services":[{"id":"ca576bda-cbdd-4bb9-9d75-ce06d569e926","details":{"type":"crucible","address":"[fd00:1122:3344:109::a]:32345"}}]},"root":"/pool/ext/7e67cb32-0c00-4090-9647-eb7bae75deeb/crypt/zone"},{"zone":{"id":"f010978d-346e-49cd-b265-7607a25685f9","zone_type":"crucible","addresses":["fd00:1122:3344:109::c"],"dataset":{"id":"f010978d-346e-49cd-b265-7607a25685f9","name":{"pool_name":"oxp_9bc1dab8-2d2a-4f92-bdfb-94ebca7881f1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::c]:32345"},"services":[{"id":"f010978d-346e-49cd-b265-7607a25685f9","details":{"type":"crucible","address":"[fd00:1122:3344:109::c]:32345"}}]},"root":"/pool/ext/9bc1dab8-2d2a-4f92-bdfb-94ebca7881f1/crypt/zone"},{"zone":{"id":"daff4162-cc81-4586-a457-91d767b8f1d9","zone_type":"crucible","addresses":["fd00:1122:3344:109::6"],"dataset":{"id":"daff4162-cc81-4586-a457-91d767b8f1d9","name":{"pool_name":"oxp_b9b5b50c-e823-41ae-9585-01b818883521","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::6]:32345"},"services":[{"id":"daff4162-cc81-4586-a457-91d767b8f1d9","details":{"type":"crucible","address":"[fd00:1122:3344:109::6]:32345"}}]},"root":"/pool/ext/de682b18-afaf-4d53-b62e-934f6bd4a1f8/crypt/zone"},{"zone":{"id":"9f300d3d-e698-4cc8-be4c-1f81ac8c927f","zone_type":"crucible","addresses":["fd00:1122:3344:109::d"],"dataset":{"id":"9f300d3d-e698-4cc8-be4c-1f81ac8c927f","name":{"pool_name":"oxp_f1d82c22-ad7d-4cda-9ab0-8f5f496d90ce","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::d]:32345"},"services":[{"id":"9f300d3d-e698-4cc8-be4c-1f81ac8c927f","details":{"type":"crucible","address":"[fd00:1122:3344:109::d]:32345"}}]},"root":"/pool/ext/de682b18-afaf-4d53-b62e-934f6bd4a1f8/crypt/zone"},{"zone":{"id":"8db7c7be-da40-4a1c-9681-4d02606a7eb7","zone_type":"crucible","addresses":["fd00:1122:3344:109::9"],"dataset":{"id":"8db7c7be-da40-4a1c-9681-4d02606a7eb7","name":{"pool_name":"oxp_46d21f3d-23be-4361-b5c5-9d0f6ece5b8c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::9]:32345"},"services":[{"id":"8db7c7be-da40-4a1c-9681-4d02606a7eb7","details":{"type":"crucible","address":"[fd00:1122:3344:109::9]:32345"}}]},"root":"/pool/ext/b7a3032f-7b8c-4a6a-9fa2-e5773bfdbc94/crypt/zone"},{"zone":{"id":"b990911b-805a-4f9d-bd83-e977f5b19a35","zone_type":"crucible","addresses":["fd00:1122:3344:109::4"],"dataset":{"id":"b990911b-805a-4f9d-bd83-e977f5b19a35","name":{"pool_name":"oxp_7e67cb32-0c00-4090-9647-eb7bae75deeb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::4]:32345"},"services":[{"id":"b990911b-805a-4f9d-bd83-e977f5b19a35","details":{"type":"crucible","address":"[fd00:1122:3344:109::4]:32345"}}]},"root":"/pool/ext
/de682b18-afaf-4d53-b62e-934f6bd4a1f8/crypt/zone"},{"zone":{"id":"c99392f5-8f30-41ac-9eeb-12d7f4b707f1","zone_type":"crucible","addresses":["fd00:1122:3344:109::b"],"dataset":{"id":"c99392f5-8f30-41ac-9eeb-12d7f4b707f1","name":{"pool_name":"oxp_de682b18-afaf-4d53-b62e-934f6bd4a1f8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::b]:32345"},"services":[{"id":"c99392f5-8f30-41ac-9eeb-12d7f4b707f1","details":{"type":"crucible","address":"[fd00:1122:3344:109::b]:32345"}}]},"root":"/pool/ext/46d21f3d-23be-4361-b5c5-9d0f6ece5b8c/crypt/zone"},{"zone":{"id":"7f6cb339-9eb1-4866-8a4f-383bad25b36f","zone_type":"crucible","addresses":["fd00:1122:3344:109::5"],"dataset":{"id":"7f6cb339-9eb1-4866-8a4f-383bad25b36f","name":{"pool_name":"oxp_458cbfa3-3752-415d-8a3b-fb64e88468e1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::5]:32345"},"services":[{"id":"7f6cb339-9eb1-4866-8a4f-383bad25b36f","details":{"type":"crucible","address":"[fd00:1122:3344:109::5]:32345"}}]},"root":"/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone"},{"zone":{"id":"11946372-f253-4648-b00c-c7874a7b2888","zone_type":"crucible","addresses":["fd00:1122:3344:109::8"],"dataset":{"id":"11946372-f253-4648-b00c-c7874a7b2888","name":{"pool_name":"oxp_d73332f5-b2a5-46c0-94cf-c5c5712abfe8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::8]:32345"},"services":[{"id":"11946372-f253-4648-b00c-c7874a7b2888","details":{"type":"crucible","address":"[fd00:1122:3344:109::8]:32345"}}]},"root":"/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone"},{"zone":{"id":"58ece9e1-387f-4d2f-a42f-69cd34f9f380","zone_type":"cockroach_db","addresses":["fd00:1122:3344:109::3"],"dataset":{"id":"58ece9e1-387f-4d2f-a42f-69cd34f9f380","name":{"pool_name":"oxp_7e67cb32-0c00-4090-9647-eb7bae75deeb","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:109::3]:32221"},"services":[{"id":"58ece9e1-387f-4d2f-a42f-69cd34f9f380","details":{"type":"cockroach_db","address":"[fd00:1122:3344:109::3]:32221"}}]},"root":"/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone"},{"zone":{"id":"f016a25a-deb5-4f20-bdb0-2425c00d41a6","zone_type":"ntp","addresses":["fd00:1122:3344:109::e"],"dataset":null,"services":[{"id":"f016a25a-deb5-4f20-bdb0-2425c00d41a6","details":{"type":"internal_ntp","address":"[fd00:1122:3344:109::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled2.json b/sled-agent/tests/old-service-ledgers/rack3-sled2.json deleted file mode 100644 index 6c420c989d..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled2.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"dd799dd4-03f9-451d-85e2-844155753a03","zone_type":"crucible","addresses":["fd00:1122:3344:10a::7"],"dataset":{"id":"dd799dd4-03f9-451d-85e2-844155753a03","name":{"pool_name":"oxp_7dcf3acc-bde9-4306-bb46-4c6a6cbbb7ba","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::7]:32345"},"services":[{"id":"dd799dd4-03f9-451d-85e2-844155753a03","details":{"type":"crucible","address":"[fd00:1122:3344:10a::7]:32345"}}]},"root":"/pool/ext/7dcf3acc-bde9-4306-bb46-4c6a6cbbb7ba/crypt/zone"},{"zone":{"id":"dbf9346d-b46d-4402-bb44-92ce20fb5290","zone_type":"crucible","addresses":["fd00:1122:3344:10a::9"],"dataset":{"id":"dbf9346d-b46d-4402-bb44-92ce20fb5290","name":{"pool_name":"oxp_9275d50f-da2c-4f84-9775-598a364309ad","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::9]:32345"},"services":[{"id":"dbf9346d-b46d-4402-bb44-92ce20fb5290","details":{"type":"crucible","address":"[fd00:1122:3344:10a::9]:32345"}}]},"root":"/pool/ext/d83e36ef-dd7a-4cc2-be19-379b1114c031/crypt/zone"},{"zone":{"id":"9a55ebdd-eeef-4954-b0a1-e32b04837f14","zone_type":"crucible","addresses":["fd00:1122:3344:10a::4"],"dataset":{"id":"9a55ebdd-eeef-4954-b0a1-e32b04837f14","name":{"pool_name":"oxp_7f30f77e-5998-4676-a226-b433b5940e77","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::4]:32345"},"services":[{"id":"9a55ebdd-eeef-4954-b0a1-e32b04837f14","details":{"type":"crucible","address":"[fd00:1122:3344:10a::4]:32345"}}]},"root":"/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone"},{"zone":{"id":"bc2935f8-e4fa-4015-968e-f90985533a6a","zone_type":"crucible","addresses":["fd00:1122:3344:10a::6"],"dataset":{"id":"bc2935f8-e4fa-4015-968e-f90985533a6a","name":{"pool_name":"oxp_022c9d58-e91f-480d-bda6-0cf32ce3b1f5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::6]:32345"},"services":[{"id":"bc2935f8-e4fa-4015-968e-f90985533a6a","details":{"type":"crucible","address":"[fd00:1122:3344:10a::6]:32345"}}]},"root":"/pool/ext/c395dcc3-6ece-4b3f-b143-e111a54ef7da/crypt/zone"},{"zone":{"id":"63f8c861-fa1d-4121-92d9-7efa5ef7f5a0","zone_type":"crucible","addresses":["fd00:1122:3344:10a::a"],"dataset":{"id":"63f8c861-fa1d-4121-92d9-7efa5ef7f5a0","name":{"pool_name":"oxp_3c805784-f403-4d01-9eb0-4f77d0821980","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::a]:32345"},"services":[{"id":"63f8c861-fa1d-4121-92d9-7efa5ef7f5a0","details":{"type":"crucible","address":"[fd00:1122:3344:10a::a]:32345"}}]},"root":"/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone"},{"zone":{"id":"4996dcf9-78de-4f69-94fa-c09cc86a8d3c","zone_type":"crucible","addresses":["fd00:1122:3344:10a::b"],"dataset":{"id":"4996dcf9-78de-4f69-94fa-c09cc86a8d3c","name":{"pool_name":"oxp_f9fe9ce6-be0d-4974-bc30-78a8f1330496","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::b]:32345"},"services":[{"id":"4996dcf9-78de-4f69-94fa-c09cc86a8d3c","details":{"type":"crucible","address":"[fd00:1122:3344:10a::b]:32345"}}]},"root":"/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone"},{"zone":{"id":"36b9a4bf-7b30-4fe7-903d-3b722c79fa86","zone_type":"crucible","addresses":["fd00:1122:3344:10a::c"],"dataset":{"id":"36b9a4bf-7b30-4fe7-903d-3b722c79fa86","name":{"pool_name":"oxp_cb1052e0-4c70-4d37-b979-dd55e6a25f08","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::c]:32345"},"services":[{"id":"36b9a4bf-7b30-4fe7-903d-3b722c79fa86","details":{"type":"crucible","address":"[fd00:1122:3344:10a::c]:32345"}}]},"root":"/pool/ext
/3c805784-f403-4d01-9eb0-4f77d0821980/crypt/zone"},{"zone":{"id":"a109a902-6a27-41b6-a881-c353e28e5389","zone_type":"crucible","addresses":["fd00:1122:3344:10a::8"],"dataset":{"id":"a109a902-6a27-41b6-a881-c353e28e5389","name":{"pool_name":"oxp_d83e36ef-dd7a-4cc2-be19-379b1114c031","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::8]:32345"},"services":[{"id":"a109a902-6a27-41b6-a881-c353e28e5389","details":{"type":"crucible","address":"[fd00:1122:3344:10a::8]:32345"}}]},"root":"/pool/ext/d83e36ef-dd7a-4cc2-be19-379b1114c031/crypt/zone"},{"zone":{"id":"d2a9a0bc-ea12-44e3-ac4a-904c76120d11","zone_type":"crucible","addresses":["fd00:1122:3344:10a::3"],"dataset":{"id":"d2a9a0bc-ea12-44e3-ac4a-904c76120d11","name":{"pool_name":"oxp_c395dcc3-6ece-4b3f-b143-e111a54ef7da","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::3]:32345"},"services":[{"id":"d2a9a0bc-ea12-44e3-ac4a-904c76120d11","details":{"type":"crucible","address":"[fd00:1122:3344:10a::3]:32345"}}]},"root":"/pool/ext/9898a289-2f0d-43a6-b053-850f6e784e9a/crypt/zone"},{"zone":{"id":"b3c3e53b-d9ec-4dd8-bd2c-bd811319aa44","zone_type":"crucible","addresses":["fd00:1122:3344:10a::5"],"dataset":{"id":"b3c3e53b-d9ec-4dd8-bd2c-bd811319aa44","name":{"pool_name":"oxp_9898a289-2f0d-43a6-b053-850f6e784e9a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::5]:32345"},"services":[{"id":"b3c3e53b-d9ec-4dd8-bd2c-bd811319aa44","details":{"type":"crucible","address":"[fd00:1122:3344:10a::5]:32345"}}]},"root":"/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone"},{"zone":{"id":"7b445d3b-fd25-4538-ac3f-f439c66d1223","zone_type":"ntp","addresses":["fd00:1122:3344:10a::d"],"dataset":null,"services":[{"id":"7b445d3b-fd25-4538-ac3f-f439c66d1223","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10a::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/f9fe9ce6-be0d-4974-bc30-78a8f1330496/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled20.json b/sled-agent/tests/old-service-ledgers/rack3-sled20.json deleted file mode 100644 index 20c9d60624..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled20.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"4b49e669-264d-4bfb-8ab1-555b520b679c","zone_type":"crucible","addresses":["fd00:1122:3344:108::c"],"dataset":{"id":"4b49e669-264d-4bfb-8ab1-555b520b679c","name":{"pool_name":"oxp_799a1c86-9e1a-4626-91e2-a19f7ff5356e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::c]:32345"},"services":[{"id":"4b49e669-264d-4bfb-8ab1-555b520b679c","details":{"type":"crucible","address":"[fd00:1122:3344:108::c]:32345"}}]},"root":"/pool/ext/d2478613-b7c9-4bd3-856f-1fe8e9c903c2/crypt/zone"},{"zone":{"id":"d802baae-9c3f-437a-85fe-cd72653b6db1","zone_type":"crucible","addresses":["fd00:1122:3344:108::5"],"dataset":{"id":"d802baae-9c3f-437a-85fe-cd72653b6db1","name":{"pool_name":"oxp_d2478613-b7c9-4bd3-856f-1fe8e9c903c2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::5]:32345"},"services":[{"id":"d802baae-9c3f-437a-85fe-cd72653b6db1","details":{"type":"crucible","address":"[fd00:1122:3344:108::5]:32345"}}]},"root":"/pool/ext/116f216c-e151-410f-82bf-8913904cf7b4/crypt/zone"},{"zone":{"id":"e5f69e60-3421-49a4-8c1d-2db8cbb6a5e9","zone_type":"crucible","addresses":["fd00:1122:3344:108::b"],"dataset":{"id":"e5f69e60-3421-49a4-8c1d-2db8cbb6a5e9","name":{"pool_name":"oxp_116f216c-e151-410f-82bf-8913904cf7b4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::b]:32345"},"services":[{"id":"e5f69e60-3421-49a4-8c1d-2db8cbb6a5e9","details":{"type":"crucible","address":"[fd00:1122:3344:108::b]:32345"}}]},"root":"/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone"},{"zone":{"id":"3e598962-ef8c-4cb6-bdfe-ec8563939d6a","zone_type":"crucible","addresses":["fd00:1122:3344:108::4"],"dataset":{"id":"3e598962-ef8c-4cb6-bdfe-ec8563939d6a","name":{"pool_name":"oxp_ababce44-01d1-4c50-b389-f60464c5dde9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::4]:32345"},"services":[{"id":"3e598962-ef8c-4cb6-bdfe-ec8563939d6a","details":{"type":"crucible","address":"[fd00:1122:3344:108::4]:32345"}}]},"root":"/pool/ext/ababce44-01d1-4c50-b389-f60464c5dde9/crypt/zone"},{"zone":{"id":"25355c9f-cc2b-4b24-8eaa-65190f8936a8","zone_type":"crucible","addresses":["fd00:1122:3344:108::d"],"dataset":{"id":"25355c9f-cc2b-4b24-8eaa-65190f8936a8","name":{"pool_name":"oxp_fed46d41-136d-4462-8782-359014efba59","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::d]:32345"},"services":[{"id":"25355c9f-cc2b-4b24-8eaa-65190f8936a8","details":{"type":"crucible","address":"[fd00:1122:3344:108::d]:32345"}}]},"root":"/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone"},{"zone":{"id":"efb2f16c-ebad-4192-b575-dcb4d9b1d5cd","zone_type":"crucible","addresses":["fd00:1122:3344:108::a"],"dataset":{"id":"efb2f16c-ebad-4192-b575-dcb4d9b1d5cd","name":{"pool_name":"oxp_bf509067-0165-456d-98ae-72c86378e626","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::a]:32345"},"services":[{"id":"efb2f16c-ebad-4192-b575-dcb4d9b1d5cd","details":{"type":"crucible","address":"[fd00:1122:3344:108::a]:32345"}}]},"root":"/pool/ext/95220093-e3b8-4f7f-9f5a-cb32cb75180a/crypt/zone"},{"zone":{"id":"89191f0d-4e0b-47fa-9a9e-fbe2a6db1385","zone_type":"crucible","addresses":["fd00:1122:3344:108::8"],"dataset":{"id":"89191f0d-4e0b-47fa-9a9e-fbe2a6db1385","name":{"pool_name":"oxp_eea15142-4635-4e40-b0b4-b0c4f13eca3c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::8]:32345"},"services":[{"id":"89191f0d-4e0b-47fa-9a9e-fbe2a6db1385","details":{"type":"crucible","address":"[fd00:1122:3344:108::8]:32345"}}]},"root":"/pool/ext
/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone"},{"zone":{"id":"e4589324-c528-49c7-9141-35e0a7af6947","zone_type":"crucible","addresses":["fd00:1122:3344:108::6"],"dataset":{"id":"e4589324-c528-49c7-9141-35e0a7af6947","name":{"pool_name":"oxp_95220093-e3b8-4f7f-9f5a-cb32cb75180a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::6]:32345"},"services":[{"id":"e4589324-c528-49c7-9141-35e0a7af6947","details":{"type":"crucible","address":"[fd00:1122:3344:108::6]:32345"}}]},"root":"/pool/ext/ababce44-01d1-4c50-b389-f60464c5dde9/crypt/zone"},{"zone":{"id":"95ebe94d-0e68-421d-9260-c30bd7fe4bd6","zone_type":"nexus","addresses":["fd00:1122:3344:108::3"],"dataset":null,"services":[{"id":"95ebe94d-0e68-421d-9260-c30bd7fe4bd6","details":{"type":"nexus","internal_address":"[fd00:1122:3344:108::3]:12221","external_ip":"45.154.216.35","nic":{"id":"301aa595-f072-4da3-a533-99647b44a66a","kind":{"type":"service","id":"95ebe94d-0e68-421d-9260-c30bd7fe4bd6"},"name":"nexus-95ebe94d-0e68-421d-9260-c30bd7fe4bd6","ip":"172.30.2.5","mac":"A8:40:25:FF:F1:30","subnet":"172.30.2.0/24","vni":100,"primary":true,"slot":0},"external_tls":true,"external_dns_servers":["1.1.1.1","8.8.8.8"]}}]},"root":"/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone"},{"zone":{"id":"4b7a7052-f8e8-4196-8d6b-315943986ce6","zone_type":"crucible","addresses":["fd00:1122:3344:108::7"],"dataset":{"id":"4b7a7052-f8e8-4196-8d6b-315943986ce6","name":{"pool_name":"oxp_a549421c-2f12-45cc-b691-202f0a9bfa8b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::7]:32345"},"services":[{"id":"4b7a7052-f8e8-4196-8d6b-315943986ce6","details":{"type":"crucible","address":"[fd00:1122:3344:108::7]:32345"}}]},"root":"/pool/ext/bf509067-0165-456d-98ae-72c86378e626/crypt/zone"},{"zone":{"id":"71b8ff53-c781-47bb-8ddc-2c7129680542","zone_type":"crucible","addresses":["fd00:1122:3344:108::9"],"dataset":{"id":"71b8ff53-c781-47bb-8ddc-2c7129680542","name":{"pool_name":"oxp_9d19f891-a3d9-4c6e-b1e1-6b0b085a9440","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::9]:32345"},"services":[{"id":"71b8ff53-c781-47bb-8ddc-2c7129680542","details":{"type":"crucible","address":"[fd00:1122:3344:108::9]:32345"}}]},"root":"/pool/ext/fed46d41-136d-4462-8782-359014efba59/crypt/zone"},{"zone":{"id":"eaf7bf77-f4c2-4016-9909-4b88a27e9d9a","zone_type":"ntp","addresses":["fd00:1122:3344:108::e"],"dataset":null,"services":[{"id":"eaf7bf77-f4c2-4016-9909-4b88a27e9d9a","details":{"type":"internal_ntp","address":"[fd00:1122:3344:108::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/ababce44-01d1-4c50-b389-f60464c5dde9/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled21.json b/sled-agent/tests/old-service-ledgers/rack3-sled21.json deleted file mode 100644 index 4f69e01c7f..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled21.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"a91e4af3-5d18-4b08-8cb6-0583db8f8842","zone_type":"crucible","addresses":["fd00:1122:3344:117::a"],"dataset":{"id":"a91e4af3-5d18-4b08-8cb6-0583db8f8842","name":{"pool_name":"oxp_4b2896b8-5f0e-42fb-a474-658b28421e65","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::a]:32345"},"services":[{"id":"a91e4af3-5d18-4b08-8cb6-0583db8f8842","details":{"type":"crucible","address":"[fd00:1122:3344:117::a]:32345"}}]},"root":"/pool/ext/23393ed9-acee-4686-861f-7fc825af1249/crypt/zone"},{"zone":{"id":"1ce74512-ce3a-4125-95f1-12c86e0275d5","zone_type":"crucible","addresses":["fd00:1122:3344:117::8"],"dataset":{"id":"1ce74512-ce3a-4125-95f1-12c86e0275d5","name":{"pool_name":"oxp_46ece76f-ef00-4dd0-9f73-326c63959470","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::8]:32345"},"services":[{"id":"1ce74512-ce3a-4125-95f1-12c86e0275d5","details":{"type":"crucible","address":"[fd00:1122:3344:117::8]:32345"}}]},"root":"/pool/ext/1bd5955e-14a9-463f-adeb-f12bcb45a6c1/crypt/zone"},{"zone":{"id":"fef5d35f-9622-4dee-8635-d26e9f7f6869","zone_type":"crucible","addresses":["fd00:1122:3344:117::4"],"dataset":{"id":"fef5d35f-9622-4dee-8635-d26e9f7f6869","name":{"pool_name":"oxp_e4d7c2e8-016b-4617-afb5-38a2d9c1b508","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::4]:32345"},"services":[{"id":"fef5d35f-9622-4dee-8635-d26e9f7f6869","details":{"type":"crucible","address":"[fd00:1122:3344:117::4]:32345"}}]},"root":"/pool/ext/e372bba3-ef60-466f-b819-a3d5b9acbe77/crypt/zone"},{"zone":{"id":"4f024a31-cd38-4219-8381-9f1af70d1d54","zone_type":"crucible","addresses":["fd00:1122:3344:117::c"],"dataset":{"id":"4f024a31-cd38-4219-8381-9f1af70d1d54","name":{"pool_name":"oxp_7cb2a3c2-9d33-4c6a-af57-669f251cf4cf","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::c]:32345"},"services":[{"id":"4f024a31-cd38-4219-8381-9f1af70d1d54","details":{"type":"crucible","address":"[fd00:1122:3344:117::c]:32345"}}]},"root":"/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone"},{"zone":{"id":"d00e1d0b-e12f-420a-a4df-21e4cac176f6","zone_type":"crucible","addresses":["fd00:1122:3344:117::b"],"dataset":{"id":"d00e1d0b-e12f-420a-a4df-21e4cac176f6","name":{"pool_name":"oxp_e372bba3-ef60-466f-b819-a3d5b9acbe77","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::b]:32345"},"services":[{"id":"d00e1d0b-e12f-420a-a4df-21e4cac176f6","details":{"type":"crucible","address":"[fd00:1122:3344:117::b]:32345"}}]},"root":"/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone"},{"zone":{"id":"1598058a-6064-449e-b39c-1e3d345ed793","zone_type":"crucible","addresses":["fd00:1122:3344:117::5"],"dataset":{"id":"1598058a-6064-449e-b39c-1e3d345ed793","name":{"pool_name":"oxp_022a8d67-1e00-49f3-81ed-a0a1bc187cfa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::5]:32345"},"services":[{"id":"1598058a-6064-449e-b39c-1e3d345ed793","details":{"type":"crucible","address":"[fd00:1122:3344:117::5]:32345"}}]},"root":"/pool/ext/022a8d67-1e00-49f3-81ed-a0a1bc187cfa/crypt/zone"},{"zone":{"id":"c723c4b8-3031-4b25-8c16-fe08bc0b5f00","zone_type":"crucible","addresses":["fd00:1122:3344:117::7"],"dataset":{"id":"c723c4b8-3031-4b25-8c16-fe08bc0b5f00","name":{"pool_name":"oxp_23393ed9-acee-4686-861f-7fc825af1249","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::7]:32345"},"services":[{"id":"c723c4b8-3031-4b25-8c16-fe08bc0b5f00","details":{"type":"crucible","address":"[fd00:1122:3344:117::7]:32345"}}]},"root":"/pool/ext
/1bd5955e-14a9-463f-adeb-f12bcb45a6c1/crypt/zone"},{"zone":{"id":"7751b307-888f-46c8-8787-75d2f3fdaef3","zone_type":"crucible","addresses":["fd00:1122:3344:117::9"],"dataset":{"id":"7751b307-888f-46c8-8787-75d2f3fdaef3","name":{"pool_name":"oxp_e54e53d4-f68f-4b19-b8c1-9d5ab42e51c1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::9]:32345"},"services":[{"id":"7751b307-888f-46c8-8787-75d2f3fdaef3","details":{"type":"crucible","address":"[fd00:1122:3344:117::9]:32345"}}]},"root":"/pool/ext/e372bba3-ef60-466f-b819-a3d5b9acbe77/crypt/zone"},{"zone":{"id":"89413ff1-d5de-4931-8389-e84e7ea321af","zone_type":"crucible","addresses":["fd00:1122:3344:117::6"],"dataset":{"id":"89413ff1-d5de-4931-8389-e84e7ea321af","name":{"pool_name":"oxp_1bd5955e-14a9-463f-adeb-f12bcb45a6c1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::6]:32345"},"services":[{"id":"89413ff1-d5de-4931-8389-e84e7ea321af","details":{"type":"crucible","address":"[fd00:1122:3344:117::6]:32345"}}]},"root":"/pool/ext/1bd5955e-14a9-463f-adeb-f12bcb45a6c1/crypt/zone"},{"zone":{"id":"287b0b24-72aa-41b5-a597-8523d84225ef","zone_type":"crucible","addresses":["fd00:1122:3344:117::3"],"dataset":{"id":"287b0b24-72aa-41b5-a597-8523d84225ef","name":{"pool_name":"oxp_cfbd185d-e185-4aaa-a598-9216124ceec4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::3]:32345"},"services":[{"id":"287b0b24-72aa-41b5-a597-8523d84225ef","details":{"type":"crucible","address":"[fd00:1122:3344:117::3]:32345"}}]},"root":"/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone"},{"zone":{"id":"4728253e-c534-4a5b-b707-c64ac9a8eb8c","zone_type":"ntp","addresses":["fd00:1122:3344:117::d"],"dataset":null,"services":[{"id":"4728253e-c534-4a5b-b707-c64ac9a8eb8c","details":{"type":"internal_ntp","address":"[fd00:1122:3344:117::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled22.json b/sled-agent/tests/old-service-ledgers/rack3-sled22.json deleted file mode 100644 index dc98c0390c..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled22.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"49f20cd1-a8a3-4fa8-9209-59da60cd8f9b","zone_type":"crucible","addresses":["fd00:1122:3344:103::5"],"dataset":{"id":"49f20cd1-a8a3-4fa8-9209-59da60cd8f9b","name":{"pool_name":"oxp_13a9ef4a-f33a-4781-8f83-712c07a79b1f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::5]:32345"},"services":[{"id":"49f20cd1-a8a3-4fa8-9209-59da60cd8f9b","details":{"type":"crucible","address":"[fd00:1122:3344:103::5]:32345"}}]},"root":"/pool/ext/711eff4e-736c-478e-83aa-ae86f5efbf1d/crypt/zone"},{"zone":{"id":"896fd564-f94e-496b-9fcf-ddfbfcfac9f7","zone_type":"crucible","addresses":["fd00:1122:3344:103::c"],"dataset":{"id":"896fd564-f94e-496b-9fcf-ddfbfcfac9f7","name":{"pool_name":"oxp_0944c0a2-0fb7-4f51-bced-52cc257cd2f6","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::c]:32345"},"services":[{"id":"896fd564-f94e-496b-9fcf-ddfbfcfac9f7","details":{"type":"crucible","address":"[fd00:1122:3344:103::c]:32345"}}]},"root":"/pool/ext/bc54d8c5-955d-429d-84e0-a20a4e5e27a3/crypt/zone"},{"zone":{"id":"911fb8b3-05c2-4af7-8974-6c74a61d94ad","zone_type":"crucible","addresses":["fd00:1122:3344:103::9"],"dataset":{"id":"911fb8b3-05c2-4af7-8974-6c74a61d94ad","name":{"pool_name":"oxp_29f59fce-a867-4571-9d2e-b03fa5c13510","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::9]:32345"},"services":[{"id":"911fb8b3-05c2-4af7-8974-6c74a61d94ad","details":{"type":"crucible","address":"[fd00:1122:3344:103::9]:32345"}}]},"root":"/pool/ext/711eff4e-736c-478e-83aa-ae86f5efbf1d/crypt/zone"},{"zone":{"id":"682b34db-0b06-4770-a8fe-74437cf184d6","zone_type":"crucible","addresses":["fd00:1122:3344:103::6"],"dataset":{"id":"682b34db-0b06-4770-a8fe-74437cf184d6","name":{"pool_name":"oxp_094d11d2-8049-4138-bcf4-562f5f8e77c0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::6]:32345"},"services":[{"id":"682b34db-0b06-4770-a8fe-74437cf184d6","details":{"type":"crucible","address":"[fd00:1122:3344:103::6]:32345"}}]},"root":"/pool/ext/0944c0a2-0fb7-4f51-bced-52cc257cd2f6/crypt/zone"},{"zone":{"id":"d8d20365-ecd3-4fd5-9495-c0670e3bd5d9","zone_type":"crucible","addresses":["fd00:1122:3344:103::a"],"dataset":{"id":"d8d20365-ecd3-4fd5-9495-c0670e3bd5d9","name":{"pool_name":"oxp_fb97ff7b-0225-400c-a137-3b38a786c0a0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::a]:32345"},"services":[{"id":"d8d20365-ecd3-4fd5-9495-c0670e3bd5d9","details":{"type":"crucible","address":"[fd00:1122:3344:103::a]:32345"}}]},"root":"/pool/ext/094d11d2-8049-4138-bcf4-562f5f8e77c0/crypt/zone"},{"zone":{"id":"673620b6-44d9-4310-8e17-3024ac84e708","zone_type":"crucible","addresses":["fd00:1122:3344:103::7"],"dataset":{"id":"673620b6-44d9-4310-8e17-3024ac84e708","name":{"pool_name":"oxp_711eff4e-736c-478e-83aa-ae86f5efbf1d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::7]:32345"},"services":[{"id":"673620b6-44d9-4310-8e17-3024ac84e708","details":{"type":"crucible","address":"[fd00:1122:3344:103::7]:32345"}}]},"root":"/pool/ext/fb97ff7b-0225-400c-a137-3b38a786c0a0/crypt/zone"},{"zone":{"id":"bf6dfc04-4d4c-41b6-a011-40ffc3bc5080","zone_type":"crucible","addresses":["fd00:1122:3344:103::8"],"dataset":{"id":"bf6dfc04-4d4c-41b6-a011-40ffc3bc5080","name":{"pool_name":"oxp_f815f1b6-48ef-436d-8768-eb08227e2386","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::8]:32345"},"services":[{"id":"bf6dfc04-4d4c-41b6-a011-40ffc3bc5080","details":{"type":"crucible","address":"[fd00:1122:3344:103::8]:32345"}}]},"root":"/pool/ext
/13a9ef4a-f33a-4781-8f83-712c07a79b1f/crypt/zone"},{"zone":{"id":"ac8a82a8-fb6f-4635-a9a9-d98617eab390","zone_type":"crucible","addresses":["fd00:1122:3344:103::3"],"dataset":{"id":"ac8a82a8-fb6f-4635-a9a9-d98617eab390","name":{"pool_name":"oxp_97d6c860-4e2f-496e-974b-2e293fee6af9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::3]:32345"},"services":[{"id":"ac8a82a8-fb6f-4635-a9a9-d98617eab390","details":{"type":"crucible","address":"[fd00:1122:3344:103::3]:32345"}}]},"root":"/pool/ext/0944c0a2-0fb7-4f51-bced-52cc257cd2f6/crypt/zone"},{"zone":{"id":"4ed66558-4815-4b85-9b94-9edf3ee69ead","zone_type":"crucible","addresses":["fd00:1122:3344:103::4"],"dataset":{"id":"4ed66558-4815-4b85-9b94-9edf3ee69ead","name":{"pool_name":"oxp_bc54d8c5-955d-429d-84e0-a20a4e5e27a3","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::4]:32345"},"services":[{"id":"4ed66558-4815-4b85-9b94-9edf3ee69ead","details":{"type":"crucible","address":"[fd00:1122:3344:103::4]:32345"}}]},"root":"/pool/ext/13a9ef4a-f33a-4781-8f83-712c07a79b1f/crypt/zone"},{"zone":{"id":"8a71c6ee-b08d-4c3d-b13c-c9cebc4c328a","zone_type":"crucible","addresses":["fd00:1122:3344:103::b"],"dataset":{"id":"8a71c6ee-b08d-4c3d-b13c-c9cebc4c328a","name":{"pool_name":"oxp_2bdfa429-09bd-4fa1-aa20-eea99f0d2b85","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::b]:32345"},"services":[{"id":"8a71c6ee-b08d-4c3d-b13c-c9cebc4c328a","details":{"type":"crucible","address":"[fd00:1122:3344:103::b]:32345"}}]},"root":"/pool/ext/29f59fce-a867-4571-9d2e-b03fa5c13510/crypt/zone"},{"zone":{"id":"7e6b8962-7a1e-4d7b-b7ea-49e64a51d98d","zone_type":"ntp","addresses":["fd00:1122:3344:103::d"],"dataset":null,"services":[{"id":"7e6b8962-7a1e-4d7b-b7ea-49e64a51d98d","details":{"type":"internal_ntp","address":"[fd00:1122:3344:103::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/2bdfa429-09bd-4fa1-aa20-eea99f0d2b85/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled23.json b/sled-agent/tests/old-service-ledgers/rack3-sled23.json deleted file mode 100644 index ade2144287..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled23.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"6b7e931d-4b91-4dc6-9a7b-4c19ac669e5d","zone_type":"crucible","addresses":["fd00:1122:3344:105::4"],"dataset":{"id":"6b7e931d-4b91-4dc6-9a7b-4c19ac669e5d","name":{"pool_name":"oxp_24dab7f5-164a-47f3-a878-f32ab1e68cce","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::4]:32345"},"services":[{"id":"6b7e931d-4b91-4dc6-9a7b-4c19ac669e5d","details":{"type":"crucible","address":"[fd00:1122:3344:105::4]:32345"}}]},"root":"/pool/ext/ad493851-2d11-4c2d-8d75-989579d9616a/crypt/zone"},{"zone":{"id":"6c58e7aa-71e1-4868-9d4b-e12c7ef40303","zone_type":"crucible","addresses":["fd00:1122:3344:105::a"],"dataset":{"id":"6c58e7aa-71e1-4868-9d4b-e12c7ef40303","name":{"pool_name":"oxp_d664c9e8-bc81-4225-a618-a8ae2d057186","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::a]:32345"},"services":[{"id":"6c58e7aa-71e1-4868-9d4b-e12c7ef40303","details":{"type":"crucible","address":"[fd00:1122:3344:105::a]:32345"}}]},"root":"/pool/ext/ad493851-2d11-4c2d-8d75-989579d9616a/crypt/zone"},{"zone":{"id":"51c6dc8d-b1a4-454a-9b19-01e45eb0b599","zone_type":"crucible","addresses":["fd00:1122:3344:105::d"],"dataset":{"id":"51c6dc8d-b1a4-454a-9b19-01e45eb0b599","name":{"pool_name":"oxp_f5f85537-eb25-4d0e-8e94-b775c41abd73","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::d]:32345"},"services":[{"id":"51c6dc8d-b1a4-454a-9b19-01e45eb0b599","details":{"type":"crucible","address":"[fd00:1122:3344:105::d]:32345"}}]},"root":"/pool/ext/4f1eafe9-b28d-49d3-83e2-ceac8721d6b5/crypt/zone"},{"zone":{"id":"8cbffa61-0bd0-4ad2-bd7d-30fe0dd57469","zone_type":"crucible","addresses":["fd00:1122:3344:105::9"],"dataset":{"id":"8cbffa61-0bd0-4ad2-bd7d-30fe0dd57469","name":{"pool_name":"oxp_88abca38-3f61-4d4b-80a1-4ea3e4827f84","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::9]:32345"},"services":[{"id":"8cbffa61-0bd0-4ad2-bd7d-30fe0dd57469","details":{"type":"crucible","address":"[fd00:1122:3344:105::9]:32345"}}]},"root":"/pool/ext/88abca38-3f61-4d4b-80a1-4ea3e4827f84/crypt/zone"},{"zone":{"id":"2177f37f-2ac9-4e66-bf74-a10bd91f4d33","zone_type":"crucible","addresses":["fd00:1122:3344:105::6"],"dataset":{"id":"2177f37f-2ac9-4e66-bf74-a10bd91f4d33","name":{"pool_name":"oxp_59e20871-4670-40d6-8ff4-aa97899fc991","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::6]:32345"},"services":[{"id":"2177f37f-2ac9-4e66-bf74-a10bd91f4d33","details":{"type":"crucible","address":"[fd00:1122:3344:105::6]:32345"}}]},"root":"/pool/ext/4f1eafe9-b28d-49d3-83e2-ceac8721d6b5/crypt/zone"},{"zone":{"id":"e4e43855-4879-4910-a2ba-40f625c1cc2d","zone_type":"crucible","addresses":["fd00:1122:3344:105::b"],"dataset":{"id":"e4e43855-4879-4910-a2ba-40f625c1cc2d","name":{"pool_name":"oxp_967d2f05-b141-44f5-837d-9b2aa67ee128","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::b]:32345"},"services":[{"id":"e4e43855-4879-4910-a2ba-40f625c1cc2d","details":{"type":"crucible","address":"[fd00:1122:3344:105::b]:32345"}}]},"root":"/pool/ext/6b6f34cd-6d3d-4832-a4e6-3df112c97133/crypt/zone"},{"zone":{"id":"8d2517e1-f9ad-40f2-abb9-2f5122839910","zone_type":"crucible","addresses":["fd00:1122:3344:105::7"],"dataset":{"id":"8d2517e1-f9ad-40f2-abb9-2f5122839910","name":{"pool_name":"oxp_ad493851-2d11-4c2d-8d75-989579d9616a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::7]:32345"},"services":[{"id":"8d2517e1-f9ad-40f2-abb9-2f5122839910","details":{"type":"crucible","address":"[fd00:1122:3344:105::7]:32345"}}]},"root":"/pool/ext
/88abca38-3f61-4d4b-80a1-4ea3e4827f84/crypt/zone"},{"zone":{"id":"44cb3698-a7b1-4388-9165-ac76082ec8bc","zone_type":"crucible","addresses":["fd00:1122:3344:105::5"],"dataset":{"id":"44cb3698-a7b1-4388-9165-ac76082ec8bc","name":{"pool_name":"oxp_4292a83c-8c1f-4b2e-9120-72e0c510bf3c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::5]:32345"},"services":[{"id":"44cb3698-a7b1-4388-9165-ac76082ec8bc","details":{"type":"crucible","address":"[fd00:1122:3344:105::5]:32345"}}]},"root":"/pool/ext/24dab7f5-164a-47f3-a878-f32ab1e68cce/crypt/zone"},{"zone":{"id":"931b5c86-9d72-4518-bfd6-97863152ac65","zone_type":"crucible","addresses":["fd00:1122:3344:105::c"],"dataset":{"id":"931b5c86-9d72-4518-bfd6-97863152ac65","name":{"pool_name":"oxp_6b6f34cd-6d3d-4832-a4e6-3df112c97133","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::c]:32345"},"services":[{"id":"931b5c86-9d72-4518-bfd6-97863152ac65","details":{"type":"crucible","address":"[fd00:1122:3344:105::c]:32345"}}]},"root":"/pool/ext/ad493851-2d11-4c2d-8d75-989579d9616a/crypt/zone"},{"zone":{"id":"ac568073-1889-463e-8cc4-cfed16ce2a34","zone_type":"crucible","addresses":["fd00:1122:3344:105::8"],"dataset":{"id":"ac568073-1889-463e-8cc4-cfed16ce2a34","name":{"pool_name":"oxp_4f1eafe9-b28d-49d3-83e2-ceac8721d6b5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::8]:32345"},"services":[{"id":"ac568073-1889-463e-8cc4-cfed16ce2a34","details":{"type":"crucible","address":"[fd00:1122:3344:105::8]:32345"}}]},"root":"/pool/ext/4292a83c-8c1f-4b2e-9120-72e0c510bf3c/crypt/zone"},{"zone":{"id":"e8f86fbb-864e-4d5a-961c-b50b54ae853e","zone_type":"cockroach_db","addresses":["fd00:1122:3344:105::3"],"dataset":{"id":"e8f86fbb-864e-4d5a-961c-b50b54ae853e","name":{"pool_name":"oxp_24dab7f5-164a-47f3-a878-f32ab1e68cce","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:105::3]:32221"},"services":[{"id":"e8f86fbb-864e-4d5a-961c-b50b54ae853e","details":{"type":"cockroach_db","address":"[fd00:1122:3344:105::3]:32221"}}]},"root":"/pool/ext/4f1eafe9-b28d-49d3-83e2-ceac8721d6b5/crypt/zone"},{"zone":{"id":"c79caea0-37b1-49d6-ae6e-8cf849d91374","zone_type":"ntp","addresses":["fd00:1122:3344:105::e"],"dataset":null,"services":[{"id":"c79caea0-37b1-49d6-ae6e-8cf849d91374","details":{"type":"internal_ntp","address":"[fd00:1122:3344:105::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/24dab7f5-164a-47f3-a878-f32ab1e68cce/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled24.json b/sled-agent/tests/old-service-ledgers/rack3-sled24.json deleted file mode 100644 index e7bd3050d6..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled24.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"d2b1e468-bc3c-4d08-b855-ae3327465375","zone_type":"crucible","addresses":["fd00:1122:3344:106::3"],"dataset":{"id":"d2b1e468-bc3c-4d08-b855-ae3327465375","name":{"pool_name":"oxp_9db196bf-828d-4e55-a2c1-dd9d579d3908","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::3]:32345"},"services":[{"id":"d2b1e468-bc3c-4d08-b855-ae3327465375","details":{"type":"crucible","address":"[fd00:1122:3344:106::3]:32345"}}]},"root":"/pool/ext/74df4c92-edbb-4431-a770-1d015110e66b/crypt/zone"},{"zone":{"id":"61f94a16-79fd-42e3-b225-a4dc67228437","zone_type":"crucible","addresses":["fd00:1122:3344:106::6"],"dataset":{"id":"61f94a16-79fd-42e3-b225-a4dc67228437","name":{"pool_name":"oxp_d77d5b08-5f70-496a-997b-b38804dc3b8a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::6]:32345"},"services":[{"id":"61f94a16-79fd-42e3-b225-a4dc67228437","details":{"type":"crucible","address":"[fd00:1122:3344:106::6]:32345"}}]},"root":"/pool/ext/daf9e3cd-5a40-4eba-a0f6-4f94dab37dae/crypt/zone"},{"zone":{"id":"7d32ef34-dec5-4fd8-899e-20bbc473a3ee","zone_type":"crucible","addresses":["fd00:1122:3344:106::7"],"dataset":{"id":"7d32ef34-dec5-4fd8-899e-20bbc473a3ee","name":{"pool_name":"oxp_50c1b653-6231-41fe-b3cf-b7ba709a0746","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::7]:32345"},"services":[{"id":"7d32ef34-dec5-4fd8-899e-20bbc473a3ee","details":{"type":"crucible","address":"[fd00:1122:3344:106::7]:32345"}}]},"root":"/pool/ext/9db196bf-828d-4e55-a2c1-dd9d579d3908/crypt/zone"},{"zone":{"id":"c34b7ae5-26b9-4651-a3c4-20bba2bd0d2c","zone_type":"crucible","addresses":["fd00:1122:3344:106::5"],"dataset":{"id":"c34b7ae5-26b9-4651-a3c4-20bba2bd0d2c","name":{"pool_name":"oxp_88aea92c-ab92-44c1-9471-eb8e30e075d3","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::5]:32345"},"services":[{"id":"c34b7ae5-26b9-4651-a3c4-20bba2bd0d2c","details":{"type":"crucible","address":"[fd00:1122:3344:106::5]:32345"}}]},"root":"/pool/ext/8da316d4-6b18-4980-a0a8-6e76e72cc40d/crypt/zone"},{"zone":{"id":"36472be8-9a70-4c14-bd02-439b725cec1a","zone_type":"crucible","addresses":["fd00:1122:3344:106::8"],"dataset":{"id":"36472be8-9a70-4c14-bd02-439b725cec1a","name":{"pool_name":"oxp_54544b3a-1513-4db2-911e-7c1eb4b12385","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::8]:32345"},"services":[{"id":"36472be8-9a70-4c14-bd02-439b725cec1a","details":{"type":"crucible","address":"[fd00:1122:3344:106::8]:32345"}}]},"root":"/pool/ext/54544b3a-1513-4db2-911e-7c1eb4b12385/crypt/zone"},{"zone":{"id":"2548f8ab-5255-4334-a1fb-5d7d95213129","zone_type":"crucible","addresses":["fd00:1122:3344:106::9"],"dataset":{"id":"2548f8ab-5255-4334-a1fb-5d7d95213129","name":{"pool_name":"oxp_08050450-967f-431c-9a12-0d051aff020e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::9]:32345"},"services":[{"id":"2548f8ab-5255-4334-a1fb-5d7d95213129","details":{"type":"crucible","address":"[fd00:1122:3344:106::9]:32345"}}]},"root":"/pool/ext/08050450-967f-431c-9a12-0d051aff020e/crypt/zone"},{"zone":{"id":"1455c069-853c-49cd-853a-3ea81b89acd4","zone_type":"crucible","addresses":["fd00:1122:3344:106::c"],"dataset":{"id":"1455c069-853c-49cd-853a-3ea81b89acd4","name":{"pool_name":"oxp_8da316d4-6b18-4980-a0a8-6e76e72cc40d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::c]:32345"},"services":[{"id":"1455c069-853c-49cd-853a-3ea81b89acd4","details":{"type":"crucible","address":"[fd00:1122:3344:106::c]:32345"}}]},"root":"/pool/ext
/08050450-967f-431c-9a12-0d051aff020e/crypt/zone"},{"zone":{"id":"27c0244b-f91a-46c3-bc96-e8eec009371e","zone_type":"crucible","addresses":["fd00:1122:3344:106::b"],"dataset":{"id":"27c0244b-f91a-46c3-bc96-e8eec009371e","name":{"pool_name":"oxp_daf9e3cd-5a40-4eba-a0f6-4f94dab37dae","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::b]:32345"},"services":[{"id":"27c0244b-f91a-46c3-bc96-e8eec009371e","details":{"type":"crucible","address":"[fd00:1122:3344:106::b]:32345"}}]},"root":"/pool/ext/74df4c92-edbb-4431-a770-1d015110e66b/crypt/zone"},{"zone":{"id":"9e46d837-1e0f-42b6-a352-84e6946b8734","zone_type":"crucible","addresses":["fd00:1122:3344:106::4"],"dataset":{"id":"9e46d837-1e0f-42b6-a352-84e6946b8734","name":{"pool_name":"oxp_74df4c92-edbb-4431-a770-1d015110e66b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::4]:32345"},"services":[{"id":"9e46d837-1e0f-42b6-a352-84e6946b8734","details":{"type":"crucible","address":"[fd00:1122:3344:106::4]:32345"}}]},"root":"/pool/ext/15f94c39-d48c-41f6-a913-cc1d04aef1a2/crypt/zone"},{"zone":{"id":"b972fcd4-c1b3-4b3c-9e24-f59c7a7cb192","zone_type":"crucible","addresses":["fd00:1122:3344:106::a"],"dataset":{"id":"b972fcd4-c1b3-4b3c-9e24-f59c7a7cb192","name":{"pool_name":"oxp_15f94c39-d48c-41f6-a913-cc1d04aef1a2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::a]:32345"},"services":[{"id":"b972fcd4-c1b3-4b3c-9e24-f59c7a7cb192","details":{"type":"crucible","address":"[fd00:1122:3344:106::a]:32345"}}]},"root":"/pool/ext/74df4c92-edbb-4431-a770-1d015110e66b/crypt/zone"},{"zone":{"id":"e1c8c655-1950-42d5-ae1f-a4ce84854bbc","zone_type":"ntp","addresses":["fd00:1122:3344:106::d"],"dataset":null,"services":[{"id":"e1c8c655-1950-42d5-ae1f-a4ce84854bbc","details":{"type":"internal_ntp","address":"[fd00:1122:3344:106::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/15f94c39-d48c-41f6-a913-cc1d04aef1a2/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled25.json b/sled-agent/tests/old-service-ledgers/rack3-sled25.json deleted file mode 100644 index 642657bbce..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled25.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"10b80058-9b2e-4d6c-8a1a-a61a8258c12f","zone_type":"crucible","addresses":["fd00:1122:3344:118::9"],"dataset":{"id":"10b80058-9b2e-4d6c-8a1a-a61a8258c12f","name":{"pool_name":"oxp_953c19bb-9fff-4488-8a7b-29de9994a948","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::9]:32345"},"services":[{"id":"10b80058-9b2e-4d6c-8a1a-a61a8258c12f","details":{"type":"crucible","address":"[fd00:1122:3344:118::9]:32345"}}]},"root":"/pool/ext/a78caf97-6145-4908-83b5-a03a6d2e0ac4/crypt/zone"},{"zone":{"id":"f58fef96-7b5e-40c2-9482-669088a19209","zone_type":"crucible","addresses":["fd00:1122:3344:118::d"],"dataset":{"id":"f58fef96-7b5e-40c2-9482-669088a19209","name":{"pool_name":"oxp_d7976706-d6ed-4465-8b04-450c96d8feec","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::d]:32345"},"services":[{"id":"f58fef96-7b5e-40c2-9482-669088a19209","details":{"type":"crucible","address":"[fd00:1122:3344:118::d]:32345"}}]},"root":"/pool/ext/d7976706-d6ed-4465-8b04-450c96d8feec/crypt/zone"},{"zone":{"id":"624f1168-47b6-4aa1-84da-e20a0d74d783","zone_type":"crucible","addresses":["fd00:1122:3344:118::b"],"dataset":{"id":"624f1168-47b6-4aa1-84da-e20a0d74d783","name":{"pool_name":"oxp_a78caf97-6145-4908-83b5-a03a6d2e0ac4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::b]:32345"},"services":[{"id":"624f1168-47b6-4aa1-84da-e20a0d74d783","details":{"type":"crucible","address":"[fd00:1122:3344:118::b]:32345"}}]},"root":"/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone"},{"zone":{"id":"8ea85412-19b4-45c1-a53c-027ddd629296","zone_type":"crucible","addresses":["fd00:1122:3344:118::6"],"dataset":{"id":"8ea85412-19b4-45c1-a53c-027ddd629296","name":{"pool_name":"oxp_d5f4c903-155a-4c91-aadd-6039a4f64821","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::6]:32345"},"services":[{"id":"8ea85412-19b4-45c1-a53c-027ddd629296","details":{"type":"crucible","address":"[fd00:1122:3344:118::6]:32345"}}]},"root":"/pool/ext/7d2a7685-c1c9-4d2d-a2bb-df65d96ea3e2/crypt/zone"},{"zone":{"id":"fd226b82-71d7-4719-b32c-a6c7abe28a2a","zone_type":"external_dns","addresses":["fd00:1122:3344:118::3"],"dataset":{"id":"fd226b82-71d7-4719-b32c-a6c7abe28a2a","name":{"pool_name":"oxp_84a80b58-70e9-439c-9558-5b343d9a4b53","kind":{"type":"external_dns"}},"service_address":"[fd00:1122:3344:118::3]:5353"},"services":[{"id":"fd226b82-71d7-4719-b32c-a6c7abe28a2a","details":{"type":"external_dns","http_address":"[fd00:1122:3344:118::3]:5353","dns_address":"45.154.216.34:53","nic":{"id":"7f72b6fd-1120-44dc-b3a7-f727502ba47c","kind":{"type":"service","id":"fd226b82-71d7-4719-b32c-a6c7abe28a2a"},"name":"external-dns-fd226b82-71d7-4719-b32c-a6c7abe28a2a","ip":"172.30.1.6","mac":"A8:40:25:FF:9E:D1","subnet":"172.30.1.0/24","vni":100,"primary":true,"slot":0}}}]},"root":"/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone"},{"zone":{"id":"08d0c38d-f0d9-45b9-856d-b85059fe5f07","zone_type":"crucible","addresses":["fd00:1122:3344:118::4"],"dataset":{"id":"08d0c38d-f0d9-45b9-856d-b85059fe5f07","name":{"pool_name":"oxp_84a80b58-70e9-439c-9558-5b343d9a4b53","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::4]:32345"},"services":[{"id":"08d0c38d-f0d9-45b9-856d-b85059fe5f07","details":{"type":"crucible","address":"[fd00:1122:3344:118::4]:32345"}}]},"root":"/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone"},{"zone":{"id":"5de7d3fd-4a3f-4fdd-b6b2-d1186e16dce5","zone_type":"crucible","addresses":["fd00:1122:3344:118::7"],"dataset":{"id
":"5de7d3fd-4a3f-4fdd-b6b2-d1186e16dce5","name":{"pool_name":"oxp_d76e058f-2d1e-4b15-b3a0-e5509a246876","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::7]:32345"},"services":[{"id":"5de7d3fd-4a3f-4fdd-b6b2-d1186e16dce5","details":{"type":"crucible","address":"[fd00:1122:3344:118::7]:32345"}}]},"root":"/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone"},{"zone":{"id":"5d0f5cad-10b3-497c-903b-eeeabce920e2","zone_type":"crucible","addresses":["fd00:1122:3344:118::8"],"dataset":{"id":"5d0f5cad-10b3-497c-903b-eeeabce920e2","name":{"pool_name":"oxp_3a3ad639-8800-4951-bc2a-201d269e47a2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::8]:32345"},"services":[{"id":"5d0f5cad-10b3-497c-903b-eeeabce920e2","details":{"type":"crucible","address":"[fd00:1122:3344:118::8]:32345"}}]},"root":"/pool/ext/3a3ad639-8800-4951-bc2a-201d269e47a2/crypt/zone"},{"zone":{"id":"39f9cefa-801c-4843-9fb9-05446ffbdd1a","zone_type":"crucible","addresses":["fd00:1122:3344:118::a"],"dataset":{"id":"39f9cefa-801c-4843-9fb9-05446ffbdd1a","name":{"pool_name":"oxp_7d2a7685-c1c9-4d2d-a2bb-df65d96ea3e2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::a]:32345"},"services":[{"id":"39f9cefa-801c-4843-9fb9-05446ffbdd1a","details":{"type":"crucible","address":"[fd00:1122:3344:118::a]:32345"}}]},"root":"/pool/ext/a78caf97-6145-4908-83b5-a03a6d2e0ac4/crypt/zone"},{"zone":{"id":"0711e710-7fdd-4e68-94c8-294b8677e804","zone_type":"crucible","addresses":["fd00:1122:3344:118::5"],"dataset":{"id":"0711e710-7fdd-4e68-94c8-294b8677e804","name":{"pool_name":"oxp_a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::5]:32345"},"services":[{"id":"0711e710-7fdd-4e68-94c8-294b8677e804","details":{"type":"crucible","address":"[fd00:1122:3344:118::5]:32345"}}]},"root":"/pool/ext/3a3ad639-8800-4951-bc2a-201d269e47a2/crypt/zone"},{"zone":{"id":"318a62cc-5c6c-4805-9fb6-c0f6a75ce31c","zone_type":"crucible","addresses":["fd00:1122:3344:118::c"],"dataset":{"id":"318a62cc-5c6c-4805-9fb6-c0f6a75ce31c","name":{"pool_name":"oxp_1d5f0ba3-6b31-4cea-a9a9-2065a538887d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::c]:32345"},"services":[{"id":"318a62cc-5c6c-4805-9fb6-c0f6a75ce31c","details":{"type":"crucible","address":"[fd00:1122:3344:118::c]:32345"}}]},"root":"/pool/ext/d7976706-d6ed-4465-8b04-450c96d8feec/crypt/zone"},{"zone":{"id":"463d0498-85b9-40eb-af96-d99af58a587c","zone_type":"ntp","addresses":["fd00:1122:3344:118::e"],"dataset":null,"services":[{"id":"463d0498-85b9-40eb-af96-d99af58a587c","details":{"type":"internal_ntp","address":"[fd00:1122:3344:118::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/d5f4c903-155a-4c91-aadd-6039a4f64821/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled26.json b/sled-agent/tests/old-service-ledgers/rack3-sled26.json deleted file mode 100644 index 0978cb9e45..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled26.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"d8b3de97-cc79-48f6-83ad-02017c21223b","zone_type":"crucible_pantry","addresses":["fd00:1122:3344:119::3"],"dataset":null,"services":[{"id":"d8b3de97-cc79-48f6-83ad-02017c21223b","details":{"type":"crucible_pantry","address":"[fd00:1122:3344:119::3]:17000"}}]},"root":"/pool/ext/e0faea44-8b5c-40b0-bb75-a1aec1a10377/crypt/zone"},{"zone":{"id":"adba1a3b-5bac-44d5-aa5a-879dc6eadb5f","zone_type":"crucible","addresses":["fd00:1122:3344:119::c"],"dataset":{"id":"adba1a3b-5bac-44d5-aa5a-879dc6eadb5f","name":{"pool_name":"oxp_21c339c3-6461-4bdb-8b0e-c0f9f08ee10b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::c]:32345"},"services":[{"id":"adba1a3b-5bac-44d5-aa5a-879dc6eadb5f","details":{"type":"crucible","address":"[fd00:1122:3344:119::c]:32345"}}]},"root":"/pool/ext/f5c73c28-2168-4321-b737-4ca6663155c9/crypt/zone"},{"zone":{"id":"42bb9833-5c39-4aba-b2c4-da2ca1287728","zone_type":"crucible","addresses":["fd00:1122:3344:119::a"],"dataset":{"id":"42bb9833-5c39-4aba-b2c4-da2ca1287728","name":{"pool_name":"oxp_1f91451d-a466-4c9a-a6e6-0abd7985595f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::a]:32345"},"services":[{"id":"42bb9833-5c39-4aba-b2c4-da2ca1287728","details":{"type":"crucible","address":"[fd00:1122:3344:119::a]:32345"}}]},"root":"/pool/ext/21c339c3-6461-4bdb-8b0e-c0f9f08ee10b/crypt/zone"},{"zone":{"id":"197695e1-d949-4982-b679-6e5c9ab4bcc7","zone_type":"crucible","addresses":["fd00:1122:3344:119::b"],"dataset":{"id":"197695e1-d949-4982-b679-6e5c9ab4bcc7","name":{"pool_name":"oxp_e0faea44-8b5c-40b0-bb75-a1aec1a10377","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::b]:32345"},"services":[{"id":"197695e1-d949-4982-b679-6e5c9ab4bcc7","details":{"type":"crucible","address":"[fd00:1122:3344:119::b]:32345"}}]},"root":"/pool/ext/b31e1815-cae0-4145-940c-874fff63bdd5/crypt/zone"},{"zone":{"id":"bf99d4f8-edf1-4de5-98d4-8e6a24965005","zone_type":"crucible","addresses":["fd00:1122:3344:119::8"],"dataset":{"id":"bf99d4f8-edf1-4de5-98d4-8e6a24965005","name":{"pool_name":"oxp_ef2c3afb-6962-4f6b-b567-14766bbd9ec0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::8]:32345"},"services":[{"id":"bf99d4f8-edf1-4de5-98d4-8e6a24965005","details":{"type":"crucible","address":"[fd00:1122:3344:119::8]:32345"}}]},"root":"/pool/ext/21c339c3-6461-4bdb-8b0e-c0f9f08ee10b/crypt/zone"},{"zone":{"id":"390d1853-8be9-4987-b8b6-f022999bf4e7","zone_type":"crucible","addresses":["fd00:1122:3344:119::7"],"dataset":{"id":"390d1853-8be9-4987-b8b6-f022999bf4e7","name":{"pool_name":"oxp_06eed00a-d8d3-4b9d-84c9-23fce535f63e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::7]:32345"},"services":[{"id":"390d1853-8be9-4987-b8b6-f022999bf4e7","details":{"type":"crucible","address":"[fd00:1122:3344:119::7]:32345"}}]},"root":"/pool/ext/ef2c3afb-6962-4f6b-b567-14766bbd9ec0/crypt/zone"},{"zone":{"id":"76fe2161-90df-41b5-9c94-067de9c29db1","zone_type":"crucible","addresses":["fd00:1122:3344:119::4"],"dataset":{"id":"76fe2161-90df-41b5-9c94-067de9c29db1","name":{"pool_name":"oxp_f5c73c28-2168-4321-b737-4ca6663155c9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::4]:32345"},"services":[{"id":"76fe2161-90df-41b5-9c94-067de9c29db1","details":{"type":"crucible","address":"[fd00:1122:3344:119::4]:32345"}}]},"root":"/pool/ext/ef2c3afb-6962-4f6b-b567-14766bbd9ec0/crypt/zone"},{"zone":{"id":"f49dc522-2b13-4055-964c-8315671096aa","zone_type":"crucible","addresses":["fd00:1122:3344:119::d"],"da
taset":{"id":"f49dc522-2b13-4055-964c-8315671096aa","name":{"pool_name":"oxp_662c278b-7f5f-4c7e-91ff-70207e8a307b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::d]:32345"},"services":[{"id":"f49dc522-2b13-4055-964c-8315671096aa","details":{"type":"crucible","address":"[fd00:1122:3344:119::d]:32345"}}]},"root":"/pool/ext/1f91451d-a466-4c9a-a6e6-0abd7985595f/crypt/zone"},{"zone":{"id":"08cc7bd6-368e-4d16-a619-28b17eff35af","zone_type":"crucible","addresses":["fd00:1122:3344:119::9"],"dataset":{"id":"08cc7bd6-368e-4d16-a619-28b17eff35af","name":{"pool_name":"oxp_5516b9ac-b139-40da-aa3b-f094568ba095","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::9]:32345"},"services":[{"id":"08cc7bd6-368e-4d16-a619-28b17eff35af","details":{"type":"crucible","address":"[fd00:1122:3344:119::9]:32345"}}]},"root":"/pool/ext/06eed00a-d8d3-4b9d-84c9-23fce535f63e/crypt/zone"},{"zone":{"id":"74b0613f-bce8-4922-93e0-b5bfccfc8443","zone_type":"crucible","addresses":["fd00:1122:3344:119::5"],"dataset":{"id":"74b0613f-bce8-4922-93e0-b5bfccfc8443","name":{"pool_name":"oxp_b31e1815-cae0-4145-940c-874fff63bdd5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::5]:32345"},"services":[{"id":"74b0613f-bce8-4922-93e0-b5bfccfc8443","details":{"type":"crucible","address":"[fd00:1122:3344:119::5]:32345"}}]},"root":"/pool/ext/21c339c3-6461-4bdb-8b0e-c0f9f08ee10b/crypt/zone"},{"zone":{"id":"55fcfc62-8435-475f-a2aa-29373901b993","zone_type":"crucible","addresses":["fd00:1122:3344:119::6"],"dataset":{"id":"55fcfc62-8435-475f-a2aa-29373901b993","name":{"pool_name":"oxp_eadf6a03-1028-4d48-ac0d-0d27ef2c8c0f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::6]:32345"},"services":[{"id":"55fcfc62-8435-475f-a2aa-29373901b993","details":{"type":"crucible","address":"[fd00:1122:3344:119::6]:32345"}}]},"root":"/pool/ext/1f91451d-a466-4c9a-a6e6-0abd7985595f/crypt/zone"},{"zone":{"id":"d52ccea3-6d7f-43a6-a19f-e0409f4e9cdc","zone_type":"ntp","addresses":["fd00:1122:3344:119::e"],"dataset":null,"services":[{"id":"d52ccea3-6d7f-43a6-a19f-e0409f4e9cdc","details":{"type":"internal_ntp","address":"[fd00:1122:3344:119::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/f5c73c28-2168-4321-b737-4ca6663155c9/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled27.json b/sled-agent/tests/old-service-ledgers/rack3-sled27.json deleted file mode 100644 index 0b2db29c4a..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled27.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"095e612f-e218-4a16-aa6e-98c3d69a470a","zone_type":"crucible","addresses":["fd00:1122:3344:10d::a"],"dataset":{"id":"095e612f-e218-4a16-aa6e-98c3d69a470a","name":{"pool_name":"oxp_9f657858-623f-4d78-9841-6e620b5ede30","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::a]:32345"},"services":[{"id":"095e612f-e218-4a16-aa6e-98c3d69a470a","details":{"type":"crucible","address":"[fd00:1122:3344:10d::a]:32345"}}]},"root":"/pool/ext/2d086b51-2b77-4bc7-adc6-43586ea38ce9/crypt/zone"},{"zone":{"id":"de818730-0e3b-4567-94e7-344bd9b6f564","zone_type":"crucible","addresses":["fd00:1122:3344:10d::3"],"dataset":{"id":"de818730-0e3b-4567-94e7-344bd9b6f564","name":{"pool_name":"oxp_ba6ab301-07e1-4d35-80ac-59612f2c2bdb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::3]:32345"},"services":[{"id":"de818730-0e3b-4567-94e7-344bd9b6f564","details":{"type":"crucible","address":"[fd00:1122:3344:10d::3]:32345"}}]},"root":"/pool/ext/7cee2806-e898-47d8-b568-e276a6e271f8/crypt/zone"},{"zone":{"id":"6a21dc3c-3a9d-4520-9a91-7d8f2737bcd4","zone_type":"crucible","addresses":["fd00:1122:3344:10d::4"],"dataset":{"id":"6a21dc3c-3a9d-4520-9a91-7d8f2737bcd4","name":{"pool_name":"oxp_7cee2806-e898-47d8-b568-e276a6e271f8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::4]:32345"},"services":[{"id":"6a21dc3c-3a9d-4520-9a91-7d8f2737bcd4","details":{"type":"crucible","address":"[fd00:1122:3344:10d::4]:32345"}}]},"root":"/pool/ext/cef23d87-31ed-40d5-99b8-12d7be8e46e7/crypt/zone"},{"zone":{"id":"e01b7f45-b8d7-4944-ba5b-41fb699889a9","zone_type":"crucible","addresses":["fd00:1122:3344:10d::b"],"dataset":{"id":"e01b7f45-b8d7-4944-ba5b-41fb699889a9","name":{"pool_name":"oxp_d9af8878-50bd-4425-95d9-e6556ce92cfa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::b]:32345"},"services":[{"id":"e01b7f45-b8d7-4944-ba5b-41fb699889a9","details":{"type":"crucible","address":"[fd00:1122:3344:10d::b]:32345"}}]},"root":"/pool/ext/6fe9bcaa-88cb-451d-b086-24a3ad53fa22/crypt/zone"},{"zone":{"id":"4271ef62-d319-4e80-b157-915321cec8c7","zone_type":"crucible","addresses":["fd00:1122:3344:10d::c"],"dataset":{"id":"4271ef62-d319-4e80-b157-915321cec8c7","name":{"pool_name":"oxp_ba8ee7dd-cdfb-48bd-92ce-4dc45e070930","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::c]:32345"},"services":[{"id":"4271ef62-d319-4e80-b157-915321cec8c7","details":{"type":"crucible","address":"[fd00:1122:3344:10d::c]:32345"}}]},"root":"/pool/ext/9f657858-623f-4d78-9841-6e620b5ede30/crypt/zone"},{"zone":{"id":"6bdcc159-aeb9-4903-9486-dd8b43a3dc16","zone_type":"crucible","addresses":["fd00:1122:3344:10d::8"],"dataset":{"id":"6bdcc159-aeb9-4903-9486-dd8b43a3dc16","name":{"pool_name":"oxp_5b03a5dc-bb5a-4bf4-bc21-0af849cd1dab","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::8]:32345"},"services":[{"id":"6bdcc159-aeb9-4903-9486-dd8b43a3dc16","details":{"type":"crucible","address":"[fd00:1122:3344:10d::8]:32345"}}]},"root":"/pool/ext/d9af8878-50bd-4425-95d9-e6556ce92cfa/crypt/zone"},{"zone":{"id":"85540e54-cdd7-4baa-920c-5cf54cbc1f83","zone_type":"crucible","addresses":["fd00:1122:3344:10d::7"],"dataset":{"id":"85540e54-cdd7-4baa-920c-5cf54cbc1f83","name":{"pool_name":"oxp_ee24f9a6-84ab-49a5-a28f-e394abfcaa95","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::7]:32345"},"services":[{"id":"85540e54-cdd7-4baa-920c-5cf54cbc1f83","details":{"type":"crucible","address":"[fd00:1122:3344:10d::7]:32345"}}]},"root":"/pool/ext
/9f657858-623f-4d78-9841-6e620b5ede30/crypt/zone"},{"zone":{"id":"750d1a0b-6a14-46c5-9a0b-a504caefb198","zone_type":"crucible","addresses":["fd00:1122:3344:10d::9"],"dataset":{"id":"750d1a0b-6a14-46c5-9a0b-a504caefb198","name":{"pool_name":"oxp_cef23d87-31ed-40d5-99b8-12d7be8e46e7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::9]:32345"},"services":[{"id":"750d1a0b-6a14-46c5-9a0b-a504caefb198","details":{"type":"crucible","address":"[fd00:1122:3344:10d::9]:32345"}}]},"root":"/pool/ext/ba8ee7dd-cdfb-48bd-92ce-4dc45e070930/crypt/zone"},{"zone":{"id":"b5996893-1a9a-434e-a257-d702694f058b","zone_type":"crucible","addresses":["fd00:1122:3344:10d::6"],"dataset":{"id":"b5996893-1a9a-434e-a257-d702694f058b","name":{"pool_name":"oxp_2d086b51-2b77-4bc7-adc6-43586ea38ce9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::6]:32345"},"services":[{"id":"b5996893-1a9a-434e-a257-d702694f058b","details":{"type":"crucible","address":"[fd00:1122:3344:10d::6]:32345"}}]},"root":"/pool/ext/7cee2806-e898-47d8-b568-e276a6e271f8/crypt/zone"},{"zone":{"id":"8b36686a-b98d-451a-9124-a3583000a83a","zone_type":"crucible","addresses":["fd00:1122:3344:10d::5"],"dataset":{"id":"8b36686a-b98d-451a-9124-a3583000a83a","name":{"pool_name":"oxp_6fe9bcaa-88cb-451d-b086-24a3ad53fa22","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::5]:32345"},"services":[{"id":"8b36686a-b98d-451a-9124-a3583000a83a","details":{"type":"crucible","address":"[fd00:1122:3344:10d::5]:32345"}}]},"root":"/pool/ext/9f657858-623f-4d78-9841-6e620b5ede30/crypt/zone"},{"zone":{"id":"88d695a2-c8c1-41af-85b0-77424f4d650d","zone_type":"ntp","addresses":["fd00:1122:3344:10d::d"],"dataset":null,"services":[{"id":"88d695a2-c8c1-41af-85b0-77424f4d650d","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10d::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/ba6ab301-07e1-4d35-80ac-59612f2c2bdb/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled28.json b/sled-agent/tests/old-service-ledgers/rack3-sled28.json deleted file mode 100644 index ec137c18fa..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled28.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"a126365d-f459-43bf-9f99-dbe1c4cdecf8","zone_type":"crucible","addresses":["fd00:1122:3344:113::4"],"dataset":{"id":"a126365d-f459-43bf-9f99-dbe1c4cdecf8","name":{"pool_name":"oxp_c99eabb2-6815-416a-9660-87e2609b357a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::4]:32345"},"services":[{"id":"a126365d-f459-43bf-9f99-dbe1c4cdecf8","details":{"type":"crucible","address":"[fd00:1122:3344:113::4]:32345"}}]},"root":"/pool/ext/6461a450-f043-4d1e-bc03-4a68ed5fe94a/crypt/zone"},{"zone":{"id":"52f57ef8-546a-43bd-a0f3-8c42b99c37a6","zone_type":"crucible","addresses":["fd00:1122:3344:113::3"],"dataset":{"id":"52f57ef8-546a-43bd-a0f3-8c42b99c37a6","name":{"pool_name":"oxp_f6530e9c-6d64-44fa-93d5-ae427916fbf1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::3]:32345"},"services":[{"id":"52f57ef8-546a-43bd-a0f3-8c42b99c37a6","details":{"type":"crucible","address":"[fd00:1122:3344:113::3]:32345"}}]},"root":"/pool/ext/97662260-6b62-450f-9d7e-42f7dee5d568/crypt/zone"},{"zone":{"id":"3ee87855-9423-43ff-800a-fa4fdbf1d956","zone_type":"crucible","addresses":["fd00:1122:3344:113::a"],"dataset":{"id":"3ee87855-9423-43ff-800a-fa4fdbf1d956","name":{"pool_name":"oxp_6461a450-f043-4d1e-bc03-4a68ed5fe94a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::a]:32345"},"services":[{"id":"3ee87855-9423-43ff-800a-fa4fdbf1d956","details":{"type":"crucible","address":"[fd00:1122:3344:113::a]:32345"}}]},"root":"/pool/ext/9515dc86-fe62-4d4f-b38d-b3461cc042fc/crypt/zone"},{"zone":{"id":"55d0ddf9-9b24-4a7a-b97f-248e240f9ba6","zone_type":"crucible","addresses":["fd00:1122:3344:113::5"],"dataset":{"id":"55d0ddf9-9b24-4a7a-b97f-248e240f9ba6","name":{"pool_name":"oxp_97662260-6b62-450f-9d7e-42f7dee5d568","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::5]:32345"},"services":[{"id":"55d0ddf9-9b24-4a7a-b97f-248e240f9ba6","details":{"type":"crucible","address":"[fd00:1122:3344:113::5]:32345"}}]},"root":"/pool/ext/9515dc86-fe62-4d4f-b38d-b3461cc042fc/crypt/zone"},{"zone":{"id":"014cad37-56a7-4b2a-9c9e-505b15b4de85","zone_type":"crucible","addresses":["fd00:1122:3344:113::b"],"dataset":{"id":"014cad37-56a7-4b2a-9c9e-505b15b4de85","name":{"pool_name":"oxp_8529ce8e-21d2-4b23-b9fd-6b90c7ae4f90","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::b]:32345"},"services":[{"id":"014cad37-56a7-4b2a-9c9e-505b15b4de85","details":{"type":"crucible","address":"[fd00:1122:3344:113::b]:32345"}}]},"root":"/pool/ext/6461a450-f043-4d1e-bc03-4a68ed5fe94a/crypt/zone"},{"zone":{"id":"e14fb192-aaab-42ab-aa86-c85f13955940","zone_type":"crucible","addresses":["fd00:1122:3344:113::6"],"dataset":{"id":"e14fb192-aaab-42ab-aa86-c85f13955940","name":{"pool_name":"oxp_5a9455ca-fb01-4549-9a70-7579c031779d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::6]:32345"},"services":[{"id":"e14fb192-aaab-42ab-aa86-c85f13955940","details":{"type":"crucible","address":"[fd00:1122:3344:113::6]:32345"}}]},"root":"/pool/ext/f6530e9c-6d64-44fa-93d5-ae427916fbf1/crypt/zone"},{"zone":{"id":"14540609-9371-442b-8486-88c244e97cd4","zone_type":"crucible","addresses":["fd00:1122:3344:113::8"],"dataset":{"id":"14540609-9371-442b-8486-88c244e97cd4","name":{"pool_name":"oxp_2916d6f3-8775-4887-a6d3-f9723982756f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::8]:32345"},"services":[{"id":"14540609-9371-442b-8486-88c244e97cd4","details":{"type":"crucible","address":"[fd00:1122:3344:113::8]:32345"}}]},"root":"/pool/ext
/8529ce8e-21d2-4b23-b9fd-6b90c7ae4f90/crypt/zone"},{"zone":{"id":"97a6b35f-0af9-41eb-93a1-f8bc5dbba357","zone_type":"crucible","addresses":["fd00:1122:3344:113::7"],"dataset":{"id":"97a6b35f-0af9-41eb-93a1-f8bc5dbba357","name":{"pool_name":"oxp_9515dc86-fe62-4d4f-b38d-b3461cc042fc","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::7]:32345"},"services":[{"id":"97a6b35f-0af9-41eb-93a1-f8bc5dbba357","details":{"type":"crucible","address":"[fd00:1122:3344:113::7]:32345"}}]},"root":"/pool/ext/8529ce8e-21d2-4b23-b9fd-6b90c7ae4f90/crypt/zone"},{"zone":{"id":"5734aa24-cb66-4b0a-9eb2-564646f8d729","zone_type":"crucible","addresses":["fd00:1122:3344:113::9"],"dataset":{"id":"5734aa24-cb66-4b0a-9eb2-564646f8d729","name":{"pool_name":"oxp_9f889a6c-17b1-4edd-9659-458d91439dc1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::9]:32345"},"services":[{"id":"5734aa24-cb66-4b0a-9eb2-564646f8d729","details":{"type":"crucible","address":"[fd00:1122:3344:113::9]:32345"}}]},"root":"/pool/ext/a5074e7f-8d3b-40e0-a79e-dbd9af9d5693/crypt/zone"},{"zone":{"id":"ba86eca1-1427-4540-b4a6-1d9a0e1bc656","zone_type":"crucible","addresses":["fd00:1122:3344:113::c"],"dataset":{"id":"ba86eca1-1427-4540-b4a6-1d9a0e1bc656","name":{"pool_name":"oxp_a5074e7f-8d3b-40e0-a79e-dbd9af9d5693","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::c]:32345"},"services":[{"id":"ba86eca1-1427-4540-b4a6-1d9a0e1bc656","details":{"type":"crucible","address":"[fd00:1122:3344:113::c]:32345"}}]},"root":"/pool/ext/2916d6f3-8775-4887-a6d3-f9723982756f/crypt/zone"},{"zone":{"id":"6634dbc4-d22f-40a4-8cd3-4f271d781fa1","zone_type":"ntp","addresses":["fd00:1122:3344:113::d"],"dataset":null,"services":[{"id":"6634dbc4-d22f-40a4-8cd3-4f271d781fa1","details":{"type":"internal_ntp","address":"[fd00:1122:3344:113::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/a5074e7f-8d3b-40e0-a79e-dbd9af9d5693/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled29.json b/sled-agent/tests/old-service-ledgers/rack3-sled29.json deleted file mode 100644 index 2618364e4f..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled29.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":5,"requests":[{"zone":{"id":"1cdd1ebf-9321-4f2d-914c-1e617f60b41a","zone_type":"crucible","addresses":["fd00:1122:3344:120::8"],"dataset":{"id":"1cdd1ebf-9321-4f2d-914c-1e617f60b41a","name":{"pool_name":"oxp_74046573-78a2-46b4-86dc-40bb2ee29dd5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::8]:32345"},"services":[{"id":"1cdd1ebf-9321-4f2d-914c-1e617f60b41a","details":{"type":"crucible","address":"[fd00:1122:3344:120::8]:32345"}}]},"root":"/pool/ext/c1f0a9e4-ea10-4fd9-8b6d-79a2bacfec5e/crypt/zone"},{"zone":{"id":"720a0d08-d1c0-43ba-af86-f2dac1a53639","zone_type":"crucible","addresses":["fd00:1122:3344:120::c"],"dataset":{"id":"720a0d08-d1c0-43ba-af86-f2dac1a53639","name":{"pool_name":"oxp_068d2790-1044-41ed-97a5-b493490b14d1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::c]:32345"},"services":[{"id":"720a0d08-d1c0-43ba-af86-f2dac1a53639","details":{"type":"crucible","address":"[fd00:1122:3344:120::c]:32345"}}]},"root":"/pool/ext/86cd16cf-d00d-40bc-b14a-8220b1e11476/crypt/zone"},{"zone":{"id":"d9f0b97b-2cef-4155-b45f-7db89263e4cf","zone_type":"crucible","addresses":["fd00:1122:3344:120::9"],"dataset":{"id":"d9f0b97b-2cef-4155-b45f-7db89263e4cf","name":{"pool_name":"oxp_8171bf0d-e61e-43f9-87d6-ec8833b80102","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::9]:32345"},"services":[{"id":"d9f0b97b-2cef-4155-b45f-7db89263e4cf","details":{"type":"crucible","address":"[fd00:1122:3344:120::9]:32345"}}]},"root":"/pool/ext/86cd16cf-d00d-40bc-b14a-8220b1e11476/crypt/zone"},{"zone":{"id":"018edff1-0d95-45a3-9a01-39c419bec55a","zone_type":"crucible","addresses":["fd00:1122:3344:120::b"],"dataset":{"id":"018edff1-0d95-45a3-9a01-39c419bec55a","name":{"pool_name":"oxp_0b11e026-f265-49a0-935f-7b234c19c789","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::b]:32345"},"services":[{"id":"018edff1-0d95-45a3-9a01-39c419bec55a","details":{"type":"crucible","address":"[fd00:1122:3344:120::b]:32345"}}]},"root":"/pool/ext/35db8700-d6a7-498c-9d2c-08eb9ab41b7c/crypt/zone"},{"zone":{"id":"f8cc1c1e-a556-436c-836d-42052101c38a","zone_type":"crucible","addresses":["fd00:1122:3344:120::3"],"dataset":{"id":"f8cc1c1e-a556-436c-836d-42052101c38a","name":{"pool_name":"oxp_ed8e5a26-5591-405a-b792-408f5b16e444","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::3]:32345"},"services":[{"id":"f8cc1c1e-a556-436c-836d-42052101c38a","details":{"type":"crucible","address":"[fd00:1122:3344:120::3]:32345"}}]},"root":"/pool/ext/1069bdee-fe5a-4164-a856-ff8ae56c07fb/crypt/zone"},{"zone":{"id":"f9600313-fac0-45a1-a1b5-02dd6af468b9","zone_type":"crucible","addresses":["fd00:1122:3344:120::4"],"dataset":{"id":"f9600313-fac0-45a1-a1b5-02dd6af468b9","name":{"pool_name":"oxp_c1f0a9e4-ea10-4fd9-8b6d-79a2bacfec5e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::4]:32345"},"services":[{"id":"f9600313-fac0-45a1-a1b5-02dd6af468b9","details":{"type":"crucible","address":"[fd00:1122:3344:120::4]:32345"}}]},"root":"/pool/ext/74046573-78a2-46b4-86dc-40bb2ee29dd5/crypt/zone"},{"zone":{"id":"869e4f7c-5312-4b98-bacc-1508f236bf5a","zone_type":"crucible","addresses":["fd00:1122:3344:120::6"],"dataset":{"id":"869e4f7c-5312-4b98-bacc-1508f236bf5a","name":{"pool_name":"oxp_04aea8dc-4316-432f-a13a-d7d9b2efa3f2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::6]:32345"},"services":[{"id":"869e4f7c-5312-4b98-bacc-1508f236bf5a","details":{"type":"crucible","address":"[fd00:1122:3344:120::6]:32345"}}]},"root":"/pool/ext
/0b11e026-f265-49a0-935f-7b234c19c789/crypt/zone"},{"zone":{"id":"31ed5a0c-7caf-4825-b730-85ee94fe27f1","zone_type":"crucible","addresses":["fd00:1122:3344:120::a"],"dataset":{"id":"31ed5a0c-7caf-4825-b730-85ee94fe27f1","name":{"pool_name":"oxp_86cd16cf-d00d-40bc-b14a-8220b1e11476","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::a]:32345"},"services":[{"id":"31ed5a0c-7caf-4825-b730-85ee94fe27f1","details":{"type":"crucible","address":"[fd00:1122:3344:120::a]:32345"}}]},"root":"/pool/ext/04aea8dc-4316-432f-a13a-d7d9b2efa3f2/crypt/zone"},{"zone":{"id":"7e5a3c39-152a-4270-b01e-9e144cca4aaa","zone_type":"crucible","addresses":["fd00:1122:3344:120::5"],"dataset":{"id":"7e5a3c39-152a-4270-b01e-9e144cca4aaa","name":{"pool_name":"oxp_1069bdee-fe5a-4164-a856-ff8ae56c07fb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::5]:32345"},"services":[{"id":"7e5a3c39-152a-4270-b01e-9e144cca4aaa","details":{"type":"crucible","address":"[fd00:1122:3344:120::5]:32345"}}]},"root":"/pool/ext/04aea8dc-4316-432f-a13a-d7d9b2efa3f2/crypt/zone"},{"zone":{"id":"9a03a386-7304-4a86-bee8-153ef643195e","zone_type":"crucible","addresses":["fd00:1122:3344:120::7"],"dataset":{"id":"9a03a386-7304-4a86-bee8-153ef643195e","name":{"pool_name":"oxp_35db8700-d6a7-498c-9d2c-08eb9ab41b7c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::7]:32345"},"services":[{"id":"9a03a386-7304-4a86-bee8-153ef643195e","details":{"type":"crucible","address":"[fd00:1122:3344:120::7]:32345"}}]},"root":"/pool/ext/068d2790-1044-41ed-97a5-b493490b14d1/crypt/zone"},{"zone":{"id":"a800d0a7-1020-481c-8be8-ecfd28b7a2be","zone_type":"ntp","addresses":["fd00:1122:3344:120::d"],"dataset":null,"services":[{"id":"a800d0a7-1020-481c-8be8-ecfd28b7a2be","details":{"type":"internal_ntp","address":"[fd00:1122:3344:120::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/c1f0a9e4-ea10-4fd9-8b6d-79a2bacfec5e/crypt/zone"},{"zone":{"id":"be469efd-8e07-4b8e-bcee-6fd33373cdef","zone_type":"internal_dns","addresses":["fd00:1122:3344:3::1"],"dataset":{"id":"be469efd-8e07-4b8e-bcee-6fd33373cdef","name":{"pool_name":"oxp_ed8e5a26-5591-405a-b792-408f5b16e444","kind":{"type":"internal_dns"}},"service_address":"[fd00:1122:3344:3::1]:5353"},"services":[{"id":"be469efd-8e07-4b8e-bcee-6fd33373cdef","details":{"type":"internal_dns","http_address":"[fd00:1122:3344:3::1]:5353","dns_address":"[fd00:1122:3344:3::1]:53","gz_address":"fd00:1122:3344:3::2","gz_address_index":2}}]},"root":"/pool/ext/068d2790-1044-41ed-97a5-b493490b14d1/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled3.json b/sled-agent/tests/old-service-ledgers/rack3-sled3.json deleted file mode 100644 index 6bcb626cf6..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled3.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"19d091b8-e005-4ff4-97e1-026de95e3667","zone_type":"crucible","addresses":["fd00:1122:3344:10f::c"],"dataset":{"id":"19d091b8-e005-4ff4-97e1-026de95e3667","name":{"pool_name":"oxp_11a63469-4f57-4976-8620-0055bf82dc97","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::c]:32345"},"services":[{"id":"19d091b8-e005-4ff4-97e1-026de95e3667","details":{"type":"crucible","address":"[fd00:1122:3344:10f::c]:32345"}}]},"root":"/pool/ext/6a73a62c-c636-4557-af45-042cb287aee6/crypt/zone"},{"zone":{"id":"57d77171-104e-4977-b2f9-9b529ee7f8a0","zone_type":"crucible","addresses":["fd00:1122:3344:10f::8"],"dataset":{"id":"57d77171-104e-4977-b2f9-9b529ee7f8a0","name":{"pool_name":"oxp_7f3060af-058f-4f52-ab80-902bd13e7ef4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::8]:32345"},"services":[{"id":"57d77171-104e-4977-b2f9-9b529ee7f8a0","details":{"type":"crucible","address":"[fd00:1122:3344:10f::8]:32345"}}]},"root":"/pool/ext/7f3060af-058f-4f52-ab80-902bd13e7ef4/crypt/zone"},{"zone":{"id":"b0371ccf-67da-4562-baf2-eaabe5243e9b","zone_type":"crucible","addresses":["fd00:1122:3344:10f::7"],"dataset":{"id":"b0371ccf-67da-4562-baf2-eaabe5243e9b","name":{"pool_name":"oxp_58ae04cb-26ff-4e30-a20d-9f847bafba4d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::7]:32345"},"services":[{"id":"b0371ccf-67da-4562-baf2-eaabe5243e9b","details":{"type":"crucible","address":"[fd00:1122:3344:10f::7]:32345"}}]},"root":"/pool/ext/125ddcda-f94b-46bc-a10a-94e9acf40265/crypt/zone"},{"zone":{"id":"ae3791ff-2657-4252-bd61-58ec5dc237cd","zone_type":"crucible","addresses":["fd00:1122:3344:10f::9"],"dataset":{"id":"ae3791ff-2657-4252-bd61-58ec5dc237cd","name":{"pool_name":"oxp_125ddcda-f94b-46bc-a10a-94e9acf40265","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::9]:32345"},"services":[{"id":"ae3791ff-2657-4252-bd61-58ec5dc237cd","details":{"type":"crucible","address":"[fd00:1122:3344:10f::9]:32345"}}]},"root":"/pool/ext/58ae04cb-26ff-4e30-a20d-9f847bafba4d/crypt/zone"},{"zone":{"id":"73f865dc-5db7-48c6-9dc4-dff56dd8c045","zone_type":"crucible_pantry","addresses":["fd00:1122:3344:10f::3"],"dataset":null,"services":[{"id":"73f865dc-5db7-48c6-9dc4-dff56dd8c045","details":{"type":"crucible_pantry","address":"[fd00:1122:3344:10f::3]:17000"}}]},"root":"/pool/ext/11a63469-4f57-4976-8620-0055bf82dc97/crypt/zone"},{"zone":{"id":"e5d0170a-0d60-4c51-8f72-4c301979690e","zone_type":"crucible","addresses":["fd00:1122:3344:10f::6"],"dataset":{"id":"e5d0170a-0d60-4c51-8f72-4c301979690e","name":{"pool_name":"oxp_efe4cbab-2a39-4d7d-ae6c-83eb3ab8d4b5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::6]:32345"},"services":[{"id":"e5d0170a-0d60-4c51-8f72-4c301979690e","details":{"type":"crucible","address":"[fd00:1122:3344:10f::6]:32345"}}]},"root":"/pool/ext/6a73a62c-c636-4557-af45-042cb287aee6/crypt/zone"},{"zone":{"id":"ea6894de-c575-43bc-86e9-65b8a58499ff","zone_type":"crucible","addresses":["fd00:1122:3344:10f::a"],"dataset":{"id":"ea6894de-c575-43bc-86e9-65b8a58499ff","name":{"pool_name":"oxp_a87dc882-8b88-4a99-9628-5db79072cffa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::a]:32345"},"services":[{"id":"ea6894de-c575-43bc-86e9-65b8a58499ff","details":{"type":"crucible","address":"[fd00:1122:3344:10f::a]:32345"}}]},"root":"/pool/ext/11a63469-4f57-4976-8620-0055bf82dc97/crypt/zone"},{"zone":{"id":"3081dc99-4fa9-4238-adfa-b9ca381c1f7b","zone_type":"crucible","addresses":["fd00:1122:3344:10f::b"],"da
taset":{"id":"3081dc99-4fa9-4238-adfa-b9ca381c1f7b","name":{"pool_name":"oxp_6a73a62c-c636-4557-af45-042cb287aee6","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::b]:32345"},"services":[{"id":"3081dc99-4fa9-4238-adfa-b9ca381c1f7b","details":{"type":"crucible","address":"[fd00:1122:3344:10f::b]:32345"}}]},"root":"/pool/ext/a87dc882-8b88-4a99-9628-5db79072cffa/crypt/zone"},{"zone":{"id":"b4a3d7c8-487d-4d76-ae4e-a6a51595a5a6","zone_type":"crucible","addresses":["fd00:1122:3344:10f::d"],"dataset":{"id":"b4a3d7c8-487d-4d76-ae4e-a6a51595a5a6","name":{"pool_name":"oxp_a12f87ee-9918-4269-9de4-4bad4fb41caa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::d]:32345"},"services":[{"id":"b4a3d7c8-487d-4d76-ae4e-a6a51595a5a6","details":{"type":"crucible","address":"[fd00:1122:3344:10f::d]:32345"}}]},"root":"/pool/ext/a12f87ee-9918-4269-9de4-4bad4fb41caa/crypt/zone"},{"zone":{"id":"5ebcee26-f76c-4206-8d81-584ac138d3b9","zone_type":"crucible","addresses":["fd00:1122:3344:10f::4"],"dataset":{"id":"5ebcee26-f76c-4206-8d81-584ac138d3b9","name":{"pool_name":"oxp_27f1917e-fb69-496a-9d40-8ef0d0c0ee55","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::4]:32345"},"services":[{"id":"5ebcee26-f76c-4206-8d81-584ac138d3b9","details":{"type":"crucible","address":"[fd00:1122:3344:10f::4]:32345"}}]},"root":"/pool/ext/58ae04cb-26ff-4e30-a20d-9f847bafba4d/crypt/zone"},{"zone":{"id":"90b2bc57-3a2a-4117-bb6d-7eda7542329a","zone_type":"crucible","addresses":["fd00:1122:3344:10f::5"],"dataset":{"id":"90b2bc57-3a2a-4117-bb6d-7eda7542329a","name":{"pool_name":"oxp_a222e405-40f6-4fdd-9146-94f7d94ed08a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::5]:32345"},"services":[{"id":"90b2bc57-3a2a-4117-bb6d-7eda7542329a","details":{"type":"crucible","address":"[fd00:1122:3344:10f::5]:32345"}}]},"root":"/pool/ext/a12f87ee-9918-4269-9de4-4bad4fb41caa/crypt/zone"},{"zone":{"id":"0fb540af-58d3-4abc-bfad-e49765c2b1ee","zone_type":"ntp","addresses":["fd00:1122:3344:10f::e"],"dataset":null,"services":[{"id":"0fb540af-58d3-4abc-bfad-e49765c2b1ee","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10f::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/58ae04cb-26ff-4e30-a20d-9f847bafba4d/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled30.json b/sled-agent/tests/old-service-ledgers/rack3-sled30.json deleted file mode 100644 index e919de3488..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled30.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"dda0f1c6-84a5-472c-b350-a799c8d3d0eb","zone_type":"crucible","addresses":["fd00:1122:3344:115::8"],"dataset":{"id":"dda0f1c6-84a5-472c-b350-a799c8d3d0eb","name":{"pool_name":"oxp_028b6c9e-5a0e-43d2-a8ed-a5946cf62924","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::8]:32345"},"services":[{"id":"dda0f1c6-84a5-472c-b350-a799c8d3d0eb","details":{"type":"crucible","address":"[fd00:1122:3344:115::8]:32345"}}]},"root":"/pool/ext/b8d84b9c-a65e-4c86-8196-69da5317ae63/crypt/zone"},{"zone":{"id":"157672f9-113f-48b7-9808-dff3c3e67dcd","zone_type":"crucible","addresses":["fd00:1122:3344:115::a"],"dataset":{"id":"157672f9-113f-48b7-9808-dff3c3e67dcd","name":{"pool_name":"oxp_4fdca201-b37e-4072-a1cc-3cb7705954eb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::a]:32345"},"services":[{"id":"157672f9-113f-48b7-9808-dff3c3e67dcd","details":{"type":"crucible","address":"[fd00:1122:3344:115::a]:32345"}}]},"root":"/pool/ext/b8d84b9c-a65e-4c86-8196-69da5317ae63/crypt/zone"},{"zone":{"id":"5a7d4f67-a70f-4d8b-8d35-4dc600991fb5","zone_type":"crucible","addresses":["fd00:1122:3344:115::5"],"dataset":{"id":"5a7d4f67-a70f-4d8b-8d35-4dc600991fb5","name":{"pool_name":"oxp_11a991e5-19a9-48b0-8186-34249ef67957","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::5]:32345"},"services":[{"id":"5a7d4f67-a70f-4d8b-8d35-4dc600991fb5","details":{"type":"crucible","address":"[fd00:1122:3344:115::5]:32345"}}]},"root":"/pool/ext/1e9c9764-aaa4-4681-b110-a937b4c52748/crypt/zone"},{"zone":{"id":"c7036645-b680-4816-834f-8ae1af24c159","zone_type":"crucible","addresses":["fd00:1122:3344:115::b"],"dataset":{"id":"c7036645-b680-4816-834f-8ae1af24c159","name":{"pool_name":"oxp_0780be56-c13d-4c6a-a1ac-37753a0da820","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::b]:32345"},"services":[{"id":"c7036645-b680-4816-834f-8ae1af24c159","details":{"type":"crucible","address":"[fd00:1122:3344:115::b]:32345"}}]},"root":"/pool/ext/80a8d756-ee22-4c88-8b5b-4a46f7eca249/crypt/zone"},{"zone":{"id":"45e47e4b-708f-40b5-a8c8-fbfd73696d45","zone_type":"crucible","addresses":["fd00:1122:3344:115::7"],"dataset":{"id":"45e47e4b-708f-40b5-a8c8-fbfd73696d45","name":{"pool_name":"oxp_80a8d756-ee22-4c88-8b5b-4a46f7eca249","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::7]:32345"},"services":[{"id":"45e47e4b-708f-40b5-a8c8-fbfd73696d45","details":{"type":"crucible","address":"[fd00:1122:3344:115::7]:32345"}}]},"root":"/pool/ext/4fdca201-b37e-4072-a1cc-3cb7705954eb/crypt/zone"},{"zone":{"id":"e805b0c1-3f80-49da-8dc1-caaf843e5003","zone_type":"crucible","addresses":["fd00:1122:3344:115::c"],"dataset":{"id":"e805b0c1-3f80-49da-8dc1-caaf843e5003","name":{"pool_name":"oxp_d54e1ed7-e589-4413-a487-6e9a257104e7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::c]:32345"},"services":[{"id":"e805b0c1-3f80-49da-8dc1-caaf843e5003","details":{"type":"crucible","address":"[fd00:1122:3344:115::c]:32345"}}]},"root":"/pool/ext/d54e1ed7-e589-4413-a487-6e9a257104e7/crypt/zone"},{"zone":{"id":"e47d3f81-3df6-4c35-bec6-41277bc74c07","zone_type":"crucible","addresses":["fd00:1122:3344:115::4"],"dataset":{"id":"e47d3f81-3df6-4c35-bec6-41277bc74c07","name":{"pool_name":"oxp_b8d84b9c-a65e-4c86-8196-69da5317ae63","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::4]:32345"},"services":[{"id":"e47d3f81-3df6-4c35-bec6-41277bc74c07","details":{"type":"crucible","address":"[fd00:1122:3344:115::4]:32345"}}]},"root":"/pool/ext
/772b3aaa-3501-4dc7-9b3d-048b8b1f7970/crypt/zone"},{"zone":{"id":"2a796a69-b061-44c7-b2df-35bc611f10f5","zone_type":"crucible","addresses":["fd00:1122:3344:115::6"],"dataset":{"id":"2a796a69-b061-44c7-b2df-35bc611f10f5","name":{"pool_name":"oxp_73abe9e0-d38e-48fc-bdec-b094bfa5670d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::6]:32345"},"services":[{"id":"2a796a69-b061-44c7-b2df-35bc611f10f5","details":{"type":"crucible","address":"[fd00:1122:3344:115::6]:32345"}}]},"root":"/pool/ext/028b6c9e-5a0e-43d2-a8ed-a5946cf62924/crypt/zone"},{"zone":{"id":"4e1d2af1-8ef4-4762-aa80-b08da08b45bb","zone_type":"crucible","addresses":["fd00:1122:3344:115::3"],"dataset":{"id":"4e1d2af1-8ef4-4762-aa80-b08da08b45bb","name":{"pool_name":"oxp_772b3aaa-3501-4dc7-9b3d-048b8b1f7970","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::3]:32345"},"services":[{"id":"4e1d2af1-8ef4-4762-aa80-b08da08b45bb","details":{"type":"crucible","address":"[fd00:1122:3344:115::3]:32345"}}]},"root":"/pool/ext/d54e1ed7-e589-4413-a487-6e9a257104e7/crypt/zone"},{"zone":{"id":"fb1b10d5-b7cb-416d-98fc-b5d3bc02d495","zone_type":"crucible","addresses":["fd00:1122:3344:115::9"],"dataset":{"id":"fb1b10d5-b7cb-416d-98fc-b5d3bc02d495","name":{"pool_name":"oxp_1e9c9764-aaa4-4681-b110-a937b4c52748","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::9]:32345"},"services":[{"id":"fb1b10d5-b7cb-416d-98fc-b5d3bc02d495","details":{"type":"crucible","address":"[fd00:1122:3344:115::9]:32345"}}]},"root":"/pool/ext/b8d84b9c-a65e-4c86-8196-69da5317ae63/crypt/zone"},{"zone":{"id":"5155463c-8a09-45a5-ad1b-817f2e93b284","zone_type":"ntp","addresses":["fd00:1122:3344:115::d"],"dataset":null,"services":[{"id":"5155463c-8a09-45a5-ad1b-817f2e93b284","details":{"type":"internal_ntp","address":"[fd00:1122:3344:115::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/772b3aaa-3501-4dc7-9b3d-048b8b1f7970/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled31.json b/sled-agent/tests/old-service-ledgers/rack3-sled31.json deleted file mode 100644 index d984227227..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled31.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"a0eae689-8e6b-4297-bb3d-8b7ffc5c4a07","zone_type":"crucible","addresses":["fd00:1122:3344:102::c"],"dataset":{"id":"a0eae689-8e6b-4297-bb3d-8b7ffc5c4a07","name":{"pool_name":"oxp_274cb567-fd74-4e00-b9c7-6ca367b3fda4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::c]:32345"},"services":[{"id":"a0eae689-8e6b-4297-bb3d-8b7ffc5c4a07","details":{"type":"crucible","address":"[fd00:1122:3344:102::c]:32345"}}]},"root":"/pool/ext/1443b190-de16-42b0-b881-e87e875dd507/crypt/zone"},{"zone":{"id":"9cea406d-451e-4328-9052-b58487f799a5","zone_type":"crucible","addresses":["fd00:1122:3344:102::b"],"dataset":{"id":"9cea406d-451e-4328-9052-b58487f799a5","name":{"pool_name":"oxp_89c7f72e-632c-462b-a515-01cd80683711","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::b]:32345"},"services":[{"id":"9cea406d-451e-4328-9052-b58487f799a5","details":{"type":"crucible","address":"[fd00:1122:3344:102::b]:32345"}}]},"root":"/pool/ext/274cb567-fd74-4e00-b9c7-6ca367b3fda4/crypt/zone"},{"zone":{"id":"9c7dad7e-7f60-4bf4-8efc-0883a17e7cf6","zone_type":"crucible","addresses":["fd00:1122:3344:102::6"],"dataset":{"id":"9c7dad7e-7f60-4bf4-8efc-0883a17e7cf6","name":{"pool_name":"oxp_2c8e5637-b989-4b8f-82ac-ff2e9102b560","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::6]:32345"},"services":[{"id":"9c7dad7e-7f60-4bf4-8efc-0883a17e7cf6","details":{"type":"crucible","address":"[fd00:1122:3344:102::6]:32345"}}]},"root":"/pool/ext/1443b190-de16-42b0-b881-e87e875dd507/crypt/zone"},{"zone":{"id":"73015cba-79c6-4a67-97d8-fa0819cbf750","zone_type":"crucible","addresses":["fd00:1122:3344:102::a"],"dataset":{"id":"73015cba-79c6-4a67-97d8-fa0819cbf750","name":{"pool_name":"oxp_fa62108e-f7bb-4f6d-86f3-8094a1ea8352","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::a]:32345"},"services":[{"id":"73015cba-79c6-4a67-97d8-fa0819cbf750","details":{"type":"crucible","address":"[fd00:1122:3344:102::a]:32345"}}]},"root":"/pool/ext/2c8e5637-b989-4b8f-82ac-ff2e9102b560/crypt/zone"},{"zone":{"id":"f9ca3097-072e-4e7f-9f50-eb7c7ae39b6f","zone_type":"crucible","addresses":["fd00:1122:3344:102::5"],"dataset":{"id":"f9ca3097-072e-4e7f-9f50-eb7c7ae39b6f","name":{"pool_name":"oxp_42c6602c-2ccf-48ce-8344-693c832fd693","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::5]:32345"},"services":[{"id":"f9ca3097-072e-4e7f-9f50-eb7c7ae39b6f","details":{"type":"crucible","address":"[fd00:1122:3344:102::5]:32345"}}]},"root":"/pool/ext/2c8e5637-b989-4b8f-82ac-ff2e9102b560/crypt/zone"},{"zone":{"id":"e7855e05-a125-4a80-ac2c-8a2db96e1bf8","zone_type":"crucible","addresses":["fd00:1122:3344:102::7"],"dataset":{"id":"e7855e05-a125-4a80-ac2c-8a2db96e1bf8","name":{"pool_name":"oxp_1f72afd3-d2aa-46a8-b81a-54dbcc2f6317","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::7]:32345"},"services":[{"id":"e7855e05-a125-4a80-ac2c-8a2db96e1bf8","details":{"type":"crucible","address":"[fd00:1122:3344:102::7]:32345"}}]},"root":"/pool/ext/42c6602c-2ccf-48ce-8344-693c832fd693/crypt/zone"},{"zone":{"id":"e5de9bc9-e996-4fea-8318-ad7a8a6be4a3","zone_type":"crucible","addresses":["fd00:1122:3344:102::4"],"dataset":{"id":"e5de9bc9-e996-4fea-8318-ad7a8a6be4a3","name":{"pool_name":"oxp_1443b190-de16-42b0-b881-e87e875dd507","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::4]:32345"},"services":[{"id":"e5de9bc9-e996-4fea-8318-ad7a8a6be4a3","details":{"type":"crucible","address":"[fd00:1122:3344:102::4]:32345"}}]},"root":"/pool/ext
/89c7f72e-632c-462b-a515-01cd80683711/crypt/zone"},{"zone":{"id":"cd0d0aac-44ff-4566-9260-a64ae6cecef4","zone_type":"crucible","addresses":["fd00:1122:3344:102::8"],"dataset":{"id":"cd0d0aac-44ff-4566-9260-a64ae6cecef4","name":{"pool_name":"oxp_92c0d1f6-cb4d-4ddb-b5ba-979fb3491812","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::8]:32345"},"services":[{"id":"cd0d0aac-44ff-4566-9260-a64ae6cecef4","details":{"type":"crucible","address":"[fd00:1122:3344:102::8]:32345"}}]},"root":"/pool/ext/89c7f72e-632c-462b-a515-01cd80683711/crypt/zone"},{"zone":{"id":"a8230592-0e7a-46c8-a653-7587a27f05bf","zone_type":"crucible","addresses":["fd00:1122:3344:102::9"],"dataset":{"id":"a8230592-0e7a-46c8-a653-7587a27f05bf","name":{"pool_name":"oxp_1b7873de-99fd-454f-b576-bff695524133","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::9]:32345"},"services":[{"id":"a8230592-0e7a-46c8-a653-7587a27f05bf","details":{"type":"crucible","address":"[fd00:1122:3344:102::9]:32345"}}]},"root":"/pool/ext/92c0d1f6-cb4d-4ddb-b5ba-979fb3491812/crypt/zone"},{"zone":{"id":"c19ffbb1-4dc1-4825-a3cf-080e9b543b16","zone_type":"crucible","addresses":["fd00:1122:3344:102::d"],"dataset":{"id":"c19ffbb1-4dc1-4825-a3cf-080e9b543b16","name":{"pool_name":"oxp_67823df7-511c-4984-b98c-7a8f5c40c22d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::d]:32345"},"services":[{"id":"c19ffbb1-4dc1-4825-a3cf-080e9b543b16","details":{"type":"crucible","address":"[fd00:1122:3344:102::d]:32345"}}]},"root":"/pool/ext/1443b190-de16-42b0-b881-e87e875dd507/crypt/zone"},{"zone":{"id":"ff30fe7c-51f3-43b9-a788-d8f94a7bb028","zone_type":"cockroach_db","addresses":["fd00:1122:3344:102::3"],"dataset":{"id":"ff30fe7c-51f3-43b9-a788-d8f94a7bb028","name":{"pool_name":"oxp_1443b190-de16-42b0-b881-e87e875dd507","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:102::3]:32221"},"services":[{"id":"ff30fe7c-51f3-43b9-a788-d8f94a7bb028","details":{"type":"cockroach_db","address":"[fd00:1122:3344:102::3]:32221"}}]},"root":"/pool/ext/fa62108e-f7bb-4f6d-86f3-8094a1ea8352/crypt/zone"},{"zone":{"id":"16b50c55-8117-4efd-aabf-0273677b89d5","zone_type":"ntp","addresses":["fd00:1122:3344:102::e"],"dataset":null,"services":[{"id":"16b50c55-8117-4efd-aabf-0273677b89d5","details":{"type":"internal_ntp","address":"[fd00:1122:3344:102::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/fa62108e-f7bb-4f6d-86f3-8094a1ea8352/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled4.json b/sled-agent/tests/old-service-ledgers/rack3-sled4.json deleted file mode 100644 index e9e5ce5569..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled4.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"22452953-ee80-4659-a555-8e027bf205b0","zone_type":"crucible","addresses":["fd00:1122:3344:10c::4"],"dataset":{"id":"22452953-ee80-4659-a555-8e027bf205b0","name":{"pool_name":"oxp_92ba1667-a6f7-4913-9b00-14825384c7bf","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::4]:32345"},"services":[{"id":"22452953-ee80-4659-a555-8e027bf205b0","details":{"type":"crucible","address":"[fd00:1122:3344:10c::4]:32345"}}]},"root":"/pool/ext/ab62b941-5f84-42c7-929d-295b20efffe7/crypt/zone"},{"zone":{"id":"9a5a2fcf-44a0-4468-979a-a71686cef627","zone_type":"crucible","addresses":["fd00:1122:3344:10c::3"],"dataset":{"id":"9a5a2fcf-44a0-4468-979a-a71686cef627","name":{"pool_name":"oxp_dbfdc981-1b81-4d7d-9449-9530890b199a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::3]:32345"},"services":[{"id":"9a5a2fcf-44a0-4468-979a-a71686cef627","details":{"type":"crucible","address":"[fd00:1122:3344:10c::3]:32345"}}]},"root":"/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone"},{"zone":{"id":"a014f12e-2636-4258-af76-e01d9b8d1c1f","zone_type":"crucible","addresses":["fd00:1122:3344:10c::b"],"dataset":{"id":"a014f12e-2636-4258-af76-e01d9b8d1c1f","name":{"pool_name":"oxp_ab62b941-5f84-42c7-929d-295b20efffe7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::b]:32345"},"services":[{"id":"a014f12e-2636-4258-af76-e01d9b8d1c1f","details":{"type":"crucible","address":"[fd00:1122:3344:10c::b]:32345"}}]},"root":"/pool/ext/a624a843-1c4e-41c3-a1d2-4be7a6c57e9b/crypt/zone"},{"zone":{"id":"431768b8-26ba-4ab4-b616-9e183bb79b8b","zone_type":"crucible","addresses":["fd00:1122:3344:10c::7"],"dataset":{"id":"431768b8-26ba-4ab4-b616-9e183bb79b8b","name":{"pool_name":"oxp_7c121177-3210-4457-9b42-3657add6e166","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::7]:32345"},"services":[{"id":"431768b8-26ba-4ab4-b616-9e183bb79b8b","details":{"type":"crucible","address":"[fd00:1122:3344:10c::7]:32345"}}]},"root":"/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone"},{"zone":{"id":"22992c56-bd5a-4d0f-86c5-d6f8e87b7bbb","zone_type":"crucible","addresses":["fd00:1122:3344:10c::9"],"dataset":{"id":"22992c56-bd5a-4d0f-86c5-d6f8e87b7bbb","name":{"pool_name":"oxp_842bdd28-196e-4b18-83db-68bd81176a44","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::9]:32345"},"services":[{"id":"22992c56-bd5a-4d0f-86c5-d6f8e87b7bbb","details":{"type":"crucible","address":"[fd00:1122:3344:10c::9]:32345"}}]},"root":"/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone"},{"zone":{"id":"de376149-aa45-4660-9ae6-15e8ba4a4233","zone_type":"crucible","addresses":["fd00:1122:3344:10c::5"],"dataset":{"id":"de376149-aa45-4660-9ae6-15e8ba4a4233","name":{"pool_name":"oxp_25856a84-6707-4b94-81d1-b43d5bc990d7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::5]:32345"},"services":[{"id":"de376149-aa45-4660-9ae6-15e8ba4a4233","details":{"type":"crucible","address":"[fd00:1122:3344:10c::5]:32345"}}]},"root":"/pool/ext/7c121177-3210-4457-9b42-3657add6e166/crypt/zone"},{"zone":{"id":"ceeba69d-8c0a-47df-a37b-7f1b90f23016","zone_type":"crucible","addresses":["fd00:1122:3344:10c::a"],"dataset":{"id":"ceeba69d-8c0a-47df-a37b-7f1b90f23016","name":{"pool_name":"oxp_a624a843-1c4e-41c3-a1d2-4be7a6c57e9b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::a]:32345"},"services":[{"id":"ceeba69d-8c0a-47df-a37b-7f1b90f23016","details":{"type":"crucible","address":"[fd00:1122:3344:10c::a]:32345"}}]},"root":"/pool/ext
/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone"},{"zone":{"id":"65293ce4-2e63-4336-9207-3c61f58667f9","zone_type":"crucible","addresses":["fd00:1122:3344:10c::c"],"dataset":{"id":"65293ce4-2e63-4336-9207-3c61f58667f9","name":{"pool_name":"oxp_74ac4da9-cdae-4c08-8431-11211184aa09","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::c]:32345"},"services":[{"id":"65293ce4-2e63-4336-9207-3c61f58667f9","details":{"type":"crucible","address":"[fd00:1122:3344:10c::c]:32345"}}]},"root":"/pool/ext/842bdd28-196e-4b18-83db-68bd81176a44/crypt/zone"},{"zone":{"id":"e8f55a5d-65f9-436c-bc25-1d1a7070e876","zone_type":"crucible","addresses":["fd00:1122:3344:10c::6"],"dataset":{"id":"e8f55a5d-65f9-436c-bc25-1d1a7070e876","name":{"pool_name":"oxp_9bfe385c-16dd-4209-bc0b-f28ae75d58e3","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::6]:32345"},"services":[{"id":"e8f55a5d-65f9-436c-bc25-1d1a7070e876","details":{"type":"crucible","address":"[fd00:1122:3344:10c::6]:32345"}}]},"root":"/pool/ext/92ba1667-a6f7-4913-9b00-14825384c7bf/crypt/zone"},{"zone":{"id":"2dfbd4c6-afbf-4c8c-bf40-764f02727852","zone_type":"crucible","addresses":["fd00:1122:3344:10c::8"],"dataset":{"id":"2dfbd4c6-afbf-4c8c-bf40-764f02727852","name":{"pool_name":"oxp_55eb093d-6b6f-418c-9767-09afe4c51fff","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::8]:32345"},"services":[{"id":"2dfbd4c6-afbf-4c8c-bf40-764f02727852","details":{"type":"crucible","address":"[fd00:1122:3344:10c::8]:32345"}}]},"root":"/pool/ext/dbfdc981-1b81-4d7d-9449-9530890b199a/crypt/zone"},{"zone":{"id":"8c73baf7-1a58-4e2c-b4d1-966c89a18d03","zone_type":"ntp","addresses":["fd00:1122:3344:10c::d"],"dataset":null,"services":[{"id":"8c73baf7-1a58-4e2c-b4d1-966c89a18d03","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10c::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/842bdd28-196e-4b18-83db-68bd81176a44/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled5.json b/sled-agent/tests/old-service-ledgers/rack3-sled5.json deleted file mode 100644 index ea7b5ec40a..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled5.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"2f488e7b-fd93-48a6-8b2b-61f6e8336268","zone_type":"crucible","addresses":["fd00:1122:3344:101::b"],"dataset":{"id":"2f488e7b-fd93-48a6-8b2b-61f6e8336268","name":{"pool_name":"oxp_5840a3b7-f765-45d3-8a41-7f543f936bee","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::b]:32345"},"services":[{"id":"2f488e7b-fd93-48a6-8b2b-61f6e8336268","details":{"type":"crucible","address":"[fd00:1122:3344:101::b]:32345"}}]},"root":"/pool/ext/dd084b76-1130-4ad3-9196-6b02be607fe9/crypt/zone"},{"zone":{"id":"1ed5fd3f-933a-4921-a91f-5c286823f8d4","zone_type":"crucible","addresses":["fd00:1122:3344:101::a"],"dataset":{"id":"1ed5fd3f-933a-4921-a91f-5c286823f8d4","name":{"pool_name":"oxp_c1e807e7-b64a-4dbd-b845-ffed0b9a54f1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::a]:32345"},"services":[{"id":"1ed5fd3f-933a-4921-a91f-5c286823f8d4","details":{"type":"crucible","address":"[fd00:1122:3344:101::a]:32345"}}]},"root":"/pool/ext/be06ea9c-df86-4fec-b5dd-8809710893af/crypt/zone"},{"zone":{"id":"0f8f1013-465d-4b49-b55d-f0b9bf6f789a","zone_type":"crucible","addresses":["fd00:1122:3344:101::6"],"dataset":{"id":"0f8f1013-465d-4b49-b55d-f0b9bf6f789a","name":{"pool_name":"oxp_4dfa7003-0305-47f5-b23d-88a228c1e12e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::6]:32345"},"services":[{"id":"0f8f1013-465d-4b49-b55d-f0b9bf6f789a","details":{"type":"crucible","address":"[fd00:1122:3344:101::6]:32345"}}]},"root":"/pool/ext/be06ea9c-df86-4fec-b5dd-8809710893af/crypt/zone"},{"zone":{"id":"2e4ef017-6c62-40bc-bab5-f2e01addad22","zone_type":"crucible","addresses":["fd00:1122:3344:101::7"],"dataset":{"id":"2e4ef017-6c62-40bc-bab5-f2e01addad22","name":{"pool_name":"oxp_d94e9c58-e6d1-444b-b7d8-19ac17dea042","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::7]:32345"},"services":[{"id":"2e4ef017-6c62-40bc-bab5-f2e01addad22","details":{"type":"crucible","address":"[fd00:1122:3344:101::7]:32345"}}]},"root":"/pool/ext/c1e807e7-b64a-4dbd-b845-ffed0b9a54f1/crypt/zone"},{"zone":{"id":"6a0baf13-a80b-4778-a0ab-a69cd851de2d","zone_type":"crucible","addresses":["fd00:1122:3344:101::9"],"dataset":{"id":"6a0baf13-a80b-4778-a0ab-a69cd851de2d","name":{"pool_name":"oxp_be06ea9c-df86-4fec-b5dd-8809710893af","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::9]:32345"},"services":[{"id":"6a0baf13-a80b-4778-a0ab-a69cd851de2d","details":{"type":"crucible","address":"[fd00:1122:3344:101::9]:32345"}}]},"root":"/pool/ext/a9d419d4-5915-4a40-baa3-3512785de034/crypt/zone"},{"zone":{"id":"391ec257-fd47-4cc8-9bfa-49a0747a9a67","zone_type":"crucible","addresses":["fd00:1122:3344:101::8"],"dataset":{"id":"391ec257-fd47-4cc8-9bfa-49a0747a9a67","name":{"pool_name":"oxp_a9d419d4-5915-4a40-baa3-3512785de034","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::8]:32345"},"services":[{"id":"391ec257-fd47-4cc8-9bfa-49a0747a9a67","details":{"type":"crucible","address":"[fd00:1122:3344:101::8]:32345"}}]},"root":"/pool/ext/709d5d04-5dff-4558-8b5d-fbc2a7d83036/crypt/zone"},{"zone":{"id":"fd8e615a-f170-4da9-b8d0-2a5a123d8682","zone_type":"crucible_pantry","addresses":["fd00:1122:3344:101::3"],"dataset":null,"services":[{"id":"fd8e615a-f170-4da9-b8d0-2a5a123d8682","details":{"type":"crucible_pantry","address":"[fd00:1122:3344:101::3]:17000"}}]},"root":"/pool/ext/dd084b76-1130-4ad3-9196-6b02be607fe9/crypt/zone"},{"zone":{"id":"f8a793f4-cd08-49ec-8fee-6bcd37092fdc","zone_type":"crucible","addresses":["fd00:1122:3344:101::c"],"da
taset":{"id":"f8a793f4-cd08-49ec-8fee-6bcd37092fdc","name":{"pool_name":"oxp_709d5d04-5dff-4558-8b5d-fbc2a7d83036","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::c]:32345"},"services":[{"id":"f8a793f4-cd08-49ec-8fee-6bcd37092fdc","details":{"type":"crucible","address":"[fd00:1122:3344:101::c]:32345"}}]},"root":"/pool/ext/d94e9c58-e6d1-444b-b7d8-19ac17dea042/crypt/zone"},{"zone":{"id":"c67d44be-d6b8-4a08-a7e0-3ab300749ad6","zone_type":"crucible","addresses":["fd00:1122:3344:101::4"],"dataset":{"id":"c67d44be-d6b8-4a08-a7e0-3ab300749ad6","name":{"pool_name":"oxp_231cd696-2839-4a9a-ae42-6d875a98a797","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::4]:32345"},"services":[{"id":"c67d44be-d6b8-4a08-a7e0-3ab300749ad6","details":{"type":"crucible","address":"[fd00:1122:3344:101::4]:32345"}}]},"root":"/pool/ext/709d5d04-5dff-4558-8b5d-fbc2a7d83036/crypt/zone"},{"zone":{"id":"e91b4957-8165-451d-9fa5-090c3a39f199","zone_type":"crucible","addresses":["fd00:1122:3344:101::d"],"dataset":{"id":"e91b4957-8165-451d-9fa5-090c3a39f199","name":{"pool_name":"oxp_dd084b76-1130-4ad3-9196-6b02be607fe9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::d]:32345"},"services":[{"id":"e91b4957-8165-451d-9fa5-090c3a39f199","details":{"type":"crucible","address":"[fd00:1122:3344:101::d]:32345"}}]},"root":"/pool/ext/5840a3b7-f765-45d3-8a41-7f543f936bee/crypt/zone"},{"zone":{"id":"5e737b6e-d33d-4a2c-b8c0-3cad9d05a68f","zone_type":"crucible","addresses":["fd00:1122:3344:101::5"],"dataset":{"id":"5e737b6e-d33d-4a2c-b8c0-3cad9d05a68f","name":{"pool_name":"oxp_8fa4f837-c6f3-4c65-88d4-21eb3cd7ffee","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::5]:32345"},"services":[{"id":"5e737b6e-d33d-4a2c-b8c0-3cad9d05a68f","details":{"type":"crucible","address":"[fd00:1122:3344:101::5]:32345"}}]},"root":"/pool/ext/dd084b76-1130-4ad3-9196-6b02be607fe9/crypt/zone"},{"zone":{"id":"7e6b7816-b1a6-40f3-894a-a5d5c0571dbb","zone_type":"ntp","addresses":["fd00:1122:3344:101::e"],"dataset":null,"services":[{"id":"7e6b7816-b1a6-40f3-894a-a5d5c0571dbb","details":{"type":"internal_ntp","address":"[fd00:1122:3344:101::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/be06ea9c-df86-4fec-b5dd-8809710893af/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled6.json b/sled-agent/tests/old-service-ledgers/rack3-sled6.json deleted file mode 100644 index 2c499813cd..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled6.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"eafffae7-69fd-49e1-9541-7cf237ab12b3","zone_type":"crucible","addresses":["fd00:1122:3344:110::3"],"dataset":{"id":"eafffae7-69fd-49e1-9541-7cf237ab12b3","name":{"pool_name":"oxp_929404cd-2522-4440-b21c-91d466a9a7e0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::3]:32345"},"services":[{"id":"eafffae7-69fd-49e1-9541-7cf237ab12b3","details":{"type":"crucible","address":"[fd00:1122:3344:110::3]:32345"}}]},"root":"/pool/ext/aff390ed-8d70-49fa-9000-5420b54ab118/crypt/zone"},{"zone":{"id":"f4bccf15-d69f-402d-9bd2-7959a4cb2823","zone_type":"crucible","addresses":["fd00:1122:3344:110::9"],"dataset":{"id":"f4bccf15-d69f-402d-9bd2-7959a4cb2823","name":{"pool_name":"oxp_f80f96be-a3d7-490a-96a7-faf7da80a579","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::9]:32345"},"services":[{"id":"f4bccf15-d69f-402d-9bd2-7959a4cb2823","details":{"type":"crucible","address":"[fd00:1122:3344:110::9]:32345"}}]},"root":"/pool/ext/6bcd54c8-d4a8-429d-8f17-cf02615eb063/crypt/zone"},{"zone":{"id":"82e51c9d-c187-4baa-8307-e46eeafc5ff2","zone_type":"crucible","addresses":["fd00:1122:3344:110::5"],"dataset":{"id":"82e51c9d-c187-4baa-8307-e46eeafc5ff2","name":{"pool_name":"oxp_37d86199-6834-49d9-888a-88ff6f281b29","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::5]:32345"},"services":[{"id":"82e51c9d-c187-4baa-8307-e46eeafc5ff2","details":{"type":"crucible","address":"[fd00:1122:3344:110::5]:32345"}}]},"root":"/pool/ext/d2e27e2a-2deb-42ae-84a7-c2d06f3aeb4f/crypt/zone"},{"zone":{"id":"cf667caf-304c-40c4-acce-f0eb05d011ef","zone_type":"crucible","addresses":["fd00:1122:3344:110::8"],"dataset":{"id":"cf667caf-304c-40c4-acce-f0eb05d011ef","name":{"pool_name":"oxp_625c0110-644e-4d63-8321-b85ab5642260","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::8]:32345"},"services":[{"id":"cf667caf-304c-40c4-acce-f0eb05d011ef","details":{"type":"crucible","address":"[fd00:1122:3344:110::8]:32345"}}]},"root":"/pool/ext/d2e27e2a-2deb-42ae-84a7-c2d06f3aeb4f/crypt/zone"},{"zone":{"id":"14e60912-108e-4dd3-984e-2332a183b346","zone_type":"crucible","addresses":["fd00:1122:3344:110::b"],"dataset":{"id":"14e60912-108e-4dd3-984e-2332a183b346","name":{"pool_name":"oxp_fa6470f5-0a4c-4fef-b0b1-57c8749c6cca","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::b]:32345"},"services":[{"id":"14e60912-108e-4dd3-984e-2332a183b346","details":{"type":"crucible","address":"[fd00:1122:3344:110::b]:32345"}}]},"root":"/pool/ext/6c5ab641-3bd4-4d8c-96f4-4f56c1045142/crypt/zone"},{"zone":{"id":"1aacf923-c96f-4bab-acb0-63f28e86eef6","zone_type":"crucible","addresses":["fd00:1122:3344:110::c"],"dataset":{"id":"1aacf923-c96f-4bab-acb0-63f28e86eef6","name":{"pool_name":"oxp_21b0f3ed-d27f-4996-968b-bf2b494d9308","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::c]:32345"},"services":[{"id":"1aacf923-c96f-4bab-acb0-63f28e86eef6","details":{"type":"crucible","address":"[fd00:1122:3344:110::c]:32345"}}]},"root":"/pool/ext/625c0110-644e-4d63-8321-b85ab5642260/crypt/zone"},{"zone":{"id":"b9db0845-04d3-4dc1-84ba-224749562a6c","zone_type":"crucible","addresses":["fd00:1122:3344:110::6"],"dataset":{"id":"b9db0845-04d3-4dc1-84ba-224749562a6c","name":{"pool_name":"oxp_d2e27e2a-2deb-42ae-84a7-c2d06f3aeb4f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::6]:32345"},"services":[{"id":"b9db0845-04d3-4dc1-84ba-224749562a6c","details":{"type":"crucible","address":"[fd00:1122:3344:110::6]:32345"}}]},"root":"/pool/ext
/aff390ed-8d70-49fa-9000-5420b54ab118/crypt/zone"},{"zone":{"id":"38b51865-ee80-4e1b-a40b-3452951f9022","zone_type":"crucible","addresses":["fd00:1122:3344:110::7"],"dataset":{"id":"38b51865-ee80-4e1b-a40b-3452951f9022","name":{"pool_name":"oxp_6bcd54c8-d4a8-429d-8f17-cf02615eb063","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::7]:32345"},"services":[{"id":"38b51865-ee80-4e1b-a40b-3452951f9022","details":{"type":"crucible","address":"[fd00:1122:3344:110::7]:32345"}}]},"root":"/pool/ext/37d86199-6834-49d9-888a-88ff6f281b29/crypt/zone"},{"zone":{"id":"4bc441f6-f7e5-4d68-8751-53ef1e251c47","zone_type":"crucible","addresses":["fd00:1122:3344:110::a"],"dataset":{"id":"4bc441f6-f7e5-4d68-8751-53ef1e251c47","name":{"pool_name":"oxp_6c5ab641-3bd4-4d8c-96f4-4f56c1045142","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::a]:32345"},"services":[{"id":"4bc441f6-f7e5-4d68-8751-53ef1e251c47","details":{"type":"crucible","address":"[fd00:1122:3344:110::a]:32345"}}]},"root":"/pool/ext/21b0f3ed-d27f-4996-968b-bf2b494d9308/crypt/zone"},{"zone":{"id":"d2c20cf8-ed4c-4815-add9-45996364f721","zone_type":"crucible","addresses":["fd00:1122:3344:110::4"],"dataset":{"id":"d2c20cf8-ed4c-4815-add9-45996364f721","name":{"pool_name":"oxp_aff390ed-8d70-49fa-9000-5420b54ab118","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::4]:32345"},"services":[{"id":"d2c20cf8-ed4c-4815-add9-45996364f721","details":{"type":"crucible","address":"[fd00:1122:3344:110::4]:32345"}}]},"root":"/pool/ext/6c5ab641-3bd4-4d8c-96f4-4f56c1045142/crypt/zone"},{"zone":{"id":"1bb548cb-889a-411e-8c67-d1b785225180","zone_type":"ntp","addresses":["fd00:1122:3344:110::d"],"dataset":null,"services":[{"id":"1bb548cb-889a-411e-8c67-d1b785225180","details":{"type":"internal_ntp","address":"[fd00:1122:3344:110::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/6bcd54c8-d4a8-429d-8f17-cf02615eb063/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled7.json b/sled-agent/tests/old-service-ledgers/rack3-sled7.json deleted file mode 100644 index fb701a2bdb..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled7.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"2eb74fa3-71ec-484c-8ffa-3daeab0e4c78","zone_type":"crucible","addresses":["fd00:1122:3344:11d::3"],"dataset":{"id":"2eb74fa3-71ec-484c-8ffa-3daeab0e4c78","name":{"pool_name":"oxp_c6b63fea-e3e2-4806-b8dc-bdfe7b5c3d89","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::3]:32345"},"services":[{"id":"2eb74fa3-71ec-484c-8ffa-3daeab0e4c78","details":{"type":"crucible","address":"[fd00:1122:3344:11d::3]:32345"}}]},"root":"/pool/ext/9f20cbae-7a63-4c31-9386-2ac3cbe12030/crypt/zone"},{"zone":{"id":"9f92bfcf-7435-44a6-8e77-0597f93cd0b4","zone_type":"crucible","addresses":["fd00:1122:3344:11d::7"],"dataset":{"id":"9f92bfcf-7435-44a6-8e77-0597f93cd0b4","name":{"pool_name":"oxp_9fa336f1-2b69-4ebf-9553-e3bab7e3e6ef","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::7]:32345"},"services":[{"id":"9f92bfcf-7435-44a6-8e77-0597f93cd0b4","details":{"type":"crucible","address":"[fd00:1122:3344:11d::7]:32345"}}]},"root":"/pool/ext/e05a6264-63f2-4961-bc14-57b4f65614c0/crypt/zone"},{"zone":{"id":"1bf9aed4-9fd3-4d87-b8e7-7f066d25ec1d","zone_type":"crucible","addresses":["fd00:1122:3344:11d::b"],"dataset":{"id":"1bf9aed4-9fd3-4d87-b8e7-7f066d25ec1d","name":{"pool_name":"oxp_a5a52f47-9c9a-4519-83dc-abc56619495d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::b]:32345"},"services":[{"id":"1bf9aed4-9fd3-4d87-b8e7-7f066d25ec1d","details":{"type":"crucible","address":"[fd00:1122:3344:11d::b]:32345"}}]},"root":"/pool/ext/cbcad26e-5e52-41b7-9875-1a84d30d8a15/crypt/zone"},{"zone":{"id":"2a722aa7-cd8a-445d-83fe-57fc9b9a8249","zone_type":"crucible","addresses":["fd00:1122:3344:11d::8"],"dataset":{"id":"2a722aa7-cd8a-445d-83fe-57fc9b9a8249","name":{"pool_name":"oxp_1f4b71eb-505f-4706-912c-b13dd3f2eafb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::8]:32345"},"services":[{"id":"2a722aa7-cd8a-445d-83fe-57fc9b9a8249","details":{"type":"crucible","address":"[fd00:1122:3344:11d::8]:32345"}}]},"root":"/pool/ext/a5a52f47-9c9a-4519-83dc-abc56619495d/crypt/zone"},{"zone":{"id":"76af5b23-d833-435c-b848-2a09d9fad9a1","zone_type":"crucible","addresses":["fd00:1122:3344:11d::c"],"dataset":{"id":"76af5b23-d833-435c-b848-2a09d9fad9a1","name":{"pool_name":"oxp_cbcad26e-5e52-41b7-9875-1a84d30d8a15","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::c]:32345"},"services":[{"id":"76af5b23-d833-435c-b848-2a09d9fad9a1","details":{"type":"crucible","address":"[fd00:1122:3344:11d::c]:32345"}}]},"root":"/pool/ext/9f20cbae-7a63-4c31-9386-2ac3cbe12030/crypt/zone"},{"zone":{"id":"3a412bf4-a385-4e66-9ada-a87f6536d6ca","zone_type":"crucible","addresses":["fd00:1122:3344:11d::4"],"dataset":{"id":"3a412bf4-a385-4e66-9ada-a87f6536d6ca","name":{"pool_name":"oxp_e05a6264-63f2-4961-bc14-57b4f65614c0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::4]:32345"},"services":[{"id":"3a412bf4-a385-4e66-9ada-a87f6536d6ca","details":{"type":"crucible","address":"[fd00:1122:3344:11d::4]:32345"}}]},"root":"/pool/ext/e05a6264-63f2-4961-bc14-57b4f65614c0/crypt/zone"},{"zone":{"id":"99a25fa7-8231-4a46-a6ec-ffc5281db1f8","zone_type":"crucible","addresses":["fd00:1122:3344:11d::5"],"dataset":{"id":"99a25fa7-8231-4a46-a6ec-ffc5281db1f8","name":{"pool_name":"oxp_722494ab-9a2b-481b-ac11-292fded682a5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::5]:32345"},"services":[{"id":"99a25fa7-8231-4a46-a6ec-ffc5281db1f8","details":{"type":"crucible","address":"[fd00:1122:3344:11d::5]:32345"}}]},"root":"/pool/ext
/e05a6264-63f2-4961-bc14-57b4f65614c0/crypt/zone"},{"zone":{"id":"06c7ddc8-9b3e-48ef-9874-0c40874e9877","zone_type":"crucible","addresses":["fd00:1122:3344:11d::a"],"dataset":{"id":"06c7ddc8-9b3e-48ef-9874-0c40874e9877","name":{"pool_name":"oxp_8c3972d1-5b17-4479-88cc-1c33e4344160","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::a]:32345"},"services":[{"id":"06c7ddc8-9b3e-48ef-9874-0c40874e9877","details":{"type":"crucible","address":"[fd00:1122:3344:11d::a]:32345"}}]},"root":"/pool/ext/8c3972d1-5b17-4479-88cc-1c33e4344160/crypt/zone"},{"zone":{"id":"1212b2dc-157d-4bd3-94af-fb5db1d91f24","zone_type":"crucible","addresses":["fd00:1122:3344:11d::9"],"dataset":{"id":"1212b2dc-157d-4bd3-94af-fb5db1d91f24","name":{"pool_name":"oxp_9f20cbae-7a63-4c31-9386-2ac3cbe12030","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::9]:32345"},"services":[{"id":"1212b2dc-157d-4bd3-94af-fb5db1d91f24","details":{"type":"crucible","address":"[fd00:1122:3344:11d::9]:32345"}}]},"root":"/pool/ext/977aa6c3-2026-4178-9948-e09f78008575/crypt/zone"},{"zone":{"id":"b1fb5f2e-b20d-4f4c-9f6f-bbeb1a98dd50","zone_type":"crucible","addresses":["fd00:1122:3344:11d::6"],"dataset":{"id":"b1fb5f2e-b20d-4f4c-9f6f-bbeb1a98dd50","name":{"pool_name":"oxp_977aa6c3-2026-4178-9948-e09f78008575","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::6]:32345"},"services":[{"id":"b1fb5f2e-b20d-4f4c-9f6f-bbeb1a98dd50","details":{"type":"crucible","address":"[fd00:1122:3344:11d::6]:32345"}}]},"root":"/pool/ext/722494ab-9a2b-481b-ac11-292fded682a5/crypt/zone"},{"zone":{"id":"e68dde0f-0647-46db-ae1c-711835c13e25","zone_type":"ntp","addresses":["fd00:1122:3344:11d::d"],"dataset":null,"services":[{"id":"e68dde0f-0647-46db-ae1c-711835c13e25","details":{"type":"internal_ntp","address":"[fd00:1122:3344:11d::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/1f4b71eb-505f-4706-912c-b13dd3f2eafb/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled8.json b/sled-agent/tests/old-service-ledgers/rack3-sled8.json deleted file mode 100644 index cf96f8ae81..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled8.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"85c18b7c-a100-458c-b18d-ecfdacaefac4","zone_type":"crucible","addresses":["fd00:1122:3344:10e::5"],"dataset":{"id":"85c18b7c-a100-458c-b18d-ecfdacaefac4","name":{"pool_name":"oxp_07b266bc-86c3-4a76-9522-8b34ba1ae78c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::5]:32345"},"services":[{"id":"85c18b7c-a100-458c-b18d-ecfdacaefac4","details":{"type":"crucible","address":"[fd00:1122:3344:10e::5]:32345"}}]},"root":"/pool/ext/5b88e44e-f886-4de8-8a6b-48ea5ed9d70b/crypt/zone"},{"zone":{"id":"db303465-7879-4d86-8da8-a0c7162e5184","zone_type":"crucible","addresses":["fd00:1122:3344:10e::4"],"dataset":{"id":"db303465-7879-4d86-8da8-a0c7162e5184","name":{"pool_name":"oxp_e9488a32-880d-44a2-8948-db0b7e3a35b5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::4]:32345"},"services":[{"id":"db303465-7879-4d86-8da8-a0c7162e5184","details":{"type":"crucible","address":"[fd00:1122:3344:10e::4]:32345"}}]},"root":"/pool/ext/8d798756-7200-4db4-9faf-f41b75106a63/crypt/zone"},{"zone":{"id":"c44ce6be-512d-4104-9260-a5b8fe373937","zone_type":"crucible","addresses":["fd00:1122:3344:10e::9"],"dataset":{"id":"c44ce6be-512d-4104-9260-a5b8fe373937","name":{"pool_name":"oxp_025dfc06-5aeb-407f-adc8-ba18dc9bba35","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::9]:32345"},"services":[{"id":"c44ce6be-512d-4104-9260-a5b8fe373937","details":{"type":"crucible","address":"[fd00:1122:3344:10e::9]:32345"}}]},"root":"/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone"},{"zone":{"id":"1cfdb5b6-e568-436a-a85f-7fecf1b8eef2","zone_type":"nexus","addresses":["fd00:1122:3344:10e::3"],"dataset":null,"services":[{"id":"1cfdb5b6-e568-436a-a85f-7fecf1b8eef2","details":{"type":"nexus","internal_address":"[fd00:1122:3344:10e::3]:12221","external_ip":"45.154.216.36","nic":{"id":"569754a2-a5e0-4aa8-90a7-2fa65f43b667","kind":{"type":"service","id":"1cfdb5b6-e568-436a-a85f-7fecf1b8eef2"},"name":"nexus-1cfdb5b6-e568-436a-a85f-7fecf1b8eef2","ip":"172.30.2.6","mac":"A8:40:25:FF:EC:6B","subnet":"172.30.2.0/24","vni":100,"primary":true,"slot":0},"external_tls":true,"external_dns_servers":["1.1.1.1","8.8.8.8"]}}]},"root":"/pool/ext/025dfc06-5aeb-407f-adc8-ba18dc9bba35/crypt/zone"},{"zone":{"id":"44a68792-ca14-442e-b7a9-11970d50ba0e","zone_type":"crucible","addresses":["fd00:1122:3344:10e::a"],"dataset":{"id":"44a68792-ca14-442e-b7a9-11970d50ba0e","name":{"pool_name":"oxp_2a492098-7df3-4409-9466-561edb7aa99b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::a]:32345"},"services":[{"id":"44a68792-ca14-442e-b7a9-11970d50ba0e","details":{"type":"crucible","address":"[fd00:1122:3344:10e::a]:32345"}}]},"root":"/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone"},{"zone":{"id":"514cf0ca-6d23-434e-9785-446b83b2f029","zone_type":"crucible","addresses":["fd00:1122:3344:10e::7"],"dataset":{"id":"514cf0ca-6d23-434e-9785-446b83b2f029","name":{"pool_name":"oxp_5b88e44e-f886-4de8-8a6b-48ea5ed9d70b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::7]:32345"},"services":[{"id":"514cf0ca-6d23-434e-9785-446b83b2f029","details":{"type":"crucible","address":"[fd00:1122:3344:10e::7]:32345"}}]},"root":"/pool/ext/5b88e44e-f886-4de8-8a6b-48ea5ed9d70b/crypt/zone"},{"zone":{"id":"bc6d8347-8f64-4031-912c-932349df07fe","zone_type":"crucible","addresses":["fd00:1122:3344:10e::6"],"dataset":{"id":"bc6d8347-8f64-4031-912c-932349df07fe","name":{"pool_name":"oxp_1544ce68-3544-4cba-b3b6-1927d08b78a5","kind":{"type":"crucible"}},"servic
e_address":"[fd00:1122:3344:10e::6]:32345"},"services":[{"id":"bc6d8347-8f64-4031-912c-932349df07fe","details":{"type":"crucible","address":"[fd00:1122:3344:10e::6]:32345"}}]},"root":"/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone"},{"zone":{"id":"1ab0a4f5-99ad-4341-8c89-7fd03e5ccb08","zone_type":"crucible","addresses":["fd00:1122:3344:10e::b"],"dataset":{"id":"1ab0a4f5-99ad-4341-8c89-7fd03e5ccb08","name":{"pool_name":"oxp_033eb462-968f-42ce-9c29-377bd40a3014","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::b]:32345"},"services":[{"id":"1ab0a4f5-99ad-4341-8c89-7fd03e5ccb08","details":{"type":"crucible","address":"[fd00:1122:3344:10e::b]:32345"}}]},"root":"/pool/ext/9e1a0803-7453-4eac-91c9-d7891ecd634f/crypt/zone"},{"zone":{"id":"d6f2520b-3d04-44d9-bd46-6ffccfcb46d2","zone_type":"crucible","addresses":["fd00:1122:3344:10e::8"],"dataset":{"id":"d6f2520b-3d04-44d9-bd46-6ffccfcb46d2","name":{"pool_name":"oxp_36e8d29c-1e88-4c2b-8f59-f312201067c3","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::8]:32345"},"services":[{"id":"d6f2520b-3d04-44d9-bd46-6ffccfcb46d2","details":{"type":"crucible","address":"[fd00:1122:3344:10e::8]:32345"}}]},"root":"/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone"},{"zone":{"id":"d6da9d13-bfcf-469d-a99e-faeb5e30be32","zone_type":"crucible","addresses":["fd00:1122:3344:10e::c"],"dataset":{"id":"d6da9d13-bfcf-469d-a99e-faeb5e30be32","name":{"pool_name":"oxp_9e1a0803-7453-4eac-91c9-d7891ecd634f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::c]:32345"},"services":[{"id":"d6da9d13-bfcf-469d-a99e-faeb5e30be32","details":{"type":"crucible","address":"[fd00:1122:3344:10e::c]:32345"}}]},"root":"/pool/ext/8d798756-7200-4db4-9faf-f41b75106a63/crypt/zone"},{"zone":{"id":"a1dc59c2-5883-4fb8-83be-ac2d95d255d1","zone_type":"crucible","addresses":["fd00:1122:3344:10e::d"],"dataset":{"id":"a1dc59c2-5883-4fb8-83be-ac2d95d255d1","name":{"pool_name":"oxp_8d798756-7200-4db4-9faf-f41b75106a63","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::d]:32345"},"services":[{"id":"a1dc59c2-5883-4fb8-83be-ac2d95d255d1","details":{"type":"crucible","address":"[fd00:1122:3344:10e::d]:32345"}}]},"root":"/pool/ext/36e8d29c-1e88-4c2b-8f59-f312201067c3/crypt/zone"},{"zone":{"id":"48f25dba-7392-44ce-9bb0-28489ebc44bc","zone_type":"ntp","addresses":["fd00:1122:3344:10e::e"],"dataset":null,"services":[{"id":"48f25dba-7392-44ce-9bb0-28489ebc44bc","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10e::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/5b88e44e-f886-4de8-8a6b-48ea5ed9d70b/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled9.json b/sled-agent/tests/old-service-ledgers/rack3-sled9.json deleted file mode 100644 index c225f50081..0000000000 --- a/sled-agent/tests/old-service-ledgers/rack3-sled9.json +++ /dev/null @@ -1 +0,0 @@ 
-{"generation":4,"requests":[{"zone":{"id":"b452e5e1-ab4c-4994-9679-ef21b3b4fee9","zone_type":"crucible","addresses":["fd00:1122:3344:10b::6"],"dataset":{"id":"b452e5e1-ab4c-4994-9679-ef21b3b4fee9","name":{"pool_name":"oxp_d63a297d-ae6a-4072-9dca-dda404044989","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::6]:32345"},"services":[{"id":"b452e5e1-ab4c-4994-9679-ef21b3b4fee9","details":{"type":"crucible","address":"[fd00:1122:3344:10b::6]:32345"}}]},"root":"/pool/ext/7c204111-31df-4c32-9a3e-780411f700fd/crypt/zone"},{"zone":{"id":"e9826cdc-6d3a-4eff-b1b5-ec4364ebe6b9","zone_type":"oximeter","addresses":["fd00:1122:3344:10b::3"],"dataset":null,"services":[{"id":"e9826cdc-6d3a-4eff-b1b5-ec4364ebe6b9","details":{"type":"oximeter","address":"[fd00:1122:3344:10b::3]:12223"}}]},"root":"/pool/ext/7c204111-31df-4c32-9a3e-780411f700fd/crypt/zone"},{"zone":{"id":"b0cde4a8-f27c-46e8-8355-756be9045afc","zone_type":"crucible","addresses":["fd00:1122:3344:10b::b"],"dataset":{"id":"b0cde4a8-f27c-46e8-8355-756be9045afc","name":{"pool_name":"oxp_07c1a8e7-51f5-4f12-a43d-734719fef92b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::b]:32345"},"services":[{"id":"b0cde4a8-f27c-46e8-8355-756be9045afc","details":{"type":"crucible","address":"[fd00:1122:3344:10b::b]:32345"}}]},"root":"/pool/ext/1f6adf64-c9b9-4ed7-b3e2-37fb25624646/crypt/zone"},{"zone":{"id":"e2f70cf6-e285-4212-9b01-77ebf2ca9219","zone_type":"crucible","addresses":["fd00:1122:3344:10b::d"],"dataset":{"id":"e2f70cf6-e285-4212-9b01-77ebf2ca9219","name":{"pool_name":"oxp_a809f28a-7f25-4362-bc56-0cbdd72af2cb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::d]:32345"},"services":[{"id":"e2f70cf6-e285-4212-9b01-77ebf2ca9219","details":{"type":"crucible","address":"[fd00:1122:3344:10b::d]:32345"}}]},"root":"/pool/ext/92a1bd39-6e8a-4226-b9d0-e3e8a9b8504f/crypt/zone"},{"zone":{"id":"b0949c9d-4aa1-4bc4-9cb3-5875b9166885","zone_type":"crucible","addresses":["fd00:1122:3344:10b::a"],"dataset":{"id":"b0949c9d-4aa1-4bc4-9cb3-5875b9166885","name":{"pool_name":"oxp_af0cc12b-43c5-473a-89a7-28351fbbb430","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::a]:32345"},"services":[{"id":"b0949c9d-4aa1-4bc4-9cb3-5875b9166885","details":{"type":"crucible","address":"[fd00:1122:3344:10b::a]:32345"}}]},"root":"/pool/ext/cf1594ed-7c0c-467c-b0af-a689dcb427a3/crypt/zone"},{"zone":{"id":"7cea4d59-a8ca-4826-901d-8d5bd935dc09","zone_type":"crucible","addresses":["fd00:1122:3344:10b::9"],"dataset":{"id":"7cea4d59-a8ca-4826-901d-8d5bd935dc09","name":{"pool_name":"oxp_d75dae09-4992-4a61-ab7d-5ae1d2b068ba","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::9]:32345"},"services":[{"id":"7cea4d59-a8ca-4826-901d-8d5bd935dc09","details":{"type":"crucible","address":"[fd00:1122:3344:10b::9]:32345"}}]},"root":"/pool/ext/a809f28a-7f25-4362-bc56-0cbdd72af2cb/crypt/zone"},{"zone":{"id":"08adaeee-c3b5-4cd8-8fbd-ac371b3101c9","zone_type":"crucible","addresses":["fd00:1122:3344:10b::4"],"dataset":{"id":"08adaeee-c3b5-4cd8-8fbd-ac371b3101c9","name":{"pool_name":"oxp_d9f23187-fbf9-4ea5-a103-bc112263a9a7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::4]:32345"},"services":[{"id":"08adaeee-c3b5-4cd8-8fbd-ac371b3101c9","details":{"type":"crucible","address":"[fd00:1122:3344:10b::4]:32345"}}]},"root":"/pool/ext/7c204111-31df-4c32-9a3e-780411f700fd/crypt/zone"},{"zone":{"id":"3da1ade5-3fcb-4e64-aa08-81ee8a9ef723","zone_type":"crucible","addresses":["fd00:1122:3344:10b::8"],"dataset":{"id":"
3da1ade5-3fcb-4e64-aa08-81ee8a9ef723","name":{"pool_name":"oxp_1f6adf64-c9b9-4ed7-b3e2-37fb25624646","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::8]:32345"},"services":[{"id":"3da1ade5-3fcb-4e64-aa08-81ee8a9ef723","details":{"type":"crucible","address":"[fd00:1122:3344:10b::8]:32345"}}]},"root":"/pool/ext/07c1a8e7-51f5-4f12-a43d-734719fef92b/crypt/zone"},{"zone":{"id":"816f26a7-4c28-4a39-b9ad-a036678520ab","zone_type":"crucible","addresses":["fd00:1122:3344:10b::7"],"dataset":{"id":"816f26a7-4c28-4a39-b9ad-a036678520ab","name":{"pool_name":"oxp_92a1bd39-6e8a-4226-b9d0-e3e8a9b8504f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::7]:32345"},"services":[{"id":"816f26a7-4c28-4a39-b9ad-a036678520ab","details":{"type":"crucible","address":"[fd00:1122:3344:10b::7]:32345"}}]},"root":"/pool/ext/d9f23187-fbf9-4ea5-a103-bc112263a9a7/crypt/zone"},{"zone":{"id":"839f9839-409f-45d3-b8a6-7085507b90f6","zone_type":"crucible","addresses":["fd00:1122:3344:10b::c"],"dataset":{"id":"839f9839-409f-45d3-b8a6-7085507b90f6","name":{"pool_name":"oxp_7c204111-31df-4c32-9a3e-780411f700fd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::c]:32345"},"services":[{"id":"839f9839-409f-45d3-b8a6-7085507b90f6","details":{"type":"crucible","address":"[fd00:1122:3344:10b::c]:32345"}}]},"root":"/pool/ext/af0cc12b-43c5-473a-89a7-28351fbbb430/crypt/zone"},{"zone":{"id":"c717c81f-a228-4412-a34e-90f8c491d847","zone_type":"crucible","addresses":["fd00:1122:3344:10b::5"],"dataset":{"id":"c717c81f-a228-4412-a34e-90f8c491d847","name":{"pool_name":"oxp_cf1594ed-7c0c-467c-b0af-a689dcb427a3","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::5]:32345"},"services":[{"id":"c717c81f-a228-4412-a34e-90f8c491d847","details":{"type":"crucible","address":"[fd00:1122:3344:10b::5]:32345"}}]},"root":"/pool/ext/d63a297d-ae6a-4072-9dca-dda404044989/crypt/zone"},{"zone":{"id":"e1fa2023-6c86-40a4-ae59-a0de112cf7a9","zone_type":"ntp","addresses":["fd00:1122:3344:10b::e"],"dataset":null,"services":[{"id":"e1fa2023-6c86-40a4-ae59-a0de112cf7a9","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10b::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/d9f23187-fbf9-4ea5-a103-bc112263a9a7/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled10.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled10.json deleted file mode 100644 index c00a65e8ea..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack2-sled10.json +++ /dev/null @@ -1,195 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "04eef8aa-055c-42ab-bdb6-c982f63c9be0", - "underlay_address": "fd00:1122:3344:107::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::d]:32345", - "dataset": { - "pool_name": "oxp_845ff39a-3205-416f-8bda-e35829107c8a" - } - } - }, - "root": "/pool/ext/43efdd6d-7419-437a-a282-fc45bfafd042/crypt/zone" - }, - { - "zone": { - "id": "8568c997-fbbb-46a8-8549-b78284530ffc", - "underlay_address": "fd00:1122:3344:107::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::5]:32345", - "dataset": { - "pool_name": "oxp_0e485ad3-04e6-404b-b619-87d4fea9f5ae" - } - } - }, - "root": 
"/pool/ext/9b61d4b2-66f6-459f-86f4-13d0b8c5d6cf/crypt/zone" - }, - { - "zone": { - "id": "6cec1d60-5c1a-4c1b-9632-2b4bc76bd37c", - "underlay_address": "fd00:1122:3344:107::e", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::e]:32345", - "dataset": { - "pool_name": "oxp_62a4c68a-2073-42d0-8e49-01f5e8b90cd4" - } - } - }, - "root": "/pool/ext/845ff39a-3205-416f-8bda-e35829107c8a/crypt/zone" - }, - { - "zone": { - "id": "aa646c82-c6d7-4d0c-8401-150130927759", - "underlay_address": "fd00:1122:3344:107::4", - "zone_type": { - "type": "clickhouse", - "address": "[fd00:1122:3344:107::4]:8123", - "dataset": { - "pool_name": "oxp_0e485ad3-04e6-404b-b619-87d4fea9f5ae" - } - } - }, - "root": "/pool/ext/fd82dcc7-00dd-4d01-826a-937a7d8238fb/crypt/zone" - }, - { - "zone": { - "id": "2f294ca1-7a4f-468f-8966-2b7915804729", - "underlay_address": "fd00:1122:3344:107::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::7]:32345", - "dataset": { - "pool_name": "oxp_43efdd6d-7419-437a-a282-fc45bfafd042" - } - } - }, - "root": "/pool/ext/fd82dcc7-00dd-4d01-826a-937a7d8238fb/crypt/zone" - }, - { - "zone": { - "id": "1a77bd1d-4fd4-4d6c-a105-17f942d94ba6", - "underlay_address": "fd00:1122:3344:107::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::c]:32345", - "dataset": { - "pool_name": "oxp_b6bdfdaf-9c0d-4b74-926c-49ff3ed05562" - } - } - }, - "root": "/pool/ext/9b61d4b2-66f6-459f-86f4-13d0b8c5d6cf/crypt/zone" - }, - { - "zone": { - "id": "f65a6668-1aea-4deb-81ed-191fbe469328", - "underlay_address": "fd00:1122:3344:107::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::9]:32345", - "dataset": { - "pool_name": "oxp_9b61d4b2-66f6-459f-86f4-13d0b8c5d6cf" - } - } - }, - "root": "/pool/ext/d0584f4a-20ba-436d-a75b-7709e80deb79/crypt/zone" - }, - { - "zone": { - "id": "ee8bce67-8f8e-4221-97b0-85f1860d66d0", - "underlay_address": "fd00:1122:3344:107::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::8]:32345", - "dataset": { - "pool_name": "oxp_b252b176-3974-436a-915b-60382b21eb76" - } - } - }, - "root": "/pool/ext/b6bdfdaf-9c0d-4b74-926c-49ff3ed05562/crypt/zone" - }, - { - "zone": { - "id": "cf3b2d54-5e36-4c93-b44f-8bf36ac98071", - "underlay_address": "fd00:1122:3344:107::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::b]:32345", - "dataset": { - "pool_name": "oxp_d0584f4a-20ba-436d-a75b-7709e80deb79" - } - } - }, - "root": "/pool/ext/4c157f35-865d-4310-9d81-c6259cb69293/crypt/zone" - }, - { - "zone": { - "id": "5c8c244c-00dc-4b16-aa17-6d9eb4827fab", - "underlay_address": "fd00:1122:3344:107::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::a]:32345", - "dataset": { - "pool_name": "oxp_4c157f35-865d-4310-9d81-c6259cb69293" - } - } - }, - "root": "/pool/ext/845ff39a-3205-416f-8bda-e35829107c8a/crypt/zone" - }, - { - "zone": { - "id": "7d5e942b-926c-442d-937a-76cc4aa72bf3", - "underlay_address": "fd00:1122:3344:107::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::6]:32345", - "dataset": { - "pool_name": "oxp_fd82dcc7-00dd-4d01-826a-937a7d8238fb" - } - } - }, - "root": "/pool/ext/b252b176-3974-436a-915b-60382b21eb76/crypt/zone" - }, - { - "zone": { - "id": "a3628a56-6f85-43b5-be50-71d8f0e04877", - "underlay_address": "fd00:1122:3344:107::3", - "zone_type": { - "type": "cockroach_db", - "address": "[fd00:1122:3344:107::3]:32221", - "dataset": { - "pool_name": "oxp_0e485ad3-04e6-404b-b619-87d4fea9f5ae" - 
} - } - }, - "root": "/pool/ext/4c157f35-865d-4310-9d81-c6259cb69293/crypt/zone" - }, - { - "zone": { - "id": "7529be1c-ca8b-441a-89aa-37166cc450df", - "underlay_address": "fd00:1122:3344:107::f", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:107::f]:123", - "ntp_servers": [ - "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", - "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/fd82dcc7-00dd-4d01-826a-937a7d8238fb/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled11.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled11.json deleted file mode 100644 index 79aae3e8c1..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack2-sled11.json +++ /dev/null @@ -1,196 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "605be8b9-c652-4a5f-94ca-068ec7a39472", - "underlay_address": "fd00:1122:3344:106::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::a]:32345", - "dataset": { - "pool_name": "oxp_cf14d1b9-b4db-4594-b3ab-a9957e770ce9" - } - } - }, - "root": "/pool/ext/cf5f8849-0c5a-475b-8683-6d17da88d1d1/crypt/zone" - }, - { - "zone": { - "id": "af8a8712-457c-4ea7-a8b6-aecb04761c1b", - "underlay_address": "fd00:1122:3344:106::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::9]:32345", - "dataset": { - "pool_name": "oxp_cf5f8849-0c5a-475b-8683-6d17da88d1d1" - } - } - }, - "root": "/pool/ext/7f778610-7328-4554-98f6-b17f74f551c7/crypt/zone" - }, - { - "zone": { - "id": "0022703b-dcfc-44d4-897a-b42f6f53b433", - "underlay_address": "fd00:1122:3344:106::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::c]:32345", - "dataset": { - "pool_name": "oxp_025725fa-9e40-4b46-b018-c420408394ef" - } - } - }, - "root": "/pool/ext/025725fa-9e40-4b46-b018-c420408394ef/crypt/zone" - }, - { - "zone": { - "id": "fffddf56-10ca-4b62-9be3-5b3764a5f682", - "underlay_address": "fd00:1122:3344:106::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::d]:32345", - "dataset": { - "pool_name": "oxp_4d2f5aaf-eb14-4b1e-aa99-ae38ec844605" - } - } - }, - "root": "/pool/ext/834c9aad-c53b-4357-bc3f-f422efa63848/crypt/zone" - }, - { - "zone": { - "id": "9b8194ee-917d-4abc-a55c-94cea6cdaea1", - "underlay_address": "fd00:1122:3344:106::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::6]:32345", - "dataset": { - "pool_name": "oxp_d7665e0d-9354-4341-a76f-965d7c49f277" - } - } - }, - "root": "/pool/ext/cf5f8849-0c5a-475b-8683-6d17da88d1d1/crypt/zone" - }, - { - "zone": { - "id": "b369e133-485c-4d98-8fee-83542d1fd94d", - "underlay_address": "fd00:1122:3344:106::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::4]:32345", - "dataset": { - "pool_name": "oxp_4366f80d-3902-4b93-8f2d-380008e805fc" - } - } - }, - "root": "/pool/ext/025725fa-9e40-4b46-b018-c420408394ef/crypt/zone" - }, - { - "zone": { - "id": "edd99650-5df1-4241-815d-253e4ef2399c", - "underlay_address": "fd00:1122:3344:106::3", - "zone_type": { - "type": "external_dns", - "dataset": { - "pool_name": "oxp_4366f80d-3902-4b93-8f2d-380008e805fc" - }, - "http_address": "[fd00:1122:3344:106::3]:5353", - "dns_address": "172.20.26.1:53", - "nic": { - "id": 
"99b759fc-8e2e-44b7-aca8-93c3b201974d", - "kind": { - "type": "service", - "id": "edd99650-5df1-4241-815d-253e4ef2399c" - }, - "name": "external-dns-edd99650-5df1-4241-815d-253e4ef2399c", - "ip": "172.30.1.5", - "mac": "A8:40:25:FF:B0:9C", - "subnet": "172.30.1.0/24", - "vni": 100, - "primary": true, - "slot": 0 - } - } - }, - "root": "/pool/ext/7f778610-7328-4554-98f6-b17f74f551c7/crypt/zone" - }, - { - "zone": { - "id": "46d1afcc-cc3f-4b17-aafc-054dd4862d15", - "underlay_address": "fd00:1122:3344:106::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::5]:32345", - "dataset": { - "pool_name": "oxp_7f778610-7328-4554-98f6-b17f74f551c7" - } - } - }, - "root": "/pool/ext/cf5f8849-0c5a-475b-8683-6d17da88d1d1/crypt/zone" - }, - { - "zone": { - "id": "12afe1c3-bfe6-4278-8240-91d401347d36", - "underlay_address": "fd00:1122:3344:106::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::8]:32345", - "dataset": { - "pool_name": "oxp_534bcd4b-502f-4109-af6e-4b28a22c20f1" - } - } - }, - "root": "/pool/ext/4366f80d-3902-4b93-8f2d-380008e805fc/crypt/zone" - }, - { - "zone": { - "id": "c33b5912-9985-43ed-98f2-41297e2b796a", - "underlay_address": "fd00:1122:3344:106::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::b]:32345", - "dataset": { - "pool_name": "oxp_834c9aad-c53b-4357-bc3f-f422efa63848" - } - } - }, - "root": "/pool/ext/d7665e0d-9354-4341-a76f-965d7c49f277/crypt/zone" - }, - { - "zone": { - "id": "65b3db59-9361-4100-9cee-04e32a8c67d3", - "underlay_address": "fd00:1122:3344:106::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::7]:32345", - "dataset": { - "pool_name": "oxp_32b5303f-f667-4345-84d2-c7eec63b91b2" - } - } - }, - "root": "/pool/ext/d7665e0d-9354-4341-a76f-965d7c49f277/crypt/zone" - }, - { - "zone": { - "id": "82500cc9-f33d-4d59-9e6e-d70ea6133077", - "underlay_address": "fd00:1122:3344:106::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:106::e]:123", - "ntp_servers": [ - "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", - "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/cf14d1b9-b4db-4594-b3ab-a9957e770ce9/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled12.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled12.json deleted file mode 100644 index 39ebad3183..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack2-sled12.json +++ /dev/null @@ -1,232 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 5, - "zones": [ - { - "zone": { - "id": "a76b3357-b690-43b8-8352-3300568ffc2b", - "underlay_address": "fd00:1122:3344:104::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::a]:32345", - "dataset": { - "pool_name": "oxp_05715ad8-59a1-44ab-ad5f-0cdffb46baab" - } - } - }, - "root": "/pool/ext/2ec2a731-3340-4777-b1bb-4a906c598174/crypt/zone" - }, - { - "zone": { - "id": "8d202759-ca06-4383-b50f-7f3ec4062bf7", - "underlay_address": "fd00:1122:3344:104::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::4]:32345", - "dataset": { - "pool_name": "oxp_56e32a8f-0877-4437-9cab-94a4928b1495" - } - } - }, - "root": "/pool/ext/613b58fc-5a80-42dc-a61c-b143cf220fb5/crypt/zone" - }, - { - "zone": { - "id": 
"fcdda266-fc6a-4518-89db-aec007a4b682", - "underlay_address": "fd00:1122:3344:104::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::b]:32345", - "dataset": { - "pool_name": "oxp_7e1293ad-b903-4054-aeae-2182d5e4a785" - } - } - }, - "root": "/pool/ext/416fd29e-d3b5-4fdf-8101-d0d163fa0706/crypt/zone" - }, - { - "zone": { - "id": "167cf6a2-ec51-4de2-bc6c-7785bbc0e436", - "underlay_address": "fd00:1122:3344:104::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::c]:32345", - "dataset": { - "pool_name": "oxp_f96c8d49-fdf7-4bd6-84f6-c282202d1abc" - } - } - }, - "root": "/pool/ext/56e32a8f-0877-4437-9cab-94a4928b1495/crypt/zone" - }, - { - "zone": { - "id": "c6fde82d-8dae-4ef0-b557-6c3d094d9454", - "underlay_address": "fd00:1122:3344:104::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::9]:32345", - "dataset": { - "pool_name": "oxp_416fd29e-d3b5-4fdf-8101-d0d163fa0706" - } - } - }, - "root": "/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone" - }, - { - "zone": { - "id": "650f5da7-86a0-4ade-af0f-bc96e021ded0", - "underlay_address": "fd00:1122:3344:104::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::5]:32345", - "dataset": { - "pool_name": "oxp_b4a71d3d-1ecd-418a-9a52-8d118f82082b" - } - } - }, - "root": "/pool/ext/613b58fc-5a80-42dc-a61c-b143cf220fb5/crypt/zone" - }, - { - "zone": { - "id": "7ce9a2c5-2d37-4188-b7b5-a9db819396c3", - "underlay_address": "fd00:1122:3344:104::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::d]:32345", - "dataset": { - "pool_name": "oxp_c87d16b8-e814-4159-8562-f8d7fdd19d13" - } - } - }, - "root": "/pool/ext/416fd29e-d3b5-4fdf-8101-d0d163fa0706/crypt/zone" - }, - { - "zone": { - "id": "23e1cf01-70ab-422f-997b-6216158965c3", - "underlay_address": "fd00:1122:3344:104::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::8]:32345", - "dataset": { - "pool_name": "oxp_3af01cc4-1f16-47d9-a489-abafcb91c2db" - } - } - }, - "root": "/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone" - }, - { - "zone": { - "id": "50209816-89fb-48ed-9595-16899d114844", - "underlay_address": "fd00:1122:3344:104::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::6]:32345", - "dataset": { - "pool_name": "oxp_2ec2a731-3340-4777-b1bb-4a906c598174" - } - } - }, - "root": "/pool/ext/416fd29e-d3b5-4fdf-8101-d0d163fa0706/crypt/zone" - }, - { - "zone": { - "id": "20b100d0-84c3-4119-aa9b-0c632b0b6a3a", - "underlay_address": "fd00:1122:3344:104::3", - "zone_type": { - "type": "nexus", - "internal_address": "[fd00:1122:3344:104::3]:12221", - "external_ip": "172.20.26.4", - "nic": { - "id": "364b0ecd-bf08-4cac-a993-bbf4a70564c7", - "kind": { - "type": "service", - "id": "20b100d0-84c3-4119-aa9b-0c632b0b6a3a" - }, - "name": "nexus-20b100d0-84c3-4119-aa9b-0c632b0b6a3a", - "ip": "172.30.2.6", - "mac": "A8:40:25:FF:B4:C1", - "subnet": "172.30.2.0/24", - "vni": 100, - "primary": true, - "slot": 0 - }, - "external_tls": true, - "external_dns_servers": [ - "1.1.1.1", - "9.9.9.9" - ] - } - }, - "root": "/pool/ext/c87d16b8-e814-4159-8562-f8d7fdd19d13/crypt/zone" - }, - { - "zone": { - "id": "8bc0f29e-0c20-437e-b8ca-7b9844acda22", - "underlay_address": "fd00:1122:3344:104::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::7]:32345", - "dataset": { - "pool_name": "oxp_613b58fc-5a80-42dc-a61c-b143cf220fb5" - } - } - }, - "root": 
"/pool/ext/56e32a8f-0877-4437-9cab-94a4928b1495/crypt/zone" - }, - { - "zone": { - "id": "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55", - "underlay_address": "fd00:1122:3344:104::e", - "zone_type": { - "type": "boundary_ntp", - "address": "[fd00:1122:3344:104::e]:123", - "ntp_servers": [ - "ntp.eng.oxide.computer" - ], - "dns_servers": [ - "1.1.1.1", - "9.9.9.9" - ], - "domain": null, - "nic": { - "id": "a4b9bacf-6c04-431a-81ad-9bf0302af96e", - "kind": { - "type": "service", - "id": "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55" - }, - "name": "ntp-c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55", - "ip": "172.30.3.5", - "mac": "A8:40:25:FF:B2:52", - "subnet": "172.30.3.0/24", - "vni": 100, - "primary": true, - "slot": 0 - }, - "snat_cfg": { - "ip": "172.20.26.6", - "first_port": 0, - "last_port": 16383 - } - } - }, - "root": "/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone" - }, - { - "zone": { - "id": "51c9ad09-7814-4643-8ad4-689ccbe53fbd", - "underlay_address": "fd00:1122:3344:1::1", - "zone_type": { - "type": "internal_dns", - "dataset": { - "pool_name": "oxp_56e32a8f-0877-4437-9cab-94a4928b1495" - }, - "http_address": "[fd00:1122:3344:1::1]:5353", - "dns_address": "[fd00:1122:3344:1::1]:53", - "gz_address": "fd00:1122:3344:1::2", - "gz_address_index": 0 - } - }, - "root": "/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled14.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled14.json deleted file mode 100644 index 25dfb72a78..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack2-sled14.json +++ /dev/null @@ -1,192 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "ee8b2cfa-87fe-46a6-98ef-23640b80a968", - "underlay_address": "fd00:1122:3344:10b::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::d]:32345", - "dataset": { - "pool_name": "oxp_4a624324-003a-4255-98e8-546a90b5b7fa" - } - } - }, - "root": "/pool/ext/6b9ec5f1-859f-459c-9c06-6a51ba87786f/crypt/zone" - }, - { - "zone": { - "id": "9228f8ca-2a83-439f-9cb7-f2801b5fea27", - "underlay_address": "fd00:1122:3344:10b::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::6]:32345", - "dataset": { - "pool_name": "oxp_6b9ec5f1-859f-459c-9c06-6a51ba87786f" - } - } - }, - "root": "/pool/ext/6b9ec5f1-859f-459c-9c06-6a51ba87786f/crypt/zone" - }, - { - "zone": { - "id": "ee44cdde-7ac9-4469-9f1d-e8bcfeb5cc46", - "underlay_address": "fd00:1122:3344:10b::e", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::e]:32345", - "dataset": { - "pool_name": "oxp_11b02ce7-7e50-486f-86c2-de8af9575a45" - } - } - }, - "root": "/pool/ext/11b02ce7-7e50-486f-86c2-de8af9575a45/crypt/zone" - }, - { - "zone": { - "id": "96bac0b1-8b34-4c81-9e76-6404d2c37630", - "underlay_address": "fd00:1122:3344:10b::4", - "zone_type": { - "type": "crucible_pantry", - "address": "[fd00:1122:3344:10b::4]:17000" - } - }, - "root": "/pool/ext/350b2814-7b7f-40f1-9bf6-9818a1ef49bb/crypt/zone" - }, - { - "zone": { - "id": "d4e1e554-7b98-4413-809e-4a42561c3d0c", - "underlay_address": "fd00:1122:3344:10b::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::a]:32345", - "dataset": { - "pool_name": "oxp_e6d2fe1d-c74d-40cd-8fae-bc7d06bdaac8" - } - } - }, - "root": "/pool/ext/6b9ec5f1-859f-459c-9c06-6a51ba87786f/crypt/zone" - }, - { - "zone": { - "id": "1dd69b02-a032-46c3-8e2a-5012e8314455", - "underlay_address": 
"fd00:1122:3344:10b::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::b]:32345", - "dataset": { - "pool_name": "oxp_350b2814-7b7f-40f1-9bf6-9818a1ef49bb" - } - } - }, - "root": "/pool/ext/350b2814-7b7f-40f1-9bf6-9818a1ef49bb/crypt/zone" - }, - { - "zone": { - "id": "921f7752-d2f3-40df-a739-5cb1390abc2c", - "underlay_address": "fd00:1122:3344:10b::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::8]:32345", - "dataset": { - "pool_name": "oxp_2d1ebe24-6deb-4f81-8450-6842de28126c" - } - } - }, - "root": "/pool/ext/91ea7bb6-2be7-4498-9b0d-a0521509ec00/crypt/zone" - }, - { - "zone": { - "id": "609b25e8-9750-4308-ae6f-7202907a3675", - "underlay_address": "fd00:1122:3344:10b::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::9]:32345", - "dataset": { - "pool_name": "oxp_91ea7bb6-2be7-4498-9b0d-a0521509ec00" - } - } - }, - "root": "/pool/ext/2d1ebe24-6deb-4f81-8450-6842de28126c/crypt/zone" - }, - { - "zone": { - "id": "a232eba2-e94f-4592-a5a6-ec23f9be3296", - "underlay_address": "fd00:1122:3344:10b::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::5]:32345", - "dataset": { - "pool_name": "oxp_e12f29b8-1ab8-431e-bc96-1c1298947980" - } - } - }, - "root": "/pool/ext/021afd19-2f87-4def-9284-ab7add1dd6ae/crypt/zone" - }, - { - "zone": { - "id": "800d1758-9312-4b1a-8f02-dc6d644c2a9b", - "underlay_address": "fd00:1122:3344:10b::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::c]:32345", - "dataset": { - "pool_name": "oxp_b6932bb0-bab8-4876-914a-9c75a600e794" - } - } - }, - "root": "/pool/ext/b6932bb0-bab8-4876-914a-9c75a600e794/crypt/zone" - }, - { - "zone": { - "id": "668a4d4a-96dc-4b45-866b-bed3d64c26ec", - "underlay_address": "fd00:1122:3344:10b::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::7]:32345", - "dataset": { - "pool_name": "oxp_021afd19-2f87-4def-9284-ab7add1dd6ae" - } - } - }, - "root": "/pool/ext/91ea7bb6-2be7-4498-9b0d-a0521509ec00/crypt/zone" - }, - { - "zone": { - "id": "8bbea076-ff60-4330-8302-383e18140ef3", - "underlay_address": "fd00:1122:3344:10b::3", - "zone_type": { - "type": "cockroach_db", - "address": "[fd00:1122:3344:10b::3]:32221", - "dataset": { - "pool_name": "oxp_e12f29b8-1ab8-431e-bc96-1c1298947980" - } - } - }, - "root": "/pool/ext/4a624324-003a-4255-98e8-546a90b5b7fa/crypt/zone" - }, - { - "zone": { - "id": "3ccea933-89f2-4ce5-8367-efb0afeffe97", - "underlay_address": "fd00:1122:3344:10b::f", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:10b::f]:123", - "ntp_servers": [ - "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", - "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/4a624324-003a-4255-98e8-546a90b5b7fa/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled16.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled16.json deleted file mode 100644 index 905742e678..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack2-sled16.json +++ /dev/null @@ -1,192 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "b12aa520-a769-4eac-b56b-09960550a831", - "underlay_address": "fd00:1122:3344:108::7", - "zone_type": { - "type": "crucible", - "address": 
"[fd00:1122:3344:108::7]:32345", - "dataset": { - "pool_name": "oxp_34dadf3f-f60c-4acc-b82b-4b0c82224222" - } - } - }, - "root": "/pool/ext/8be8c577-23ac-452e-a205-6d9c95088f61/crypt/zone" - }, - { - "zone": { - "id": "9bdc40ee-ccba-4d18-9efb-a30596e2d290", - "underlay_address": "fd00:1122:3344:108::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::d]:32345", - "dataset": { - "pool_name": "oxp_eb81728c-3b83-42fb-8133-ac32a0bdf70f" - } - } - }, - "root": "/pool/ext/8be8c577-23ac-452e-a205-6d9c95088f61/crypt/zone" - }, - { - "zone": { - "id": "c9a367c7-64d7-48e4-b484-9ecb4e8faea7", - "underlay_address": "fd00:1122:3344:108::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::9]:32345", - "dataset": { - "pool_name": "oxp_76ab5a67-e20f-4bf0-87b3-01fcc4144bd2" - } - } - }, - "root": "/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone" - }, - { - "zone": { - "id": "bc5124d8-65e8-4879-bfac-64d59003d482", - "underlay_address": "fd00:1122:3344:108::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::a]:32345", - "dataset": { - "pool_name": "oxp_5fac7a1d-e855-46e1-b8c2-dd848ac4fee6" - } - } - }, - "root": "/pool/ext/0c4ef358-5533-43db-ad38-a8eff716e53a/crypt/zone" - }, - { - "zone": { - "id": "5cc7c840-8e6b-48c8-ac4b-f4297f8cf61a", - "underlay_address": "fd00:1122:3344:108::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::c]:32345", - "dataset": { - "pool_name": "oxp_0c4ef358-5533-43db-ad38-a8eff716e53a" - } - } - }, - "root": "/pool/ext/6d3e9cc6-f03b-4055-9785-05711d5e4fdc/crypt/zone" - }, - { - "zone": { - "id": "3b767edf-a72d-4d80-a0fc-65d6801ed0e0", - "underlay_address": "fd00:1122:3344:108::e", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::e]:32345", - "dataset": { - "pool_name": "oxp_f522118c-5dcd-4116-8044-07f0cceec52e" - } - } - }, - "root": "/pool/ext/5fac7a1d-e855-46e1-b8c2-dd848ac4fee6/crypt/zone" - }, - { - "zone": { - "id": "f3c02ed6-fbc5-45c3-a030-409f74b450fd", - "underlay_address": "fd00:1122:3344:108::4", - "zone_type": { - "type": "crucible_pantry", - "address": "[fd00:1122:3344:108::4]:17000" - } - }, - "root": "/pool/ext/eb81728c-3b83-42fb-8133-ac32a0bdf70f/crypt/zone" - }, - { - "zone": { - "id": "85bd9bdb-1ec5-4a8d-badb-8b5d502546a1", - "underlay_address": "fd00:1122:3344:108::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::5]:32345", - "dataset": { - "pool_name": "oxp_416232c1-bc8f-403f-bacb-28403dd8fced" - } - } - }, - "root": "/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone" - }, - { - "zone": { - "id": "d2f1c3df-d4e0-4469-b50e-f1871da86ebf", - "underlay_address": "fd00:1122:3344:108::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::6]:32345", - "dataset": { - "pool_name": "oxp_6d3e9cc6-f03b-4055-9785-05711d5e4fdc" - } - } - }, - "root": "/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone" - }, - { - "zone": { - "id": "88fe3c12-4c55-47df-b4ee-ed26b795439d", - "underlay_address": "fd00:1122:3344:108::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::8]:32345", - "dataset": { - "pool_name": "oxp_8be8c577-23ac-452e-a205-6d9c95088f61" - } - } - }, - "root": "/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone" - }, - { - "zone": { - "id": "4d20175a-588b-44b8-8b9c-b16c6c3a97a0", - "underlay_address": "fd00:1122:3344:108::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::b]:32345", - "dataset": { - 
"pool_name": "oxp_a726cacd-fa35-4ed2-ade6-31ad928b24cb" - } - } - }, - "root": "/pool/ext/0c4ef358-5533-43db-ad38-a8eff716e53a/crypt/zone" - }, - { - "zone": { - "id": "e86845b5-eabd-49f5-9a10-6dfef9066209", - "underlay_address": "fd00:1122:3344:108::3", - "zone_type": { - "type": "cockroach_db", - "address": "[fd00:1122:3344:108::3]:32221", - "dataset": { - "pool_name": "oxp_416232c1-bc8f-403f-bacb-28403dd8fced" - } - } - }, - "root": "/pool/ext/416232c1-bc8f-403f-bacb-28403dd8fced/crypt/zone" - }, - { - "zone": { - "id": "209b6213-588b-43b6-a89b-19ee5c84ffba", - "underlay_address": "fd00:1122:3344:108::f", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:108::f]:123", - "ntp_servers": [ - "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", - "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/416232c1-bc8f-403f-bacb-28403dd8fced/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled17.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled17.json deleted file mode 100644 index 1cccd0467b..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack2-sled17.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "90b53c3d-42fa-4ca9-bbfc-96fff245b508", - "underlay_address": "fd00:1122:3344:109::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::4]:32345", - "dataset": { - "pool_name": "oxp_ae56280b-17ce-4266-8573-e1da9db6c6bb" - } - } - }, - "root": "/pool/ext/b0e1a261-b932-47c4-81e9-1977275ae9d9/crypt/zone" - }, - { - "zone": { - "id": "4f9f2e1d-be04-4e8b-a50b-ffb18557a650", - "underlay_address": "fd00:1122:3344:109::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::5]:32345", - "dataset": { - "pool_name": "oxp_d5b07362-64db-4b18-a3e9-8d7cbabae2d5" - } - } - }, - "root": "/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone" - }, - { - "zone": { - "id": "2fa5671d-3109-4f11-ae70-1280f4fa3b89", - "underlay_address": "fd00:1122:3344:109::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::6]:32345", - "dataset": { - "pool_name": "oxp_9ba7bfbf-b9a2-4237-a142-94c1e68de984" - } - } - }, - "root": "/pool/ext/3cafbb47-c194-4a42-99ff-34dfeab999ed/crypt/zone" - }, - { - "zone": { - "id": "b63c6882-ca90-4156-b561-4781ab4a0962", - "underlay_address": "fd00:1122:3344:109::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::7]:32345", - "dataset": { - "pool_name": "oxp_b0e1a261-b932-47c4-81e9-1977275ae9d9" - } - } - }, - "root": "/pool/ext/d5b07362-64db-4b18-a3e9-8d7cbabae2d5/crypt/zone" - }, - { - "zone": { - "id": "f71344eb-f7e2-439d-82a0-9941e6868fb6", - "underlay_address": "fd00:1122:3344:109::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::9]:32345", - "dataset": { - "pool_name": "oxp_027a82e8-daa3-4fa6-8205-ed03445e1086" - } - } - }, - "root": "/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone" - }, - { - "zone": { - "id": "a60cf0d7-12d5-43cb-aa3f-7a9e84de08fb", - "underlay_address": "fd00:1122:3344:109::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::a]:32345", - "dataset": { - "pool_name": "oxp_8736aaf9-4d72-42b1-8e4f-07644d999c8b" - } - } - }, - "root": 
"/pool/ext/8736aaf9-4d72-42b1-8e4f-07644d999c8b/crypt/zone" - }, - { - "zone": { - "id": "5d0e03b2-8958-4c43-8851-bf819f102958", - "underlay_address": "fd00:1122:3344:109::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::8]:32345", - "dataset": { - "pool_name": "oxp_62426615-7832-49e7-9426-e39ffeb42c69" - } - } - }, - "root": "/pool/ext/07fc8ec9-1216-4d98-be34-c2970b585e61/crypt/zone" - }, - { - "zone": { - "id": "accc05a2-ec80-4856-a825-ec6b7f700eaa", - "underlay_address": "fd00:1122:3344:109::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::d]:32345", - "dataset": { - "pool_name": "oxp_dc083c53-7014-4482-8a79-f338ba2b0fb4" - } - } - }, - "root": "/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone" - }, - { - "zone": { - "id": "2e32fdcc-737a-4430-8290-cb7028ea4d50", - "underlay_address": "fd00:1122:3344:109::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::b]:32345", - "dataset": { - "pool_name": "oxp_3cafbb47-c194-4a42-99ff-34dfeab999ed" - } - } - }, - "root": "/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone" - }, - { - "zone": { - "id": "a97c6ae2-37f6-4d93-a66e-cb5cd3c6aaa2", - "underlay_address": "fd00:1122:3344:109::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::c]:32345", - "dataset": { - "pool_name": "oxp_07fc8ec9-1216-4d98-be34-c2970b585e61" - } - } - }, - "root": "/pool/ext/07fc8ec9-1216-4d98-be34-c2970b585e61/crypt/zone" - }, - { - "zone": { - "id": "3237a532-acaa-4ebe-bf11-dde794fea739", - "underlay_address": "fd00:1122:3344:109::3", - "zone_type": { - "type": "cockroach_db", - "address": "[fd00:1122:3344:109::3]:32221", - "dataset": { - "pool_name": "oxp_ae56280b-17ce-4266-8573-e1da9db6c6bb" - } - } - }, - "root": "/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone" - }, - { - "zone": { - "id": "83257100-5590-484a-b72a-a079389d8da6", - "underlay_address": "fd00:1122:3344:109::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:109::e]:123", - "ntp_servers": [ - "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", - "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/3cafbb47-c194-4a42-99ff-34dfeab999ed/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled21.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled21.json deleted file mode 100644 index 35caa638e8..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack2-sled21.json +++ /dev/null @@ -1,232 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 5, - "zones": [ - { - "zone": { - "id": "0437b69d-73a8-4231-86f9-6b5556e7e7ef", - "underlay_address": "fd00:1122:3344:102::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::5]:32345", - "dataset": { - "pool_name": "oxp_aa0ffe35-76db-42ab-adf2-ceb072bdf811" - } - } - }, - "root": "/pool/ext/0d2805da-6d24-4e57-a700-0c3865c05544/crypt/zone" - }, - { - "zone": { - "id": "47234ca5-305f-436a-9e9a-36bca9667680", - "underlay_address": "fd00:1122:3344:102::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::b]:32345", - "dataset": { - "pool_name": "oxp_0d2805da-6d24-4e57-a700-0c3865c05544" - } - } - }, - "root": "/pool/ext/160691d8-33a1-4d7d-a48a-c3fd27d76822/crypt/zone" - }, - { - "zone": { - "id": 
"2898657e-4141-4c05-851b-147bffc6bbbd", - "underlay_address": "fd00:1122:3344:102::3", - "zone_type": { - "type": "nexus", - "internal_address": "[fd00:1122:3344:102::3]:12221", - "external_ip": "172.20.26.5", - "nic": { - "id": "2e9a412e-c79a-48fe-8fa4-f5a6afed1040", - "kind": { - "type": "service", - "id": "2898657e-4141-4c05-851b-147bffc6bbbd" - }, - "name": "nexus-2898657e-4141-4c05-851b-147bffc6bbbd", - "ip": "172.30.2.7", - "mac": "A8:40:25:FF:C6:59", - "subnet": "172.30.2.0/24", - "vni": 100, - "primary": true, - "slot": 0 - }, - "external_tls": true, - "external_dns_servers": [ - "1.1.1.1", - "9.9.9.9" - ] - } - }, - "root": "/pool/ext/c0b4ecc1-a145-443f-90d1-2e8136b007bc/crypt/zone" - }, - { - "zone": { - "id": "cf98c4d6-4a7b-49c0-9b14-48a8adf52ce9", - "underlay_address": "fd00:1122:3344:102::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::c]:32345", - "dataset": { - "pool_name": "oxp_c0b4ecc1-a145-443f-90d1-2e8136b007bc" - } - } - }, - "root": "/pool/ext/f6acd70a-d6cb-464d-a460-dd5c60301562/crypt/zone" - }, - { - "zone": { - "id": "13c1e91e-bfcc-4eea-8185-412fc37fdea3", - "underlay_address": "fd00:1122:3344:102::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::9]:32345", - "dataset": { - "pool_name": "oxp_e9b0a2e4-8060-41bd-a3b5-d0642246d06d" - } - } - }, - "root": "/pool/ext/c0b4ecc1-a145-443f-90d1-2e8136b007bc/crypt/zone" - }, - { - "zone": { - "id": "c9cb60af-9e0e-4b3b-b971-53138a9b8d27", - "underlay_address": "fd00:1122:3344:102::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::4]:32345", - "dataset": { - "pool_name": "oxp_77749ec7-39a9-489d-904b-87f7223c4e3c" - } - } - }, - "root": "/pool/ext/77749ec7-39a9-489d-904b-87f7223c4e3c/crypt/zone" - }, - { - "zone": { - "id": "32995cfa-47ec-4b84-8514-7c1c8a86c19d", - "underlay_address": "fd00:1122:3344:102::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::8]:32345", - "dataset": { - "pool_name": "oxp_eac83f81-eb51-4f3e-874e-82f55dd952ba" - } - } - }, - "root": "/pool/ext/0d2805da-6d24-4e57-a700-0c3865c05544/crypt/zone" - }, - { - "zone": { - "id": "b93d2e2d-d54b-4503-85c3-9878e3cee9c7", - "underlay_address": "fd00:1122:3344:102::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::a]:32345", - "dataset": { - "pool_name": "oxp_160691d8-33a1-4d7d-a48a-c3fd27d76822" - } - } - }, - "root": "/pool/ext/138663ad-a382-4595-baf0-08f6b0276a67/crypt/zone" - }, - { - "zone": { - "id": "2ebbac4f-7b0f-43eb-99fd-dd6ff7f9e097", - "underlay_address": "fd00:1122:3344:102::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::6]:32345", - "dataset": { - "pool_name": "oxp_138663ad-a382-4595-baf0-08f6b0276a67" - } - } - }, - "root": "/pool/ext/e9b0a2e4-8060-41bd-a3b5-d0642246d06d/crypt/zone" - }, - { - "zone": { - "id": "d0eea3b2-e5ac-42bf-97b7-531b78fa06d1", - "underlay_address": "fd00:1122:3344:102::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::7]:32345", - "dataset": { - "pool_name": "oxp_69f0b863-f73f-42b2-9822-b2cb99f09003" - } - } - }, - "root": "/pool/ext/138663ad-a382-4595-baf0-08f6b0276a67/crypt/zone" - }, - { - "zone": { - "id": "2b34cd1d-ea7d-41a1-82b9-75550fdf6eb0", - "underlay_address": "fd00:1122:3344:102::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::d]:32345", - "dataset": { - "pool_name": "oxp_f6acd70a-d6cb-464d-a460-dd5c60301562" - } - } - }, - "root": 
"/pool/ext/c0b4ecc1-a145-443f-90d1-2e8136b007bc/crypt/zone" - }, - { - "zone": { - "id": "6ea2684c-115e-48a6-8453-ab52d1cecd73", - "underlay_address": "fd00:1122:3344:102::e", - "zone_type": { - "type": "boundary_ntp", - "address": "[fd00:1122:3344:102::e]:123", - "ntp_servers": [ - "ntp.eng.oxide.computer" - ], - "dns_servers": [ - "1.1.1.1", - "9.9.9.9" - ], - "domain": null, - "nic": { - "id": "4effd079-ed4e-4cf6-8545-bb9574f516d2", - "kind": { - "type": "service", - "id": "6ea2684c-115e-48a6-8453-ab52d1cecd73" - }, - "name": "ntp-6ea2684c-115e-48a6-8453-ab52d1cecd73", - "ip": "172.30.3.6", - "mac": "A8:40:25:FF:A0:F9", - "subnet": "172.30.3.0/24", - "vni": 100, - "primary": true, - "slot": 0 - }, - "snat_cfg": { - "ip": "172.20.26.7", - "first_port": 16384, - "last_port": 32767 - } - } - }, - "root": "/pool/ext/aa0ffe35-76db-42ab-adf2-ceb072bdf811/crypt/zone" - }, - { - "zone": { - "id": "3a1ea15f-06a4-4afd-959a-c3a00b2bdd80", - "underlay_address": "fd00:1122:3344:2::1", - "zone_type": { - "type": "internal_dns", - "dataset": { - "pool_name": "oxp_77749ec7-39a9-489d-904b-87f7223c4e3c" - }, - "http_address": "[fd00:1122:3344:2::1]:5353", - "dns_address": "[fd00:1122:3344:2::1]:53", - "gz_address": "fd00:1122:3344:2::2", - "gz_address_index": 1 - } - }, - "root": "/pool/ext/69f0b863-f73f-42b2-9822-b2cb99f09003/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled23.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled23.json deleted file mode 100644 index 94fcb3a327..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack2-sled23.json +++ /dev/null @@ -1,195 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 5, - "zones": [ - { - "zone": { - "id": "1876cdcf-b2e7-4b79-ad2e-67df716e1860", - "underlay_address": "fd00:1122:3344:10a::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::8]:32345", - "dataset": { - "pool_name": "oxp_d4c6bdc6-5e99-4f6c-b57a-9bfcb9a76be4" - } - } - }, - "root": "/pool/ext/86c58ea3-1413-4af3-9aff-9c0a3d758459/crypt/zone" - }, - { - "zone": { - "id": "0e708ee3-b7a6-4993-a88a-4489add33e29", - "underlay_address": "fd00:1122:3344:10a::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::d]:32345", - "dataset": { - "pool_name": "oxp_718ad834-b415-4abb-934d-9f987cde0a96" - } - } - }, - "root": "/pool/ext/30f7d236-c835-46cc-bc27-9099a6826f67/crypt/zone" - }, - { - "zone": { - "id": "4e1b9a65-848f-4649-b360-1df0d135b44d", - "underlay_address": "fd00:1122:3344:10a::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::c]:32345", - "dataset": { - "pool_name": "oxp_88ee08c6-1c0f-44c2-9110-b8d5a7589ebb" - } - } - }, - "root": "/pool/ext/30f7d236-c835-46cc-bc27-9099a6826f67/crypt/zone" - }, - { - "zone": { - "id": "da510a57-3af1-4d2b-b2ed-2e8849f27d8b", - "underlay_address": "fd00:1122:3344:10a::3", - "zone_type": { - "type": "oximeter", - "address": "[fd00:1122:3344:10a::3]:12223" - } - }, - "root": "/pool/ext/718ad834-b415-4abb-934d-9f987cde0a96/crypt/zone" - }, - { - "zone": { - "id": "d4d9acc8-3e0b-4fab-a0a2-d21920fabd7e", - "underlay_address": "fd00:1122:3344:10a::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::6]:32345", - "dataset": { - "pool_name": "oxp_9dfe424f-cba6-4bfb-a3dd-e8bd7fdea57d" - } - } - }, - "root": "/pool/ext/30f7d236-c835-46cc-bc27-9099a6826f67/crypt/zone" - }, - { - "zone": { - "id": "fcb75972-836b-4f55-ba21-9722832cf5c2", - "underlay_address": 
"fd00:1122:3344:10a::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::7]:32345", - "dataset": { - "pool_name": "oxp_9005671f-3d90-4ed1-be15-ad65b9a65bd5" - } - } - }, - "root": "/pool/ext/d4c6bdc6-5e99-4f6c-b57a-9bfcb9a76be4/crypt/zone" - }, - { - "zone": { - "id": "624beba0-7dcd-4d55-af05-4670c6fcb1fb", - "underlay_address": "fd00:1122:3344:10a::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::4]:32345", - "dataset": { - "pool_name": "oxp_93867156-a43d-4c03-a899-1535e566c8bd" - } - } - }, - "root": "/pool/ext/93867156-a43d-4c03-a899-1535e566c8bd/crypt/zone" - }, - { - "zone": { - "id": "26fb3830-898e-4086-afaf-8f9654716b8c", - "underlay_address": "fd00:1122:3344:10a::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::b]:32345", - "dataset": { - "pool_name": "oxp_86c58ea3-1413-4af3-9aff-9c0a3d758459" - } - } - }, - "root": "/pool/ext/93867156-a43d-4c03-a899-1535e566c8bd/crypt/zone" - }, - { - "zone": { - "id": "a3ef7eba-c08e-48ef-ae7a-89e2fcb49b66", - "underlay_address": "fd00:1122:3344:10a::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::a]:32345", - "dataset": { - "pool_name": "oxp_cd3fdbae-a9d9-4db7-866a-bca36f6dd634" - } - } - }, - "root": "/pool/ext/718ad834-b415-4abb-934d-9f987cde0a96/crypt/zone" - }, - { - "zone": { - "id": "5c1d4a02-f33b-433a-81f5-5c149e3433bd", - "underlay_address": "fd00:1122:3344:10a::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::5]:32345", - "dataset": { - "pool_name": "oxp_9adfc865-2eef-4880-a6e3-9d2f88c8efd0" - } - } - }, - "root": "/pool/ext/cd3fdbae-a9d9-4db7-866a-bca36f6dd634/crypt/zone" - }, - { - "zone": { - "id": "ee77efe9-81d0-4395-a237-15e30c2c2d04", - "underlay_address": "fd00:1122:3344:10a::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::9]:32345", - "dataset": { - "pool_name": "oxp_30f7d236-c835-46cc-bc27-9099a6826f67" - } - } - }, - "root": "/pool/ext/88ee08c6-1c0f-44c2-9110-b8d5a7589ebb/crypt/zone" - }, - { - "zone": { - "id": "71ab91b7-48d4-4d31-b47e-59f29f419116", - "underlay_address": "fd00:1122:3344:10a::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:10a::e]:123", - "ntp_servers": [ - "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", - "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/cd3fdbae-a9d9-4db7-866a-bca36f6dd634/crypt/zone" - }, - { - "zone": { - "id": "46ccd8fe-4e3c-4307-97ae-1f7ac505082a", - "underlay_address": "fd00:1122:3344:3::1", - "zone_type": { - "type": "internal_dns", - "dataset": { - "pool_name": "oxp_93867156-a43d-4c03-a899-1535e566c8bd" - }, - "http_address": "[fd00:1122:3344:3::1]:5353", - "dns_address": "[fd00:1122:3344:3::1]:53", - "gz_address": "fd00:1122:3344:3::2", - "gz_address_index": 2 - } - }, - "root": "/pool/ext/9dfe424f-cba6-4bfb-a3dd-e8bd7fdea57d/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled25.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled25.json deleted file mode 100644 index 09a07149cf..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack2-sled25.json +++ /dev/null @@ -1,196 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "180d466d-eb36-4546-8922-e52c4c076823", - 
"underlay_address": "fd00:1122:3344:101::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::5]:32345", - "dataset": { - "pool_name": "oxp_ac789935-fa42-4d00-8967-df0d96dbb74e" - } - } - }, - "root": "/pool/ext/d732addc-cfe8-4c2c-8028-72eb4481b04e/crypt/zone" - }, - { - "zone": { - "id": "b5af0303-bc03-40a3-b733-0396d705dfbf", - "underlay_address": "fd00:1122:3344:101::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::7]:32345", - "dataset": { - "pool_name": "oxp_d732addc-cfe8-4c2c-8028-72eb4481b04e" - } - } - }, - "root": "/pool/ext/677b0057-3a80-461b-aca8-c2cb501a7278/crypt/zone" - }, - { - "zone": { - "id": "9c7c805a-f5ed-4e48-86e3-7aa81a718881", - "underlay_address": "fd00:1122:3344:101::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::c]:32345", - "dataset": { - "pool_name": "oxp_923c930c-80f8-448d-8321-cebfc6c41760" - } - } - }, - "root": "/pool/ext/ac789935-fa42-4d00-8967-df0d96dbb74e/crypt/zone" - }, - { - "zone": { - "id": "4e49c83c-2d4a-491a-91ac-4ab022026dcf", - "underlay_address": "fd00:1122:3344:101::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::4]:32345", - "dataset": { - "pool_name": "oxp_c99e6032-1d4f-47d2-9efe-ae2b2479554e" - } - } - }, - "root": "/pool/ext/653065d2-ab70-47c9-b832-34238fdc95ef/crypt/zone" - }, - { - "zone": { - "id": "0e38475e-b8b2-4813-bf80-3c170081081a", - "underlay_address": "fd00:1122:3344:101::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::d]:32345", - "dataset": { - "pool_name": "oxp_653065d2-ab70-47c9-b832-34238fdc95ef" - } - } - }, - "root": "/pool/ext/4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca/crypt/zone" - }, - { - "zone": { - "id": "75123e60-1116-4b8d-a466-7302220127da", - "underlay_address": "fd00:1122:3344:101::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::8]:32345", - "dataset": { - "pool_name": "oxp_c764a8ae-6862-4eec-9db0-cc6ea478e4a7" - } - } - }, - "root": "/pool/ext/c764a8ae-6862-4eec-9db0-cc6ea478e4a7/crypt/zone" - }, - { - "zone": { - "id": "fbd0379c-97fa-49ea-8980-17ae30ffff3c", - "underlay_address": "fd00:1122:3344:101::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::b]:32345", - "dataset": { - "pool_name": "oxp_fcb0e4c7-e046-4cf5-ad35-3ad90e1eb90c" - } - } - }, - "root": "/pool/ext/4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca/crypt/zone" - }, - { - "zone": { - "id": "ec635326-cd1d-4f73-b8e6-c3a36a7020db", - "underlay_address": "fd00:1122:3344:101::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::a]:32345", - "dataset": { - "pool_name": "oxp_6bfb4120-488d-4f3d-90ef-e9bfa523b388" - } - } - }, - "root": "/pool/ext/c99e6032-1d4f-47d2-9efe-ae2b2479554e/crypt/zone" - }, - { - "zone": { - "id": "f500d564-c40a-4eca-ac8a-a26b435f2037", - "underlay_address": "fd00:1122:3344:101::3", - "zone_type": { - "type": "external_dns", - "dataset": { - "pool_name": "oxp_c99e6032-1d4f-47d2-9efe-ae2b2479554e" - }, - "http_address": "[fd00:1122:3344:101::3]:5353", - "dns_address": "172.20.26.2:53", - "nic": { - "id": "b0b42776-3914-4a69-889f-4831dc72327c", - "kind": { - "type": "service", - "id": "f500d564-c40a-4eca-ac8a-a26b435f2037" - }, - "name": "external-dns-f500d564-c40a-4eca-ac8a-a26b435f2037", - "ip": "172.30.1.6", - "mac": "A8:40:25:FF:D0:B4", - "subnet": "172.30.1.0/24", - "vni": 100, - "primary": true, - "slot": 0 - } - } - }, - "root": "/pool/ext/ac789935-fa42-4d00-8967-df0d96dbb74e/crypt/zone" - }, - { - "zone": { - "id": 
"56d4dbcc-3b4a-4ed0-8795-7734aadcc4c0", - "underlay_address": "fd00:1122:3344:101::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::9]:32345", - "dataset": { - "pool_name": "oxp_4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca" - } - } - }, - "root": "/pool/ext/4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca/crypt/zone" - }, - { - "zone": { - "id": "0d3a1bd5-f6fe-49cb-807a-190dabc90103", - "underlay_address": "fd00:1122:3344:101::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::6]:32345", - "dataset": { - "pool_name": "oxp_677b0057-3a80-461b-aca8-c2cb501a7278" - } - } - }, - "root": "/pool/ext/6bfb4120-488d-4f3d-90ef-e9bfa523b388/crypt/zone" - }, - { - "zone": { - "id": "d34c7184-5d4e-4cb5-8f91-df74a343ffbc", - "underlay_address": "fd00:1122:3344:101::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:101::e]:123", - "ntp_servers": [ - "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", - "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/ac789935-fa42-4d00-8967-df0d96dbb74e/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled8.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled8.json deleted file mode 100644 index 669889b3c5..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack2-sled8.json +++ /dev/null @@ -1,198 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "7153983f-8fd7-4fb9-92ac-0f07a07798b4", - "underlay_address": "fd00:1122:3344:103::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::a]:32345", - "dataset": { - "pool_name": "oxp_bf428719-1b16-4503-99f4-ad95846d916f" - } - } - }, - "root": "/pool/ext/26e698bb-006d-4208-94b9-d1bc279111fa/crypt/zone" - }, - { - "zone": { - "id": "7d44ba36-4a69-490a-bc40-f6f90a4208d4", - "underlay_address": "fd00:1122:3344:103::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::c]:32345", - "dataset": { - "pool_name": "oxp_414e235b-55c3-4dc1-a568-8adf4ea1a052" - } - } - }, - "root": "/pool/ext/cf940e15-dbc5-481b-866a-4de4b018898e/crypt/zone" - }, - { - "zone": { - "id": "65a11c18-7f59-41ac-b9e7-680627f996e7", - "underlay_address": "fd00:1122:3344:103::3", - "zone_type": { - "type": "nexus", - "internal_address": "[fd00:1122:3344:103::3]:12221", - "external_ip": "172.20.26.3", - "nic": { - "id": "a3e13dde-a2bc-4170-ad84-aad8085b6034", - "kind": { - "type": "service", - "id": "65a11c18-7f59-41ac-b9e7-680627f996e7" - }, - "name": "nexus-65a11c18-7f59-41ac-b9e7-680627f996e7", - "ip": "172.30.2.5", - "mac": "A8:40:25:FF:A6:83", - "subnet": "172.30.2.0/24", - "vni": 100, - "primary": true, - "slot": 0 - }, - "external_tls": true, - "external_dns_servers": [ - "1.1.1.1", - "9.9.9.9" - ] - } - }, - "root": "/pool/ext/e126ddcc-8bee-46ba-8199-2a74df0ba040/crypt/zone" - }, - { - "zone": { - "id": "072fdae8-2adf-4fd2-94ce-e9b0663b91e7", - "underlay_address": "fd00:1122:3344:103::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::b]:32345", - "dataset": { - "pool_name": "oxp_26e698bb-006d-4208-94b9-d1bc279111fa" - } - } - }, - "root": "/pool/ext/bf428719-1b16-4503-99f4-ad95846d916f/crypt/zone" - }, - { - "zone": { - "id": "01f93020-7e7d-4185-93fb-6ca234056c82", - "underlay_address": 
"fd00:1122:3344:103::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::5]:32345", - "dataset": { - "pool_name": "oxp_7b24095a-72df-45e3-984f-2b795e052ac7" - } - } - }, - "root": "/pool/ext/7b24095a-72df-45e3-984f-2b795e052ac7/crypt/zone" - }, - { - "zone": { - "id": "e238116d-e5cc-43d4-9c8a-6f138ae8a15d", - "underlay_address": "fd00:1122:3344:103::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::6]:32345", - "dataset": { - "pool_name": "oxp_e126ddcc-8bee-46ba-8199-2a74df0ba040" - } - } - }, - "root": "/pool/ext/7b24095a-72df-45e3-984f-2b795e052ac7/crypt/zone" - }, - { - "zone": { - "id": "585cd8c5-c41e-4be4-beb8-bfbef9b53856", - "underlay_address": "fd00:1122:3344:103::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::7]:32345", - "dataset": { - "pool_name": "oxp_6340805e-c5af-418d-8bd1-fc0085667f33" - } - } - }, - "root": "/pool/ext/414e235b-55c3-4dc1-a568-8adf4ea1a052/crypt/zone" - }, - { - "zone": { - "id": "0b41c560-3b20-42f4-82ad-92f5bb575d6b", - "underlay_address": "fd00:1122:3344:103::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::9]:32345", - "dataset": { - "pool_name": "oxp_b93f880e-c55b-4d6c-9a16-939d84b628fc" - } - } - }, - "root": "/pool/ext/6340805e-c5af-418d-8bd1-fc0085667f33/crypt/zone" - }, - { - "zone": { - "id": "0ccf27c0-e32d-4b52-a2c5-6db0c64a26f9", - "underlay_address": "fd00:1122:3344:103::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::d]:32345", - "dataset": { - "pool_name": "oxp_2115b084-be0f-4fba-941b-33a659798a9e" - } - } - }, - "root": "/pool/ext/414e235b-55c3-4dc1-a568-8adf4ea1a052/crypt/zone" - }, - { - "zone": { - "id": "a6ba8273-0320-4dab-b801-281f041b0c50", - "underlay_address": "fd00:1122:3344:103::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::4]:32345", - "dataset": { - "pool_name": "oxp_8a199f12-4f5c-483a-8aca-f97856658a35" - } - } - }, - "root": "/pool/ext/b93f880e-c55b-4d6c-9a16-939d84b628fc/crypt/zone" - }, - { - "zone": { - "id": "b9b7b4c2-284a-4ec1-80ea-75b7a43b71c4", - "underlay_address": "fd00:1122:3344:103::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::8]:32345", - "dataset": { - "pool_name": "oxp_cf940e15-dbc5-481b-866a-4de4b018898e" - } - } - }, - "root": "/pool/ext/cf940e15-dbc5-481b-866a-4de4b018898e/crypt/zone" - }, - { - "zone": { - "id": "7a85d50e-b524-41c1-a052-118027eb77db", - "underlay_address": "fd00:1122:3344:103::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:103::e]:123", - "ntp_servers": [ - "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", - "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/b93f880e-c55b-4d6c-9a16-939d84b628fc/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled9.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled9.json deleted file mode 100644 index d4a429f9b0..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack2-sled9.json +++ /dev/null @@ -1,192 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "912346a2-d7e6-427e-b373-e8dcbe4fcea9", - "underlay_address": "fd00:1122:3344:105::5", - "zone_type": { - "type": "crucible", - "address": 
"[fd00:1122:3344:105::5]:32345", - "dataset": { - "pool_name": "oxp_b358fb1e-f52a-4a63-9aab-170225509b37" - } - } - }, - "root": "/pool/ext/0ae29053-29a2-489e-a1e6-6aec0ecd05f8/crypt/zone" - }, - { - "zone": { - "id": "3d420dff-c616-4c7d-bab1-0f9c2b5396bf", - "underlay_address": "fd00:1122:3344:105::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::a]:32345", - "dataset": { - "pool_name": "oxp_4eb2e4eb-41d8-496c-9a5a-687d7e004aa4" - } - } - }, - "root": "/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone" - }, - { - "zone": { - "id": "9c5d88c9-8ff1-4f23-9438-7b81322eaf68", - "underlay_address": "fd00:1122:3344:105::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::b]:32345", - "dataset": { - "pool_name": "oxp_aadf48eb-6ff0-40b5-a092-1fdd06c03e11" - } - } - }, - "root": "/pool/ext/4358f47f-f21e-4cc8-829e-0c7fc2400a59/crypt/zone" - }, - { - "zone": { - "id": "f9c1deca-1898-429e-8c93-254c7aa7bae6", - "underlay_address": "fd00:1122:3344:105::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::8]:32345", - "dataset": { - "pool_name": "oxp_d1cb6b7d-2b92-4b7d-8a4d-551987f0277e" - } - } - }, - "root": "/pool/ext/f8b11629-ced6-412a-9c3f-d169b99ee996/crypt/zone" - }, - { - "zone": { - "id": "ce8563f3-4a93-45ff-b727-cbfbee6aa413", - "underlay_address": "fd00:1122:3344:105::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::9]:32345", - "dataset": { - "pool_name": "oxp_4358f47f-f21e-4cc8-829e-0c7fc2400a59" - } - } - }, - "root": "/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone" - }, - { - "zone": { - "id": "9470ea7d-1920-4b4b-8fca-e7659a1ef733", - "underlay_address": "fd00:1122:3344:105::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::c]:32345", - "dataset": { - "pool_name": "oxp_17eff217-f0b1-4353-b133-0f68bbd5ceaa" - } - } - }, - "root": "/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone" - }, - { - "zone": { - "id": "375296e5-0a23-466c-b605-4204080f8103", - "underlay_address": "fd00:1122:3344:105::4", - "zone_type": { - "type": "crucible_pantry", - "address": "[fd00:1122:3344:105::4]:17000" - } - }, - "root": "/pool/ext/4eb2e4eb-41d8-496c-9a5a-687d7e004aa4/crypt/zone" - }, - { - "zone": { - "id": "f9940969-b0e8-4e8c-86c7-4bc49cd15a5f", - "underlay_address": "fd00:1122:3344:105::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::7]:32345", - "dataset": { - "pool_name": "oxp_f8b11629-ced6-412a-9c3f-d169b99ee996" - } - } - }, - "root": "/pool/ext/17eff217-f0b1-4353-b133-0f68bbd5ceaa/crypt/zone" - }, - { - "zone": { - "id": "23dca27d-c79b-4930-a817-392e8aeaa4c1", - "underlay_address": "fd00:1122:3344:105::e", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::e]:32345", - "dataset": { - "pool_name": "oxp_57650e05-36ff-4de8-865f-b9562bdb67f5" - } - } - }, - "root": "/pool/ext/0ae29053-29a2-489e-a1e6-6aec0ecd05f8/crypt/zone" - }, - { - "zone": { - "id": "92d3e4e9-0768-4772-83c1-23cce52190e9", - "underlay_address": "fd00:1122:3344:105::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::6]:32345", - "dataset": { - "pool_name": "oxp_eb1234a5-fdf7-4977-94d5-2eef25ce56a1" - } - } - }, - "root": "/pool/ext/b358fb1e-f52a-4a63-9aab-170225509b37/crypt/zone" - }, - { - "zone": { - "id": "b3e9fee2-24d2-44e7-8539-a6918e85cf2b", - "underlay_address": "fd00:1122:3344:105::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::d]:32345", - "dataset": { - 
"pool_name": "oxp_0ae29053-29a2-489e-a1e6-6aec0ecd05f8" - } - } - }, - "root": "/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone" - }, - { - "zone": { - "id": "4c3ef132-ec83-4b1b-9574-7c7d3035f9e9", - "underlay_address": "fd00:1122:3344:105::3", - "zone_type": { - "type": "cockroach_db", - "address": "[fd00:1122:3344:105::3]:32221", - "dataset": { - "pool_name": "oxp_b358fb1e-f52a-4a63-9aab-170225509b37" - } - } - }, - "root": "/pool/ext/d1cb6b7d-2b92-4b7d-8a4d-551987f0277e/crypt/zone" - }, - { - "zone": { - "id": "76b79b96-eaa2-4341-9aba-e77cfc92e0a9", - "underlay_address": "fd00:1122:3344:105::f", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:105::f]:123", - "ntp_servers": [ - "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", - "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/0ae29053-29a2-489e-a1e6-6aec0ecd05f8/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled0.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled0.json deleted file mode 100644 index db6c55f556..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled0.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "0710ecea-dbc4-417f-a6f7-1b97c3045db1", - "underlay_address": "fd00:1122:3344:116::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:116::6]:32345", - "dataset": { - "pool_name": "oxp_d5313ef5-019c-4c47-bc5e-63794107a1bb" - } - } - }, - "root": "/pool/ext/904e93a9-d175-4a20-9006-8c1e847aecf7/crypt/zone" - }, - { - "zone": { - "id": "28b29d14-d55f-4b55-bbc1-f66e46ae3e70", - "underlay_address": "fd00:1122:3344:116::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:116::9]:32345", - "dataset": { - "pool_name": "oxp_60755ffe-e9ee-4619-a751-8b3ea6405e67" - } - } - }, - "root": "/pool/ext/d5313ef5-019c-4c47-bc5e-63794107a1bb/crypt/zone" - }, - { - "zone": { - "id": "6f8f9fd2-b139-4069-a7e2-8d40efd58f6c", - "underlay_address": "fd00:1122:3344:116::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:116::d]:32345", - "dataset": { - "pool_name": "oxp_ccd2cb0b-782f-4026-a160-6d1192f04ca3" - } - } - }, - "root": "/pool/ext/d5313ef5-019c-4c47-bc5e-63794107a1bb/crypt/zone" - }, - { - "zone": { - "id": "450308ad-bf4d-40ff-ba62-f3290f7fffaf", - "underlay_address": "fd00:1122:3344:116::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:116::4]:32345", - "dataset": { - "pool_name": "oxp_46b09442-65ba-4d59-9121-9803fe3b724b" - } - } - }, - "root": "/pool/ext/54d901cc-f75e-417d-8a9f-24363136d0ef/crypt/zone" - }, - { - "zone": { - "id": "9a22bbaa-eab4-4a32-8546-9882dc029483", - "underlay_address": "fd00:1122:3344:116::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:116::8]:32345", - "dataset": { - "pool_name": "oxp_93e3f350-75a0-4af0-bdac-baf9b423926f" - } - } - }, - "root": "/pool/ext/d5313ef5-019c-4c47-bc5e-63794107a1bb/crypt/zone" - }, - { - "zone": { - "id": "63a9dc49-0b5b-4483-95ed-553b545dc202", - "underlay_address": "fd00:1122:3344:116::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:116::a]:32345", - "dataset": { - "pool_name": "oxp_e3532845-76c0-42a9-903b-a07f7992e937" - } - } - }, - "root": 
"/pool/ext/60755ffe-e9ee-4619-a751-8b3ea6405e67/crypt/zone" - }, - { - "zone": { - "id": "1fef5b6c-78e4-4ad9-9973-9d8c78f1e232", - "underlay_address": "fd00:1122:3344:116::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:116::7]:32345", - "dataset": { - "pool_name": "oxp_54d901cc-f75e-417d-8a9f-24363136d0ef" - } - } - }, - "root": "/pool/ext/90d7b6f9-3e28-48b0-86ac-0486728075cf/crypt/zone" - }, - { - "zone": { - "id": "b2aab21a-cccd-4aa9-977f-a32090e6eaa7", - "underlay_address": "fd00:1122:3344:116::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:116::5]:32345", - "dataset": { - "pool_name": "oxp_90d7b6f9-3e28-48b0-86ac-0486728075cf" - } - } - }, - "root": "/pool/ext/46b09442-65ba-4d59-9121-9803fe3b724b/crypt/zone" - }, - { - "zone": { - "id": "fc1bbf28-24f3-4c1f-b367-2bc8231eb7d4", - "underlay_address": "fd00:1122:3344:116::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:116::b]:32345", - "dataset": { - "pool_name": "oxp_0a7bb0d3-408b-42b1-8846-76cf106a9580" - } - } - }, - "root": "/pool/ext/e3532845-76c0-42a9-903b-a07f7992e937/crypt/zone" - }, - { - "zone": { - "id": "bcb7617a-f76a-4912-8ccc-802d2a697e3c", - "underlay_address": "fd00:1122:3344:116::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:116::c]:32345", - "dataset": { - "pool_name": "oxp_904e93a9-d175-4a20-9006-8c1e847aecf7" - } - } - }, - "root": "/pool/ext/ccd2cb0b-782f-4026-a160-6d1192f04ca3/crypt/zone" - }, - { - "zone": { - "id": "371fba3a-658b-469b-b675-c90cc0d39254", - "underlay_address": "fd00:1122:3344:116::3", - "zone_type": { - "type": "cockroach_db", - "address": "[fd00:1122:3344:116::3]:32221", - "dataset": { - "pool_name": "oxp_46b09442-65ba-4d59-9121-9803fe3b724b" - } - } - }, - "root": "/pool/ext/46b09442-65ba-4d59-9121-9803fe3b724b/crypt/zone" - }, - { - "zone": { - "id": "5a4d89f5-49e0-4566-a99c-342d1bb26b1c", - "underlay_address": "fd00:1122:3344:116::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:116::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/60755ffe-e9ee-4619-a751-8b3ea6405e67/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled1.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled1.json deleted file mode 100644 index ae3e3d8f4a..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled1.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "f401d06c-46fc-42f8-aa51-7515a51355ce", - "underlay_address": "fd00:1122:3344:11c::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11c::8]:32345", - "dataset": { - "pool_name": "oxp_8a88768a-2dd5-43b7-bd40-0db77be4d3a8" - } - } - }, - "root": "/pool/ext/19d23d27-6a33-4203-b8c1-4b0df4ac791f/crypt/zone" - }, - { - "zone": { - "id": "721c96ea-08d4-4c89-828f-600e7e344916", - "underlay_address": "fd00:1122:3344:11c::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11c::6]:32345", - "dataset": { - "pool_name": "oxp_15259003-fb04-4547-b4a9-b4511893c0fd" - } - } - }, - "root": "/pool/ext/d2a8ed82-22ef-46d8-ad40-e1cb2cecebee/crypt/zone" - }, - { - "zone": { - "id": 
"ca17bdf9-51c5-4e1e-b822-856609070ec6", - "underlay_address": "fd00:1122:3344:11c::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11c::5]:32345", - "dataset": { - "pool_name": "oxp_d2a8ed82-22ef-46d8-ad40-e1cb2cecebee" - } - } - }, - "root": "/pool/ext/15259003-fb04-4547-b4a9-b4511893c0fd/crypt/zone" - }, - { - "zone": { - "id": "5825447e-1b5b-4960-b202-e75853d3d250", - "underlay_address": "fd00:1122:3344:11c::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11c::9]:32345", - "dataset": { - "pool_name": "oxp_04e94454-cbd4-4cee-ad69-42372bcbabd5" - } - } - }, - "root": "/pool/ext/542e0fb3-552c-4d3b-b853-da1f13b581a0/crypt/zone" - }, - { - "zone": { - "id": "b937d3f0-1352-47a2-b9d1-a9ccf9c82b16", - "underlay_address": "fd00:1122:3344:11c::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11c::c]:32345", - "dataset": { - "pool_name": "oxp_542e0fb3-552c-4d3b-b853-da1f13b581a0" - } - } - }, - "root": "/pool/ext/eedd1d58-4892-456f-aaf7-9d650c7921ca/crypt/zone" - }, - { - "zone": { - "id": "d63a677b-8dac-44ee-89a2-cc4cb151254d", - "underlay_address": "fd00:1122:3344:11c::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11c::3]:32345", - "dataset": { - "pool_name": "oxp_45b5f1ee-7b66-4d74-8364-54fa0c73775f" - } - } - }, - "root": "/pool/ext/8a88768a-2dd5-43b7-bd40-0db77be4d3a8/crypt/zone" - }, - { - "zone": { - "id": "abcb92ea-9f17-4cd8-897b-9d0d1ef7903a", - "underlay_address": "fd00:1122:3344:11c::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11c::4]:32345", - "dataset": { - "pool_name": "oxp_341d49db-c06a-416d-90e1-b0a3426ed02e" - } - } - }, - "root": "/pool/ext/eedd1d58-4892-456f-aaf7-9d650c7921ca/crypt/zone" - }, - { - "zone": { - "id": "000ac89d-db07-47ae-83cf-d9cafef013de", - "underlay_address": "fd00:1122:3344:11c::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11c::b]:32345", - "dataset": { - "pool_name": "oxp_eedd1d58-4892-456f-aaf7-9d650c7921ca" - } - } - }, - "root": "/pool/ext/04e94454-cbd4-4cee-ad69-42372bcbabd5/crypt/zone" - }, - { - "zone": { - "id": "29e1e2e4-695e-4c05-8f0c-c16a0a61d390", - "underlay_address": "fd00:1122:3344:11c::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11c::7]:32345", - "dataset": { - "pool_name": "oxp_19d23d27-6a33-4203-b8c1-4b0df4ac791f" - } - } - }, - "root": "/pool/ext/d2a8ed82-22ef-46d8-ad40-e1cb2cecebee/crypt/zone" - }, - { - "zone": { - "id": "9fa7d7be-a6de-4d36-b56b-d1cc5ca7c82c", - "underlay_address": "fd00:1122:3344:11c::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11c::a]:32345", - "dataset": { - "pool_name": "oxp_0fd7a0b1-ed4b-4dc6-8c44-a49c9628c7e1" - } - } - }, - "root": "/pool/ext/d2a8ed82-22ef-46d8-ad40-e1cb2cecebee/crypt/zone" - }, - { - "zone": { - "id": "249db5f1-45e2-4a5c-a91f-cc51dbd87040", - "underlay_address": "fd00:1122:3344:11c::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:11c::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/542e0fb3-552c-4d3b-b853-da1f13b581a0/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled11.json 
b/sled-agent/tests/output/new-zones-ledgers/rack3-sled11.json deleted file mode 100644 index c94417ffb8..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled11.json +++ /dev/null @@ -1,201 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 5, - "zones": [ - { - "zone": { - "id": "7ddd0738-59df-4b67-a41e-7f0de9827187", - "underlay_address": "fd00:1122:3344:11e::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11e::4]:32345", - "dataset": { - "pool_name": "oxp_09af632a-6b1b-4a18-8c91-d392da38b02f" - } - } - }, - "root": "/pool/ext/09af632a-6b1b-4a18-8c91-d392da38b02f/crypt/zone" - }, - { - "zone": { - "id": "9706189f-713a-4394-b5dc-45dcf67dc46e", - "underlay_address": "fd00:1122:3344:11e::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11e::9]:32345", - "dataset": { - "pool_name": "oxp_4e1837c8-91ab-4d1d-abfd-f5144d88535e" - } - } - }, - "root": "/pool/ext/2f0d47cb-28d1-4350-8656-60c6121f773b/crypt/zone" - }, - { - "zone": { - "id": "7bdd841b-5e34-4c19-9066-b12578651446", - "underlay_address": "fd00:1122:3344:11e::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11e::a]:32345", - "dataset": { - "pool_name": "oxp_78d1e7f7-8d11-4fed-8b1e-be58908aea2f" - } - } - }, - "root": "/pool/ext/62c23f4b-8e7b-4cd8-9055-19c1d8bd5ac8/crypt/zone" - }, - { - "zone": { - "id": "74c0f60b-de5f-4456-a85f-f992a6e10424", - "underlay_address": "fd00:1122:3344:11e::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11e::b]:32345", - "dataset": { - "pool_name": "oxp_3b81d709-bf10-4dd7-a2c0-759d8acc2da0" - } - } - }, - "root": "/pool/ext/09af632a-6b1b-4a18-8c91-d392da38b02f/crypt/zone" - }, - { - "zone": { - "id": "da81ce6f-bd38-440e-b966-8a743092fa21", - "underlay_address": "fd00:1122:3344:11e::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11e::6]:32345", - "dataset": { - "pool_name": "oxp_62c23f4b-8e7b-4cd8-9055-19c1d8bd5ac8" - } - } - }, - "root": "/pool/ext/215dd02b-0de6-488a-9e65-5e588cd079fb/crypt/zone" - }, - { - "zone": { - "id": "febbca37-5279-400f-a2e9-6b5271b2d2fc", - "underlay_address": "fd00:1122:3344:11e::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11e::7]:32345", - "dataset": { - "pool_name": "oxp_fb33e773-fb93-41a0-8078-b653b9078dda" - } - } - }, - "root": "/pool/ext/2f0d47cb-28d1-4350-8656-60c6121f773b/crypt/zone" - }, - { - "zone": { - "id": "5100e222-5ea4-4e67-9040-679137e666c8", - "underlay_address": "fd00:1122:3344:11e::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11e::5]:32345", - "dataset": { - "pool_name": "oxp_23767587-2253-431b-8944-18b9bfefcb3d" - } - } - }, - "root": "/pool/ext/3b81d709-bf10-4dd7-a2c0-759d8acc2da0/crypt/zone" - }, - { - "zone": { - "id": "c7ec3bc8-08ca-4901-a45e-0d68db72c6a7", - "underlay_address": "fd00:1122:3344:11e::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11e::3]:32345", - "dataset": { - "pool_name": "oxp_2f0d47cb-28d1-4350-8656-60c6121f773b" - } - } - }, - "root": "/pool/ext/215dd02b-0de6-488a-9e65-5e588cd079fb/crypt/zone" - }, - { - "zone": { - "id": "1fc80dd3-0fd9-4403-96bd-5bbf9eb0f15a", - "underlay_address": "fd00:1122:3344:11e::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11e::c]:32345", - "dataset": { - "pool_name": "oxp_2c932d54-41fb-4ffe-a57f-0479b9e5841e" - } - } - }, - "root": "/pool/ext/3b81d709-bf10-4dd7-a2c0-759d8acc2da0/crypt/zone" - }, - { - "zone": { - "id": 
"4eacc68d-5699-440a-ab33-c75f259e4cc3", - "underlay_address": "fd00:1122:3344:11e::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11e::8]:32345", - "dataset": { - "pool_name": "oxp_215dd02b-0de6-488a-9e65-5e588cd079fb" - } - } - }, - "root": "/pool/ext/4e1837c8-91ab-4d1d-abfd-f5144d88535e/crypt/zone" - }, - { - "zone": { - "id": "cb901d3e-8811-4c4c-a274-a44130501ecf", - "underlay_address": "fd00:1122:3344:11e::d", - "zone_type": { - "type": "boundary_ntp", - "address": "[fd00:1122:3344:11e::d]:123", - "ntp_servers": [ - "time.cloudflare.com" - ], - "dns_servers": [ - "1.1.1.1", - "8.8.8.8" - ], - "domain": null, - "nic": { - "id": "bcf9d9eb-b4ba-4fd5-91e0-55a3414ae049", - "kind": { - "type": "service", - "id": "cb901d3e-8811-4c4c-a274-a44130501ecf" - }, - "name": "ntp-cb901d3e-8811-4c4c-a274-a44130501ecf", - "ip": "172.30.3.6", - "mac": "A8:40:25:FF:D5:2F", - "subnet": "172.30.3.0/24", - "vni": 100, - "primary": true, - "slot": 0 - }, - "snat_cfg": { - "ip": "45.154.216.39", - "first_port": 16384, - "last_port": 32767 - } - } - }, - "root": "/pool/ext/23767587-2253-431b-8944-18b9bfefcb3d/crypt/zone" - }, - { - "zone": { - "id": "be4aada9-d160-401d-a630-a0764c039702", - "underlay_address": "fd00:1122:3344:2::1", - "zone_type": { - "type": "internal_dns", - "dataset": { - "pool_name": "oxp_2f0d47cb-28d1-4350-8656-60c6121f773b" - }, - "http_address": "[fd00:1122:3344:2::1]:5353", - "dns_address": "[fd00:1122:3344:2::1]:53", - "gz_address": "fd00:1122:3344:2::2", - "gz_address_index": 1 - } - }, - "root": "/pool/ext/78d1e7f7-8d11-4fed-8b1e-be58908aea2f/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled12.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled12.json deleted file mode 100644 index bfc30cf160..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled12.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "d8f1b9d2-fa2e-4f03-bbea-2039448d7792", - "underlay_address": "fd00:1122:3344:112::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:112::5]:32345", - "dataset": { - "pool_name": "oxp_7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1" - } - } - }, - "root": "/pool/ext/78d9f0ae-8e7f-450e-abc2-76b983efa5cd/crypt/zone" - }, - { - "zone": { - "id": "2074a935-c0b3-4c4f-aae5-a29adae3e1ac", - "underlay_address": "fd00:1122:3344:112::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:112::8]:32345", - "dataset": { - "pool_name": "oxp_ac663368-45fb-447c-811e-561c68e37bdd" - } - } - }, - "root": "/pool/ext/ac663368-45fb-447c-811e-561c68e37bdd/crypt/zone" - }, - { - "zone": { - "id": "2885d3c7-ad7d-445c-8630-dc6c81f8caa0", - "underlay_address": "fd00:1122:3344:112::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:112::a]:32345", - "dataset": { - "pool_name": "oxp_8e82e8da-e1c5-4867-bc1c-b5441f9c1010" - } - } - }, - "root": "/pool/ext/8e82e8da-e1c5-4867-bc1c-b5441f9c1010/crypt/zone" - }, - { - "zone": { - "id": "1eca241b-6868-4c59-876b-58356654f3b5", - "underlay_address": "fd00:1122:3344:112::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:112::c]:32345", - "dataset": { - "pool_name": "oxp_fde16c69-aa47-4a15-bb3f-3a5861ae45bd" - } - } - }, - "root": "/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone" - }, - { - "zone": { - "id": "cc656f2e-8542-4986-8524-2f55984939c1", - "underlay_address": "fd00:1122:3344:112::d", - 
"zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:112::d]:32345", - "dataset": { - "pool_name": "oxp_21e6d0f9-887e-4d6f-9a00-4cd61139eea6" - } - } - }, - "root": "/pool/ext/21e6d0f9-887e-4d6f-9a00-4cd61139eea6/crypt/zone" - }, - { - "zone": { - "id": "dfb1ebce-a4c7-4b50-9435-9a79b884c1af", - "underlay_address": "fd00:1122:3344:112::3", - "zone_type": { - "type": "clickhouse", - "address": "[fd00:1122:3344:112::3]:8123", - "dataset": { - "pool_name": "oxp_4f045315-de51-46ed-a011-16496615278f" - } - } - }, - "root": "/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone" - }, - { - "zone": { - "id": "a95d90ed-b2b1-4a5d-8d0d-4195b34bc764", - "underlay_address": "fd00:1122:3344:112::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:112::6]:32345", - "dataset": { - "pool_name": "oxp_d2c77c69-14d7-442e-8b47-a0d7af5a0e7e" - } - } - }, - "root": "/pool/ext/fad56ff1-ad9f-4215-b584-522eab18cf7b/crypt/zone" - }, - { - "zone": { - "id": "1d3ebc90-d5a5-4cb0-ae90-50bb2163ae13", - "underlay_address": "fd00:1122:3344:112::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:112::b]:32345", - "dataset": { - "pool_name": "oxp_fad56ff1-ad9f-4215-b584-522eab18cf7b" - } - } - }, - "root": "/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone" - }, - { - "zone": { - "id": "7af9f38b-0c7a-402e-8db3-7c7fb50b4665", - "underlay_address": "fd00:1122:3344:112::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:112::9]:32345", - "dataset": { - "pool_name": "oxp_d0693580-5c5a-449f-803f-ce7188ebc580" - } - } - }, - "root": "/pool/ext/d2c77c69-14d7-442e-8b47-a0d7af5a0e7e/crypt/zone" - }, - { - "zone": { - "id": "94d9bb0a-ecd2-4501-b960-60982f55ad12", - "underlay_address": "fd00:1122:3344:112::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:112::7]:32345", - "dataset": { - "pool_name": "oxp_78d9f0ae-8e7f-450e-abc2-76b983efa5cd" - } - } - }, - "root": "/pool/ext/ac663368-45fb-447c-811e-561c68e37bdd/crypt/zone" - }, - { - "zone": { - "id": "277c1105-576e-4ec1-8e2c-cbae2f5ac9f6", - "underlay_address": "fd00:1122:3344:112::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:112::4]:32345", - "dataset": { - "pool_name": "oxp_4f045315-de51-46ed-a011-16496615278f" - } - } - }, - "root": "/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone" - }, - { - "zone": { - "id": "555c3407-a76c-4ea4-a17a-a670d85a59b0", - "underlay_address": "fd00:1122:3344:112::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:112::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/8e82e8da-e1c5-4867-bc1c-b5441f9c1010/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled13.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled13.json deleted file mode 100644 index 66c04be148..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled13.json +++ /dev/null @@ -1,201 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 5, - "zones": [ - { - "zone": { - "id": "fbcf51c9-a732-4a03-8c19-cfb5b819cb7a", - "underlay_address": "fd00:1122:3344:104::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::5]:32345", - 
"dataset": { - "pool_name": "oxp_382a2961-cd27-4a9c-901d-468a45ff5708" - } - } - }, - "root": "/pool/ext/e99994ae-61ca-4742-a02c-eb0a8a5b69ff/crypt/zone" - }, - { - "zone": { - "id": "7f8a5026-1f1d-4ab3-8c04-077bfda2f815", - "underlay_address": "fd00:1122:3344:104::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::4]:32345", - "dataset": { - "pool_name": "oxp_9c99b9b6-8018-455e-a58a-c048ddd3e11b" - } - } - }, - "root": "/pool/ext/22c79e54-37ef-4ad2-a6cb-a7ee3e4f7167/crypt/zone" - }, - { - "zone": { - "id": "6d45d856-0e49-4eb7-ad76-989a9ae636a2", - "underlay_address": "fd00:1122:3344:104::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::3]:32345", - "dataset": { - "pool_name": "oxp_b74a84fa-b4c8-4c5f-92f4-f4e62a0a311d" - } - } - }, - "root": "/pool/ext/9c99b9b6-8018-455e-a58a-c048ddd3e11b/crypt/zone" - }, - { - "zone": { - "id": "c8dc7fff-72c8-49eb-a552-d605f8655134", - "underlay_address": "fd00:1122:3344:104::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::6]:32345", - "dataset": { - "pool_name": "oxp_22c79e54-37ef-4ad2-a6cb-a7ee3e4f7167" - } - } - }, - "root": "/pool/ext/22c79e54-37ef-4ad2-a6cb-a7ee3e4f7167/crypt/zone" - }, - { - "zone": { - "id": "128a90f5-8889-4665-8343-2c7098f2922c", - "underlay_address": "fd00:1122:3344:104::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::7]:32345", - "dataset": { - "pool_name": "oxp_8b3d0b51-c6a5-4d2c-827a-0d0d1471136d" - } - } - }, - "root": "/pool/ext/29cd042b-e772-4d26-ac85-ef16009950bd/crypt/zone" - }, - { - "zone": { - "id": "a72f1878-3b03-4267-9024-5df5ebae69de", - "underlay_address": "fd00:1122:3344:104::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::a]:32345", - "dataset": { - "pool_name": "oxp_e99994ae-61ca-4742-a02c-eb0a8a5b69ff" - } - } - }, - "root": "/pool/ext/8b3d0b51-c6a5-4d2c-827a-0d0d1471136d/crypt/zone" - }, - { - "zone": { - "id": "6a9165a2-9b66-485a-aaf0-70d89d60bb6c", - "underlay_address": "fd00:1122:3344:104::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::b]:32345", - "dataset": { - "pool_name": "oxp_6a02f05f-e400-4c80-8df8-89aaecb6c12b" - } - } - }, - "root": "/pool/ext/9c99b9b6-8018-455e-a58a-c048ddd3e11b/crypt/zone" - }, - { - "zone": { - "id": "9677c4ed-96bc-4dcb-ae74-f7a3e9d2b5e2", - "underlay_address": "fd00:1122:3344:104::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::c]:32345", - "dataset": { - "pool_name": "oxp_7c30978f-ee87-4e53-8fdf-3455e5e851b7" - } - } - }, - "root": "/pool/ext/29cd042b-e772-4d26-ac85-ef16009950bd/crypt/zone" - }, - { - "zone": { - "id": "179039e7-3ffd-4b76-9379-bef41d42a5ff", - "underlay_address": "fd00:1122:3344:104::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::8]:32345", - "dataset": { - "pool_name": "oxp_4db7e002-e112-4bfc-a41e-8ae26991b01e" - } - } - }, - "root": "/pool/ext/8b3d0b51-c6a5-4d2c-827a-0d0d1471136d/crypt/zone" - }, - { - "zone": { - "id": "6067e31e-b6a3-4114-9e49-0296adc8e7af", - "underlay_address": "fd00:1122:3344:104::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:104::9]:32345", - "dataset": { - "pool_name": "oxp_29cd042b-e772-4d26-ac85-ef16009950bd" - } - } - }, - "root": "/pool/ext/9c99b9b6-8018-455e-a58a-c048ddd3e11b/crypt/zone" - }, - { - "zone": { - "id": "440dd615-e11f-4a5d-aeb4-dcf88bb314de", - "underlay_address": "fd00:1122:3344:104::d", - "zone_type": { - "type": "boundary_ntp", - "address": 
"[fd00:1122:3344:104::d]:123", - "ntp_servers": [ - "time.cloudflare.com" - ], - "dns_servers": [ - "1.1.1.1", - "8.8.8.8" - ], - "domain": null, - "nic": { - "id": "0b52fe1b-f4cc-43b1-9ac3-4ebb4ab60133", - "kind": { - "type": "service", - "id": "440dd615-e11f-4a5d-aeb4-dcf88bb314de" - }, - "name": "ntp-440dd615-e11f-4a5d-aeb4-dcf88bb314de", - "ip": "172.30.3.5", - "mac": "A8:40:25:FF:85:1E", - "subnet": "172.30.3.0/24", - "vni": 100, - "primary": true, - "slot": 0 - }, - "snat_cfg": { - "ip": "45.154.216.38", - "first_port": 0, - "last_port": 16383 - } - } - }, - "root": "/pool/ext/382a2961-cd27-4a9c-901d-468a45ff5708/crypt/zone" - }, - { - "zone": { - "id": "06e2de03-bd92-404c-a8ea-a13185539d24", - "underlay_address": "fd00:1122:3344:1::1", - "zone_type": { - "type": "internal_dns", - "dataset": { - "pool_name": "oxp_b74a84fa-b4c8-4c5f-92f4-f4e62a0a311d" - }, - "http_address": "[fd00:1122:3344:1::1]:5353", - "dns_address": "[fd00:1122:3344:1::1]:53", - "gz_address": "fd00:1122:3344:1::2", - "gz_address_index": 0 - } - }, - "root": "/pool/ext/e99994ae-61ca-4742-a02c-eb0a8a5b69ff/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled14.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled14.json deleted file mode 100644 index e8d061dbfd..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled14.json +++ /dev/null @@ -1,198 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "ac35afab-a312-43c3-a42d-04b8e99fcbde", - "underlay_address": "fd00:1122:3344:111::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:111::4]:32345", - "dataset": { - "pool_name": "oxp_6601065c-c172-4118-81b4-16adde7e9401" - } - } - }, - "root": "/pool/ext/24d7e250-9fc6-459e-8155-30f8e8ccb28c/crypt/zone" - }, - { - "zone": { - "id": "6cd94da2-35b9-4683-a931-29ad4a5ed0ef", - "underlay_address": "fd00:1122:3344:111::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:111::c]:32345", - "dataset": { - "pool_name": "oxp_58276eba-a53c-4ef3-b374-4cdcde4d6e12" - } - } - }, - "root": "/pool/ext/24d7e250-9fc6-459e-8155-30f8e8ccb28c/crypt/zone" - }, - { - "zone": { - "id": "41f07d39-fcc0-4796-8b7c-7cfcd9135f78", - "underlay_address": "fd00:1122:3344:111::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:111::9]:32345", - "dataset": { - "pool_name": "oxp_4b90abdc-3348-4158-bedc-5bcd56e281d8" - } - } - }, - "root": "/pool/ext/8e955f54-fbef-4021-9eec-457825468813/crypt/zone" - }, - { - "zone": { - "id": "44c35566-dd64-4e4a-896e-c50aaa3df14f", - "underlay_address": "fd00:1122:3344:111::3", - "zone_type": { - "type": "nexus", - "internal_address": "[fd00:1122:3344:111::3]:12221", - "external_ip": "45.154.216.37", - "nic": { - "id": "6f824d20-6ce0-4e8b-9ce3-b12dd2b59913", - "kind": { - "type": "service", - "id": "44c35566-dd64-4e4a-896e-c50aaa3df14f" - }, - "name": "nexus-44c35566-dd64-4e4a-896e-c50aaa3df14f", - "ip": "172.30.2.7", - "mac": "A8:40:25:FF:E8:5F", - "subnet": "172.30.2.0/24", - "vni": 100, - "primary": true, - "slot": 0 - }, - "external_tls": true, - "external_dns_servers": [ - "1.1.1.1", - "8.8.8.8" - ] - } - }, - "root": "/pool/ext/435d7a1b-2865-4d49-903f-a68f464ade4d/crypt/zone" - }, - { - "zone": { - "id": "e5020d24-8652-456b-bf92-cd7d255a34c5", - "underlay_address": "fd00:1122:3344:111::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:111::6]:32345", - "dataset": { - "pool_name": 
"oxp_f6925045-363d-4e18-9bde-ee2987b33d21" - } - } - }, - "root": "/pool/ext/6601065c-c172-4118-81b4-16adde7e9401/crypt/zone" - }, - { - "zone": { - "id": "8f25f258-afd7-4351-83e4-24220ec0c251", - "underlay_address": "fd00:1122:3344:111::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:111::8]:32345", - "dataset": { - "pool_name": "oxp_8e955f54-fbef-4021-9eec-457825468813" - } - } - }, - "root": "/pool/ext/6601065c-c172-4118-81b4-16adde7e9401/crypt/zone" - }, - { - "zone": { - "id": "26aa50ec-d70a-47ea-85fc-e55c62a2e0c6", - "underlay_address": "fd00:1122:3344:111::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:111::5]:32345", - "dataset": { - "pool_name": "oxp_24d7e250-9fc6-459e-8155-30f8e8ccb28c" - } - } - }, - "root": "/pool/ext/435d7a1b-2865-4d49-903f-a68f464ade4d/crypt/zone" - }, - { - "zone": { - "id": "68dc212f-a96a-420f-8334-b11ee5d7cb95", - "underlay_address": "fd00:1122:3344:111::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:111::7]:32345", - "dataset": { - "pool_name": "oxp_4353b00b-937e-4d07-aea6-014c57b6f12c" - } - } - }, - "root": "/pool/ext/24d7e250-9fc6-459e-8155-30f8e8ccb28c/crypt/zone" - }, - { - "zone": { - "id": "475140fa-a5dc-4ec1-876d-751c48adfc37", - "underlay_address": "fd00:1122:3344:111::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:111::a]:32345", - "dataset": { - "pool_name": "oxp_ee55b053-6874-4e20-86b5-2e105e64c068" - } - } - }, - "root": "/pool/ext/ee55b053-6874-4e20-86b5-2e105e64c068/crypt/zone" - }, - { - "zone": { - "id": "09d5a8c9-00db-4914-a2c6-7ae3d2da4558", - "underlay_address": "fd00:1122:3344:111::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:111::d]:32345", - "dataset": { - "pool_name": "oxp_9ab5aba5-47dc-4bc4-8f6d-7cbe0f98a9a2" - } - } - }, - "root": "/pool/ext/8e955f54-fbef-4021-9eec-457825468813/crypt/zone" - }, - { - "zone": { - "id": "014f6a39-ad64-4f0a-9fef-01ca0d184cbf", - "underlay_address": "fd00:1122:3344:111::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:111::b]:32345", - "dataset": { - "pool_name": "oxp_435d7a1b-2865-4d49-903f-a68f464ade4d" - } - } - }, - "root": "/pool/ext/f6925045-363d-4e18-9bde-ee2987b33d21/crypt/zone" - }, - { - "zone": { - "id": "aceaf348-ba07-4965-a543-63a800826fe8", - "underlay_address": "fd00:1122:3344:111::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:111::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/8e955f54-fbef-4021-9eec-457825468813/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled15.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled15.json deleted file mode 100644 index e3b3dba86a..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled15.json +++ /dev/null @@ -1,196 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "09a9ecee-1e7c-4819-b27a-73bb61099ce7", - "underlay_address": "fd00:1122:3344:114::3", - "zone_type": { - "type": "external_dns", - "dataset": { - "pool_name": "oxp_b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e" - }, - "http_address": "[fd00:1122:3344:114::3]:5353", - "dns_address": "45.154.216.33:53", 
- "nic": { - "id": "400ca77b-7fee-47d5-8f17-1f4b9c729f27", - "kind": { - "type": "service", - "id": "09a9ecee-1e7c-4819-b27a-73bb61099ce7" - }, - "name": "external-dns-09a9ecee-1e7c-4819-b27a-73bb61099ce7", - "ip": "172.30.1.5", - "mac": "A8:40:25:FF:B7:C7", - "subnet": "172.30.1.0/24", - "vni": 100, - "primary": true, - "slot": 0 - } - } - }, - "root": "/pool/ext/9e878b1e-bf92-4155-8162-640851c2f5d5/crypt/zone" - }, - { - "zone": { - "id": "1792e003-55f7-49b8-906c-4160db91bc23", - "underlay_address": "fd00:1122:3344:114::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:114::5]:32345", - "dataset": { - "pool_name": "oxp_7f3a760f-a4c0-456f-8a22-2d06ecac1022" - } - } - }, - "root": "/pool/ext/76f09ad5-c96c-4748-bbe4-71afaea7bc5e/crypt/zone" - }, - { - "zone": { - "id": "73bc7c0e-1034-449f-8920-4a1f418653ff", - "underlay_address": "fd00:1122:3344:114::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:114::8]:32345", - "dataset": { - "pool_name": "oxp_e87037be-1cdf-4c6e-a8a3-c27b830eaef9" - } - } - }, - "root": "/pool/ext/b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e/crypt/zone" - }, - { - "zone": { - "id": "06dc6619-6251-4543-9a10-da1698af49d5", - "underlay_address": "fd00:1122:3344:114::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:114::9]:32345", - "dataset": { - "pool_name": "oxp_ee34c530-ce70-4f1a-8c97-d0ebb77ccfc8" - } - } - }, - "root": "/pool/ext/9e878b1e-bf92-4155-8162-640851c2f5d5/crypt/zone" - }, - { - "zone": { - "id": "0d796c52-37ca-490d-b42f-dcc22fe5fd6b", - "underlay_address": "fd00:1122:3344:114::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:114::c]:32345", - "dataset": { - "pool_name": "oxp_9ec2b893-d486-4b24-a077-1a297f9eb15f" - } - } - }, - "root": "/pool/ext/9e72c0e2-4895-4791-b606-2f18e432fb69/crypt/zone" - }, - { - "zone": { - "id": "91d0011f-de44-4823-bc26-a447affa39bc", - "underlay_address": "fd00:1122:3344:114::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:114::a]:32345", - "dataset": { - "pool_name": "oxp_85e81a14-031d-4a63-a91f-981c64e91f60" - } - } - }, - "root": "/pool/ext/b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e/crypt/zone" - }, - { - "zone": { - "id": "0c44a2f1-559a-459c-9931-e0e7964d41c6", - "underlay_address": "fd00:1122:3344:114::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:114::b]:32345", - "dataset": { - "pool_name": "oxp_76f09ad5-c96c-4748-bbe4-71afaea7bc5e" - } - } - }, - "root": "/pool/ext/e87037be-1cdf-4c6e-a8a3-c27b830eaef9/crypt/zone" - }, - { - "zone": { - "id": "ea363819-96f6-4fb6-a203-f18414f1c60e", - "underlay_address": "fd00:1122:3344:114::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:114::4]:32345", - "dataset": { - "pool_name": "oxp_b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e" - } - } - }, - "root": "/pool/ext/b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e/crypt/zone" - }, - { - "zone": { - "id": "21592c39-da6b-4527-842e-edeeceffafa1", - "underlay_address": "fd00:1122:3344:114::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:114::6]:32345", - "dataset": { - "pool_name": "oxp_9e72c0e2-4895-4791-b606-2f18e432fb69" - } - } - }, - "root": "/pool/ext/7aff8429-b65d-4a53-a796-7221ac7581a9/crypt/zone" - }, - { - "zone": { - "id": "f33b1263-f1b2-43a6-a8aa-5f8570dd4e72", - "underlay_address": "fd00:1122:3344:114::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:114::7]:32345", - "dataset": { - "pool_name": "oxp_9e878b1e-bf92-4155-8162-640851c2f5d5" - } - } 
- }, - "root": "/pool/ext/7f3a760f-a4c0-456f-8a22-2d06ecac1022/crypt/zone" - }, - { - "zone": { - "id": "6f42b469-5a36-4048-a152-e884f7e8a206", - "underlay_address": "fd00:1122:3344:114::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:114::d]:32345", - "dataset": { - "pool_name": "oxp_7aff8429-b65d-4a53-a796-7221ac7581a9" - } - } - }, - "root": "/pool/ext/9e72c0e2-4895-4791-b606-2f18e432fb69/crypt/zone" - }, - { - "zone": { - "id": "ad77d594-8f78-4d33-a5e4-59887060178e", - "underlay_address": "fd00:1122:3344:114::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:114::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/85e81a14-031d-4a63-a91f-981c64e91f60/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled16.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled16.json deleted file mode 100644 index 3cd727e1bc..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled16.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "dcb9a4ae-2c89-4a74-905b-b7936ff49c19", - "underlay_address": "fd00:1122:3344:11f::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11f::9]:32345", - "dataset": { - "pool_name": "oxp_af509039-d27f-4095-bc9d-cecbc5c606db" - } - } - }, - "root": "/pool/ext/44ee0fb4-6034-44e8-b3de-b3a44457ffca/crypt/zone" - }, - { - "zone": { - "id": "dbd46f71-ec39-4b72-a77d-9d281ccb37e0", - "underlay_address": "fd00:1122:3344:11f::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11f::b]:32345", - "dataset": { - "pool_name": "oxp_44ee0fb4-6034-44e8-b3de-b3a44457ffca" - } - } - }, - "root": "/pool/ext/5e32c0a3-1210-402b-91fb-256946eeac2b/crypt/zone" - }, - { - "zone": { - "id": "a1f30569-a5c6-4a6d-922e-241966aea142", - "underlay_address": "fd00:1122:3344:11f::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11f::6]:32345", - "dataset": { - "pool_name": "oxp_d2133e8b-51cc-455e-89d0-5454fd4fe109" - } - } - }, - "root": "/pool/ext/3f57835b-1469-499a-8757-7cc56acc5d49/crypt/zone" - }, - { - "zone": { - "id": "a33e25ae-4e41-40f4-843d-3d12f62d8cb6", - "underlay_address": "fd00:1122:3344:11f::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11f::8]:32345", - "dataset": { - "pool_name": "oxp_c8e4a7f4-1ae6-4683-8397-ea53475a53e8" - } - } - }, - "root": "/pool/ext/5e32c0a3-1210-402b-91fb-256946eeac2b/crypt/zone" - }, - { - "zone": { - "id": "65ed75c2-2d80-4de5-a6f6-adfa6516c7cf", - "underlay_address": "fd00:1122:3344:11f::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11f::c]:32345", - "dataset": { - "pool_name": "oxp_3f57835b-1469-499a-8757-7cc56acc5d49" - } - } - }, - "root": "/pool/ext/cd8cd75c-632b-4527-889a-7ca0c080fe2c/crypt/zone" - }, - { - "zone": { - "id": "bc6ccf18-6b9b-4687-8b70-c7917d972ae0", - "underlay_address": "fd00:1122:3344:11f::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11f::a]:32345", - "dataset": { - "pool_name": "oxp_cd8cd75c-632b-4527-889a-7ca0c080fe2c" - } - } - }, - "root": "/pool/ext/5e32c0a3-1210-402b-91fb-256946eeac2b/crypt/zone" - }, - { - "zone": 
{ - "id": "06233bfe-a857-4819-aefe-212af9eeb90f", - "underlay_address": "fd00:1122:3344:11f::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11f::5]:32345", - "dataset": { - "pool_name": "oxp_c8a1aaf1-d27c-45fd-9f8d-80ac6bf6865d" - } - } - }, - "root": "/pool/ext/af509039-d27f-4095-bc9d-cecbc5c606db/crypt/zone" - }, - { - "zone": { - "id": "0bbfef71-9eae-43b6-b5e7-0060ce9269dd", - "underlay_address": "fd00:1122:3344:11f::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11f::4]:32345", - "dataset": { - "pool_name": "oxp_5e32c0a3-1210-402b-91fb-256946eeac2b" - } - } - }, - "root": "/pool/ext/af509039-d27f-4095-bc9d-cecbc5c606db/crypt/zone" - }, - { - "zone": { - "id": "550e10ee-24d1-444f-80be-2744dd321e0f", - "underlay_address": "fd00:1122:3344:11f::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11f::7]:32345", - "dataset": { - "pool_name": "oxp_f437ce0e-eb45-4be8-b1fe-33ed2656eb01" - } - } - }, - "root": "/pool/ext/44ee0fb4-6034-44e8-b3de-b3a44457ffca/crypt/zone" - }, - { - "zone": { - "id": "86d768f3-ece2-4956-983f-999bdb23a983", - "underlay_address": "fd00:1122:3344:11f::3", - "zone_type": { - "type": "cockroach_db", - "address": "[fd00:1122:3344:11f::3]:32221", - "dataset": { - "pool_name": "oxp_5e32c0a3-1210-402b-91fb-256946eeac2b" - } - } - }, - "root": "/pool/ext/c8a1aaf1-d27c-45fd-9f8d-80ac6bf6865d/crypt/zone" - }, - { - "zone": { - "id": "2f358812-f72c-4838-a5ea-7d78d0954be0", - "underlay_address": "fd00:1122:3344:11f::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:11f::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/f437ce0e-eb45-4be8-b1fe-33ed2656eb01/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled17.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled17.json deleted file mode 100644 index 09981ecacc..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled17.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "525a19a2-d4ac-418d-bdcf-2ce26e7abe70", - "underlay_address": "fd00:1122:3344:107::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::a]:32345", - "dataset": { - "pool_name": "oxp_cb774d2f-ff86-4fd7-866b-17a6b10e61f0" - } - } - }, - "root": "/pool/ext/e17b68b5-f50c-4fc3-b55a-80d284c6c32d/crypt/zone" - }, - { - "zone": { - "id": "7af188e1-6175-4769-9e4f-2ca7a98b76f6", - "underlay_address": "fd00:1122:3344:107::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::4]:32345", - "dataset": { - "pool_name": "oxp_0cbbcf22-770d-4e75-9148-e6109b129093" - } - } - }, - "root": "/pool/ext/b998e8df-ea69-4bdd-84cb-b7f17075b060/crypt/zone" - }, - { - "zone": { - "id": "2544540f-6ffc-46c0-84bf-f42a110c02d7", - "underlay_address": "fd00:1122:3344:107::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::6]:32345", - "dataset": { - "pool_name": "oxp_e17b68b5-f50c-4fc3-b55a-80d284c6c32d" - } - } - }, - "root": "/pool/ext/521fa477-4d83-49a8-a5cf-c267b7f0c409/crypt/zone" - }, - { - "zone": { - "id": "cfc20f72-cac2-4681-a6d8-e5a0accafbb7", - "underlay_address": 
"fd00:1122:3344:107::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::7]:32345", - "dataset": { - "pool_name": "oxp_b998e8df-ea69-4bdd-84cb-b7f17075b060" - } - } - }, - "root": "/pool/ext/0cbbcf22-770d-4e75-9148-e6109b129093/crypt/zone" - }, - { - "zone": { - "id": "e24be791-5773-425e-a3df-e35ca81570c7", - "underlay_address": "fd00:1122:3344:107::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::9]:32345", - "dataset": { - "pool_name": "oxp_7849c221-dc7f-43ac-ac47-bc51864e083b" - } - } - }, - "root": "/pool/ext/7849c221-dc7f-43ac-ac47-bc51864e083b/crypt/zone" - }, - { - "zone": { - "id": "170856ee-21cf-4780-8903-175d558bc7cc", - "underlay_address": "fd00:1122:3344:107::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::3]:32345", - "dataset": { - "pool_name": "oxp_618e21e5-77d4-40ba-9f8e-7960e9ad92e2" - } - } - }, - "root": "/pool/ext/aa7a37fb-2f03-4d5c-916b-db3a4fc269ac/crypt/zone" - }, - { - "zone": { - "id": "604278ff-525a-4d41-82ff-07aef3174d38", - "underlay_address": "fd00:1122:3344:107::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::5]:32345", - "dataset": { - "pool_name": "oxp_521fa477-4d83-49a8-a5cf-c267b7f0c409" - } - } - }, - "root": "/pool/ext/0cbbcf22-770d-4e75-9148-e6109b129093/crypt/zone" - }, - { - "zone": { - "id": "d0d4fcc0-6ed0-410a-99c7-5daf34014421", - "underlay_address": "fd00:1122:3344:107::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::b]:32345", - "dataset": { - "pool_name": "oxp_aa7a37fb-2f03-4d5c-916b-db3a4fc269ac" - } - } - }, - "root": "/pool/ext/aa7a37fb-2f03-4d5c-916b-db3a4fc269ac/crypt/zone" - }, - { - "zone": { - "id": "c935df7b-2629-48ee-bc10-20508301905d", - "underlay_address": "fd00:1122:3344:107::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::c]:32345", - "dataset": { - "pool_name": "oxp_793fd018-5fdc-4e54-9c45-f8023fa3ea18" - } - } - }, - "root": "/pool/ext/7849c221-dc7f-43ac-ac47-bc51864e083b/crypt/zone" - }, - { - "zone": { - "id": "4ba5f3b6-8be5-4a85-bc57-a5e3b0b867d8", - "underlay_address": "fd00:1122:3344:107::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:107::8]:32345", - "dataset": { - "pool_name": "oxp_e80e7996-c572-481e-8c22-61c16c6e47f4" - } - } - }, - "root": "/pool/ext/e17b68b5-f50c-4fc3-b55a-80d284c6c32d/crypt/zone" - }, - { - "zone": { - "id": "395c9d6e-3bd0-445e-9269-46c3260edb83", - "underlay_address": "fd00:1122:3344:107::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:107::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/0cbbcf22-770d-4e75-9148-e6109b129093/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled18.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled18.json deleted file mode 100644 index 708019883e..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled18.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "c7096dd4-e429-4a6f-9725-041a77ef2513", - "underlay_address": "fd00:1122:3344:11a::6", - "zone_type": { - "type": "crucible", - "address": 
"[fd00:1122:3344:11a::6]:32345", - "dataset": { - "pool_name": "oxp_dcf62af6-c0f9-4eb5-9b23-9424ef8f3d32" - } - } - }, - "root": "/pool/ext/b869e463-c8b9-4c12-a6b9-13175b3896dd/crypt/zone" - }, - { - "zone": { - "id": "09dd367f-b32f-43f3-aa53-11ccec1cd0c9", - "underlay_address": "fd00:1122:3344:11a::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11a::9]:32345", - "dataset": { - "pool_name": "oxp_d7d00317-42c7-4d1e-a04c-85491fb230cd" - } - } - }, - "root": "/pool/ext/d7d00317-42c7-4d1e-a04c-85491fb230cd/crypt/zone" - }, - { - "zone": { - "id": "fb2f85f1-05b3-432f-9bb5-63fb27a762b1", - "underlay_address": "fd00:1122:3344:11a::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11a::5]:32345", - "dataset": { - "pool_name": "oxp_db4a9949-68da-4c1c-9a1c-49083eba14fe" - } - } - }, - "root": "/pool/ext/db4a9949-68da-4c1c-9a1c-49083eba14fe/crypt/zone" - }, - { - "zone": { - "id": "5b89425e-69e4-4305-8f33-dc5768a1849e", - "underlay_address": "fd00:1122:3344:11a::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11a::a]:32345", - "dataset": { - "pool_name": "oxp_64a1bad7-d1b1-4e39-a3f3-9b8d73c4709e" - } - } - }, - "root": "/pool/ext/64a1bad7-d1b1-4e39-a3f3-9b8d73c4709e/crypt/zone" - }, - { - "zone": { - "id": "a5156db4-273a-4f8b-b8d8-df77062a6c63", - "underlay_address": "fd00:1122:3344:11a::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11a::4]:32345", - "dataset": { - "pool_name": "oxp_b869e463-c8b9-4c12-a6b9-13175b3896dd" - } - } - }, - "root": "/pool/ext/dcf62af6-c0f9-4eb5-9b23-9424ef8f3d32/crypt/zone" - }, - { - "zone": { - "id": "1f2d2f86-b69b-4130-bb9b-e62ba0cb6802", - "underlay_address": "fd00:1122:3344:11a::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11a::b]:32345", - "dataset": { - "pool_name": "oxp_153ffee4-5d7a-4786-ad33-d5567b434fe0" - } - } - }, - "root": "/pool/ext/174a067d-1c5a-49f7-a29f-1e62ab1c3796/crypt/zone" - }, - { - "zone": { - "id": "1e249cc9-52e7-4d66-b713-8ace1392e991", - "underlay_address": "fd00:1122:3344:11a::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11a::7]:32345", - "dataset": { - "pool_name": "oxp_04b6215e-9651-4a3c-ba1b-b8a1e67b3d89" - } - } - }, - "root": "/pool/ext/db4a9949-68da-4c1c-9a1c-49083eba14fe/crypt/zone" - }, - { - "zone": { - "id": "eb779538-2b1b-4d1d-8c7e-b15f04db6e53", - "underlay_address": "fd00:1122:3344:11a::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11a::3]:32345", - "dataset": { - "pool_name": "oxp_aacb8524-3562-4f97-a616-9023230d6efa" - } - } - }, - "root": "/pool/ext/174a067d-1c5a-49f7-a29f-1e62ab1c3796/crypt/zone" - }, - { - "zone": { - "id": "b575d52d-be7d-46af-814b-91e6d18f3464", - "underlay_address": "fd00:1122:3344:11a::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11a::8]:32345", - "dataset": { - "pool_name": "oxp_174a067d-1c5a-49f7-a29f-1e62ab1c3796" - } - } - }, - "root": "/pool/ext/64a1bad7-d1b1-4e39-a3f3-9b8d73c4709e/crypt/zone" - }, - { - "zone": { - "id": "274200bc-eac7-47d7-8a57-4b7be794caba", - "underlay_address": "fd00:1122:3344:11a::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11a::c]:32345", - "dataset": { - "pool_name": "oxp_2e7644e4-7d46-42bf-8e7a-9c3f39085b3f" - } - } - }, - "root": "/pool/ext/2e7644e4-7d46-42bf-8e7a-9c3f39085b3f/crypt/zone" - }, - { - "zone": { - "id": "bc20ba3a-df62-4a62-97c2-75b5653f84b4", - "underlay_address": "fd00:1122:3344:11a::d", - "zone_type": { - "type": 
"internal_ntp", - "address": "[fd00:1122:3344:11a::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/04b6215e-9651-4a3c-ba1b-b8a1e67b3d89/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled19.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled19.json deleted file mode 100644 index 197df304e3..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled19.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "9c73abb9-edb8-4aa2-835b-c25ebe4466d9", - "underlay_address": "fd00:1122:3344:109::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::7]:32345", - "dataset": { - "pool_name": "oxp_b7a3032f-7b8c-4a6a-9fa2-e5773bfdbc94" - } - } - }, - "root": "/pool/ext/46d21f3d-23be-4361-b5c5-9d0f6ece5b8c/crypt/zone" - }, - { - "zone": { - "id": "ca576bda-cbdd-4bb9-9d75-ce06d569e926", - "underlay_address": "fd00:1122:3344:109::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::a]:32345", - "dataset": { - "pool_name": "oxp_863c4bc4-9c7e-453c-99d8-a3d509f49f3e" - } - } - }, - "root": "/pool/ext/7e67cb32-0c00-4090-9647-eb7bae75deeb/crypt/zone" - }, - { - "zone": { - "id": "f010978d-346e-49cd-b265-7607a25685f9", - "underlay_address": "fd00:1122:3344:109::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::c]:32345", - "dataset": { - "pool_name": "oxp_9bc1dab8-2d2a-4f92-bdfb-94ebca7881f1" - } - } - }, - "root": "/pool/ext/9bc1dab8-2d2a-4f92-bdfb-94ebca7881f1/crypt/zone" - }, - { - "zone": { - "id": "daff4162-cc81-4586-a457-91d767b8f1d9", - "underlay_address": "fd00:1122:3344:109::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::6]:32345", - "dataset": { - "pool_name": "oxp_b9b5b50c-e823-41ae-9585-01b818883521" - } - } - }, - "root": "/pool/ext/de682b18-afaf-4d53-b62e-934f6bd4a1f8/crypt/zone" - }, - { - "zone": { - "id": "9f300d3d-e698-4cc8-be4c-1f81ac8c927f", - "underlay_address": "fd00:1122:3344:109::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::d]:32345", - "dataset": { - "pool_name": "oxp_f1d82c22-ad7d-4cda-9ab0-8f5f496d90ce" - } - } - }, - "root": "/pool/ext/de682b18-afaf-4d53-b62e-934f6bd4a1f8/crypt/zone" - }, - { - "zone": { - "id": "8db7c7be-da40-4a1c-9681-4d02606a7eb7", - "underlay_address": "fd00:1122:3344:109::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::9]:32345", - "dataset": { - "pool_name": "oxp_46d21f3d-23be-4361-b5c5-9d0f6ece5b8c" - } - } - }, - "root": "/pool/ext/b7a3032f-7b8c-4a6a-9fa2-e5773bfdbc94/crypt/zone" - }, - { - "zone": { - "id": "b990911b-805a-4f9d-bd83-e977f5b19a35", - "underlay_address": "fd00:1122:3344:109::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::4]:32345", - "dataset": { - "pool_name": "oxp_7e67cb32-0c00-4090-9647-eb7bae75deeb" - } - } - }, - "root": "/pool/ext/de682b18-afaf-4d53-b62e-934f6bd4a1f8/crypt/zone" - }, - { - "zone": { - "id": "c99392f5-8f30-41ac-9eeb-12d7f4b707f1", - "underlay_address": "fd00:1122:3344:109::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::b]:32345", - "dataset": { - "pool_name": 
"oxp_de682b18-afaf-4d53-b62e-934f6bd4a1f8" - } - } - }, - "root": "/pool/ext/46d21f3d-23be-4361-b5c5-9d0f6ece5b8c/crypt/zone" - }, - { - "zone": { - "id": "7f6cb339-9eb1-4866-8a4f-383bad25b36f", - "underlay_address": "fd00:1122:3344:109::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::5]:32345", - "dataset": { - "pool_name": "oxp_458cbfa3-3752-415d-8a3b-fb64e88468e1" - } - } - }, - "root": "/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone" - }, - { - "zone": { - "id": "11946372-f253-4648-b00c-c7874a7b2888", - "underlay_address": "fd00:1122:3344:109::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:109::8]:32345", - "dataset": { - "pool_name": "oxp_d73332f5-b2a5-46c0-94cf-c5c5712abfe8" - } - } - }, - "root": "/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone" - }, - { - "zone": { - "id": "58ece9e1-387f-4d2f-a42f-69cd34f9f380", - "underlay_address": "fd00:1122:3344:109::3", - "zone_type": { - "type": "cockroach_db", - "address": "[fd00:1122:3344:109::3]:32221", - "dataset": { - "pool_name": "oxp_7e67cb32-0c00-4090-9647-eb7bae75deeb" - } - } - }, - "root": "/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone" - }, - { - "zone": { - "id": "f016a25a-deb5-4f20-bdb0-2425c00d41a6", - "underlay_address": "fd00:1122:3344:109::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:109::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled2.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled2.json deleted file mode 100644 index ba6ab6f915..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled2.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "dd799dd4-03f9-451d-85e2-844155753a03", - "underlay_address": "fd00:1122:3344:10a::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::7]:32345", - "dataset": { - "pool_name": "oxp_7dcf3acc-bde9-4306-bb46-4c6a6cbbb7ba" - } - } - }, - "root": "/pool/ext/7dcf3acc-bde9-4306-bb46-4c6a6cbbb7ba/crypt/zone" - }, - { - "zone": { - "id": "dbf9346d-b46d-4402-bb44-92ce20fb5290", - "underlay_address": "fd00:1122:3344:10a::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::9]:32345", - "dataset": { - "pool_name": "oxp_9275d50f-da2c-4f84-9775-598a364309ad" - } - } - }, - "root": "/pool/ext/d83e36ef-dd7a-4cc2-be19-379b1114c031/crypt/zone" - }, - { - "zone": { - "id": "9a55ebdd-eeef-4954-b0a1-e32b04837f14", - "underlay_address": "fd00:1122:3344:10a::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::4]:32345", - "dataset": { - "pool_name": "oxp_7f30f77e-5998-4676-a226-b433b5940e77" - } - } - }, - "root": "/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone" - }, - { - "zone": { - "id": "bc2935f8-e4fa-4015-968e-f90985533a6a", - "underlay_address": "fd00:1122:3344:10a::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::6]:32345", - "dataset": { - "pool_name": "oxp_022c9d58-e91f-480d-bda6-0cf32ce3b1f5" - } - } - }, - "root": 
"/pool/ext/c395dcc3-6ece-4b3f-b143-e111a54ef7da/crypt/zone" - }, - { - "zone": { - "id": "63f8c861-fa1d-4121-92d9-7efa5ef7f5a0", - "underlay_address": "fd00:1122:3344:10a::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::a]:32345", - "dataset": { - "pool_name": "oxp_3c805784-f403-4d01-9eb0-4f77d0821980" - } - } - }, - "root": "/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone" - }, - { - "zone": { - "id": "4996dcf9-78de-4f69-94fa-c09cc86a8d3c", - "underlay_address": "fd00:1122:3344:10a::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::b]:32345", - "dataset": { - "pool_name": "oxp_f9fe9ce6-be0d-4974-bc30-78a8f1330496" - } - } - }, - "root": "/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone" - }, - { - "zone": { - "id": "36b9a4bf-7b30-4fe7-903d-3b722c79fa86", - "underlay_address": "fd00:1122:3344:10a::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::c]:32345", - "dataset": { - "pool_name": "oxp_cb1052e0-4c70-4d37-b979-dd55e6a25f08" - } - } - }, - "root": "/pool/ext/3c805784-f403-4d01-9eb0-4f77d0821980/crypt/zone" - }, - { - "zone": { - "id": "a109a902-6a27-41b6-a881-c353e28e5389", - "underlay_address": "fd00:1122:3344:10a::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::8]:32345", - "dataset": { - "pool_name": "oxp_d83e36ef-dd7a-4cc2-be19-379b1114c031" - } - } - }, - "root": "/pool/ext/d83e36ef-dd7a-4cc2-be19-379b1114c031/crypt/zone" - }, - { - "zone": { - "id": "d2a9a0bc-ea12-44e3-ac4a-904c76120d11", - "underlay_address": "fd00:1122:3344:10a::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::3]:32345", - "dataset": { - "pool_name": "oxp_c395dcc3-6ece-4b3f-b143-e111a54ef7da" - } - } - }, - "root": "/pool/ext/9898a289-2f0d-43a6-b053-850f6e784e9a/crypt/zone" - }, - { - "zone": { - "id": "b3c3e53b-d9ec-4dd8-bd2c-bd811319aa44", - "underlay_address": "fd00:1122:3344:10a::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10a::5]:32345", - "dataset": { - "pool_name": "oxp_9898a289-2f0d-43a6-b053-850f6e784e9a" - } - } - }, - "root": "/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone" - }, - { - "zone": { - "id": "7b445d3b-fd25-4538-ac3f-f439c66d1223", - "underlay_address": "fd00:1122:3344:10a::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:10a::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/f9fe9ce6-be0d-4974-bc30-78a8f1330496/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled20.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled20.json deleted file mode 100644 index f02f1f05e5..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled20.json +++ /dev/null @@ -1,198 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "4b49e669-264d-4bfb-8ab1-555b520b679c", - "underlay_address": "fd00:1122:3344:108::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::c]:32345", - "dataset": { - "pool_name": "oxp_799a1c86-9e1a-4626-91e2-a19f7ff5356e" - } - } - }, - "root": "/pool/ext/d2478613-b7c9-4bd3-856f-1fe8e9c903c2/crypt/zone" - }, - { - "zone": { - "id": 
"d802baae-9c3f-437a-85fe-cd72653b6db1", - "underlay_address": "fd00:1122:3344:108::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::5]:32345", - "dataset": { - "pool_name": "oxp_d2478613-b7c9-4bd3-856f-1fe8e9c903c2" - } - } - }, - "root": "/pool/ext/116f216c-e151-410f-82bf-8913904cf7b4/crypt/zone" - }, - { - "zone": { - "id": "e5f69e60-3421-49a4-8c1d-2db8cbb6a5e9", - "underlay_address": "fd00:1122:3344:108::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::b]:32345", - "dataset": { - "pool_name": "oxp_116f216c-e151-410f-82bf-8913904cf7b4" - } - } - }, - "root": "/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone" - }, - { - "zone": { - "id": "3e598962-ef8c-4cb6-bdfe-ec8563939d6a", - "underlay_address": "fd00:1122:3344:108::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::4]:32345", - "dataset": { - "pool_name": "oxp_ababce44-01d1-4c50-b389-f60464c5dde9" - } - } - }, - "root": "/pool/ext/ababce44-01d1-4c50-b389-f60464c5dde9/crypt/zone" - }, - { - "zone": { - "id": "25355c9f-cc2b-4b24-8eaa-65190f8936a8", - "underlay_address": "fd00:1122:3344:108::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::d]:32345", - "dataset": { - "pool_name": "oxp_fed46d41-136d-4462-8782-359014efba59" - } - } - }, - "root": "/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone" - }, - { - "zone": { - "id": "efb2f16c-ebad-4192-b575-dcb4d9b1d5cd", - "underlay_address": "fd00:1122:3344:108::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::a]:32345", - "dataset": { - "pool_name": "oxp_bf509067-0165-456d-98ae-72c86378e626" - } - } - }, - "root": "/pool/ext/95220093-e3b8-4f7f-9f5a-cb32cb75180a/crypt/zone" - }, - { - "zone": { - "id": "89191f0d-4e0b-47fa-9a9e-fbe2a6db1385", - "underlay_address": "fd00:1122:3344:108::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::8]:32345", - "dataset": { - "pool_name": "oxp_eea15142-4635-4e40-b0b4-b0c4f13eca3c" - } - } - }, - "root": "/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone" - }, - { - "zone": { - "id": "e4589324-c528-49c7-9141-35e0a7af6947", - "underlay_address": "fd00:1122:3344:108::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::6]:32345", - "dataset": { - "pool_name": "oxp_95220093-e3b8-4f7f-9f5a-cb32cb75180a" - } - } - }, - "root": "/pool/ext/ababce44-01d1-4c50-b389-f60464c5dde9/crypt/zone" - }, - { - "zone": { - "id": "95ebe94d-0e68-421d-9260-c30bd7fe4bd6", - "underlay_address": "fd00:1122:3344:108::3", - "zone_type": { - "type": "nexus", - "internal_address": "[fd00:1122:3344:108::3]:12221", - "external_ip": "45.154.216.35", - "nic": { - "id": "301aa595-f072-4da3-a533-99647b44a66a", - "kind": { - "type": "service", - "id": "95ebe94d-0e68-421d-9260-c30bd7fe4bd6" - }, - "name": "nexus-95ebe94d-0e68-421d-9260-c30bd7fe4bd6", - "ip": "172.30.2.5", - "mac": "A8:40:25:FF:F1:30", - "subnet": "172.30.2.0/24", - "vni": 100, - "primary": true, - "slot": 0 - }, - "external_tls": true, - "external_dns_servers": [ - "1.1.1.1", - "8.8.8.8" - ] - } - }, - "root": "/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone" - }, - { - "zone": { - "id": "4b7a7052-f8e8-4196-8d6b-315943986ce6", - "underlay_address": "fd00:1122:3344:108::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::7]:32345", - "dataset": { - "pool_name": "oxp_a549421c-2f12-45cc-b691-202f0a9bfa8b" - } - } - }, - "root": 
"/pool/ext/bf509067-0165-456d-98ae-72c86378e626/crypt/zone" - }, - { - "zone": { - "id": "71b8ff53-c781-47bb-8ddc-2c7129680542", - "underlay_address": "fd00:1122:3344:108::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:108::9]:32345", - "dataset": { - "pool_name": "oxp_9d19f891-a3d9-4c6e-b1e1-6b0b085a9440" - } - } - }, - "root": "/pool/ext/fed46d41-136d-4462-8782-359014efba59/crypt/zone" - }, - { - "zone": { - "id": "eaf7bf77-f4c2-4016-9909-4b88a27e9d9a", - "underlay_address": "fd00:1122:3344:108::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:108::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/ababce44-01d1-4c50-b389-f60464c5dde9/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled21.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled21.json deleted file mode 100644 index d6c19b96ed..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled21.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "a91e4af3-5d18-4b08-8cb6-0583db8f8842", - "underlay_address": "fd00:1122:3344:117::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:117::a]:32345", - "dataset": { - "pool_name": "oxp_4b2896b8-5f0e-42fb-a474-658b28421e65" - } - } - }, - "root": "/pool/ext/23393ed9-acee-4686-861f-7fc825af1249/crypt/zone" - }, - { - "zone": { - "id": "1ce74512-ce3a-4125-95f1-12c86e0275d5", - "underlay_address": "fd00:1122:3344:117::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:117::8]:32345", - "dataset": { - "pool_name": "oxp_46ece76f-ef00-4dd0-9f73-326c63959470" - } - } - }, - "root": "/pool/ext/1bd5955e-14a9-463f-adeb-f12bcb45a6c1/crypt/zone" - }, - { - "zone": { - "id": "fef5d35f-9622-4dee-8635-d26e9f7f6869", - "underlay_address": "fd00:1122:3344:117::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:117::4]:32345", - "dataset": { - "pool_name": "oxp_e4d7c2e8-016b-4617-afb5-38a2d9c1b508" - } - } - }, - "root": "/pool/ext/e372bba3-ef60-466f-b819-a3d5b9acbe77/crypt/zone" - }, - { - "zone": { - "id": "4f024a31-cd38-4219-8381-9f1af70d1d54", - "underlay_address": "fd00:1122:3344:117::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:117::c]:32345", - "dataset": { - "pool_name": "oxp_7cb2a3c2-9d33-4c6a-af57-669f251cf4cf" - } - } - }, - "root": "/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone" - }, - { - "zone": { - "id": "d00e1d0b-e12f-420a-a4df-21e4cac176f6", - "underlay_address": "fd00:1122:3344:117::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:117::b]:32345", - "dataset": { - "pool_name": "oxp_e372bba3-ef60-466f-b819-a3d5b9acbe77" - } - } - }, - "root": "/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone" - }, - { - "zone": { - "id": "1598058a-6064-449e-b39c-1e3d345ed793", - "underlay_address": "fd00:1122:3344:117::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:117::5]:32345", - "dataset": { - "pool_name": "oxp_022a8d67-1e00-49f3-81ed-a0a1bc187cfa" - } - } - }, - "root": "/pool/ext/022a8d67-1e00-49f3-81ed-a0a1bc187cfa/crypt/zone" - }, - { - "zone": { - "id": 
"c723c4b8-3031-4b25-8c16-fe08bc0b5f00", - "underlay_address": "fd00:1122:3344:117::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:117::7]:32345", - "dataset": { - "pool_name": "oxp_23393ed9-acee-4686-861f-7fc825af1249" - } - } - }, - "root": "/pool/ext/1bd5955e-14a9-463f-adeb-f12bcb45a6c1/crypt/zone" - }, - { - "zone": { - "id": "7751b307-888f-46c8-8787-75d2f3fdaef3", - "underlay_address": "fd00:1122:3344:117::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:117::9]:32345", - "dataset": { - "pool_name": "oxp_e54e53d4-f68f-4b19-b8c1-9d5ab42e51c1" - } - } - }, - "root": "/pool/ext/e372bba3-ef60-466f-b819-a3d5b9acbe77/crypt/zone" - }, - { - "zone": { - "id": "89413ff1-d5de-4931-8389-e84e7ea321af", - "underlay_address": "fd00:1122:3344:117::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:117::6]:32345", - "dataset": { - "pool_name": "oxp_1bd5955e-14a9-463f-adeb-f12bcb45a6c1" - } - } - }, - "root": "/pool/ext/1bd5955e-14a9-463f-adeb-f12bcb45a6c1/crypt/zone" - }, - { - "zone": { - "id": "287b0b24-72aa-41b5-a597-8523d84225ef", - "underlay_address": "fd00:1122:3344:117::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:117::3]:32345", - "dataset": { - "pool_name": "oxp_cfbd185d-e185-4aaa-a598-9216124ceec4" - } - } - }, - "root": "/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone" - }, - { - "zone": { - "id": "4728253e-c534-4a5b-b707-c64ac9a8eb8c", - "underlay_address": "fd00:1122:3344:117::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:117::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled22.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled22.json deleted file mode 100644 index 1cd6fed362..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled22.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "49f20cd1-a8a3-4fa8-9209-59da60cd8f9b", - "underlay_address": "fd00:1122:3344:103::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::5]:32345", - "dataset": { - "pool_name": "oxp_13a9ef4a-f33a-4781-8f83-712c07a79b1f" - } - } - }, - "root": "/pool/ext/711eff4e-736c-478e-83aa-ae86f5efbf1d/crypt/zone" - }, - { - "zone": { - "id": "896fd564-f94e-496b-9fcf-ddfbfcfac9f7", - "underlay_address": "fd00:1122:3344:103::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::c]:32345", - "dataset": { - "pool_name": "oxp_0944c0a2-0fb7-4f51-bced-52cc257cd2f6" - } - } - }, - "root": "/pool/ext/bc54d8c5-955d-429d-84e0-a20a4e5e27a3/crypt/zone" - }, - { - "zone": { - "id": "911fb8b3-05c2-4af7-8974-6c74a61d94ad", - "underlay_address": "fd00:1122:3344:103::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::9]:32345", - "dataset": { - "pool_name": "oxp_29f59fce-a867-4571-9d2e-b03fa5c13510" - } - } - }, - "root": "/pool/ext/711eff4e-736c-478e-83aa-ae86f5efbf1d/crypt/zone" - }, - { - "zone": { - "id": "682b34db-0b06-4770-a8fe-74437cf184d6", - "underlay_address": "fd00:1122:3344:103::6", - 
"zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::6]:32345", - "dataset": { - "pool_name": "oxp_094d11d2-8049-4138-bcf4-562f5f8e77c0" - } - } - }, - "root": "/pool/ext/0944c0a2-0fb7-4f51-bced-52cc257cd2f6/crypt/zone" - }, - { - "zone": { - "id": "d8d20365-ecd3-4fd5-9495-c0670e3bd5d9", - "underlay_address": "fd00:1122:3344:103::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::a]:32345", - "dataset": { - "pool_name": "oxp_fb97ff7b-0225-400c-a137-3b38a786c0a0" - } - } - }, - "root": "/pool/ext/094d11d2-8049-4138-bcf4-562f5f8e77c0/crypt/zone" - }, - { - "zone": { - "id": "673620b6-44d9-4310-8e17-3024ac84e708", - "underlay_address": "fd00:1122:3344:103::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::7]:32345", - "dataset": { - "pool_name": "oxp_711eff4e-736c-478e-83aa-ae86f5efbf1d" - } - } - }, - "root": "/pool/ext/fb97ff7b-0225-400c-a137-3b38a786c0a0/crypt/zone" - }, - { - "zone": { - "id": "bf6dfc04-4d4c-41b6-a011-40ffc3bc5080", - "underlay_address": "fd00:1122:3344:103::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::8]:32345", - "dataset": { - "pool_name": "oxp_f815f1b6-48ef-436d-8768-eb08227e2386" - } - } - }, - "root": "/pool/ext/13a9ef4a-f33a-4781-8f83-712c07a79b1f/crypt/zone" - }, - { - "zone": { - "id": "ac8a82a8-fb6f-4635-a9a9-d98617eab390", - "underlay_address": "fd00:1122:3344:103::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::3]:32345", - "dataset": { - "pool_name": "oxp_97d6c860-4e2f-496e-974b-2e293fee6af9" - } - } - }, - "root": "/pool/ext/0944c0a2-0fb7-4f51-bced-52cc257cd2f6/crypt/zone" - }, - { - "zone": { - "id": "4ed66558-4815-4b85-9b94-9edf3ee69ead", - "underlay_address": "fd00:1122:3344:103::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::4]:32345", - "dataset": { - "pool_name": "oxp_bc54d8c5-955d-429d-84e0-a20a4e5e27a3" - } - } - }, - "root": "/pool/ext/13a9ef4a-f33a-4781-8f83-712c07a79b1f/crypt/zone" - }, - { - "zone": { - "id": "8a71c6ee-b08d-4c3d-b13c-c9cebc4c328a", - "underlay_address": "fd00:1122:3344:103::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:103::b]:32345", - "dataset": { - "pool_name": "oxp_2bdfa429-09bd-4fa1-aa20-eea99f0d2b85" - } - } - }, - "root": "/pool/ext/29f59fce-a867-4571-9d2e-b03fa5c13510/crypt/zone" - }, - { - "zone": { - "id": "7e6b8962-7a1e-4d7b-b7ea-49e64a51d98d", - "underlay_address": "fd00:1122:3344:103::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:103::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/2bdfa429-09bd-4fa1-aa20-eea99f0d2b85/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled23.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled23.json deleted file mode 100644 index ab171ad8cd..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled23.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "6b7e931d-4b91-4dc6-9a7b-4c19ac669e5d", - "underlay_address": "fd00:1122:3344:105::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::4]:32345", - 
"dataset": { - "pool_name": "oxp_24dab7f5-164a-47f3-a878-f32ab1e68cce" - } - } - }, - "root": "/pool/ext/ad493851-2d11-4c2d-8d75-989579d9616a/crypt/zone" - }, - { - "zone": { - "id": "6c58e7aa-71e1-4868-9d4b-e12c7ef40303", - "underlay_address": "fd00:1122:3344:105::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::a]:32345", - "dataset": { - "pool_name": "oxp_d664c9e8-bc81-4225-a618-a8ae2d057186" - } - } - }, - "root": "/pool/ext/ad493851-2d11-4c2d-8d75-989579d9616a/crypt/zone" - }, - { - "zone": { - "id": "51c6dc8d-b1a4-454a-9b19-01e45eb0b599", - "underlay_address": "fd00:1122:3344:105::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::d]:32345", - "dataset": { - "pool_name": "oxp_f5f85537-eb25-4d0e-8e94-b775c41abd73" - } - } - }, - "root": "/pool/ext/4f1eafe9-b28d-49d3-83e2-ceac8721d6b5/crypt/zone" - }, - { - "zone": { - "id": "8cbffa61-0bd0-4ad2-bd7d-30fe0dd57469", - "underlay_address": "fd00:1122:3344:105::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::9]:32345", - "dataset": { - "pool_name": "oxp_88abca38-3f61-4d4b-80a1-4ea3e4827f84" - } - } - }, - "root": "/pool/ext/88abca38-3f61-4d4b-80a1-4ea3e4827f84/crypt/zone" - }, - { - "zone": { - "id": "2177f37f-2ac9-4e66-bf74-a10bd91f4d33", - "underlay_address": "fd00:1122:3344:105::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::6]:32345", - "dataset": { - "pool_name": "oxp_59e20871-4670-40d6-8ff4-aa97899fc991" - } - } - }, - "root": "/pool/ext/4f1eafe9-b28d-49d3-83e2-ceac8721d6b5/crypt/zone" - }, - { - "zone": { - "id": "e4e43855-4879-4910-a2ba-40f625c1cc2d", - "underlay_address": "fd00:1122:3344:105::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::b]:32345", - "dataset": { - "pool_name": "oxp_967d2f05-b141-44f5-837d-9b2aa67ee128" - } - } - }, - "root": "/pool/ext/6b6f34cd-6d3d-4832-a4e6-3df112c97133/crypt/zone" - }, - { - "zone": { - "id": "8d2517e1-f9ad-40f2-abb9-2f5122839910", - "underlay_address": "fd00:1122:3344:105::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::7]:32345", - "dataset": { - "pool_name": "oxp_ad493851-2d11-4c2d-8d75-989579d9616a" - } - } - }, - "root": "/pool/ext/88abca38-3f61-4d4b-80a1-4ea3e4827f84/crypt/zone" - }, - { - "zone": { - "id": "44cb3698-a7b1-4388-9165-ac76082ec8bc", - "underlay_address": "fd00:1122:3344:105::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::5]:32345", - "dataset": { - "pool_name": "oxp_4292a83c-8c1f-4b2e-9120-72e0c510bf3c" - } - } - }, - "root": "/pool/ext/24dab7f5-164a-47f3-a878-f32ab1e68cce/crypt/zone" - }, - { - "zone": { - "id": "931b5c86-9d72-4518-bfd6-97863152ac65", - "underlay_address": "fd00:1122:3344:105::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::c]:32345", - "dataset": { - "pool_name": "oxp_6b6f34cd-6d3d-4832-a4e6-3df112c97133" - } - } - }, - "root": "/pool/ext/ad493851-2d11-4c2d-8d75-989579d9616a/crypt/zone" - }, - { - "zone": { - "id": "ac568073-1889-463e-8cc4-cfed16ce2a34", - "underlay_address": "fd00:1122:3344:105::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:105::8]:32345", - "dataset": { - "pool_name": "oxp_4f1eafe9-b28d-49d3-83e2-ceac8721d6b5" - } - } - }, - "root": "/pool/ext/4292a83c-8c1f-4b2e-9120-72e0c510bf3c/crypt/zone" - }, - { - "zone": { - "id": "e8f86fbb-864e-4d5a-961c-b50b54ae853e", - "underlay_address": "fd00:1122:3344:105::3", - "zone_type": { - "type": "cockroach_db", - "address": 
"[fd00:1122:3344:105::3]:32221", - "dataset": { - "pool_name": "oxp_24dab7f5-164a-47f3-a878-f32ab1e68cce" - } - } - }, - "root": "/pool/ext/4f1eafe9-b28d-49d3-83e2-ceac8721d6b5/crypt/zone" - }, - { - "zone": { - "id": "c79caea0-37b1-49d6-ae6e-8cf849d91374", - "underlay_address": "fd00:1122:3344:105::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:105::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/24dab7f5-164a-47f3-a878-f32ab1e68cce/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled24.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled24.json deleted file mode 100644 index 9968abe6d9..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled24.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "d2b1e468-bc3c-4d08-b855-ae3327465375", - "underlay_address": "fd00:1122:3344:106::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::3]:32345", - "dataset": { - "pool_name": "oxp_9db196bf-828d-4e55-a2c1-dd9d579d3908" - } - } - }, - "root": "/pool/ext/74df4c92-edbb-4431-a770-1d015110e66b/crypt/zone" - }, - { - "zone": { - "id": "61f94a16-79fd-42e3-b225-a4dc67228437", - "underlay_address": "fd00:1122:3344:106::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::6]:32345", - "dataset": { - "pool_name": "oxp_d77d5b08-5f70-496a-997b-b38804dc3b8a" - } - } - }, - "root": "/pool/ext/daf9e3cd-5a40-4eba-a0f6-4f94dab37dae/crypt/zone" - }, - { - "zone": { - "id": "7d32ef34-dec5-4fd8-899e-20bbc473a3ee", - "underlay_address": "fd00:1122:3344:106::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::7]:32345", - "dataset": { - "pool_name": "oxp_50c1b653-6231-41fe-b3cf-b7ba709a0746" - } - } - }, - "root": "/pool/ext/9db196bf-828d-4e55-a2c1-dd9d579d3908/crypt/zone" - }, - { - "zone": { - "id": "c34b7ae5-26b9-4651-a3c4-20bba2bd0d2c", - "underlay_address": "fd00:1122:3344:106::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::5]:32345", - "dataset": { - "pool_name": "oxp_88aea92c-ab92-44c1-9471-eb8e30e075d3" - } - } - }, - "root": "/pool/ext/8da316d4-6b18-4980-a0a8-6e76e72cc40d/crypt/zone" - }, - { - "zone": { - "id": "36472be8-9a70-4c14-bd02-439b725cec1a", - "underlay_address": "fd00:1122:3344:106::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::8]:32345", - "dataset": { - "pool_name": "oxp_54544b3a-1513-4db2-911e-7c1eb4b12385" - } - } - }, - "root": "/pool/ext/54544b3a-1513-4db2-911e-7c1eb4b12385/crypt/zone" - }, - { - "zone": { - "id": "2548f8ab-5255-4334-a1fb-5d7d95213129", - "underlay_address": "fd00:1122:3344:106::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::9]:32345", - "dataset": { - "pool_name": "oxp_08050450-967f-431c-9a12-0d051aff020e" - } - } - }, - "root": "/pool/ext/08050450-967f-431c-9a12-0d051aff020e/crypt/zone" - }, - { - "zone": { - "id": "1455c069-853c-49cd-853a-3ea81b89acd4", - "underlay_address": "fd00:1122:3344:106::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::c]:32345", - "dataset": { - "pool_name": 
"oxp_8da316d4-6b18-4980-a0a8-6e76e72cc40d" - } - } - }, - "root": "/pool/ext/08050450-967f-431c-9a12-0d051aff020e/crypt/zone" - }, - { - "zone": { - "id": "27c0244b-f91a-46c3-bc96-e8eec009371e", - "underlay_address": "fd00:1122:3344:106::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::b]:32345", - "dataset": { - "pool_name": "oxp_daf9e3cd-5a40-4eba-a0f6-4f94dab37dae" - } - } - }, - "root": "/pool/ext/74df4c92-edbb-4431-a770-1d015110e66b/crypt/zone" - }, - { - "zone": { - "id": "9e46d837-1e0f-42b6-a352-84e6946b8734", - "underlay_address": "fd00:1122:3344:106::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::4]:32345", - "dataset": { - "pool_name": "oxp_74df4c92-edbb-4431-a770-1d015110e66b" - } - } - }, - "root": "/pool/ext/15f94c39-d48c-41f6-a913-cc1d04aef1a2/crypt/zone" - }, - { - "zone": { - "id": "b972fcd4-c1b3-4b3c-9e24-f59c7a7cb192", - "underlay_address": "fd00:1122:3344:106::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:106::a]:32345", - "dataset": { - "pool_name": "oxp_15f94c39-d48c-41f6-a913-cc1d04aef1a2" - } - } - }, - "root": "/pool/ext/74df4c92-edbb-4431-a770-1d015110e66b/crypt/zone" - }, - { - "zone": { - "id": "e1c8c655-1950-42d5-ae1f-a4ce84854bbc", - "underlay_address": "fd00:1122:3344:106::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:106::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/15f94c39-d48c-41f6-a913-cc1d04aef1a2/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled25.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled25.json deleted file mode 100644 index 8deca6b56a..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled25.json +++ /dev/null @@ -1,196 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "10b80058-9b2e-4d6c-8a1a-a61a8258c12f", - "underlay_address": "fd00:1122:3344:118::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:118::9]:32345", - "dataset": { - "pool_name": "oxp_953c19bb-9fff-4488-8a7b-29de9994a948" - } - } - }, - "root": "/pool/ext/a78caf97-6145-4908-83b5-a03a6d2e0ac4/crypt/zone" - }, - { - "zone": { - "id": "f58fef96-7b5e-40c2-9482-669088a19209", - "underlay_address": "fd00:1122:3344:118::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:118::d]:32345", - "dataset": { - "pool_name": "oxp_d7976706-d6ed-4465-8b04-450c96d8feec" - } - } - }, - "root": "/pool/ext/d7976706-d6ed-4465-8b04-450c96d8feec/crypt/zone" - }, - { - "zone": { - "id": "624f1168-47b6-4aa1-84da-e20a0d74d783", - "underlay_address": "fd00:1122:3344:118::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:118::b]:32345", - "dataset": { - "pool_name": "oxp_a78caf97-6145-4908-83b5-a03a6d2e0ac4" - } - } - }, - "root": "/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone" - }, - { - "zone": { - "id": "8ea85412-19b4-45c1-a53c-027ddd629296", - "underlay_address": "fd00:1122:3344:118::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:118::6]:32345", - "dataset": { - "pool_name": "oxp_d5f4c903-155a-4c91-aadd-6039a4f64821" - } - } - }, - "root": 
"/pool/ext/7d2a7685-c1c9-4d2d-a2bb-df65d96ea3e2/crypt/zone" - }, - { - "zone": { - "id": "fd226b82-71d7-4719-b32c-a6c7abe28a2a", - "underlay_address": "fd00:1122:3344:118::3", - "zone_type": { - "type": "external_dns", - "dataset": { - "pool_name": "oxp_84a80b58-70e9-439c-9558-5b343d9a4b53" - }, - "http_address": "[fd00:1122:3344:118::3]:5353", - "dns_address": "45.154.216.34:53", - "nic": { - "id": "7f72b6fd-1120-44dc-b3a7-f727502ba47c", - "kind": { - "type": "service", - "id": "fd226b82-71d7-4719-b32c-a6c7abe28a2a" - }, - "name": "external-dns-fd226b82-71d7-4719-b32c-a6c7abe28a2a", - "ip": "172.30.1.6", - "mac": "A8:40:25:FF:9E:D1", - "subnet": "172.30.1.0/24", - "vni": 100, - "primary": true, - "slot": 0 - } - } - }, - "root": "/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone" - }, - { - "zone": { - "id": "08d0c38d-f0d9-45b9-856d-b85059fe5f07", - "underlay_address": "fd00:1122:3344:118::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:118::4]:32345", - "dataset": { - "pool_name": "oxp_84a80b58-70e9-439c-9558-5b343d9a4b53" - } - } - }, - "root": "/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone" - }, - { - "zone": { - "id": "5de7d3fd-4a3f-4fdd-b6b2-d1186e16dce5", - "underlay_address": "fd00:1122:3344:118::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:118::7]:32345", - "dataset": { - "pool_name": "oxp_d76e058f-2d1e-4b15-b3a0-e5509a246876" - } - } - }, - "root": "/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone" - }, - { - "zone": { - "id": "5d0f5cad-10b3-497c-903b-eeeabce920e2", - "underlay_address": "fd00:1122:3344:118::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:118::8]:32345", - "dataset": { - "pool_name": "oxp_3a3ad639-8800-4951-bc2a-201d269e47a2" - } - } - }, - "root": "/pool/ext/3a3ad639-8800-4951-bc2a-201d269e47a2/crypt/zone" - }, - { - "zone": { - "id": "39f9cefa-801c-4843-9fb9-05446ffbdd1a", - "underlay_address": "fd00:1122:3344:118::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:118::a]:32345", - "dataset": { - "pool_name": "oxp_7d2a7685-c1c9-4d2d-a2bb-df65d96ea3e2" - } - } - }, - "root": "/pool/ext/a78caf97-6145-4908-83b5-a03a6d2e0ac4/crypt/zone" - }, - { - "zone": { - "id": "0711e710-7fdd-4e68-94c8-294b8677e804", - "underlay_address": "fd00:1122:3344:118::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:118::5]:32345", - "dataset": { - "pool_name": "oxp_a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d" - } - } - }, - "root": "/pool/ext/3a3ad639-8800-4951-bc2a-201d269e47a2/crypt/zone" - }, - { - "zone": { - "id": "318a62cc-5c6c-4805-9fb6-c0f6a75ce31c", - "underlay_address": "fd00:1122:3344:118::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:118::c]:32345", - "dataset": { - "pool_name": "oxp_1d5f0ba3-6b31-4cea-a9a9-2065a538887d" - } - } - }, - "root": "/pool/ext/d7976706-d6ed-4465-8b04-450c96d8feec/crypt/zone" - }, - { - "zone": { - "id": "463d0498-85b9-40eb-af96-d99af58a587c", - "underlay_address": "fd00:1122:3344:118::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:118::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/d5f4c903-155a-4c91-aadd-6039a4f64821/crypt/zone" - } - ] -} \ No newline at end of file 
diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled26.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled26.json deleted file mode 100644 index a3c5d97b53..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled26.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "d8b3de97-cc79-48f6-83ad-02017c21223b", - "underlay_address": "fd00:1122:3344:119::3", - "zone_type": { - "type": "crucible_pantry", - "address": "[fd00:1122:3344:119::3]:17000" - } - }, - "root": "/pool/ext/e0faea44-8b5c-40b0-bb75-a1aec1a10377/crypt/zone" - }, - { - "zone": { - "id": "adba1a3b-5bac-44d5-aa5a-879dc6eadb5f", - "underlay_address": "fd00:1122:3344:119::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:119::c]:32345", - "dataset": { - "pool_name": "oxp_21c339c3-6461-4bdb-8b0e-c0f9f08ee10b" - } - } - }, - "root": "/pool/ext/f5c73c28-2168-4321-b737-4ca6663155c9/crypt/zone" - }, - { - "zone": { - "id": "42bb9833-5c39-4aba-b2c4-da2ca1287728", - "underlay_address": "fd00:1122:3344:119::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:119::a]:32345", - "dataset": { - "pool_name": "oxp_1f91451d-a466-4c9a-a6e6-0abd7985595f" - } - } - }, - "root": "/pool/ext/21c339c3-6461-4bdb-8b0e-c0f9f08ee10b/crypt/zone" - }, - { - "zone": { - "id": "197695e1-d949-4982-b679-6e5c9ab4bcc7", - "underlay_address": "fd00:1122:3344:119::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:119::b]:32345", - "dataset": { - "pool_name": "oxp_e0faea44-8b5c-40b0-bb75-a1aec1a10377" - } - } - }, - "root": "/pool/ext/b31e1815-cae0-4145-940c-874fff63bdd5/crypt/zone" - }, - { - "zone": { - "id": "bf99d4f8-edf1-4de5-98d4-8e6a24965005", - "underlay_address": "fd00:1122:3344:119::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:119::8]:32345", - "dataset": { - "pool_name": "oxp_ef2c3afb-6962-4f6b-b567-14766bbd9ec0" - } - } - }, - "root": "/pool/ext/21c339c3-6461-4bdb-8b0e-c0f9f08ee10b/crypt/zone" - }, - { - "zone": { - "id": "390d1853-8be9-4987-b8b6-f022999bf4e7", - "underlay_address": "fd00:1122:3344:119::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:119::7]:32345", - "dataset": { - "pool_name": "oxp_06eed00a-d8d3-4b9d-84c9-23fce535f63e" - } - } - }, - "root": "/pool/ext/ef2c3afb-6962-4f6b-b567-14766bbd9ec0/crypt/zone" - }, - { - "zone": { - "id": "76fe2161-90df-41b5-9c94-067de9c29db1", - "underlay_address": "fd00:1122:3344:119::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:119::4]:32345", - "dataset": { - "pool_name": "oxp_f5c73c28-2168-4321-b737-4ca6663155c9" - } - } - }, - "root": "/pool/ext/ef2c3afb-6962-4f6b-b567-14766bbd9ec0/crypt/zone" - }, - { - "zone": { - "id": "f49dc522-2b13-4055-964c-8315671096aa", - "underlay_address": "fd00:1122:3344:119::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:119::d]:32345", - "dataset": { - "pool_name": "oxp_662c278b-7f5f-4c7e-91ff-70207e8a307b" - } - } - }, - "root": "/pool/ext/1f91451d-a466-4c9a-a6e6-0abd7985595f/crypt/zone" - }, - { - "zone": { - "id": "08cc7bd6-368e-4d16-a619-28b17eff35af", - "underlay_address": "fd00:1122:3344:119::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:119::9]:32345", - "dataset": { - "pool_name": "oxp_5516b9ac-b139-40da-aa3b-f094568ba095" - } - } - }, - "root": "/pool/ext/06eed00a-d8d3-4b9d-84c9-23fce535f63e/crypt/zone" - }, - { - "zone": { - "id": 
"74b0613f-bce8-4922-93e0-b5bfccfc8443", - "underlay_address": "fd00:1122:3344:119::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:119::5]:32345", - "dataset": { - "pool_name": "oxp_b31e1815-cae0-4145-940c-874fff63bdd5" - } - } - }, - "root": "/pool/ext/21c339c3-6461-4bdb-8b0e-c0f9f08ee10b/crypt/zone" - }, - { - "zone": { - "id": "55fcfc62-8435-475f-a2aa-29373901b993", - "underlay_address": "fd00:1122:3344:119::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:119::6]:32345", - "dataset": { - "pool_name": "oxp_eadf6a03-1028-4d48-ac0d-0d27ef2c8c0f" - } - } - }, - "root": "/pool/ext/1f91451d-a466-4c9a-a6e6-0abd7985595f/crypt/zone" - }, - { - "zone": { - "id": "d52ccea3-6d7f-43a6-a19f-e0409f4e9cdc", - "underlay_address": "fd00:1122:3344:119::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:119::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/f5c73c28-2168-4321-b737-4ca6663155c9/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled27.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled27.json deleted file mode 100644 index 193df7a567..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled27.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "095e612f-e218-4a16-aa6e-98c3d69a470a", - "underlay_address": "fd00:1122:3344:10d::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10d::a]:32345", - "dataset": { - "pool_name": "oxp_9f657858-623f-4d78-9841-6e620b5ede30" - } - } - }, - "root": "/pool/ext/2d086b51-2b77-4bc7-adc6-43586ea38ce9/crypt/zone" - }, - { - "zone": { - "id": "de818730-0e3b-4567-94e7-344bd9b6f564", - "underlay_address": "fd00:1122:3344:10d::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10d::3]:32345", - "dataset": { - "pool_name": "oxp_ba6ab301-07e1-4d35-80ac-59612f2c2bdb" - } - } - }, - "root": "/pool/ext/7cee2806-e898-47d8-b568-e276a6e271f8/crypt/zone" - }, - { - "zone": { - "id": "6a21dc3c-3a9d-4520-9a91-7d8f2737bcd4", - "underlay_address": "fd00:1122:3344:10d::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10d::4]:32345", - "dataset": { - "pool_name": "oxp_7cee2806-e898-47d8-b568-e276a6e271f8" - } - } - }, - "root": "/pool/ext/cef23d87-31ed-40d5-99b8-12d7be8e46e7/crypt/zone" - }, - { - "zone": { - "id": "e01b7f45-b8d7-4944-ba5b-41fb699889a9", - "underlay_address": "fd00:1122:3344:10d::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10d::b]:32345", - "dataset": { - "pool_name": "oxp_d9af8878-50bd-4425-95d9-e6556ce92cfa" - } - } - }, - "root": "/pool/ext/6fe9bcaa-88cb-451d-b086-24a3ad53fa22/crypt/zone" - }, - { - "zone": { - "id": "4271ef62-d319-4e80-b157-915321cec8c7", - "underlay_address": "fd00:1122:3344:10d::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10d::c]:32345", - "dataset": { - "pool_name": "oxp_ba8ee7dd-cdfb-48bd-92ce-4dc45e070930" - } - } - }, - "root": "/pool/ext/9f657858-623f-4d78-9841-6e620b5ede30/crypt/zone" - }, - { - "zone": { - "id": "6bdcc159-aeb9-4903-9486-dd8b43a3dc16", - "underlay_address": "fd00:1122:3344:10d::8", - 
"zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10d::8]:32345", - "dataset": { - "pool_name": "oxp_5b03a5dc-bb5a-4bf4-bc21-0af849cd1dab" - } - } - }, - "root": "/pool/ext/d9af8878-50bd-4425-95d9-e6556ce92cfa/crypt/zone" - }, - { - "zone": { - "id": "85540e54-cdd7-4baa-920c-5cf54cbc1f83", - "underlay_address": "fd00:1122:3344:10d::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10d::7]:32345", - "dataset": { - "pool_name": "oxp_ee24f9a6-84ab-49a5-a28f-e394abfcaa95" - } - } - }, - "root": "/pool/ext/9f657858-623f-4d78-9841-6e620b5ede30/crypt/zone" - }, - { - "zone": { - "id": "750d1a0b-6a14-46c5-9a0b-a504caefb198", - "underlay_address": "fd00:1122:3344:10d::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10d::9]:32345", - "dataset": { - "pool_name": "oxp_cef23d87-31ed-40d5-99b8-12d7be8e46e7" - } - } - }, - "root": "/pool/ext/ba8ee7dd-cdfb-48bd-92ce-4dc45e070930/crypt/zone" - }, - { - "zone": { - "id": "b5996893-1a9a-434e-a257-d702694f058b", - "underlay_address": "fd00:1122:3344:10d::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10d::6]:32345", - "dataset": { - "pool_name": "oxp_2d086b51-2b77-4bc7-adc6-43586ea38ce9" - } - } - }, - "root": "/pool/ext/7cee2806-e898-47d8-b568-e276a6e271f8/crypt/zone" - }, - { - "zone": { - "id": "8b36686a-b98d-451a-9124-a3583000a83a", - "underlay_address": "fd00:1122:3344:10d::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10d::5]:32345", - "dataset": { - "pool_name": "oxp_6fe9bcaa-88cb-451d-b086-24a3ad53fa22" - } - } - }, - "root": "/pool/ext/9f657858-623f-4d78-9841-6e620b5ede30/crypt/zone" - }, - { - "zone": { - "id": "88d695a2-c8c1-41af-85b0-77424f4d650d", - "underlay_address": "fd00:1122:3344:10d::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:10d::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/ba6ab301-07e1-4d35-80ac-59612f2c2bdb/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled28.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled28.json deleted file mode 100644 index 210b388a19..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled28.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "a126365d-f459-43bf-9f99-dbe1c4cdecf8", - "underlay_address": "fd00:1122:3344:113::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:113::4]:32345", - "dataset": { - "pool_name": "oxp_c99eabb2-6815-416a-9660-87e2609b357a" - } - } - }, - "root": "/pool/ext/6461a450-f043-4d1e-bc03-4a68ed5fe94a/crypt/zone" - }, - { - "zone": { - "id": "52f57ef8-546a-43bd-a0f3-8c42b99c37a6", - "underlay_address": "fd00:1122:3344:113::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:113::3]:32345", - "dataset": { - "pool_name": "oxp_f6530e9c-6d64-44fa-93d5-ae427916fbf1" - } - } - }, - "root": "/pool/ext/97662260-6b62-450f-9d7e-42f7dee5d568/crypt/zone" - }, - { - "zone": { - "id": "3ee87855-9423-43ff-800a-fa4fdbf1d956", - "underlay_address": "fd00:1122:3344:113::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:113::a]:32345", - 
"dataset": { - "pool_name": "oxp_6461a450-f043-4d1e-bc03-4a68ed5fe94a" - } - } - }, - "root": "/pool/ext/9515dc86-fe62-4d4f-b38d-b3461cc042fc/crypt/zone" - }, - { - "zone": { - "id": "55d0ddf9-9b24-4a7a-b97f-248e240f9ba6", - "underlay_address": "fd00:1122:3344:113::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:113::5]:32345", - "dataset": { - "pool_name": "oxp_97662260-6b62-450f-9d7e-42f7dee5d568" - } - } - }, - "root": "/pool/ext/9515dc86-fe62-4d4f-b38d-b3461cc042fc/crypt/zone" - }, - { - "zone": { - "id": "014cad37-56a7-4b2a-9c9e-505b15b4de85", - "underlay_address": "fd00:1122:3344:113::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:113::b]:32345", - "dataset": { - "pool_name": "oxp_8529ce8e-21d2-4b23-b9fd-6b90c7ae4f90" - } - } - }, - "root": "/pool/ext/6461a450-f043-4d1e-bc03-4a68ed5fe94a/crypt/zone" - }, - { - "zone": { - "id": "e14fb192-aaab-42ab-aa86-c85f13955940", - "underlay_address": "fd00:1122:3344:113::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:113::6]:32345", - "dataset": { - "pool_name": "oxp_5a9455ca-fb01-4549-9a70-7579c031779d" - } - } - }, - "root": "/pool/ext/f6530e9c-6d64-44fa-93d5-ae427916fbf1/crypt/zone" - }, - { - "zone": { - "id": "14540609-9371-442b-8486-88c244e97cd4", - "underlay_address": "fd00:1122:3344:113::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:113::8]:32345", - "dataset": { - "pool_name": "oxp_2916d6f3-8775-4887-a6d3-f9723982756f" - } - } - }, - "root": "/pool/ext/8529ce8e-21d2-4b23-b9fd-6b90c7ae4f90/crypt/zone" - }, - { - "zone": { - "id": "97a6b35f-0af9-41eb-93a1-f8bc5dbba357", - "underlay_address": "fd00:1122:3344:113::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:113::7]:32345", - "dataset": { - "pool_name": "oxp_9515dc86-fe62-4d4f-b38d-b3461cc042fc" - } - } - }, - "root": "/pool/ext/8529ce8e-21d2-4b23-b9fd-6b90c7ae4f90/crypt/zone" - }, - { - "zone": { - "id": "5734aa24-cb66-4b0a-9eb2-564646f8d729", - "underlay_address": "fd00:1122:3344:113::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:113::9]:32345", - "dataset": { - "pool_name": "oxp_9f889a6c-17b1-4edd-9659-458d91439dc1" - } - } - }, - "root": "/pool/ext/a5074e7f-8d3b-40e0-a79e-dbd9af9d5693/crypt/zone" - }, - { - "zone": { - "id": "ba86eca1-1427-4540-b4a6-1d9a0e1bc656", - "underlay_address": "fd00:1122:3344:113::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:113::c]:32345", - "dataset": { - "pool_name": "oxp_a5074e7f-8d3b-40e0-a79e-dbd9af9d5693" - } - } - }, - "root": "/pool/ext/2916d6f3-8775-4887-a6d3-f9723982756f/crypt/zone" - }, - { - "zone": { - "id": "6634dbc4-d22f-40a4-8cd3-4f271d781fa1", - "underlay_address": "fd00:1122:3344:113::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:113::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/a5074e7f-8d3b-40e0-a79e-dbd9af9d5693/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled29.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled29.json deleted file mode 100644 index ccd1bd65be..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled29.json +++ /dev/null @@ -1,184 +0,0 @@ -{ 
- "omicron_generation": 2, - "ledger_generation": 5, - "zones": [ - { - "zone": { - "id": "1cdd1ebf-9321-4f2d-914c-1e617f60b41a", - "underlay_address": "fd00:1122:3344:120::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:120::8]:32345", - "dataset": { - "pool_name": "oxp_74046573-78a2-46b4-86dc-40bb2ee29dd5" - } - } - }, - "root": "/pool/ext/c1f0a9e4-ea10-4fd9-8b6d-79a2bacfec5e/crypt/zone" - }, - { - "zone": { - "id": "720a0d08-d1c0-43ba-af86-f2dac1a53639", - "underlay_address": "fd00:1122:3344:120::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:120::c]:32345", - "dataset": { - "pool_name": "oxp_068d2790-1044-41ed-97a5-b493490b14d1" - } - } - }, - "root": "/pool/ext/86cd16cf-d00d-40bc-b14a-8220b1e11476/crypt/zone" - }, - { - "zone": { - "id": "d9f0b97b-2cef-4155-b45f-7db89263e4cf", - "underlay_address": "fd00:1122:3344:120::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:120::9]:32345", - "dataset": { - "pool_name": "oxp_8171bf0d-e61e-43f9-87d6-ec8833b80102" - } - } - }, - "root": "/pool/ext/86cd16cf-d00d-40bc-b14a-8220b1e11476/crypt/zone" - }, - { - "zone": { - "id": "018edff1-0d95-45a3-9a01-39c419bec55a", - "underlay_address": "fd00:1122:3344:120::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:120::b]:32345", - "dataset": { - "pool_name": "oxp_0b11e026-f265-49a0-935f-7b234c19c789" - } - } - }, - "root": "/pool/ext/35db8700-d6a7-498c-9d2c-08eb9ab41b7c/crypt/zone" - }, - { - "zone": { - "id": "f8cc1c1e-a556-436c-836d-42052101c38a", - "underlay_address": "fd00:1122:3344:120::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:120::3]:32345", - "dataset": { - "pool_name": "oxp_ed8e5a26-5591-405a-b792-408f5b16e444" - } - } - }, - "root": "/pool/ext/1069bdee-fe5a-4164-a856-ff8ae56c07fb/crypt/zone" - }, - { - "zone": { - "id": "f9600313-fac0-45a1-a1b5-02dd6af468b9", - "underlay_address": "fd00:1122:3344:120::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:120::4]:32345", - "dataset": { - "pool_name": "oxp_c1f0a9e4-ea10-4fd9-8b6d-79a2bacfec5e" - } - } - }, - "root": "/pool/ext/74046573-78a2-46b4-86dc-40bb2ee29dd5/crypt/zone" - }, - { - "zone": { - "id": "869e4f7c-5312-4b98-bacc-1508f236bf5a", - "underlay_address": "fd00:1122:3344:120::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:120::6]:32345", - "dataset": { - "pool_name": "oxp_04aea8dc-4316-432f-a13a-d7d9b2efa3f2" - } - } - }, - "root": "/pool/ext/0b11e026-f265-49a0-935f-7b234c19c789/crypt/zone" - }, - { - "zone": { - "id": "31ed5a0c-7caf-4825-b730-85ee94fe27f1", - "underlay_address": "fd00:1122:3344:120::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:120::a]:32345", - "dataset": { - "pool_name": "oxp_86cd16cf-d00d-40bc-b14a-8220b1e11476" - } - } - }, - "root": "/pool/ext/04aea8dc-4316-432f-a13a-d7d9b2efa3f2/crypt/zone" - }, - { - "zone": { - "id": "7e5a3c39-152a-4270-b01e-9e144cca4aaa", - "underlay_address": "fd00:1122:3344:120::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:120::5]:32345", - "dataset": { - "pool_name": "oxp_1069bdee-fe5a-4164-a856-ff8ae56c07fb" - } - } - }, - "root": "/pool/ext/04aea8dc-4316-432f-a13a-d7d9b2efa3f2/crypt/zone" - }, - { - "zone": { - "id": "9a03a386-7304-4a86-bee8-153ef643195e", - "underlay_address": "fd00:1122:3344:120::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:120::7]:32345", - "dataset": { - "pool_name": "oxp_35db8700-d6a7-498c-9d2c-08eb9ab41b7c" - } - 
} - }, - "root": "/pool/ext/068d2790-1044-41ed-97a5-b493490b14d1/crypt/zone" - }, - { - "zone": { - "id": "a800d0a7-1020-481c-8be8-ecfd28b7a2be", - "underlay_address": "fd00:1122:3344:120::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:120::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/c1f0a9e4-ea10-4fd9-8b6d-79a2bacfec5e/crypt/zone" - }, - { - "zone": { - "id": "be469efd-8e07-4b8e-bcee-6fd33373cdef", - "underlay_address": "fd00:1122:3344:3::1", - "zone_type": { - "type": "internal_dns", - "dataset": { - "pool_name": "oxp_ed8e5a26-5591-405a-b792-408f5b16e444" - }, - "http_address": "[fd00:1122:3344:3::1]:5353", - "dns_address": "[fd00:1122:3344:3::1]:53", - "gz_address": "fd00:1122:3344:3::2", - "gz_address_index": 2 - } - }, - "root": "/pool/ext/068d2790-1044-41ed-97a5-b493490b14d1/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled3.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled3.json deleted file mode 100644 index 5da6d95389..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled3.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "19d091b8-e005-4ff4-97e1-026de95e3667", - "underlay_address": "fd00:1122:3344:10f::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10f::c]:32345", - "dataset": { - "pool_name": "oxp_11a63469-4f57-4976-8620-0055bf82dc97" - } - } - }, - "root": "/pool/ext/6a73a62c-c636-4557-af45-042cb287aee6/crypt/zone" - }, - { - "zone": { - "id": "57d77171-104e-4977-b2f9-9b529ee7f8a0", - "underlay_address": "fd00:1122:3344:10f::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10f::8]:32345", - "dataset": { - "pool_name": "oxp_7f3060af-058f-4f52-ab80-902bd13e7ef4" - } - } - }, - "root": "/pool/ext/7f3060af-058f-4f52-ab80-902bd13e7ef4/crypt/zone" - }, - { - "zone": { - "id": "b0371ccf-67da-4562-baf2-eaabe5243e9b", - "underlay_address": "fd00:1122:3344:10f::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10f::7]:32345", - "dataset": { - "pool_name": "oxp_58ae04cb-26ff-4e30-a20d-9f847bafba4d" - } - } - }, - "root": "/pool/ext/125ddcda-f94b-46bc-a10a-94e9acf40265/crypt/zone" - }, - { - "zone": { - "id": "ae3791ff-2657-4252-bd61-58ec5dc237cd", - "underlay_address": "fd00:1122:3344:10f::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10f::9]:32345", - "dataset": { - "pool_name": "oxp_125ddcda-f94b-46bc-a10a-94e9acf40265" - } - } - }, - "root": "/pool/ext/58ae04cb-26ff-4e30-a20d-9f847bafba4d/crypt/zone" - }, - { - "zone": { - "id": "73f865dc-5db7-48c6-9dc4-dff56dd8c045", - "underlay_address": "fd00:1122:3344:10f::3", - "zone_type": { - "type": "crucible_pantry", - "address": "[fd00:1122:3344:10f::3]:17000" - } - }, - "root": "/pool/ext/11a63469-4f57-4976-8620-0055bf82dc97/crypt/zone" - }, - { - "zone": { - "id": "e5d0170a-0d60-4c51-8f72-4c301979690e", - "underlay_address": "fd00:1122:3344:10f::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10f::6]:32345", - "dataset": { - "pool_name": "oxp_efe4cbab-2a39-4d7d-ae6c-83eb3ab8d4b5" - } - } - }, - "root": 
"/pool/ext/6a73a62c-c636-4557-af45-042cb287aee6/crypt/zone" - }, - { - "zone": { - "id": "ea6894de-c575-43bc-86e9-65b8a58499ff", - "underlay_address": "fd00:1122:3344:10f::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10f::a]:32345", - "dataset": { - "pool_name": "oxp_a87dc882-8b88-4a99-9628-5db79072cffa" - } - } - }, - "root": "/pool/ext/11a63469-4f57-4976-8620-0055bf82dc97/crypt/zone" - }, - { - "zone": { - "id": "3081dc99-4fa9-4238-adfa-b9ca381c1f7b", - "underlay_address": "fd00:1122:3344:10f::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10f::b]:32345", - "dataset": { - "pool_name": "oxp_6a73a62c-c636-4557-af45-042cb287aee6" - } - } - }, - "root": "/pool/ext/a87dc882-8b88-4a99-9628-5db79072cffa/crypt/zone" - }, - { - "zone": { - "id": "b4a3d7c8-487d-4d76-ae4e-a6a51595a5a6", - "underlay_address": "fd00:1122:3344:10f::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10f::d]:32345", - "dataset": { - "pool_name": "oxp_a12f87ee-9918-4269-9de4-4bad4fb41caa" - } - } - }, - "root": "/pool/ext/a12f87ee-9918-4269-9de4-4bad4fb41caa/crypt/zone" - }, - { - "zone": { - "id": "5ebcee26-f76c-4206-8d81-584ac138d3b9", - "underlay_address": "fd00:1122:3344:10f::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10f::4]:32345", - "dataset": { - "pool_name": "oxp_27f1917e-fb69-496a-9d40-8ef0d0c0ee55" - } - } - }, - "root": "/pool/ext/58ae04cb-26ff-4e30-a20d-9f847bafba4d/crypt/zone" - }, - { - "zone": { - "id": "90b2bc57-3a2a-4117-bb6d-7eda7542329a", - "underlay_address": "fd00:1122:3344:10f::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10f::5]:32345", - "dataset": { - "pool_name": "oxp_a222e405-40f6-4fdd-9146-94f7d94ed08a" - } - } - }, - "root": "/pool/ext/a12f87ee-9918-4269-9de4-4bad4fb41caa/crypt/zone" - }, - { - "zone": { - "id": "0fb540af-58d3-4abc-bfad-e49765c2b1ee", - "underlay_address": "fd00:1122:3344:10f::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:10f::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/58ae04cb-26ff-4e30-a20d-9f847bafba4d/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled30.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled30.json deleted file mode 100644 index c92a638b85..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled30.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "dda0f1c6-84a5-472c-b350-a799c8d3d0eb", - "underlay_address": "fd00:1122:3344:115::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:115::8]:32345", - "dataset": { - "pool_name": "oxp_028b6c9e-5a0e-43d2-a8ed-a5946cf62924" - } - } - }, - "root": "/pool/ext/b8d84b9c-a65e-4c86-8196-69da5317ae63/crypt/zone" - }, - { - "zone": { - "id": "157672f9-113f-48b7-9808-dff3c3e67dcd", - "underlay_address": "fd00:1122:3344:115::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:115::a]:32345", - "dataset": { - "pool_name": "oxp_4fdca201-b37e-4072-a1cc-3cb7705954eb" - } - } - }, - "root": "/pool/ext/b8d84b9c-a65e-4c86-8196-69da5317ae63/crypt/zone" - }, - { - "zone": { - "id": 
"5a7d4f67-a70f-4d8b-8d35-4dc600991fb5", - "underlay_address": "fd00:1122:3344:115::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:115::5]:32345", - "dataset": { - "pool_name": "oxp_11a991e5-19a9-48b0-8186-34249ef67957" - } - } - }, - "root": "/pool/ext/1e9c9764-aaa4-4681-b110-a937b4c52748/crypt/zone" - }, - { - "zone": { - "id": "c7036645-b680-4816-834f-8ae1af24c159", - "underlay_address": "fd00:1122:3344:115::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:115::b]:32345", - "dataset": { - "pool_name": "oxp_0780be56-c13d-4c6a-a1ac-37753a0da820" - } - } - }, - "root": "/pool/ext/80a8d756-ee22-4c88-8b5b-4a46f7eca249/crypt/zone" - }, - { - "zone": { - "id": "45e47e4b-708f-40b5-a8c8-fbfd73696d45", - "underlay_address": "fd00:1122:3344:115::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:115::7]:32345", - "dataset": { - "pool_name": "oxp_80a8d756-ee22-4c88-8b5b-4a46f7eca249" - } - } - }, - "root": "/pool/ext/4fdca201-b37e-4072-a1cc-3cb7705954eb/crypt/zone" - }, - { - "zone": { - "id": "e805b0c1-3f80-49da-8dc1-caaf843e5003", - "underlay_address": "fd00:1122:3344:115::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:115::c]:32345", - "dataset": { - "pool_name": "oxp_d54e1ed7-e589-4413-a487-6e9a257104e7" - } - } - }, - "root": "/pool/ext/d54e1ed7-e589-4413-a487-6e9a257104e7/crypt/zone" - }, - { - "zone": { - "id": "e47d3f81-3df6-4c35-bec6-41277bc74c07", - "underlay_address": "fd00:1122:3344:115::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:115::4]:32345", - "dataset": { - "pool_name": "oxp_b8d84b9c-a65e-4c86-8196-69da5317ae63" - } - } - }, - "root": "/pool/ext/772b3aaa-3501-4dc7-9b3d-048b8b1f7970/crypt/zone" - }, - { - "zone": { - "id": "2a796a69-b061-44c7-b2df-35bc611f10f5", - "underlay_address": "fd00:1122:3344:115::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:115::6]:32345", - "dataset": { - "pool_name": "oxp_73abe9e0-d38e-48fc-bdec-b094bfa5670d" - } - } - }, - "root": "/pool/ext/028b6c9e-5a0e-43d2-a8ed-a5946cf62924/crypt/zone" - }, - { - "zone": { - "id": "4e1d2af1-8ef4-4762-aa80-b08da08b45bb", - "underlay_address": "fd00:1122:3344:115::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:115::3]:32345", - "dataset": { - "pool_name": "oxp_772b3aaa-3501-4dc7-9b3d-048b8b1f7970" - } - } - }, - "root": "/pool/ext/d54e1ed7-e589-4413-a487-6e9a257104e7/crypt/zone" - }, - { - "zone": { - "id": "fb1b10d5-b7cb-416d-98fc-b5d3bc02d495", - "underlay_address": "fd00:1122:3344:115::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:115::9]:32345", - "dataset": { - "pool_name": "oxp_1e9c9764-aaa4-4681-b110-a937b4c52748" - } - } - }, - "root": "/pool/ext/b8d84b9c-a65e-4c86-8196-69da5317ae63/crypt/zone" - }, - { - "zone": { - "id": "5155463c-8a09-45a5-ad1b-817f2e93b284", - "underlay_address": "fd00:1122:3344:115::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:115::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/772b3aaa-3501-4dc7-9b3d-048b8b1f7970/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled31.json 
b/sled-agent/tests/output/new-zones-ledgers/rack3-sled31.json deleted file mode 100644 index 5e38262740..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled31.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "a0eae689-8e6b-4297-bb3d-8b7ffc5c4a07", - "underlay_address": "fd00:1122:3344:102::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::c]:32345", - "dataset": { - "pool_name": "oxp_274cb567-fd74-4e00-b9c7-6ca367b3fda4" - } - } - }, - "root": "/pool/ext/1443b190-de16-42b0-b881-e87e875dd507/crypt/zone" - }, - { - "zone": { - "id": "9cea406d-451e-4328-9052-b58487f799a5", - "underlay_address": "fd00:1122:3344:102::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::b]:32345", - "dataset": { - "pool_name": "oxp_89c7f72e-632c-462b-a515-01cd80683711" - } - } - }, - "root": "/pool/ext/274cb567-fd74-4e00-b9c7-6ca367b3fda4/crypt/zone" - }, - { - "zone": { - "id": "9c7dad7e-7f60-4bf4-8efc-0883a17e7cf6", - "underlay_address": "fd00:1122:3344:102::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::6]:32345", - "dataset": { - "pool_name": "oxp_2c8e5637-b989-4b8f-82ac-ff2e9102b560" - } - } - }, - "root": "/pool/ext/1443b190-de16-42b0-b881-e87e875dd507/crypt/zone" - }, - { - "zone": { - "id": "73015cba-79c6-4a67-97d8-fa0819cbf750", - "underlay_address": "fd00:1122:3344:102::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::a]:32345", - "dataset": { - "pool_name": "oxp_fa62108e-f7bb-4f6d-86f3-8094a1ea8352" - } - } - }, - "root": "/pool/ext/2c8e5637-b989-4b8f-82ac-ff2e9102b560/crypt/zone" - }, - { - "zone": { - "id": "f9ca3097-072e-4e7f-9f50-eb7c7ae39b6f", - "underlay_address": "fd00:1122:3344:102::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::5]:32345", - "dataset": { - "pool_name": "oxp_42c6602c-2ccf-48ce-8344-693c832fd693" - } - } - }, - "root": "/pool/ext/2c8e5637-b989-4b8f-82ac-ff2e9102b560/crypt/zone" - }, - { - "zone": { - "id": "e7855e05-a125-4a80-ac2c-8a2db96e1bf8", - "underlay_address": "fd00:1122:3344:102::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::7]:32345", - "dataset": { - "pool_name": "oxp_1f72afd3-d2aa-46a8-b81a-54dbcc2f6317" - } - } - }, - "root": "/pool/ext/42c6602c-2ccf-48ce-8344-693c832fd693/crypt/zone" - }, - { - "zone": { - "id": "e5de9bc9-e996-4fea-8318-ad7a8a6be4a3", - "underlay_address": "fd00:1122:3344:102::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::4]:32345", - "dataset": { - "pool_name": "oxp_1443b190-de16-42b0-b881-e87e875dd507" - } - } - }, - "root": "/pool/ext/89c7f72e-632c-462b-a515-01cd80683711/crypt/zone" - }, - { - "zone": { - "id": "cd0d0aac-44ff-4566-9260-a64ae6cecef4", - "underlay_address": "fd00:1122:3344:102::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::8]:32345", - "dataset": { - "pool_name": "oxp_92c0d1f6-cb4d-4ddb-b5ba-979fb3491812" - } - } - }, - "root": "/pool/ext/89c7f72e-632c-462b-a515-01cd80683711/crypt/zone" - }, - { - "zone": { - "id": "a8230592-0e7a-46c8-a653-7587a27f05bf", - "underlay_address": "fd00:1122:3344:102::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::9]:32345", - "dataset": { - "pool_name": "oxp_1b7873de-99fd-454f-b576-bff695524133" - } - } - }, - "root": "/pool/ext/92c0d1f6-cb4d-4ddb-b5ba-979fb3491812/crypt/zone" - }, - { - "zone": { - "id": 
"c19ffbb1-4dc1-4825-a3cf-080e9b543b16", - "underlay_address": "fd00:1122:3344:102::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:102::d]:32345", - "dataset": { - "pool_name": "oxp_67823df7-511c-4984-b98c-7a8f5c40c22d" - } - } - }, - "root": "/pool/ext/1443b190-de16-42b0-b881-e87e875dd507/crypt/zone" - }, - { - "zone": { - "id": "ff30fe7c-51f3-43b9-a788-d8f94a7bb028", - "underlay_address": "fd00:1122:3344:102::3", - "zone_type": { - "type": "cockroach_db", - "address": "[fd00:1122:3344:102::3]:32221", - "dataset": { - "pool_name": "oxp_1443b190-de16-42b0-b881-e87e875dd507" - } - } - }, - "root": "/pool/ext/fa62108e-f7bb-4f6d-86f3-8094a1ea8352/crypt/zone" - }, - { - "zone": { - "id": "16b50c55-8117-4efd-aabf-0273677b89d5", - "underlay_address": "fd00:1122:3344:102::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:102::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/fa62108e-f7bb-4f6d-86f3-8094a1ea8352/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled4.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled4.json deleted file mode 100644 index 7c1d269d61..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled4.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "22452953-ee80-4659-a555-8e027bf205b0", - "underlay_address": "fd00:1122:3344:10c::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10c::4]:32345", - "dataset": { - "pool_name": "oxp_92ba1667-a6f7-4913-9b00-14825384c7bf" - } - } - }, - "root": "/pool/ext/ab62b941-5f84-42c7-929d-295b20efffe7/crypt/zone" - }, - { - "zone": { - "id": "9a5a2fcf-44a0-4468-979a-a71686cef627", - "underlay_address": "fd00:1122:3344:10c::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10c::3]:32345", - "dataset": { - "pool_name": "oxp_dbfdc981-1b81-4d7d-9449-9530890b199a" - } - } - }, - "root": "/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone" - }, - { - "zone": { - "id": "a014f12e-2636-4258-af76-e01d9b8d1c1f", - "underlay_address": "fd00:1122:3344:10c::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10c::b]:32345", - "dataset": { - "pool_name": "oxp_ab62b941-5f84-42c7-929d-295b20efffe7" - } - } - }, - "root": "/pool/ext/a624a843-1c4e-41c3-a1d2-4be7a6c57e9b/crypt/zone" - }, - { - "zone": { - "id": "431768b8-26ba-4ab4-b616-9e183bb79b8b", - "underlay_address": "fd00:1122:3344:10c::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10c::7]:32345", - "dataset": { - "pool_name": "oxp_7c121177-3210-4457-9b42-3657add6e166" - } - } - }, - "root": "/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone" - }, - { - "zone": { - "id": "22992c56-bd5a-4d0f-86c5-d6f8e87b7bbb", - "underlay_address": "fd00:1122:3344:10c::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10c::9]:32345", - "dataset": { - "pool_name": "oxp_842bdd28-196e-4b18-83db-68bd81176a44" - } - } - }, - "root": "/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone" - }, - { - "zone": { - "id": "de376149-aa45-4660-9ae6-15e8ba4a4233", - "underlay_address": "fd00:1122:3344:10c::5", - 
"zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10c::5]:32345", - "dataset": { - "pool_name": "oxp_25856a84-6707-4b94-81d1-b43d5bc990d7" - } - } - }, - "root": "/pool/ext/7c121177-3210-4457-9b42-3657add6e166/crypt/zone" - }, - { - "zone": { - "id": "ceeba69d-8c0a-47df-a37b-7f1b90f23016", - "underlay_address": "fd00:1122:3344:10c::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10c::a]:32345", - "dataset": { - "pool_name": "oxp_a624a843-1c4e-41c3-a1d2-4be7a6c57e9b" - } - } - }, - "root": "/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone" - }, - { - "zone": { - "id": "65293ce4-2e63-4336-9207-3c61f58667f9", - "underlay_address": "fd00:1122:3344:10c::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10c::c]:32345", - "dataset": { - "pool_name": "oxp_74ac4da9-cdae-4c08-8431-11211184aa09" - } - } - }, - "root": "/pool/ext/842bdd28-196e-4b18-83db-68bd81176a44/crypt/zone" - }, - { - "zone": { - "id": "e8f55a5d-65f9-436c-bc25-1d1a7070e876", - "underlay_address": "fd00:1122:3344:10c::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10c::6]:32345", - "dataset": { - "pool_name": "oxp_9bfe385c-16dd-4209-bc0b-f28ae75d58e3" - } - } - }, - "root": "/pool/ext/92ba1667-a6f7-4913-9b00-14825384c7bf/crypt/zone" - }, - { - "zone": { - "id": "2dfbd4c6-afbf-4c8c-bf40-764f02727852", - "underlay_address": "fd00:1122:3344:10c::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10c::8]:32345", - "dataset": { - "pool_name": "oxp_55eb093d-6b6f-418c-9767-09afe4c51fff" - } - } - }, - "root": "/pool/ext/dbfdc981-1b81-4d7d-9449-9530890b199a/crypt/zone" - }, - { - "zone": { - "id": "8c73baf7-1a58-4e2c-b4d1-966c89a18d03", - "underlay_address": "fd00:1122:3344:10c::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:10c::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/842bdd28-196e-4b18-83db-68bd81176a44/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled5.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled5.json deleted file mode 100644 index acbfa17eda..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled5.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "2f488e7b-fd93-48a6-8b2b-61f6e8336268", - "underlay_address": "fd00:1122:3344:101::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::b]:32345", - "dataset": { - "pool_name": "oxp_5840a3b7-f765-45d3-8a41-7f543f936bee" - } - } - }, - "root": "/pool/ext/dd084b76-1130-4ad3-9196-6b02be607fe9/crypt/zone" - }, - { - "zone": { - "id": "1ed5fd3f-933a-4921-a91f-5c286823f8d4", - "underlay_address": "fd00:1122:3344:101::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::a]:32345", - "dataset": { - "pool_name": "oxp_c1e807e7-b64a-4dbd-b845-ffed0b9a54f1" - } - } - }, - "root": "/pool/ext/be06ea9c-df86-4fec-b5dd-8809710893af/crypt/zone" - }, - { - "zone": { - "id": "0f8f1013-465d-4b49-b55d-f0b9bf6f789a", - "underlay_address": "fd00:1122:3344:101::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::6]:32345", - "dataset": 
{ - "pool_name": "oxp_4dfa7003-0305-47f5-b23d-88a228c1e12e" - } - } - }, - "root": "/pool/ext/be06ea9c-df86-4fec-b5dd-8809710893af/crypt/zone" - }, - { - "zone": { - "id": "2e4ef017-6c62-40bc-bab5-f2e01addad22", - "underlay_address": "fd00:1122:3344:101::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::7]:32345", - "dataset": { - "pool_name": "oxp_d94e9c58-e6d1-444b-b7d8-19ac17dea042" - } - } - }, - "root": "/pool/ext/c1e807e7-b64a-4dbd-b845-ffed0b9a54f1/crypt/zone" - }, - { - "zone": { - "id": "6a0baf13-a80b-4778-a0ab-a69cd851de2d", - "underlay_address": "fd00:1122:3344:101::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::9]:32345", - "dataset": { - "pool_name": "oxp_be06ea9c-df86-4fec-b5dd-8809710893af" - } - } - }, - "root": "/pool/ext/a9d419d4-5915-4a40-baa3-3512785de034/crypt/zone" - }, - { - "zone": { - "id": "391ec257-fd47-4cc8-9bfa-49a0747a9a67", - "underlay_address": "fd00:1122:3344:101::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::8]:32345", - "dataset": { - "pool_name": "oxp_a9d419d4-5915-4a40-baa3-3512785de034" - } - } - }, - "root": "/pool/ext/709d5d04-5dff-4558-8b5d-fbc2a7d83036/crypt/zone" - }, - { - "zone": { - "id": "fd8e615a-f170-4da9-b8d0-2a5a123d8682", - "underlay_address": "fd00:1122:3344:101::3", - "zone_type": { - "type": "crucible_pantry", - "address": "[fd00:1122:3344:101::3]:17000" - } - }, - "root": "/pool/ext/dd084b76-1130-4ad3-9196-6b02be607fe9/crypt/zone" - }, - { - "zone": { - "id": "f8a793f4-cd08-49ec-8fee-6bcd37092fdc", - "underlay_address": "fd00:1122:3344:101::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::c]:32345", - "dataset": { - "pool_name": "oxp_709d5d04-5dff-4558-8b5d-fbc2a7d83036" - } - } - }, - "root": "/pool/ext/d94e9c58-e6d1-444b-b7d8-19ac17dea042/crypt/zone" - }, - { - "zone": { - "id": "c67d44be-d6b8-4a08-a7e0-3ab300749ad6", - "underlay_address": "fd00:1122:3344:101::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::4]:32345", - "dataset": { - "pool_name": "oxp_231cd696-2839-4a9a-ae42-6d875a98a797" - } - } - }, - "root": "/pool/ext/709d5d04-5dff-4558-8b5d-fbc2a7d83036/crypt/zone" - }, - { - "zone": { - "id": "e91b4957-8165-451d-9fa5-090c3a39f199", - "underlay_address": "fd00:1122:3344:101::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::d]:32345", - "dataset": { - "pool_name": "oxp_dd084b76-1130-4ad3-9196-6b02be607fe9" - } - } - }, - "root": "/pool/ext/5840a3b7-f765-45d3-8a41-7f543f936bee/crypt/zone" - }, - { - "zone": { - "id": "5e737b6e-d33d-4a2c-b8c0-3cad9d05a68f", - "underlay_address": "fd00:1122:3344:101::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:101::5]:32345", - "dataset": { - "pool_name": "oxp_8fa4f837-c6f3-4c65-88d4-21eb3cd7ffee" - } - } - }, - "root": "/pool/ext/dd084b76-1130-4ad3-9196-6b02be607fe9/crypt/zone" - }, - { - "zone": { - "id": "7e6b7816-b1a6-40f3-894a-a5d5c0571dbb", - "underlay_address": "fd00:1122:3344:101::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:101::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/be06ea9c-df86-4fec-b5dd-8809710893af/crypt/zone" - } - ] -} \ No newline at end of file diff --git 
a/sled-agent/tests/output/new-zones-ledgers/rack3-sled6.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled6.json deleted file mode 100644 index ce4b6f03cd..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled6.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "eafffae7-69fd-49e1-9541-7cf237ab12b3", - "underlay_address": "fd00:1122:3344:110::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:110::3]:32345", - "dataset": { - "pool_name": "oxp_929404cd-2522-4440-b21c-91d466a9a7e0" - } - } - }, - "root": "/pool/ext/aff390ed-8d70-49fa-9000-5420b54ab118/crypt/zone" - }, - { - "zone": { - "id": "f4bccf15-d69f-402d-9bd2-7959a4cb2823", - "underlay_address": "fd00:1122:3344:110::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:110::9]:32345", - "dataset": { - "pool_name": "oxp_f80f96be-a3d7-490a-96a7-faf7da80a579" - } - } - }, - "root": "/pool/ext/6bcd54c8-d4a8-429d-8f17-cf02615eb063/crypt/zone" - }, - { - "zone": { - "id": "82e51c9d-c187-4baa-8307-e46eeafc5ff2", - "underlay_address": "fd00:1122:3344:110::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:110::5]:32345", - "dataset": { - "pool_name": "oxp_37d86199-6834-49d9-888a-88ff6f281b29" - } - } - }, - "root": "/pool/ext/d2e27e2a-2deb-42ae-84a7-c2d06f3aeb4f/crypt/zone" - }, - { - "zone": { - "id": "cf667caf-304c-40c4-acce-f0eb05d011ef", - "underlay_address": "fd00:1122:3344:110::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:110::8]:32345", - "dataset": { - "pool_name": "oxp_625c0110-644e-4d63-8321-b85ab5642260" - } - } - }, - "root": "/pool/ext/d2e27e2a-2deb-42ae-84a7-c2d06f3aeb4f/crypt/zone" - }, - { - "zone": { - "id": "14e60912-108e-4dd3-984e-2332a183b346", - "underlay_address": "fd00:1122:3344:110::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:110::b]:32345", - "dataset": { - "pool_name": "oxp_fa6470f5-0a4c-4fef-b0b1-57c8749c6cca" - } - } - }, - "root": "/pool/ext/6c5ab641-3bd4-4d8c-96f4-4f56c1045142/crypt/zone" - }, - { - "zone": { - "id": "1aacf923-c96f-4bab-acb0-63f28e86eef6", - "underlay_address": "fd00:1122:3344:110::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:110::c]:32345", - "dataset": { - "pool_name": "oxp_21b0f3ed-d27f-4996-968b-bf2b494d9308" - } - } - }, - "root": "/pool/ext/625c0110-644e-4d63-8321-b85ab5642260/crypt/zone" - }, - { - "zone": { - "id": "b9db0845-04d3-4dc1-84ba-224749562a6c", - "underlay_address": "fd00:1122:3344:110::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:110::6]:32345", - "dataset": { - "pool_name": "oxp_d2e27e2a-2deb-42ae-84a7-c2d06f3aeb4f" - } - } - }, - "root": "/pool/ext/aff390ed-8d70-49fa-9000-5420b54ab118/crypt/zone" - }, - { - "zone": { - "id": "38b51865-ee80-4e1b-a40b-3452951f9022", - "underlay_address": "fd00:1122:3344:110::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:110::7]:32345", - "dataset": { - "pool_name": "oxp_6bcd54c8-d4a8-429d-8f17-cf02615eb063" - } - } - }, - "root": "/pool/ext/37d86199-6834-49d9-888a-88ff6f281b29/crypt/zone" - }, - { - "zone": { - "id": "4bc441f6-f7e5-4d68-8751-53ef1e251c47", - "underlay_address": "fd00:1122:3344:110::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:110::a]:32345", - "dataset": { - "pool_name": "oxp_6c5ab641-3bd4-4d8c-96f4-4f56c1045142" - } - } - }, - "root": "/pool/ext/21b0f3ed-d27f-4996-968b-bf2b494d9308/crypt/zone" - }, 
- { - "zone": { - "id": "d2c20cf8-ed4c-4815-add9-45996364f721", - "underlay_address": "fd00:1122:3344:110::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:110::4]:32345", - "dataset": { - "pool_name": "oxp_aff390ed-8d70-49fa-9000-5420b54ab118" - } - } - }, - "root": "/pool/ext/6c5ab641-3bd4-4d8c-96f4-4f56c1045142/crypt/zone" - }, - { - "zone": { - "id": "1bb548cb-889a-411e-8c67-d1b785225180", - "underlay_address": "fd00:1122:3344:110::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:110::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/6bcd54c8-d4a8-429d-8f17-cf02615eb063/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled7.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled7.json deleted file mode 100644 index 62653d0767..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled7.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "2eb74fa3-71ec-484c-8ffa-3daeab0e4c78", - "underlay_address": "fd00:1122:3344:11d::3", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11d::3]:32345", - "dataset": { - "pool_name": "oxp_c6b63fea-e3e2-4806-b8dc-bdfe7b5c3d89" - } - } - }, - "root": "/pool/ext/9f20cbae-7a63-4c31-9386-2ac3cbe12030/crypt/zone" - }, - { - "zone": { - "id": "9f92bfcf-7435-44a6-8e77-0597f93cd0b4", - "underlay_address": "fd00:1122:3344:11d::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11d::7]:32345", - "dataset": { - "pool_name": "oxp_9fa336f1-2b69-4ebf-9553-e3bab7e3e6ef" - } - } - }, - "root": "/pool/ext/e05a6264-63f2-4961-bc14-57b4f65614c0/crypt/zone" - }, - { - "zone": { - "id": "1bf9aed4-9fd3-4d87-b8e7-7f066d25ec1d", - "underlay_address": "fd00:1122:3344:11d::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11d::b]:32345", - "dataset": { - "pool_name": "oxp_a5a52f47-9c9a-4519-83dc-abc56619495d" - } - } - }, - "root": "/pool/ext/cbcad26e-5e52-41b7-9875-1a84d30d8a15/crypt/zone" - }, - { - "zone": { - "id": "2a722aa7-cd8a-445d-83fe-57fc9b9a8249", - "underlay_address": "fd00:1122:3344:11d::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11d::8]:32345", - "dataset": { - "pool_name": "oxp_1f4b71eb-505f-4706-912c-b13dd3f2eafb" - } - } - }, - "root": "/pool/ext/a5a52f47-9c9a-4519-83dc-abc56619495d/crypt/zone" - }, - { - "zone": { - "id": "76af5b23-d833-435c-b848-2a09d9fad9a1", - "underlay_address": "fd00:1122:3344:11d::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11d::c]:32345", - "dataset": { - "pool_name": "oxp_cbcad26e-5e52-41b7-9875-1a84d30d8a15" - } - } - }, - "root": "/pool/ext/9f20cbae-7a63-4c31-9386-2ac3cbe12030/crypt/zone" - }, - { - "zone": { - "id": "3a412bf4-a385-4e66-9ada-a87f6536d6ca", - "underlay_address": "fd00:1122:3344:11d::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11d::4]:32345", - "dataset": { - "pool_name": "oxp_e05a6264-63f2-4961-bc14-57b4f65614c0" - } - } - }, - "root": "/pool/ext/e05a6264-63f2-4961-bc14-57b4f65614c0/crypt/zone" - }, - { - "zone": { - "id": "99a25fa7-8231-4a46-a6ec-ffc5281db1f8", - "underlay_address": 
"fd00:1122:3344:11d::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11d::5]:32345", - "dataset": { - "pool_name": "oxp_722494ab-9a2b-481b-ac11-292fded682a5" - } - } - }, - "root": "/pool/ext/e05a6264-63f2-4961-bc14-57b4f65614c0/crypt/zone" - }, - { - "zone": { - "id": "06c7ddc8-9b3e-48ef-9874-0c40874e9877", - "underlay_address": "fd00:1122:3344:11d::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11d::a]:32345", - "dataset": { - "pool_name": "oxp_8c3972d1-5b17-4479-88cc-1c33e4344160" - } - } - }, - "root": "/pool/ext/8c3972d1-5b17-4479-88cc-1c33e4344160/crypt/zone" - }, - { - "zone": { - "id": "1212b2dc-157d-4bd3-94af-fb5db1d91f24", - "underlay_address": "fd00:1122:3344:11d::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11d::9]:32345", - "dataset": { - "pool_name": "oxp_9f20cbae-7a63-4c31-9386-2ac3cbe12030" - } - } - }, - "root": "/pool/ext/977aa6c3-2026-4178-9948-e09f78008575/crypt/zone" - }, - { - "zone": { - "id": "b1fb5f2e-b20d-4f4c-9f6f-bbeb1a98dd50", - "underlay_address": "fd00:1122:3344:11d::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:11d::6]:32345", - "dataset": { - "pool_name": "oxp_977aa6c3-2026-4178-9948-e09f78008575" - } - } - }, - "root": "/pool/ext/722494ab-9a2b-481b-ac11-292fded682a5/crypt/zone" - }, - { - "zone": { - "id": "e68dde0f-0647-46db-ae1c-711835c13e25", - "underlay_address": "fd00:1122:3344:11d::d", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:11d::d]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/1f4b71eb-505f-4706-912c-b13dd3f2eafb/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled8.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled8.json deleted file mode 100644 index b848826231..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled8.json +++ /dev/null @@ -1,198 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "85c18b7c-a100-458c-b18d-ecfdacaefac4", - "underlay_address": "fd00:1122:3344:10e::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10e::5]:32345", - "dataset": { - "pool_name": "oxp_07b266bc-86c3-4a76-9522-8b34ba1ae78c" - } - } - }, - "root": "/pool/ext/5b88e44e-f886-4de8-8a6b-48ea5ed9d70b/crypt/zone" - }, - { - "zone": { - "id": "db303465-7879-4d86-8da8-a0c7162e5184", - "underlay_address": "fd00:1122:3344:10e::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10e::4]:32345", - "dataset": { - "pool_name": "oxp_e9488a32-880d-44a2-8948-db0b7e3a35b5" - } - } - }, - "root": "/pool/ext/8d798756-7200-4db4-9faf-f41b75106a63/crypt/zone" - }, - { - "zone": { - "id": "c44ce6be-512d-4104-9260-a5b8fe373937", - "underlay_address": "fd00:1122:3344:10e::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10e::9]:32345", - "dataset": { - "pool_name": "oxp_025dfc06-5aeb-407f-adc8-ba18dc9bba35" - } - } - }, - "root": "/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone" - }, - { - "zone": { - "id": "1cfdb5b6-e568-436a-a85f-7fecf1b8eef2", - "underlay_address": "fd00:1122:3344:10e::3", - "zone_type": { - "type": "nexus", - "internal_address": 
"[fd00:1122:3344:10e::3]:12221", - "external_ip": "45.154.216.36", - "nic": { - "id": "569754a2-a5e0-4aa8-90a7-2fa65f43b667", - "kind": { - "type": "service", - "id": "1cfdb5b6-e568-436a-a85f-7fecf1b8eef2" - }, - "name": "nexus-1cfdb5b6-e568-436a-a85f-7fecf1b8eef2", - "ip": "172.30.2.6", - "mac": "A8:40:25:FF:EC:6B", - "subnet": "172.30.2.0/24", - "vni": 100, - "primary": true, - "slot": 0 - }, - "external_tls": true, - "external_dns_servers": [ - "1.1.1.1", - "8.8.8.8" - ] - } - }, - "root": "/pool/ext/025dfc06-5aeb-407f-adc8-ba18dc9bba35/crypt/zone" - }, - { - "zone": { - "id": "44a68792-ca14-442e-b7a9-11970d50ba0e", - "underlay_address": "fd00:1122:3344:10e::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10e::a]:32345", - "dataset": { - "pool_name": "oxp_2a492098-7df3-4409-9466-561edb7aa99b" - } - } - }, - "root": "/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone" - }, - { - "zone": { - "id": "514cf0ca-6d23-434e-9785-446b83b2f029", - "underlay_address": "fd00:1122:3344:10e::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10e::7]:32345", - "dataset": { - "pool_name": "oxp_5b88e44e-f886-4de8-8a6b-48ea5ed9d70b" - } - } - }, - "root": "/pool/ext/5b88e44e-f886-4de8-8a6b-48ea5ed9d70b/crypt/zone" - }, - { - "zone": { - "id": "bc6d8347-8f64-4031-912c-932349df07fe", - "underlay_address": "fd00:1122:3344:10e::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10e::6]:32345", - "dataset": { - "pool_name": "oxp_1544ce68-3544-4cba-b3b6-1927d08b78a5" - } - } - }, - "root": "/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone" - }, - { - "zone": { - "id": "1ab0a4f5-99ad-4341-8c89-7fd03e5ccb08", - "underlay_address": "fd00:1122:3344:10e::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10e::b]:32345", - "dataset": { - "pool_name": "oxp_033eb462-968f-42ce-9c29-377bd40a3014" - } - } - }, - "root": "/pool/ext/9e1a0803-7453-4eac-91c9-d7891ecd634f/crypt/zone" - }, - { - "zone": { - "id": "d6f2520b-3d04-44d9-bd46-6ffccfcb46d2", - "underlay_address": "fd00:1122:3344:10e::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10e::8]:32345", - "dataset": { - "pool_name": "oxp_36e8d29c-1e88-4c2b-8f59-f312201067c3" - } - } - }, - "root": "/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone" - }, - { - "zone": { - "id": "d6da9d13-bfcf-469d-a99e-faeb5e30be32", - "underlay_address": "fd00:1122:3344:10e::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10e::c]:32345", - "dataset": { - "pool_name": "oxp_9e1a0803-7453-4eac-91c9-d7891ecd634f" - } - } - }, - "root": "/pool/ext/8d798756-7200-4db4-9faf-f41b75106a63/crypt/zone" - }, - { - "zone": { - "id": "a1dc59c2-5883-4fb8-83be-ac2d95d255d1", - "underlay_address": "fd00:1122:3344:10e::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10e::d]:32345", - "dataset": { - "pool_name": "oxp_8d798756-7200-4db4-9faf-f41b75106a63" - } - } - }, - "root": "/pool/ext/36e8d29c-1e88-4c2b-8f59-f312201067c3/crypt/zone" - }, - { - "zone": { - "id": "48f25dba-7392-44ce-9bb0-28489ebc44bc", - "underlay_address": "fd00:1122:3344:10e::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:10e::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } 
- }, - "root": "/pool/ext/5b88e44e-f886-4de8-8a6b-48ea5ed9d70b/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled9.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled9.json deleted file mode 100644 index 62d45a2f5a..0000000000 --- a/sled-agent/tests/output/new-zones-ledgers/rack3-sled9.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "omicron_generation": 2, - "ledger_generation": 4, - "zones": [ - { - "zone": { - "id": "b452e5e1-ab4c-4994-9679-ef21b3b4fee9", - "underlay_address": "fd00:1122:3344:10b::6", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::6]:32345", - "dataset": { - "pool_name": "oxp_d63a297d-ae6a-4072-9dca-dda404044989" - } - } - }, - "root": "/pool/ext/7c204111-31df-4c32-9a3e-780411f700fd/crypt/zone" - }, - { - "zone": { - "id": "e9826cdc-6d3a-4eff-b1b5-ec4364ebe6b9", - "underlay_address": "fd00:1122:3344:10b::3", - "zone_type": { - "type": "oximeter", - "address": "[fd00:1122:3344:10b::3]:12223" - } - }, - "root": "/pool/ext/7c204111-31df-4c32-9a3e-780411f700fd/crypt/zone" - }, - { - "zone": { - "id": "b0cde4a8-f27c-46e8-8355-756be9045afc", - "underlay_address": "fd00:1122:3344:10b::b", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::b]:32345", - "dataset": { - "pool_name": "oxp_07c1a8e7-51f5-4f12-a43d-734719fef92b" - } - } - }, - "root": "/pool/ext/1f6adf64-c9b9-4ed7-b3e2-37fb25624646/crypt/zone" - }, - { - "zone": { - "id": "e2f70cf6-e285-4212-9b01-77ebf2ca9219", - "underlay_address": "fd00:1122:3344:10b::d", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::d]:32345", - "dataset": { - "pool_name": "oxp_a809f28a-7f25-4362-bc56-0cbdd72af2cb" - } - } - }, - "root": "/pool/ext/92a1bd39-6e8a-4226-b9d0-e3e8a9b8504f/crypt/zone" - }, - { - "zone": { - "id": "b0949c9d-4aa1-4bc4-9cb3-5875b9166885", - "underlay_address": "fd00:1122:3344:10b::a", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::a]:32345", - "dataset": { - "pool_name": "oxp_af0cc12b-43c5-473a-89a7-28351fbbb430" - } - } - }, - "root": "/pool/ext/cf1594ed-7c0c-467c-b0af-a689dcb427a3/crypt/zone" - }, - { - "zone": { - "id": "7cea4d59-a8ca-4826-901d-8d5bd935dc09", - "underlay_address": "fd00:1122:3344:10b::9", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::9]:32345", - "dataset": { - "pool_name": "oxp_d75dae09-4992-4a61-ab7d-5ae1d2b068ba" - } - } - }, - "root": "/pool/ext/a809f28a-7f25-4362-bc56-0cbdd72af2cb/crypt/zone" - }, - { - "zone": { - "id": "08adaeee-c3b5-4cd8-8fbd-ac371b3101c9", - "underlay_address": "fd00:1122:3344:10b::4", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::4]:32345", - "dataset": { - "pool_name": "oxp_d9f23187-fbf9-4ea5-a103-bc112263a9a7" - } - } - }, - "root": "/pool/ext/7c204111-31df-4c32-9a3e-780411f700fd/crypt/zone" - }, - { - "zone": { - "id": "3da1ade5-3fcb-4e64-aa08-81ee8a9ef723", - "underlay_address": "fd00:1122:3344:10b::8", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::8]:32345", - "dataset": { - "pool_name": "oxp_1f6adf64-c9b9-4ed7-b3e2-37fb25624646" - } - } - }, - "root": "/pool/ext/07c1a8e7-51f5-4f12-a43d-734719fef92b/crypt/zone" - }, - { - "zone": { - "id": "816f26a7-4c28-4a39-b9ad-a036678520ab", - "underlay_address": "fd00:1122:3344:10b::7", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::7]:32345", - "dataset": { - "pool_name": "oxp_92a1bd39-6e8a-4226-b9d0-e3e8a9b8504f" - } - } - }, - "root": 
"/pool/ext/d9f23187-fbf9-4ea5-a103-bc112263a9a7/crypt/zone" - }, - { - "zone": { - "id": "839f9839-409f-45d3-b8a6-7085507b90f6", - "underlay_address": "fd00:1122:3344:10b::c", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::c]:32345", - "dataset": { - "pool_name": "oxp_7c204111-31df-4c32-9a3e-780411f700fd" - } - } - }, - "root": "/pool/ext/af0cc12b-43c5-473a-89a7-28351fbbb430/crypt/zone" - }, - { - "zone": { - "id": "c717c81f-a228-4412-a34e-90f8c491d847", - "underlay_address": "fd00:1122:3344:10b::5", - "zone_type": { - "type": "crucible", - "address": "[fd00:1122:3344:10b::5]:32345", - "dataset": { - "pool_name": "oxp_cf1594ed-7c0c-467c-b0af-a689dcb427a3" - } - } - }, - "root": "/pool/ext/d63a297d-ae6a-4072-9dca-dda404044989/crypt/zone" - }, - { - "zone": { - "id": "e1fa2023-6c86-40a4-ae59-a0de112cf7a9", - "underlay_address": "fd00:1122:3344:10b::e", - "zone_type": { - "type": "internal_ntp", - "address": "[fd00:1122:3344:10b::e]:123", - "ntp_servers": [ - "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", - "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" - ], - "dns_servers": [ - "fd00:1122:3344:1::1", - "fd00:1122:3344:2::1", - "fd00:1122:3344:3::1" - ], - "domain": null - } - }, - "root": "/pool/ext/d9f23187-fbf9-4ea5-a103-bc112263a9a7/crypt/zone" - } - ] -} \ No newline at end of file diff --git a/sled-hardware/src/disk.rs b/sled-hardware/src/disk.rs index d48dd88c3d..5dfd9e2c23 100644 --- a/sled-hardware/src/disk.rs +++ b/sled-hardware/src/disk.rs @@ -132,6 +132,54 @@ impl DiskPaths { } } +#[derive( + Debug, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, Deserialize, Serialize, +)] +pub struct DiskFirmware { + active_slot: u8, + next_active_slot: Option, + slot1_read_only: bool, + // NB: This vec is 0 indexed while active_slot and next_active_slot are + // referring to "slots" in terms of the NVMe spec which defines slots 1-7. + // If the active_slot is 1, then it will be slot_firmware_versions[0] in the + // vector. + slot_firmware_versions: Vec>, +} + +impl DiskFirmware { + pub fn active_slot(&self) -> u8 { + self.active_slot + } + + pub fn next_active_slot(&self) -> Option { + self.next_active_slot + } + + pub fn slot1_read_only(&self) -> bool { + self.slot1_read_only + } + + pub fn slots(&self) -> &[Option] { + self.slot_firmware_versions.as_slice() + } +} + +impl DiskFirmware { + pub fn new( + active_slot: u8, + next_active_slot: Option, + slot1_read_only: bool, + slots: Vec>, + ) -> Self { + Self { + active_slot, + next_active_slot, + slot1_read_only, + slot_firmware_versions: slots, + } + } +} + /// A disk which has been observed by monitoring hardware. /// /// No guarantees are made about the partitions which exist within this disk. @@ -147,6 +195,7 @@ pub struct UnparsedDisk { variant: DiskVariant, identity: DiskIdentity, is_boot_disk: bool, + firmware: DiskFirmware, } impl UnparsedDisk { @@ -157,6 +206,7 @@ impl UnparsedDisk { variant: DiskVariant, identity: DiskIdentity, is_boot_disk: bool, + firmware: DiskFirmware, ) -> Self { Self { paths: DiskPaths { devfs_path, dev_path }, @@ -164,6 +214,7 @@ impl UnparsedDisk { variant, identity, is_boot_disk, + firmware, } } @@ -190,6 +241,10 @@ impl UnparsedDisk { pub fn slot(&self) -> i64 { self.slot } + + pub fn firmware(&self) -> &DiskFirmware { + &self.firmware + } } /// A physical disk that is partitioned to contain exactly one zpool @@ -212,6 +267,7 @@ pub struct PooledDisk { // This embeds the assumtion that there is exactly one parsed zpool per // disk. 
pub zpool_name: ZpoolName, + pub firmware: DiskFirmware, } impl PooledDisk { @@ -252,6 +308,7 @@ impl PooledDisk { is_boot_disk: unparsed_disk.is_boot_disk, partitions, zpool_name, + firmware: unparsed_disk.firmware, }) } } diff --git a/sled-hardware/src/illumos/mod.rs b/sled-hardware/src/illumos/mod.rs index e9a47de29e..40d7e6aad5 100644 --- a/sled-hardware/src/illumos/mod.rs +++ b/sled-hardware/src/illumos/mod.rs @@ -2,12 +2,14 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +use crate::DiskFirmware; use crate::{ DendriteAsic, DiskVariant, HardwareUpdate, SledMode, UnparsedDisk, }; use camino::Utf8PathBuf; use gethostname::gethostname; use illumos_devinfo::{DevInfo, DevLinkType, DevLinks, Node, Property}; +use libnvme::{controller::Controller, Nvme}; use omicron_common::disk::DiskIdentity; use sled_hardware_types::Baseboard; use slog::debug; @@ -56,6 +58,24 @@ enum Error { #[error("Failed to issue request to sysconf: {0}")] SysconfError(#[from] sysconf::Error), + + #[error("Node {node} missing device instance")] + MissingNvmeDevinfoInstance { node: String }, + + #[error("Failed to init nvme handle: {0}")] + NvmeHandleInit(#[from] libnvme::NvmeInitError), + + #[error("libnvme error: {0}")] + Nvme(#[from] libnvme::NvmeError), + + #[error("libnvme controller error: {0}")] + NvmeController(#[from] libnvme::controller::NvmeControllerError), + + #[error("Unable to grab NVMe Controller lock")] + NvmeControllerLocked, + + #[error("Failed to get NVMe Controller's firmware log page: {0}")] + FirmwareLogPage(#[from] libnvme::firmware::FirmwareLogPageError), } const GIMLET_ROOT_NODE_NAME: &str = "Oxide,Gimlet"; @@ -105,7 +125,7 @@ impl TryFrom for BootStorageUnit { // A snapshot of information about the underlying hardware struct HardwareSnapshot { tofino: TofinoSnapshot, - disks: HashSet, + disks: HashMap, baseboard: Baseboard, } @@ -151,7 +171,7 @@ impl HardwareSnapshot { let tofino = get_tofino_snapshot(log, &mut device_info); // Monitor for block devices. - let mut disks = HashSet::new(); + let mut disks = HashMap::new(); let mut node_walker = device_info.walk_driver("blkdev"); while let Some(node) = node_walker.next().transpose().map_err(Error::DevInfo)? @@ -184,7 +204,7 @@ enum TofinoView { // which services are currently executing. struct HardwareView { tofino: TofinoView, - disks: HashSet, + disks: HashMap, baseboard: Option, online_processor_count: u32, usable_physical_ram_bytes: u64, @@ -199,7 +219,7 @@ impl HardwareView { fn new() -> Result { Ok(Self { tofino: TofinoView::Real(TofinoSnapshot::new()), - disks: HashSet::new(), + disks: HashMap::new(), baseboard: None, online_processor_count: sysconf::online_processor_count()?, usable_physical_ram_bytes: sysconf::usable_physical_ram_bytes()?, @@ -209,7 +229,7 @@ impl HardwareView { fn new_stub_tofino(active: bool) -> Result { Ok(Self { tofino: TofinoView::Stub { active }, - disks: HashSet::new(), + disks: HashMap::new(), baseboard: None, online_processor_count: sysconf::online_processor_count()?, usable_physical_ram_bytes: sysconf::usable_physical_ram_bytes()?, @@ -250,17 +270,38 @@ impl HardwareView { polled_hw: &HardwareSnapshot, updates: &mut Vec, ) { - // In old set, not in new set. - let removed = self.disks.difference(&polled_hw.disks); - // In new set, not in old set. - let added = polled_hw.disks.difference(&self.disks); + let mut added = Vec::new(); + let mut removed = Vec::new(); + let mut updated = Vec::new(); + + // Find new or updated disks.
+ for (key, value) in &polled_hw.disks { + match self.disks.get(&key) { + Some(found) => { + if value != found { + updated.push(value.clone()); + } + } + None => added.push(value.clone()), + } + } + + // Find disks which have been removed. + for (key, value) in &self.disks { + if !polled_hw.disks.contains_key(key) { + removed.push(value.clone()); + } + } use HardwareUpdate::*; for disk in removed { - updates.push(DiskRemoved(disk.clone())); + updates.push(DiskRemoved(disk)); } for disk in added { - updates.push(DiskAdded(disk.clone())); + updates.push(DiskAdded(disk)); + } + for disk in updated { + updates.push(DiskUpdated(disk)); } self.disks.clone_from(&polled_hw.disks); @@ -424,7 +465,7 @@ fn find_properties<'a, const N: usize>( fn poll_blkdev_node( log: &Logger, - disks: &mut HashSet, + disks: &mut HashMap, node: Node<'_>, boot_storage_unit: BootStorageUnit, ) -> Result<(), Error> { @@ -459,6 +500,13 @@ fn poll_blkdev_node( // We expect that the parent of the "blkdev" node is an "nvme" driver. let nvme_node = get_parent_node(&node, "nvme")?; + // Importantly we grab the NVMe instance and not the blkdev instance. + // Eventually we should switch the logic here to search for nvme instances + // and confirm that we only have one blkdev sibling: + // https://github.com/oxidecomputer/omicron/issues/5241 + let nvme_instance = nvme_node + .instance() + .ok_or(Error::MissingNvmeDevinfoInstance { node: node.node_name() })?; let vendor_id = i64_from_property(&find_properties(&nvme_node, ["vendor-id"])?[0])?; @@ -492,15 +540,42 @@ fn poll_blkdev_node( return Err(Error::UnrecognizedSlot { slot }); }; + let nvme = Nvme::new()?; + let controller = Controller::init_by_instance(&nvme, nvme_instance)?; + let controller_lock = match controller.try_read_lock() { + libnvme::controller::TryLockResult::Ok(locked) => locked, + // We should only hit this if something in the system has locked the + // controller in question for writing. + libnvme::controller::TryLockResult::Locked(_) => { + warn!( + log, + "NVMe Controller is already locked so we will try again + in the next hardware snapshot" + ); + return Err(Error::NvmeControllerLocked); + } + libnvme::controller::TryLockResult::Err(err) => { + return Err(Error::from(err)) + } + }; + let firmware_log_page = controller_lock.get_firmware_log_page()?; + let firmware = DiskFirmware::new( + firmware_log_page.active_slot, + firmware_log_page.next_active_slot, + firmware_log_page.slot1_is_read_only, + firmware_log_page.slot_iter().map(|s| s.map(str::to_string)).collect(), + ); + let disk = UnparsedDisk::new( Utf8PathBuf::from(&devfs_path), dev_path, slot, variant, - device_id, + device_id.clone(), slot_is_boot_disk(slot, boot_storage_unit), + firmware.clone(), ); - disks.insert(disk); + disks.insert(device_id, disk); Ok(()) } @@ -546,8 +621,11 @@ fn poll_device_tree( // UnparsedDisks. Add those to the HardwareSnapshot here if they // are missing (which they will be for non-gimlets). 
for observed_disk in nongimlet_observed_disks { - if !inner.disks.contains(observed_disk) { - inner.disks.insert(observed_disk.clone()); + let identity = observed_disk.identity(); + if !inner.disks.contains_key(identity) { + inner + .disks + .insert(identity.clone(), observed_disk.clone()); } } } @@ -707,7 +785,7 @@ impl HardwareManager { self.inner.lock().unwrap().usable_physical_ram_bytes } - pub fn disks(&self) -> HashSet { + pub fn disks(&self) -> HashMap { self.inner.lock().unwrap().disks.clone() } diff --git a/sled-hardware/src/illumos/partitions.rs b/sled-hardware/src/illumos/partitions.rs index 0308e842c0..1386d07866 100644 --- a/sled-hardware/src/illumos/partitions.rs +++ b/sled-hardware/src/illumos/partitions.rs @@ -75,6 +75,8 @@ pub enum NvmeFormattingError { NvmeInit(#[from] libnvme::NvmeInitError), #[error(transparent)] Nvme(#[from] libnvme::NvmeError), + #[error(transparent)] + NvmeController(#[from] libnvme::controller::NvmeControllerError), #[error("Device is missing expected LBA format")] LbaFormatMissing, #[error("Device has {0} active namespaces but we expected 1")] diff --git a/sled-hardware/src/lib.rs b/sled-hardware/src/lib.rs index 607f72e25c..d210fbb1ce 100644 --- a/sled-hardware/src/lib.rs +++ b/sled-hardware/src/lib.rs @@ -34,6 +34,7 @@ pub enum HardwareUpdate { TofinoUnloaded, DiskAdded(UnparsedDisk), DiskRemoved(UnparsedDisk), + DiskUpdated(UnparsedDisk), } // The type of networking 'ASIC' the Dendrite service is expected to manage diff --git a/sled-hardware/src/non_illumos/mod.rs b/sled-hardware/src/non_illumos/mod.rs index 3516962577..955be9a35e 100644 --- a/sled-hardware/src/non_illumos/mod.rs +++ b/sled-hardware/src/non_illumos/mod.rs @@ -10,7 +10,7 @@ use omicron_common::disk::DiskIdentity; use omicron_uuid_kinds::ZpoolUuid; use sled_hardware_types::Baseboard; use slog::Logger; -use std::collections::HashSet; +use std::collections::HashMap; use tokio::sync::broadcast; #[derive(Debug, thiserror::Error)] @@ -51,7 +51,7 @@ impl HardwareManager { unimplemented!("Accessing hardware unsupported on non-illumos"); } - pub fn disks(&self) -> HashSet { + pub fn disks(&self) -> HashMap { unimplemented!("Accessing hardware unsupported on non-illumos"); } diff --git a/sled-storage/src/disk.rs b/sled-storage/src/disk.rs index 608d3678da..c67cce0dfc 100644 --- a/sled-storage/src/disk.rs +++ b/sled-storage/src/disk.rs @@ -16,7 +16,8 @@ use omicron_uuid_kinds::ZpoolUuid; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use sled_hardware::{ - DiskVariant, Partition, PooledDisk, PooledDiskError, UnparsedDisk, + DiskFirmware, DiskVariant, Partition, PooledDisk, PooledDiskError, + UnparsedDisk, }; use slog::{info, Logger}; use uuid::Uuid; @@ -103,6 +104,11 @@ pub struct SyntheticDisk { // system. const SYNTHETIC_SLOT_OFFSET: i64 = 1024; +// A generic name for the firmware in slot1 of an NVMe device. +// +// bhyve for example uses "1.0" and marks slot1 as read-only. +const SYNTHETIC_FIRMWARE_SLOT1: &str = "synthetic 1.0"; + impl SyntheticDisk { // "Manages" a SyntheticDisk by ensuring that it has a Zpool and importing // it. If the zpool already exists, it is imported, but not re-created. 
@@ -151,6 +157,7 @@ pub struct RawSyntheticDisk { pub identity: DiskIdentity, pub variant: DiskVariant, pub slot: i64, + pub firmware: DiskFirmware, } impl RawSyntheticDisk { @@ -195,11 +202,19 @@ impl RawSyntheticDisk { model: format!("synthetic-model-{variant:?}"), }; + let firmware = DiskFirmware::new( + 1, + None, + true, + vec![Some(SYNTHETIC_FIRMWARE_SLOT1.to_string())], + ); + Ok(Self { path: path.into(), identity, variant, slot: slot + SYNTHETIC_SLOT_OFFSET, + firmware, }) } } @@ -278,6 +293,13 @@ impl RawDisk { Self::Synthetic(disk) => disk.slot, } } + + pub fn firmware(&self) -> &DiskFirmware { + match self { + RawDisk::Real(unparsed) => unparsed.firmware(), + RawDisk::Synthetic(synthetic) => &synthetic.firmware, + } + } } /// A physical [`PooledDisk`] or a [`SyntheticDisk`] that contains or is backed @@ -413,6 +435,24 @@ impl Disk { Self::Synthetic(disk) => disk.raw.slot, } } + + pub(crate) fn update_firmware_metadata(&mut self, raw_disk: &RawDisk) { + match self { + Disk::Real(pooled_disk) => { + pooled_disk.firmware = raw_disk.firmware().clone(); + } + Disk::Synthetic(synthetic_disk) => { + synthetic_disk.raw.firmware = raw_disk.firmware().clone(); + } + } + } + + pub fn firmware(&self) -> &DiskFirmware { + match self { + Disk::Real(disk) => &disk.firmware, + Disk::Synthetic(disk) => &disk.raw.firmware, + } + } } impl From for RawDisk { @@ -425,6 +465,7 @@ impl From for RawDisk { pooled_disk.variant, pooled_disk.identity, pooled_disk.is_boot_disk, + pooled_disk.firmware, )), Disk::Synthetic(synthetic_disk) => { RawDisk::Synthetic(synthetic_disk.raw) diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index 4f45f1771e..9e31568e00 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -101,6 +101,10 @@ pub(crate) enum StorageRequest { raw_disk: RawDisk, tx: DebugIgnore>>, }, + DetectedRawDiskUpdate { + raw_disk: RawDisk, + tx: DebugIgnore>>, + }, DetectedRawDiskRemoval { raw_disk: RawDisk, tx: DebugIgnore>>, @@ -187,6 +191,27 @@ impl StorageHandle { rx.map(|result| result.unwrap()) } + /// Updates a disk, if it's tracked by the storage manager, as well + /// as any associated zpools. + /// + /// Returns a future which completes once the notification has been + /// processed. Awaiting this future is optional. + pub async fn detected_raw_disk_update( + &self, + raw_disk: RawDisk, + ) -> impl Future> { + let (tx, rx) = oneshot::channel(); + self.tx + .send(StorageRequest::DetectedRawDiskUpdate { + raw_disk, + tx: tx.into(), + }) + .await + .unwrap(); + + rx.map(|result| result.unwrap()) + } + /// Ensures that the storage manager tracks exactly the provided disks. /// /// This acts similar to a batch [Self::detected_raw_disk] for all new disks, and @@ -388,6 +413,13 @@ impl StorageManager { } let _ = tx.0.send(result); } + StorageRequest::DetectedRawDiskUpdate { raw_disk, tx } => { + let result = self.detected_raw_disk_update(raw_disk).await; + if let Err(ref err) = &result { + warn!(self.log, "Failed to apply raw disk update"; "err" => ?err); + } + let _ = tx.0.send(result); + } StorageRequest::DetectedRawDiskRemoval { raw_disk, tx } => { self.detected_raw_disk_removal(raw_disk); let _ = tx.0.send(Ok(())); @@ -475,7 +507,7 @@ impl StorageManager { // coordination with the control plane at large. 
let needs_synchronization = matches!(raw_disk.variant(), DiskVariant::U2); - self.resources.insert_disk(raw_disk).await?; + self.resources.insert_or_update_disk(raw_disk).await?; if needs_synchronization { match self.state { @@ -501,6 +533,18 @@ impl StorageManager { Ok(()) } + /// Updates some information about the underlying disk within this sled. + /// + /// Things that can currently be updated: + /// - DiskFirmware + async fn detected_raw_disk_update( + &mut self, + raw_disk: RawDisk, + ) -> Result<(), Error> { + // We aren't worried about synchronizing as the disk should already be managed. + self.resources.insert_or_update_disk(raw_disk).await + } + async fn load_ledger(&self) -> Option> { let ledger_paths = self.all_omicron_disk_ledgers().await; let log = self.log.new(o!("request" => "load_ledger")); @@ -776,7 +820,7 @@ impl StorageManager { .resources .disks() .iter_all() - .filter_map(|(id, _variant, _slot)| { + .filter_map(|(id, _variant, _slot, _firmware)| { if !all_ids.contains(id) { Some(id.clone()) } else { @@ -863,7 +907,7 @@ mod tests { use crate::dataset::DatasetKind; use crate::disk::RawSyntheticDisk; use crate::manager_test_harness::StorageManagerTestHarness; - use crate::resources::DiskManagementError; + use crate::resources::{DiskManagementError, ManagedDisk}; use super::*; use camino_tempfile::tempdir_in; @@ -871,6 +915,7 @@ mod tests { use omicron_common::ledger; use omicron_test_utils::dev::test_setup_log; use omicron_uuid_kinds::ZpoolUuid; + use sled_hardware::DiskFirmware; use std::sync::atomic::Ordering; use uuid::Uuid; @@ -1001,6 +1046,64 @@ mod tests { logctx.cleanup_successful(); } + #[tokio::test] + async fn update_rawdisk_firmware() { + const FW_REV: &str = "firmware-2.0"; + illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); + let logctx = test_setup_log("update_u2_firmware"); + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; + harness.handle().key_manager_ready().await; + + // Add a representative scenario for a small sled: a U.2 and M.2. + let mut raw_disks = + harness.add_vdevs(&["u2_under_test.vdev", "m2_helping.vdev"]).await; + + // These disks should exist, but only the M.2 should have a zpool. + let all_disks_gen1 = harness.handle().get_latest_disks().await; + + for rd in &mut raw_disks { + if let RawDisk::Synthetic(ref mut disk) = rd { + let mut slots = disk.firmware.slots().to_vec(); + // "Install" a new firmware version into slot2 + slots.push(Some(FW_REV.to_string())); + disk.firmware = DiskFirmware::new( + disk.firmware.active_slot(), + disk.firmware.next_active_slot(), + disk.firmware.slot1_read_only(), + slots, + ); + } + harness.update_vdev(rd).await; + } + + let all_disks_gen2 = harness.handle().get_latest_disks().await; + + // Disks should now be different due to the mock firmware update. + assert_ne!(all_disks_gen1, all_disks_gen2); + + // Now let's verify we saw the correct firmware update.
+ for rd in &raw_disks { + let managed = + all_disks_gen2.values.get(rd.identity()).expect("disk exists"); + match managed { + ManagedDisk::ExplicitlyManaged(disk) + | ManagedDisk::ImplicitlyManaged(disk) => { + assert_eq!( + disk.firmware(), + rd.firmware(), + "didn't see firmware update" + ); + } + ManagedDisk::Unmanaged(disk) => { + assert_eq!(disk, rd, "didn't see firmware update"); + } + } + } + + harness.cleanup().await; + logctx.cleanup_successful(); + } + #[tokio::test] async fn wait_for_boot_disk() { illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); diff --git a/sled-storage/src/manager_test_harness.rs b/sled-storage/src/manager_test_harness.rs index a2180a95b5..74c2967a84 100644 --- a/sled-storage/src/manager_test_harness.rs +++ b/sled-storage/src/manager_test_harness.rs @@ -300,6 +300,18 @@ impl StorageManagerTestHarness { .expect("Failed to remove vdev"); } + // Update a vdev. + // + // Note: currently the only portion of a vdev that we update is the firmware + // metadata. + pub async fn update_vdev(&mut self, raw: &RawDisk) { + self.handle + .detected_raw_disk_update(raw.clone()) + .await + .await + .expect("Failed to update vdev"); + } + // Adds a vdev to the set of "tracked" devices. pub async fn add_vdev_as(&mut self, raw_disk: RawDisk) { self.handle diff --git a/sled-storage/src/resources.rs b/sled-storage/src/resources.rs index b44c8e5b53..5cc4672e1e 100644 --- a/sled-storage/src/resources.rs +++ b/sled-storage/src/resources.rs @@ -16,7 +16,7 @@ use omicron_common::disk::DiskIdentity; use omicron_uuid_kinds::ZpoolUuid; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use sled_hardware::DiskVariant; +use sled_hardware::{DiskFirmware, DiskVariant}; use slog::{info, o, warn, Logger}; use std::collections::BTreeMap; use std::sync::Arc; @@ -241,16 +241,17 @@ impl AllDisks { /// Returns an iterator over all disks, managed or not. pub fn iter_all( &self, - ) -> impl Iterator { + ) -> impl Iterator + { self.values.iter().map(|(identity, disk)| match disk { ManagedDisk::ExplicitlyManaged(disk) => { - (identity, disk.variant(), disk.slot()) + (identity, disk.variant(), disk.slot(), disk.firmware()) } ManagedDisk::ImplicitlyManaged(disk) => { - (identity, disk.variant(), disk.slot()) + (identity, disk.variant(), disk.slot(), disk.firmware()) } ManagedDisk::Unmanaged(raw) => { - (identity, raw.variant(), raw.slot()) + (identity, raw.variant(), raw.slot(), raw.firmware()) } }) } @@ -478,7 +479,7 @@ impl StorageResources { Ok(ManagedDisk::ExplicitlyManaged(disk)) } - /// Tracks a new disk. + /// Tracks a new disk, or updates an existing disk. /// /// For U.2s: Does not automatically attempt to manage disks -- for this, /// the caller will need to also invoke @@ -486,18 +487,49 @@ impl StorageResources { /// /// For M.2s: As no additional control plane guidance is necessary to adopt /// M.2s, these are automatically managed. - pub(crate) async fn insert_disk( + pub(crate) async fn insert_or_update_disk( &mut self, disk: RawDisk, ) -> Result<(), Error> { let disk_identity = disk.identity().clone(); info!(self.log, "Inserting disk"; "identity" => ?disk_identity); - if self.disks.values.contains_key(&disk_identity) { - info!(self.log, "Disk already exists"; "identity" => ?disk_identity); + + // This is a trade-off for simplicity even though we may be potentially + // cloning data before we know if there is a write action to perform. 
+ let disks = Arc::make_mut(&mut self.disks.values); + + // First check if there are any updates we need to apply to existing + // managed disks. + if let Some(managed) = disks.get_mut(&disk_identity) { + let mut updated = false; + match managed { + ManagedDisk::ExplicitlyManaged(mdisk) + | ManagedDisk::ImplicitlyManaged(mdisk) => { + let old = RawDisk::from(mdisk.clone()); + if old != disk { + mdisk.update_firmware_metadata(&disk); + updated = true; + } + } + ManagedDisk::Unmanaged(raw) => { + if raw != &disk { + *raw = disk; + updated = true; + } + } + }; + + if updated { + self.disk_updates.send_replace(self.disks.clone()); + } else { + info!(self.log, "Disk already exists and has no updates"; + "identity" => ?disk_identity); + } + return Ok(()); } - let disks = Arc::make_mut(&mut self.disks.values); + // If there's no update then we are inserting a new disk. match disk.variant() { DiskVariant::U2 => { disks.insert(disk_identity, ManagedDisk::Unmanaged(disk)); diff --git a/smf/nexus/multi-sled/config-partial.toml b/smf/nexus/multi-sled/config-partial.toml index d4612ba15e..e63eb411c3 100644 --- a/smf/nexus/multi-sled/config-partial.toml +++ b/smf/nexus/multi-sled/config-partial.toml @@ -5,8 +5,8 @@ [console] # Directory for static assets. Absolute path or relative to CWD. static_dir = "/var/nexus/static" -session_idle_timeout_minutes = 60 -session_absolute_timeout_minutes = 480 +session_idle_timeout_minutes = 480 # 8 hours +session_absolute_timeout_minutes = 1440 # 24 hours [authn] schemes_external = ["session_cookie", "access_token"] @@ -56,6 +56,7 @@ blueprints.period_secs_collect_crdb_node_ids = 180 sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 +region_replacement_driver.period_secs = 10 service_firewall_propagation.period_secs = 300 v2p_mapping_propagation.period_secs = 30 instance_watcher.period_secs = 30 diff --git a/smf/nexus/single-sled/config-partial.toml b/smf/nexus/single-sled/config-partial.toml index 3b158d0387..ced1da17b3 100644 --- a/smf/nexus/single-sled/config-partial.toml +++ b/smf/nexus/single-sled/config-partial.toml @@ -5,8 +5,8 @@ [console] # Directory for static assets. Absolute path or relative to CWD. static_dir = "/var/nexus/static" -session_idle_timeout_minutes = 60 -session_absolute_timeout_minutes = 480 +session_idle_timeout_minutes = 480 # 8 hours +session_absolute_timeout_minutes = 1440 # 24 hours [authn] schemes_external = ["session_cookie", "access_token"] @@ -56,6 +56,7 @@ blueprints.period_secs_collect_crdb_node_ids = 180 sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 +region_replacement_driver.period_secs = 10 service_firewall_propagation.period_secs = 300 v2p_mapping_propagation.period_secs = 30 instance_watcher.period_secs = 30 diff --git a/sp-sim/src/gimlet.rs b/sp-sim/src/gimlet.rs index 280248d034..4e0b264e64 100644 --- a/sp-sim/src/gimlet.rs +++ b/sp-sim/src/gimlet.rs @@ -1275,12 +1275,13 @@ impl SpHandler for Handler { "port" => ?port, "component" => ?component, ); - if component == SpComponent::ROT { - Ok(rot_slot_id_to_u16(self.rot_active_slot)) - } else { + match component { + SpComponent::ROT => Ok(rot_slot_id_to_u16(self.rot_active_slot)), + // The only active component is stage0 + SpComponent::STAGE0 => Ok(0), // The real SP returns `RequestUnsupportedForComponent` for anything // other than the RoT, including SP_ITSELF. 
- Err(SpError::RequestUnsupportedForComponent) + _ => Err(SpError::RequestUnsupportedForComponent), } } @@ -1300,16 +1301,27 @@ impl SpHandler for Handler { "slot" => slot, "persist" => persist, ); - if component == SpComponent::ROT { - self.rot_active_slot = rot_slot_id_from_u16(slot)?; - Ok(()) - } else if component == SpComponent::HOST_CPU_BOOT_FLASH { - self.update_state.set_active_host_slot(slot); - Ok(()) - } else { - // The real SP returns `RequestUnsupportedForComponent` for anything - // other than the RoT and host boot flash, including SP_ITSELF. - Err(SpError::RequestUnsupportedForComponent) + match component { + SpComponent::ROT => { + self.rot_active_slot = rot_slot_id_from_u16(slot)?; + Ok(()) + } + SpComponent::STAGE0 => { + if slot == 1 { + return Ok(()); + } else { + Err(SpError::RequestUnsupportedForComponent) + } + } + SpComponent::HOST_CPU_BOOT_FLASH => { + self.update_state.set_active_host_slot(slot); + Ok(()) + } + _ => { + // The real SP returns `RequestUnsupportedForComponent` for anything + // other than the RoT and host boot flash, including SP_ITSELF. + Err(SpError::RequestUnsupportedForComponent) + } } } diff --git a/tools/build-global-zone-packages.sh b/tools/build-global-zone-packages.sh deleted file mode 100755 index fe00e53383..0000000000 --- a/tools/build-global-zone-packages.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash - -set -eux - -TOOLS_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" - -# Use the default "out" dir in omicron to find the needed packages if one isn't given -tarball_src_dir="$(readlink -f "${1:-"$TOOLS_DIR/../out"}")" -# Stash the final tgz in the given src dir if a different target isn't given -out_dir="$(readlink -f "${2:-"$tarball_src_dir"}")" - -# Make sure needed packages exist -deps=( - "$tarball_src_dir/omicron-sled-agent.tar" - "$tarball_src_dir/mg-ddm-gz.tar" - "$tarball_src_dir/pumpkind-gz.tar" - "$tarball_src_dir/propolis-server.tar.gz" - "$tarball_src_dir/overlay.tar.gz" - "$tarball_src_dir/oxlog.tar" -) -for dep in "${deps[@]}"; do - if [[ ! -e $dep ]]; then - echo "Missing Global Zone dep: $(basename "$dep")" - exit 1 - fi -done - -# Assemble global zone files in a temporary directory. -tmp_gz=$(mktemp -d) -trap 'cd /; rm -rf "$tmp_gz"' EXIT # Cleanup on exit - -# Header file, identifying this is intended to be layered in the global zone. -# Within the ramdisk, this means that all files under "root/foo" should appear -# in the global zone as "/foo". -echo '{"v":"1","t":"layer"}' > "$tmp_gz/oxide.json" - -# Extract the sled-agent tarball for re-packaging into the layered GZ archive. -pkg_dir="$tmp_gz/root/opt/oxide/sled-agent" -mkdir -p "$pkg_dir" -cd "$pkg_dir" -tar -xvfz "$tarball_src_dir/omicron-sled-agent.tar" -# Ensure that the manifest for the sled agent exists in a location where it may -# be automatically initialized. -mkdir -p "$tmp_gz/root/lib/svc/manifest/site/" -mv pkg/manifest.xml "$tmp_gz/root/lib/svc/manifest/site/sled-agent.xml" -cd - -# Extract the mg-ddm tarball for re-packaging into the layered GZ archive. -pkg_dir="$tmp_gz/root/opt/oxide/mg-ddm" -mkdir -p "$pkg_dir" -cd "$pkg_dir" -tar -xvfz "$tarball_src_dir/mg-ddm-gz.tar" -cd - -# Extract the pumpkind tarball for re-packaging into the layered GZ archive. -pkg_dir="$tmp_gz/root/opt/oxide/pumpkind" -mkdir -p "$pkg_dir" -cd "$pkg_dir" -tar -xvfz "$tarball_src_dir/pumpkind-gz.tar" -cd - -# Extract the oxlog tarball for re-packaging into the layered GZ archive. 
-pkg_dir="$tmp_gz/root/opt/oxide/oxlog" -mkdir -p "$pkg_dir" -cd "$pkg_dir" -tar -xvfz "$tarball_src_dir/oxlog.tar" -cd - - -# propolis should be bundled with this OS: Put the propolis-server zone image -# under /opt/oxide in the gz. -cp "$tarball_src_dir/propolis-server.tar.gz" "$tmp_gz/root/opt/oxide" - -# The zone overlay should also be bundled. -cp "$tarball_src_dir/overlay.tar.gz" "$tmp_gz/root/opt/oxide" - -# Create the final output and we're done -cd "$tmp_gz" && tar cvfz "$out_dir"/global-zone-packages.tar.gz oxide.json root diff --git a/tools/build-trampoline-global-zone-packages.sh b/tools/build-trampoline-global-zone-packages.sh deleted file mode 100755 index ee8e7b3371..0000000000 --- a/tools/build-trampoline-global-zone-packages.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash - -set -eux - -TOOLS_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" - -# Use the default "out" dir in omicron to find the needed packages if one isn't given -tarball_src_dir="$(readlink -f "${1:-"$TOOLS_DIR/../out"}")" -# Stash the final tgz in the given src dir if a different target isn't given -out_dir="$(readlink -f "${2:-$tarball_src_dir}")" - -# Make sure needed packages exist -deps=( - "$tarball_src_dir"/installinator.tar - "$tarball_src_dir"/mg-ddm-gz.tar -) -for dep in "${deps[@]}"; do - if [[ ! -e $dep ]]; then - echo "Missing Trampoline Global Zone dep: $(basename "$dep")" - exit 1 - fi -done - -# Assemble global zone files in a temporary directory. -tmp_trampoline=$(mktemp -d) -trap 'cd /; rm -rf "$tmp_trampoline"' EXIT # Cleanup on exit - -# Header file, identifying this is intended to be layered in the global zone. -# Within the ramdisk, this means that all files under "root/foo" should appear -# in the global zone as "/foo". -echo '{"v":"1","t":"layer"}' > "$tmp_trampoline/oxide.json" - -# Extract the installinator tarball for re-packaging into the layered GZ archive. -pkg_dir="$tmp_trampoline/root/opt/oxide/installinator" -mkdir -p "$pkg_dir" -cd "$pkg_dir" -tar -xvfz "$tarball_src_dir/installinator.tar" -# Ensure that the manifest for the sled agent exists in a location where it may -# be automatically initialized. -mkdir -p "$tmp_trampoline/root/lib/svc/manifest/site/" -mv pkg/manifest.xml "$tmp_trampoline/root/lib/svc/manifest/site/installinator.xml" -cd - -# Extract the mg-ddm tarball for re-packaging into the layered GZ archive. 
-pkg_dir="$tmp_trampoline/root/opt/oxide/mg-ddm" -mkdir -p "$pkg_dir" -cd "$pkg_dir" -tar -xvfz "$tarball_src_dir/mg-ddm-gz.tar" -cd - - -# Create the final output and we're done -cd "$tmp_trampoline" && tar cvfz "$out_dir"/trampoline-global-zone-packages.tar.gz oxide.json root diff --git a/tools/clickhouse_checksums b/tools/clickhouse_checksums index afddb15cab..54ffaddb3d 100644 --- a/tools/clickhouse_checksums +++ b/tools/clickhouse_checksums @@ -1,3 +1,3 @@ -CIDL_MD5_DARWIN="3e20c3284b7e6b0cfcfedf622ecf547a" -CIDL_MD5_LINUX="f6c30a25a86deac3bad6c50dcf758fd5" -CIDL_MD5_ILLUMOS="409222de8ecb59e5dd97dcc942ccdffe" +CIDL_SHA256_DARWIN="275843f5942bf84a27cfededa2314d70d4a3300d7762045e1f1b5cd93e97c6a1" +CIDL_SHA256_LINUX="fdcf9b224123b65e4cee9ba5d5bee42538ada3deb10ce412d3b0e8da74e2a873" +CIDL_SHA256_ILLUMOS="29ad158e985b088f6b8987b33dac96dcbaed9bdffc95ae0e880273cc125320b3" diff --git a/tools/console_version b/tools/console_version index 4c720590d5..ad65f7dedf 100644 --- a/tools/console_version +++ b/tools/console_version @@ -1,2 +1,2 @@ -COMMIT="a9b325e94a6bbb309d68cf586298b4f77aa452ab" -SHA2="d41f22b4d575fc622b2749ea9e81eec11d78a4aae46f61b0472a7958b78be7f7" +COMMIT="34c648b525da61f7c7a6ecf1c036d3c3947cccd6" +SHA2="23bfeef7174515c667f978de04eda7f1b22f5616b4cc24ee045e9bd7347e3678" diff --git a/tools/dendrite_openapi_version b/tools/dendrite_openapi_version index 0d6d6f810e..811e58346c 100755 --- a/tools/dendrite_openapi_version +++ b/tools/dendrite_openapi_version @@ -1,2 +1,2 @@ -COMMIT="861c00bacbdf7a6e22471f0dabd8f926409b5292" +COMMIT="a262fe770c173f7879cd942c98ab28a829890661" SHA2="12dc61e7c62b2e1ee1cf3c2bf7cdda6bee6ec96925d2fc1c021c6c1a8fdd56cd" diff --git a/tools/dendrite_stub_checksums b/tools/dendrite_stub_checksums index 75c76f3585..a42cdeca2a 100644 --- a/tools/dendrite_stub_checksums +++ b/tools/dendrite_stub_checksums @@ -1,3 +1,3 @@ -CIDL_SHA256_ILLUMOS="1db849892c60b22f600fb081d4b0145d8ecd98acce9fad3094499a5d2159d001" -CIDL_SHA256_LINUX_DPD="4022e8c0de268c4bc38046b29a48d021b3204e6c2dc8371f2de67f42019720c0" +CIDL_SHA256_ILLUMOS="6f991dacd72c63d7fcff734b1f5c406c001e4d509f7b36e68b89d8b07f69ed79" +CIDL_SHA256_LINUX_DPD="d97029a5c2c2f136fc76dbfa941ef0b114135232c6f96948453b5c83f744beb7" CIDL_SHA256_LINUX_SWADM="a1308303fd0d8f8ac272288e801beb913f695dcf820dd53f5c03871e6b8674f7" diff --git a/tools/maghemite_ddm_openapi_version b/tools/maghemite_ddm_openapi_version index 9a93e6b556..569d3d7813 100644 --- a/tools/maghemite_ddm_openapi_version +++ b/tools/maghemite_ddm_openapi_version @@ -1,2 +1,2 @@ -COMMIT="5630887d0373857f77cb264f84aa19bdec720ce3" -SHA2="004e873e4120aa26460271368485266b75b7f964e5ed4dbee8fb5db4519470d7" +COMMIT="3c3fa8482fe09a01da62fbd35efe124ea9cac9e7" +SHA2="007bfb717ccbc077c0250dee3121aeb0c5bb0d1c16795429a514fa4f8635a5ef" diff --git a/tools/maghemite_mg_openapi_version b/tools/maghemite_mg_openapi_version index 22918c581a..de64133971 100644 --- a/tools/maghemite_mg_openapi_version +++ b/tools/maghemite_mg_openapi_version @@ -1,2 +1,2 @@ -COMMIT="5630887d0373857f77cb264f84aa19bdec720ce3" -SHA2="fdb33ee7425923560534672264008ef8948d227afce948ab704de092ad72157c" +COMMIT="3c3fa8482fe09a01da62fbd35efe124ea9cac9e7" +SHA2="e4b42ab9daad90f0c561a830b62a9d17e294b4d0da0a6d44b4030929b0c37b7e" diff --git a/tools/maghemite_mgd_checksums b/tools/maghemite_mgd_checksums index d2ad05383d..f9d4fd4491 100644 --- a/tools/maghemite_mgd_checksums +++ b/tools/maghemite_mgd_checksums @@ -1,2 +1,2 @@ -CIDL_SHA256="6ae4bc3b332e91706c1c6633a7fc218aac65b7feff5643ee2dbbe79b841e0df3" 
-MGD_LINUX_SHA256="7930008cf8ce535a8b31043fc3edde0e825bd54d75f73234929bd0037ecc3a41" +CIDL_SHA256="51f446933f0d8c426b15ea0845b66664da9b9a129893d12b25d7912b52f07362" +MGD_LINUX_SHA256="736067394778cc4c38fecb1ca8647db3ca7ab1b5c4446f3ce2b5350379ba95b7" diff --git a/tools/opte_version b/tools/opte_version index 529b93110f..ff992938ae 100644 --- a/tools/opte_version +++ b/tools/opte_version @@ -1 +1 @@ -0.31.262 +0.32.265 diff --git a/tools/permslip_production b/tools/permslip_production index c384c2cd51..5e9b76f980 100644 --- a/tools/permslip_production +++ b/tools/permslip_production @@ -1 +1,2 @@ -3feecf35522ecc07a23fbe934c752ecbf248672ce55c29102de485927edc12e6 manifest-oxide-rot-1-v1.0.11.toml +905d38cb8298c72ecac5cf31f792919fbcd69a4ad656c40e53b3ce2d80140111 manifest-oxide-rot-1-v1.0.12.toml +74e754e68705cf6fed4152a92bc1ee9667d1d98a21fc12993a2232dbe34bfccb manifest-bootleby-v1.3.0.toml diff --git a/tools/permslip_staging b/tools/permslip_staging index 3764a4569e..a38bff708e 100644 --- a/tools/permslip_staging +++ b/tools/permslip_staging @@ -1,4 +1,5 @@ -844c56d542700c4b613d9cd7aee5ab306c8d0b969e5dfe194b1b7468a6a9752b manifest-gimlet-v1.0.21.toml -b973cc9feb20f7bba447e7f5291c4070387fa9992deab81301f67f0a3844cd0c manifest-oxide-rot-1-v1.0.11.toml -ca14a77639db3b71c60234e4edebd01ff31ba5a93a842100a991dbf3ad6e94fb manifest-psc-v1.0.20.toml -af0f6c7d0723db33a2972343cc42e4c2ee2ab8884c49808c9c3d8289c193f97b manifest-sidecar-v1.0.21.toml +c28eaa13638f55100a42916727227242ee02d18cebecb1412d6af5c8aa945b99 manifest-gimlet-v1.0.22.toml +201ff5580bb4b0b01419d7c5e580af9926103e2b6d3024e6b49cee6fab415519 manifest-oxide-rot-1-v1.0.12.toml +6d53bfbfdd6baa3fc150153a003abfac6d4b46c34f61fa7a8ec2af8af19a7d5a manifest-psc-v1.0.21.toml +d608dba3fa5a1fce3592ff3f643319787218b84706134147e5918f5bd1c0345d manifest-sidecar-v1.0.22.toml +c0fecaefac7674138337f3bd4ce4ce5b884053dead5ec27b575701471631ea2f manifest-bootleby-v1.3.0.toml diff --git a/tools/softnpu_version b/tools/softnpu_version new file mode 100644 index 0000000000..03f74d8865 --- /dev/null +++ b/tools/softnpu_version @@ -0,0 +1,2 @@ +COMMIT="3203c51cf4473d30991b522062ac0df2e045c2f2" +SHA2="36095c7f9d613b9208415aeb67335836a25f72eed2f7a41931ba7d91ddb00568" diff --git a/tufaceous-lib/src/assemble/manifest.rs b/tufaceous-lib/src/assemble/manifest.rs index 1c4a676f4c..e9187ff0af 100644 --- a/tufaceous-lib/src/assemble/manifest.rs +++ b/tufaceous-lib/src/assemble/manifest.rs @@ -279,6 +279,9 @@ impl<'a> FakeDataAttributes<'a> { use hubtools::{CabooseBuilder, HubrisArchiveBuilder}; let board = match self.kind { + KnownArtifactKind::GimletRotBootloader + | KnownArtifactKind::PscRotBootloader + | KnownArtifactKind::SwitchRotBootloader => "SimRotStage0", // non-Hubris artifacts: just make fake data KnownArtifactKind::Host | KnownArtifactKind::Trampoline @@ -287,11 +290,11 @@ impl<'a> FakeDataAttributes<'a> { // hubris artifacts: build a fake archive (SimGimletSp and // SimGimletRot are used by sp-sim) KnownArtifactKind::GimletSp => "SimGimletSp", - KnownArtifactKind::GimletRot => "SimGimletRot", + KnownArtifactKind::GimletRot => "SimRot", KnownArtifactKind::PscSp => "fake-psc-sp", KnownArtifactKind::PscRot => "fake-psc-rot", - KnownArtifactKind::SwitchSp => "fake-sidecar-sp", - KnownArtifactKind::SwitchRot => "fake-sidecar-rot", + KnownArtifactKind::SwitchSp => "SimSidecarSp", + KnownArtifactKind::SwitchRot => "SimRot", }; let caboose = CabooseBuilder::default() diff --git a/tufaceous/manifests/fake.toml b/tufaceous/manifests/fake.toml index cc7ccabd74..a71a5e853f 100644 
--- a/tufaceous/manifests/fake.toml +++ b/tufaceous/manifests/fake.toml @@ -68,3 +68,20 @@ version = "1.0.0" kind = "composite-rot" archive_a = { kind = "fake", size = "512KiB" } archive_b = { kind = "fake", size = "512KiB" } + +[[artifact.gimlet_rot_bootloader]] +name = "fake-gimlet-rot-bootloader" +version = "1.0.0" +source = { kind = "fake", size = "1MiB" } + +[[artifact.psc_rot_bootloader]] +name = "fake-psc-rot-bootloader" +version = "1.0.0" +source = { kind = "fake", size = "1MiB" } + +[[artifact.switch_rot_bootloader]] +name = "fake-switch-rot-bootloader" +version = "1.0.0" +source = { kind = "fake", size = "1MiB" } + + diff --git a/update-common/src/artifacts/update_plan.rs b/update-common/src/artifacts/update_plan.rs index c5b171d648..ae5a582be3 100644 --- a/update-common/src/artifacts/update_plan.rs +++ b/update-common/src/artifacts/update_plan.rs @@ -44,12 +44,15 @@ pub struct UpdatePlan { pub gimlet_sp: BTreeMap, pub gimlet_rot_a: Vec, pub gimlet_rot_b: Vec, + pub gimlet_rot_bootloader: Vec, pub psc_sp: BTreeMap, pub psc_rot_a: Vec, pub psc_rot_b: Vec, + pub psc_rot_bootloader: Vec, pub sidecar_sp: BTreeMap, pub sidecar_rot_a: Vec, pub sidecar_rot_b: Vec, + pub sidecar_rot_bootloader: Vec, // Note: The Trampoline image is broken into phase1/phase2 as part of our // update plan (because they go to different destinations), but the two @@ -84,12 +87,15 @@ pub struct UpdatePlanBuilder<'a> { gimlet_sp: BTreeMap, gimlet_rot_a: Vec, gimlet_rot_b: Vec, + gimlet_rot_bootloader: Vec, psc_sp: BTreeMap, psc_rot_a: Vec, psc_rot_b: Vec, + psc_rot_bootloader: Vec, sidecar_sp: BTreeMap, sidecar_rot_a: Vec, sidecar_rot_b: Vec, + sidecar_rot_bootloader: Vec, // We always send phase 1 images (regardless of host or trampoline) to the // SP via MGS, so we retain their data. @@ -130,12 +136,15 @@ impl<'a> UpdatePlanBuilder<'a> { gimlet_sp: BTreeMap::new(), gimlet_rot_a: Vec::new(), gimlet_rot_b: Vec::new(), + gimlet_rot_bootloader: Vec::new(), psc_sp: BTreeMap::new(), psc_rot_a: Vec::new(), psc_rot_b: Vec::new(), + psc_rot_bootloader: Vec::new(), sidecar_sp: BTreeMap::new(), sidecar_rot_a: Vec::new(), sidecar_rot_b: Vec::new(), + sidecar_rot_bootloader: Vec::new(), host_phase_1: None, trampoline_phase_1: None, trampoline_phase_2: None, @@ -187,6 +196,17 @@ impl<'a> UpdatePlanBuilder<'a> { | KnownArtifactKind::SwitchRot => { self.add_rot_artifact(artifact_id, artifact_kind, stream).await } + KnownArtifactKind::GimletRotBootloader + | KnownArtifactKind::PscRotBootloader + | KnownArtifactKind::SwitchRotBootloader => { + self.add_rot_bootloader_artifact( + artifact_id, + artifact_kind, + artifact_hash, + stream, + ) + .await + } KnownArtifactKind::Host => { self.add_host_artifact(artifact_id, stream) } @@ -221,7 +241,10 @@ impl<'a> UpdatePlanBuilder<'a> { | KnownArtifactKind::Trampoline | KnownArtifactKind::ControlPlane | KnownArtifactKind::PscRot - | KnownArtifactKind::SwitchRot => unreachable!(), + | KnownArtifactKind::SwitchRot + | KnownArtifactKind::GimletRotBootloader + | KnownArtifactKind::PscRotBootloader + | KnownArtifactKind::SwitchRotBootloader => unreachable!(), }; let mut stream = std::pin::pin!(stream); @@ -274,6 +297,74 @@ impl<'a> UpdatePlanBuilder<'a> { Ok(()) } + async fn add_rot_bootloader_artifact( + &mut self, + artifact_id: ArtifactId, + artifact_kind: KnownArtifactKind, + artifact_hash: ArtifactHash, + stream: impl Stream> + Send, + ) -> Result<(), RepositoryError> { + // We're only called with an RoT bootloader kind. 
+ let (bootloader, bootloader_kind) = match artifact_kind { + KnownArtifactKind::GimletRotBootloader => ( + &mut self.gimlet_rot_bootloader, + ArtifactKind::GIMLET_ROT_STAGE0, + ), + KnownArtifactKind::PscRotBootloader => { + (&mut self.psc_rot_bootloader, ArtifactKind::PSC_ROT_STAGE0) + } + KnownArtifactKind::SwitchRotBootloader => ( + &mut self.sidecar_rot_bootloader, + ArtifactKind::SWITCH_ROT_STAGE0, + ), + KnownArtifactKind::GimletRot + | KnownArtifactKind::Host + | KnownArtifactKind::Trampoline + | KnownArtifactKind::ControlPlane + | KnownArtifactKind::PscRot + | KnownArtifactKind::SwitchRot + | KnownArtifactKind::GimletSp + | KnownArtifactKind::PscSp + | KnownArtifactKind::SwitchSp => unreachable!(), + }; + + let mut stream = std::pin::pin!(stream); + + // RoT images are small, and hubtools wants a `&[u8]` to parse, so we'll + // read the whole thing into memory. + let mut data = Vec::new(); + while let Some(res) = stream.next().await { + let chunk = res.map_err(|error| RepositoryError::ReadArtifact { + kind: artifact_kind.into(), + error: Box::new(error), + })?; + data.extend_from_slice(&chunk); + } + + let artifact_hash_id = + ArtifactHashId { kind: artifact_kind.into(), hash: artifact_hash }; + let data = self + .extracted_artifacts + .store( + artifact_hash_id, + futures::stream::iter([Ok(Bytes::from(data))]), + ) + .await?; + bootloader.push(ArtifactIdData { + id: artifact_id.clone(), + data: data.clone(), + }); + + self.record_extracted_artifact( + artifact_id, + data, + bootloader_kind, + self.log, + )?; + + Ok(()) + } + async fn add_rot_artifact( &mut self, artifact_id: ArtifactId, @@ -305,7 +396,10 @@ impl<'a> UpdatePlanBuilder<'a> { | KnownArtifactKind::Trampoline | KnownArtifactKind::ControlPlane | KnownArtifactKind::PscSp - | KnownArtifactKind::SwitchSp => unreachable!(), + | KnownArtifactKind::SwitchSp + | KnownArtifactKind::GimletRotBootloader + | KnownArtifactKind::SwitchRotBootloader + | KnownArtifactKind::PscRotBootloader => unreachable!(), }; let (rot_a_data, rot_b_data) = Self::extract_nested_artifact_pair( @@ -694,6 +788,18 @@ impl<'a> UpdatePlanBuilder<'a> { KnownArtifactKind::SwitchRot, self.sidecar_rot_a.is_empty() || self.sidecar_rot_b.is_empty(), ), + ( + KnownArtifactKind::GimletRotBootloader, + self.gimlet_rot_bootloader.is_empty(), + ), + ( + KnownArtifactKind::PscRotBootloader, + self.psc_rot_bootloader.is_empty(), + ), + ( + KnownArtifactKind::SwitchRotBootloader, + self.sidecar_rot_bootloader.is_empty(), + ), ] { if no_artifacts { return Err(RepositoryError::MissingArtifactKind(kind)); @@ -732,6 +838,37 @@ impl<'a> UpdatePlanBuilder<'a> { } } + // Same check for the RoT bootloader. We are explicitly treating the + // bootloader as distinct from the main A/B images here. + for (kind, mut single_board_rot_artifacts) in [ + ( + KnownArtifactKind::GimletRotBootloader, + self.gimlet_rot_bootloader.iter(), + ), + ( + KnownArtifactKind::PscRotBootloader, + self.psc_rot_bootloader.iter(), + ), + ( + KnownArtifactKind::SwitchRotBootloader, + self.sidecar_rot_bootloader.iter(), + ), + ] { + // We know each of these iterators has at least 1 element (checked + // above) so we can safely unwrap the first. + let version = + &single_board_rot_artifacts.next().unwrap().id.version; + for artifact in single_board_rot_artifacts { + if artifact.id.version != *version { + return Err(RepositoryError::MultipleVersionsPresent { + kind, + v1: version.clone(), + v2: artifact.id.version.clone(), + }); + } + } + } + // Repeat the same version check for all SP images. 
(This is a separate // loop because the types of the iterators don't match.) for (kind, mut single_board_sp_artifacts) in [ @@ -758,12 +895,15 @@ impl<'a> UpdatePlanBuilder<'a> { gimlet_sp: self.gimlet_sp, // checked above gimlet_rot_a: self.gimlet_rot_a, // checked above gimlet_rot_b: self.gimlet_rot_b, // checked above + gimlet_rot_bootloader: self.gimlet_rot_bootloader, // checked above psc_sp: self.psc_sp, // checked above psc_rot_a: self.psc_rot_a, // checked above psc_rot_b: self.psc_rot_b, // checked above + psc_rot_bootloader: self.psc_rot_bootloader, // checked above sidecar_sp: self.sidecar_sp, // checked above sidecar_rot_a: self.sidecar_rot_a, // checked above sidecar_rot_b: self.sidecar_rot_b, // checked above + sidecar_rot_bootloader: self.sidecar_rot_bootloader, // checked above host_phase_1: self.host_phase_1.ok_or( RepositoryError::MissingArtifactKind(KnownArtifactKind::Host), )?, @@ -941,6 +1081,22 @@ mod tests { builder.build_to_vec().unwrap() } + fn make_fake_rot_bootloader_image(board: &str, sign: &str) -> Vec { + use hubtools::{CabooseBuilder, HubrisArchiveBuilder}; + + let caboose = CabooseBuilder::default() + .git_commit("this-is-fake-data") + .board(board) + .version("0.0.0") + .name(board) + .sign(sign) + .build(); + + let mut builder = HubrisArchiveBuilder::with_fake_image(); + builder.write_caboose(caboose.as_slice()).unwrap(); + builder.build_to_vec().unwrap() + } + // See documentation for extract_nested_artifact_pair for why multi_thread // is required. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -1077,6 +1233,40 @@ mod tests { .unwrap(); } + let gimlet_rot_bootloader = + make_fake_rot_bootloader_image("test-gimlet-a", "test-gimlet-a"); + let psc_rot_bootloader = + make_fake_rot_bootloader_image("test-psc-a", "test-psc-a"); + let switch_rot_bootloader = + make_fake_rot_bootloader_image("test-sidecar-a", "test-sidecar-a"); + + for (kind, artifact) in [ + ( + KnownArtifactKind::GimletRotBootloader, + gimlet_rot_bootloader.clone(), + ), + (KnownArtifactKind::PscRotBootloader, psc_rot_bootloader.clone()), + ( + KnownArtifactKind::SwitchRotBootloader, + switch_rot_bootloader.clone(), + ), + ] { + let hash = ArtifactHash(Sha256::digest(&artifact).into()); + let id = ArtifactId { + name: format!("{kind:?}"), + version: VERSION_0, + kind: kind.into(), + }; + plan_builder + .add_artifact( + id, + hash, + futures::stream::iter([Ok(Bytes::from(artifact))]), + ) + .await + .unwrap(); + } + let UpdatePlanBuildOutput { plan, by_id, .. 
} = plan_builder.build().unwrap(); @@ -1142,7 +1332,10 @@ mod tests { | KnownArtifactKind::Trampoline | KnownArtifactKind::GimletRot | KnownArtifactKind::PscRot - | KnownArtifactKind::SwitchRot => {} + | KnownArtifactKind::SwitchRot + | KnownArtifactKind::SwitchRotBootloader + | KnownArtifactKind::GimletRotBootloader + | KnownArtifactKind::PscRotBootloader => {} } } @@ -1186,6 +1379,19 @@ mod tests { sidecar_rot.archive_b ); + assert_eq!( + read_to_vec(&plan.gimlet_rot_bootloader[0].data).await, + gimlet_rot_bootloader + ); + assert_eq!( + read_to_vec(&plan.sidecar_rot_bootloader[0].data).await, + switch_rot_bootloader + ); + assert_eq!( + read_to_vec(&plan.psc_rot_bootloader[0].data).await, + psc_rot_bootloader + ); + logctx.cleanup_successful(); } diff --git a/wicket-common/src/update_events.rs b/wicket-common/src/update_events.rs index fe92887646..630ad2d905 100644 --- a/wicket-common/src/update_events.rs +++ b/wicket-common/src/update_events.rs @@ -32,6 +32,7 @@ pub enum WicketdEngineSpec {} )] #[serde(tag = "component", rename_all = "snake_case")] pub enum UpdateComponent { + RotBootloader, Rot, Sp, Host, @@ -42,6 +43,7 @@ pub enum UpdateComponent { pub enum UpdateStepId { TestStep, SetHostPowerState { state: PowerState }, + InterrogateRotBootloader, InterrogateRot, InterrogateSp, SpComponentUpdate, @@ -257,6 +259,21 @@ pub enum SpComponentUpdateTerminalError { }, #[error("RoT booted into unexpected slot {active_slot}")] RotUnexpectedActiveSlot { active_slot: u16 }, + #[error("Getting RoT boot info failed")] + GetRotBootInfoFailed { + #[source] + error: anyhow::Error, + }, + #[error("Unexpected error returned from RoT bootloader update")] + RotBootloaderError { + #[source] + error: anyhow::Error, + }, + #[error("setting currently-active RoT bootloader slot failed")] + SetRotBootloaderActiveSlotFailed { + #[source] + error: anyhow::Error, + }, } impl update_engine::AsError for SpComponentUpdateTerminalError { diff --git a/wicket/README.md b/wicket/README.md index 0a24acbe8e..fc1c93fe83 100644 --- a/wicket/README.md +++ b/wicket/README.md @@ -148,7 +148,18 @@ it on an as-needed basis. ### Using a real SP -TODO +The easiest way is to change the mgs config to point to a running SP instead +of a simulated SP + +``` +[[switch.port]] +kind = "simulated" +fake-interface = "fake-sled1" +# Your SP address here +addr = "[fe80::c1d:93ff:fe20:ffe0%2]:11111" +ignition-target = 3 +location = { switch0 = ["sled", 1], switch1 = ["sled", 1] } +``` ### Running wicketd diff --git a/wicket/src/cli/rack_update.rs b/wicket/src/cli/rack_update.rs index ccacea0e38..44a2076b22 100644 --- a/wicket/src/cli/rack_update.rs +++ b/wicket/src/cli/rack_update.rs @@ -98,6 +98,10 @@ pub(crate) struct StartRackUpdateArgs { #[clap(flatten)] component_ids: ComponentIdSelector, + /// Force update the RoT Bootloader even if the version is the same. + #[clap(long, help_heading = "Update options")] + force_update_rot_bootloader: bool, + /// Force update the RoT even if the version is the same. 
#[clap(long, help_heading = "Update options")] force_update_rot: bool, @@ -125,6 +129,7 @@ impl StartRackUpdateArgs { let update_ids = self.component_ids.to_component_ids()?; let options = CreateStartUpdateOptions { + force_update_rot_bootloader: self.force_update_rot_bootloader, force_update_rot: self.force_update_rot, force_update_sp: self.force_update_sp, } diff --git a/wicket/src/runner.rs b/wicket/src/runner.rs index e83d321459..77fbb82df8 100644 --- a/wicket/src/runner.rs +++ b/wicket/src/runner.rs @@ -176,6 +176,10 @@ impl RunnerCore { Action::StartUpdate(component_id) => { if let Some(wicketd) = wicketd { let options = CreateStartUpdateOptions { + force_update_rot_bootloader: self + .state + .force_update_state + .force_update_rot_bootloader, force_update_rot: self .state .force_update_state diff --git a/wicket/src/state/force_update.rs b/wicket/src/state/force_update.rs index 72533f1378..feafac88a7 100644 --- a/wicket/src/state/force_update.rs +++ b/wicket/src/state/force_update.rs @@ -7,6 +7,7 @@ use wicket_common::update_events::UpdateComponent; #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub struct ForceUpdateState { + pub force_update_rot_bootloader: bool, pub force_update_rot: bool, pub force_update_sp: bool, selected_component: UpdateComponent, @@ -15,9 +16,10 @@ pub struct ForceUpdateState { impl Default for ForceUpdateState { fn default() -> Self { Self { + force_update_rot_bootloader: false, force_update_rot: false, force_update_sp: false, - selected_component: UpdateComponent::Rot, + selected_component: UpdateComponent::RotBootloader, } } } @@ -28,20 +30,29 @@ impl ForceUpdateState { } pub fn next_component(&mut self) { - if self.selected_component == UpdateComponent::Rot { - self.selected_component = UpdateComponent::Sp; - } else { - self.selected_component = UpdateComponent::Rot; - } + self.selected_component = match self.selected_component { + UpdateComponent::RotBootloader => UpdateComponent::Rot, + UpdateComponent::Rot => UpdateComponent::Sp, + UpdateComponent::Sp => UpdateComponent::RotBootloader, + _ => unreachable!(), + }; } pub fn prev_component(&mut self) { - // We only have 2 components; next/prev are both toggles. - self.next_component(); + self.selected_component = match self.selected_component { + UpdateComponent::RotBootloader => UpdateComponent::Sp, + UpdateComponent::Rot => UpdateComponent::RotBootloader, + UpdateComponent::Sp => UpdateComponent::Rot, + _ => unreachable!(), + }; } pub fn toggle(&mut self, component: UpdateComponent) { match component { + UpdateComponent::RotBootloader => { + self.force_update_rot_bootloader = + !self.force_update_rot_bootloader + } UpdateComponent::Rot => { self.force_update_rot = !self.force_update_rot; } diff --git a/wicket/src/state/inventory.rs b/wicket/src/state/inventory.rs index 5cfe536dfb..0ab187cc48 100644 --- a/wicket/src/state/inventory.rs +++ b/wicket/src/state/inventory.rs @@ -171,6 +171,21 @@ impl Component { self.sp().rot.as_ref().and_then(|rot| rot.caboose_b.as_ref()), ) } + + pub fn stage0_version(&self) -> String { + version_or_unknown( + self.sp().rot.as_ref().and_then(|rot| rot.caboose_stage0.as_ref()), + ) + } + + pub fn stage0next_version(&self) -> String { + version_or_unknown( + self.sp() + .rot + .as_ref() + .and_then(|rot| rot.caboose_stage0next.as_ref()), + ) + } } /// The component type and its slot. 
@@ -256,6 +271,14 @@ impl ComponentId { } } + pub fn rot_bootloader_known_artifact_kind(&self) -> KnownArtifactKind { + match self { + ComponentId::Sled(_) => KnownArtifactKind::GimletRotBootloader, + ComponentId::Switch(_) => KnownArtifactKind::SwitchRotBootloader, + ComponentId::Psc(_) => KnownArtifactKind::PscRotBootloader, + } + } + pub fn to_string_uppercase(&self) -> String { let mut s = self.to_string(); s.make_ascii_uppercase(); diff --git a/wicket/src/state/update.rs b/wicket/src/state/update.rs index 77bbdd83d2..31876365e2 100644 --- a/wicket/src/state/update.rs +++ b/wicket/src/state/update.rs @@ -46,6 +46,7 @@ impl RackUpdateState { *id, vec![ UpdateComponent::Rot, + UpdateComponent::RotBootloader, UpdateComponent::Sp, UpdateComponent::Host, ], @@ -55,14 +56,22 @@ impl RackUpdateState { *id, UpdateItem::new( *id, - vec![UpdateComponent::Rot, UpdateComponent::Sp], + vec![ + UpdateComponent::Rot, + UpdateComponent::RotBootloader, + UpdateComponent::Sp, + ], ), ), ComponentId::Psc(_) => ( *id, UpdateItem::new( *id, - vec![UpdateComponent::Rot, UpdateComponent::Sp], + vec![ + UpdateComponent::Rot, + UpdateComponent::RotBootloader, + UpdateComponent::Sp, + ], ), ), }) @@ -429,6 +438,7 @@ fn update_component_state( #[allow(unused)] pub fn update_component_title(component: UpdateComponent) -> &'static str { match component { + UpdateComponent::RotBootloader => "ROT_BOOTLOADER", UpdateComponent::Rot => "ROT", UpdateComponent::Sp => "SP", UpdateComponent::Host => "HOST", @@ -436,6 +446,7 @@ pub fn update_component_title(component: UpdateComponent) -> &'static str { } pub struct CreateStartUpdateOptions { + pub(crate) force_update_rot_bootloader: bool, pub(crate) force_update_rot: bool, pub(crate) force_update_sp: bool, } @@ -454,7 +465,9 @@ impl CreateStartUpdateOptions { as a u64", ) }); - + let test_simulate_rot_bootloader_result = get_update_simulated_result( + "WICKET_UPDATE_TEST_SIMULATE_ROT_BOOTLOADER_RESULT", + )?; let test_simulate_rot_result = get_update_simulated_result( "WICKET_UPDATE_TEST_SIMULATE_ROT_RESULT", )?; @@ -465,8 +478,10 @@ impl CreateStartUpdateOptions { Ok(StartUpdateOptions { test_error, test_step_seconds, + test_simulate_rot_bootloader_result, test_simulate_rot_result, test_simulate_sp_result, + skip_rot_bootloader_version_check: self.force_update_rot_bootloader, skip_rot_version_check: self.force_update_rot, skip_sp_version_check: self.force_update_sp, }) diff --git a/wicket/src/ui/panes/overview.rs b/wicket/src/ui/panes/overview.rs index 7d60c41772..45d02311aa 100644 --- a/wicket/src/ui/panes/overview.rs +++ b/wicket/src/ui/panes/overview.rs @@ -770,12 +770,12 @@ fn inventory_description(component: &Component) -> Text { .into(), ); } - if let Some(_) = slot_a_error { + if let Some(e) = slot_a_error { spans.push( vec![ nest_bullet(), Span::styled("Image status: ", label_style), - Span::styled("Error: ", bad_style), + Span::styled(format!("Error: {e:?}"), bad_style), ] .into(), ); @@ -813,12 +813,12 @@ fn inventory_description(component: &Component) -> Text { .into(), ); } - if let Some(_) = slot_b_error { + if let Some(e) = slot_b_error { spans.push( vec![ nest_bullet(), Span::styled("Image status: ", label_style), - Span::styled("Error: ", bad_style), + Span::styled(format!("Error: {e:?}"), bad_style), ] .into(), ); @@ -857,12 +857,12 @@ fn inventory_description(component: &Component) -> Text { .into(), ); } - if let Some(_) = stage0_error { + if let Some(e) = stage0_error { spans.push( vec![ nest_bullet(), Span::styled("Image status: ", label_style), - 
Span::styled("Error: ", bad_style), + Span::styled(format!("Error: {e:?}"), bad_style), ] .into(), ); @@ -902,12 +902,12 @@ fn inventory_description(component: &Component) -> Text { .into(), ); } - if let Some(_) = stage0next_error { + if let Some(e) = stage0next_error { spans.push( vec![ nest_bullet(), Span::styled("Image status: ", label_style), - Span::styled("Error: ", bad_style), + Span::styled(format!("Error: {e:?}"), bad_style), ] .into(), ); diff --git a/wicket/src/ui/panes/update.rs b/wicket/src/ui/panes/update.rs index 664c647eac..6269228bc2 100644 --- a/wicket/src/ui/panes/update.rs +++ b/wicket/src/ui/panes/update.rs @@ -150,7 +150,7 @@ pub struct UpdatePane { /// TODO: Move following state into global `State` so that recorder snapshots /// capture all state. /// - /// TODO: The generic parameter is carried over from earlier versions + /// TODO: The usize generic parameter is carried over from earlier versions /// of tui-tree-widget, but there's likely a better index type. tree_state: TreeState, items: Vec>, @@ -1711,6 +1711,7 @@ struct ComponentForceUpdateSelectionState { } struct ForceUpdateSelectionState { + rot_bootloader: Option, rot: Option, sp: Option, } @@ -1722,6 +1723,7 @@ impl From<&'_ State> for ForceUpdateSelectionState { let inventory = &state.inventory; let update_item = &state.update_state.items[&component_id]; + let mut rot_bootloader = None; let mut rot = None; let mut sp = None; @@ -1737,6 +1739,22 @@ impl From<&'_ State> for ForceUpdateSelectionState { let installed_version = active_installed_version(&component_id, component, inventory); match component { + UpdateComponent::RotBootloader => { + assert!( + rot_bootloader.is_none(), + "update item contains multiple RoT bootloader entries" + ); + if artifact_version == installed_version { + rot_bootloader = + Some(ComponentForceUpdateSelectionState { + version: artifact_version, + toggled_on: state + .force_update_state + .force_update_rot_bootloader, + selected: false, // set below + }); + } + } UpdateComponent::Rot => { assert!( rot.is_none(), @@ -1773,28 +1791,63 @@ impl From<&'_ State> for ForceUpdateSelectionState { // If we only have one force-updateable component, mark it as selected; // otherwise, respect the option currently selected in `State`. 
- match (rot.as_mut(), sp.as_mut()) { - (Some(rot), None) => rot.selected = true, - (None, Some(sp)) => sp.selected = true, - (Some(rot), Some(sp)) => { + match (rot_bootloader.as_mut(), rot.as_mut(), sp.as_mut()) { + (Some(rot_bootloader), None, None) => { + rot_bootloader.selected = true + } + (None, Some(rot), None) => rot.selected = true, + (None, None, Some(sp)) => sp.selected = true, + // Two selected + (Some(rot_bootloader), Some(rot), None) => { + if state.force_update_state.selected_component() + == UpdateComponent::RotBootloader + { + rot_bootloader.selected = true + } else { + rot.selected = true + } + } + (None, Some(rot), Some(sp)) => { if state.force_update_state.selected_component() == UpdateComponent::Rot { - rot.selected = true; + rot.selected = true } else { - sp.selected = true; + sp.selected = true } } - (None, None) => (), + (Some(rot_bootloader), None, Some(sp)) => { + if state.force_update_state.selected_component() + == UpdateComponent::RotBootloader + { + rot_bootloader.selected = true + } else { + sp.selected = true + } + } + // All three + (Some(rot_bootloader), Some(rot), Some(sp)) => { + match state.force_update_state.selected_component() { + UpdateComponent::Rot => rot.selected = true, + UpdateComponent::Sp => sp.selected = true, + UpdateComponent::RotBootloader => { + rot_bootloader.selected = true + } + _ => (), + } + } + (None, None, None) => (), } - Self { rot, sp } + Self { rot_bootloader, rot, sp } } } impl ForceUpdateSelectionState { fn num_spans(&self) -> usize { - usize::from(self.rot.is_some()) + usize::from(self.sp.is_some()) + usize::from(self.rot.is_some()) + + usize::from(self.sp.is_some()) + + usize::from(self.rot_bootloader.is_some()) } fn next_component(&self, state: &mut State) { @@ -1826,6 +1879,13 @@ impl ForceUpdateSelectionState { state.force_update_state.toggle(UpdateComponent::Rot); } else if self.sp.as_ref().map(|sp| sp.selected).unwrap_or(false) { state.force_update_state.toggle(UpdateComponent::Sp); + } else if self + .rot_bootloader + .as_ref() + .map(|rot_bootloader| rot_bootloader.selected) + .unwrap_or(false) + { + state.force_update_state.toggle(UpdateComponent::RotBootloader); } } @@ -1850,6 +1910,9 @@ impl ForceUpdateSelectionState { } let mut spans = Vec::new(); + if let Some(rot_bootloader) = self.rot_bootloader.as_ref() { + spans.push(make_spans("RoT Bootloader", rot_bootloader)); + } if let Some(rot) = self.rot.as_ref() { spans.push(make_spans("RoT", rot)); } @@ -2201,6 +2264,10 @@ fn active_installed_version( ) -> String { let component = inventory.get_inventory(id); match update_component { + UpdateComponent::RotBootloader => component.map_or_else( + || "UNKNOWN".to_string(), + |component| component.stage0_version(), + ), UpdateComponent::Sp => component.map_or_else( || "UNKNOWN".to_string(), |component| component.sp_version_active(), @@ -2254,6 +2321,26 @@ fn all_installed_versions( ] }, ), + UpdateComponent::RotBootloader => component.map_or_else( + || { + vec![InstalledVersion { + title: base_title.into(), + version: "UNKNOWN".into(), + }] + }, + |component| { + vec![ + InstalledVersion { + title: base_title.into(), + version: component.stage0_version().into(), + }, + InstalledVersion { + title: format!("{base_title}_NEXT").into(), + version: component.stage0next_version().into(), + }, + ] + }, + ), UpdateComponent::Rot => component.map_or_else( || { vec![InstalledVersion { @@ -2301,6 +2388,9 @@ fn artifact_version( versions: &BTreeMap, ) -> String { let artifact = match (id, component) { + (ComponentId::Sled(_), 
UpdateComponent::RotBootloader) => { + KnownArtifactKind::GimletRotBootloader + } (ComponentId::Sled(_), UpdateComponent::Rot) => { KnownArtifactKind::GimletRot } @@ -2310,12 +2400,18 @@ fn artifact_version( (ComponentId::Sled(_), UpdateComponent::Host) => { KnownArtifactKind::Host } + (ComponentId::Switch(_), UpdateComponent::RotBootloader) => { + KnownArtifactKind::SwitchRotBootloader + } (ComponentId::Switch(_), UpdateComponent::Rot) => { KnownArtifactKind::SwitchRot } (ComponentId::Switch(_), UpdateComponent::Sp) => { KnownArtifactKind::SwitchSp } + (ComponentId::Psc(_), UpdateComponent::RotBootloader) => { + KnownArtifactKind::PscRotBootloader + } (ComponentId::Psc(_), UpdateComponent::Rot) => { KnownArtifactKind::PscRot } @@ -2363,7 +2459,7 @@ impl Control for UpdatePane { [ Constraint::Length(3), Constraint::Length(3), - Constraint::Length(6), + Constraint::Length(8), Constraint::Min(0), Constraint::Length(3), ] diff --git a/wicketd/src/http_entrypoints.rs b/wicketd/src/http_entrypoints.rs index 999428ff06..001974e085 100644 --- a/wicketd/src/http_entrypoints.rs +++ b/wicketd/src/http_entrypoints.rs @@ -697,6 +697,12 @@ pub(crate) struct StartUpdateOptions { /// This is used for testing. pub(crate) test_step_seconds: Option, + /// If passed in, simulates a result for the RoT Bootloader update. + /// + /// This is used for testing. + pub(crate) test_simulate_rot_bootloader_result: + Option, + /// If passed in, simulates a result for the RoT update. /// /// This is used for testing. @@ -709,7 +715,10 @@ pub(crate) struct StartUpdateOptions { /// If true, skip the check on the current RoT version and always update it /// regardless of whether the update appears to be neeeded. - #[allow(dead_code)] // TODO actually use this + pub(crate) skip_rot_bootloader_version_check: bool, + + /// If true, skip the check on the current RoT version and always update it + /// regardless of whether the update appears to be needed.
pub(crate) skip_rot_version_check: bool, /// If true, skip the check on the current SP version and always update it diff --git a/wicketd/src/update_tracker.rs b/wicketd/src/update_tracker.rs index 10253bc2f7..6de7090ce4 100644 --- a/wicketd/src/update_tracker.rs +++ b/wicketd/src/update_tracker.rs @@ -23,12 +23,15 @@ use display_error_chain::DisplayErrorChain; use dropshot::HttpError; use futures::Stream; use futures::TryFutureExt; +use gateway_client::types::GetRotBootInfoParams; use gateway_client::types::HostPhase2Progress; use gateway_client::types::HostPhase2RecoveryImageId; use gateway_client::types::HostStartupOptions; use gateway_client::types::InstallinatorImageId; use gateway_client::types::PowerState; use gateway_client::types::RotCfpaSlot; +use gateway_client::types::RotImageError; +use gateway_client::types::RotState; use gateway_client::types::SpComponentFirmwareSlot; use gateway_client::types::SpIdentifier; use gateway_client::types::SpType; @@ -862,19 +865,45 @@ impl UpdateDriver { define_test_steps(&engine, secs); } - let (rot_a, rot_b, sp_artifacts) = match update_cx.sp.type_ { - SpType::Sled => { - (&plan.gimlet_rot_a, &plan.gimlet_rot_b, &plan.gimlet_sp) - } - SpType::Power => (&plan.psc_rot_a, &plan.psc_rot_b, &plan.psc_sp), - SpType::Switch => { - (&plan.sidecar_rot_a, &plan.sidecar_rot_b, &plan.sidecar_sp) - } - }; + let (rot_a, rot_b, sp_artifacts, rot_bootloader) = + match update_cx.sp.type_ { + SpType::Sled => ( + &plan.gimlet_rot_a, + &plan.gimlet_rot_b, + &plan.gimlet_sp, + &plan.gimlet_rot_bootloader, + ), + SpType::Power => ( + &plan.psc_rot_a, + &plan.psc_rot_b, + &plan.psc_sp, + &plan.psc_rot_bootloader, + ), + SpType::Switch => ( + &plan.sidecar_rot_a, + &plan.sidecar_rot_b, + &plan.sidecar_sp, + &plan.sidecar_rot_bootloader, + ), + }; + let rot_bootloader_registrar = + engine.for_component(UpdateComponent::RotBootloader); let rot_registrar = engine.for_component(UpdateComponent::Rot); let sp_registrar = engine.for_component(UpdateComponent::Sp); + // There are some extra checks and verifications needed + // before we can update the RoT bootloader + let rot_bootloader_interrogation = rot_bootloader_registrar + .new_step( + UpdateStepId::InterrogateRot, + "Checking current RoT bootloader version", + move |_cx| async move { + update_cx.interrogate_rot_bootloader(rot_bootloader).await + }, + ) + .register(); + // To update the RoT, we have to know which slot (A or B) it is // currently executing; we must update the _other_ slot. We also want to // know its current version (so we can skip updating if we only need to @@ -946,6 +975,94 @@ impl UpdateDriver { }, ) .register(); + + // Send the bootloader update to the RoT.
+ let inner_cx = SpComponentUpdateContext::new( + update_cx, + UpdateComponent::RotBootloader, + ); + rot_bootloader_registrar + .new_step( + UpdateStepId::SpComponentUpdate, + "Updating RoT bootloader", + move |cx| async move { + if let Some(result) = opts.test_simulate_rot_bootloader_result { + return simulate_result(result); + } + + let rot_bootloader_interrogation = + match rot_bootloader_interrogation.into_value(cx.token()).await { + Some(v) => v, + None => return StepSkipped::new( + (), + "Skipping bootloader update, check interrogation step", + ).into(), + }; + + let bootloader_has_this_version = rot_bootloader_interrogation + .active_version_matches_artifact_to_apply(); + + let sp_can_update = rot_bootloader_interrogation.sp_can_update_bootloader(&update_cx.mgs_client).await; + + if !sp_can_update { + return StepSkipped::new( + (), + "SP version needs to be upgraded before RoT bootloader can be updated", + ) + .into(); + + } + + // If this RoT already has this version, skip the rest of + // this step, UNLESS we've been told to skip this version + // check. + if bootloader_has_this_version && !opts.skip_rot_bootloader_version_check { + return StepSkipped::new( + (), + format!( + "RoT bootloader already at version {}", + rot_bootloader_interrogation.available_artifacts_version, + ), + ) + .into(); + } + + let artifact_to_apply = rot_bootloader_interrogation + .choose_artifact_to_apply( + &update_cx.mgs_client, + &update_cx.log, + ) + .await?; + + cx.with_nested_engine(|engine| { + inner_cx.register_steps( + engine, + rot_bootloader_interrogation.slot_to_update, + artifact_to_apply, + ); + Ok(()) + }) + .await?; + + // If we updated despite the RoT already having the version + // we updated to, make this step return a warning with that + // message; otherwise, this is a normal success. + if bootloader_has_this_version { + StepWarning::new( + (), + format!( + "RoT bootloader updated despite already having version {}", + rot_bootloader_interrogation.available_artifacts_version + ), + ) + .into() + } else { + StepSuccess::new(()).into() + } + }, + ) + .register(); + // Send the update to the RoT. let inner_cx = SpComponentUpdateContext::new(update_cx, UpdateComponent::Rot); @@ -1588,6 +1705,43 @@ struct RotInterrogation { } impl RotInterrogation { + async fn sp_can_update_bootloader( + &self, + client: &gateway_client::Client, + ) -> bool { + let sp_caboose = client + .sp_component_caboose_get( + self.sp.type_, + self.sp.slot, + SpComponent::SP_ITSELF.const_as_str(), + 0, + ) + .await + .ok() + .map(|v| v.into_inner()); + + // Older versions of the SP have a bug that prevents setting + // the active slot for the RoT bootloader. 
Check for these + // and skip the update until the SP gets updated + const MIN_GIMLET_VERSION: SemverVersion = SemverVersion::new(1, 0, 21); + const MIN_SWITCH_VERSION: SemverVersion = SemverVersion::new(1, 0, 21); + const MIN_PSC_VERSION: SemverVersion = SemverVersion::new(1, 0, 20); + + match sp_caboose { + // If we can't get the SP caboose for whatever reason don't risk + // trying an update + None => false, + Some(caboose) => match caboose.version.parse::() { + Ok(vers) => match self.sp.type_ { + SpType::Sled => vers >= MIN_GIMLET_VERSION, + SpType::Switch => vers >= MIN_SWITCH_VERSION, + SpType::Power => vers >= MIN_PSC_VERSION, + }, + Err(_) => false, + }, + } + } + fn active_version_matches_artifact_to_apply(&self) -> bool { Some(&self.available_artifacts_version) == self.active_version.as_ref() } @@ -1599,6 +1753,9 @@ impl RotInterrogation { /// their CMPA/CFPA pages, if we fail to fetch them _and_ /// `available_artifacts` has exactly one item, we will return that one /// item. + /// + /// This is also applicable to the RoT bootloader which follows the + /// same vaildation method async fn choose_artifact_to_apply( &self, client: &gateway_client::Client, @@ -1844,6 +2001,151 @@ impl UpdateContext { }) } + async fn interrogate_rot_bootloader( + &self, + rot_bootloader: &[ArtifactIdData], + ) -> Result>, UpdateTerminalError> { + // We have a known set of bootloader FWID that don't have cabooses. + static KNOWN_MISSING_CABOOSE: [&str; 18] = [ + "1122095f4a3797db8a7d6279ae889ddde0316631f1f3bc204bdc39c2d75707af", + "1525832a663024f6421c13c0f7c7d9e9b32ebf433898565a2ad8112e7d237ead", + "29fc0d31e1739865c7f3d4bb5f5b86779db92a65a2decbd59e42f6e95dd84698", + "37aa40d0ea12e1290477a84014cd03dbc6fa9817223d1546a10847510d75c383", + "53cb91f4a3fbb69efa733a9eb326bd9f71c849782b0eea4306ebc66620158d44", + "60effb7fd6c4780138887e0d65c9e9b9c8447ce4ea3ea71e08194aec2847b185", + "77b8fc4308221dfe123d93431c21b57fa896db65c015ca82e22a337c7aa7cd77", + "77c2b94e3a83fc6b3c8924d38b0d23ac7c1e7a15defa910ee3f850b41af9ca4c", + "8c58b2272fe2da219ab0757ff27398b8d4a459eb4e75c32c782f98d684269352", + "9dd79a4e7609bd4af8e39a03f77b997b35f5050409a2ecd19de1e7d16184b1f3", + "b123a0f683f4e7b60238840139c9f3dbfe2b2c61597d9cdd4e92c718f7f98bb7", + "ba08df44e7282a1daeae2d9346b99ca741bfc2649c12aa8292f413a1c84d80b7", + "bfa9adfc127886aeaa1ac58d30c07c76e89592c29fc83dfa88062e7f3a48335e", + "c23a53858e94932a95945f28730e41ae4a2d1a8db4776283245eda143b6b2994", + "e7ec5dae7ac462cc7f7561a91ef244a2ece0894ff212995fcccb1e86438cb665", + "ee688a237a480e9fd111a7f70cc4c6f9ac837dcac65a01e7cfa29f7c28545d07", + "f31442015da37523a13ffaa173b4dfe0b069c6d890cf1c9748a898001fe4110e", + "fa73f26fb73b27b5db8f425320e206df5ebf3e137475d40be76b540ea8bd2af9", + ]; + + // We already validated at repo-upload time there is at least one RoT + // artifact available and that all available RoT artifacts are the same + // version, so we can unwrap the first artifact here and assume its + // version matches any subsequent artifacts. + // TODO this needs to be fixed for multi version to work! + let available_artifacts_version = rot_bootloader + .get(0) + .expect("no RoT artifacts available") + .id + .version + .clone(); + + let stage0_fwid = match self + .mgs_client + .sp_rot_boot_info( + self.sp.type_, + self.sp.slot, + SpComponent::ROT.const_as_str(), + &GetRotBootInfoParams { + version: + gateway_messages::RotBootInfo::HIGHEST_KNOWN_VERSION, + }, + ) + .await + { + Ok(v) => match v.into_inner() { + // the minimum we will ever return is 3 + RotState::V2 { .. 
} => unreachable!(), + RotState::V3 { stage0_fwid, .. } => stage0_fwid, + // ugh + RotState::CommunicationFailed { message } => { + return StepWarning::new( + None, + format!( + "Failed to communicate with the RoT: {message}. Will not proceed with update." + ), + ) + .into(); + } + }, + // If we can't run `rot_boot_info` there's a chance we can't do + // antything else with stage0 either + Err(e) => return StepWarning::new( + None, + format!("Failed to run `rot_boot_info`: {e:?}. Will not proceed with update."), + ) + .into(), + }; + + // Read the caboose of the currently running version (always 0) + // When updating from older stage0 we may not have a caboose so an error here + // need not be fatal + // TODO make this fatal at some point + let caboose = self + .mgs_client + .sp_component_caboose_get( + self.sp.type_, + self.sp.slot, + SpComponent::STAGE0.const_as_str(), + 0, + ) + .await + .map(|v| v.into_inner()) + .ok(); + + let available_artifacts = rot_bootloader.to_vec(); + let make_result = |active_version| { + Some(RotInterrogation { + // We always update slot 1 + slot_to_update: 1, + available_artifacts, + available_artifacts_version, + sp: self.sp, + active_version, + }) + }; + + match caboose { + Some(c) => { + let message = format!( + "RoT bootloader version {} (git commit {})", + c.version, c.git_commit + ); + + match c.version.parse::() { + Ok(version) => StepSuccess::new(make_result(Some(version))) + .with_message(message) + .into(), + Err(err) => StepWarning::new( + make_result(None), + format!( + "{message} (failed to parse RoT bootloader version: {err})" + ), + ) + .into(), + } + } + None => { + if KNOWN_MISSING_CABOOSE.contains(&stage0_fwid.as_str()) { + StepWarning::new( + make_result(None), + format!( + "fwid {stage0_fwid} is known to be missing a caboose." + ), + ) + .into() + } else { + StepWarning::new( + None, + format!( + "fwid {stage0_fwid} is _not_ supposed to be missing a caboose. Will not proceed with update" + ), + ) + .into() + } + } + } + } + async fn interrogate_rot( &self, rot_a: &[ArtifactIdData], @@ -1923,6 +2225,52 @@ impl UpdateContext { } } + /// Poll the RoT asking for its boot information. This is used to check + /// state after RoT bootloader updates + async fn wait_for_rot_boot_info( + &self, + timeout: Duration, + ) -> anyhow::Result<(Option, Option)> { + let mut ticker = tokio::time::interval(Duration::from_secs(1)); + + let start = Instant::now(); + loop { + ticker.tick().await; + match self.get_rot_boot_info().await { + Ok(state) => match state { + // the minimum we will ever return is 3 + RotState::V2 { .. } => unreachable!(), + RotState::V3 { stage0_error, stage0next_error, .. } => { + return Ok((stage0_error, stage0next_error)) + } + // ugh + RotState::CommunicationFailed { message } => { + if start.elapsed() < timeout { + warn!( + self.log, + "failed getting RoT boot info (will retry)"; + "error" => %message, + ); + } else { + return Err(anyhow!(message)); + } + } + }, + Err(error) => { + if start.elapsed() < timeout { + warn!( + self.log, + "failed getting RoT boot info (will retry)"; + "error" => %error, + ); + } else { + return Err(error); + } + } + } + } + } + /// Poll the RoT asking for its currently active slot, allowing failures up /// to a fixed timeout to give time for it to boot. 
/// @@ -1930,16 +2278,14 @@ impl UpdateContext { async fn wait_for_rot_reboot( &self, timeout: Duration, + component: &str, ) -> anyhow::Result { let mut ticker = tokio::time::interval(Duration::from_secs(1)); let start = Instant::now(); loop { ticker.tick().await; - match self - .get_component_active_slot(SpComponent::ROT.const_as_str()) - .await - { + match self.get_component_active_slot(component).await { Ok(slot) => return Ok(slot), Err(error) => { if start.elapsed() < timeout { @@ -2083,6 +2429,22 @@ impl UpdateContext { StepSuccess::new(()).into() } + async fn get_rot_boot_info(&self) -> anyhow::Result { + self.mgs_client + .sp_rot_boot_info( + self.sp.type_, + self.sp.slot, + SpComponent::ROT.const_as_str(), + &GetRotBootInfoParams { + version: + gateway_messages::RotBootInfo::HIGHEST_KNOWN_VERSION, + }, + ) + .await + .context("failed to get RoT boot info") + .map(|res| res.into_inner()) + } + async fn get_component_active_slot( &self, component: &str, @@ -2325,6 +2687,9 @@ impl<'a> SpComponentUpdateContext<'a> { let update_cx = self.update_cx; let component_name = match self.component { + UpdateComponent::RotBootloader => { + SpComponent::STAGE0.const_as_str() + } UpdateComponent::Rot => SpComponent::ROT.const_as_str(), UpdateComponent::Sp => SpComponent::SP_ITSELF.const_as_str(), UpdateComponent::Host => { @@ -2434,13 +2799,130 @@ impl<'a> SpComponentUpdateContext<'a> { // to stage updates for example, but for wicketd-driven recovery it's // fine to do this immediately.) match component { + UpdateComponent::RotBootloader => { + const WAIT_FOR_BOOT_TIMEOUT: Duration = Duration::from_secs(30); + + // We need to reset the RoT in order to check the signature on what we just + // updated + registrar + .new_step( + SpComponentUpdateStepId::Resetting, + "Resetting the RoT to check the bootloader signature", + move |_cx| async move { + update_cx + .reset_sp_component(SpComponent::ROT.const_as_str()) + .await + .map_err(|error| { + SpComponentUpdateTerminalError::RotResetFailed { + error, + } + })?; + StepSuccess::new(()).into() + }, + ) + .register(); + + registrar + .new_step( + SpComponentUpdateStepId::Resetting, + "Waiting for RoT to boot".to_string(), + move |_cx| async move { + let (_, stage0next_error) = update_cx + .wait_for_rot_boot_info(WAIT_FOR_BOOT_TIMEOUT) + .await + .map_err(|error| { + SpComponentUpdateTerminalError::GetRotBootInfoFailed { error } + })?; + + // check that stage0next is valid before we try to set the component + if let Some(error) = stage0next_error { + return Err(SpComponentUpdateTerminalError::RotBootloaderError { + error: anyhow!(format!("{error:?}")) + }); + } + StepSuccess::new(()).into() + }, + ) + .register(); + + // Actually set stage0 to use the new firmware + registrar + .new_step( + SpComponentUpdateStepId::SettingActiveBootSlot, + format!("Setting {component_name} active slot to {firmware_slot}"), + move |_cx| async move { + update_cx + .set_component_active_slot( + component_name, + firmware_slot, + true, + ) + .await + .map_err(|error| { + SpComponentUpdateTerminalError::SetRotBootloaderActiveSlotFailed { + error, + } + })?; + StepSuccess::new(()).into() + }, + ) + .register(); + + // Now reset (again) to boot into the new stage0 + registrar + .new_step( + SpComponentUpdateStepId::Resetting, + "Resetting the RoT to boot into the new bootloader", + move |_cx| async move { + update_cx + .reset_sp_component(SpComponent::ROT.const_as_str()) + .await + .map_err(|error| { + SpComponentUpdateTerminalError::RotResetFailed { + error, + } + })?; + 
StepSuccess::new(()).into() + }, + ) + .register(); + + registrar + .new_step( + SpComponentUpdateStepId::Resetting, + "Checking the new RoT bootloader".to_string(), + move |_cx| async move { + let (stage0_error, stage0next_error) = update_cx + .wait_for_rot_boot_info(WAIT_FOR_BOOT_TIMEOUT) + .await + .map_err(|error| { + SpComponentUpdateTerminalError::GetRotActiveSlotFailed { error } + })?; + + // Both the active and pending slots should be valid after this spot + if let Some(error) = stage0_error { + return Err(SpComponentUpdateTerminalError::RotBootloaderError { + error: anyhow!(format!("{error:?}")) + }); + } + if let Some(error) = stage0next_error { + return Err(SpComponentUpdateTerminalError::RotBootloaderError { + error: anyhow!(format!("{error:?}")) + }); + } + + StepSuccess::new(()).into() + }, + ) + .register(); + } UpdateComponent::Rot => { // Prior to rebooting the RoT, we have to tell it to boot into // the firmware slot we just updated. registrar .new_step( SpComponentUpdateStepId::SettingActiveBootSlot, - format!("Setting RoT active slot to {firmware_slot}"), + format!("Setting {component_name} active slot to {firmware_slot}"), move |_cx| async move { update_cx .set_component_active_slot( @@ -2463,7 +2945,7 @@ impl<'a> SpComponentUpdateContext<'a> { registrar .new_step( SpComponentUpdateStepId::Resetting, - "Resetting RoT", + format!("Resetting {component_name}"), move |_cx| async move { update_cx .reset_sp_component(component_name) @@ -2502,7 +2984,7 @@ impl<'a> SpComponentUpdateContext<'a> { const WAIT_FOR_BOOT_TIMEOUT: Duration = Duration::from_secs(30); let active_slot = update_cx - .wait_for_rot_reboot(WAIT_FOR_BOOT_TIMEOUT) + .wait_for_rot_reboot(WAIT_FOR_BOOT_TIMEOUT, component_name) .await .map_err(|error| { SpComponentUpdateTerminalError::GetRotActiveSlotFailed { error } @@ -2518,6 +3000,7 @@ impl<'a> SpComponentUpdateContext<'a> { } UpdateComponent::Sp => { // Nothing special to do on the SP - just reset it. 
+ // TODO fixup the SP to also set the active slot registrar .new_step( SpComponentUpdateStepId::Resetting, diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 1b21b72495..7dfc9a1402 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -83,17 +83,17 @@ pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] petgraph = { version = "0.6.5", features = ["serde-1"] } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.0" } -proc-macro2 = { version = "1.0.82" } +proc-macro2 = { version = "1.0.86" } regex = { version = "1.10.4" } regex-automata = { version = "0.4.6", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } regex-syntax = { version = "0.8.3" } reqwest = { version = "0.11.27", features = ["blocking", "cookies", "json", "rustls-tls", "stream"] } ring = { version = "0.17.8", features = ["std"] } -schemars = { version = "0.8.20", features = ["bytes", "chrono", "uuid1"] } +schemars = { version = "0.8.21", features = ["bytes", "chrono", "uuid1"] } scopeguard = { version = "1.2.0" } semver = { version = "1.0.23", features = ["serde"] } serde = { version = "1.0.203", features = ["alloc", "derive", "rc"] } -serde_json = { version = "1.0.117", features = ["raw_value", "unbounded_depth"] } +serde_json = { version = "1.0.118", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.5.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } @@ -101,7 +101,7 @@ smallvec = { version = "1.13.2", default-features = false, features = ["const_ne spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.5.0" } -syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.64", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.68", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3.36", features = ["formatting", "local-offset", "macros", "parsing"] } tokio = { version = "1.37.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } @@ -187,17 +187,17 @@ pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] petgraph = { version = "0.6.5", features = ["serde-1"] } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.0" } -proc-macro2 = { version = "1.0.82" } +proc-macro2 = { version = "1.0.86" } regex = { version = "1.10.4" } regex-automata = { version = "0.4.6", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } regex-syntax = { version = "0.8.3" } reqwest = { version = "0.11.27", features = ["blocking", "cookies", "json", "rustls-tls", "stream"] } ring = { version = "0.17.8", features = ["std"] } -schemars = { version = "0.8.20", features = ["bytes", "chrono", "uuid1"] } +schemars = { version = "0.8.21", features = ["bytes", "chrono", "uuid1"] } scopeguard = { version = "1.2.0" } semver = { version = "1.0.23", features = ["serde"] } serde = { version = "1.0.203", features = ["alloc", "derive", "rc"] } -serde_json = { 
version = "1.0.117", features = ["raw_value", "unbounded_depth"] } +serde_json = { version = "1.0.118", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.5.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } @@ -206,7 +206,7 @@ spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } -syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.64", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } +syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.68", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3.36", features = ["formatting", "local-offset", "macros", "parsing"] } time-macros = { version = "0.2.18", default-features = false, features = ["formatting", "parsing"] } tokio = { version = "1.37.0", features = ["full", "test-util"] }