diff --git a/.config/nextest.toml b/.config/nextest.toml index 79774e3658..4f927d2396 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -3,7 +3,7 @@ # # The required version should be bumped up if we need new features, performance # improvements or bugfixes that are present in newer versions of nextest. -nextest-version = { required = "0.9.59", recommended = "0.9.59" } +nextest-version = { required = "0.9.59", recommended = "0.9.64" } experimental = ["setup-scripts"] @@ -17,7 +17,9 @@ setup = 'crdb-seed' fail-fast = false [script.crdb-seed] -command = 'cargo run -p crdb-seed' +# Use the test profile for this executable since that's how almost all +# invocations of nextest happen. +command = 'cargo run -p crdb-seed --profile test' # The ClickHouse cluster tests currently rely on a hard-coded set of ports for # the nodes in the cluster. We would like to relax this in the future, at which diff --git a/.github/buildomat/build-and-test.sh b/.github/buildomat/build-and-test.sh index 6fda8bb8d7..34f81bab68 100755 --- a/.github/buildomat/build-and-test.sh +++ b/.github/buildomat/build-and-test.sh @@ -7,7 +7,7 @@ set -o xtrace # NOTE: This version should be in sync with the recommended version in # .config/nextest.toml. (Maybe build an automated way to pull the recommended # version in the future.) -NEXTEST_VERSION='0.9.59' +NEXTEST_VERSION='0.9.64' cargo --version rustc --version diff --git a/.github/buildomat/jobs/deploy.sh b/.github/buildomat/jobs/deploy.sh index ff9b44fc40..f4f1e0a999 100755 --- a/.github/buildomat/jobs/deploy.sh +++ b/.github/buildomat/jobs/deploy.sh @@ -2,7 +2,7 @@ #: #: name = "helios / deploy" #: variety = "basic" -#: target = "lab-2.0-opte-0.25" +#: target = "lab-2.0-opte-0.27" #: output_rules = [ #: "%/var/svc/log/oxide-sled-agent:default.log*", #: "%/pool/ext/*/crypt/zone/oxz_*/root/var/svc/log/oxide-*.log*", @@ -281,19 +281,15 @@ rmdir pkg E2E_TLS_CERT="/opt/oxide/sled-agent/pkg/initial-tls-cert.pem" # -# Image-related tests use images served by catacomb. The lab network is -# IPv4-only; the propolis zones are IPv6-only. These steps set up tcpproxy -# configured to proxy to catacomb via port 54321 in the global zone. +# Download the Oxide CLI and images from catacomb. # pfexec mkdir -p /usr/oxide -pfexec rm -f /usr/oxide/tcpproxy -pfexec curl -sSfL -o /usr/oxide/tcpproxy \ - http://catacomb.eng.oxide.computer:12346/tcpproxy -pfexec chmod +x /usr/oxide/tcpproxy -pfexec rm -f /var/svc/manifest/site/tcpproxy.xml -pfexec curl -sSfL -o /var/svc/manifest/site/tcpproxy.xml \ - http://catacomb.eng.oxide.computer:12346/tcpproxy.xml -pfexec svccfg import /var/svc/manifest/site/tcpproxy.xml +pfexec curl -sSfL -o /usr/oxide/oxide \ + http://catacomb.eng.oxide.computer:12346/oxide-v0.1.0 +pfexec chmod +x /usr/oxide/oxide + +curl -sSfL -o debian-11-genericcloud-amd64.raw \ + http://catacomb.eng.oxide.computer:12346/debian-11-genericcloud-amd64.raw # # The lab-netdev target is a ramdisk system that is always cleared @@ -336,7 +332,38 @@ echo "Waited for chrony: ${retry}s" export RUST_BACKTRACE=1 export E2E_TLS_CERT IPPOOL_START IPPOOL_END -./tests/bootstrap +eval "$(./tests/bootstrap)" +export OXIDE_HOST OXIDE_TOKEN + +# +# The Nexus resolved in `$OXIDE_RESOLVE` is not necessarily the same one that we +# successfully talked to in bootstrap, so wait a bit for it to fully come online. +# +retry=0 +while ! 
curl -sSf "$OXIDE_HOST/v1/ping" --resolve "$OXIDE_RESOLVE" --cacert "$E2E_TLS_CERT"; do + if [[ $retry -gt 60 ]]; then + echo "$OXIDE_RESOLVE failed to come up after 60 seconds" + exit 1 + fi + sleep 1 + retry=$((retry + 1)) +done + +/usr/oxide/oxide --resolve "$OXIDE_RESOLVE" --cacert "$E2E_TLS_CERT" \ + project create --name images --description "some images" +/usr/oxide/oxide --resolve "$OXIDE_RESOLVE" --cacert "$E2E_TLS_CERT" \ + disk import \ + --path debian-11-genericcloud-amd64.raw \ + --disk debian11-boot \ + --project images \ + --description "debian 11 cloud image from distros" \ + --snapshot debian11-snapshot \ + --image debian11 \ + --image-description "debian 11 original base image" \ + --image-os debian \ + --image-version "11" +/usr/oxide/oxide --resolve "$OXIDE_RESOLVE" --cacert "$E2E_TLS_CERT" \ + image promote --project images --image debian11 rm ./tests/bootstrap for test_bin in tests/*; do diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index 0605ab6883..350ab37233 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -37,7 +37,7 @@ rustc --version # trampoline global zone images. # COMMIT=$(git rev-parse HEAD) -VERSION="1.0.4-0.ci+git${COMMIT:0:11}" +VERSION="5.0.0-0.ci+git${COMMIT:0:11}" echo "$VERSION" >/work/version.txt ptime -m ./tools/install_builder_prerequisites.sh -yp diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index cc67b91fce..b16f1ca9d7 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@ccc14bdc8d34cddf54e4f9fb2da0c208427207a3 # v2 + uses: taiki-e/install-action@6ee6c3ab83eab434138dfa928d72abc7eae14793 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date diff --git a/.github/workflows/update-maghemite.yml b/.github/workflows/update-maghemite.yml index b3611f9987..7ced0adf5e 100644 --- a/.github/workflows/update-maghemite.yml +++ b/.github/workflows/update-maghemite.yml @@ -47,7 +47,7 @@ jobs: - name: Extract new maghemite package version run: | - eval $(cat tools/maghemite_openapi_version | grep COMMIT) + eval $(cat tools/maghemite_mg_openapi_version | grep COMMIT) echo "version=${COMMIT:0:7}" >> $GITHUB_OUTPUT id: updated @@ -55,7 +55,7 @@ jobs: run: | . 
./tools/reflector/helpers.sh - PATHS=("tools/maghemite_openapi_version") + PATHS=("tools/maghemite_ddm_openapi_version" "tools/maghemite_mg_openapi_version" "tools/maghemite_mgd_checksums") CHANGES=() commit $TARGET_BRANCH $INT_BRANCH ${{ inputs.reflector_user_id }} PATHS CHANGES diff --git a/.github/workflows/validate-openapi-spec.yml b/.github/workflows/validate-openapi-spec.yml index 39c6c1debb..10f1dd5b46 100644 --- a/.github/workflows/validate-openapi-spec.yml +++ b/.github/workflows/validate-openapi-spec.yml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - - uses: actions/setup-node@1a4442cacd436585916779262731d5b162bc6ec7 # v3.8.2 + - uses: actions/setup-node@8f152de45cc393bb48ce5d89d36b731f54556e65 # v4.0.0 with: node-version: '18' - name: Install our tools diff --git a/Cargo.lock b/Cargo.lock index 39857def92..6143b84056 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -33,7 +33,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", ] @@ -54,13 +54,15 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "getrandom 0.2.10", "once_cell", "version_check", + "zerocopy 0.7.26", ] [[package]] @@ -250,7 +252,7 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-bb8-diesel" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/async-bb8-diesel?rev=1446f7e0c1f05f33a0581abd51fa873c7652ab61#1446f7e0c1f05f33a0581abd51fa873c7652ab61" +source = "git+https://github.com/oxidecomputer/async-bb8-diesel?rev=ed7ab5ef0513ba303d33efd41d3e9e381169d59b#ed7ab5ef0513ba303d33efd41d3e9e381169d59b" dependencies = [ "async-trait", "bb8", @@ -259,6 +261,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "async-recursion" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.32", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -313,7 +326,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4d45f362125ed144544e57b0ec6de8fd6a296d41a6252fc4a20c0cf12e9ed3a" dependencies = [ - "rustix 0.38.9", + "rustix 0.38.25", "tempfile", "windows-sys 0.48.0", ] @@ -370,7 +383,7 @@ checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object 0.32.1", @@ -383,12 +396,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - [[package]] name = "base64" version = "0.21.5" @@ -447,7 +454,7 @@ dependencies = [ [[package]] name = 
"bhyve_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=54398875a2125227d13827d4236dce943c019b1c#54398875a2125227d13827d4236dce943c019b1c" +source = "git+https://github.com/oxidecomputer/propolis?rev=f1571ce141421cff3d3328f43e7722f5df96fdda#f1571ce141421cff3d3328f43e7722f5df96fdda" dependencies = [ "bhyve_api_sys", "libc", @@ -457,7 +464,7 @@ dependencies = [ [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=54398875a2125227d13827d4236dce943c019b1c#54398875a2125227d13827d4236dce943c019b1c" +source = "git+https://github.com/oxidecomputer/propolis?rev=f1571ce141421cff3d3328f43e7722f5df96fdda#f1571ce141421cff3d3328f43e7722f5df96fdda" dependencies = [ "libc", "strum", @@ -743,9 +750,9 @@ dependencies = [ [[package]] name = "camino-tempfile" -version = "1.0.2" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ab15a83d13f75dbd86f082bdefd160b628476ef58d3b900a0ef74e001bb097" +checksum = "cb905055fa81e4d427f919b2cd0d76a998267de7d225ea767a1894743a5263c2" dependencies = [ "camino", "tempfile", @@ -775,9 +782,9 @@ dependencies = [ [[package]] name = "cargo_metadata" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb9ac64500cc83ce4b9f8dafa78186aa008c8dea77a09b94cd307fd0cd5022a8" +checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", @@ -837,10 +844,14 @@ dependencies = [ ] [[package]] -name = "cfg-if" -version = "0.1.10" +name = "cfg-expr" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +checksum = "03915af431787e6ffdcc74c645077518c6b6e01f80b761e0fbbfa288536311b3" +dependencies = [ + "smallvec 1.11.2", + "target-lexicon", +] [[package]] name = "cfg-if" @@ -854,7 +865,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", ] @@ -992,6 +1003,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" +[[package]] +name = "cobs" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" + [[package]] name = "colorchoice" version = "1.0.0" @@ -1114,7 +1131,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1179,7 +1196,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", @@ -1193,7 +1210,7 @@ version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", ] @@ -1203,7 +1220,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] @@ -1215,7 +1232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", "memoffset 0.9.0", "scopeguard", @@ -1227,7 +1244,7 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", ] @@ -1237,7 +1254,7 @@ version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1270,7 +1287,7 @@ dependencies = [ [[package]] name = "crucible-agent-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=51a3121c8318fc7ac97d74f917ce1d37962e785f#51a3121c8318fc7ac97d74f917ce1d37962e785f" +source = "git+https://github.com/oxidecomputer/crucible?rev=fab27994d0bd12725c17d6b478a9bfc2673ad6f4#fab27994d0bd12725c17d6b478a9bfc2673ad6f4" dependencies = [ "anyhow", "chrono", @@ -1286,7 +1303,7 @@ dependencies = [ [[package]] name = "crucible-pantry-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=51a3121c8318fc7ac97d74f917ce1d37962e785f#51a3121c8318fc7ac97d74f917ce1d37962e785f" +source = "git+https://github.com/oxidecomputer/crucible?rev=fab27994d0bd12725c17d6b478a9bfc2673ad6f4#fab27994d0bd12725c17d6b478a9bfc2673ad6f4" dependencies = [ "anyhow", "chrono", @@ -1303,7 +1320,7 @@ dependencies = [ [[package]] name = "crucible-smf" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=51a3121c8318fc7ac97d74f917ce1d37962e785f#51a3121c8318fc7ac97d74f917ce1d37962e785f" +source = "git+https://github.com/oxidecomputer/crucible?rev=fab27994d0bd12725c17d6b478a9bfc2673ad6f4#fab27994d0bd12725c17d6b478a9bfc2673ad6f4" dependencies = [ "crucible-workspace-hack", "libc", @@ -1353,7 +1370,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bd9c8e659a473bce955ae5c35b116af38af11a7acb0b480e01f3ed348aeb40" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "memchr", ] @@ -1372,7 +1389,7 @@ version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622178105f911d937a42cdb140730ba4a3ed2becd8ae6ce39c7d28b5d75d4588" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest", @@ -1465,19 +1482,6 @@ dependencies = [ "syn 2.0.32", ] -[[package]] -name = "dashmap" -version = "5.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edd72493923899c6f10c641bdbdeddc7183d6396641d99c1a0d1597f37f92e28" -dependencies = [ - "cfg-if 1.0.0", - "hashbrown 0.14.2", - "lock_api", - "once_cell", - "parking_lot_core 0.9.8", -] - [[package]] name = "data-encoding" version = "2.4.0" @@ -1539,6 +1543,38 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffe7ed1d93f4553003e20b629abe9085e1e81b1429520f897f8f8860bc6dfc21" +[[package]] +name = "defmt" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a2d011b2fee29fb7d659b83c43fce9a2cb4df453e16d441a51448e448f3f98" +dependencies = [ + 
"bitflags 1.3.2", + "defmt-macros", +] + +[[package]] +name = "defmt-macros" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54f0216f6c5acb5ae1a47050a6645024e6edafc2ee32d421955eccfef12ef92e" +dependencies = [ + "defmt-parser", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.32", +] + +[[package]] +name = "defmt-parser" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "269924c02afd7f94bc4cecbfa5c379f6ffcf9766b3408fe63d22c728654eccd0" +dependencies = [ + "thiserror", +] + [[package]] name = "der" version = "0.7.8" @@ -1574,9 +1610,9 @@ dependencies = [ [[package]] name = "derive-where" -version = "1.2.5" +version = "1.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146398d62142a0f35248a608f17edf0dde57338354966d6e41d0eb2d16980ccb" +checksum = "48d9b1fc2a6d7e19c89e706a3769e31ee862ac7a4c810c7c0ff3910e1a42a4ce" dependencies = [ "proc-macro2", "quote", @@ -1660,9 +1696,9 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e054665eaf6d97d1e7125512bb2d35d07c73ac86cc6920174cb42d1ab697a554" +checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" dependencies = [ "diesel_table_macro_syntax", "proc-macro2", @@ -1718,7 +1754,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "dirs-sys-next", ] @@ -1779,8 +1815,8 @@ dependencies = [ "omicron-test-utils", "omicron-workspace-hack", "openapi-lint", - "openapiv3 1.0.3", - "pretty-hex 0.3.0", + "openapiv3", + "pretty-hex 0.4.0", "schemars", "serde", "serde_json", @@ -1871,7 +1907,7 @@ source = "git+https://github.com/oxidecomputer/dropshot?branch=main#ff87a0175a6c dependencies = [ "async-stream", "async-trait", - "base64 0.21.5", + "base64", "bytes", "camino", "chrono", @@ -1884,7 +1920,7 @@ dependencies = [ "hyper", "indexmap 2.1.0", "multer", - "openapiv3 2.0.0-rc.1", + "openapiv3", "paste", "percent-encoding", "proc-macro2", @@ -1939,6 +1975,20 @@ version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbfc4744c1b8f2a09adc0e55242f60b1af195d88596bd8700be74418c056c555" +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest", + "elliptic-curve", + "rfc6979", + "signature 2.1.0", + "spki", +] + [[package]] name = "ed25519" version = "1.5.3" @@ -1980,9 +2030,9 @@ checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "elliptic-curve" -version = "0.13.5" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "968405c8fdc9b3bf4df0a6638858cc0b52462836ab6b1c87377785dd09cf1c0b" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ "base16ct", "crypto-bigint", @@ -1991,11 +2041,20 @@ dependencies = [ "generic-array", "group", "hkdf", + "pem-rfc7468", + "pkcs8", "rand_core 0.6.4", + "sec1", "subtle", "zeroize", ] +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + [[package]] name = "ena" version = "0.14.2" @@ -2017,7 +2076,7 @@ version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -2026,9 +2085,10 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "base64 0.21.5", + "base64", "chrono", "http", + "hyper", "omicron-sled-agent", "omicron-test-utils", "omicron-workspace-hack", @@ -2037,9 +2097,12 @@ dependencies = [ "reqwest", "russh", "russh-keys", + "serde", + "serde_json", "tokio", "toml 0.8.8", "trust-dns-resolver", + "uuid", ] [[package]] @@ -2139,8 +2202,8 @@ version = "3.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5" dependencies = [ - "cfg-if 1.0.0", - "rustix 0.38.9", + "cfg-if", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -2162,14 +2225,14 @@ checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" [[package]] name = "filetime" -version = "0.2.22" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4029edd3e734da6fe05b6cd7bd2960760a616bd2ddd0d59a0124746d6272af0" +checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "redox_syscall 0.3.5", - "windows-sys 0.48.0", + "redox_syscall 0.4.1", + "windows-sys 0.52.0", ] [[package]] @@ -2265,9 +2328,9 @@ checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -2280,9 +2343,9 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "fs-err" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5fd9bcbe8b1087cbd395b51498c01bc997cef73e778a80b77a811af5e2d29f" +checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" dependencies = [ "autocfg", ] @@ -2432,7 +2495,7 @@ dependencies = [ "slog-term", "termios", "tokio", - "tokio-tungstenite 0.18.0", + "tokio-tungstenite", "uuid", ] @@ -2440,7 +2503,7 @@ dependencies = [ name = "gateway-client" version = "0.1.0" dependencies = [ - "base64 0.21.5", + "base64", "chrono", "gateway-messages", "omicron-workspace-hack", @@ -2490,7 +2553,7 @@ dependencies = [ "serde", "serde-big-array 0.5.1", "slog", - "socket2 0.5.4", + "socket2 0.5.5", "string_cache", "thiserror", "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc.git?branch=main)", @@ -2543,7 +2606,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -2554,7 +2617,7 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi 
0.11.0+wasi-snapshot-preview1", @@ -2607,6 +2670,39 @@ dependencies = [ "subtle", ] +[[package]] +name = "guppy" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "114a100a9aa9f4c468a7b9e96626cdab267bb652660d8408e8f6d56d4c310edd" +dependencies = [ + "ahash", + "camino", + "cargo_metadata", + "cfg-if", + "debug-ignore", + "fixedbitset", + "guppy-workspace-hack", + "indexmap 2.1.0", + "itertools 0.12.0", + "nested", + "once_cell", + "pathdiff", + "petgraph", + "semver 1.0.20", + "serde", + "serde_json", + "smallvec 1.11.2", + "static_assertions", + "target-spec", +] + +[[package]] +name = "guppy-workspace-hack" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92620684d99f750bae383ecb3be3748142d6095760afd5cbcf2261e9a279d780" + [[package]] name = "h2" version = "0.3.21" @@ -2668,7 +2764,7 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64 0.21.5", + "base64", "bytes", "headers-core", "http", @@ -3029,9 +3125,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -3050,7 +3146,7 @@ dependencies = [ [[package]] name = "illumos-sys-hdrs" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=258a8b59902dd36fc7ee5425e6b1fb5fc80d4649#258a8b59902dd36fc7ee5425e6b1fb5fc80d4649" +source = "git+https://github.com/oxidecomputer/opte?rev=24ceba1969269e4d81bda83d8968d7d7f713c46b#24ceba1969269e4d81bda83d8968d7d7f713c46b" [[package]] name = "illumos-utils" @@ -3061,7 +3157,8 @@ dependencies = [ "bhyve_api", "byteorder", "camino", - "cfg-if 1.0.0", + "camino-tempfile", + "cfg-if", "crucible-smf", "futures", "ipnetwork", @@ -3233,7 +3330,7 @@ dependencies = [ "omicron-test-utils", "omicron-workspace-hack", "openapi-lint", - "openapiv3 1.0.3", + "openapiv3", "schemars", "serde", "serde_json", @@ -3249,12 +3346,16 @@ dependencies = [ "anyhow", "camino", "illumos-utils", + "libc", "omicron-workspace-hack", + "proptest", "schemars", "serde", "serde_json", "serde_with", + "test-strategy", "thiserror", + "tokio", "update-engine", ] @@ -3264,7 +3365,7 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -3344,7 +3445,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.4", + "socket2 0.5.5", "widestring", "windows-sys 0.48.0", "winreg", @@ -3373,7 +3474,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", - "rustix 0.38.9", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -3453,10 +3554,10 @@ dependencies = [ [[package]] name = "kstat-macro" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=258a8b59902dd36fc7ee5425e6b1fb5fc80d4649#258a8b59902dd36fc7ee5425e6b1fb5fc80d4649" +source = 
"git+https://github.com/oxidecomputer/opte?rev=24ceba1969269e4d81bda83d8968d7d7f713c46b#24ceba1969269e4d81bda83d8968d7d7f713c46b" dependencies = [ "quote", - "syn 1.0.109", + "syn 2.0.32", ] [[package]] @@ -3546,7 +3647,7 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "winapi", ] @@ -3562,7 +3663,7 @@ version = "0.1.0" source = "git+https://github.com/oxidecomputer/netadm-sys#f114bd0d543d886cd453932e9f0967de57289bc2" dependencies = [ "anyhow", - "cfg-if 1.0.0", + "cfg-if", "colored", "dlpi", "libc", @@ -3626,9 +3727,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.5" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "lock_api" @@ -3825,9 +3926,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", "log", @@ -3841,7 +3942,7 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "downcast", "fragile", "lazy_static", @@ -3856,7 +3957,7 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "proc-macro2", "quote", "syn 1.0.109", @@ -3907,6 +4008,12 @@ dependencies = [ "tempfile", ] +[[package]] +name = "nested" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b420f638f07fe83056b55ea190bb815f609ec5a35e7017884a10f78839c9e" + [[package]] name = "new_debug_unreachable" version = "1.0.4" @@ -3982,6 +4089,7 @@ dependencies = [ "sled-agent-client", "steno", "strum", + "thiserror", "uuid", ] @@ -3994,9 +4102,10 @@ dependencies = [ "async-bb8-diesel", "async-trait", "authz-macros", - "base64 0.21.5", + "base64", "bb8", "camino", + "camino-tempfile", "chrono", "cookie", "db-macros", @@ -4026,14 +4135,15 @@ dependencies = [ "omicron-sled-agent", "omicron-test-utils", "omicron-workspace-hack", - "openapiv3 1.0.3", + "openapiv3", "openssl", "oso", "oximeter", "paste", - "pem 1.1.1", + "pem", "petgraph", "pq-sys", + "rand 0.8.5", "rcgen", "ref-cast", "regex", @@ -4049,7 +4159,6 @@ dependencies = [ "steno", "strum", "subprocess", - "tempfile", "term", "thiserror", "tokio", @@ -4074,6 +4183,7 @@ name = "nexus-inventory" version = "0.1.0" dependencies = [ "anyhow", + "base64", "chrono", "expectorate", "gateway-client", @@ -4153,7 +4263,7 @@ version = "0.1.0" dependencies = [ "anyhow", "api_identity", - "base64 0.21.5", + "base64", "chrono", "dns-service-client", "futures", @@ -4177,7 +4287,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" dependencies = [ - "smallvec 1.11.0", + "smallvec 1.11.2", ] [[package]] @@ 
-4186,7 +4296,7 @@ version = "0.26.2" source = "git+https://github.com/jgallagher/nix?branch=r0.26-illumos#c1a3636db0524f194b714cfd117cd9b637b8b10e" dependencies = [ "bitflags 1.3.2", - "cfg-if 1.0.0", + "cfg-if", "libc", "memoffset 0.7.1", "pin-utils", @@ -4263,7 +4373,7 @@ dependencies = [ "num-traits", "rand 0.8.5", "serde", - "smallvec 1.11.0", + "smallvec 1.11.2", "zeroize", ] @@ -4533,7 +4643,7 @@ name = "omicron-gateway" version = "0.1.0" dependencies = [ "anyhow", - "base64 0.21.5", + "base64", "clap 4.4.3", "dropshot", "expectorate", @@ -4551,7 +4661,7 @@ dependencies = [ "omicron-workspace-hack", "once_cell", "openapi-lint", - "openapiv3 1.0.3", + "openapiv3", "schemars", "serde", "serde_json", @@ -4564,7 +4674,7 @@ dependencies = [ "thiserror", "tokio", "tokio-stream", - "tokio-tungstenite 0.18.0", + "tokio-tungstenite", "toml 0.8.8", "uuid", ] @@ -4577,7 +4687,8 @@ dependencies = [ "assert_matches", "async-bb8-diesel", "async-trait", - "base64 0.21.5", + "base64", + "buf-list", "camino", "cancel-safe-futures", "chrono", @@ -4627,7 +4738,7 @@ dependencies = [ "omicron-workspace-hack", "once_cell", "openapi-lint", - "openapiv3 1.0.3", + "openapiv3", "openssl", "oxide-client", "oximeter", @@ -4637,7 +4748,7 @@ dependencies = [ "oximeter-producer", "parse-display", "paste", - "pem 1.1.1", + "pem", "petgraph", "pq-sys", "pretty_assertions", @@ -4648,7 +4759,7 @@ dependencies = [ "ref-cast", "regex", "reqwest", - "ring 0.16.20", + "ring 0.17.7", "rustls", "samael", "schemars", @@ -4719,6 +4830,7 @@ dependencies = [ "tabled", "textwrap 0.16.0", "tokio", + "unicode-width", "uuid", ] @@ -4738,7 +4850,7 @@ dependencies = [ "petgraph", "rayon", "reqwest", - "ring 0.16.20", + "ring 0.17.7", "semver 1.0.20", "serde", "sled-hardware", @@ -4785,19 +4897,20 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "base64 0.21.5", + "base64", "bootstore", "bootstrap-agent-client", "bytes", "camino", "camino-tempfile", "cancel-safe-futures", - "cfg-if 1.0.0", + "cfg-if", "chrono", "clap 4.4.3", "crucible-agent-client", "ddm-admin-client", "derive_more", + "display-error-chain", "dns-server", "dns-service-client", "dpd-client", @@ -4807,10 +4920,13 @@ dependencies = [ "futures", "gateway-client", "glob", + "guppy", + "hex", "http", "hyper", "hyper-staticfile", "illumos-utils", + "installinator-common", "internal-dns", "ipnetwork", "itertools 0.12.0", @@ -4824,7 +4940,7 @@ dependencies = [ "omicron-workspace-hack", "once_cell", "openapi-lint", - "openapiv3 1.0.3", + "openapiv3", "opte-ioctl", "oximeter", "oximeter-instruments", @@ -4838,8 +4954,8 @@ dependencies = [ "schemars", "semver 1.0.20", "serde", + "serde_human_bytes", "serde_json", - "serial_test", "sha3", "sled-agent-client", "sled-hardware", @@ -4856,6 +4972,8 @@ dependencies = [ "thiserror", "tofino", "tokio", + "tokio-stream", + "tokio-util", "toml 0.8.8", "usdt", "uuid", @@ -4880,11 +4998,11 @@ dependencies = [ "libc", "omicron-common", "omicron-workspace-hack", - "pem 1.1.1", + "pem", "rcgen", "regex", "reqwest", - "ring 0.16.20", + "ring 0.17.7", "rustls", "slog", "subprocess", @@ -4900,7 +5018,9 @@ dependencies = [ name = "omicron-workspace-hack" version = "0.1.0" dependencies = [ + "ahash", "anyhow", + "base16ct", "bit-set", "bit-vec", "bitflags 1.3.2", @@ -4919,9 +5039,13 @@ dependencies = [ "crossbeam-utils", "crossterm", "crypto-common", + "der", "diesel", "digest", "either", + "elliptic-curve", + "errno", + "ff", "flate2", "futures", "futures-channel", @@ -4933,8 +5057,10 @@ dependencies = [ "gateway-messages", 
"generic-array", "getrandom 0.2.10", + "group", "hashbrown 0.13.2", "hex", + "hmac", "hyper", "hyper-rustls", "indexmap 2.1.0", @@ -4953,7 +5079,8 @@ dependencies = [ "num-iter", "num-traits", "once_cell", - "openapiv3 2.0.0-rc.1", + "openapiv3", + "pem-rfc7468", "petgraph", "postgres-types", "ppv-lite86", @@ -4965,16 +5092,16 @@ dependencies = [ "regex-automata 0.4.3", "regex-syntax 0.8.2", "reqwest", - "ring 0.16.20", - "rustix 0.38.9", + "ring 0.17.7", + "rustix 0.38.25", "schemars", "semver 1.0.20", "serde", "serde_json", "sha2", - "signature 2.1.0", "similar", "slog", + "snafu", "spin 0.9.8", "string_cache", "subtle", @@ -4985,6 +5112,7 @@ dependencies = [ "tokio", "tokio-postgres", "tokio-stream", + "tokio-util", "toml 0.7.8", "toml_datetime", "toml_edit 0.19.15", @@ -4996,6 +5124,7 @@ dependencies = [ "usdt", "uuid", "yasna", + "zerocopy 0.7.26", "zeroize", "zip", ] @@ -5028,9 +5157,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -5047,31 +5176,20 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openapi-lint" version = "0.4.0" -source = "git+https://github.com/oxidecomputer/openapi-lint?branch=main#bb69a3a4a184d966bac2a0df2be5c9038d9867d0" +source = "git+https://github.com/oxidecomputer/openapi-lint?branch=main#ef442ee4343e97b6d9c217d3e7533962fe7d7236" dependencies = [ "heck 0.4.1", "indexmap 2.1.0", "lazy_static", - "openapiv3 1.0.3", + "openapiv3", "regex", ] [[package]] name = "openapiv3" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75e56d5c441965b6425165b7e3223cc933ca469834f4a8b4786817a1f9dc4f13" -dependencies = [ - "indexmap 2.1.0", - "serde", - "serde_json", -] - -[[package]] -name = "openapiv3" -version = "2.0.0-rc.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25316406f0191559189c56d99731b63130775de7284d98df5e976ce67882ca8a" +checksum = "cc02deea53ffe807708244e5914f6b099ad7015a207ee24317c22112e17d9c5c" dependencies = [ "indexmap 2.1.0", "serde", @@ -5080,12 +5198,12 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.57" +version = "0.10.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" +checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" dependencies = [ "bitflags 2.4.0", - "cfg-if 1.0.0", + "cfg-if", "foreign-types 0.3.2", "libc", "once_cell", @@ -5112,9 +5230,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.93" +version = "0.9.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" +checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" dependencies = [ "cc", "libc", @@ -5125,37 +5243,35 @@ dependencies = [ [[package]] name = "opte" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=258a8b59902dd36fc7ee5425e6b1fb5fc80d4649#258a8b59902dd36fc7ee5425e6b1fb5fc80d4649" +source = 
"git+https://github.com/oxidecomputer/opte?rev=24ceba1969269e4d81bda83d8968d7d7f713c46b#24ceba1969269e4d81bda83d8968d7d7f713c46b" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", "dyn-clone", "illumos-sys-hdrs", "kstat-macro", "opte-api", "postcard", "serde", - "smoltcp 0.8.2", + "smoltcp 0.10.0", "version_check", - "zerocopy 0.6.4", ] [[package]] name = "opte-api" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=258a8b59902dd36fc7ee5425e6b1fb5fc80d4649#258a8b59902dd36fc7ee5425e6b1fb5fc80d4649" +source = "git+https://github.com/oxidecomputer/opte?rev=24ceba1969269e4d81bda83d8968d7d7f713c46b#24ceba1969269e4d81bda83d8968d7d7f713c46b" dependencies = [ - "cfg-if 0.1.10", "illumos-sys-hdrs", "ipnetwork", "postcard", "serde", - "smoltcp 0.8.2", + "smoltcp 0.10.0", ] [[package]] name = "opte-ioctl" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=258a8b59902dd36fc7ee5425e6b1fb5fc80d4649#258a8b59902dd36fc7ee5425e6b1fb5fc80d4649" +source = "git+https://github.com/oxidecomputer/opte?rev=24ceba1969269e4d81bda83d8968d7d7f713c46b#24ceba1969269e4d81bda83d8968d7d7f713c46b" dependencies = [ "libc", "libnet", @@ -5208,7 +5324,7 @@ name = "oxide-client" version = "0.1.0" dependencies = [ "anyhow", - "base64 0.21.5", + "base64", "chrono", "futures", "http", @@ -5229,14 +5345,13 @@ dependencies = [ [[package]] name = "oxide-vpc" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=258a8b59902dd36fc7ee5425e6b1fb5fc80d4649#258a8b59902dd36fc7ee5425e6b1fb5fc80d4649" +source = "git+https://github.com/oxidecomputer/opte?rev=24ceba1969269e4d81bda83d8968d7d7f713c46b#24ceba1969269e4d81bda83d8968d7d7f713c46b" dependencies = [ - "cfg-if 0.1.10", "illumos-sys-hdrs", "opte", "serde", - "smoltcp 0.8.2", - "zerocopy 0.6.4", + "smoltcp 0.10.0", + "zerocopy 0.7.26", ] [[package]] @@ -5250,9 +5365,11 @@ dependencies = [ "omicron-common", "omicron-workspace-hack", "oximeter-macro-impl", + "regex", "rstest", "schemars", "serde", + "serde_json", "strum", "thiserror", "trybuild", @@ -5293,7 +5410,7 @@ dependencies = [ "omicron-test-utils", "omicron-workspace-hack", "openapi-lint", - "openapiv3 1.0.3", + "openapiv3", "oximeter", "oximeter-client", "oximeter-db", @@ -5354,7 +5471,7 @@ dependencies = [ name = "oximeter-instruments" version = "0.1.0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "chrono", "dropshot", "futures", @@ -5402,6 +5519,18 @@ dependencies = [ "uuid", ] +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2", +] + [[package]] name = "packed_struct" version = "0.10.1" @@ -5462,11 +5591,11 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall 0.2.16", - "smallvec 1.11.0", + "smallvec 1.11.2", "winapi", ] @@ -5476,10 +5605,10 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall 0.3.5", - "smallvec 1.11.0", + "smallvec 1.11.2", "windows-targets 0.48.5", ] @@ -5550,29 +5679,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] -name = "path-absolutize" -version = "3.1.0" +name = "path-slash" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43eb3595c63a214e1b37b44f44b0a84900ef7ae0b4c5efce59e123d246d7a0de" -dependencies = [ - "path-dedot", -] +checksum = "498a099351efa4becc6a19c72aa9270598e8fd274ca47052e37455241c88b696" [[package]] -name = "path-dedot" -version = "3.1.0" +name = "pathdiff" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d55e486337acb9973cdea3ec5638c1b3bcb22e573b2b7b41969e0c744d5a15e" +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" dependencies = [ - "once_cell", + "camino", ] -[[package]] -name = "path-slash" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "498a099351efa4becc6a19c72aa9270598e8fd274ca47052e37455241c88b696" - [[package]] name = "pbkdf2" version = "0.11.0" @@ -5600,22 +5720,13 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" -[[package]] -name = "pem" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" -dependencies = [ - "base64 0.13.1", -] - [[package]] name = "pem" version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923" dependencies = [ - "base64 0.21.5", + "base64", "serde", ] @@ -5630,9 +5741,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" @@ -5843,7 +5954,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "opaque-debug", "universal-hash", @@ -5857,27 +5968,22 @@ checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" [[package]] name = "postcard" -version = "0.7.3" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a25c0b0ae06fcffe600ad392aabfa535696c8973f2253d9ac83171924c58a858" +checksum = "a55c51ee6c0db07e68448e336cf8ea4131a620edefebf9893e759b2d793420f8" dependencies = [ - "postcard-cobs", + "cobs", + "embedded-io", "serde", ] -[[package]] -name = "postcard-cobs" -version = "0.1.5-pre" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c68cb38ed13fd7bc9dd5db8f165b7c8d9c1a315104083a2b10f11354c2af97f" - [[package]] name = "postgres-protocol" version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520" dependencies = [ - "base64 0.21.5", + "base64", "byteorder", "bytes", "fallible-iterator", @@ -5977,9 +6083,9 @@ checksum = "bc5c99d529f0d30937f6f4b8a86d988047327bb88d04d2c4afc356de74722131" [[package]] name = "pretty-hex" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c6fa0831dd7cc608c38a5e323422a0077678fa5744aa2be4ad91c4ece8eec8d5" +checksum = "23c6b968ed37d62e35b4febaba13bfa231b0b7929d68b8a94e65445a17e2d35f" [[package]] name = "pretty_assertions" @@ -6001,6 +6107,15 @@ dependencies = [ "syn 2.0.32", ] +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", +] + [[package]] name = "proc-macro-crate" version = "1.3.1" @@ -6078,7 +6193,7 @@ dependencies = [ "heck 0.4.1", "http", "indexmap 2.1.0", - "openapiv3 2.0.0-rc.1", + "openapiv3", "proc-macro2", "quote", "regex", @@ -6096,7 +6211,7 @@ name = "progenitor-macro" version = "0.4.0" source = "git+https://github.com/oxidecomputer/progenitor?branch=main#9339b57628e1e76b1d7131ef93a6c0db2ab0a762" dependencies = [ - "openapiv3 2.0.0-rc.1", + "openapiv3", "proc-macro2", "progenitor-impl", "quote", @@ -6111,10 +6226,10 @@ dependencies = [ [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=54398875a2125227d13827d4236dce943c019b1c#54398875a2125227d13827d4236dce943c019b1c" +source = "git+https://github.com/oxidecomputer/propolis?rev=f1571ce141421cff3d3328f43e7722f5df96fdda#f1571ce141421cff3d3328f43e7722f5df96fdda" dependencies = [ "async-trait", - "base64 0.21.5", + "base64", "futures", "progenitor", "rand 0.8.5", @@ -6125,18 +6240,18 @@ dependencies = [ "slog", "thiserror", "tokio", - "tokio-tungstenite 0.20.1", + "tokio-tungstenite", "uuid", ] [[package]] name = "propolis-mock-server" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=54398875a2125227d13827d4236dce943c019b1c#54398875a2125227d13827d4236dce943c019b1c" +source = "git+https://github.com/oxidecomputer/propolis?rev=f1571ce141421cff3d3328f43e7722f5df96fdda#f1571ce141421cff3d3328f43e7722f5df96fdda" dependencies = [ "anyhow", "atty", - "base64 0.21.5", + "base64", "clap 4.4.3", "dropshot", "futures", @@ -6155,14 +6270,14 @@ dependencies = [ "slog-term", "thiserror", "tokio", - "tokio-tungstenite 0.20.1", + "tokio-tungstenite", "uuid", ] [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=54398875a2125227d13827d4236dce943c019b1c#54398875a2125227d13827d4236dce943c019b1c" +source = "git+https://github.com/oxidecomputer/propolis?rev=f1571ce141421cff3d3328f43e7722f5df96fdda#f1571ce141421cff3d3328f43e7722f5df96fdda" dependencies = [ "schemars", "serde", @@ -6391,7 +6506,7 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" dependencies = [ - "pem 3.0.2", + "pem", "ring 0.16.20", "time", "yasna", @@ -6424,6 +6539,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.3" @@ -6559,7 +6683,7 @@ version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ - "base64 0.21.5", + "base64", "bytes", "encoding_rs", "futures-core", @@ -6608,6 +6732,16 @@ dependencies = [ "quick-error", ] +[[package]] +name = "rfc6979" +version = "0.4.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + [[package]] name = "ring" version = "0.16.20" @@ -6625,9 +6759,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.5" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", "getrandom 0.2.10", @@ -6643,7 +6777,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ - "base64 0.21.5", + "base64", "bitflags 2.4.0", "serde", "serde_derive", @@ -6651,13 +6785,13 @@ dependencies = [ [[package]] name = "rpassword" -version = "7.2.0" +version = "7.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6678cf63ab3491898c0d021b493c94c9b221d91295294a2a5746eacbe5928322" +checksum = "80472be3c897911d0137b2d2b9055faf6eeac5b14e324073d83bc17b191d7e3f" dependencies = [ "libc", "rtoolbox", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -6702,7 +6836,7 @@ version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "glob", "proc-macro2", "quote", @@ -6725,9 +6859,9 @@ dependencies = [ [[package]] name = "russh" -version = "0.39.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7878311587d0353a854d5be954fbe68bdf6e77873933b484d1e45db12bb2f8cf" +checksum = "98bee7ebcce06bfc40a46b9d90205c6132d899bb9095c5ce9da3cdad8ec0833d" dependencies = [ "aes", "aes-gcm", @@ -6770,9 +6904,9 @@ dependencies = [ [[package]] name = "russh-keys" -version = "0.38.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "557ab9190022dff78116ebed5e391abbd3f424b06cd643dfe262346ab91ed8c9" +checksum = "3b5d5a656fe1c3024d829d054cd8c0c78dc831e4b2d4b08360569c3b38f3017f" dependencies = [ "aes", "async-trait", @@ -6792,11 +6926,13 @@ dependencies = [ "md5", "num-bigint", "num-integer", + "p256", "pbkdf2 0.11.0", "rand 0.7.3", "rand_core 0.6.4", "russh-cryptovec", "serde", + "sha1", "sha2", "thiserror", "tokio", @@ -6810,7 +6946,7 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5885493fdf0be6cdff808d1533ce878d21cfa49c7086fa00c66355cd9141bfc" dependencies = [ - "base64 0.21.5", + "base64", "blake2b_simd", "constant_time_eq 0.3.0", "crossbeam-utils", @@ -6875,14 +7011,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.9" +version = "0.38.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bfe0f2582b4931a45d1fa608f8a8722e8b3c7ac54dd6d5f3b3212791fedef49" +checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" dependencies = [ "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.5", + "linux-raw-sys 0.4.11", "windows-sys 0.48.0", ] @@ -6893,7 +7029,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", - "ring 0.17.5", + "ring 0.17.7", "rustls-webpki", "sct", ] @@ -6916,7 +7052,7 @@ version = "1.0.3" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.5", + "base64", ] [[package]] @@ -6925,7 +7061,7 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.5", + "ring 0.17.7", "untrusted 0.9.0", ] @@ -6987,7 +7123,7 @@ name = "samael" version = "0.0.10" source = "git+https://github.com/njaremko/samael?branch=master#52028e45d11ceb7114bf0c730a9971207e965602" dependencies = [ - "base64 0.21.5", + "base64", "bindgen", "chrono", "data-encoding", @@ -7078,6 +7214,20 @@ dependencies = [ "untrusted 0.7.1", ] +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", +] + [[package]] name = "secrecy" version = "0.8.0" @@ -7145,9 +7295,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.192" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] @@ -7183,9 +7333,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.192" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", @@ -7299,14 +7449,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.3.3" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ff71d2c147a7b57362cead5e22f772cd52f6ab31cfcd9edcd7f6aeb2a0afbe" +checksum = "64cd236ccc1b7a29e7e2739f27c0b2dd199804abc4290e32f59f3b68d6405c23" dependencies = [ - "base64 0.13.1", + "base64", "chrono", "hex", "indexmap 1.9.3", + "indexmap 2.1.0", "serde", "serde_json", "serde_with_macros", @@ -7315,9 +7466,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "2.3.3" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" +checksum = "93634eb5f75a2323b16de4748022ac4297f9e76b6dced2be287a099f41b5e788" dependencies = [ "darling 0.20.3", "proc-macro2", @@ -7338,38 +7489,13 @@ dependencies = [ "unsafe-libyaml", ] -[[package]] -name = "serial_test" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c789ec87f4687d022a2405cf46e0cd6284889f1839de292cadeb6c6019506f2" -dependencies = [ - "dashmap", - "futures", - "lazy_static", - "log", - "parking_lot 0.12.1", - "serial_test_derive", -] - -[[package]] -name = "serial_test_derive" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b64f9e531ce97c88b4778aad0ceee079216071cffec6ac9b904277f8f92e7fe3" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "sha1" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest", ] @@ -7380,7 +7506,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest", ] @@ -7467,9 +7593,9 @@ dependencies = [ [[package]] name = "similar" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf" +checksum = "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597" dependencies = [ "bstr 0.2.17", "unicode-segmentation", @@ -7528,6 +7654,7 @@ dependencies = [ "progenitor", "regress", "reqwest", + "schemars", "serde", "sled-storage", "slog", @@ -7540,7 +7667,7 @@ version = "0.1.0" dependencies = [ "anyhow", "camino", - "cfg-if 1.0.0", + "cfg-if", "futures", "illumos-devinfo", "illumos-utils", @@ -7553,7 +7680,6 @@ dependencies = [ "rand 0.8.5", "schemars", "serde", - "serial_test", "slog", "thiserror", "tofino", @@ -7568,7 +7694,7 @@ dependencies = [ "async-trait", "camino", "camino-tempfile", - "cfg-if 1.0.0", + "cfg-if", "derive_more", "glob", "illumos-utils", @@ -7704,9 +7830,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.0" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" [[package]] name = "smawk" @@ -7725,24 +7851,27 @@ dependencies = [ [[package]] name = "smoltcp" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee34c1e1bfc7e9206cc0fb8030a90129b4e319ab53856249bb27642cab914fb3" +checksum = "7e9786ac45091b96f946693e05bfa4d8ca93e2d3341237d97a380107a6b38dea" dependencies = [ "bitflags 1.3.2", "byteorder", + "cfg-if", + "heapless", "managed", ] [[package]] name = "smoltcp" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9786ac45091b96f946693e05bfa4d8ca93e2d3341237d97a380107a6b38dea" +checksum = "8d2e3a36ac8fea7b94e666dfa3871063d6e0a5c9d5d4fec9a1a6b7b6760f0229" dependencies = [ "bitflags 1.3.2", "byteorder", - "cfg-if 1.0.0", + "cfg-if", + "defmt", "heapless", "managed", ] @@ -7754,6 +7883,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4de37ad025c587a29e8f3f5605c00f70b98715ef90b9061a815b9e59e9042d6" dependencies = [ "doc-comment", + "futures-core", + "pin-project", "snafu-derive", ] @@ -7781,9 +7912,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys 0.48.0", @@ -8159,6 +8290,24 @@ dependencies = [ "xattr", ] +[[package]] +name = "target-lexicon" +version = "0.12.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c39fd04924ca3a864207c66fc2cd7d22d7c016007f9ce846cbb9326331930a" + +[[package]] +name = "target-spec" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"48b81540ee78bd9de9f7dca2378f264cf1f4193da6e2d09b54c0d595131a48f1" +dependencies = [ + "cfg-expr", + "guppy-workspace-hack", + "target-lexicon", + "unicode-ident", +] + [[package]] name = "tempdir" version = "0.3.7" @@ -8171,14 +8320,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", - "redox_syscall 0.3.5", - "rustix 0.38.9", + "redox_syscall 0.4.1", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -8316,7 +8465,7 @@ version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] @@ -8438,9 +8587,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.33.0" +version = "1.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "841d45b238a16291a4e1584e61820b8ae57d696cc5015c459c229ccc6990cc1c" dependencies = [ "backtrace", "bytes", @@ -8450,16 +8599,16 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.4", + "socket2 0.5.5", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", @@ -8496,7 +8645,7 @@ dependencies = [ "postgres-protocol", "postgres-types", "rand 0.8.5", - "socket2 0.5.4", + "socket2 0.5.5", "tokio", "tokio-util", "whoami", @@ -8523,18 +8672,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-tungstenite" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" -dependencies = [ - "futures-util", - "log", - "tokio", - "tungstenite 0.18.0", -] - [[package]] name = "tokio-tungstenite" version = "0.20.1" @@ -8544,7 +8681,7 @@ dependencies = [ "futures-util", "log", "tokio", - "tungstenite 0.20.1", + "tungstenite", ] [[package]] @@ -8650,27 +8787,34 @@ checksum = "ea68304e134ecd095ac6c3574494fc62b909f416c4fca77e440530221e549d3d" [[package]] name = "tough" -version = "0.14.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda3efa9005cf9c1966984c3b9a44c3f37b7ed2c95ba338d6ad51bba70e989a0" +checksum = "49455926f64001de53ef047c2053e2f17440e412b8b1e958d4ad8a6008db7128" dependencies = [ + "async-recursion", + "async-trait", + "bytes", "chrono", "dyn-clone", + "futures", + "futures-core", "globset", "hex", "log", "olpc-cjson", - "path-absolutize", - "pem 1.1.1", + "pem", "percent-encoding", "reqwest", - "ring 0.16.20", + "ring 0.17.7", "serde", "serde_json", "serde_plain", "snafu", "tempfile", - "untrusted 0.7.1", + "tokio", + "tokio-util", + "typed-path", + "untrusted 0.9.0", "url", "walkdir", ] @@ -8687,7 +8831,7 @@ version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" 
dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "log", "pin-project-lite", "tracing-attributes", @@ -8720,7 +8864,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c408c32e6a9dbb38037cece35740f2cf23c875d8ca134d33631cec83f74d3fe" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "data-encoding", "futures-channel", "futures-util", @@ -8741,7 +8885,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" dependencies = [ "async-trait", - "cfg-if 1.0.0", + "cfg-if", "data-encoding", "enum-as-inner", "futures-channel", @@ -8751,7 +8895,7 @@ dependencies = [ "ipnet", "lazy_static", "rand 0.8.5", - "smallvec 1.11.0", + "smallvec 1.11.2", "thiserror", "tinyvec", "tokio", @@ -8765,14 +8909,14 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "futures-util", "ipconfig", "lazy_static", "lru-cache", "parking_lot 0.12.1", "resolv-conf", - "smallvec 1.11.0", + "smallvec 1.11.2", "thiserror", "tokio", "tracing", @@ -8787,7 +8931,7 @@ checksum = "99022f9befa6daec2a860be68ac28b1f0d9d7ccf441d8c5a695e35a58d88840d" dependencies = [ "async-trait", "bytes", - "cfg-if 1.0.0", + "cfg-if", "enum-as-inner", "futures-executor", "futures-util", @@ -8844,6 +8988,7 @@ dependencies = [ "slog-envlogger", "slog-term", "tempfile", + "tokio", "tufaceous-lib", ] @@ -8852,6 +8997,7 @@ name = "tufaceous-lib" version = "0.1.0" dependencies = [ "anyhow", + "async-trait", "buf-list", "bytes", "bytesize", @@ -8861,6 +9007,7 @@ dependencies = [ "debug-ignore", "flate2", "fs-err", + "futures", "hex", "hubtools", "itertools 0.12.0", @@ -8868,13 +9015,14 @@ dependencies = [ "omicron-test-utils", "omicron-workspace-hack", "rand 0.8.5", - "ring 0.16.20", + "ring 0.17.7", "serde", "serde_json", "serde_path_to_error", "sha2", "slog", "tar", + "tokio", "toml 0.8.8", "tough", "url", @@ -8891,25 +9039,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "tungstenite" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" -dependencies = [ - "base64 0.13.1", - "byteorder", - "bytes", - "http", - "httparse", - "log", - "rand 0.8.5", - "sha1", - "thiserror", - "url", - "utf-8", -] - [[package]] name = "tungstenite" version = "0.20.1" @@ -8929,6 +9058,12 @@ dependencies = [ "utf-8", ] +[[package]] +name = "typed-path" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a90726108dab678edab76459751e1cc7c597c3484a6384d6423191255fa641b" + [[package]] name = "typenum" version = "1.16.0" @@ -9108,12 +9243,12 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna 0.5.0", "percent-encoding", ] @@ -9192,9 +9327,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.5.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"88ad59a7560b41a70d191093a945f0b87bc1deeda46fb237479708a1d6b6cdfc" +checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" dependencies = [ "getrandom 0.2.10", "serde", @@ -9311,7 +9446,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] @@ -9336,7 +9471,7 @@ version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -9510,8 +9645,9 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "base64 0.21.5", + "base64", "bootstrap-agent-client", + "buf-list", "bytes", "camino", "camino-tempfile", @@ -9549,7 +9685,7 @@ dependencies = [ "omicron-test-utils", "omicron-workspace-hack", "openapi-lint", - "openapiv3 1.0.3", + "openapiv3", "rand 0.8.5", "reqwest", "schemars", @@ -9662,6 +9798,15 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -9692,6 +9837,21 @@ dependencies = [ "windows_x86_64_msvc 0.48.5", ] +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -9704,6 +9864,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -9716,6 +9882,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -9728,6 +9900,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -9740,6 +9918,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -9752,6 +9936,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -9764,6 +9954,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -9776,6 +9972,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "winnow" version = "0.5.15" @@ -9791,7 +9993,7 @@ version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] @@ -9872,6 +10074,16 @@ dependencies = [ "zerocopy-derive 0.6.4", ] +[[package]] +name = "zerocopy" +version = "0.7.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e97e415490559a91254a2979b4829267a57d2fcd741a98eee8b722fb57289aa0" +dependencies = [ + "byteorder", + "zerocopy-derive 0.7.26", +] + [[package]] name = "zerocopy-derive" version = "0.2.0" @@ -9894,11 +10106,22 @@ dependencies = [ "syn 2.0.32", ] +[[package]] +name = "zerocopy-derive" +version = "0.7.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd7e48ccf166952882ca8bd778a43502c64f33bf94c12ebe2a7f08e5a0f6689f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.32", +] + [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 75c0eae33e..1f157b0853 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,7 +91,9 @@ default-members = [ "dev-tools/omdb", "dev-tools/omicron-dev", "dev-tools/thing-flinger", - "dev-tools/xtask", + # Do not include xtask in the list of default members, because this causes + # hakari to not work as well and build times to be longer. + # See omicron#4392. 
"dns-server", "gateway-cli", "gateway-test-utils", @@ -142,7 +144,7 @@ api_identity = { path = "api_identity" } approx = "0.5.1" assert_matches = "1.5.0" assert_cmd = "2.0.12" -async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", rev = "1446f7e0c1f05f33a0581abd51fa873c7652ab61" } +async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", rev = "ed7ab5ef0513ba303d33efd41d3e9e381169d59b" } async-trait = "0.1.74" atomicwrites = "0.4.2" authz-macros = { path = "nexus/authz-macros" } @@ -158,7 +160,7 @@ byteorder = "1.5.0" bytes = "1.5.0" bytesize = "1.3.0" camino = "1.1" -camino-tempfile = "1.0.2" +camino-tempfile = "1.1.1" cancel-safe-futures = "0.1.5" chacha20poly1305 = "0.10.1" ciborium = "0.2.1" @@ -169,9 +171,9 @@ cookie = "0.18" criterion = { version = "0.5.1", features = [ "async_tokio" ] } crossbeam = "0.8" crossterm = { version = "0.27.0", features = ["event-stream"] } -crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "51a3121c8318fc7ac97d74f917ce1d37962e785f" } -crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "51a3121c8318fc7ac97d74f917ce1d37962e785f" } -crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "51a3121c8318fc7ac97d74f917ce1d37962e785f" } +crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "fab27994d0bd12725c17d6b478a9bfc2673ad6f4" } +crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "fab27994d0bd12725c17d6b478a9bfc2673ad6f4" } +crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "fab27994d0bd12725c17d6b478a9bfc2673ad6f4" } curve25519-dalek = "4" datatest-stable = "0.2.3" display-error-chain = "0.2.0" @@ -179,7 +181,7 @@ ddm-admin-client = { path = "clients/ddm-admin-client" } db-macros = { path = "nexus/db-macros" } debug-ignore = "1.0.5" derive_more = "0.99.17" -derive-where = "1.2.5" +derive-where = "1.2.6" diesel = { version = "2.1.4", features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } diesel-dtrace = { git = "https://github.com/oxidecomputer/diesel-dtrace", branch = "main" } dns-server = { path = "dns-server" } @@ -189,17 +191,18 @@ dropshot = { git = "https://github.com/oxidecomputer/dropshot", branch = "main", either = "1.9.0" expectorate = "1.1.0" fatfs = "0.3.6" -filetime = "0.2.22" +filetime = "0.2.23" flate2 = "1.0.28" flume = "0.11.0" foreign-types = "0.3.2" -fs-err = "2.10.0" +fs-err = "2.11.0" futures = "0.3.29" gateway-client = { path = "clients/gateway-client" } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "2739c18e80697aa6bc235c935176d14b4d757ee9", default-features = false, features = ["std"] } gateway-sp-comms = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "2739c18e80697aa6bc235c935176d14b4d757ee9" } gateway-test-utils = { path = "gateway-test-utils" } glob = "0.3.1" +guppy = "0.17.4" headers = "0.3.9" heck = "0.4" hex = "0.4.3" @@ -258,15 +261,15 @@ omicron-sled-agent = { path = "sled-agent" } omicron-test-utils = { path = "test-utils" } omicron-zone-package = "0.9.1" oxide-client = { path = "clients/oxide-client" } -oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "258a8b59902dd36fc7ee5425e6b1fb5fc80d4649", features = [ "api", "std" ] } -once_cell = "1.18.0" +oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "24ceba1969269e4d81bda83d8968d7d7f713c46b", features = [ "api", "std" ] 
} +once_cell = "1.19.0" openapi-lint = { git = "https://github.com/oxidecomputer/openapi-lint", branch = "main" } -openapiv3 = "1.0" +openapiv3 = "2.0.0" # must match samael's crate! openssl = "0.10" openssl-sys = "0.9" openssl-probe = "0.1.5" -opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "258a8b59902dd36fc7ee5425e6b1fb5fc80d4649" } +opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "24ceba1969269e4d81bda83d8968d7d7f713c46b" } oso = "0.27" owo-colors = "3.5.0" oximeter = { path = "oximeter/oximeter" } @@ -280,19 +283,19 @@ p256 = "0.13" parse-display = "0.8.2" partial-io = { version = "0.5.4", features = ["proptest1", "tokio1"] } paste = "1.0.14" -percent-encoding = "2.3.0" -pem = "1.1" +percent-encoding = "2.3.1" +pem = "3.0" petgraph = "0.6.4" postgres-protocol = "0.6.6" predicates = "3.0.4" pretty_assertions = "1.4.0" -pretty-hex = "0.3.0" +pretty-hex = "0.4.0" proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "54398875a2125227d13827d4236dce943c019b1c" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "54398875a2125227d13827d4236dce943c019b1c" } -propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "54398875a2125227d13827d4236dce943c019b1c" } +bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "f1571ce141421cff3d3328f43e7722f5df96fdda" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "f1571ce141421cff3d3328f43e7722f5df96fdda" } +propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "f1571ce141421cff3d3328f43e7722f5df96fdda" } proptest = "1.4.0" quote = "1.0" rand = "0.8.5" @@ -303,8 +306,8 @@ ref-cast = "1.0" regex = "1.10.2" regress = "0.7.1" reqwest = { version = "0.11", default-features = false } -ring = "0.16" -rpassword = "7.2.0" +ring = "0.17.7" +rpassword = "7.3.1" rstest = "0.18.2" rustfmt-wrapper = "0.2" rustls = "0.21.9" @@ -319,8 +322,7 @@ serde_json = "1.0.108" serde_path_to_error = "0.1.14" serde_tokenstream = "0.2" serde_urlencoded = "0.7.1" -serde_with = "2.3.3" -serial_test = "0.10" +serde_with = "3.4.0" sha2 = "0.10.8" sha3 = "0.10.8" shell-words = "1.1.0" @@ -363,15 +365,15 @@ textwrap = "0.16.0" test-strategy = "0.3.1" thiserror = "1.0" tofino = { git = "http://github.com/oxidecomputer/tofino", branch = "main" } -tokio = "1.33.0" +tokio = "1.35.0" tokio-postgres = { version = "0.7", features = [ "with-chrono-0_4", "with-uuid-1" ] } tokio-stream = "0.1.14" -tokio-tungstenite = "0.18" -tokio-util = "0.7.10" +tokio-tungstenite = "0.20" +tokio-util = { version = "0.7.10", features = ["io", "io-util"] } toml = "0.8.8" toml_edit = "0.21.0" topological-sort = "0.2.2" -tough = { version = "0.14", features = [ "http" ] } +tough = { version = "0.16.0", features = [ "http" ] } trust-dns-client = "0.22" trust-dns-proto = "0.22" trust-dns-resolver = "0.22" @@ -382,22 +384,36 @@ tufaceous-lib = { path = "tufaceous-lib" } unicode-width = "0.1.11" update-engine = { path = "update-engine" } usdt = "0.3" -uuid = { version = "1.5.0", features = ["serde", "v4"] } +uuid = { version = "1.6.1", features = ["serde", "v4"] } walkdir = "2.4" wicket = { path = "wicket" } wicket-common = { path = "wicket-common" } wicketd-client = { path = "clients/wicketd-client" } -zeroize = { version = "1.6.0", 
features = ["zeroize_derive", "std"] } +zeroize = { version = "1.7.0", features = ["zeroize_derive", "std"] } zip = { version = "0.6.6", default-features = false, features = ["deflate","bzip2"] } zone = { version = "0.3", default-features = false, features = ["async"] } +# NOTE: The test profile inherits from the dev profile, so settings under +# profile.dev get inherited. AVOID setting anything under profile.test: that +# will cause dev and test builds to diverge, which will cause more Cargo build +# cache misses. + [profile.dev] -panic = "abort" +# Note: This used to be panic = "abort" earlier, but that caused a lot of +# duplicate dependency builds. Letting panic be "unwind" causes dependencies +# across `cargo test` and `cargo run` to be unified. See omicron#4392. +panic = "unwind" + # See https://github.com/oxidecomputer/omicron/issues/4009 for some background context here. # By reducing the debug level (though keeping enough to have meaningful # backtraces), we reduce incremental build time and binary size significantly. debug = "line-tables-only" +[profile.dev.build-override] +# Setting this to line-tables-only results in a large improvement in build +# times, because it allows target and host dependencies to be unified. +debug = "line-tables-only" + # `bindgen` is used by `samael`'s build script; building it with optimizations # makes that build script run ~5x faster, more than offsetting the additional # build time added to `bindgen` itself. @@ -428,112 +444,108 @@ panic = "abort" # proptest based test generation and shrinking is expensive. Let's optimize it. [profile.dev.package.proptest] opt-level = 3 -[profile.test.package.proptest] -opt-level = 3 [profile.dev.package.bootstore] opt-level = 3 -[profile.test.package.bootstore] -opt-level = 3 # Crypto stuff always needs optimizations -[profile.test.package.sha3] +[profile.dev.package.sha3] opt-level = 3 -[profile.test.package.sha2] +[profile.dev.package.sha2] opt-level = 3 -[profile.test.package.hkdf] +[profile.dev.package.hkdf] opt-level = 3 -[profile.test.package.chacha20poly1305] +[profile.dev.package.chacha20poly1305] opt-level = 3 -[profile.test.package.chacha20] +[profile.dev.package.chacha20] opt-level = 3 -[profile.test.package.vsss-rs] +[profile.dev.package.vsss-rs] opt-level = 3 -[profile.test.package.curve25519-dalek] +[profile.dev.package.curve25519-dalek] opt-level = 3 -[profile.test.package.aead] +[profile.dev.package.aead] opt-level = 3 -[profile.test.package.aes] +[profile.dev.package.aes] opt-level = 3 -[profile.test.package.aes-gcm] +[profile.dev.package.aes-gcm] opt-level = 3 -[profile.test.package.bcrypt-pbkdf] +[profile.dev.package.bcrypt-pbkdf] opt-level = 3 -[profile.test.package.blake2] +[profile.dev.package.blake2] opt-level = 3 -[profile.test.package.blake2b_simd] +[profile.dev.package.blake2b_simd] opt-level = 3 -[profile.test.package.block-buffer] +[profile.dev.package.block-buffer] opt-level = 3 -[profile.test.package.block-padding] +[profile.dev.package.block-padding] opt-level = 3 -[profile.test.package.blowfish] +[profile.dev.package.blowfish] opt-level = 3 -[profile.test.package.constant_time_eq] +[profile.dev.package.constant_time_eq] opt-level = 3 -[profile.test.package.crypto-bigint] +[profile.dev.package.crypto-bigint] opt-level = 3 -[profile.test.package.crypto-common] +[profile.dev.package.crypto-common] opt-level = 3 -[profile.test.package.ctr] +[profile.dev.package.ctr] opt-level = 3 -[profile.test.package.cbc] +[profile.dev.package.cbc] opt-level = 3 -[profile.test.package.digest] 
+[profile.dev.package.digest]
 opt-level = 3
-[profile.test.package.ed25519]
+[profile.dev.package.ed25519]
 opt-level = 3
-[profile.test.package.ed25519-dalek]
+[profile.dev.package.ed25519-dalek]
 opt-level = 3
-[profile.test.package.elliptic-curve]
+[profile.dev.package.elliptic-curve]
 opt-level = 3
-[profile.test.package.generic-array]
+[profile.dev.package.generic-array]
 opt-level = 3
-[profile.test.package.getrandom]
+[profile.dev.package.getrandom]
 opt-level = 3
-[profile.test.package.hmac]
+[profile.dev.package.hmac]
 opt-level = 3
-[profile.test.package.lpc55_sign]
+[profile.dev.package.lpc55_sign]
 opt-level = 3
-[profile.test.package.md5]
+[profile.dev.package.md5]
 opt-level = 3
-[profile.test.package.md-5]
+[profile.dev.package.md-5]
 opt-level = 3
-[profile.test.package.num-bigint]
+[profile.dev.package.num-bigint]
 opt-level = 3
-[profile.test.package.num-bigint-dig]
+[profile.dev.package.num-bigint-dig]
 opt-level = 3
-[profile.test.package.rand]
+[profile.dev.package.rand]
 opt-level = 3
-[profile.test.package.rand_chacha]
+[profile.dev.package.rand_chacha]
 opt-level = 3
-[profile.test.package.rand_core]
+[profile.dev.package.rand_core]
 opt-level = 3
-[profile.test.package.rand_hc]
+[profile.dev.package.rand_hc]
 opt-level = 3
-[profile.test.package.rand_xorshift]
+[profile.dev.package.rand_xorshift]
 opt-level = 3
-[profile.test.package.rsa]
+[profile.dev.package.rsa]
 opt-level = 3
-[profile.test.package.salty]
+[profile.dev.package.salty]
 opt-level = 3
-[profile.test.package.signature]
+[profile.dev.package.signature]
 opt-level = 3
-[profile.test.package.subtle]
+[profile.dev.package.subtle]
 opt-level = 3
-[profile.test.package.tiny-keccak]
+[profile.dev.package.tiny-keccak]
 opt-level = 3
-[profile.test.package.uuid]
+[profile.dev.package.uuid]
 opt-level = 3
-[profile.test.package.cipher]
+[profile.dev.package.cipher]
 opt-level = 3
-[profile.test.package.cpufeatures]
+[profile.dev.package.cpufeatures]
 opt-level = 3
-[profile.test.package.poly1305]
+[profile.dev.package.poly1305]
 opt-level = 3
-[profile.test.package.inout]
+[profile.dev.package.inout]
 opt-level = 3
-[profile.test.package.keccak]
+[profile.dev.package.keccak]
 opt-level = 3
 #
diff --git a/bootstore/src/schemes/v0/request_manager.rs b/bootstore/src/schemes/v0/request_manager.rs
index 780213430c..90466fdc07 100644
--- a/bootstore/src/schemes/v0/request_manager.rs
+++ b/bootstore/src/schemes/v0/request_manager.rs
@@ -109,7 +109,7 @@ impl RequestManager {
         let expiry = now + self.config.rack_init_timeout;
         let mut acks = InitAcks::default();
         acks.expected =
-            packages.keys().cloned().filter(|id| id != &self.id).collect();
+            packages.keys().filter(|&id| id != &self.id).cloned().collect();
         let req = TrackableRequest::InitRack {
             rack_uuid,
             packages: packages.clone(),
diff --git a/certificates/src/lib.rs b/certificates/src/lib.rs
index 6bd7fa32de..442a9cfdd5 100644
--- a/certificates/src/lib.rs
+++ b/certificates/src/lib.rs
@@ -60,14 +60,14 @@ impl From<CertificateError> for Error {
             | InvalidValidationHostname(_)
             | ErrorValidatingHostname(_)
             | NoDnsNameMatchingHostname { .. }
-            | UnsupportedPurpose => Error::InvalidValue {
-                label: String::from("certificate"),
-                message: DisplayErrorChain::new(&error).to_string(),
-            },
-            BadPrivateKey(_) => Error::InvalidValue {
-                label: String::from("private-key"),
-                message: DisplayErrorChain::new(&error).to_string(),
-            },
+            | UnsupportedPurpose => Error::invalid_value(
+                "certificate",
+                DisplayErrorChain::new(&error).to_string(),
+            ),
+            BadPrivateKey(_) => Error::invalid_value(
+                "private-key",
+                DisplayErrorChain::new(&error).to_string(),
+            ),
             Unexpected(_) => Error::InternalError {
                 internal_message: DisplayErrorChain::new(&error).to_string(),
             },
diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs
index 23ceb114fc..3ecba7e710 100644
--- a/clients/nexus-client/src/lib.rs
+++ b/clients/nexus-client/src/lib.rs
@@ -202,6 +202,19 @@ impl From<&types::InstanceState>
     }
 }
 
+impl From<omicron_common::api::internal::nexus::ProducerKind>
+    for types::ProducerKind
+{
+    fn from(kind: omicron_common::api::internal::nexus::ProducerKind) -> Self {
+        use omicron_common::api::internal::nexus::ProducerKind;
+        match kind {
+            ProducerKind::SledAgent => Self::SledAgent,
+            ProducerKind::Service => Self::Service,
+            ProducerKind::Instance => Self::Instance,
+        }
+    }
+}
+
 impl From<&omicron_common::api::internal::nexus::ProducerEndpoint>
     for types::ProducerEndpoint
 {
@@ -212,6 +225,7 @@ impl From<&omicron_common::api::internal::nexus::ProducerEndpoint>
             address: s.address.to_string(),
             base_route: s.base_route.clone(),
             id: s.id,
+            kind: s.kind.into(),
             interval: s.interval.into(),
         }
     }
diff --git a/clients/oximeter-client/src/lib.rs b/clients/oximeter-client/src/lib.rs
index 7bd17d7e76..11aa1452f8 100644
--- a/clients/oximeter-client/src/lib.rs
+++ b/clients/oximeter-client/src/lib.rs
@@ -20,6 +20,19 @@ impl From<std::time::Duration> for types::Duration {
     }
 }
 
+impl From<omicron_common::api::internal::nexus::ProducerKind>
+    for types::ProducerKind
+{
+    fn from(kind: omicron_common::api::internal::nexus::ProducerKind) -> Self {
+        use omicron_common::api::internal::nexus;
+        match kind {
+            nexus::ProducerKind::Service => Self::Service,
+            nexus::ProducerKind::SledAgent => Self::SledAgent,
+            nexus::ProducerKind::Instance => Self::Instance,
+        }
+    }
+}
+
 impl From<&omicron_common::api::internal::nexus::ProducerEndpoint>
     for types::ProducerEndpoint
 {
@@ -30,6 +43,7 @@ impl From<&omicron_common::api::internal::nexus::ProducerEndpoint>
             address: s.address.to_string(),
             base_route: s.base_route.clone(),
             id: s.id,
+            kind: s.kind.into(),
             interval: s.interval.into(),
         }
     }
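A note on the pattern above: the nexus and oximeter clients each define their own conversion because progenitor generates a separate `types` module per client. Below is a minimal, self-contained sketch of the same pattern; the enum names are stand-ins, not code from this change.

// Stand-in types; the real ones are the progenitor-generated `types` modules.
#[derive(Clone, Copy, Debug)]
enum InternalKind { SledAgent, Service, Instance }
#[derive(Clone, Copy, Debug)]
enum GeneratedKind { SledAgent, Service, Instance }

impl From<InternalKind> for GeneratedKind {
    fn from(kind: InternalKind) -> Self {
        // An exhaustive match means a new variant on the internal enum is a
        // compile error here, so the mapping cannot silently fall out of date.
        match kind {
            InternalKind::SledAgent => Self::SledAgent,
            InternalKind::Service => Self::Service,
            InternalKind::Instance => Self::Instance,
        }
    }
}

fn main() {
    // With the impl in place, endpoint conversions can simply write
    // `kind: s.kind.into()`, as the hunks above do.
    let kind: GeneratedKind = InternalKind::Service.into();
    println!("{kind:?}");
}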
.expect("disk_finish_transition() failed unexpectedly"); } } - -impl From for types::DatasetKind { - fn from(k: sled_storage::dataset::DatasetKind) -> Self { - use sled_storage::dataset::DatasetKind::*; - match k { - CockroachDb => Self::CockroachDb, - Crucible => Self::Crucible, - Clickhouse => Self::Clickhouse, - ClickhouseKeeper => Self::ClickhouseKeeper, - ExternalDns => Self::ExternalDns, - InternalDns => Self::InternalDns, - } - } -} - -impl From for types::DatasetName { - fn from(n: sled_storage::dataset::DatasetName) -> Self { - Self { - pool_name: types::ZpoolName::from_str(&n.pool().to_string()) - .unwrap(), - kind: n.dataset().clone().into(), - } - } -} diff --git a/common/src/api/external/error.rs b/common/src/api/external/error.rs index e508b7ecba..2661db7bb6 100644 --- a/common/src/api/external/error.rs +++ b/common/src/api/external/error.rs @@ -35,16 +35,16 @@ pub enum Error { ObjectAlreadyExists { type_name: ResourceType, object_name: String }, /// The request was well-formed, but the operation cannot be completed given /// the current state of the system. - #[error("Invalid Request: {message}")] - InvalidRequest { message: String }, + #[error("Invalid Request: {}", .message.display_internal())] + InvalidRequest { message: MessagePair }, /// Authentication credentials were required but either missing or invalid. /// The HTTP status code is called "Unauthorized", but it's more accurate to /// call it "Unauthenticated". #[error("Missing or invalid credentials")] Unauthenticated { internal_message: String }, /// The specified input field is not valid. - #[error("Invalid Value: {label}, {message}")] - InvalidValue { label: String, message: String }, + #[error("Invalid Value: {label}, {}", .message.display_internal())] + InvalidValue { label: String, message: MessagePair }, /// The request is not authorized to perform the requested operation. #[error("Forbidden")] Forbidden, @@ -55,15 +55,86 @@ pub enum Error { /// The system (or part of it) is unavailable. #[error("Service Unavailable: {internal_message}")] ServiceUnavailable { internal_message: String }, - /// Method Not Allowed - #[error("Method Not Allowed: {internal_message}")] - MethodNotAllowed { internal_message: String }, + + /// There is insufficient capacity to perform the requested operation. + /// + /// This variant is translated to 507 Insufficient Storage, and it carries + /// both an external and an internal message. The external message is + /// intended for operator consumption and is intended to not leak any + /// implementation details. + #[error("Insufficient Capacity: {}", .message.display_internal())] + InsufficientCapacity { message: MessagePair }, #[error("Type version mismatch! {internal_message}")] TypeVersionMismatch { internal_message: String }, - #[error("Conflict: {internal_message}")] - Conflict { internal_message: String }, + #[error("Conflict: {}", .message.display_internal())] + Conflict { message: MessagePair }, +} + +/// Represents an error message which has an external component, along with +/// some internal context possibly attached to it. 
+#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
+pub struct MessagePair {
+    external_message: String,
+    internal_context: String,
+}
+
+impl MessagePair {
+    pub fn new(external_message: String) -> Self {
+        Self { external_message, internal_context: String::new() }
+    }
+
+    pub fn new_full(
+        external_message: String,
+        internal_context: String,
+    ) -> Self {
+        Self { external_message, internal_context }
+    }
+
+    pub fn external_message(&self) -> &str {
+        &self.external_message
+    }
+
+    pub fn internal_context(&self) -> &str {
+        &self.internal_context
+    }
+
+    fn with_internal_context<C>(self, context: C) -> Self
+    where
+        C: Display + Send + Sync + 'static,
+    {
+        let internal_context = if self.internal_context.is_empty() {
+            context.to_string()
+        } else {
+            format!("{}: {}", context, self.internal_context)
+        };
+        Self { external_message: self.external_message, internal_context }
+    }
+
+    pub fn into_internal_external(self) -> (String, String) {
+        let internal = self.display_internal().to_string();
+        (internal, self.external_message)
+    }
+
+    // Do not implement `fmt::Display` for this struct because we don't want users to
+    // accidentally display the internal message to the client. Instead, use a
+    // private formatter.
+    fn display_internal(&self) -> MessagePairDisplayInternal<'_> {
+        MessagePairDisplayInternal(self)
+    }
+}
+
+struct MessagePairDisplayInternal<'a>(&'a MessagePair);
+
+impl<'a> Display for MessagePairDisplayInternal<'a> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.0.external_message)?;
+        if !self.0.internal_context.is_empty() {
+            write!(f, " (with internal context: {})", self.0.internal_context)?;
+        }
+        Ok(())
+    }
 }
 
 /// Indicates how an object was looked up (for an `ObjectNotFound` error)
@@ -119,7 +190,7 @@ impl Error {
             | Error::InvalidRequest { .. }
             | Error::InvalidValue { .. }
             | Error::Forbidden
-            | Error::MethodNotAllowed { .. }
+            | Error::InsufficientCapacity { .. }
             | Error::InternalError { .. }
             | Error::TypeVersionMismatch { .. }
             | Error::Conflict { .. } => false,
@@ -151,8 +222,20 @@ impl Error {
     ///
     /// This should be used for failures due possibly to invalid client input
     /// or malformed requests.
-    pub fn invalid_request(message: &str) -> Error {
-        Error::InvalidRequest { message: message.to_owned() }
+    pub fn invalid_request(message: impl Into<String>) -> Error {
+        Error::InvalidRequest { message: MessagePair::new(message.into()) }
+    }
+
+    /// Generates an [`Error::InvalidValue`] error with the specific label and
+    /// message.
+    pub fn invalid_value(
+        label: impl Into<String>,
+        message: impl Into<String>,
+    ) -> Error {
+        Error::InvalidValue {
+            label: label.into(),
+            message: MessagePair::new(message.into()),
+        }
     }
 
     /// Generates an [`Error::ServiceUnavailable`] error with the specific
@@ -166,6 +249,27 @@ impl Error {
         Error::ServiceUnavailable { internal_message: message.to_owned() }
     }
 
+    /// Generates an [`Error::InsufficientCapacity`] error with external
+    /// and internal messages.
+    ///
+    /// This should be used for failures where there is insufficient capacity,
+    /// and where the caller must either take action or wait until capacity is
+    /// freed.
+    ///
+    /// In the future, we may want to provide more help here: e.g. a link to a
+    /// status or support page.
+    pub fn insufficient_capacity(
+        external_message: impl Into<String>,
+        internal_message: impl Into<String>,
+    ) -> Error {
+        Error::InsufficientCapacity {
+            message: MessagePair::new_full(
+                external_message.into(),
+                internal_message.into(),
+            ),
+        }
+    }
+
     /// Generates an [`Error::TypeVersionMismatch`] with a specific message.
     ///
     /// TypeVersionMismatch errors are a specific type of error arising from differences
@@ -186,8 +290,8 @@ impl Error {
     /// retried. The internal message should provide more information about the
     /// source of the conflict and possible actions the caller can take to
     /// resolve it (if any).
-    pub fn conflict(message: &str) -> Error {
-        Error::Conflict { internal_message: message.to_owned() }
+    pub fn conflict(message: impl Into<String>) -> Error {
+        Error::Conflict { message: MessagePair::new(message.into()) }
     }
 
     /// Given an [`Error`] with an internal message, return the same error with
@@ -201,9 +305,14 @@
         match self {
             Error::ObjectNotFound { .. }
             | Error::ObjectAlreadyExists { .. }
-            | Error::InvalidRequest { .. }
-            | Error::InvalidValue { .. }
             | Error::Forbidden => self,
+            Error::InvalidRequest { message } => Error::InvalidRequest {
+                message: message.with_internal_context(context),
+            },
+            Error::InvalidValue { label, message } => Error::InvalidValue {
+                label,
+                message: message.with_internal_context(context),
+            },
             Error::Unauthenticated { internal_message } => {
                 Error::Unauthenticated {
                     internal_message: format!(
@@ -223,12 +332,9 @@
                     ),
                 }
             }
-            Error::MethodNotAllowed { internal_message } => {
-                Error::MethodNotAllowed {
-                    internal_message: format!(
-                        "{}: {}",
-                        context, internal_message
-                    ),
+            Error::InsufficientCapacity { message } => {
+                Error::InsufficientCapacity {
+                    message: message.with_internal_context(context),
                 }
             }
             Error::TypeVersionMismatch { internal_message } => {
@@ -239,8 +345,8 @@
                     ),
                 }
             }
-            Error::Conflict { internal_message } => Error::Conflict {
-                internal_message: format!("{}: {}", context, internal_message),
+            Error::Conflict { message } => Error::Conflict {
+                message: message.with_internal_context(context),
             },
         }
     }
@@ -292,28 +398,29 @@ impl From<Error> for HttpError {
                 internal_message,
             },
 
-            Error::InvalidRequest { message } => HttpError::for_bad_request(
-                Some(String::from("InvalidRequest")),
-                message,
-            ),
-
-            Error::InvalidValue { label, message } => {
-                let message =
-                    format!("unsupported value for \"{}\": {}", label, message);
-                HttpError::for_bad_request(
-                    Some(String::from("InvalidValue")),
-                    message,
-                )
+            Error::InvalidRequest { message } => {
+                let (internal_message, external_message) =
+                    message.into_internal_external();
+                HttpError {
+                    status_code: http::StatusCode::BAD_REQUEST,
+                    error_code: Some(String::from("InvalidRequest")),
+                    external_message,
+                    internal_message,
+                }
             }
 
-            // TODO: RFC-7231 requires that 405s generate an Accept header to describe
-            // what methods are available in the response
-            Error::MethodNotAllowed { internal_message } => {
-                HttpError::for_client_error(
-                    Some(String::from("MethodNotAllowed")),
-                    http::StatusCode::METHOD_NOT_ALLOWED,
+            Error::InvalidValue { label, message } => {
+                let (internal_message, external_message) =
+                    message.into_internal_external();
+                HttpError {
+                    status_code: http::StatusCode::BAD_REQUEST,
+                    error_code: Some(String::from("InvalidValue")),
+                    external_message: format!(
+                        "unsupported value for \"{}\": {}",
+                        label, external_message
+                    ),
                     internal_message,
-                )
+                }
             }
 
             Error::Forbidden => HttpError::for_client_error(
@@ -333,16 +440,35 @@
             )
         }
 
+            Error::InsufficientCapacity { message } => {
+                let (internal_message, external_message) =
+                    message.into_internal_external();
+                // Need to construct an `HttpError` explicitly to present both
+                // an internal and an external message.
+                HttpError {
+                    status_code: http::StatusCode::INSUFFICIENT_STORAGE,
+                    error_code: Some(String::from("InsufficientCapacity")),
+                    external_message: format!(
+                        "Insufficient capacity: {}",
+                        external_message
+                    ),
+                    internal_message,
+                }
+            }
+
             Error::TypeVersionMismatch { internal_message } => {
                 HttpError::for_internal_error(internal_message)
             }
 
-            Error::Conflict { internal_message } => {
-                HttpError::for_client_error(
-                    Some(String::from("Conflict")),
-                    http::StatusCode::CONFLICT,
+            Error::Conflict { message } => {
+                let (internal_message, external_message) =
+                    message.into_internal_external();
+                HttpError {
+                    status_code: http::StatusCode::CONFLICT,
+                    error_code: Some(String::from("Conflict")),
+                    external_message,
                     internal_message,
-                )
+                }
             }
         }
     }
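A brief illustration of the behavior the new `MessagePair` plumbing provides, as a self-contained sketch. The type below is a stand-in for the real one in `omicron_common::api::external`; it only demonstrates how internal context chains (newest context first) while the client-facing message stays fixed.

// Stand-in for MessagePair (illustrative only, not code from this change).
#[derive(Debug)]
struct Pair { external: String, internal: String }

impl Pair {
    fn new(external: impl Into<String>) -> Self {
        Self { external: external.into(), internal: String::new() }
    }
    fn with_internal_context(self, ctx: impl std::fmt::Display) -> Self {
        // Prepend the newer context, mirroring the real implementation.
        let internal = if self.internal.is_empty() {
            ctx.to_string()
        } else {
            format!("{}: {}", ctx, self.internal)
        };
        Self { external: self.external, internal }
    }
}

fn main() {
    let p = Pair::new("not enough capacity")
        .with_internal_context("allocating region")
        .with_internal_context("disk create saga");
    // Logs see both halves; the HTTP response body only sees `external`.
    assert_eq!(p.external, "not enough capacity");
    assert_eq!(p.internal, "disk create saga: allocating region");
    println!("{p:?}");
}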
diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs
index adf661516a..a6d729593b 100644
--- a/common/src/api/external/mod.rs
+++ b/common/src/api/external/mod.rs
@@ -316,10 +316,7 @@ impl Name {
     /// `Name::try_from(String)` that marshals any error into an appropriate
     /// `Error`.
     pub fn from_param(value: String, label: &str) -> Result<Name, Error> {
-        value.parse().map_err(|e| Error::InvalidValue {
-            label: String::from(label),
-            message: e,
-        })
+        value.parse().map_err(|e| Error::invalid_value(label, e))
     }
 
     /// Return the `&str` representing the actual name.
@@ -409,7 +406,7 @@ impl SemverVersion {
     /// This is the official ECMAScript-compatible validation regex for
     /// semver:
     /// <https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string>
-    const VALIDATION_REGEX: &str = r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$";
+    const VALIDATION_REGEX: &'static str = r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$";
 }
 
 impl JsonSchema for SemverVersion {
@@ -622,6 +619,7 @@ impl From<ByteCount> for i64 {
     Debug,
     Deserialize,
     Eq,
+    Hash,
     JsonSchema,
     Ord,
     PartialEq,
@@ -751,6 +749,7 @@ pub enum ResourceType {
     Zpool,
     Vmm,
     Ipv4NatEntry,
+    FloatingIp,
 }
 
 // IDENTITY METADATA
@@ -2826,10 +2825,10 @@ mod test {
         assert!(result.is_err());
         assert_eq!(
             result,
-            Err(Error::InvalidValue {
-                label: "the_name".to_string(),
-                message: "name requires at least one character".to_string()
-            })
+            Err(Error::invalid_value(
+                "the_name",
+                "name requires at least one character"
+            ))
         );
     }
diff --git a/common/src/api/internal/nexus.rs b/common/src/api/internal/nexus.rs
index a4a539ad9b..780e60b1a2 100644
--- a/common/src/api/internal/nexus.rs
+++ b/common/src/api/internal/nexus.rs
@@ -84,13 +84,34 @@ pub struct SledInstanceState {
 
 // Oximeter producer/collector objects.
 
+/// The kind of metric producer this is.
+#[derive(Clone, Copy, Debug, Deserialize, JsonSchema, PartialEq, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub enum ProducerKind {
+    /// The producer is a sled-agent.
+    SledAgent,
+    /// The producer is an Omicron-managed service.
+    Service,
+    /// The producer is a Propolis VMM managing a guest instance.
+    Instance,
+}
+
 /// Information announced by a metric server, used so that clients can contact it and collect
 /// available metric data from it.
 #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq)]
 pub struct ProducerEndpoint {
+    /// A unique ID for this producer.
     pub id: Uuid,
+    /// The kind of producer.
+    pub kind: ProducerKind,
+    /// The IP address and port at which `oximeter` can collect metrics from the
+    /// producer.
     pub address: SocketAddr,
+    /// The API base route from which `oximeter` can collect metrics.
+    ///
+    /// The full route is `{base_route}/{id}`.
     pub base_route: String,
+    /// The interval on which `oximeter` should collect metrics.
     pub interval: Duration,
 }
diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs
index 155fbf971b..c8d8b1c786 100644
--- a/common/src/api/internal/shared.rs
+++ b/common/src/api/internal/shared.rs
@@ -140,6 +140,9 @@ pub struct PortConfigV1 {
     pub uplink_port_fec: PortFec,
     /// BGP peers on this port
     pub bgp_peers: Vec<BgpPeerConfig>,
+    /// Whether or not to set autonegotiation
+    #[serde(default)]
+    pub autoneg: bool,
 }
 
 impl From<UplinkConfig> for PortConfigV1 {
@@ -155,6 +158,7 @@ impl From<UplinkConfig> for PortConfigV1 {
             uplink_port_speed: value.uplink_port_speed,
             uplink_port_fec: value.uplink_port_fec,
             bgp_peers: vec![],
+            autoneg: false,
         }
     }
 }
@@ -260,7 +264,7 @@ pub enum ExternalPortDiscovery {
 }
 
 /// Switchport Speed options
-#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)]
+#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)]
 #[serde(rename_all = "snake_case")]
 pub enum PortSpeed {
     #[serde(alias = "0G")]
@@ -284,7 +288,7 @@ pub enum PortSpeed {
 }
 
 /// Switchport FEC options
-#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)]
+#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)]
 #[serde(rename_all = "snake_case")]
 pub enum PortFec {
     Firecode,
diff --git a/common/src/ledger.rs b/common/src/ledger.rs
index ae028998e2..71d03fa8ee 100644
--- a/common/src/ledger.rs
+++ b/common/src/ledger.rs
@@ -7,7 +7,7 @@
 use async_trait::async_trait;
 use camino::{Utf8Path, Utf8PathBuf};
 use serde::{de::DeserializeOwned, Serialize};
-use slog::{error, info, warn, Logger};
+use slog::{debug, info, warn, Logger};
 
 #[derive(thiserror::Error, Debug)]
 pub enum Error {
@@ -54,6 +54,7 @@ impl From<Error> for crate::api::external::Error {
 ///
 /// This structure is intended to help with serialization and deserialization
 /// of configuration information to both M.2s.
+#[derive(Debug)]
 pub struct Ledger<T: Ledgerable> {
     log: Logger,
     ledger: T,
@@ -87,7 +88,7 @@ impl<T: Ledgerable> Ledger<T> {
             match T::read_from(log, &path).await {
                 Ok(ledger) => ledgers.push(ledger),
                 Err(err) => {
-                    error!(log, "Failed to read ledger: {err}"; "path" => %path)
+                    debug!(log, "Failed to read ledger: {err}"; "path" => %path)
                 }
             }
         }
@@ -183,7 +184,7 @@ pub trait Ledgerable: DeserializeOwned + Serialize + Send + Sync {
                     err,
                 })
             } else {
-                warn!(log, "No ledger in {path}");
+                info!(log, "No ledger in {path}");
                 Err(Error::NotFound)
             }
         }
generate_logging_api { ($path:literal) => { diff --git a/common/src/nexus_config.rs b/common/src/nexus_config.rs index 94c39b4436..740823e755 100644 --- a/common/src/nexus_config.rs +++ b/common/src/nexus_config.rs @@ -339,6 +339,8 @@ pub struct BackgroundTaskConfig { pub nat_cleanup: NatCleanupConfig, /// configuration for inventory tasks pub inventory: InventoryConfig, + /// configuration for phantom disks task + pub phantom_disks: PhantomDiskConfig, } #[serde_as] @@ -386,7 +388,7 @@ pub struct NatCleanupConfig { pub struct InventoryConfig { /// period (in seconds) for periodic activations of this background task /// - /// Each activation fetches information about all harware and software in + /// Each activation fetches information about all hardware and software in /// the system and inserts it into the database. This generates a moderate /// amount of data. #[serde_as(as = "DurationSeconds")] @@ -405,6 +407,14 @@ pub struct InventoryConfig { pub disable: bool, } +#[serde_as] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct PhantomDiskConfig { + /// period (in seconds) for periodic activations of this background task + #[serde_as(as = "DurationSeconds")] + pub period_secs: Duration, +} + /// Configuration for a nexus server #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct PackageConfig { @@ -508,8 +518,9 @@ mod test { BackgroundTaskConfig, Config, ConfigDropshotWithTls, ConsoleConfig, Database, DeploymentConfig, DnsTasksConfig, DpdConfig, ExternalEndpointsConfig, InternalDns, InventoryConfig, LoadError, - LoadErrorKind, MgdConfig, NatCleanupConfig, PackageConfig, SchemeName, - TimeseriesDbConfig, Tunables, UpdatesConfig, + LoadErrorKind, MgdConfig, NatCleanupConfig, PackageConfig, + PhantomDiskConfig, SchemeName, TimeseriesDbConfig, Tunables, + UpdatesConfig, }; use crate::address::{Ipv6Subnet, RACK_PREFIX}; use crate::api::internal::shared::SwitchLocation; @@ -663,6 +674,7 @@ mod test { inventory.period_secs = 10 inventory.nkeep = 11 inventory.disable = false + phantom_disks.period_secs = 30 [default_region_allocation_strategy] type = "random" seed = 0 @@ -764,7 +776,10 @@ mod test { period_secs: Duration::from_secs(10), nkeep: 11, disable: false, - } + }, + phantom_disks: PhantomDiskConfig { + period_secs: Duration::from_secs(30), + }, }, default_region_allocation_strategy: crate::nexus_config::RegionAllocationStrategy::Random { @@ -822,6 +837,7 @@ mod test { inventory.period_secs = 10 inventory.nkeep = 3 inventory.disable = false + phantom_disks.period_secs = 30 [default_region_allocation_strategy] type = "random" "##, diff --git a/common/src/update.rs b/common/src/update.rs index 81256eb526..28d5ae50a6 100644 --- a/common/src/update.rs +++ b/common/src/update.rs @@ -296,7 +296,9 @@ impl FromStr for ArtifactHash { } } -fn hex_schema(gen: &mut SchemaGenerator) -> Schema { +/// Produce an OpenAPI schema describing a hex array of a specific length (e.g., +/// a hash digest). +pub fn hex_schema(gen: &mut SchemaGenerator) -> Schema { let mut schema: SchemaObject = ::json_schema(gen).into(); schema.format = Some(format!("hex string ({N} bytes)")); schema.into() diff --git a/common/src/vlan.rs b/common/src/vlan.rs index 45776e09ac..5e5765ffe2 100644 --- a/common/src/vlan.rs +++ b/common/src/vlan.rs @@ -20,10 +20,10 @@ impl VlanID { /// Creates a new VLAN ID, returning an error if it is out of range. 
diff --git a/common/src/vlan.rs b/common/src/vlan.rs
index 45776e09ac..5e5765ffe2 100644
--- a/common/src/vlan.rs
+++ b/common/src/vlan.rs
@@ -20,10 +20,10 @@ impl VlanID {
     /// Creates a new VLAN ID, returning an error if it is out of range.
     pub fn new(id: u16) -> Result<VlanID, Error> {
         if VLAN_MAX < id {
-            return Err(Error::InvalidValue {
-                label: id.to_string(),
-                message: "Invalid VLAN value".to_string(),
-            });
+            return Err(Error::invalid_value(
+                id.to_string(),
+                "Invalid VLAN value",
+            ));
         }
         Ok(Self(id))
     }
@@ -38,9 +38,9 @@ impl fmt::Display for VlanID {
 impl FromStr for VlanID {
     type Err = Error;
     fn from_str(s: &str) -> Result<Self, Self::Err> {
-        Self::new(s.parse().map_err(|e| Error::InvalidValue {
-            label: s.to_string(),
-            message: format!("{}", e),
-        })?)
+        Self::new(
+            s.parse::<u16>()
+                .map_err(|e| Error::invalid_value(s, e.to_string()))?,
+        )
     }
 }
diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml
index a8834a0b29..7544374906 100644
--- a/dev-tools/omdb/Cargo.toml
+++ b/dev-tools/omdb/Cargo.toml
@@ -37,6 +37,7 @@ strum.workspace = true
 tabled.workspace = true
 textwrap.workspace = true
 tokio = { workspace = true, features = [ "full" ] }
+unicode-width.workspace = true
 uuid.workspace = true
 ipnetwork.workspace = true
 omicron-workspace-hack.workspace = true
diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs
index 5fa19a1a27..08a783d8c8 100644
--- a/dev-tools/omdb/src/bin/omdb/db.rs
+++ b/dev-tools/omdb/src/bin/omdb/db.rs
@@ -51,6 +51,7 @@ use nexus_db_model::Sled;
 use nexus_db_model::Snapshot;
 use nexus_db_model::SnapshotState;
 use nexus_db_model::SwCaboose;
+use nexus_db_model::SwRotPage;
 use nexus_db_model::Vmm;
 use nexus_db_model::Volume;
 use nexus_db_model::Zpool;
@@ -70,10 +71,12 @@ use nexus_types::internal_api::params::DnsRecord;
 use nexus_types::internal_api::params::Srv;
 use nexus_types::inventory::CabooseWhich;
 use nexus_types::inventory::Collection;
+use nexus_types::inventory::RotPageWhich;
 use omicron_common::api::external::DataPageParams;
 use omicron_common::api::external::Generation;
 use omicron_common::postgres_config::PostgresConfigWithUrl;
 use sled_agent_client::types::VolumeConstructionRequest;
+use std::borrow::Cow;
 use std::cmp::Ordering;
 use std::collections::BTreeMap;
 use std::collections::BTreeSet;
@@ -247,6 +250,8 @@ enum InventoryCommands {
     Cabooses,
     /// list and show details from particular collections
     Collections(CollectionsArgs),
+    /// list all root of trust pages ever found
+    RotPages,
 }
 
 #[derive(Debug, Args)]
@@ -267,6 +272,9 @@ enum CollectionsCommands {
 struct CollectionsShowArgs {
     /// id of the collection
     id: Uuid,
+    /// show long strings in their entirety
+    #[clap(long)]
+    show_long_strings: bool,
 }
 
 #[derive(Debug, Args)]
@@ -2233,9 +2241,25 @@ async fn cmd_db_inventory(
             command: CollectionsCommands::List,
         }) => cmd_db_inventory_collections_list(&conn, limit).await,
         InventoryCommands::Collections(CollectionsArgs {
-            command: CollectionsCommands::Show(CollectionsShowArgs { id }),
+            command:
+                CollectionsCommands::Show(CollectionsShowArgs {
+                    id,
+                    show_long_strings,
+                }),
         }) => {
-            cmd_db_inventory_collections_show(opctx, datastore, id, limit).await
+            let long_string_formatter =
+                LongStringFormatter { show_long_strings };
+            cmd_db_inventory_collections_show(
+                opctx,
+                datastore,
+                id,
+                limit,
+                long_string_formatter,
+            )
+            .await
+        }
+        InventoryCommands::RotPages => {
+            cmd_db_inventory_rot_pages(&conn, limit).await
         }
     }
 }
@@ -2318,6 +2342,41 @@
     Ok(())
 }
 
+async fn cmd_db_inventory_rot_pages(
+    conn: &DataStoreConnection<'_>,
+    limit: NonZeroU32,
+) -> Result<(), anyhow::Error> {
+    #[derive(Tabled)]
+    #[tabled(rename_all = "SCREAMING_SNAKE_CASE")]
+    struct RotPageRow {
+        id: Uuid,
+        data_base64: String,
+    }
+
+    use db::schema::sw_root_of_trust_page::dsl;
+    let mut rot_pages = dsl::sw_root_of_trust_page
+        .limit(i64::from(u32::from(limit)))
+        .select(SwRotPage::as_select())
+        .load_async(&**conn)
+        .await
+        .context("loading rot_pages")?;
+    check_limit(&rot_pages, limit, || "loading rot_pages");
+    rot_pages.sort();
+
+    let rows = rot_pages.into_iter().map(|rot_page| RotPageRow {
+        id: rot_page.id,
+        data_base64: rot_page.data_base64,
+    });
+    let table = tabled::Table::new(rows)
+        .with(tabled::settings::Style::empty())
+        .with(tabled::settings::Padding::new(0, 1, 0, 0))
+        .to_string();
+
+    println!("{}", table);
+
+    Ok(())
+}
+
 async fn cmd_db_inventory_collections_list(
     conn: &DataStoreConnection<'_>,
     limit: NonZeroU32,
@@ -2400,6 +2459,7 @@ async fn cmd_db_inventory_collections_show(
     datastore: &DataStore,
     id: Uuid,
     limit: NonZeroU32,
+    long_string_formatter: LongStringFormatter,
 ) -> Result<(), anyhow::Error> {
     let (collection, incomplete) = datastore
         .inventory_collection_read_best_effort(opctx, id, limit)
@@ -2411,14 +2471,14 @@
     inv_collection_print(&collection).await?;
     let nerrors = inv_collection_print_errors(&collection).await?;
-    inv_collection_print_devices(&collection).await?;
+    inv_collection_print_devices(&collection, &long_string_formatter).await?;
 
     if nerrors > 0 {
         eprintln!(
             "warning: {} collection error{} {} reported above",
             nerrors,
+            if nerrors == 1 { "" } else { "s" },
             if nerrors == 1 { "was" } else { "were" },
-            if nerrors == 1 { "" } else { "s" }
         );
     }
 
@@ -2467,6 +2527,7 @@ async fn inv_collection_print_errors(
 
 async fn inv_collection_print_devices(
     collection: &Collection,
+    long_string_formatter: &LongStringFormatter,
 ) -> Result<(), anyhow::Error> {
     // Assemble a list of baseboard ids, sorted first by device type (sled,
     // switch, power), then by slot number. This is the order in which we will
@@ -2545,6 +2606,30 @@
         .to_string();
     println!("{}", textwrap::indent(&table.to_string(), " "));
 
+    #[derive(Tabled)]
+    #[tabled(rename_all = "SCREAMING_SNAKE_CASE")]
+    struct RotPageRow<'a> {
+        slot: String,
+        data_base64: Cow<'a, str>,
+    }
+
+    println!(" RoT pages:");
+    let rot_page_rows: Vec<_> = RotPageWhich::iter()
+        .filter_map(|which| {
+            collection.rot_page_for(which, baseboard_id).map(|d| (which, d))
+        })
+        .map(|(which, found_page)| RotPageRow {
+            slot: format!("{which:?}"),
+            data_base64: long_string_formatter
+                .maybe_truncate(&found_page.page.data_base64),
+        })
+        .collect();
+    let table = tabled::Table::new(rot_page_rows)
+        .with(tabled::settings::Style::empty())
+        .with(tabled::settings::Padding::new(0, 1, 0, 0))
+        .to_string();
+    println!("{}", textwrap::indent(&table.to_string(), " "));
+
     if let Some(rot) = rot {
         println!(" RoT: active slot: slot {:?}", rot.active_slot);
         println!(
@@ -2617,3 +2702,41 @@
 
     Ok(())
 }
+
+#[derive(Debug)]
+struct LongStringFormatter {
+    show_long_strings: bool,
+}
+
+impl LongStringFormatter {
+    fn maybe_truncate<'a>(&self, s: &'a str) -> Cow<'a, str> {
+        use unicode_width::UnicodeWidthChar;
+
+        // pick an arbitrary width at which we'll truncate, knowing that these
+        // strings are probably contained in tables with other columns
+        const TRUNCATE_AT_WIDTH: usize = 32;
+
+        // quick check for short strings or if we should show long strings in
+        // their entirety
+        if self.show_long_strings || s.len() <= TRUNCATE_AT_WIDTH {
+            return s.into();
+        }
+
+        // longer check; we'll do the proper thing here and check the unicode
+        // width, and we don't really care about speed, so we can just iterate
+        // over chars
+        let mut width = 0;
+        for (pos, ch) in s.char_indices() {
+            let ch_width = UnicodeWidthChar::width(ch).unwrap_or(0);
+            if width + ch_width > TRUNCATE_AT_WIDTH {
+                let (prefix, _) = s.split_at(pos);
+                return format!("{prefix}...").into();
+            }
+            width += ch_width;
+        }
+
+        // if we didn't break out of the loop, `s` in its entirety is not too
+        // wide, so return it as-is
+        s.into()
+    }
+}
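A small demonstration of why `maybe_truncate` above measures display width rather than bytes or chars: a byte-index slice could panic mid-codepoint, and a char count would mis-size wide (e.g. CJK) glyphs. This standalone sketch uses a hypothetical helper name but the same `unicode-width` calls.

use unicode_width::UnicodeWidthChar;

// Truncate `s` to at most `max_width` display columns, appending "...".
fn truncate_to_width(s: &str, max_width: usize) -> String {
    let mut width = 0;
    for (pos, ch) in s.char_indices() {
        let ch_width = UnicodeWidthChar::width(ch).unwrap_or(0);
        if width + ch_width > max_width {
            // `pos` is always a char boundary, so slicing here cannot panic.
            return format!("{}...", &s[..pos]);
        }
        width += ch_width;
    }
    s.to_string()
}

fn main() {
    // Each of these glyphs is two columns wide, so only four fit in eight.
    assert_eq!(truncate_to_width("データベース", 8), "データベ...");
    assert_eq!(truncate_to_width("short", 8), "short");
    println!("ok");
}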
diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs
index 9f91d38504..df5248b52d 100644
--- a/dev-tools/omdb/src/bin/omdb/nexus.rs
+++ b/dev-tools/omdb/src/bin/omdb/nexus.rs
@@ -515,6 +515,32 @@ fn print_task_details(bgtask: &BackgroundTask, details: &serde_json::Value) {
            );
         }
     };
+    } else if name == "phantom_disks" {
+        #[derive(Deserialize)]
+        struct TaskSuccess {
+            /// how many phantom disks were deleted ok
+            phantom_disk_deleted_ok: usize,
+
+            /// how many phantom disks could not be deleted
+            phantom_disk_deleted_err: usize,
+        }
+
+        match serde_json::from_value::<TaskSuccess>(details.clone()) {
+            Err(error) => eprintln!(
+                "warning: failed to interpret task details: {:?}: {:?}",
+                error, details
+            ),
+            Ok(success) => {
+                println!(
+                    " number of phantom disks deleted: {}",
+                    success.phantom_disk_deleted_ok
+                );
+                println!(
+                    " number of phantom disk delete errors: {}",
+                    success.phantom_disk_deleted_err
+                );
+            }
+        };
     } else {
         println!(
             "warning: unknown background task: {:?} \
diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out
index fd50d80c81..c08f592852 100644
--- a/dev-tools/omdb/tests/env.out
+++ b/dev-tools/omdb/tests/env.out
@@ -66,6 +66,10 @@ task: "nat_v4_garbage_collector"
     predetermined retention policy
 
 
+task: "phantom_disks"
+    detects and un-deletes phantom disks
+
+
diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out
index fd50d80c81..c08f592852 100644
--- a/dev-tools/omdb/tests/env.out
+++ b/dev-tools/omdb/tests/env.out
@@ -66,6 +66,10 @@ task: "nat_v4_garbage_collector"
     predetermined retention policy

+task: "phantom_disks"
+    detects and un-deletes phantom disks
+
+
 ---------------------------------------------
 stderr:
 note: using Nexus URL http://127.0.0.1:REDACTED_PORT
@@ -131,6 +135,10 @@ task: "nat_v4_garbage_collector"
     predetermined retention policy

+task: "phantom_disks"
+    detects and un-deletes phantom disks
+
+
 ---------------------------------------------
 stderr:
 note: Nexus URL not specified.  Will pick one from DNS.
@@ -183,6 +191,10 @@ task: "nat_v4_garbage_collector"
     predetermined retention policy

+task: "phantom_disks"
+    detects and un-deletes phantom disks
+
+
 ---------------------------------------------
 stderr:
 note: Nexus URL not specified.  Will pick one from DNS.
diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out
index 6bc3a85e8a..65520ab59c 100644
--- a/dev-tools/omdb/tests/successes.out
+++ b/dev-tools/omdb/tests/successes.out
@@ -260,6 +260,10 @@ task: "nat_v4_garbage_collector"
     predetermined retention policy

+task: "phantom_disks"
+    detects and un-deletes phantom disks
+
+
 ---------------------------------------------
 stderr:
 note: using Nexus URL http://127.0.0.1:REDACTED_PORT/
@@ -357,6 +361,14 @@ task: "inventory_collection"
     last collection started:
     last collection done:

+task: "phantom_disks"
+  configured period: every 30s
+  currently executing: no
+  last completed activation: iter 2, triggered by an explicit signal
+    started at <REDACTED TIMESTAMP> (<REDACTED DURATION>s ago) and ran for <REDACTED DURATION>ms
+    number of phantom disks deleted: 0
+    number of phantom disk delete errors: 0
+
 ---------------------------------------------
 stderr:
 note: using Nexus URL http://127.0.0.1:REDACTED_PORT/
diff --git a/dev-tools/omicron-dev/tests/test_omicron_dev.rs b/dev-tools/omicron-dev/tests/test_omicron_dev.rs
index f1e8177243..7e78e5dc5a 100644
--- a/dev-tools/omicron-dev/tests/test_omicron_dev.rs
+++ b/dev-tools/omicron-dev/tests/test_omicron_dev.rs
@@ -27,7 +27,7 @@ use subprocess::Redirection;
 const CMD_OMICRON_DEV: &str = env!("CARGO_BIN_EXE_omicron-dev");

 /// timeout used for various things that should be pretty quick
-const TIMEOUT: Duration = Duration::from_secs(15);
+const TIMEOUT: Duration = Duration::from_secs(30);

 fn path_to_omicron_dev() -> PathBuf {
     path_to_executable(CMD_OMICRON_DEV)
diff --git a/docs/http-status-codes.adoc b/docs/http-status-codes.adoc
index e02f9ea8a5..4628edcab5 100644
--- a/docs/http-status-codes.adoc
+++ b/docs/http-status-codes.adoc
@@ -18,7 +18,8 @@ This doc is aimed at the public API.  For consistency, we should use the same er
 ** "403 Forbidden" is used when the user provided valid credentials (they were authenticated), but they're not authorized to access the resource, _and_ we don't mind telling them that the resource exists (e.g., accessing "/sleds").
 ** "404 Not Found" is used when the user provided valid credentials (they were authenticated), but they're not authorized to access the resource, and they're not even allowed to know whether it exists (e.g., accessing a particular Project).
 * "500 Internal Server Error" is used for any kind of _bug_ or unhandled server-side condition.
-* "503 Service unavailable" is used when the service (or an internal service on which the service depends) is overloaded or actually unavailable.
+* "503 Service Unavailable" is used when the service (or an internal service on which the service depends) is overloaded or actually unavailable.
+* "507 Insufficient Storage" is used if there isn't sufficient capacity available for a particular operation (for example, if there isn't enough disk space available to allocate a new virtual disk).

 There's more discussion about the 400-level and 500-level codes below.
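For readers mapping the 507 guidance above onto code: the `http` crate already names this status. A hedged sketch of the classification, with a hypothetical `ApiError` enum purely for illustration (Nexus's real mapping lives in its external API error types, which differ in detail):

```rust
use http::StatusCode;

// Hypothetical error classification, for illustration only.
enum ApiError {
    InsufficientCapacity, // e.g. no disk space left for a new virtual disk
    Internal,             // a bug or unhandled server-side condition
    Unavailable,          // the service or a dependency is overloaded/down
}

fn status_for(error: &ApiError) -> StatusCode {
    match error {
        // Out of capacity is distinct from a bug or an outage: 507.
        ApiError::InsufficientCapacity => StatusCode::INSUFFICIENT_STORAGE,
        ApiError::Internal => StatusCode::INTERNAL_SERVER_ERROR,
        ApiError::Unavailable => StatusCode::SERVICE_UNAVAILABLE,
    }
}

fn main() {
    assert_eq!(status_for(&ApiError::InsufficientCapacity).as_u16(), 507);
}
```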
diff --git a/end-to-end-tests/Cargo.toml b/end-to-end-tests/Cargo.toml
index e78a8792d3..8a1f91eee8 100644
--- a/end-to-end-tests/Cargo.toml
+++ b/end-to-end-tests/Cargo.toml
@@ -10,14 +10,18 @@ async-trait.workspace = true
 base64.workspace = true
 chrono.workspace = true
 http.workspace = true
+hyper.workspace = true
 omicron-sled-agent.workspace = true
 omicron-test-utils.workspace = true
 oxide-client.workspace = true
 rand.workspace = true
 reqwest.workspace = true
-russh = "0.39.0"
-russh-keys = "0.38.0"
+russh = "0.40.0"
+russh-keys = "0.40.0"
+serde.workspace = true
+serde_json.workspace = true
 tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
 toml.workspace = true
 trust-dns-resolver.workspace = true
+uuid.workspace = true
 omicron-workspace-hack.workspace = true
diff --git a/end-to-end-tests/src/bin/bootstrap.rs b/end-to-end-tests/src/bin/bootstrap.rs
index c9001937db..83a37b8c21 100644
--- a/end-to-end-tests/src/bin/bootstrap.rs
+++ b/end-to-end-tests/src/bin/bootstrap.rs
@@ -1,18 +1,23 @@
 use anyhow::Result;
-use end_to_end_tests::helpers::ctx::{build_client, Context};
+use end_to_end_tests::helpers::ctx::{ClientParams, Context};
 use end_to_end_tests::helpers::{generate_name, get_system_ip_pool};
 use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError};
 use oxide_client::types::{
-    ByteCount, DiskCreate, DiskSource, IpRange, Ipv4Range,
+    ByteCount, DeviceAccessTokenRequest, DeviceAuthRequest, DeviceAuthVerify,
+    DiskCreate, DiskSource, IpRange, Ipv4Range,
 };
 use oxide_client::{
-    ClientDisksExt, ClientProjectsExt, ClientSystemNetworkingExt,
+    ClientDisksExt, ClientHiddenExt, ClientProjectsExt,
+    ClientSystemNetworkingExt,
 };
+use serde::{de::DeserializeOwned, Deserialize};
 use std::time::Duration;
+use uuid::Uuid;

 #[tokio::main]
 async fn main() -> Result<()> {
-    let client = build_client().await?;
+    let params = ClientParams::new()?;
+    let client = params.build_client().await?;

     // ===== ENSURE NEXUS IS UP ===== //
     eprintln!("waiting for nexus to come up...");
@@ -71,8 +76,61 @@ async fn main() -> Result<()> {
         .disk(disk_name)
         .send()
         .await?;

-    ctx.cleanup().await?;
+    // ===== PRINT CLI ENVIRONMENT ===== //
+    let client_id = Uuid::new_v4();
+    let DeviceAuthResponse { device_code, user_code } =
+        deserialize_byte_stream(
+            ctx.client
+                .device_auth_request()
+                .body(DeviceAuthRequest { client_id })
+                .send()
+                .await?,
+        )
+        .await?;
+    ctx.client
+        .device_auth_confirm()
+        .body(DeviceAuthVerify { user_code })
+        .send()
+        .await?;
+    let DeviceAccessTokenGrant { access_token } = deserialize_byte_stream(
+        ctx.client
+            .device_access_token()
+            .body(DeviceAccessTokenRequest {
+                client_id,
+                device_code,
+                grant_type: "urn:ietf:params:oauth:grant-type:device_code"
+                    .to_string(),
+            })
+            .send()
+            .await?,
+    )
+    .await?;
+
+    println!("OXIDE_HOST={}", params.base_url());
+    println!("OXIDE_RESOLVE={}", params.resolve_nexus().await?);
+    println!("OXIDE_TOKEN={}", access_token);

+    ctx.cleanup().await?;
     eprintln!("let's roll.");
     Ok(())
 }
+
+async fn deserialize_byte_stream<T: DeserializeOwned>(
+    response: oxide_client::ResponseValue<oxide_client::ByteStream>,
+) -> Result<T> {
+    let body = hyper::Body::wrap_stream(response.into_inner_stream());
+    let bytes = hyper::body::to_bytes(body).await?;
+    Ok(serde_json::from_slice(&bytes)?)
+}
+
+#[derive(Deserialize)]
+struct DeviceAuthResponse {
+    device_code: String,
+    user_code: String,
+}
+
+#[derive(Deserialize)]
+struct DeviceAccessTokenGrant {
+    access_token: String,
+}
diff --git a/end-to-end-tests/src/helpers/ctx.rs b/end-to-end-tests/src/helpers/ctx.rs
index 1c95703807..2c66bd4724 100644
--- a/end-to-end-tests/src/helpers/ctx.rs
+++ b/end-to-end-tests/src/helpers/ctx.rs
@@ -5,7 +5,8 @@ use omicron_sled_agent::rack_setup::config::SetupServiceConfig;
 use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError};
 use oxide_client::types::{Name, ProjectCreate};
 use oxide_client::CustomDnsResolver;
-use oxide_client::{Client, ClientProjectsExt, ClientVpcsExt};
+use oxide_client::{Client, ClientImagesExt, ClientProjectsExt, ClientVpcsExt};
+use reqwest::dns::Resolve;
 use reqwest::header::{HeaderMap, HeaderValue};
 use reqwest::Url;
 use std::net::IpAddr;
@@ -13,6 +14,7 @@ use std::net::SocketAddr;
 use std::sync::Arc;
 use std::time::Duration;
 use trust_dns_resolver::error::ResolveErrorKind;
+use uuid::Uuid;

 const RSS_CONFIG_STR: &str = include_str!(concat!(
     env!("CARGO_MANIFEST_DIR"),
@@ -30,7 +32,7 @@ pub struct Context {

 impl Context {
     pub async fn new() -> Result<Context> {
-        Context::from_client(build_client().await?).await
+        Context::from_client(ClientParams::new()?.build_client().await?).await
     }

     pub async fn from_client(client: Client) -> Result<Context> {
@@ -48,6 +50,10 @@ impl Context {
         Ok(Context { client, project_name })
     }

+    pub async fn get_silo_image_id(&self, name: &str) -> Result<Uuid> {
+        Ok(self.client.image_view().image(name).send().await?.id)
+    }
+
     pub async fn cleanup(self) -> Result<()> {
         self.client
             .vpc_subnet_delete()
@@ -179,6 +185,27 @@ impl ClientParams {
         format!("{}://{}", self.proto, self.nexus_dns_name)
     }

+    pub async fn resolve_nexus(&self) -> Result<String> {
+        let address = self
+            .resolver
+            .resolve(self.nexus_dns_name.parse()?)
+            .await
+            .map_err(anyhow::Error::msg)?
+            .next()
+            .with_context(|| {
+                format!(
+                    "{} did not resolve to any addresses",
+                    self.nexus_dns_name
+                )
+            })?;
+        let port = match self.proto {
+            "http" => 80,
+            "https" => 443,
+            _ => unreachable!(),
+        };
+        Ok(format!("{}:{}:{}", self.nexus_dns_name, port, address.ip()))
+    }
+
     pub fn reqwest_builder(&self) -> reqwest::ClientBuilder {
         let mut builder =
             reqwest::ClientBuilder::new().dns_resolver(self.resolver.clone());
@@ -189,77 +216,77 @@ impl ClientParams {

         builder
     }
-}

-pub async fn build_client() -> Result<Client> {
-    // Prepare to make a login request.
-    let client_params = ClientParams::new()?;
-    let config = &client_params.rss_config;
-    let base_url = client_params.base_url();
-    let silo_name = config.recovery_silo.silo_name.as_str();
-    let login_url = format!("{}/v1/login/{}/local", base_url, silo_name);
-    let username: oxide_client::types::UserId =
-        config.recovery_silo.user_name.as_str().parse().map_err(|s| {
-            anyhow!("parsing configured recovery user name: {:?}", s)
-        })?;
-    // See the comment in the config file about this password.
-    let password: oxide_client::types::Password = "oxide".parse().unwrap();
+    pub async fn build_client(&self) -> Result<Client> {
+        // Prepare to make a login request.
+        let config = &self.rss_config;
+        let base_url = self.base_url();
+        let silo_name = config.recovery_silo.silo_name.as_str();
+        let login_url = format!("{}/v1/login/{}/local", base_url, silo_name);
+        let username: oxide_client::types::UserId =
+            config.recovery_silo.user_name.as_str().parse().map_err(|s| {
+                anyhow!("parsing configured recovery user name: {:?}", s)
+            })?;
+        // See the comment in the config file about this password.
+        let password: oxide_client::types::Password = "oxide".parse().unwrap();

-    // By the time we get here, Nexus might not be up yet. It may not have
-    // published its names to external DNS, and even if it has, it may not have
-    // opened its external listening socket. So we have to retry a bit until we
-    // succeed.
-    let session_token = wait_for_condition(
-        || async {
-            // Use a raw reqwest client because it's not clear that Progenitor
-            // is intended to support endpoints that return 300-level response
-            // codes. See progenitor#451.
-            eprintln!("{}: attempting to log into API", Utc::now());
+        // By the time we get here, Nexus might not be up yet. It may not have
+        // published its names to external DNS, and even if it has, it may not have
+        // opened its external listening socket. So we have to retry a bit until we
+        // succeed.
+        let session_token = wait_for_condition(
+            || async {
+                // Use a raw reqwest client because it's not clear that Progenitor
+                // is intended to support endpoints that return 300-level response
+                // codes. See progenitor#451.
+                eprintln!("{}: attempting to log into API", Utc::now());

-            let builder = client_params
-                .reqwest_builder()
-                .connect_timeout(Duration::from_secs(15))
-                .timeout(Duration::from_secs(60));
+                let builder = self
+                    .reqwest_builder()
+                    .connect_timeout(Duration::from_secs(15))
+                    .timeout(Duration::from_secs(60));

-            oxide_client::login(
-                builder,
-                &login_url,
-                username.clone(),
-                password.clone(),
-            )
-            .await
-            .map_err(|e| {
-                eprintln!("{}: login failed: {:#}", Utc::now(), e);
-                if let oxide_client::LoginError::RequestError(e) = &e {
-                    if e.is_connect() {
-                        return CondCheckError::NotYet;
+                oxide_client::login(
+                    builder,
+                    &login_url,
+                    username.clone(),
+                    password.clone(),
+                )
+                .await
+                .map_err(|e| {
+                    eprintln!("{}: login failed: {:#}", Utc::now(), e);
+                    if let oxide_client::LoginError::RequestError(e) = &e {
+                        if e.is_connect() {
+                            return CondCheckError::NotYet;
+                        }
                     }
-                }
-                CondCheckError::Failed(e)
-            })
-        },
-        &Duration::from_secs(1),
-        &Duration::from_secs(600),
-    )
-    .await
-    .context("logging in")?;
+                    CondCheckError::Failed(e)
+                })
+            },
+            &Duration::from_secs(1),
+            &Duration::from_secs(600),
+        )
+        .await
+        .context("logging in")?;

-    eprintln!("{}: login succeeded", Utc::now());
+        eprintln!("{}: login succeeded", Utc::now());

-    let mut headers = HeaderMap::new();
-    headers.insert(
-        http::header::COOKIE,
-        HeaderValue::from_str(&format!("session={}", session_token)).unwrap(),
-    );
+        let mut headers = HeaderMap::new();
+        headers.insert(
+            http::header::COOKIE,
+            HeaderValue::from_str(&format!("session={}", session_token))
+                .unwrap(),
+        );

-    let reqwest_client = client_params
-        .reqwest_builder()
-        .default_headers(headers)
-        .connect_timeout(Duration::from_secs(15))
-        .timeout(Duration::from_secs(60))
-        .build()?;
-    Ok(Client::new_with_client(&base_url, reqwest_client))
+        let reqwest_client = self
+            .reqwest_builder()
+            .default_headers(headers)
+            .connect_timeout(Duration::from_secs(15))
+            .timeout(Duration::from_secs(60))
+            .build()?;
+        Ok(Client::new_with_client(&base_url, reqwest_client))
+    }
 }

 async fn wait_for_records(
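A note on the `host:port:ip` string that `resolve_nexus()` above produces: it is exactly the triple that `curl --resolve` and the oxide CLI accept to pin a DNS name to a known address before external DNS is reachable. A self-contained sketch of the format (the hostname and address here are made-up illustration values):

```rust
use std::net::IpAddr;

/// Build the `host:port:ip` triple accepted by `curl --resolve` and the
/// oxide CLI, pinning `host` to `addr` without consulting DNS.
fn resolve_arg(host: &str, https: bool, addr: IpAddr) -> String {
    // Port is implied by the scheme, matching resolve_nexus() above.
    let port = if https { 443 } else { 80 };
    format!("{}:{}:{}", host, port, addr)
}

fn main() {
    // Hypothetical silo DNS name and address, for illustration only.
    let addr: IpAddr = "192.0.2.10".parse().unwrap();
    assert_eq!(
        resolve_arg("recovery.sys.oxide.test", true, addr),
        "recovery.sys.oxide.test:443:192.0.2.10"
    );
}
```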
diff --git a/end-to-end-tests/src/instance_launch.rs b/end-to-end-tests/src/instance_launch.rs
index 30ccd0d4a3..b3d1406070 100644
--- a/end-to-end-tests/src/instance_launch.rs
+++ b/end-to-end-tests/src/instance_launch.rs
@@ -5,13 +5,11 @@ use anyhow::{ensure, Context as _, Result};
 use async_trait::async_trait;
 use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError};
 use oxide_client::types::{
-    ByteCount, DiskCreate, DiskSource, ExternalIpCreate, ImageCreate,
-    ImageSource, InstanceCpuCount, InstanceCreate, InstanceDiskAttachment,
-    InstanceNetworkInterfaceAttachment, SshKeyCreate,
-};
-use oxide_client::{
-    ClientDisksExt, ClientImagesExt, ClientInstancesExt, ClientSessionExt,
+    ByteCount, DiskCreate, DiskSource, ExternalIpCreate, InstanceCpuCount,
+    InstanceCreate, InstanceDiskAttachment, InstanceNetworkInterfaceAttachment,
+    SshKeyCreate,
 };
+use oxide_client::{ClientDisksExt, ClientInstancesExt, ClientSessionExt};
 use russh::{ChannelMsg, Disconnect};
 use russh_keys::key::{KeyPair, PublicKey};
 use russh_keys::PublicKeyBase64;
@@ -38,26 +36,6 @@ async fn instance_launch() -> Result<()> {
         .send()
         .await?;

-    eprintln!("create system image");
-    let image_id = ctx
-        .client
-        .image_create()
-        .body(ImageCreate {
-            name: generate_name("debian")?,
-            description: String::new(),
-            os: "debian".try_into().map_err(anyhow::Error::msg)?,
-            version: "propolis-blob".into(),
-            source: ImageSource::Url {
-                url:
-                    "http://[fd00:1122:3344:101::1]:54321/debian-11-genericcloud-amd64.raw"
-                        .into(),
-                block_size: 512.try_into().map_err(anyhow::Error::msg)?,
-            },
-        })
-        .send()
-        .await?
-        .id;
-
     eprintln!("create disk");
     let disk_name = generate_name("disk")?;
     let disk_name = ctx
@@ -67,7 +45,9 @@ async fn instance_launch() -> Result<()> {
         .body(DiskCreate {
             name: disk_name.clone(),
             description: String::new(),
-            disk_source: DiskSource::Image { image_id },
+            disk_source: DiskSource::Image {
+                image_id: ctx.get_silo_image_id("debian11").await?,
+            },
             size: ByteCount(2048 * 1024 * 1024),
         })
         .send()
diff --git a/gateway/src/serial_console.rs b/gateway/src/serial_console.rs
index eb6183fdfb..3e49f8526a 100644
--- a/gateway/src/serial_console.rs
+++ b/gateway/src/serial_console.rs
@@ -48,8 +48,12 @@ pub(crate) async fn run(
     log: Logger,
 ) -> WebsocketChannelResult {
     let upgraded = conn.into_inner();
-    let config =
-        WebSocketConfig { max_send_queue: Some(4096), ..Default::default() };
+    let config = WebSocketConfig {
+        // Maintain a max write buffer size of 2 MB (this is only relevant if
+        // writes are failing).
+        max_write_buffer_size: 2 * 1024 * 1024,
+        ..Default::default()
+    };
     let ws_stream =
         WebSocketStream::from_raw_socket(upgraded, Role::Server, Some(config))
             .await;
diff --git a/gateway/tests/integration_tests/serial_console.rs b/gateway/tests/integration_tests/serial_console.rs
index 9ab26bef4a..11cb9674a7 100644
--- a/gateway/tests/integration_tests/serial_console.rs
+++ b/gateway/tests/integration_tests/serial_console.rs
@@ -100,11 +100,12 @@ async fn serial_console_detach() {
             }
             tungstenite::Error::ConnectionClosed
             | tungstenite::Error::AlreadyClosed
+            | tungstenite::Error::AttackAttempt
             | tungstenite::Error::Io(_)
             | tungstenite::Error::Tls(_)
             | tungstenite::Error::Capacity(_)
             | tungstenite::Error::Protocol(_)
-            | tungstenite::Error::SendQueueFull(_)
+            | tungstenite::Error::WriteBufferFull(_)
             | tungstenite::Error::Utf8
             | tungstenite::Error::Url(_)
             | tungstenite::Error::HttpFormat(_) => panic!("unexpected error"),
diff --git a/illumos-utils/Cargo.toml b/illumos-utils/Cargo.toml
index 497454e047..8296eace5c 100644
--- a/illumos-utils/Cargo.toml
+++ b/illumos-utils/Cargo.toml
@@ -11,6 +11,7 @@ async-trait.workspace = true
 bhyve_api.workspace = true
 byteorder.workspace = true
 camino.workspace = true
+camino-tempfile.workspace = true
 cfg-if.workspace = true
 crucible-smf.workspace = true
 futures.workspace = true
diff --git a/illumos-utils/src/opte/port_manager.rs b/illumos-utils/src/opte/port_manager.rs
index f0a8d8d839..3558ef1c78 100644
--- a/illumos-utils/src/opte/port_manager.rs
+++ b/illumos-utils/src/opte/port_manager.rs
@@ -20,6 +20,7 @@ use omicron_common::api::internal::shared::NetworkInterfaceKind;
 use omicron_common::api::internal::shared::SourceNatConfig;
 use oxide_vpc::api::AddRouterEntryReq;
 use oxide_vpc::api::DhcpCfg;
+use oxide_vpc::api::ExternalIpCfg;
 use oxide_vpc::api::IpCfg;
 use oxide_vpc::api::IpCidr;
 use oxide_vpc::api::Ipv4Cfg;
@@ -99,7 +100,8 @@ impl PortManager {
         &self,
         nic: &NetworkInterface,
         source_nat: Option<SourceNatConfig>,
-        external_ips: &[IpAddr],
+        ephemeral_ip: Option<IpAddr>,
+        floating_ips: &[IpAddr],
         firewall_rules: &[VpcFirewallRule],
         dhcp_config: DhcpCfg,
     ) -> Result<(Port, PortTicket), Error> {
@@ -111,13 +113,6 @@ impl PortManager {
         let boundary_services = default_boundary_services();

         // Describe the external IP addresses for this port.
-        //
-        // Note that we're currently only taking the first address, which is all
-        // that OPTE supports. The array is guaranteed to be limited by Nexus.
-        // See https://github.com/oxidecomputer/omicron/issues/1467
-        // See https://github.com/oxidecomputer/opte/issues/196
-        let external_ip = external_ips.get(0);
-
         macro_rules! ip_cfg {
            ($ip:expr, $log_prefix:literal, $ip_t:path, $cidr_t:path,
             $ipcfg_e:path, $ipcfg_t:ident, $snat_t:ident) => {{
@@ -152,25 +147,43 @@ impl PortManager {
                    }
                    None => None,
                };
-                let external_ip = match external_ip {
-                    Some($ip_t(ip)) => Some((*ip).into()),
+                let ephemeral_ip = match ephemeral_ip {
+                    Some($ip_t(ip)) => Some(ip.into()),
                    Some(_) => {
                        error!(
                            self.inner.log,
-                            concat!($log_prefix, " external IP");
-                            "external_ip" => ?external_ip,
+                            concat!($log_prefix, " ephemeral IP");
+                            "ephemeral_ip" => ?ephemeral_ip,
                        );
                        return Err(Error::InvalidPortIpConfig);
                    }
                    None => None,
                };
+                let floating_ips: Vec<_> = floating_ips
+                    .iter()
+                    .copied()
+                    .map(|ip| match ip {
+                        $ip_t(ip) => Ok(ip.into()),
+                        _ => {
+                            error!(
+                                self.inner.log,
+                                concat!($log_prefix, " floating IP");
+                                "floating_ip" => ?ip,
+                            );
+                            Err(Error::InvalidPortIpConfig)
+                        }
+                    })
+                    .collect::<Result<Vec<_>, _>>()?;
                $ipcfg_e($ipcfg_t {
                    vpc_subnet,
                    private_ip: $ip.into(),
                    gateway_ip: gateway_ip.into(),
-                    snat,
-                    external_ips: external_ip,
+                    external_ips: ExternalIpCfg {
+                        ephemeral_ip,
+                        snat,
+                        floating_ips,
+                    },
                })
            }}
        }
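The port_manager change above replaces a single `external_ips: &[IpAddr]` (of which only the first element was honored) with an explicit split between one optional ephemeral IP and any number of floating IPs, plus SNAT. A self-contained sketch of the resulting shape, using stand-in types for illustration (the real `ExternalIpCfg` lives in oxide_vpc and is parameterized per IP version):

```rust
use std::net::Ipv4Addr;

// Stand-in for oxide_vpc's source NAT port-range config, elided here.
#[derive(Debug, Default)]
struct SnatCfg;

/// Mirrors the field split introduced above: at most one ephemeral IP,
/// an optional SNAT config, and arbitrarily many floating IPs.
#[derive(Debug, Default)]
struct ExternalIpCfg {
    ephemeral_ip: Option<Ipv4Addr>,
    snat: Option<SnatCfg>,
    floating_ips: Vec<Ipv4Addr>,
}

fn main() {
    let cfg = ExternalIpCfg {
        ephemeral_ip: Some(Ipv4Addr::new(203, 0, 113, 5)),
        snat: None,
        floating_ips: vec![
            Ipv4Addr::new(203, 0, 113, 10),
            Ipv4Addr::new(203, 0, 113, 11),
        ],
    };
    // Previously only `external_ips.get(0)` was used; with this shape every
    // floating IP is carried through to the OPTE port configuration.
    println!("{cfg:?}");
}
```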
diff --git a/illumos-utils/src/running_zone.rs b/illumos-utils/src/running_zone.rs
index bdf7ed0cbf..ea80a6d34b 100644
--- a/illumos-utils/src/running_zone.rs
+++ b/illumos-utils/src/running_zone.rs
@@ -11,10 +11,12 @@ use crate::opte::{Port, PortTicket};
 use crate::svc::wait_for_service;
 use crate::zone::{AddressRequest, IPADM, ZONE_PREFIX};
 use camino::{Utf8Path, Utf8PathBuf};
+use camino_tempfile::Utf8TempDir;
 use ipnetwork::IpNetwork;
 use omicron_common::backoff;
 use slog::{error, info, o, warn, Logger};
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+use std::sync::Arc;
 #[cfg(target_os = "illumos")]
 use std::sync::OnceLock;
 #[cfg(target_os = "illumos")]
@@ -214,7 +216,7 @@ mod zenter {
     // the contracts used for this come from templates that define becoming
     // empty as a critical event.
     pub fn contract_reaper(log: Logger) {
-        const EVENT_PATH: &[u8] = b"/system/contract/process/pbundle";
+        const EVENT_PATH: &'static [u8] = b"/system/contract/process/pbundle";
         const CT_PR_EV_EMPTY: u64 = 1;

         let cpath = CString::new(EVENT_PATH).unwrap();
@@ -327,7 +329,8 @@ mod zenter {
     }

     impl Template {
-        const TEMPLATE_PATH: &[u8] = b"/system/contract/process/template\0";
+        const TEMPLATE_PATH: &'static [u8] =
+            b"/system/contract/process/template\0";

         // Constants related to how the contract below is managed. See
         // `usr/src/uts/common/sys/contract/process.h` in the illumos sources
@@ -1042,7 +1045,7 @@ pub struct ServiceProcess {
     pub log_file: Utf8PathBuf,
 }

-/// Errors returned from [`InstalledZone::install`].
+/// Errors returned from [`ZoneBuilder::install`].
 #[derive(thiserror::Error, Debug)]
 pub enum InstallZoneError {
     #[error("Cannot create '{zone}': failed to create control VNIC: {err}")]
@@ -1062,6 +1065,9 @@ pub enum InstallZoneError {

     #[error("Failed to find zone image '{image}' from {paths:?}")]
     ImageNotFound { image: String, paths: Vec<Utf8PathBuf> },
+
+    #[error("Attempted to call install() on underspecified ZoneBuilder")]
+    IncompleteBuilder,
 }

 pub struct InstalledZone {
@@ -1118,24 +1124,208 @@ impl InstalledZone {
         &self.zonepath
     }

-    // TODO: This would benefit from a "builder-pattern" interface.
-    #[allow(clippy::too_many_arguments)]
-    pub async fn install(
-        log: &Logger,
-        underlay_vnic_allocator: &VnicAllocator<Etherstub>,
-        zone_root_path: &Utf8Path,
-        zone_image_paths: &[Utf8PathBuf],
-        zone_type: &str,
-        unique_name: Option<Uuid>,
-        datasets: &[zone::Dataset],
-        filesystems: &[zone::Fs],
-        data_links: &[String],
-        devices: &[zone::Device],
-        opte_ports: Vec<(Port, PortTicket)>,
-        bootstrap_vnic: Option<Link>,
-        links: Vec<Link>,
-        limit_priv: Vec<String>,
-    ) -> Result<InstalledZone, InstallZoneError> {
+    pub fn site_profile_xml_path(&self) -> Utf8PathBuf {
+        let mut path: Utf8PathBuf = self.zonepath().into();
+        path.push("root/var/svc/profile/site.xml");
+        path
+    }
+}
+
+#[derive(Clone)]
+pub struct FakeZoneBuilderConfig {
+    temp_dir: Arc<Utf8TempDir>,
+}
+
+#[derive(Clone, Default)]
+pub struct ZoneBuilderFactory {
+    // Why this is part of this builder/factory and not some separate builder
+    // type: At time of writing, to the best of my knowledge:
+    // - If we want builder pattern, we need to return some type of `Self`.
+    // - If we have a trait that returns `Self` type, we can't turn it into a
+    //   trait object (i.e. Box<dyn ZoneBuilderFactoryInterface>).
+    // - Plumbing concrete types as generics through every other type that
+    //   needs to construct zones (and anything else with a lot of parameters)
+    //   seems like a worse idea.
+    fake_cfg: Option<FakeZoneBuilderConfig>,
+}
+
+impl ZoneBuilderFactory {
+    /// For use in unit tests that don't require actual zone creation to occur.
+    pub fn fake() -> Self {
+        Self {
+            fake_cfg: Some(FakeZoneBuilderConfig {
+                temp_dir: Arc::new(Utf8TempDir::new().unwrap()),
+            }),
+        }
+    }
+
+    /// Create a [ZoneBuilder] that inherits this factory's fakeness.
+    pub fn builder<'a>(&self) -> ZoneBuilder<'a> {
+        ZoneBuilder { fake_cfg: self.fake_cfg.clone(), ..Default::default() }
+    }
+}
+
+/// Builder-pattern construct for creating an [InstalledZone].
+/// Created by [ZoneBuilderFactory].
+#[derive(Default)]
+pub struct ZoneBuilder<'a> {
+    log: Option<Logger>,
+    underlay_vnic_allocator: Option<&'a VnicAllocator<Etherstub>>,
+    zone_root_path: Option<&'a Utf8Path>,
+    zone_image_paths: Option<&'a [Utf8PathBuf]>,
+    zone_type: Option<&'a str>,
+    unique_name: Option<Uuid>, // actually optional
+    datasets: Option<&'a [zone::Dataset]>,
+    filesystems: Option<&'a [zone::Fs]>,
+    data_links: Option<&'a [String]>,
+    devices: Option<&'a [zone::Device]>,
+    opte_ports: Option<Vec<(Port, PortTicket)>>,
+    bootstrap_vnic: Option<Link>, // actually optional
+    links: Option<Vec<Link>>,
+    limit_priv: Option<Vec<String>>,
+    fake_cfg: Option<FakeZoneBuilderConfig>,
+}
+
+impl<'a> ZoneBuilder<'a> {
+    pub fn with_log(mut self, log: Logger) -> Self {
+        self.log = Some(log);
+        self
+    }
+
+    pub fn with_underlay_vnic_allocator(
+        mut self,
+        vnic_allocator: &'a VnicAllocator<Etherstub>,
+    ) -> Self {
+        self.underlay_vnic_allocator = Some(vnic_allocator);
+        self
+    }
+
+    pub fn with_zone_root_path(mut self, root_path: &'a Utf8Path) -> Self {
+        self.zone_root_path = Some(root_path);
+        self
+    }
+
+    pub fn with_zone_image_paths(
+        mut self,
+        image_paths: &'a [Utf8PathBuf],
+    ) -> Self {
+        self.zone_image_paths = Some(image_paths);
+        self
+    }
+
+    pub fn with_zone_type(mut self, zone_type: &'a str) -> Self {
+        self.zone_type = Some(zone_type);
+        self
+    }
+
+    pub fn with_unique_name(mut self, uuid: Uuid) -> Self {
+        self.unique_name = Some(uuid);
+        self
+    }
+
+    pub fn with_datasets(mut self, datasets: &'a [zone::Dataset]) -> Self {
+        self.datasets = Some(datasets);
+        self
+    }
+
+    pub fn with_filesystems(mut self, filesystems: &'a [zone::Fs]) -> Self {
+        self.filesystems = Some(filesystems);
+        self
+    }
+
+    pub fn with_data_links(mut self, links: &'a [String]) -> Self {
+        self.data_links = Some(links);
+        self
+    }
+
+    pub fn with_devices(mut self, devices: &'a [zone::Device]) -> Self {
+        self.devices = Some(devices);
+        self
+    }
+
+    pub fn with_opte_ports(mut self, ports: Vec<(Port, PortTicket)>) -> Self {
+        self.opte_ports = Some(ports);
+        self
+    }
+
+    pub fn with_bootstrap_vnic(mut self, vnic: Link) -> Self {
+        self.bootstrap_vnic = Some(vnic);
+        self
+    }
+
+    pub fn with_links(mut self, links: Vec<Link>) -> Self {
+        self.links = Some(links);
+        self
+    }
+
+    pub fn with_limit_priv(mut self, limit_priv: Vec<String>) -> Self {
+        self.limit_priv = Some(limit_priv);
+        self
+    }
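The builder above stores every parameter as an `Option` and rejects underspecified builders with a single `IncompleteBuilder` error at `install()` time, via one let-else destructure. A self-contained miniature of that pattern (stand-in types; the real builder is the `ZoneBuilder` in this diff):

```rust
#[derive(Debug)]
enum InstallError {
    IncompleteBuilder,
}

#[derive(Default)]
struct Builder {
    name: Option<String>,
    size: Option<u64>,
    note: Option<String>, // actually optional, like `unique_name` above
}

impl Builder {
    fn with_name(mut self, name: &str) -> Self {
        self.name = Some(name.to_string());
        self
    }
    fn with_size(mut self, size: u64) -> Self {
        self.size = Some(size);
        self
    }
    fn build(self) -> Result<String, InstallError> {
        // Required fields must be Some; truly-optional ones pass through.
        let Self { name: Some(name), size: Some(size), note } = self else {
            return Err(InstallError::IncompleteBuilder);
        };
        Ok(format!("{name}/{size}/{note:?}"))
    }
}

fn main() {
    // Missing `size` -> one uniform error instead of a 14-argument call.
    assert!(Builder::default().with_name("oxz_test").build().is_err());
    assert!(Builder::default().with_name("oxz_test").with_size(4).build().is_ok());
}
```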
+
+    fn fake_install(self) -> Result<InstalledZone, InstallZoneError> {
+        let zone = self
+            .zone_type
+            .ok_or(InstallZoneError::IncompleteBuilder)?
+            .to_string();
+        let control_vnic = self
+            .underlay_vnic_allocator
+            .ok_or(InstallZoneError::IncompleteBuilder)?
+            .new_control(None)
+            .map_err(move |err| InstallZoneError::CreateVnic { zone, err })?;
+        let fake_cfg = self.fake_cfg.unwrap();
+        let temp_dir = fake_cfg.temp_dir.path().to_path_buf();
+        (|| {
+            let full_zone_name = InstalledZone::get_zone_name(
+                self.zone_type?,
+                self.unique_name,
+            );
+            let zonepath = temp_dir
+                .join(self.zone_root_path?.strip_prefix("/").unwrap())
+                .join(&full_zone_name);
+            let iz = InstalledZone {
+                log: self.log?,
+                zonepath,
+                name: full_zone_name,
+                control_vnic,
+                bootstrap_vnic: self.bootstrap_vnic,
+                opte_ports: self.opte_ports?,
+                links: self.links?,
+            };
+            let xml_path = iz.site_profile_xml_path().parent()?.to_path_buf();
+            std::fs::create_dir_all(&xml_path)
+                .unwrap_or_else(|_| panic!("ZoneBuilder::fake_install couldn't create site profile xml path {:?}", xml_path));
+            Some(iz)
+        })()
+        .ok_or(InstallZoneError::IncompleteBuilder)
+    }
+
+    pub async fn install(self) -> Result<InstalledZone, InstallZoneError> {
+        if self.fake_cfg.is_some() {
+            return self.fake_install();
+        }
+
+        let Self {
+            log: Some(log),
+            underlay_vnic_allocator: Some(underlay_vnic_allocator),
+            zone_root_path: Some(zone_root_path),
+            zone_image_paths: Some(zone_image_paths),
+            zone_type: Some(zone_type),
+            unique_name,
+            datasets: Some(datasets),
+            filesystems: Some(filesystems),
+            data_links: Some(data_links),
+            devices: Some(devices),
+            opte_ports: Some(opte_ports),
+            bootstrap_vnic,
+            links: Some(links),
+            limit_priv: Some(limit_priv),
+            ..
+        } = self
+        else {
+            return Err(InstallZoneError::IncompleteBuilder);
+        };
+
         let control_vnic =
             underlay_vnic_allocator.new_control(None).map_err(|err| {
                 InstallZoneError::CreateVnic {
@@ -1144,7 +1334,8 @@ impl InstalledZone {
                 }
             })?;

-        let full_zone_name = Self::get_zone_name(zone_type, unique_name);
+        let full_zone_name =
+            InstalledZone::get_zone_name(zone_type, unique_name);

         // Looks for the image within `zone_image_path`, in order.
         let image = format!("{}.tar.gz", zone_type);
@@ -1182,7 +1373,7 @@ impl InstalledZone {
         net_device_names.dedup();

         Zones::install_omicron_zone(
-            log,
+            &log,
             &zone_root_path,
             &full_zone_name,
             &zone_image_path,
@@ -1209,12 +1400,6 @@ impl InstalledZone {
             links,
         })
     }
-
-    pub fn site_profile_xml_path(&self) -> Utf8PathBuf {
-        let mut path: Utf8PathBuf = self.zonepath().into();
-        path.push("root/var/svc/profile/site.xml");
-        path
-    }
 }

 /// Return true if the service with the given FMRI appears to be an
diff --git a/installinator-common/Cargo.toml b/installinator-common/Cargo.toml
index 4381de74eb..dd8540c6f8 100644
--- a/installinator-common/Cargo.toml
+++ b/installinator-common/Cargo.toml
@@ -8,10 +8,16 @@ license = "MPL-2.0"
 anyhow.workspace = true
 camino.workspace = true
 illumos-utils.workspace = true
+libc.workspace = true
 schemars.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 serde_with.workspace = true
 thiserror.workspace = true
+tokio.workspace = true
 update-engine.workspace = true
 omicron-workspace-hack.workspace = true
+
+[dev-dependencies]
+proptest.workspace = true
+test-strategy.workspace = true
diff --git a/installinator/src/block_size_writer.rs b/installinator-common/src/block_size_writer.rs
similarity index 81%
rename from installinator/src/block_size_writer.rs
rename to installinator-common/src/block_size_writer.rs
index 3f41a4ee99..1548594b41 100644
--- a/installinator/src/block_size_writer.rs
+++ b/installinator-common/src/block_size_writer.rs
@@ -11,31 +11,37 @@ use tokio::io::AsyncWrite;

 /// `BlockSizeBufWriter` is analogous to a tokio's `BufWriter`, except it
 /// guarantees that writes made to the underlying writer are always
-/// _exactly_ the requested block size, with two exceptions: explicitly
-/// calling (1) `flush()` or (2) `shutdown()` will write any
-/// buffered-but-not-yet-written data to the underlying buffer regardless of
-/// its length.
+/// _exactly_ the requested block size, with three exceptions:
+///
+/// 1. Calling `flush()` will write any currently-buffered data to the
+///    underlying writer, regardless of its length.
+/// 2. Similarly, calling `shutdown()` will flush any currently-buffered data
+///    to the underlying writer.
+/// 3. When `BlockSizeBufWriter` attempts to write a block-length amount of data
+///    to the underlying writer, if that writer only accepts a portion of that
+///    data, `BlockSizeBufWriter` will continue attempting to write the
+///    remainder of the block.
 ///
 /// When `BlockSizeBufWriter` is dropped, any buffered data it's holding
 /// will be discarded. It is critical to manually call
 /// `BlockSizeBufWriter:flush()` or `BlockSizeBufWriter::shutdown()` prior
 /// to dropping to avoid data loss.
-pub(crate) struct BlockSizeBufWriter<W> {
+pub struct BlockSizeBufWriter<W> {
     inner: W,
     buf: Vec<u8>,
     block_size: usize,
 }

 impl<W: AsyncWrite + Unpin> BlockSizeBufWriter<W> {
-    pub(crate) fn with_block_size(block_size: usize, inner: W) -> Self {
+    pub fn with_block_size(block_size: usize, inner: W) -> Self {
         Self { inner, buf: Vec::with_capacity(block_size), block_size }
     }

-    pub(crate) fn into_inner(self) -> W {
+    pub fn into_inner(self) -> W {
         self.inner
     }

-    pub(crate) fn block_size(&self) -> usize {
+    pub fn block_size(&self) -> usize {
         self.block_size
     }

@@ -46,6 +52,13 @@ impl<W: AsyncWrite + Unpin> BlockSizeBufWriter<W> {
     fn flush_buf(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
         let mut written = 0;
         let mut ret = Ok(());
+
+        // We expect this loop to execute exactly one time: we try to write the
+        // entirety of `self.buf` to `self.inner`, and presumably it is a type
+        // that expects to receive a block of data at once, so we'll immediately
+        // jump to `written == self.buf.len()`. If it returns `Ok(n)` for some
+        // `n < self.buf.len()`, we'll loop and try to write the rest of the
+        // data in less-than-block-sized chunks.
         while written < self.buf.len() {
             match ready!(
                 Pin::new(&mut self.inner).poll_write(cx, &self.buf[written..])
@@ -128,8 +141,8 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for BlockSizeBufWriter<W> {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::test_helpers::with_test_runtime;
     use anyhow::Result;
+    use std::future::Future;
     use test_strategy::proptest;
     use tokio::io::AsyncWriteExt;

@@ -167,6 +180,19 @@ mod tests {
         }
     }

+    fn with_test_runtime<F, Fut, T>(f: F) -> T
+    where
+        F: FnOnce() -> Fut,
+        Fut: Future<Output = T>,
+    {
+        let runtime = tokio::runtime::Builder::new_current_thread()
+            .enable_time()
+            .start_paused(true)
+            .build()
+            .expect("tokio Runtime built successfully");
+        runtime.block_on(f())
+    }
+
     #[proptest]
     fn proptest_block_writer(
         chunks: Vec<Vec<u8>>,
diff --git a/installinator-common/src/lib.rs b/installinator-common/src/lib.rs
index b77385840f..4771de7b27 100644
--- a/installinator-common/src/lib.rs
+++ b/installinator-common/src/lib.rs
@@ -4,6 +4,10 @@

 //! Common types shared by the installinator client and server.

+mod block_size_writer;
 mod progress;
+mod raw_disk_writer;

+pub use block_size_writer::*;
 pub use progress::*;
+pub use raw_disk_writer::*;
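With `BlockSizeBufWriter` now public in installinator-common, a short usage sketch (assuming this crate plus tokio; a `Vec<u8>` stands in for the raw device here, since tokio implements `AsyncWrite` for it):

```rust
use installinator_common::BlockSizeBufWriter;
use tokio::io::AsyncWriteExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // A Vec<u8> stands in for the device; real callers wrap a File.
    let mut w = BlockSizeBufWriter::with_block_size(512, Vec::new());

    // 2048 bytes are passed down to the inner writer in exact 512-byte
    // blocks, never partial ones.
    w.write_all(&[0xAB; 2048]).await?;

    // Per the docs above: always flush (or shutdown) before dropping,
    // or buffered data is silently discarded.
    w.flush().await?;
    assert_eq!(w.into_inner().len(), 2048);
    Ok(())
}
```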
diff --git a/installinator-common/src/raw_disk_writer.rs b/installinator-common/src/raw_disk_writer.rs
new file mode 100644
index 0000000000..35d3862e67
--- /dev/null
+++ b/installinator-common/src/raw_disk_writer.rs
@@ -0,0 +1,123 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Async writer for raw disks on illumos (e.g., host OS phase 2 images written
+//! to M.2 drives).
+
+use crate::BlockSizeBufWriter;
+use illumos_utils::dkio;
+use illumos_utils::dkio::MediaInfoExtended;
+use std::io;
+use std::os::fd::AsRawFd;
+use std::path::Path;
+use std::pin::Pin;
+use std::task::Context;
+use std::task::Poll;
+use tokio::fs::File;
+use tokio::io::AsyncWrite;
+use tokio::io::AsyncWriteExt;
+
+/// Writer for illumos raw disks.
+///
+/// Construct an instance via [`RawDiskWriter::open()`], write to it just like
+/// any other async writer (it will handle passing writes down to the device in
+/// chunks of length [`RawDiskWriter::block_size()`]), and then call
+/// [`RawDiskWriter::finalize()`]. It is **critical** to call `finalize()`;
+/// failure to do so will likely lead to data loss.
+///
+/// `RawDiskWriter` attempts to be as conservative as it can about ensuring data
+/// is written:
+///
+/// * The device is opened with `O_SYNC`
+/// * In `finalize()`, the file is `fsync`'d after any remaining data is flushed
+/// * In `finalize()`, the disk write cache is flushed (if supported by the
+///   target device)
+///
+/// Writing an amount of data that is not a multiple of the device's
+/// `block_size()` will likely result in a failure when writing / flushing the
+/// final not-correctly-sized chunk.
+///
+/// This type is illumos-specific due to using dkio for two things:
+///
+/// 1. Determining the logical block size of the device
+/// 2. Flushing the disk write cache
+pub struct RawDiskWriter {
+    inner: BlockSizeBufWriter<File>,
+}
+
+impl RawDiskWriter {
+    /// Open the disk device at `path` for writing, and attempt to determine its
+    /// logical block size via [`MediaInfoExtended`].
+    pub async fn open(path: &Path) -> io::Result<Self> {
+        let f = tokio::fs::OpenOptions::new()
+            .create(false)
+            .write(true)
+            .truncate(false)
+            .custom_flags(libc::O_SYNC)
+            .open(path)
+            .await?;
+
+        let media_info = MediaInfoExtended::from_fd(f.as_raw_fd())?;
+
+        let inner = BlockSizeBufWriter::with_block_size(
+            media_info.logical_block_size as usize,
+            f,
+        );
+
+        Ok(Self { inner })
+    }
+
+    /// The logical block size of the underlying device.
+    pub fn block_size(&self) -> usize {
+        self.inner.block_size()
+    }
+
+    /// Flush any remaining data and attempt to ensure synchronization with the
+    /// device.
+    pub async fn finalize(mut self) -> io::Result<()> {
+        // Flush any remaining data in our buffer
+        self.inner.flush().await?;
+
+        // `fsync` the file...
+        let f = self.inner.into_inner();
+        f.sync_all().await?;
+
+        // ...and also attempt to flush the disk write cache
+        tokio::task::spawn_blocking(move || {
+            match dkio::flush_write_cache(f.as_raw_fd()) {
+                Ok(()) => Ok(()),
+                // Some drives don't support `flush_write_cache`; we don't want
+                // to fail in this case.
+                Err(err) if err.raw_os_error() == Some(libc::ENOTSUP) => Ok(()),
+                Err(err) => Err(err),
+            }
+        })
+        .await
+        .expect("task panicked")
+    }
+}
+
+impl AsyncWrite for RawDiskWriter {
+    fn poll_write(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+        buf: &[u8],
+    ) -> Poll<io::Result<usize>> {
+        Pin::new(&mut self.inner).poll_write(cx, buf)
+    }
+
+    fn poll_flush(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Poll<io::Result<()>> {
+        Pin::new(&mut self.inner).poll_flush(cx)
+    }
+
+    fn poll_shutdown(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Poll<io::Result<()>> {
+        Pin::new(&mut self.inner).poll_shutdown(cx)
+    }
+}
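A usage sketch for the new writer (assumes illumos, appropriate privileges, and a placeholder device path; per the docs above, `finalize()` is mandatory):

```rust
use installinator_common::RawDiskWriter;
use std::path::Path;
use tokio::io::AsyncWriteExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Placeholder device path, for illustration only.
    let mut writer = RawDiskWriter::open(Path::new("/dev/dsk/c1t0d0s0")).await?;

    // Total bytes written must be a multiple of block_size(), or the final
    // flush will fail.
    let block = vec![0u8; writer.block_size()];
    writer.write_all(&block).await?;

    // finalize() = flush remaining buffer + fsync + disk write-cache flush.
    writer.finalize().await?;
    Ok(())
}
```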
diff --git a/installinator/src/lib.rs b/installinator/src/lib.rs
index c7de189576..3b1d768a7d 100644
--- a/installinator/src/lib.rs
+++ b/installinator/src/lib.rs
@@ -4,7 +4,6 @@

 mod artifact;
 mod async_temp_file;
-mod block_size_writer;
 mod bootstrap;
 mod dispatch;
 mod errors;
diff --git a/installinator/src/write.rs b/installinator/src/write.rs
index 22dd2adbf6..380595b4cd 100644
--- a/installinator/src/write.rs
+++ b/installinator/src/write.rs
@@ -6,7 +6,6 @@ use std::{
     collections::{btree_map::Entry, BTreeMap, BTreeSet},
     fmt,
     io::{self, Read},
-    os::fd::AsRawFd,
     time::Duration,
 };

@@ -15,14 +14,11 @@ use async_trait::async_trait;
 use buf_list::BufList;
 use bytes::Buf;
 use camino::{Utf8Path, Utf8PathBuf};
-use illumos_utils::{
-    dkio::{self, MediaInfoExtended},
-    zpool::{Zpool, ZpoolName},
-};
+use illumos_utils::zpool::{Zpool, ZpoolName};
 use installinator_common::{
-    ControlPlaneZonesSpec, ControlPlaneZonesStepId, M2Slot, StepContext,
-    StepProgress, StepResult, StepSuccess, UpdateEngine, WriteComponent,
-    WriteError, WriteOutput, WriteSpec, WriteStepId,
+    ControlPlaneZonesSpec, ControlPlaneZonesStepId, M2Slot, RawDiskWriter,
+    StepContext, StepProgress, StepResult, StepSuccess, UpdateEngine,
+    WriteComponent, WriteError, WriteOutput, WriteSpec, WriteStepId,
 };
 use omicron_common::update::{ArtifactHash, ArtifactHashId};
 use sha2::{Digest, Sha256};
@@ -36,10 +32,7 @@ use update_engine::{
     errors::NestedEngineError, events::ProgressUnits, StepSpec,
 };

-use crate::{
-    async_temp_file::AsyncNamedTempFile, block_size_writer::BlockSizeBufWriter,
-    hardware::Hardware,
-};
+use crate::{async_temp_file::AsyncNamedTempFile, hardware::Hardware};

 #[derive(Clone, Debug)]
 struct ArtifactDestination {
@@ -754,28 +747,13 @@ impl WriteTransportWriter for AsyncNamedTempFile {
 }

 #[async_trait]
-impl WriteTransportWriter for BlockSizeBufWriter<tokio::fs::File> {
+impl WriteTransportWriter for RawDiskWriter {
     fn block_size(&self) -> Option<usize> {
-        Some(BlockSizeBufWriter::block_size(self))
+        Some(RawDiskWriter::block_size(self))
     }

     async fn finalize(self) -> io::Result<()> {
-        let f = self.into_inner();
-        f.sync_all().await?;
-
-        // We only create `BlockSizeBufWriter` for the raw block device storing
-        // the OS ramdisk. After `fsync`'ing, also flush the write cache.
-        tokio::task::spawn_blocking(move || {
-            match dkio::flush_write_cache(f.as_raw_fd()) {
-                Ok(()) => Ok(()),
-                // Some drives don't support `flush_write_cache`; we don't want
-                // to fail in this case.
-                Err(err) if err.raw_os_error() == Some(libc::ENOTSUP) => Ok(()),
-                Err(err) => Err(err),
-            }
-        })
-        .await
-        .unwrap()
+        RawDiskWriter::finalize(self).await
     }
 }

@@ -810,7 +788,7 @@ struct BlockDeviceTransport;

 #[async_trait]
 impl WriteTransport for BlockDeviceTransport {
-    type W = BlockSizeBufWriter<tokio::fs::File>;
+    type W = RawDiskWriter;

     async fn make_writer(
         &mut self,
@@ -819,12 +797,7 @@ impl WriteTransport for BlockDeviceTransport {
         destination: &Utf8Path,
         total_bytes: u64,
     ) -> Result<Self::W, WriteError> {
-        let f = tokio::fs::OpenOptions::new()
-            .create(false)
-            .write(true)
-            .truncate(false)
-            .custom_flags(libc::O_SYNC)
-            .open(destination)
+        let writer = RawDiskWriter::open(destination.as_std_path())
             .await
             .map_err(|error| WriteError::WriteError {
                 component,
@@ -834,18 +807,7 @@ impl WriteTransport for BlockDeviceTransport {
                 error,
             })?;

-        let media_info =
-            MediaInfoExtended::from_fd(f.as_raw_fd()).map_err(|error| {
-                WriteError::WriteError {
-                    component,
-                    slot,
-                    written_bytes: 0,
-                    total_bytes,
-                    error,
-                }
-            })?;
-
-        let block_size = u64::from(media_info.logical_block_size);
+        let block_size = writer.block_size() as u64;

         // When writing to a block device, we must write a multiple of the block
         // size. We can assume the image we're given should be
@@ -858,12 +820,15 @@ impl WriteTransport for BlockDeviceTransport {
                 total_bytes,
                 error: io::Error::new(
                     io::ErrorKind::InvalidData,
-                    format!("file size ({total_bytes}) is not a multiple of target device block size ({block_size})")
+                    format!(
+                        "file size ({total_bytes}) is not a multiple of \
+                         target device block size ({block_size})"
+                    ),
                 ),
             });
         }

-        Ok(BlockSizeBufWriter::with_block_size(block_size as usize, f))
+        Ok(writer)
     }
 }
diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml
index 4fc13a31d8..704a7ab7bd 100644
--- a/nexus/Cargo.toml
+++ b/nexus/Cargo.toml
@@ -12,6 +12,7 @@ anyhow.workspace = true
 assert_matches.workspace = true
 async-trait.workspace = true
 base64.workspace = true
+buf-list.workspace = true
 cancel-safe-futures.workspace = true
 camino.workspace = true
 clap.workspace = true
diff --git a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml
index b7514c4806..477ce7d11f 100644
--- a/nexus/db-model/Cargo.toml
+++ b/nexus/db-model/Cargo.toml
@@ -26,6 +26,7 @@ serde.workspace = true
 serde_json.workspace = true
 steno.workspace = true
 strum.workspace = true
+thiserror.workspace = true
 uuid.workspace = true

 db-macros.workspace = true
diff --git a/nexus/db-model/src/external_ip.rs b/nexus/db-model/src/external_ip.rs
index 1152e0109c..1a755f0396 100644
--- a/nexus/db-model/src/external_ip.rs
+++ b/nexus/db-model/src/external_ip.rs
@@ -7,10 +7,12 @@

 use crate::impl_enum_type;
 use crate::schema::external_ip;
+use crate::schema::floating_ip;
 use crate::Name;
 use crate::SqlU16;
 use chrono::DateTime;
 use chrono::Utc;
+use db_macros::Resource;
 use diesel::Queryable;
 use diesel::Selectable;
 use ipnetwork::IpNetwork;
@@ -18,6 +20,9 @@ use nexus_types::external_api::shared;
 use nexus_types::external_api::views;
 use omicron_common::address::NUM_SOURCE_NAT_PORTS;
 use omicron_common::api::external::Error;
+use omicron_common::api::external::IdentityMetadata;
+use serde::Deserialize;
+use serde::Serialize;
 use std::convert::TryFrom;
 use std::net::IpAddr;
 use uuid::Uuid;
@@ -69,6 +74,30 @@ pub struct ExternalIp {
     pub ip: IpNetwork,
     pub first_port: SqlU16,
     pub last_port: SqlU16,
+    // Only Some(_) for instance Floating IPs
+    pub project_id: Option<Uuid>,
+}
+
+/// A view type constructed from `ExternalIp` used to represent Floating IP
+/// objects in user-facing APIs.
+///
+/// This View type fills a similar niche to `ProjectImage` etc.: we need to
+/// represent identity as non-nullable (ditto for parent project) so as to
+/// play nicely with authz and resource APIs.
+#[derive(
+    Queryable, Selectable, Clone, Debug, Resource, Serialize, Deserialize,
+)]
+#[diesel(table_name = floating_ip)]
+pub struct FloatingIp {
+    #[diesel(embed)]
+    pub identity: FloatingIpIdentity,
+
+    pub ip_pool_id: Uuid,
+    pub ip_pool_range_id: Uuid,
+    pub is_service: bool,
+    pub parent_id: Option<Uuid>,
+    pub ip: IpNetwork,
+    pub project_id: Uuid,
 }

 impl From<ExternalIp> for sled_agent_client::types::SourceNatConfig {
@@ -93,6 +122,7 @@ pub struct IncompleteExternalIp {
     is_service: bool,
     parent_id: Option<Uuid>,
     pool_id: Uuid,
+    project_id: Option<Uuid>,
     // Optional address requesting that a specific IP address be allocated.
     explicit_ip: Option<IpNetwork>,
     // Optional range when requesting a specific SNAT range be allocated.
@@ -114,6 +144,7 @@ impl IncompleteExternalIp {
             is_service: false,
             parent_id: Some(instance_id),
             pool_id,
+            project_id: None,
             explicit_ip: None,
             explicit_port_range: None,
         }
@@ -129,6 +160,7 @@ impl IncompleteExternalIp {
             is_service: false,
             parent_id: Some(instance_id),
             pool_id,
+            project_id: None,
             explicit_ip: None,
             explicit_port_range: None,
         }
@@ -138,6 +170,7 @@ impl IncompleteExternalIp {
         id: Uuid,
         name: &Name,
         description: &str,
+        project_id: Uuid,
         pool_id: Uuid,
     ) -> Self {
         Self {
@@ -149,11 +182,35 @@ impl IncompleteExternalIp {
             is_service: false,
             parent_id: None,
             pool_id,
+            project_id: Some(project_id),
             explicit_ip: None,
             explicit_port_range: None,
         }
     }

+    pub fn for_floating_explicit(
+        id: Uuid,
+        name: &Name,
+        description: &str,
+        project_id: Uuid,
+        explicit_ip: IpAddr,
+        pool_id: Uuid,
+    ) -> Self {
+        Self {
+            id,
+            name: Some(name.clone()),
+            description: Some(description.to_string()),
+            time_created: Utc::now(),
+            kind: IpKind::Floating,
+            is_service: false,
+            parent_id: None,
+            pool_id,
+            project_id: Some(project_id),
+            explicit_ip: Some(explicit_ip.into()),
+            explicit_port_range: None,
+        }
+    }
+
     pub fn for_service_explicit(
         id: Uuid,
         name: &Name,
@@ -171,6 +228,7 @@ impl IncompleteExternalIp {
             is_service: true,
             parent_id: Some(service_id),
             pool_id,
+            project_id: None,
             explicit_ip: Some(IpNetwork::from(address)),
             explicit_port_range: None,
         }
@@ -199,6 +257,7 @@ impl IncompleteExternalIp {
             is_service: true,
             parent_id: Some(service_id),
             pool_id,
+            project_id: None,
             explicit_ip: Some(IpNetwork::from(address)),
             explicit_port_range,
         }
@@ -220,6 +279,7 @@ impl IncompleteExternalIp {
             is_service: true,
             parent_id: Some(service_id),
             pool_id,
+            project_id: None,
             explicit_ip: None,
             explicit_port_range: None,
         }
@@ -235,6 +295,7 @@ impl IncompleteExternalIp {
             is_service: true,
             parent_id: Some(service_id),
             pool_id,
+            project_id: None,
             explicit_ip: None,
             explicit_port_range: None,
         }
@@ -272,6 +333,10 @@ impl IncompleteExternalIp {
         &self.pool_id
     }

+    pub fn project_id(&self) -> &Option<Uuid> {
+        &self.project_id
+    }
+
     pub fn explicit_ip(&self) -> &Option<IpNetwork> {
         &self.explicit_ip
     }
@@ -308,3 +373,78 @@ impl TryFrom<ExternalIp> for views::ExternalIp {
         Ok(views::ExternalIp { kind, ip: ip.ip.ip() })
     }
 }
+
+impl TryFrom<ExternalIp> for FloatingIp {
+    type Error = Error;
+
+    fn try_from(ip: ExternalIp) -> Result<Self, Self::Error> {
+        if ip.kind != IpKind::Floating {
+            return Err(Error::internal_error(
+                "attempted to convert non-floating external IP to floating",
+            ));
+        }
+        if ip.is_service {
+            return Err(Error::internal_error(
+                "Service IPs should not be exposed in the API",
+            ));
+        }
+
+        let project_id = ip.project_id.ok_or(Error::internal_error(
"database schema guarantees parent project for non-service FIP", + ))?; + + let name = ip.name.ok_or(Error::internal_error( + "database schema guarantees ID metadata for non-service FIP", + ))?; + + let description = ip.description.ok_or(Error::internal_error( + "database schema guarantees ID metadata for non-service FIP", + ))?; + + let identity = FloatingIpIdentity { + id: ip.id, + name, + description, + time_created: ip.time_created, + time_modified: ip.time_modified, + time_deleted: ip.time_deleted, + }; + + Ok(FloatingIp { + ip: ip.ip, + identity, + project_id, + ip_pool_id: ip.ip_pool_id, + ip_pool_range_id: ip.ip_pool_range_id, + is_service: ip.is_service, + parent_id: ip.parent_id, + }) + } +} + +impl TryFrom for views::FloatingIp { + type Error = Error; + + fn try_from(ip: ExternalIp) -> Result { + FloatingIp::try_from(ip).map(Into::into) + } +} + +impl From for views::FloatingIp { + fn from(ip: FloatingIp) -> Self { + let identity = IdentityMetadata { + id: ip.identity.id, + name: ip.identity.name.into(), + description: ip.identity.description, + time_created: ip.identity.time_created, + time_modified: ip.identity.time_modified, + }; + + views::FloatingIp { + ip: ip.ip.ip(), + identity, + project_id: ip.project_id, + instance_id: ip.parent_id, + } + } +} diff --git a/nexus/db-model/src/image.rs b/nexus/db-model/src/image.rs index 91a9469d30..6cdf3201be 100644 --- a/nexus/db-model/src/image.rs +++ b/nexus/db-model/src/image.rs @@ -202,7 +202,6 @@ impl From for views::Image { Self { identity: image.identity(), project_id: image.project_id, - url: image.url, os: image.os, version: image.version, digest: image.digest.map(|x| x.into()), diff --git a/nexus/db-model/src/instance_state.rs b/nexus/db-model/src/instance_state.rs index 6baec7afbd..6b4c71da79 100644 --- a/nexus/db-model/src/instance_state.rs +++ b/nexus/db-model/src/instance_state.rs @@ -6,6 +6,7 @@ use super::impl_enum_wrapper; use omicron_common::api::external; use serde::Deserialize; use serde::Serialize; +use std::fmt; use std::io::Write; impl_enum_wrapper!( @@ -40,6 +41,12 @@ impl InstanceState { } } +impl fmt::Display for InstanceState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} + impl From for sled_agent_client::types::InstanceState { fn from(s: InstanceState) -> Self { use external::InstanceState::*; diff --git a/nexus/db-model/src/inventory.rs b/nexus/db-model/src/inventory.rs index 5b09f289bb..d94334787d 100644 --- a/nexus/db-model/src/inventory.rs +++ b/nexus/db-model/src/inventory.rs @@ -6,7 +6,8 @@ use crate::schema::{ hw_baseboard_id, inv_caboose, inv_collection, inv_collection_error, - inv_root_of_trust, inv_service_processor, sw_caboose, + inv_root_of_trust, inv_root_of_trust_page, inv_service_processor, + sw_caboose, sw_root_of_trust_page, }; use crate::{impl_enum_type, SqlU16, SqlU32}; use chrono::DateTime; @@ -18,7 +19,7 @@ use diesel::pg::Pg; use diesel::serialize::ToSql; use diesel::{serialize, sql_types}; use nexus_types::inventory::{ - BaseboardId, Caboose, Collection, PowerState, RotSlot, + BaseboardId, Caboose, Collection, PowerState, RotPage, RotSlot, }; use uuid::Uuid; @@ -132,6 +133,59 @@ impl From for nexus_types::inventory::CabooseWhich { } } +// See [`nexus_types::inventory::RotPageWhich`]. 
+impl_enum_type!(
+    #[derive(SqlType, Debug, QueryId)]
+    #[diesel(postgres_type(name = "root_of_trust_page_which"))]
+    pub struct RotPageWhichEnum;
+
+    #[derive(Copy, Clone, Debug, AsExpression, FromSqlRow, PartialEq)]
+    #[diesel(sql_type = RotPageWhichEnum)]
+    pub enum RotPageWhich;
+
+    // Enum values
+    Cmpa => b"cmpa"
+    CfpaActive => b"cfpa_active"
+    CfpaInactive => b"cfpa_inactive"
+    CfpaScratch => b"cfpa_scratch"
+);
+
+impl From<nexus_types::inventory::RotPageWhich> for RotPageWhich {
+    fn from(c: nexus_types::inventory::RotPageWhich) -> Self {
+        use nexus_types::inventory as nexus_inventory;
+        match c {
+            nexus_inventory::RotPageWhich::Cmpa => RotPageWhich::Cmpa,
+            nexus_inventory::RotPageWhich::CfpaActive => {
+                RotPageWhich::CfpaActive
+            }
+            nexus_inventory::RotPageWhich::CfpaInactive => {
+                RotPageWhich::CfpaInactive
+            }
+            nexus_inventory::RotPageWhich::CfpaScratch => {
+                RotPageWhich::CfpaScratch
+            }
+        }
+    }
+}
+
+impl From<RotPageWhich> for nexus_types::inventory::RotPageWhich {
+    fn from(row: RotPageWhich) -> Self {
+        use nexus_types::inventory as nexus_inventory;
+        match row {
+            RotPageWhich::Cmpa => nexus_inventory::RotPageWhich::Cmpa,
+            RotPageWhich::CfpaActive => {
+                nexus_inventory::RotPageWhich::CfpaActive
+            }
+            RotPageWhich::CfpaInactive => {
+                nexus_inventory::RotPageWhich::CfpaInactive
+            }
+            RotPageWhich::CfpaScratch => {
+                nexus_inventory::RotPageWhich::CfpaScratch
+            }
+        }
+    }
+}
+
 // See [`nexus_types::inventory::SpType`].
 impl_enum_type!(
     #[derive(SqlType, Debug, QueryId)]
@@ -271,6 +325,36 @@ impl From<SwCaboose> for Caboose {
     }
 }

+/// See [`nexus_types::inventory::RotPage`].
+#[derive(
+    Queryable,
+    Insertable,
+    Clone,
+    Debug,
+    Selectable,
+    Eq,
+    PartialEq,
+    Ord,
+    PartialOrd,
+)]
+#[diesel(table_name = sw_root_of_trust_page)]
+pub struct SwRotPage {
+    pub id: Uuid,
+    pub data_base64: String,
+}
+
+impl From<RotPage> for SwRotPage {
+    fn from(p: RotPage) -> Self {
+        Self { id: Uuid::new_v4(), data_base64: p.data_base64 }
+    }
+}
+
+impl From<SwRotPage> for RotPage {
+    fn from(row: SwRotPage) -> Self {
+        Self { data_base64: row.data_base64 }
+    }
+}
+
 /// See [`nexus_types::inventory::Collection`].
 #[derive(Queryable, Insertable, Clone, Debug, Selectable)]
 #[diesel(table_name = inv_collection_error)]
 pub struct InvCollectionError {
@@ -441,3 +525,16 @@ pub struct InvCaboose {
     pub which: CabooseWhich,
     pub sw_caboose_id: Uuid,
 }
+
+/// See [`nexus_types::inventory::RotPageFound`].
+#[derive(Queryable, Clone, Debug, Selectable)]
+#[diesel(table_name = inv_root_of_trust_page)]
+pub struct InvRotPage {
+    pub inv_collection_id: Uuid,
+    pub hw_baseboard_id: Uuid,
+    pub time_collected: DateTime<Utc>,
+    pub source: String,
+
+    pub which: RotPageWhich,
+    pub sw_root_of_trust_page_id: Uuid,
+}
diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs
index 6b65eb87ec..43bf83fd34 100644
--- a/nexus/db-model/src/lib.rs
+++ b/nexus/db-model/src/lib.rs
@@ -70,8 +70,10 @@ mod silo_user;
 mod silo_user_password_hash;
 mod sled;
 mod sled_instance;
+mod sled_provision_state;
 mod sled_resource;
 mod sled_resource_kind;
+mod sled_underlay_subnet_allocation;
 mod snapshot;
 mod ssh_key;
 mod switch;
@@ -151,8 +153,10 @@ pub use silo_user::*;
 pub use silo_user_password_hash::*;
 pub use sled::*;
 pub use sled_instance::*;
+pub use sled_provision_state::*;
 pub use sled_resource::*;
 pub use sled_resource_kind::*;
+pub use sled_underlay_subnet_allocation::*;
 pub use snapshot::*;
 pub use ssh_key::*;
 pub use switch::*;
@@ -285,10 +289,9 @@ macro_rules! impl_enum_type {
                    Ok($model_type::$enum_item)
                }
            )*
-                _ => {
-                    Err(concat!("Unrecognized enum variant for ",
-                        stringify!{$model_type})
-                    .into())
+                other => {
+                    let s = concat!("Unrecognized enum variant for ", stringify!{$model_type});
+                    Err(format!("{}: (raw bytes: {:?})", s, other).into())
                }
            }
        }
diff --git a/nexus/db-model/src/producer_endpoint.rs b/nexus/db-model/src/producer_endpoint.rs
index 29e57b0877..f282f6f08f 100644
--- a/nexus/db-model/src/producer_endpoint.rs
+++ b/nexus/db-model/src/producer_endpoint.rs
@@ -3,12 +3,47 @@
 // file, You can obtain one at https://mozilla.org/MPL/2.0/.

 use super::SqlU16;
+use crate::impl_enum_type;
 use crate::schema::metric_producer;
 use db_macros::Asset;
 use nexus_types::identity::Asset;
 use omicron_common::api::internal;
 use uuid::Uuid;

+impl_enum_type!(
+    #[derive(SqlType, Copy, Clone, Debug, QueryId)]
+    #[diesel(postgres_type(name = "producer_kind"))]
+    pub struct ProducerKindEnum;
+
+    #[derive(AsExpression, Copy, Clone, Debug, FromSqlRow, PartialEq)]
+    #[diesel(sql_type = ProducerKindEnum)]
+    pub enum ProducerKind;
+
+    SledAgent => b"sled_agent"
+    Service => b"service"
+    Instance => b"instance"
+);
+
+impl From<internal::nexus::ProducerKind> for ProducerKind {
+    fn from(kind: internal::nexus::ProducerKind) -> Self {
+        match kind {
+            internal::nexus::ProducerKind::SledAgent => ProducerKind::SledAgent,
+            internal::nexus::ProducerKind::Service => ProducerKind::Service,
+            internal::nexus::ProducerKind::Instance => ProducerKind::Instance,
+        }
+    }
+}
+
+impl From<ProducerKind> for internal::nexus::ProducerKind {
+    fn from(kind: ProducerKind) -> Self {
+        match kind {
+            ProducerKind::SledAgent => internal::nexus::ProducerKind::SledAgent,
+            ProducerKind::Service => internal::nexus::ProducerKind::Service,
+            ProducerKind::Instance => internal::nexus::ProducerKind::Instance,
+        }
+    }
+}
+
 /// Information announced by a metric server, used so that clients can contact it and collect
 /// available metric data from it.
 #[derive(Queryable, Insertable, Debug, Clone, Selectable, Asset)]
@@ -17,6 +52,7 @@ pub struct ProducerEndpoint {
     #[diesel(embed)]
     identity: ProducerEndpointIdentity,

+    pub kind: ProducerKind,
     pub ip: ipnetwork::IpNetwork,
     pub port: SqlU16,
     pub interval: f64,
@@ -33,6 +69,7 @@ impl ProducerEndpoint {
     ) -> Self {
         Self {
             identity: ProducerEndpointIdentity::new(endpoint.id),
+            kind: endpoint.kind.into(),
             ip: endpoint.address.ip().into(),
             port: endpoint.address.port().into(),
             base_route: endpoint.base_route.clone(),
diff --git a/nexus/db-model/src/queries/region_allocation.rs b/nexus/db-model/src/queries/region_allocation.rs
index 2025e79fb8..a1b9e0373a 100644
--- a/nexus/db-model/src/queries/region_allocation.rs
+++ b/nexus/db-model/src/queries/region_allocation.rs
@@ -23,6 +23,7 @@
 // a CTE (where we want the alias name to come first).

 use crate::schema::dataset;
+use crate::schema::sled;
 use crate::schema::zpool;

 table! {
@@ -157,6 +158,7 @@ diesel::allow_tables_to_appear_in_same_query!(
 diesel::allow_tables_to_appear_in_same_query!(
     old_zpool_usage,
     zpool,
+    sled,
     proposed_dataset_changes,
 );
diff --git a/nexus/db-model/src/rack.rs b/nexus/db-model/src/rack.rs
index f2bc7528d2..580ec155b4 100644
--- a/nexus/db-model/src/rack.rs
+++ b/nexus/db-model/src/rack.rs
@@ -4,9 +4,8 @@

 use crate::schema::rack;
 use db_macros::Asset;
-use ipnetwork::{IpNetwork, Ipv6Network};
+use ipnetwork::IpNetwork;
 use nexus_types::{external_api::views, identity::Asset};
-use omicron_common::api;
 use uuid::Uuid;

 /// Information about a local rack.
@@ -29,22 +28,6 @@ impl Rack {
             rack_subnet: None,
         }
     }
-
-    pub fn subnet(&self) -> Result<Ipv6Network, api::external::Error> {
-        match self.rack_subnet {
-            Some(IpNetwork::V6(subnet)) => Ok(subnet),
-            Some(IpNetwork::V4(_)) => {
-                return Err(api::external::Error::InternalError {
-                    internal_message: "rack subnet not IPv6".into(),
-                })
-            }
-            None => {
-                return Err(api::external::Error::InternalError {
-                    internal_message: "rack subnet not set".into(),
-                })
-            }
-        }
-    }
 }

 impl From<Rack> for views::Rack {
diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs
index 4844f2a33f..51501b4894 100644
--- a/nexus/db-model/src/schema.rs
+++ b/nexus/db-model/src/schema.rs
@@ -146,6 +146,7 @@ table! {
         mtu -> Int4,
         fec -> crate::SwitchLinkFecEnum,
         speed -> crate::SwitchLinkSpeedEnum,
+        autoneg -> Bool,
     }
 }

@@ -399,6 +400,7 @@ table! {
         id -> Uuid,
         time_created -> Timestamptz,
         time_modified -> Timestamptz,
+        kind -> crate::ProducerKindEnum,
         ip -> Inet,
         port -> Int4,
         interval -> Float8,
@@ -523,6 +525,7 @@ table! {
         time_created -> Timestamptz,
         time_modified -> Timestamptz,
         time_deleted -> Nullable<Timestamptz>,
+        ip_pool_id -> Uuid,
         ip_pool_range_id -> Uuid,
         is_service -> Bool,
@@ -531,6 +534,26 @@ table! {
         ip -> Inet,
         first_port -> Int4,
         last_port -> Int4,
+
+        project_id -> Nullable<Uuid>,
     }
 }

+table! {
+    floating_ip (id) {
+        id -> Uuid,
+        name -> Text,
+        description -> Text,
+        time_created -> Timestamptz,
+        time_modified -> Timestamptz,
+        time_deleted -> Nullable<Timestamptz>,
+
+        ip_pool_id -> Uuid,
+        ip_pool_range_id -> Uuid,
+        is_service -> Bool,
+        parent_id -> Nullable<Uuid>,
+        ip -> Inet,
+        project_id -> Uuid,
+    }
+}
+
@@ -740,6 +763,7 @@ table! {
         ip -> Inet,
         port -> Int4,
         last_used_address -> Inet,
+        provision_state -> crate::SledProvisionStateEnum,
     }
 }

@@ -754,6 +778,16 @@ table! {
     }
 }

+table! {
+    sled_underlay_subnet_allocation (rack_id, sled_id) {
+        rack_id -> Uuid,
+        sled_id -> Uuid,
+        subnet_octet -> Int2,
+        hw_baseboard_id -> Uuid,
+    }
+}
+allow_tables_to_appear_in_same_query!(rack, sled_underlay_subnet_allocation);
+
 table! {
     switch (id) {
         id -> Uuid,
@@ -1186,6 +1220,13 @@ table! {
     }
 }

+table! {
+    sw_root_of_trust_page (id) {
+        id -> Uuid,
+        data_base64 -> Text,
+    }
+}
+
 table! {
     inv_collection (id) {
         id -> Uuid,
@@ -1247,6 +1288,18 @@ table! {
     }
 }

+table! {
+    inv_root_of_trust_page (inv_collection_id, hw_baseboard_id, which) {
+        inv_collection_id -> Uuid,
+        hw_baseboard_id -> Uuid,
+        time_collected -> Timestamptz,
+        source -> Text,
+
+        which -> crate::RotPageWhichEnum,
+        sw_root_of_trust_page_id -> Uuid,
+    }
+}
+
 table! {
     bootstore_keys (key, generation) {
         key -> Text,
@@ -1269,7 +1322,7 @@ table! {
 ///
 /// This should be updated whenever the schema is changed.  For more details,
@@ -1269,7 +1322,7 @@ table! {
 
 ///
 /// This should be updated whenever the schema is changed. For more details,
 /// refer to: schema/crdb/README.adoc
-pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(11, 0, 0);
+pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(19, 0, 0);
 
 allow_tables_to_appear_in_same_query!(
     system_update,
@@ -1284,6 +1337,11 @@ joinable!(ip_pool_range -> ip_pool (ip_pool_id));
 allow_tables_to_appear_in_same_query!(inv_collection, inv_collection_error);
 joinable!(inv_collection_error -> inv_collection (inv_collection_id));
 allow_tables_to_appear_in_same_query!(hw_baseboard_id, sw_caboose, inv_caboose);
+allow_tables_to_appear_in_same_query!(
+    hw_baseboard_id,
+    sw_root_of_trust_page,
+    inv_root_of_trust_page
+);
 
 allow_tables_to_appear_in_same_query!(
     dataset,
@@ -1328,3 +1386,12 @@ allow_tables_to_appear_in_same_query!(
     switch_port,
     switch_port_settings_route_config
 );
+
+allow_tables_to_appear_in_same_query!(
+    switch_port,
+    switch_port_settings_bgp_peer_config
+);
+
+allow_tables_to_appear_in_same_query!(disk, virtual_provisioning_resource);
+
+allow_tables_to_appear_in_same_query!(volume, virtual_provisioning_resource);
diff --git a/nexus/db-model/src/semver_version.rs b/nexus/db-model/src/semver_version.rs
index 966b436149..8e168e11a2 100644
--- a/nexus/db-model/src/semver_version.rs
+++ b/nexus/db-model/src/semver_version.rs
@@ -68,12 +68,10 @@ fn to_sortable_string(v: semver::Version) -> Result<String, external::Error> {
     let max = u64::pow(10, u32::from(PADDED_WIDTH)) - 1;
 
     if v.major > max || v.minor > max || v.patch > max {
-        return Err(external::Error::InvalidValue {
-            label: "version".to_string(),
-            message: format!(
-                "Major, minor, and patch version must be less than {max}"
-            ),
-        });
+        return Err(external::Error::invalid_value(
+            "version",
+            format!("Major, minor, and patch version must be less than {max}"),
+        ));
     }
 
     let mut result = format!(
diff --git a/nexus/db-model/src/sled.rs b/nexus/db-model/src/sled.rs
index 5e059946ff..85a6b3139c 100644
--- a/nexus/db-model/src/sled.rs
+++ b/nexus/db-model/src/sled.rs
@@ -4,11 +4,11 @@
 
 use super::{ByteCount, Generation, SqlU16, SqlU32};
 use crate::collection::DatastoreCollectionConfig;
-use crate::ipv6;
 use crate::schema::{physical_disk, service, sled, zpool};
+use crate::{ipv6, SledProvisionState};
 use chrono::{DateTime, Utc};
 use db_macros::Asset;
-use nexus_types::{external_api::views, identity::Asset};
+use nexus_types::{external_api::shared, external_api::views, identity::Asset};
 use std::net::Ipv6Addr;
 use std::net::SocketAddrV6;
 use uuid::Uuid;
@@ -59,41 +59,11 @@ pub struct Sled {
 
     /// The last IP address provided to an Oxide service on this sled
     pub last_used_address: ipv6::Ipv6Addr,
+
+    provision_state: SledProvisionState,
 }
 
 impl Sled {
-    pub fn new(
-        id: Uuid,
-        addr: SocketAddrV6,
-        baseboard: SledBaseboard,
-        hardware: SledSystemHardware,
-        rack_id: Uuid,
-    ) -> Self {
-        let last_used_address = {
-            let mut segments = addr.ip().segments();
-            segments[7] += omicron_common::address::RSS_RESERVED_ADDRESSES;
-            ipv6::Ipv6Addr::from(Ipv6Addr::from(segments))
-        };
-        Self {
-            identity: SledIdentity::new(id),
-            time_deleted: None,
-            rcgen: Generation::new(),
-            rack_id,
-            is_scrimlet: hardware.is_scrimlet,
-            serial_number: baseboard.serial_number,
-            part_number: baseboard.part_number,
-            revision: baseboard.revision,
-            usable_hardware_threads: SqlU32::new(
-                hardware.usable_hardware_threads,
-            ),
-            usable_physical_ram: hardware.usable_physical_ram,
-            reservoir_size: hardware.reservoir_size,
-            ip: ipv6::Ipv6Addr::from(addr.ip()),
-            port: addr.port().into(),
-            last_used_address,
-        }
-    }
-
     pub fn is_scrimlet(&self) -> bool {
         self.is_scrimlet
     }
@@ -113,6 +83,10 @@ impl Sled {
     pub fn serial_number(&self) -> &str {
         &self.serial_number
     }
+
+    pub fn provision_state(&self) -> SledProvisionState {
+        self.provision_state
+    }
 }
 
 impl From<Sled> for views::Sled {
@@ -120,11 +94,12 @@ impl From<Sled> for views::Sled {
         Self {
             identity: sled.identity(),
             rack_id: sled.rack_id,
-            baseboard: views::Baseboard {
+            baseboard: shared::Baseboard {
                 serial: sled.serial_number,
                 part: sled.part_number,
                 revision: sled.revision,
             },
+            provision_state: sled.provision_state.into(),
             usable_hardware_threads: sled.usable_hardware_threads.0,
             usable_physical_ram: *sled.usable_physical_ram,
         }
@@ -153,8 +128,111 @@ impl DatastoreCollectionConfig<Service> for Sled {
     type CollectionIdColumn = service::dsl::sled_id;
 }
 
+/// Form of `Sled` used for updates from sled-agent. This is missing some
+/// columns that are present in `Sled` because sled-agent doesn't control them.
+#[derive(Debug, Clone)]
+pub struct SledUpdate {
+    id: Uuid,
+
+    pub rack_id: Uuid,
+
+    is_scrimlet: bool,
+    serial_number: String,
+    part_number: String,
+    revision: i64,
+
+    pub usable_hardware_threads: SqlU32,
+    pub usable_physical_ram: ByteCount,
+    pub reservoir_size: ByteCount,
+
+    // ServiceAddress (Sled Agent).
+    pub ip: ipv6::Ipv6Addr,
+    pub port: SqlU16,
+}
+
+impl SledUpdate {
+    pub fn new(
+        id: Uuid,
+        addr: SocketAddrV6,
+        baseboard: SledBaseboard,
+        hardware: SledSystemHardware,
+        rack_id: Uuid,
+    ) -> Self {
+        Self {
+            id,
+            rack_id,
+            is_scrimlet: hardware.is_scrimlet,
+            serial_number: baseboard.serial_number,
+            part_number: baseboard.part_number,
+            revision: baseboard.revision,
+            usable_hardware_threads: SqlU32::new(
+                hardware.usable_hardware_threads,
+            ),
+            usable_physical_ram: hardware.usable_physical_ram,
+            reservoir_size: hardware.reservoir_size,
+            ip: addr.ip().into(),
+            port: addr.port().into(),
+        }
+    }
+
+    /// Converts self into a form used for inserts of new sleds into the
+    /// database.
+    ///
+    /// This form adds default values for fields that are not present in
+    /// `SledUpdate`.
+    pub fn into_insertable(self) -> Sled {
+        let last_used_address = {
+            let mut segments = self.ip().segments();
+            segments[7] += omicron_common::address::RSS_RESERVED_ADDRESSES;
+            ipv6::Ipv6Addr::from(Ipv6Addr::from(segments))
+        };
+        Sled {
+            identity: SledIdentity::new(self.id),
+            rcgen: Generation::new(),
+            time_deleted: None,
+            rack_id: self.rack_id,
+            is_scrimlet: self.is_scrimlet,
+            serial_number: self.serial_number,
+            part_number: self.part_number,
+            revision: self.revision,
+            // By default, sleds start as provisionable.
+            provision_state: SledProvisionState::Provisionable,
+            usable_hardware_threads: self.usable_hardware_threads,
+            usable_physical_ram: self.usable_physical_ram,
+            reservoir_size: self.reservoir_size,
+            ip: self.ip,
+            port: self.port,
+            last_used_address,
+        }
+    }
+
+    pub fn id(&self) -> Uuid {
+        self.id
+    }
+
+    pub fn is_scrimlet(&self) -> bool {
+        self.is_scrimlet
+    }
+
+    pub fn ip(&self) -> Ipv6Addr {
+        self.ip.into()
+    }
+
+    pub fn address(&self) -> SocketAddrV6 {
+        self.address_with_port(self.port.into())
+    }
+
+    pub fn address_with_port(&self, port: u16) -> SocketAddrV6 {
+        SocketAddrV6::new(self.ip(), port, 0, 0)
+    }
+
+    pub fn serial_number(&self) -> &str {
+        &self.serial_number
+    }
+}
+
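Aside: a minimal sketch of the insert path this `SledUpdate`/`Sled` split enables (illustrative only; `baseboard`, `hardware`, and `rack_id` are assumed to be in scope, and the address is a placeholder):

let update = SledUpdate::new(
    Uuid::new_v4(),
    "[fd00:1122:3344:101::1]:12345".parse::<SocketAddrV6>().unwrap(),
    baseboard,
    hardware,
    rack_id,
);
// `into_insertable` fills in the columns sled-agent does not control;
// in particular, a newly inserted sled defaults to `Provisionable`.
let sled = update.into_insertable();
assert_eq!(sled.provision_state(), SledProvisionState::Provisionable);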
 /// A set of constraints that can be placed on operations that select a sled.
-#[derive(Debug)]
+#[derive(Clone, Debug)]
 pub struct SledReservationConstraints {
     must_select_from: Vec<Uuid>,
 }
diff --git a/nexus/db-model/src/sled_provision_state.rs b/nexus/db-model/src/sled_provision_state.rs
new file mode 100644
index 0000000000..b2b1ee39dc
--- /dev/null
+++ b/nexus/db-model/src/sled_provision_state.rs
@@ -0,0 +1,53 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use super::impl_enum_type;
+use nexus_types::external_api::views;
+use serde::{Deserialize, Serialize};
+use thiserror::Error;
+
+impl_enum_type!(
+    #[derive(Clone, SqlType, Debug, QueryId)]
+    #[diesel(postgres_type(name = "sled_provision_state"))]
+    pub struct SledProvisionStateEnum;
+
+    #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)]
+    #[diesel(sql_type = SledProvisionStateEnum)]
+    pub enum SledProvisionState;
+
+    // Enum values
+    Provisionable => b"provisionable"
+    NonProvisionable => b"non_provisionable"
+);
+
+impl From<SledProvisionState> for views::SledProvisionState {
+    fn from(state: SledProvisionState) -> Self {
+        match state {
+            SledProvisionState::Provisionable => {
+                views::SledProvisionState::Provisionable
+            }
+            SledProvisionState::NonProvisionable => {
+                views::SledProvisionState::NonProvisionable
+            }
+        }
+    }
+}
+
+impl From<views::SledProvisionState> for SledProvisionState {
+    fn from(state: views::SledProvisionState) -> Self {
+        match state {
+            views::SledProvisionState::Provisionable => {
+                SledProvisionState::Provisionable
+            }
+            views::SledProvisionState::NonProvisionable => {
+                SledProvisionState::NonProvisionable
+            }
+        }
+    }
+}
+
+/// An unknown [`views::SledProvisionState`] was encountered.
+#[derive(Clone, Debug, Error)]
+#[error("Unknown SledProvisionState")]
+pub struct UnknownSledProvisionState;
diff --git a/nexus/db-model/src/sled_underlay_subnet_allocation.rs b/nexus/db-model/src/sled_underlay_subnet_allocation.rs
new file mode 100644
index 0000000000..4da0bea669
--- /dev/null
+++ b/nexus/db-model/src/sled_underlay_subnet_allocation.rs
@@ -0,0 +1,16 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use crate::schema::sled_underlay_subnet_allocation;
+use uuid::Uuid;
+
+/// Underlay allocation for a sled added to an initialized rack
+#[derive(Queryable, Insertable, Debug, Clone, Selectable)]
+#[diesel(table_name = sled_underlay_subnet_allocation)]
+pub struct SledUnderlaySubnetAllocation {
+    pub rack_id: Uuid,
+    pub sled_id: Uuid,
+    pub subnet_octet: i16,
+    pub hw_baseboard_id: Uuid,
+}
diff --git a/nexus/db-model/src/switch.rs b/nexus/db-model/src/switch.rs
index c9db100b0a..159888d91e 100644
--- a/nexus/db-model/src/switch.rs
+++ b/nexus/db-model/src/switch.rs
@@ -2,7 +2,7 @@ use super::Generation;
 use crate::schema::switch;
 use chrono::{DateTime, Utc};
 use db_macros::Asset;
-use nexus_types::{external_api::views, identity::Asset};
+use nexus_types::{external_api::shared, external_api::views, identity::Asset};
 use uuid::Uuid;
 
 /// Baseboard information about a switch.
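Aside: a hedged sketch of the kind of datastore query the new `provision_state` column and enum permit (hypothetical, not part of this change; assumes a pooled `conn` and the usual datastore imports in scope):

use db::schema::sled::dsl;

let provisionable: Vec<Sled> = dsl::sled
    .filter(dsl::time_deleted.is_null())
    .filter(dsl::provision_state.eq(SledProvisionState::Provisionable))
    .select(Sled::as_select())
    .load_async(&*conn)
    .await
    .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;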
@@ -57,7 +57,7 @@ impl From for views::Switch { Self { identity: switch.identity(), rack_id: switch.rack_id, - baseboard: views::Baseboard { + baseboard: shared::Baseboard { serial: switch.serial_number, part: switch.part_number, revision: switch.revision, diff --git a/nexus/db-model/src/switch_port.rs b/nexus/db-model/src/switch_port.rs index 44588899b6..6ff8612d2f 100644 --- a/nexus/db-model/src/switch_port.rs +++ b/nexus/db-model/src/switch_port.rs @@ -355,6 +355,7 @@ pub struct SwitchPortLinkConfig { pub mtu: SqlU16, pub fec: SwitchLinkFec, pub speed: SwitchLinkSpeed, + pub autoneg: bool, } impl SwitchPortLinkConfig { @@ -365,6 +366,7 @@ impl SwitchPortLinkConfig { mtu: u16, fec: SwitchLinkFec, speed: SwitchLinkSpeed, + autoneg: bool, ) -> Self { Self { port_settings_id, @@ -372,6 +374,7 @@ impl SwitchPortLinkConfig { link_name, fec, speed, + autoneg, mtu: mtu.into(), } } diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index b1b8f3b28f..9d8afd1fea 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -32,6 +32,7 @@ oso.workspace = true paste.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. pq-sys = "*" +rand.workspace = true ref-cast.workspace = true samael.workspace = true serde.workspace = true @@ -58,6 +59,7 @@ omicron-workspace-hack.workspace = true [dev-dependencies] assert_matches.workspace = true +camino-tempfile.workspace = true expectorate.workspace = true hyper-rustls.workspace = true gateway-client.workspace = true @@ -75,5 +77,4 @@ regex.workspace = true rustls.workspace = true strum.workspace = true subprocess.workspace = true -tempfile.workspace = true term.workspace = true diff --git a/nexus/db-queries/src/authz/api_resources.rs b/nexus/db-queries/src/authz/api_resources.rs index b22fe1ac25..2dfe2f7174 100644 --- a/nexus/db-queries/src/authz/api_resources.rs +++ b/nexus/db-queries/src/authz/api_resources.rs @@ -791,6 +791,14 @@ authz_resource! { polar_snippet = InProject, } +authz_resource! { + name = "FloatingIp", + parent = "Project", + primary_key = Uuid, + roles_allowed = false, + polar_snippet = InProject, +} + // Customer network integration resources nested below "Fleet" authz_resource! 
{ diff --git a/nexus/db-queries/src/authz/oso_generic.rs b/nexus/db-queries/src/authz/oso_generic.rs index e642062ead..6098379287 100644 --- a/nexus/db-queries/src/authz/oso_generic.rs +++ b/nexus/db-queries/src/authz/oso_generic.rs @@ -131,6 +131,7 @@ pub fn make_omicron_oso(log: &slog::Logger) -> Result { VpcRouter::init(), RouterRoute::init(), VpcSubnet::init(), + FloatingIp::init(), // Silo-level resources Image::init(), SiloImage::init(), diff --git a/nexus/db-queries/src/authz/policy_test/resources.rs b/nexus/db-queries/src/authz/policy_test/resources.rs index 3049f3b9bf..8bdd97923b 100644 --- a/nexus/db-queries/src/authz/policy_test/resources.rs +++ b/nexus/db-queries/src/authz/policy_test/resources.rs @@ -319,6 +319,13 @@ async fn make_project( Uuid::new_v4(), LookupType::ByName(image_name), )); + + let floating_ip_name = format!("{project_name}-fip1"); + builder.new_resource(authz::FloatingIp::new( + project.clone(), + Uuid::new_v4(), + LookupType::ByName(floating_ip_name), + )); } /// Returns the set of authz classes exempted from the coverage test diff --git a/nexus/db-queries/src/db/collection_attach.rs b/nexus/db-queries/src/db/collection_attach.rs index ea4d9d5beb..fccc1aa324 100644 --- a/nexus/db-queries/src/db/collection_attach.rs +++ b/nexus/db-queries/src/db/collection_attach.rs @@ -563,12 +563,9 @@ where #[cfg(test)] mod test { use super::*; - use crate::db::{ - self, error::TransactionError, identity::Resource as IdentityResource, - }; + use crate::db::{self, identity::Resource as IdentityResource}; use async_bb8_diesel::{ - AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, - ConnectionManager, + AsyncRunQueryDsl, AsyncSimpleConnection, ConnectionManager, }; use chrono::Utc; use db_macros::Resource; @@ -999,22 +996,12 @@ mod test { .set(resource::dsl::collection_id.eq(collection_id)), ); - type TxnError = - TransactionError>; - let result = conn - .transaction_async(|conn| async move { - attach_query.attach_and_get_result_async(&conn).await.map_err( - |e| match e { - AttachError::DatabaseError(e) => TxnError::from(e), - e => TxnError::CustomError(e), - }, - ) - }) - .await; - // "attach_and_get_result" should return the "attached" resource. 
-        let (returned_collection, returned_resource) =
-            result.expect("Attach should have worked");
+        let (returned_collection, returned_resource) = attach_query
+            .attach_and_get_result_async(&conn)
+            .await
+            .expect("Attach should have worked");
+
         assert_eq!(
             returned_resource.collection_id.expect("Expected a collection ID"),
             collection_id
diff --git a/nexus/db-queries/src/db/collection_detach_many.rs b/nexus/db-queries/src/db/collection_detach_many.rs
index 8df6d4aed4..986cfb70b7 100644
--- a/nexus/db-queries/src/db/collection_detach_many.rs
+++ b/nexus/db-queries/src/db/collection_detach_many.rs
@@ -479,12 +479,9 @@ where
 mod test {
     use super::*;
     use crate::db::collection_attach::DatastoreAttachTarget;
-    use crate::db::{
-        self, error::TransactionError, identity::Resource as IdentityResource,
-    };
+    use crate::db::{self, identity::Resource as IdentityResource};
     use async_bb8_diesel::{
-        AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection,
-        ConnectionManager,
+        AsyncRunQueryDsl, AsyncSimpleConnection, ConnectionManager,
     };
     use chrono::Utc;
     use db_macros::Resource;
@@ -919,21 +916,12 @@ mod test {
             .set(resource::dsl::collection_id.eq(Option::<Uuid>::None)),
         );
 
-        type TxnError =
-            TransactionError<DetachManyError<Collection, DieselError>>;
-        let result = conn
-            .transaction_async(|conn| async move {
-                detach_query.detach_and_get_result_async(&conn).await.map_err(
-                    |e| match e {
-                        DetachManyError::DatabaseError(e) => TxnError::from(e),
-                        e => TxnError::CustomError(e),
-                    },
-                )
-            })
-            .await;
-
         // "detach_and_get_result" should return the "detached" resource.
-        let returned_collection = result.expect("Detach should have worked");
+        let returned_collection = detach_query
+            .detach_and_get_result_async(&conn)
+            .await
+            .expect("Detach should have worked");
+
         // The returned values should be the latest value in the DB.
         assert_eq!(
             returned_collection,
diff --git a/nexus/db-queries/src/db/column_walker.rs b/nexus/db-queries/src/db/column_walker.rs
new file mode 100644
index 0000000000..64c3b450c8
--- /dev/null
+++ b/nexus/db-queries/src/db/column_walker.rs
@@ -0,0 +1,112 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! CTE utility for iterating over all columns in a table.
+
+use diesel::prelude::*;
+use std::marker::PhantomData;
+
+/// Used to iterate over a tuple of columns ("T").
+///
+/// Diesel exposes "AllColumns" as a tuple, which is difficult to iterate over
+/// -- after all, all the types are distinct. However, each of these types
+/// implements "Column", so we can use a macro to provide a
+/// "conversion-to-iterator" implementation for our expected tuples.
+pub(crate) struct ColumnWalker<T> {
+    remaining: PhantomData<T>,
+}
+
+impl<T> ColumnWalker<T> {
+    pub fn new() -> Self {
+        Self { remaining: PhantomData }
+    }
+}
+
+macro_rules! impl_column_walker {
+    ( $len:literal $($column:ident)+ ) => (
+        impl<$($column: Column),+> IntoIterator for ColumnWalker<($($column,)+)> {
+            type Item = &'static str;
+            type IntoIter = std::array::IntoIter<Self::Item, $len>;
+
+            fn into_iter(self) -> Self::IntoIter {
+                [$($column::NAME,)+].into_iter()
+            }
+        }
+    );
+}
+
+// implementations for 1 - 32 columns
+impl_column_walker! { 1 A }
+impl_column_walker! { 2 A B }
+impl_column_walker! { 3 A B C }
+impl_column_walker! { 4 A B C D }
+impl_column_walker! { 5 A B C D E }
+impl_column_walker! { 6 A B C D E F }
+impl_column_walker! { 7 A B C D E F G }
+impl_column_walker! { 8 A B C D E F G H }
+impl_column_walker! { 9 A B C D E F G H I }
+impl_column_walker! { 10 A B C D E F G H I J }
+impl_column_walker! { 11 A B C D E F G H I J K }
+impl_column_walker! { 12 A B C D E F G H I J K L }
+impl_column_walker! { 13 A B C D E F G H I J K L M }
+impl_column_walker! { 14 A B C D E F G H I J K L M N }
+impl_column_walker! { 15 A B C D E F G H I J K L M N O }
+impl_column_walker! { 16 A B C D E F G H I J K L M N O P }
+impl_column_walker! { 17 A B C D E F G H I J K L M N O P Q }
+impl_column_walker! { 18 A B C D E F G H I J K L M N O P Q R }
+impl_column_walker! { 19 A B C D E F G H I J K L M N O P Q R S }
+impl_column_walker! { 20 A B C D E F G H I J K L M N O P Q R S T }
+impl_column_walker! { 21 A B C D E F G H I J K L M N O P Q R S T U }
+impl_column_walker! { 22 A B C D E F G H I J K L M N O P Q R S T U V }
+impl_column_walker! { 23 A B C D E F G H I J K L M N O P Q R S T U V W }
+impl_column_walker! { 24 A B C D E F G H I J K L M N O P Q R S T U V W X }
+impl_column_walker! { 25 A B C D E F G H I J K L M N O P Q R S T U V W X Y }
+impl_column_walker! { 26 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z }
+impl_column_walker! { 27 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z A1 }
+impl_column_walker! { 28 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z A1 B1 }
+impl_column_walker! { 29 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z A1 B1 C1 }
+impl_column_walker! { 30 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z A1 B1 C1 D1 }
+impl_column_walker! { 31 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z A1 B1 C1 D1 E1 }
+impl_column_walker! { 32 A B C D E F G H I J K L M N O P Q R S T U V W X Y Z A1 B1 C1 D1 E1 F1 }
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    table! {
+        test_schema.test_table (id) {
+            id -> Uuid,
+            value -> Int4,
+            time_deleted -> Nullable<Timestamptz>,
+        }
+    }
+
+    // We can convert all of a table's columns into an iterable format.
+    #[test]
+    fn test_walk_table() {
+        let all_columns =
+            ColumnWalker::<<test_table::table as Table>::AllColumns>::new();
+
+        let mut iter = all_columns.into_iter();
+        assert_eq!(iter.next(), Some("id"));
+        assert_eq!(iter.next(), Some("value"));
+        assert_eq!(iter.next(), Some("time_deleted"));
+        assert_eq!(iter.next(), None);
+    }
+
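Aside: the motivating use for `ColumnWalker` is rendering a column list inside hand-written CTEs. A minimal sketch of that use, building on the `test_table` defined above (hypothetical, not part of this change):

let columns: String =
    ColumnWalker::<<test_table::table as Table>::AllColumns>::new()
        .into_iter()
        .collect::<Vec<&'static str>>()
        .join(", ");
assert_eq!(columns, "id, value, time_deleted");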
+    // We can, if we want to, also make a ColumnWalker out of an arbitrary tuple
+    // of columns.
+    #[test]
+    fn test_walk_columns() {
+        let all_columns = ColumnWalker::<(
+            test_table::columns::id,
+            test_table::columns::value,
+        )>::new();
+
+        let mut iter = all_columns.into_iter();
+        assert_eq!(iter.next(), Some("id"));
+        assert_eq!(iter.next(), Some("value"));
+        assert_eq!(iter.next(), None);
+    }
+}
diff --git a/nexus/db-queries/src/db/datastore/address_lot.rs b/nexus/db-queries/src/db/datastore/address_lot.rs
index 97dfb59eba..5c2ffbf1d0 100644
--- a/nexus/db-queries/src/db/datastore/address_lot.rs
+++ b/nexus/db-queries/src/db/datastore/address_lot.rs
@@ -13,9 +13,9 @@ use crate::db::error::TransactionError;
 use crate::db::model::Name;
 use crate::db::model::{AddressLot, AddressLotBlock, AddressLotReservedBlock};
 use crate::db::pagination::paginated;
-use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl, Connection};
+use crate::transaction_retry::OptionalError;
+use async_bb8_diesel::{AsyncRunQueryDsl, Connection};
 use chrono::Utc;
-use diesel::result::Error as DieselError;
 use diesel::{ExpressionMethods, QueryDsl, SelectableHelper};
 use diesel_dtrace::DTraceConnection;
 use ipnetwork::IpNetwork;
@@ -45,11 +45,12 @@ impl DataStore {
         use db::schema::address_lot::dsl as lot_dsl;
         use db::schema::address_lot_block::dsl as block_dsl;
 
-        self.pool_connection_authorized(opctx)
-            .await?
-            // TODO https://github.com/oxidecomputer/omicron/issues/2811
-            // Audit external networking database transaction usage
-            .transaction_async(|conn| async move {
+        let conn = self.pool_connection_authorized(opctx).await?;
+
+        // TODO https://github.com/oxidecomputer/omicron/issues/2811
+        // Audit external networking database transaction usage
+        self.transaction_retry_wrapper("address_lot_create")
+            .transaction(&conn, |conn| async move {
                 let lot = AddressLot::new(&params.identity, params.kind.into());
 
                 let db_lot: AddressLot =
@@ -81,15 +82,14 @@ impl DataStore {
                 Ok(AddressLotCreateResult { lot: db_lot, blocks: db_blocks })
             })
             .await
-            .map_err(|e| match e {
-                DieselError::DatabaseError(_, _) => public_error_from_diesel(
+            .map_err(|e| {
+                public_error_from_diesel(
                     e,
                     ErrorHandler::Conflict(
                         ResourceType::AddressLot,
                         &params.identity.name.as_str(),
                     ),
-                ),
-                _ => public_error_from_diesel(e, ErrorHandler::Server),
+                )
             })
     }
 
@@ -113,47 +113,54 @@ impl DataStore {
             LotInUse,
         }
 
-        type TxnError = TransactionError<AddressLotDeleteError>;
+        let err = OptionalError::new();
 
         // TODO https://github.com/oxidecomputer/omicron/issues/2811
         // Audit external networking database transaction usage
-        conn.transaction_async(|conn| async move {
-            let rsvd: Vec<AddressLotReservedBlock> =
-                rsvd_block_dsl::address_lot_rsvd_block
-                    .filter(rsvd_block_dsl::address_lot_id.eq(id))
-                    .select(AddressLotReservedBlock::as_select())
-                    .limit(1)
-                    .load_async(&conn)
-                    .await?;
-
-            if !rsvd.is_empty() {
-                Err(TxnError::CustomError(AddressLotDeleteError::LotInUse))?;
-            }
+        self.transaction_retry_wrapper("address_lot_delete")
+            .transaction(&conn, |conn| {
+                let err = err.clone();
+                async move {
+                    let rsvd: Vec<AddressLotReservedBlock> =
+                        rsvd_block_dsl::address_lot_rsvd_block
+                            .filter(rsvd_block_dsl::address_lot_id.eq(id))
+                            .select(AddressLotReservedBlock::as_select())
+                            .limit(1)
+                            .load_async(&conn)
+                            .await?;
+
+                    if !rsvd.is_empty() {
+                        return Err(err.bail(AddressLotDeleteError::LotInUse));
+                    }
+
+                    let now = Utc::now();
+                    diesel::update(lot_dsl::address_lot)
+                        .filter(lot_dsl::time_deleted.is_null())
+                        .filter(lot_dsl::id.eq(id))
+                        .set(lot_dsl::time_deleted.eq(now))
+                        .execute_async(&conn)
+                        .await?;
 
-            let now = Utc::now();
-            diesel::update(lot_dsl::address_lot)
-                .filter(lot_dsl::time_deleted.is_null())
-
.filter(lot_dsl::id.eq(id)) - .set(lot_dsl::time_deleted.eq(now)) - .execute_async(&conn) - .await?; - - diesel::delete(block_dsl::address_lot_block) - .filter(block_dsl::address_lot_id.eq(id)) - .execute_async(&conn) - .await?; - - Ok(()) - }) - .await - .map_err(|e| match e { - TxnError::Database(e) => { - public_error_from_diesel(e, ErrorHandler::Server) - } - TxnError::CustomError(AddressLotDeleteError::LotInUse) => { - Error::invalid_request("lot is in use") - } - }) + diesel::delete(block_dsl::address_lot_block) + .filter(block_dsl::address_lot_id.eq(id)) + .execute_async(&conn) + .await?; + + Ok(()) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + match err { + AddressLotDeleteError::LotInUse => { + Error::invalid_request("lot is in use") + } + } + } else { + public_error_from_diesel(e, ErrorHandler::Server) + } + }) } pub async fn address_lot_list( diff --git a/nexus/db-queries/src/db/datastore/bgp.rs b/nexus/db-queries/src/db/datastore/bgp.rs index ff314a2564..28075b0ded 100644 --- a/nexus/db-queries/src/db/datastore/bgp.rs +++ b/nexus/db-queries/src/db/datastore/bgp.rs @@ -3,11 +3,11 @@ use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; -use crate::db::error::TransactionError; use crate::db::model::Name; use crate::db::model::{BgpAnnounceSet, BgpAnnouncement, BgpConfig}; use crate::db::pagination::paginated; -use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; +use crate::transaction_retry::OptionalError; +use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use nexus_types::external_api::params; @@ -30,33 +30,33 @@ impl DataStore { use db::schema::{ bgp_announce_set, bgp_announce_set::dsl as announce_set_dsl, }; - let pool = self.pool_connection_authorized(opctx).await?; - - pool.transaction_async(|conn| async move { - let id: Uuid = match &config.bgp_announce_set_id { - NameOrId::Name(name) => { - announce_set_dsl::bgp_announce_set - .filter(bgp_announce_set::time_deleted.is_null()) - .filter(bgp_announce_set::name.eq(name.to_string())) - .select(bgp_announce_set::id) - .limit(1) - .first_async::(&conn) - .await? - } - NameOrId::Id(id) => *id, - }; - - let config = BgpConfig::from_config_create(config, id); - - let result = diesel::insert_into(dsl::bgp_config) - .values(config.clone()) - .returning(BgpConfig::as_returning()) - .get_result_async(&conn) - .await?; - Ok(result) - }) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + let conn = self.pool_connection_authorized(opctx).await?; + self.transaction_retry_wrapper("bgp_config_set") + .transaction(&conn, |conn| async move { + let id: Uuid = match &config.bgp_announce_set_id { + NameOrId::Name(name) => { + announce_set_dsl::bgp_announce_set + .filter(bgp_announce_set::time_deleted.is_null()) + .filter(bgp_announce_set::name.eq(name.to_string())) + .select(bgp_announce_set::id) + .limit(1) + .first_async::(&conn) + .await? 
+ } + NameOrId::Id(id) => *id, + }; + + let config = BgpConfig::from_config_create(config, id); + + let result = diesel::insert_into(dsl::bgp_config) + .values(config.clone()) + .returning(BgpConfig::as_returning()) + .get_result_async(&conn) + .await?; + Ok(result) + }) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn bgp_config_delete( @@ -74,54 +74,59 @@ impl DataStore { enum BgpConfigDeleteError { ConfigInUse, } - type TxnError = TransactionError; - - let pool = self.pool_connection_authorized(opctx).await?; - pool.transaction_async(|conn| async move { - let name_or_id = sel.name_or_id.clone(); - - let id: Uuid = match name_or_id { - NameOrId::Id(id) => id, - NameOrId::Name(name) => { - bgp_config_dsl::bgp_config - .filter(bgp_config::name.eq(name.to_string())) - .select(bgp_config::id) - .limit(1) - .first_async::(&conn) - .await? - } - }; - - let count = - sps_bgp_peer_config_dsl::switch_port_settings_bgp_peer_config - .filter(sps_bgp_peer_config::bgp_config_id.eq(id)) - .count() - .execute_async(&conn) - .await?; - - if count > 0 { - return Err(TxnError::CustomError( - BgpConfigDeleteError::ConfigInUse, - )); - } - diesel::update(bgp_config_dsl::bgp_config) - .filter(bgp_config_dsl::id.eq(id)) - .set(bgp_config_dsl::time_deleted.eq(Utc::now())) - .execute_async(&conn) - .await?; + let err = OptionalError::new(); + let conn = self.pool_connection_authorized(opctx).await?; + self.transaction_retry_wrapper("bgp_config_delete") + .transaction(&conn, |conn| { + let err = err.clone(); + async move { + let name_or_id = sel.name_or_id.clone(); + + let id: Uuid = match name_or_id { + NameOrId::Id(id) => id, + NameOrId::Name(name) => { + bgp_config_dsl::bgp_config + .filter(bgp_config::name.eq(name.to_string())) + .select(bgp_config::id) + .limit(1) + .first_async::(&conn) + .await? 
+ } + }; + + let count = + sps_bgp_peer_config_dsl::switch_port_settings_bgp_peer_config + .filter(sps_bgp_peer_config::bgp_config_id.eq(id)) + .count() + .execute_async(&conn) + .await?; + + if count > 0 { + return Err(err.bail(BgpConfigDeleteError::ConfigInUse)); + } + + diesel::update(bgp_config_dsl::bgp_config) + .filter(bgp_config_dsl::id.eq(id)) + .set(bgp_config_dsl::time_deleted.eq(Utc::now())) + .execute_async(&conn) + .await?; - Ok(()) - }) - .await - .map_err(|e| match e { - TxnError::CustomError(BgpConfigDeleteError::ConfigInUse) => { - Error::invalid_request("BGP config in use") - } - TxnError::Database(e) => { - public_error_from_diesel(e, ErrorHandler::Server) - } - }) + Ok(()) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + match err { + BgpConfigDeleteError::ConfigInUse => { + Error::invalid_request("BGP config in use") + } + } + } else { + public_error_from_diesel(e, ErrorHandler::Server) + } + }) } pub async fn bgp_config_get( @@ -131,7 +136,7 @@ impl DataStore { ) -> LookupResult { use db::schema::bgp_config; use db::schema::bgp_config::dsl; - let pool = self.pool_connection_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; let name_or_id = name_or_id.clone(); @@ -140,14 +145,14 @@ impl DataStore { .filter(bgp_config::name.eq(name.to_string())) .select(BgpConfig::as_select()) .limit(1) - .first_async::(&*pool) + .first_async::(&*conn) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)), NameOrId::Id(id) => dsl::bgp_config .filter(bgp_config::id.eq(id)) .select(BgpConfig::as_select()) .limit(1) - .first_async::(&*pool) + .first_async::(&*conn) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)), }?; @@ -162,7 +167,7 @@ impl DataStore { ) -> ListResultVec { use db::schema::bgp_config::dsl; - let pool = self.pool_connection_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; match pagparams { PaginatedBy::Id(pagparams) => { @@ -176,7 +181,7 @@ impl DataStore { } .filter(dsl::time_deleted.is_null()) .select(BgpConfig::as_select()) - .load_async(&*pool) + .load_async(&*conn) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } @@ -195,47 +200,64 @@ impl DataStore { enum BgpAnnounceListError { AnnounceSetNotFound(Name), } - type TxnError = TransactionError; - - let pool = self.pool_connection_authorized(opctx).await?; - pool.transaction_async(|conn| async move { - let name_or_id = sel.name_or_id.clone(); - - let announce_id: Uuid = match name_or_id { - NameOrId::Id(id) => id, - NameOrId::Name(name) => announce_set_dsl::bgp_announce_set - .filter(bgp_announce_set::time_deleted.is_null()) - .filter(bgp_announce_set::name.eq(name.to_string())) - .select(bgp_announce_set::id) - .limit(1) - .first_async::(&conn) - .await - .map_err(|_| { - TxnError::CustomError( - BgpAnnounceListError::AnnounceSetNotFound( - Name::from(name.clone()), - ), - ) - })?, - }; - - let result = announce_dsl::bgp_announcement - .filter(announce_dsl::announce_set_id.eq(announce_id)) - .select(BgpAnnouncement::as_select()) - .load_async(&conn) - .await?; - - Ok(result) - }) - .await - .map_err(|e| match e { - TxnError::CustomError( - BgpAnnounceListError::AnnounceSetNotFound(name), - ) => Error::not_found_by_name(ResourceType::BgpAnnounceSet, &name), - TxnError::Database(e) => { - public_error_from_diesel(e, ErrorHandler::Server) - } - }) + + let err = OptionalError::new(); + let conn = self.pool_connection_authorized(opctx).await?; + 
self.transaction_retry_wrapper("bgp_announce_list") + .transaction(&conn, |conn| { + let err = err.clone(); + async move { + let name_or_id = sel.name_or_id.clone(); + + let announce_id: Uuid = match name_or_id { + NameOrId::Id(id) => id, + NameOrId::Name(name) => { + announce_set_dsl::bgp_announce_set + .filter( + bgp_announce_set::time_deleted.is_null(), + ) + .filter( + bgp_announce_set::name.eq(name.to_string()), + ) + .select(bgp_announce_set::id) + .limit(1) + .first_async::(&conn) + .await + .map_err(|e| { + err.bail_retryable_or( + e, + BgpAnnounceListError::AnnounceSetNotFound( + Name::from(name.clone()), + ) + ) + })? + } + }; + + let result = announce_dsl::bgp_announcement + .filter(announce_dsl::announce_set_id.eq(announce_id)) + .select(BgpAnnouncement::as_select()) + .load_async(&conn) + .await?; + + Ok(result) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + match err { + BgpAnnounceListError::AnnounceSetNotFound(name) => { + Error::not_found_by_name( + ResourceType::BgpAnnounceSet, + &name, + ) + } + } + } else { + public_error_from_diesel(e, ErrorHandler::Server) + } + }) } pub async fn bgp_create_announce_set( @@ -246,37 +268,39 @@ impl DataStore { use db::schema::bgp_announce_set::dsl as announce_set_dsl; use db::schema::bgp_announcement::dsl as bgp_announcement_dsl; - let pool = self.pool_connection_authorized(opctx).await?; - pool.transaction_async(|conn| async move { - let bas: BgpAnnounceSet = announce.clone().into(); + let conn = self.pool_connection_authorized(opctx).await?; + self.transaction_retry_wrapper("bgp_create_announce_set") + .transaction(&conn, |conn| async move { + let bas: BgpAnnounceSet = announce.clone().into(); - let db_as: BgpAnnounceSet = - diesel::insert_into(announce_set_dsl::bgp_announce_set) - .values(bas.clone()) - .returning(BgpAnnounceSet::as_returning()) - .get_result_async::(&conn) - .await?; - - let mut db_annoucements = Vec::new(); - for a in &announce.announcement { - let an = BgpAnnouncement { - announce_set_id: db_as.id(), - address_lot_block_id: bas.identity.id, - network: a.network.into(), - }; - let an = - diesel::insert_into(bgp_announcement_dsl::bgp_announcement) - .values(an.clone()) - .returning(BgpAnnouncement::as_returning()) - .get_result_async::(&conn) + let db_as: BgpAnnounceSet = + diesel::insert_into(announce_set_dsl::bgp_announce_set) + .values(bas.clone()) + .returning(BgpAnnounceSet::as_returning()) + .get_result_async::(&conn) .await?; - db_annoucements.push(an); - } - Ok((db_as, db_annoucements)) - }) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + let mut db_annoucements = Vec::new(); + for a in &announce.announcement { + let an = BgpAnnouncement { + announce_set_id: db_as.id(), + address_lot_block_id: bas.identity.id, + network: a.network.into(), + }; + let an = diesel::insert_into( + bgp_announcement_dsl::bgp_announcement, + ) + .values(an.clone()) + .returning(BgpAnnouncement::as_returning()) + .get_result_async::(&conn) + .await?; + db_annoucements.push(an); + } + + Ok((db_as, db_annoucements)) + }) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn bgp_delete_announce_set( @@ -295,57 +319,67 @@ impl DataStore { enum BgpAnnounceSetDeleteError { AnnounceSetInUse, } - type TxnError = TransactionError; - let pool = self.pool_connection_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; let name_or_id = sel.name_or_id.clone(); - pool.transaction_async(|conn| async move { - let id: Uuid = 
match name_or_id { - NameOrId::Name(name) => { - announce_set_dsl::bgp_announce_set - .filter(bgp_announce_set::name.eq(name.to_string())) - .select(bgp_announce_set::id) - .limit(1) - .first_async::(&conn) - .await? - } - NameOrId::Id(id) => id, - }; - - let count = bgp_config_dsl::bgp_config - .filter(bgp_config::bgp_announce_set_id.eq(id)) - .count() - .execute_async(&conn) - .await?; - - if count > 0 { - return Err(TxnError::CustomError( - BgpAnnounceSetDeleteError::AnnounceSetInUse, - )); - } + let err = OptionalError::new(); + self.transaction_retry_wrapper("bgp_delete_announce_set") + .transaction(&conn, |conn| { + let err = err.clone(); + let name_or_id = name_or_id.clone(); + async move { + let id: Uuid = match name_or_id { + NameOrId::Name(name) => { + announce_set_dsl::bgp_announce_set + .filter( + bgp_announce_set::name.eq(name.to_string()), + ) + .select(bgp_announce_set::id) + .limit(1) + .first_async::(&conn) + .await? + } + NameOrId::Id(id) => id, + }; + + let count = bgp_config_dsl::bgp_config + .filter(bgp_config::bgp_announce_set_id.eq(id)) + .count() + .execute_async(&conn) + .await?; - diesel::update(announce_set_dsl::bgp_announce_set) - .filter(announce_set_dsl::id.eq(id)) - .set(announce_set_dsl::time_deleted.eq(Utc::now())) - .execute_async(&conn) - .await?; + if count > 0 { + return Err(err.bail( + BgpAnnounceSetDeleteError::AnnounceSetInUse, + )); + } - diesel::delete(bgp_announcement_dsl::bgp_announcement) - .filter(bgp_announcement_dsl::announce_set_id.eq(id)) - .execute_async(&conn) - .await?; + diesel::update(announce_set_dsl::bgp_announce_set) + .filter(announce_set_dsl::id.eq(id)) + .set(announce_set_dsl::time_deleted.eq(Utc::now())) + .execute_async(&conn) + .await?; - Ok(()) - }) - .await - .map_err(|e| match e { - TxnError::CustomError( - BgpAnnounceSetDeleteError::AnnounceSetInUse, - ) => Error::invalid_request("BGP announce set in use"), - TxnError::Database(e) => { - public_error_from_diesel(e, ErrorHandler::Server) - } - }) + diesel::delete(bgp_announcement_dsl::bgp_announcement) + .filter(bgp_announcement_dsl::announce_set_id.eq(id)) + .execute_async(&conn) + .await?; + + Ok(()) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + match err { + BgpAnnounceSetDeleteError::AnnounceSetInUse => { + Error::invalid_request("BGP announce set in use") + } + } + } else { + public_error_from_diesel(e, ErrorHandler::Server) + } + }) } } diff --git a/nexus/db-queries/src/db/datastore/db_metadata.rs b/nexus/db-queries/src/db/datastore/db_metadata.rs index 0ae61a7c38..e579bb8476 100644 --- a/nexus/db-queries/src/db/datastore/db_metadata.rs +++ b/nexus/db-queries/src/db/datastore/db_metadata.rs @@ -8,10 +8,7 @@ use super::DataStore; use crate::db; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; -use crate::db::TransactionError; -use async_bb8_diesel::{ - AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, -}; +use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use camino::{Utf8Path, Utf8PathBuf}; use chrono::Utc; use diesel::prelude::*; @@ -26,12 +23,14 @@ use std::str::FromStr; pub const EARLIEST_SUPPORTED_VERSION: &'static str = "1.0.0"; /// Describes a single file containing a schema change, as SQL. +#[derive(Debug)] pub struct SchemaUpgradeStep { pub path: Utf8PathBuf, pub sql: String, } /// Describes a sequence of files containing schema changes. 
+#[derive(Debug)]
 pub struct SchemaUpgrade {
     pub steps: Vec<SchemaUpgradeStep>,
 }
@@ -39,10 +38,18 @@ pub struct SchemaUpgrade {
 /// Reads a "version directory" and reads all SQL changes into
 /// a result Vec.
 ///
-/// Any file that starts with "up" and ends with "sql" is considered
-/// part of the migration, and fully read to a string.
+/// Files that do not begin with "up" and end with ".sql" are ignored. The
+/// collection of `up*.sql` files must fall into one of these two conventions:
 ///
-/// These are sorted lexicographically.
+/// * "up.sql" with no other files
+/// * "up1.sql", "up2.sql", ..., beginning from 1, optionally with leading
+///   zeroes (e.g., "up01.sql", "up02.sql", ...). There is no maximum value, but
+///   there may not be any gaps (e.g., if "up2.sql" and "up4.sql" exist, so must
+///   "up3.sql") and there must not be any repeats (e.g., if "up1.sql" exists,
+///   "up01.sql" must not exist).
+///
+/// Any violation of these two rules will result in an error. Collections of the
+/// second form (`up1.sql`, ...) will be sorted numerically.
 pub async fn all_sql_for_version_migration<P: AsRef<Utf8Path>>(
     path: P,
 ) -> Result<SchemaUpgrade, String> {
@@ -54,19 +61,83 @@ pub async fn all_sql_for_version_migration<P: AsRef<Utf8Path>>(
     for entry in entries {
         let entry = entry.map_err(|err| format!("Invalid entry: {err}"))?;
         let pathbuf = entry.into_path();
-        let is_up = pathbuf
-            .file_name()
-            .map(|name| name.starts_with("up"))
-            .unwrap_or(false);
-        let is_sql = matches!(pathbuf.extension(), Some("sql"));
-        if is_up && is_sql {
-            up_sqls.push(pathbuf);
+
+        // Ensure filename ends with ".sql"
+        if pathbuf.extension() != Some("sql") {
+            continue;
+        }
+
+        // Ensure filename begins with "up", and extract anything in between
+        // "up" and ".sql".
+        let Some(remaining_filename) = pathbuf
+            .file_stem()
+            .and_then(|file_stem| file_stem.strip_prefix("up"))
+        else {
+            continue;
+        };
+
+        // Ensure the remaining filename is either empty (i.e., the filename is
+        // exactly "up.sql") or parseable as an unsigned integer. We give
+        // "up.sql" the "up_number" 0 (checked in the loop below), and require
+        // any other number to be nonzero.
+        if remaining_filename.is_empty() {
+            up_sqls.push((0, pathbuf));
+        } else {
+            let Ok(up_number) = remaining_filename.parse::<u64>() else {
+                return Err(format!(
+                    "invalid filename (non-numeric `up*.sql`): {pathbuf}",
+                ));
+            };
+            if up_number == 0 {
+                return Err(format!(
+                    "invalid filename (`up*.sql` numbering must start at 1): \
+                    {pathbuf}",
+                ));
+            }
+            up_sqls.push((up_number, pathbuf));
         }
     }
     up_sqls.sort();
 
+    // Validate that we have a reasonable sequence of `up*.sql` numbers.
+    match up_sqls.as_slice() {
+        [] => return Err("no `up*.sql` files found".to_string()),
+        [(up_number, path)] => {
+            // For a single file, we allow either `up.sql` (keyed as
+            // up_number=0) or `up1.sql`; reject any higher number.
+            if *up_number > 1 {
+                return Err(format!(
+                    "`up*.sql` numbering must start at 1: found first file \
+                     {path}"
+                ));
+            }
+        }
+        _ => {
+            for (i, (up_number, path)) in up_sqls.iter().enumerate() {
+                // We have 2 or more `up*.sql`; they should be numbered exactly
+                // 1..=up_sqls.len().
+                if i as u64 + 1 != *up_number {
+                    // We know we have at least two elements, so report an error
+                    // referencing either the next item (if we're first) or the
+                    // previous item (if we're not first).
+ let (path_a, path_b) = if i == 0 { + let (_, next_path) = &up_sqls[1]; + (path, next_path) + } else { + let (_, prev_path) = &up_sqls[i - 1]; + (prev_path, path) + }; + return Err(format!( + "invalid `up*.sql` combination: {path_a}, {path_b}" + )); + } + } + } + } + + // This collection of `up*.sql` files is valid; read them all, in order. let mut result = SchemaUpgrade { steps: vec![] }; - for path in up_sqls.into_iter() { + for (_, path) in up_sqls.into_iter() { let sql = tokio::fs::read_to_string(&path) .await .map_err(|e| format!("Cannot read {path}: {e}"))?; @@ -341,30 +412,30 @@ impl DataStore { target: &SemverVersion, sql: &String, ) -> Result<(), Error> { - let result = self.pool_connection_unauthorized().await?.transaction_async(|conn| async move { - if target.to_string() != EARLIEST_SUPPORTED_VERSION { - let validate_version_query = format!("SELECT CAST(\ - IF(\ - (\ - SELECT version = '{current}' and target_version = '{target}'\ - FROM omicron.public.db_metadata WHERE singleton = true\ - ),\ - 'true',\ - 'Invalid starting version for schema change'\ - ) AS BOOL\ - );"); - conn.batch_execute_async(&validate_version_query).await?; - } - conn.batch_execute_async(&sql).await?; - Ok::<_, TransactionError<()>>(()) - }).await; + let conn = self.pool_connection_unauthorized().await?; + + let result = self.transaction_retry_wrapper("apply_schema_update") + .transaction(&conn, |conn| async move { + if target.to_string() != EARLIEST_SUPPORTED_VERSION { + let validate_version_query = format!("SELECT CAST(\ + IF(\ + (\ + SELECT version = '{current}' and target_version = '{target}'\ + FROM omicron.public.db_metadata WHERE singleton = true\ + ),\ + 'true',\ + 'Invalid starting version for schema change'\ + ) AS BOOL\ + );"); + conn.batch_execute_async(&validate_version_query).await?; + } + conn.batch_execute_async(&sql).await?; + Ok(()) + }).await; match result { Ok(()) => Ok(()), - Err(TransactionError::CustomError(())) => panic!("No custom error"), - Err(TransactionError::Database(e)) => { - Err(public_error_from_diesel(e, ErrorHandler::Server)) - } + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), } } @@ -403,11 +474,150 @@ impl DataStore { #[cfg(test)] mod test { use super::*; + use camino_tempfile::Utf8TempDir; use nexus_db_model::schema::SCHEMA_VERSION; use nexus_test_utils::db as test_db; use omicron_test_utils::dev; use std::sync::Arc; + // Confirm that `all_sql_for_version_migration` rejects `up*.sql` files + // where the `*` doesn't contain a positive integer. 
+    #[tokio::test]
+    async fn all_sql_for_version_migration_rejects_invalid_up_sql_names() {
+        for (invalid_filename, error_prefix) in [
+            ("upA.sql", "invalid filename (non-numeric `up*.sql`)"),
+            ("up1a.sql", "invalid filename (non-numeric `up*.sql`)"),
+            ("upaaa1.sql", "invalid filename (non-numeric `up*.sql`)"),
+            ("up-3.sql", "invalid filename (non-numeric `up*.sql`)"),
+            (
+                "up0.sql",
+                "invalid filename (`up*.sql` numbering must start at 1)",
+            ),
+            (
+                "up00.sql",
+                "invalid filename (`up*.sql` numbering must start at 1)",
+            ),
+            (
+                "up000.sql",
+                "invalid filename (`up*.sql` numbering must start at 1)",
+            ),
+        ] {
+            let tempdir = Utf8TempDir::new().unwrap();
+            let filename = tempdir.path().join(invalid_filename);
+            _ = tokio::fs::File::create(&filename).await.unwrap();
+
+            match all_sql_for_version_migration(tempdir.path()).await {
+                Ok(upgrade) => {
+                    panic!(
+                        "unexpected success on {invalid_filename} \
+                        (produced {upgrade:?})"
+                    );
+                }
+                Err(message) => {
+                    assert_eq!(message, format!("{error_prefix}: {filename}"));
+                }
+            }
+        }
+    }
+
+    // Confirm that `all_sql_for_version_migration` rejects a directory with no
+    // appropriately-named files.
+    #[tokio::test]
+    async fn all_sql_for_version_migration_rejects_no_up_sql_files() {
+        for filenames in [
+            &[] as &[&str],
+            &["README.md"],
+            &["foo.sql", "bar.sql"],
+            &["up1sql", "up2sql"],
+        ] {
+            let tempdir = Utf8TempDir::new().unwrap();
+            for filename in filenames {
+                _ = tokio::fs::File::create(tempdir.path().join(filename))
+                    .await
+                    .unwrap();
+            }
+
+            match all_sql_for_version_migration(tempdir.path()).await {
+                Ok(upgrade) => {
+                    panic!(
+                        "unexpected success on {filenames:?} \
+                        (produced {upgrade:?})"
+                    );
+                }
+                Err(message) => {
+                    assert_eq!(message, "no `up*.sql` files found");
+                }
+            }
+        }
+    }
+
+    // Confirm that `all_sql_for_version_migration` rejects collections of
+    // `up*.sql` files with individually-valid names but that do not pass the
+    // rules of the entire collection.
+    #[tokio::test]
+    async fn all_sql_for_version_migration_rejects_invalid_up_sql_collections()
+    {
+        for invalid_filenames in [
+            &["up.sql", "up1.sql"] as &[&str],
+            &["up1.sql", "up01.sql"],
+            &["up1.sql", "up3.sql"],
+            &["up1.sql", "up2.sql", "up3.sql", "up02.sql"],
+        ] {
+            let tempdir = Utf8TempDir::new().unwrap();
+            for filename in invalid_filenames {
+                _ = tokio::fs::File::create(tempdir.path().join(filename))
+                    .await
+                    .unwrap();
+            }
+
+            match all_sql_for_version_migration(tempdir.path()).await {
+                Ok(upgrade) => {
+                    panic!(
+                        "unexpected success on {invalid_filenames:?} \
+                        (produced {upgrade:?})"
+                    );
+                }
+                Err(message) => {
+                    assert!(
+                        message.starts_with("invalid `up*.sql` combination: "),
+                        "message did not start with expected prefix: \
+                        {message:?}"
+                    );
+                }
+            }
+        }
+    }
+
+    // Confirm that `all_sql_for_version_migration` accepts legal collections of
+    // `up*.sql` filenames.
+ #[tokio::test] + async fn all_sql_for_version_migration_allows_valid_up_sql_collections() { + for filenames in [ + &["up.sql"] as &[&str], + &["up1.sql", "up2.sql"], + &[ + "up01.sql", "up02.sql", "up03.sql", "up04.sql", "up05.sql", + "up06.sql", "up07.sql", "up08.sql", "up09.sql", "up10.sql", + "up11.sql", + ], + &["up00001.sql", "up00002.sql", "up00003.sql"], + ] { + let tempdir = Utf8TempDir::new().unwrap(); + for filename in filenames { + _ = tokio::fs::File::create(tempdir.path().join(filename)) + .await + .unwrap(); + } + + match all_sql_for_version_migration(tempdir.path()).await { + Ok(_) => (), + Err(message) => { + panic!("unexpected failure on {filenames:?}: {message:?}"); + } + } + } + } + // Confirms that calling the internal "ensure_schema" function can succeed // when the database is already at that version. #[tokio::test] @@ -444,7 +654,7 @@ mod test { let conn = pool.pool().get().await.unwrap(); // Mimic the layout of "schema/crdb". - let config_dir = tempfile::TempDir::new().unwrap(); + let config_dir = Utf8TempDir::new().unwrap(); // Helper to create the version directory and "up.sql". let add_upgrade = |version: SemverVersion, sql: String| { @@ -499,8 +709,9 @@ mod test { .await; // Show that the datastores can be created concurrently. - let config = - SchemaConfig { schema_dir: config_dir.path().to_path_buf() }; + let config = SchemaConfig { + schema_dir: config_dir.path().to_path_buf().into_std_path_buf(), + }; let _ = futures::future::join_all((0..10).map(|_| { let log = log.clone(); let pool = pool.clone(); diff --git a/nexus/db-queries/src/db/datastore/device_auth.rs b/nexus/db-queries/src/db/datastore/device_auth.rs index e1facb43f6..8d8e09744c 100644 --- a/nexus/db-queries/src/db/datastore/device_auth.rs +++ b/nexus/db-queries/src/db/datastore/device_auth.rs @@ -10,10 +10,8 @@ use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; -use crate::db::error::TransactionError; use crate::db::model::DeviceAccessToken; use crate::db::model::DeviceAuthRequest; -use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::prelude::*; use omicron_common::api::external::CreateResult; @@ -75,35 +73,40 @@ impl DataStore { RequestNotFound, TooManyRequests, } - type TxnError = TransactionError; - self.pool_connection_authorized(opctx) - .await? - .transaction_async(|conn| async move { - match delete_request.execute_async(&conn).await? { - 0 => { - Err(TxnError::CustomError(TokenGrantError::RequestNotFound)) + let err = crate::transaction_retry::OptionalError::new(); + let conn = self.pool_connection_authorized(opctx).await?; + + self.transaction_retry_wrapper("device_access_token_create") + .transaction(&conn, |conn| { + let err = err.clone(); + let insert_token = insert_token.clone(); + let delete_request = delete_request.clone(); + async move { + match delete_request.execute_async(&conn).await? 
{ + 0 => Err(err.bail(TokenGrantError::RequestNotFound)), + 1 => Ok(insert_token.get_result_async(&conn).await?), + _ => Err(err.bail(TokenGrantError::TooManyRequests)), } - 1 => Ok(insert_token.get_result_async(&conn).await?), - _ => Err(TxnError::CustomError( - TokenGrantError::TooManyRequests, - )), } }) .await - .map_err(|e| match e { - TxnError::CustomError(TokenGrantError::RequestNotFound) => { - Error::ObjectNotFound { - type_name: ResourceType::DeviceAuthRequest, - lookup_type: LookupType::ByCompositeId( - authz_request.id(), - ), + .map_err(|e| { + if let Some(err) = err.take() { + match err { + TokenGrantError::RequestNotFound => { + Error::ObjectNotFound { + type_name: ResourceType::DeviceAuthRequest, + lookup_type: LookupType::ByCompositeId( + authz_request.id(), + ), + } + } + TokenGrantError::TooManyRequests => { + Error::internal_error("unexpectedly found multiple device auth requests for the same user code") + } } - } - TxnError::CustomError(TokenGrantError::TooManyRequests) => { - Error::internal_error("unexpectedly found multiple device auth requests for the same user code") - } - TxnError::Database(e) => { + } else { public_error_from_diesel(e, ErrorHandler::Server) } }) diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index a0d9bf12c3..94d950f86a 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -25,11 +25,14 @@ use crate::db::model::DiskUpdate; use crate::db::model::Instance; use crate::db::model::Name; use crate::db::model::Project; +use crate::db::model::VirtualProvisioningResource; +use crate::db::model::Volume; use crate::db::pagination::paginated; use crate::db::queries::disk::DiskSetClauseForAttach; use crate::db::update_and_check::UpdateAndCheck; use crate::db::update_and_check::UpdateStatus; use async_bb8_diesel::AsyncRunQueryDsl; +use chrono::DateTime; use chrono::Utc; use diesel::prelude::*; use omicron_common::api; @@ -564,7 +567,7 @@ impl DataStore { /// Updates a disk record to indicate it has been deleted. /// - /// Returns the volume ID of associated with the deleted disk. + /// Returns the disk before any modifications are made by this function. /// /// Does not attempt to modify any resources (e.g. regions) which may /// belong to the disk. @@ -630,16 +633,12 @@ impl DataStore { // destroyed, don't throw an error. return Ok(disk); } else if !ok_to_delete_states.contains(disk_state.state()) { - return Err(Error::InvalidRequest { - message: format!( - "disk cannot be deleted in state \"{}\"", - disk.runtime_state.disk_state - ), - }); + return Err(Error::invalid_request(format!( + "disk cannot be deleted in state \"{}\"", + disk.runtime_state.disk_state + ))); } else if disk_state.is_attached() { - return Err(Error::InvalidRequest { - message: String::from("disk is attached"), - }); + return Err(Error::invalid_request("disk is attached")); } else { // NOTE: This is a "catch-all" error case, more specific // errors should be preferred as they're more actionable. @@ -652,4 +651,289 @@ impl DataStore { } } } + + /// Set a disk to faulted and un-delete it + /// + /// If the disk delete saga unwinds, then the disk should _not_ remain + /// deleted: disk delete saga should be triggered again in order to fully + /// complete, and the only way to do that is to un-delete the disk. Set it + /// to faulted to ensure that it won't be used. 
+    pub async fn project_undelete_disk_set_faulted_no_auth(
+        &self,
+        disk_id: &Uuid,
+    ) -> Result<(), Error> {
+        use db::schema::disk::dsl;
+        let conn = self.pool_connection_unauthorized().await?;
+
+        let faulted = api::external::DiskState::Faulted.label();
+
+        let result = diesel::update(dsl::disk)
+            .filter(dsl::time_deleted.is_not_null())
+            .filter(dsl::id.eq(*disk_id))
+            .set((
+                dsl::time_deleted.eq(None::<DateTime<Utc>>),
+                dsl::disk_state.eq(faulted),
+            ))
+            .check_if_exists::<Disk>(*disk_id)
+            .execute_and_check(&conn)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel(
+                    e,
+                    ErrorHandler::NotFoundByLookup(
+                        ResourceType::Disk,
+                        LookupType::ById(*disk_id),
+                    ),
+                )
+            })?;
+
+        match result.status {
+            UpdateStatus::Updated => Ok(()),
+            UpdateStatus::NotUpdatedButExists => {
+                let disk = result.found;
+                let disk_state = disk.state();
+
+                if disk.time_deleted().is_none()
+                    && disk_state.state() == &api::external::DiskState::Faulted
+                {
+                    // To maintain idempotency, if the disk has already been
+                    // faulted, don't throw an error.
+                    return Ok(());
+                } else {
+                    // NOTE: This is a "catch-all" error case, more specific
+                    // errors should be preferred as they're more actionable.
+                    return Err(Error::InternalError {
+                        internal_message: String::from(
+                            "disk exists, but cannot be faulted",
+                        ),
+                    });
+                }
+            }
+        }
+    }
+
+    /// Find disks that have been deleted but still have a
+    /// `virtual_provisioning_resource` record: this indicates that a disk
+    /// delete saga partially succeeded, then unwound, which (before the fixes
+    /// in customer-support#58) would mean the disk was deleted but the project
+    /// it was in could not be deleted (due to an erroneous number of bytes
+    /// "still provisioned").
+    pub async fn find_phantom_disks(&self) -> ListResultVec<Disk> {
+        use db::schema::disk::dsl;
+        use db::schema::virtual_provisioning_resource::dsl as resource_dsl;
+        use db::schema::volume::dsl as volume_dsl;
+
+        let conn = self.pool_connection_unauthorized().await?;
+
+        let potential_phantom_disks: Vec<(
+            Disk,
+            Option<VirtualProvisioningResource>,
+            Option<Volume>,
+        )> = dsl::disk
+            .filter(dsl::time_deleted.is_not_null())
+            .left_join(
+                resource_dsl::virtual_provisioning_resource
+                    .on(resource_dsl::id.eq(dsl::id)),
+            )
+            .left_join(volume_dsl::volume.on(dsl::volume_id.eq(volume_dsl::id)))
+            .select((
+                Disk::as_select(),
+                Option::<VirtualProvisioningResource>::as_select(),
+                Option::<Volume>::as_select(),
+            ))
+            .load_async(&*conn)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+
+        // The first forward steps of the disk delete saga (plus the volume
+        // delete sub saga) are as follows:
+        //
+        // 1. soft-delete the disk
+        // 2. call virtual_provisioning_collection_delete_disk
+        // 3. soft-delete the disk's volume
+        //
+        // Before the fixes as part of customer-support#58, steps 1 and 3 did
+        // not have undo steps, where step 2 did. In order to detect when the
+        // disk delete saga unwound, find entries where
+        //
+        // 1. the disk and volume are soft-deleted
+        // 2. the `virtual_provisioning_resource` exists
+        //
+        // It's important not to conflict with any currently running disk delete
+        // saga.
+
+        Ok(potential_phantom_disks
+            .into_iter()
+            .filter(|(disk, resource, volume)| {
+                if let Some(volume) = volume {
+                    // In this branch, the volume record exists. Because it was
+                    // returned by the query above, if it is soft-deleted we
+                    // then know the saga unwound before the volume record could
This won't conflict with a running disk + // delete saga, because the resource record should be None + // if the disk and volume were already soft deleted (if + // there is one, the saga will be at or past step 3). + disk.time_deleted().is_some() + && volume.time_deleted.is_some() + && resource.is_some() + } else { + // In this branch, the volume record was hard-deleted. The + // saga could still have unwound after hard deleting the + // volume record, so proceed with filtering. This won't + // conflict with a running disk delete saga because the + // resource record should be None if the disk was soft + // deleted and the volume was hard deleted (if there is one, + // the saga should be almost finished as the volume hard + // delete is the last thing it does). + disk.time_deleted().is_some() && resource.is_some() + } + }) + .map(|(disk, _, _)| disk) + .collect()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use crate::db::datastore::datastore_test; + use nexus_test_utils::db::test_setup_database; + use nexus_types::external_api::params; + use omicron_common::api::external; + use omicron_test_utils::dev; + + #[tokio::test] + async fn test_undelete_disk_set_faulted_idempotent() { + let logctx = + dev::test_setup_log("test_undelete_disk_set_faulted_idempotent"); + let log = logctx.log.new(o!()); + let mut db = test_setup_database(&log).await; + let (opctx, db_datastore) = datastore_test(&logctx, &db).await; + + let silo_id = opctx.authn.actor().unwrap().silo_id().unwrap(); + + let (authz_project, _db_project) = db_datastore + .project_create( + &opctx, + Project::new( + silo_id, + params::ProjectCreate { + identity: external::IdentityMetadataCreateParams { + name: "testpost".parse().unwrap(), + description: "please ignore".to_string(), + }, + }, + ), + ) + .await + .unwrap(); + + let disk = db_datastore + .project_create_disk( + &opctx, + &authz_project, + Disk::new( + Uuid::new_v4(), + authz_project.id(), + Uuid::new_v4(), + params::DiskCreate { + identity: external::IdentityMetadataCreateParams { + name: "first-post".parse().unwrap(), + description: "just trying things out".to_string(), + }, + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(512) + .unwrap(), + }, + size: external::ByteCount::from(2147483648), + }, + db::model::BlockSize::Traditional, + DiskRuntimeState::new(), + ) + .unwrap(), + ) + .await + .unwrap(); + + let (.., authz_disk, db_disk) = LookupPath::new(&opctx, &db_datastore) + .disk_id(disk.id()) + .fetch() + .await + .unwrap(); + + db_datastore + .disk_update_runtime( + &opctx, + &authz_disk, + &db_disk.runtime().detach(), + ) + .await + .unwrap(); + + db_datastore + .project_delete_disk_no_auth( + &authz_disk.id(), + &[external::DiskState::Detached], + ) + .await + .unwrap(); + + // Assert initial state - deleting the Disk will make LookupPath::fetch + // not work. 
+        {
+            LookupPath::new(&opctx, &db_datastore)
+                .disk_id(disk.id())
+                .fetch()
+                .await
+                .unwrap_err();
+        }
+
+        // Function under test: call this twice to ensure it's idempotent
+
+        db_datastore
+            .project_undelete_disk_set_faulted_no_auth(&authz_disk.id())
+            .await
+            .unwrap();
+
+        // Assert state change
+
+        {
+            let (.., db_disk) = LookupPath::new(&opctx, &db_datastore)
+                .disk_id(disk.id())
+                .fetch()
+                .await
+                .unwrap();
+
+            assert!(db_disk.time_deleted().is_none());
+            assert_eq!(
+                db_disk.runtime().disk_state,
+                external::DiskState::Faulted.label().to_string()
+            );
+        }
+
+        db_datastore
+            .project_undelete_disk_set_faulted_no_auth(&authz_disk.id())
+            .await
+            .unwrap();
+
+        // Assert state is the same after the second call
+
+        {
+            let (.., db_disk) = LookupPath::new(&opctx, &db_datastore)
+                .disk_id(disk.id())
+                .fetch()
+                .await
+                .unwrap();
+
+            assert!(db_disk.time_deleted().is_none());
+            assert_eq!(
+                db_disk.runtime().disk_state,
+                external::DiskState::Faulted.label().to_string()
+            );
+        }
+
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
 }
diff --git a/nexus/db-queries/src/db/datastore/dns.rs b/nexus/db-queries/src/db/datastore/dns.rs
index f7ad97593e..552ad31487 100644
--- a/nexus/db-queries/src/db/datastore/dns.rs
+++ b/nexus/db-queries/src/db/datastore/dns.rs
@@ -15,6 +15,7 @@ use crate::db::model::DnsZone;
 use crate::db::model::Generation;
 use crate::db::model::InitialDnsGroup;
 use crate::db::pagination::paginated;
+use crate::db::pagination::Paginator;
 use crate::db::pool::DbConnection;
 use crate::db::TransactionError;
 use async_bb8_diesel::AsyncConnection;
@@ -67,7 +68,9 @@ impl DataStore {
         dns_group: DnsGroup,
     ) -> ListResultVec<DnsZone> {
         let conn = self.pool_connection_authorized(opctx).await?;
-        self.dns_zones_list_all_on_connection(opctx, &conn, dns_group).await
+        Ok(self
+            .dns_zones_list_all_on_connection(opctx, &conn, dns_group)
+            .await?)
     }
 
     /// Variant of [`Self::dns_zones_list_all`] which may be called from a
@@ -77,7 +80,7 @@ impl DataStore {
         opctx: &OpContext,
         conn: &async_bb8_diesel::Connection<DbConnection>,
         dns_group: DnsGroup,
-    ) -> ListResultVec<DnsZone> {
+    ) -> Result<Vec<DnsZone>, TransactionError<Error>> {
         use db::schema::dns_zone::dsl;
 
         const LIMIT: usize = 5;
@@ -88,8 +91,7 @@ impl DataStore {
             .limit(i64::try_from(LIMIT).unwrap())
             .select(DnsZone::as_select())
             .load_async(conn)
-            .await
-            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+            .await?;
 
         bail_unless!(
             list.len() < LIMIT,
@@ -106,12 +108,14 @@ impl DataStore {
         opctx: &OpContext,
         dns_group: DnsGroup,
     ) -> LookupResult<DnsVersion> {
-        self.dns_group_latest_version_conn(
-            opctx,
-            &*self.pool_connection_authorized(opctx).await?,
-            dns_group,
-        )
-        .await
+        let version = self
+            .dns_group_latest_version_conn(
+                opctx,
+                &*self.pool_connection_authorized(opctx).await?,
+                dns_group,
+            )
+            .await?;
+        Ok(version)
     }
 
     pub async fn dns_group_latest_version_conn(
@@ -119,7 +123,7 @@ impl DataStore {
         opctx: &OpContext,
         conn: &async_bb8_diesel::Connection<DbConnection>,
         dns_group: DnsGroup,
-    ) -> LookupResult<DnsVersion> {
+    ) -> Result<DnsVersion, TransactionError<Error>> {
         opctx.authorize(authz::Action::Read, &authz::DNS_CONFIG).await?;
         use db::schema::dns_version::dsl;
         let versions = dsl::dns_version
@@ -128,8 +132,7 @@ impl DataStore {
             .limit(1)
             .select(DnsVersion::as_select())
             .load_async(conn)
-            .await
-            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+            .await?;
 
         bail_unless!(
             versions.len() == 1,
@@ -240,9 +243,8 @@ impl DataStore {
         let mut zones = Vec::with_capacity(dns_zones.len());
         for zone in dns_zones {
             let mut zone_records = Vec::new();
-            let mut marker = None;
-
-            loop {
+            let mut paginator = Paginator::new(batch_size);
+            while let Some(p) = paginator.next() {
                 debug!(log, "listing DNS names for zone";
                     "dns_zone_id" => zone.id.to_string(),
                     "dns_zone_name" => &zone.zone_name,
@@ -250,25 +252,16 @@ impl DataStore {
                     "found_so_far" => zone_records.len(),
                     "batch_size" => batch_size.get(),
                 );
-                let pagparams = DataPageParams {
-                    marker: marker.as_ref(),
-                    direction: dropshot::PaginationOrder::Ascending,
-                    limit: batch_size,
-                };
                 let names_batch = self
-                    .dns_names_list(opctx, zone.id, version.version, &pagparams)
+                    .dns_names_list(
+                        opctx,
+                        zone.id,
+                        version.version,
+                        &p.current_pagparams(),
+                    )
                     .await?;
-                let done = names_batch.len()
-                    < usize::try_from(batch_size.get()).unwrap();
-                if let Some((last_name, _)) = names_batch.last() {
-                    marker = Some(last_name.clone());
-                } else {
-                    assert!(done);
-                }
+                paginator = p.found_batch(&names_batch, &|(n, _)| n.clone());
                 zone_records.extend(names_batch.into_iter());
-                if done {
-                    break;
-                }
             }
 
             debug!(log, "found all DNS names for zone";
@@ -377,28 +370,17 @@ impl DataStore {
         opctx: &OpContext,
         conn: &async_bb8_diesel::Connection<DbConnection>,
         update: DnsVersionUpdateBuilder,
-    ) -> Result<(), Error> {
+    ) -> Result<(), TransactionError<Error>> {
         opctx.authorize(authz::Action::Modify, &authz::DNS_CONFIG).await?;
 
         let zones = self
             .dns_zones_list_all_on_connection(opctx, conn, update.dns_group)
             .await?;
-        let result = conn
-            .transaction_async(|c| async move {
-                self.dns_update_internal(opctx, &c, update, zones)
-                    .await
-                    .map_err(TransactionError::CustomError)
-            })
-            .await;
-
-        match result {
-            Ok(()) => Ok(()),
-            Err(TransactionError::CustomError(e)) => Err(e),
-            Err(TransactionError::Database(e)) => {
-                Err(public_error_from_diesel(e, ErrorHandler::Server))
-            }
-        }
+        conn.transaction_async(|c| async move {
+            self.dns_update_internal(opctx, &c, update, zones).await
+        })
+        .await
     }
 
     // This must only be used inside a transaction. Otherwise, it may make
@@ -409,7 +391,7 @@ impl DataStore {
         conn: &async_bb8_diesel::Connection<DbConnection>,
         update: DnsVersionUpdateBuilder,
         zones: Vec<DnsZone>,
-    ) -> Result<(), Error> {
+    ) -> Result<(), TransactionError<Error>> {
         // TODO-scalability TODO-performance This would be much better as a CTE
         // for all the usual reasons described in RFD 192. Using an interactive
         // transaction here means that either we wind up holding database locks
@@ -455,10 +437,7 @@ impl DataStore {
             diesel::insert_into(dsl::dns_version)
                 .values(new_version)
                 .execute_async(conn)
-                .await
-                .map_err(|e| {
-                    public_error_from_diesel(e, ErrorHandler::Server)
-                })?;
+                .await?;
         }
 
         {
@@ -480,8 +459,7 @@ impl DataStore {
                 )
                 .set(dsl::version_removed.eq(new_version_num))
                 .execute_async(conn)
-                .await
-                .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+                .await?;
 
             bail_unless!(
                 nremoved == ntoremove,
@@ -495,10 +473,7 @@ impl DataStore {
             let nadded = diesel::insert_into(dsl::dns_name)
                 .values(new_names)
                 .execute_async(conn)
-                .await
-                .map_err(|e| {
-                    public_error_from_diesel(e, ErrorHandler::Server)
-                })?;
+                .await?;
 
             bail_unless!(
                 nadded == ntoadd,
@@ -1684,6 +1659,10 @@ mod test {
         let conn = datastore.pool_connection_for_tests().await.unwrap();
         let error =
             datastore.dns_update(&opctx, &conn, update).await.unwrap_err();
+        let error = match error {
+            TransactionError::CustomError(err) => err,
+            _ => panic!("Unexpected error: {:?}", error),
+        };
         assert_eq!(
             error.to_string(),
             "Internal Error: updated wrong number of dns_name \
@@ -1707,11 +1686,15 @@ mod test {
         update.add_name(String::from("n2"), records1.clone()).unwrap();
 
         let conn = datastore.pool_connection_for_tests().await.unwrap();
-        let error =
-            datastore.dns_update(&opctx, &conn, update).await.unwrap_err();
+        let error = Error::from(
+            datastore.dns_update(&opctx, &conn, update).await.unwrap_err(),
+        );
         let msg = error.to_string();
-        assert!(msg.starts_with("Internal Error: "));
-        assert!(msg.contains("violates unique constraint"));
+        assert!(msg.starts_with("Internal Error: "), "Message: {msg:}");
+        assert!(
+            msg.contains("violates unique constraint"),
+            "Message: {msg:}"
+        );
     }
 
     let dns_config = datastore
diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs
index e663130a84..ddf396f871 100644
--- a/nexus/db-queries/src/db/datastore/external_ip.rs
+++ b/nexus/db-queries/src/db/datastore/external_ip.rs
@@ -10,12 +10,16 @@ use crate::authz::ApiResource;
 use crate::context::OpContext;
 use crate::db;
 use crate::db::error::public_error_from_diesel;
+use crate::db::error::retryable;
 use crate::db::error::ErrorHandler;
+use crate::db::error::TransactionError;
 use crate::db::lookup::LookupPath;
 use crate::db::model::ExternalIp;
+use crate::db::model::FloatingIp;
 use crate::db::model::IncompleteExternalIp;
 use crate::db::model::IpKind;
 use crate::db::model::Name;
+use crate::db::pagination::paginated;
 use crate::db::pool::DbConnection;
 use crate::db::queries::external_ip::NextExternalIp;
 use crate::db::update_and_check::UpdateAndCheck;
@@ -23,10 +27,18 @@ use crate::db::update_and_check::UpdateStatus;
 use async_bb8_diesel::AsyncRunQueryDsl;
 use chrono::Utc;
 use diesel::prelude::*;
+use nexus_types::external_api::params;
 use nexus_types::identity::Resource;
+use omicron_common::api::external::http_pagination::PaginatedBy;
 use omicron_common::api::external::CreateResult;
+use omicron_common::api::external::DeleteResult;
 use omicron_common::api::external::Error;
+use omicron_common::api::external::ListResultVec;
 use omicron_common::api::external::LookupResult;
+use omicron_common::api::external::NameOrId;
+use omicron_common::api::external::ResourceType;
+use omicron_common::api::external::UpdateResult;
+use ref_cast::RefCast;
 use std::net::IpAddr;
 use uuid::Uuid;
 
@@ -126,13 +138,64 @@ impl DataStore {
         self.allocate_external_ip(opctx, data).await
     }
 
+    /// Allocates a floating IP address for instance usage.
+    pub async fn allocate_floating_ip(
+        &self,
+        opctx: &OpContext,
+        project_id: Uuid,
+        params: params::FloatingIpCreate,
+    ) -> CreateResult<ExternalIp> {
+        let ip_id = Uuid::new_v4();
+
+        let pool_id = match params.pool {
+            Some(NameOrId::Name(name)) => {
+                LookupPath::new(opctx, self)
+                    .ip_pool_name(&Name(name))
+                    .fetch_for(authz::Action::Read)
+                    .await?
+                    .1
+            }
+            Some(NameOrId::Id(id)) => {
+                LookupPath::new(opctx, self)
+                    .ip_pool_id(id)
+                    .fetch_for(authz::Action::Read)
+                    .await?
+                    .1
+            }
+            None => self.ip_pools_fetch_default(opctx).await?,
+        }
+        .id();
+
+        let data = if let Some(ip) = params.address {
+            IncompleteExternalIp::for_floating_explicit(
+                ip_id,
+                &Name(params.identity.name),
+                &params.identity.description,
+                project_id,
+                ip,
+                pool_id,
+            )
+        } else {
+            IncompleteExternalIp::for_floating(
+                ip_id,
+                &Name(params.identity.name),
+                &params.identity.description,
+                project_id,
+                pool_id,
+            )
+        };
+
+        self.allocate_external_ip(opctx, data).await
+    }
+
     async fn allocate_external_ip(
         &self,
         opctx: &OpContext,
         data: IncompleteExternalIp,
     ) -> CreateResult<ExternalIp> {
         let conn = self.pool_connection_authorized(opctx).await?;
-        Self::allocate_external_ip_on_connection(&conn, data).await
+        let ip = Self::allocate_external_ip_on_connection(&conn, data).await?;
+        Ok(ip)
     }
 
     /// Variant of [Self::allocate_external_ip] which may be called from a
@@ -140,23 +203,49 @@ impl DataStore {
     pub(crate) async fn allocate_external_ip_on_connection(
         conn: &async_bb8_diesel::Connection<DbConnection>,
         data: IncompleteExternalIp,
-    ) -> CreateResult<ExternalIp> {
+    ) -> Result<ExternalIp, TransactionError<Error>> {
+        use diesel::result::DatabaseErrorKind::UniqueViolation;
+        // Name needs to be cloned out here (if present) to give users a
+        // sensible error message on name collision.
+        let name = data.name().clone();
         let explicit_ip = data.explicit_ip().is_some();
         NextExternalIp::new(data).get_result_async(conn).await.map_err(|e| {
+            use diesel::result::Error::DatabaseError;
             use diesel::result::Error::NotFound;
             match e {
                 NotFound => {
                     if explicit_ip {
-                        Error::invalid_request(
+                        TransactionError::CustomError(Error::invalid_request(
                             "Requested external IP address not available",
-                        )
+                        ))
                     } else {
-                        Error::invalid_request(
-                            "No external IP addresses available",
+                        TransactionError::CustomError(
+                            Error::insufficient_capacity(
+                                "No external IP addresses available",
+                                "NextExternalIp::new returned NotFound",
+                            ),
                         )
                     }
                 }
-                _ => crate::db::queries::external_ip::from_diesel(e),
+                DatabaseError(UniqueViolation, ..) if name.is_some() => {
+                    TransactionError::CustomError(public_error_from_diesel(
+                        e,
+                        ErrorHandler::Conflict(
+                            ResourceType::FloatingIp,
+                            name.as_ref()
+                                .map(|m| m.as_str())
+                                .unwrap_or_default(),
+                        ),
+                    ))
+                }
+                _ => {
+                    if retryable(&e) {
+                        return TransactionError::Database(e);
+                    }
+                    TransactionError::CustomError(
+                        crate::db::queries::external_ip::from_diesel(e),
+                    )
+                }
             }
         })
     }
@@ -245,8 +334,6 @@ impl DataStore {
     /// This method returns the number of records deleted, rather than the usual
     /// `DeleteResult`. That's mostly useful for tests, but could be important
     /// if callers have some invariants they'd like to check.
-    // TODO-correctness: This can't be used for Floating IPs, we'll need a
-    // _detatch_ method for that.
     pub async fn deallocate_external_ip_by_instance_id(
         &self,
         opctx: &OpContext,
@@ -265,6 +352,27 @@ impl DataStore {
             .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
     }
 
+    /// Detach an individual Floating IP address from its parent instance.
+    ///
+    /// As in `deallocate_external_ip_by_instance_id`, this method returns the
+    /// number of records altered, rather than an `UpdateResult`.
+    pub async fn detach_floating_ips_by_instance_id(
+        &self,
+        opctx: &OpContext,
+        instance_id: Uuid,
+    ) -> Result<usize, Error> {
+        use db::schema::external_ip::dsl;
+        diesel::update(dsl::external_ip)
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::is_service.eq(false))
+            .filter(dsl::parent_id.eq(instance_id))
+            .filter(dsl::kind.eq(IpKind::Floating))
+            .set(dsl::parent_id.eq(Option::<Uuid>::None))
+            .execute_async(&*self.pool_connection_authorized(opctx).await?)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
     /// Fetch all external IP addresses of any kind for the provided instance
     pub async fn instance_lookup_external_ips(
         &self,
@@ -281,4 +389,166 @@ impl DataStore {
             .await
             .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
     }
+
+    /// Fetch all Floating IP addresses for the provided project.
+    pub async fn floating_ips_list(
+        &self,
+        opctx: &OpContext,
+        authz_project: &authz::Project,
+        pagparams: &PaginatedBy<'_>,
+    ) -> ListResultVec<FloatingIp> {
+        use db::schema::floating_ip::dsl;
+
+        opctx.authorize(authz::Action::ListChildren, authz_project).await?;
+
+        match pagparams {
+            PaginatedBy::Id(pagparams) => {
+                paginated(dsl::floating_ip, dsl::id, &pagparams)
+            }
+            PaginatedBy::Name(pagparams) => paginated(
+                dsl::floating_ip,
+                dsl::name,
+                &pagparams.map_name(|n| Name::ref_cast(n)),
+            ),
+        }
+        .filter(dsl::project_id.eq(authz_project.id()))
+        .filter(dsl::time_deleted.is_null())
+        .select(FloatingIp::as_select())
+        .get_results_async(&*self.pool_connection_authorized(opctx).await?)
+        .await
+        .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
+    }
+
+    /// Delete a Floating IP, verifying first that it is not in use.
+    pub async fn floating_ip_delete(
+        &self,
+        opctx: &OpContext,
+        authz_fip: &authz::FloatingIp,
+        db_fip: &FloatingIp,
+    ) -> DeleteResult {
+        use db::schema::external_ip::dsl;
+
+        // Verify this FIP is not attached to any instances/services.
+        if db_fip.parent_id.is_some() {
+            return Err(Error::invalid_request(
+                "Floating IP cannot be deleted while attached to an instance",
+            ));
+        }
+
+        opctx.authorize(authz::Action::Delete, authz_fip).await?;
+
+        let now = Utc::now();
+        let updated_rows = diesel::update(dsl::external_ip)
+            .filter(dsl::id.eq(db_fip.id()))
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::parent_id.is_null())
+            .set(dsl::time_deleted.eq(now))
+            .execute_async(&*self.pool_connection_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_fip),
+                )
+            })?;
+
+        if updated_rows == 0 {
+            return Err(Error::invalid_request(
+                "deletion failed due to concurrent modification",
+            ));
+        }
+        Ok(())
+    }
+
+    /// Attaches a Floating IP address to an instance.
+    pub async fn floating_ip_attach(
+        &self,
+        opctx: &OpContext,
+        authz_fip: &authz::FloatingIp,
+        db_fip: &FloatingIp,
+        instance_id: Uuid,
+    ) -> UpdateResult<FloatingIp> {
+        use db::schema::external_ip::dsl;
+
+        // Verify this FIP is not attached to any instances/services.
+        if db_fip.parent_id.is_some() {
+            return Err(Error::invalid_request(
+                "Floating IP cannot be attached to one instance while still attached to another",
+            ));
+        }
+
+        let (.., authz_instance, _db_instance) = LookupPath::new(&opctx, self)
+            .instance_id(instance_id)
+            .fetch_for(authz::Action::Modify)
+            .await?;
+
+        opctx.authorize(authz::Action::Modify, authz_fip).await?;
+        opctx.authorize(authz::Action::Modify, &authz_instance).await?;
+
+        diesel::update(dsl::external_ip)
+            .filter(dsl::id.eq(db_fip.id()))
+            .filter(dsl::kind.eq(IpKind::Floating))
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::parent_id.is_null())
+            .set((
+                dsl::parent_id.eq(Some(instance_id)),
+                dsl::time_modified.eq(Utc::now()),
+            ))
+            .returning(ExternalIp::as_returning())
+            .get_result_async(&*self.pool_connection_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_fip),
+                )
+            })
+            .and_then(|r| FloatingIp::try_from(r))
+            .map_err(|e| Error::internal_error(&format!("{e}")))
+    }
+
+    /// Detaches a Floating IP address from an instance.
+    pub async fn floating_ip_detach(
+        &self,
+        opctx: &OpContext,
+        authz_fip: &authz::FloatingIp,
+        db_fip: &FloatingIp,
+    ) -> UpdateResult<FloatingIp> {
+        use db::schema::external_ip::dsl;
+
+        let Some(instance_id) = db_fip.parent_id else {
+            return Err(Error::invalid_request(
+                "Floating IP is not attached to an instance",
+            ));
+        };
+
+        let (.., authz_instance, _db_instance) = LookupPath::new(&opctx, self)
+            .instance_id(instance_id)
+            .fetch_for(authz::Action::Modify)
+            .await?;
+
+        opctx.authorize(authz::Action::Modify, authz_fip).await?;
+        opctx.authorize(authz::Action::Modify, &authz_instance).await?;
+
+        diesel::update(dsl::external_ip)
+            .filter(dsl::id.eq(db_fip.id()))
+            .filter(dsl::kind.eq(IpKind::Floating))
+            .filter(dsl::time_deleted.is_null())
+            .filter(dsl::parent_id.eq(instance_id))
+            .set((
+                dsl::parent_id.eq(Option::<Uuid>::None),
+                dsl::time_modified.eq(Utc::now()),
+            ))
+            .returning(ExternalIp::as_returning())
+            .get_result_async(&*self.pool_connection_authorized(opctx).await?)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel(
+                    e,
+                    ErrorHandler::NotFoundByResource(authz_fip),
+                )
+            })
+            .and_then(|r| FloatingIp::try_from(r))
+            .map_err(|e| Error::internal_error(&format!("{e}")))
+    }
 }
diff --git a/nexus/db-queries/src/db/datastore/identity_provider.rs b/nexus/db-queries/src/db/datastore/identity_provider.rs
index fdc9a020e7..cee577acd6 100644
--- a/nexus/db-queries/src/db/datastore/identity_provider.rs
+++ b/nexus/db-queries/src/db/datastore/identity_provider.rs
@@ -14,7 +14,6 @@ use crate::db::identity::Resource;
 use crate::db::model::IdentityProvider;
 use crate::db::model::Name;
 use crate::db::pagination::paginated;
-use async_bb8_diesel::AsyncConnection;
 use async_bb8_diesel::AsyncRunQueryDsl;
 use diesel::prelude::*;
 use omicron_common::api::external::http_pagination::PaginatedBy;
@@ -63,36 +62,47 @@ impl DataStore {
         assert_eq!(provider.silo_id, authz_idp_list.silo().id());
 
         let name = provider.identity().name.to_string();
-        self.pool_connection_authorized(opctx)
-            .await?
-            .transaction_async(|conn| async move {
-                // insert silo identity provider record with type Saml
-                use db::schema::identity_provider::dsl as idp_dsl;
-                diesel::insert_into(idp_dsl::identity_provider)
-                    .values(db::model::IdentityProvider {
-                        identity: db::model::IdentityProviderIdentity {
-                            id: provider.identity.id,
-                            name: provider.identity.name.clone(),
-                            description: provider.identity.description.clone(),
-                            time_created: provider.identity.time_created,
-                            time_modified: provider.identity.time_modified,
-                            time_deleted: provider.identity.time_deleted,
-                        },
-                        silo_id: provider.silo_id,
-                        provider_type: db::model::IdentityProviderType::Saml,
-                    })
-                    .execute_async(&conn)
-                    .await?;
+        let conn = self.pool_connection_authorized(opctx).await?;
 
-                // insert silo saml identity provider record
-                use db::schema::saml_identity_provider::dsl;
-                let result = diesel::insert_into(dsl::saml_identity_provider)
-                    .values(provider)
-                    .returning(db::model::SamlIdentityProvider::as_returning())
-                    .get_result_async(&conn)
-                    .await?;
+        self.transaction_retry_wrapper("saml_identity_provider_create")
+            .transaction(&conn, |conn| {
+                let provider = provider.clone();
+                async move {
+                    // insert silo identity provider record with type Saml
+                    use db::schema::identity_provider::dsl as idp_dsl;
+                    diesel::insert_into(idp_dsl::identity_provider)
+                        .values(db::model::IdentityProvider {
+                            identity: db::model::IdentityProviderIdentity {
+                                id: provider.identity.id,
+                                name: provider.identity.name.clone(),
+                                description: provider
+                                    .identity
+                                    .description
+                                    .clone(),
+                                time_created: provider.identity.time_created,
+                                time_modified: provider.identity.time_modified,
+                                time_deleted: provider.identity.time_deleted,
+                            },
+                            silo_id: provider.silo_id,
+                            provider_type:
+                                db::model::IdentityProviderType::Saml,
+                        })
+                        .execute_async(&conn)
+                        .await?;
 
-                Ok(result)
+                    // insert silo saml identity provider record
+                    use db::schema::saml_identity_provider::dsl;
+                    let result =
+                        diesel::insert_into(dsl::saml_identity_provider)
+                            .values(provider)
+                            .returning(
+                                db::model::SamlIdentityProvider::as_returning(),
+                            )
+                            .get_result_async(&conn)
+                            .await?;
+
+                    Ok(result)
+                }
             })
             .await
            .map_err(|e| {
diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs
index b743d28ee8..31b24a7e75 100644
--- a/nexus/db-queries/src/db/datastore/inventory.rs
+++ b/nexus/db-queries/src/db/datastore/inventory.rs
@@ -7,6 +7,7 @@ use crate::authz;
 use crate::context::OpContext;
 use crate::db;
 use crate::db::error::public_error_from_diesel;
+use crate::db::error::public_error_from_diesel_lookup;
 use crate::db::error::ErrorHandler;
 use crate::db::queries::ALLOW_FULL_TABLE_SCAN_SQL;
 use crate::db::TransactionError;
@@ -21,6 +22,7 @@ use diesel::ExpressionMethods;
 use diesel::IntoSql;
 use diesel::JoinOnDsl;
 use diesel::NullableExpressionMethods;
+use diesel::OptionalExtension;
 use diesel::QueryDsl;
 use diesel::Table;
 use futures::future::BoxFuture;
@@ -35,13 +37,19 @@ use nexus_db_model::InvCaboose;
 use nexus_db_model::InvCollection;
 use nexus_db_model::InvCollectionError;
 use nexus_db_model::InvRootOfTrust;
+use nexus_db_model::InvRotPage;
 use nexus_db_model::InvServiceProcessor;
+use nexus_db_model::RotPageWhichEnum;
 use nexus_db_model::SpType;
 use nexus_db_model::SpTypeEnum;
 use nexus_db_model::SwCaboose;
+use nexus_db_model::SwRotPage;
+use nexus_types::inventory::BaseboardId;
 use nexus_types::inventory::Collection;
 use omicron_common::api::external::Error;
 use omicron_common::api::external::InternalContext;
+use omicron_common::api::external::LookupType;
+use omicron_common::api::external::ResourceType;
 use omicron_common::bail_unless;
 use std::collections::BTreeMap;
 use std::collections::BTreeSet;
@@ -76,6 +84,11 @@ impl DataStore {
             .iter()
             .map(|s| SwCaboose::from((**s).clone()))
             .collect::<Vec<_>>();
+        let rot_pages = collection
+            .rot_pages
+            .iter()
+            .map(|p| SwRotPage::from((**p).clone()))
+            .collect::<Vec<_>>();
         let error_values = collection
             .errors
             .iter()
@@ -140,6 +153,19 @@ impl DataStore {
                 .await?;
             }
 
+            // Insert records (and generate ids) for each distinct RoT page that
+            // we've found. Like baseboards, these might already be present and
+            // rows in this table are not scoped to a particular collection
+            // because they only map (immutable) identifiers to UUIDs.
+            {
+                use db::schema::sw_root_of_trust_page::dsl;
+                let _ = diesel::insert_into(dsl::sw_root_of_trust_page)
+                    .values(rot_pages)
+                    .on_conflict_do_nothing()
+                    .execute_async(&conn)
+                    .await?;
+            }
+
             // Insert a record describing the collection itself.
             {
                 use db::schema::inv_collection::dsl;
@@ -468,6 +494,85 @@ impl DataStore {
                 }
             }
 
+            // Insert rows for the root of trust pages that we found. This is
+            // almost identical to inserting cabooses above, and just like for
+            // cabooses, we do this using INSERT INTO ... SELECT. We have these
+            // three tables:
+            //
+            // - `hw_baseboard` with an "id" primary key and lookup columns
+            //   "part_number" and "serial_number"
+            // - `sw_root_of_trust_page` with an "id" primary key and lookup
+            //   column "data_base64"
+            // - `inv_root_of_trust_page` with foreign keys "hw_baseboard_id",
+            //   "sw_root_of_trust_page_id", and various other columns
+            //
+            // and generate an INSERT INTO query that is structurally the same
+            // as the caboose query described above.
+            for (which, tree) in &collection.rot_pages_found {
+                use db::schema::hw_baseboard_id::dsl as dsl_baseboard_id;
+                use db::schema::inv_root_of_trust_page::dsl as dsl_inv_rot_page;
+                use db::schema::sw_root_of_trust_page::dsl as dsl_sw_rot_page;
+                let db_which = nexus_db_model::RotPageWhich::from(*which);
+                for (baseboard_id, found_rot_page) in tree {
+                    let selection = db::schema::hw_baseboard_id::table
+                        .inner_join(
+                            db::schema::sw_root_of_trust_page::table.on(
+                                dsl_baseboard_id::part_number
+                                    .eq(baseboard_id.part_number.clone())
+                                    .and(
+                                        dsl_baseboard_id::serial_number.eq(
+                                            baseboard_id.serial_number.clone(),
+                                        ),
+                                    )
+                                    .and(dsl_sw_rot_page::data_base64.eq(
+                                        found_rot_page.page.data_base64.clone(),
+                                    )),
+                            ),
+                        )
+                        .select((
+                            dsl_baseboard_id::id,
+                            dsl_sw_rot_page::id,
+                            collection_id.into_sql::<diesel::sql_types::Uuid>(),
+                            found_rot_page
+                                .time_collected
+                                .into_sql::<diesel::sql_types::Timestamptz>(),
+                            found_rot_page
+                                .source
+                                .clone()
+                                .into_sql::<diesel::sql_types::Text>(),
+                            db_which.into_sql::<RotPageWhichEnum>(),
+                        ));
+
+                    let _ = diesel::insert_into(
+                        db::schema::inv_root_of_trust_page::table,
+                    )
+                    .values(selection)
+                    .into_columns((
+                        dsl_inv_rot_page::hw_baseboard_id,
+                        dsl_inv_rot_page::sw_root_of_trust_page_id,
+                        dsl_inv_rot_page::inv_collection_id,
+                        dsl_inv_rot_page::time_collected,
+                        dsl_inv_rot_page::source,
+                        dsl_inv_rot_page::which,
+                    ))
+                    .execute_async(&conn)
+                    .await?;
+
+                    // See the comments above. The same applies here. If you
+                    // update the statement below because the schema for
+                    // `inv_root_of_trust_page` has changed, be sure to update
+                    // the code above, too!
+                    let (
+                        _hw_baseboard_id,
+                        _sw_root_of_trust_page_id,
+                        _inv_collection_id,
+                        _time_collected,
+                        _source,
+                        _which,
+                    ) = dsl_inv_rot_page::inv_root_of_trust_page::all_columns();
+                }
+            }
+
             // Finally, insert the list of errors.
             {
                 use db::schema::inv_collection_error::dsl as errors_dsl;
@@ -720,7 +825,7 @@ impl DataStore {
         // start removing it and we'd also need to make sure we didn't leak a
         // collection if we crash while deleting it.
         let conn = self.pool_connection_authorized(opctx).await?;
-        let (ncollections, nsps, nrots, ncabooses, nerrors) = conn
+        let (ncollections, nsps, nrots, ncabooses, nrot_pages, nerrors) = conn
             .transaction_async(|conn| async move {
                 // Remove the record describing the collection itself.
                 let ncollections = {
@@ -729,7 +834,7 @@ impl DataStore {
                         dsl::inv_collection.filter(dsl::id.eq(collection_id)),
                     )
                    .execute_async(&conn)
-                    .await?;
+                    .await?
                 };
 
                 // Remove rows for service processors.
@@ -740,7 +845,7 @@ impl DataStore {
                             .filter(dsl::inv_collection_id.eq(collection_id)),
                    )
                    .execute_async(&conn)
-                    .await?;
+                    .await?
                 };
 
                 // Remove rows for roots of trust.
@@ -751,7 +856,7 @@ impl DataStore {
                            .filter(dsl::inv_collection_id.eq(collection_id)),
                    )
                    .execute_async(&conn)
-                    .await?;
+                    .await?
                 };
 
                 // Remove rows for cabooses found.
@@ -762,7 +867,18 @@ impl DataStore {
                            .filter(dsl::inv_collection_id.eq(collection_id)),
                    )
                    .execute_async(&conn)
-                    .await?;
+                    .await?
+                };
+
+                // Remove rows for root of trust pages found.
+                let nrot_pages = {
+                    use db::schema::inv_root_of_trust_page::dsl;
+                    diesel::delete(
+                        dsl::inv_root_of_trust_page
+                            .filter(dsl::inv_collection_id.eq(collection_id)),
+                    )
+                    .execute_async(&conn)
+                    .await?
                 };
 
                 // Remove rows for errors encountered.
@@ -773,10 +889,10 @@ impl DataStore {
                            .filter(dsl::inv_collection_id.eq(collection_id)),
                    )
                    .execute_async(&conn)
-                    .await?;
+                    .await?
                 };
 
-                Ok((ncollections, nsps, nrots, ncabooses, nerrors))
+                Ok((ncollections, nsps, nrots, ncabooses, nrot_pages, nerrors))
             })
             .await
            .map_err(|error| match error {
@@ -792,36 +908,69 @@ impl DataStore {
             "nsps" => nsps,
             "nrots" => nrots,
             "ncabooses" => ncabooses,
+            "nrot_pages" => nrot_pages,
             "nerrors" => nerrors,
         );
 
         Ok(())
     }
 
+    // Find the primary key for `hw_baseboard_id` given a `BaseboardId`
+    pub async fn find_hw_baseboard_id(
+        &self,
+        opctx: &OpContext,
+        baseboard_id: BaseboardId,
+    ) -> Result<Uuid, Error> {
+        opctx.authorize(authz::Action::Read, &authz::INVENTORY).await?;
+        let conn = self.pool_connection_authorized(opctx).await?;
+        use db::schema::hw_baseboard_id::dsl;
+        dsl::hw_baseboard_id
+            .filter(dsl::serial_number.eq(baseboard_id.serial_number.clone()))
+            .filter(dsl::part_number.eq(baseboard_id.part_number.clone()))
+            .select(dsl::id)
+            .first_async::<Uuid>(&*conn)
+            .await
+            .map_err(|e| {
+                public_error_from_diesel_lookup(
+                    e,
+                    ResourceType::Sled,
+                    &LookupType::ByCompositeId(format!("{baseboard_id:?}")),
+                )
+            })
+    }
+
     /// Attempt to read the latest collection while limiting queries to `limit`
     /// records
+    ///
+    /// If there aren't any collections, return `Ok(None)`.
     pub async fn inventory_get_latest_collection(
         &self,
         opctx: &OpContext,
         limit: NonZeroU32,
-    ) -> Result<Collection, Error> {
+    ) -> Result<Option<Collection>, Error> {
         opctx.authorize(authz::Action::Read, &authz::INVENTORY).await?;
         let conn = self.pool_connection_authorized(opctx).await?;
         use db::schema::inv_collection::dsl;
         let collection_id = dsl::inv_collection
             .select(dsl::id)
             .order_by(dsl::time_started.desc())
-            .limit(1)
             .first_async::<Uuid>(&*conn)
             .await
+            .optional()
             .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
 
-        self.inventory_collection_read_all_or_nothing(
-            opctx,
-            collection_id,
-            limit,
-        )
-        .await
+        let Some(collection_id) = collection_id else {
+            return Ok(None);
+        };
+
+        Ok(Some(
+            self.inventory_collection_read_all_or_nothing(
+                opctx,
+                collection_id,
+                limit,
+            )
+            .await?,
+        ))
     }
 
     /// Attempt to read the given collection while limiting queries to `limit`
@@ -1068,6 +1217,88 @@ impl DataStore {
             );
         }
 
+        // Fetch records of RoT pages found.
+        let inv_rot_page_rows = {
+            use db::schema::inv_root_of_trust_page::dsl;
+            dsl::inv_root_of_trust_page
+                .filter(dsl::inv_collection_id.eq(id))
+                .limit(sql_limit)
+                .select(InvRotPage::as_select())
+                .load_async(&*conn)
+                .await
+                .map_err(|e| {
+                    public_error_from_diesel(e, ErrorHandler::Server)
+                })?
+        };
+        limit_reached = limit_reached || inv_rot_page_rows.len() == usize_limit;
+
+        // Collect the unique sw_rot_page_ids for those pages.
+        let sw_rot_page_ids: BTreeSet<_> = inv_rot_page_rows
+            .iter()
+            .map(|inv_rot_page| inv_rot_page.sw_root_of_trust_page_id)
+            .collect();
+        // Fetch the corresponding records.
+        let rot_pages_by_id: BTreeMap<_, _> = {
+            use db::schema::sw_root_of_trust_page::dsl;
+            dsl::sw_root_of_trust_page
+                .filter(dsl::id.eq_any(sw_rot_page_ids))
+                .limit(sql_limit)
+                .select(SwRotPage::as_select())
+                .load_async(&*conn)
+                .await
+                .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?
+                .into_iter()
+                .map(|sw_rot_page_row| {
+                    (
+                        sw_rot_page_row.id,
+                        Arc::new(nexus_types::inventory::RotPage::from(
+                            sw_rot_page_row,
+                        )),
+                    )
+                })
+                .collect()
+        };
+        limit_reached = limit_reached || rot_pages_by_id.len() == usize_limit;
+
+        // Assemble the lists of rot pages found.
+ let mut rot_pages_found = BTreeMap::new(); + for p in inv_rot_page_rows { + let by_baseboard = rot_pages_found + .entry(nexus_types::inventory::RotPageWhich::from(p.which)) + .or_insert_with(BTreeMap::new); + let Some(bb) = baseboards_by_id.get(&p.hw_baseboard_id) else { + let msg = format!( + "unknown baseboard found in inv_root_of_trust_page: {}", + p.hw_baseboard_id + ); + return Err(Error::internal_error(&msg)); + }; + let Some(sw_rot_page) = + rot_pages_by_id.get(&p.sw_root_of_trust_page_id) + else { + let msg = format!( + "unknown rot page found in inv_root_of_trust_page: {}", + p.sw_root_of_trust_page_id + ); + return Err(Error::internal_error(&msg)); + }; + + let previous = by_baseboard.insert( + bb.clone(), + nexus_types::inventory::RotPageFound { + time_collected: p.time_collected, + source: p.source, + page: sw_rot_page.clone(), + }, + ); + bail_unless!( + previous.is_none(), + "duplicate rot page found: {:?} baseboard {:?}", + p.which, + p.hw_baseboard_id + ); + } + Ok(( Collection { id, @@ -1077,9 +1308,11 @@ impl DataStore { collector, baseboards: baseboards_by_id.values().cloned().collect(), cabooses: cabooses_by_id.values().cloned().collect(), + rot_pages: rot_pages_by_id.values().cloned().collect(), sps, rots, cabooses_found, + rot_pages_found, }, limit_reached, )) @@ -1139,8 +1372,11 @@ mod test { use nexus_inventory::examples::Representative; use nexus_test_utils::db::test_setup_database; use nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL; + use nexus_types::inventory::BaseboardId; use nexus_types::inventory::CabooseWhich; use nexus_types::inventory::Collection; + use nexus_types::inventory::RotPageWhich; + use omicron_common::api::external::Error; use omicron_test_utils::dev; use std::num::NonZeroU32; use uuid::Uuid; @@ -1156,28 +1392,62 @@ mod test { .await?) 
     }
 
-    async fn count_baseboards_cabooses(
-        conn: &DataStoreConnection<'_>,
-    ) -> anyhow::Result<(usize, usize)> {
-        conn.transaction_async(|conn| async move {
-            conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL).await.unwrap();
-            let bb_count = schema::hw_baseboard_id::dsl::hw_baseboard_id
-                .select(diesel::dsl::count_star())
-                .first_async::<i64>(&conn)
-                .await
-                .context("failed to count baseboards")?;
-            let caboose_count = schema::sw_caboose::dsl::sw_caboose
-                .select(diesel::dsl::count_star())
-                .first_async::<i64>(&conn)
-                .await
-                .context("failed to count cabooses")?;
-            let bb_count_usize = usize::try_from(bb_count)
-                .context("failed to convert baseboard count to usize")?;
-            let caboose_count_usize = usize::try_from(caboose_count)
-                .context("failed to convert caboose count to usize")?;
-            Ok((bb_count_usize, caboose_count_usize))
-        })
-        .await
+    struct CollectionCounts {
+        baseboards: usize,
+        cabooses: usize,
+        rot_pages: usize,
+    }
+
+    impl CollectionCounts {
+        async fn new(conn: &DataStoreConnection<'_>) -> anyhow::Result<Self> {
+            conn.transaction_async(|conn| async move {
+                conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL)
+                    .await
+                    .unwrap();
+                let bb_count = schema::hw_baseboard_id::dsl::hw_baseboard_id
+                    .select(diesel::dsl::count_star())
+                    .first_async::<i64>(&conn)
+                    .await
+                    .context("failed to count baseboards")?;
+                let caboose_count = schema::sw_caboose::dsl::sw_caboose
+                    .select(diesel::dsl::count_star())
+                    .first_async::<i64>(&conn)
+                    .await
+                    .context("failed to count cabooses")?;
+                let rot_page_count =
+                    schema::sw_root_of_trust_page::dsl::sw_root_of_trust_page
+                        .select(diesel::dsl::count_star())
+                        .first_async::<i64>(&conn)
+                        .await
+                        .context("failed to count rot pages")?;
+                let baseboards = usize::try_from(bb_count)
+                    .context("failed to convert baseboard count to usize")?;
+                let cabooses = usize::try_from(caboose_count)
+                    .context("failed to convert caboose count to usize")?;
+                let rot_pages = usize::try_from(rot_page_count)
+                    .context("failed to convert rot page count to usize")?;
+                Ok(Self { baseboards, cabooses, rot_pages })
+            })
+            .await
+        }
+    }
+
+    #[tokio::test]
+    async fn test_find_hw_baseboard_id_missing_returns_not_found() {
+        let logctx = dev::test_setup_log("inventory_insert");
+        let mut db = test_setup_database(&logctx.log).await;
+        let (opctx, datastore) = datastore_test(&logctx, &db).await;
+        let baseboard_id = BaseboardId {
+            serial_number: "some-serial".into(),
+            part_number: "some-part".into(),
+        };
+        let err = datastore
+            .find_hw_baseboard_id(&opctx, baseboard_id)
+            .await
+            .unwrap_err();
+        assert!(matches!(err, Error::ObjectNotFound { .. }));
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
     }
 
     /// Tests inserting several collections, reading them back, and making sure
@@ -1205,14 +1475,15 @@ mod test {
             .expect("failed to read collection back");
         assert_eq!(collection1, collection_read);
 
-        // There ought to be no baseboards or cabooses in the databases from
-        // that collection.
+        // There ought to be no baseboards, cabooses, or RoT pages in the
+        // databases from that collection.
assert_eq!(collection1.baseboards.len(), 0); assert_eq!(collection1.cabooses.len(), 0); - let (nbaseboards, ncabooses) = - count_baseboards_cabooses(&conn).await.unwrap(); - assert_eq!(collection1.baseboards.len(), nbaseboards); - assert_eq!(collection1.cabooses.len(), ncabooses); + assert_eq!(collection1.rot_pages.len(), 0); + let coll_counts = CollectionCounts::new(&conn).await.unwrap(); + assert_eq!(collection1.baseboards.len(), coll_counts.baseboards); + assert_eq!(collection1.cabooses.len(), coll_counts.cabooses); + assert_eq!(collection1.rot_pages.len(), coll_counts.rot_pages); // Now insert a more complex collection, write it to the database, and // read it back. @@ -1227,14 +1498,16 @@ mod test { .await .expect("failed to read collection back"); assert_eq!(collection2, collection_read); - // Verify that we have exactly the set of cabooses and baseboards in the - // databases that came from this first non-empty collection. + // Verify that we have exactly the set of cabooses, baseboards, and RoT + // pages in the databases that came from this first non-empty + // collection. assert_ne!(collection2.baseboards.len(), collection1.baseboards.len()); assert_ne!(collection2.cabooses.len(), collection1.cabooses.len()); - let (nbaseboards, ncabooses) = - count_baseboards_cabooses(&conn).await.unwrap(); - assert_eq!(collection2.baseboards.len(), nbaseboards); - assert_eq!(collection2.cabooses.len(), ncabooses); + assert_ne!(collection2.rot_pages.len(), collection1.rot_pages.len()); + let coll_counts = CollectionCounts::new(&conn).await.unwrap(); + assert_eq!(collection2.baseboards.len(), coll_counts.baseboards); + assert_eq!(collection2.cabooses.len(), coll_counts.cabooses); + assert_eq!(collection2.rot_pages.len(), coll_counts.rot_pages); // Check that we get an error on the limit being reached for // `read_all_or_nothing` @@ -1249,9 +1522,9 @@ mod test { .is_err()); // Now insert an equivalent collection again. Verify the distinct - // baseboards and cabooses again. This is important: the insertion - // process should re-use the baseboards and cabooses from the previous - // collection. + // baseboards, cabooses, and RoT pages again. This is important: the + // insertion process should re-use the baseboards, cabooses, and RoT + // pages from the previous collection. let Representative { builder, .. } = representative(); let collection3 = builder.build(); datastore @@ -1263,18 +1536,19 @@ mod test { .await .expect("failed to read collection back"); assert_eq!(collection3, collection_read); - // Verify that we have the same number of cabooses and baseboards, since - // those didn't change. + // Verify that we have the same number of cabooses, baseboards, and RoT + // pages, since those didn't change. assert_eq!(collection3.baseboards.len(), collection2.baseboards.len()); assert_eq!(collection3.cabooses.len(), collection2.cabooses.len()); - let (nbaseboards, ncabooses) = - count_baseboards_cabooses(&conn).await.unwrap(); - assert_eq!(collection3.baseboards.len(), nbaseboards); - assert_eq!(collection3.cabooses.len(), ncabooses); + assert_eq!(collection3.rot_pages.len(), collection2.rot_pages.len()); + let coll_counts = CollectionCounts::new(&conn).await.unwrap(); + assert_eq!(collection3.baseboards.len(), coll_counts.baseboards); + assert_eq!(collection3.cabooses.len(), coll_counts.cabooses); + assert_eq!(collection3.rot_pages.len(), coll_counts.rot_pages); // Now insert a collection that's almost equivalent, but has an extra - // couple of baseboards and caboose. 
Verify that we re-use the existing - // ones, but still insert the new ones. + // couple of baseboards, one caboose, and one RoT page. Verify that we + // re-use the existing ones, but still insert the new ones. let Representative { mut builder, .. } = representative(); builder.found_sp_state( "test suite", @@ -1298,6 +1572,14 @@ mod test { nexus_inventory::examples::caboose("dummy"), ) .unwrap(); + builder + .found_rot_page( + &bb, + RotPageWhich::Cmpa, + "dummy", + nexus_inventory::examples::rot_page("dummy"), + ) + .unwrap(); let collection4 = builder.build(); datastore .inventory_insert_collection(&opctx, &collection4) @@ -1313,14 +1595,15 @@ mod test { collection4.baseboards.len(), collection3.baseboards.len() + 2 ); + assert_eq!(collection4.cabooses.len(), collection3.cabooses.len() + 1); assert_eq!( - collection4.cabooses.len(), - collection3.baseboards.len() + 1 + collection4.rot_pages.len(), + collection3.rot_pages.len() + 1 ); - let (nbaseboards, ncabooses) = - count_baseboards_cabooses(&conn).await.unwrap(); - assert_eq!(collection4.baseboards.len(), nbaseboards); - assert_eq!(collection4.cabooses.len(), ncabooses); + let coll_counts = CollectionCounts::new(&conn).await.unwrap(); + assert_eq!(collection4.baseboards.len(), coll_counts.baseboards); + assert_eq!(collection4.cabooses.len(), coll_counts.cabooses); + assert_eq!(collection4.rot_pages.len(), coll_counts.rot_pages); // This time, go back to our earlier collection. This logically removes // some baseboards. They should still be present in the database, but @@ -1338,12 +1621,14 @@ mod test { assert_eq!(collection5, collection_read); assert_eq!(collection5.baseboards.len(), collection3.baseboards.len()); assert_eq!(collection5.cabooses.len(), collection3.cabooses.len()); + assert_eq!(collection5.rot_pages.len(), collection3.rot_pages.len()); assert_ne!(collection5.baseboards.len(), collection4.baseboards.len()); assert_ne!(collection5.cabooses.len(), collection4.cabooses.len()); - let (nbaseboards, ncabooses) = - count_baseboards_cabooses(&conn).await.unwrap(); - assert_eq!(collection4.baseboards.len(), nbaseboards); - assert_eq!(collection4.cabooses.len(), ncabooses); + assert_ne!(collection5.rot_pages.len(), collection4.rot_pages.len()); + let coll_counts = CollectionCounts::new(&conn).await.unwrap(); + assert_eq!(collection4.baseboards.len(), coll_counts.baseboards); + assert_eq!(collection4.cabooses.len(), coll_counts.cabooses); + assert_eq!(collection4.rot_pages.len(), coll_counts.rot_pages); // Try to insert the same collection again and make sure it fails. let error = datastore @@ -1536,10 +1821,10 @@ mod test { .expect("failed to check that tables were empty"); // We currently keep the baseboard ids and sw_cabooses around. - let (nbaseboards, ncabooses) = - count_baseboards_cabooses(&conn).await.unwrap(); - assert_ne!(nbaseboards, 0); - assert_ne!(ncabooses, 0); + let coll_counts = CollectionCounts::new(&conn).await.unwrap(); + assert_ne!(coll_counts.baseboards, 0); + assert_ne!(coll_counts.cabooses, 0); + assert_ne!(coll_counts.rot_pages, 0); // Clean up. 
db.cleanup().await.unwrap(); diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index fb300ef833..4497e3f2b4 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -194,11 +194,9 @@ impl DataStore { .optional() .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if range.is_some() { - return Err(Error::InvalidRequest { - message: - "IP Pool cannot be deleted while it contains IP ranges" - .to_string(), - }); + return Err(Error::invalid_request( + "IP Pool cannot be deleted while it contains IP ranges", + )); } // Delete the pool, conditional on the rcgen not having changed. This @@ -224,10 +222,9 @@ impl DataStore { })?; if updated_rows == 0 { - return Err(Error::InvalidRequest { - message: "deletion failed due to concurrent modification" - .to_string(), - }); + return Err(Error::invalid_request( + "deletion failed due to concurrent modification", + )); } Ok(()) } diff --git a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs b/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs index 274937b299..a44fed4cdf 100644 --- a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs +++ b/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs @@ -36,6 +36,7 @@ impl DataStore { .filter(dsl::sled_address.eq(nat_entry.sled_address)) .filter(dsl::vni.eq(nat_entry.vni)) .filter(dsl::mac.eq(nat_entry.mac)) + .filter(dsl::version_removed.is_null()) .select(( dsl::external_address, dsl::first_port, @@ -123,9 +124,7 @@ impl DataStore { if let Some(nat_entry) = result.first() { Ok(nat_entry.clone()) } else { - Err(Error::InvalidRequest { - message: "no matching records".to_string(), - }) + Err(Error::invalid_request("no matching records")) } } @@ -184,9 +183,7 @@ impl DataStore { if let Some(nat_entry) = result.first() { Ok(nat_entry.clone()) } else { - Err(Error::InvalidRequest { - message: "no matching records".to_string(), - }) + Err(Error::invalid_request("no matching records")) } } @@ -240,9 +237,7 @@ impl DataStore { match latest { Some(value) => Ok(value), - None => Err(Error::InvalidRequest { - message: "sequence table is empty!".to_string(), - }), + None => Err(Error::invalid_request("sequence table is empty!")), } } @@ -275,7 +270,7 @@ mod test { use crate::db::datastore::datastore_test; use chrono::Utc; - use nexus_db_model::{Ipv4NatValues, MacAddr, Vni}; + use nexus_db_model::{Ipv4NatEntry, Ipv4NatValues, MacAddr, Vni}; use nexus_test_utils::db::test_setup_database; use omicron_common::api::external; use omicron_test_utils::dev; @@ -427,7 +422,6 @@ mod test { datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap(); assert_eq!(nat_entries.len(), 1); - // version should be unchanged assert_eq!( datastore.ipv4_nat_current_version(&opctx).await.unwrap(), @@ -437,4 +431,150 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + #[tokio::test] + /// Table design and queries should only insert one active NAT entry for a given + /// set of properties, but allow multiple deleted nat entries for the same set + /// of properties. 
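[Editorial aside, not part of the patch: the invariant the test below exercises, restated as a predicate over NAT rows that share one set of properties. A sketch only; it assumes the version_removed field on Ipv4NatEntry used later in the test.]

// At most one row may be active (version_removed is None); any number of
// soft-deleted rows for the same properties may accumulate over time.
fn nat_rows_satisfy_invariant(rows: &[Ipv4NatEntry]) -> bool {
    rows.iter().filter(|r| r.version_removed.is_none()).count() <= 1
}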
+    async fn table_allows_unique_active_multiple_deleted() {
+        let logctx = dev::test_setup_log("test_nat_version_tracking");
+        let mut db = test_setup_database(&logctx.log).await;
+        let (opctx, datastore) = datastore_test(&logctx, &db).await;
+
+        // We should not have any NAT entries at this moment
+        let initial_state =
+            datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap();
+
+        assert!(initial_state.is_empty());
+        assert_eq!(
+            datastore.ipv4_nat_current_version(&opctx).await.unwrap(),
+            0
+        );
+
+        // Each change (creation / deletion) to the NAT table should increment the
+        // version number of the row in the NAT table
+        let external_address = external::Ipv4Net(
+            ipnetwork::Ipv4Network::try_from("10.0.0.100").unwrap(),
+        );
+
+        let sled_address = external::Ipv6Net(
+            ipnetwork::Ipv6Network::try_from("fd00:1122:3344:104::1").unwrap(),
+        );
+
+        // Add a nat entry.
+        let nat1 = Ipv4NatValues {
+            external_address: external_address.into(),
+            first_port: 0.into(),
+            last_port: 999.into(),
+            sled_address: sled_address.into(),
+            vni: Vni(external::Vni::random()),
+            mac: MacAddr(
+                external::MacAddr::from_str("A8:40:25:F5:EB:2A").unwrap(),
+            ),
+        };
+
+        datastore.ensure_ipv4_nat_entry(&opctx, nat1.clone()).await.unwrap();
+
+        // Try to add it again. It should still only result in a single entry.
+        datastore.ensure_ipv4_nat_entry(&opctx, nat1.clone()).await.unwrap();
+        let first_entry = datastore
+            .ipv4_nat_find_by_values(&opctx, nat1.clone())
+            .await
+            .unwrap();
+
+        let nat_entries =
+            datastore.ipv4_nat_list_since_version(&opctx, 0, 10).await.unwrap();
+
+        // The NAT table has undergone one change. One entry has been added,
+        // none deleted, so we should be at version 1.
+        assert_eq!(nat_entries.len(), 1);
+        assert_eq!(nat_entries.last().unwrap().version_added, 1);
+        assert_eq!(
+            datastore.ipv4_nat_current_version(&opctx).await.unwrap(),
+            1
+        );
+
+        datastore.ipv4_nat_delete(&opctx, &first_entry).await.unwrap();
+
+        // The NAT table has undergone two changes. One entry has been added,
+        // then deleted, so we should be at version 2.
+        let nat_entries = datastore
+            .ipv4_nat_list_since_version(&opctx, 0, 10)
+            .await
+            .unwrap()
+            .into_iter();
+
+        let active: Vec<Ipv4NatEntry> = nat_entries
+            .clone()
+            .filter(|entry| entry.version_removed.is_none())
+            .collect();
+
+        let inactive: Vec<Ipv4NatEntry> = nat_entries
+            .filter(|entry| entry.version_removed.is_some())
+            .collect();
+
+        assert!(active.is_empty());
+        assert_eq!(inactive.len(), 1);
+        assert_eq!(
+            datastore.ipv4_nat_current_version(&opctx).await.unwrap(),
+            2
+        );
+
+        // Add the same entry back. This simulates the behavior we will see
+        // when stopping and then restarting an instance.
+        datastore.ensure_ipv4_nat_entry(&opctx, nat1.clone()).await.unwrap();
+
+        // The NAT table has undergone three changes.
+        let nat_entries = datastore
+            .ipv4_nat_list_since_version(&opctx, 0, 10)
+            .await
+            .unwrap()
+            .into_iter();
+
+        let active: Vec<Ipv4NatEntry> = nat_entries
+            .clone()
+            .filter(|entry| entry.version_removed.is_none())
+            .collect();
+
+        let inactive: Vec<Ipv4NatEntry> = nat_entries
+            .filter(|entry| entry.version_removed.is_some())
+            .collect();
+
+        assert_eq!(active.len(), 1);
+        assert_eq!(inactive.len(), 1);
+        assert_eq!(
+            datastore.ipv4_nat_current_version(&opctx).await.unwrap(),
+            3
+        );
+
+        let second_entry =
+            datastore.ipv4_nat_find_by_values(&opctx, nat1).await.unwrap();
+        datastore.ipv4_nat_delete(&opctx, &second_entry).await.unwrap();
+
+        // The NAT table has undergone four changes
+        let nat_entries = datastore
+            .ipv4_nat_list_since_version(&opctx, 0, 10)
+            .await
+            .unwrap()
+            .into_iter();
+
+        let active: Vec<Ipv4NatEntry> = nat_entries
+            .clone()
+            .filter(|entry| entry.version_removed.is_none())
+            .collect();
+
+        let inactive: Vec<Ipv4NatEntry> = nat_entries
+            .filter(|entry| entry.version_removed.is_some())
+            .collect();
+
+        assert_eq!(active.len(), 0);
+        assert_eq!(inactive.len(), 2);
+        assert_eq!(
+            datastore.ipv4_nat_current_version(&opctx).await.unwrap(),
+            4
+        );
+
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
 }
diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs
index 8be3386183..761c3f995f 100644
--- a/nexus/db-queries/src/db/datastore/mod.rs
+++ b/nexus/db-queries/src/db/datastore/mod.rs
@@ -148,6 +148,7 @@ pub type DataStoreConnection<'a> =
 pub struct DataStore {
     pool: Arc<Pool>,
     virtual_provisioning_collection_producer: crate::provisioning::Producer,
+    transaction_retry_producer: crate::transaction_retry::Producer,
 }
 
 // The majority of `DataStore`'s methods live in our submodules as a concession
@@ -164,6 +165,8 @@ impl DataStore {
             pool,
             virtual_provisioning_collection_producer:
                 crate::provisioning::Producer::new(),
+            transaction_retry_producer: crate::transaction_retry::Producer::new(
+            ),
         };
         Ok(datastore)
     }
@@ -210,6 +213,29 @@ impl DataStore {
             self.virtual_provisioning_collection_producer.clone(),
         )
         .unwrap();
+        registry
+            .register_producer(self.transaction_retry_producer.clone())
+            .unwrap();
+    }
+
+    /// Constructs a transaction retry helper
+    ///
+    /// Automatically wraps the underlying producer
+    pub fn transaction_retry_wrapper(
+        &self,
+        name: &'static str,
+    ) -> crate::transaction_retry::RetryHelper {
+        crate::transaction_retry::RetryHelper::new(
+            &self.transaction_retry_producer,
+            name,
+        )
+    }
+
+    #[cfg(test)]
+    pub(crate) fn transaction_retry_producer(
+        &self,
+    ) -> &crate::transaction_retry::Producer {
+        &self.transaction_retry_producer
+    }
 
     /// Returns a connection to a connection from the database connection pool.
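[Editorial aside, not part of the patch: what a call site of the new transaction_retry_wrapper looks like, modeled on the saml_identity_provider_create change earlier in this diff. The transaction name, table, and captured value are placeholders; because the closure can be re-invoked on retryable errors, anything it consumes is cloned per attempt.]

let conn = self.pool_connection_authorized(opctx).await?;
self.transaction_retry_wrapper("example_operation")
    .transaction(&conn, |conn| {
        // Clone captured state: a retry re-runs the closure from scratch.
        let row = row.clone();
        async move {
            diesel::insert_into(dsl::example_table)
                .values(row)
                .execute_async(&conn)
                .await?;
            Ok(())
        }
    })
    .await
// ...then map any remaining error into a public error as the caller requires.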
@@ -371,9 +397,9 @@ mod test { use crate::db::model::{ BlockSize, ComponentUpdate, ComponentUpdateIdentity, ConsoleSession, Dataset, DatasetKind, ExternalIp, PhysicalDisk, PhysicalDiskKind, - Project, Rack, Region, Service, ServiceKind, SiloUser, Sled, - SledBaseboard, SledSystemHardware, SshKey, SystemUpdate, - UpdateableComponentType, VpcSubnet, Zpool, + Project, Rack, Region, Service, ServiceKind, SiloUser, SledBaseboard, + SledProvisionState, SledSystemHardware, SledUpdate, SshKey, + SystemUpdate, UpdateableComponentType, VpcSubnet, Zpool, }; use crate::db::queries::vpc_subnet::FilterConflictingVpcSubnetRangesQuery; use assert_matches::assert_matches; @@ -599,17 +625,46 @@ mod test { let rack_id = Uuid::new_v4(); let sled_id = Uuid::new_v4(); - let sled = Sled::new( + let sled_update = SledUpdate::new( sled_id, bogus_addr, sled_baseboard_for_test(), sled_system_hardware_for_test(), rack_id, ); - datastore.sled_upsert(sled).await.unwrap(); + datastore.sled_upsert(sled_update).await.unwrap(); sled_id } + // Marks a sled as non-provisionable. + async fn mark_sled_non_provisionable( + datastore: &DataStore, + opctx: &OpContext, + sled_id: Uuid, + ) { + let (authz_sled, sled) = LookupPath::new(opctx, datastore) + .sled_id(sled_id) + .fetch_for(authz::Action::Modify) + .await + .unwrap(); + println!("sled: {:?}", sled); + let old_state = datastore + .sled_set_provision_state( + &opctx, + &authz_sled, + SledProvisionState::NonProvisionable, + ) + .await + .unwrap_or_else(|error| { + panic!( + "error marking sled {sled_id} as non-provisionable: {error}" + ) + }); + // The old state should always be provisionable since that's where we + // start. + assert_eq!(old_state, SledProvisionState::Provisionable); + } + fn test_zpool_size() -> ByteCount { ByteCount::from_gibibytes_u32(100) } @@ -770,13 +825,24 @@ mod test { let logctx = dev::test_setup_log("test_region_allocation_strat_random"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - create_test_datasets_for_region_allocation( + let test_datasets = create_test_datasets_for_region_allocation( &opctx, datastore.clone(), + // Even though we're going to mark one sled as non-provisionable to + // test that logic, we aren't forcing the datasets to be on + // distinct sleds, so REGION_REDUNDANCY_THRESHOLD is enough. REGION_REDUNDANCY_THRESHOLD, ) .await; + let non_provisionable_dataset_id = test_datasets[0].dataset_id; + mark_sled_non_provisionable( + &datastore, + &opctx, + test_datasets[0].sled_id, + ) + .await; + // Allocate regions from the datasets for this disk. Do it a few times // for good measure. for alloc_seed in 0..10 { @@ -809,6 +875,9 @@ mod test { // Must be 3 unique datasets assert!(disk_datasets.insert(dataset.id())); + // Dataset must not be non-provisionable. + assert_ne!(dataset.id(), non_provisionable_dataset_id); + // Must be 3 unique zpools assert!(disk_zpools.insert(dataset.pool_id)); @@ -837,12 +906,23 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - // Create a rack without enough sleds for a successful allocation when - // we require 3 distinct sleds. + // Create a rack with enough sleds for a successful allocation when we + // require 3 distinct provisionable sleds. 
let test_datasets = create_test_datasets_for_region_allocation( &opctx, datastore.clone(), - REGION_REDUNDANCY_THRESHOLD, + // We're going to mark one sled as non-provisionable to test that + // logic, and we *are* forcing the datasets to be on distinct + // sleds: hence threshold + 1. + REGION_REDUNDANCY_THRESHOLD + 1, + ) + .await; + + let non_provisionable_dataset_id = test_datasets[0].dataset_id; + mark_sled_non_provisionable( + &datastore, + &opctx, + test_datasets[0].sled_id, ) .await; @@ -884,6 +964,9 @@ mod test { // Must be 3 unique datasets assert!(disk_datasets.insert(dataset.id())); + // Dataset must not be non-provisionable. + assert_ne!(dataset.id(), non_provisionable_dataset_id); + // Must be 3 unique zpools assert!(disk_zpools.insert(dataset.pool_id)); @@ -916,11 +999,22 @@ mod test { let (opctx, datastore) = datastore_test(&logctx, &db).await; // Create a rack without enough sleds for a successful allocation when - // we require 3 distinct sleds. - create_test_datasets_for_region_allocation( + // we require 3 distinct provisionable sleds. + let test_datasets = create_test_datasets_for_region_allocation( &opctx, datastore.clone(), - REGION_REDUNDANCY_THRESHOLD - 1, + // Here, we need to have REGION_REDUNDANCY_THRESHOLD - 1 + // provisionable sleds to test this failure condition. We're going + // to mark one sled as non-provisionable to test that logic, so we + // need to add 1 to that number. + REGION_REDUNDANCY_THRESHOLD, + ) + .await; + + mark_sled_non_provisionable( + &datastore, + &opctx, + test_datasets[0].sled_id, ) .await; @@ -952,7 +1046,7 @@ mod test { "Saw error: \'{err}\', but expected \'{expected}\'" ); - assert!(matches!(err, Error::ServiceUnavailable { .. })); + assert!(matches!(err, Error::InsufficientCapacity { .. })); } let _ = db.cleanup().await; @@ -1097,7 +1191,7 @@ mod test { "Saw error: \'{err}\', but expected \'{expected}\'" ); - assert!(matches!(err, Error::ServiceUnavailable { .. })); + assert!(matches!(err, Error::InsufficientCapacity { .. 
})); let _ = db.cleanup().await; logctx.cleanup_successful(); @@ -1205,7 +1299,7 @@ mod test { let rack_id = Uuid::new_v4(); let addr1 = "[fd00:1de::1]:12345".parse().unwrap(); let sled1_id = "0de4b299-e0b4-46f0-d528-85de81a7095f".parse().unwrap(); - let sled1 = db::model::Sled::new( + let sled1 = db::model::SledUpdate::new( sled1_id, addr1, sled_baseboard_for_test(), @@ -1216,7 +1310,7 @@ mod test { let addr2 = "[fd00:1df::1]:12345".parse().unwrap(); let sled2_id = "66285c18-0c79-43e0-e54f-95271f271314".parse().unwrap(); - let sled2 = db::model::Sled::new( + let sled2 = db::model::SledUpdate::new( sled2_id, addr2, sled_baseboard_for_test(), @@ -1567,6 +1661,7 @@ mod test { time_deleted: None, ip_pool_id: Uuid::new_v4(), ip_pool_range_id: Uuid::new_v4(), + project_id: None, is_service: false, parent_id: Some(instance_id), kind: IpKind::Ephemeral, @@ -1627,6 +1722,7 @@ mod test { time_deleted: None, ip_pool_id: Uuid::new_v4(), ip_pool_range_id: Uuid::new_v4(), + project_id: None, is_service: false, parent_id: Some(Uuid::new_v4()), kind: IpKind::SNat, @@ -1673,6 +1769,7 @@ mod test { use crate::db::model::IpKind; use crate::db::schema::external_ip::dsl; use diesel::result::DatabaseErrorKind::CheckViolation; + use diesel::result::DatabaseErrorKind::UniqueViolation; use diesel::result::Error::DatabaseError; let logctx = dev::test_setup_log("test_external_ip_check_constraints"); @@ -1697,6 +1794,7 @@ mod test { time_deleted: None, ip_pool_id: Uuid::new_v4(), ip_pool_range_id: Uuid::new_v4(), + project_id: None, is_service: false, parent_id: Some(Uuid::new_v4()), kind: IpKind::Floating, @@ -1709,151 +1807,190 @@ mod test { // - name // - description // - parent (instance / service) UUID - let names = [ - None, - Some(db::model::Name(Name::try_from("foo".to_string()).unwrap())), - ]; + // - project UUID + let names = [None, Some("foo")]; let descriptions = [None, Some("foo".to_string())]; let parent_ids = [None, Some(Uuid::new_v4())]; + let project_ids = [None, Some(Uuid::new_v4())]; + + let mut seen_pairs = HashSet::new(); // For Floating IPs, both name and description must be non-NULL - for name in names.iter() { - for description in descriptions.iter() { - for parent_id in parent_ids.iter() { - for is_service in [false, true] { - let new_ip = ExternalIp { - id: Uuid::new_v4(), - name: name.clone(), - description: description.clone(), - ip: addresses.next().unwrap().into(), - is_service, - parent_id: *parent_id, - ..ip - }; - let res = diesel::insert_into(dsl::external_ip) - .values(new_ip) - .execute_async(&*conn) - .await; - if name.is_some() && description.is_some() { - // Name/description must be non-NULL, instance ID can be - // either - res.unwrap_or_else(|_| { - panic!( - "Failed to insert Floating IP with valid \ - name, description, and {} ID", - if is_service { - "Service" - } else { - "Instance" - } - ) - }); - } else { - // At least one is not valid, we expect a check violation - let err = res.expect_err( - "Expected a CHECK violation when inserting a \ - Floating IP record with NULL name and/or description", - ); - assert!( - matches!( - err, - DatabaseError( - CheckViolation, - _ - ) - ), - "Expected a CHECK violation when inserting a \ - Floating IP record with NULL name and/or description", - ); - } - } - } + // If they are instance FIPs, they *must* have a project id. 
+ for ( + name, + description, + parent_id, + is_service, + project_id, + modify_name, + ) in itertools::iproduct!( + &names, + &descriptions, + &parent_ids, + [false, true], + &project_ids, + [false, true] + ) { + // Both choices of parent_id are valid, so we need a unique name for each. + let name_local = name.map(|v| { + let name = if modify_name { + v.to_string() + } else { + format!("{v}-with-parent") + }; + db::model::Name(Name::try_from(name).unwrap()) + }); + + // We do name duplicate checking on the `Some` branch, don't steal the + // name intended for another floating IP. + if parent_id.is_none() && modify_name { + continue; + } + + let new_ip = ExternalIp { + id: Uuid::new_v4(), + name: name_local.clone(), + description: description.clone(), + ip: addresses.next().unwrap().into(), + is_service, + parent_id: *parent_id, + project_id: *project_id, + ..ip + }; + + let key = (*project_id, name_local); + + let res = diesel::insert_into(dsl::external_ip) + .values(new_ip) + .execute_async(&*conn) + .await; + + let project_as_expected = (is_service && project_id.is_none()) + || (!is_service && project_id.is_some()); + + let valid_expression = + name.is_some() && description.is_some() && project_as_expected; + let name_exists = seen_pairs.contains(&key); + + if valid_expression && !name_exists { + // Name/description must be non-NULL, instance ID can be + // either + // Names must be unique at fleet level and at project level. + // Project must be NULL if service, non-NULL if instance. + res.unwrap_or_else(|e| { + panic!( + "Failed to insert Floating IP with valid \ + name, description, project ID, and {} ID:\ + {name:?} {description:?} {project_id:?} {:?}\n{e}", + if is_service { "Service" } else { "Instance" }, + &ip.parent_id + ) + }); + + seen_pairs.insert(key); + } else if !valid_expression { + // Several permutations are invalid and we want to detect them all. + // NOTE: CHECK violation will supersede UNIQUE violation below. + let err = res.expect_err( + "Expected a CHECK violation when inserting a \ + Floating IP record with NULL name and/or description, \ + and incorrect project parent relation", + ); + assert!( + matches!(err, DatabaseError(CheckViolation, _)), + "Expected a CHECK violation when inserting a \ + Floating IP record with NULL name and/or description, \ + and incorrect project parent relation", + ); + } else { + let err = res.expect_err( + "Expected a UNIQUE violation when inserting a \ + Floating IP record with existing (name, project_id)", + ); + assert!( + matches!(err, DatabaseError(UniqueViolation, _)), + "Expected a UNIQUE violation when inserting a \ + Floating IP record with existing (name, project_id)", + ); } } - // For other IP types, both name and description must be NULL - for kind in [IpKind::SNat, IpKind::Ephemeral].into_iter() { - for name in names.iter() { - for description in descriptions.iter() { - for parent_id in parent_ids.iter() { - for is_service in [false, true] { - let new_ip = ExternalIp { - id: Uuid::new_v4(), - name: name.clone(), - description: description.clone(), - kind, - ip: addresses.next().unwrap().into(), - is_service, - parent_id: *parent_id, - ..ip - }; - let res = diesel::insert_into(dsl::external_ip) - .values(new_ip.clone()) - .execute_async(&*conn) - .await; - let ip_type = - if is_service { "Service" } else { "Instance" }; - if name.is_none() - && description.is_none() - && parent_id.is_some() - { - // Name/description must be NULL, instance ID cannot - // be NULL. 
- - if kind == IpKind::Ephemeral && is_service { - // Ephemeral Service IPs aren't supported. - let err = res.unwrap_err(); - assert!( - matches!( - err, - DatabaseError( - CheckViolation, - _ - ) - ), - "Expected a CHECK violation when inserting an \ - Ephemeral Service IP", - ); - } else { - assert!( - res.is_ok(), - "Failed to insert {:?} IP with valid \ - name, description, and {} ID", - kind, - ip_type, - ); - } - } else { - // One is not valid, we expect a check violation - assert!( - res.is_err(), - "Expected a CHECK violation when inserting a \ - {:?} IP record with non-NULL name, description, \ - and/or {} ID", - kind, - ip_type, - ); - let err = res.unwrap_err(); - assert!( - matches!( - err, - DatabaseError( - CheckViolation, - _ - ) - ), - "Expected a CHECK violation when inserting a \ - {:?} IP record with non-NULL name, description, \ - and/or {} ID", - kind, - ip_type, - ); - } - } - } + // For other IP types: name, description and project must be NULL + for (kind, name, description, parent_id, is_service, project_id) in itertools::iproduct!( + [IpKind::SNat, IpKind::Ephemeral], + &names, + &descriptions, + &parent_ids, + [false, true], + &project_ids + ) { + let name_local = name.map(|v| { + db::model::Name(Name::try_from(v.to_string()).unwrap()) + }); + let new_ip = ExternalIp { + id: Uuid::new_v4(), + name: name_local, + description: description.clone(), + kind, + ip: addresses.next().unwrap().into(), + is_service, + parent_id: *parent_id, + project_id: *project_id, + ..ip + }; + let res = diesel::insert_into(dsl::external_ip) + .values(new_ip.clone()) + .execute_async(&*conn) + .await; + let ip_type = if is_service { "Service" } else { "Instance" }; + if name.is_none() + && description.is_none() + && parent_id.is_some() + && project_id.is_none() + { + // Name/description must be NULL, instance ID cannot + // be NULL. + + if kind == IpKind::Ephemeral && is_service { + // Ephemeral Service IPs aren't supported. 
+ let err = res.unwrap_err(); + assert!( + matches!(err, DatabaseError(CheckViolation, _)), + "Expected a CHECK violation when inserting an \ + Ephemeral Service IP", + ); + } else { + assert!( + res.is_ok(), + "Failed to insert {:?} IP with valid \ + name, description, and {} ID", + kind, + ip_type, + ); } + } else { + // One is not valid, we expect a check violation + assert!( + res.is_err(), + "Expected a CHECK violation when inserting a \ + {:?} IP record with non-NULL name, description, \ + and/or {} ID", + kind, + ip_type, + ); + let err = res.unwrap_err(); + assert!( + matches!(err, DatabaseError(CheckViolation, _)), + "Expected a CHECK violation when inserting a \ + {:?} IP record with non-NULL name, description, \ + and/or {} ID", + kind, + ip_type, + ); } } + db.cleanup().await.unwrap(); logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/db/datastore/network_interface.rs b/nexus/db-queries/src/db/datastore/network_interface.rs index 06550e9439..4d4e43c9a7 100644 --- a/nexus/db-queries/src/db/datastore/network_interface.rs +++ b/nexus/db-queries/src/db/datastore/network_interface.rs @@ -13,7 +13,6 @@ use crate::db::collection_insert::DatastoreCollection; use crate::db::cte_utils::BoxedQuery; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; -use crate::db::error::TransactionError; use crate::db::model::IncompleteNetworkInterface; use crate::db::model::Instance; use crate::db::model::InstanceNetworkInterface; @@ -25,7 +24,7 @@ use crate::db::model::VpcSubnet; use crate::db::pagination::paginated; use crate::db::pool::DbConnection; use crate::db::queries::network_interface; -use async_bb8_diesel::AsyncConnection; +use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; @@ -466,77 +465,91 @@ impl DataStore { InstanceNotStopped, FailedToUnsetPrimary(DieselError), } - type TxnError = TransactionError; + + let err = OptionalError::new(); let conn = self.pool_connection_authorized(opctx).await?; if primary { - conn.transaction_async(|conn| async move { - let instance_runtime = - instance_query.get_result_async(&conn).await?.runtime_state; - if instance_runtime.propolis_id.is_some() - || instance_runtime.nexus_state != stopped - { - return Err(TxnError::CustomError( - NetworkInterfaceUpdateError::InstanceNotStopped, - )); - } + self.transaction_retry_wrapper("instance_update_network_interface") + .transaction(&conn, |conn| { + let err = err.clone(); + let stopped = stopped.clone(); + let update_target_query = update_target_query.clone(); + async move { + let instance_runtime = + instance_query.get_result_async(&conn).await?.runtime_state; + if instance_runtime.propolis_id.is_some() + || instance_runtime.nexus_state != stopped + { + return Err(err.bail(NetworkInterfaceUpdateError::InstanceNotStopped)); + } - // First, get the primary interface - let primary_interface = - find_primary_query.get_result_async(&conn).await?; - // If the target and primary are different, we need to toggle - // the primary into a secondary. 
- if primary_interface.identity.id != interface_id { - use crate::db::schema::network_interface::dsl; - if let Err(e) = diesel::update(dsl::network_interface) - .filter(dsl::id.eq(primary_interface.identity.id)) - .filter(dsl::kind.eq(NetworkInterfaceKind::Instance)) - .filter(dsl::time_deleted.is_null()) - .set(dsl::is_primary.eq(false)) - .execute_async(&conn) - .await - { - return Err(TxnError::CustomError( - NetworkInterfaceUpdateError::FailedToUnsetPrimary( - e, - ), - )); - } - } + // First, get the primary interface + let primary_interface = + find_primary_query.get_result_async(&conn).await?; + // If the target and primary are different, we need to toggle + // the primary into a secondary. + if primary_interface.identity.id != interface_id { + use crate::db::schema::network_interface::dsl; + if let Err(e) = diesel::update(dsl::network_interface) + .filter(dsl::id.eq(primary_interface.identity.id)) + .filter(dsl::kind.eq(NetworkInterfaceKind::Instance)) + .filter(dsl::time_deleted.is_null()) + .set(dsl::is_primary.eq(false)) + .execute_async(&conn) + .await + { + return Err(err.bail_retryable_or_else( + e, + |e| NetworkInterfaceUpdateError::FailedToUnsetPrimary(e) + )); + } + } - // In any case, update the actual target - Ok(update_target_query.get_result_async(&conn).await?) - }) + // In any case, update the actual target + update_target_query.get_result_async(&conn).await + } + }).await } else { // In this case, we can just directly apply the updates. By // construction, `updates.primary` is `None`, so nothing will // be done there. The other columns always need to be updated, and // we're only hitting a single row. Note that we still need to // verify the instance is stopped. - conn.transaction_async(|conn| async move { - let instance_state = - instance_query.get_result_async(&conn).await?.runtime_state; - if instance_state.propolis_id.is_some() - || instance_state.nexus_state != stopped - { - return Err(TxnError::CustomError( - NetworkInterfaceUpdateError::InstanceNotStopped, - )); - } - Ok(update_target_query.get_result_async(&conn).await?) - }) + self.transaction_retry_wrapper("instance_update_network_interface") + .transaction(&conn, |conn| { + let err = err.clone(); + let stopped = stopped.clone(); + let update_target_query = update_target_query.clone(); + async move { + let instance_state = + instance_query.get_result_async(&conn).await?.runtime_state; + if instance_state.propolis_id.is_some() + || instance_state.nexus_state != stopped + { + return Err(err.bail(NetworkInterfaceUpdateError::InstanceNotStopped)); + } + update_target_query.get_result_async(&conn).await + } + }).await } - .await // Convert to `InstanceNetworkInterface` before returning, we know // this is valid as we've filtered appropriately above. 
.map(NetworkInterface::as_instance) - .map_err(|e| match e { - TxnError::CustomError( - NetworkInterfaceUpdateError::InstanceNotStopped, - ) => Error::invalid_request( - "Instance must be stopped to update its network interfaces", - ), - _ => Error::internal_error(&format!("Transaction error: {:?}", e)), + .map_err(|e| { + if let Some(err) = err.take() { + match err { + NetworkInterfaceUpdateError::InstanceNotStopped => { + return Error::invalid_request( + "Instance must be stopped to update its network interfaces", + ); + }, + NetworkInterfaceUpdateError::FailedToUnsetPrimary(err) => { + return public_error_from_diesel(err, ErrorHandler::Server); + }, + } + } + public_error_from_diesel(e, ErrorHandler::Server) }) } } diff --git a/nexus/db-queries/src/db/datastore/oximeter.rs b/nexus/db-queries/src/db/datastore/oximeter.rs index 55b650ea53..116e8586b0 100644 --- a/nexus/db-queries/src/db/datastore/oximeter.rs +++ b/nexus/db-queries/src/db/datastore/oximeter.rs @@ -96,6 +96,7 @@ impl DataStore { .do_update() .set(( dsl::time_modified.eq(Utc::now()), + dsl::kind.eq(producer.kind), dsl::ip.eq(producer.ip), dsl::port.eq(producer.port), dsl::interval.eq(producer.interval), diff --git a/nexus/db-queries/src/db/datastore/physical_disk.rs b/nexus/db-queries/src/db/datastore/physical_disk.rs index 3c83b91d21..ecb583ee29 100644 --- a/nexus/db-queries/src/db/datastore/physical_disk.rs +++ b/nexus/db-queries/src/db/datastore/physical_disk.rs @@ -141,7 +141,7 @@ mod test { use crate::db::datastore::test::{ sled_baseboard_for_test, sled_system_hardware_for_test, }; - use crate::db::model::{PhysicalDiskKind, Sled}; + use crate::db::model::{PhysicalDiskKind, Sled, SledUpdate}; use dropshot::PaginationOrder; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Asset; @@ -153,14 +153,14 @@ mod test { let sled_id = Uuid::new_v4(); let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); let rack_id = Uuid::new_v4(); - let sled = Sled::new( + let sled_update = SledUpdate::new( sled_id, addr, sled_baseboard_for_test(), sled_system_hardware_for_test(), rack_id, ); - db.sled_upsert(sled) + db.sled_upsert(sled_update) .await .expect("Could not upsert sled during test prep") } diff --git a/nexus/db-queries/src/db/datastore/project.rs b/nexus/db-queries/src/db/datastore/project.rs index c447b5bf98..a9015ea943 100644 --- a/nexus/db-queries/src/db/datastore/project.rs +++ b/nexus/db-queries/src/db/datastore/project.rs @@ -13,7 +13,6 @@ use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; -use crate::db::error::TransactionError; use crate::db::fixed_data::project::SERVICES_PROJECT; use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::identity::Resource; @@ -24,7 +23,8 @@ use crate::db::model::ProjectUpdate; use crate::db::model::Silo; use crate::db::model::VirtualProvisioningCollection; use crate::db::pagination::paginated; -use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; +use crate::transaction_retry::OptionalError; +use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use omicron_common::api::external::http_pagination::PaginatedBy; @@ -78,9 +78,9 @@ macro_rules! 
generate_fn_to_ensure_none_in_project { "a" }; - return Err(Error::InvalidRequest { - message: format!("project to be deleted contains {article} {object}: {label}"), - }); + return Err(Error::invalid_request( + format!("project to be deleted contains {article} {object}: {label}") + )); } Ok(()) @@ -151,49 +151,62 @@ impl DataStore { use db::schema::project::dsl; + let err = OptionalError::new(); let name = project.name().as_str().to_string(); + let conn = self.pool_connection_authorized(opctx).await?; + let db_project = self - .pool_connection_authorized(opctx) - .await? - .transaction_async(|conn| async move { - let project: Project = Silo::insert_resource( - silo_id, - diesel::insert_into(dsl::project).values(project), - ) - .insert_and_get_result_async(&conn) - .await - .map_err(|e| match e { - AsyncInsertError::CollectionNotFound => { - authz_silo_inner.not_found() - } - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel( - e, - ErrorHandler::Conflict( - ResourceType::Project, - &name, + .transaction_retry_wrapper("project_create_in_silo") + .transaction(&conn, |conn| { + let err = err.clone(); + + let authz_silo_inner = authz_silo_inner.clone(); + let name = name.clone(); + let project = project.clone(); + async move { + let project: Project = Silo::insert_resource( + silo_id, + diesel::insert_into(dsl::project).values(project), + ) + .insert_and_get_result_async(&conn) + .await + .map_err(|e| match e { + AsyncInsertError::CollectionNotFound => { + err.bail(authz_silo_inner.not_found()) + } + AsyncInsertError::DatabaseError(diesel_error) => err + .bail_retryable_or_else( + diesel_error, + |diesel_error| { + public_error_from_diesel( + diesel_error, + ErrorHandler::Conflict( + ResourceType::Project, + &name, + ), + ) + }, ), - ) - } - })?; - - // Create resource provisioning for the project. - self.virtual_provisioning_collection_create_on_connection( - &conn, - VirtualProvisioningCollection::new( - project.id(), - CollectionTypeProvisioned::Project, - ), - ) - .await?; - Ok(project) + })?; + + // Create resource provisioning for the project. + self.virtual_provisioning_collection_create_on_connection( + &conn, + VirtualProvisioningCollection::new( + project.id(), + CollectionTypeProvisioned::Project, + ), + ) + .await?; + Ok(project) + } }) .await - .map_err(|e| match e { - TransactionError::CustomError(e) => e, - TransactionError::Database(e) => { - public_error_from_diesel(e, ErrorHandler::Server) + .map_err(|e| { + if let Some(err) = err.take() { + return err; } + public_error_from_diesel(e, ErrorHandler::Server) })?; Ok(( @@ -230,47 +243,54 @@ impl DataStore { use db::schema::project::dsl; - type TxnError = TransactionError; - self.pool_connection_authorized(opctx) - .await? 
- .transaction_async(|conn| async move { - let now = Utc::now(); - let updated_rows = diesel::update(dsl::project) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(authz_project.id())) - .filter(dsl::rcgen.eq(db_project.rcgen)) - .set(dsl::time_deleted.eq(now)) - .returning(Project::as_returning()) - .execute_async(&conn) - .await - .map_err(|e| { - public_error_from_diesel( - e, - ErrorHandler::NotFoundByResource(authz_project), - ) - })?; + let err = OptionalError::new(); + let conn = self.pool_connection_authorized(opctx).await?; + + self.transaction_retry_wrapper("project_delete") + .transaction(&conn, |conn| { + let err = err.clone(); + async move { + let now = Utc::now(); + let updated_rows = diesel::update(dsl::project) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(authz_project.id())) + .filter(dsl::rcgen.eq(db_project.rcgen)) + .set(dsl::time_deleted.eq(now)) + .returning(Project::as_returning()) + .execute_async(&conn) + .await + .map_err(|e| { + err.bail_retryable_or_else(e, |e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource( + authz_project, + ), + ) + }) + })?; + + if updated_rows == 0 { + return Err(err.bail(Error::invalid_request( + "deletion failed due to concurrent modification", + ))); + } - if updated_rows == 0 { - return Err(TxnError::CustomError(Error::InvalidRequest { - message: - "deletion failed due to concurrent modification" - .to_string(), - })); + self.virtual_provisioning_collection_delete_on_connection( + &opctx.log, + &conn, + db_project.id(), + ) + .await?; + Ok(()) } - - self.virtual_provisioning_collection_delete_on_connection( - &conn, - db_project.id(), - ) - .await?; - Ok(()) }) .await - .map_err(|e| match e { - TxnError::CustomError(e) => e, - TxnError::Database(e) => { - public_error_from_diesel(e, ErrorHandler::Server) + .map_err(|e| { + if let Some(err) = err.take() { + return err; } + public_error_from_diesel(e, ErrorHandler::Server) })?; Ok(()) } diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index ae982d86f8..a69386cfd0 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -13,8 +13,9 @@ use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel; +use crate::db::error::retryable; use crate::db::error::ErrorHandler; -use crate::db::error::TransactionError; +use crate::db::error::MaybeRetryable::*; use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::fixed_data::vpc_subnet::DNS_VPC_SUBNET; use crate::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; @@ -26,6 +27,7 @@ use crate::db::model::Rack; use crate::db::model::Zpool; use crate::db::pagination::paginated; use crate::db::pool::DbConnection; +use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; @@ -41,6 +43,7 @@ use nexus_db_model::InitialDnsGroup; use nexus_db_model::PasswordHashString; use nexus_db_model::SiloUser; use nexus_db_model::SiloUserPasswordHash; +use nexus_db_model::SledUnderlaySubnetAllocation; use nexus_types::external_api::params as external_params; use nexus_types::external_api::shared; use nexus_types::external_api::shared::IdentityType; @@ -55,7 +58,9 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use 
omicron_common::api::external::UpdateResult;
+use omicron_common::bail_unless;
 use std::net::IpAddr;
+use std::sync::{Arc, OnceLock};
 use uuid::Uuid;
 
 /// Groups arguments related to rack initialization
@@ -86,18 +91,30 @@ enum RackInitError {
     DnsSerialization(Error),
     Silo(Error),
     RoleAssignment(Error),
+    // Retryable database error
+    Retryable(DieselError),
+    // Other non-retryable database error
+    Database(DieselError),
 }
-type TxnError = TransactionError<RackInitError>;
 
-impl From<TxnError> for Error {
-    fn from(e: TxnError) -> Self {
+// Catch-all for Diesel error conversion into RackInitError, which
+// can also label errors as retryable.
+impl From<DieselError> for RackInitError {
+    fn from(e: DieselError) -> Self {
+        if retryable(&e) {
+            Self::Retryable(e)
+        } else {
+            Self::Database(e)
+        }
+    }
+}
+
+impl From<RackInitError> for Error {
+    fn from(e: RackInitError) -> Self {
         match e {
-            TxnError::CustomError(RackInitError::AddingIp(err)) => err,
-            TxnError::CustomError(RackInitError::AddingNic(err)) => err,
-            TxnError::CustomError(RackInitError::DatasetInsert {
-                err,
-                zpool_id,
-            }) => match err {
+            RackInitError::AddingIp(err) => err,
+            RackInitError::AddingNic(err) => err,
+            RackInitError::DatasetInsert { err, zpool_id } => match err {
                 AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
                     type_name: ResourceType::Zpool,
                     lookup_type: LookupType::ById(zpool_id),
@@ -106,43 +123,36 @@ impl From<TxnError> for Error {
                     public_error_from_diesel(e, ErrorHandler::Server)
                 }
             },
-            TxnError::CustomError(RackInitError::ServiceInsert(err)) => {
-                Error::internal_error(&format!(
-                    "failed to insert Service record: {:#}",
-                    err
-                ))
-            }
-            TxnError::CustomError(RackInitError::RackUpdate {
-                err,
-                rack_id,
-            }) => public_error_from_diesel(
-                err,
-                ErrorHandler::NotFoundByLookup(
-                    ResourceType::Rack,
-                    LookupType::ById(rack_id),
-                ),
+            RackInitError::ServiceInsert(err) => Error::internal_error(
+                &format!("failed to insert Service record: {:#}", err),
             ),
-            TxnError::CustomError(RackInitError::DnsSerialization(err)) => {
-                Error::internal_error(&format!(
-                    "failed to serialize initial DNS records: {:#}",
-                    err
-                ))
-            }
-            TxnError::CustomError(RackInitError::Silo(err)) => {
-                Error::internal_error(&format!(
-                    "failed to create recovery Silo: {:#}",
-                    err
-                ))
-            }
-            TxnError::CustomError(RackInitError::RoleAssignment(err)) => {
-                Error::internal_error(&format!(
-                    "failed to assign role to initial user: {:#}",
-                    err
-                ))
-            }
-            TxnError::Database(e) => {
-                Error::internal_error(&format!("Transaction error: {}", e))
+            RackInitError::RackUpdate { err, rack_id } => {
+                public_error_from_diesel(
+                    err,
+                    ErrorHandler::NotFoundByLookup(
+                        ResourceType::Rack,
+                        LookupType::ById(rack_id),
+                    ),
+                )
             }
+            RackInitError::DnsSerialization(err) => Error::internal_error(
+                &format!("failed to serialize initial DNS records: {:#}", err),
+            ),
+            RackInitError::Silo(err) => Error::internal_error(&format!(
+                "failed to create recovery Silo: {:#}",
+                err
+            )),
+            RackInitError::RoleAssignment(err) => Error::internal_error(
+                &format!("failed to assign role to initial user: {:#}", err),
+            ),
+            RackInitError::Retryable(err) => Error::internal_error(&format!(
+                "failed operation due to database contention: {:#}",
+                err
+            )),
+            RackInitError::Database(err) => Error::internal_error(&format!(
+                "failed operation due to database error: {:#}",
+                err
+            )),
         }
     }
 }
@@ -214,8 +224,125 @@ impl DataStore {
         Ok(())
     }
 
-    // The following methods which return a `TxnError` take a `conn` parameter
-    // which comes from the transaction created in `rack_set_initialized`.
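The `From<DieselError>` impl above leans on `retryable()` to split contention from real failures. A hedged sketch of that predicate, assuming (as the rest of this diff suggests) that it keys off CockroachDB's serialization-failure SQLSTATE (40001), which diesel surfaces as `DatabaseErrorKind::SerializationFailure`:

    // Sketch only: the real predicate lives in db::error and may cover
    // additional cases beyond plain serialization failures.
    fn retryable_sketch(e: &diesel::result::Error) -> bool {
        use diesel::result::{DatabaseErrorKind, Error};
        matches!(
            e,
            Error::DatabaseError(DatabaseErrorKind::SerializationFailure, _)
        )
    }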
+    // Return the subnet for the rack
+    pub async fn rack_subnet(
+        &self,
+        opctx: &OpContext,
+        rack_id: Uuid,
+    ) -> Result<IpNetwork, Error> {
+        opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
+        let conn = self.pool_connection_authorized(opctx).await?;
+        use db::schema::rack::dsl;
+        // It's safe to unwrap the returned `rack_subnet` because
+        // we filter on `rack_subnet.is_not_null()`
+        let subnet = dsl::rack
+            .filter(dsl::id.eq(rack_id))
+            .filter(dsl::rack_subnet.is_not_null())
+            .select(dsl::rack_subnet)
+            .first_async::<Option<IpNetwork>>(&*conn)
+            .await
+            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;
+        match subnet {
+            Some(subnet) => Ok(subnet),
+            None => Err(Error::internal_error(&format!(
+                "DB Error(bug): returned a null subnet for {rack_id}"
+            ))),
+        }
+    }
+
+    /// Allocate a rack subnet octet to a given sled
+    ///
+    /// 1. Find the existing allocations
+    /// 2. Calculate the new allocation
+    /// 3. Save the new allocation, if there isn't one for the given
+    ///    `hw_baseboard_id`
+    /// 4. Return the new allocation
+    ///
+    // TODO: This could all actually be done in SQL using a `next_item` query.
+    // See https://github.com/oxidecomputer/omicron/issues/4544
+    pub async fn allocate_sled_underlay_subnet_octets(
+        &self,
+        opctx: &OpContext,
+        rack_id: Uuid,
+        hw_baseboard_id: Uuid,
+    ) -> Result<SledUnderlaySubnetAllocation, Error> {
+        // Fetch all the existing allocations via self.rack_id
+        let allocations = self.rack_subnet_allocations(opctx, rack_id).await?;
+
+        // Calculate the allocation for the new sled by choosing the lowest
+        // free octet. The returned allocations are ordered by octet, so we
+        // will know when we have a free one. However, if we already have an
+        // allocation for the given sled then reuse that one.
+        const MIN_SUBNET_OCTET: i16 = 33;
+        let mut new_allocation = SledUnderlaySubnetAllocation {
+            rack_id,
+            sled_id: Uuid::new_v4(),
+            subnet_octet: MIN_SUBNET_OCTET,
+            hw_baseboard_id,
+        };
+        let mut allocation_already_exists = false;
+        for allocation in allocations {
+            if allocation.hw_baseboard_id == new_allocation.hw_baseboard_id {
+                // We already have an allocation for this sled.
+                new_allocation = allocation;
+                allocation_already_exists = true;
+                break;
+            }
+            if allocation.subnet_octet == new_allocation.subnet_octet {
+                bail_unless!(
+                    new_allocation.subnet_octet < 255,
+                    "Too many sled subnets allocated"
+                );
+                new_allocation.subnet_octet += 1;
+            }
+        }
+
+        // Write the new allocation row to CRDB. The UNIQUE constraint
+        // on `subnet_octet` will prevent dueling administrators reusing
+        // allocations when sleds are being added. We will need another
+        // mechanism ala generation numbers when we must interleave additions
+        // and removals of sleds.
+        if !allocation_already_exists {
+            self.sled_subnet_allocation_insert(opctx, &new_allocation).await?;
+        }
+
+        Ok(new_allocation)
+    }
+
+    /// Return all current underlay allocations for the rack.
+    ///
+    /// Order allocations by `subnet_octet`
+    pub async fn rack_subnet_allocations(
+        &self,
+        opctx: &OpContext,
+        rack_id: Uuid,
+    ) -> Result<Vec<SledUnderlaySubnetAllocation>, Error> {
+        opctx.authorize(authz::Action::Read, &authz::FLEET).await?;
+        use db::schema::sled_underlay_subnet_allocation::dsl as subnet_dsl;
+        subnet_dsl::sled_underlay_subnet_allocation
+            .filter(subnet_dsl::rack_id.eq(rack_id))
+            .select(SledUnderlaySubnetAllocation::as_select())
+            .order_by(subnet_dsl::subnet_octet.asc())
+            .load_async(&*self.pool_connection_authorized(opctx).await?)
+ .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// Store a new sled subnet allocation in the database + pub async fn sled_subnet_allocation_insert( + &self, + opctx: &OpContext, + allocation: &SledUnderlaySubnetAllocation, + ) -> Result<(), Error> { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + use db::schema::sled_underlay_subnet_allocation::dsl; + diesel::insert_into(dsl::sled_underlay_subnet_allocation) + .values(allocation.clone()) + .execute_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + Ok(()) + } #[allow(clippy::too_many_arguments)] async fn rack_create_recovery_silo( @@ -228,7 +355,7 @@ impl DataStore { recovery_user_id: external_params::UserId, recovery_user_password_hash: omicron_passwords::PasswordHashString, dns_update: DnsVersionUpdateBuilder, - ) -> Result<(), TxnError> { + ) -> Result<(), RackInitError> { let db_silo = self .silo_create_conn( conn, @@ -239,8 +366,10 @@ impl DataStore { dns_update, ) .await - .map_err(RackInitError::Silo) - .map_err(TxnError::CustomError)?; + .map_err(|err| match err.retryable() { + NotRetryable(err) => RackInitError::Silo(err.into()), + Retryable(err) => RackInitError::Retryable(err), + })?; info!(log, "Created recovery silo"); // Create the first user in the initial Recovery Silo @@ -294,8 +423,7 @@ impl DataStore { }], ) .await - .map_err(RackInitError::RoleAssignment) - .map_err(TxnError::CustomError)?; + .map_err(RackInitError::RoleAssignment)?; debug!(log, "Generated role assignment queries"); q1.execute_async(conn).await?; @@ -311,7 +439,7 @@ impl DataStore { log: &slog::Logger, service_pool: &db::model::IpPool, service: internal_params::ServicePutRequest, - ) -> Result<(), TxnError> { + ) -> Result<(), RackInitError> { use internal_params::ServiceKind; let service_db = db::model::Service::new( @@ -321,9 +449,12 @@ impl DataStore { service.address, service.kind.clone().into(), ); - self.service_upsert_conn(conn, service_db).await.map_err(|e| { - TxnError::CustomError(RackInitError::ServiceInsert(e)) - })?; + self.service_upsert_conn(conn, service_db).await.map_err( + |e| match e.retryable() { + Retryable(e) => RackInitError::Retryable(e), + NotRetryable(e) => RackInitError::ServiceInsert(e.into()), + }, + )?; // For services with external connectivity, we record their // explicit IP allocation and create a service NIC as well. 
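Read on its own, the octet-allocation loop in `allocate_sled_underlay_subnet_octets` is a single ordered scan for the lowest free value at or above 33 (ignoring the reuse-by-`hw_baseboard_id` early exit). A standalone sketch of that scan, as a hypothetical helper that is not part of this change:

    // Assumes `sorted_octets` is ascending, as the query's ORDER BY
    // guarantees for the real allocations.
    fn first_free_octet(sorted_octets: &[i16]) -> Option<i16> {
        let mut candidate: i16 = 33;
        for &octet in sorted_octets {
            if octet == candidate {
                if candidate >= 255 {
                    return None; // rack subnet exhausted
                }
                candidate += 1;
            }
        }
        Some(candidate)
    }

For example, `first_free_octet(&[33, 34, 36])` yields `Some(35)`: the gap left at 35 is reused before 37 is handed out.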
@@ -354,9 +485,7 @@ impl DataStore { Some(nic.ip), Some(nic.mac), ) - .map_err(|e| { - TxnError::CustomError(RackInitError::AddingNic(e)) - })?; + .map_err(|e| RackInitError::AddingNic(e))?; Some((db_ip, db_nic)) } ServiceKind::BoundaryNtp { snat, ref nic } => { @@ -378,9 +507,7 @@ impl DataStore { Some(nic.ip), Some(nic.mac), ) - .map_err(|e| { - TxnError::CustomError(RackInitError::AddingNic(e)) - })?; + .map_err(|e| RackInitError::AddingNic(e))?; Some((db_ip, db_nic)) } _ => None, @@ -395,7 +522,10 @@ impl DataStore { IP address for {}", service.kind, ); - TxnError::CustomError(RackInitError::AddingIp(err)) + match err.retryable() { + Retryable(e) => RackInitError::Retryable(e), + NotRetryable(e) => RackInitError::AddingIp(e.into()), + } })?; self.create_network_interface_raw_conn(conn, db_nic) @@ -408,9 +538,10 @@ impl DataStore { _, db::model::NetworkInterfaceKind::Service, ) => Ok(()), - _ => Err(TxnError::CustomError( - RackInitError::AddingNic(e.into_external()), - )), + InsertError::Retryable(err) => { + Err(RackInitError::Retryable(err)) + } + _ => Err(RackInitError::AddingNic(e.into_external())), } })?; } @@ -429,146 +560,187 @@ impl DataStore { opctx.authorize(authz::Action::CreateChild, &authz::FLEET).await?; - let rack_id = rack_init.rack_id; - let services = rack_init.services; - let datasets = rack_init.datasets; - let service_ip_pool_ranges = rack_init.service_ip_pool_ranges; - let internal_dns = rack_init.internal_dns; - let external_dns = rack_init.external_dns; - let (authz_service_pool, service_pool) = self.ip_pools_service_lookup(&opctx).await?; // NOTE: This operation could likely be optimized with a CTE, but given // the low-frequency of calls, this optimization has been deferred. let log = opctx.log.clone(); + let err = Arc::new(OnceLock::new()); + + // NOTE: This transaction cannot yet be made retryable, as it uses + // nested transactions. let rack = self .pool_connection_authorized(opctx) .await? - .transaction_async(|conn| async move { - // Early exit if the rack has already been initialized. - let rack = rack_dsl::rack - .filter(rack_dsl::id.eq(rack_id)) - .select(Rack::as_select()) - .get_result_async(&conn) - .await - .map_err(|e| { - warn!(log, "Initializing Rack: Rack UUID not found"); - TxnError::CustomError(RackInitError::RackUpdate { - err: e, - rack_id, - }) - })?; - if rack.initialized { - info!(log, "Early exit: Rack already initialized"); - return Ok(rack); - } + .transaction_async(|conn| { + let err = err.clone(); + let log = log.clone(); + let authz_service_pool = authz_service_pool.clone(); + let rack_init = rack_init.clone(); + let service_pool = service_pool.clone(); + async move { + let rack_id = rack_init.rack_id; + let services = rack_init.services; + let datasets = rack_init.datasets; + let service_ip_pool_ranges = rack_init.service_ip_pool_ranges; + let internal_dns = rack_init.internal_dns; + let external_dns = rack_init.external_dns; + + // Early exit if the rack has already been initialized. + let rack = rack_dsl::rack + .filter(rack_dsl::id.eq(rack_id)) + .select(Rack::as_select()) + .get_result_async(&conn) + .await + .map_err(|e| { + warn!(log, "Initializing Rack: Rack UUID not found"); + err.set(RackInitError::RackUpdate { + err: e, + rack_id, + }).unwrap(); + DieselError::RollbackTransaction + })?; + if rack.initialized { + info!(log, "Early exit: Rack already initialized"); + return Ok::<_, DieselError>(rack); + } - // Otherwise, insert services and datasets. + // Otherwise, insert services and datasets. 
- // Set up the IP pool for internal services. - for range in service_ip_pool_ranges { - Self::ip_pool_add_range_on_connection( - &conn, - opctx, - &authz_service_pool, - &range, - ) - .await - .map_err(|err| { - warn!( - log, - "Initializing Rack: Failed to add IP pool range" - ); - TxnError::CustomError(RackInitError::AddingIp(err)) - })?; - } + // Set up the IP pool for internal services. + for range in service_ip_pool_ranges { + Self::ip_pool_add_range_on_connection( + &conn, + opctx, + &authz_service_pool, + &range, + ) + .await + .map_err(|e| { + warn!( + log, + "Initializing Rack: Failed to add IP pool range" + ); + err.set(RackInitError::AddingIp(e)).unwrap(); + DieselError::RollbackTransaction + })?; + } + + // Allocate records for all services. + for service in services { + self.rack_populate_service_records( + &conn, + &log, + &service_pool, + service, + ) + .await + .map_err(|e| { + err.set(e).unwrap(); + DieselError::RollbackTransaction + })?; + } + info!(log, "Inserted services"); + + for dataset in datasets { + use db::schema::dataset::dsl; + let zpool_id = dataset.pool_id; + >::insert_resource( + zpool_id, + diesel::insert_into(dsl::dataset) + .values(dataset.clone()) + .on_conflict(dsl::id) + .do_update() + .set(( + dsl::time_modified.eq(Utc::now()), + dsl::pool_id.eq(excluded(dsl::pool_id)), + dsl::ip.eq(excluded(dsl::ip)), + dsl::port.eq(excluded(dsl::port)), + dsl::kind.eq(excluded(dsl::kind)), + )), + ) + .insert_and_get_result_async(&conn) + .await + .map_err(|e| { + err.set(RackInitError::DatasetInsert { + err: e, + zpool_id, + }).unwrap(); + DieselError::RollbackTransaction + })?; + } + info!(log, "Inserted datasets"); + + // Insert the initial contents of the internal and external DNS + // zones. + Self::load_dns_data(&conn, internal_dns) + .await + .map_err(|e| { + err.set(RackInitError::DnsSerialization(e)).unwrap(); + DieselError::RollbackTransaction + })?; + info!(log, "Populated DNS tables for internal DNS"); - // Allocate records for all services. - for service in services { - self.rack_populate_service_records( + Self::load_dns_data(&conn, external_dns) + .await + .map_err(|e| { + err.set(RackInitError::DnsSerialization(e)).unwrap(); + DieselError::RollbackTransaction + })?; + info!(log, "Populated DNS tables for external DNS"); + + // Create the initial Recovery Silo + self.rack_create_recovery_silo( + &opctx, &conn, &log, - &service_pool, - service, + rack_init.recovery_silo, + rack_init.recovery_silo_fq_dns_name, + rack_init.recovery_user_id, + rack_init.recovery_user_password_hash, + rack_init.dns_update, ) - .await?; - } - info!(log, "Inserted services"); - - for dataset in datasets { - use db::schema::dataset::dsl; - let zpool_id = dataset.pool_id; - >::insert_resource( - zpool_id, - diesel::insert_into(dsl::dataset) - .values(dataset.clone()) - .on_conflict(dsl::id) - .do_update() - .set(( - dsl::time_modified.eq(Utc::now()), - dsl::pool_id.eq(excluded(dsl::pool_id)), - dsl::ip.eq(excluded(dsl::ip)), - dsl::port.eq(excluded(dsl::port)), - dsl::kind.eq(excluded(dsl::kind)), - )), - ) - .insert_and_get_result_async(&conn) .await - .map_err(|err| { - TxnError::CustomError(RackInitError::DatasetInsert { - err, - zpool_id, - }) + .map_err(|e| match e { + RackInitError::Retryable(e) => e, + _ => { + err.set(e).unwrap(); + DieselError::RollbackTransaction + }, })?; - } - info!(log, "Inserted datasets"); - // Insert the initial contents of the internal and external DNS - // zones. 
- Self::load_dns_data(&conn, internal_dns) - .await - .map_err(RackInitError::DnsSerialization) - .map_err(TxnError::CustomError)?; - info!(log, "Populated DNS tables for internal DNS"); - - Self::load_dns_data(&conn, external_dns) - .await - .map_err(RackInitError::DnsSerialization) - .map_err(TxnError::CustomError)?; - info!(log, "Populated DNS tables for external DNS"); - - // Create the initial Recovery Silo - self.rack_create_recovery_silo( - &opctx, - &conn, - &log, - rack_init.recovery_silo, - rack_init.recovery_silo_fq_dns_name, - rack_init.recovery_user_id, - rack_init.recovery_user_password_hash, - rack_init.dns_update, - ) - .await?; - - let rack = diesel::update(rack_dsl::rack) - .filter(rack_dsl::id.eq(rack_id)) - .set(( - rack_dsl::initialized.eq(true), - rack_dsl::time_modified.eq(Utc::now()), - )) - .returning(Rack::as_returning()) - .get_result_async::(&conn) - .await - .map_err(|err| { - TxnError::CustomError(RackInitError::RackUpdate { - err, - rack_id, - }) - })?; - Ok::<_, TxnError>(rack) - }) - .await?; + let rack = diesel::update(rack_dsl::rack) + .filter(rack_dsl::id.eq(rack_id)) + .set(( + rack_dsl::initialized.eq(true), + rack_dsl::time_modified.eq(Utc::now()), + )) + .returning(Rack::as_returning()) + .get_result_async::(&conn) + .await + .map_err(|e| { + if retryable(&e) { + return e; + } + err.set(RackInitError::RackUpdate { + err: e, + rack_id, + }).unwrap(); + DieselError::RollbackTransaction + })?; + Ok(rack) + } + }, + ) + .await + .map_err(|e| { + if let Some(err) = Arc::try_unwrap(err).unwrap().take() { + err.into() + } else { + Error::internal_error(&format!("Transaction error: {}", e)) + } + })?; Ok(rack) } @@ -623,42 +795,54 @@ impl DataStore { use crate::db::schema::external_ip::dsl as extip_dsl; use crate::db::schema::service::dsl as service_dsl; - type TxnError = TransactionError; - self.pool_connection_authorized(opctx) - .await? - .transaction_async(|conn| async move { - let ips = extip_dsl::external_ip - .inner_join( - service_dsl::service.on(service_dsl::id - .eq(extip_dsl::parent_id.assume_not_null())), - ) - .filter(extip_dsl::parent_id.is_not_null()) - .filter(extip_dsl::time_deleted.is_null()) - .filter(extip_dsl::is_service) - .filter(service_dsl::kind.eq(db::model::ServiceKind::Nexus)) - .select(ExternalIp::as_select()) - .get_results_async(&conn) - .await? - .into_iter() - .map(|external_ip| external_ip.ip.ip()) - .collect(); - - let dns_zones = self - .dns_zones_list_all_on_connection( - opctx, - &conn, - DnsGroup::External, - ) - .await?; - Ok((ips, dns_zones)) + let err = OptionalError::new(); + let conn = self.pool_connection_authorized(opctx).await?; + self.transaction_retry_wrapper("nexus_external_addresses") + .transaction(&conn, |conn| { + let err = err.clone(); + async move { + let ips = extip_dsl::external_ip + .inner_join( + service_dsl::service.on(service_dsl::id + .eq(extip_dsl::parent_id.assume_not_null())), + ) + .filter(extip_dsl::parent_id.is_not_null()) + .filter(extip_dsl::time_deleted.is_null()) + .filter(extip_dsl::is_service) + .filter( + service_dsl::kind.eq(db::model::ServiceKind::Nexus), + ) + .select(ExternalIp::as_select()) + .get_results_async(&conn) + .await? 
+ .into_iter() + .map(|external_ip| external_ip.ip.ip()) + .collect(); + + let dns_zones = self + .dns_zones_list_all_on_connection( + opctx, + &conn, + DnsGroup::External, + ) + .await + .map_err(|e| match e.retryable() { + NotRetryable(not_retryable_err) => { + err.bail(not_retryable_err) + } + Retryable(retryable_err) => retryable_err, + })?; + + Ok((ips, dns_zones)) + } }) .await - .map_err(|error: TxnError| match error { - TransactionError::CustomError(err) => err, - TransactionError::Database(e) => { - public_error_from_diesel(e, ErrorHandler::Server) + .map_err(|e| { + if let Some(err) = err.take() { + return err.into(); } + public_error_from_diesel(e, ErrorHandler::Server) }) } } @@ -680,7 +864,7 @@ mod test { use crate::db::model::Sled; use async_bb8_diesel::AsyncSimpleConnection; use internal_params::DnsRecord; - use nexus_db_model::{DnsGroup, InitialDnsGroup}; + use nexus_db_model::{DnsGroup, InitialDnsGroup, SledUpdate}; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::shared::SiloIdentityMode; use nexus_types::identity::Asset; @@ -870,14 +1054,14 @@ mod test { async fn create_test_sled(db: &DataStore) -> Sled { let sled_id = Uuid::new_v4(); let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); - let sled = Sled::new( + let sled_update = SledUpdate::new( sled_id, addr, sled_baseboard_for_test(), sled_system_hardware_for_test(), rack_id(), ); - db.sled_upsert(sled) + db.sled_upsert(sled_update) .await .expect("Could not upsert sled during test prep") } @@ -892,14 +1076,16 @@ mod test { async fn [](db: &DataStore) -> Vec<$model> { use crate::db::schema::$table::dsl; use nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL; - db.pool_connection_for_tests() + let conn = db.pool_connection_for_tests() .await - .unwrap() - .transaction_async(|conn| async move { + .unwrap(); + + db.transaction_retry_wrapper(concat!("fn_to_get_all_", stringify!($table))) + .transaction(&conn, |conn| async move { conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL) .await .unwrap(); - Ok::<_, crate::db::TransactionError<()>>( + Ok( dsl::$table .select($model::as_select()) .get_results_async(&conn) @@ -1518,4 +1704,136 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + #[tokio::test] + async fn rack_sled_subnet_allocations() { + let logctx = dev::test_setup_log("rack_sled_subnet_allocations"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let rack_id = Uuid::new_v4(); + + // Ensure we get an empty list when there are no allocations + let allocations = + datastore.rack_subnet_allocations(&opctx, rack_id).await.unwrap(); + assert!(allocations.is_empty()); + + // Add 5 allocations + for i in 0..5i16 { + let allocation = SledUnderlaySubnetAllocation { + rack_id, + sled_id: Uuid::new_v4(), + subnet_octet: 33 + i, + hw_baseboard_id: Uuid::new_v4(), + }; + datastore + .sled_subnet_allocation_insert(&opctx, &allocation) + .await + .unwrap(); + } + + // List all 5 allocations + let allocations = + datastore.rack_subnet_allocations(&opctx, rack_id).await.unwrap(); + + assert_eq!(5, allocations.len()); + + // Try to add another allocation for the same octet, but with a distinct + // sled_id. Ensure we get an error due to a unique constraint. 
+        let mut should_fail_allocation = SledUnderlaySubnetAllocation {
+            rack_id,
+            sled_id: Uuid::new_v4(),
+            subnet_octet: 37,
+            hw_baseboard_id: Uuid::new_v4(),
+        };
+        let _err = datastore
+            .sled_subnet_allocation_insert(&opctx, &should_fail_allocation)
+            .await
+            .unwrap_err();
+
+        // Adding an allocation for the same {rack_id, sled_id} pair fails
+        // the second time, even with a distinct subnet_octet
+        let mut allocation = should_fail_allocation.clone();
+        allocation.subnet_octet = 38;
+        datastore
+            .sled_subnet_allocation_insert(&opctx, &allocation)
+            .await
+            .unwrap();
+
+        should_fail_allocation.subnet_octet = 39;
+        should_fail_allocation.hw_baseboard_id = Uuid::new_v4();
+        let _err = datastore
+            .sled_subnet_allocation_insert(&opctx, &should_fail_allocation)
+            .await
+            .unwrap_err();
+
+        // Allocations outside our expected range fail
+        let mut should_fail_allocation = SledUnderlaySubnetAllocation {
+            rack_id,
+            sled_id: Uuid::new_v4(),
+            subnet_octet: 32,
+            hw_baseboard_id: Uuid::new_v4(),
+        };
+        let _err = datastore
+            .sled_subnet_allocation_insert(&opctx, &should_fail_allocation)
+            .await
+            .unwrap_err();
+        should_fail_allocation.subnet_octet = 256;
+        let _err = datastore
+            .sled_subnet_allocation_insert(&opctx, &should_fail_allocation)
+            .await
+            .unwrap_err();
+
+        // We should have 6 allocations
+        let allocations =
+            datastore.rack_subnet_allocations(&opctx, rack_id).await.unwrap();
+
+        assert_eq!(6, allocations.len());
+        assert_eq!(
+            vec![33, 34, 35, 36, 37, 38],
+            allocations.iter().map(|a| a.subnet_octet).collect::<Vec<_>>()
+        );
+
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn allocate_sled_underlay_subnet_octets() {
+        let logctx =
+            dev::test_setup_log("allocate_sled_underlay_subnet_octets");
+        let mut db = test_setup_database(&logctx.log).await;
+        let (opctx, datastore) = datastore_test(&logctx, &db).await;
+
+        let rack_id = Uuid::new_v4();
+
+        let mut allocated_octets = vec![];
+        for _ in 0..5 {
+            allocated_octets.push(
+                datastore
+                    .allocate_sled_underlay_subnet_octets(
+                        &opctx,
+                        rack_id,
+                        Uuid::new_v4(),
+                    )
+                    .await
+                    .unwrap()
+                    .subnet_octet,
+            );
+        }
+
+        let expected = vec![33, 34, 35, 36, 37];
+        assert_eq!(expected, allocated_octets);
+
+        // We should have 5 allocations in the DB, sorted appropriately
+        let allocations =
+            datastore.rack_subnet_allocations(&opctx, rack_id).await.unwrap();
+        assert_eq!(5, allocations.len());
+        assert_eq!(
+            expected,
+            allocations.iter().map(|a| a.subnet_octet).collect::<Vec<_>>()
+        );
+
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
 }
diff --git a/nexus/db-queries/src/db/datastore/region.rs b/nexus/db-queries/src/db/datastore/region.rs
index 9465fe2792..b055a3e85c 100644
--- a/nexus/db-queries/src/db/datastore/region.rs
+++ b/nexus/db-queries/src/db/datastore/region.rs
@@ -10,18 +10,16 @@ use crate::context::OpContext;
 use crate::db;
 use crate::db::error::public_error_from_diesel;
 use crate::db::error::ErrorHandler;
-use crate::db::error::TransactionError;
 use crate::db::lookup::LookupPath;
 use crate::db::model::Dataset;
 use crate::db::model::Region;
-use async_bb8_diesel::AsyncConnection;
+use crate::transaction_retry::OptionalError;
 use async_bb8_diesel::AsyncRunQueryDsl;
 use diesel::prelude::*;
 use nexus_types::external_api::params;
 use omicron_common::api::external;
 use omicron_common::api::external::DeleteResult;
 use omicron_common::api::external::Error;
-use omicron_common::backoff::{self, BackoffError};
 use omicron_common::nexus_config::RegionAllocationStrategy;
 use slog::Logger;
 use uuid::Uuid;
 
@@ -152,7 +150,7 @@ impl DataStore {
     /// Also updates the storage usage on their corresponding datasets.
     pub async fn regions_hard_delete(
         &self,
-        log: &Logger,
+        _log: &Logger,
         region_ids: Vec<Uuid>,
     ) -> DeleteResult {
         if region_ids.is_empty() {
@@ -164,98 +162,79 @@ impl DataStore {
             #[error("Numeric error: {0}")]
             NumericError(String),
         }
-        type TxnError = TransactionError<RegionDeleteError>;
-
-        // Retry this transaction until it succeeds. It's a little heavy in that
-        // there's a for loop inside that iterates over the datasets the
-        // argument regions belong to, and it often encounters the "retry
-        // transaction" error.
-        let transaction = {
-            |region_ids: Vec<Uuid>| async {
-                self.pool_connection_unauthorized()
-                    .await?
-                    .transaction_async(|conn| async move {
-                        use db::schema::dataset::dsl as dataset_dsl;
-                        use db::schema::region::dsl as region_dsl;
-
-                        // Remove the regions, collecting datasets they're from.
-                        let datasets = diesel::delete(region_dsl::region)
-                            .filter(region_dsl::id.eq_any(region_ids))
-                            .returning(region_dsl::dataset_id)
-                            .get_results_async::<Uuid>(&conn).await?;
-
-                        // Update datasets to which the regions belonged.
-                        for dataset in datasets {
-                            let dataset_total_occupied_size: Option<
-                                diesel::pg::data_types::PgNumeric,
-                            > = region_dsl::region
-                                .filter(region_dsl::dataset_id.eq(dataset))
-                                .select(diesel::dsl::sum(
-                                    region_dsl::block_size
-                                        * region_dsl::blocks_per_extent
-                                        * region_dsl::extent_count,
-                                ))
-                                .nullable()
-                                .get_result_async(&conn).await?;
-
-                            let dataset_total_occupied_size: i64 = if let Some(
-                                dataset_total_occupied_size,
-                            ) =
-                                dataset_total_occupied_size
-                            {
-                                let dataset_total_occupied_size: db::model::ByteCount =
-                                    dataset_total_occupied_size.try_into().map_err(
-                                        |e: anyhow::Error| {
-                                            TxnError::CustomError(
-                                                RegionDeleteError::NumericError(
-                                                    e.to_string(),
-                                                ),
-                                            )
-                                        },
-                                    )?;
-
-                                dataset_total_occupied_size.into()
-                            } else {
-                                0
-                            };
-
-                            diesel::update(dataset_dsl::dataset)
-                                .filter(dataset_dsl::id.eq(dataset))
-                                .set(
-                                    dataset_dsl::size_used
-                                        .eq(dataset_total_occupied_size),
-                                )
-                                .execute_async(&conn).await?;
-                        }
-
-                        Ok(())
-                    })
-                    .await
-                    .map_err(|e: TxnError| {
-                        if e.retry_transaction() {
-                            BackoffError::transient(Error::internal_error(
-                                &format!("Retryable transaction error {:?}", e)
+        let err = OptionalError::new();
+        let conn = self.pool_connection_unauthorized().await?;
+        self.transaction_retry_wrapper("regions_hard_delete")
+            .transaction(&conn, |conn| {
+                let err = err.clone();
+                let region_ids = region_ids.clone();
+                async move {
+                    use db::schema::dataset::dsl as dataset_dsl;
+                    use db::schema::region::dsl as region_dsl;
+
+                    // Remove the regions, collecting datasets they're from.
+                    let datasets = diesel::delete(region_dsl::region)
+                        .filter(region_dsl::id.eq_any(region_ids))
+                        .returning(region_dsl::dataset_id)
+                        .get_results_async::<Uuid>(&conn).await?;
+
+                    // Update datasets to which the regions belonged.
+ for dataset in datasets { + let dataset_total_occupied_size: Option< + diesel::pg::data_types::PgNumeric, + > = region_dsl::region + .filter(region_dsl::dataset_id.eq(dataset)) + .select(diesel::dsl::sum( + region_dsl::block_size + * region_dsl::blocks_per_extent + * region_dsl::extent_count, )) + .nullable() + .get_result_async(&conn).await?; + + let dataset_total_occupied_size: i64 = if let Some( + dataset_total_occupied_size, + ) = + dataset_total_occupied_size + { + let dataset_total_occupied_size: db::model::ByteCount = + dataset_total_occupied_size.try_into().map_err( + |e: anyhow::Error| { + err.bail(RegionDeleteError::NumericError( + e.to_string(), + )) + }, + )?; + + dataset_total_occupied_size.into() } else { - BackoffError::Permanent(Error::internal_error( - &format!("Transaction error: {}", e) - )) + 0 + }; + + diesel::update(dataset_dsl::dataset) + .filter(dataset_dsl::id.eq(dataset)) + .set( + dataset_dsl::size_used + .eq(dataset_total_occupied_size), + ) + .execute_async(&conn).await?; + } + Ok(()) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + match err { + RegionDeleteError::NumericError(err) => { + return Error::internal_error( + &format!("Transaction error: {}", err) + ); } - }) - } - }; - - backoff::retry_notify( - backoff::retry_policy_internal_service_aggressive(), - || async { - let region_ids = region_ids.clone(); - transaction(region_ids).await - }, - |e: Error, delay| { - info!(log, "{:?}, trying again in {:?}", e, delay,); - }, - ) - .await + } + } + public_error_from_diesel(e, ErrorHandler::Server) + }) } /// Return the total occupied size for a dataset diff --git a/nexus/db-queries/src/db/datastore/saga.rs b/nexus/db-queries/src/db/datastore/saga.rs index 2ec0c40799..1cd41a9806 100644 --- a/nexus/db-queries/src/db/datastore/saga.rs +++ b/nexus/db-queries/src/db/datastore/saga.rs @@ -87,8 +87,8 @@ impl DataStore { match result.status { UpdateStatus::Updated => Ok(()), - UpdateStatus::NotUpdatedButExists => Err(Error::InvalidRequest { - message: format!( + UpdateStatus::NotUpdatedButExists => Err(Error::invalid_request( + format!( "failed to update saga {:?} with state {:?}: preconditions not met: \ expected current_sec = {:?}, adopt_generation = {:?}, \ but found current_sec = {:?}, adopt_generation = {:?}, state = {:?}", @@ -100,7 +100,7 @@ impl DataStore { result.found.adopt_generation, result.found.saga_state, ) - }), + )), } } diff --git a/nexus/db-queries/src/db/datastore/service.rs b/nexus/db-queries/src/db/datastore/service.rs index 40bf250abe..df7ed27a6d 100644 --- a/nexus/db-queries/src/db/datastore/service.rs +++ b/nexus/db-queries/src/db/datastore/service.rs @@ -11,7 +11,9 @@ use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel; +use crate::db::error::retryable; use crate::db::error::ErrorHandler; +use crate::db::error::TransactionError; use crate::db::identity::Asset; use crate::db::model::Service; use crate::db::model::Sled; @@ -38,7 +40,12 @@ impl DataStore { service: Service, ) -> CreateResult { let conn = self.pool_connection_authorized(opctx).await?; - self.service_upsert_conn(&conn, service).await + self.service_upsert_conn(&conn, service).await.map_err(|e| match e { + TransactionError::CustomError(err) => err, + TransactionError::Database(err) => { + public_error_from_diesel(err, ErrorHandler::Server) + } + }) } /// Stores a new service in the database (using an existing db connection). 
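Several methods in this diff (`regions_hard_delete` above, and the network-interface and project rewrites earlier) share the same `OptionalError` shape, so a condensed, hypothetical sketch of the pattern may help: custom failures are stashed in the side channel with `bail`, which also aborts the transaction; after the transaction, `take()` recovers the stashed error, and anything left over is treated as a plain database error:

    let err = OptionalError::new();
    let conn = self.pool_connection_authorized(opctx).await?;
    self.transaction_retry_wrapper("example_txn") // hypothetical name
        .transaction(&conn, |conn| {
            // Clone the handle into the closure; it may run several times.
            let err = err.clone();
            async move {
                let precondition_holds = false; // stand-in for a real check
                if !precondition_holds {
                    // Stash a typed error and stop retrying.
                    return Err(err.bail(Error::invalid_request(
                        "precondition not met",
                    )));
                }
                Ok(())
            }
        })
        .await
        .map_err(|e| {
            if let Some(custom) = err.take() {
                return custom;
            }
            public_error_from_diesel(e, ErrorHandler::Server)
        })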
@@ -46,7 +53,7 @@ impl DataStore {
         &self,
         conn: &async_bb8_diesel::Connection<DbConnection>,
         service: Service,
-    ) -> CreateResult<Service> {
+    ) -> Result<Service, TransactionError<Error>> {
         use db::schema::service::dsl;
 
         let service_id = service.id();
@@ -68,17 +75,24 @@ impl DataStore {
             .insert_and_get_result_async(conn)
             .await
             .map_err(|e| match e {
-                AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
-                    type_name: ResourceType::Sled,
-                    lookup_type: LookupType::ById(sled_id),
-                },
-                AsyncInsertError::DatabaseError(e) => public_error_from_diesel(
-                    e,
-                    ErrorHandler::Conflict(
-                        ResourceType::Service,
-                        &service_id.to_string(),
-                    ),
-                ),
+                AsyncInsertError::CollectionNotFound => {
+                    TransactionError::CustomError(Error::ObjectNotFound {
+                        type_name: ResourceType::Sled,
+                        lookup_type: LookupType::ById(sled_id),
+                    })
+                }
+                AsyncInsertError::DatabaseError(e) => {
+                    if retryable(&e) {
+                        return TransactionError::Database(e);
+                    }
+                    TransactionError::CustomError(public_error_from_diesel(
+                        e,
+                        ErrorHandler::Conflict(
+                            ResourceType::Service,
+                            &service_id.to_string(),
+                        ),
+                    ))
+                }
             })
     }
 
diff --git a/nexus/db-queries/src/db/datastore/silo.rs b/nexus/db-queries/src/db/datastore/silo.rs
index ec3658c067..437c171fb0 100644
--- a/nexus/db-queries/src/db/datastore/silo.rs
+++ b/nexus/db-queries/src/db/datastore/silo.rs
@@ -11,6 +11,7 @@ use crate::context::OpContext;
 use crate::db;
 use crate::db::datastore::RunnableQuery;
 use crate::db::error::public_error_from_diesel;
+use crate::db::error::retryable;
 use crate::db::error::ErrorHandler;
 use crate::db::error::TransactionError;
 use crate::db::fixed_data::silo::{DEFAULT_SILO, INTERNAL_SILO};
@@ -123,15 +124,17 @@ impl DataStore {
         dns_update: DnsVersionUpdateBuilder,
     ) -> CreateResult<Silo> {
         let conn = self.pool_connection_authorized(opctx).await?;
-        self.silo_create_conn(
-            &conn,
-            opctx,
-            nexus_opctx,
-            new_silo_params,
-            new_silo_dns_names,
-            dns_update,
-        )
-        .await
+        let silo = self
+            .silo_create_conn(
+                &conn,
+                opctx,
+                nexus_opctx,
+                new_silo_params,
+                new_silo_dns_names,
+                dns_update,
+            )
+            .await?;
+        Ok(silo)
     }
 
     pub async fn silo_create_conn(
@@ -142,7 +145,7 @@ impl DataStore {
         new_silo_params: params::SiloCreate,
         new_silo_dns_names: &[String],
         dns_update: DnsVersionUpdateBuilder,
-    ) -> CreateResult<Silo> {
+    ) -> Result<Silo, TransactionError<Error>> {
         let silo_id = Uuid::new_v4();
         let silo_group_id = Uuid::new_v4();
 
@@ -199,71 +202,71 @@ impl DataStore {
             None
         };
 
-        conn.transaction_async(|conn| async move {
-            let silo = silo_create_query
-                .get_result_async(&conn)
-                .await
-                .map_err(|e| {
-                    public_error_from_diesel(
-                        e,
-                        ErrorHandler::Conflict(
-                            ResourceType::Silo,
-                            new_silo_params.identity.name.as_str(),
-                        ),
-                    )
-                })?;
-            self.virtual_provisioning_collection_create_on_connection(
-                &conn,
-                VirtualProvisioningCollection::new(
-                    silo.id(),
-                    CollectionTypeProvisioned::Silo,
-                ),
-            )
-            .await?;
-
-            if let Some(query) = silo_admin_group_ensure_query {
-                query.get_result_async(&conn).await?;
-            }
-
-            if let Some(queries) = silo_admin_group_role_assignment_queries {
-                let (delete_old_query, insert_new_query) = queries;
-                delete_old_query.execute_async(&conn).await?;
-                insert_new_query.execute_async(&conn).await?;
-            }
-
-            let certificates = new_silo_params
-                .tls_certificates
-                .into_iter()
-                .map(|c| {
-                    Certificate::new(
+        let silo = conn
+            .transaction_async(|conn| async move {
+                let silo = silo_create_query
+                    .get_result_async(&conn)
+                    .await
+                    .map_err(|e| {
+                        if retryable(&e) {
+                            return TransactionError::Database(e);
+                        }
+                        TransactionError::CustomError(public_error_from_diesel(
+                            e,
+                            ErrorHandler::Conflict(
ResourceType::Silo, + new_silo_params.identity.name.as_str(), + ), + )) + })?; + self.virtual_provisioning_collection_create_on_connection( + &conn, + VirtualProvisioningCollection::new( silo.id(), - Uuid::new_v4(), - ServiceKind::Nexus, - c, - new_silo_dns_names, - ) - }) - .collect::, _>>() - .map_err(Error::from)?; - { - use db::schema::certificate::dsl; - diesel::insert_into(dsl::certificate) - .values(certificates) - .execute_async(&conn) - .await?; - } - - self.dns_update(nexus_opctx, &conn, dns_update).await?; + CollectionTypeProvisioned::Silo, + ), + ) + .await?; - Ok(silo) - }) - .await - .map_err(|e| match e { - TransactionError::CustomError(e) => e, - TransactionError::Database(e) => { - public_error_from_diesel(e, ErrorHandler::Server) - } - }) + if let Some(query) = silo_admin_group_ensure_query { + query.get_result_async(&conn).await?; + } + + if let Some(queries) = silo_admin_group_role_assignment_queries + { + let (delete_old_query, insert_new_query) = queries; + delete_old_query.execute_async(&conn).await?; + insert_new_query.execute_async(&conn).await?; + } + + let certificates = new_silo_params + .tls_certificates + .into_iter() + .map(|c| { + Certificate::new( + silo.id(), + Uuid::new_v4(), + ServiceKind::Nexus, + c, + new_silo_dns_names, + ) + }) + .collect::, _>>() + .map_err(Error::from)?; + { + use db::schema::certificate::dsl; + diesel::insert_into(dsl::certificate) + .values(certificates) + .execute_async(&conn) + .await?; + } + + self.dns_update(nexus_opctx, &conn, dns_update).await?; + + Ok::>(silo) + }) + .await?; + Ok(silo) } pub async fn silos_list_by_id( @@ -348,9 +351,9 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if project_found.is_some() { - return Err(Error::InvalidRequest { - message: "silo to be deleted contains a project".to_string(), - }); + return Err(Error::invalid_request( + "silo to be deleted contains a project", + )); } let now = Utc::now(); @@ -372,15 +375,13 @@ impl DataStore { })?; if updated_rows == 0 { - return Err(TxnError::CustomError(Error::InvalidRequest { - message: - "silo deletion failed due to concurrent modification" - .to_string(), - })); + return Err(TxnError::CustomError(Error::invalid_request( + "silo deletion failed due to concurrent modification", + ))); } self.virtual_provisioning_collection_delete_on_connection( - &conn, id, + &opctx.log, &conn, id, ) .await?; diff --git a/nexus/db-queries/src/db/datastore/silo_group.rs b/nexus/db-queries/src/db/datastore/silo_group.rs index 46f4aae7c9..29fcb7490b 100644 --- a/nexus/db-queries/src/db/datastore/silo_group.rs +++ b/nexus/db-queries/src/db/datastore/silo_group.rs @@ -145,35 +145,39 @@ impl DataStore { ) -> UpdateResult<()> { opctx.authorize(authz::Action::Modify, authz_silo_user).await?; - self.pool_connection_authorized(opctx) - .await? 
- .transaction_async(|conn| async move { - use db::schema::silo_group_membership::dsl; + let conn = self.pool_connection_authorized(opctx).await?; - // Delete existing memberships for user - let silo_user_id = authz_silo_user.id(); - diesel::delete(dsl::silo_group_membership) - .filter(dsl::silo_user_id.eq(silo_user_id)) - .execute_async(&conn) - .await?; + self.transaction_retry_wrapper("silo_group_membership_replace_for_user") + .transaction(&conn, |conn| { + let silo_group_ids = silo_group_ids.clone(); + async move { + use db::schema::silo_group_membership::dsl; + + // Delete existing memberships for user + let silo_user_id = authz_silo_user.id(); + diesel::delete(dsl::silo_group_membership) + .filter(dsl::silo_user_id.eq(silo_user_id)) + .execute_async(&conn) + .await?; - // Create new memberships for user - let silo_group_memberships: Vec< - db::model::SiloGroupMembership, - > = silo_group_ids - .iter() - .map(|group_id| db::model::SiloGroupMembership { - silo_group_id: *group_id, - silo_user_id, - }) - .collect(); + // Create new memberships for user + let silo_group_memberships: Vec< + db::model::SiloGroupMembership, + > = silo_group_ids + .iter() + .map(|group_id| db::model::SiloGroupMembership { + silo_group_id: *group_id, + silo_user_id, + }) + .collect(); - diesel::insert_into(dsl::silo_group_membership) - .values(silo_group_memberships) - .execute_async(&conn) - .await?; + diesel::insert_into(dsl::silo_group_membership) + .values(silo_group_memberships) + .execute_async(&conn) + .await?; - Ok(()) + Ok(()) + } }) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs index f4f5188057..7b94d64418 100644 --- a/nexus/db-queries/src/db/datastore/sled.rs +++ b/nexus/db-queries/src/db/datastore/sled.rs @@ -10,12 +10,12 @@ use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; -use crate::db::error::TransactionError; -use crate::db::identity::Asset; use crate::db::model::Sled; use crate::db::model::SledResource; +use crate::db::model::SledUpdate; use crate::db::pagination::paginated; -use async_bb8_diesel::AsyncConnection; +use crate::db::update_and_check::UpdateAndCheck; +use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; @@ -29,21 +29,25 @@ use uuid::Uuid; impl DataStore { /// Stores a new sled in the database. 
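The hunk that follows changes sled_upsert to take a SledUpdate rather than a full Sled row, converting it into an insertable record at the call site. For orientation, the new calling convention, as exercised by the tests later in this diff, looks roughly like this (a sketch; the argument values are illustrative):

    // Sketch: construct the update-shaped row, then upsert it.
    let sled_update = SledUpdate::new(
        Uuid::new_v4(),                                  // sled id
        SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0), // sled address
        sled_baseboard_for_test(),
        sled_system_hardware_for_test(),
        rack_id(),
    );
    let sled: Sled = datastore.sled_upsert(sled_update).await?;

Splitting the caller-supplied fields into SledUpdate keeps server-managed columns (such as the new provision_state introduced below) out of the upsert's hands.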
-    pub async fn sled_upsert(&self, sled: Sled) -> CreateResult<Sled> {
+    pub async fn sled_upsert(
+        &self,
+        sled_update: SledUpdate,
+    ) -> CreateResult<Sled> {
         use db::schema::sled::dsl;
         diesel::insert_into(dsl::sled)
-            .values(sled.clone())
+            .values(sled_update.clone().into_insertable())
             .on_conflict(dsl::id)
             .do_update()
             .set((
                 dsl::time_modified.eq(Utc::now()),
-                dsl::ip.eq(sled.ip),
-                dsl::port.eq(sled.port),
-                dsl::rack_id.eq(sled.rack_id),
-                dsl::is_scrimlet.eq(sled.is_scrimlet()),
-                dsl::usable_hardware_threads.eq(sled.usable_hardware_threads),
-                dsl::usable_physical_ram.eq(sled.usable_physical_ram),
-                dsl::reservoir_size.eq(sled.reservoir_size),
+                dsl::ip.eq(sled_update.ip),
+                dsl::port.eq(sled_update.port),
+                dsl::rack_id.eq(sled_update.rack_id),
+                dsl::is_scrimlet.eq(sled_update.is_scrimlet()),
+                dsl::usable_hardware_threads
+                    .eq(sled_update.usable_hardware_threads),
+                dsl::usable_physical_ram.eq(sled_update.usable_physical_ram),
+                dsl::reservoir_size.eq(sled_update.reservoir_size),
             ))
             .returning(Sled::as_returning())
             .get_result_async(&*self.pool_connection_unauthorized().await?)
@@ -53,7 +57,7 @@ impl DataStore {
                     e,
                     ErrorHandler::Conflict(
                         ResourceType::Sled,
-                        &sled.id().to_string(),
+                        &sled_update.id().to_string(),
                     ),
                 )
             })
@@ -85,118 +89,143 @@ impl DataStore {
         enum SledReservationError {
             NotFound,
         }
-        type TxnError = TransactionError<SledReservationError>;
-
-        self.pool_connection_authorized(opctx)
-            .await?
-            .transaction_async(|conn| async move {
-                use db::schema::sled_resource::dsl as resource_dsl;
-                // Check if resource ID already exists - if so, return it.
-                let old_resource = resource_dsl::sled_resource
-                    .filter(resource_dsl::id.eq(resource_id))
-                    .select(SledResource::as_select())
-                    .limit(1)
-                    .load_async(&conn)
-                    .await?;
-
-                if !old_resource.is_empty() {
-                    return Ok(old_resource[0].clone());
-                }
-
-                // If it doesn't already exist, find a sled with enough space
-                // for the resources we're requesting.
-                use db::schema::sled::dsl as sled_dsl;
-                // This answers the boolean question:
-                // "Does the SUM of all hardware thread usage, plus the one we're trying
-                // to allocate, consume less threads than exists on the sled?"
-                let sled_has_space_for_threads =
-                    (diesel::dsl::sql::<diesel::sql_types::BigInt>(&format!(
-                        "COALESCE(SUM(CAST({} as INT8)), 0)",
-                        resource_dsl::hardware_threads::NAME
-                    )) + resources.hardware_threads)
-                        .le(sled_dsl::usable_hardware_threads);
-
-                // This answers the boolean question:
-                // "Does the SUM of all RAM usage, plus the one we're trying
-                // to allocate, consume less RAM than exists on the sled?"
-                let sled_has_space_for_rss =
-                    (diesel::dsl::sql::<diesel::sql_types::BigInt>(&format!(
-                        "COALESCE(SUM(CAST({} as INT8)), 0)",
-                        resource_dsl::rss_ram::NAME
-                    )) + resources.rss_ram)
-                        .le(sled_dsl::usable_physical_ram);
-
-                // Determine whether adding this service's reservoir allocation
-                // to what's allocated on the sled would avoid going over quota.
-                let sled_has_space_in_reservoir =
-                    (diesel::dsl::sql::<diesel::sql_types::BigInt>(&format!(
-                        "COALESCE(SUM(CAST({} as INT8)), 0)",
-                        resource_dsl::reservoir_ram::NAME
-                    )) + resources.reservoir_ram)
-                        .le(sled_dsl::reservoir_size);
-
-                // Generate a query describing all of the sleds that have space
-                // for this reservation.
- let mut sled_targets = sled_dsl::sled - .left_join( - resource_dsl::sled_resource - .on(resource_dsl::sled_id.eq(sled_dsl::id)), - ) - .group_by(sled_dsl::id) - .having( - sled_has_space_for_threads - .and(sled_has_space_for_rss) - .and(sled_has_space_in_reservoir), - ) - .filter(sled_dsl::time_deleted.is_null()) - .select(sled_dsl::id) - .into_boxed(); - - // Further constrain the sled IDs according to any caller- - // supplied constraints. - if let Some(must_select_from) = constraints.must_select_from() { - sled_targets = sled_targets - .filter(sled_dsl::id.eq_any(must_select_from.to_vec())); - } + let err = OptionalError::new(); - sql_function!(fn random() -> diesel::sql_types::Float); - let sled_targets = sled_targets - .order(random()) - .limit(1) - .get_results_async::(&conn) - .await?; - - if sled_targets.is_empty() { - return Err(TxnError::CustomError( - SledReservationError::NotFound, - )); - } + let conn = self.pool_connection_authorized(opctx).await?; + + self.transaction_retry_wrapper("sled_reservation_create") + .transaction(&conn, |conn| { + // Clone variables into retryable function + let err = err.clone(); + let constraints = constraints.clone(); + let resources = resources.clone(); + + async move { + use db::schema::sled_resource::dsl as resource_dsl; + // Check if resource ID already exists - if so, return it. + let old_resource = resource_dsl::sled_resource + .filter(resource_dsl::id.eq(resource_id)) + .select(SledResource::as_select()) + .limit(1) + .load_async(&conn) + .await?; + + if !old_resource.is_empty() { + return Ok(old_resource[0].clone()); + } + + // If it doesn't already exist, find a sled with enough space + // for the resources we're requesting. + use db::schema::sled::dsl as sled_dsl; + // This answers the boolean question: + // "Does the SUM of all hardware thread usage, plus the one we're trying + // to allocate, consume less threads than exists on the sled?" + let sled_has_space_for_threads = + (diesel::dsl::sql::( + &format!( + "COALESCE(SUM(CAST({} as INT8)), 0)", + resource_dsl::hardware_threads::NAME + ), + ) + resources.hardware_threads) + .le(sled_dsl::usable_hardware_threads); + + // This answers the boolean question: + // "Does the SUM of all RAM usage, plus the one we're trying + // to allocate, consume less RAM than exists on the sled?" + let sled_has_space_for_rss = + (diesel::dsl::sql::( + &format!( + "COALESCE(SUM(CAST({} as INT8)), 0)", + resource_dsl::rss_ram::NAME + ), + ) + resources.rss_ram) + .le(sled_dsl::usable_physical_ram); + + // Determine whether adding this service's reservoir allocation + // to what's allocated on the sled would avoid going over quota. + let sled_has_space_in_reservoir = + (diesel::dsl::sql::( + &format!( + "COALESCE(SUM(CAST({} as INT8)), 0)", + resource_dsl::reservoir_ram::NAME + ), + ) + resources.reservoir_ram) + .le(sled_dsl::reservoir_size); + + // Generate a query describing all of the sleds that have space + // for this reservation. + let mut sled_targets = + sled_dsl::sled + .left_join( + resource_dsl::sled_resource + .on(resource_dsl::sled_id.eq(sled_dsl::id)), + ) + .group_by(sled_dsl::id) + .having( + sled_has_space_for_threads + .and(sled_has_space_for_rss) + .and(sled_has_space_in_reservoir), + ) + .filter(sled_dsl::time_deleted.is_null()) + // Filter out sleds that are not provisionable. + .filter(sled_dsl::provision_state.eq( + db::model::SledProvisionState::Provisionable, + )) + .select(sled_dsl::id) + .into_boxed(); - // Create a SledResource record, associate it with the target - // sled. 
- let resource = SledResource::new( - resource_id, - sled_targets[0], - resource_kind, - resources, - ); - - Ok(diesel::insert_into(resource_dsl::sled_resource) - .values(resource) - .returning(SledResource::as_returning()) - .get_result_async(&conn) - .await?) + // Further constrain the sled IDs according to any caller- + // supplied constraints. + if let Some(must_select_from) = + constraints.must_select_from() + { + sled_targets = sled_targets.filter( + sled_dsl::id.eq_any(must_select_from.to_vec()), + ); + } + + sql_function!(fn random() -> diesel::sql_types::Float); + let sled_targets = sled_targets + .order(random()) + .limit(1) + .get_results_async::(&conn) + .await?; + + if sled_targets.is_empty() { + return Err(err.bail(SledReservationError::NotFound)); + } + + // Create a SledResource record, associate it with the target + // sled. + let resource = SledResource::new( + resource_id, + sled_targets[0], + resource_kind, + resources, + ); + + diesel::insert_into(resource_dsl::sled_resource) + .values(resource) + .returning(SledResource::as_returning()) + .get_result_async(&conn) + .await + } }) .await - .map_err(|e| match e { - TxnError::CustomError(SledReservationError::NotFound) => { - external::Error::unavail( - "No sleds can fit the requested instance", - ) - } - TxnError::Database(e) => { - public_error_from_diesel(e, ErrorHandler::Server) + .map_err(|e| { + if let Some(err) = err.take() { + match err { + SledReservationError::NotFound => { + return external::Error::insufficient_capacity( + "No sleds can fit the requested instance", + "No sled targets found that had enough \ + capacity to fit the requested instance.", + ); + } + } } + public_error_from_diesel(e, ErrorHandler::Server) }) } @@ -213,6 +242,37 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(()) } + + /// Sets the provision state for this sled. + /// + /// Returns the previous state. + pub async fn sled_set_provision_state( + &self, + opctx: &OpContext, + authz_sled: &authz::Sled, + state: db::model::SledProvisionState, + ) -> Result { + use db::schema::sled::dsl; + + opctx.authorize(authz::Action::Modify, authz_sled).await?; + + let sled_id = authz_sled.id(); + let query = diesel::update(dsl::sled) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(sled_id)) + .filter(dsl::provision_state.ne(state)) + .set(( + dsl::provision_state.eq(state), + dsl::time_modified.eq(Utc::now()), + )) + .check_if_exists::(sled_id); + let result = query + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) 
+ .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(result.found.provision_state()) + } } #[cfg(test)] @@ -222,12 +282,15 @@ mod test { use crate::db::datastore::test::{ sled_baseboard_for_test, sled_system_hardware_for_test, }; + use crate::db::lookup::LookupPath; use crate::db::model::ByteCount; use crate::db::model::SqlU32; use nexus_test_utils::db::test_setup_database; + use nexus_types::identity::Asset; use omicron_common::api::external; use omicron_test_utils::dev; use std::net::{Ipv6Addr, SocketAddrV6}; + use std::num::NonZeroU32; fn rack_id() -> Uuid { Uuid::parse_str(nexus_test_utils::RACK_UUID).unwrap() @@ -239,56 +302,167 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (_opctx, datastore) = datastore_test(&logctx, &db).await; - let sled_id = Uuid::new_v4(); - let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); - let mut sled = Sled::new( - sled_id, - addr, - sled_baseboard_for_test(), - sled_system_hardware_for_test(), - rack_id(), - ); - let observed_sled = datastore - .sled_upsert(sled.clone()) - .await - .expect("Could not upsert sled during test prep"); + let mut sled_update = test_new_sled_update(); + let observed_sled = + datastore.sled_upsert(sled_update.clone()).await.unwrap(); assert_eq!( observed_sled.usable_hardware_threads, - sled.usable_hardware_threads + sled_update.usable_hardware_threads ); - assert_eq!(observed_sled.usable_physical_ram, sled.usable_physical_ram); - assert_eq!(observed_sled.reservoir_size, sled.reservoir_size); + assert_eq!( + observed_sled.usable_physical_ram, + sled_update.usable_physical_ram + ); + assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size); // Modify the sizes of hardware - sled.usable_hardware_threads = - SqlU32::new(sled.usable_hardware_threads.0 + 1); + sled_update.usable_hardware_threads = + SqlU32::new(sled_update.usable_hardware_threads.0 + 1); const MIB: u64 = 1024 * 1024; - sled.usable_physical_ram = ByteCount::from( + sled_update.usable_physical_ram = ByteCount::from( external::ByteCount::try_from( - sled.usable_physical_ram.0.to_bytes() + MIB, + sled_update.usable_physical_ram.0.to_bytes() + MIB, ) .unwrap(), ); - sled.reservoir_size = ByteCount::from( + sled_update.reservoir_size = ByteCount::from( external::ByteCount::try_from( - sled.reservoir_size.0.to_bytes() + MIB, + sled_update.reservoir_size.0.to_bytes() + MIB, ) .unwrap(), ); // Test that upserting the sled propagates those changes to the DB. let observed_sled = datastore - .sled_upsert(sled.clone()) + .sled_upsert(sled_update.clone()) .await .expect("Could not upsert sled during test prep"); assert_eq!( observed_sled.usable_hardware_threads, - sled.usable_hardware_threads + sled_update.usable_hardware_threads + ); + assert_eq!( + observed_sled.usable_physical_ram, + sled_update.usable_physical_ram + ); + assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + /// Test that new reservations aren't created on non-provisionable sleds. 
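The test introduced above exercises the new provision-state API end to end. Note that sled_set_provision_state is a conditional update: the .ne(state) filter skips the write when nothing would change, and the check_if_exists/execute_and_check idiom returns the pre-update row so the caller learns the previous state. In isolation, the call looks roughly like this (a sketch; authz_sled would come from a LookupPath fetch as in the test below):

    // Sketch: mark a sled non-provisionable and read back its prior state.
    let old_state = datastore
        .sled_set_provision_state(
            &opctx,
            &authz_sled,
            db::model::SledProvisionState::NonProvisionable,
        )
        .await?;
    // A freshly inserted sled reports Provisionable as its previous state.
    assert_eq!(old_state, db::model::SledProvisionState::Provisionable);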
+ #[tokio::test] + async fn sled_reservation_create_non_provisionable() { + let logctx = + dev::test_setup_log("sled_reservation_create_non_provisionable"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let sled_update = test_new_sled_update(); + let non_provisionable_sled = + datastore.sled_upsert(sled_update.clone()).await.unwrap(); + + let (authz_sled, _) = LookupPath::new(&opctx, &datastore) + .sled_id(non_provisionable_sled.id()) + .fetch_for(authz::Action::Modify) + .await + .unwrap(); + + let old_state = datastore + .sled_set_provision_state( + &opctx, + &authz_sled, + db::model::SledProvisionState::NonProvisionable, + ) + .await + .unwrap(); + assert_eq!( + old_state, + db::model::SledProvisionState::Provisionable, + "a newly created sled starts as provisionable" + ); + + // This should be an error since there are no provisionable sleds. + let resources = db::model::Resources::new( + 1, + // Just require the bare non-zero amount of RAM. + ByteCount::try_from(1024).unwrap(), + ByteCount::try_from(1024).unwrap(), ); - assert_eq!(observed_sled.usable_physical_ram, sled.usable_physical_ram); - assert_eq!(observed_sled.reservoir_size, sled.reservoir_size); + let constraints = db::model::SledReservationConstraints::none(); + let error = datastore + .sled_reservation_create( + &opctx, + Uuid::new_v4(), + db::model::SledResourceKind::Instance, + resources.clone(), + constraints, + ) + .await + .unwrap_err(); + assert!(matches!(error, external::Error::InsufficientCapacity { .. })); + + // Now add a provisionable sled and try again. + let sled_update = test_new_sled_update(); + let provisionable_sled = + datastore.sled_upsert(sled_update.clone()).await.unwrap(); + + let sleds = datastore + .sled_list(&opctx, &first_page(NonZeroU32::new(10).unwrap())) + .await + .unwrap(); + println!("sleds: {:?}", sleds); + + // Try a few times to ensure that resources never get allocated to the + // non-provisionable sled. 
+ for _ in 0..10 { + let constraints = db::model::SledReservationConstraints::none(); + let resource = datastore + .sled_reservation_create( + &opctx, + Uuid::new_v4(), + db::model::SledResourceKind::Instance, + resources.clone(), + constraints, + ) + .await + .unwrap(); + assert_eq!( + resource.sled_id, + provisionable_sled.id(), + "resource is always allocated to the provisionable sled" + ); + + datastore + .sled_reservation_delete(&opctx, resource.id) + .await + .unwrap(); + } db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + fn test_new_sled_update() -> SledUpdate { + let sled_id = Uuid::new_v4(); + let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); + SledUpdate::new( + sled_id, + addr, + sled_baseboard_for_test(), + sled_system_hardware_for_test(), + rack_id(), + ) + } + + /// Returns pagination parameters to fetch the first page of results for a + /// paginated endpoint + fn first_page<'a, T>(limit: NonZeroU32) -> DataPageParams<'a, T> { + DataPageParams { + marker: None, + direction: dropshot::PaginationOrder::Ascending, + limit, + } + } } diff --git a/nexus/db-queries/src/db/datastore/snapshot.rs b/nexus/db-queries/src/db/datastore/snapshot.rs index 7c03e4bd40..7a9eb8d2bc 100644 --- a/nexus/db-queries/src/db/datastore/snapshot.rs +++ b/nexus/db-queries/src/db/datastore/snapshot.rs @@ -20,8 +20,7 @@ use crate::db::model::SnapshotState; use crate::db::pagination::paginated; use crate::db::update_and_check::UpdateAndCheck; use crate::db::update_and_check::UpdateStatus; -use crate::db::TransactionError; -use async_bb8_diesel::AsyncConnection; +use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; @@ -48,114 +47,99 @@ impl DataStore { let gen = snapshot.gen; opctx.authorize(authz::Action::CreateChild, authz_project).await?; - #[derive(Debug, thiserror::Error)] - pub enum CustomError { - #[error("Resource already exists")] - ResourceAlreadyExists, - - #[error("saw AsyncInsertError")] - InsertError(AsyncInsertError), - } - - type TxnError = TransactionError; - - let snapshot_name = snapshot.name().to_string(); let project_id = snapshot.project_id; - let snapshot: Snapshot = self - .pool_connection_authorized(opctx) - .await? - .transaction_async(|conn| async move { - use db::schema::snapshot::dsl; + let err = OptionalError::new(); + let conn = self.pool_connection_authorized(opctx).await?; - // If an undeleted snapshot exists in the database with the - // same name and project but a different id to the snapshot - // this function was passed as an argument, then return an - // error here. - // - // As written below, - // - // .on_conflict((dsl::project_id, dsl::name)) - // .filter_target(dsl::time_deleted.is_null()) - // .do_update() - // .set(dsl::time_modified.eq(dsl::time_modified)) - // - // will set any existing record's `time_modified` if the - // project id and name match, even if the snapshot ID does - // not match. diesel supports adding a filter below like so - // (marked with >>): - // - // .on_conflict((dsl::project_id, dsl::name)) - // .filter_target(dsl::time_deleted.is_null()) - // .do_update() - // .set(dsl::time_modified.eq(dsl::time_modified)) - // >> .filter(dsl::id.eq(snapshot.id())) - // - // which will restrict the `insert_into`'s set so that it - // only applies if the snapshot ID matches. But, - // AsyncInsertError does not have a ObjectAlreadyExists - // variant, so this will be returned as CollectionNotFound - // due to the `insert_into` failing. 
- // - // If this function is passed a snapshot with an ID that - // does not match, but a project and name that does, return - // ObjectAlreadyExists here. + let snapshot: Snapshot = self + .transaction_retry_wrapper("project_ensure_snapshot") + .transaction(&conn, |conn| { + let err = err.clone(); + let snapshot = snapshot.clone(); + let snapshot_name = snapshot.name().to_string(); + async move { + use db::schema::snapshot::dsl; - let existing_snapshot_id: Option = dsl::snapshot - .filter(dsl::time_deleted.is_null()) - .filter(dsl::name.eq(snapshot.name().to_string())) - .filter(dsl::project_id.eq(snapshot.project_id)) - .select(dsl::id) - .limit(1) - .first_async(&conn) - .await - .optional()?; + // If an undeleted snapshot exists in the database with the + // same name and project but a different id to the snapshot + // this function was passed as an argument, then return an + // error here. + // + // As written below, + // + // .on_conflict((dsl::project_id, dsl::name)) + // .filter_target(dsl::time_deleted.is_null()) + // .do_update() + // .set(dsl::time_modified.eq(dsl::time_modified)) + // + // will set any existing record's `time_modified` if the + // project id and name match, even if the snapshot ID does + // not match. diesel supports adding a filter below like so + // (marked with >>): + // + // .on_conflict((dsl::project_id, dsl::name)) + // .filter_target(dsl::time_deleted.is_null()) + // .do_update() + // .set(dsl::time_modified.eq(dsl::time_modified)) + // >> .filter(dsl::id.eq(snapshot.id())) + // + // which will restrict the `insert_into`'s set so that it + // only applies if the snapshot ID matches. But, + // AsyncInsertError does not have a ObjectAlreadyExists + // variant, so this will be returned as CollectionNotFound + // due to the `insert_into` failing. + // + // If this function is passed a snapshot with an ID that + // does not match, but a project and name that does, return + // ObjectAlreadyExists here. 
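Concretely, the guard this comment block describes is implemented just below: look up any live snapshot with the same (project_id, name) pair, and bail before the upsert can touch it if the id differs. Condensed into a sketch (names as in the surrounding code):

    // Condensed sketch of the pre-insert guard below.
    let existing: Option<Uuid> = dsl::snapshot
        .filter(dsl::time_deleted.is_null())
        .filter(dsl::name.eq(snapshot.name().to_string()))
        .filter(dsl::project_id.eq(snapshot.project_id))
        .select(dsl::id)
        .first_async(&conn)
        .await
        .optional()?;
    if existing.is_some_and(|id| id != snapshot.id()) {
        // Same project and name, different snapshot: a non-retryable conflict.
        return Err(err.bail(Error::ObjectAlreadyExists {
            type_name: ResourceType::Snapshot,
            object_name: snapshot.name().to_string(),
        }));
    }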
- if let Some(existing_snapshot_id) = existing_snapshot_id { - if existing_snapshot_id != snapshot.id() { - return Err(TransactionError::CustomError( - CustomError::ResourceAlreadyExists, - )); - } - } + let existing_snapshot_id: Option = dsl::snapshot + .filter(dsl::time_deleted.is_null()) + .filter(dsl::name.eq(snapshot.name().to_string())) + .filter(dsl::project_id.eq(snapshot.project_id)) + .select(dsl::id) + .limit(1) + .first_async(&conn) + .await + .optional()?; - Project::insert_resource( - project_id, - diesel::insert_into(dsl::snapshot) - .values(snapshot) - .on_conflict((dsl::project_id, dsl::name)) - .filter_target(dsl::time_deleted.is_null()) - .do_update() - .set(dsl::time_modified.eq(dsl::time_modified)), - ) - .insert_and_get_result_async(&conn) - .await - .map_err(|e| { - TransactionError::CustomError(CustomError::InsertError(e)) - }) - }) - .await - .map_err(|e: TxnError| match e { - TxnError::CustomError(e) => match e { - CustomError::ResourceAlreadyExists => { - Error::ObjectAlreadyExists { - type_name: ResourceType::Snapshot, - object_name: snapshot_name, + if let Some(existing_snapshot_id) = existing_snapshot_id { + if existing_snapshot_id != snapshot.id() { + return Err(err.bail(Error::ObjectAlreadyExists { + type_name: ResourceType::Snapshot, + object_name: snapshot_name, + })); } } - CustomError::InsertError(e) => match e { + + Project::insert_resource( + project_id, + diesel::insert_into(dsl::snapshot) + .values(snapshot) + .on_conflict((dsl::project_id, dsl::name)) + .filter_target(dsl::time_deleted.is_null()) + .do_update() + .set(dsl::time_modified.eq(dsl::time_modified)), + ) + .insert_and_get_result_async(&conn) + .await + .map_err(|e| match e { AsyncInsertError::CollectionNotFound => { - Error::ObjectNotFound { + err.bail(Error::ObjectNotFound { type_name: ResourceType::Project, lookup_type: LookupType::ById(project_id), - } - } - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel(e, ErrorHandler::Server) + }) } - }, - }, - TxnError::Database(e) => { + AsyncInsertError::DatabaseError(e) => e, + }) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + err + } else { public_error_from_diesel(e, ErrorHandler::Server) } })?; diff --git a/nexus/db-queries/src/db/datastore/switch_interface.rs b/nexus/db-queries/src/db/datastore/switch_interface.rs index 88cff50471..67f16fa08f 100644 --- a/nexus/db-queries/src/db/datastore/switch_interface.rs +++ b/nexus/db-queries/src/db/datastore/switch_interface.rs @@ -11,11 +11,10 @@ use crate::db::datastore::address_lot::{ }; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; -use crate::db::error::TransactionError; use crate::db::model::LoopbackAddress; use crate::db::pagination::paginated; -use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; -use diesel::result::Error as DieselError; +use crate::transaction_retry::OptionalError; +use async_bb8_diesel::AsyncRunQueryDsl; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use ipnetwork::IpNetwork; use nexus_types::external_api::params::LoopbackAddressCreate; @@ -40,80 +39,78 @@ impl DataStore { ReserveBlock(ReserveBlockError), } - type TxnError = TransactionError; - let conn = self.pool_connection_authorized(opctx).await?; let inet = IpNetwork::new(params.address, params.mask) .map_err(|_| Error::invalid_request("invalid address"))?; + let err = OptionalError::new(); + // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - 
conn.transaction_async(|conn| async move { - let lot_id = authz_address_lot.id(); - let (block, rsvd_block) = - crate::db::datastore::address_lot::try_reserve_block( - lot_id, - inet.ip().into(), - params.anycast, - &conn, - ) - .await - .map_err(|e| match e { - ReserveBlockTxnError::CustomError(err) => { - TxnError::CustomError( - LoopbackAddressCreateError::ReserveBlock(err), + self.transaction_retry_wrapper("loopback_address_create") + .transaction(&conn, |conn| { + let err = err.clone(); + async move { + let lot_id = authz_address_lot.id(); + let (block, rsvd_block) = + crate::db::datastore::address_lot::try_reserve_block( + lot_id, + inet.ip().into(), + params.anycast, + &conn, ) + .await + .map_err(|e| match e { + ReserveBlockTxnError::CustomError(e) => err.bail( + LoopbackAddressCreateError::ReserveBlock(e), + ), + ReserveBlockTxnError::Database(e) => e, + })?; + + // Address block reserved, now create the loopback address. + + let addr = LoopbackAddress::new( + id, + block.id, + rsvd_block.id, + params.rack_id, + params.switch_location.to_string(), + inet, + params.anycast, + ); + + let db_addr: LoopbackAddress = + diesel::insert_into(dsl::loopback_address) + .values(addr) + .returning(LoopbackAddress::as_returning()) + .get_result_async(&conn) + .await?; + + Ok(db_addr) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + match err { + LoopbackAddressCreateError::ReserveBlock( + ReserveBlockError::AddressUnavailable, + ) => Error::invalid_request("address unavailable"), + LoopbackAddressCreateError::ReserveBlock( + ReserveBlockError::AddressNotInLot, + ) => Error::invalid_request("address not in lot"), } - ReserveBlockTxnError::Database(err) => { - TxnError::Database(err) - } - })?; - - // Address block reserved, now create the loopback address. 
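One detail worth highlighting in the rewrite above: try_reserve_block reports its own two-variant transaction error, which the new closure splits into a non-retryable domain failure (routed out through err.bail) versus a raw database error handed back so the retry wrapper can decide what to do. Reduced to a sketch rather than complete code:

    // Sketch: splitting a nested TransactionError inside a retryable closure.
    .map_err(|e| match e {
        // Domain failure: record it in the side channel and abort for good.
        ReserveBlockTxnError::CustomError(e) => {
            err.bail(LoopbackAddressCreateError::ReserveBlock(e))
        }
        // Database failure: return the raw error so the wrapper may retry.
        ReserveBlockTxnError::Database(e) => e,
    })?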
- - let addr = LoopbackAddress::new( - id, - block.id, - rsvd_block.id, - params.rack_id, - params.switch_location.to_string(), - inet, - params.anycast, - ); - - let db_addr: LoopbackAddress = - diesel::insert_into(dsl::loopback_address) - .values(addr) - .returning(LoopbackAddress::as_returning()) - .get_result_async(&conn) - .await?; - - Ok(db_addr) - }) - .await - .map_err(|e| match e { - TxnError::CustomError( - LoopbackAddressCreateError::ReserveBlock( - ReserveBlockError::AddressUnavailable, - ), - ) => Error::invalid_request("address unavailable"), - TxnError::CustomError( - LoopbackAddressCreateError::ReserveBlock( - ReserveBlockError::AddressNotInLot, - ), - ) => Error::invalid_request("address not in lot"), - TxnError::Database(e) => match e { - DieselError::DatabaseError(_, _) => public_error_from_diesel( - e, - ErrorHandler::Conflict( - ResourceType::LoopbackAddress, - &format!("lo {}", inet), - ), - ), - _ => public_error_from_diesel(e, ErrorHandler::Server), - }, - }) + } else { + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::LoopbackAddress, + &format!("lo {}", inet), + ), + ) + } + }) } pub async fn loopback_address_delete( @@ -130,22 +127,23 @@ impl DataStore { // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - conn.transaction_async(|conn| async move { - let la = diesel::delete(dsl::loopback_address) - .filter(dsl::id.eq(id)) - .returning(LoopbackAddress::as_returning()) - .get_result_async(&conn) - .await?; - - diesel::delete(rsvd_block_dsl::address_lot_rsvd_block) - .filter(rsvd_block_dsl::id.eq(la.rsvd_address_lot_block_id)) - .execute_async(&conn) - .await?; - - Ok(()) - }) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + self.transaction_retry_wrapper("loopback_address_delete") + .transaction(&conn, |conn| async move { + let la = diesel::delete(dsl::loopback_address) + .filter(dsl::id.eq(id)) + .returning(LoopbackAddress::as_returning()) + .get_result_async(&conn) + .await?; + + diesel::delete(rsvd_block_dsl::address_lot_rsvd_block) + .filter(rsvd_block_dsl::id.eq(la.rsvd_address_lot_block_id)) + .execute_async(&conn) + .await?; + + Ok(()) + }) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn loopback_address_get( diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs index f301750ee9..221feee23c 100644 --- a/nexus/db-queries/src/db/datastore/switch_port.rs +++ b/nexus/db-queries/src/db/datastore/switch_port.rs @@ -11,7 +11,6 @@ use crate::db::datastore::address_lot::{ use crate::db::datastore::UpdatePrecondition; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; -use crate::db::error::TransactionError; use crate::db::model::{ LldpServiceConfig, Name, SwitchInterfaceConfig, SwitchPort, SwitchPortAddressConfig, SwitchPortBgpPeerConfig, SwitchPortConfig, @@ -20,11 +19,11 @@ use crate::db::model::{ SwitchVlanInterfaceConfig, }; use crate::db::pagination::paginated; -use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; -use diesel::result::Error as DieselError; +use crate::transaction_retry::OptionalError; +use async_bb8_diesel::AsyncRunQueryDsl; use diesel::{ - ExpressionMethods, JoinOnDsl, NullableExpressionMethods, QueryDsl, - SelectableHelper, + CombineDsl, ExpressionMethods, JoinOnDsl, NullableExpressionMethods, + QueryDsl, SelectableHelper, }; use nexus_types::external_api::params; use 
omicron_common::api::external::http_pagination::PaginatedBy; @@ -163,280 +162,285 @@ impl DataStore { BgpConfigNotFound, ReserveBlock(ReserveBlockError), } - type TxnError = TransactionError; type SpsCreateError = SwitchPortSettingsCreateError; + let err = OptionalError::new(); let conn = self.pool_connection_authorized(opctx).await?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - conn.transaction_async(|conn| async move { - // create the top level port settings object - let port_settings = match id { - Some(id) => SwitchPortSettings::with_id(id, ¶ms.identity), - None => SwitchPortSettings::new(¶ms.identity), - }; - //let port_settings = SwitchPortSettings::new(¶ms.identity); - let db_port_settings: SwitchPortSettings = - diesel::insert_into(port_settings_dsl::switch_port_settings) - .values(port_settings) - .returning(SwitchPortSettings::as_returning()) - .get_result_async(&conn) - .await?; - - let psid = db_port_settings.identity.id; - - // add the port config - let port_config = SwitchPortConfig::new( - psid, - params.port_config.geometry.into(), - ); - - let db_port_config: SwitchPortConfig = - diesel::insert_into(port_config_dsl::switch_port_settings_port_config) - .values(port_config) - .returning(SwitchPortConfig::as_returning()) - .get_result_async(&conn) - .await?; - - let mut result = SwitchPortSettingsCombinedResult{ - settings: db_port_settings, - groups: Vec::new(), - port: db_port_config, - links: Vec::new(), - link_lldp: Vec::new(), - interfaces: Vec::new(), - vlan_interfaces: Vec::new(), - routes: Vec::new(), - bgp_peers: Vec::new(), - addresses: Vec::new(), - }; - - //TODO validate link configs consistent with port geometry. - // - https://github.com/oxidecomputer/omicron/issues/2816 - - let mut lldp_config = Vec::with_capacity(params.links.len()); - let mut link_config = Vec::with_capacity(params.links.len()); - - for (link_name, c) in ¶ms.links { - let lldp_config_id = match c.lldp.lldp_config { - Some(_) => todo!(), // TODO actual lldp support - None => None, - }; - let lldp_svc_config = - LldpServiceConfig::new(c.lldp.enabled, lldp_config_id); - - lldp_config.push(lldp_svc_config.clone()); - link_config.push(SwitchPortLinkConfig::new( - psid, - lldp_svc_config.id, - link_name.clone(), - c.mtu, - c.fec.into(), - c.speed.into(), - )); - } - result.link_lldp = - diesel::insert_into(lldp_config_dsl::lldp_service_config) - .values(lldp_config.clone()) - .returning(LldpServiceConfig::as_returning()) - .get_results_async(&conn) - .await?; - result.links = - diesel::insert_into( - link_config_dsl::switch_port_settings_link_config) - .values(link_config) - .returning(SwitchPortLinkConfig::as_returning()) - .get_results_async(&conn) - .await?; - - let mut interface_config = Vec::with_capacity(params.interfaces.len()); - let mut vlan_interface_config = Vec::new(); - for (interface_name, i) in ¶ms.interfaces { - let ifx_config = SwitchInterfaceConfig::new( - psid, - interface_name.clone(), - i.v6_enabled, - i.kind.into(), - ); - interface_config.push(ifx_config.clone()); - if let params::SwitchInterfaceKind::Vlan(vlan_if) = i.kind { - vlan_interface_config.push(SwitchVlanInterfaceConfig::new( - ifx_config.id, - vlan_if.vid, - )); - } - } - result.interfaces = - diesel::insert_into( - interface_config_dsl::switch_port_settings_interface_config) - .values(interface_config) - .returning(SwitchInterfaceConfig::as_returning()) - .get_results_async(&conn) - .await?; - result.vlan_interfaces = - 
diesel::insert_into(vlan_config_dsl::switch_vlan_interface_config) - .values(vlan_interface_config) - .returning(SwitchVlanInterfaceConfig::as_returning()) - .get_results_async(&conn) - .await?; - - - let mut route_config = Vec::with_capacity(params.routes.len()); - - for (interface_name, r) in ¶ms.routes { - for route in &r.routes { - route_config.push(SwitchPortRouteConfig::new( + self.transaction_retry_wrapper("switch_port_settings_create") + .transaction(&conn, |conn| { + let err = err.clone(); + async move { + // create the top level port settings object + let port_settings = match id { + Some(id) => SwitchPortSettings::with_id(id, ¶ms.identity), + None => SwitchPortSettings::new(¶ms.identity), + }; + //let port_settings = SwitchPortSettings::new(¶ms.identity); + let db_port_settings: SwitchPortSettings = + diesel::insert_into(port_settings_dsl::switch_port_settings) + .values(port_settings) + .returning(SwitchPortSettings::as_returning()) + .get_result_async(&conn) + .await?; + + let psid = db_port_settings.identity.id; + + // add the port config + let port_config = SwitchPortConfig::new( psid, - interface_name.clone(), - route.dst.into(), - route.gw.into(), - route.vid.map(Into::into), - )); - } - } - result.routes = - diesel::insert_into( - route_config_dsl::switch_port_settings_route_config) - .values(route_config) - .returning(SwitchPortRouteConfig::as_returning()) - .get_results_async(&conn) - .await?; + params.port_config.geometry.into(), + ); + + let db_port_config: SwitchPortConfig = + diesel::insert_into(port_config_dsl::switch_port_settings_port_config) + .values(port_config) + .returning(SwitchPortConfig::as_returning()) + .get_result_async(&conn) + .await?; + + let mut result = SwitchPortSettingsCombinedResult{ + settings: db_port_settings, + groups: Vec::new(), + port: db_port_config, + links: Vec::new(), + link_lldp: Vec::new(), + interfaces: Vec::new(), + vlan_interfaces: Vec::new(), + routes: Vec::new(), + bgp_peers: Vec::new(), + addresses: Vec::new(), + }; - let mut bgp_peer_config = Vec::new(); - for (interface_name, p) in ¶ms.bgp_peers { - use db::schema::bgp_config; - let bgp_config_id = match &p.bgp_config { - NameOrId::Id(id) => *id, - NameOrId::Name(name) => { - let name = name.to_string(); - bgp_config_dsl::bgp_config - .filter(bgp_config::time_deleted.is_null()) - .filter(bgp_config::name.eq(name)) - .select(bgp_config::id) - .limit(1) - .first_async::(&conn) - .await - .map_err(|_| - TxnError::CustomError( - SwitchPortSettingsCreateError::BgpConfigNotFound, - ) - )? + //TODO validate link configs consistent with port geometry. 
+ // - https://github.com/oxidecomputer/omicron/issues/2816 + + let mut lldp_config = Vec::with_capacity(params.links.len()); + let mut link_config = Vec::with_capacity(params.links.len()); + + for (link_name, c) in ¶ms.links { + let lldp_config_id = match c.lldp.lldp_config { + Some(_) => todo!(), // TODO actual lldp support + None => None, + }; + let lldp_svc_config = + LldpServiceConfig::new(c.lldp.enabled, lldp_config_id); + + lldp_config.push(lldp_svc_config.clone()); + link_config.push(SwitchPortLinkConfig::new( + psid, + lldp_svc_config.id, + link_name.clone(), + c.mtu, + c.fec.into(), + c.speed.into(), + c.autoneg, + )); } - }; - - bgp_peer_config.push(SwitchPortBgpPeerConfig::new( - psid, - bgp_config_id, - interface_name.clone(), - p.addr.into(), - p.hold_time.into(), - p.idle_hold_time.into(), - p.delay_open.into(), - p.connect_retry.into(), - p.keepalive.into(), - )); - - } - result.bgp_peers = - diesel::insert_into( - bgp_peer_dsl::switch_port_settings_bgp_peer_config) - .values(bgp_peer_config) - .returning(SwitchPortBgpPeerConfig::as_returning()) - .get_results_async(&conn) - .await?; + result.link_lldp = + diesel::insert_into(lldp_config_dsl::lldp_service_config) + .values(lldp_config.clone()) + .returning(LldpServiceConfig::as_returning()) + .get_results_async(&conn) + .await?; + result.links = + diesel::insert_into( + link_config_dsl::switch_port_settings_link_config) + .values(link_config) + .returning(SwitchPortLinkConfig::as_returning()) + .get_results_async(&conn) + .await?; + + let mut interface_config = Vec::with_capacity(params.interfaces.len()); + let mut vlan_interface_config = Vec::new(); + for (interface_name, i) in ¶ms.interfaces { + let ifx_config = SwitchInterfaceConfig::new( + psid, + interface_name.clone(), + i.v6_enabled, + i.kind.into(), + ); + interface_config.push(ifx_config.clone()); + if let params::SwitchInterfaceKind::Vlan(vlan_if) = i.kind { + vlan_interface_config.push(SwitchVlanInterfaceConfig::new( + ifx_config.id, + vlan_if.vid, + )); + } + } + result.interfaces = + diesel::insert_into( + interface_config_dsl::switch_port_settings_interface_config) + .values(interface_config) + .returning(SwitchInterfaceConfig::as_returning()) + .get_results_async(&conn) + .await?; + result.vlan_interfaces = + diesel::insert_into(vlan_config_dsl::switch_vlan_interface_config) + .values(vlan_interface_config) + .returning(SwitchVlanInterfaceConfig::as_returning()) + .get_results_async(&conn) + .await?; + + let mut route_config = Vec::with_capacity(params.routes.len()); + + for (interface_name, r) in ¶ms.routes { + for route in &r.routes { + route_config.push(SwitchPortRouteConfig::new( + psid, + interface_name.clone(), + route.dst.into(), + route.gw.into(), + route.vid.map(Into::into), + )); + } + } + result.routes = + diesel::insert_into( + route_config_dsl::switch_port_settings_route_config) + .values(route_config) + .returning(SwitchPortRouteConfig::as_returning()) + .get_results_async(&conn) + .await?; + + let mut bgp_peer_config = Vec::new(); + for (interface_name, peer_config) in ¶ms.bgp_peers { + for p in &peer_config.peers { + use db::schema::bgp_config; + let bgp_config_id = match &p.bgp_config { + NameOrId::Id(id) => *id, + NameOrId::Name(name) => { + let name = name.to_string(); + bgp_config_dsl::bgp_config + .filter(bgp_config::time_deleted.is_null()) + .filter(bgp_config::name.eq(name)) + .select(bgp_config::id) + .limit(1) + .first_async::(&conn) + .await + .map_err(|diesel_error| { + err.bail_retryable_or( + diesel_error, + 
SwitchPortSettingsCreateError::BgpConfigNotFound + ) + })? + } + }; + + bgp_peer_config.push(SwitchPortBgpPeerConfig::new( + psid, + bgp_config_id, + interface_name.clone(), + p.addr.into(), + p.hold_time.into(), + p.idle_hold_time.into(), + p.delay_open.into(), + p.connect_retry.into(), + p.keepalive.into(), + )); - let mut address_config = Vec::new(); - use db::schema::address_lot; - for (interface_name, a) in ¶ms.addresses { - for address in &a.addresses { - let address_lot_id = match &address.address_lot { - NameOrId::Id(id) => *id, - NameOrId::Name(name) => { - let name = name.to_string(); - address_lot_dsl::address_lot - .filter(address_lot::time_deleted.is_null()) - .filter(address_lot::name.eq(name)) - .select(address_lot::id) - .limit(1) - .first_async::(&conn) - .await - .map_err(|_| - TxnError::CustomError( - SwitchPortSettingsCreateError::AddressLotNotFound, - ) - )? } - }; - // TODO: Reduce DB round trips needed for reserving ip blocks - // https://github.com/oxidecomputer/omicron/issues/3060 - let (block, rsvd_block) = - crate::db::datastore::address_lot::try_reserve_block( - address_lot_id, - address.address.ip().into(), - // TODO: Should we allow anycast addresses for switch_ports? - // anycast - false, - &conn - ) - .await - .map_err(|e| match e { - ReserveBlockTxnError::CustomError(err) => { - TxnError::CustomError( - SwitchPortSettingsCreateError::ReserveBlock(err) + } + result.bgp_peers = + diesel::insert_into( + bgp_peer_dsl::switch_port_settings_bgp_peer_config) + .values(bgp_peer_config) + .returning(SwitchPortBgpPeerConfig::as_returning()) + .get_results_async(&conn) + .await?; + + let mut address_config = Vec::new(); + use db::schema::address_lot; + for (interface_name, a) in ¶ms.addresses { + for address in &a.addresses { + let address_lot_id = match &address.address_lot { + NameOrId::Id(id) => *id, + NameOrId::Name(name) => { + let name = name.to_string(); + address_lot_dsl::address_lot + .filter(address_lot::time_deleted.is_null()) + .filter(address_lot::name.eq(name)) + .select(address_lot::id) + .limit(1) + .first_async::(&conn) + .await + .map_err(|diesel_error| { + err.bail_retryable_or( + diesel_error, + SwitchPortSettingsCreateError::AddressLotNotFound + ) + })? + } + }; + // TODO: Reduce DB round trips needed for reserving ip blocks + // https://github.com/oxidecomputer/omicron/issues/3060 + let (block, rsvd_block) = + crate::db::datastore::address_lot::try_reserve_block( + address_lot_id, + address.address.ip().into(), + // TODO: Should we allow anycast addresses for switch_ports? 
+ // anycast + false, + &conn ) - } - ReserveBlockTxnError::Database(err) => TxnError::Database(err), - })?; - - address_config.push(SwitchPortAddressConfig::new( - psid, - block.id, - rsvd_block.id, - address.address.into(), - interface_name.clone(), - )); + .await + .map_err(|e| match e { + ReserveBlockTxnError::CustomError(e) => { + err.bail(SwitchPortSettingsCreateError::ReserveBlock(e)) + } + ReserveBlockTxnError::Database(e) => e, + })?; + + address_config.push(SwitchPortAddressConfig::new( + psid, + block.id, + rsvd_block.id, + address.address.into(), + interface_name.clone(), + )); + } + } + result.addresses = + diesel::insert_into( + address_config_dsl::switch_port_settings_address_config) + .values(address_config) + .returning(SwitchPortAddressConfig::as_returning()) + .get_results_async(&conn) + .await?; + + Ok(result) } } - result.addresses = - diesel::insert_into( - address_config_dsl::switch_port_settings_address_config) - .values(address_config) - .returning(SwitchPortAddressConfig::as_returning()) - .get_results_async(&conn) - .await?; - - Ok(result) - }) + ) .await - .map_err(|e| match e { - TxnError::CustomError(SpsCreateError::AddressLotNotFound) => { - Error::invalid_request("AddressLot not found") - } - TxnError::CustomError(SpsCreateError::BgpConfigNotFound) => { - Error::invalid_request("BGP config not found") - } - TxnError::CustomError( - SwitchPortSettingsCreateError::ReserveBlock( - ReserveBlockError::AddressUnavailable - ) - ) => Error::invalid_request("address unavailable"), - TxnError::CustomError( - SwitchPortSettingsCreateError::ReserveBlock( - ReserveBlockError::AddressNotInLot - ) - ) => Error::invalid_request("address not in lot"), - TxnError::Database(e) => match e { - DieselError::DatabaseError(_, _) => public_error_from_diesel( + .map_err(|e| { + if let Some(err) = err.take() { + match err { + SpsCreateError::AddressLotNotFound => { + Error::invalid_request("AddressLot not found") + } + SpsCreateError::BgpConfigNotFound => { + Error::invalid_request("BGP config not found") + } + SwitchPortSettingsCreateError::ReserveBlock( + ReserveBlockError::AddressUnavailable + ) => Error::invalid_request("address unavailable"), + SwitchPortSettingsCreateError::ReserveBlock( + ReserveBlockError::AddressNotInLot + ) => Error::invalid_request("address not in lot"), + } + } else { + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SwitchPortSettings, params.identity.name.as_str(), ), - ), - _ => public_error_from_diesel(e, ErrorHandler::Server), - }, + ) + } }) } @@ -451,7 +455,6 @@ impl DataStore { enum SwitchPortSettingsDeleteError { SwitchPortSettingsNotFound, } - type TxnError = TransactionError; let conn = self.pool_connection_authorized(opctx).await?; @@ -460,173 +463,178 @@ impl DataStore { Some(name_or_id) => name_or_id, }; + let err = OptionalError::new(); + // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - conn.transaction_async(|conn| async move { - - use db::schema::switch_port_settings; - let id = match selector { - NameOrId::Id(id) => *id, - NameOrId::Name(name) => { - let name = name.to_string(); - port_settings_dsl::switch_port_settings - .filter(switch_port_settings::time_deleted.is_null()) - .filter(switch_port_settings::name.eq(name)) - .select(switch_port_settings::id) - .limit(1) - .first_async::(&conn) - .await - .map_err(|_| - TxnError::CustomError( - SwitchPortSettingsDeleteError::SwitchPortSettingsNotFound, - ) - )? 
- } - }; + self.transaction_retry_wrapper("switch_port_settings_delete") + .transaction(&conn, |conn| { + let err = err.clone(); + async move { + use db::schema::switch_port_settings; + let id = match selector { + NameOrId::Id(id) => *id, + NameOrId::Name(name) => { + let name = name.to_string(); + port_settings_dsl::switch_port_settings + .filter(switch_port_settings::time_deleted.is_null()) + .filter(switch_port_settings::name.eq(name)) + .select(switch_port_settings::id) + .limit(1) + .first_async::(&conn) + .await + .map_err(|diesel_error| { + err.bail_retryable_or( + diesel_error, + SwitchPortSettingsDeleteError::SwitchPortSettingsNotFound + ) + })? + } + }; - // delete the top level port settings object - diesel::delete(port_settings_dsl::switch_port_settings) - .filter(switch_port_settings::id.eq(id)) - .execute_async(&conn) - .await?; + // delete the top level port settings object + diesel::delete(port_settings_dsl::switch_port_settings) + .filter(switch_port_settings::id.eq(id)) + .execute_async(&conn) + .await?; - // delete the port config object - use db::schema::switch_port_settings_port_config::{ - self as sps_port_config, dsl as port_config_dsl, - }; - diesel::delete(port_config_dsl::switch_port_settings_port_config) - .filter(sps_port_config::port_settings_id.eq(id)) - .execute_async(&conn) - .await?; + // delete the port config object + use db::schema::switch_port_settings_port_config::{ + self as sps_port_config, dsl as port_config_dsl, + }; + diesel::delete(port_config_dsl::switch_port_settings_port_config) + .filter(sps_port_config::port_settings_id.eq(id)) + .execute_async(&conn) + .await?; - // delete the link configs - use db::schema::switch_port_settings_link_config::{ - self as sps_link_config, dsl as link_config_dsl, - }; - let links: Vec = - diesel::delete( - link_config_dsl::switch_port_settings_link_config - ) - .filter( - sps_link_config::port_settings_id.eq(id) - ) - .returning(SwitchPortLinkConfig::as_returning()) - .get_results_async(&conn) - .await?; + // delete the link configs + use db::schema::switch_port_settings_link_config::{ + self as sps_link_config, dsl as link_config_dsl, + }; + let links: Vec = + diesel::delete( + link_config_dsl::switch_port_settings_link_config + ) + .filter( + sps_link_config::port_settings_id.eq(id) + ) + .returning(SwitchPortLinkConfig::as_returning()) + .get_results_async(&conn) + .await?; - // delete lldp configs - use db::schema::lldp_service_config::{self, dsl as lldp_config_dsl}; - let lldp_svc_ids: Vec = links - .iter() - .map(|link| link.lldp_service_config_id) - .collect(); - diesel::delete(lldp_config_dsl::lldp_service_config) - .filter(lldp_service_config::id.eq_any(lldp_svc_ids)) - .execute_async(&conn) - .await?; + // delete lldp configs + use db::schema::lldp_service_config::{self, dsl as lldp_config_dsl}; + let lldp_svc_ids: Vec = links + .iter() + .map(|link| link.lldp_service_config_id) + .collect(); + diesel::delete(lldp_config_dsl::lldp_service_config) + .filter(lldp_service_config::id.eq_any(lldp_svc_ids)) + .execute_async(&conn) + .await?; - // delete interface configs - use db::schema::switch_port_settings_interface_config::{ - self as sps_interface_config, dsl as interface_config_dsl, - }; + // delete interface configs + use db::schema::switch_port_settings_interface_config::{ + self as sps_interface_config, dsl as interface_config_dsl, + }; - let interfaces: Vec = - diesel::delete( - interface_config_dsl::switch_port_settings_interface_config - ) - .filter( - 
sps_interface_config::port_settings_id.eq(id) - ) - .returning(SwitchInterfaceConfig::as_returning()) - .get_results_async(&conn) - .await?; + let interfaces: Vec = + diesel::delete( + interface_config_dsl::switch_port_settings_interface_config + ) + .filter( + sps_interface_config::port_settings_id.eq(id) + ) + .returning(SwitchInterfaceConfig::as_returning()) + .get_results_async(&conn) + .await?; - // delete any vlan interfaces - use db::schema::switch_vlan_interface_config::{ - self, dsl as vlan_config_dsl, - }; - let interface_ids: Vec = interfaces - .iter() - .map(|interface| interface.id) - .collect(); - - diesel::delete(vlan_config_dsl::switch_vlan_interface_config) - .filter( - switch_vlan_interface_config::interface_config_id.eq_any( - interface_ids + // delete any vlan interfaces + use db::schema::switch_vlan_interface_config::{ + self, dsl as vlan_config_dsl, + }; + let interface_ids: Vec = interfaces + .iter() + .map(|interface| interface.id) + .collect(); + + diesel::delete(vlan_config_dsl::switch_vlan_interface_config) + .filter( + switch_vlan_interface_config::interface_config_id.eq_any( + interface_ids + ) ) + .execute_async(&conn) + .await?; + + // delete route configs + use db::schema::switch_port_settings_route_config; + use db::schema::switch_port_settings_route_config::dsl + as route_config_dsl; + + diesel::delete( + route_config_dsl::switch_port_settings_route_config ) + .filter(switch_port_settings_route_config::port_settings_id.eq(id)) .execute_async(&conn) .await?; - // delete route configs - use db::schema::switch_port_settings_route_config; - use db::schema::switch_port_settings_route_config::dsl - as route_config_dsl; + // delete bgp configurations + use db::schema::switch_port_settings_bgp_peer_config as bgp_peer; + use db::schema::switch_port_settings_bgp_peer_config::dsl + as bgp_peer_dsl; - diesel::delete( - route_config_dsl::switch_port_settings_route_config - ) - .filter(switch_port_settings_route_config::port_settings_id.eq(id)) - .execute_async(&conn) - .await?; + diesel::delete(bgp_peer_dsl::switch_port_settings_bgp_peer_config) + .filter(bgp_peer::port_settings_id.eq(id)) + .execute_async(&conn) + .await?; - // delete bgp configurations - use db::schema::switch_port_settings_bgp_peer_config as bgp_peer; - use db::schema::switch_port_settings_bgp_peer_config::dsl - as bgp_peer_dsl; + // delete address configs + use db::schema::switch_port_settings_address_config::{ + self as address_config, dsl as address_config_dsl, + }; - diesel::delete(bgp_peer_dsl::switch_port_settings_bgp_peer_config) - .filter(bgp_peer::port_settings_id.eq(id)) - .execute_async(&conn) + let port_settings_addrs = diesel::delete( + address_config_dsl::switch_port_settings_address_config, + ) + .filter(address_config::port_settings_id.eq(id)) + .returning(SwitchPortAddressConfig::as_returning()) + .get_results_async(&conn) .await?; - // delete address configs - use db::schema::switch_port_settings_address_config::{ - self as address_config, dsl as address_config_dsl, - }; - - let port_settings_addrs = diesel::delete( - address_config_dsl::switch_port_settings_address_config, - ) - .filter(address_config::port_settings_id.eq(id)) - .returning(SwitchPortAddressConfig::as_returning()) - .get_results_async(&conn) - .await?; + use db::schema::address_lot_rsvd_block::dsl as rsvd_block_dsl; - use db::schema::address_lot_rsvd_block::dsl as rsvd_block_dsl; + for ps in &port_settings_addrs { + diesel::delete(rsvd_block_dsl::address_lot_rsvd_block) + 
.filter(rsvd_block_dsl::id.eq(ps.rsvd_address_lot_block_id)) + .execute_async(&conn) + .await?; + } - for ps in &port_settings_addrs { - diesel::delete(rsvd_block_dsl::address_lot_rsvd_block) - .filter(rsvd_block_dsl::id.eq(ps.rsvd_address_lot_block_id)) - .execute_async(&conn) - .await?; + Ok(()) } - - Ok(()) }) .await - .map_err(|e| match e { - TxnError::CustomError( - SwitchPortSettingsDeleteError::SwitchPortSettingsNotFound) => { - Error::invalid_request("port settings not found") + .map_err(|e| { + if let Some(err) = err.take() { + match err { + SwitchPortSettingsDeleteError::SwitchPortSettingsNotFound => { + Error::invalid_request("port settings not found") + } + } + } else { + let name = match ¶ms.port_settings { + Some(name_or_id) => name_or_id.to_string(), + None => String::new(), + }; + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::SwitchPortSettings, + &name, + ), + ) } - TxnError::Database(e) => match e { - DieselError::DatabaseError(_, _) => { - let name = match ¶ms.port_settings { - Some(name_or_id) => name_or_id.to_string(), - None => String::new(), - }; - public_error_from_diesel( - e, - ErrorHandler::Conflict( - ResourceType::SwitchPortSettings, - &name, - ), - ) - }, - _ => public_error_from_diesel(e, ErrorHandler::Server), - }, }) } @@ -663,174 +671,178 @@ impl DataStore { enum SwitchPortSettingsGetError { NotFound(external::Name), } - type TxnError = TransactionError; + let err = OptionalError::new(); let conn = self.pool_connection_authorized(opctx).await?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - conn.transaction_async(|conn| async move { - // get the top level port settings object - use db::schema::switch_port_settings::{ - self, dsl as port_settings_dsl, - }; - - let id = match name_or_id { - NameOrId::Id(id) => *id, - NameOrId::Name(name) => { - let name_str = name.to_string(); + self.transaction_retry_wrapper("switch_port_settings_get") + .transaction(&conn, |conn| { + let err = err.clone(); + async move { + // get the top level port settings object + use db::schema::switch_port_settings::{ + self, dsl as port_settings_dsl, + }; + + let id = match name_or_id { + NameOrId::Id(id) => *id, + NameOrId::Name(name) => { + let name_str = name.to_string(); + port_settings_dsl::switch_port_settings + .filter(switch_port_settings::time_deleted.is_null()) + .filter(switch_port_settings::name.eq(name_str)) + .select(switch_port_settings::id) + .limit(1) + .first_async::(&conn) + .await + .map_err(|diesel_error| { + err.bail_retryable_or_else(diesel_error, |_| { + SwitchPortSettingsGetError::NotFound( + name.clone(), + ) + }) + })? + } + }; + + let settings: SwitchPortSettings = port_settings_dsl::switch_port_settings .filter(switch_port_settings::time_deleted.is_null()) - .filter(switch_port_settings::name.eq(name_str)) - .select(switch_port_settings::id) + .filter(switch_port_settings::id.eq(id)) + .select(SwitchPortSettings::as_select()) .limit(1) - .first_async::(&conn) - .await - .map_err(|_| { - TxnError::CustomError( - SwitchPortSettingsGetError::NotFound( - name.clone(), - ), - ) - })? 
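One note on the delete path above: reserved address-lot blocks are released with one DELETE per row. If that loop ever matters, a batched form using the same identifiers would look roughly like this (untested sketch; `rsvd_block_dsl` and `port_settings_addrs` as in the surrounding code):

let rsvd_ids: Vec<Uuid> = port_settings_addrs
    .iter()
    .map(|ps| ps.rsvd_address_lot_block_id)
    .collect();
// One statement releases every block reserved by this settings object.
diesel::delete(rsvd_block_dsl::address_lot_rsvd_block)
    .filter(rsvd_block_dsl::id.eq_any(rsvd_ids))
    .execute_async(&conn)
    .await?;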
- } - }; + .first_async::(&conn) + .await?; - let settings: SwitchPortSettings = - port_settings_dsl::switch_port_settings - .filter(switch_port_settings::time_deleted.is_null()) - .filter(switch_port_settings::id.eq(id)) - .select(SwitchPortSettings::as_select()) - .limit(1) - .first_async::(&conn) - .await?; - - // get the port config - use db::schema::switch_port_settings_port_config::{ - self as port_config, dsl as port_config_dsl, - }; - let port: SwitchPortConfig = - port_config_dsl::switch_port_settings_port_config - .filter(port_config::port_settings_id.eq(id)) - .select(SwitchPortConfig::as_select()) - .limit(1) - .first_async::(&conn) - .await?; + // get the port config + use db::schema::switch_port_settings_port_config::{ + self as port_config, dsl as port_config_dsl, + }; + let port: SwitchPortConfig = + port_config_dsl::switch_port_settings_port_config + .filter(port_config::port_settings_id.eq(id)) + .select(SwitchPortConfig::as_select()) + .limit(1) + .first_async::(&conn) + .await?; - // initialize result - let mut result = - SwitchPortSettingsCombinedResult::new(settings, port); + // initialize result + let mut result = + SwitchPortSettingsCombinedResult::new(settings, port); - // get the link configs - use db::schema::switch_port_settings_link_config::{ - self as link_config, dsl as link_config_dsl, - }; + // get the link configs + use db::schema::switch_port_settings_link_config::{ + self as link_config, dsl as link_config_dsl, + }; - result.links = link_config_dsl::switch_port_settings_link_config - .filter(link_config::port_settings_id.eq(id)) - .select(SwitchPortLinkConfig::as_select()) - .load_async::(&conn) - .await?; + result.links = link_config_dsl::switch_port_settings_link_config + .filter(link_config::port_settings_id.eq(id)) + .select(SwitchPortLinkConfig::as_select()) + .load_async::(&conn) + .await?; - let lldp_svc_ids: Vec = result - .links - .iter() - .map(|link| link.lldp_service_config_id) - .collect(); - - use db::schema::lldp_service_config as lldp_config; - use db::schema::lldp_service_config::dsl as lldp_dsl; - result.link_lldp = lldp_dsl::lldp_service_config - .filter(lldp_config::id.eq_any(lldp_svc_ids)) - .select(LldpServiceConfig::as_select()) - .limit(1) - .load_async::(&conn) - .await?; + let lldp_svc_ids: Vec = result + .links + .iter() + .map(|link| link.lldp_service_config_id) + .collect(); + + use db::schema::lldp_service_config as lldp_config; + use db::schema::lldp_service_config::dsl as lldp_dsl; + result.link_lldp = lldp_dsl::lldp_service_config + .filter(lldp_config::id.eq_any(lldp_svc_ids)) + .select(LldpServiceConfig::as_select()) + .limit(1) + .load_async::(&conn) + .await?; - // get the interface configs - use db::schema::switch_port_settings_interface_config::{ - self as interface_config, dsl as interface_config_dsl, - }; + // get the interface configs + use db::schema::switch_port_settings_interface_config::{ + self as interface_config, dsl as interface_config_dsl, + }; - result.interfaces = - interface_config_dsl::switch_port_settings_interface_config - .filter(interface_config::port_settings_id.eq(id)) - .select(SwitchInterfaceConfig::as_select()) - .load_async::(&conn) + result.interfaces = + interface_config_dsl::switch_port_settings_interface_config + .filter(interface_config::port_settings_id.eq(id)) + .select(SwitchInterfaceConfig::as_select()) + .load_async::(&conn) + .await?; + + use db::schema::switch_vlan_interface_config as vlan_config; + use db::schema::switch_vlan_interface_config::dsl as vlan_dsl; + let 
interface_ids: Vec = result + .interfaces + .iter() + .map(|interface| interface.id) + .collect(); + + result.vlan_interfaces = vlan_dsl::switch_vlan_interface_config + .filter(vlan_config::interface_config_id.eq_any(interface_ids)) + .select(SwitchVlanInterfaceConfig::as_select()) + .load_async::(&conn) .await?; - use db::schema::switch_vlan_interface_config as vlan_config; - use db::schema::switch_vlan_interface_config::dsl as vlan_dsl; - let interface_ids: Vec = result - .interfaces - .iter() - .map(|interface| interface.id) - .collect(); - - result.vlan_interfaces = vlan_dsl::switch_vlan_interface_config - .filter(vlan_config::interface_config_id.eq_any(interface_ids)) - .select(SwitchVlanInterfaceConfig::as_select()) - .load_async::(&conn) - .await?; - - // get the route configs - use db::schema::switch_port_settings_route_config::{ - self as route_config, dsl as route_config_dsl, - }; + // get the route configs + use db::schema::switch_port_settings_route_config::{ + self as route_config, dsl as route_config_dsl, + }; - result.routes = route_config_dsl::switch_port_settings_route_config - .filter(route_config::port_settings_id.eq(id)) - .select(SwitchPortRouteConfig::as_select()) - .load_async::(&conn) - .await?; + result.routes = route_config_dsl::switch_port_settings_route_config + .filter(route_config::port_settings_id.eq(id)) + .select(SwitchPortRouteConfig::as_select()) + .load_async::(&conn) + .await?; - // get the bgp peer configs - use db::schema::switch_port_settings_bgp_peer_config::{ - self as bgp_peer, dsl as bgp_peer_dsl, - }; + // get the bgp peer configs + use db::schema::switch_port_settings_bgp_peer_config::{ + self as bgp_peer, dsl as bgp_peer_dsl, + }; - result.bgp_peers = - bgp_peer_dsl::switch_port_settings_bgp_peer_config - .filter(bgp_peer::port_settings_id.eq(id)) - .select(SwitchPortBgpPeerConfig::as_select()) - .load_async::(&conn) - .await?; + result.bgp_peers = + bgp_peer_dsl::switch_port_settings_bgp_peer_config + .filter(bgp_peer::port_settings_id.eq(id)) + .select(SwitchPortBgpPeerConfig::as_select()) + .load_async::(&conn) + .await?; - // get the address configs - use db::schema::switch_port_settings_address_config::{ - self as address_config, dsl as address_config_dsl, - }; + // get the address configs + use db::schema::switch_port_settings_address_config::{ + self as address_config, dsl as address_config_dsl, + }; - result.addresses = - address_config_dsl::switch_port_settings_address_config - .filter(address_config::port_settings_id.eq(id)) - .select(SwitchPortAddressConfig::as_select()) - .load_async::(&conn) - .await?; + result.addresses = + address_config_dsl::switch_port_settings_address_config + .filter(address_config::port_settings_id.eq(id)) + .select(SwitchPortAddressConfig::as_select()) + .load_async::(&conn) + .await?; - Ok(result) + Ok(result) + } }) .await - .map_err(|e| match e { - TxnError::CustomError(SwitchPortSettingsGetError::NotFound( - name, - )) => Error::not_found_by_name( - ResourceType::SwitchPortSettings, - &name, - ), - TxnError::Database(e) => match e { - DieselError::DatabaseError(_, _) => { - let name = name_or_id.to_string(); - public_error_from_diesel( - e, - ErrorHandler::Conflict( + .map_err(|e| { + if let Some(err) = err.take() { + match err { + SwitchPortSettingsGetError::NotFound(name) => { + Error::not_found_by_name( ResourceType::SwitchPortSettings, &name, - ), - ) + ) + } } - _ => public_error_from_diesel(e, ErrorHandler::Server), - }, + } else { + let name = name_or_id.to_string(); + public_error_from_diesel( 
+ e, + ErrorHandler::Conflict( + ResourceType::SwitchPortSettings, + &name, + ), + ) + } }) } @@ -847,7 +859,8 @@ impl DataStore { enum SwitchPortCreateError { RackNotFound, } - type TxnError = TransactionError; + + let err = OptionalError::new(); let conn = self.pool_connection_authorized(opctx).await?; let switch_port = SwitchPort::new( @@ -858,46 +871,59 @@ impl DataStore { // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - conn.transaction_async(|conn| async move { - use db::schema::rack; - use db::schema::rack::dsl as rack_dsl; - rack_dsl::rack - .filter(rack::id.eq(rack_id)) - .select(rack::id) - .limit(1) - .first_async::(&conn) - .await - .map_err(|_| { - TxnError::CustomError(SwitchPortCreateError::RackNotFound) - })?; - - // insert switch port - use db::schema::switch_port::dsl as switch_port_dsl; - let db_switch_port: SwitchPort = - diesel::insert_into(switch_port_dsl::switch_port) - .values(switch_port) - .returning(SwitchPort::as_returning()) - .get_result_async(&conn) - .await?; + self.transaction_retry_wrapper("switch_port_create") + .transaction(&conn, |conn| { + let err = err.clone(); + let switch_port = switch_port.clone(); + async move { + use db::schema::rack; + use db::schema::rack::dsl as rack_dsl; + rack_dsl::rack + .filter(rack::id.eq(rack_id)) + .select(rack::id) + .limit(1) + .first_async::(&conn) + .await + .map_err(|e| { + err.bail_retryable_or( + e, + SwitchPortCreateError::RackNotFound, + ) + })?; - Ok(db_switch_port) - }) - .await - .map_err(|e| match e { - TxnError::CustomError(SwitchPortCreateError::RackNotFound) => { - Error::invalid_request("rack not found") - } - TxnError::Database(e) => match e { - DieselError::DatabaseError(_, _) => public_error_from_diesel( - e, - ErrorHandler::Conflict( - ResourceType::SwitchPort, - &format!("{}/{}/{}", rack_id, &switch_location, &port,), - ), - ), - _ => public_error_from_diesel(e, ErrorHandler::Server), - }, - }) + // insert switch port + use db::schema::switch_port::dsl as switch_port_dsl; + let db_switch_port: SwitchPort = + diesel::insert_into(switch_port_dsl::switch_port) + .values(switch_port) + .returning(SwitchPort::as_returning()) + .get_result_async(&conn) + .await?; + + Ok(db_switch_port) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + match err { + SwitchPortCreateError::RackNotFound => { + Error::invalid_request("rack not found") + } + } + } else { + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::SwitchPort, + &format!( + "{}/{}/{}", + rack_id, &switch_location, &port, + ), + ), + ) + } + }) } pub async fn switch_port_delete( @@ -911,58 +937,75 @@ impl DataStore { NotFound, ActiveSettings, } - type TxnError = TransactionError; + + let err = OptionalError::new(); let conn = self.pool_connection_authorized(opctx).await?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - conn.transaction_async(|conn| async move { - use db::schema::switch_port; - use db::schema::switch_port::dsl as switch_port_dsl; - - let switch_location = params.switch_location.to_string(); - let port_name = portname.to_string(); - let port: SwitchPort = switch_port_dsl::switch_port - .filter(switch_port::rack_id.eq(params.rack_id)) - .filter( - switch_port::switch_location.eq(switch_location.clone()), - ) - .filter(switch_port::port_name.eq(port_name.clone())) - .select(SwitchPort::as_select()) - .limit(1) - .first_async::(&conn) - .await - .map_err(|_| { 
- TxnError::CustomError(SwitchPortDeleteError::NotFound) - })?; - - if port.port_settings_id.is_some() { - return Err(TxnError::CustomError( - SwitchPortDeleteError::ActiveSettings, - )); - } + self.transaction_retry_wrapper("switch_port_delete") + .transaction(&conn, |conn| { + let err = err.clone(); + async move { + use db::schema::switch_port; + use db::schema::switch_port::dsl as switch_port_dsl; + + let switch_location = params.switch_location.to_string(); + let port_name = portname.to_string(); + let port: SwitchPort = switch_port_dsl::switch_port + .filter(switch_port::rack_id.eq(params.rack_id)) + .filter( + switch_port::switch_location + .eq(switch_location.clone()), + ) + .filter(switch_port::port_name.eq(port_name.clone())) + .select(SwitchPort::as_select()) + .limit(1) + .first_async::(&conn) + .await + .map_err(|diesel_error| { + err.bail_retryable_or( + diesel_error, + SwitchPortDeleteError::NotFound, + ) + })?; - diesel::delete(switch_port_dsl::switch_port) - .filter(switch_port::id.eq(port.id)) - .execute_async(&conn) - .await?; + if port.port_settings_id.is_some() { + return Err( + err.bail(SwitchPortDeleteError::ActiveSettings) + ); + } - Ok(()) - }) - .await - .map_err(|e| match e { - TxnError::CustomError(SwitchPortDeleteError::NotFound) => { - let name = &portname.clone(); - Error::not_found_by_name(ResourceType::SwitchPort, name) - } - TxnError::CustomError(SwitchPortDeleteError::ActiveSettings) => { - Error::invalid_request("must clear port settings first") - } - TxnError::Database(e) => { - public_error_from_diesel(e, ErrorHandler::Server) - } - }) + diesel::delete(switch_port_dsl::switch_port) + .filter(switch_port::id.eq(port.id)) + .execute_async(&conn) + .await?; + + Ok(()) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + match err { + SwitchPortDeleteError::NotFound => { + let name = &portname.clone(); + Error::not_found_by_name( + ResourceType::SwitchPort, + name, + ) + } + SwitchPortDeleteError::ActiveSettings => { + Error::invalid_request( + "must clear port settings first", + ) + } + } + } else { + public_error_from_diesel(e, ErrorHandler::Server) + } + }) } pub async fn switch_port_list( @@ -1110,6 +1153,7 @@ impl DataStore { ) -> ListResultVec { use db::schema::{ switch_port::dsl as switch_port_dsl, + switch_port_settings_bgp_peer_config::dsl as bgp_peer_config_dsl, switch_port_settings_route_config::dsl as route_config_dsl, }; @@ -1126,6 +1170,18 @@ impl DataStore { // pagination in the future, or maybe a way to constrain the query to // a rack? 
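The `.union(...)` arm added just below widens "has uplinks" to cover ports whose settings carry BGP peers, not only static routes. SQL UNION (which is what diesel's `union` emits) deduplicates, so a port matching both arms comes back once; the same semantics over plain values, for intuition:

use std::collections::BTreeSet;

// Ports with routes, unioned with ports with BGP peers, no duplicates.
fn uplink_ids(with_routes: &[u32], with_bgp_peers: &[u32]) -> Vec<u32> {
    let ids: BTreeSet<u32> =
        with_routes.iter().chain(with_bgp_peers).copied().collect();
    ids.into_iter().collect()
}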
.limit(64) + .union( + switch_port_dsl::switch_port + .filter(switch_port_dsl::port_settings_id.is_not_null()) + .inner_join( + bgp_peer_config_dsl::switch_port_settings_bgp_peer_config + .on(switch_port_dsl::port_settings_id + .eq(bgp_peer_config_dsl::port_settings_id.nullable()), + ), + ) + .select(SwitchPort::as_select()) + .limit(64), + ) .load_async::( &*self.pool_connection_authorized(opctx).await?, ) @@ -1133,3 +1189,118 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } + +#[cfg(test)] +mod test { + use crate::db::datastore::{datastore_test, UpdatePrecondition}; + use nexus_test_utils::db::test_setup_database; + use nexus_types::external_api::params::{ + BgpAnnounceSetCreate, BgpConfigCreate, BgpPeer, BgpPeerConfig, + SwitchPortConfig, SwitchPortGeometry, SwitchPortSettingsCreate, + }; + use omicron_common::api::external::{ + IdentityMetadataCreateParams, Name, NameOrId, + }; + use omicron_test_utils::dev; + use std::collections::HashMap; + use uuid::Uuid; + + #[tokio::test] + async fn test_bgp_boundary_switches() { + let logctx = dev::test_setup_log("test_bgp_boundary_switches"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let rack_id: Uuid = + nexus_test_utils::RACK_UUID.parse().expect("parse uuid"); + let switch0: Name = "switch0".parse().expect("parse switch location"); + let qsfp0: Name = "qsfp0".parse().expect("parse qsfp0"); + + let port_result = datastore + .switch_port_create(&opctx, rack_id, switch0.into(), qsfp0.into()) + .await + .expect("switch port create"); + + let announce_set = BgpAnnounceSetCreate { + identity: IdentityMetadataCreateParams { + name: "test-announce-set".parse().unwrap(), + description: "test bgp announce set".into(), + }, + announcement: Vec::new(), + }; + + datastore.bgp_create_announce_set(&opctx, &announce_set).await.unwrap(); + + let bgp_config = BgpConfigCreate { + identity: IdentityMetadataCreateParams { + name: "test-bgp-config".parse().unwrap(), + description: "test bgp config".into(), + }, + asn: 47, + bgp_announce_set_id: NameOrId::Name( + "test-announce-set".parse().unwrap(), + ), + vrf: None, + }; + + datastore.bgp_config_set(&opctx, &bgp_config).await.unwrap(); + + let settings = SwitchPortSettingsCreate { + identity: IdentityMetadataCreateParams { + name: "test-settings".parse().unwrap(), + description: "test settings".into(), + }, + port_config: SwitchPortConfig { + geometry: SwitchPortGeometry::Qsfp28x1, + }, + groups: Vec::new(), + links: HashMap::new(), + interfaces: HashMap::new(), + routes: HashMap::new(), + bgp_peers: HashMap::from([( + "phy0".into(), + BgpPeerConfig { + peers: vec![BgpPeer { + bgp_announce_set: NameOrId::Name( + "test-announce-set".parse().unwrap(), + ), + bgp_config: NameOrId::Name( + "test-bgp-config".parse().unwrap(), + ), + interface_name: "qsfp0".into(), + addr: "192.168.1.1".parse().unwrap(), + hold_time: 0, + idle_hold_time: 0, + delay_open: 0, + connect_retry: 0, + keepalive: 0, + }], + }, + )]), + addresses: HashMap::new(), + }; + + let settings_result = datastore + .switch_port_settings_create(&opctx, &settings, None) + .await + .unwrap(); + + datastore + .switch_port_set_settings_id( + &opctx, + port_result.id, + Some(settings_result.settings.identity.id), + UpdatePrecondition::DontCare, + ) + .await + .unwrap(); + + let uplink_ports = + datastore.switch_ports_with_uplinks(&opctx).await.unwrap(); + + assert_eq!(uplink_ports.len(), 1); + + db.cleanup().await.unwrap(); + 
logctx.cleanup_successful(); + } +} diff --git a/nexus/db-queries/src/db/datastore/update.rs b/nexus/db-queries/src/db/datastore/update.rs index 8b1eecb781..0790bd458e 100644 --- a/nexus/db-queries/src/db/datastore/update.rs +++ b/nexus/db-queries/src/db/datastore/update.rs @@ -8,15 +8,13 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::{ - public_error_from_diesel, ErrorHandler, TransactionError, -}; +use crate::db::error::{public_error_from_diesel, ErrorHandler}; use crate::db::model::{ ComponentUpdate, SemverVersion, SystemUpdate, UpdateArtifact, UpdateDeployment, UpdateStatus, UpdateableComponent, }; use crate::db::pagination::paginated; -use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; +use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use nexus_db_model::SystemUpdateComponentUpdate; @@ -141,36 +139,40 @@ impl DataStore { let version_string = update.version.to_string(); - self.pool_connection_authorized(opctx) - .await? - .transaction_async(|conn| async move { - let db_update = diesel::insert_into(component_update::table) - .values(update.clone()) - .returning(ComponentUpdate::as_returning()) - .get_result_async(&conn) - .await?; - - diesel::insert_into(join_table::table) - .values(SystemUpdateComponentUpdate { - system_update_id, - component_update_id: update.id(), - }) - .returning(SystemUpdateComponentUpdate::as_returning()) - .get_result_async(&conn) - .await?; - - Ok(db_update) + let conn = self.pool_connection_authorized(opctx).await?; + + self.transaction_retry_wrapper("create_component_update") + .transaction(&conn, |conn| { + let update = update.clone(); + async move { + let db_update = + diesel::insert_into(component_update::table) + .values(update.clone()) + .returning(ComponentUpdate::as_returning()) + .get_result_async(&conn) + .await?; + + diesel::insert_into(join_table::table) + .values(SystemUpdateComponentUpdate { + system_update_id, + component_update_id: update.id(), + }) + .returning(SystemUpdateComponentUpdate::as_returning()) + .get_result_async(&conn) + .await?; + + Ok(db_update) + } }) .await - .map_err(|e| match e { - TransactionError::CustomError(e) => e, - TransactionError::Database(e) => public_error_from_diesel( + .map_err(|e| { + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::ComponentUpdate, &version_string, ), - ), + ) }) } diff --git a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs index 83856e10c7..230c3941ff 100644 --- a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs +++ b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs @@ -15,6 +15,7 @@ use crate::db::pool::DbConnection; use crate::db::queries::virtual_provisioning_collection_update::VirtualProvisioningCollectionUpdate; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::prelude::*; +use diesel::result::Error as DieselError; use omicron_common::api::external::{DeleteResult, Error}; use uuid::Uuid; @@ -52,13 +53,14 @@ impl DataStore { virtual_provisioning_collection, ) .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub(crate) async fn virtual_provisioning_collection_create_on_connection( &self, conn: &async_bb8_diesel::Connection, virtual_provisioning_collection: VirtualProvisioningCollection, - ) -> Result, Error> { + ) -> Result, DieselError> { use db::schema::virtual_provisioning_collection::dsl; let provisions: 
Vec = @@ -66,12 +68,10 @@ impl DataStore { .values(virtual_provisioning_collection) .on_conflict_do_nothing() .get_results_async(conn) - .await - .map_err(|e| { - public_error_from_diesel(e, ErrorHandler::Server) - })?; - self.virtual_provisioning_collection_producer - .append_all_metrics(&provisions)?; + .await?; + let _ = self + .virtual_provisioning_collection_producer + .append_all_metrics(&provisions); Ok(provisions) } @@ -103,16 +103,20 @@ impl DataStore { id: Uuid, ) -> DeleteResult { let conn = self.pool_connection_authorized(opctx).await?; - self.virtual_provisioning_collection_delete_on_connection(&conn, id) - .await + self.virtual_provisioning_collection_delete_on_connection( + &opctx.log, &conn, id, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Delete a [`VirtualProvisioningCollection`] object. pub(crate) async fn virtual_provisioning_collection_delete_on_connection( &self, + log: &slog::Logger, conn: &async_bb8_diesel::Connection, id: Uuid, - ) -> DeleteResult { + ) -> Result<(), DieselError> { use db::schema::virtual_provisioning_collection::dsl; // NOTE: We don't really need to extract the value we're deleting from @@ -122,12 +126,12 @@ impl DataStore { .filter(dsl::id.eq(id)) .returning(VirtualProvisioningCollection::as_select()) .get_result_async(conn) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; - assert!( - collection.is_empty(), - "Collection deleted while non-empty: {collection:?}" - ); + .await?; + + if !collection.is_empty() { + warn!(log, "Collection deleted while non-empty: {collection:?}"); + return Err(DieselError::RollbackTransaction); + } Ok(()) } diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index 1e64d784f7..4f31efd610 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -8,15 +8,14 @@ use super::DataStore; use crate::db; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; -use crate::db::error::TransactionError; use crate::db::identity::Asset; use crate::db::model::Dataset; use crate::db::model::Region; use crate::db::model::RegionSnapshot; use crate::db::model::Volume; use crate::db::queries::volume::DecreaseCrucibleResourceCountAndSoftDeleteVolume; +use crate::transaction_retry::OptionalError; use anyhow::bail; -use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::prelude::*; use diesel::OptionalExtension; @@ -44,7 +43,6 @@ impl DataStore { #[error("Serde error during Volume creation: {0}")] SerdeError(#[from] serde_json::Error), } - type TxnError = TransactionError; // Grab all the targets that the volume construction request references. // Do this outside the transaction, as the data inside volume doesn't @@ -66,86 +64,91 @@ impl DataStore { crucible_targets }; - self.pool_connection_unauthorized() - .await? - .transaction_async(|conn| async move { - let maybe_volume: Option = dsl::volume - .filter(dsl::id.eq(volume.id())) - .select(Volume::as_select()) - .first_async(&conn) - .await - .optional() - .map_err(|e| { - TxnError::CustomError(VolumeCreationError::Public( - public_error_from_diesel(e, ErrorHandler::Server), - )) - })?; - - // If the volume existed already, return it and do not increase - // usage counts. - if let Some(volume) = maybe_volume { - return Ok(volume); - } - - // TODO do we need on_conflict do_nothing here? 
if the transaction - // model is read-committed, the SELECT above could return nothing, - // and the INSERT here could still result in a conflict. - // - // See also https://github.com/oxidecomputer/omicron/issues/1168 - let volume: Volume = diesel::insert_into(dsl::volume) - .values(volume.clone()) - .on_conflict(dsl::id) - .do_nothing() - .returning(Volume::as_returning()) - .get_result_async(&conn) - .await - .map_err(|e| { - TxnError::CustomError(VolumeCreationError::Public( - public_error_from_diesel( - e, - ErrorHandler::Conflict( - ResourceType::Volume, - volume.id().to_string().as_str(), - ), - ), - )) - })?; + let err = OptionalError::new(); + let conn = self.pool_connection_unauthorized().await?; + self.transaction_retry_wrapper("volume_create") + .transaction(&conn, |conn| { + let err = err.clone(); + let crucible_targets = crucible_targets.clone(); + let volume = volume.clone(); + async move { + let maybe_volume: Option = dsl::volume + .filter(dsl::id.eq(volume.id())) + .select(Volume::as_select()) + .first_async(&conn) + .await + .optional()?; - // Increase the usage count for Crucible resources according to the - // contents of the volume. + // If the volume existed already, return it and do not increase + // usage counts. + if let Some(volume) = maybe_volume { + return Ok(volume); + } - // Increase the number of uses for each referenced region snapshot. - use db::schema::region_snapshot::dsl as rs_dsl; - for read_only_target in &crucible_targets.read_only_targets { - diesel::update(rs_dsl::region_snapshot) - .filter( - rs_dsl::snapshot_addr.eq(read_only_target.clone()), - ) - .filter(rs_dsl::deleting.eq(false)) - .set( - rs_dsl::volume_references - .eq(rs_dsl::volume_references + 1), - ) - .execute_async(&conn) + // TODO do we need on_conflict do_nothing here? if the transaction + // model is read-committed, the SELECT above could return nothing, + // and the INSERT here could still result in a conflict. + // + // See also https://github.com/oxidecomputer/omicron/issues/1168 + let volume: Volume = diesel::insert_into(dsl::volume) + .values(volume.clone()) + .on_conflict(dsl::id) + .do_nothing() + .returning(Volume::as_returning()) + .get_result_async(&conn) .await .map_err(|e| { - TxnError::CustomError(VolumeCreationError::Public( - public_error_from_diesel( - e, - ErrorHandler::Server, - ), - )) + err.bail_retryable_or_else(e, |e| { + VolumeCreationError::Public( + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::Volume, + volume.id().to_string().as_str(), + ), + ), + ) + }) })?; - } - Ok(volume) + // Increase the usage count for Crucible resources according to the + // contents of the volume. + + // Increase the number of uses for each referenced region snapshot. 
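The increment in the loop just below is expressed in SQL (`SET volume_references = volume_references + 1`) rather than as a read in one statement and a write in another, so two transactions bumping the same snapshot cannot lose an update. The distinction, by analogy in plain Rust:

use std::sync::atomic::{AtomicU64, Ordering};

// Two separate steps: with two concurrent writers, both can read 5 and
// both can write 6, losing a count. (The database analogue is a SELECT
// followed by an UPDATE of the fetched value.)
fn read_then_write(counter: &mut u64) {
    let current = *counter;
    *counter = current + 1;
}

// Single atomic step: every bump lands, like the in-SQL column update.
fn atomic_bump(counter: &AtomicU64) {
    counter.fetch_add(1, Ordering::SeqCst);
}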
+ use db::schema::region_snapshot::dsl as rs_dsl; + for read_only_target in &crucible_targets.read_only_targets + { + diesel::update(rs_dsl::region_snapshot) + .filter( + rs_dsl::snapshot_addr + .eq(read_only_target.clone()), + ) + .filter(rs_dsl::deleting.eq(false)) + .set( + rs_dsl::volume_references + .eq(rs_dsl::volume_references + 1), + ) + .execute_async(&conn) + .await?; + } + + Ok(volume) + } }) .await - .map_err(|e| match e { - TxnError::CustomError(VolumeCreationError::Public(e)) => e, - - _ => { - Error::internal_error(&format!("Transaction error: {}", e)) + .map_err(|e| { + if let Some(err) = err.take() { + match err { + VolumeCreationError::Public(err) => err, + VolumeCreationError::SerdeError(err) => { + Error::internal_error(&format!( + "Transaction error: {}", + err + )) + } + } + } else { + public_error_from_diesel(e, ErrorHandler::Server) } }) } @@ -192,16 +195,12 @@ impl DataStore { #[derive(Debug, thiserror::Error)] enum VolumeGetError { - #[error("Error during volume_checkout: {0}")] - DieselError(#[from] diesel::result::Error), - #[error("Serde error during volume_checkout: {0}")] SerdeError(#[from] serde_json::Error), #[error("Updated {0} database rows, expected {1}")] UnexpectedDatabaseUpdate(usize, usize), } - type TxnError = TransactionError; // We perform a transaction here, to be sure that on completion // of this, the database contains an updated version of the @@ -209,141 +208,141 @@ impl DataStore { // types that require it). The generation number (along with the // rest of the volume data) that was in the database is what is // returned to the caller. - self.pool_connection_unauthorized() - .await? - .transaction_async(|conn| async move { - // Grab the volume in question. - let volume = dsl::volume - .filter(dsl::id.eq(volume_id)) - .select(Volume::as_select()) - .get_result_async(&conn) - .await?; - - // Turn the volume.data into the VolumeConstructionRequest - let vcr: VolumeConstructionRequest = - serde_json::from_str(volume.data()).map_err(|e| { - TxnError::CustomError(VolumeGetError::SerdeError(e)) - })?; - - // Look to see if the VCR is a Volume type, and if so, look at - // its sub_volumes. If they are of type Region, then we need - // to update their generation numbers and record that update - // back to the database. We return to the caller whatever the - // original volume data was we pulled from the database. - match vcr { - VolumeConstructionRequest::Volume { - id, - block_size, - sub_volumes, - read_only_parent, - } => { - let mut update_needed = false; - let mut new_sv = Vec::new(); - for sv in sub_volumes { - match sv { - VolumeConstructionRequest::Region { - block_size, - blocks_per_extent, - extent_count, - opts, - gen, - } => { - update_needed = true; - new_sv.push( - VolumeConstructionRequest::Region { - block_size, - blocks_per_extent, - extent_count, - opts, - gen: gen + 1, - }, - ); - } - _ => { - new_sv.push(sv); + let err = OptionalError::new(); + let conn = self.pool_connection_unauthorized().await?; + + self.transaction_retry_wrapper("volume_checkout") + .transaction(&conn, |conn| { + let err = err.clone(); + async move { + // Grab the volume in question. 
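The checkout logic that follows bumps the generation number on every Region sub-volume before returning the volume, so whoever attaches it next presents a newer generation than any previous user. Reduced to a stand-alone shape (stand-in types; the real code matches on `VolumeConstructionRequest`):

#[derive(Debug, PartialEq)]
enum SubVolume {
    Region { gen: u64 },
    Other,
}

// Bump every Region generation; report whether anything changed so the
// caller knows whether the volume needs to be written back.
fn bump_generations(subs: Vec<SubVolume>) -> (Vec<SubVolume>, bool) {
    let mut update_needed = false;
    let new_subs = subs
        .into_iter()
        .map(|sv| match sv {
            SubVolume::Region { gen } => {
                update_needed = true;
                SubVolume::Region { gen: gen + 1 }
            }
            other => other,
        })
        .collect();
    (new_subs, update_needed)
}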
+ let volume = dsl::volume + .filter(dsl::id.eq(volume_id)) + .select(Volume::as_select()) + .get_result_async(&conn) + .await?; + + // Turn the volume.data into the VolumeConstructionRequest + let vcr: VolumeConstructionRequest = + serde_json::from_str(volume.data()).map_err(|e| { + err.bail(VolumeGetError::SerdeError(e)) + })?; + + // Look to see if the VCR is a Volume type, and if so, look at + // its sub_volumes. If they are of type Region, then we need + // to update their generation numbers and record that update + // back to the database. We return to the caller whatever the + // original volume data was we pulled from the database. + match vcr { + VolumeConstructionRequest::Volume { + id, + block_size, + sub_volumes, + read_only_parent, + } => { + let mut update_needed = false; + let mut new_sv = Vec::new(); + for sv in sub_volumes { + match sv { + VolumeConstructionRequest::Region { + block_size, + blocks_per_extent, + extent_count, + opts, + gen, + } => { + update_needed = true; + new_sv.push( + VolumeConstructionRequest::Region { + block_size, + blocks_per_extent, + extent_count, + opts, + gen: gen + 1, + }, + ); + } + _ => { + new_sv.push(sv); + } } } - } - // Only update the volume data if we found the type - // of volume that needed it. - if update_needed { - // Create a new VCR and fill in the contents - // from what the original volume had, but with our - // updated sub_volume records. - let new_vcr = VolumeConstructionRequest::Volume { - id, - block_size, - sub_volumes: new_sv, - read_only_parent, - }; - - let new_volume_data = serde_json::to_string( - &new_vcr, - ) - .map_err(|e| { - TxnError::CustomError( - VolumeGetError::SerdeError(e), - ) - })?; + // Only update the volume data if we found the type + // of volume that needed it. + if update_needed { + // Create a new VCR and fill in the contents + // from what the original volume had, but with our + // updated sub_volume records. + let new_vcr = VolumeConstructionRequest::Volume { + id, + block_size, + sub_volumes: new_sv, + read_only_parent, + }; - // Update the original volume_id with the new - // volume.data. - use db::schema::volume::dsl as volume_dsl; - let num_updated = - diesel::update(volume_dsl::volume) - .filter(volume_dsl::id.eq(volume_id)) - .set(volume_dsl::data.eq(new_volume_data)) - .execute_async(&conn) - .await?; + let new_volume_data = serde_json::to_string( + &new_vcr, + ) + .map_err(|e| { + err.bail(VolumeGetError::SerdeError(e)) + })?; - // This should update just one row. If it does - // not, then something is terribly wrong in the - // database. - if num_updated != 1 { - return Err(TxnError::CustomError( - VolumeGetError::UnexpectedDatabaseUpdate( - num_updated, - 1, - ), - )); + // Update the original volume_id with the new + // volume.data. + use db::schema::volume::dsl as volume_dsl; + let num_updated = + diesel::update(volume_dsl::volume) + .filter(volume_dsl::id.eq(volume_id)) + .set(volume_dsl::data.eq(new_volume_data)) + .execute_async(&conn) + .await?; + + // This should update just one row. If it does + // not, then something is terribly wrong in the + // database. + if num_updated != 1 { + return Err(err.bail( + VolumeGetError::UnexpectedDatabaseUpdate( + num_updated, + 1, + ), + )); + } } } + VolumeConstructionRequest::Region { + block_size: _, + blocks_per_extent: _, + extent_count: _, + opts: _, + gen: _, + } => { + // We don't support a pure Region VCR at the volume + // level in the database, so this choice should + // never be encountered, but I want to know if it is. 
+ panic!("Region not supported as a top level volume"); + } + VolumeConstructionRequest::File { + id: _, + block_size: _, + path: _, + } + | VolumeConstructionRequest::Url { + id: _, + block_size: _, + url: _, + } => {} } - VolumeConstructionRequest::Region { - block_size: _, - blocks_per_extent: _, - extent_count: _, - opts: _, - gen: _, - } => { - // We don't support a pure Region VCR at the volume - // level in the database, so this choice should - // never be encountered, but I want to know if it is. - panic!("Region not supported as a top level volume"); - } - VolumeConstructionRequest::File { - id: _, - block_size: _, - path: _, - } - | VolumeConstructionRequest::Url { - id: _, - block_size: _, - url: _, - } => {} + Ok(volume) } - Ok(volume) }) .await - .map_err(|e| match e { - TxnError::CustomError(VolumeGetError::DieselError(e)) => { - public_error_from_diesel(e, ErrorHandler::Server) - } - - _ => { - Error::internal_error(&format!("Transaction error: {}", e)) + .map_err(|e| { + if let Some(err) = err.take() { + return Error::internal_error(&format!("Transaction error: {}", err)); } + public_error_from_diesel(e, ErrorHandler::Server) }) } @@ -457,7 +456,7 @@ impl DataStore { /// snapshots. pub async fn find_deleted_volume_regions( &self, - ) -> ListResultVec<(Dataset, Region, Volume)> { + ) -> ListResultVec<(Dataset, Region, Option, Volume)> { use db::schema::dataset::dsl as dataset_dsl; use db::schema::region::dsl as region_dsl; use db::schema::region_snapshot::dsl; @@ -494,6 +493,7 @@ impl DataStore { .select(( Dataset::as_select(), Region::as_select(), + Option::::as_select(), Volume::as_select(), )) .load_async(&*self.pool_connection_unauthorized().await?) @@ -637,16 +637,12 @@ impl DataStore { ) -> Result { #[derive(Debug, thiserror::Error)] enum RemoveReadOnlyParentError { - #[error("Error removing read only parent: {0}")] - DieselError(#[from] diesel::result::Error), - #[error("Serde error removing read only parent: {0}")] SerdeError(#[from] serde_json::Error), #[error("Updated {0} database rows, expected {1}")] UnexpectedDatabaseUpdate(usize, usize), } - type TxnError = TransactionError; // In this single transaction: // - Get the given volume from the volume_id from the database @@ -662,170 +658,160 @@ impl DataStore { // data from original volume_id. // - Put the new temp VCR into the temp volume.data, update the // temp_volume in the database. - self.pool_connection_unauthorized() - .await? - .transaction_async(|conn| async move { - // Grab the volume in question. If the volume record was already - // deleted then we can just return. - let volume = { - use db::schema::volume::dsl; - - let volume = dsl::volume - .filter(dsl::id.eq(volume_id)) - .select(Volume::as_select()) - .get_result_async(&conn) - .await - .optional()?; - - let volume = if let Some(v) = volume { - v - } else { - // the volume does not exist, nothing to do. - return Ok(false); + let err = OptionalError::new(); + let conn = self.pool_connection_unauthorized().await?; + self.transaction_retry_wrapper("volume_remove_rop") + .transaction(&conn, |conn| { + let err = err.clone(); + async move { + // Grab the volume in question. If the volume record was already + // deleted then we can just return. + let volume = { + use db::schema::volume::dsl; + + let volume = dsl::volume + .filter(dsl::id.eq(volume_id)) + .select(Volume::as_select()) + .get_result_async(&conn) + .await + .optional()?; + + let volume = if let Some(v) = volume { + v + } else { + // the volume does not exist, nothing to do. 
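The early returns just below keep `volume_remove_rop` idempotent: a missing or soft-deleted volume is a successful no-op, and the boolean reports whether any work happened. The control flow, as a self-contained shape:

struct VolumeSketch {
    time_deleted: Option<std::time::SystemTime>,
}

fn remove_rop_shape(volume: Option<VolumeSketch>) -> Result<bool, String> {
    // No row at all: nothing to do, and that is not an error.
    let Some(v) = volume else { return Ok(false) };
    if v.time_deleted.is_some() {
        // Soft-deleted: whoever is deleting it owns the cleanup.
        return Ok(false);
    }
    // ...detach the read-only parent here...
    Ok(true)
}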
+ return Ok(false); + }; + + if volume.time_deleted.is_some() { + // this volume is deleted, so let whatever is deleting + // it clean it up. + return Ok(false); + } else { + // A volume record exists, and was not deleted, we + // can attempt to remove its read_only_parent. + volume + } }; - if volume.time_deleted.is_some() { - // this volume is deleted, so let whatever is deleting - // it clean it up. - return Ok(false); - } else { - // A volume record exists, and was not deleted, we - // can attempt to remove its read_only_parent. - volume - } - }; - - // If a read_only_parent exists, remove it from volume_id, and - // attach it to temp_volume_id. - let vcr: VolumeConstructionRequest = - serde_json::from_str( - volume.data() - ) - .map_err(|e| { - TxnError::CustomError( - RemoveReadOnlyParentError::SerdeError( - e, - ), + // If a read_only_parent exists, remove it from volume_id, and + // attach it to temp_volume_id. + let vcr: VolumeConstructionRequest = + serde_json::from_str( + volume.data() ) - })?; - - match vcr { - VolumeConstructionRequest::Volume { - id, - block_size, - sub_volumes, - read_only_parent, - } => { - if read_only_parent.is_none() { - // This volume has no read_only_parent - Ok(false) - } else { - // Create a new VCR and fill in the contents - // from what the original volume had. - let new_vcr = VolumeConstructionRequest::Volume { - id, - block_size, - sub_volumes, - read_only_parent: None, - }; - - let new_volume_data = - serde_json::to_string( - &new_vcr + .map_err(|e| { + err.bail( + RemoveReadOnlyParentError::SerdeError( + e, ) - .map_err(|e| { - TxnError::CustomError( - RemoveReadOnlyParentError::SerdeError( - e, - ), - ) - })?; + ) + })?; - // Update the original volume_id with the new - // volume.data. - use db::schema::volume::dsl as volume_dsl; - let num_updated = diesel::update(volume_dsl::volume) - .filter(volume_dsl::id.eq(volume_id)) - .set(volume_dsl::data.eq(new_volume_data)) - .execute_async(&conn) - .await?; - - // This should update just one row. If it does - // not, then something is terribly wrong in the - // database. - if num_updated != 1 { - return Err(TxnError::CustomError( - RemoveReadOnlyParentError::UnexpectedDatabaseUpdate(num_updated, 1), - )); - } + match vcr { + VolumeConstructionRequest::Volume { + id, + block_size, + sub_volumes, + read_only_parent, + } => { + if read_only_parent.is_none() { + // This volume has no read_only_parent + Ok(false) + } else { + // Create a new VCR and fill in the contents + // from what the original volume had. + let new_vcr = VolumeConstructionRequest::Volume { + id, + block_size, + sub_volumes, + read_only_parent: None, + }; - // Make a new VCR, with the information from - // our temp_volume_id, but the read_only_parent - // from the original volume. - let rop_vcr = VolumeConstructionRequest::Volume { - id: temp_volume_id, - block_size, - sub_volumes: vec![], - read_only_parent, - }; - let rop_volume_data = - serde_json::to_string( - &rop_vcr - ) - .map_err(|e| { - TxnError::CustomError( - RemoveReadOnlyParentError::SerdeError( - e, - ), + let new_volume_data = + serde_json::to_string( + &new_vcr ) - })?; - // Update the temp_volume_id with the volume - // data that contains the read_only_parent. 
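Both UPDATEs in this function insist on exactly one affected row and roll the transaction back otherwise, so a bad filter can never half-apply the read-only-parent move. The guard, worth reusing anywhere a statement must touch exactly one row:

// Fail (and thus roll back the enclosing transaction) unless the
// statement changed exactly the expected number of rows.
fn expect_rows(num_updated: usize, expected: usize) -> Result<(), String> {
    if num_updated != expected {
        return Err(format!(
            "updated {num_updated} database rows, expected {expected}"
        ));
    }
    Ok(())
}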
- let num_updated = - diesel::update(volume_dsl::volume) - .filter(volume_dsl::id.eq(temp_volume_id)) - .filter(volume_dsl::time_deleted.is_null()) - .set(volume_dsl::data.eq(rop_volume_data)) + .map_err(|e| { + err.bail(RemoveReadOnlyParentError::SerdeError( + e, + )) + })?; + + // Update the original volume_id with the new + // volume.data. + use db::schema::volume::dsl as volume_dsl; + let num_updated = diesel::update(volume_dsl::volume) + .filter(volume_dsl::id.eq(volume_id)) + .set(volume_dsl::data.eq(new_volume_data)) .execute_async(&conn) .await?; - if num_updated != 1 { - return Err(TxnError::CustomError( - RemoveReadOnlyParentError::UnexpectedDatabaseUpdate(num_updated, 1), - )); + + // This should update just one row. If it does + // not, then something is terribly wrong in the + // database. + if num_updated != 1 { + return Err(err.bail(RemoveReadOnlyParentError::UnexpectedDatabaseUpdate(num_updated, 1))); + } + + // Make a new VCR, with the information from + // our temp_volume_id, but the read_only_parent + // from the original volume. + let rop_vcr = VolumeConstructionRequest::Volume { + id: temp_volume_id, + block_size, + sub_volumes: vec![], + read_only_parent, + }; + let rop_volume_data = + serde_json::to_string( + &rop_vcr + ) + .map_err(|e| { + err.bail(RemoveReadOnlyParentError::SerdeError( + e, + )) + })?; + // Update the temp_volume_id with the volume + // data that contains the read_only_parent. + let num_updated = + diesel::update(volume_dsl::volume) + .filter(volume_dsl::id.eq(temp_volume_id)) + .filter(volume_dsl::time_deleted.is_null()) + .set(volume_dsl::data.eq(rop_volume_data)) + .execute_async(&conn) + .await?; + if num_updated != 1 { + return Err(err.bail(RemoveReadOnlyParentError::UnexpectedDatabaseUpdate(num_updated, 1))); + } + Ok(true) } - Ok(true) } - } - VolumeConstructionRequest::File { id: _, block_size: _, path: _ } - | VolumeConstructionRequest::Region { - block_size: _, - blocks_per_extent: _, - extent_count: _, - opts: _, - gen: _ } - | VolumeConstructionRequest::Url { id: _, block_size: _, url: _ } => { - // Volume has a format that does not contain ROPs - Ok(false) + VolumeConstructionRequest::File { id: _, block_size: _, path: _ } + | VolumeConstructionRequest::Region { + block_size: _, + blocks_per_extent: _, + extent_count: _, + opts: _, + gen: _ } + | VolumeConstructionRequest::Url { id: _, block_size: _, url: _ } => { + // Volume has a format that does not contain ROPs + Ok(false) + } } } }) .await - .map_err(|e| match e { - TxnError::CustomError( - RemoveReadOnlyParentError::DieselError(e), - ) => public_error_from_diesel( - e, - ErrorHandler::Server, - ), - - _ => { - Error::internal_error(&format!("Transaction error: {}", e)) + .map_err(|e| { + if let Some(err) = err.take() { + return Error::internal_error(&format!("Transaction error: {}", err)); } + public_error_from_diesel(e, ErrorHandler::Server) }) } } -#[derive(Default, Debug, Serialize, Deserialize)] +#[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct CrucibleTargets { pub read_only_targets: Vec, } diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index 6db99465a3..4f0245e283 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++ b/nexus/db-queries/src/db/datastore/vpc.rs @@ -12,7 +12,6 @@ use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; -use 
crate::db::error::TransactionError; use crate::db::fixed_data::vpc::SERVICES_VPC_ID; use crate::db::identity::Resource; use crate::db::model::IncompleteVpc; @@ -37,7 +36,7 @@ use crate::db::queries::vpc::InsertVpcQuery; use crate::db::queries::vpc::VniSearchIter; use crate::db::queries::vpc_subnet::FilterConflictingVpcSubnetRangesQuery; use crate::db::queries::vpc_subnet::SubnetError; -use async_bb8_diesel::AsyncConnection; +use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; @@ -340,7 +339,10 @@ impl DataStore { opctx.log, "failed to find a VNI after searching entire range"; ); - Err(Error::unavail("Failed to find a free VNI for this VPC")) + Err(Error::insufficient_capacity( + "No free virtual network was found", + "Failed to find a free VNI for this VPC", + )) } // Internal implementation for creating a VPC. @@ -470,11 +472,9 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .is_some() { - return Err(Error::InvalidRequest { - message: String::from( - "VPC cannot be deleted while VPC Subnets exist", - ), - }); + return Err(Error::invalid_request( + "VPC cannot be deleted while VPC Subnets exist", + )); } // Delete the VPC, conditional on the subnet_gen not having changed. @@ -493,11 +493,9 @@ impl DataStore { ) })?; if updated_rows == 0 { - Err(Error::InvalidRequest { - message: String::from( - "deletion failed to to concurrent modification", - ), - }) + Err(Error::invalid_request( + "deletion failed due to concurrent modification", + )) } else { Ok(()) } @@ -580,53 +578,65 @@ impl DataStore { .set(dsl::time_deleted.eq(now)); let rules_is_empty = rules.is_empty(); - let insert_new_query = Vpc::insert_resource( - authz_vpc.id(), - diesel::insert_into(dsl::vpc_firewall_rule).values(rules), - ); - #[derive(Debug)] enum FirewallUpdateError { CollectionNotFound, } - type TxnError = TransactionError; + + let err = OptionalError::new(); // TODO-scalability: Ideally this would be a CTE so we don't need to // hold a transaction open across multiple roundtrips from the database, // but for now we're using a transaction due to the severely decreased // legibility of CTEs via diesel right now. - self.pool_connection_authorized(opctx) - .await? - .transaction_async(|conn| async move { - delete_old_query.execute_async(&conn).await?; - - // The generation count update on the vpc table row will take a - // write lock on the row, ensuring that the vpc was not deleted - // concurently. - if rules_is_empty { - return Ok(vec![]); - } - insert_new_query + let conn = self.pool_connection_authorized(opctx).await?; + + self.transaction_retry_wrapper("vpc_update_firewall_rules") + .transaction(&conn, |conn| { + let err = err.clone(); + let delete_old_query = delete_old_query.clone(); + let rules = rules.clone(); + async move { + delete_old_query.execute_async(&conn).await?; + + // The generation count update on the vpc table row will take a + // write lock on the row, ensuring that the vpc was not deleted + // concurently. 
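The clones at the top of the closure above are load-bearing: the retry wrapper may run the closure several times, so each attempt has to start from its own copy of any input it consumes. A minimal sketch of a wrapper with that contract (synchronous for brevity; the real one is async and bounds its retries differently):

fn retry_transaction_sketch<T, E>(
    max_retries: u32,
    is_retryable: impl Fn(&E) -> bool,
    mut attempt: impl FnMut() -> Result<T, E>,
) -> Result<T, E> {
    let mut retries = 0;
    loop {
        match attempt() {
            // Serialization conflicts and the like: run the closure again.
            Err(e) if is_retryable(&e) && retries < max_retries => {
                retries += 1;
            }
            // Success, a terminal error, or retry budget exhausted.
            other => return other,
        }
    }
}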
+ if rules_is_empty { + return Ok(vec![]); + } + Vpc::insert_resource( + authz_vpc.id(), + diesel::insert_into(dsl::vpc_firewall_rule) + .values(rules), + ) .insert_and_get_results_async(&conn) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => { - TxnError::CustomError( - FirewallUpdateError::CollectionNotFound, - ) + err.bail(FirewallUpdateError::CollectionNotFound) } - AsyncInsertError::DatabaseError(e) => e.into(), + AsyncInsertError::DatabaseError(e) => e, }) + } }) .await - .map_err(|e| match e { - TxnError::CustomError( - FirewallUpdateError::CollectionNotFound, - ) => Error::not_found_by_id(ResourceType::Vpc, &authz_vpc.id()), - TxnError::Database(e) => public_error_from_diesel( - e, - ErrorHandler::NotFoundByResource(authz_vpc), - ), + .map_err(|e| { + if let Some(err) = err.take() { + match err { + FirewallUpdateError::CollectionNotFound => { + Error::not_found_by_id( + ResourceType::Vpc, + &authz_vpc.id(), + ) + } + } + } else { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_vpc), + ) + } }) } @@ -783,12 +793,10 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .is_some() { - return Err(Error::InvalidRequest { - message: String::from( - "VPC Subnet cannot be deleted while \ - network interfaces in the subnet exist", - ), - }); + return Err(Error::invalid_request( + "VPC Subnet cannot be deleted while network interfaces in the \ + subnet exist", + )); } // Delete the subnet, conditional on the rcgen not having changed. @@ -807,11 +815,9 @@ impl DataStore { ) })?; if updated_rows == 0 { - return Err(Error::InvalidRequest { - message: String::from( - "deletion failed to to concurrent modification", - ), - }); + return Err(Error::invalid_request( + "deletion failed due to concurrent modification", + )); } else { Ok(()) } diff --git a/nexus/db-queries/src/db/error.rs b/nexus/db-queries/src/db/error.rs index cbe2b0a71f..fc7f30da93 100644 --- a/nexus/db-queries/src/db/error.rs +++ b/nexus/db-queries/src/db/error.rs @@ -17,7 +17,7 @@ pub enum TransactionError { /// The customizable error type. /// /// This error should be used for all non-Diesel transaction failures. - #[error("Custom transaction error; {0}")] + #[error("Custom transaction error: {0}")] CustomError(T), /// The Diesel error type. @@ -28,31 +28,61 @@ pub enum TransactionError { Database(#[from] DieselError), } +pub fn retryable(error: &DieselError) -> bool { + match error { + DieselError::DatabaseError(kind, boxed_error_information) => match kind + { + DieselErrorKind::SerializationFailure => { + return boxed_error_information + .message() + .starts_with("restart transaction"); + } + _ => false, + }, + _ => false, + } +} + +/// Identifies if the error is retryable or not. +pub enum MaybeRetryable { + /// The error isn't retryable. + NotRetryable(T), + /// The error is retryable. + Retryable(DieselError), +} + +impl TransactionError { + /// Identifies that the error could be returned from a Diesel transaction. + /// + /// Allows callers to propagate arbitrary errors out of transaction contexts + /// without losing information that might be valuable to the calling context, + /// such as "does this particular error indicate that the entire transaction + /// should retry?". 
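Per the CockroachDB transaction-retry reference, client-side retry errors surface as serialization failures whose message starts with "restart transaction", and `retryable` above keys off exactly that. Its string-level core, with a few checks:

fn is_crdb_retry_message(is_serialization_failure: bool, msg: &str) -> bool {
    // Only serialization failures qualify, and CockroachDB marks the
    // retryable ones with this message prefix.
    is_serialization_failure && msg.starts_with("restart transaction")
}

#[test]
fn retry_message_detection() {
    assert!(is_crdb_retry_message(true, "restart transaction: read conflict"));
    assert!(!is_crdb_retry_message(true, "duplicate key value"));
    assert!(!is_crdb_retry_message(false, "restart transaction: ..."));
}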
+ pub fn retryable(self) -> MaybeRetryable { + use MaybeRetryable::*; + + match self { + TransactionError::Database(err) if retryable(&err) => { + Retryable(err) + } + _ => NotRetryable(self), + } + } +} + impl From for TransactionError { fn from(err: PublicError) -> Self { TransactionError::CustomError(err) } } -impl TransactionError { - /// Based on [the CRDB][1] docs, return true if this transaction must be - /// retried. - /// - /// [1]: https://www.cockroachlabs.com/docs/v23.1/transaction-retry-error-reference#client-side-retry-handling - pub fn retry_transaction(&self) -> bool { - match &self { - Self::Database(DieselError::DatabaseError( - kind, - boxed_error_information, - )) => match kind { - DieselErrorKind::SerializationFailure => { - return boxed_error_information - .message() - .starts_with("restart transaction"); - } - _ => false, - }, - _ => false, +impl From> for PublicError { + fn from(err: TransactionError) -> Self { + match err { + TransactionError::CustomError(err) => err, + TransactionError::Database(err) => { + public_error_from_diesel(err, ErrorHandler::Server) + } } } } diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs index 72a32f562c..028694dc4b 100644 --- a/nexus/db-queries/src/db/lookup.rs +++ b/nexus/db-queries/src/db/lookup.rs @@ -231,6 +231,11 @@ impl<'a> LookupPath<'a> { RouterRoute::PrimaryKey(Root { lookup_root: self }, id) } + /// Select a resource of type FloatingIp, identified by its id + pub fn floating_ip_id(self, id: Uuid) -> FloatingIp<'a> { + FloatingIp::PrimaryKey(Root { lookup_root: self }, id) + } + // Fleet-level resources /// Select a resource of type ConsoleSession, identified by its `token` @@ -632,7 +637,7 @@ lookup_resource! { lookup_resource! { name = "Project", ancestors = [ "Silo" ], - children = [ "Disk", "Instance", "Vpc", "Snapshot", "ProjectImage" ], + children = [ "Disk", "Instance", "Vpc", "Snapshot", "ProjectImage", "FloatingIp" ], lookup_by_name = true, soft_deletes = true, primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] @@ -728,6 +733,15 @@ lookup_resource! { primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] } +lookup_resource! { + name = "FloatingIp", + ancestors = [ "Silo", "Project" ], + children = [], + lookup_by_name = true, + soft_deletes = true, + primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] +} + // Miscellaneous resources nested directly below "Fleet" lookup_resource! { diff --git a/nexus/db-queries/src/db/mod.rs b/nexus/db-queries/src/db/mod.rs index 8b7424a056..924eab363f 100644 --- a/nexus/db-queries/src/db/mod.rs +++ b/nexus/db-queries/src/db/mod.rs @@ -12,15 +12,17 @@ pub mod collection_attach; pub mod collection_detach; pub mod collection_detach_many; pub mod collection_insert; +mod column_walker; mod config; mod cte_utils; // This is marked public for use by the integration tests pub mod datastore; -mod error; +pub(crate) mod error; mod explain; pub mod fixed_data; pub mod lookup; -mod pagination; +// Public for doctests. +pub mod pagination; mod pool; // This is marked public because the error types are used elsewhere, e.g., in // sagas. 
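With the `From` conversion above, transaction code can use `?` freely and fold both error flavors into a public error at one boundary. The shape of that conversion with stand-in types (the real one routes Diesel errors through `public_error_from_diesel`):

enum TxnErrorSketch<T> {
    Custom(T),
    Database(String),
}

// Custom errors pass through untouched; database errors get mapped once,
// at the edge, instead of at every call site.
fn into_public(err: TxnErrorSketch<String>) -> String {
    match err {
        TxnErrorSketch::Custom(e) => e,
        TxnErrorSketch::Database(db) => format!("server error: {db}"),
    }
}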
@@ -41,7 +43,7 @@ pub use nexus_db_model::schema;
 pub use crate::db::error::TransactionError;
 pub use config::Config;
 pub use datastore::DataStore;
-pub use pool::Pool;
+pub use pool::{DbConnection, Pool};
 pub use saga_recovery::{recover, CompletionTask, RecoveryTask};
 pub use saga_types::SecId;
 pub use sec_store::CockroachDbSecStore;
diff --git a/nexus/db-queries/src/db/pagination.rs b/nexus/db-queries/src/db/pagination.rs
index dd7daab14f..4fc1cf5966 100644
--- a/nexus/db-queries/src/db/pagination.rs
+++ b/nexus/db-queries/src/db/pagination.rs
@@ -16,6 +16,7 @@ use diesel::AppearsOnTable;
 use diesel::Column;
 use diesel::{ExpressionMethods, QueryDsl};
 use omicron_common::api::external::DataPageParams;
+use std::num::NonZeroU32;
 
 // Shorthand alias for "the SQL type of the whole table".
 type TableSqlType<T> = <T as AsQuery>::SqlType;
 
@@ -169,6 +170,145 @@ where
     }
 }
 
+/// Helper for querying a large number of records from the database in batches
+///
+/// Without this helper, a typical way to perform paginated queries would be to
+/// invoke some existing "list" function in the datastore that itself is
+/// paginated. Such functions accept a `pagparams: &DataPageParams` argument
+/// that uses a marker to identify where the next page of results starts. For
+/// the first call, the marker inside `pagparams` is `None`. For subsequent
+/// calls, it's typically some field from the last item returned in the previous
+/// page. You're finished when you get a result set smaller than the batch
+/// size.
+///
+/// This helper takes care of most of the logic for you. To use this, you first
+/// create a `Paginator` with a specific batch_size. Then you call `next()` in
+/// a loop. Each iteration will provide you with a `DataPageParams` to use to
+/// call your list function. When you've fetched the next page, you have to
+/// let the helper look at it to determine if there's another page to fetch and
+/// what marker to use.
+///
+/// ## Example
+///
+/// ```
+/// use nexus_db_queries::db::pagination::Paginator;
+/// use omicron_common::api::external::DataPageParams;
+///
+/// let batch_size = std::num::NonZeroU32::new(3).unwrap();
+///
+/// // Assume you've got an existing paginated "list items" function.
+/// // This simple implementation returns a few full batches, then a partial
+/// // batch.
+/// type Marker = u32;
+/// type Item = u32;
+/// let do_query = |pagparams: &DataPageParams<'_, Marker>| {
+///     match pagparams.marker {
+///         None => (0..batch_size.get()).collect(),
+///         Some(x) if *x < 2 * batch_size.get() => (x+1..x+1+batch_size.get()).collect(),
+///         Some(x) => vec![*x + 1],
+///     }
+/// };
+///
+/// // This closure translates from one of the returned items to the field in
+/// // that item that serves as the marker. This example is contrived.
+/// let item2marker: &dyn Fn(&Item) -> Marker = &|u: &u32| *u;
+///
+/// let mut all_records = Vec::new();
+/// let mut paginator = Paginator::new(batch_size);
+/// while let Some(p) = paginator.next() {
+///     let records_batch = do_query(&p.current_pagparams());
+///     paginator = p.found_batch(&records_batch, item2marker);
+///     all_records.extend(records_batch.into_iter());
+/// }
+///
+/// // Results are in `all_records`.
+/// assert_eq!(all_records, vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+/// ```
+///
+/// ## Design notes
+///
+/// The separation of `Paginator` and `PaginatorHelper` is aimed at making it
+/// harder to misuse this interface. We could skip the helper altogether and
+/// just have `Paginator::next()` return the `DataPageParams` directly. But you'd
+/// still need a `Paginator::found_batch()`. And it would be easy to forget to
+/// call this, leading to an infinite loop at runtime. To avoid this mistake,
+/// `Paginator::next()` consumes `self`. You can't get another `Paginator` back
+/// until you use `PaginatorHelper::found_batch()`. That also consumes `self`
+/// so that you can't keep using the old `DataPageParams`.
+pub struct Paginator<N> {
+    batch_size: NonZeroU32,
+    state: PaginatorState<N>,
+}
+
+impl<N> Paginator<N> {
+    pub fn new(batch_size: NonZeroU32) -> Paginator<N> {
+        Paginator { batch_size, state: PaginatorState::Initial }
+    }
+
+    pub fn next(self) -> Option<PaginatorHelper<N>> {
+        match self.state {
+            PaginatorState::Initial => Some(PaginatorHelper {
+                batch_size: self.batch_size,
+                marker: None,
+            }),
+            PaginatorState::Middle { marker } => Some(PaginatorHelper {
+                batch_size: self.batch_size,
+                marker: Some(marker),
+            }),
+            PaginatorState::Done => None,
+        }
+    }
+}
+
+enum PaginatorState<N> {
+    Initial,
+    Middle { marker: N },
+    Done,
+}
+
+pub struct PaginatorHelper<N> {
+    batch_size: NonZeroU32,
+    marker: Option<N>,
+}
+
+impl<N> PaginatorHelper<N> {
+    /// Returns the `DataPageParams` to use to fetch the next page of results
+    pub fn current_pagparams(&self) -> DataPageParams<'_, N> {
+        DataPageParams {
+            marker: self.marker.as_ref(),
+            direction: dropshot::PaginationOrder::Ascending,
+            limit: self.batch_size,
+        }
+    }
+
+    /// Report a page of results
+    ///
+    /// This function looks at the returned results to determine whether we've
+    /// finished iteration or whether we need to fetch another page (and if so,
+    /// this determines the marker for the next fetch operation).
+    ///
+    /// This function returns a `Paginator` used to make the next request. See
+    /// the example on `Paginator` for usage.
+    pub fn found_batch<T>(
+        self,
+        batch: &[T],
+        item2marker: &dyn Fn(&T) -> N,
+    ) -> Paginator<N> {
+        let state =
+            if batch.len() < usize::try_from(self.batch_size.get()).unwrap() {
+                PaginatorState::Done
+            } else {
+                // self.batch_size is non-zero, so if we got at least that many
+                // items, then there's at least one.
+                let last = batch.iter().last().unwrap();
+                let marker = item2marker(last);
+                PaginatorState::Middle { marker }
+            };
+
+        Paginator { batch_size: self.batch_size, state }
+    }
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
@@ -433,4 +573,48 @@ mod test {
         let _ = db.cleanup().await;
         logctx.cleanup_successful();
     }
+
+    #[test]
+    fn test_paginator() {
+        // The doctest exercises a basic case for Paginator. Here we test some
+        // edge cases.
+        let batch_size = std::num::NonZeroU32::new(3).unwrap();
+
+        type Marker = u32;
+        #[derive(Debug, PartialEq, Eq)]
+        struct Item {
+            value: String,
+            marker: Marker,
+        }
+
+        let do_list =
+            |query: &dyn Fn(&DataPageParams<'_, Marker>) -> Vec<Item>| {
+                let mut all_records = Vec::new();
+                let mut paginator = Paginator::new(batch_size);
+                while let Some(p) = paginator.next() {
+                    let records_batch = query(&p.current_pagparams());
+                    paginator =
+                        p.found_batch(&records_batch, &|i: &Item| i.marker);
+                    all_records.extend(records_batch.into_iter());
+                }
+                all_records
+            };
+
+        fn mkitem(v: u32) -> Item {
+            Item { value: v.to_string(), marker: v }
+        }
+
+        // Trivial case: first page is empty
+        assert_eq!(Vec::<Item>::new(), do_list(&|_| Vec::new()));
+
+        // Exactly one batch-size worth of items
+        // (exercises the cases where the last non-empty batch is full, and
+        // where a batch is empty)
+        let my_query =
+            |pagparams: &DataPageParams<'_, Marker>| match &pagparams.marker {
+                None => (0..batch_size.get()).map(mkitem).collect(),
+                Some(_) => Vec::new(),
+            };
+        assert_eq!(vec![mkitem(0), mkitem(1), mkitem(2)], do_list(&my_query));
+    }
 }
diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs
index cf182e080d..2a76ea7408 100644
--- a/nexus/db-queries/src/db/queries/external_ip.rs
+++ b/nexus/db-queries/src/db/queries/external_ip.rs
@@ -98,7 +98,8 @@ const MAX_PORT: u16 = u16::MAX;
 ///         <kind> AS kind,
 ///         candidate_ip AS ip,
 ///         CAST(candidate_first_port AS INT4) AS first_port,
-///         CAST(candidate_last_port AS INT4) AS last_port
+///         CAST(candidate_last_port AS INT4) AS last_port,
+///         <project_id> AS project_id
 ///     FROM
 ///         SELECT * FROM (
 ///             -- Select all IP addresses by pool and range.
@@ -371,6 +372,13 @@ impl NextExternalIp {
         out.push_identifier(dsl::first_port::NAME)?;
         out.push_sql(", CAST(candidate_last_port AS INT4) AS ");
         out.push_identifier(dsl::last_port::NAME)?;
+        out.push_sql(", ");
+
+        // Project ID, possibly null
+        out.push_bind_param::<sql_types::Nullable<sql_types::Uuid>, Option<Uuid>>(self.ip.project_id())?;
+        out.push_sql(" AS ");
+        out.push_identifier(dsl::project_id::NAME)?;
+
         out.push_sql(" FROM (");
         self.push_address_sequence_subquery(out.reborrow())?;
         out.push_sql(") CROSS JOIN (");
@@ -989,9 +997,10 @@ mod tests {
         );
         assert_eq!(
             err,
-            Error::InvalidRequest {
-                message: String::from("No external IP addresses available"),
-            }
+            Error::insufficient_capacity(
+                "No external IP addresses available",
+                "NextExternalIp::new returned NotFound",
+            ),
         );
         context.success().await;
     }
@@ -1045,9 +1054,10 @@ mod tests {
         );
         assert_eq!(
             res.unwrap_err(),
-            Error::InvalidRequest {
-                message: String::from("No external IP addresses available"),
-            }
+            Error::insufficient_capacity(
+                "No external IP addresses available",
+                "NextExternalIp::new returned NotFound",
+            ),
         );
 
         let res = context
@@ -1067,9 +1077,10 @@ mod tests {
         );
         assert_eq!(
             res.unwrap_err(),
-            Error::InvalidRequest {
-                message: String::from("No external IP addresses available"),
-            }
+            Error::insufficient_capacity(
+                "No external IP addresses available",
+                "NextExternalIp::new returned NotFound",
+            ),
         );
         context.success().await;
     }
@@ -1298,9 +1309,10 @@ mod tests {
         .expect_err("Should have failed to allocate after pool exhausted");
         assert_eq!(
             err,
-            Error::InvalidRequest {
-                message: String::from("No external IP addresses available"),
-            }
+            Error::insufficient_capacity(
+                "No external IP addresses available",
+                "NextExternalIp::new returned NotFound",
+            ),
         );
 
         // But we should be able to allocate another SNat IP
diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs
index 84a81a7b7a..1dbe57da6f 100644
--- a/nexus/db-queries/src/db/queries/network_interface.rs
+++ b/nexus/db-queries/src/db/queries/network_interface.rs
@@ -5,6 +5,7 @@
 //! Queries for inserting and deleting network interfaces.
 
 use crate::db;
+use crate::db::error::{public_error_from_diesel, retryable, ErrorHandler};
 use crate::db::model::IncompleteNetworkInterface;
 use crate::db::pool::DbConnection;
 use crate::db::queries::next_item::DefaultShiftGenerator;
@@ -120,6 +121,8 @@ pub enum InsertError {
     InstanceMustBeStopped(Uuid),
     /// The instance does not exist at all, or is in the destroyed state.
     InstanceNotFound(Uuid),
+    /// The operation occurred within a transaction, and is retryable
+    Retryable(DieselError),
     /// Any other error
     External(external::Error),
 }
@@ -135,7 +138,6 @@ impl InsertError {
         e: DieselError,
         interface: &IncompleteNetworkInterface,
     ) -> Self {
-        use crate::db::error;
         match e {
             // Catch the specific errors designed to communicate the failures we
             // want to distinguish
             DieselError::DatabaseError(_, _) => {
                 decode_database_error(e, interface)
             }
             // Any other error at all is a bug
-            _ => InsertError::External(error::public_error_from_diesel(
+            _ => InsertError::External(public_error_from_diesel(
                 e,
-                error::ErrorHandler::Server,
+                ErrorHandler::Server,
             )),
         }
     }
@@ -209,6 +211,9 @@ impl InsertError {
             InsertError::InstanceNotFound(id) => {
                 external::Error::not_found_by_id(external::ResourceType::Instance, &id)
             }
+            InsertError::Retryable(err) => {
+                public_error_from_diesel(err, ErrorHandler::Server)
+            }
             InsertError::External(e) => e,
         }
     }
@@ -290,6 +295,10 @@ fn decode_database_error(
         r#"uuid: incorrect UUID length: non-unique-subnets"#,
     );
 
+    if retryable(&err) {
+        return InsertError::Retryable(err);
+    }
+
     match err {
         // If the address allocation subquery fails, we'll attempt to insert
         // NULL for the `ip` column. This checks that the non-NULL constraint on
diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs
index a080af4c37..3c37bf6b2e 100644
--- a/nexus/db-queries/src/db/queries/region_allocation.rs
+++ b/nexus/db-queries/src/db/queries/region_allocation.rs
@@ -46,19 +46,23 @@ pub fn from_diesel(e: DieselError) -> external::Error {
         NOT_ENOUGH_UNIQUE_ZPOOLS_SENTINEL,
     ];
     if let Some(sentinel) = matches_sentinel(&e, &sentinels) {
+        let external_message = "Not enough storage";
         match sentinel {
             NOT_ENOUGH_DATASETS_SENTINEL => {
-                return external::Error::unavail(
+                return external::Error::insufficient_capacity(
+                    external_message,
                     "Not enough datasets to allocate disks",
                 );
             }
             NOT_ENOUGH_ZPOOL_SPACE_SENTINEL => {
-                return external::Error::unavail(
+                return external::Error::insufficient_capacity(
+                    external_message,
                     "Not enough zpool space to allocate disks. There may not be enough disks with space for the requested region. You may also see this if your rack is in a degraded state, or you're running the default multi-rack topology configuration in a 1-sled development environment.",
                 );
             }
             NOT_ENOUGH_UNIQUE_ZPOOLS_SENTINEL => {
-                return external::Error::unavail(
+                return external::Error::insufficient_capacity(
+                    external_message,
                     "Not enough unique zpools selected while allocating disks",
                 );
             }
@@ -290,6 +294,7 @@ impl CandidateZpools {
         seed: u128,
         distinct_sleds: bool,
     ) -> Self {
+        use schema::sled::dsl as sled_dsl;
         use schema::zpool::dsl as zpool_dsl;
 
         // Why are we using raw `diesel::dsl::sql` here?
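A note on the error-mapping pattern in the hunks above: `Error::insufficient_capacity` carries two messages, an external one shown to API clients and an internal one preserved for logs and diagnostics. A small sketch of the pattern, reusing strings from the region-allocation hunk (the helper function name is illustrative, not part of the change):

```rust
// Sketch of the two-message capacity error used in the hunks above.
use omicron_common::api::external::Error;

fn not_enough_datasets_error() -> Error {
    Error::insufficient_capacity(
        // External message: what an API client sees.
        "Not enough storage",
        // Internal message: the detail kept for logs and debugging.
        "Not enough datasets to allocate disks",
    )
}
```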
@@ -310,13 +315,20 @@
             + diesel::dsl::sql(&zpool_size_delta.to_string()))
         .le(diesel::dsl::sql(zpool_dsl::total_size::NAME));
 
+        // We need to join on the sled table to access provision_state.
+        let with_sled = sled_dsl::sled.on(zpool_dsl::sled_id.eq(sled_dsl::id));
         let with_zpool = zpool_dsl::zpool
-            .on(zpool_dsl::id.eq(old_zpool_usage::dsl::pool_id));
+            .on(zpool_dsl::id.eq(old_zpool_usage::dsl::pool_id))
+            .inner_join(with_sled);
+
+        let sled_is_provisionable = sled_dsl::provision_state
+            .eq(crate::db::model::SledProvisionState::Provisionable);
 
         let base_query = old_zpool_usage
             .query_source()
             .inner_join(with_zpool)
             .filter(it_will_fit)
+            .filter(sled_is_provisionable)
             .select((old_zpool_usage::dsl::pool_id,));
 
         let query = if distinct_sleds {
diff --git a/nexus/db-queries/src/db/queries/volume.rs b/nexus/db-queries/src/db/queries/volume.rs
index 31882dca89..2c1a9af19b 100644
--- a/nexus/db-queries/src/db/queries/volume.rs
+++ b/nexus/db-queries/src/db/queries/volume.rs
@@ -412,10 +412,11 @@ pub struct DecreaseCrucibleResourceCountAndSoftDeleteVolume {
 }
 
 impl DecreaseCrucibleResourceCountAndSoftDeleteVolume {
-    const UPDATED_REGION_SNAPSHOTS_TABLE: &str = "updated_region_snapshots";
-    const REGION_SNAPSHOTS_TO_CLEAN_UP_TABLE: &str =
+    const UPDATED_REGION_SNAPSHOTS_TABLE: &'static str =
+        "updated_region_snapshots";
+    const REGION_SNAPSHOTS_TO_CLEAN_UP_TABLE: &'static str =
         "region_snapshots_to_clean_up";
-    const UPDATED_VOLUME_TABLE: &str = "updated_volume";
+    const UPDATED_VOLUME_TABLE: &'static str = "updated_volume";
 
     pub fn new(volume_id: Uuid, snapshot_addrs: Vec<String>) -> Self {
         Self {
diff --git a/nexus/db-queries/src/db/saga_recovery.rs b/nexus/db-queries/src/db/saga_recovery.rs
index f3eada1645..802093b889 100644
--- a/nexus/db-queries/src/db/saga_recovery.rs
+++ b/nexus/db-queries/src/db/saga_recovery.rs
@@ -143,8 +143,7 @@ where
             .await
     });
 
-    let mut completion_futures = vec![];
-    completion_futures.reserve(recovery_futures.len());
+    let mut completion_futures = Vec::with_capacity(recovery_futures.len());
     // Loads and resumes all sagas in serial.
     for recovery_future in recovery_futures {
         let saga_complete_future = recovery_future.await?;
diff --git a/nexus/db-queries/src/db/update_and_check.rs b/nexus/db-queries/src/db/update_and_check.rs
index d6bf14c083..fed79d5254 100644
--- a/nexus/db-queries/src/db/update_and_check.rs
+++ b/nexus/db-queries/src/db/update_and_check.rs
@@ -4,6 +4,7 @@
 
 //! CTE implementation for "UPDATE with extended return status".
 
+use super::column_walker::ColumnWalker;
 use super::pool::DbConnection;
 use async_bb8_diesel::AsyncRunQueryDsl;
 use diesel::associations::HasTable;
@@ -21,7 +22,7 @@ use std::marker::PhantomData;
 /// allows referencing generics with names (and extending usage
 /// without re-stating those generic parameters everywhere).
 pub trait UpdateStatementExt {
-    type Table: QuerySource;
+    type Table: Table + QuerySource;
     type WhereClause;
     type Changeset;
 
@@ -32,7 +33,7 @@ pub trait UpdateStatementExt {
 impl<T, U, V> UpdateStatementExt for UpdateStatement<T, U, V>
 where
-    T: QuerySource,
+    T: Table + QuerySource,
 {
     type Table = T;
     type WhereClause = U;
@@ -201,11 +202,11 @@ where
 ///
 /// ```text
 /// // WITH found   AS (SELECT <primary key> FROM T WHERE <primary key = value>)
-/// //      updated AS (UPDATE T SET <constraints> RETURNING *)
+/// //      updated AS (UPDATE T SET <constraints> RETURNING <primary key>)
 /// // SELECT
 /// //        found.<primary key>
 /// //        updated.<primary key>
-/// //        found.*
+/// //        found.<all columns>
 /// // FROM
 /// //     found
 /// // LEFT JOIN
 /// //     updated
 /// // ON
 /// //     found.<primary key> = updated.<primary key>
@@ -217,41 +218,48 @@ impl<US, K> QueryFragment<Pg> for UpdateAndQueryStatement<US, K>
 where
     US: UpdateStatementExt,
     US::Table: HasTable<Table = US::Table> + Table,
+    ColumnWalker<<<US as UpdateStatementExt>::Table as Table>::AllColumns>:
+        IntoIterator<Item = &'static str>,
     PrimaryKey<US>: diesel::Column,
     UpdateStatement<US::Table, US::WhereClause, US::Changeset>: QueryFragment<Pg>,
 {
     fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> {
+        let primary_key = <PrimaryKey<US> as Column>::NAME;
+
         out.push_sql("WITH found AS (");
         self.find_subquery.walk_ast(out.reborrow())?;
         out.push_sql("), updated AS (");
         self.update_statement.walk_ast(out.reborrow())?;
-        // TODO: Only need primary? Or would we actually want
-        // to pass the returned rows back through the result?
-        out.push_sql(" RETURNING *) ");
+        out.push_sql(" RETURNING ");
+        out.push_identifier(primary_key)?;
+        out.push_sql(") ");
 
         out.push_sql("SELECT");
 
-        let name = <PrimaryKey<US> as Column>::NAME;
         out.push_sql(" found.");
-        out.push_identifier(name)?;
+        out.push_identifier(primary_key)?;
         out.push_sql(", updated.");
-        out.push_identifier(name)?;
-        // TODO: I'd prefer to list all columns explicitly. But how?
-        // The types exist within Table::AllColumns, and each one
-        // has a name as "<C as Column>::Name".
-        // But Table::AllColumns is a tuple, which makes iteration
-        // a pain.
-        //
-        // TODO: Technically, we're repeating the PK here.
-        out.push_sql(", found.*");
+        out.push_identifier(primary_key)?;
+
+        // List all the "found" columns explicitly.
+        // This admittedly repeats the primary key, but that keeps the query
+        // "simple" since it returns all columns in the same order as
+        // AllColumns.
+        let all_columns = ColumnWalker::<
+            <<US as UpdateStatementExt>::Table as Table>::AllColumns,
+        >::new();
+        for column in all_columns.into_iter() {
+            out.push_sql(", found.");
+            out.push_identifier(column)?;
+        }
 
         out.push_sql(" FROM found LEFT JOIN updated ON");
         out.push_sql(" found.");
-        out.push_identifier(name)?;
+        out.push_identifier(primary_key)?;
         out.push_sql(" = ");
         out.push_sql("updated.");
-        out.push_identifier(name)?;
+        out.push_identifier(primary_key)?;
 
         Ok(())
     }
diff --git a/nexus/db-queries/src/lib.rs b/nexus/db-queries/src/lib.rs
index a693f7ff42..5d1927ebc7 100644
--- a/nexus/db-queries/src/lib.rs
+++ b/nexus/db-queries/src/lib.rs
@@ -9,6 +9,7 @@ pub mod authz;
 pub mod context;
 pub mod db;
 pub mod provisioning;
+pub mod transaction_retry;
 
 #[macro_use]
 extern crate slog;
diff --git a/nexus/db-queries/src/transaction_retry.rs b/nexus/db-queries/src/transaction_retry.rs
new file mode 100644
index 0000000000..c474b729f8
--- /dev/null
+++ b/nexus/db-queries/src/transaction_retry.rs
@@ -0,0 +1,341 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Helper types for performing automatic transaction retries
+
+use async_bb8_diesel::AsyncConnection;
+use chrono::Utc;
+use diesel::result::Error as DieselError;
+use oximeter::{types::Sample, Metric, MetricsError, Target};
+use rand::{thread_rng, Rng};
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+// Identifies "which" transaction is retrying
+#[derive(Debug, Clone, Target)]
+struct DatabaseTransaction {
+    name: String,
+}
+
+// Identifies that a retry has occurred, and tracks how long
+// the transaction took (either since starting, or since the last
+// retry failure was recorded).
+#[derive(Debug, Clone, Metric)]
+struct RetryData {
+    #[datum]
+    latency: f64,
+    attempt: u32,
+}
+
+// Collects all transaction retry samples
+#[derive(Debug, Default, Clone)]
+pub(crate) struct Producer {
+    samples: Arc<Mutex<Vec<Sample>>>,
+}
+
+impl Producer {
+    pub(crate) fn new() -> Self {
+        Self { samples: Arc::new(Mutex::new(vec![])) }
+    }
+
+    fn append(
+        &self,
+        transaction: &DatabaseTransaction,
+        data: &RetryData,
+    ) -> Result<(), MetricsError> {
+        let sample = Sample::new_with_timestamp(Utc::now(), transaction, data)?;
+        self.samples.lock().unwrap().push(sample);
+        Ok(())
+    }
+}
+
+struct RetryHelperInner {
+    start: chrono::DateTime<Utc>,
+    attempts: u32,
+}
+
+impl RetryHelperInner {
+    fn new() -> Self {
+        Self { start: Utc::now(), attempts: 1 }
+    }
+
+    fn tick(&mut self) -> Self {
+        let start = self.start;
+        let attempts = self.attempts;
+
+        self.start = Utc::now();
+        self.attempts += 1;
+
+        Self { start, attempts }
+    }
+}
+
+/// Helper utility for tracking retry attempts and latency.
+/// Intended to be used from within "transaction_async_with_retry".
+pub struct RetryHelper {
+    producer: Producer,
+    name: &'static str,
+    inner: Mutex<RetryHelperInner>,
+}
+
+const MIN_RETRY_BACKOFF: Duration = Duration::from_millis(0);
+const MAX_RETRY_BACKOFF: Duration = Duration::from_millis(50);
+const MAX_RETRY_ATTEMPTS: u32 = 10;
+
+impl RetryHelper {
+    /// Creates a new RetryHelper, and starts a timer tracking the transaction
+    /// duration.
+    pub(crate) fn new(producer: &Producer, name: &'static str) -> Self {
+        Self {
+            producer: producer.clone(),
+            name,
+            inner: Mutex::new(RetryHelperInner::new()),
+        }
+    }
+
+    /// Calls the function "f" in an asynchronous, retryable transaction.
+    pub async fn transaction<R, Func, Fut>(
+        self,
+        conn: &async_bb8_diesel::Connection<crate::db::DbConnection>,
+        f: Func,
+    ) -> Result<R, DieselError>
+    where
+        R: Send + 'static,
+        Fut: std::future::Future<Output = Result<R, DieselError>> + Send,
+        Func: Fn(async_bb8_diesel::Connection<crate::db::DbConnection>) -> Fut
+            + Send
+            + Sync,
+    {
+        conn.transaction_async_with_retry(f, self.as_callback()).await
+    }
+
+    // Called upon retryable transaction failure.
+    //
+    // This function:
+    // - Appends a metric identifying the duration of the transaction operation
+    // - Performs a random (uniform) backoff (limited to less than 50 ms)
+    // - Returns "true" if the transaction should be restarted
+    async fn retry_callback(&self) -> bool {
+        // Look at the current attempt and start time so we can log this
+        // information before we start sleeping.
+        let (start, attempt) = {
+            let inner = self.inner.lock().unwrap();
+            (inner.start, inner.attempts)
+        };
+
+        let latency = (Utc::now() - start)
+            .to_std()
+            .unwrap_or(Duration::ZERO)
+            .as_secs_f64();
+
+        let _ = self.producer.append(
+            &DatabaseTransaction { name: self.name.into() },
+            &RetryData { latency, attempt },
+        );
+
+        // This backoff is not exponential, but I'm not sure we actually want
+        // that much backoff here. If we're repeatedly failing, it would
+        // probably be better to fail the operation, at which point Oximeter
+        // will keep track of the failing transaction and identify that it's a
+        // high-priority target for CTE conversion.
+        let duration = {
+            let mut rng = thread_rng();
+            rng.gen_range(MIN_RETRY_BACKOFF..MAX_RETRY_BACKOFF)
+        };
+        tokio::time::sleep(duration).await;
+
+        // Now that we've finished sleeping, reset the timer and bump the number
+        // of attempts we've tried.
+        let inner = self.inner.lock().unwrap().tick();
+        return inner.attempts < MAX_RETRY_ATTEMPTS;
+    }
+
+    /// Converts this helper to a retryable callback that can be used from
+    /// "transaction_async_with_retry".
+    pub(crate) fn as_callback(
+        self,
+    ) -> impl Fn() -> futures::future::BoxFuture<'static, bool> {
+        let r = Arc::new(self);
+        move || {
+            let r = r.clone();
+            Box::pin(async move { r.retry_callback().await })
+        }
+    }
+}
+
+impl oximeter::Producer for Producer {
+    fn produce(
+        &mut self,
+    ) -> Result<Box<dyn Iterator<Item = Sample> + 'static>, MetricsError> {
+        let samples = std::mem::take(&mut *self.samples.lock().unwrap());
+        Ok(Box::new(samples.into_iter()))
+    }
+}
+
+/// Helper utility for passing non-retryable errors out-of-band from
+/// transactions.
+///
+/// Transactions prefer to act on the `diesel::result::Error` type,
+/// but transaction users may want more meaningful error types.
+/// This utility helps callers safely propagate back Diesel errors while
+/// retaining auxiliary error info.
+pub struct OptionalError<E>(Arc<Mutex<Option<E>>>);
+
+impl<E> Clone for OptionalError<E> {
+    fn clone(&self) -> Self {
+        Self(self.0.clone())
+    }
+}
+
+impl<E> OptionalError<E> {
+    pub fn new() -> Self {
+        Self(Arc::new(Mutex::new(None)))
+    }
+
+    /// Sets "Self" to the value of `err` and returns `DieselError::RollbackTransaction`.
+    pub fn bail(&self, err: E) -> DieselError {
+        (*self.0.lock().unwrap()).replace(err);
+        DieselError::RollbackTransaction
+    }
+
+    /// If `diesel_error` is retryable, returns it without setting Self.
+    ///
+    /// Otherwise, sets "Self" to the value of `err`, and returns
+    /// `DieselError::RollbackTransaction`.
+    pub fn bail_retryable_or(
+        &self,
+        diesel_error: DieselError,
+        err: E,
+    ) -> DieselError {
+        self.bail_retryable_or_else(diesel_error, |_diesel_error| err)
+    }
+
+    /// If `diesel_error` is retryable, returns it without setting Self.
+    ///
+    /// Otherwise, sets "Self" to the value of `f` applied to `diesel_error`,
+    /// and returns `DieselError::RollbackTransaction`.
+    pub fn bail_retryable_or_else<F>(
+        &self,
+        diesel_error: DieselError,
+        f: F,
+    ) -> DieselError
+    where
+        F: FnOnce(DieselError) -> E,
+    {
+        if crate::db::error::retryable(&diesel_error) {
+            return diesel_error;
+        } else {
+            self.bail(f(diesel_error))
+        }
+    }
+
+    /// If "Self" was previously set to a non-retryable error, return it.
+    pub fn take(self) -> Option<E> {
+        (*self.0.lock().unwrap()).take()
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    use crate::db::datastore::datastore_test;
+    use nexus_test_utils::db::test_setup_database;
+    use omicron_test_utils::dev;
+    use oximeter::types::FieldValue;
+
+    // If a transaction is explicitly rolled back, we should not expect any
+    // samples to be taken. With no retries, this is just a normal operation
+    // failure.
+    #[tokio::test]
+    async fn test_transaction_rollback_produces_no_samples() {
+        let logctx = dev::test_setup_log(
+            "test_transaction_rollback_produces_no_samples",
+        );
+        let mut db = test_setup_database(&logctx.log).await;
+        let (_opctx, datastore) = datastore_test(&logctx, &db).await;
+
+        let conn = datastore.pool_connection_for_tests().await.unwrap();
+
+        datastore
+            .transaction_retry_wrapper(
+                "test_transaction_rollback_produces_no_samples",
+            )
+            .transaction(&conn, |_conn| async move {
+                Err::<(), _>(diesel::result::Error::RollbackTransaction)
+            })
+            .await
+            .expect_err("Should have failed");
+
+        let samples = datastore
+            .transaction_retry_producer()
+            .samples
+            .lock()
+            .unwrap()
+            .clone();
+        assert_eq!(samples, vec![]);
+
+        db.cleanup().await.unwrap();
+        logctx.cleanup_successful();
+    }
+
+    // If a transaction fails with a retryable error, we record samples,
+    // providing oximeter-level information about the attempts.
+ #[tokio::test] + async fn test_transaction_retry_produces_samples() { + let logctx = + dev::test_setup_log("test_transaction_retry_produces_samples"); + let mut db = test_setup_database(&logctx.log).await; + let (_opctx, datastore) = datastore_test(&logctx, &db).await; + + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore + .transaction_retry_wrapper( + "test_transaction_retry_produces_samples", + ) + .transaction(&conn, |_conn| async move { + Err::<(), _>(diesel::result::Error::DatabaseError( + diesel::result::DatabaseErrorKind::SerializationFailure, + Box::new("restart transaction: Retry forever!".to_string()), + )) + }) + .await + .expect_err("Should have failed"); + + let samples = datastore + .transaction_retry_producer() + .samples + .lock() + .unwrap() + .clone(); + assert_eq!(samples.len(), MAX_RETRY_ATTEMPTS as usize); + + for i in 0..samples.len() { + let sample = &samples[i]; + + assert_eq!( + sample.timeseries_name, + "database_transaction:retry_data" + ); + + let target_fields = sample.sorted_target_fields(); + assert_eq!( + target_fields["name"].value, + FieldValue::String( + "test_transaction_retry_produces_samples".to_string() + ) + ); + + // Attempts are one-indexed + let metric_fields = sample.sorted_metric_fields(); + assert_eq!( + metric_fields["attempt"].value, + FieldValue::U32(u32::try_from(i).unwrap() + 1), + ); + } + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } +} diff --git a/nexus/db-queries/tests/output/authz-roles.out b/nexus/db-queries/tests/output/authz-roles.out index 963f00f7e8..54fb6481a9 100644 --- a/nexus/db-queries/tests/output/authz-roles.out +++ b/nexus/db-queries/tests/output/authz-roles.out @@ -376,6 +376,20 @@ resource: ProjectImage "silo1-proj1-image1" silo1-proj1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! +resource: FloatingIp "silo1-proj1-fip1" + + USER Q R LC RP M MP CC D + fleet-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + fleet-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-collaborator ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-proj1-collaborator ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-proj1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + unauthenticated ! ! ! ! ! ! ! ! + resource: Project "silo1-proj2" USER Q R LC RP M MP CC D @@ -488,6 +502,20 @@ resource: ProjectImage "silo1-proj2-image1" silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! +resource: FloatingIp "silo1-proj2-fip1" + + USER Q R LC RP M MP CC D + fleet-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + fleet-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-collaborator ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + silo1-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + unauthenticated ! ! ! ! ! ! ! ! + resource: Silo "silo2" USER Q R LC RP M MP CC D @@ -768,6 +796,20 @@ resource: ProjectImage "silo2-proj1-image1" silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! +resource: FloatingIp "silo2-proj1-fip1" + + USER Q R LC RP M MP CC D + fleet-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + fleet-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + unauthenticated ! ! ! ! ! ! ! ! 
+
 resource: Rack id "c037e882-8b6d-c8b5-bef4-97e848eb0a50"
 
     USER Q R LC RP M MP CC D
diff --git a/nexus/examples/config.toml b/nexus/examples/config.toml
index 3679fa8196..9d6bf2d22f 100644
--- a/nexus/examples/config.toml
+++ b/nexus/examples/config.toml
@@ -100,6 +100,7 @@ inventory.period_secs = 600
 inventory.nkeep = 5
 # Disable inventory collection altogether (for emergencies)
 inventory.disable = false
+phantom_disks.period_secs = 30
 
 [default_region_allocation_strategy]
 # allocate region on 3 random distinct zpools, on 3 random distinct sleds.
diff --git a/nexus/inventory/Cargo.toml b/nexus/inventory/Cargo.toml
index 202aff49b2..6bb63cf9f7 100644
--- a/nexus/inventory/Cargo.toml
+++ b/nexus/inventory/Cargo.toml
@@ -6,6 +6,7 @@ license = "MPL-2.0"
 
 [dependencies]
 anyhow.workspace = true
+base64.workspace = true
 chrono.workspace = true
 gateway-client.workspace = true
 gateway-messages.workspace = true
diff --git a/nexus/inventory/src/builder.rs b/nexus/inventory/src/builder.rs
index ad008ee4df..188a48b553 100644
--- a/nexus/inventory/src/builder.rs
+++ b/nexus/inventory/src/builder.rs
@@ -19,6 +19,9 @@ use nexus_types::inventory::Caboose;
 use nexus_types::inventory::CabooseFound;
 use nexus_types::inventory::CabooseWhich;
 use nexus_types::inventory::Collection;
+use nexus_types::inventory::RotPage;
+use nexus_types::inventory::RotPageFound;
+use nexus_types::inventory::RotPageWhich;
 use nexus_types::inventory::RotState;
 use nexus_types::inventory::ServiceProcessor;
 use std::collections::BTreeMap;
@@ -39,10 +42,13 @@ pub struct CollectionBuilder {
     collector: String,
     baseboards: BTreeSet<Arc<BaseboardId>>,
     cabooses: BTreeSet<Arc<Caboose>>,
+    rot_pages: BTreeSet<Arc<RotPage>>,
     sps: BTreeMap<Arc<BaseboardId>, ServiceProcessor>,
     rots: BTreeMap<Arc<BaseboardId>, RotState>,
     cabooses_found:
         BTreeMap<CabooseWhich, BTreeMap<Arc<BaseboardId>, CabooseFound>>,
+    rot_pages_found:
+        BTreeMap<RotPageWhich, BTreeMap<Arc<BaseboardId>, RotPageFound>>,
 }
 
 impl CollectionBuilder {
@@ -58,9 +64,11 @@ impl CollectionBuilder {
             collector: collector.to_owned(),
             baseboards: BTreeSet::new(),
             cabooses: BTreeSet::new(),
+            rot_pages: BTreeSet::new(),
             sps: BTreeMap::new(),
             rots: BTreeMap::new(),
             cabooses_found: BTreeMap::new(),
+            rot_pages_found: BTreeMap::new(),
         }
     }
 
@@ -78,9 +86,11 @@ impl CollectionBuilder {
             collector: self.collector,
             baseboards: self.baseboards,
             cabooses: self.cabooses,
+            rot_pages: self.rot_pages,
             sps: self.sps,
             rots: self.rots,
             cabooses_found: self.cabooses_found,
+            rot_pages_found: self.rot_pages_found,
         }
     }
 
@@ -251,6 +261,75 @@ impl CollectionBuilder {
         }
     }
 
+    /// Returns true if we already found the root of trust page for `which` for
+    /// baseboard `baseboard`
+    ///
+    /// This is used to avoid requesting it multiple times (from multiple MGS
+    /// instances).
+    pub fn found_rot_page_already(
+        &self,
+        baseboard: &BaseboardId,
+        which: RotPageWhich,
+    ) -> bool {
+        self.rot_pages_found
+            .get(&which)
+            .map(|map| map.contains_key(baseboard))
+            .unwrap_or(false)
+    }
+
+    /// Record the given root of trust page found for the given baseboard
+    ///
+    /// The baseboard must previously have been reported using
+    /// `found_sp_state()`.
+    ///
+    /// `source` is an arbitrary string for debugging that describes the MGS
+    /// that reported this data (generally a URL string).
+    pub fn found_rot_page(
+        &mut self,
+        baseboard: &BaseboardId,
+        which: RotPageWhich,
+        source: &str,
+        page: RotPage,
+    ) -> Result<(), anyhow::Error> {
+        // Normalize the page contents: i.e., if we've seen this exact page
+        // before, use the same record from before. Otherwise, make a new one.
+ let sw_rot_page = Self::normalize_item(&mut self.rot_pages, page); + let (baseboard, _) = + self.sps.get_key_value(baseboard).ok_or_else(|| { + anyhow!( + "reporting rot page for unknown baseboard: {:?} ({:?})", + baseboard, + sw_rot_page + ) + })?; + let by_id = self.rot_pages_found.entry(which).or_default(); + if let Some(previous) = by_id.insert( + baseboard.clone(), + RotPageFound { + time_collected: now(), + source: source.to_owned(), + page: sw_rot_page.clone(), + }, + ) { + let error = if *previous.page == *sw_rot_page { + anyhow!("reported multiple times (same value)",) + } else { + anyhow!( + "reported rot page multiple times (previously {:?}, \ + now {:?})", + previous, + sw_rot_page + ) + }; + Err(error.context(format!( + "baseboard {:?} rot page {:?}", + baseboard, which + ))) + } else { + Ok(()) + } + } + /// Helper function for normalizing items /// /// If `item` (or its equivalent) is not already in `items`, insert it. @@ -301,6 +380,8 @@ mod test { use crate::examples::representative; use crate::examples::sp_state; use crate::examples::Representative; + use base64::engine::general_purpose::STANDARD as BASE64_STANDARD; + use base64::Engine; use gateway_client::types::PowerState; use gateway_client::types::RotSlot; use gateway_client::types::RotState; @@ -310,6 +391,8 @@ mod test { use nexus_types::inventory::BaseboardId; use nexus_types::inventory::Caboose; use nexus_types::inventory::CabooseWhich; + use nexus_types::inventory::RotPage; + use nexus_types::inventory::RotPageWhich; // Verify the contents of an empty collection. #[test] @@ -326,9 +409,11 @@ mod test { assert_eq!(collection.collector, "test_empty"); assert!(collection.baseboards.is_empty()); assert!(collection.cabooses.is_empty()); + assert!(collection.rot_pages.is_empty()); assert!(collection.sps.is_empty()); assert!(collection.rots.is_empty()); assert!(collection.cabooses_found.is_empty()); + assert!(collection.rot_pages_found.is_empty()); } // Simple test of a single, fairly typical collection that contains just @@ -428,6 +513,33 @@ mod test { } assert!(collection.cabooses.contains(&common_caboose)); + // Verify the common RoT page data. + let common_rot_page_baseboards = [&sled1_bb, &sled3_bb, &switch]; + let common_rot_page = nexus_types::inventory::RotPage { + // base64("1") == "MQ==" + data_base64: "MQ==".to_string(), + }; + for bb in &common_rot_page_baseboards { + let _ = collection.sps.get(*bb).unwrap(); + let p0 = collection.rot_page_for(RotPageWhich::Cmpa, bb).unwrap(); + let p1 = + collection.rot_page_for(RotPageWhich::CfpaActive, bb).unwrap(); + let p2 = collection + .rot_page_for(RotPageWhich::CfpaInactive, bb) + .unwrap(); + let p3 = + collection.rot_page_for(RotPageWhich::CfpaScratch, bb).unwrap(); + assert_eq!(p0.source, "test suite"); + assert_eq!(*p0.page, common_rot_page); + assert_eq!(p1.source, "test suite"); + assert_eq!(*p1.page, common_rot_page); + assert_eq!(p2.source, "test suite"); + assert_eq!(*p2.page, common_rot_page); + assert_eq!(p3.source, "test suite"); + assert_eq!(*p3.page, common_rot_page); + } + assert!(collection.rot_pages.contains(&common_rot_page)); + // Verify the specific, different data for the healthy SPs and RoTs that // we reported. 
// sled1 @@ -474,6 +586,20 @@ mod test { ); assert_eq!(rot.transient_boot_preference, Some(RotSlot::B)); + // sled 2 did not have any RoT pages reported + assert!(collection + .rot_page_for(RotPageWhich::Cmpa, &sled2_bb) + .is_none()); + assert!(collection + .rot_page_for(RotPageWhich::CfpaActive, &sled2_bb) + .is_none()); + assert!(collection + .rot_page_for(RotPageWhich::CfpaInactive, &sled2_bb) + .is_none()); + assert!(collection + .rot_page_for(RotPageWhich::CfpaScratch, &sled2_bb) + .is_none()); + // switch let sp = collection.sps.get(&switch).unwrap(); assert_eq!(sp.source, "fake MGS 2"); @@ -544,6 +670,38 @@ mod test { assert!(collection.cabooses.contains(c)); assert_eq!(c.board, "board_psc_rot_b"); + // The PSC also has four different RoT pages! + let p = + &collection.rot_page_for(RotPageWhich::Cmpa, &psc).unwrap().page; + assert_eq!( + BASE64_STANDARD.decode(&p.data_base64).unwrap(), + b"psc cmpa" + ); + let p = &collection + .rot_page_for(RotPageWhich::CfpaActive, &psc) + .unwrap() + .page; + assert_eq!( + BASE64_STANDARD.decode(&p.data_base64).unwrap(), + b"psc cfpa active" + ); + let p = &collection + .rot_page_for(RotPageWhich::CfpaInactive, &psc) + .unwrap() + .page; + assert_eq!( + BASE64_STANDARD.decode(&p.data_base64).unwrap(), + b"psc cfpa inactive" + ); + let p = &collection + .rot_page_for(RotPageWhich::CfpaScratch, &psc) + .unwrap() + .page; + assert_eq!( + BASE64_STANDARD.decode(&p.data_base64).unwrap(), + b"psc cfpa scratch" + ); + // Verify the reported SP state for sled3, which did not have a healthy // RoT, nor any cabooses. let sp = collection.sps.get(&sled3_bb).unwrap(); @@ -565,8 +723,9 @@ mod test { assert_eq!(collection.sps.len(), collection.rots.len() + 1); // There should be five cabooses: the four used for the PSC (see above), - // plus the common one. + // plus the common one; same for RoT pages. assert_eq!(collection.cabooses.len(), 5); + assert_eq!(collection.rot_pages.len(), 5); } // Exercises all the failure cases that shouldn't happen in real systems. 
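The five-cabooses / five-RoT-pages accounting in the test above relies on the builder normalizing duplicate values to a single shared record. A std-only sketch of that dedup idea (an illustration, not the actual `CollectionBuilder::normalize_item`, which is generic over the item type):

```rust
use std::collections::BTreeSet;
use std::sync::Arc;

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct RotPage {
    data_base64: String,
}

// Roughly what the builder's normalize_item helper does: reuse the existing
// Arc when an equivalent value was already recorded, insert otherwise.
fn normalize_item(
    items: &mut BTreeSet<Arc<RotPage>>,
    item: RotPage,
) -> Arc<RotPage> {
    let item = Arc::new(item);
    match items.get(&item) {
        Some(found) => found.clone(),
        None => {
            items.insert(item.clone());
            item
        }
    }
}

fn main() {
    let mut pages = BTreeSet::new();
    // base64("1") == "MQ==", as in the test above
    let a = normalize_item(&mut pages, RotPage { data_base64: "MQ==".into() });
    let b = normalize_item(&mut pages, RotPage { data_base64: "MQ==".into() });
    assert!(Arc::ptr_eq(&a, &b)); // deduplicated to one shared record
    assert_eq!(pages.len(), 1);
}
```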
@@ -704,7 +863,7 @@ mod test {
         assert_eq!(error.to_string(), error2.to_string(),);
 
         // report the same caboose twice with the same contents
-        let _ = builder
+        builder
             .found_caboose(
                 &sled1_bb,
                 CabooseWhich::SpSlot0,
@@ -747,12 +906,74 @@ mod test {
         ));
         assert!(message.contains(", now "));
 
+        // report RoT page for an unknown baseboard
+        let rot_page1 = RotPage { data_base64: "page1".to_string() };
+        let rot_page2 = RotPage { data_base64: "page2".to_string() };
+        assert!(!builder
+            .found_rot_page_already(&bogus_baseboard, RotPageWhich::Cmpa));
+        let error = builder
+            .found_rot_page(
+                &bogus_baseboard,
+                RotPageWhich::Cmpa,
+                "dummy",
+                rot_page1.clone(),
+            )
+            .unwrap_err();
+        assert_eq!(
+            error.to_string(),
+            "reporting rot page for unknown baseboard: \
+            BaseboardId { part_number: \"p1\", serial_number: \"bogus\" } \
+            (RotPage { data_base64: \"page1\" })"
+        );
+        assert!(!builder
+            .found_rot_page_already(&bogus_baseboard, RotPageWhich::Cmpa));
+
+        // report the same rot page twice with the same contents
+        builder
+            .found_rot_page(
+                &sled1_bb,
+                RotPageWhich::Cmpa,
+                "dummy",
+                rot_page1.clone(),
+            )
+            .unwrap();
+        let error = builder
+            .found_rot_page(
+                &sled1_bb,
+                RotPageWhich::Cmpa,
+                "dummy",
+                rot_page1.clone(),
+            )
+            .unwrap_err();
+        assert_eq!(
+            format!("{:#}", error),
+            "baseboard BaseboardId { part_number: \"model1\", \
+            serial_number: \"s1\" } rot page Cmpa: reported multiple \
+            times (same value)"
+        );
+
+        // report the same rot page again with different contents
+        let error = builder
+            .found_rot_page(
+                &sled1_bb,
+                RotPageWhich::Cmpa,
+                "dummy",
+                rot_page2.clone(),
+            )
+            .unwrap_err();
+        let message = format!("{:#}", error);
+        println!("found error: {}", message);
+        assert!(message.contains(
+            "rot page Cmpa: reported rot page multiple times (previously"
+        ));
+        assert!(message.contains(", now RotPage { data_base64: \"page2\" }"));
+
         // We should still get a valid collection.
         let collection = builder.build();
         println!("{:#?}", collection);
         assert_eq!(collection.collector, "test_problems");
 
-        // We should still have the one sled and its SP slot0 caboose.
+        // We should still have the one sled, its SP slot0 caboose, and its Cmpa
+        // RoT page.
         assert!(collection.baseboards.contains(&sled1_bb));
         let _ = collection.sps.get(&sled1_bb).unwrap();
         let caboose =
@@ -769,6 +990,28 @@ mod test {
         assert!(collection
             .caboose_for(CabooseWhich::RotSlotB, &sled1_bb)
             .is_none());
+        let rot_page =
+            collection.rot_page_for(RotPageWhich::Cmpa, &sled1_bb).unwrap();
+        assert!(collection.rot_pages.contains(&rot_page.page));
+
+        // TODO-correctness Is this test correct? We reported the same RoT page
+        // with different data (rot_page1, then rot_page2). The second
+        // `found_rot_page` returned an error, but we overwrote the original
+        // data and did not record the error in `collection.errors`. Should we
+        // either have kept the original data, or returned Ok while recording
+        // the error? It seems a little strange that we returned Err but
+        // accepted the new data.
+        assert_eq!(rot_page.page.data_base64, rot_page2.data_base64);
+
+        assert!(collection
+            .rot_page_for(RotPageWhich::CfpaActive, &sled1_bb)
+            .is_none());
+        assert!(collection
+            .rot_page_for(RotPageWhich::CfpaInactive, &sled1_bb)
+            .is_none());
+        assert!(collection
+            .rot_page_for(RotPageWhich::CfpaScratch, &sled1_bb)
+            .is_none());
 
         // We should see an error.
assert_eq!( diff --git a/nexus/inventory/src/collector.rs b/nexus/inventory/src/collector.rs index 1676f44083..7c6570436a 100644 --- a/nexus/inventory/src/collector.rs +++ b/nexus/inventory/src/collector.rs @@ -6,8 +6,13 @@ use crate::builder::CollectionBuilder; use anyhow::Context; +use gateway_client::types::GetCfpaParams; +use gateway_client::types::RotCfpaSlot; +use gateway_messages::SpComponent; use nexus_types::inventory::CabooseWhich; use nexus_types::inventory::Collection; +use nexus_types::inventory::RotPage; +use nexus_types::inventory::RotPageWhich; use slog::{debug, error}; use std::sync::Arc; use strum::IntoEnumIterator; @@ -195,6 +200,84 @@ impl Collector { ); } } + + // For each kind of RoT page that we care about, if it hasn't been + // fetched already, fetch it and record it. Generally, we'd only + // get here for the first MGS client. Assuming that one succeeds, + // the other(s) will skip this loop. + for which in RotPageWhich::iter() { + if self.in_progress.found_rot_page_already(&baseboard_id, which) + { + continue; + } + + let component = SpComponent::ROT.const_as_str(); + + let result = match which { + RotPageWhich::Cmpa => client + .sp_rot_cmpa_get(sp.type_, sp.slot, component) + .await + .map(|response| response.into_inner().base64_data), + RotPageWhich::CfpaActive => client + .sp_rot_cfpa_get( + sp.type_, + sp.slot, + component, + &GetCfpaParams { slot: RotCfpaSlot::Active }, + ) + .await + .map(|response| response.into_inner().base64_data), + RotPageWhich::CfpaInactive => client + .sp_rot_cfpa_get( + sp.type_, + sp.slot, + component, + &GetCfpaParams { slot: RotCfpaSlot::Inactive }, + ) + .await + .map(|response| response.into_inner().base64_data), + RotPageWhich::CfpaScratch => client + .sp_rot_cfpa_get( + sp.type_, + sp.slot, + component, + &GetCfpaParams { slot: RotCfpaSlot::Scratch }, + ) + .await + .map(|response| response.into_inner().base64_data), + } + .with_context(|| { + format!( + "MGS {:?}: SP {:?}: rot page {:?}", + client.baseurl(), + sp, + which + ) + }); + + let page = match result { + Err(error) => { + self.in_progress.found_error(error); + continue; + } + Ok(data_base64) => RotPage { data_base64 }, + }; + if let Err(error) = self.in_progress.found_rot_page( + &baseboard_id, + which, + client.baseurl(), + page, + ) { + error!( + &self.log, + "error reporting rot page: {:?} {:?} {:?}: {:#}", + baseboard_id, + which, + client.baseurl(), + error + ); + } + } } } } @@ -236,6 +319,11 @@ mod test { .unwrap(); } + write!(&mut s, "\nrot pages:\n").unwrap(); + for p in &collection.rot_pages { + write!(&mut s, " data_base64 {:?}\n", p.data_base64).unwrap(); + } + // All we really need to check here is that we're reporting the right // SPs, RoTs, and cabooses. The actual SP data, RoT data, and caboose // data comes straight from MGS. And proper handling of that data is @@ -272,6 +360,22 @@ mod test { } } + write!(&mut s, "\nrot pages found:\n").unwrap(); + for (kind, bb_to_found) in &collection.rot_pages_found { + for (bb, found) in bb_to_found { + write!( + &mut s, + " {:?} baseboard part {:?} serial {:?}: \ + data_base64 {:?}\n", + kind, + bb.part_number, + bb.serial_number, + found.page.data_base64 + ) + .unwrap(); + } + } + write!(&mut s, "\nerrors:\n").unwrap(); for e in &collection.errors { // Some error strings have OS error numbers in them. 
We want to diff --git a/nexus/inventory/src/examples.rs b/nexus/inventory/src/examples.rs index 52aca397bb..0ce3712942 100644 --- a/nexus/inventory/src/examples.rs +++ b/nexus/inventory/src/examples.rs @@ -13,6 +13,8 @@ use gateway_client::types::SpState; use gateway_client::types::SpType; use nexus_types::inventory::BaseboardId; use nexus_types::inventory::CabooseWhich; +use nexus_types::inventory::RotPage; +use nexus_types::inventory::RotPageWhich; use std::sync::Arc; use strum::IntoEnumIterator; @@ -164,7 +166,7 @@ pub fn representative() -> Representative { for bb in &common_caboose_baseboards { for which in CabooseWhich::iter() { assert!(!builder.found_caboose_already(bb, which)); - let _ = builder + builder .found_caboose(bb, which, "test suite", caboose("1")) .unwrap(); assert!(builder.found_caboose_already(bb, which)); @@ -174,7 +176,7 @@ pub fn representative() -> Representative { // For the PSC, use different cabooses for both slots of both the SP and // RoT, just to exercise that we correctly keep track of different // cabooses. - let _ = builder + builder .found_caboose( &psc_bb, CabooseWhich::SpSlot0, @@ -182,7 +184,7 @@ pub fn representative() -> Representative { caboose("psc_sp_0"), ) .unwrap(); - let _ = builder + builder .found_caboose( &psc_bb, CabooseWhich::SpSlot1, @@ -190,7 +192,7 @@ pub fn representative() -> Representative { caboose("psc_sp_1"), ) .unwrap(); - let _ = builder + builder .found_caboose( &psc_bb, CabooseWhich::RotSlotA, @@ -198,7 +200,7 @@ pub fn representative() -> Representative { caboose("psc_rot_a"), ) .unwrap(); - let _ = builder + builder .found_caboose( &psc_bb, CabooseWhich::RotSlotB, @@ -209,6 +211,59 @@ pub fn representative() -> Representative { // We deliberately provide no cabooses for sled3. + // Report some RoT pages. + + // We'll use the same RoT pages for most of these components, although + // that's not possible in a real system. We deliberately construct a new + // value each time to make sure the builder correctly normalizes it. + let common_rot_page_baseboards = [&sled1_bb, &sled3_bb, &switch1_bb]; + for bb in common_rot_page_baseboards { + for which in RotPageWhich::iter() { + assert!(!builder.found_rot_page_already(bb, which)); + builder + .found_rot_page(bb, which, "test suite", rot_page("1")) + .unwrap(); + assert!(builder.found_rot_page_already(bb, which)); + } + } + + // For the PSC, use different RoT page data for each kind of page, just to + // exercise that we correctly keep track of different data values. + builder + .found_rot_page( + &psc_bb, + RotPageWhich::Cmpa, + "test suite", + rot_page("psc cmpa"), + ) + .unwrap(); + builder + .found_rot_page( + &psc_bb, + RotPageWhich::CfpaActive, + "test suite", + rot_page("psc cfpa active"), + ) + .unwrap(); + builder + .found_rot_page( + &psc_bb, + RotPageWhich::CfpaInactive, + "test suite", + rot_page("psc cfpa inactive"), + ) + .unwrap(); + builder + .found_rot_page( + &psc_bb, + RotPageWhich::CfpaScratch, + "test suite", + rot_page("psc cfpa scratch"), + ) + .unwrap(); + + // We deliberately provide no RoT pages for sled2. 
+ Representative { builder, sleds: [sled1_bb, sled2_bb, sled3_bb], @@ -252,3 +307,10 @@ pub fn caboose(unique: &str) -> SpComponentCaboose { version: format!("version_{}", unique), } } + +pub fn rot_page(unique: &str) -> RotPage { + use base64::Engine; + RotPage { + data_base64: base64::engine::general_purpose::STANDARD.encode(unique), + } +} diff --git a/nexus/inventory/tests/output/collector_basic.txt b/nexus/inventory/tests/output/collector_basic.txt index 4a3bf62d63..b9894ff184 100644 --- a/nexus/inventory/tests/output/collector_basic.txt +++ b/nexus/inventory/tests/output/collector_basic.txt @@ -5,11 +5,21 @@ baseboards: part "FAKE_SIM_SIDECAR" serial "SimSidecar1" cabooses: - board "SimGimletRot" name "SimGimlet" version "0.0.1" git_commit "eeeeeeee" board "SimGimletSp" name "SimGimlet" version "0.0.1" git_commit "ffffffff" - board "SimSidecarRot" name "SimSidecar" version "0.0.1" git_commit "eeeeeeee" + board "SimRot" name "SimGimlet" version "0.0.1" git_commit "eeeeeeee" + board "SimRot" name "SimSidecar" version "0.0.1" git_commit "eeeeeeee" board "SimSidecarSp" name "SimSidecar" version "0.0.1" git_commit "ffffffff" +rot pages: + data_base64 "Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 
"Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + SPs: baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00" 
baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01" @@ -31,13 +41,31 @@ cabooses found: SpSlot1 baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimGimletSp" SpSlot1 baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimSidecarSp" SpSlot1 baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimSidecarSp" - RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimGimletRot" - RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimGimletRot" - RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimSidecarRot" - RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimSidecarRot" - RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimGimletRot" - RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimGimletRot" - RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimSidecarRot" - RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimSidecarRot" + RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimRot" + RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimRot" + RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimRot" + RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimRot" + +rot pages found: + Cmpa baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 "Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + Cmpa baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + Cmpa baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 
"c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + Cmpa baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 "Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 
"c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 "Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 
"c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 "Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 
"c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" errors: diff --git a/nexus/inventory/tests/output/collector_errors.txt b/nexus/inventory/tests/output/collector_errors.txt index 4404046253..a50e24ca30 100644 --- a/nexus/inventory/tests/output/collector_errors.txt +++ b/nexus/inventory/tests/output/collector_errors.txt @@ -5,11 +5,21 @@ baseboards: part "FAKE_SIM_SIDECAR" serial "SimSidecar1" cabooses: - board "SimGimletRot" name "SimGimlet" version "0.0.1" git_commit "eeeeeeee" board "SimGimletSp" name "SimGimlet" version "0.0.1" git_commit "ffffffff" - board "SimSidecarRot" name "SimSidecar" version "0.0.1" git_commit "eeeeeeee" + board "SimRot" name "SimGimlet" version "0.0.1" git_commit "eeeeeeee" + board "SimRot" name "SimSidecar" version "0.0.1" git_commit "eeeeeeee" board "SimSidecarSp" name "SimSidecar" version "0.0.1" git_commit "ffffffff" +rot pages: + data_base64 "Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 
"Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 
"c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + data_base64 "c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + SPs: baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00" baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01" @@ -31,14 +41,32 @@ cabooses found: SpSlot1 baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimGimletSp" SpSlot1 baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimSidecarSp" SpSlot1 baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimSidecarSp" - RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimGimletRot" - RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimGimletRot" - RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimSidecarRot" - RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimSidecarRot" - RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimGimletRot" - RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimGimletRot" - RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimSidecarRot" - RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimSidecarRot" + RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimRot" + RotSlotA baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimRot" + RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimRot" + RotSlotA baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": board "SimRot" + RotSlotB baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": board "SimRot" + +rot pages found: + Cmpa baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 
"Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + Cmpa baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNtcGEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + Cmpa baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 "c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + Cmpa baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jbXBhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 
"Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNmcGEtYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 "c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaActive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jZnBhLWFjdGl2ZQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 
"Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNmcGEtaW5hY3RpdmUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 "c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaInactive baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jZnBhLWluYWN0aXZlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet00": data_base64 
"Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_GIMLET" serial "SimGimlet01": data_base64 "Z2ltbGV0LWNmcGEtc2NyYXRjaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar0": data_base64 "c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + CfpaScratch baseboard part "FAKE_SIM_SIDECAR" serial "SimSidecar1": data_base64 "c2lkZWNhci1jZnBhLXNjcmF0Y2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" errors: error: MGS "http://[100::1]:12345": listing ignition targets: Communication Error <> diff --git a/nexus/src/app/address_lot.rs b/nexus/src/app/address_lot.rs index b87ae1b09f..847021bdd4 100644 --- a/nexus/src/app/address_lot.rs +++ b/nexus/src/app/address_lot.rs @@ -94,10 +94,9 @@ fn validate_blocks(lot: ¶ms::AddressLotCreate) -> Result<(), Error> { validate_v6_block(first, last)? 
} _ => { - return Err(Error::InvalidRequest { - message: "Block bounds must be in same address family" - .into(), - }) + return Err(Error::invalid_request( + "Block bounds must be in same address family", + )); } } } @@ -106,18 +105,18 @@ fn validate_blocks(lot: &params::AddressLotCreate) -> Result<(), Error> { fn validate_v4_block(first: &Ipv4Addr, last: &Ipv4Addr) -> Result<(), Error> { if first > last { - return Err(Error::InvalidRequest { - message: "Invalid range, first must be <= last".into(), - }); + return Err(Error::invalid_request( + "Invalid range, first must be <= last", + )); } Ok(()) } fn validate_v6_block(first: &Ipv6Addr, last: &Ipv6Addr) -> Result<(), Error> { if first > last { - return Err(Error::InvalidRequest { - message: "Invalid range, first must be <= last".into(), - }); + return Err(Error::invalid_request( + "Invalid range, first must be <= last", + )); } Ok(()) } diff --git a/nexus/src/app/background/dns_config.rs b/nexus/src/app/background/dns_config.rs index 654e9c0bf1..805ae813fe 100644 --- a/nexus/src/app/background/dns_config.rs +++ b/nexus/src/app/background/dns_config.rs @@ -166,7 +166,6 @@ mod test { use crate::app::background::init::test::read_internal_dns_zone_id; use crate::app::background::init::test::write_test_dns_generation; use assert_matches::assert_matches; - use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use async_bb8_diesel::AsyncSimpleConnection; use diesel::ExpressionMethods; @@ -237,11 +236,11 @@ mod test { ); // Similarly, wipe all of the state and verify that we handle that okay. + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore - .pool_connection_for_tests() - .await - .unwrap() - .transaction_async(|conn| async move { + .transaction_retry_wrapper("dns_config_test_basic") + .transaction(&conn, |conn| async move { conn.batch_execute_async( nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL, ) @@ -265,7 +264,7 @@ mod test { .execute_async(&conn) .await .unwrap(); - Ok::<_, nexus_db_queries::db::TransactionError<()>>(()) + Ok(()) }) .await .unwrap(); diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index d27248ffdc..d30d2162c4 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -11,6 +11,7 @@ use super::dns_servers; use super::external_endpoints; use super::inventory_collection; use super::nat_cleanup; +use super::phantom_disks; use nexus_db_model::DnsGroup; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; @@ -52,6 +53,9 @@ pub struct BackgroundTasks { /// task handle for the task that collects inventory pub task_inventory_collection: common::TaskHandle, + + /// task handle for the task that detects phantom disks + pub task_phantom_disks: common::TaskHandle, } impl BackgroundTasks { @@ -122,7 +126,7 @@ impl BackgroundTasks { // Background task: inventory collector let task_inventory_collection = { let collector = inventory_collection::InventoryCollector::new( - datastore, + datastore.clone(), resolver, &nexus_id.to_string(), config.inventory.nkeep, @@ -143,6 +147,22 @@ impl BackgroundTasks { task }; + // Background task: phantom disk detection + let task_phantom_disks = { + let detector = phantom_disks::PhantomDiskDetector::new(datastore); + + let task = driver.register( + String::from("phantom_disks"), + String::from("detects and un-deletes phantom disks"), + config.phantom_disks.period_secs, + Box::new(detector), + opctx.child(BTreeMap::new()), + vec![], + ); + + task + }; + BackgroundTasks
{ driver, task_internal_dns_config, @@ -153,6 +173,7 @@ impl BackgroundTasks { external_endpoints, nat_cleanup, task_inventory_collection, + task_phantom_disks, } } @@ -226,14 +247,12 @@ fn init_dns( #[cfg(test)] pub mod test { - use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use dropshot::HandlerTaskMode; use nexus_db_model::DnsGroup; use nexus_db_model::Generation; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; - use nexus_db_queries::db::TransactionError; use nexus_test_utils_macros::nexus_test; use nexus_types::internal_api::params as nexus_params; use nexus_types::internal_api::params::ServiceKind; @@ -425,11 +444,11 @@ pub mod test { datastore: &DataStore, internal_dns_zone_id: Uuid, ) { - type TxnError = TransactionError<()>; { let conn = datastore.pool_connection_for_tests().await.unwrap(); - let _: Result<(), TxnError> = conn - .transaction_async(|conn| async move { + let _: Result<(), _> = datastore + .transaction_retry_wrapper("write_test_dns_generation") + .transaction(&conn, |conn| async move { { use nexus_db_queries::db::model::DnsVersion; use nexus_db_queries::db::schema::dns_version::dsl; diff --git a/nexus/src/app/background/mod.rs b/nexus/src/app/background/mod.rs index 954207cb3c..70b20224d4 100644 --- a/nexus/src/app/background/mod.rs +++ b/nexus/src/app/background/mod.rs @@ -12,6 +12,7 @@ mod external_endpoints; mod init; mod inventory_collection; mod nat_cleanup; +mod phantom_disks; mod status; pub use common::Driver; diff --git a/nexus/src/app/background/phantom_disks.rs b/nexus/src/app/background/phantom_disks.rs new file mode 100644 index 0000000000..b038d70ac6 --- /dev/null +++ b/nexus/src/app/background/phantom_disks.rs @@ -0,0 +1,104 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Background task for detecting and un-deleting phantom disks +//! +//! A "phantom" disk is one where a disk delete saga partially completed but +//! unwound: before a fix for customer-support#58, this would leave disks +//! deleted but would also leave a `virtual_provisioning_resource` record for +//! that disk. There would be no way to re-trigger the disk delete saga as the +//! disk was deleted, so the project that disk was in could not be deleted +//! because associated virtual provisioning resources were still being consumed. +//! +//! The fix for customer-support#58 changes the disk delete saga's unwind to +//! also un-delete the disk and set it to faulted. This enables it to be deleted +//! again. Correcting the disk delete saga's unwind means that phantom disks +//! will not be created in the future when the disk delete saga unwinds, but +//! this background task is required to apply the same fix for disks that are +//! already in this phantom state. 
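To make the invariant described by the doc comment above concrete, here is a minimal illustrative sketch (not part of the patch): a disk is "phantom" when its row is soft-deleted but a provisioning record for it survives. The names and shapes below are stand-ins for the real `find_phantom_disks` datastore query.

// Sketch only: stand-in types, not the nexus model or the real
// find_phantom_disks() datastore query.
struct DiskRow {
    id: u32,
    time_deleted: Option<u64>, // Some(_) means the disk row was soft-deleted
}

fn find_phantom_disks(
    disks: &[DiskRow],
    provisioned: &std::collections::HashSet<u32>,
) -> Vec<u32> {
    disks
        .iter()
        // A deleted disk row...
        .filter(|d| d.time_deleted.is_some())
        // ...whose virtual provisioning resource was never cleaned up.
        .filter(|d| provisioned.contains(&d.id))
        .map(|d| d.id)
        .collect()
}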
+ +use super::common::BackgroundTask; +use futures::future::BoxFuture; +use futures::FutureExt; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; +use serde_json::json; +use std::sync::Arc; + +pub struct PhantomDiskDetector { + datastore: Arc<DataStore>, +} + +impl PhantomDiskDetector { + pub fn new(datastore: Arc<DataStore>) -> Self { + PhantomDiskDetector { datastore } + } +} + +impl BackgroundTask for PhantomDiskDetector { + fn activate<'a, 'b, 'c>( + &'a mut self, + opctx: &'b OpContext, + ) -> BoxFuture<'c, serde_json::Value> + where + 'a: 'c, + 'b: 'c, + { + async { + let log = &opctx.log; + warn!(&log, "phantom disk task started"); + + let phantom_disks = match self.datastore.find_phantom_disks().await + { + Ok(phantom_disks) => phantom_disks, + Err(e) => { + warn!(&log, "error from find_phantom_disks: {:?}", e); + return json!({ + "error": + format!("failed find_phantom_disks: {:#}", e) + }); + } + }; + + let mut phantom_disk_deleted_ok = 0; + let mut phantom_disk_deleted_err = 0; + + for disk in phantom_disks { + warn!(&log, "phantom disk {} found!", disk.id()); + + // If a phantom disk is found, then un-delete it and set it to + // faulted: this will allow a user to request deleting it again. + + let result = self + .datastore + .project_undelete_disk_set_faulted_no_auth(&disk.id()) + .await; + + if let Err(e) = result { + error!( + &log, + "error un-deleting disk {} and setting to faulted: {:#}", + disk.id(), + e, + ); + phantom_disk_deleted_err += 1; + } else { + info!( + &log, + "phantom disk {} un-deleted and set to faulted ok", + disk.id(), + ); + phantom_disk_deleted_ok += 1; + } + } + + warn!(&log, "phantom disk task done"); + json!({ + "phantom_disk_deleted_ok": phantom_disk_deleted_ok, + "phantom_disk_deleted_err": phantom_disk_deleted_err, + }) + } + .boxed() + } +} diff --git a/nexus/src/app/device_auth.rs b/nexus/src/app/device_auth.rs index c9571ee91f..c70b339a36 100644 --- a/nexus/src/app/device_auth.rs +++ b/nexus/src/app/device_auth.rs @@ -114,9 +114,7 @@ impl super::Nexus { token, ) .await?; - Err(Error::InvalidRequest { - message: "device authorization request expired".to_string(), - }) + Err(Error::invalid_request("device authorization request expired")) } else { self.db_datastore .device_access_token_create( diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 28d6c4506c..5dd49a2efb 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -140,48 +140,48 @@ impl super::Nexus { // Reject disks where the block size doesn't evenly divide the // total size if (params.size.to_bytes() % block_size) != 0 { - return Err(Error::InvalidValue { - label: String::from("size and block_size"), - message: format!( + return Err(Error::invalid_value( + "size and block_size", + format!( "total size must be a multiple of block size {}", block_size, ), - }); + )); } // Reject disks where the size isn't at least // MIN_DISK_SIZE_BYTES if params.size.to_bytes() < MIN_DISK_SIZE_BYTES as u64 { - return Err(Error::InvalidValue { - label: String::from("size"), - message: format!( + return Err(Error::invalid_value( + "size", + format!( "total size must be at least {}", ByteCount::from(MIN_DISK_SIZE_BYTES) ), - }); + )); } // Reject disks where the MIN_DISK_SIZE_BYTES doesn't evenly // divide the size if (params.size.to_bytes() % MIN_DISK_SIZE_BYTES as u64) != 0 { - return Err(Error::InvalidValue { - label: String::from("size"), - message: format!( + return Err(Error::invalid_value( + "size", + format!( "total size must be a multiple of {}",
ByteCount::from(MIN_DISK_SIZE_BYTES) ), - }); + )); } // Reject disks where the size is greater than MAX_DISK_SIZE_BYTES if params.size.to_bytes() > MAX_DISK_SIZE_BYTES { - return Err(Error::InvalidValue { - label: String::from("size"), - message: format!( + return Err(Error::invalid_value( + "size", + format!( "total size must be less than {}", ByteCount::try_from(MAX_DISK_SIZE_BYTES).unwrap() ), - }); + )); } Ok(()) @@ -369,32 +369,6 @@ impl super::Nexus { Ok(()) } - /// Import blocks from a URL into a disk - pub(crate) async fn import_blocks_from_url_for_disk( - self: &Arc, - opctx: &OpContext, - disk_lookup: &lookup::Disk<'_>, - params: params::ImportBlocksFromUrl, - ) -> UpdateResult<()> { - let authz_disk: authz::Disk; - - (.., authz_disk) = - disk_lookup.lookup_for(authz::Action::Modify).await?; - - let saga_params = sagas::import_blocks_from_url::Params { - serialized_authn: authn::saga::Serialized::for_opctx(opctx), - disk_id: authz_disk.id(), - - import_params: params.clone(), - }; - - self - .execute_saga::<sagas::import_blocks_from_url::SagaImportBlocksFromUrl>(saga_params) - .await?; - - Ok(()) - } - /// Move a disk from the "ImportReady" state to the "Importing" state, /// blocking any import from URL jobs. pub(crate) async fn disk_manual_import_start( diff --git a/nexus/src/app/external_endpoints.rs b/nexus/src/app/external_endpoints.rs index f95c64d3eb..1ab33c5c9c 100644 --- a/nexus/src/app/external_endpoints.rs +++ b/nexus/src/app/external_endpoints.rs @@ -1539,7 +1539,7 @@ mod test { Err(Error::InvalidRequest { message }) => { assert_eq!(rx_label, "empty"); assert_eq!( - message, + message.external_message(), format!( "HTTP request for unknown server name {:?}", authority.host() diff --git a/nexus/src/app/external_ip.rs b/nexus/src/app/external_ip.rs index 2354e97085..404f597288 100644 --- a/nexus/src/app/external_ip.rs +++ b/nexus/src/app/external_ip.rs @@ -5,11 +5,20 @@ //! External IP addresses for instances use crate::external_api::views::ExternalIp; +use crate::external_api::views::FloatingIp; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::lookup; +use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::model::IpKind; +use nexus_types::external_api::params; +use omicron_common::api::external::http_pagination::PaginatedBy; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DeleteResult; +use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::LookupResult; +use omicron_common::api::external::NameOrId; impl super::Nexus { pub(crate) async fn instance_list_external_ips( @@ -33,4 +42,82 @@ }) .collect::<Vec<_>>()) } + + pub(crate) fn floating_ip_lookup<'a>( + &'a self, + opctx: &'a OpContext, + fip_selector: params::FloatingIpSelector, + ) -> LookupResult<lookup::FloatingIp<'a>> { + match fip_selector { + params::FloatingIpSelector { floating_ip: NameOrId::Id(id), project: None } => { + let floating_ip = + LookupPath::new(opctx, &self.db_datastore).floating_ip_id(id); + Ok(floating_ip) + } + params::FloatingIpSelector { + floating_ip: NameOrId::Name(name), + project: Some(project), + } => { + let floating_ip = self + .project_lookup(opctx, params::ProjectSelector { project })? + .floating_ip_name_owned(name.into()); + Ok(floating_ip) + } + params::FloatingIpSelector { + floating_ip: NameOrId::Id(_), + ..
+ } => Err(Error::invalid_request( + "when providing a Floating IP as an ID, the project should not be specified", + )), + _ => Err(Error::invalid_request( + "Floating IP should either be a UUID or the project should be specified", + )), + } + } + + pub(crate) async fn floating_ips_list( + &self, + opctx: &OpContext, + project_lookup: &lookup::Project<'_>, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec<FloatingIp> { + let (.., authz_project) = + project_lookup.lookup_for(authz::Action::ListChildren).await?; + + Ok(self + .db_datastore + .floating_ips_list(opctx, &authz_project, pagparams) + .await? + .into_iter() + .map(Into::into) + .collect()) + } + + pub(crate) async fn floating_ip_create( + &self, + opctx: &OpContext, + project_lookup: &lookup::Project<'_>, + params: params::FloatingIpCreate, + ) -> CreateResult<FloatingIp> { + let (.., authz_project) = + project_lookup.lookup_for(authz::Action::CreateChild).await?; + + Ok(self + .db_datastore + .allocate_floating_ip(opctx, authz_project.id(), params) + .await? + .try_into() + .unwrap()) + } + + pub(crate) async fn floating_ip_delete( + &self, + opctx: &OpContext, + ip_lookup: lookup::FloatingIp<'_>, + ) -> DeleteResult { + let (.., authz_fip, db_fip) = + ip_lookup.fetch_for(authz::Action::Delete).await?; + + self.db_datastore.floating_ip_delete(opctx, &authz_fip, &db_fip).await + } } diff --git a/nexus/src/app/image.rs b/nexus/src/app/image.rs index 8fa9308c1d..a7fe75a464 100644 --- a/nexus/src/app/image.rs +++ b/nexus/src/app/image.rs @@ -23,7 +23,6 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::NameOrId; use omicron_common::api::external::UpdateResult; -use std::str::FromStr; use std::sync::Arc; use uuid::Uuid; @@ -96,121 +95,6 @@ impl super::Nexus { } }; let new_image = match &params.source { - params::ImageSource::Url { url, block_size } => { - let db_block_size = db::model::BlockSize::try_from(*block_size) - .map_err(|e| Error::InvalidValue { - label: String::from("block_size"), - message: format!("block_size is invalid: {}", e), - })?; - - let image_id = Uuid::new_v4(); - - let volume_construction_request = - sled_agent_client::types::VolumeConstructionRequest::Url { - id: image_id, - block_size: db_block_size.to_bytes().into(), - url: url.clone(), - }; - - let volume_data = - serde_json::to_string(&volume_construction_request)?; - - // use reqwest to query url for size - let dur = std::time::Duration::from_secs(5); - let client = reqwest::ClientBuilder::new() - .connect_timeout(dur) - .timeout(dur) - .build() - .map_err(|e| { - Error::internal_error(&format!( - "failed to build reqwest client: {}", - e - )) - })?; - - let response = client.head(url).send().await.map_err(|e| { - Error::InvalidValue { - label: String::from("url"), - message: format!("error querying url: {}", e), - } - })?; - - if !response.status().is_success() { - return Err(Error::InvalidValue { - label: String::from("url"), - message: format!( - "querying url returned: {}", - response.status() - ), - }); - } - - // grab total size from content length - let content_length = response - .headers() - .get(reqwest::header::CONTENT_LENGTH) - .ok_or("no content length!") - .map_err(|e| Error::InvalidValue { - label: String::from("url"), - message: format!("error querying url: {}", e), - })?; - - let total_size = - u64::from_str(content_length.to_str().map_err(|e| { - Error::InvalidValue { - label: String::from("url"), - message: format!("content length invalid: {}", e), - } - })?)
- .map_err(|e| { - Error::InvalidValue { - label: String::from("url"), - message: format!("content length invalid: {}", e), - } - })?; - - let size: external::ByteCount = total_size.try_into().map_err( - |e: external::ByteCountRangeError| Error::InvalidValue { - label: String::from("size"), - message: format!("total size is invalid: {}", e), - }, - )?; - - // validate total size is divisible by block size - let block_size: u64 = (*block_size).into(); - if (size.to_bytes() % block_size) != 0 { - return Err(Error::InvalidValue { - label: String::from("size"), - message: format!( - "total size {} must be divisible by block size {}", - size.to_bytes(), - block_size - ), - }); - } - - let new_image_volume = - db::model::Volume::new(Uuid::new_v4(), volume_data); - let volume = - self.db_datastore.volume_create(new_image_volume).await?; - - db::model::Image { - identity: db::model::ImageIdentity::new( - image_id, - params.identity.clone(), - ), - silo_id: authz_silo.id(), - project_id: maybe_authz_project.clone().map(|p| p.id()), - volume_id: volume.id(), - url: Some(url.clone()), - os: params.os.clone(), - version: params.version.clone(), - digest: None, // not computed for URL type - block_size: db_block_size, - size: size.into(), - } - } - params::ImageSource::Snapshot { id } => { let image_id = Uuid::new_v4(); @@ -284,9 +168,11 @@ impl super::Nexus { // disk created from this image has to be larger than it. let size: u64 = 100 * 1024 * 1024; let size: external::ByteCount = - size.try_into().map_err(|e| Error::InvalidValue { - label: String::from("size"), - message: format!("size is invalid: {}", e), + size.try_into().map_err(|e| { + Error::invalid_value( + "size", + format!("size is invalid: {}", e), + ) })?; let new_image_volume = @@ -409,9 +295,9 @@ impl super::Nexus { ) .await } - ImageLookup::SiloImage(_) => Err(Error::InvalidRequest { - message: "Cannot promote a silo image".to_string(), - }), + ImageLookup::SiloImage(_) => { + Err(Error::invalid_request("Cannot promote a silo image")) + } } } @@ -437,9 +323,9 @@ impl super::Nexus { ) .await } - ImageLookup::ProjectImage(_) => Err(Error::InvalidRequest { - message: "Cannot demote a project image".to_string(), - }), + ImageLookup::ProjectImage(_) => { + Err(Error::invalid_request("Cannot demote a project image")) + } } } } diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 923bb1777e..987a8ac794 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -5,6 +5,7 @@ //! Virtual Machine Instances use super::MAX_DISKS_PER_INSTANCE; +use super::MAX_EPHEMERAL_IPS_PER_INSTANCE; use super::MAX_EXTERNAL_IPS_PER_INSTANCE; use super::MAX_MEMORY_BYTES_PER_INSTANCE; use super::MAX_NICS_PER_INSTANCE; @@ -52,6 +53,7 @@ use sled_agent_client::types::InstanceProperties; use sled_agent_client::types::InstancePutMigrationIdsBody; use sled_agent_client::types::InstancePutStateBody; use sled_agent_client::types::SourceNatConfig; +use std::matches; use std::net::SocketAddr; use std::sync::Arc; use tokio::io::{AsyncRead, AsyncWrite}; @@ -168,6 +170,18 @@ impl super::Nexus { MAX_EXTERNAL_IPS_PER_INSTANCE, ))); } + if params + .external_ips + .iter() + .filter(|v| matches!(v, params::ExternalIpCreate::Ephemeral { .. 
})) + .count() + > MAX_EPHEMERAL_IPS_PER_INSTANCE + { + return Err(Error::invalid_request(&format!( + "An instance may not have more than {} ephemeral IP address", + MAX_EPHEMERAL_IPS_PER_INSTANCE, + ))); + } if let params::InstanceNetworkInterfaceAttachment::Create(ref ifaces) = params.network_interfaces { @@ -197,13 +211,13 @@ impl super::Nexus { // Reject instances where the memory is not at least // MIN_MEMORY_BYTES_PER_INSTANCE if params.memory.to_bytes() < MIN_MEMORY_BYTES_PER_INSTANCE as u64 { - return Err(Error::InvalidValue { - label: String::from("size"), - message: format!( + return Err(Error::invalid_value( + "size", + format!( "memory must be at least {}", ByteCount::from(MIN_MEMORY_BYTES_PER_INSTANCE) ), - }); + )); } // Reject instances where the memory is not divisible by @@ -211,24 +225,24 @@ impl super::Nexus { if (params.memory.to_bytes() % MIN_MEMORY_BYTES_PER_INSTANCE as u64) != 0 { - return Err(Error::InvalidValue { - label: String::from("size"), - message: format!( + return Err(Error::invalid_value( + "size", + format!( "memory must be divisible by {}", ByteCount::from(MIN_MEMORY_BYTES_PER_INSTANCE) ), - }); + )); } // Reject instances where the memory is greater than the limit if params.memory.to_bytes() > MAX_MEMORY_BYTES_PER_INSTANCE { - return Err(Error::InvalidValue { - label: String::from("size"), - message: format!( + return Err(Error::invalid_value( + "size", + format!( "memory must be less than or equal to {}", ByteCount::try_from(MAX_MEMORY_BYTES_PER_INSTANCE).unwrap() ), - }); + )); } let saga_params = sagas::instance_create::Params { @@ -362,7 +376,7 @@ impl super::Nexus { } if instance.runtime().migration_id.is_some() { - return Err(Error::unavail("instance is already migrating")); + return Err(Error::conflict("instance is already migrating")); } // Kick off the migration saga @@ -771,12 +785,10 @@ impl super::Nexus { if allowed { Ok(InstanceStateChangeRequestAction::SendToSled(sled_id)) } else { - Err(Error::InvalidRequest { - message: format!( - "instance state cannot be changed from state \"{}\"", - effective_state - ), - }) + Err(Error::invalid_request(format!( + "instance state cannot be changed from state \"{}\"", + effective_state + ))) } } @@ -885,8 +897,6 @@ impl super::Nexus { .await?; // Collect the external IPs for the instance. - // TODO-correctness: Handle Floating IPs, see - // https://github.com/oxidecomputer/omicron/issues/1334 let (snat_ip, external_ips): (Vec<_>, Vec<_>) = self .db_datastore .instance_lookup_external_ips(&opctx, authz_instance.id()) @@ -895,8 +905,6 @@ impl super::Nexus { .partition(|ip| ip.kind == IpKind::SNat); // Sanity checks on the number and kind of each IP address. - // TODO-correctness: Handle multiple IP addresses, see - // https://github.com/oxidecomputer/omicron/issues/1467 if external_ips.len() > MAX_EXTERNAL_IPS_PER_INSTANCE { return Err(Error::internal_error( format!( @@ -908,8 +916,28 @@ impl super::Nexus { .as_str(), )); } - let external_ips = - external_ips.into_iter().map(|model| model.ip.ip()).collect(); + + // Partition remaining external IPs by class: we can have at most + // one ephemeral ip. 
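For reference, the split performed just below behaves like this self-contained toy, which uses a stand-in enum rather than the real `IpKind` model:

// Sketch only: demonstrates the Iterator::partition shape used below.
#[derive(PartialEq)]
enum Kind {
    Ephemeral,
    Floating,
}

fn main() {
    let ips = vec![Kind::Ephemeral, Kind::Floating, Kind::Floating];
    // Ephemeral IPs land on the left, floating IPs on the right.
    let (ephemeral, floating): (Vec<_>, Vec<_>) =
        ips.into_iter().partition(|k| *k == Kind::Ephemeral);
    assert_eq!((ephemeral.len(), floating.len()), (1, 2));
}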
+ let (ephemeral_ips, floating_ips): (Vec<_>, Vec<_>) = external_ips + .into_iter() + .partition(|ip| ip.kind == IpKind::Ephemeral); + + if ephemeral_ips.len() > MAX_EPHEMERAL_IPS_PER_INSTANCE { + return Err(Error::internal_error( + format!( + "Expected at most {} ephemeral IP for an instance, found {}", + MAX_EPHEMERAL_IPS_PER_INSTANCE, + ephemeral_ips.len() + ) + .as_str(), + )); + } + + let ephemeral_ip = ephemeral_ips.get(0).map(|model| model.ip.ip()); + + let floating_ips = + floating_ips.into_iter().map(|model| model.ip.ip()).collect(); if snat_ip.len() != 1 { return Err(Error::internal_error( "Expected exactly one SNAT IP address for an instance", @@ -985,7 +1013,8 @@ impl super::Nexus { }, nics, source_nat, - external_ips, + ephemeral_ip, + floating_ips, firewall_rules, dhcp_config: sled_agent_client::types::DhcpConfig { dns_servers: self.external_dns_servers.clone(), @@ -1200,10 +1229,9 @@ impl super::Nexus { // permissions on both) without verifying the shared hierarchy. To // mitigate that we verify that their parent projects have the same ID. if authz_project.id() != authz_project_disk.id() { - return Err(Error::InvalidRequest { - message: "disk must be in the same project as the instance" - .to_string(), - }); + return Err(Error::invalid_request( + "disk must be in the same project as the instance", + )); } // TODO(https://github.com/oxidecomputer/omicron/issues/811): @@ -1583,28 +1611,22 @@ impl super::Nexus { | InstanceState::Starting | InstanceState::Stopping | InstanceState::Stopped - | InstanceState::Failed => Err(Error::ServiceUnavailable { - internal_message: format!( - "cannot connect to serial console of instance in state \ - {:?}", - vmm.runtime.state.0 - ), - }), - InstanceState::Destroyed => Err(Error::ServiceUnavailable { - internal_message: format!( - "cannot connect to serial console of instance in state \ - {:?}", - InstanceState::Stopped), - }), + | InstanceState::Failed => { + Err(Error::invalid_request(format!( + "cannot connect to serial console of instance in state \"{}\"", + vmm.runtime.state.0, + ))) + } + InstanceState::Destroyed => Err(Error::invalid_request( + "cannot connect to serial console of destroyed instance", + )), } } else { - Err(Error::ServiceUnavailable { - internal_message: format!( - "instance is in state {:?} and has no active serial console \ + Err(Error::invalid_request(format!( + "instance is {} and has no active serial console \ server", - instance.runtime().nexus_state - ) - }) + instance.runtime().nexus_state + ))) } } diff --git a/nexus/src/app/instance_network.rs b/nexus/src/app/instance_network.rs index abb8c744e1..3db749f43b 100644 --- a/nexus/src/app/instance_network.rs +++ b/nexus/src/app/instance_network.rs @@ -451,8 +451,6 @@ impl super::Nexus { .instance_lookup_external_ips(opctx, instance_id) .await?; - let boundary_switches = self.boundary_switches(opctx).await?; - let mut errors = vec![]; for entry in external_ips { // Soft delete the NAT entry @@ -478,6 +476,9 @@ impl super::Nexus { }?; } + let boundary_switches = + self.boundary_switches(&self.opctx_alloc).await?; + for switch in &boundary_switches { debug!(&self.log, "notifying dendrite of updates"; "instance_id" => %authz_instance.id(), diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 18c9dae841..d4c2d596f8 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -79,8 +79,13 @@ pub(crate) use nexus_db_queries::db::queries::disk::MAX_DISKS_PER_INSTANCE; pub(crate) const MAX_NICS_PER_INSTANCE: usize = 8; -// TODO-completeness: Support multiple 
external IPs -pub(crate) const MAX_EXTERNAL_IPS_PER_INSTANCE: usize = 1; +// XXX: Might want to recast as max *floating* IPs, we have at most one +// ephemeral (so bounded in saga by design). +// The value here is arbitrary, but we need *a* limit for the instance +// create saga to have a bounded DAG. We might want to only enforce +// this during instance create (rather than live attach) in future. +pub(crate) const MAX_EXTERNAL_IPS_PER_INSTANCE: usize = 32; +pub(crate) const MAX_EPHEMERAL_IPS_PER_INSTANCE: usize = 1; pub const MAX_VCPU_PER_INSTANCE: u16 = 64; diff --git a/nexus/src/app/oximeter.rs b/nexus/src/app/oximeter.rs index 7dfa2fb68b..a168b35293 100644 --- a/nexus/src/app/oximeter.rs +++ b/nexus/src/app/oximeter.rs @@ -127,6 +127,7 @@ impl super::Nexus { for producer in producers.into_iter() { let producer_info = oximeter_client::types::ProducerEndpoint { id: producer.id(), + kind: nexus::ProducerKind::from(producer.kind).into(), address: SocketAddr::new( producer.ip.ip(), producer.port.try_into().unwrap(), @@ -149,6 +150,7 @@ impl super::Nexus { pub(crate) async fn register_as_producer(&self, address: SocketAddr) { let producer_endpoint = nexus::ProducerEndpoint { id: self.id, + kind: nexus::ProducerKind::Service, address, base_route: String::from("/metrics/collect"), interval: Duration::from_secs(10), diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 1c2e49e260..1643ac301d 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -10,7 +10,7 @@ use crate::external_api::params::CertificateCreate; use crate::external_api::shared::ServiceUsingCertificate; use crate::internal_api::params::RackInitializationRequest; use gateway_client::types::SpType; -use ipnetwork::IpNetwork; +use ipnetwork::{IpNetwork, Ipv6Network}; use nexus_db_model::DnsGroup; use nexus_db_model::InitialDnsGroup; use nexus_db_model::{SwitchLinkFec, SwitchLinkSpeed}; @@ -23,19 +23,26 @@ use nexus_db_queries::db::lookup::LookupPath; use nexus_types::external_api::params::Address; use nexus_types::external_api::params::AddressConfig; use nexus_types::external_api::params::AddressLotBlockCreate; +use nexus_types::external_api::params::BgpAnnounceSetCreate; +use nexus_types::external_api::params::BgpAnnouncementCreate; +use nexus_types::external_api::params::BgpConfigCreate; +use nexus_types::external_api::params::BgpPeer; +use nexus_types::external_api::params::LinkConfig; +use nexus_types::external_api::params::LldpServiceConfig; use nexus_types::external_api::params::RouteConfig; use nexus_types::external_api::params::SwitchPortConfig; use nexus_types::external_api::params::{ - AddressLotCreate, LoopbackAddressCreate, Route, SiloCreate, + AddressLotCreate, BgpPeerConfig, LoopbackAddressCreate, Route, SiloCreate, SwitchPortSettingsCreate, }; +use nexus_types::external_api::shared::Baseboard; use nexus_types::external_api::shared::FleetRole; use nexus_types::external_api::shared::SiloIdentityMode; use nexus_types::external_api::shared::SiloRole; +use nexus_types::external_api::shared::UninitializedSled; use nexus_types::external_api::views; -use nexus_types::external_api::views::Baseboard; -use nexus_types::external_api::views::UninitializedSled; use nexus_types::internal_api::params::DnsRecord; +use omicron_common::address::{get_64_subnet, Ipv6Subnet, RACK_PREFIX}; use omicron_common::api::external::AddressLotKind; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; @@ -45,10 +52,13 @@ use omicron_common::api::external::LookupResult; use 
omicron_common::api::external::Name; use omicron_common::api::external::NameOrId; use omicron_common::api::internal::shared::ExternalPortDiscovery; +use sled_agent_client::types::AddSledRequest; use sled_agent_client::types::EarlyNetworkConfigBody; +use sled_agent_client::types::StartSledAgentRequest; +use sled_agent_client::types::StartSledAgentRequestBody; use sled_agent_client::types::{ - BgpConfig, BgpPeerConfig, EarlyNetworkConfig, PortConfigV1, - RackNetworkConfigV1, RouteConfig as SledRouteConfig, + BgpConfig, BgpPeerConfig as SledBgpPeerConfig, EarlyNetworkConfig, + PortConfigV1, RackNetworkConfigV1, RouteConfig as SledRouteConfig, }; use std::collections::BTreeMap; use std::collections::BTreeSet; @@ -201,10 +211,9 @@ impl super::Nexus { }; let rack_network_config = request.rack_network_config.as_ref().ok_or( - Error::InvalidRequest { - message: "cannot initialize a rack without a network config" - .into(), - }, + Error::invalid_request( + "cannot initialize a rack without a network config", + ), )?; self.db_datastore @@ -402,6 +411,108 @@ impl super::Nexus { Error::internal_error(&format!("unable to retrieve authz_address_lot for infra address_lot: {e}")) })?; + let mut bgp_configs = HashMap::new(); + + for bgp_config in &rack_network_config.bgp { + bgp_configs.insert(bgp_config.asn, bgp_config.clone()); + + let bgp_config_name: Name = + format!("as{}", bgp_config.asn).parse().unwrap(); + + let announce_set_name: Name = + format!("as{}-announce", bgp_config.asn).parse().unwrap(); + + let address_lot_name: Name = + format!("as{}-lot", bgp_config.asn).parse().unwrap(); + + self.db_datastore + .address_lot_create( + &opctx, + &AddressLotCreate { + identity: IdentityMetadataCreateParams { + name: address_lot_name, + description: format!( + "Address lot for announce set in as {}", + bgp_config.asn + ), + }, + kind: AddressLotKind::Infra, + blocks: bgp_config + .originate + .iter() + .map(|o| AddressLotBlockCreate { + first_address: o.network().into(), + last_address: o.broadcast().into(), + }) + .collect(), + }, + ) + .await + .map_err(|e| { + Error::internal_error(&format!( + "unable to create address lot for BGP as {}: {}", + bgp_config.asn, e + )) + })?; + + self.db_datastore + .bgp_create_announce_set( + &opctx, + &BgpAnnounceSetCreate { + identity: IdentityMetadataCreateParams { + name: announce_set_name.clone(), + description: format!( + "Announce set for AS {}", + bgp_config.asn + ), + }, + announcement: bgp_config + .originate + .iter() + .map(|x| BgpAnnouncementCreate { + address_lot_block: NameOrId::Name( + format!("as{}", bgp_config.asn) + .parse() + .unwrap(), + ), + network: IpNetwork::from(*x).into(), + }) + .collect(), + }, + ) + .await + .map_err(|e| { + Error::internal_error(&format!( + "unable to create bgp announce set for as {}: {}", + bgp_config.asn, e + )) + })?; + + self.db_datastore + .bgp_config_set( + &opctx, + &BgpConfigCreate { + identity: IdentityMetadataCreateParams { + name: bgp_config_name, + description: format!( + "BGP config for AS {}", + bgp_config.asn + ), + }, + asn: bgp_config.asn, + bgp_announce_set_id: announce_set_name.into(), + vrf: None, + }, + ) + .await + .map_err(|e| { + Error::internal_error(&format!( + "unable to set bgp config for as {}: {}", + bgp_config.asn, e + )) + })?; + } + for (idx, uplink_config) in rack_network_config.ports.iter().enumerate() { @@ -499,6 +610,43 @@ impl super::Nexus { .routes .insert("phy0".to_string(), RouteConfig { routes }); + let peers: Vec = uplink_config + .bgp_peers + .iter() + .map(|r| BgpPeer { + 
bgp_announce_set: NameOrId::Name( + format!("as{}-announce", r.asn).parse().unwrap(), + ), + bgp_config: NameOrId::Name( + format!("as{}", r.asn).parse().unwrap(), + ), + interface_name: "phy0".into(), + addr: r.addr.into(), + hold_time: r.hold_time.unwrap_or(6) as u32, + idle_hold_time: r.idle_hold_time.unwrap_or(3) as u32, + delay_open: r.delay_open.unwrap_or(0) as u32, + connect_retry: r.connect_retry.unwrap_or(3) as u32, + keepalive: r.keepalive.unwrap_or(2) as u32, + }) + .collect(); + + port_settings_params + .bgp_peers + .insert("phy0".to_string(), BgpPeerConfig { peers }); + + let link = LinkConfig { + mtu: 1500, //TODO https://github.com/oxidecomputer/omicron/issues/2274 + lldp: LldpServiceConfig { + enabled: false, + lldp_config: None, + }, + fec: uplink_config.uplink_port_fec.into(), + speed: uplink_config.uplink_port_speed.into(), + autoneg: uplink_config.autoneg, + }; + + port_settings_params.links.insert("phy".to_string(), link); + match self .db_datastore .switch_port_settings_create( @@ -584,20 +732,7 @@ impl super::Nexus { if rack.rack_subnet.is_some() { return Ok(()); } - let addr = self - .sled_list(opctx, &DataPageParams::max_page()) - .await? - .get(0) - .ok_or(Error::InternalError { - internal_message: "no sleds at time of bootstore sync".into(), - })? - .address(); - - let sa = sled_agent_client::Client::new( - &format!("http://{}", addr), - self.log.clone(), - ); - + let sa = self.get_any_sled_agent(opctx).await?; let result = sa .read_network_bootstore_config_cache() .await @@ -619,7 +754,7 @@ impl super::Nexus { opctx: &OpContext, ) -> Result { let rack = self.rack_lookup(opctx, &self.rack_id).await?; - let subnet = rack.subnet()?; + let subnet = rack_subnet(rack.rack_subnet)?; let db_ports = self.active_port_settings(opctx).await?; let mut ports = Vec::new(); @@ -667,7 +802,7 @@ impl super::Nexus { addresses: info.addresses.iter().map(|a| a.address).collect(), bgp_peers: peer_info .iter() - .map(|(p, asn, addr)| BgpPeerConfig { + .map(|(p, asn, addr)| SledBgpPeerConfig { addr: *addr, asn: *asn, port: port.port_name.clone(), @@ -682,16 +817,21 @@ impl super::Nexus { port: port.port_name.clone(), uplink_port_fec: info .links - .get(0) //TODO breakout support + .get(0) //TODO https://github.com/oxidecomputer/omicron/issues/3062 .map(|l| l.fec) .unwrap_or(SwitchLinkFec::None) .into(), uplink_port_speed: info .links - .get(0) //TODO breakout support + .get(0) //TODO https://github.com/oxidecomputer/omicron/issues/3062 .map(|l| l.speed) .unwrap_or(SwitchLinkSpeed::Speed100G) .into(), + autoneg: info + .links + .get(0) //TODO breakout support + .map(|l| l.autoneg) + .unwrap_or(false), }; ports.push(p); @@ -726,18 +866,38 @@ impl super::Nexus { &self, opctx: &OpContext, ) -> ListResultVec { + debug!(self.log, "Getting latest collection"); // Grab the SPs from the last collection - let limit = NonZeroU32::new(50).unwrap(); + // + // We set a limit of 200 here to give us some breathing room when + // querying for cabooses and RoT pages, each of which is "4 per SP/RoT", + // which in a single fully populated rack works out to (32 sleds + 2 + // switches + 1 psc) * 4 = 140. + // + // This feels bad and probably needs more thought; see + // https://github.com/oxidecomputer/omicron/issues/4621 where this limit + // being too low bit us, and it will link to a more general followup + // issue. 
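The 200-row limit introduced just below is backed by the arithmetic in the comment above. A tiny compile-time restatement of that bound (the numbers come from the comment, not from any omicron constant):

// Numbers from the comment: 32 sleds + 2 switches + 1 PSC, with each
// SP/RoT contributing 4 rows of cabooses/RoT pages.
const SLEDS: u32 = 32;
const SWITCHES: u32 = 2;
const PSCS: u32 = 1;
const ROWS_PER_DEVICE: u32 = 4;
const WORST_CASE_ROWS: u32 = (SLEDS + SWITCHES + PSCS) * ROWS_PER_DEVICE;

// The chosen limit must cover the worst case with headroom to spare.
const LIMIT: u32 = 200;
const _: () = assert!(WORST_CASE_ROWS == 140);
const _: () = assert!(WORST_CASE_ROWS <= LIMIT);

fn main() {
    println!("worst case: {WORST_CASE_ROWS} rows, limit: {LIMIT}");
}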
+        let limit = NonZeroU32::new(200).unwrap();
         let collection = self
             .db_datastore
             .inventory_get_latest_collection(opctx, limit)
             .await?;
+
+        // There can't be any uninitialized sleds we know about
+        // if there is no inventory.
+        let Some(collection) = collection else {
+            return Ok(vec![]);
+        };
+
         let pagparams = DataPageParams {
             marker: None,
             direction: dropshot::PaginationOrder::Descending,
             // TODO: This limit is only suitable for a single sled cluster
             limit: NonZeroU32::new(32).unwrap(),
         };
+
+        debug!(self.log, "Listing sleds");
         let sleds = self.db_datastore.sled_list(opctx, &pagparams).await?;
         let mut uninitialized_sleds: Vec<UninitializedSled> = collection
@@ -767,4 +927,106 @@ impl super::Nexus {
         uninitialized_sleds.retain(|s| !sled_baseboards.contains(&s.baseboard));
         Ok(uninitialized_sleds)
     }
+
+    /// Add a sled to an initialized rack
+    pub(crate) async fn add_sled_to_initialized_rack(
+        &self,
+        opctx: &OpContext,
+        sled: UninitializedSled,
+    ) -> Result<(), Error> {
+        let baseboard_id = sled.baseboard.clone().into();
+        let hw_baseboard_id =
+            self.db_datastore.find_hw_baseboard_id(opctx, baseboard_id).await?;
+
+        let subnet = self.db_datastore.rack_subnet(opctx, sled.rack_id).await?;
+        let rack_subnet =
+            Ipv6Subnet::<RACK_PREFIX>::from(rack_subnet(Some(subnet))?);
+
+        let allocation = self
+            .db_datastore
+            .allocate_sled_underlay_subnet_octets(
+                opctx,
+                sled.rack_id,
+                hw_baseboard_id,
+            )
+            .await?;
+
+        // Convert the baseboard as necessary
+        let baseboard = sled_agent_client::types::Baseboard::Gimlet {
+            identifier: sled.baseboard.serial.clone(),
+            model: sled.baseboard.part.clone(),
+            revision: sled.baseboard.revision,
+        };
+
+        // Make the call to sled-agent
+        let req = AddSledRequest {
+            sled_id: baseboard,
+            start_request: StartSledAgentRequest {
+                generation: 0,
+                schema_version: 1,
+                body: StartSledAgentRequestBody {
+                    id: allocation.sled_id,
+                    rack_id: allocation.rack_id,
+                    use_trust_quorum: true,
+                    is_lrtq_learner: true,
+                    subnet: sled_agent_client::types::Ipv6Subnet {
+                        net: get_64_subnet(
+                            rack_subnet,
+                            allocation.subnet_octet.try_into().unwrap(),
+                        )
+                        .net()
+                        .into(),
+                    },
+                },
+            },
+        };
+        let sa = self.get_any_sled_agent(opctx).await?;
+        sa.add_sled_to_initialized_rack(&req).await.map_err(|e| {
+            Error::InternalError {
+                internal_message: format!(
+                    "failed to add sled with baseboard {:?} to rack {}: {e}",
+                    sled.baseboard, allocation.rack_id
+                ),
+            }
+        })?;
+
+        Ok(())
+    }
+
+    async fn get_any_sled_agent(
+        &self,
+        opctx: &OpContext,
+    ) -> Result<sled_agent_client::Client, Error> {
+        let addr = self
+            .sled_list(opctx, &DataPageParams::max_page())
+            .await?
+            .get(0)
+            .ok_or(Error::InternalError {
+                internal_message: "no sled agents available".into(),
+            })?
+            .address();
+
+        Ok(sled_agent_client::Client::new(
+            &format!("http://{}", addr),
+            self.log.clone(),
+        ))
+    }
+}
+
+pub fn rack_subnet(
+    rack_subnet: Option<IpNetwork>,
+) -> Result<Ipv6Network, Error> {
+    match rack_subnet {
+        Some(IpNetwork::V6(subnet)) => Ok(subnet),
+        Some(IpNetwork::V4(_)) => {
+            return Err(Error::InternalError {
+                internal_message: "rack subnet not IPv6".into(),
+            })
+        }
+        None => {
+            return Err(Error::InternalError {
+                internal_message: "rack subnet not set".into(),
+            })
+        }
+    }
 }
diff --git a/nexus/src/app/sagas/common_storage.rs b/nexus/src/app/sagas/common_storage.rs
index a57afb215d..a7350d91fd 100644
--- a/nexus/src/app/sagas/common_storage.rs
+++ b/nexus/src/app/sagas/common_storage.rs
@@ -73,7 +73,9 @@ pub(crate) async fn ensure_region_in_dataset(
     let log_create_failure = |_, delay| {
         warn!(
             log,
-            "Region requested, not yet created.
Retrying in {:?}", delay + "Region requested, not yet created. Retrying in {:?}", + delay; + "region" => %region.id(), ); }; @@ -157,7 +159,12 @@ pub(super) async fn delete_crucible_region( .await; if let Err(e) = result { - error!(log, "delete_crucible_region: region_get saw {:?}", e); + error!( + log, + "delete_crucible_region: region_get saw {:?}", + e; + "region_id" => %region_id, + ); match e { crucible_agent_client::Error::ErrorResponse(rv) => { match rv.status() { @@ -191,7 +198,12 @@ pub(super) async fn delete_crucible_region( }) .await .map_err(|e| { - error!(log, "delete_crucible_region: region_delete saw {:?}", e); + error!( + log, + "delete_crucible_region: region_delete saw {:?}", + e; + "region_id" => %region_id, + ); match e { crucible_agent_client::Error::ErrorResponse(rv) => { match rv.status() { @@ -226,7 +238,12 @@ pub(super) async fn delete_crucible_region( }) .await .map_err(|e| { - error!(log, "delete_crucible_region: region_get saw {:?}", e); + error!( + log, + "delete_crucible_region: region_get saw {:?}", + e; + "region_id" => %region_id, + ); match e { crucible_agent_client::Error::ErrorResponse(rv) => { @@ -250,29 +267,33 @@ pub(super) async fn delete_crucible_region( })?; match region.state { - RegionState::Tombstoned => { - Err(BackoffError::transient(WaitError::Transient(anyhow!( - "region {} not deleted yet", - region_id.to_string(), - )))) - } + RegionState::Tombstoned => Err(BackoffError::transient( + WaitError::Transient(anyhow!("region not deleted yet")), + )), RegionState::Destroyed => { - info!(log, "region {} deleted", region_id.to_string(),); + info!( + log, + "region deleted"; + "region_id" => %region_id, + ); Ok(()) } - _ => { - Err(BackoffError::transient(WaitError::Transient(anyhow!( - "region {} unexpected state", - region_id.to_string(), - )))) - } + _ => Err(BackoffError::transient(WaitError::Transient( + anyhow!("region unexpected state {:?}", region.state), + ))), } }, |e: WaitError, delay| { - info!(log, "{:?}, trying again in {:?}", e, delay,); + info!( + log, + "{:?}, trying again in {:?}", + e, + delay; + "region_id" => %region_id, + ); }, ) .await @@ -338,8 +359,10 @@ pub(super) async fn delete_crucible_running_snapshot( .map_err(|e| { error!( log, - "delete_crucible_snapshot: region_delete_running_snapshot saw {:?}", - e + "delete_crucible_running_snapshot: region_delete_running_snapshot saw {:?}", + e; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, ); match e { crucible_agent_client::Error::ErrorResponse(rv) => { @@ -377,7 +400,14 @@ pub(super) async fn delete_crucible_running_snapshot( }) .await .map_err(|e| { - error!(log, "delete_crucible_snapshot: region_get_snapshots saw {:?}", e); + error!( + log, + "delete_crucible_running_snapshot: region_get_snapshots saw {:?}", + e; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, + ); + match e { crucible_agent_client::Error::ErrorResponse(rv) => { match rv.status() { @@ -409,19 +439,17 @@ pub(super) async fn delete_crucible_running_snapshot( Some(running_snapshot) => { info!( log, - "region {} snapshot {} running_snapshot is Some, state is {}", - region_id.to_string(), - snapshot_id.to_string(), - running_snapshot.state.to_string(), + "running_snapshot is Some, state is {}", + running_snapshot.state.to_string(); + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, ); match running_snapshot.state { RegionState::Tombstoned => { Err(BackoffError::transient( WaitError::Transient(anyhow!( - "region {} snapshot {} running_snapshot not deleted yet", - 
region_id.to_string(), - snapshot_id.to_string(), + "running_snapshot tombstoned, not deleted yet", ) ))) } @@ -429,9 +457,7 @@ pub(super) async fn delete_crucible_running_snapshot( RegionState::Destroyed => { info!( log, - "region {} snapshot {} running_snapshot deleted", - region_id.to_string(), - snapshot_id.to_string(), + "running_snapshot deleted", ); Ok(()) @@ -440,9 +466,7 @@ pub(super) async fn delete_crucible_running_snapshot( _ => { Err(BackoffError::transient( WaitError::Transient(anyhow!( - "region {} snapshot {} running_snapshot unexpected state", - region_id.to_string(), - snapshot_id.to_string(), + "running_snapshot unexpected state", ) ))) } @@ -453,9 +477,9 @@ pub(super) async fn delete_crucible_running_snapshot( // deleted? info!( log, - "region {} snapshot {} running_snapshot is None", - region_id.to_string(), - snapshot_id.to_string(), + "running_snapshot is None"; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, ); // break here - it's possible that the running snapshot @@ -469,7 +493,9 @@ pub(super) async fn delete_crucible_running_snapshot( log, "{:?}, trying again in {:?}", e, - delay, + delay; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, ); } ) @@ -494,7 +520,14 @@ pub(super) async fn delete_crucible_snapshot( region_id: Uuid, snapshot_id: Uuid, ) -> Result<(), Error> { - // delete snapshot - this endpoint is synchronous, it is not only a request + // Unlike other Crucible agent endpoints, this one is synchronous in that it + // is not only a request to the Crucible agent: `zfs destroy` is performed + // right away. However this is still a request to illumos that may not take + // effect right away. Wait until the snapshot no longer appears in the list + // of region snapshots, meaning it was not returned from `zfs list`. 
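The comment above describes the wait-until-gone contract; in outline the polling loop looks like this (a sketch with a hypothetical list_snapshots standing in for the Crucible agent's region_get_snapshots; the real code below uses the backoff retry machinery rather than a fixed sleep):

use std::time::Duration;

// Hypothetical stand-in for client.region_get_snapshots(..).
async fn list_snapshots() -> Vec<String> {
    Vec::new()
}

// Poll until the named snapshot no longer shows up, giving up after
// max_attempts tries.
async fn wait_until_snapshot_gone(
    snapshot_id: &str,
    max_attempts: u32,
) -> Result<(), String> {
    for _ in 0..max_attempts {
        let snapshots = list_snapshots().await;
        if !snapshots.iter().any(|name| name == snapshot_id) {
            return Ok(());
        }
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
    Err(format!("snapshot {snapshot_id} not deleted in time"))
}

#[tokio::main]
async fn main() {
    wait_until_snapshot_gone("example-snap", 30).await.unwrap();
}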
+ + info!(log, "deleting region {region_id} snapshot {snapshot_id}"); + retry_until_known_result(log, || async { client .region_delete_snapshot( @@ -507,7 +540,10 @@ pub(super) async fn delete_crucible_snapshot( .map_err(|e| { error!( log, - "delete_crucible_snapshot: region_delete_snapshot saw {:?}", e + "delete_crucible_snapshot: region_delete_snapshot saw {:?}", + e; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, ); match e { crucible_agent_client::Error::ErrorResponse(rv) => { @@ -524,7 +560,101 @@ pub(super) async fn delete_crucible_snapshot( } })?; - Ok(()) + #[derive(Debug, thiserror::Error)] + enum WaitError { + #[error("Transient error: {0}")] + Transient(#[from] anyhow::Error), + + #[error("Permanent error: {0}")] + Permanent(#[from] Error), + } + + backoff::retry_notify( + backoff::retry_policy_internal_service_aggressive(), + || async { + let response = retry_until_known_result(log, || async { + client + .region_get_snapshots(&RegionId(region_id.to_string())) + .await + }) + .await + .map_err(|e| { + error!( + log, + "delete_crucible_snapshot: region_get_snapshots saw {:?}", + e; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, + ); + match e { + crucible_agent_client::Error::ErrorResponse(rv) => { + match rv.status() { + status if status.is_client_error() => { + BackoffError::Permanent(WaitError::Permanent( + Error::invalid_request(&rv.message), + )) + } + _ => BackoffError::Permanent(WaitError::Permanent( + Error::internal_error(&rv.message), + )), + } + } + _ => BackoffError::Permanent(WaitError::Permanent( + Error::internal_error( + "unexpected failure during `region_get_snapshots`", + ), + )), + } + })?; + + if response + .snapshots + .iter() + .any(|x| x.name == snapshot_id.to_string()) + { + info!( + log, + "snapshot still exists, waiting"; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, + ); + + Err(BackoffError::transient(WaitError::Transient(anyhow!( + "snapshot not deleted yet", + )))) + } else { + info!( + log, + "snapshot deleted"; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, + ); + + Ok(()) + } + }, + |e: WaitError, delay| { + info!( + log, + "{:?}, trying again in {:?}", + e, + delay; + "region_id" => %region_id, + "snapshot_id" => %snapshot_id, + ); + }, + ) + .await + .map_err(|e| match e { + WaitError::Transient(e) => { + // The backoff crate can be configured with a maximum elapsed time + // before giving up, which means that Transient could be returned + // here. Our current policies do **not** set this though. 
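For reference, the transient-versus-permanent split used throughout these retry loops maps onto the public backoff crate roughly as follows (a sketch assuming backoff 0.4 with its tokio feature; omicron's own backoff wrapper and retry_policy_internal_service_aggressive differ in detail):

use backoff::future::retry_notify;
use backoff::{Error as BackoffError, ExponentialBackoff};
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use std::time::Duration;

#[tokio::main]
async fn main() {
    let policy = ExponentialBackoff {
        max_interval: Duration::from_secs(1),
        // None means never give up, so transient errors retry forever; a
        // Some(..) deadline would eventually surface them as failures.
        max_elapsed_time: None,
        ..Default::default()
    };

    let attempt = Arc::new(AtomicU32::new(0));
    retry_notify(
        policy,
        || {
            let attempt = Arc::clone(&attempt);
            async move {
                let n = attempt.fetch_add(1, Ordering::SeqCst) + 1;
                if n < 3 {
                    // Transient: retried with backoff.
                    Err(BackoffError::transient(format!("attempt {n}: not ready")))
                } else {
                    // Ok (or BackoffError::permanent) ends the loop.
                    Ok(())
                }
            }
        },
        |err, delay| println!("{err}, retrying in {delay:?}"),
    )
    .await
    .unwrap();
}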
+ Error::internal_error(&e.to_string()) + } + + WaitError::Permanent(e) => e, + }) } // Given a list of datasets and region snapshots, send DELETE calls to the @@ -645,10 +775,8 @@ pub(crate) async fn call_pantry_attach_for_disk( info!( log, - "sending attach for disk {} volume {} to endpoint {}", - disk_id, + "sending attach for disk {disk_id} volume {} to endpoint {endpoint}", disk.volume_id, - endpoint, ); let volume_construction_request: crucible_pantry_client::types::VolumeConstructionRequest = @@ -684,7 +812,7 @@ pub(crate) async fn call_pantry_detach_for_disk( ) -> Result<(), ActionError> { let endpoint = format!("http://{}", pantry_address); - info!(log, "sending detach for disk {} to endpoint {}", disk_id, endpoint,); + info!(log, "sending detach for disk {disk_id} to endpoint {endpoint}"); let client = crucible_pantry_client::Client::new(&endpoint); diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index fe403a7d41..4883afaddc 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -830,9 +830,7 @@ pub(crate) mod test { app::saga::create_saga_dag, app::sagas::disk_create::Params, app::sagas::disk_create::SagaDiskCreate, external_api::params, }; - use async_bb8_diesel::{ - AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, - }; + use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use diesel::{ ExpressionMethods, OptionalExtension, QueryDsl, SelectableHelper, }; @@ -972,27 +970,25 @@ pub(crate) mod test { use nexus_db_queries::db::model::VirtualProvisioningCollection; use nexus_db_queries::db::schema::virtual_provisioning_collection::dsl; + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore - .pool_connection_for_tests() - .await - .unwrap() - .transaction_async(|conn| async move { + .transaction_retry_wrapper( + "no_virtual_provisioning_collection_records_using_storage", + ) + .transaction(&conn, |conn| async move { conn.batch_execute_async( nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL, ) .await .unwrap(); - Ok::<_, nexus_db_queries::db::TransactionError<()>>( - dsl::virtual_provisioning_collection - .filter(dsl::virtual_disk_bytes_provisioned.ne(0)) - .select(VirtualProvisioningCollection::as_select()) - .get_results_async::( - &conn, - ) - .await - .unwrap() - .is_empty(), - ) + Ok(dsl::virtual_provisioning_collection + .filter(dsl::virtual_disk_bytes_provisioned.ne(0)) + .select(VirtualProvisioningCollection::as_select()) + .get_results_async::(&conn) + .await + .unwrap() + .is_empty()) }) .await .unwrap() diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index f2d80d64f5..8f6d74da0a 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -32,10 +32,8 @@ pub(crate) struct Params { declare_saga_actions! { disk_delete; DELETE_DISK_RECORD -> "deleted_disk" { - // TODO: See the comment on the "DeleteRegions" step, - // we may want to un-delete the disk if we cannot remove - // underlying regions. 
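The test refactors above replace open-coded transaction_async blocks with a named retry wrapper. The general shape of that pattern, reduced to a sketch (hypothetical helper, not the nexus-db-queries API; the real wrapper classifies CockroachDB serialization failures):

// Run a transaction body, retrying when the database reports a
// retryable conflict.
#[derive(Debug)]
enum DbError {
    Retryable,
    Fatal(String),
}

fn run_transaction<T>(
    name: &str,
    max_attempts: u32,
    mut body: impl FnMut() -> Result<T, DbError>,
) -> Result<T, DbError> {
    for attempt in 1..=max_attempts {
        match body() {
            Err(DbError::Retryable) => {
                eprintln!("transaction {name}: retryable failure, attempt {attempt}");
            }
            other => return other,
        }
    }
    Err(DbError::Fatal(format!("transaction {name}: attempts exhausted")))
}

fn main() {
    let mut calls = 0;
    let result = run_transaction("example", 3, || {
        calls += 1;
        if calls < 2 { Err(DbError::Retryable) } else { Ok(42) }
    });
    println!("{result:?}");
}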
+ sdd_delete_disk_record + - sdd_delete_disk_record_undo } SPACE_ACCOUNT -> "no_result1" { + sdd_account_space @@ -117,6 +115,21 @@ async fn sdd_delete_disk_record( Ok(disk) } +async fn sdd_delete_disk_record_undo( + sagactx: NexusActionContext, +) -> Result<(), anyhow::Error> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::()?; + + osagactx + .datastore() + .project_undelete_disk_set_faulted_no_auth(¶ms.disk_id) + .await + .map_err(ActionError::action_failed)?; + + Ok(()) +} + async fn sdd_account_space( sagactx: NexusActionContext, ) -> Result<(), ActionError> { diff --git a/nexus/src/app/sagas/import_blocks_from_url.rs b/nexus/src/app/sagas/import_blocks_from_url.rs deleted file mode 100644 index ffee40ba72..0000000000 --- a/nexus/src/app/sagas/import_blocks_from_url.rs +++ /dev/null @@ -1,413 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! For disks in state ImportReady, send a request to import blocks from a URL. -//! Note the Pantry they're attached to must have addressability to the URL! - -use super::declare_saga_actions; -use super::ActionRegistry; -use super::NexusActionContext; -use super::NexusSaga; -use super::SagaInitError; -use crate::app::sagas::retry_until_known_result; -use nexus_db_model::Generation; -use nexus_db_queries::db::lookup::LookupPath; -use nexus_db_queries::{authn, authz}; -use nexus_types::external_api::params; -use omicron_common::api::external; -use omicron_common::api::external::Error; -use serde::Deserialize; -use serde::Serialize; -use std::net::SocketAddrV6; -use steno::ActionError; -use uuid::Uuid; - -#[derive(Debug, Deserialize, Serialize)] -pub(crate) struct Params { - pub serialized_authn: authn::saga::Serialized, - pub disk_id: Uuid, - - pub import_params: params::ImportBlocksFromUrl, -} - -declare_saga_actions! { - import_blocks_from_url; - SET_IMPORTING_STATE -> "disk_generation_number" { - + sibfu_get_importing_state - - sibfu_get_importing_state_undo - } - GET_PANTRY_ADDRESS -> "pantry_address" { - + sibfu_get_pantry_address - } - CALL_PANTRY_IMPORT_FROM_URL_FOR_DISK -> "call_pantry_import_from_url_for_disk" { - + sibfu_call_pantry_import_from_url_for_disk - } - WAIT_FOR_IMPORT_FROM_URL -> "wait_for_import_from_url" { - + sibfu_wait_for_import_from_url - } - SET_IMPORT_READY_STATE -> "set_import_ready_state" { - + sibfu_get_import_ready_state - } -} - -#[derive(Debug)] -pub(crate) struct SagaImportBlocksFromUrl; -impl NexusSaga for SagaImportBlocksFromUrl { - const NAME: &'static str = "import-blocks-from-url"; - type Params = Params; - - fn register_actions(registry: &mut ActionRegistry) { - import_blocks_from_url_register_actions(registry); - } - - fn make_saga_dag( - _params: &Self::Params, - mut builder: steno::DagBuilder, - ) -> Result { - builder.append(set_importing_state_action()); - - builder.append(get_pantry_address_action()); - - // Call the Pantry's /import_from_url - builder.append(call_pantry_import_from_url_for_disk_action()); - - // Wait for import_from_url job to complete - builder.append(wait_for_import_from_url_action()); - - // Set ImportReady state - builder.append(set_import_ready_state_action()); - - Ok(builder.build()?) 
- } -} - -async fn sibfu_get_importing_state( - sagactx: NexusActionContext, -) -> Result { - let log = sagactx.user_data().log(); - let osagactx = sagactx.user_data(); - let params = sagactx.saga_params::()?; - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - let (.., authz_disk, db_disk) = - LookupPath::new(&opctx, &osagactx.datastore()) - .disk_id(params.disk_id) - .fetch_for(authz::Action::Modify) - .await - .map_err(ActionError::action_failed)?; - - match db_disk.state().into() { - external::DiskState::ImportReady => { - info!( - log, - "setting disk {} to state importing_from_url", - db_disk.id(), - ); - - osagactx - .datastore() - .disk_update_runtime( - &opctx, - &authz_disk, - &db_disk.runtime().importing_from_url(), - ) - .await - .map_err(ActionError::action_failed)?; - - // Record the disk's new generation number as this saga node's output. It - // will be important later to *only* transition this disk out of maintenance - // if the generation number matches what *this* saga is doing. - let (.., db_disk) = LookupPath::new(&opctx, &osagactx.datastore()) - .disk_id(params.disk_id) - .fetch_for(authz::Action::Read) - .await - .map_err(ActionError::action_failed)?; - - Ok(db_disk.runtime().gen) - } - - _ => Err(ActionError::action_failed(Error::invalid_request(&format!( - "cannot import blocks from a url into disk in state {:?}", - db_disk.state() - )))), - } -} - -async fn sibfu_get_importing_state_undo( - sagactx: NexusActionContext, -) -> Result<(), anyhow::Error> { - let log = sagactx.user_data().log(); - let osagactx = sagactx.user_data(); - let params = sagactx.saga_params::()?; - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - let (.., authz_disk, db_disk) = - LookupPath::new(&opctx, &osagactx.datastore()) - .disk_id(params.disk_id) - .fetch_for(authz::Action::Modify) - .await - .map_err(ActionError::action_failed)?; - - let expected_disk_generation_number = - sagactx.lookup::("disk_generation_number")?; - - match db_disk.state().into() { - external::DiskState::ImportingFromUrl => { - // A previous execution of *this* saga may hav already transitioned this disk to - // import_ready. Another saga racing with this one may have transitioned the disk to - // importing - only set this disk to import_ready if the generation number matches this - // saga. 
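Although this file is being deleted, the comment above records a pattern the remaining sagas still rely on: a compensating action must compare generation numbers so it only undoes its own state change. In miniature (hypothetical types; the real code goes through disk_update_runtime):

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Generation(u64);

#[derive(Debug)]
struct DiskRecord {
    state: &'static str,
    gen: Generation,
}

// Roll the state back only if no other saga advanced the record since
// this saga bumped the generation.
fn undo_state_change(
    disk: &mut DiskRecord,
    expected_gen: Generation,
    rollback_state: &'static str,
) {
    if disk.gen == expected_gen {
        disk.state = rollback_state;
        disk.gen = Generation(disk.gen.0 + 1);
    } else {
        println!(
            "gen {:?} != expected {:?}: skipping rollback",
            disk.gen, expected_gen
        );
    }
}

fn main() {
    let mut disk =
        DiskRecord { state: "importing_from_url", gen: Generation(4) };
    undo_state_change(&mut disk, Generation(4), "import_ready");
    assert_eq!(disk.state, "import_ready");
}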
- if expected_disk_generation_number == db_disk.runtime().gen { - info!( - log, - "undo: setting disk {} state from importing_from_url to import_ready", - params.disk_id - ); - - osagactx - .datastore() - .disk_update_runtime( - &opctx, - &authz_disk, - &db_disk.runtime().import_ready(), - ) - .await - .map_err(ActionError::action_failed)?; - } else { - info!( - log, - "disk {} has generation number {:?}, which doesn't match the expected {:?}: skip setting to import_ready", - params.disk_id, - db_disk.runtime().gen, - expected_disk_generation_number, - ); - } - } - - external::DiskState::ImportReady => { - info!(log, "disk {} already import_ready", params.disk_id); - } - - _ => { - warn!(log, "disk is in state {:?}", db_disk.state()); - } - } - - Ok(()) -} - -async fn sibfu_get_pantry_address( - sagactx: NexusActionContext, -) -> Result { - let log = sagactx.user_data().log(); - let osagactx = sagactx.user_data(); - let params = sagactx.saga_params::()?; - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - let (.., db_disk) = LookupPath::new(&opctx, &osagactx.datastore()) - .disk_id(params.disk_id) - .fetch_for(authz::Action::Modify) - .await - .map_err(ActionError::action_failed)?; - - // At any stage of executing this saga, if the disk moves from state - // importing to detached, it will be detached from the corresponding Pantry. - // Any subsequent saga nodes will fail because the pantry address is stored - // as part of the saga state, and requests sent to that Pantry with the - // disk's id will fail. - let pantry_address = db_disk.pantry_address().ok_or_else(|| { - ActionError::action_failed(String::from("disk not attached to pantry!")) - })?; - - info!(log, "disk {} is using pantry at {}", db_disk.id(), pantry_address); - - Ok(pantry_address) -} - -async fn sibfu_call_pantry_import_from_url_for_disk( - sagactx: NexusActionContext, -) -> Result { - let log = sagactx.user_data().log(); - let params = sagactx.saga_params::()?; - - let pantry_address = sagactx.lookup::("pantry_address")?; - let endpoint = format!("http://{}", pantry_address); - - info!( - log, - "sending import from url {} request for disk {} to pantry endpoint {}", - params.import_params.url, - params.disk_id, - endpoint, - ); - - let disk_id = params.disk_id.to_string(); - - let client = crucible_pantry_client::Client::new(&endpoint); - - let request = crucible_pantry_client::types::ImportFromUrlRequest { - url: params.import_params.url, - expected_digest: if let Some(expected_digest) = - params.import_params.expected_digest - { - match expected_digest { - nexus_types::external_api::params::ExpectedDigest::Sha256( - v, - ) => Some( - crucible_pantry_client::types::ExpectedDigest::Sha256(v), - ), - } - } else { - None - }, - }; - - let response = retry_until_known_result(log, || async { - client.import_from_url(&disk_id, &request).await - }) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "import from url failed with {:?}", - e - )) - })?; - - Ok(response.job_id.clone()) -} - -async fn sibfu_wait_for_import_from_url( - sagactx: NexusActionContext, -) -> Result<(), ActionError> { - let log = sagactx.user_data().log(); - let params = sagactx.saga_params::()?; - - let pantry_address = sagactx.lookup::("pantry_address")?; - let job_id = - sagactx.lookup::("call_pantry_import_from_url_for_disk")?; - - let endpoint = format!("http://{}", pantry_address); - - let client = crucible_pantry_client::Client::new(&endpoint); - - info!( - log, - "waiting for import 
from url job {} for disk {} to complete on pantry {}", - job_id, - params.disk_id, - endpoint, - ); - - loop { - let result = retry_until_known_result(log, || async { - client.is_job_finished(&job_id).await - }) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "is_job_finished failed with {:?}", - e - )) - })?; - - if result.job_is_finished { - break; - } - - tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - } - - info!( - log, - "import from url job {} for disk {} on pantry {} completed", - job_id, - params.disk_id, - endpoint, - ); - - let response = retry_until_known_result(log, || async { - client.job_result_ok(&job_id).await - }) - .await - .map_err(|e| { - ActionError::action_failed(format!("job_result_ok failed with {:?}", e)) - })?; - - if !response.job_result_ok { - return Err(ActionError::action_failed(format!("Job {job_id} failed"))); - } - - Ok(()) -} - -async fn sibfu_get_import_ready_state( - sagactx: NexusActionContext, -) -> Result<(), ActionError> { - let log = sagactx.user_data().log(); - let osagactx = sagactx.user_data(); - let params = sagactx.saga_params::()?; - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - let (.., authz_disk, db_disk) = - LookupPath::new(&opctx, &osagactx.datastore()) - .disk_id(params.disk_id) - .fetch_for(authz::Action::Modify) - .await - .map_err(ActionError::action_failed)?; - - let expected_disk_generation_number = - sagactx.lookup::("disk_generation_number")?; - - match db_disk.state().into() { - external::DiskState::ImportingFromUrl => { - if expected_disk_generation_number == db_disk.runtime().gen { - info!( - log, - "setting disk {} state from importing_from_url to import_ready", - params.disk_id - ); - - osagactx - .datastore() - .disk_update_runtime( - &opctx, - &authz_disk, - &db_disk.runtime().import_ready(), - ) - .await - .map_err(ActionError::action_failed)?; - } else { - info!( - log, - "disk {} has generation number {:?}, which doesn't match the expected {:?}: skip setting to import_ready", - params.disk_id, - db_disk.runtime().gen, - expected_disk_generation_number, - ); - } - } - - external::DiskState::ImportReady => { - info!(log, "disk {} already import_ready", params.disk_id); - } - - _ => { - warn!(log, "disk is in state {:?}", db_disk.state()); - } - } - - Ok(()) -} diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 153e0323e7..5149825842 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -598,35 +598,55 @@ async fn sic_allocate_instance_snat_ip_undo( async fn sic_allocate_instance_external_ip( sagactx: NexusActionContext, ) -> Result<(), ActionError> { + // XXX: may wish to restructure partially: we have at most one ephemeral + // and then at most $n$ floating. 
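The XXX comment above marks the shape of the restructured action below: ephemeral addresses are allocated fresh from a pool, while floating addresses already exist and are only attached to the new instance. In miniature (hypothetical types standing in for the params and datastore calls):

enum ExternalIpCreate {
    Ephemeral { pool_name: Option<String> },
    Floating { floating_ip_name: String },
}

fn provision(ip: &ExternalIpCreate, instance_id: u32) -> String {
    match ip {
        // Carve a new address out of the named pool, or the default
        // pool if none was given.
        ExternalIpCreate::Ephemeral { pool_name } => format!(
            "allocate ephemeral IP from pool {} for instance {instance_id}",
            pool_name.as_deref().unwrap_or("default"),
        ),
        // The floating IP record already exists; attaching only sets
        // its parent to the new instance.
        ExternalIpCreate::Floating { floating_ip_name } => format!(
            "attach floating IP {floating_ip_name} to instance {instance_id}"
        ),
    }
}

fn main() {
    println!("{}", provision(&ExternalIpCreate::Ephemeral { pool_name: None }, 7));
    let fip = ExternalIpCreate::Floating { floating_ip_name: "fip0".into() };
    println!("{}", provision(&fip, 7));
}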
let osagactx = sagactx.user_data(); let datastore = osagactx.datastore(); let repeat_saga_params = sagactx.saga_params::()?; let saga_params = repeat_saga_params.saga_params; let ip_index = repeat_saga_params.which; - let ip_params = saga_params.create_params.external_ips.get(ip_index); - let ip_params = match ip_params { - None => { - return Ok(()); - } - Some(ref prs) => prs, + let Some(ip_params) = saga_params.create_params.external_ips.get(ip_index) + else { + return Ok(()); }; let opctx = crate::context::op_context_for_saga_action( &sagactx, &saga_params.serialized_authn, ); let instance_id = repeat_saga_params.instance_id; - let ip_id = repeat_saga_params.new_id; - // Collect the possible pool name for this IP address - let pool_name = match ip_params { + match ip_params { + // Allocate a new IP address from the target, possibly default, pool params::ExternalIpCreate::Ephemeral { ref pool_name } => { - pool_name.as_ref().map(|name| db::model::Name(name.clone())) + let pool_name = + pool_name.as_ref().map(|name| db::model::Name(name.clone())); + let ip_id = repeat_saga_params.new_id; + datastore + .allocate_instance_ephemeral_ip( + &opctx, + ip_id, + instance_id, + pool_name, + ) + .await + .map_err(ActionError::action_failed)?; } - }; - datastore - .allocate_instance_ephemeral_ip(&opctx, ip_id, instance_id, pool_name) - .await - .map_err(ActionError::action_failed)?; + // Set the parent of an existing floating IP to the new instance's ID. + params::ExternalIpCreate::Floating { ref floating_ip_name } => { + let floating_ip_name = db::model::Name(floating_ip_name.clone()); + let (.., authz_fip, db_fip) = LookupPath::new(&opctx, &datastore) + .project_id(saga_params.project_id) + .floating_ip_name(&floating_ip_name) + .fetch_for(authz::Action::Modify) + .await + .map_err(ActionError::action_failed)?; + + datastore + .floating_ip_attach(&opctx, &authz_fip, &db_fip, instance_id) + .await + .map_err(ActionError::action_failed)?; + } + } Ok(()) } @@ -638,16 +658,31 @@ async fn sic_allocate_instance_external_ip_undo( let repeat_saga_params = sagactx.saga_params::()?; let saga_params = repeat_saga_params.saga_params; let ip_index = repeat_saga_params.which; - if ip_index >= saga_params.create_params.external_ips.len() { - return Ok(()); - } - let opctx = crate::context::op_context_for_saga_action( &sagactx, &saga_params.serialized_authn, ); - let ip_id = repeat_saga_params.new_id; - datastore.deallocate_external_ip(&opctx, ip_id).await?; + let Some(ip_params) = saga_params.create_params.external_ips.get(ip_index) + else { + return Ok(()); + }; + + match ip_params { + params::ExternalIpCreate::Ephemeral { .. 
} => { + let ip_id = repeat_saga_params.new_id; + datastore.deallocate_external_ip(&opctx, ip_id).await?; + } + params::ExternalIpCreate::Floating { floating_ip_name } => { + let floating_ip_name = db::model::Name(floating_ip_name.clone()); + let (.., authz_fip, db_fip) = LookupPath::new(&opctx, &datastore) + .project_id(saga_params.project_id) + .floating_ip_name(&floating_ip_name) + .fetch_for(authz::Action::Modify) + .await?; + + datastore.floating_ip_detach(&opctx, &authz_fip, &db_fip).await?; + } + } Ok(()) } @@ -866,9 +901,7 @@ pub mod test { app::sagas::instance_create::SagaInstanceCreate, app::sagas::test_helpers, external_api::params, }; - use async_bb8_diesel::{ - AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, - }; + use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use diesel::{ BoolExpressionMethods, ExpressionMethods, OptionalExtension, QueryDsl, SelectableHelper, @@ -1013,30 +1046,28 @@ pub mod test { use nexus_db_queries::db::model::SledResource; use nexus_db_queries::db::schema::sled_resource::dsl; + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore - .pool_connection_for_tests() - .await - .unwrap() - .transaction_async(|conn| async move { + .transaction_retry_wrapper( + "no_sled_resource_instance_records_exist", + ) + .transaction(&conn, |conn| async move { conn.batch_execute_async( nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL, ) .await .unwrap(); - Ok::<_, nexus_db_queries::db::TransactionError<()>>( - dsl::sled_resource - .filter( - dsl::kind.eq( - nexus_db_queries::db::model::SledResourceKind::Instance, - ), - ) - .select(SledResource::as_select()) - .get_results_async::(&conn) - .await - .unwrap() - .is_empty(), - ) + Ok(dsl::sled_resource + .filter(dsl::kind.eq( + nexus_db_queries::db::model::SledResourceKind::Instance, + )) + .select(SledResource::as_select()) + .get_results_async::(&conn) + .await + .unwrap() + .is_empty()) }) .await .unwrap() @@ -1048,16 +1079,17 @@ pub mod test { use nexus_db_queries::db::model::VirtualProvisioningResource; use nexus_db_queries::db::schema::virtual_provisioning_resource::dsl; - datastore.pool_connection_for_tests() - .await - .unwrap() - .transaction_async(|conn| async move { + let conn = datastore.pool_connection_for_tests().await.unwrap(); + + datastore + .transaction_retry_wrapper("no_virtual_provisioning_resource_records_exist") + .transaction(&conn, |conn| async move { conn .batch_execute_async(nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL) .await .unwrap(); - Ok::<_, nexus_db_queries::db::TransactionError<()>>( + Ok( dsl::virtual_provisioning_resource .filter(dsl::resource_type.eq(nexus_db_queries::db::model::ResourceTypeProvisioned::Instance.to_string())) .select(VirtualProvisioningResource::as_select()) @@ -1075,31 +1107,29 @@ pub mod test { use nexus_db_queries::db::model::VirtualProvisioningCollection; use nexus_db_queries::db::schema::virtual_provisioning_collection::dsl; + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore - .pool_connection_for_tests() - .await - .unwrap() - .transaction_async(|conn| async move { + .transaction_retry_wrapper( + "no_virtual_provisioning_collection_records_using_instances", + ) + .transaction(&conn, |conn| async move { conn.batch_execute_async( nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL, ) .await .unwrap(); - Ok::<_, nexus_db_queries::db::TransactionError<()>>( - dsl::virtual_provisioning_collection - .filter( - dsl::cpus_provisioned - .ne(0) - .or(dsl::ram_provisioned.ne(0)), - ) - 
.select(VirtualProvisioningCollection::as_select()) - .get_results_async::( - &conn, - ) - .await - .unwrap() - .is_empty(), - ) + Ok(dsl::virtual_provisioning_collection + .filter( + dsl::cpus_provisioned + .ne(0) + .or(dsl::ram_provisioned.ne(0)), + ) + .select(VirtualProvisioningCollection::as_select()) + .get_results_async::(&conn) + .await + .unwrap() + .is_empty()) }) .await .unwrap() diff --git a/nexus/src/app/sagas/instance_delete.rs b/nexus/src/app/sagas/instance_delete.rs index 1605465c74..7802312b10 100644 --- a/nexus/src/app/sagas/instance_delete.rs +++ b/nexus/src/app/sagas/instance_delete.rs @@ -8,6 +8,7 @@ use super::ActionRegistry; use super::NexusActionContext; use super::NexusSaga; use crate::app::sagas::declare_saga_actions; +use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::{authn, authz, db}; use omicron_common::api::external::{Error, ResourceType}; use omicron_common::api::internal::shared::SwitchLocation; @@ -39,6 +40,9 @@ declare_saga_actions! { DEALLOCATE_EXTERNAL_IP -> "no_result3" { + sid_deallocate_external_ip } + INSTANCE_DELETE_NAT -> "no_result4" { + + sid_delete_nat + } } // instance delete saga: definition @@ -57,6 +61,7 @@ impl NexusSaga for SagaInstanceDelete { _params: &Self::Params, mut builder: steno::DagBuilder, ) -> Result { + builder.append(instance_delete_nat_action()); builder.append(instance_delete_record_action()); builder.append(delete_network_interfaces_action()); builder.append(deallocate_external_ip_action()); @@ -110,6 +115,32 @@ async fn sid_delete_network_interfaces( Ok(()) } +async fn sid_delete_nat( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let params = sagactx.saga_params::()?; + let instance_id = params.authz_instance.id(); + let osagactx = sagactx.user_data(); + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let (.., authz_instance) = LookupPath::new(&opctx, &osagactx.datastore()) + .instance_id(instance_id) + .lookup_for(authz::Action::Modify) + .await + .map_err(ActionError::action_failed)?; + + osagactx + .nexus() + .instance_delete_dpd_config(&opctx, &authz_instance) + .await + .map_err(ActionError::action_failed)?; + + Ok(()) +} + async fn sid_deallocate_external_ip( sagactx: NexusActionContext, ) -> Result<(), ActionError> { @@ -127,6 +158,11 @@ async fn sid_deallocate_external_ip( ) .await .map_err(ActionError::action_failed)?; + osagactx + .datastore() + .detach_floating_ips_by_instance_id(&opctx, params.authz_instance.id()) + .await + .map_err(ActionError::action_failed)?; Ok(()) } diff --git a/nexus/src/app/sagas/mod.rs b/nexus/src/app/sagas/mod.rs index 5b1843be3d..c5918d32ef 100644 --- a/nexus/src/app/sagas/mod.rs +++ b/nexus/src/app/sagas/mod.rs @@ -23,7 +23,6 @@ pub mod disk_create; pub mod disk_delete; pub mod finalize_disk; pub mod image_delete; -pub mod import_blocks_from_url; mod instance_common; pub mod instance_create; pub mod instance_delete; @@ -36,6 +35,7 @@ pub mod snapshot_create; pub mod snapshot_delete; pub mod switch_port_settings_apply; pub mod switch_port_settings_clear; +pub mod switch_port_settings_common; pub mod test_saga; pub mod volume_delete; pub mod volume_remove_rop; @@ -124,7 +124,6 @@ fn make_action_registry() -> ActionRegistry { ::register_actions( &mut registry, ); - ::register_actions(&mut registry); ::register_actions( &mut registry, ); diff --git a/nexus/src/app/sagas/project_create.rs b/nexus/src/app/sagas/project_create.rs index 135e20ff06..40acc822c0 100644 --- 
a/nexus/src/app/sagas/project_create.rs +++ b/nexus/src/app/sagas/project_create.rs @@ -157,9 +157,7 @@ mod test { app::saga::create_saga_dag, app::sagas::project_create::Params, app::sagas::project_create::SagaProjectCreate, external_api::params, }; - use async_bb8_diesel::{ - AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, - }; + use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use diesel::{ ExpressionMethods, OptionalExtension, QueryDsl, SelectableHelper, }; @@ -233,15 +231,16 @@ mod test { use nexus_db_queries::db::model::VirtualProvisioningCollection; use nexus_db_queries::db::schema::virtual_provisioning_collection::dsl; - datastore.pool_connection_for_tests() - .await - .unwrap() - .transaction_async(|conn| async move { + let conn = datastore.pool_connection_for_tests().await.unwrap(); + + datastore + .transaction_retry_wrapper("no_virtual_provisioning_collection_records_for_projects") + .transaction(&conn, |conn| async move { conn .batch_execute_async(nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL) .await .unwrap(); - Ok::<_, nexus_db_queries::db::TransactionError<()>>( + Ok( dsl::virtual_provisioning_collection .filter(dsl::collection_type.eq(nexus_db_queries::db::model::CollectionTypeProvisioned::Project.to_string())) // ignore built-in services project diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 5a686b2f3d..3b4dfc0043 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -496,17 +496,19 @@ async fn ssc_create_destination_volume_record( async fn ssc_create_destination_volume_record_undo( sagactx: NexusActionContext, ) -> Result<(), anyhow::Error> { + let log = sagactx.user_data().log(); let osagactx = sagactx.user_data(); let destination_volume_id = sagactx.lookup::("destination_volume_id")?; - osagactx - .datastore() - .decrease_crucible_resource_count_and_soft_delete_volume( - destination_volume_id, - ) - .await?; + // This saga contains what is necessary to clean up the destination volume + // resources. It's safe here to perform a volume hard delete without + // decreasing the crucible resource count because the destination volume is + // guaranteed to never have read only resources that require that + // accounting. + + info!(log, "hard deleting volume {}", destination_volume_id,); osagactx.datastore().volume_hard_delete(destination_volume_id).await?; @@ -1396,17 +1398,22 @@ async fn ssc_create_volume_record_undo( let osagactx = sagactx.user_data(); let volume_id = sagactx.lookup::("volume_id")?; + // `volume_create` will increase the resource count for read only resources + // in a volume, which there are guaranteed to be for snapshot volumes. + // decreasing crucible resources is necessary as an undo step. Do not call + // `volume_hard_delete` here: soft deleting volumes is necessary for + // `find_deleted_volume_regions` to work. + info!( log, "calling decrease crucible resource count for volume {}", volume_id ); + osagactx .datastore() .decrease_crucible_resource_count_and_soft_delete_volume(volume_id) .await?; - osagactx.datastore().volume_hard_delete(volume_id).await?; - Ok(()) } diff --git a/nexus/src/app/sagas/snapshot_delete.rs b/nexus/src/app/sagas/snapshot_delete.rs index 0589b1ea03..75fc16754d 100644 --- a/nexus/src/app/sagas/snapshot_delete.rs +++ b/nexus/src/app/sagas/snapshot_delete.rs @@ -26,6 +26,9 @@ declare_saga_actions! 
{ SPACE_ACCOUNT -> "no_result2" { + ssd_account_space } + NOOP -> "no_result3" { + + ssd_noop + } } #[derive(Debug)] @@ -71,7 +74,7 @@ impl NexusSaga for SagaSnapshotDelete { DELETE_VOLUME_DESTINATION_PARAMS, serde_json::to_value(&volume_delete_params).map_err(|e| { super::SagaInitError::SerializeError( - String::from("volume_id"), + String::from("destination_volume_id"), e, ) })?, @@ -83,16 +86,21 @@ impl NexusSaga for SagaSnapshotDelete { )); sagas::volume_delete::create_dag(subsaga_builder) }; - builder.append(steno::Node::subsaga( - "delete_volume", - make_volume_delete_dag()?, - DELETE_VOLUME_PARAMS, - )); - builder.append(steno::Node::subsaga( - "delete_destination_volume", - make_volume_delete_dag()?, - DELETE_VOLUME_DESTINATION_PARAMS, - )); + + builder.append_parallel(vec![ + steno::Node::subsaga( + "delete_volume", + make_volume_delete_dag()?, + DELETE_VOLUME_PARAMS, + ), + steno::Node::subsaga( + "delete_destination_volume", + make_volume_delete_dag()?, + DELETE_VOLUME_DESTINATION_PARAMS, + ), + ]); + + builder.append(noop_action()); Ok(builder.build()?) } @@ -148,3 +156,8 @@ async fn ssd_account_space( .map_err(ActionError::action_failed)?; Ok(()) } + +// Sagas must end in one node, not parallel +async fn ssd_noop(_sagactx: NexusActionContext) -> Result<(), ActionError> { + Ok(()) +} diff --git a/nexus/src/app/sagas/switch_port_settings_apply.rs b/nexus/src/app/sagas/switch_port_settings_apply.rs index 0c06d6ff83..0d6bb52421 100644 --- a/nexus/src/app/sagas/switch_port_settings_apply.rs +++ b/nexus/src/app/sagas/switch_port_settings_apply.rs @@ -3,53 +3,30 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. use super::{NexusActionContext, NEXUS_DPD_TAG}; -use crate::app::map_switch_zone_addrs; use crate::app::sagas::retry_until_known_result; +use crate::app::sagas::switch_port_settings_common::{ + api_to_dpd_port_settings, ensure_switch_port_bgp_settings, + ensure_switch_port_uplink, select_dendrite_client, select_mg_client, + switch_sled_agent, write_bootstore_config, +}; use crate::app::sagas::{ declare_saga_actions, ActionRegistry, NexusSaga, SagaInitError, }; -use crate::Nexus; use anyhow::Error; use db::datastore::SwitchPortSettingsCombinedResult; -use dpd_client::types::{ - LinkCreate, LinkId, LinkSettings, PortFec, PortId, PortSettings, PortSpeed, - RouteSettingsV4, RouteSettingsV6, -}; -use dpd_client::{Ipv4Cidr, Ipv6Cidr}; -use internal_dns::ServiceName; -use ipnetwork::IpNetwork; -use mg_admin_client::types::Prefix4; -use mg_admin_client::types::{ApplyRequest, BgpPeerConfig}; -use nexus_db_model::{SwitchLinkFec, SwitchLinkSpeed, NETWORK_KEY}; -use nexus_db_queries::context::OpContext; +use dpd_client::types::PortId; +use nexus_db_model::NETWORK_KEY; use nexus_db_queries::db::datastore::UpdatePrecondition; use nexus_db_queries::{authn, db}; -use nexus_types::external_api::params; -use omicron_common::address::SLED_AGENT_PORT; use omicron_common::api::external::{self, NameOrId}; -use omicron_common::api::internal::shared::{ - ParseSwitchLocationError, SwitchLocation, -}; +use omicron_common::api::internal::shared::SwitchLocation; use serde::{Deserialize, Serialize}; -use sled_agent_client::types::PortConfigV1; -use sled_agent_client::types::RouteConfig; -use sled_agent_client::types::{BgpConfig, EarlyNetworkConfig}; -use sled_agent_client::types::{ - BgpPeerConfig as OmicronBgpPeerConfig, HostPortConfig, -}; -use std::collections::HashMap; -use std::net::SocketAddrV6; -use std::net::{IpAddr, Ipv6Addr}; +use std::net::IpAddr; use std::str::FromStr; use 
std::sync::Arc; use steno::ActionError; use uuid::Uuid; -// This is more of an implementation detail of the BGP implementation. It -// defines the maximum time the peering engine will wait for external messages -// before breaking to check for shutdown conditions. -const BGP_SESSION_RESOLUTION: u64 = 100; - // switch port settings apply saga: input parameters #[derive(Debug, Deserialize, Serialize)] @@ -176,96 +153,15 @@ async fn spa_get_switch_port_settings( Ok(port_settings) } -pub(crate) fn api_to_dpd_port_settings( - settings: &SwitchPortSettingsCombinedResult, -) -> Result { - let mut dpd_port_settings = PortSettings { - links: HashMap::new(), - v4_routes: HashMap::new(), - v6_routes: HashMap::new(), - }; - - //TODO breakouts - let link_id = LinkId(0); - - for l in settings.links.iter() { - dpd_port_settings.links.insert( - link_id.to_string(), - LinkSettings { - params: LinkCreate { - autoneg: false, - lane: Some(LinkId(0)), - kr: false, - fec: match l.fec { - SwitchLinkFec::Firecode => PortFec::Firecode, - SwitchLinkFec::Rs => PortFec::Rs, - SwitchLinkFec::None => PortFec::None, - }, - speed: match l.speed { - SwitchLinkSpeed::Speed0G => PortSpeed::Speed0G, - SwitchLinkSpeed::Speed1G => PortSpeed::Speed1G, - SwitchLinkSpeed::Speed10G => PortSpeed::Speed10G, - SwitchLinkSpeed::Speed25G => PortSpeed::Speed25G, - SwitchLinkSpeed::Speed40G => PortSpeed::Speed40G, - SwitchLinkSpeed::Speed50G => PortSpeed::Speed50G, - SwitchLinkSpeed::Speed100G => PortSpeed::Speed100G, - SwitchLinkSpeed::Speed200G => PortSpeed::Speed200G, - SwitchLinkSpeed::Speed400G => PortSpeed::Speed400G, - }, - }, - //TODO won't work for breakouts - addrs: settings - .addresses - .iter() - .map(|a| a.address.ip()) - .collect(), - }, - ); - } - - for r in &settings.routes { - match &r.dst { - IpNetwork::V4(n) => { - let gw = match r.gw.ip() { - IpAddr::V4(gw) => gw, - IpAddr::V6(_) => { - return Err( - "IPv4 destination cannot have IPv6 nexthop".into() - ) - } - }; - dpd_port_settings.v4_routes.insert( - Ipv4Cidr { prefix: n.ip(), prefix_len: n.prefix() } - .to_string(), - vec![RouteSettingsV4 { link_id: link_id.0, nexthop: gw }], - ); - } - IpNetwork::V6(n) => { - let gw = match r.gw.ip() { - IpAddr::V6(gw) => gw, - IpAddr::V4(_) => { - return Err( - "IPv6 destination cannot have IPv4 nexthop".into() - ) - } - }; - dpd_port_settings.v6_routes.insert( - Ipv6Cidr { prefix: n.ip(), prefix_len: n.prefix() } - .to_string(), - vec![RouteSettingsV6 { link_id: link_id.0, nexthop: gw }], - ); - } - } - } - - Ok(dpd_port_settings) -} - async fn spa_ensure_switch_port_settings( sagactx: NexusActionContext, ) -> Result<(), ActionError> { let params = sagactx.saga_params::()?; let log = sagactx.user_data().log(); + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); let settings = sagactx .lookup::("switch_port_settings")?; @@ -276,7 +172,7 @@ async fn spa_ensure_switch_port_settings( })?; let dpd_client: Arc = - select_dendrite_client(&sagactx).await?; + select_dendrite_client(&sagactx, &opctx, params.switch_port_id).await?; let dpd_port_settings = api_to_dpd_port_settings(&settings).map_err(|e| { @@ -333,8 +229,8 @@ async fn spa_undo_ensure_switch_port_settings( .lookup::>("original_switch_port_settings_id") .map_err(|e| external::Error::internal_error(&e.to_string()))?; - let dpd_client: Arc = - select_dendrite_client(&sagactx).await?; + let dpd_client = + select_dendrite_client(&sagactx, &opctx, params.switch_port_id).await?; let id = match orig_port_settings_id { Some(id) => id, @@ 
-380,101 +276,6 @@ async fn spa_undo_ensure_switch_port_settings( Ok(()) } -async fn spa_ensure_switch_port_bgp_settings( - sagactx: NexusActionContext, -) -> Result<(), ActionError> { - let settings = sagactx - .lookup::("switch_port_settings") - .map_err(|e| { - ActionError::action_failed(format!( - "lookup switch port settings: {e}" - )) - })?; - - ensure_switch_port_bgp_settings(sagactx, settings).await -} - -pub(crate) async fn ensure_switch_port_bgp_settings( - sagactx: NexusActionContext, - settings: SwitchPortSettingsCombinedResult, -) -> Result<(), ActionError> { - let osagactx = sagactx.user_data(); - let nexus = osagactx.nexus(); - let params = sagactx.saga_params::()?; - - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - let mg_client: Arc = - select_mg_client(&sagactx).await.map_err(|e| { - ActionError::action_failed(format!("select mg client: {e}")) - })?; - - let mut bgp_peer_configs = Vec::new(); - - for peer in settings.bgp_peers { - let config = nexus - .bgp_config_get(&opctx, peer.bgp_config_id.into()) - .await - .map_err(|e| { - ActionError::action_failed(format!("get bgp config: {e}")) - })?; - - let announcements = nexus - .bgp_announce_list( - &opctx, - ¶ms::BgpAnnounceSetSelector { - name_or_id: NameOrId::Id(config.bgp_announce_set_id), - }, - ) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "get bgp announcements: {e}" - )) - })?; - - let mut prefixes = Vec::new(); - for a in &announcements { - let value = match a.network.ip() { - IpAddr::V4(value) => Ok(value), - IpAddr::V6(_) => Err(ActionError::action_failed( - "IPv6 announcement not yet supported".to_string(), - )), - }?; - prefixes.push(Prefix4 { value, length: a.network.prefix() }); - } - - let bpc = BgpPeerConfig { - asn: *config.asn, - name: format!("{}", peer.addr.ip()), //TODO user defined name? 
- host: format!("{}:179", peer.addr.ip()), - hold_time: peer.hold_time.0.into(), - idle_hold_time: peer.idle_hold_time.0.into(), - delay_open: peer.delay_open.0.into(), - connect_retry: peer.connect_retry.0.into(), - keepalive: peer.keepalive.0.into(), - resolution: BGP_SESSION_RESOLUTION, - originate: prefixes, - }; - - bgp_peer_configs.push(bpc); - } - - mg_client - .inner - .bgp_apply(&ApplyRequest { - peer_group: params.switch_port_name.clone(), - peers: bgp_peer_configs, - }) - .await - .map_err(|e| { - ActionError::action_failed(format!("apply bgp settings: {e}")) - })?; - - Ok(()) -} async fn spa_undo_ensure_switch_port_bgp_settings( sagactx: NexusActionContext, ) -> Result<(), Error> { @@ -497,9 +298,13 @@ async fn spa_undo_ensure_switch_port_bgp_settings( })?; let mg_client: Arc = - select_mg_client(&sagactx).await.map_err(|e| { - ActionError::action_failed(format!("select mg client (undo): {e}")) - })?; + select_mg_client(&sagactx, &opctx, params.switch_port_id) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "select mg client (undo): {e}" + )) + })?; for peer in settings.bgp_peers { let config = nexus @@ -592,96 +397,39 @@ async fn spa_undo_ensure_switch_port_bootstore_network_settings( async fn spa_ensure_switch_port_uplink( sagactx: NexusActionContext, ) -> Result<(), ActionError> { - ensure_switch_port_uplink(sagactx, false, None).await + let params = sagactx.saga_params::()?; + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + ensure_switch_port_uplink( + sagactx, + &opctx, + false, + None, + params.switch_port_id, + params.switch_port_name, + ) + .await } async fn spa_undo_ensure_switch_port_uplink( sagactx: NexusActionContext, ) -> Result<(), Error> { - Ok(ensure_switch_port_uplink(sagactx, true, None).await?) -} - -pub(crate) async fn ensure_switch_port_uplink( - sagactx: NexusActionContext, - skip_self: bool, - inject: Option, -) -> Result<(), ActionError> { let params = sagactx.saga_params::()?; - let opctx = crate::context::op_context_for_saga_action( &sagactx, ¶ms.serialized_authn, ); - let osagactx = sagactx.user_data(); - let nexus = osagactx.nexus(); - - let switch_port = nexus - .get_switch_port(&opctx, params.switch_port_id) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "get switch port for uplink: {e}" - )) - })?; - - let switch_location: SwitchLocation = - switch_port.switch_location.parse().map_err(|e| { - ActionError::action_failed(format!( - "get switch location for uplink: {e:?}", - )) - })?; - - let mut uplinks: Vec = Vec::new(); - - // The sled agent uplinks interface is an all or nothing interface, so we - // need to get all the uplink configs for all the ports. - let active_ports = - nexus.active_port_settings(&opctx).await.map_err(|e| { - ActionError::action_failed(format!( - "get active switch port settings: {e}" - )) - })?; - - for (port, info) in &active_ports { - // Since we are undoing establishing uplinks for the settings - // associated with this port we skip adding this ports uplinks - // to the list - effectively removing them. 
- if skip_self && port.id == switch_port.id { - continue; - } - uplinks.push(HostPortConfig { - port: port.port_name.clone(), - addrs: info.addresses.iter().map(|a| a.address).collect(), - }) - } - - if let Some(id) = inject { - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - let settings = nexus - .switch_port_settings_get(&opctx, &id.into()) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "get switch port settings for injection: {e}" - )) - })?; - uplinks.push(HostPortConfig { - port: params.switch_port_name.clone(), - addrs: settings.addresses.iter().map(|a| a.address).collect(), - }) - } - - let sc = switch_sled_agent(switch_location, &sagactx).await?; - sc.uplink_ensure(&sled_agent_client::types::SwitchPorts { uplinks }) - .await - .map_err(|e| { - ActionError::action_failed(format!("ensure uplink: {e}")) - })?; - - Ok(()) + Ok(ensure_switch_port_uplink( + sagactx, + &opctx, + true, + None, + params.switch_port_id, + params.switch_port_name, + ) + .await?) } // a common route representation for dendrite and port settings @@ -725,349 +473,29 @@ async fn spa_disassociate_switch_port( Ok(()) } -pub(crate) async fn select_dendrite_client( - sagactx: &NexusActionContext, -) -> Result, ActionError> { - let osagactx = sagactx.user_data(); - let params = sagactx.saga_params::()?; - let nexus = osagactx.nexus(); - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - let switch_port = nexus - .get_switch_port(&opctx, params.switch_port_id) - .await +async fn spa_ensure_switch_port_bgp_settings( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let settings = sagactx + .lookup::("switch_port_settings") .map_err(|e| { ActionError::action_failed(format!( - "get switch port for dendrite client selection {e}" + "lookup switch port settings: {e}" )) })?; - let switch_location: SwitchLocation = - switch_port.switch_location.parse().map_err( - |e: ParseSwitchLocationError| { - ActionError::action_failed(format!( - "get switch location for uplink: {e:?}", - )) - }, - )?; - - let dpd_client: Arc = osagactx - .nexus() - .dpd_clients - .get(&switch_location) - .ok_or_else(|| { - ActionError::action_failed(format!( - "requested switch not available: {switch_location}" - )) - })? - .clone(); - Ok(dpd_client) -} - -pub(crate) async fn select_mg_client( - sagactx: &NexusActionContext, -) -> Result, ActionError> { - let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; - let nexus = osagactx.nexus(); let opctx = crate::context::op_context_for_saga_action( &sagactx, ¶ms.serialized_authn, ); - let switch_port = nexus - .get_switch_port(&opctx, params.switch_port_id) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "get switch port for mg client selection: {e}" - )) - })?; - - let switch_location: SwitchLocation = - switch_port.switch_location.parse().map_err( - |e: ParseSwitchLocationError| { - ActionError::action_failed(format!( - "get switch location for uplink: {e:?}", - )) - }, - )?; - - let mg_client: Arc = osagactx - .nexus() - .mg_clients - .get(&switch_location) - .ok_or_else(|| { - ActionError::action_failed(format!( - "requested switch not available: {switch_location}" - )) - })? 
- .clone(); - Ok(mg_client) -} - -pub(crate) async fn get_scrimlet_address( - location: SwitchLocation, - nexus: &Arc, -) -> Result { - /* TODO this depends on DNS entries only coming from RSS, it's broken - on the upgrade path - nexus - .resolver() - .await - .lookup_socket_v6(ServiceName::Scrimlet(location)) - .await - .map_err(|e| e.to_string()) - .map_err(|e| { - ActionError::action_failed(format!( - "scrimlet dns lookup failed {e}", - )) - }) - */ - let result = nexus - .resolver() - .await - .lookup_all_ipv6(ServiceName::Dendrite) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "scrimlet dns lookup failed {e}", - )) - }); - - let mappings = match result { - Ok(addrs) => map_switch_zone_addrs(&nexus.log, addrs).await, - Err(e) => { - warn!(nexus.log, "Failed to lookup Dendrite address: {e}"); - return Err(ActionError::action_failed(format!( - "switch mapping failed {e}", - ))); - } - }; - - let addr = match mappings.get(&location) { - Some(addr) => addr, - None => { - return Err(ActionError::action_failed(format!( - "address for switch at location: {location} not found", - ))); - } - }; - - let mut segments = addr.segments(); - segments[7] = 1; - let addr = Ipv6Addr::from(segments); - - Ok(SocketAddrV6::new(addr, SLED_AGENT_PORT, 0, 0)) -} - -#[derive(Clone, Debug)] -pub struct EarlyNetworkPortUpdate { - port: PortConfigV1, - bgp_configs: Vec, -} - -pub(crate) async fn bootstore_update( - nexus: &Arc, - opctx: &OpContext, - switch_port_id: Uuid, - switch_port_name: &str, - settings: &SwitchPortSettingsCombinedResult, -) -> Result { - let switch_port = - nexus.get_switch_port(&opctx, switch_port_id).await.map_err(|e| { - ActionError::action_failed(format!( - "get switch port for uplink: {e}" - )) - })?; - - let switch_location: SwitchLocation = - switch_port.switch_location.parse().map_err( - |e: ParseSwitchLocationError| { - ActionError::action_failed(format!( - "get switch location for uplink: {e:?}", - )) - }, - )?; - - let mut peer_info = Vec::new(); - let mut bgp_configs = Vec::new(); - for p in &settings.bgp_peers { - let bgp_config = nexus - .bgp_config_get(&opctx, p.bgp_config_id.into()) - .await - .map_err(|e| { - ActionError::action_failed(format!("get bgp config: {e}")) - })?; - - let announcements = nexus - .bgp_announce_list( - &opctx, - ¶ms::BgpAnnounceSetSelector { - name_or_id: NameOrId::Id(bgp_config.bgp_announce_set_id), - }, - ) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "get bgp announcements: {e}" - )) - })?; - - peer_info.push((p, bgp_config.asn.0)); - bgp_configs.push(BgpConfig { - asn: bgp_config.asn.0, - originate: announcements - .iter() - .filter_map(|a| match a.network { - IpNetwork::V4(net) => Some(net.into()), - //TODO v6 - _ => None, - }) - .collect(), - }); - } - - let update = EarlyNetworkPortUpdate { - port: PortConfigV1 { - routes: settings - .routes - .iter() - .map(|r| RouteConfig { destination: r.dst, nexthop: r.gw.ip() }) - .collect(), - addresses: settings.addresses.iter().map(|a| a.address).collect(), - switch: switch_location, - port: switch_port_name.into(), - uplink_port_fec: settings - .links - .get(0) - .map(|l| l.fec) - .unwrap_or(SwitchLinkFec::None) - .into(), - uplink_port_speed: settings - .links - .get(0) - .map(|l| l.speed) - .unwrap_or(SwitchLinkSpeed::Speed100G) - .into(), - bgp_peers: peer_info - .iter() - .filter_map(|(p, asn)| { - //TODO v6 - match p.addr.ip() { - IpAddr::V4(addr) => Some(OmicronBgpPeerConfig { - asn: *asn, - port: switch_port_name.into(), - addr, - hold_time: 
Some(p.hold_time.0.into()), - connect_retry: Some(p.connect_retry.0.into()), - delay_open: Some(p.delay_open.0.into()), - idle_hold_time: Some(p.idle_hold_time.0.into()), - keepalive: Some(p.keepalive.0.into()), - }), - IpAddr::V6(_) => { - warn!(opctx.log, "IPv6 peers not yet supported"); - None - } - } - }) - .collect(), - }, - bgp_configs, - }; - - Ok(update) -} - -pub(crate) async fn read_bootstore_config( - sa: &sled_agent_client::Client, -) -> Result { - Ok(sa - .read_network_bootstore_config_cache() - .await - .map_err(|e| { - ActionError::action_failed(format!( - "read bootstore network config: {e}" - )) - })? - .into_inner()) -} - -pub(crate) async fn write_bootstore_config( - sa: &sled_agent_client::Client, - config: &EarlyNetworkConfig, -) -> Result<(), ActionError> { - sa.write_network_bootstore_config(config).await.map_err(|e| { - ActionError::action_failed(format!( - "write bootstore network config: {e}" - )) - })?; - Ok(()) -} - -#[derive(Clone, Debug, Default)] -pub(crate) struct BootstoreNetworkPortChange { - previous_port_config: Option, - changed_bgp_configs: Vec, - added_bgp_configs: Vec, -} - -pub(crate) fn apply_bootstore_update( - config: &mut EarlyNetworkConfig, - update: &EarlyNetworkPortUpdate, -) -> Result { - let mut change = BootstoreNetworkPortChange::default(); - - let rack_net_config = match &mut config.body.rack_network_config { - Some(cfg) => cfg, - None => { - return Err(ActionError::action_failed( - "rack network config not yet initialized".to_string(), - )) - } - }; - - for port in &mut rack_net_config.ports { - if port.port == update.port.port { - change.previous_port_config = Some(port.clone()); - *port = update.port.clone(); - break; - } - } - if change.previous_port_config.is_none() { - rack_net_config.ports.push(update.port.clone()); - } - - for updated_bgp in &update.bgp_configs { - let mut exists = false; - for resident_bgp in &mut rack_net_config.bgp { - if resident_bgp.asn == updated_bgp.asn { - change.changed_bgp_configs.push(resident_bgp.clone()); - *resident_bgp = updated_bgp.clone(); - exists = true; - break; - } - } - if !exists { - change.added_bgp_configs.push(updated_bgp.clone()); - } - } - rack_net_config.bgp.extend_from_slice(&change.added_bgp_configs); - - Ok(change) -} - -pub(crate) async fn switch_sled_agent( - location: SwitchLocation, - sagactx: &NexusActionContext, -) -> Result { - let nexus = sagactx.user_data().nexus(); - let sled_agent_addr = get_scrimlet_address(location, nexus).await?; - Ok(sled_agent_client::Client::new( - &format!("http://{}", sled_agent_addr), - sagactx.user_data().log().clone(), - )) + ensure_switch_port_bgp_settings( + sagactx, + &opctx, + settings, + params.switch_port_name.clone(), + params.switch_port_id, + ) + .await } diff --git a/nexus/src/app/sagas/switch_port_settings_clear.rs b/nexus/src/app/sagas/switch_port_settings_clear.rs index 1ab2f6be0c..0d876f8159 100644 --- a/nexus/src/app/sagas/switch_port_settings_clear.rs +++ b/nexus/src/app/sagas/switch_port_settings_clear.rs @@ -2,14 +2,13 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-use super::switch_port_settings_apply::select_dendrite_client; use super::{NexusActionContext, NEXUS_DPD_TAG}; use crate::app::sagas::retry_until_known_result; -use crate::app::sagas::switch_port_settings_apply::{ +use crate::app::sagas::switch_port_settings_common::{ api_to_dpd_port_settings, apply_bootstore_update, bootstore_update, ensure_switch_port_bgp_settings, ensure_switch_port_uplink, - read_bootstore_config, select_mg_client, switch_sled_agent, - write_bootstore_config, + read_bootstore_config, select_dendrite_client, select_mg_client, + switch_sled_agent, write_bootstore_config, }; use crate::app::sagas::{ declare_saga_actions, ActionRegistry, NexusSaga, SagaInitError, @@ -147,11 +146,16 @@ async fn spa_clear_switch_port_settings( ) -> Result<(), ActionError> { let params = sagactx.saga_params::()?; let log = sagactx.user_data().log(); + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); let port_id: PortId = PortId::from_str(¶ms.port_name) .map_err(|e| ActionError::action_failed(e.to_string()))?; - let dpd_client = select_dendrite_client(&sagactx).await?; + let dpd_client = + select_dendrite_client(&sagactx, &opctx, params.switch_port_id).await?; retry_until_known_result(log, || async { dpd_client.port_settings_clear(&port_id, Some(NEXUS_DPD_TAG)).await @@ -191,7 +195,8 @@ async fn spa_undo_clear_switch_port_settings( .await .map_err(ActionError::action_failed)?; - let dpd_client = select_dendrite_client(&sagactx).await?; + let dpd_client = + select_dendrite_client(&sagactx, &opctx, params.switch_port_id).await?; let dpd_port_settings = api_to_dpd_port_settings(&settings) .map_err(ActionError::action_failed)?; @@ -214,7 +219,20 @@ async fn spa_undo_clear_switch_port_settings( async fn spa_clear_switch_port_uplink( sagactx: NexusActionContext, ) -> Result<(), ActionError> { - ensure_switch_port_uplink(sagactx, true, None).await + let params = sagactx.saga_params::()?; + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + ensure_switch_port_uplink( + sagactx, + &opctx, + true, + None, + params.switch_port_id, + params.port_name.clone(), + ) + .await } async fn spa_undo_clear_switch_port_uplink( @@ -223,8 +241,21 @@ async fn spa_undo_clear_switch_port_uplink( let id = sagactx .lookup::>("original_switch_port_settings_id") .map_err(|e| external::Error::internal_error(&e.to_string()))?; + let params = sagactx.saga_params::()?; + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); - Ok(ensure_switch_port_uplink(sagactx, false, id).await?) + Ok(ensure_switch_port_uplink( + sagactx, + &opctx, + false, + id, + params.switch_port_id, + params.port_name.clone(), + ) + .await?) } async fn spa_clear_switch_port_bgp_settings( @@ -257,9 +288,13 @@ async fn spa_clear_switch_port_bgp_settings( .map_err(ActionError::action_failed)?; let mg_client: Arc = - select_mg_client(&sagactx).await.map_err(|e| { - ActionError::action_failed(format!("select mg client (undo): {e}")) - })?; + select_mg_client(&sagactx, &opctx, params.switch_port_id) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "select mg client (undo): {e}" + )) + })?; for peer in settings.bgp_peers { let config = nexus @@ -306,7 +341,14 @@ async fn spa_undo_clear_switch_port_bgp_settings( let settings = nexus.switch_port_settings_get(&opctx, &NameOrId::Id(id)).await?; - Ok(ensure_switch_port_bgp_settings(sagactx, settings).await?) 
+ Ok(ensure_switch_port_bgp_settings( + sagactx, + &opctx, + settings, + params.port_name.clone(), + params.switch_port_id, + ) + .await?) } async fn spa_clear_switch_port_bootstore_network_settings( diff --git a/nexus/src/app/sagas/switch_port_settings_common.rs b/nexus/src/app/sagas/switch_port_settings_common.rs new file mode 100644 index 0000000000..9ef23ebf44 --- /dev/null +++ b/nexus/src/app/sagas/switch_port_settings_common.rs @@ -0,0 +1,636 @@ +use super::NexusActionContext; +use crate::app::map_switch_zone_addrs; +use crate::Nexus; +use db::datastore::SwitchPortSettingsCombinedResult; +use dpd_client::types::{ + LinkCreate, LinkId, LinkSettings, PortFec, PortSettings, PortSpeed, + RouteSettingsV4, RouteSettingsV6, +}; +use dpd_client::{Ipv4Cidr, Ipv6Cidr}; +use internal_dns::ServiceName; +use ipnetwork::IpNetwork; +use mg_admin_client::types::Prefix4; +use mg_admin_client::types::{ApplyRequest, BgpPeerConfig}; +use nexus_db_model::{SwitchLinkFec, SwitchLinkSpeed}; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db; +use nexus_types::external_api::params; +use omicron_common::address::SLED_AGENT_PORT; +use omicron_common::api::external::NameOrId; +use omicron_common::api::internal::shared::{ + ParseSwitchLocationError, SwitchLocation, +}; +use sled_agent_client::types::PortConfigV1; +use sled_agent_client::types::RouteConfig; +use sled_agent_client::types::{BgpConfig, EarlyNetworkConfig}; +use sled_agent_client::types::{ + BgpPeerConfig as OmicronBgpPeerConfig, HostPortConfig, +}; +use std::collections::HashMap; +use std::net::SocketAddrV6; +use std::net::{IpAddr, Ipv6Addr}; +use std::sync::Arc; +use steno::ActionError; +use uuid::Uuid; + +// This is more of an implementation detail of the BGP implementation. It +// defines the maximum time the peering engine will wait for external messages +// before breaking to check for shutdown conditions. 
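+// (The unit is not stated here; presumably milliseconds, matching the
+// granularity of mgd's session timers.)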
+const BGP_SESSION_RESOLUTION: u64 = 100;
+
+pub(crate) fn api_to_dpd_port_settings(
+    settings: &SwitchPortSettingsCombinedResult,
+) -> Result<PortSettings, String> {
+    let mut dpd_port_settings = PortSettings {
+        links: HashMap::new(),
+        v4_routes: HashMap::new(),
+        v6_routes: HashMap::new(),
+    };
+
+    //TODO breakouts
+    let link_id = LinkId(0);
+
+    for l in settings.links.iter() {
+        dpd_port_settings.links.insert(
+            link_id.to_string(),
+            LinkSettings {
+                params: LinkCreate {
+                    autoneg: l.autoneg,
+                    lane: Some(LinkId(0)),
+                    kr: false,
+                    fec: match l.fec {
+                        SwitchLinkFec::Firecode => PortFec::Firecode,
+                        SwitchLinkFec::Rs => PortFec::Rs,
+                        SwitchLinkFec::None => PortFec::None,
+                    },
+                    speed: match l.speed {
+                        SwitchLinkSpeed::Speed0G => PortSpeed::Speed0G,
+                        SwitchLinkSpeed::Speed1G => PortSpeed::Speed1G,
+                        SwitchLinkSpeed::Speed10G => PortSpeed::Speed10G,
+                        SwitchLinkSpeed::Speed25G => PortSpeed::Speed25G,
+                        SwitchLinkSpeed::Speed40G => PortSpeed::Speed40G,
+                        SwitchLinkSpeed::Speed50G => PortSpeed::Speed50G,
+                        SwitchLinkSpeed::Speed100G => PortSpeed::Speed100G,
+                        SwitchLinkSpeed::Speed200G => PortSpeed::Speed200G,
+                        SwitchLinkSpeed::Speed400G => PortSpeed::Speed400G,
+                    },
+                },
+                //TODO won't work for breakouts
+                addrs: settings
+                    .addresses
+                    .iter()
+                    .map(|a| a.address.ip())
+                    .collect(),
+            },
+        );
+    }
+
+    for r in &settings.routes {
+        match &r.dst {
+            IpNetwork::V4(n) => {
+                let gw = match r.gw.ip() {
+                    IpAddr::V4(gw) => gw,
+                    IpAddr::V6(_) => {
+                        return Err(
+                            "IPv4 destination cannot have IPv6 nexthop".into()
+                        )
+                    }
+                };
+                dpd_port_settings.v4_routes.insert(
+                    Ipv4Cidr { prefix: n.ip(), prefix_len: n.prefix() }
+                        .to_string(),
+                    vec![RouteSettingsV4 { link_id: link_id.0, nexthop: gw }],
+                );
+            }
+            IpNetwork::V6(n) => {
+                let gw = match r.gw.ip() {
+                    IpAddr::V6(gw) => gw,
+                    IpAddr::V4(_) => {
+                        return Err(
+                            "IPv6 destination cannot have IPv4 nexthop".into()
+                        )
+                    }
+                };
+                dpd_port_settings.v6_routes.insert(
+                    Ipv6Cidr { prefix: n.ip(), prefix_len: n.prefix() }
+                        .to_string(),
+                    vec![RouteSettingsV6 { link_id: link_id.0, nexthop: gw }],
+                );
+            }
+        }
+    }
+
+    Ok(dpd_port_settings)
+}
+
+pub(crate) fn apply_bootstore_update(
+    config: &mut EarlyNetworkConfig,
+    update: &EarlyNetworkPortUpdate,
+) -> Result<BootstoreNetworkPortChange, ActionError> {
+    let mut change = BootstoreNetworkPortChange::default();
+
+    let rack_net_config = match &mut config.body.rack_network_config {
+        Some(cfg) => cfg,
+        None => {
+            return Err(ActionError::action_failed(
+                "rack network config not yet initialized".to_string(),
+            ))
+        }
+    };
+
+    for port in &mut rack_net_config.ports {
+        if port.port == update.port.port {
+            change.previous_port_config = Some(port.clone());
+            *port = update.port.clone();
+            break;
+        }
+    }
+    if change.previous_port_config.is_none() {
+        rack_net_config.ports.push(update.port.clone());
+    }
+
+    for updated_bgp in &update.bgp_configs {
+        let mut exists = false;
+        for resident_bgp in &mut rack_net_config.bgp {
+            if resident_bgp.asn == updated_bgp.asn {
+                change.changed_bgp_configs.push(resident_bgp.clone());
+                *resident_bgp = updated_bgp.clone();
+                exists = true;
+                break;
+            }
+        }
+        if !exists {
+            change.added_bgp_configs.push(updated_bgp.clone());
+        }
+    }
+    rack_net_config.bgp.extend_from_slice(&change.added_bgp_configs);
+
+    Ok(change)
+}
+
+pub(crate) async fn bootstore_update(
+    nexus: &Arc<Nexus>,
+    opctx: &OpContext,
+    switch_port_id: Uuid,
+    switch_port_name: &str,
+    settings: &SwitchPortSettingsCombinedResult,
+) -> Result<EarlyNetworkPortUpdate, ActionError> {
+    let switch_port =
+        nexus.get_switch_port(&opctx, switch_port_id).await.map_err(|e| {
ActionError::action_failed(format!( + "get switch port for uplink: {e}" + )) + })?; + + let switch_location: SwitchLocation = + switch_port.switch_location.parse().map_err( + |e: ParseSwitchLocationError| { + ActionError::action_failed(format!( + "get switch location for uplink: {e:?}", + )) + }, + )?; + + let mut peer_info = Vec::new(); + let mut bgp_configs = Vec::new(); + for p in &settings.bgp_peers { + let bgp_config = nexus + .bgp_config_get(&opctx, p.bgp_config_id.into()) + .await + .map_err(|e| { + ActionError::action_failed(format!("get bgp config: {e}")) + })?; + + let announcements = nexus + .bgp_announce_list( + &opctx, + ¶ms::BgpAnnounceSetSelector { + name_or_id: NameOrId::Id(bgp_config.bgp_announce_set_id), + }, + ) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "get bgp announcements: {e}" + )) + })?; + + peer_info.push((p, bgp_config.asn.0)); + bgp_configs.push(BgpConfig { + asn: bgp_config.asn.0, + originate: announcements + .iter() + .filter_map(|a| match a.network { + IpNetwork::V4(net) => Some(net.into()), + //TODO v6 + _ => None, + }) + .collect(), + }); + } + + let update = EarlyNetworkPortUpdate { + port: PortConfigV1 { + routes: settings + .routes + .iter() + .map(|r| RouteConfig { destination: r.dst, nexthop: r.gw.ip() }) + .collect(), + addresses: settings.addresses.iter().map(|a| a.address).collect(), + switch: switch_location, + port: switch_port_name.into(), + uplink_port_fec: settings + .links + .get(0) + .map(|l| l.fec) + .unwrap_or(SwitchLinkFec::None) + .into(), + uplink_port_speed: settings + .links + .get(0) + .map(|l| l.speed) + .unwrap_or(SwitchLinkSpeed::Speed100G) + .into(), + autoneg: settings.links.get(0).map(|l| l.autoneg).unwrap_or(false), + bgp_peers: peer_info + .iter() + .filter_map(|(p, asn)| { + //TODO v6 + match p.addr.ip() { + IpAddr::V4(addr) => Some(OmicronBgpPeerConfig { + asn: *asn, + port: switch_port_name.into(), + addr, + hold_time: Some(p.hold_time.0.into()), + connect_retry: Some(p.connect_retry.0.into()), + delay_open: Some(p.delay_open.0.into()), + idle_hold_time: Some(p.idle_hold_time.0.into()), + keepalive: Some(p.keepalive.0.into()), + }), + IpAddr::V6(_) => { + warn!(opctx.log, "IPv6 peers not yet supported"); + None + } + } + }) + .collect(), + }, + bgp_configs, + }; + + Ok(update) +} + +pub(crate) async fn ensure_switch_port_uplink( + sagactx: NexusActionContext, + opctx: &OpContext, + skip_self: bool, + inject: Option, + switch_port_id: Uuid, + switch_port_name: String, +) -> Result<(), ActionError> { + let osagactx = sagactx.user_data(); + let nexus = osagactx.nexus(); + + let switch_port = + nexus.get_switch_port(&opctx, switch_port_id).await.map_err(|e| { + ActionError::action_failed(format!( + "get switch port for uplink: {e}" + )) + })?; + + let switch_location: SwitchLocation = + switch_port.switch_location.parse().map_err(|e| { + ActionError::action_failed(format!( + "get switch location for uplink: {e:?}", + )) + })?; + + let mut uplinks: Vec = Vec::new(); + + // The sled agent uplinks interface is an all or nothing interface, so we + // need to get all the uplink configs for all the ports. + let active_ports = + nexus.active_port_settings(&opctx).await.map_err(|e| { + ActionError::action_failed(format!( + "get active switch port settings: {e}" + )) + })?; + + for (port, info) in &active_ports { + // Since we are undoing establishing uplinks for the settings + // associated with this port we skip adding this ports uplinks + // to the list - effectively removing them. 
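+        // (Callers that are removing this port's settings - the apply saga's
+        // undo step and the clear saga's forward step - pass skip_self=true.)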
+ if skip_self && port.id == switch_port.id { + continue; + } + uplinks.push(HostPortConfig { + port: port.port_name.clone(), + addrs: info.addresses.iter().map(|a| a.address).collect(), + }) + } + + if let Some(id) = inject { + let settings = nexus + .switch_port_settings_get(&opctx, &id.into()) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "get switch port settings for injection: {e}" + )) + })?; + uplinks.push(HostPortConfig { + port: switch_port_name.clone(), + addrs: settings.addresses.iter().map(|a| a.address).collect(), + }) + } + + let sc = switch_sled_agent(switch_location, &sagactx).await?; + sc.uplink_ensure(&sled_agent_client::types::SwitchPorts { uplinks }) + .await + .map_err(|e| { + ActionError::action_failed(format!("ensure uplink: {e}")) + })?; + + Ok(()) +} + +pub(crate) async fn read_bootstore_config( + sa: &sled_agent_client::Client, +) -> Result { + Ok(sa + .read_network_bootstore_config_cache() + .await + .map_err(|e| { + ActionError::action_failed(format!( + "read bootstore network config: {e}" + )) + })? + .into_inner()) +} + +pub(crate) async fn write_bootstore_config( + sa: &sled_agent_client::Client, + config: &EarlyNetworkConfig, +) -> Result<(), ActionError> { + sa.write_network_bootstore_config(config).await.map_err(|e| { + ActionError::action_failed(format!( + "write bootstore network config: {e}" + )) + })?; + Ok(()) +} + +pub(crate) async fn select_mg_client( + sagactx: &NexusActionContext, + opctx: &OpContext, + switch_port_id: Uuid, +) -> Result, ActionError> { + let osagactx = sagactx.user_data(); + let nexus = osagactx.nexus(); + + let switch_port = + nexus.get_switch_port(&opctx, switch_port_id).await.map_err(|e| { + ActionError::action_failed(format!( + "get switch port for mg client selection: {e}" + )) + })?; + + let switch_location: SwitchLocation = + switch_port.switch_location.parse().map_err( + |e: ParseSwitchLocationError| { + ActionError::action_failed(format!( + "get switch location for uplink: {e:?}", + )) + }, + )?; + + let mg_client: Arc = osagactx + .nexus() + .mg_clients + .get(&switch_location) + .ok_or_else(|| { + ActionError::action_failed(format!( + "requested switch not available: {switch_location}" + )) + })? 
+        .clone();
+    Ok(mg_client)
+}
+
+pub(crate) async fn switch_sled_agent(
+    location: SwitchLocation,
+    sagactx: &NexusActionContext,
+) -> Result<sled_agent_client::Client, ActionError> {
+    let nexus = sagactx.user_data().nexus();
+    let sled_agent_addr = get_scrimlet_address(location, nexus).await?;
+    Ok(sled_agent_client::Client::new(
+        &format!("http://{}", sled_agent_addr),
+        sagactx.user_data().log().clone(),
+    ))
+}
+
+pub(crate) async fn ensure_switch_port_bgp_settings(
+    sagactx: NexusActionContext,
+    opctx: &OpContext,
+    settings: SwitchPortSettingsCombinedResult,
+    switch_port_name: String,
+    switch_port_id: Uuid,
+) -> Result<(), ActionError> {
+    let osagactx = sagactx.user_data();
+    let nexus = osagactx.nexus();
+    let mg_client: Arc<mg_admin_client::Client> =
+        select_mg_client(&sagactx, opctx, switch_port_id).await.map_err(
+            |e| ActionError::action_failed(format!("select mg client: {e}")),
+        )?;
+
+    let mut bgp_peer_configs = HashMap::<String, Vec<BgpPeerConfig>>::new();
+
+    let mut cfg: Option<nexus_db_model::BgpConfig> = None;
+
+    for peer in settings.bgp_peers {
+        let config = nexus
+            .bgp_config_get(&opctx, peer.bgp_config_id.into())
+            .await
+            .map_err(|e| {
+                ActionError::action_failed(format!("get bgp config: {e}"))
+            })?;
+
+        if let Some(cfg) = &cfg {
+            if config.asn != cfg.asn {
+                return Err(ActionError::action_failed(
+                    "bad request: only one AS allowed per switch".to_string(),
+                ));
+            }
+        } else {
+            cfg = Some(config);
+        }
+
+        let bpc = BgpPeerConfig {
+            name: format!("{}", peer.addr.ip()), //TODO user defined name?
+            host: format!("{}:179", peer.addr.ip()),
+            hold_time: peer.hold_time.0.into(),
+            idle_hold_time: peer.idle_hold_time.0.into(),
+            delay_open: peer.delay_open.0.into(),
+            connect_retry: peer.connect_retry.0.into(),
+            keepalive: peer.keepalive.0.into(),
+            resolution: BGP_SESSION_RESOLUTION,
+            passive: false,
+        };
+
+        match bgp_peer_configs.get_mut(&switch_port_name) {
+            Some(peers) => {
+                peers.push(bpc);
+            }
+            None => {
+                bgp_peer_configs.insert(switch_port_name.clone(), vec![bpc]);
+            }
+        }
+    }
+
+    if let Some(cfg) = &cfg {
+        let announcements = nexus
+            .bgp_announce_list(
+                &opctx,
+                &params::BgpAnnounceSetSelector {
+                    name_or_id: NameOrId::Id(cfg.bgp_announce_set_id),
+                },
+            )
+            .await
+            .map_err(|e| {
+                ActionError::action_failed(format!(
+                    "get bgp announcements: {e}"
+                ))
+            })?;
+
+        let mut prefixes = Vec::new();
+        for a in &announcements {
+            let value = match a.network.ip() {
+                IpAddr::V4(value) => Ok(value),
+                IpAddr::V6(_) => Err(ActionError::action_failed(
+                    "bad request: IPv6 announcement not yet supported"
+                        .to_string(),
+                )),
+            }?;
+            prefixes.push(Prefix4 { value, length: a.network.prefix() });
+        }
+        mg_client
+            .inner
+            .bgp_apply(&ApplyRequest {
+                asn: cfg.asn.0,
+                peers: bgp_peer_configs,
+                originate: prefixes,
+            })
+            .await
+            .map_err(|e| {
+                ActionError::action_failed(format!("apply bgp settings: {e}"))
+            })?;
+    }
+
+    Ok(())
+}
+
+pub(crate) async fn get_scrimlet_address(
+    location: SwitchLocation,
+    nexus: &Arc<Nexus>,
+) -> Result<SocketAddrV6, ActionError> {
+    /* TODO this depends on DNS entries only coming from RSS, it's broken
+       on the upgrade path
+       nexus
+           .resolver()
+           .await
+           .lookup_socket_v6(ServiceName::Scrimlet(location))
+           .await
+           .map_err(|e| e.to_string())
+           .map_err(|e| {
+               ActionError::action_failed(format!(
+                   "scrimlet dns lookup failed {e}",
+               ))
+           })
+    */
+    let result = nexus
+        .resolver()
+        .await
+        .lookup_all_ipv6(ServiceName::Dendrite)
+        .await
+        .map_err(|e| {
+            ActionError::action_failed(format!(
+                "scrimlet dns lookup failed {e}",
+            ))
+        });
+
+    let mappings = match result {
+        Ok(addrs) => map_switch_zone_addrs(&nexus.log, addrs).await,
+        Err(e) => {
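+            // The DNS lookup itself failed, so there are no addresses to
+            // map; log the failure and fail the action.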
warn!(nexus.log, "Failed to lookup Dendrite address: {e}"); + return Err(ActionError::action_failed(format!( + "switch mapping failed {e}", + ))); + } + }; + + let addr = match mappings.get(&location) { + Some(addr) => addr, + None => { + return Err(ActionError::action_failed(format!( + "address for switch at location: {location} not found", + ))); + } + }; + + let mut segments = addr.segments(); + segments[7] = 1; + let addr = Ipv6Addr::from(segments); + + Ok(SocketAddrV6::new(addr, SLED_AGENT_PORT, 0, 0)) +} + +#[derive(Clone, Debug, Default)] +pub(crate) struct BootstoreNetworkPortChange { + previous_port_config: Option, + changed_bgp_configs: Vec, + added_bgp_configs: Vec, +} + +#[derive(Clone, Debug)] +pub struct EarlyNetworkPortUpdate { + port: PortConfigV1, + bgp_configs: Vec, +} + +pub(crate) async fn select_dendrite_client( + sagactx: &NexusActionContext, + opctx: &OpContext, + switch_port_id: Uuid, +) -> Result, ActionError> { + let osagactx = sagactx.user_data(); + let nexus = osagactx.nexus(); + + let switch_port = + nexus.get_switch_port(&opctx, switch_port_id).await.map_err(|e| { + ActionError::action_failed(format!( + "get switch port for dendrite client selection {e}" + )) + })?; + + let switch_location: SwitchLocation = + switch_port.switch_location.parse().map_err( + |e: ParseSwitchLocationError| { + ActionError::action_failed(format!( + "get switch location for uplink: {e:?}", + )) + }, + )?; + + let dpd_client: Arc = osagactx + .nexus() + .dpd_clients + .get(&switch_location) + .ok_or_else(|| { + ActionError::action_failed(format!( + "requested switch not available: {switch_location}" + )) + })? + .clone(); + Ok(dpd_client) +} diff --git a/nexus/src/app/sagas/test_helpers.rs b/nexus/src/app/sagas/test_helpers.rs index eccb013b66..3110bd318a 100644 --- a/nexus/src/app/sagas/test_helpers.rs +++ b/nexus/src/app/sagas/test_helpers.rs @@ -10,9 +10,7 @@ use crate::{ app::{saga::create_saga_dag, test_interfaces::TestInterfaces as _}, Nexus, }; -use async_bb8_diesel::{ - AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, -}; +use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use futures::future::BoxFuture; use nexus_db_queries::{ @@ -434,11 +432,10 @@ pub(crate) async fn assert_no_failed_undo_steps( ) { use nexus_db_queries::db::model::saga_types::SagaNodeEvent; + let conn = datastore.pool_connection_for_tests().await.unwrap(); let saga_node_events: Vec = datastore - .pool_connection_for_tests() - .await - .unwrap() - .transaction_async(|conn| async move { + .transaction_retry_wrapper("assert_no_failed_undo_steps") + .transaction(&conn, |conn| async move { use nexus_db_queries::db::schema::saga_node_event::dsl; conn.batch_execute_async( @@ -447,14 +444,12 @@ pub(crate) async fn assert_no_failed_undo_steps( .await .unwrap(); - Ok::<_, nexus_db_queries::db::TransactionError<()>>( - dsl::saga_node_event - .filter(dsl::event_type.eq(String::from("undo_failed"))) - .select(SagaNodeEvent::as_select()) - .load_async::(&conn) - .await - .unwrap(), - ) + Ok(dsl::saga_node_event + .filter(dsl::event_type.eq(String::from("undo_failed"))) + .select(SagaNodeEvent::as_select()) + .load_async::(&conn) + .await + .unwrap()) }) .await .unwrap(); diff --git a/nexus/src/app/sagas/volume_delete.rs b/nexus/src/app/sagas/volume_delete.rs index 43530e913c..22425a0b99 100644 --- a/nexus/src/app/sagas/volume_delete.rs +++ b/nexus/src/app/sagas/volume_delete.rs @@ -332,6 +332,74 @@ async fn 
svd_delete_crucible_snapshot_records( /// be a different volume id (i.e. for a previously deleted disk) than the one /// in this saga's params struct. /// +/// It's insufficient to rely on the struct of CrucibleResources to clean up +/// that is returned as part of svd_decrease_crucible_resource_count. Imagine a +/// disk that is composed of three regions (a subset of +/// [`VolumeConstructionRequest`] is shown here): +/// +/// { +/// "type": "volume", +/// "id": "6b353c87-afac-4ee2-b71a-6fe35fcf9e46", +/// "sub_volumes": [ +/// { +/// "type": "region", +/// "opts": { +/// "targets": [ +/// "[fd00:1122:3344:101::5]:1000", +/// "[fd00:1122:3344:102::9]:1000", +/// "[fd00:1122:3344:103::2]:1000" +/// ], +/// "read_only": false +/// } +/// } +/// ], +/// "read_only_parent": null, +/// } +/// +/// Taking a snapshot of this will produce the following volume: +/// +/// { +/// "type": "volume", +/// "id": "1ef7282e-a3fb-4222-85a8-b16d3fbfd738", <-- new UUID +/// "sub_volumes": [ +/// { +/// "type": "region", +/// "opts": { +/// "targets": [ +/// "[fd00:1122:3344:101::5]:1001", <-- port changed +/// "[fd00:1122:3344:102::9]:1001", <-- port changed +/// "[fd00:1122:3344:103::2]:1001" <-- port changed +/// ], +/// "read_only": true <-- read_only now true +/// } +/// } +/// ], +/// "read_only_parent": null, +/// } +/// +/// The snapshot targets will use the same IP but different port: snapshots are +/// initially located on the same filesystem as their region. +/// +/// The disk's volume has no read only resources, while the snapshot's volume +/// does. The disk volume's targets are all regions (backed by downstairs that +/// are read/write) while the snapshot volume's targets are all snapshots +/// (backed by volumes that are read-only). The two volumes are linked in the +/// sense that the snapshots from the second are contained *within* the regions +/// of the first, reflecting the resource nesting from ZFS. This is also +/// reflected in the REST endpoint that the Crucible agent uses: +/// +/// /crucible/0/regions/{id}/snapshots/{name} +/// +/// If the disk is then deleted, the volume delete saga will run for the first +/// volume shown here. The CrucibleResources struct returned as part of +/// [`svd_decrease_crucible_resource_count`] will contain *nothing* to clean up: +/// the regions contain snapshots that are part of other volumes and cannot be +/// deleted, and the disk's volume doesn't reference any read-only resources. +/// +/// This is expected and normal: regions are "leaked" all the time due to +/// snapshots preventing their deletion. This part of the saga detects when +/// those regions can be cleaned up. +/// /// Note: each delete of a snapshot could trigger another delete of a region, if /// that region's use has gone to zero. A snapshot delete will never trigger /// another snapshot delete. @@ -353,42 +421,46 @@ async fn svd_delete_freed_crucible_regions( }, )?; - // Send DELETE calls to the corresponding Crucible agents - delete_crucible_regions( - log, + for (dataset, region, region_snapshot, volume) in freed_datasets_regions_and_volumes - .iter() - .map(|(d, r, _)| (d.clone(), r.clone())) - .collect(), - ) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "failed to delete_crucible_regions: {:?}", - e, - )) - })?; + { + if region_snapshot.is_some() { + // We cannot delete this region yet, the snapshot has not been + // deleted. 
This can occur when multiple volume delete sagas run + // concurrently: one will decrement the crucible resources (but + // hasn't made the appropriate DELETE calls to remove the running + // snapshots and snapshots yet), and the other will be here trying + // to delete the region. This race results in the crucible agent + // returning "must delete snapshots first" and causing saga unwinds. + // + // Another volume delete (probably the one racing with this one!) + // will pick up this region and remove it. + continue; + } + + // Send DELETE calls to the corresponding Crucible agents + delete_crucible_regions(log, vec![(dataset.clone(), region.clone())]) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "failed to delete_crucible_regions: {:?}", + e, + )) + })?; - // Remove region DB records - osagactx - .datastore() - .regions_hard_delete( - log, - freed_datasets_regions_and_volumes - .iter() - .map(|(_, r, _)| r.id()) - .collect(), - ) - .await - .map_err(|e| { - ActionError::action_failed(format!( - "failed to regions_hard_delete: {:?}", - e, - )) - })?; + // Remove region DB record + osagactx + .datastore() + .regions_hard_delete(log, vec![region.id()]) + .await + .map_err(|e| { + ActionError::action_failed(format!( + "failed to regions_hard_delete: {:?}", + e, + )) + })?; - // Remove volume DB records - for (_, _, volume) in &freed_datasets_regions_and_volumes { + // Remove volume DB record osagactx.datastore().volume_hard_delete(volume.id()).await.map_err( |e| { ActionError::action_failed(format!( diff --git a/nexus/src/app/session.rs b/nexus/src/app/session.rs index 891124e1ac..7adf1c9bdd 100644 --- a/nexus/src/app/session.rs +++ b/nexus/src/app/session.rs @@ -154,7 +154,7 @@ impl super::Nexus { | Error::Forbidden | Error::InternalError { .. } | Error::ServiceUnavailable { .. } - | Error::MethodNotAllowed { .. } + | Error::InsufficientCapacity { .. } | Error::TypeVersionMismatch { .. } | Error::Conflict { .. } => { Reason::UnknownError { source: error } diff --git a/nexus/src/app/silo.rs b/nexus/src/app/silo.rs index a6ffd8ef5e..f5f3fa00e7 100644 --- a/nexus/src/app/silo.rs +++ b/nexus/src/app/silo.rs @@ -822,25 +822,24 @@ impl super::Nexus { })?; let response = client.get(url).send().await.map_err(|e| { - Error::InvalidValue { - label: String::from("url"), - message: format!("error querying url: {}", e), - } + Error::invalid_value( + "url", + format!("error querying url: {e}"), + ) })?; if !response.status().is_success() { - return Err(Error::InvalidValue { - label: String::from("url"), - message: format!( - "querying url returned: {}", - response.status() - ), - }); + return Err(Error::invalid_value( + "url", + format!("querying url returned: {}", response.status()), + )); } - response.text().await.map_err(|e| Error::InvalidValue { - label: String::from("url"), - message: format!("error getting text from url: {}", e), + response.text().await.map_err(|e| { + Error::invalid_value( + "url", + format!("error getting text from url: {e}"), + ) })? 
} @@ -849,12 +848,11 @@ impl super::Nexus { &base64::engine::general_purpose::STANDARD, data, ) - .map_err(|e| Error::InvalidValue { - label: String::from("data"), - message: format!( - "error getting decoding base64 data: {}", - e - ), + .map_err(|e| { + Error::invalid_value( + "data", + format!("error getting decoding base64 data: {e}"), + ) })?; String::from_utf8_lossy(&bytes).into_owned() } diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index da89e7e25a..44efc2934e 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -8,6 +8,7 @@ use crate::internal_api::params::{ PhysicalDiskDeleteRequest, PhysicalDiskPutRequest, SledAgentStartupInfo, SledRole, ZpoolPutRequest, }; +use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; use nexus_db_queries::db::lookup; @@ -38,6 +39,9 @@ impl super::Nexus { // TODO-robustness we should have a limit on how many sled agents there can // be (for graceful degradation at large scale). + // + // TODO-multisled: This should not use the rack_id for the given nexus, + // unless the DNS lookups at sled-agent are only for rack-local nexuses. pub(crate) async fn upsert_sled( &self, opctx: &OpContext, @@ -51,7 +55,7 @@ impl super::Nexus { SledRole::Scrimlet => true, }; - let sled = db::model::Sled::new( + let sled = db::model::SledUpdate::new( id, info.sa_address, db::model::SledBaseboard { @@ -139,6 +143,20 @@ impl super::Nexus { .await } + /// Returns the old state. + pub(crate) async fn sled_set_provision_state( + &self, + opctx: &OpContext, + sled_lookup: &lookup::Sled<'_>, + state: db::model::SledProvisionState, + ) -> Result { + let (authz_sled,) = + sled_lookup.lookup_for(authz::Action::Modify).await?; + self.db_datastore + .sled_set_provision_state(opctx, &authz_sled, state) + .await + } + // Physical disks pub(crate) async fn sled_list_physical_disks( diff --git a/nexus/src/app/switch_interface.rs b/nexus/src/app/switch_interface.rs index cfb0541742..0acb2b7fe7 100644 --- a/nexus/src/app/switch_interface.rs +++ b/nexus/src/app/switch_interface.rs @@ -95,9 +95,9 @@ impl super::Nexus { pub fn validate_switch_location(switch_location: &str) -> Result<(), Error> { if switch_location != "switch0" && switch_location != "switch1" { - return Err(Error::InvalidRequest { - message: "Switch location must be switch0 or switch1".into(), - }); + return Err(Error::invalid_request( + "Switch location must be switch0 or switch1", + )); } Ok(()) } diff --git a/nexus/src/app/switch_port.rs b/nexus/src/app/switch_port.rs index acc57459fd..b9f0f94fa0 100644 --- a/nexus/src/app/switch_port.rs +++ b/nexus/src/app/switch_port.rs @@ -117,7 +117,6 @@ impl super::Nexus { .map_err(|e| { let msg = e.to_string(); if msg.contains("bad request") { - //return HttpError::for_client_error(None, StatusCode::BAD_REQUEST, msg.to_string()) external::Error::invalid_request(&msg.to_string()) } else { e @@ -255,7 +254,15 @@ impl super::Nexus { >( saga_params, ) - .await?; + .await + .map_err(|e| { + let msg = e.to_string(); + if msg.contains("bad request") { + external::Error::invalid_request(&msg.to_string()) + } else { + e + } + })?; Ok(()) } diff --git a/nexus/src/app/test_interfaces.rs b/nexus/src/app/test_interfaces.rs index ad2ea50e07..581b9a89bb 100644 --- a/nexus/src/app/test_interfaces.rs +++ b/nexus/src/app/test_interfaces.rs @@ -10,7 +10,9 @@ use sled_agent_client::Client as SledAgentClient; use std::sync::Arc; use uuid::Uuid; -pub use super::update::SpUpdateError; +pub use super::update::HostPhase1Updater; +pub use 
super::update::MgsClients; +pub use super::update::RotUpdater; pub use super::update::SpUpdater; pub use super::update::UpdateProgress; pub use gateway_client::types::SpType; diff --git a/nexus/src/app/update/common_sp_update.rs b/nexus/src/app/update/common_sp_update.rs new file mode 100644 index 0000000000..69a5b132a2 --- /dev/null +++ b/nexus/src/app/update/common_sp_update.rs @@ -0,0 +1,239 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Module containing implementation details shared amongst all MGS-to-SP-driven +//! updates. + +use super::MgsClients; +use super::UpdateProgress; +use gateway_client::types::SpType; +use gateway_client::types::SpUpdateStatus; +use slog::Logger; +use std::time::Duration; +use tokio::sync::watch; +use uuid::Uuid; + +type GatewayClientError = gateway_client::Error; + +/// Error type returned when an update to a component managed by the SP fails. +/// +/// Note that the SP manages itself, as well, so "SP component" here includes +/// the SP. +#[derive(Debug, thiserror::Error)] +pub enum SpComponentUpdateError { + #[error("error communicating with MGS")] + MgsCommunication(#[from] GatewayClientError), + #[error("different update is now preparing ({0})")] + DifferentUpdatePreparing(Uuid), + #[error("different update is now in progress ({0})")] + DifferentUpdateInProgress(Uuid), + #[error("different update is now complete ({0})")] + DifferentUpdateComplete(Uuid), + #[error("different update is now aborted ({0})")] + DifferentUpdateAborted(Uuid), + #[error("different update failed ({0})")] + DifferentUpdateFailed(Uuid), + #[error("update status lost (did the SP reset?)")] + UpdateStatusLost, + #[error("update was aborted")] + UpdateAborted, + #[error("update failed (error code {0})")] + UpdateFailedWithCode(u32), + #[error("update failed (error message {0})")] + UpdateFailedWithMessage(String), +} + +pub(super) trait SpComponentUpdater { + /// The target component. + /// + /// Should be produced via `SpComponent::const_as_str()`. + fn component(&self) -> &'static str; + + /// The type of the target SP. + fn target_sp_type(&self) -> SpType; + + /// The slot number of the target SP. + fn target_sp_slot(&self) -> u32; + + /// The target firmware slot for the component. + fn firmware_slot(&self) -> u16; + + /// The ID of this update. + fn update_id(&self) -> Uuid; + + /// The update payload data to send to MGS. + // TODO-performance This has to be convertible into a `reqwest::Body`, so we + // return an owned Vec. That requires all our implementors to clone the data + // at least once; maybe we should use `Bytes` instead (which is cheap to + // clone and also convertible into a reqwest::Body)? + fn update_data(&self) -> Vec; + + /// The sending half of the watch channel to report update progress. + fn progress(&self) -> &watch::Sender>; + + /// Logger to use while performing this update. + fn logger(&self) -> &Logger; +} + +pub(super) async fn deliver_update( + updater: &(dyn SpComponentUpdater + Send + Sync), + mgs_clients: &mut MgsClients, +) -> Result<(), SpComponentUpdateError> { + // How frequently do we poll MGS for the update progress? + const STATUS_POLL_INTERVAL: Duration = Duration::from_secs(3); + + // Start the update. 
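+    // try_all_serially only moves on to the next MGS instance when the
+    // current one fails with a communication error; any other error is
+    // returned immediately (see MgsClients::try_all_serially).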
+ mgs_clients + .try_all_serially(updater.logger(), |client| async move { + client + .sp_component_update( + updater.target_sp_type(), + updater.target_sp_slot(), + updater.component(), + updater.firmware_slot(), + &updater.update_id(), + reqwest::Body::from(updater.update_data()), + ) + .await?; + updater.progress().send_replace(Some(UpdateProgress::Started)); + info!( + updater.logger(), "update started"; + "mgs_addr" => client.baseurl(), + ); + Ok(()) + }) + .await?; + + // Wait for the update to complete. + loop { + let status = mgs_clients + .try_all_serially(updater.logger(), |client| async move { + let update_status = client + .sp_component_update_status( + updater.target_sp_type(), + updater.target_sp_slot(), + updater.component(), + ) + .await?; + + debug!( + updater.logger(), "got update status"; + "mgs_addr" => client.baseurl(), + "status" => ?update_status, + ); + + Ok(update_status) + }) + .await?; + + if status_is_complete( + status.into_inner(), + updater.update_id(), + updater.progress(), + updater.logger(), + )? { + updater.progress().send_replace(Some(UpdateProgress::InProgress { + progress: Some(1.0), + })); + return Ok(()); + } + + tokio::time::sleep(STATUS_POLL_INTERVAL).await; + } +} + +fn status_is_complete( + status: SpUpdateStatus, + update_id: Uuid, + progress_tx: &watch::Sender>, + log: &Logger, +) -> Result { + match status { + // For `Preparing` and `InProgress`, we could check the progress + // information returned by these steps and try to check that + // we're still _making_ progress, but every Nexus instance needs + // to do that anyway in case we (or the MGS instance delivering + // the update) crash, so we'll omit that check here. Instead, we + // just sleep and we'll poll again shortly. + SpUpdateStatus::Preparing { id, progress } => { + if id == update_id { + let progress = progress.and_then(|progress| { + if progress.current > progress.total { + warn!( + log, "nonsense preparing progress"; + "current" => progress.current, + "total" => progress.total, + ); + None + } else if progress.total == 0 { + None + } else { + Some( + f64::from(progress.current) + / f64::from(progress.total), + ) + } + }); + progress_tx + .send_replace(Some(UpdateProgress::Preparing { progress })); + Ok(false) + } else { + Err(SpComponentUpdateError::DifferentUpdatePreparing(id)) + } + } + SpUpdateStatus::InProgress { id, bytes_received, total_bytes } => { + if id == update_id { + let progress = if bytes_received > total_bytes { + warn!( + log, "nonsense update progress"; + "bytes_received" => bytes_received, + "total_bytes" => total_bytes, + ); + None + } else if total_bytes == 0 { + None + } else { + Some(f64::from(bytes_received) / f64::from(total_bytes)) + }; + progress_tx.send_replace(Some(UpdateProgress::InProgress { + progress, + })); + Ok(false) + } else { + Err(SpComponentUpdateError::DifferentUpdateInProgress(id)) + } + } + SpUpdateStatus::Complete { id } => { + if id == update_id { + Ok(true) + } else { + Err(SpComponentUpdateError::DifferentUpdateComplete(id)) + } + } + SpUpdateStatus::None => Err(SpComponentUpdateError::UpdateStatusLost), + SpUpdateStatus::Aborted { id } => { + if id == update_id { + Err(SpComponentUpdateError::UpdateAborted) + } else { + Err(SpComponentUpdateError::DifferentUpdateAborted(id)) + } + } + SpUpdateStatus::Failed { code, id } => { + if id == update_id { + Err(SpComponentUpdateError::UpdateFailedWithCode(code)) + } else { + Err(SpComponentUpdateError::DifferentUpdateFailed(id)) + } + } + SpUpdateStatus::RotError { id, message } => { + if 
id == update_id { + Err(SpComponentUpdateError::UpdateFailedWithMessage(format!( + "rot error: {message}" + ))) + } else { + Err(SpComponentUpdateError::DifferentUpdateFailed(id)) + } + } + } +} diff --git a/nexus/src/app/update/host_phase1_updater.rs b/nexus/src/app/update/host_phase1_updater.rs new file mode 100644 index 0000000000..fb013d0ffe --- /dev/null +++ b/nexus/src/app/update/host_phase1_updater.rs @@ -0,0 +1,177 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Module containing types for updating host OS phase1 images via MGS. + +use super::common_sp_update::deliver_update; +use super::common_sp_update::SpComponentUpdater; +use super::MgsClients; +use super::SpComponentUpdateError; +use super::UpdateProgress; +use gateway_client::types::SpComponentFirmwareSlot; +use gateway_client::types::SpType; +use gateway_client::SpComponent; +use slog::Logger; +use tokio::sync::watch; +use uuid::Uuid; + +type GatewayClientError = gateway_client::Error; + +pub struct HostPhase1Updater { + log: Logger, + progress: watch::Sender>, + sp_type: SpType, + sp_slot: u32, + target_host_slot: u16, + update_id: Uuid, + // TODO-clarity maybe a newtype for this? TBD how we get this from + // wherever it's stored, which might give us a stronger type already. + phase1_data: Vec, +} + +impl HostPhase1Updater { + pub fn new( + sp_type: SpType, + sp_slot: u32, + target_host_slot: u16, + update_id: Uuid, + phase1_data: Vec, + log: &Logger, + ) -> Self { + let log = log.new(slog::o!( + "component" => "HostPhase1Updater", + "sp_type" => format!("{sp_type:?}"), + "sp_slot" => sp_slot, + "target_host_slot" => target_host_slot, + "update_id" => format!("{update_id}"), + )); + let progress = watch::Sender::new(None); + Self { + log, + progress, + sp_type, + sp_slot, + target_host_slot, + update_id, + phase1_data, + } + } + + pub fn progress_watcher(&self) -> watch::Receiver> { + self.progress.subscribe() + } + + /// Drive this host phase 1 update to completion (or failure). + /// + /// Only one MGS instance is required to drive an update; however, if + /// multiple MGS instances are available and passed to this method and an + /// error occurs communicating with one instance, `HostPhase1Updater` will + /// try the remaining instances before failing. + pub async fn update( + mut self, + mgs_clients: &mut MgsClients, + ) -> Result<(), SpComponentUpdateError> { + // The async block below wants a `&self` reference, but we take `self` + // for API clarity (to start a new update, the caller should construct a + // new instance of the updater). Create a `&self` ref that we use + // through the remainder of this method. + let me = &self; + + // Prior to delivering the update, ensure the correct target slot is + // activated. + // + // TODO-correctness Should we be doing this, or should a higher level + // executor set this up before calling us? + mgs_clients + .try_all_serially(&self.log, |client| async move { + me.mark_target_slot_active(&client).await + }) + .await?; + + // Deliver and drive the update to completion + deliver_update(&mut self, mgs_clients).await?; + + // Unlike SP and RoT updates, we have nothing to do after delivery of + // the update completes; signal to any watchers that we're done. 
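+        // (The only host-phase-1-specific step, marking the target slot
+        // active, already happened before delivery in mark_target_slot_active
+        // below.)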
+        self.progress.send_replace(Some(UpdateProgress::Complete));
+
+        // wait for any progress watchers to be dropped before we return;
+        // otherwise, they'll get `RecvError`s when trying to check the current
+        // status
+        self.progress.closed().await;
+
+        Ok(())
+    }
+
+    async fn mark_target_slot_active(
+        &self,
+        client: &gateway_client::Client,
+    ) -> Result<(), GatewayClientError> {
+        // TODO-correctness Should we always persist this choice?
+        let persist = true;
+
+        let slot = self.firmware_slot();
+
+        // TODO-correctness Until
+        // https://github.com/oxidecomputer/hubris/issues/1172 is fixed, the
+        // host must be in A2 for this operation to succeed. After it is fixed,
+        // there will still be a window while a host is booting where this
+        // operation can fail. How do we handle this?
+        client
+            .sp_component_active_slot_set(
+                self.sp_type,
+                self.sp_slot,
+                self.component(),
+                persist,
+                &SpComponentFirmwareSlot { slot },
+            )
+            .await?;
+
+        // TODO-correctness Should we send some kind of update to
+        // `self.progress`? We haven't actually started delivering an update
+        // yet, but it seems weird to give no indication that we have
+        // successfully (potentially) modified the state of the target sled.
+
+        info!(
+            self.log, "host phase1 target slot marked active";
+            "mgs_addr" => client.baseurl(),
+        );
+
+        Ok(())
+    }
+}
+
+impl SpComponentUpdater for HostPhase1Updater {
+    fn component(&self) -> &'static str {
+        SpComponent::HOST_CPU_BOOT_FLASH.const_as_str()
+    }
+
+    fn target_sp_type(&self) -> SpType {
+        self.sp_type
+    }
+
+    fn target_sp_slot(&self) -> u32 {
+        self.sp_slot
+    }
+
+    fn firmware_slot(&self) -> u16 {
+        self.target_host_slot
+    }
+
+    fn update_id(&self) -> Uuid {
+        self.update_id
+    }
+
+    fn update_data(&self) -> Vec<u8> {
+        self.phase1_data.clone()
+    }
+
+    fn progress(&self) -> &watch::Sender<Option<UpdateProgress>> {
+        &self.progress
+    }
+
+    fn logger(&self) -> &Logger {
+        &self.log
+    }
+}
diff --git a/nexus/src/app/update/mgs_clients.rs b/nexus/src/app/update/mgs_clients.rs
new file mode 100644
index 0000000000..4b200a1819
--- /dev/null
+++ b/nexus/src/app/update/mgs_clients.rs
@@ -0,0 +1,94 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Module providing support for handling failover between multiple MGS clients
+
+use futures::Future;
+use gateway_client::Client;
+use slog::Logger;
+use std::collections::VecDeque;
+use std::sync::Arc;
+
+pub(super) type GatewayClientError =
+    gateway_client::Error<gateway_client::types::Error>;
+
+#[derive(Debug, Clone)]
+pub struct MgsClients {
+    clients: VecDeque<Arc<Client>>,
+}
+
+impl MgsClients {
+    /// Create a new `MgsClients` with the given `clients`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `clients` is empty.
+    pub fn new<T: Into<VecDeque<Arc<Client>>>>(clients: T) -> Self {
+        let clients = clients.into();
+        assert!(!clients.is_empty());
+        Self { clients }
+    }
+
+    /// Create a new `MgsClients` with the given `clients`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `clients` is empty.
+    pub fn from_clients<I: IntoIterator<Item = Client>>(clients: I) -> Self {
+        let clients = clients
+            .into_iter()
+            .map(Arc::new)
+            .collect::<VecDeque<Arc<Client>>>();
+        Self::new(clients)
+    }
+
+    /// Run `op` against all clients in sequence until either one succeeds (in
+    /// which case the success value is returned), one fails with a
+    /// non-communication error (in which case that error is returned), or all
+    /// of them fail with communication errors (in which case the communication
+    /// error from the last-attempted client is returned).
+    ///
+    /// On a successful return, the internal client list will be reordered so
+    /// any future accesses will attempt the most-recently-successful client.
+    pub(super) async fn try_all_serially<T, F, Fut>(
+        &mut self,
+        log: &Logger,
+        op: F,
+    ) -> Result<T, GatewayClientError>
+    where
+        // Seems like it would be nicer to take `&Client` here instead of
+        // needing to clone each `Arc`, but there's currently no decent way of
+        // doing that without boxing the returned future:
+        // https://users.rust-lang.org/t/how-to-express-that-the-future-returned-by-a-closure-lives-only-as-long-as-its-argument/90039/10
+        F: Fn(Arc<Client>) -> Fut,
+        Fut: Future<Output = Result<T, GatewayClientError>>,
+    {
+        let mut last_err = None;
+        for (i, client) in self.clients.iter().enumerate() {
+            match op(Arc::clone(client)).await {
+                Ok(value) => {
+                    self.clients.rotate_left(i);
+                    return Ok(value);
+                }
+                Err(GatewayClientError::CommunicationError(err)) => {
+                    if i < self.clients.len() {
+                        warn!(
+                            log, "communication error with MGS; \
+                                  will try next client";
+                            "mgs_addr" => client.baseurl(),
+                            "err" => %err,
+                        );
+                    }
+                    last_err = Some(err);
+                    continue;
+                }
+                Err(err) => return Err(err),
+            }
+        }
+
+        // The only way to get here is if all clients failed with communication
+        // errors. Return the error from the last MGS we tried.
+        Err(GatewayClientError::CommunicationError(last_err.unwrap()))
+    }
+}
diff --git a/nexus/src/app/update/mod.rs b/nexus/src/app/update/mod.rs
index 4196cd8a71..36d4dbcb9e 100644
--- a/nexus/src/app/update/mod.rs
+++ b/nexus/src/app/update/mod.rs
@@ -26,9 +26,26 @@ use std::path::Path;
 use tokio::io::AsyncWriteExt;
 use uuid::Uuid;
 
+mod common_sp_update;
+mod host_phase1_updater;
+mod mgs_clients;
+mod rot_updater;
 mod sp_updater;
 
-pub use sp_updater::{SpUpdateError, SpUpdater, UpdateProgress};
+pub use common_sp_update::SpComponentUpdateError;
+pub use host_phase1_updater::HostPhase1Updater;
+pub use mgs_clients::MgsClients;
+pub use rot_updater::RotUpdater;
+pub use sp_updater::SpUpdater;
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum UpdateProgress {
+    Started,
+    Preparing { progress: Option<f64> },
+    InProgress { progress: Option<f64> },
+    Complete,
+    Failed(String),
+}
 
 static BASE_ARTIFACT_DIR: &str = "/var/tmp/oxide_artifacts";
 
@@ -51,14 +68,10 @@ impl super::Nexus {
         opctx.authorize(authz::Action::Modify, &authz::FLEET).await?;
 
         let updates_config = self.updates_config.as_ref().ok_or_else(|| {
-            Error::InvalidRequest {
-                message: "updates system not configured".into(),
-            }
+            Error::invalid_request("updates system not configured")
         })?;
         let base_url = self.tuf_base_url(opctx).await?.ok_or_else(|| {
-            Error::InvalidRequest {
-                message: "updates system not configured".into(),
-            }
+            Error::invalid_request("updates system not configured")
         })?;
         let trusted_root = tokio::fs::read(&updates_config.trusted_root)
             .await
             .map_err(|e| Error::InternalError {
                 internal_message: format!(
                     "error trying to read trusted root: {}", e
                 ),
             })?;
 
-        let artifacts = tokio::task::spawn_blocking(move || {
-            crate::updates::read_artifacts(&trusted_root, base_url)
-        })
-        .await
-        .unwrap()
-        .map_err(|e| Error::InternalError {
-            internal_message: format!("error trying to
refresh updates: {}", e), - })?; + let artifacts = crate::updates::read_artifacts(&trusted_root, base_url) + .await + .map_err(|e| Error::InternalError { + internal_message: format!( + "error trying to refresh updates: {}", + e + ), + })?; // FIXME: if we hit an error in any of these database calls, the // available artifact table will be out of sync with the current @@ -141,9 +154,7 @@ impl super::Nexus { ) -> Result, Error> { let mut base_url = self.tuf_base_url(opctx).await?.ok_or_else(|| { - Error::InvalidRequest { - message: "updates system not configured".into(), - } + Error::invalid_request("updates system not configured") })?; if !base_url.ends_with('/') { base_url.push('/'); diff --git a/nexus/src/app/update/rot_updater.rs b/nexus/src/app/update/rot_updater.rs new file mode 100644 index 0000000000..12126a7de9 --- /dev/null +++ b/nexus/src/app/update/rot_updater.rs @@ -0,0 +1,193 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Module containing types for updating RoTs via MGS. + +use super::common_sp_update::deliver_update; +use super::common_sp_update::SpComponentUpdater; +use super::MgsClients; +use super::SpComponentUpdateError; +use super::UpdateProgress; +use gateway_client::types::RotSlot; +use gateway_client::types::SpComponentFirmwareSlot; +use gateway_client::types::SpType; +use gateway_client::SpComponent; +use slog::Logger; +use tokio::sync::watch; +use uuid::Uuid; + +type GatewayClientError = gateway_client::Error; + +pub struct RotUpdater { + log: Logger, + progress: watch::Sender>, + sp_type: SpType, + sp_slot: u32, + target_rot_slot: RotSlot, + update_id: Uuid, + // TODO-clarity maybe a newtype for this? TBD how we get this from + // wherever it's stored, which might give us a stronger type already. + rot_hubris_archive: Vec, +} + +impl RotUpdater { + pub fn new( + sp_type: SpType, + sp_slot: u32, + target_rot_slot: RotSlot, + update_id: Uuid, + rot_hubris_archive: Vec, + log: &Logger, + ) -> Self { + let log = log.new(slog::o!( + "component" => "RotUpdater", + "sp_type" => format!("{sp_type:?}"), + "sp_slot" => sp_slot, + "target_rot_slot" => format!("{target_rot_slot:?}"), + "update_id" => format!("{update_id}"), + )); + let progress = watch::Sender::new(None); + Self { + log, + progress, + sp_type, + sp_slot, + target_rot_slot, + update_id, + rot_hubris_archive, + } + } + + pub fn progress_watcher(&self) -> watch::Receiver> { + self.progress.subscribe() + } + + /// Drive this RoT update to completion (or failure). + /// + /// Only one MGS instance is required to drive an update; however, if + /// multiple MGS instances are available and passed to this method and an + /// error occurs communicating with one instance, `RotUpdater` will try the + /// remaining instances before failing. + pub async fn update( + mut self, + mgs_clients: &mut MgsClients, + ) -> Result<(), SpComponentUpdateError> { + // Deliver and drive the update to "completion" (which isn't really + // complete for the RoT, since we still have to do the steps below after + // the delivery of the update completes). + deliver_update(&mut self, mgs_clients).await?; + + // The async blocks below want `&self` references, but we take `self` + // for API clarity (to start a new update, the caller should construct a + // new updater). Create a `&self` ref that we use through the remainder + // of this method. 
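For a sense of how the pieces added in this patch compose, a caller might drive a complete RoT update roughly like this (a sketch only; `log` and `archive` are hypothetical stand-ins, and the client construction mirrors the integration tests later in this diff):

    let mut mgs_clients = MgsClients::from_clients([gateway_client::Client::new(
        "http://[::1]:12225", // hypothetical MGS address
        log.clone(),
    )]);
    let updater = RotUpdater::new(
        SpType::Sled,    // the kind of SP whose RoT we're updating
        0,               // SP slot (cubby) number
        RotSlot::B,      // which RoT bank to write
        Uuid::new_v4(),  // identifies this update attempt
        archive,         // Vec<u8> hubris archive, assumed in scope
        &log,
    );
    updater.update(&mut mgs_clients).await?;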
+ let me = &self; + + mgs_clients + .try_all_serially(&self.log, |client| async move { + me.mark_target_slot_active(&client).await + }) + .await?; + + mgs_clients + .try_all_serially(&self.log, |client| async move { + me.finalize_update_via_reset(&client).await + }) + .await?; + + // wait for any progress watchers to be dropped before we return; + // otherwise, they'll get `RecvError`s when trying to check the current + // status + self.progress.closed().await; + + Ok(()) + } + + async fn mark_target_slot_active( + &self, + client: &gateway_client::Client, + ) -> Result<(), GatewayClientError> { + // RoT currently doesn't support non-persistent slot swapping, so always + // tell it to persist our choice. + let persist = true; + + let slot = self.firmware_slot(); + + client + .sp_component_active_slot_set( + self.sp_type, + self.sp_slot, + self.component(), + persist, + &SpComponentFirmwareSlot { slot }, + ) + .await?; + + // TODO-correctness Should we send some kind of update to + // `self.progress`? We already sent `InProgress(1.0)` when the update + // finished delivering. Or perhaps we shouldn't even be doing this step + // and the reset, and let our caller handle the finalization? + + info!( + self.log, "RoT target slot marked active"; + "mgs_addr" => client.baseurl(), + ); + + Ok(()) + } + + async fn finalize_update_via_reset( + &self, + client: &gateway_client::Client, + ) -> Result<(), GatewayClientError> { + client + .sp_component_reset(self.sp_type, self.sp_slot, self.component()) + .await?; + + self.progress.send_replace(Some(UpdateProgress::Complete)); + info!( + self.log, "RoT update complete"; + "mgs_addr" => client.baseurl(), + ); + + Ok(()) + } +} + +impl SpComponentUpdater for RotUpdater { + fn component(&self) -> &'static str { + SpComponent::ROT.const_as_str() + } + + fn target_sp_type(&self) -> SpType { + self.sp_type + } + + fn target_sp_slot(&self) -> u32 { + self.sp_slot + } + + fn firmware_slot(&self) -> u16 { + match self.target_rot_slot { + RotSlot::A => 0, + RotSlot::B => 1, + } + } + + fn update_id(&self) -> Uuid { + self.update_id + } + + fn update_data(&self) -> Vec { + self.rot_hubris_archive.clone() + } + + fn progress(&self) -> &watch::Sender> { + &self.progress + } + + fn logger(&self) -> &Logger { + &self.log + } +} diff --git a/nexus/src/app/update/sp_updater.rs b/nexus/src/app/update/sp_updater.rs index 9abb2ad222..2a6ddc6de6 100644 --- a/nexus/src/app/update/sp_updater.rs +++ b/nexus/src/app/update/sp_updater.rs @@ -4,40 +4,19 @@ //! Module containing types for updating SPs via MGS. -use futures::Future; +use super::common_sp_update::deliver_update; +use super::common_sp_update::SpComponentUpdater; +use super::MgsClients; +use super::SpComponentUpdateError; +use super::UpdateProgress; use gateway_client::types::SpType; -use gateway_client::types::SpUpdateStatus; use gateway_client::SpComponent; use slog::Logger; -use std::collections::VecDeque; -use std::sync::Arc; -use std::time::Duration; use tokio::sync::watch; use uuid::Uuid; type GatewayClientError = gateway_client::Error; -#[derive(Debug, thiserror::Error)] -pub enum SpUpdateError { - #[error("error communicating with MGS")] - MgsCommunication(#[from] GatewayClientError), - - // Error returned when we successfully start an update but it fails to - // complete successfully. - #[error("update failed to complete: {0}")] - FailedToComplete(String), -} - -// TODO-cleanup Probably share this with other update implementations? 
-#[derive(Debug, PartialEq, Clone)] -pub enum UpdateProgress { - Started, - Preparing { progress: Option }, - InProgress { progress: Option }, - Complete, - Failed(String), -} - pub struct SpUpdater { log: Logger, progress: watch::Sender>, @@ -58,6 +37,7 @@ impl SpUpdater { log: &Logger, ) -> Self { let log = log.new(slog::o!( + "component" => "SpUpdater", "sp_type" => format!("{sp_type:?}"), "sp_slot" => sp_slot, "update_id" => format!("{update_id}"), @@ -76,270 +56,86 @@ impl SpUpdater { /// multiple MGS instances are available and passed to this method and an /// error occurs communicating with one instance, `SpUpdater` will try the /// remaining instances before failing. - /// - /// # Panics - /// - /// If `mgs_clients` is empty. - pub async fn update>>>( - self, - mgs_clients: T, - ) -> Result<(), SpUpdateError> { - let mut mgs_clients = mgs_clients.into(); - assert!(!mgs_clients.is_empty()); - - // The async blocks below want `&self` references, but we take `self` + pub async fn update( + mut self, + mgs_clients: &mut MgsClients, + ) -> Result<(), SpComponentUpdateError> { + // Deliver and drive the update to "completion" (which isn't really + // complete for the SP, since we still have to reset it after the + // delivery of the update completes). + deliver_update(&mut self, mgs_clients).await?; + + // The async block below wants a `&self` reference, but we take `self` // for API clarity (to start a new SP update, the caller should // construct a new `SpUpdater`). Create a `&self` ref that we use // through the remainder of this method. let me = &self; - me.try_all_mgs_clients(&mut mgs_clients, |client| async move { - me.start_update_one_mgs(&client).await - }) - .await?; - - // `wait_for_update_completion` uses `try_all_mgs_clients` internally, - // so we don't wrap it here. - me.wait_for_update_completion(&mut mgs_clients).await?; + mgs_clients + .try_all_serially(&self.log, |client| async move { + me.finalize_update_via_reset(&client).await + }) + .await?; - me.try_all_mgs_clients(&mut mgs_clients, |client| async move { - me.finalize_update_via_reset_one_mgs(&client).await - }) - .await?; + // wait for any progress watchers to be dropped before we return; + // otherwise, they'll get `RecvError`s when trying to check the current + // status + self.progress.closed().await; Ok(()) } - // Helper method to run `op` against all clients. If `op` returns - // successfully for one client, that client will be rotated to the front of - // the list (so any subsequent operations can start with the first client). - async fn try_all_mgs_clients( - &self, - mgs_clients: &mut VecDeque>, - op: F, - ) -> Result - where - F: Fn(Arc) -> Fut, - Fut: Future>, - { - let mut last_err = None; - for (i, client) in mgs_clients.iter().enumerate() { - match op(Arc::clone(client)).await { - Ok(val) => { - // Shift our list of MGS clients such that the one we just - // used is at the front for subsequent requests. - mgs_clients.rotate_left(i); - return Ok(val); - } - // If we have an error communicating with an MGS instance - // (timeout, unexpected connection close, etc.), we'll move on - // and try the next MGS client. If this was the last client, - // we'll stash the error in `last_err` and return it below the - // loop. - Err(GatewayClientError::CommunicationError(err)) => { - last_err = Some(err); - continue; - } - Err(err) => return Err(err), - } - } - - // We know we have at least one `mgs_client`, so the only way to get - // here is if all clients failed with connection errors. 
Return the - // error from the last MGS we tried. - Err(GatewayClientError::CommunicationError(last_err.unwrap())) - } - - async fn start_update_one_mgs( + async fn finalize_update_via_reset( &self, client: &gateway_client::Client, ) -> Result<(), GatewayClientError> { - // The SP has two firmware slots, but they're aren't individually - // labled. We always request an update to slot 0, which means "the - // inactive slot". - let firmware_slot = 0; - - // Start the update. client - .sp_component_update( - self.sp_type, - self.sp_slot, - SpComponent::SP_ITSELF.const_as_str(), - firmware_slot, - &self.update_id, - reqwest::Body::from(self.sp_hubris_archive.clone()), - ) + .sp_component_reset(self.sp_type, self.sp_slot, self.component()) .await?; - self.progress.send_replace(Some(UpdateProgress::Started)); - + self.progress.send_replace(Some(UpdateProgress::Complete)); info!( - self.log, "SP update started"; + self.log, "SP update complete"; "mgs_addr" => client.baseurl(), ); Ok(()) } +} - async fn wait_for_update_completion( - &self, - mgs_clients: &mut VecDeque>, - ) -> Result<(), SpUpdateError> { - // How frequently do we poll MGS for the update progress? - const STATUS_POLL_INTERVAL: Duration = Duration::from_secs(3); - - loop { - let update_status = self - .try_all_mgs_clients(mgs_clients, |client| async move { - let update_status = client - .sp_component_update_status( - self.sp_type, - self.sp_slot, - SpComponent::SP_ITSELF.const_as_str(), - ) - .await?; +impl SpComponentUpdater for SpUpdater { + fn component(&self) -> &'static str { + SpComponent::SP_ITSELF.const_as_str() + } - info!( - self.log, "got SP update status"; - "mgs_addr" => client.baseurl(), - "status" => ?update_status, - ); + fn target_sp_type(&self) -> SpType { + self.sp_type + } - Ok(update_status) - }) - .await? - .into_inner(); + fn target_sp_slot(&self) -> u32 { + self.sp_slot + } - // The majority of possible update statuses indicate failure; we'll - // handle the small number of non-failure cases by either - // `continue`ing or `return`ing; all other branches will give us an - // error string that we can report. - let error_message = match update_status { - // For `Preparing` and `InProgress`, we could check the progress - // information returned by these steps and try to check that - // we're still _making_ progress, but every Nexus instance needs - // to do that anyway in case we (or the MGS instance delivering - // the update) crash, so we'll omit that check here. Instead, we - // just sleep and we'll poll again shortly. 
- SpUpdateStatus::Preparing { id, progress } => { - if id == self.update_id { - let progress = progress.and_then(|progress| { - if progress.current > progress.total { - warn!( - self.log, "nonsense SP preparing progress"; - "current" => progress.current, - "total" => progress.total, - ); - None - } else if progress.total == 0 { - None - } else { - Some( - f64::from(progress.current) - / f64::from(progress.total), - ) - } - }); - self.progress.send_replace(Some( - UpdateProgress::Preparing { progress }, - )); - tokio::time::sleep(STATUS_POLL_INTERVAL).await; - continue; - } else { - format!("different update is now preparing ({id})") - } - } - SpUpdateStatus::InProgress { - id, - bytes_received, - total_bytes, - } => { - if id == self.update_id { - let progress = if bytes_received > total_bytes { - warn!( - self.log, "nonsense SP progress"; - "bytes_received" => bytes_received, - "total_bytes" => total_bytes, - ); - None - } else if total_bytes == 0 { - None - } else { - Some( - f64::from(bytes_received) - / f64::from(total_bytes), - ) - }; - self.progress.send_replace(Some( - UpdateProgress::InProgress { progress }, - )); - tokio::time::sleep(STATUS_POLL_INTERVAL).await; - continue; - } else { - format!("different update is now in progress ({id})") - } - } - SpUpdateStatus::Complete { id } => { - if id == self.update_id { - self.progress.send_replace(Some( - UpdateProgress::InProgress { progress: Some(1.0) }, - )); - return Ok(()); - } else { - format!("different update is now in complete ({id})") - } - } - SpUpdateStatus::None => { - "update status lost (did the SP reset?)".to_string() - } - SpUpdateStatus::Aborted { id } => { - if id == self.update_id { - "update was aborted".to_string() - } else { - format!("different update is now in complete ({id})") - } - } - SpUpdateStatus::Failed { code, id } => { - if id == self.update_id { - format!("update failed (error code {code})") - } else { - format!("different update failed ({id})") - } - } - SpUpdateStatus::RotError { id, message } => { - if id == self.update_id { - format!("update failed (rot error: {message})") - } else { - format!("different update failed with rot error ({id})") - } - } - }; + fn firmware_slot(&self) -> u16 { + // The SP has two firmware slots, but they're aren't individually + // labled. We always request an update to slot 0, which means "the + // inactive slot". + 0 + } - self.progress.send_replace(Some(UpdateProgress::Failed( - error_message.clone(), - ))); - return Err(SpUpdateError::FailedToComplete(error_message)); - } + fn update_id(&self) -> Uuid { + self.update_id } - async fn finalize_update_via_reset_one_mgs( - &self, - client: &gateway_client::Client, - ) -> Result<(), GatewayClientError> { - client - .sp_component_reset( - self.sp_type, - self.sp_slot, - SpComponent::SP_ITSELF.const_as_str(), - ) - .await?; + fn update_data(&self) -> Vec { + self.sp_hubris_archive.clone() + } - self.progress.send_replace(Some(UpdateProgress::Complete)); - info!( - self.log, "SP update complete"; - "mgs_addr" => client.baseurl(), - ); + fn progress(&self) -> &watch::Sender> { + &self.progress + } - Ok(()) + fn logger(&self) -> &Logger { + &self.log } } diff --git a/nexus/src/app/vpc_router.rs b/nexus/src/app/vpc_router.rs index 81577f88e8..523a450bbd 100644 --- a/nexus/src/app/vpc_router.rs +++ b/nexus/src/app/vpc_router.rs @@ -129,9 +129,7 @@ impl super::Nexus { // router kind cannot be changed, but it might be able to save us a // database round-trip. 
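One behavioral consequence of the error-constructor swap in this hunk and the ones below is worth noting: `Error::MethodNotAllowed` maps to HTTP 405 in the external API while `Error::invalid_request` produces HTTP 400, so deleting a system router (or modifying/deleting a system route) now returns 400 Bad Request rather than 405. The floating-IP tests later in this diff assert 400 for the analogous delete-not-allowed case.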
if db_router.kind == VpcRouterKind::System { - return Err(Error::MethodNotAllowed { - internal_message: "Cannot delete system router".to_string(), - }); + return Err(Error::invalid_request("Cannot delete system router")); } self.db_datastore.vpc_delete_router(opctx, &authz_router).await } @@ -229,14 +227,12 @@ impl super::Nexus { match db_route.kind.0 { RouterRouteKind::Custom | RouterRouteKind::Default => (), _ => { - return Err(Error::MethodNotAllowed { - internal_message: format!( - "routes of type {} from the system table of VPC {:?} \ + return Err(Error::invalid_request(format!( + "routes of type {} from the system table of VPC {:?} \ are not modifiable", - db_route.kind.0, - vpc.id() - ), - }) + db_route.kind.0, + vpc.id() + ))); } } self.db_datastore @@ -255,10 +251,9 @@ impl super::Nexus { // Only custom routes can be deleted // TODO Shouldn't this constraint be checked by the database query? if db_route.kind.0 != RouterRouteKind::Custom { - return Err(Error::MethodNotAllowed { - internal_message: "DELETE not allowed on system routes" - .to_string(), - }); + return Err(Error::invalid_request( + "DELETE not allowed on system routes", + )); } self.db_datastore.router_delete_route(opctx, &authz_route).await } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 428632bcf5..a6fd7a3ccb 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -6,10 +6,11 @@ use super::{ console_api, device_auth, params, + shared::UninitializedSled, views::{ self, Certificate, Group, IdentityProvider, Image, IpPool, IpPoolRange, - PhysicalDisk, Project, Rack, Role, Silo, Sled, Snapshot, SshKey, - UninitializedSled, User, UserBuiltin, Vpc, VpcRouter, VpcSubnet, + PhysicalDisk, Project, Rack, Role, Silo, Sled, Snapshot, SshKey, User, + UserBuiltin, Vpc, VpcRouter, VpcSubnet, }, }; use crate::external_api::shared; @@ -139,6 +140,11 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(ip_pool_service_range_add)?; api.register(ip_pool_service_range_remove)?; + api.register(floating_ip_list)?; + api.register(floating_ip_create)?; + api.register(floating_ip_view)?; + api.register(floating_ip_delete)?; + api.register(disk_list)?; api.register(disk_create)?; api.register(disk_view)?; @@ -148,7 +154,6 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(disk_bulk_write_import_start)?; api.register(disk_bulk_write_import)?; api.register(disk_bulk_write_import_stop)?; - api.register(disk_import_blocks_from_url)?; api.register(disk_finalize_import)?; api.register(instance_list)?; @@ -217,12 +222,14 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(rack_view)?; api.register(sled_list)?; api.register(sled_view)?; + api.register(sled_set_provision_state)?; api.register(sled_instance_list)?; api.register(sled_physical_disk_list)?; api.register(physical_disk_list)?; api.register(switch_list)?; api.register(switch_view)?; api.register(uninitialized_sled_list)?; + api.register(add_sled_to_initialized_rack)?; api.register(user_builtin_list)?; api.register(user_builtin_view)?; @@ -1518,6 +1525,126 @@ async fn ip_pool_service_range_remove( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +// Floating IP Addresses + +/// List all Floating IPs +#[endpoint { + method = GET, + path = "/v1/floating-ips", + tags = ["floating-ips"], +}] +async fn floating_ip_list( + rqctx: RequestContext>, + query_params: Query>, +) -> Result>, HttpError> { + let 
apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let project_lookup = + nexus.project_lookup(&opctx, scan_params.selector.clone())?; + let ips = nexus + .floating_ips_list(&opctx, &project_lookup, &paginated_by) + .await?; + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + ips, + &marker_for_name_or_id, + )?)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Create a Floating IP +#[endpoint { + method = POST, + path = "/v1/floating-ips", + tags = ["floating-ips"], +}] +async fn floating_ip_create( + rqctx: RequestContext>, + query_params: Query, + floating_params: TypedBody, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let nexus = &apictx.nexus; + let floating_params = floating_params.into_inner(); + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let project_lookup = + nexus.project_lookup(&opctx, query_params.into_inner())?; + let ip = nexus + .floating_ip_create(&opctx, &project_lookup, floating_params) + .await?; + Ok(HttpResponseCreated(ip)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Delete a Floating IP +#[endpoint { + method = DELETE, + path = "/v1/floating-ips/{floating_ip}", + tags = ["floating-ips"], +}] +async fn floating_ip_delete( + rqctx: RequestContext>, + path_params: Path, + query_params: Query, +) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let floating_ip_selector = params::FloatingIpSelector { + floating_ip: path.floating_ip, + project: query.project, + }; + let fip_lookup = + nexus.floating_ip_lookup(&opctx, floating_ip_selector)?; + + nexus.floating_ip_delete(&opctx, fip_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + +/// Fetch a floating IP +#[endpoint { + method = GET, + path = "/v1/floating-ips/{floating_ip}", + tags = ["floating-ips"] +}] +async fn floating_ip_view( + rqctx: RequestContext>, + path_params: Path, + query_params: Query, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let floating_ip_selector = params::FloatingIpSelector { + floating_ip: path.floating_ip, + project: query.project, + }; + let (.., fip) = nexus + .floating_ip_lookup(&opctx, floating_ip_selector)? 
+ .fetch() + .await?; + Ok(HttpResponseOk(fip.into())) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + // Disks /// List disks @@ -1788,39 +1915,6 @@ async fn disk_bulk_write_import_stop( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/// Request to import blocks from URL -#[endpoint { - method = POST, - path = "/v1/disks/{disk}/import", - tags = ["disks"], -}] -async fn disk_import_blocks_from_url( - rqctx: RequestContext>, - path_params: Path, - query_params: Query, - import_params: TypedBody, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let params = import_params.into_inner(); - - let disk_selector = - params::DiskSelector { disk: path.disk, project: query.project }; - let disk_lookup = nexus.disk_lookup(&opctx, disk_selector)?; - - nexus - .import_blocks_from_url_for_disk(&opctx, &disk_lookup, params) - .await?; - - Ok(HttpResponseUpdatedNoContent()) - }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await -} - /// Confirm disk block import completion #[endpoint { method = POST, @@ -4402,6 +4496,31 @@ async fn uninitialized_sled_list( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// Add a sled to an initialized rack +// +// TODO: In the future this should really be a PUT request, once we resolve +// https://github.com/oxidecomputer/omicron/issues/4494. It should also +// explicitly be tied to a rack via a `rack_id` path param. For now we assume +// we are only operating on single rack systems. +#[endpoint { + method = POST, + path = "/v1/system/hardware/sleds/", + tags = ["system/hardware"] +}] +async fn add_sled_to_initialized_rack( + rqctx: RequestContext>, + sled: TypedBody, +) -> Result { + let apictx = rqctx.context(); + let nexus = &apictx.nexus; + let handler = async { + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + nexus.add_sled_to_initialized_rack(&opctx, sled.into_inner()).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + // Sleds /// List sleds @@ -4456,6 +4575,44 @@ async fn sled_view( apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// Set the sled's provision state. +#[endpoint { + method = PUT, + path = "/v1/system/hardware/sleds/{sled_id}/provision-state", + tags = ["system/hardware"], +}] +async fn sled_set_provision_state( + rqctx: RequestContext>, + path_params: Path, + new_provision_state: TypedBody, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.nexus; + + let path = path_params.into_inner(); + let provision_state = new_provision_state.into_inner().state; + + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + // Convert the external `SledProvisionState` into our internal data model. 
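For reference, the new endpoint is exercised with a `PUT` to `/v1/system/hardware/sleds/{sled_id}/provision-state` whose body is a `SledProvisionStateParams` JSON object, e.g. `{"state": "non_provisionable"}` (the snake_case variant spelling is an assumption based on this API's usual serde conventions), and the 200 response is a `SledProvisionStateResponse` carrying both `old_state` and `new_state`.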
+ let new_state = db::model::SledProvisionState::from(provision_state); + + let sled_lookup = nexus.sled_lookup(&opctx, &path.sled_id)?; + + let old_state = nexus + .sled_set_provision_state(&opctx, &sled_lookup, new_state) + .await?; + + let response = params::SledProvisionStateResponse { + old_state: old_state.into(), + new_state: new_state.into(), + }; + + Ok(HttpResponseOk(response)) + }; + apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + /// List instances running on a given sled #[endpoint { method = GET, @@ -5241,10 +5398,7 @@ async fn role_list( WhichPage::First(..) => None, WhichPage::Next(RolePage { last_seen }) => { Some(last_seen.split_once('.').ok_or_else(|| { - Error::InvalidValue { - label: last_seen.clone(), - message: String::from("bad page token"), - } + Error::invalid_value(last_seen.clone(), "bad page token") })?) .map(|(s1, s2)| (s1.to_string(), s2.to_string())) } diff --git a/nexus/src/external_api/tag-config.json b/nexus/src/external_api/tag-config.json index 07eb198016..3bc8006cee 100644 --- a/nexus/src/external_api/tag-config.json +++ b/nexus/src/external_api/tag-config.json @@ -8,6 +8,12 @@ "url": "http://docs.oxide.computer/api/disks" } }, + "floating-ips": { + "description": "Floating IPs allow a project to allocate well-known IPs to instances.", + "external_docs": { + "url": "http://docs.oxide.computer/api/floating-ips" + } + }, "hidden": { "description": "TODO operations that will not ship to customers", "external_docs": { diff --git a/nexus/src/updates.rs b/nexus/src/updates.rs index c2265096dc..2f57868acc 100644 --- a/nexus/src/updates.rs +++ b/nexus/src/updates.rs @@ -2,38 +2,38 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +use buf_list::BufList; +use futures::TryStreamExt; use nexus_db_queries::db; use omicron_common::update::ArtifactsDocument; use std::convert::TryInto; -// TODO(iliana): make async/.await. awslabs/tough#213 -pub(crate) fn read_artifacts( +pub(crate) async fn read_artifacts( trusted_root: &[u8], mut base_url: String, ) -> Result< Vec, Box, > { - use std::io::Read; - if !base_url.ends_with('/') { base_url.push('/'); } let repository = tough::RepositoryLoader::new( - trusted_root, + &trusted_root, format!("{}metadata/", base_url).parse()?, format!("{}targets/", base_url).parse()?, ) - .load()?; + .load() + .await?; - let mut artifact_document = Vec::new(); - match repository.read_target(&"artifacts.json".parse()?)? { - Some(mut target) => target.read_to_end(&mut artifact_document)?, - None => return Err("artifacts.json missing".into()), - }; + let artifact_document = + match repository.read_target(&"artifacts.json".parse()?).await? 
{ + Some(target) => target.try_collect::().await?, + None => return Err("artifacts.json missing".into()), + }; let artifacts: ArtifactsDocument = - serde_json::from_slice(&artifact_document)?; + serde_json::from_reader(buf_list::Cursor::new(&artifact_document))?; let valid_until = repository .root() diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 647232031d..52ff8910f9 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -30,6 +30,7 @@ use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; use omicron_common::api::external::MacAddr; use omicron_common::api::external::{IdentityMetadata, Name}; use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::api::internal::nexus::ProducerKind; use omicron_common::api::internal::shared::SwitchLocation; use omicron_common::nexus_config; use omicron_common::nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; @@ -1092,6 +1093,7 @@ pub async fn start_producer_server( let producer_address = SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 0); let server_info = ProducerEndpoint { id, + kind: ProducerKind::Service, address: producer_address, base_route: "/collect".to_string(), interval: Duration::from_secs(1), diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 2368c3f568..1848989bf9 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -21,6 +21,7 @@ use nexus_types::external_api::shared::IdentityType; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::views; use nexus_types::external_api::views::Certificate; +use nexus_types::external_api::views::FloatingIp; use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; use nexus_types::external_api::views::User; @@ -32,7 +33,9 @@ use omicron_common::api::external::Disk; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceCpuCount; +use omicron_common::api::external::NameOrId; use omicron_sled_agent::sim::SledAgent; +use std::net::IpAddr; use std::sync::Arc; use uuid::Uuid; @@ -149,6 +152,28 @@ pub async fn create_ip_pool( (pool, range) } +pub async fn create_floating_ip( + client: &ClientTestContext, + fip_name: &str, + project: &str, + address: Option, + parent_pool_name: Option<&str>, +) -> FloatingIp { + object_create( + client, + &format!("/v1/floating-ips?project={project}"), + ¶ms::FloatingIpCreate { + identity: IdentityMetadataCreateParams { + name: fip_name.parse().unwrap(), + description: String::from("a floating ip"), + }, + address, + pool: parent_pool_name.map(|v| NameOrId::Name(v.parse().unwrap())), + }, + ) + .await +} + pub async fn create_certificate( client: &ClientTestContext, cert_name: &str, diff --git a/nexus/tests/config.test.toml b/nexus/tests/config.test.toml index fbed9aed8e..a4436234f0 100644 --- a/nexus/tests/config.test.toml +++ b/nexus/tests/config.test.toml @@ -98,6 +98,7 @@ inventory.period_secs = 600 inventory.nkeep = 3 # Disable inventory collection altogether (for emergencies) inventory.disable = false +phantom_disks.period_secs = 30 [default_region_allocation_strategy] # we only have one sled in the test environment, so we need to use the diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index a5a8339c34..807c054b64 100644 --- a/nexus/tests/integration_tests/disks.rs +++ 
b/nexus/tests/integration_tests/disks.rs
@@ -992,7 +992,7 @@ async fn test_disk_backed_by_multiple_region_sets(
         .body(Some(&new_disk))
         // TODO: this fails! the current allocation algorithm does not split
         // across datasets
-        .expect_status(Some(StatusCode::SERVICE_UNAVAILABLE)),
+        .expect_status(Some(StatusCode::INSUFFICIENT_STORAGE)),
     )
     .authn_as(AuthnMode::PrivilegedUser)
     .execute()
@@ -1026,7 +1026,7 @@ async fn test_disk_too_big(cptestctx: &ControlPlaneTestContext) {
     NexusRequest::new(
         RequestBuilder::new(client, Method::POST, &disks_url)
             .body(Some(&new_disk))
-            .expect_status(Some(StatusCode::SERVICE_UNAVAILABLE)),
+            .expect_status(Some(StatusCode::INSUFFICIENT_STORAGE)),
     )
     .authn_as(AuthnMode::PrivilegedUser)
     .execute()
@@ -1241,6 +1241,138 @@ async fn test_disk_virtual_provisioning_collection(
     );
 }
 
+#[nexus_test]
+async fn test_disk_virtual_provisioning_collection_failed_delete(
+    cptestctx: &ControlPlaneTestContext,
+) {
+    // Confirm that there's no panic deleting a project if a disk deletion fails
+    let client = &cptestctx.external_client;
+    let nexus = &cptestctx.server.apictx().nexus;
+    let datastore = nexus.datastore();
+
+    let disk_test = DiskTest::new(&cptestctx).await;
+
+    populate_ip_pool(&client, "default", None).await;
+    let project_id1 = create_project(client, PROJECT_NAME).await.identity.id;
+
+    let opctx =
+        OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone());
+
+    // Create a 1 GiB disk
+    let disk_size = ByteCount::from_gibibytes_u32(1);
+    let disks_url = get_disks_url();
+    let disk_one = params::DiskCreate {
+        identity: IdentityMetadataCreateParams {
+            name: "disk-one".parse().unwrap(),
+            description: String::from("sells rainsticks"),
+        },
+        disk_source: params::DiskSource::Blank {
+            block_size: params::BlockSize::try_from(512).unwrap(),
+        },
+        size: disk_size,
+    };
+    NexusRequest::new(
+        RequestBuilder::new(client, Method::POST, &disks_url)
+            .body(Some(&disk_one))
+            .expect_status(Some(StatusCode::CREATED)),
+    )
+    .authn_as(AuthnMode::PrivilegedUser)
+    .execute()
+    .await
+    .expect("unexpected failure creating 1 GiB disk");
+
+    // Assert correct virtual provisioning collection numbers
+    let virtual_provisioning_collection = datastore
+        .virtual_provisioning_collection_get(&opctx, project_id1)
+        .await
+        .unwrap();
+    assert_eq!(
+        virtual_provisioning_collection.virtual_disk_bytes_provisioned.0,
+        disk_size
+    );
+
+    // Set the third agent to fail when deleting regions
+    let zpool = &disk_test.zpools[2];
+    let dataset = &zpool.datasets[0];
+    disk_test
+        .sled_agent
+        .get_crucible_dataset(zpool.id, dataset.id)
+        .await
+        .set_region_deletion_error(true)
+        .await;
+
+    // Delete the disk - expect this to fail
+    let disk_url = format!("/v1/disks/{}?project={}", "disk-one", PROJECT_NAME);
+
+    NexusRequest::new(
+        RequestBuilder::new(client, Method::DELETE, &disk_url)
+            .expect_status(Some(StatusCode::INTERNAL_SERVER_ERROR)),
+    )
+    .authn_as(AuthnMode::PrivilegedUser)
+    .execute()
+    .await
+    .expect("unexpected success deleting 1 GiB disk");
+
+    // The virtual provisioning collection numbers haven't changed
+    let virtual_provisioning_collection = datastore
+        .virtual_provisioning_collection_get(&opctx, project_id1)
+        .await
+        .unwrap();
+    assert_eq!(
+        virtual_provisioning_collection.virtual_disk_bytes_provisioned.0,
+        disk_size
+    );
+
+    // And the disk is now faulted
+    let disk = disk_get(&client, &disk_url).await;
+    assert_eq!(disk.state, DiskState::Faulted);
+
+    // Set the third agent to respond normally
+    disk_test
+        .sled_agent
.get_crucible_dataset(zpool.id, dataset.id) + .await + .set_region_deletion_error(false) + .await; + + // Request disk delete again + NexusRequest::new( + RequestBuilder::new(client, Method::DELETE, &disk_url) + .expect_status(Some(StatusCode::NO_CONTENT)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("unexpected failure deleting 1 GiB disk"); + + // Delete the project's default VPC subnet and VPC + let subnet_url = + format!("/v1/vpc-subnets/default?project={}&vpc=default", PROJECT_NAME); + NexusRequest::object_delete(&client, &subnet_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("failed to make request"); + + let vpc_url = format!("/v1/vpcs/default?project={}", PROJECT_NAME); + NexusRequest::object_delete(&client, &vpc_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("failed to make request"); + + // The project can be deleted now + let url = format!("/v1/projects/{}", PROJECT_NAME); + NexusRequest::new( + RequestBuilder::new(client, Method::DELETE, &url) + .expect_status(Some(StatusCode::NO_CONTENT)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("unexpected failure deleting project"); +} + // Test disk size accounting #[nexus_test] async fn test_disk_size_accounting(cptestctx: &ControlPlaneTestContext) { @@ -1325,7 +1457,7 @@ async fn test_disk_size_accounting(cptestctx: &ControlPlaneTestContext) { NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&disk_two)) - .expect_status(Some(StatusCode::SERVICE_UNAVAILABLE)), + .expect_status(Some(StatusCode::INSUFFICIENT_STORAGE)), ) .authn_as(AuthnMode::PrivilegedUser) .execute() diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 64790c49c2..e11902d0fe 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -21,8 +21,10 @@ use nexus_test_utils::SLED_AGENT_UUID; use nexus_test_utils::SWITCH_UUID; use nexus_types::external_api::params; use nexus_types::external_api::shared; +use nexus_types::external_api::shared::Baseboard; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::Ipv4Range; +use nexus_types::external_api::shared::UninitializedSled; use omicron_common::api::external::AddressLotKind; use omicron_common::api::external::ByteCount; use omicron_common::api::external::IdentityMetadataCreateParams; @@ -39,6 +41,7 @@ use omicron_test_utils::certificates::CertificateChain; use std::net::IpAddr; use std::net::Ipv4Addr; use std::str::FromStr; +use uuid::Uuid; lazy_static! { pub static ref HARDWARE_RACK_URL: String = @@ -47,6 +50,12 @@ lazy_static! { format!("/v1/system/hardware/uninitialized-sleds"); pub static ref HARDWARE_SLED_URL: String = format!("/v1/system/hardware/sleds/{}", SLED_AGENT_UUID); + pub static ref HARDWARE_SLED_PROVISION_STATE_URL: String = + format!("/v1/system/hardware/sleds/{}/provision-state", SLED_AGENT_UUID); + pub static ref DEMO_SLED_PROVISION_STATE: params::SledProvisionStateParams = + params::SledProvisionStateParams { + state: nexus_types::external_api::views::SledProvisionState::NonProvisionable, + }; pub static ref HARDWARE_SWITCH_URL: String = format!("/v1/system/hardware/switches/{}", SWITCH_UUID); pub static ref HARDWARE_DISK_URL: String = @@ -57,6 +66,16 @@ lazy_static! 
{ pub static ref SLED_INSTANCES_URL: String = format!("/v1/system/hardware/sleds/{}/instances", SLED_AGENT_UUID); + pub static ref DEMO_UNINITIALIZED_SLED: UninitializedSled = UninitializedSled { + baseboard: Baseboard { + serial: "demo-serial".to_string(), + part: "demo-part".to_string(), + revision: 6 + }, + rack_id: Uuid::new_v4(), + cubby: 1 + }; + // Global policy pub static ref SYSTEM_POLICY_URL: &'static str = "/v1/system/policy"; @@ -115,6 +134,7 @@ lazy_static! { pub static ref DEMO_PROJECT_URL_INSTANCES: String = format!("/v1/instances?project={}", *DEMO_PROJECT_NAME); pub static ref DEMO_PROJECT_URL_SNAPSHOTS: String = format!("/v1/snapshots?project={}", *DEMO_PROJECT_NAME); pub static ref DEMO_PROJECT_URL_VPCS: String = format!("/v1/vpcs?project={}", *DEMO_PROJECT_NAME); + pub static ref DEMO_PROJECT_URL_FIPS: String = format!("/v1/floating-ips?project={}", *DEMO_PROJECT_NAME); pub static ref DEMO_PROJECT_CREATE: params::ProjectCreate = params::ProjectCreate { identity: IdentityMetadataCreateParams { @@ -242,8 +262,6 @@ lazy_static! { ), }; - pub static ref DEMO_IMPORT_DISK_IMPORT_FROM_URL_URL: String = - format!("/v1/disks/{}/import?{}", *DEMO_IMPORT_DISK_NAME, *DEMO_PROJECT_SELECTOR); pub static ref DEMO_IMPORT_DISK_BULK_WRITE_START_URL: String = format!("/v1/disks/{}/bulk-write-start?{}", *DEMO_IMPORT_DISK_NAME, *DEMO_PROJECT_SELECTOR); pub static ref DEMO_IMPORT_DISK_BULK_WRITE_URL: String = @@ -473,10 +491,7 @@ lazy_static! { name: DEMO_IMAGE_NAME.clone(), description: String::from(""), }, - source: params::ImageSource::Url { - url: HTTP_SERVER.url("/image.raw").to_string(), - block_size: params::BlockSize::try_from(4096).unwrap(), - }, + source: params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine, os: "fake-os".to_string(), version: "1.0".to_string() }; @@ -554,6 +569,22 @@ lazy_static! { }; } +lazy_static! { + // Project Floating IPs + pub static ref DEMO_FLOAT_IP_NAME: Name = "float-ip".parse().unwrap(); + pub static ref DEMO_FLOAT_IP_URL: String = + format!("/v1/floating-ips/{}?project={}", *DEMO_FLOAT_IP_NAME, *DEMO_PROJECT_NAME); + pub static ref DEMO_FLOAT_IP_CREATE: params::FloatingIpCreate = + params::FloatingIpCreate { + identity: IdentityMetadataCreateParams { + name: DEMO_FLOAT_IP_NAME.clone(), + description: String::from("a new IP pool"), + }, + address: Some(std::net::Ipv4Addr::new(10, 0, 0, 141).into()), + pool: None, + }; +} + lazy_static! { // Identity providers pub static ref IDENTITY_PROVIDERS_URL: String = format!("/v1/system/identity-providers?silo=demo-silo"); @@ -1292,20 +1323,6 @@ lazy_static! { ], }, - VerifyEndpoint { - url: &DEMO_IMPORT_DISK_IMPORT_FROM_URL_URL, - visibility: Visibility::Protected, - unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![ - AllowedMethod::Post( - serde_json::to_value(params::ImportBlocksFromUrl { - url: "obviously-fake-url".into(), - expected_digest: None, - }).unwrap() - ) - ], - }, - VerifyEndpoint { url: &DEMO_IMPORT_DISK_BULK_WRITE_START_URL, visibility: Visibility::Protected, @@ -1577,7 +1594,9 @@ lazy_static! { url: "/v1/system/hardware/sleds", visibility: Visibility::Public, unprivileged_access: UnprivilegedAccess::None, - allowed_methods: vec![AllowedMethod::Get], + allowed_methods: vec![AllowedMethod::Get, AllowedMethod::Post( + serde_json::to_value(&*DEMO_UNINITIALIZED_SLED).unwrap() + )], }, VerifyEndpoint { @@ -1594,6 +1613,15 @@ lazy_static! 
{ allowed_methods: vec![AllowedMethod::Get], }, + VerifyEndpoint { + url: &HARDWARE_SLED_PROVISION_STATE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Put( + serde_json::to_value(&*DEMO_SLED_PROVISION_STATE).unwrap() + )], + }, + VerifyEndpoint { url: "/v1/system/hardware/switches", visibility: Visibility::Public, @@ -1961,6 +1989,29 @@ lazy_static! { allowed_methods: vec![ AllowedMethod::GetNonexistent, ], + }, + + // Floating IPs + VerifyEndpoint { + url: &DEMO_PROJECT_URL_FIPS, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Post( + serde_json::to_value(&*DEMO_FLOAT_IP_CREATE).unwrap(), + ), + AllowedMethod::Get, + ], + }, + + VerifyEndpoint { + url: &DEMO_FLOAT_IP_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Delete, + ], } ]; } diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs new file mode 100644 index 0000000000..f3161dea72 --- /dev/null +++ b/nexus/tests/integration_tests/external_ips.rs @@ -0,0 +1,432 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Tests Floating IP support in the API + +use std::net::IpAddr; +use std::net::Ipv4Addr; + +use crate::integration_tests::instances::instance_simulate; +use dropshot::test_util::ClientTestContext; +use dropshot::HttpErrorResponseBody; +use http::Method; +use http::StatusCode; +use nexus_test_utils::http_testing::AuthnMode; +use nexus_test_utils::http_testing::NexusRequest; +use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers::create_floating_ip; +use nexus_test_utils::resource_helpers::create_instance_with; +use nexus_test_utils::resource_helpers::create_ip_pool; +use nexus_test_utils::resource_helpers::create_project; +use nexus_test_utils::resource_helpers::populate_ip_pool; +use nexus_test_utils_macros::nexus_test; +use nexus_types::external_api::params; +use nexus_types::external_api::views::FloatingIp; +use omicron_common::address::IpRange; +use omicron_common::address::Ipv4Range; +use omicron_common::api::external::IdentityMetadataCreateParams; +use omicron_common::api::external::Instance; +use uuid::Uuid; + +type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + +const PROJECT_NAME: &str = "rootbeer-float"; + +const FIP_NAMES: &[&str] = &["vanilla", "chocolate", "strawberry", "pistachio"]; + +pub fn get_floating_ips_url(project_name: &str) -> String { + format!("/v1/floating-ips?project={project_name}") +} + +pub fn get_floating_ip_by_name_url( + fip_name: &str, + project_name: &str, +) -> String { + format!("/v1/floating-ips/{fip_name}?project={project_name}") +} + +pub fn get_floating_ip_by_id_url(fip_id: &Uuid) -> String { + format!("/v1/floating-ips/{fip_id}") +} + +#[nexus_test] +async fn test_floating_ip_access(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + populate_ip_pool(&client, "default", None).await; + let project = create_project(client, PROJECT_NAME).await; + + // Create a floating IP from the default pool. 
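(The `create_floating_ip` helper used throughout these tests, defined in resource_helpers.rs earlier in this diff, is a thin wrapper over a single call; spelled out for this first case it is roughly:

    object_create(
        client,
        &format!("/v1/floating-ips?project={}", project.identity.id),
        &params::FloatingIpCreate {
            identity: IdentityMetadataCreateParams {
                name: "vanilla".parse().unwrap(),
                description: String::from("a floating ip"),
            },
            address: None, // let Nexus allocate from the pool
            pool: None,    // no pool named, so the default pool is used
        },
    )
    .await

returning a `views::FloatingIp`.)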
+ let fip_name = FIP_NAMES[0]; + let fip = create_floating_ip( + client, + fip_name, + &project.identity.id.to_string(), + None, + None, + ) + .await; + + // Fetch floating IP by ID + let fetched_fip = + floating_ip_get(&client, &get_floating_ip_by_id_url(&fip.identity.id)) + .await; + assert_eq!(fetched_fip.identity.id, fip.identity.id); + + // Fetch floating IP by name and project_id + let fetched_fip = floating_ip_get( + &client, + &get_floating_ip_by_name_url( + fip.identity.name.as_str(), + &project.identity.id.to_string(), + ), + ) + .await; + assert_eq!(fetched_fip.identity.id, fip.identity.id); + + // Fetch floating IP by name and project_name + let fetched_fip = floating_ip_get( + &client, + &get_floating_ip_by_name_url( + fip.identity.name.as_str(), + project.identity.name.as_str(), + ), + ) + .await; + assert_eq!(fetched_fip.identity.id, fip.identity.id); +} + +#[nexus_test] +async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + populate_ip_pool(&client, "default", None).await; + let other_pool_range = IpRange::V4( + Ipv4Range::new(Ipv4Addr::new(10, 1, 0, 1), Ipv4Addr::new(10, 1, 0, 5)) + .unwrap(), + ); + create_ip_pool(&client, "other-pool", Some(other_pool_range)).await; + + let project = create_project(client, PROJECT_NAME).await; + + // Create with no chosen IP and fallback to default pool. + let fip_name = FIP_NAMES[0]; + let fip = create_floating_ip( + client, + fip_name, + project.identity.name.as_str(), + None, + None, + ) + .await; + assert_eq!(fip.identity.name.as_str(), fip_name); + assert_eq!(fip.project_id, project.identity.id); + assert_eq!(fip.instance_id, None); + assert_eq!(fip.ip, IpAddr::from(Ipv4Addr::new(10, 0, 0, 0))); + + // Create with chosen IP and fallback to default pool. + let fip_name = FIP_NAMES[1]; + let ip_addr = "10.0.12.34".parse().unwrap(); + let fip = create_floating_ip( + client, + fip_name, + project.identity.name.as_str(), + Some(ip_addr), + None, + ) + .await; + assert_eq!(fip.identity.name.as_str(), fip_name); + assert_eq!(fip.project_id, project.identity.id); + assert_eq!(fip.instance_id, None); + assert_eq!(fip.ip, ip_addr); + + // Create with no chosen IP from named pool. + let fip_name = FIP_NAMES[2]; + let fip = create_floating_ip( + client, + fip_name, + project.identity.name.as_str(), + None, + Some("other-pool"), + ) + .await; + assert_eq!(fip.identity.name.as_str(), fip_name); + assert_eq!(fip.project_id, project.identity.id); + assert_eq!(fip.instance_id, None); + assert_eq!(fip.ip, IpAddr::from(Ipv4Addr::new(10, 1, 0, 1))); + + // Create with chosen IP from named pool. + let fip_name = FIP_NAMES[3]; + let ip_addr = "10.1.0.5".parse().unwrap(); + let fip = create_floating_ip( + client, + fip_name, + project.identity.name.as_str(), + Some(ip_addr), + Some("other-pool"), + ) + .await; + assert_eq!(fip.identity.name.as_str(), fip_name); + assert_eq!(fip.project_id, project.identity.id); + assert_eq!(fip.instance_id, None); + assert_eq!(fip.ip, ip_addr); +} + +#[nexus_test] +async fn test_floating_ip_create_ip_in_use( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + populate_ip_pool(&client, "default", None).await; + + let project = create_project(client, PROJECT_NAME).await; + let contested_ip = "10.0.0.0".parse().unwrap(); + + // First create will succeed. 
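(Note that `10.0.0.0` is the first address the default test pool hands out, as the auto-allocation assertions in `test_floating_ip_create` above show, so this explicit create claims exactly the address an unconstrained allocation would have received.)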
+ create_floating_ip( + client, + FIP_NAMES[0], + project.identity.name.as_str(), + Some(contested_ip), + None, + ) + .await; + + // Second will fail as the requested IP is in use in the selected + // (default) pool. + let error: HttpErrorResponseBody = NexusRequest::new( + RequestBuilder::new( + client, + Method::POST, + &get_floating_ips_url(PROJECT_NAME), + ) + .body(Some(¶ms::FloatingIpCreate { + identity: IdentityMetadataCreateParams { + name: FIP_NAMES[1].parse().unwrap(), + description: "another fip".into(), + }, + address: Some(contested_ip), + pool: None, + })) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + assert_eq!(error.message, "Requested external IP address not available"); +} + +#[nexus_test] +async fn test_floating_ip_create_name_in_use( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + populate_ip_pool(&client, "default", None).await; + + let project = create_project(client, PROJECT_NAME).await; + let contested_name = FIP_NAMES[0]; + + // First create will succeed. + create_floating_ip( + client, + contested_name, + project.identity.name.as_str(), + None, + None, + ) + .await; + + // Second will fail as the requested name is in use within this + // project. + let error: HttpErrorResponseBody = NexusRequest::new( + RequestBuilder::new( + client, + Method::POST, + &get_floating_ips_url(PROJECT_NAME), + ) + .body(Some(¶ms::FloatingIpCreate { + identity: IdentityMetadataCreateParams { + name: contested_name.parse().unwrap(), + description: "another fip".into(), + }, + address: None, + pool: None, + })) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + assert_eq!( + error.message, + format!("already exists: floating-ip \"{contested_name}\""), + ); +} + +#[nexus_test] +async fn test_floating_ip_delete(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + populate_ip_pool(&client, "default", None).await; + let project = create_project(client, PROJECT_NAME).await; + + let fip = create_floating_ip( + client, + FIP_NAMES[0], + project.identity.name.as_str(), + None, + None, + ) + .await; + + // Delete the floating IP. + NexusRequest::object_delete( + client, + &get_floating_ip_by_id_url(&fip.identity.id), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); +} + +#[nexus_test] +async fn test_floating_ip_attachment(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + let apictx = &cptestctx.server.apictx(); + let nexus = &apictx.nexus; + + populate_ip_pool(&client, "default", None).await; + let project = create_project(client, PROJECT_NAME).await; + + let fip = create_floating_ip( + client, + FIP_NAMES[0], + project.identity.name.as_str(), + None, + None, + ) + .await; + + // Bind the floating IP to an instance at create time. + let instance_name = "anonymous-diner"; + let instance = create_instance_with( + &client, + PROJECT_NAME, + instance_name, + ¶ms::InstanceNetworkInterfaceAttachment::Default, + vec![], + vec![params::ExternalIpCreate::Floating { + floating_ip_name: FIP_NAMES[0].parse().unwrap(), + }], + ) + .await; + + // Reacquire FIP: parent ID must have updated to match instance. 
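(The attachment checked here was established at instance-create time via the `params::ExternalIpCreate::Floating { floating_ip_name }` entry in the instance's external IP list above; no separate attach call is involved in this test.)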
+ let fetched_fip = + floating_ip_get(&client, &get_floating_ip_by_id_url(&fip.identity.id)) + .await; + assert_eq!(fetched_fip.instance_id, Some(instance.identity.id)); + + // Try to delete the floating IP, which should fail. + let error: HttpErrorResponseBody = NexusRequest::new( + RequestBuilder::new( + client, + Method::DELETE, + &get_floating_ip_by_id_url(&fip.identity.id), + ) + .expect_status(Some(StatusCode::BAD_REQUEST)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + assert_eq!( + error.message, + format!("Floating IP cannot be deleted while attached to an instance"), + ); + + // Stop and delete the instance. + instance_simulate(nexus, &instance.identity.id).await; + instance_simulate(nexus, &instance.identity.id).await; + + let _: Instance = NexusRequest::new( + RequestBuilder::new( + client, + Method::POST, + &format!("/v1/instances/{}/stop", instance.identity.id), + ) + .body(None as Option<&serde_json::Value>) + .expect_status(Some(StatusCode::ACCEPTED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + instance_simulate(nexus, &instance.identity.id).await; + + NexusRequest::object_delete( + &client, + &format!("/v1/instances/{instance_name}?project={PROJECT_NAME}"), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); + + // Reacquire FIP again: parent ID must now be unset. + let fetched_fip = + floating_ip_get(&client, &get_floating_ip_by_id_url(&fip.identity.id)) + .await; + assert_eq!(fetched_fip.instance_id, None); + + // Delete the floating IP. + NexusRequest::object_delete( + client, + &get_floating_ip_by_id_url(&fip.identity.id), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); +} + +pub async fn floating_ip_get( + client: &ClientTestContext, + fip_url: &str, +) -> FloatingIp { + floating_ip_get_as(client, fip_url, AuthnMode::PrivilegedUser).await +} + +async fn floating_ip_get_as( + client: &ClientTestContext, + fip_url: &str, + authn_as: AuthnMode, +) -> FloatingIp { + NexusRequest::object_get(client, fip_url) + .authn_as(authn_as) + .execute() + .await + .unwrap_or_else(|e| { + panic!("failed to make \"get\" request to {fip_url}: {e}") + }) + .parsed_body() + .unwrap_or_else(|e| { + panic!("failed to make \"get\" request to {fip_url}: {e}") + }) +} diff --git a/nexus/tests/integration_tests/host_phase1_updater.rs b/nexus/tests/integration_tests/host_phase1_updater.rs new file mode 100644 index 0000000000..01d546636e --- /dev/null +++ b/nexus/tests/integration_tests/host_phase1_updater.rs @@ -0,0 +1,584 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Tests `HostPhase1Updater`'s delivery of updates to host phase 1 flash via +//! MGS to SP. 
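+//!
+//! A minimal sketch of the flow these tests exercise (assuming only the
+//! `HostPhase1Updater::new`/`update` API and `MgsClients` helper used
+//! throughout this file):
+//!
+//! ```ignore
+//! let mut mgs_clients = MgsClients::from_clients([/* gateway clients */]);
+//! let updater = HostPhase1Updater::new(
+//!     SpType::Sled,   // SP type
+//!     0,              // SP slot
+//!     0,              // target host boot flash slot
+//!     Uuid::new_v4(), // update ID
+//!     phase1_data,    // raw phase 1 image bytes
+//!     &log,
+//! );
+//! // Delivers the image via the first healthy MGS client, failing over to
+//! // the next client on error.
+//! updater.update(&mut mgs_clients).await?;
+//! ```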
+
+use gateway_client::types::SpType;
+use gateway_messages::{SpPort, UpdateInProgressStatus, UpdateStatus};
+use gateway_test_utils::setup as mgs_setup;
+use omicron_nexus::app::test_interfaces::{
+    HostPhase1Updater, MgsClients, UpdateProgress,
+};
+use rand::RngCore;
+use sp_sim::SimulatedSp;
+use std::mem;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
+use std::time::Duration;
+use tokio::io::AsyncWriteExt;
+use tokio::net::TcpListener;
+use tokio::net::TcpStream;
+use tokio::sync::mpsc;
+use uuid::Uuid;
+
+fn make_fake_host_phase1_image() -> Vec<u8> {
+    let mut image = vec![0; 128];
+    rand::thread_rng().fill_bytes(&mut image);
+    image
+}
+
+#[tokio::test]
+async fn test_host_phase1_updater_updates_sled() {
+    // Start MGS + Sim SP.
+    let mgstestctx = mgs_setup::test_setup(
+        "test_host_phase1_updater_updates_sled",
+        SpPort::One,
+    )
+    .await;
+
+    // Configure an MGS client.
+    let mut mgs_clients =
+        MgsClients::from_clients([gateway_client::Client::new(
+            &mgstestctx.client.url("/").to_string(),
+            mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")),
+        )]);
+
+    for target_host_slot in [0, 1] {
+        // Configure and instantiate a `HostPhase1Updater`.
+        let sp_type = SpType::Sled;
+        let sp_slot = 0;
+        let update_id = Uuid::new_v4();
+        let phase1_data = make_fake_host_phase1_image();
+
+        let host_phase1_updater = HostPhase1Updater::new(
+            sp_type,
+            sp_slot,
+            target_host_slot,
+            update_id,
+            phase1_data.clone(),
+            &mgstestctx.logctx.log,
+        );
+
+        // Run the update.
+        host_phase1_updater
+            .update(&mut mgs_clients)
+            .await
+            .expect("update failed");
+
+        // Ensure the SP received the complete update.
+        let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize]
+            .last_host_phase1_update_data(target_host_slot)
+            .await
+            .expect("simulated host phase1 did not receive an update");
+
+        assert_eq!(
+            phase1_data.as_slice(),
+            &*last_update_image,
+            "simulated host phase1 update contents (len {}) \
+             do not match test generated fake image (len {})",
+            last_update_image.len(),
+            phase1_data.len(),
+        );
+    }
+
+    mgstestctx.teardown().await;
+}
+
+#[tokio::test]
+async fn test_host_phase1_updater_remembers_successful_mgs_instance() {
+    // Start MGS + Sim SP.
+    let mgstestctx = mgs_setup::test_setup(
+        "test_host_phase1_updater_remembers_successful_mgs_instance",
+        SpPort::One,
+    )
+    .await;
+
+    // Also start a local TCP server that we will claim is an MGS instance, but
+    // it will close connections immediately after accepting them. This will
+    // allow us to count how many connections it receives, while simultaneously
+    // causing errors in the HostPhase1Updater when it attempts to use this
+    // "MGS".
+    let (failing_mgs_task, failing_mgs_addr, failing_mgs_conn_counter) = {
+        let socket = TcpListener::bind("[::1]:0").await.unwrap();
+        let addr = socket.local_addr().unwrap();
+        let conn_count = Arc::new(AtomicUsize::new(0));
+
+        let task = {
+            let conn_count = Arc::clone(&conn_count);
+            tokio::spawn(async move {
+                loop {
+                    let (mut stream, _peer) = socket.accept().await.unwrap();
+                    conn_count.fetch_add(1, Ordering::SeqCst);
+                    stream.shutdown().await.unwrap();
+                }
+            })
+        };
+
+        (task, addr, conn_count)
+    };
+
+    // Order the MGS clients such that the bogus MGS that immediately closes
+    // connections comes first. `HostPhase1Updater` should remember that the
+    // second MGS instance succeeds, and only send subsequent requests to it:
+    // we should only see a single attempted connection to the bogus MGS, even
+    // though delivering an update requires a bare minimum of three requests
+    // (set the active flash slot, start the update, query the status) and
+    // often more (if repeated queries are required to wait for completion).
+    let mut mgs_clients = MgsClients::from_clients([
+        gateway_client::Client::new(
+            &format!("http://{failing_mgs_addr}"),
+            mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient1")),
+        ),
+        gateway_client::Client::new(
+            &mgstestctx.client.url("/").to_string(),
+            mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")),
+        ),
+    ]);
+
+    let sp_type = SpType::Sled;
+    let sp_slot = 0;
+    let target_host_slot = 0;
+    let update_id = Uuid::new_v4();
+    let phase1_data = make_fake_host_phase1_image();
+
+    let host_phase1_updater = HostPhase1Updater::new(
+        sp_type,
+        sp_slot,
+        target_host_slot,
+        update_id,
+        phase1_data.clone(),
+        &mgstestctx.logctx.log,
+    );
+
+    host_phase1_updater.update(&mut mgs_clients).await.expect("update failed");
+
+    let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize]
+        .last_host_phase1_update_data(target_host_slot)
+        .await
+        .expect("simulated host phase1 did not receive an update");
+
+    assert_eq!(
+        phase1_data.as_slice(),
+        &*last_update_image,
+        "simulated host phase1 update contents (len {}) \
+         do not match test generated fake image (len {})",
+        last_update_image.len(),
+        phase1_data.len(),
+    );
+
+    // Check that our bogus MGS only received a single connection attempt.
+    // (After HostPhase1Updater failed to talk to this instance, it should have
+    // fallen back to the valid one for all further requests.)
+    assert_eq!(
+        failing_mgs_conn_counter.load(Ordering::SeqCst),
+        1,
+        "bogus MGS instance didn't receive the expected number of connections"
+    );
+    failing_mgs_task.abort();
+
+    mgstestctx.teardown().await;
+}
+
+#[tokio::test]
+async fn test_host_phase1_updater_switches_mgs_instances_on_failure() {
+    enum MgsProxy {
+        One(TcpStream),
+        Two(TcpStream),
+    }
+
+    // Start MGS + Sim SP.
+    let mgstestctx = mgs_setup::test_setup(
+        "test_host_phase1_updater_switches_mgs_instances_on_failure",
+        SpPort::One,
+    )
+    .await;
+    let mgs_bind_addr = mgstestctx.client.bind_address;
+
+    let spawn_mgs_proxy_task = |mut stream: TcpStream| {
+        tokio::spawn(async move {
+            let mut mgs_stream = TcpStream::connect(mgs_bind_addr)
+                .await
+                .expect("failed to connect to MGS");
+            tokio::io::copy_bidirectional(&mut stream, &mut mgs_stream)
+                .await
+                .expect("failed to proxy connection to MGS");
+        })
+    };
+
+    // Start two MGS proxy tasks; when each receives an incoming TCP
+    // connection, it forwards that `TcpStream` along the
+    // `mgs_proxy_connections` channel along with a tag of which proxy it is.
+    // We'll use this below to flip flop between MGS "instances" (really these
+    // two proxies).
+ let (mgs_proxy_connections_tx, mut mgs_proxy_connections_rx) = + mpsc::unbounded_channel(); + let (mgs_proxy_one_task, mgs_proxy_one_addr) = { + let socket = TcpListener::bind("[::1]:0").await.unwrap(); + let addr = socket.local_addr().unwrap(); + let mgs_proxy_connections_tx = mgs_proxy_connections_tx.clone(); + let task = tokio::spawn(async move { + loop { + let (stream, _peer) = socket.accept().await.unwrap(); + mgs_proxy_connections_tx.send(MgsProxy::One(stream)).unwrap(); + } + }); + (task, addr) + }; + let (mgs_proxy_two_task, mgs_proxy_two_addr) = { + let socket = TcpListener::bind("[::1]:0").await.unwrap(); + let addr = socket.local_addr().unwrap(); + let task = tokio::spawn(async move { + loop { + let (stream, _peer) = socket.accept().await.unwrap(); + mgs_proxy_connections_tx.send(MgsProxy::Two(stream)).unwrap(); + } + }); + (task, addr) + }; + + // Disable connection pooling so each request gets a new TCP connection. + let client = + reqwest::Client::builder().pool_max_idle_per_host(0).build().unwrap(); + + // Configure two MGS clients pointed at our two proxy tasks. + let mut mgs_clients = MgsClients::from_clients([ + gateway_client::Client::new_with_client( + &format!("http://{mgs_proxy_one_addr}"), + client.clone(), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient1")), + ), + gateway_client::Client::new_with_client( + &format!("http://{mgs_proxy_two_addr}"), + client, + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient2")), + ), + ]); + + let sp_type = SpType::Sled; + let sp_slot = 0; + let target_host_slot = 0; + let update_id = Uuid::new_v4(); + let phase1_data = make_fake_host_phase1_image(); + + let host_phase1_updater = HostPhase1Updater::new( + sp_type, + sp_slot, + target_host_slot, + update_id, + phase1_data.clone(), + &mgstestctx.logctx.log, + ); + + // Spawn the actual update task. + let mut update_task = tokio::spawn(async move { + host_phase1_updater.update(&mut mgs_clients).await + }); + + // Loop over incoming requests. We expect this sequence: + // + // 1. Connection arrives on the first proxy + // 2. We spawn a task to service that request, and set `should_swap` + // 3. Connection arrives on the first proxy + // 4. We drop that connection, flip `expected_proxy`, and clear + // `should_swap` + // 5. Connection arrives on the second proxy + // 6. We spawn a task to service that request, and set `should_swap` + // 7. Connection arrives on the second proxy + // 8. We drop that connection, flip `expected_proxy`, and clear + // `should_swap` + // + // ... repeat until the update is complete. + let mut expected_proxy = 0; + let mut proxy_one_count = 0; + let mut proxy_two_count = 0; + let mut total_requests_handled = 0; + let mut should_swap = false; + loop { + tokio::select! { + Some(proxy_stream) = mgs_proxy_connections_rx.recv() => { + let stream = match proxy_stream { + MgsProxy::One(stream) => { + assert_eq!(expected_proxy, 0); + proxy_one_count += 1; + stream + } + MgsProxy::Two(stream) => { + assert_eq!(expected_proxy, 1); + proxy_two_count += 1; + stream + } + }; + + // Should we trigger `HostPhase1Updater` to swap to the other + // MGS (proxy)? If so, do that by dropping this connection + // (which will cause a client failure) and note that we expect + // the next incoming request to come on the other proxy. + if should_swap { + mem::drop(stream); + expected_proxy ^= 1; + should_swap = false; + } else { + // Otherwise, handle this connection. 
+                    total_requests_handled += 1;
+                    spawn_mgs_proxy_task(stream);
+                    should_swap = true;
+                }
+            }
+
+            result = &mut update_task => {
+                match result {
+                    Ok(Ok(())) => {
+                        mgs_proxy_one_task.abort();
+                        mgs_proxy_two_task.abort();
+                        break;
+                    }
+                    Ok(Err(err)) => panic!("update failed: {err}"),
+                    Err(err) => panic!("update task panicked: {err}"),
+                }
+            }
+        }
+    }
+
+    // A host flash update requires a minimum of 3 requests to MGS: set the
+    // active flash slot, post the update, and check the status. There may be
+    // more requests if the update is not yet complete when the status is
+    // checked, but we can just check that each of our proxies received at
+    // least 2 incoming requests; based on our outline above, if we got the
+    // minimum of 3 requests, it would look like this:
+    //
+    // 1. POST set active slot -> first proxy (success)
+    // 2. POST update -> first proxy (fail)
+    // 3. POST update retry -> second proxy (success)
+    // 4. GET status -> second proxy (fail)
+    // 5. GET status retry -> first proxy (success)
+    //
+    // This pattern would repeat if multiple status requests were required, so
+    // we always expect the first proxy to see exactly one more connection
+    // attempt than the second (because it went first before they started
+    // swapping), and the two together should see a total of one less than
+    // double the number of successful requests required.
+    assert!(total_requests_handled >= 3);
+    assert_eq!(proxy_one_count, proxy_two_count + 1);
+    assert_eq!(
+        (proxy_one_count + proxy_two_count + 1) / 2,
+        total_requests_handled
+    );
+
+    let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize]
+        .last_host_phase1_update_data(target_host_slot)
+        .await
+        .expect("simulated host phase1 did not receive an update");
+
+    assert_eq!(
+        phase1_data.as_slice(),
+        &*last_update_image,
+        "simulated host phase1 update contents (len {}) \
+         do not match test generated fake image (len {})",
+        last_update_image.len(),
+        phase1_data.len(),
+    );
+
+    mgstestctx.teardown().await;
+}
+
+#[tokio::test]
+async fn test_host_phase1_updater_delivers_progress() {
+    // Start MGS + Sim SP.
+    let mgstestctx = mgs_setup::test_setup(
+        "test_host_phase1_updater_delivers_progress",
+        SpPort::One,
+    )
+    .await;
+
+    // Configure an MGS client.
+    let mut mgs_clients =
+        MgsClients::from_clients([gateway_client::Client::new(
+            &mgstestctx.client.url("/").to_string(),
+            mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")),
+        )]);
+
+    let sp_type = SpType::Sled;
+    let sp_slot = 0;
+    let target_host_slot = 0;
+    let update_id = Uuid::new_v4();
+    let phase1_data = make_fake_host_phase1_image();
+
+    let host_phase1_updater = HostPhase1Updater::new(
+        sp_type,
+        sp_slot,
+        target_host_slot,
+        update_id,
+        phase1_data.clone(),
+        &mgstestctx.logctx.log,
+    );
+
+    let phase1_data_len = phase1_data.len() as u32;
+
+    // Subscribe to update progress, and check that there is no status yet; we
+    // haven't started the update.
+    let mut progress = host_phase1_updater.progress_watcher();
+    assert_eq!(*progress.borrow_and_update(), None);
+
+    // Install a semaphore on the requests our target SP will receive so we can
+    // inspect progress messages without racing.
+    let target_sp = &mgstestctx.simrack.gimlets[sp_slot as usize];
+    let sp_accept_sema = target_sp.install_udp_accept_semaphore().await;
+    let mut sp_responses = target_sp.responses_sent_count().unwrap();
+
+    // Spawn the update on a background task so we can watch `progress` as it
+    // is applied.
+    let do_update_task = tokio::spawn(async move {
+        host_phase1_updater.update(&mut mgs_clients).await
+    });
+
+    // Allow the SP to respond to 2 messages: the message to activate the
+    // target flash slot and the "prepare update" message that triggers the
+    // start of an update, then ensure we see the "started" progress.
+    sp_accept_sema.send(2).unwrap();
+    progress.changed().await.unwrap();
+    assert_eq!(*progress.borrow_and_update(), Some(UpdateProgress::Started));
+
+    // Ensure our simulated SP is in the state we expect: it's prepared for an
+    // update but has not yet received any data.
+    assert_eq!(
+        target_sp.current_update_status().await,
+        UpdateStatus::InProgress(UpdateInProgressStatus {
+            id: update_id.into(),
+            bytes_received: 0,
+            total_size: phase1_data_len,
+        })
+    );
+
+    // Record the number of responses the SP has sent; we'll use
+    // `sp_responses.changed()` in the loop below, and want to mark whatever
+    // value this watch channel currently has as seen.
+    sp_responses.borrow_and_update();
+
+    // At this point, there are two clients racing each other to talk to our
+    // simulated SP:
+    //
+    // 1. MGS is trying to deliver the update
+    // 2. `host_phase1_updater` is trying to poll (via MGS) for update status
+    //
+    // and we want to ensure that we see any relevant progress reports from
+    // `host_phase1_updater`. We'll let one MGS -> SP message through at a time
+    // (waiting until our SP has responded by waiting for a change to
+    // `sp_responses`) then check its update state: if it changed, the packet
+    // we let through was data from MGS; otherwise, it was a status request
+    // from `host_phase1_updater`.
+    //
+    // This loop will continue until either:
+    //
+    // 1. We see an `UpdateStatus::InProgress` message indicating 100%
+    //    delivery, at which point we break out of the loop
+    // 2. We time out waiting for the previous step (by timing out for either
+    //    the SP to process a request or `host_phase1_updater` to realize
+    //    there's been progress), at which point we panic and fail this test.
+    let mut prev_bytes_received = 0;
+    let mut expect_progress_change = false;
+    loop {
+        // Allow the SP to accept and respond to a single UDP packet.
+        sp_accept_sema.send(1).unwrap();
+
+        // Wait until the SP has sent a response, with a safety rail that we
+        // haven't screwed up our untangle-the-race logic: if we don't see the
+        // SP process any new messages after several seconds, our test is
+        // broken, so fail.
+        tokio::time::timeout(Duration::from_secs(10), sp_responses.changed())
+            .await
+            .expect("timeout waiting for SP response count to change")
+            .expect("sp response count sender dropped");
+
+        // Inspect the SP's in-memory update state; we expect only `InProgress`
+        // or `Complete`, and in either case we note whether we expect to see
+        // status changes from `host_phase1_updater`.
+        match target_sp.current_update_status().await {
+            UpdateStatus::InProgress(sp_progress) => {
+                if sp_progress.bytes_received > prev_bytes_received {
+                    prev_bytes_received = sp_progress.bytes_received;
+                    expect_progress_change = true;
+                    continue;
+                }
+            }
+            UpdateStatus::Complete(_) => {
+                if prev_bytes_received < phase1_data_len {
+                    break;
+                }
+            }
+            status @ (UpdateStatus::None
+            | UpdateStatus::Preparing(_)
+            | UpdateStatus::SpUpdateAuxFlashChckScan { .. }
+            | UpdateStatus::Aborted(_)
+            | UpdateStatus::Failed { .. }
+            | UpdateStatus::RotError { .. }) => {
+                panic!("unexpected status {status:?}");
+            }
+        }
+
+        // If we get here, the most recent packet did _not_ change the SP's
+        // internal update state, so it was a status request from
+        // `host_phase1_updater`. If we expect the updater to see new progress,
+        // wait for that change here.
+        if expect_progress_change {
+            // Safety rail that we haven't screwed up our untangle-the-race
+            // logic: if we don't see a new progress after several seconds, our
+            // test is broken, so fail.
+            tokio::time::timeout(Duration::from_secs(10), progress.changed())
+                .await
+                .expect("progress timeout")
+                .expect("progress watch sender dropped");
+            let status = progress.borrow_and_update().clone().unwrap();
+            expect_progress_change = false;
+
+            assert!(
+                matches!(status, UpdateProgress::InProgress { .. }),
+                "unexpected progress status {status:?}"
+            );
+        }
+    }
+
+    // We know the SP has received a complete update, but `HostPhase1Updater`
+    // may still need to request status to realize that; release the socket
+    // semaphore so the SP can respond.
+    sp_accept_sema.send(usize::MAX).unwrap();
+
+    // Unlike the SP and RoT cases, there are no MGS/SP steps in between the
+    // update completing and `HostPhase1Updater` sending
+    // `UpdateProgress::Complete`. Therefore, it's a race whether we'll see
+    // some number of `InProgress` statuses before `Complete`, but we should
+    // quickly move to `Complete`.
+    loop {
+        tokio::time::timeout(Duration::from_secs(10), progress.changed())
+            .await
+            .expect("progress timeout")
+            .expect("progress watch sender dropped");
+        let status = progress.borrow_and_update().clone().unwrap();
+        match status {
+            UpdateProgress::Complete => break,
+            UpdateProgress::InProgress { .. } => continue,
+            _ => panic!("unexpected progress status {status:?}"),
+        }
+    }
+
+    // Drop our progress receiver so `do_update_task` can complete.
+    mem::drop(progress);
+
+    do_update_task.await.expect("update task panicked").expect("update failed");
+
+    let last_update_image = target_sp
+        .last_host_phase1_update_data(target_host_slot)
+        .await
+        .expect("simulated host phase1 did not receive an update");
+
+    assert_eq!(
+        phase1_data.as_slice(),
+        &*last_update_image,
+        "simulated host phase1 update contents (len {}) \
+         do not match test generated fake image (len {})",
+        last_update_image.len(),
+        phase1_data.len(),
+    );
+
+    mgstestctx.teardown().await;
+}
diff --git a/nexus/tests/integration_tests/images.rs b/nexus/tests/integration_tests/images.rs
index c3db9e8f13..9d608937ce 100644
--- a/nexus/tests/integration_tests/images.rs
+++ b/nexus/tests/integration_tests/images.rs
@@ -24,15 +24,11 @@ use nexus_types::identity::Resource;
 use omicron_common::api::external::Disk;
 use omicron_common::api::external::{ByteCount, IdentityMetadataCreateParams};
 
-use httptest::{matchers::*, responders::*, Expectation, ServerBuilder};
-
 type ControlPlaneTestContext =
     nexus_test_utils::ControlPlaneTestContext<omicron_nexus::Server>;
 
 const PROJECT_NAME: &str = "myproj";
 
-const BLOCK_SIZE: params::BlockSize = params::BlockSize(512);
-
 fn get_project_images_url(project_name: &str) -> String {
     format!("/v1/images?project={}", project_name)
 }
@@ -56,18 +52,6 @@ async fn test_image_create(cptestctx: &ControlPlaneTestContext) {
     let client = &cptestctx.external_client;
     DiskTest::new(&cptestctx).await;
 
-    let server = ServerBuilder::new().run().unwrap();
-    server.expect(
-        Expectation::matching(request::method_path("HEAD", "/image.raw"))
-            .times(1..)
- .respond_with( - status_code(200).append_header( - "Content-Length", - format!("{}", 4096 * 1000), - ), - ), - ); - let images_url = get_project_images_url(PROJECT_NAME); // Before project exists, image list 404s @@ -94,10 +78,9 @@ async fn test_image_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(images.len(), 0); // Create an image in the project - let image_create_params = get_image_create(params::ImageSource::Url { - url: server.url("/image.raw").to_string(), - block_size: BLOCK_SIZE, - }); + let image_create_params = get_image_create( + params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine, + ); NexusRequest::objects_post(client, &images_url, &image_create_params) .authn_as(AuthnMode::PrivilegedUser) @@ -120,18 +103,6 @@ async fn test_silo_image_create(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - let server = ServerBuilder::new().run().unwrap(); - server.expect( - Expectation::matching(request::method_path("HEAD", "/image.raw")) - .times(1..) - .respond_with( - status_code(200).append_header( - "Content-Length", - format!("{}", 4096 * 1000), - ), - ), - ); - let silo_images_url = "/v1/images"; // Expect no images in the silo @@ -144,10 +115,9 @@ async fn test_silo_image_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(images.len(), 0); // Create an image in the project - let image_create_params = get_image_create(params::ImageSource::Url { - url: server.url("/image.raw").to_string(), - block_size: BLOCK_SIZE, - }); + let image_create_params = get_image_create( + params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine, + ); // Create image NexusRequest::objects_post(client, &silo_images_url, &image_create_params) @@ -165,162 +135,6 @@ async fn test_silo_image_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(images[0].identity.name, "alpine-edge"); } -#[nexus_test] -async fn test_image_create_url_404(cptestctx: &ControlPlaneTestContext) { - let client = &cptestctx.external_client; - DiskTest::new(&cptestctx).await; - - // need a project to post to - create_project(client, PROJECT_NAME).await; - - let server = ServerBuilder::new().run().unwrap(); - server.expect( - Expectation::matching(request::method_path("HEAD", "/image.raw")) - .times(1..) 
-            .respond_with(status_code(404)),
-    );
-
-    let image_create_params = get_image_create(params::ImageSource::Url {
-        url: server.url("/image.raw").to_string(),
-        block_size: BLOCK_SIZE,
-    });
-
-    let images_url = get_project_images_url(PROJECT_NAME);
-
-    let error = NexusRequest::new(
-        RequestBuilder::new(client, Method::POST, &images_url)
-            .body(Some(&image_create_params))
-            .expect_status(Some(StatusCode::BAD_REQUEST)),
-    )
-    .authn_as(AuthnMode::PrivilegedUser)
-    .execute()
-    .await
-    .expect("unexpected success")
-    .parsed_body::<HttpErrorResponseBody>()
-    .unwrap();
-    assert_eq!(
-        error.message,
-        format!("unsupported value for \"url\": querying url returned: 404 Not Found")
-    );
-}
-
-#[nexus_test]
-async fn test_image_create_bad_url(cptestctx: &ControlPlaneTestContext) {
-    let client = &cptestctx.external_client;
-    DiskTest::new(&cptestctx).await;
-
-    // need a project to post to
-    create_project(client, PROJECT_NAME).await;
-
-    let image_create_params = get_image_create(params::ImageSource::Url {
-        url: "not_a_url".to_string(),
-        block_size: BLOCK_SIZE,
-    });
-
-    let images_url = get_project_images_url(PROJECT_NAME);
-
-    let error = NexusRequest::new(
-        RequestBuilder::new(client, Method::POST, &images_url)
-            .body(Some(&image_create_params))
-            .expect_status(Some(StatusCode::BAD_REQUEST)),
-    )
-    .authn_as(AuthnMode::PrivilegedUser)
-    .execute()
-    .await
-    .expect("unexpected success")
-    .parsed_body::<HttpErrorResponseBody>()
-    .unwrap();
-    assert_eq!(
-        error.message,
-        format!("unsupported value for \"url\": error querying url: builder error: relative URL without a base")
-    );
-}
-
-#[nexus_test]
-async fn test_image_create_bad_content_length(
-    cptestctx: &ControlPlaneTestContext,
-) {
-    let client = &cptestctx.external_client;
-    DiskTest::new(&cptestctx).await;
-
-    // need a project to post to
-    create_project(client, PROJECT_NAME).await;
-
-    let server = ServerBuilder::new().run().unwrap();
-    server.expect(
-        Expectation::matching(request::method_path("HEAD", "/image.raw"))
-            .times(1..)
-            .respond_with(
-                status_code(200).append_header("Content-Length", "bad"),
-            ),
-    );
-
-    let image_create_params = get_image_create(params::ImageSource::Url {
-        url: server.url("/image.raw").to_string(),
-        block_size: BLOCK_SIZE,
-    });
-
-    let images_url = get_project_images_url(PROJECT_NAME);
-
-    let error = NexusRequest::new(
-        RequestBuilder::new(client, Method::POST, &images_url)
-            .body(Some(&image_create_params))
-            .expect_status(Some(StatusCode::BAD_REQUEST)),
-    )
-    .authn_as(AuthnMode::PrivilegedUser)
-    .execute()
-    .await
-    .expect("unexpected success")
-    .parsed_body::<HttpErrorResponseBody>()
-    .unwrap();
-    assert_eq!(
-        error.message,
-        format!("unsupported value for \"url\": content length invalid: invalid digit found in string")
-    );
-}
-
-#[nexus_test]
-async fn test_image_create_bad_image_size(cptestctx: &ControlPlaneTestContext) {
-    let client = &cptestctx.external_client;
-    DiskTest::new(&cptestctx).await;
-
-    // need a project to post to
-    create_project(client, PROJECT_NAME).await;
-
-    let server = ServerBuilder::new().run().unwrap();
-    server.expect(
-        Expectation::matching(request::method_path("HEAD", "/image.raw"))
-            .times(1..)
-            .respond_with(status_code(200).append_header(
-                "Content-Length",
-                format!("{}", 4096 * 1000 + 100),
-            )),
-    );
-
-    let image_create_params = get_image_create(params::ImageSource::Url {
-        url: server.url("/image.raw").to_string(),
-        block_size: BLOCK_SIZE,
-    });
-
-    let images_url = get_project_images_url(PROJECT_NAME);
-
-    let error = NexusRequest::new(
-        RequestBuilder::new(client, Method::POST, &images_url)
-            .body(Some(&image_create_params))
-            .expect_status(Some(StatusCode::BAD_REQUEST)),
-    )
-    .authn_as(AuthnMode::PrivilegedUser)
-    .execute()
-    .await
-    .expect("unexpected success")
-    .parsed_body::<HttpErrorResponseBody>()
-    .unwrap();
-    assert_eq!(
-        error.message,
-        format!("unsupported value for \"size\": total size {} must be divisible by block size {}", 4096*1000 + 100, 512)
-    );
-}
-
 #[nexus_test]
 async fn test_make_disk_from_image(cptestctx: &ControlPlaneTestContext) {
     let client = &cptestctx.external_client;
@@ -329,23 +143,10 @@ async fn test_make_disk_from_image(cptestctx: &ControlPlaneTestContext) {
     // need a project to post both disk and image to
     create_project(client, PROJECT_NAME).await;
 
-    let server = ServerBuilder::new().run().unwrap();
-    server.expect(
-        Expectation::matching(request::method_path("HEAD", "/alpine/edge.raw"))
-            .times(1..)
-            .respond_with(
-                status_code(200).append_header(
-                    "Content-Length",
-                    format!("{}", 4096 * 1000),
-                ),
-            ),
-    );
-
     // Create an image in the project
-    let image_create_params = get_image_create(params::ImageSource::Url {
-        url: server.url("/alpine/edge.raw").to_string(),
-        block_size: BLOCK_SIZE,
-    });
+    let image_create_params = get_image_create(
+        params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine,
+    );
 
     let images_url = get_project_images_url(PROJECT_NAME);
 
@@ -384,23 +185,10 @@ async fn test_make_disk_from_other_project_image_fails(
     create_project(client, PROJECT_NAME).await;
     let another_project = create_project(client, "another-proj").await;
 
-    let server = ServerBuilder::new().run().unwrap();
-    server.expect(
-        Expectation::matching(request::method_path("HEAD", "/image.raw"))
-            .times(1..)
-            .respond_with(
-                status_code(200).append_header(
-                    "Content-Length",
-                    format!("{}", 4096 * 1000),
-                ),
-            ),
-    );
-
     let images_url = get_project_images_url(PROJECT_NAME);
-    let image_create_params = get_image_create(params::ImageSource::Url {
-        url: server.url("/image.raw").to_string(),
-        block_size: BLOCK_SIZE,
-    });
+    let image_create_params = get_image_create(
+        params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine,
+    );
     let image =
         NexusRequest::objects_post(client, &images_url, &image_create_params)
             .authn_as(AuthnMode::PrivilegedUser)
@@ -443,20 +231,10 @@ async fn test_make_disk_from_image_too_small(
     // need a project to post both disk and image to
     create_project(client, PROJECT_NAME).await;
 
-    let server = ServerBuilder::new().run().unwrap();
-    server.expect(
-        Expectation::matching(request::method_path("HEAD", "/alpine/edge.raw"))
-            .times(1..)
- .respond_with( - status_code(200).append_header("Content-Length", "2147483648"), - ), - ); - // Create an image in the project - let image_create_params = get_image_create(params::ImageSource::Url { - url: server.url("/alpine/edge.raw").to_string(), - block_size: BLOCK_SIZE, - }); + let image_create_params = get_image_create( + params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine, + ); let images_url = get_project_images_url(PROJECT_NAME); @@ -474,7 +252,9 @@ async fn test_make_disk_from_image_too_small( disk_source: params::DiskSource::Image { image_id: alpine_image.identity.id, }, - size: ByteCount::from(1073741824), + + // Nexus defines YouCanBootAnythingAsLongAsItsAlpine size as 100M + size: ByteCount::from(90 * 1024 * 1024), }; let disks_url = format!("/v1/disks?project={}", PROJECT_NAME); @@ -493,7 +273,7 @@ async fn test_make_disk_from_image_too_small( error.message, format!( "disk size {} must be greater than or equal to image size {}", - 1073741824_u32, 2147483648_u32, + 94371840_u32, 104857600_u32, ) ); } @@ -503,18 +283,6 @@ async fn test_image_promotion(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - let server = ServerBuilder::new().run().unwrap(); - server.expect( - Expectation::matching(request::method_path("HEAD", "/image.raw")) - .times(1..) - .respond_with( - status_code(200).append_header( - "Content-Length", - format!("{}", 4096 * 1000), - ), - ), - ); - let silo_images_url = "/v1/images"; let images_url = get_project_images_url(PROJECT_NAME); @@ -528,10 +296,9 @@ async fn test_image_promotion(cptestctx: &ControlPlaneTestContext) { assert_eq!(images.len(), 0); - let image_create_params = get_image_create(params::ImageSource::Url { - url: server.url("/image.raw").to_string(), - block_size: BLOCK_SIZE, - }); + let image_create_params = get_image_create( + params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine, + ); NexusRequest::objects_post(client, &images_url, &image_create_params) .authn_as(AuthnMode::PrivilegedUser) @@ -631,28 +398,15 @@ async fn test_image_from_other_project_snapshot_fails( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; - let server = ServerBuilder::new().run().unwrap(); - server.expect( - Expectation::matching(request::method_path("HEAD", "/image.raw")) - .times(1..) - .respond_with( - status_code(200).append_header( - "Content-Length", - format!("{}", 4096 * 1000), - ), - ), - ); - create_project(client, PROJECT_NAME).await; let images_url = get_project_images_url(PROJECT_NAME); let disks_url = format!("/v1/disks?project={}", PROJECT_NAME); let snapshots_url = format!("/v1/snapshots?project={}", PROJECT_NAME); // Create an image - let image_create_params = get_image_create(params::ImageSource::Url { - url: server.url("/image.raw").to_string(), - block_size: BLOCK_SIZE, - }); + let image_create_params = get_image_create( + params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine, + ); let image: views::Image = NexusRequest::objects_post(client, &images_url, &image_create_params) .authn_as(AuthnMode::PrivilegedUser) @@ -749,25 +503,12 @@ async fn test_image_deletion_permissions(cptestctx: &ControlPlaneTestContext) { // Create an image in the default silo using the privileged user - let server = ServerBuilder::new().run().unwrap(); - server.expect( - Expectation::matching(request::method_path("HEAD", "/image.raw")) - .times(1..) 
-            .respond_with(
-                status_code(200).append_header(
-                    "Content-Length",
-                    format!("{}", 4096 * 1000),
-                ),
-            ),
-    );
-
     let silo_images_url = "/v1/images";
     let images_url = get_project_images_url(PROJECT_NAME);
 
-    let image_create_params = get_image_create(params::ImageSource::Url {
-        url: server.url("/image.raw").to_string(),
-        block_size: BLOCK_SIZE,
-    });
+    let image_create_params = get_image_create(
+        params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine,
+    );
 
     let image =
         NexusRequest::objects_post(client, &images_url, &image_create_params)
diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs
index ea633be9dc..19b507f5bb 100644
--- a/nexus/tests/integration_tests/instances.rs
+++ b/nexus/tests/integration_tests/instances.rs
@@ -4,11 +4,14 @@
 
 //! Tests basic instance support in the API
 
+use super::external_ips::floating_ip_get;
+use super::external_ips::get_floating_ip_by_id_url;
 use super::metrics::{get_latest_silo_metric, get_latest_system_metric};
 
 use camino::Utf8Path;
 use http::method::Method;
 use http::StatusCode;
+use itertools::Itertools;
 use nexus_db_queries::context::OpContext;
 use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO;
 use nexus_db_queries::db::fixed_data::silo::SILO_ID;
@@ -18,6 +21,7 @@ use nexus_test_utils::http_testing::AuthnMode;
 use nexus_test_utils::http_testing::NexusRequest;
 use nexus_test_utils::http_testing::RequestBuilder;
 use nexus_test_utils::resource_helpers::create_disk;
+use nexus_test_utils::resource_helpers::create_floating_ip;
 use nexus_test_utils::resource_helpers::create_ip_pool;
 use nexus_test_utils::resource_helpers::create_local_user;
 use nexus_test_utils::resource_helpers::create_silo;
@@ -54,6 +58,7 @@ use omicron_nexus::TestInterfaces as _;
 use omicron_sled_agent::sim::SledAgent;
 use sled_agent_client::TestInterfaces as _;
 use std::convert::TryFrom;
+use std::net::Ipv4Addr;
 use std::sync::Arc;
 use uuid::Uuid;
 
@@ -68,8 +73,6 @@ use nexus_types::external_api::shared::SiloRole;
 use omicron_sled_agent::sim;
 
-use httptest::{matchers::*, responders::*, Expectation, ServerBuilder};
-
 type ControlPlaneTestContext =
     nexus_test_utils::ControlPlaneTestContext<omicron_nexus::Server>;
 
@@ -1275,18 +1278,6 @@ async fn test_instance_using_image_from_other_project_fails(
     let client = &cptestctx.external_client;
     create_org_and_project(&client).await;
 
-    let server = ServerBuilder::new().run().unwrap();
-    server.expect(
-        Expectation::matching(request::method_path("HEAD", "/image.raw"))
-            .times(1..)
-            .respond_with(
-                status_code(200).append_header(
-                    "Content-Length",
-                    format!("{}", 4096 * 1000),
-                ),
-            ),
-    );
-
     // Create an image in springfield-squidport.
     let images_url = format!("/v1/images?project={}", PROJECT_NAME);
 
     let image_create_params = params::ImageCreate {
@@ -1298,10 +1289,7 @@ async fn test_instance_using_image_from_other_project_fails(
         },
         os: "alpine".to_string(),
         version: "edge".to_string(),
-        source: params::ImageSource::Url {
-            url: server.url("/image.raw").to_string(),
-            block_size: params::BlockSize::try_from(512).unwrap(),
-        },
+        source: params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine,
     };
     let image =
         NexusRequest::objects_post(client, &images_url, &image_create_params)
@@ -3202,7 +3190,7 @@ async fn test_instances_memory_greater_than_max_size(
     assert!(error.message.contains("memory must be less than"));
 }
 
-async fn expect_instance_start_fail_unavailable(
+async fn expect_instance_start_fail_507(
     client: &ClientTestContext,
     instance_name: &str,
 ) {
@@ -3211,13 +3199,15 @@
         http::Method::POST,
         &get_instance_start_url(instance_name),
     )
-    .expect_status(Some(http::StatusCode::SERVICE_UNAVAILABLE));
+    .expect_status(Some(http::StatusCode::INSUFFICIENT_STORAGE));
 
     NexusRequest::new(builder)
         .authn_as(AuthnMode::PrivilegedUser)
         .execute()
         .await
-        .expect("Expected instance start to fail with SERVICE_UNAVAILABLE");
+        .expect(
+            "Expected instance start to fail with 507 Insufficient Storage",
+        );
 }
 
 async fn expect_instance_start_ok(
@@ -3308,9 +3298,7 @@ async fn test_cannot_provision_instance_beyond_cpu_capacity(
     for config in &configs {
         match config.2 {
             Ok(_) => expect_instance_start_ok(client, config.0).await,
-            Err(_) => {
-                expect_instance_start_fail_unavailable(client, config.0).await
-            }
+            Err(_) => expect_instance_start_fail_507(client, config.0).await,
         }
     }
 
@@ -3416,9 +3404,7 @@ async fn test_cannot_provision_instance_beyond_ram_capacity(
     for config in &configs {
         match config.2 {
             Ok(_) => expect_instance_start_ok(client, config.0).await,
-            Err(_) => {
-                expect_instance_start_fail_unavailable(client, config.0).await
-            }
+            Err(_) => expect_instance_start_fail_507(client, config.0).await,
         }
     }
 
@@ -3645,6 +3631,139 @@ async fn test_instance_ephemeral_ip_from_correct_pool(
     );
 }
 
+#[nexus_test]
+async fn test_instance_attach_several_external_ips(
+    cptestctx: &ControlPlaneTestContext,
+) {
+    let client = &cptestctx.external_client;
+
+    let _ = create_project(&client, PROJECT_NAME).await;
+
+    // Create a single (large) IP pool
+    let default_pool_range = IpRange::V4(
+        Ipv4Range::new(
+            std::net::Ipv4Addr::new(10, 0, 0, 1),
+            std::net::Ipv4Addr::new(10, 0, 0, 10),
+        )
+        .unwrap(),
+    );
+    populate_ip_pool(&client, "default", Some(default_pool_range)).await;
+
+    // Create seven floating IPs for the instance; together with the ephemeral
+    // IP below, this gives the instance 8 external IPs in total.
+    let mut external_ip_create =
+        vec![params::ExternalIpCreate::Ephemeral { pool_name: None }];
+    let mut fips = vec![];
+    for i in 1..8 {
+        let name = format!("fip-{i}");
+        fips.push(
+            create_floating_ip(&client, &name, PROJECT_NAME, None, None).await,
+        );
+        external_ip_create.push(params::ExternalIpCreate::Floating {
+            floating_ip_name: name.parse().unwrap(),
+        });
+    }
+
+    // Create an instance with pool name blank, expect IP from default pool
+    let instance_name = "many-fips";
+    let instance = create_instance_with(
+        &client,
+        PROJECT_NAME,
+        instance_name,
+        &params::InstanceNetworkInterfaceAttachment::Default,
+        vec![],
+        external_ip_create,
+    )
+    .await;
+
+    // Verify that all external IPs are visible on the instance and have
+    // been allocated in order.
+    let external_ips =
+        fetch_instance_external_ips(&client, instance_name).await;
+    assert_eq!(external_ips.len(), 8);
+    eprintln!("{external_ips:?}");
+    for (i, eip) in external_ips
+        .iter()
+        .sorted_unstable_by(|a, b| a.ip.cmp(&b.ip))
+        .enumerate()
+    {
+        let last_octet = i + if i != external_ips.len() - 1 {
+            assert_eq!(eip.kind, IpKind::Floating);
+            1
+        } else {
+            // SNAT will occupy 10.0.0.8 here, since it is alloc'd before
+            // the ephemeral.
+            assert_eq!(eip.kind, IpKind::Ephemeral);
+            2
+        };
+        assert_eq!(eip.ip, Ipv4Addr::new(10, 0, 0, last_octet as u8));
+    }
+
+    // Verify that all floating IPs are bound to their parent instance.
+    for fip in fips {
+        let fetched_fip = floating_ip_get(
+            &client,
+            &get_floating_ip_by_id_url(&fip.identity.id),
+        )
+        .await;
+        assert_eq!(fetched_fip.instance_id, Some(instance.identity.id));
+    }
+}
+
+#[nexus_test]
+async fn test_instance_allow_only_one_ephemeral_ip(
+    cptestctx: &ControlPlaneTestContext,
+) {
+    let client = &cptestctx.external_client;
+
+    let _ = create_project(&client, PROJECT_NAME).await;
+
+    // Create one IP pool with space for two ephemerals.
+    let default_pool_range = IpRange::V4(
+        Ipv4Range::new(
+            std::net::Ipv4Addr::new(10, 0, 0, 1),
+            std::net::Ipv4Addr::new(10, 0, 0, 2),
+        )
+        .unwrap(),
+    );
+    populate_ip_pool(&client, "default", Some(default_pool_range)).await;
+
+    let ephemeral_create = params::ExternalIpCreate::Ephemeral {
+        pool_name: Some("default".parse().unwrap()),
+    };
+    let error: HttpErrorResponseBody = NexusRequest::new(
+        RequestBuilder::new(client, Method::POST, &get_instances_url())
+            .body(Some(&params::InstanceCreate {
+                identity: IdentityMetadataCreateParams {
+                    name: "default-pool-inst".parse().unwrap(),
+                    description: "instance default-pool-inst".into(),
+                },
+                ncpus: InstanceCpuCount(4),
+                memory: ByteCount::from_gibibytes_u32(1),
+                hostname: String::from("the_host"),
+                user_data:
+                    b"#cloud-config\nsystem_info:\n  default_user:\n    name: oxide"
+                        .to_vec(),
+                network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
+                external_ips: vec![
+                    ephemeral_create.clone(), ephemeral_create
+                ],
+                disks: vec![],
+                start: true,
+            }))
+            .expect_status(Some(StatusCode::BAD_REQUEST)),
+    )
+    .authn_as(AuthnMode::PrivilegedUser)
+    .execute()
+    .await
+    .unwrap()
+    .parsed_body()
+    .unwrap();
+    assert_eq!(
+        error.message,
+        "An instance may not have more than 1 ephemeral IP address"
+    );
+}
+
 async fn create_instance_with_pool(
     client: &ClientTestContext,
     instance_name: &str,
@@ -3663,10 +3782,10 @@ async fn create_instance_with_pool(
         .await
 }
 
-async fn fetch_instance_ephemeral_ip(
+async fn fetch_instance_external_ips(
     client: &ClientTestContext,
     instance_name: &str,
-) -> views::ExternalIp {
+) -> Vec<views::ExternalIp> {
     let ips_url = format!(
         "/v1/instances/{}/external-ips?project={}",
         instance_name, PROJECT_NAME
     );
     let ips = NexusRequest::object_get(client, &ips_url)
         .authn_as(AuthnMode::PrivilegedUser)
         .execute()
         .await
         .expect("Failed to fetch external IPs")
         .parsed_body::<ResultsPage<views::ExternalIp>>()
         .expect("Failed to parse external IPs");
-    assert_eq!(ips.items.len(), 1);
-    assert_eq!(ips.items[0].kind, IpKind::Ephemeral);
-    ips.items[0].clone()
+    ips.items
+}
+
+async fn fetch_instance_ephemeral_ip(
+    client: &ClientTestContext,
+    instance_name: &str,
+) -> views::ExternalIp {
+    fetch_instance_external_ips(client, instance_name)
+        .await
+        .into_iter()
+        .find(|v| v.kind == IpKind::Ephemeral)
+        .unwrap()
+}
 
 #[nexus_test]
@@ -3783,6 +3911,30 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) {
     instance_simulate_with_opctx(nexus, &instance.identity.id, &opctx).await;
     let instance = instance_get_as(&client, &instance_url, authn).await;
     assert_eq!(instance.runtime.run_state, InstanceState::Running);
+
+    // Stop the instance
+    NexusRequest::new(
+        RequestBuilder::new(
+            client,
+            Method::POST,
+            &format!("/v1/instances/{}/stop", instance.identity.id),
+        )
+        .body(None as Option<&serde_json::Value>)
+        .expect_status(Some(StatusCode::ACCEPTED)),
+    )
+    .authn_as(AuthnMode::SiloUser(user_id))
+    .execute()
+    .await
+    .expect("Failed to stop the instance");
+
+    instance_simulate_with_opctx(nexus, &instance.identity.id, &opctx).await;
+
+    // Delete the instance
+    NexusRequest::object_delete(client, &instance_url)
+        .authn_as(AuthnMode::SiloUser(user_id))
+        .execute()
+        .await
+        .expect("Failed to delete the instance");
 }
 
 /// Test that appropriate OPTE V2P mappings are created and deleted.
diff --git a/nexus/tests/integration_tests/mod.rs b/nexus/tests/integration_tests/mod.rs
index e0bb09de4f..53de24c518 100644
--- a/nexus/tests/integration_tests/mod.rs
+++ b/nexus/tests/integration_tests/mod.rs
@@ -12,6 +12,8 @@ mod commands;
 mod console_api;
 mod device_auth;
 mod disks;
+mod external_ips;
+mod host_phase1_updater;
 mod images;
 mod initialization;
 mod instances;
@@ -25,6 +27,7 @@ mod projects;
 mod rack;
 mod role_assignments;
 mod roles_builtin;
+mod rot_updater;
 mod router_routes;
 mod saml;
 mod schema;
diff --git a/nexus/tests/integration_tests/oximeter.rs b/nexus/tests/integration_tests/oximeter.rs
index 65aaa18642..7dc453d713 100644
--- a/nexus/tests/integration_tests/oximeter.rs
+++ b/nexus/tests/integration_tests/oximeter.rs
@@ -9,6 +9,7 @@ use http::StatusCode;
 use nexus_test_interface::NexusServer;
 use nexus_test_utils_macros::nexus_test;
 use omicron_common::api::internal::nexus::ProducerEndpoint;
+use omicron_common::api::internal::nexus::ProducerKind;
 use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError};
 use oximeter_db::DbWrite;
 use std::collections::BTreeSet;
@@ -360,6 +361,7 @@ async fn test_oximeter_collector_reregistration_gets_all_assignments() {
         ids.insert(id);
         let info = ProducerEndpoint {
             id,
+            kind: ProducerKind::Service,
             address: SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 12345),
             base_route: String::from("/collect"),
             interval: Duration::from_secs(1),
diff --git a/nexus/tests/integration_tests/pantry.rs b/nexus/tests/integration_tests/pantry.rs
index 26e27e92ee..dc4e8e6c95 100644
--- a/nexus/tests/integration_tests/pantry.rs
+++ b/nexus/tests/integration_tests/pantry.rs
@@ -302,25 +302,6 @@ async fn bulk_write_stop(
         .unwrap();
 }
 
-async fn import_blocks_from_url(client: &ClientTestContext) {
-    // Import blocks from a URL
-    let import_blocks_from_url_url =
-        format!("/v1/disks/{}/import?project={}", DISK_NAME, PROJECT_NAME,);
-
-    NexusRequest::new(
-        RequestBuilder::new(client, Method::POST, &import_blocks_from_url_url)
-            .body(Some(&params::ImportBlocksFromUrl {
-                url: "http://fake.endpoint/image.iso".to_string(),
-                expected_digest: None,
-            }))
-            .expect_status(Some(StatusCode::NO_CONTENT)),
-    )
-    .authn_as(AuthnMode::PrivilegedUser)
-    .execute()
-    .await
-    .unwrap();
-}
-
 async fn finalize_import(
     client: &ClientTestContext,
     expected_status: StatusCode,
@@ -461,33 +442,6 @@ async fn test_cannot_mount_import_from_bulk_writes_disk(
     .await;
 }
 
-// Test the normal flow of importing from a URL
-#[nexus_test]
-async fn test_import_blocks_from_url(cptestctx: &ControlPlaneTestContext) {
-    let client = &cptestctx.external_client;
-    let nexus = &cptestctx.server.apictx().nexus;
-
-    DiskTest::new(&cptestctx).await;
-
-    create_org_and_project(client).await;
-
-    create_disk_with_state_importing_blocks(client).await;
-
-    // Import blocks from a URL
-    import_blocks_from_url(client).await;
-
-    // Validate disk is in state ImportReady
-    validate_disk_state(client, DiskState::ImportReady).await;
-
-    // Finalize import
-    finalize_import(client, StatusCode::NO_CONTENT).await;
-
-    // Validate disk is in state Detached
-    validate_disk_state(client, DiskState::Detached).await;
-
-    // Create an instance to attach the disk.
-    create_instance_and_attach_disk(client, nexus, StatusCode::ACCEPTED).await;
-}
-
 // Test the normal flow of importing from bulk writes
 #[nexus_test]
 async fn test_import_blocks_with_bulk_write(
diff --git a/nexus/tests/integration_tests/rack.rs b/nexus/tests/integration_tests/rack.rs
index 2c191f27ae..9f77223871 100644
--- a/nexus/tests/integration_tests/rack.rs
+++ b/nexus/tests/integration_tests/rack.rs
@@ -10,8 +10,14 @@ use nexus_test_utils::http_testing::RequestBuilder;
 use nexus_test_utils::TEST_SUITE_PASSWORD;
 use nexus_test_utils_macros::nexus_test;
 use nexus_types::external_api::params;
+use nexus_types::external_api::shared::UninitializedSled;
 use nexus_types::external_api::views::Rack;
+use nexus_types::internal_api::params::Baseboard;
+use nexus_types::internal_api::params::SledAgentStartupInfo;
+use nexus_types::internal_api::params::SledRole;
+use omicron_common::api::external::ByteCount;
 use omicron_nexus::TestInterfaces;
+use uuid::Uuid;
 
 type ControlPlaneTestContext =
     nexus_test_utils::ControlPlaneTestContext<omicron_nexus::Server>;
@@ -77,3 +83,63 @@ async fn test_rack_initialization(cptestctx: &ControlPlaneTestContext) {
     )
     .await;
 }
+
+#[nexus_test]
+async fn test_uninitialized_sled_list(cptestctx: &ControlPlaneTestContext) {
+    let internal_client = &cptestctx.internal_client;
+    let external_client = &cptestctx.external_client;
+    let list_url = "/v1/system/hardware/uninitialized-sleds";
+    let mut uninitialized_sleds =
+        NexusRequest::object_get(external_client, &list_url)
+            .authn_as(AuthnMode::PrivilegedUser)
+            .execute()
+            .await
+            .expect("failed to get uninitialized sleds")
+            .parsed_body::<Vec<UninitializedSled>>()
+            .unwrap();
+    debug!(cptestctx.logctx.log, "{:#?}", uninitialized_sleds);
+
+    // There are currently two fake sim gimlets created in the latest inventory
+    // collection as part of test setup.
+    assert_eq!(2, uninitialized_sleds.len());
+
+    // Insert one of these fake sleds into the `sled` table.
+    // Just pick some random fields other than `baseboard`
+    let baseboard = uninitialized_sleds.pop().unwrap().baseboard;
+    let sled_uuid = Uuid::new_v4();
+    let sa = SledAgentStartupInfo {
+        sa_address: "[fd00:1122:3344:01::1]:8080".parse().unwrap(),
+        role: SledRole::Gimlet,
+        baseboard: Baseboard {
+            serial_number: baseboard.serial,
+            part_number: baseboard.part,
+            revision: baseboard.revision,
+        },
+        usable_hardware_threads: 32,
+        usable_physical_ram: ByteCount::from_gibibytes_u32(100),
+        reservoir_size: ByteCount::from_mebibytes_u32(100),
+    };
+    internal_client
+        .make_request(
+            Method::POST,
+            format!("/sled-agents/{sled_uuid}").as_str(),
+            Some(&sa),
+            StatusCode::NO_CONTENT,
+        )
+        .await
+        .unwrap();
+
+    // Ensure there's only one uninitialized sled remaining, and it's not
+    // the one that was just added into the `sled` table
+    let uninitialized_sleds_2 =
+        NexusRequest::object_get(external_client, &list_url)
+            .authn_as(AuthnMode::PrivilegedUser)
+            .execute()
+            .await
+            .expect("failed to get uninitialized sleds")
+            .parsed_body::<Vec<UninitializedSled>>()
+            .unwrap();
+    debug!(cptestctx.logctx.log, "{:#?}", uninitialized_sleds);
+    assert_eq!(1, uninitialized_sleds_2.len());
+    assert_eq!(uninitialized_sleds, uninitialized_sleds_2);
+}
diff --git a/nexus/tests/integration_tests/rot_updater.rs b/nexus/tests/integration_tests/rot_updater.rs
new file mode 100644
index 0000000000..2e6d65f8b1
--- /dev/null
+++ b/nexus/tests/integration_tests/rot_updater.rs
@@ -0,0 +1,631 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Tests `RotUpdater`'s delivery of updates to RoTs via MGS
+
+use gateway_client::types::{RotSlot, SpType};
+use gateway_messages::{SpPort, UpdateInProgressStatus, UpdateStatus};
+use gateway_test_utils::setup as mgs_setup;
+use hubtools::RawHubrisArchive;
+use hubtools::{CabooseBuilder, HubrisArchiveBuilder};
+use omicron_nexus::app::test_interfaces::{
+    MgsClients, RotUpdater, UpdateProgress,
+};
+use sp_sim::SimulatedSp;
+use sp_sim::SIM_ROT_BOARD;
+use std::mem;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
+use std::time::Duration;
+use tokio::io::AsyncWriteExt;
+use tokio::net::TcpListener;
+use tokio::net::TcpStream;
+use tokio::sync::mpsc;
+use uuid::Uuid;
+
+fn make_fake_rot_image() -> Vec<u8> {
+    let caboose = CabooseBuilder::default()
+        .git_commit("fake-git-commit")
+        .board(SIM_ROT_BOARD)
+        .version("0.0.0")
+        .name("fake-name")
+        .build();
+
+    let mut builder = HubrisArchiveBuilder::with_fake_image();
+    builder.write_caboose(caboose.as_slice()).unwrap();
+    builder.build_to_vec().unwrap()
+}
+
+#[tokio::test]
+async fn test_rot_updater_updates_sled() {
+    // Start MGS + Sim SP.
+    let mgstestctx =
+        mgs_setup::test_setup("test_rot_updater_updates_sled", SpPort::One)
+            .await;
+
+    // Configure an MGS client.
+    let mut mgs_clients =
+        MgsClients::from_clients([gateway_client::Client::new(
+            &mgstestctx.client.url("/").to_string(),
+            mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")),
+        )]);
+
+    // Configure and instantiate a `RotUpdater`.
+    let sp_type = SpType::Sled;
+    let sp_slot = 0;
+    let update_id = Uuid::new_v4();
+    let hubris_archive = make_fake_rot_image();
+    let target_rot_slot = RotSlot::B;
+
+    let rot_updater = RotUpdater::new(
+        sp_type,
+        sp_slot,
+        target_rot_slot,
+        update_id,
+        hubris_archive.clone(),
+        &mgstestctx.logctx.log,
+    );
+
+    // Run the update.
+    rot_updater.update(&mut mgs_clients).await.expect("update failed");
+
+    // Ensure the RoT received the complete update.
+    let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize]
+        .last_rot_update_data()
+        .await
+        .expect("simulated RoT did not receive an update");
+
+    let hubris_archive = RawHubrisArchive::from_vec(hubris_archive).unwrap();
+
+    assert_eq!(
+        hubris_archive.image.data.as_slice(),
+        &*last_update_image,
+        "simulated RoT update contents (len {}) \
+         do not match test generated fake image (len {})",
+        last_update_image.len(),
+        hubris_archive.image.data.len()
+    );
+
+    mgstestctx.teardown().await;
+}
+
+#[tokio::test]
+async fn test_rot_updater_updates_switch() {
+    // Start MGS + Sim SP.
+    let mgstestctx =
+        mgs_setup::test_setup("test_rot_updater_updates_switch", SpPort::One)
+            .await;
+
+    // Configure an MGS client.
+    let mut mgs_clients =
+        MgsClients::from_clients([gateway_client::Client::new(
+            &mgstestctx.client.url("/").to_string(),
+            mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")),
+        )]);
+
+    let sp_type = SpType::Switch;
+    let sp_slot = 0;
+    let update_id = Uuid::new_v4();
+    let hubris_archive = make_fake_rot_image();
+    let target_rot_slot = RotSlot::B;
+
+    let rot_updater = RotUpdater::new(
+        sp_type,
+        sp_slot,
+        target_rot_slot,
+        update_id,
+        hubris_archive.clone(),
+        &mgstestctx.logctx.log,
+    );
+
+    rot_updater.update(&mut mgs_clients).await.expect("update failed");
+
+    let last_update_image = mgstestctx.simrack.sidecars[sp_slot as usize]
+        .last_rot_update_data()
+        .await
+        .expect("simulated RoT did not receive an update");
+
+    let hubris_archive = RawHubrisArchive::from_vec(hubris_archive).unwrap();
+
+    assert_eq!(
+        hubris_archive.image.data.as_slice(),
+        &*last_update_image,
+        "simulated RoT update contents (len {}) \
+         do not match test generated fake image (len {})",
+        last_update_image.len(),
+        hubris_archive.image.data.len()
+    );
+
+    mgstestctx.teardown().await;
+}
+
+#[tokio::test]
+async fn test_rot_updater_remembers_successful_mgs_instance() {
+    // Start MGS + Sim SP.
+    let mgstestctx = mgs_setup::test_setup(
+        "test_rot_updater_remembers_successful_mgs_instance",
+        SpPort::One,
+    )
+    .await;
+
+    // Also start a local TCP server that we will claim is an MGS instance, but
+    // it will close connections immediately after accepting them. This will
+    // allow us to count how many connections it receives, while simultaneously
+    // causing errors in the RotUpdater when it attempts to use this "MGS".
+    let (failing_mgs_task, failing_mgs_addr, failing_mgs_conn_counter) = {
+        let socket = TcpListener::bind("[::1]:0").await.unwrap();
+        let addr = socket.local_addr().unwrap();
+        let conn_count = Arc::new(AtomicUsize::new(0));
+
+        let task = {
+            let conn_count = Arc::clone(&conn_count);
+            tokio::spawn(async move {
+                loop {
+                    let (mut stream, _peer) = socket.accept().await.unwrap();
+                    conn_count.fetch_add(1, Ordering::SeqCst);
+                    stream.shutdown().await.unwrap();
+                }
+            })
+        };
+
+        (task, addr, conn_count)
+    };
+
+    // Order the MGS clients such that the bogus MGS that immediately closes
+    // connections comes first. `RotUpdater` should remember that the second
+    // MGS instance succeeds, and only send subsequent requests to it: we
+    // should only see a single attempted connection to the bogus MGS, even
+    // though delivering an update requires a bare minimum of four requests
+    // (start the update, query the status, mark the target slot active, reset
+    // the RoT) and often more (if repeated queries are required to wait for
+    // completion).
+ let mut mgs_clients = MgsClients::from_clients([ + gateway_client::Client::new( + &format!("http://{failing_mgs_addr}"), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient1")), + ), + gateway_client::Client::new( + &mgstestctx.client.url("/").to_string(), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), + ), + ]); + + let sp_type = SpType::Sled; + let sp_slot = 0; + let update_id = Uuid::new_v4(); + let hubris_archive = make_fake_rot_image(); + let target_rot_slot = RotSlot::B; + + let rot_updater = RotUpdater::new( + sp_type, + sp_slot, + target_rot_slot, + update_id, + hubris_archive.clone(), + &mgstestctx.logctx.log, + ); + + rot_updater.update(&mut mgs_clients).await.expect("update failed"); + + let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize] + .last_rot_update_data() + .await + .expect("simulated RoT did not receive an update"); + + let hubris_archive = RawHubrisArchive::from_vec(hubris_archive).unwrap(); + + assert_eq!( + hubris_archive.image.data.as_slice(), + &*last_update_image, + "simulated RoT update contents (len {}) \ + do not match test generated fake image (len {})", + last_update_image.len(), + hubris_archive.image.data.len() + ); + + // Check that our bogus MGS only received a single connection attempt. + // (After RotUpdater failed to talk to this instance, it should have fallen + // back to the valid one for all further requests.) + assert_eq!( + failing_mgs_conn_counter.load(Ordering::SeqCst), + 1, + "bogus MGS instance didn't receive the expected number of connections" + ); + failing_mgs_task.abort(); + + mgstestctx.teardown().await; +} + +#[tokio::test] +async fn test_rot_updater_switches_mgs_instances_on_failure() { + enum MgsProxy { + One(TcpStream), + Two(TcpStream), + } + + // Start MGS + Sim SP. + let mgstestctx = mgs_setup::test_setup( + "test_rot_updater_switches_mgs_instances_on_failure", + SpPort::One, + ) + .await; + let mgs_bind_addr = mgstestctx.client.bind_address; + + let spawn_mgs_proxy_task = |mut stream: TcpStream| { + tokio::spawn(async move { + let mut mgs_stream = TcpStream::connect(mgs_bind_addr) + .await + .expect("failed to connect to MGS"); + tokio::io::copy_bidirectional(&mut stream, &mut mgs_stream) + .await + .expect("failed to proxy connection to MGS"); + }) + }; + + // Start two MGS proxy tasks; when each receives an incoming TCP connection, + // it forwards that `TcpStream` along the `mgs_proxy_connections` channel + // along with a tag of which proxy it is. We'll use this below to flip flop + // between MGS "instances" (really these two proxies). + let (mgs_proxy_connections_tx, mut mgs_proxy_connections_rx) = + mpsc::unbounded_channel(); + let (mgs_proxy_one_task, mgs_proxy_one_addr) = { + let socket = TcpListener::bind("[::1]:0").await.unwrap(); + let addr = socket.local_addr().unwrap(); + let mgs_proxy_connections_tx = mgs_proxy_connections_tx.clone(); + let task = tokio::spawn(async move { + loop { + let (stream, _peer) = socket.accept().await.unwrap(); + mgs_proxy_connections_tx.send(MgsProxy::One(stream)).unwrap(); + } + }); + (task, addr) + }; + let (mgs_proxy_two_task, mgs_proxy_two_addr) = { + let socket = TcpListener::bind("[::1]:0").await.unwrap(); + let addr = socket.local_addr().unwrap(); + let task = tokio::spawn(async move { + loop { + let (stream, _peer) = socket.accept().await.unwrap(); + mgs_proxy_connections_tx.send(MgsProxy::Two(stream)).unwrap(); + } + }); + (task, addr) + }; + + // Disable connection pooling so each request gets a new TCP connection. 
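+    // (With pooling enabled, reqwest could reuse one TCP stream for several
+    // HTTP requests, and the per-proxy connection accounting below would no
+    // longer map one request to one connection.)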
+ let client = + reqwest::Client::builder().pool_max_idle_per_host(0).build().unwrap(); + + // Configure two MGS clients pointed at our two proxy tasks. + let mut mgs_clients = MgsClients::from_clients([ + gateway_client::Client::new_with_client( + &format!("http://{mgs_proxy_one_addr}"), + client.clone(), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient1")), + ), + gateway_client::Client::new_with_client( + &format!("http://{mgs_proxy_two_addr}"), + client, + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient2")), + ), + ]); + + let sp_type = SpType::Sled; + let sp_slot = 0; + let update_id = Uuid::new_v4(); + let hubris_archive = make_fake_rot_image(); + let target_rot_slot = RotSlot::B; + + let rot_updater = RotUpdater::new( + sp_type, + sp_slot, + target_rot_slot, + update_id, + hubris_archive.clone(), + &mgstestctx.logctx.log, + ); + + // Spawn the actual update task. + let mut update_task = + tokio::spawn(async move { rot_updater.update(&mut mgs_clients).await }); + + // Loop over incoming requests. We expect this sequence: + // + // 1. Connection arrives on the first proxy + // 2. We spawn a task to service that request, and set `should_swap` + // 3. Connection arrives on the first proxy + // 4. We drop that connection, flip `expected_proxy`, and clear + // `should_swap` + // 5. Connection arrives on the second proxy + // 6. We spawn a task to service that request, and set `should_swap` + // 7. Connection arrives on the second proxy + // 8. We drop that connection, flip `expected_proxy`, and clear + // `should_swap` + // + // ... repeat until the update is complete. + let mut expected_proxy = 0; + let mut proxy_one_count = 0; + let mut proxy_two_count = 0; + let mut total_requests_handled = 0; + let mut should_swap = false; + loop { + tokio::select! { + Some(proxy_stream) = mgs_proxy_connections_rx.recv() => { + let stream = match proxy_stream { + MgsProxy::One(stream) => { + assert_eq!(expected_proxy, 0); + proxy_one_count += 1; + stream + } + MgsProxy::Two(stream) => { + assert_eq!(expected_proxy, 1); + proxy_two_count += 1; + stream + } + }; + + // Should we trigger `RotUpdater` to swap to the other MGS + // (proxy)? If so, do that by dropping this connection (which + // will cause a client failure) and note that we expect the next + // incoming request to come on the other proxy. + if should_swap { + mem::drop(stream); + expected_proxy ^= 1; + should_swap = false; + } else { + // Otherwise, handle this connection. + total_requests_handled += 1; + spawn_mgs_proxy_task(stream); + should_swap = true; + } + } + + result = &mut update_task => { + match result { + Ok(Ok(())) => { + mgs_proxy_one_task.abort(); + mgs_proxy_two_task.abort(); + break; + } + Ok(Err(err)) => panic!("update failed: {err}"), + Err(err) => panic!("update task panicked: {err}"), + } + } + } + } + + // An RoT update requires a minimum of 4 requests to MGS: post the update, + // check the status, post to mark the new target slot active, and post an + // RoT reset. There may be more requests if the update is not yet complete + // when the status is checked, but we can just check that each of our + // proxies received at least 2 incoming requests; based on our outline + // above, if we got the minimum of 4 requests, it would look like this: + // + // 1. POST update -> first proxy (success) + // 2. GET status -> first proxy (fail) + // 3. GET status retry -> second proxy (success) + // 4. POST new target slot -> second proxy (fail) + // 5. POST new target slot -> first proxy (success) + // 6. 
POST reset -> first proxy (fail)
+    //   7. POST reset -> second proxy (success)
+    //
+    // This pattern would repeat if multiple status requests were required, so
+    // we always expect the first proxy to see exactly one more connection
+    // attempt than the second (because it went first before they started
+    // swapping), and the two together should see a total of one less than
+    // double the number of successful requests required.
+    // (For the minimal case: four successful requests yield seven connection
+    // attempts, four to the first proxy and three to the second, and
+    // (4 + 3 + 1) / 2 == 4.)
+    assert!(total_requests_handled >= 4);
+    assert_eq!(proxy_one_count, proxy_two_count + 1);
+    assert_eq!(
+        (proxy_one_count + proxy_two_count + 1) / 2,
+        total_requests_handled
+    );
+
+    let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize]
+        .last_rot_update_data()
+        .await
+        .expect("simulated RoT did not receive an update");
+
+    let hubris_archive = RawHubrisArchive::from_vec(hubris_archive).unwrap();
+
+    assert_eq!(
+        hubris_archive.image.data.as_slice(),
+        &*last_update_image,
+        "simulated RoT update contents (len {}) \
+        do not match test generated fake image (len {})",
+        last_update_image.len(),
+        hubris_archive.image.data.len()
+    );
+
+    mgstestctx.teardown().await;
+}
+
+#[tokio::test]
+async fn test_rot_updater_delivers_progress() {
+    // Start MGS + Sim SP.
+    let mgstestctx = mgs_setup::test_setup(
+        "test_rot_updater_delivers_progress",
+        SpPort::One,
+    )
+    .await;
+
+    // Configure an MGS client.
+    let mut mgs_clients =
+        MgsClients::from_clients([gateway_client::Client::new(
+            &mgstestctx.client.url("/").to_string(),
+            mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")),
+        )]);
+
+    let sp_type = SpType::Sled;
+    let sp_slot = 0;
+    let update_id = Uuid::new_v4();
+    let hubris_archive = make_fake_rot_image();
+    let target_rot_slot = RotSlot::B;
+
+    let rot_updater = RotUpdater::new(
+        sp_type,
+        sp_slot,
+        target_rot_slot,
+        update_id,
+        hubris_archive.clone(),
+        &mgstestctx.logctx.log,
+    );
+
+    let hubris_archive = RawHubrisArchive::from_vec(hubris_archive).unwrap();
+    let rot_image_len = hubris_archive.image.data.len() as u32;
+
+    // Subscribe to update progress, and check that there is no status yet; we
+    // haven't started the update.
+    let mut progress = rot_updater.progress_watcher();
+    assert_eq!(*progress.borrow_and_update(), None);
+
+    // Install a semaphore on the requests our target SP will receive so we can
+    // inspect progress messages without racing.
+    let target_sp = &mgstestctx.simrack.gimlets[sp_slot as usize];
+    let sp_accept_sema = target_sp.install_udp_accept_semaphore().await;
+    let mut sp_responses = target_sp.responses_sent_count().unwrap();
+
+    // Spawn the update on a background task so we can watch `progress` as it is
+    // applied.
+    let do_update_task =
+        tokio::spawn(async move { rot_updater.update(&mut mgs_clients).await });
+
+    // Allow the SP to respond to 1 message: the "prepare update" message that
+    // triggers the start of an update, then ensure we see the "started"
+    // progress.
+    sp_accept_sema.send(1).unwrap();
+    progress.changed().await.unwrap();
+    assert_eq!(*progress.borrow_and_update(), Some(UpdateProgress::Started));
+
+    // Ensure our simulated SP is in the state we expect: it's prepared for an
+    // update but has not yet received any data.
+    assert_eq!(
+        target_sp.current_update_status().await,
+        UpdateStatus::InProgress(UpdateInProgressStatus {
+            id: update_id.into(),
+            bytes_received: 0,
+            total_size: rot_image_len,
+        })
+    );
+
+    // Record the number of responses the SP has sent; we'll use
+    // `sp_responses.changed()` in the loop below, and want to mark whatever
+    // value this watch channel currently has as seen.
+    sp_responses.borrow_and_update();
+
+    // At this point, there are two clients racing each other to talk to our
+    // simulated SP:
+    //
+    // 1. MGS is trying to deliver the update
+    // 2. `rot_updater` is trying to poll (via MGS) for update status
+    //
+    // and we want to ensure that we see any relevant progress reports from
+    // `rot_updater`. We'll let one MGS -> SP message through at a time (waiting
+    // until our SP has responded by waiting for a change to `sp_responses`)
+    // then check its update state: if it changed, the packet we let through was
+    // data from MGS; otherwise, it was a status request from `rot_updater`.
+    //
+    // This loop will continue until either:
+    //
+    // 1. We see an `UpdateStatus::InProgress` message indicating 100% delivery,
+    //    at which point we break out of the loop
+    // 2. We time out waiting for the previous step (by timing out for either
+    //    the SP to process a request or `rot_updater` to realize there's been
+    //    progress), at which point we panic and fail this test.
+    let mut prev_bytes_received = 0;
+    let mut expect_progress_change = false;
+    loop {
+        // Allow the SP to accept and respond to a single UDP packet.
+        sp_accept_sema.send(1).unwrap();
+
+        // Wait until the SP has sent a response, with a safety rail that we
+        // haven't screwed up our untangle-the-race logic: if we don't see the
+        // SP process any new messages after several seconds, our test is
+        // broken, so fail.
+        tokio::time::timeout(Duration::from_secs(10), sp_responses.changed())
+            .await
+            .expect("timeout waiting for SP response count to change")
+            .expect("sp response count sender dropped");
+
+        // Inspect the SP's in-memory update state; we expect only `InProgress`
+        // or `Complete`, and in either case we note whether we expect to see
+        // status changes from `rot_updater`.
+        match target_sp.current_update_status().await {
+            UpdateStatus::InProgress(rot_progress) => {
+                if rot_progress.bytes_received > prev_bytes_received {
+                    prev_bytes_received = rot_progress.bytes_received;
+                    expect_progress_change = true;
+                    continue;
+                }
+            }
+            UpdateStatus::Complete(_) => {
+                if prev_bytes_received < rot_image_len {
+                    prev_bytes_received = rot_image_len;
+                }
+            }
+            status @ (UpdateStatus::None
+            | UpdateStatus::Preparing(_)
+            | UpdateStatus::SpUpdateAuxFlashChckScan { .. }
+            | UpdateStatus::Aborted(_)
+            | UpdateStatus::Failed { .. }
+            | UpdateStatus::RotError { .. }) => {
+                panic!("unexpected status {status:?}");
+            }
+        }
+
+        // If we get here, the most recent packet did _not_ change the SP's
+        // internal update state, so it was a status request from `rot_updater`.
+        // If we expect the updater to see new progress, wait for that change
+        // here.
+        if expect_progress_change || prev_bytes_received == rot_image_len {
+            // Safety rail that we haven't screwed up our untangle-the-race
+            // logic: if we don't see new progress after several seconds, our
+            // test is broken, so fail.
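+            // (`changed()` on a `tokio::sync::watch` receiver resolves once a
+            // value newer than the last one marked seen arrives, and
+            // `borrow_and_update()` marks what it returns as seen, so this
+            // pairing neither misses nor double-counts progress updates.)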
+            tokio::time::timeout(Duration::from_secs(10), progress.changed())
+                .await
+                .expect("progress timeout")
+                .expect("progress watch sender dropped");
+            let status = progress.borrow_and_update().clone().unwrap();
+            expect_progress_change = false;
+
+            // We're done if we've observed the final progress message.
+            if let UpdateProgress::InProgress { progress: Some(value) } = status
+            {
+                if value == 1.0 {
+                    break;
+                }
+            } else {
+                panic!("unexpected progress status {status:?}");
+            }
+        }
+    }
+
+    // The update has been fully delivered to the SP, but we don't see an
+    // `UpdateStatus::Complete` message until the RoT is reset. Release the SP
+    // semaphore since we're no longer racing to observe intermediate progress,
+    // and wait for the completion message.
+    sp_accept_sema.send(usize::MAX).unwrap();
+    progress.changed().await.unwrap();
+    assert_eq!(*progress.borrow_and_update(), Some(UpdateProgress::Complete));
+
+    // drop our progress receiver so `do_update_task` can complete
+    mem::drop(progress);
+
+    do_update_task.await.expect("update task panicked").expect("update failed");
+
+    let last_update_image = target_sp
+        .last_rot_update_data()
+        .await
+        .expect("simulated RoT did not receive an update");
+
+    assert_eq!(
+        hubris_archive.image.data.as_slice(),
+        &*last_update_image,
+        "simulated RoT update contents (len {}) \
+        do not match test generated fake image (len {})",
+        last_update_image.len(),
+        hubris_archive.image.data.len()
+    );
+
+    mgstestctx.teardown().await;
+}
diff --git a/nexus/tests/integration_tests/router_routes.rs b/nexus/tests/integration_tests/router_routes.rs
index 7a7a33d49d..10c594bba9 100644
--- a/nexus/tests/integration_tests/router_routes.rs
+++ b/nexus/tests/integration_tests/router_routes.rs
@@ -69,7 +69,7 @@ async fn test_router_routes(cptestctx: &ControlPlaneTestContext) {
     // It errors if you try to delete the default route
     let error: dropshot::HttpErrorResponseBody = NexusRequest::expect_failure(
         client,
-        StatusCode::METHOD_NOT_ALLOWED,
+        StatusCode::BAD_REQUEST,
         Method::DELETE,
         get_route_url("system", "default").as_str(),
     )
diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs
index 213e7f9e4f..6feafe415d 100644
--- a/nexus/tests/integration_tests/schema.rs
+++ b/nexus/tests/integration_tests/schema.rs
@@ -629,7 +629,17 @@ impl InformationSchema {
             self.referential_constraints,
             other.referential_constraints
         );
-        similar_asserts::assert_eq!(self.statistics, other.statistics);
+        similar_asserts::assert_eq!(
+            self.statistics,
+            other.statistics,
+            "Statistics did not match. This often means that in dbinit.sql, a new \
+            column was added into the middle of a table rather than to the end. \
+            If that is the case:\n\n \
+            \
+            * Change dbinit.sql to add the column to the end of the table.\n\
+            * Update nexus/db-model/src/schema.rs and the corresponding \
+            Queryable/Insertable struct with the new column ordering."
+        );
         similar_asserts::assert_eq!(self.sequences, other.sequences);
         similar_asserts::assert_eq!(self.pg_indexes, other.pg_indexes);
     }
diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs
index 1dd32e6769..24b04bf718 100644
--- a/nexus/tests/integration_tests/snapshots.rs
+++ b/nexus/tests/integration_tests/snapshots.rs
@@ -35,8 +35,6 @@ use omicron_common::api::external::Name;
 use omicron_nexus::app::MIN_DISK_SIZE_BYTES;
 use uuid::Uuid;
 
-use httptest::{matchers::*, responders::*, Expectation, ServerBuilder};
-
 type ControlPlaneTestContext =
     nexus_test_utils::ControlPlaneTestContext<omicron_nexus::Server>;
 
@@ -64,18 +62,6 @@ async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) {
     let disks_url = get_disks_url();
 
     // Define a global image
-    let server = ServerBuilder::new().run().unwrap();
-    server.expect(
-        Expectation::matching(request::method_path("HEAD", "/image.raw"))
-            .times(1..)
-            .respond_with(
-                status_code(200).append_header(
-                    "Content-Length",
-                    format!("{}", 4096 * 1000),
-                ),
-            ),
-    );
-
     let image_create_params = params::ImageCreate {
         identity: IdentityMetadataCreateParams {
             name: "alpine-edge".parse().unwrap(),
@@ -83,10 +69,7 @@ async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) {
                 "you can boot any image, as long as it's alpine",
             ),
         },
-        source: params::ImageSource::Url {
-            url: server.url("/image.raw").to_string(),
-            block_size: params::BlockSize::try_from(512).unwrap(),
-        },
+        source: params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine,
         os: "alpine".to_string(),
         version: "edge".to_string(),
     };
@@ -184,18 +167,6 @@ async fn test_snapshot_without_instance(cptestctx: &ControlPlaneTestContext) {
     let disks_url = get_disks_url();
 
     // Define a global image
-    let server = ServerBuilder::new().run().unwrap();
-    server.expect(
-        Expectation::matching(request::method_path("HEAD", "/image.raw"))
-            .times(1..)
-            .respond_with(
-                status_code(200).append_header(
-                    "Content-Length",
-                    format!("{}", 4096 * 1000),
-                ),
-            ),
-    );
-
     let image_create_params = params::ImageCreate {
         identity: IdentityMetadataCreateParams {
             name: "alpine-edge".parse().unwrap(),
@@ -203,10 +174,7 @@ async fn test_snapshot_without_instance(cptestctx: &ControlPlaneTestContext) {
                 "you can boot any image, as long as it's alpine",
            ),
         },
-        source: params::ImageSource::Url {
-            url: server.url("/image.raw").to_string(),
-            block_size: params::BlockSize::try_from(512).unwrap(),
-        },
+        source: params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine,
         os: "alpine".to_string(),
         version: "edge".to_string(),
     };
@@ -825,7 +793,7 @@ async fn test_cannot_snapshot_if_no_space(cptestctx: &ControlPlaneTestContext) {
             },
             disk: base_disk_name.into(),
         }))
-        .expect_status(Some(StatusCode::SERVICE_UNAVAILABLE)),
+        .expect_status(Some(StatusCode::INSUFFICIENT_STORAGE)),
     )
     .authn_as(AuthnMode::PrivilegedUser)
     .execute()
@@ -842,18 +810,6 @@ async fn test_snapshot_unwind(cptestctx: &ControlPlaneTestContext) {
     let disks_url = get_disks_url();
 
     // Define a global image
-    let server = ServerBuilder::new().run().unwrap();
-    server.expect(
-        Expectation::matching(request::method_path("HEAD", "/image.raw"))
-            .times(1..)
- .respond_with( - status_code(200).append_header( - "Content-Length", - format!("{}", 4096 * 1000), - ), - ), - ); - let image_create_params = params::ImageCreate { identity: IdentityMetadataCreateParams { name: "alpine-edge".parse().unwrap(), @@ -861,10 +817,7 @@ async fn test_snapshot_unwind(cptestctx: &ControlPlaneTestContext) { "you can boot any image, as long as it's alpine", ), }, - source: params::ImageSource::Url { - url: server.url("/image.raw").to_string(), - block_size: params::BlockSize::try_from(512).unwrap(), - }, + source: params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine, os: "alpine".to_string(), version: "edge".to_string(), }; diff --git a/nexus/tests/integration_tests/sp_updater.rs b/nexus/tests/integration_tests/sp_updater.rs index 351c28ad9c..1b6764e609 100644 --- a/nexus/tests/integration_tests/sp_updater.rs +++ b/nexus/tests/integration_tests/sp_updater.rs @@ -9,7 +9,9 @@ use gateway_messages::{SpPort, UpdateInProgressStatus, UpdateStatus}; use gateway_test_utils::setup as mgs_setup; use hubtools::RawHubrisArchive; use hubtools::{CabooseBuilder, HubrisArchiveBuilder}; -use omicron_nexus::app::test_interfaces::{SpUpdater, UpdateProgress}; +use omicron_nexus::app::test_interfaces::{ + MgsClients, SpUpdater, UpdateProgress, +}; use sp_sim::SimulatedSp; use sp_sim::SIM_GIMLET_BOARD; use sp_sim::SIM_SIDECAR_BOARD; @@ -44,10 +46,11 @@ async fn test_sp_updater_updates_sled() { .await; // Configure an MGS client. - let mgs_client = Arc::new(gateway_client::Client::new( - &mgstestctx.client.url("/").to_string(), - mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), - )); + let mut mgs_clients = + MgsClients::from_clients([gateway_client::Client::new( + &mgstestctx.client.url("/").to_string(), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), + )]); // Configure and instantiate an `SpUpdater`. let sp_type = SpType::Sled; @@ -64,11 +67,11 @@ async fn test_sp_updater_updates_sled() { ); // Run the update. - sp_updater.update([mgs_client]).await.expect("update failed"); + sp_updater.update(&mut mgs_clients).await.expect("update failed"); // Ensure the SP received the complete update. let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize] - .last_update_data() + .last_sp_update_data() .await .expect("simulated SP did not receive an update"); @@ -94,10 +97,11 @@ async fn test_sp_updater_updates_switch() { .await; // Configure an MGS client. - let mgs_client = Arc::new(gateway_client::Client::new( - &mgstestctx.client.url("/").to_string(), - mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), - )); + let mut mgs_clients = + MgsClients::from_clients([gateway_client::Client::new( + &mgstestctx.client.url("/").to_string(), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), + )]); let sp_type = SpType::Switch; let sp_slot = 0; @@ -112,10 +116,10 @@ async fn test_sp_updater_updates_switch() { &mgstestctx.logctx.log, ); - sp_updater.update([mgs_client]).await.expect("update failed"); + sp_updater.update(&mut mgs_clients).await.expect("update failed"); let last_update_image = mgstestctx.simrack.sidecars[sp_slot as usize] - .last_update_data() + .last_sp_update_data() .await .expect("simulated SP did not receive an update"); @@ -172,16 +176,16 @@ async fn test_sp_updater_remembers_successful_mgs_instance() { // delivering an update requires a bare minimum of three requests (start the // update, query the status, reset the SP) and often more (if repeated // queries are required to wait for completion). 
- let mgs_clients = [ - Arc::new(gateway_client::Client::new( + let mut mgs_clients = MgsClients::from_clients([ + gateway_client::Client::new( &format!("http://{failing_mgs_addr}"), mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient1")), - )), - Arc::new(gateway_client::Client::new( + ), + gateway_client::Client::new( &mgstestctx.client.url("/").to_string(), mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), - )), - ]; + ), + ]); let sp_type = SpType::Sled; let sp_slot = 0; @@ -196,10 +200,10 @@ async fn test_sp_updater_remembers_successful_mgs_instance() { &mgstestctx.logctx.log, ); - sp_updater.update(mgs_clients).await.expect("update failed"); + sp_updater.update(&mut mgs_clients).await.expect("update failed"); let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize] - .last_update_data() + .last_sp_update_data() .await .expect("simulated SP did not receive an update"); @@ -288,18 +292,18 @@ async fn test_sp_updater_switches_mgs_instances_on_failure() { reqwest::Client::builder().pool_max_idle_per_host(0).build().unwrap(); // Configure two MGS clients pointed at our two proxy tasks. - let mgs_clients = [ - Arc::new(gateway_client::Client::new_with_client( + let mut mgs_clients = MgsClients::from_clients([ + gateway_client::Client::new_with_client( &format!("http://{mgs_proxy_one_addr}"), client.clone(), mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient1")), - )), - Arc::new(gateway_client::Client::new_with_client( + ), + gateway_client::Client::new_with_client( &format!("http://{mgs_proxy_two_addr}"), client, mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient2")), - )), - ]; + ), + ]); let sp_type = SpType::Sled; let sp_slot = 0; @@ -315,7 +319,8 @@ async fn test_sp_updater_switches_mgs_instances_on_failure() { ); // Spawn the actual update task. - let mut update_task = tokio::spawn(sp_updater.update(mgs_clients)); + let mut update_task = + tokio::spawn(async move { sp_updater.update(&mut mgs_clients).await }); // Loop over incoming requests. We expect this sequence: // @@ -408,7 +413,7 @@ async fn test_sp_updater_switches_mgs_instances_on_failure() { ); let last_update_image = mgstestctx.simrack.gimlets[sp_slot as usize] - .last_update_data() + .last_sp_update_data() .await .expect("simulated SP did not receive an update"); @@ -434,10 +439,11 @@ async fn test_sp_updater_delivers_progress() { .await; // Configure an MGS client. - let mgs_client = Arc::new(gateway_client::Client::new( - &mgstestctx.client.url("/").to_string(), - mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), - )); + let mut mgs_clients = + MgsClients::from_clients([gateway_client::Client::new( + &mgstestctx.client.url("/").to_string(), + mgstestctx.logctx.log.new(slog::o!("component" => "MgsClient")), + )]); let sp_type = SpType::Sled; let sp_slot = 0; @@ -468,10 +474,11 @@ async fn test_sp_updater_delivers_progress() { // Spawn the update on a background task so we can watch `progress` as it is // applied. - let do_update_task = tokio::spawn(sp_updater.update([mgs_client])); + let do_update_task = + tokio::spawn(async move { sp_updater.update(&mut mgs_clients).await }); // Allow the SP to respond to 2 messages: the caboose check and the "prepare - // update" messages that trigger the start of an update, then ensure we see + // update" messages that triggers the start of an update, then ensure we see // the "started" progress. 
sp_accept_sema.send(2).unwrap(); progress.changed().await.unwrap(); @@ -541,7 +548,6 @@ async fn test_sp_updater_delivers_progress() { UpdateStatus::Complete(_) => { if prev_bytes_received < sp_image_len { prev_bytes_received = sp_image_len; - continue; } } status @ (UpdateStatus::None @@ -589,10 +595,13 @@ async fn test_sp_updater_delivers_progress() { progress.changed().await.unwrap(); assert_eq!(*progress.borrow_and_update(), Some(UpdateProgress::Complete)); + // drop our progress receiver so `do_update_task` can complete + mem::drop(progress); + do_update_task.await.expect("update task panicked").expect("update failed"); let last_update_image = target_sp - .last_update_data() + .last_sp_update_data() .await .expect("simulated SP did not receive an update"); diff --git a/nexus/tests/integration_tests/switch_port.rs b/nexus/tests/integration_tests/switch_port.rs index ccd0b50fbe..df4d96c6d1 100644 --- a/nexus/tests/integration_tests/switch_port.rs +++ b/nexus/tests/integration_tests/switch_port.rs @@ -10,7 +10,7 @@ use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params::{ Address, AddressConfig, AddressLotBlockCreate, AddressLotCreate, - BgpAnnounceSetCreate, BgpAnnouncementCreate, BgpConfigCreate, + BgpAnnounceSetCreate, BgpAnnouncementCreate, BgpConfigCreate, BgpPeer, BgpPeerConfig, LinkConfig, LinkFec, LinkSpeed, LldpServiceConfig, Route, RouteConfig, SwitchInterfaceConfig, SwitchInterfaceKind, SwitchPortApplySettings, SwitchPortSettingsCreate, @@ -118,6 +118,7 @@ async fn test_port_settings_basic_crud(ctx: &ControlPlaneTestContext) { lldp: LldpServiceConfig { enabled: false, lldp_config: None }, fec: LinkFec::None, speed: LinkSpeed::Speed100G, + autoneg: false, }, ); // interfaces @@ -252,15 +253,17 @@ async fn test_port_settings_basic_crud(ctx: &ControlPlaneTestContext) { settings.bgp_peers.insert( "phy0".into(), BgpPeerConfig { - bgp_config: NameOrId::Name("as47".parse().unwrap()), //TODO - bgp_announce_set: NameOrId::Name("instances".parse().unwrap()), //TODO - interface_name: "phy0".to_string(), - addr: "1.2.3.4".parse().unwrap(), - hold_time: 6, - idle_hold_time: 6, - delay_open: 0, - connect_retry: 3, - keepalive: 2, + peers: vec![BgpPeer { + bgp_config: NameOrId::Name("as47".parse().unwrap()), + bgp_announce_set: NameOrId::Name("instances".parse().unwrap()), + interface_name: "phy0".to_string(), + addr: "1.2.3.4".parse().unwrap(), + hold_time: 6, + idle_hold_time: 6, + delay_open: 0, + connect_retry: 3, + keepalive: 2, + }], }, ); let _created: SwitchPortSettingsView = NexusRequest::objects_post( @@ -318,4 +321,19 @@ async fn test_port_settings_basic_crud(ctx: &ControlPlaneTestContext) { .execute() .await .unwrap(); + + // clear port settings + + NexusRequest::new( + RequestBuilder::new( + client, + Method::DELETE, + &format!("/v1/system/hardware/switch-port/qsfp0/settings?rack_id={rack_id}&switch_location=switch0"), + ) + .expect_status(Some(StatusCode::NO_CONTENT)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); } diff --git a/nexus/tests/integration_tests/unauthorized.rs b/nexus/tests/integration_tests/unauthorized.rs index 9936af20bf..1cb2eaca3a 100644 --- a/nexus/tests/integration_tests/unauthorized.rs +++ b/nexus/tests/integration_tests/unauthorized.rs @@ -278,6 +278,12 @@ lazy_static! 
{ body: serde_json::to_value(&*DEMO_IMAGE_CREATE).unwrap(), id_routes: vec!["/v1/images/{id}"], }, + // Create a Floating IP in the project + SetupReq::Post { + url: &DEMO_PROJECT_URL_FIPS, + body: serde_json::to_value(&*DEMO_FLOAT_IP_CREATE).unwrap(), + id_routes: vec!["/v1/floating-ips/{id}"], + }, // Create a SAML identity provider SetupReq::Post { url: &SAML_IDENTITY_PROVIDERS_URL, diff --git a/nexus/tests/integration_tests/updates.rs b/nexus/tests/integration_tests/updates.rs index 918d5ac100..891166ed19 100644 --- a/nexus/tests/integration_tests/updates.rs +++ b/nexus/tests/integration_tests/updates.rs @@ -7,6 +7,7 @@ // - test that an unknown artifact returns 404, not 500 // - tests around target names and artifact names that contain dangerous paths like `../` +use async_trait::async_trait; use chrono::{Duration, Utc}; use dropshot::test_util::LogContext; use dropshot::{ @@ -45,17 +46,22 @@ const UPDATE_COMPONENT: &'static str = "omicron-test-component"; #[tokio::test] async fn test_update_end_to_end() { let mut config = load_test_config(); + let logctx = LogContext::new("test_update_end_to_end", &config.pkg.log); // build the TUF repo let rng = SystemRandom::new(); - let tuf_repo = new_tuf_repo(&rng); + let tuf_repo = new_tuf_repo(&rng).await; + slog::info!( + logctx.log, + "TUF repo created at {}", + tuf_repo.path().display() + ); // serve it over HTTP let dropshot_config = Default::default(); let mut api = ApiDescription::new(); api.register(static_content).unwrap(); let context = FileServerContext { base: tuf_repo.path().to_owned() }; - let logctx = LogContext::new("test_update_end_to_end", &config.pkg.log); let server = HttpServerStarter::new(&dropshot_config, api, context, &logctx.log) .unwrap() @@ -122,9 +128,14 @@ async fn static_content( for component in path.into_inner().path { fs_path.push(component); } - let body = tokio::fs::read(fs_path) - .await - .map_err(|e| HttpError::for_bad_request(None, e.to_string()))?; + let body = tokio::fs::read(fs_path).await.map_err(|e| { + // tough 0.15+ depend on ENOENT being translated into 404. + if e.kind() == std::io::ErrorKind::NotFound { + HttpError::for_not_found(None, e.to_string()) + } else { + HttpError::for_bad_request(None, e.to_string()) + } + })?; Ok(Response::builder().status(StatusCode::OK).body(body.into())?) } @@ -132,7 +143,7 @@ async fn static_content( const TARGET_CONTENTS: &[u8] = b"hello world".as_slice(); -fn new_tuf_repo(rng: &dyn SecureRandom) -> TempDir { +async fn new_tuf_repo(rng: &(dyn SecureRandom + Sync)) -> TempDir { let version = NonZeroU64::new(Utc::now().timestamp().try_into().unwrap()).unwrap(); let expires = Utc::now() + Duration::minutes(5); @@ -180,13 +191,14 @@ fn new_tuf_repo(rng: &dyn SecureRandom) -> TempDir { &signing_keys, rng, ) + .await .unwrap(); // TODO(iliana): there's no way to create a `RepositoryEditor` without having the root.json on // disk. this is really unergonomic. 
write and upstream a fix
     let mut root_tmp = NamedTempFile::new().unwrap();
     root_tmp.as_file_mut().write_all(signed_root.buffer()).unwrap();
-    let mut editor = RepositoryEditor::new(&root_tmp).unwrap();
+    let mut editor = RepositoryEditor::new(&root_tmp).await.unwrap();
     root_tmp.close().unwrap();
 
     editor
@@ -200,19 +212,20 @@ fn new_tuf_repo(rng: &dyn SecureRandom) -> TempDir {
         .timestamp_expires(expires);
     let (targets_dir, target_names) = generate_targets();
     for target in target_names {
-        editor.add_target_path(targets_dir.path().join(target)).unwrap();
+        editor.add_target_path(targets_dir.path().join(target)).await.unwrap();
     }
 
-    let signed_repo = editor.sign(&signing_keys).unwrap();
+    let signed_repo = editor.sign(&signing_keys).await.unwrap();
 
     let repo = TempDir::new().unwrap();
-    signed_repo.write(repo.path().join("metadata")).unwrap();
+    signed_repo.write(repo.path().join("metadata")).await.unwrap();
     signed_repo
         .copy_targets(
             targets_dir,
             repo.path().join("targets"),
             PathExists::Fail,
         )
+        .await
         .unwrap();
 
     repo
@@ -257,8 +270,9 @@ impl Debug for KeyKeySource {
     }
 }
 
+#[async_trait]
 impl KeySource for KeyKeySource {
-    fn as_sign(
+    async fn as_sign(
         &self,
     ) -> Result<Box<dyn Sign>, Box<dyn std::error::Error + Send + Sync + 'static>> {
@@ -267,7 +281,7 @@ impl KeySource for KeyKeySource {
         Ok(Box::new(Ed25519KeyPair::from_pkcs8(self.0.as_ref()).unwrap()))
     }
 
-    fn write(
+    async fn write(
         &self,
         _value: &str,
         _key_id_hex: &str,
diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs
index 24a0e5591b..466cb5472e 100644
--- a/nexus/tests/integration_tests/volume_management.rs
+++ b/nexus/tests/integration_tests/volume_management.rs
@@ -30,8 +30,6 @@ use sled_agent_client::types::{CrucibleOpts, VolumeConstructionRequest};
 use std::sync::Arc;
 use uuid::Uuid;
 
-use httptest::{matchers::*, responders::*, Expectation, ServerBuilder};
-
 type ControlPlaneTestContext =
     nexus_test_utils::ControlPlaneTestContext<omicron_nexus::Server>;
 
@@ -63,17 +61,6 @@ async fn create_image(client: &ClientTestContext) -> views::Image {
     create_org_and_project(client).await;
 
     // Define a global image
-    let server = ServerBuilder::new().run().unwrap();
-    server.expect(
-        Expectation::matching(request::method_path("HEAD", "/image.raw"))
-            .times(1..)
- .respond_with( - status_code(200).append_header( - "Content-Length", - format!("{}", 4096 * 1000), - ), - ), - ); let image_create_params = params::ImageCreate { identity: IdentityMetadataCreateParams { @@ -82,10 +69,7 @@ async fn create_image(client: &ClientTestContext) -> views::Image { "you can boot any image, as long as it's alpine", ), }, - source: params::ImageSource::Url { - url: server.url("/image.raw").to_string(), - block_size: params::BlockSize::try_from(512).unwrap(), - }, + source: params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine, os: "alpine".to_string(), version: "edge".to_string(), }; @@ -379,7 +363,7 @@ async fn test_snapshot_prevents_other_disk( NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&next_disk)) - .expect_status(Some(StatusCode::SERVICE_UNAVAILABLE)), + .expect_status(Some(StatusCode::INSUFFICIENT_STORAGE)), ) .authn_as(AuthnMode::PrivilegedUser) .execute() diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 7f0c30c471..5a4a61132e 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -6,11 +6,17 @@ disk_bulk_write_import_stop POST /v1/disks/{disk}/bulk-write-st disk_create POST /v1/disks disk_delete DELETE /v1/disks/{disk} disk_finalize_import POST /v1/disks/{disk}/finalize -disk_import_blocks_from_url POST /v1/disks/{disk}/import disk_list GET /v1/disks disk_metrics_list GET /v1/disks/{disk}/metrics/{metric} disk_view GET /v1/disks/{disk} +API operations found with tag "floating-ips" +OPERATION ID METHOD URL PATH +floating_ip_create POST /v1/floating-ips +floating_ip_delete DELETE /v1/floating-ips/{floating_ip} +floating_ip_list GET /v1/floating-ips +floating_ip_view GET /v1/floating-ips/{floating_ip} + API operations found with tag "hidden" OPERATION ID METHOD URL PATH device_access_token POST /device/token @@ -110,6 +116,7 @@ snapshot_view GET /v1/snapshots/{snapshot} API operations found with tag "system/hardware" OPERATION ID METHOD URL PATH +add_sled_to_initialized_rack POST /v1/system/hardware/sleds networking_switch_port_apply_settings POST /v1/system/hardware/switch-port/{port}/settings networking_switch_port_clear_settings DELETE /v1/system/hardware/switch-port/{port}/settings networking_switch_port_list GET /v1/system/hardware/switch-port @@ -119,6 +126,7 @@ rack_view GET /v1/system/hardware/racks/{rac sled_instance_list GET /v1/system/hardware/sleds/{sled_id}/instances sled_list GET /v1/system/hardware/sleds sled_physical_disk_list GET /v1/system/hardware/sleds/{sled_id}/disks +sled_set_provision_state PUT /v1/system/hardware/sleds/{sled_id}/provision-state sled_view GET /v1/system/hardware/sleds/{sled_id} switch_list GET /v1/system/hardware/switches switch_view GET /v1/system/hardware/switches/{switch_id} diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index a0169ae777..cde448c5b7 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -54,6 +54,7 @@ path_param!(VpcPath, vpc, "VPC"); path_param!(SubnetPath, subnet, "subnet"); path_param!(RouterPath, router, "router"); path_param!(RoutePath, route, "route"); +path_param!(FloatingIpPath, floating_ip, "Floating IP"); path_param!(DiskPath, disk, "disk"); path_param!(SnapshotPath, snapshot, "snapshot"); path_param!(ImagePath, image, "image"); @@ -75,6 +76,23 @@ pub struct SledSelector { pub sled: Uuid, } +/// Parameters for `sled_set_provision_state`. 
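+///
+/// Since `SledProvisionState` serializes with `rename_all = "snake_case"`, an
+/// illustrative request body (values hypothetical) is:
+/// `{ "state": "non_provisionable" }`.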
+#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)]
+pub struct SledProvisionStateParams {
+    /// The provision state.
+    pub state: super::views::SledProvisionState,
+}
+
+/// Response to `sled_set_provision_state`.
+#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)]
+pub struct SledProvisionStateResponse {
+    /// The old provision state.
+    pub old_state: super::views::SledProvisionState,
+
+    /// The new provision state.
+    pub new_state: super::views::SledProvisionState,
+}
+
 pub struct SwitchSelector {
     /// ID of the switch
     pub switch: Uuid,
 }
@@ -129,6 +147,14 @@ pub struct OptionalProjectSelector {
     pub project: Option<NameOrId>,
 }
 
+#[derive(Deserialize, JsonSchema)]
+pub struct FloatingIpSelector {
+    /// Name or ID of the project, only required if `floating_ip` is provided as a `Name`
+    pub project: Option<NameOrId>,
+    /// Name or ID of the Floating IP
+    pub floating_ip: NameOrId,
+}
+
 #[derive(Deserialize, JsonSchema)]
 pub struct DiskSelector {
     /// Name or ID of the project, only required if `disk` is provided as a `Name`
@@ -751,6 +777,23 @@ pub struct IpPoolUpdate {
     pub identity: IdentityMetadataUpdateParams,
 }
 
+// Floating IPs
+/// Parameters for creating a new floating IP address for instances.
+#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
+pub struct FloatingIpCreate {
+    #[serde(flatten)]
+    pub identity: IdentityMetadataCreateParams,
+
+    /// An IP address to reserve for use as a floating IP. This field is
+    /// optional: when not set, an address will be automatically chosen from
+    /// `pool`. If set, then the IP must be available in the resolved `pool`.
+    pub address: Option<IpAddr>,
+
+    /// The parent IP pool that a floating IP is pulled from. If unset, the
+    /// default pool is selected.
+    pub pool: Option<NameOrId>,
+}
+
 // INSTANCES
 
 /// Describes an attachment of an `InstanceNetworkInterface` to an `Instance`,
@@ -818,7 +861,11 @@ pub enum ExternalIpCreate {
     /// automatically-assigned from the provided IP Pool, or all available pools
     /// if not specified.
     Ephemeral { pool_name: Option<Name> },
-    // TODO: Add floating IPs: https://github.com/oxidecomputer/omicron/issues/1334
+    /// An IP address providing both inbound and outbound access. The address is
+    /// an existing Floating IP object assigned to the current project.
+    ///
+    /// The floating IP must not be in use by another instance or service.
+    Floating { floating_ip_name: Name },
 }
 
 /// Create-time parameters for an `Instance`
@@ -1172,15 +1219,6 @@ pub enum ExpectedDigest {
     Sha256(String),
 }
 
-/// Parameters for importing blocks from a URL to a disk
-#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
-pub struct ImportBlocksFromUrl {
-    /// the source to pull blocks from
-    pub url: String,
-    /// Expected digest of all blocks when importing from a URL
-    pub expected_digest: Option<ExpectedDigest>,
-}
-
 /// Parameters for importing blocks with a bulk write
 // equivalent to crucible_pantry_client::types::BulkWriteRequest
 #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
@@ -1337,6 +1375,18 @@ pub enum LinkFec {
     Rs,
 }
 
+impl From<omicron_common::api::internal::shared::PortFec> for LinkFec {
+    fn from(x: omicron_common::api::internal::shared::PortFec) -> LinkFec {
+        match x {
+            omicron_common::api::internal::shared::PortFec::Firecode => {
+                Self::Firecode
+            }
+            omicron_common::api::internal::shared::PortFec::None => Self::None,
+            omicron_common::api::internal::shared::PortFec::Rs => Self::Rs,
+        }
+    }
+}
+
 /// The speed of a link.
 #[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema)]
 #[serde(rename_all = "snake_case")]
@@ -1361,6 +1411,40 @@ pub enum LinkSpeed {
     Speed400G,
 }
 
+impl From<omicron_common::api::internal::shared::PortSpeed> for LinkSpeed {
+    fn from(x: omicron_common::api::internal::shared::PortSpeed) -> Self {
+        match x {
+            omicron_common::api::internal::shared::PortSpeed::Speed0G => {
+                Self::Speed0G
+            }
+            omicron_common::api::internal::shared::PortSpeed::Speed1G => {
+                Self::Speed1G
+            }
+            omicron_common::api::internal::shared::PortSpeed::Speed10G => {
+                Self::Speed10G
+            }
+            omicron_common::api::internal::shared::PortSpeed::Speed25G => {
+                Self::Speed25G
+            }
+            omicron_common::api::internal::shared::PortSpeed::Speed40G => {
+                Self::Speed40G
+            }
+            omicron_common::api::internal::shared::PortSpeed::Speed50G => {
+                Self::Speed50G
+            }
+            omicron_common::api::internal::shared::PortSpeed::Speed100G => {
+                Self::Speed100G
+            }
+            omicron_common::api::internal::shared::PortSpeed::Speed200G => {
+                Self::Speed200G
+            }
+            omicron_common::api::internal::shared::PortSpeed::Speed400G => {
+                Self::Speed400G
+            }
+        }
+    }
+}
+
 /// Switch link configuration.
 #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
 pub struct LinkConfig {
@@ -1375,6 +1459,9 @@ pub struct LinkConfig {
 
     /// The speed of the link.
     pub speed: LinkSpeed,
+
+    /// Whether or not to set autonegotiation
+    pub autoneg: bool,
 }
 
 /// The LLDP configuration associated with a port. LLDP may be either enabled or
@@ -1462,12 +1549,17 @@ pub struct BgpConfigListSelector {
     pub name_or_id: Option<NameOrId>,
 }
 
+#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
+pub struct BgpPeerConfig {
+    pub peers: Vec<BgpPeer>,
+}
+
 /// A BGP peer configuration for an interface. Includes the set of announcements
 /// that will be advertised to the peer identified by `addr`. The `bgp_config`
 /// parameter is a reference to global BGP parameters. The `interface_name`
 /// indicates what interface the peer should be contacted on.
 #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
-pub struct BgpPeerConfig {
+pub struct BgpPeer {
     /// The set of announcements advertised by the peer.
     pub bgp_announce_set: NameOrId,
@@ -1635,12 +1727,6 @@ pub struct SwitchPortApplySettings {
 #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
 #[serde(tag = "type", rename_all = "snake_case")]
 pub enum ImageSource {
-    Url {
-        url: String,
-
-        /// The block size in bytes
-        block_size: BlockSize,
-    },
     Snapshot {
         id: Uuid,
     },
diff --git a/nexus/types/src/external_api/shared.rs b/nexus/types/src/external_api/shared.rs
index 48fbb9c10d..a4c5ae1e62 100644
--- a/nexus/types/src/external_api/shared.rs
+++ b/nexus/types/src/external_api/shared.rs
@@ -245,6 +245,42 @@ pub enum UpdateableComponentType {
     HostOmicron,
 }
 
+/// Properties that uniquely identify an Oxide hardware component
+#[derive(
+    Clone,
+    Debug,
+    Serialize,
+    Deserialize,
+    JsonSchema,
+    PartialOrd,
+    Ord,
+    PartialEq,
+    Eq,
+)]
+pub struct Baseboard {
+    pub serial: String,
+    pub part: String,
+    pub revision: i64,
+}
+
+/// A sled that has not been added to an initialized rack yet
+#[derive(
+    Clone,
+    Debug,
+    Serialize,
+    Deserialize,
+    JsonSchema,
+    PartialOrd,
+    Ord,
+    PartialEq,
+    Eq,
+)]
+pub struct UninitializedSled {
+    pub baseboard: Baseboard,
+    pub rack_id: Uuid,
+    pub cubby: u16,
+}
+
 #[cfg(test)]
 mod test {
     use super::Policy;
diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs
index b34fc7a542..af17e7e840 100644
--- a/nexus/types/src/external_api/views.rs
+++ b/nexus/types/src/external_api/views.rs
@@ -5,7 +5,7 @@
 //! Views are response bodies, most of which are public lenses onto DB models.
 
 use crate::external_api::shared::{
-    self, IpKind, IpRange, ServiceUsingCertificate,
+    self, Baseboard, IpKind, IpRange, ServiceUsingCertificate,
 };
 use crate::identity::AssetIdentityMetadata;
 use api_identity::ObjectIdentity;
@@ -133,9 +133,6 @@ pub struct Image {
     /// ID of the parent project if the image is a project image
     pub project_id: Option<Uuid>,
 
-    /// URL source of this image, if any
-    pub url: Option<String>,
-
     /// The family of the operating system like Debian, Ubuntu, etc.
     pub os: String,
 
@@ -265,6 +262,22 @@ pub struct ExternalIp {
     pub kind: IpKind,
 }
 
+/// A Floating IP is a well-known IP address which can be attached
+/// to and detached from instances.
+#[derive(ObjectIdentity, Debug, Clone, Deserialize, Serialize, JsonSchema)]
+#[serde(rename_all = "snake_case")]
+pub struct FloatingIp {
+    #[serde(flatten)]
+    pub identity: IdentityMetadata,
+    /// The IP address held by this resource.
+    pub ip: IpAddr,
+    /// The project this resource exists within.
+    pub project_id: Uuid,
+    /// The ID of the instance that this Floating IP is attached to,
+    /// if it is presently in use.
+    pub instance_id: Option<Uuid>,
+}
+
 // RACKS
 
 /// View of an Rack
@@ -274,44 +287,8 @@ pub struct Rack {
     pub identity: AssetIdentityMetadata,
 }
 
-/// View of a sled that has not been added to an initialized rack yet
-#[derive(
-    Clone,
-    Debug,
-    Serialize,
-    Deserialize,
-    JsonSchema,
-    PartialOrd,
-    Ord,
-    PartialEq,
-    Eq,
-)]
-pub struct UninitializedSled {
-    pub baseboard: Baseboard,
-    pub rack_id: Uuid,
-    pub cubby: u16,
-}
-
 // FRUs
 
-/// Properties that uniquely identify an Oxide hardware component
-#[derive(
-    Clone,
-    Debug,
-    Serialize,
-    Deserialize,
-    JsonSchema,
-    PartialOrd,
-    Ord,
-    PartialEq,
-    Eq,
-)]
-pub struct Baseboard {
-    pub serial: String,
-    pub part: String,
-    pub revision: i64,
-}
-
 // SLEDS
 
 /// An operator's view of a Sled.
@@ -322,12 +299,32 @@ pub struct Sled {
     pub baseboard: Baseboard,
     /// The rack to which this Sled is currently attached
     pub rack_id: Uuid,
+    /// The provision state of the sled.
+    pub provision_state: SledProvisionState,
     /// The number of hardware threads which can execute on this sled
     pub usable_hardware_threads: u32,
     /// Amount of RAM which may be used by the Sled's OS
     pub usable_physical_ram: ByteCount,
 }
 
+/// The provision state of a sled.
+///
+/// This controls whether new resources are going to be provisioned on this
+/// sled.
+#[derive(
+    Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq,
+)]
+#[serde(rename_all = "snake_case")]
+pub enum SledProvisionState {
+    /// New resources will be provisioned on this sled.
+    Provisionable,
+
+    /// New resources will not be provisioned on this sled. However, existing
+    /// resources will continue to be on this sled unless manually migrated
+    /// off.
+    NonProvisionable,
+}
+
 /// An operator's view of an instance running on a given sled
 #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
 pub struct SledInstance {
diff --git a/nexus/types/src/internal_api/params.rs b/nexus/types/src/internal_api/params.rs
index c0991ebb17..bc25e8d4bd 100644
--- a/nexus/types/src/internal_api/params.rs
+++ b/nexus/types/src/internal_api/params.rs
@@ -25,7 +25,7 @@ use uuid::Uuid;
 ///
 /// Note that this may change if the sled is physically moved
 /// within the rack.
-#[derive(Serialize, Deserialize, JsonSchema)]
+#[derive(Serialize, Deserialize, JsonSchema, Debug)]
 #[serde(rename_all = "snake_case")]
 pub enum SledRole {
     /// The sled is a general compute sled.
@@ -45,7 +45,7 @@ pub struct Baseboard
 }
 
 /// Sent by a sled agent on startup to Nexus to request further instruction
-#[derive(Serialize, Deserialize, JsonSchema)]
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
 pub struct SledAgentStartupInfo {
     /// The address of the sled agent's API endpoint
     pub sa_address: SocketAddrV6,
diff --git a/nexus/types/src/inventory.rs b/nexus/types/src/inventory.rs
index 112eec3a65..9401727162 100644
--- a/nexus/types/src/inventory.rs
+++ b/nexus/types/src/inventory.rs
@@ -20,6 +20,8 @@ use std::sync::Arc;
 use strum::EnumIter;
 use uuid::Uuid;
 
+use crate::external_api::shared::Baseboard;
+
 /// Results of collecting hardware/software inventory from various Omicron
 /// components
 ///
@@ -56,6 +58,11 @@ pub struct Collection {
     ///
     /// In practice, these will be inserted into the `sw_caboose` table.
     pub cabooses: BTreeSet<Arc<Caboose>>,
+    /// unique root of trust page contents that were found in this collection
+    ///
+    /// In practice, these will be inserted into the `sw_root_of_trust_page`
+    /// table.
+    pub rot_pages: BTreeSet<Arc<RotPage>>,
 
     /// all service processors, keyed by baseboard id
     ///
@@ -73,6 +80,14 @@ pub struct Collection {
     /// In practice, these will be inserted into the `inv_caboose` table.
     pub cabooses_found:
         BTreeMap<CabooseWhich, BTreeMap<Arc<BaseboardId>, CabooseFound>>,
+    /// all root of trust page contents found, keyed first by the kind of page
+    /// (`RotPageWhich`), then the baseboard id of the sled where they were
+    /// found
+    ///
+    /// In practice, these will be inserted into the `inv_root_of_trust_page`
+    /// table.
+    pub rot_pages_found:
+        BTreeMap<RotPageWhich, BTreeMap<Arc<BaseboardId>, RotPageFound>>,
 }
 
 impl Collection {
@@ -85,6 +100,16 @@ impl Collection {
             .get(&which)
             .and_then(|by_bb| by_bb.get(baseboard_id))
     }
+
+    pub fn rot_page_for(
+        &self,
+        which: RotPageWhich,
+        baseboard_id: &BaseboardId,
+    ) -> Option<&RotPageFound> {
+        self.rot_pages_found
+            .get(&which)
+            .and_then(|by_bb| by_bb.get(baseboard_id))
+    }
 }
 
 /// A unique baseboard id found during a collection
@@ -108,6 +133,12 @@ pub struct BaseboardId {
     pub serial_number: String,
 }
 
+impl From<Baseboard> for BaseboardId {
+    fn from(value: Baseboard) -> Self {
+        BaseboardId { part_number: value.part, serial_number: value.serial }
+    }
+}
+
 /// Caboose contents found during a collection
 ///
 /// These are normalized in the database. Each distinct `Caboose` is assigned a
@@ -177,3 +208,57 @@ pub enum CabooseWhich {
     RotSlotA,
     RotSlotB,
 }
+
+/// Root of trust page contents found during a collection
+///
+/// These are normalized in the database. Each distinct `RotPage` is assigned a
+/// uuid and shared across many possible collections that reference it.
+#[derive(Clone, Debug, Ord, Eq, PartialOrd, PartialEq)]
+pub struct RotPage {
+    pub data_base64: String,
+}
+
+/// Indicates that a particular `RotPage` was found (at a particular time from a
+/// particular source, but these are only for debugging)
+#[derive(Clone, Debug, Ord, Eq, PartialOrd, PartialEq)]
+pub struct RotPageFound {
+    pub time_collected: DateTime<Utc>,
+    pub source: String,
+    pub page: Arc<RotPage>,
+}
+
+/// Describes which root of trust page this is
+#[derive(Clone, Copy, Debug, EnumIter, PartialEq, Eq, PartialOrd, Ord)]
+pub enum RotPageWhich {
+    Cmpa,
+    CfpaActive,
+    CfpaInactive,
+    CfpaScratch,
+}
+
+/// Trait to convert between the two MGS root of trust page types and a tuple of
+/// `([RotPageWhich], [RotPage])`.
+///
+/// This cannot use the standard `From` trait due to orphan rules: we do not own
+/// the `gateway_client` type, and tuples are always considered foreign.
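+///
+/// An illustrative (hypothetical) use, converting an MGS CMPA page:
+///
+/// ```ignore
+/// let (which, page) = rot_cmpa.into_rot_page();
+/// assert_eq!(which, RotPageWhich::Cmpa);
+/// ```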
+pub trait IntoRotPage { + fn into_rot_page(self) -> (RotPageWhich, RotPage); +} + +impl IntoRotPage for gateway_client::types::RotCmpa { + fn into_rot_page(self) -> (RotPageWhich, RotPage) { + (RotPageWhich::Cmpa, RotPage { data_base64: self.base64_data }) + } +} + +impl IntoRotPage for gateway_client::types::RotCfpa { + fn into_rot_page(self) -> (RotPageWhich, RotPage) { + use gateway_client::types::RotCfpaSlot; + let which = match self.slot { + RotCfpaSlot::Active => RotPageWhich::CfpaActive, + RotCfpaSlot::Inactive => RotPageWhich::CfpaInactive, + RotCfpaSlot::Scratch => RotPageWhich::CfpaScratch, + }; + (which, RotPage { data_base64: self.base64_data }) + } +} diff --git a/openapi/bootstrap-agent.json b/openapi/bootstrap-agent.json index 2c7ffbc337..0c5bd15050 100644 --- a/openapi/bootstrap-agent.json +++ b/openapi/bootstrap-agent.json @@ -510,6 +510,11 @@ "$ref": "#/components/schemas/IpNetwork" } }, + "autoneg": { + "description": "Whether or not to set autonegotiation", + "default": false, + "type": "boolean" + }, "bgp_peers": { "description": "BGP peers on this port", "type": "array", diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index fcb285d9eb..f909710ab4 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -2568,9 +2568,60 @@ "datum", "type" ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/MissingDatum" + }, + "type": { + "type": "string", + "enum": [ + "missing" + ] + } + }, + "required": [ + "datum", + "type" + ] } ] }, + "DatumType": { + "description": "The type of an individual datum of a metric.", + "type": "string", + "enum": [ + "bool", + "i8", + "u8", + "i16", + "u16", + "i32", + "u32", + "i64", + "u64", + "f32", + "f64", + "string", + "bytes", + "cumulative_i64", + "cumulative_u64", + "cumulative_f32", + "cumulative_f64", + "histogram_i8", + "histogram_u8", + "histogram_i16", + "histogram_u16", + "histogram_i32", + "histogram_u32", + "histogram_i64", + "histogram_u64", + "histogram_f32", + "histogram_f64" + ] + }, "DiskRuntimeState": { "description": "Runtime state of the Disk, which includes its attach state and some minimal metadata", "type": "object", @@ -4128,9 +4179,91 @@ "content", "type" ] + }, + { + "type": "object", + "properties": { + "content": { + "type": "object", + "properties": { + "datum_type": { + "$ref": "#/components/schemas/DatumType" + } + }, + "required": [ + "datum_type" + ] + }, + "type": { + "type": "string", + "enum": [ + "missing_datum_requires_start_time" + ] + } + }, + "required": [ + "content", + "type" + ] + }, + { + "type": "object", + "properties": { + "content": { + "type": "object", + "properties": { + "datum_type": { + "$ref": "#/components/schemas/DatumType" + } + }, + "required": [ + "datum_type" + ] + }, + "type": { + "type": "string", + "enum": [ + "missing_datum_cannot_have_start_time" + ] + } + }, + "required": [ + "content", + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "invalid_timeseries_name" + ] + } + }, + "required": [ + "type" + ] } ] }, + "MissingDatum": { + "type": "object", + "properties": { + "datum_type": { + "$ref": "#/components/schemas/DatumType" + }, + "start_time": { + "nullable": true, + "type": "string", + "format": "date-time" + } + }, + "required": [ + "datum_type" + ] + }, "Name": { "title": "A name unique within the parent collection", "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase 
ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", @@ -4240,6 +4373,11 @@ "$ref": "#/components/schemas/IpNetwork" } }, + "autoneg": { + "description": "Whether or not to set autonegotiation", + "default": false, + "type": "boolean" + }, "bgp_peers": { "description": "BGP peers on this port", "type": "array", @@ -4322,24 +4460,67 @@ "type": "object", "properties": { "address": { + "description": "The IP address and port at which `oximeter` can collect metrics from the producer.", "type": "string" }, "base_route": { + "description": "The API base route from which `oximeter` can collect metrics.\n\nThe full route is `{base_route}/{id}`.", "type": "string" }, "id": { + "description": "A unique ID for this producer.", "type": "string", "format": "uuid" }, "interval": { - "$ref": "#/components/schemas/Duration" + "description": "The interval on which `oximeter` should collect metrics.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "kind": { + "description": "The kind of producer.", + "allOf": [ + { + "$ref": "#/components/schemas/ProducerKind" + } + ] } }, "required": [ "address", "base_route", "id", - "interval" + "interval", + "kind" + ] + }, + "ProducerKind": { + "description": "The kind of metric producer this is.", + "oneOf": [ + { + "description": "The producer is a sled-agent.", + "type": "string", + "enum": [ + "sled_agent" + ] + }, + { + "description": "The producer is an Omicron-managed service.", + "type": "string", + "enum": [ + "service" + ] + }, + { + "description": "The producer is a Propolis VMM managing a guest instance.", + "type": "string", + "enum": [ + "instance" + ] + } ] }, "ProducerResultsItem": { diff --git a/openapi/nexus.json b/openapi/nexus.json index 0d19e81d9a..7afb6cdc2f 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -699,55 +699,6 @@ } } }, - "/v1/disks/{disk}/import": { - "post": { - "tags": [ - "disks" - ], - "summary": "Request to import blocks from URL", - "operationId": "disk_import_blocks_from_url", - "parameters": [ - { - "in": "path", - "name": "disk", - "description": "Name or ID of the disk", - "required": true, - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - }, - { - "in": "query", - "name": "project", - "description": "Name or ID of the project", - "schema": { - "$ref": "#/components/schemas/NameOrId" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ImportBlocksFromUrl" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, "/v1/disks/{disk}/metrics/{metric}": { "get": { "tags": [ @@ -853,6 +804,204 @@ } } }, + "/v1/floating-ips": { + "get": { + "tags": [ + "floating-ips" + ], + "summary": "List all Floating IPs", + "operationId": "floating_ip_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": 
"#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIpResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [ + "project" + ] + } + }, + "post": { + "tags": [ + "floating-ips" + ], + "summary": "Create a Floating IP", + "operationId": "floating_ip_create", + "parameters": [ + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIpCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIp" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/floating-ips/{floating_ip}": { + "get": { + "tags": [ + "floating-ips" + ], + "summary": "Fetch a floating IP", + "operationId": "floating_ip_view", + "parameters": [ + { + "in": "path", + "name": "floating_ip", + "description": "Name or ID of the Floating IP", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloatingIp" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "floating-ips" + ], + "summary": "Delete a Floating IP", + "operationId": "floating_ip_delete", + "parameters": [ + { + "in": "path", + "name": "floating_ip", + "description": "Name or ID of the Floating IP", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "project", + "description": "Name or ID of the project", + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/groups": { "get": { "tags": [ @@ -3610,6 +3759,34 @@ "x-dropshot-pagination": { "required": [] } + }, + "post": { + "tags": [ + "system/hardware" + ], + "summary": "Add a sled to an initialized rack", + "operationId": "add_sled_to_initialized_rack", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UninitializedSled" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } } }, "/v1/system/hardware/sleds/{sled_id}": { @@ -3789,6 +3966,55 @@ } } }, + "/v1/system/hardware/sleds/{sled_id}/provision-state": { 
+ "put": { + "tags": [ + "system/hardware" + ], + "summary": "Set the sled's provision state.", + "operationId": "sled_set_provision_state", + "parameters": [ + { + "in": "path", + "name": "sled_id", + "description": "ID of the sled", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledProvisionStateParams" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledProvisionStateResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/system/hardware/switch-port": { "get": { "tags": [ @@ -7788,7 +8014,7 @@ "switch" ] }, - "BgpPeerConfig": { + "BgpPeer": { "description": "A BGP peer configuration for an interface. Includes the set of announcements that will be advertised to the peer identified by `addr`. The `bgp_config` parameter is a reference to global BGP parameters. The `interface_name` indicates what interface the peer should be contacted on.", "type": "object", "properties": { @@ -7860,6 +8086,20 @@ "keepalive" ] }, + "BgpPeerConfig": { + "type": "object", + "properties": { + "peers": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeer" + } + } + }, + "required": [ + "peers" + ] + }, "BgpPeerState": { "description": "The current state of a BGP peer.", "oneOf": [ @@ -9651,9 +9891,60 @@ "datum", "type" ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/MissingDatum" + }, + "type": { + "type": "string", + "enum": [ + "missing" + ] + } + }, + "required": [ + "datum", + "type" + ] } ] }, + "DatumType": { + "description": "The type of an individual datum of a metric.", + "type": "string", + "enum": [ + "bool", + "i8", + "u8", + "i16", + "u16", + "i32", + "u32", + "i64", + "u64", + "f32", + "f64", + "string", + "bytes", + "cumulative_i64", + "cumulative_u64", + "cumulative_f32", + "cumulative_f64", + "histogram_i8", + "histogram_u8", + "histogram_i16", + "histogram_u16", + "histogram_i32", + "histogram_u32", + "histogram_i64", + "histogram_u64", + "histogram_f32", + "histogram_f64" + ] + }, "DerEncodedKeyPair": { "type": "object", "properties": { @@ -10187,22 +10478,6 @@ "request_id" ] }, - "ExpectedDigest": { - "oneOf": [ - { - "type": "object", - "properties": { - "sha256": { - "type": "string" - } - }, - "required": [ - "sha256" - ], - "additionalProperties": false - } - ] - }, "ExternalIp": { "type": "object", "properties": { @@ -10244,6 +10519,25 @@ "required": [ "type" ] + }, + { + "description": "An IP address providing both inbound and outbound access. 
The address is an existing Floating IP object assigned to the current project.\n\nThe floating IP must not be in use by another instance or service.", + "type": "object", + "properties": { + "floating_ip_name": { + "$ref": "#/components/schemas/Name" + }, + "type": { + "type": "string", + "enum": [ + "floating" + ] + } + }, + "required": [ + "floating_ip_name", + "type" + ] } ] }, @@ -10328,6 +10622,116 @@ "role_name" ] }, + "FloatingIp": { + "description": "A Floating IP is a well-known IP address which can be attached and detached from instances.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "instance_id": { + "nullable": true, + "description": "The ID of the instance that this Floating IP is attached to, if it is presently in use.", + "type": "string", + "format": "uuid" + }, + "ip": { + "description": "The IP address held by this resource.", + "type": "string", + "format": "ip" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "project_id": { + "description": "The project this resource exists within.", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "ip", + "name", + "project_id", + "time_created", + "time_modified" + ] + }, + "FloatingIpCreate": { + "description": "Parameters for creating a new floating IP address for instances.", + "type": "object", + "properties": { + "address": { + "nullable": true, + "description": "An IP address to reserve for use as a floating IP. This field is optional: when not set, an address will be automatically chosen from `pool`. If set, then the IP must be available in the resolved `pool`.", + "type": "string", + "format": "ip" + }, + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "pool": { + "nullable": true, + "description": "The parent IP pool that a floating IP is pulled from. 
If unset, the default pool is selected.", + "allOf": [ + { + "$ref": "#/components/schemas/NameOrId" + } + ] + } + }, + "required": [ + "description", + "name" + ] + }, + "FloatingIpResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/FloatingIp" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, "Group": { "description": "View of a Group", "type": "object", @@ -10828,11 +11232,6 @@ "type": "string", "format": "date-time" }, - "url": { - "nullable": true, - "description": "URL source of this image, if any", - "type": "string" - }, "version": { "description": "Version of the operating system", "type": "string" @@ -10909,33 +11308,6 @@ "ImageSource": { "description": "The source of the underlying image.", "oneOf": [ - { - "type": "object", - "properties": { - "block_size": { - "description": "The block size in bytes", - "allOf": [ - { - "$ref": "#/components/schemas/BlockSize" - } - ] - }, - "type": { - "type": "string", - "enum": [ - "url" - ] - }, - "url": { - "type": "string" - } - }, - "required": [ - "block_size", - "type", - "url" - ] - }, { "type": "object", "properties": { @@ -10990,28 +11362,6 @@ "offset" ] }, - "ImportBlocksFromUrl": { - "description": "Parameters for importing blocks from a URL to a disk", - "type": "object", - "properties": { - "expected_digest": { - "nullable": true, - "description": "Expected digest of all blocks when importing from a URL", - "allOf": [ - { - "$ref": "#/components/schemas/ExpectedDigest" - } - ] - }, - "url": { - "description": "the source to pull blocks from", - "type": "string" - } - }, - "required": [ - "url" - ] - }, "Instance": { "description": "View of an Instance", "type": "object", @@ -11861,6 +12211,10 @@ "description": "Switch link configuration.", "type": "object", "properties": { + "autoneg": { + "description": "Whether or not to set autonegotiation", + "type": "boolean" + }, "fec": { "description": "The forward error correction mode of the link.", "allOf": [ @@ -11893,6 +12247,7 @@ } }, "required": [ + "autoneg", "fec", "lldp", "mtu", @@ -12173,6 +12528,22 @@ "items" ] }, + "MissingDatum": { + "type": "object", + "properties": { + "datum_type": { + "$ref": "#/components/schemas/DatumType" + }, + "start_time": { + "nullable": true, + "type": "string", + "format": "date-time" + } + }, + "required": [ + "datum_type" + ] + }, "Name": { "title": "A name unique within the parent collection", "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. 
Names cannot be a UUID though they may contain a UUID.", @@ -12948,6 +13319,14 @@ "type": "string", "format": "uuid" }, + "provision_state": { + "description": "The provision state of the sled.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionState" + } + ] + }, "rack_id": { "description": "The rack to which this Sled is currently attached", "type": "string", @@ -12981,6 +13360,7 @@ "required": [ "baseboard", "id", + "provision_state", "rack_id", "time_created", "time_modified", @@ -13071,6 +13451,68 @@ "items" ] }, + "SledProvisionState": { + "description": "The provision state of a sled.\n\nThis controls whether new resources are going to be provisioned on this sled.", + "oneOf": [ + { + "description": "New resources will be provisioned on this sled.", + "type": "string", + "enum": [ + "provisionable" + ] + }, + { + "description": "New resources will not be provisioned on this sled. However, existing resources will continue to be on this sled unless manually migrated off.", + "type": "string", + "enum": [ + "non_provisionable" + ] + } + ] + }, + "SledProvisionStateParams": { + "description": "Parameters for `sled_set_provision_state`.", + "type": "object", + "properties": { + "state": { + "description": "The provision state.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionState" + } + ] + } + }, + "required": [ + "state" + ] + }, + "SledProvisionStateResponse": { + "description": "Response to `sled_set_provision_state`.", + "type": "object", + "properties": { + "new_state": { + "description": "The new provision state.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionState" + } + ] + }, + "old_state": { + "description": "The old provision state.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionState" + } + ] + } + }, + "required": [ + "new_state", + "old_state" + ] + }, "SledResultsPage": { "description": "A single page of results", "type": "object", @@ -13971,7 +14413,7 @@ ] }, "UninitializedSled": { - "description": "View of a sled that has not been added to an initialized rack yet", + "description": "A sled that has not been added to an initialized rack yet", "type": "object", "properties": { "baseboard": { @@ -15032,6 +15474,13 @@ "url": "http://docs.oxide.computer/api/disks" } }, + { + "name": "floating-ips", + "description": "Floating IPs allow a project to allocate well-known IPs to instances.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/floating-ips" + } + }, { "name": "hidden", "description": "TODO operations that will not ship to customers", diff --git a/openapi/oximeter.json b/openapi/oximeter.json index 529d20e921..f5c78d53cd 100644 --- a/openapi/oximeter.json +++ b/openapi/oximeter.json @@ -191,24 +191,41 @@ "type": "object", "properties": { "address": { + "description": "The IP address and port at which `oximeter` can collect metrics from the producer.", "type": "string" }, "base_route": { + "description": "The API base route from which `oximeter` can collect metrics.\n\nThe full route is `{base_route}/{id}`.", "type": "string" }, "id": { + "description": "A unique ID for this producer.", "type": "string", "format": "uuid" }, "interval": { - "$ref": "#/components/schemas/Duration" + "description": "The interval on which `oximeter` should collect metrics.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "kind": { + "description": "The kind of producer.", + "allOf": [ + { + "$ref": "#/components/schemas/ProducerKind" + } + ] } }, "required": [ "address", "base_route", 
"id", - "interval" + "interval", + "kind" ] }, "ProducerEndpointResultsPage": { @@ -231,6 +248,32 @@ "required": [ "items" ] + }, + "ProducerKind": { + "description": "The kind of metric producer this is.", + "oneOf": [ + { + "description": "The producer is a sled-agent.", + "type": "string", + "enum": [ + "sled_agent" + ] + }, + { + "description": "The producer is an Omicron-managed service.", + "type": "string", + "enum": [ + "service" + ] + }, + { + "description": "The producer is a Propolis VMM managing a guest instance.", + "type": "string", + "enum": [ + "instance" + ] + } + ] } }, "responses": { diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index ed202ddbdb..d71f8de644 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -10,6 +10,132 @@ "version": "0.0.1" }, "paths": { + "/boot-disk/{boot_disk}/os/write": { + "post": { + "summary": "Write a new host OS image to the specified boot disk", + "operationId": "host_os_write_start", + "parameters": [ + { + "in": "path", + "name": "boot_disk", + "required": true, + "schema": { + "$ref": "#/components/schemas/M2Slot" + } + }, + { + "in": "query", + "name": "sha3_256_digest", + "required": true, + "schema": { + "type": "string", + "format": "hex string (32 bytes)" + } + }, + { + "in": "query", + "name": "update_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/octet-stream": { + "schema": { + "type": "string", + "format": "binary" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/boot-disk/{boot_disk}/os/write/status": { + "get": { + "summary": "Get the status of writing a new host OS", + "operationId": "host_os_write_status_get", + "parameters": [ + { + "in": "path", + "name": "boot_disk", + "required": true, + "schema": { + "$ref": "#/components/schemas/M2Slot" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BootDiskOsWriteStatus" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/boot-disk/{boot_disk}/os/write/status/{update_id}": { + "delete": { + "summary": "Clear the status of a completed write of a new host OS", + "operationId": "host_os_write_status_delete", + "parameters": [ + { + "in": "path", + "name": "boot_disk", + "required": true, + "schema": { + "$ref": "#/components/schemas/M2Slot" + } + }, + { + "in": "path", + "name": "update_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/cockroachdb": { "post": { "summary": "Initializes a CockroachDB cluster", @@ -377,14 +503,35 @@ } } }, - "/services": { + "/omicron-zones": { + "get": { + "operationId": "omicron_zones_get", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OmicronZonesConfig" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, 
"put": { - "operationId": "services_put", + "operationId": "omicron_zones_put", "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ServiceEnsureBody" + "$ref": "#/components/schemas/OmicronZonesConfig" } } }, @@ -2114,6 +2261,162 @@ "range" ] }, + "BootDiskOsWriteProgress": { + "description": "Current progress of an OS image being written to disk.", + "oneOf": [ + { + "description": "The image is still being uploaded.", + "type": "object", + "properties": { + "bytes_received": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "state": { + "type": "string", + "enum": [ + "receiving_uploaded_image" + ] + } + }, + "required": [ + "bytes_received", + "state" + ] + }, + { + "description": "The image is being written to disk.", + "type": "object", + "properties": { + "bytes_written": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "state": { + "type": "string", + "enum": [ + "writing_image_to_disk" + ] + } + }, + "required": [ + "bytes_written", + "state" + ] + }, + { + "description": "The image is being read back from disk for validation.", + "type": "object", + "properties": { + "bytes_read": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "state": { + "type": "string", + "enum": [ + "validating_written_image" + ] + } + }, + "required": [ + "bytes_read", + "state" + ] + } + ] + }, + "BootDiskOsWriteStatus": { + "description": "Status of an update to a boot disk OS.", + "oneOf": [ + { + "description": "No update has been started for this disk, or any previously-started update has completed and had its status cleared.", + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "no_update_started" + ] + } + }, + "required": [ + "status" + ] + }, + { + "description": "An update is currently running.", + "type": "object", + "properties": { + "progress": { + "$ref": "#/components/schemas/BootDiskOsWriteProgress" + }, + "status": { + "type": "string", + "enum": [ + "in_progress" + ] + }, + "update_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "progress", + "status", + "update_id" + ] + }, + { + "description": "The most recent update completed successfully.", + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "complete" + ] + }, + "update_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "status", + "update_id" + ] + }, + { + "description": "The most recent update failed.", + "type": "object", + "properties": { + "message": { + "type": "string" + }, + "status": { + "type": "string", + "enum": [ + "failed" + ] + }, + "update_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "message", + "status", + "update_id" + ] + } + ] + }, "BundleUtilization": { "description": "The portion of a debug dataset used for zone bundles.", "type": "object", @@ -2370,168 +2673,43 @@ "value" ] }, - "DatasetKind": { - "description": "The type of a dataset, and an auxiliary information necessary to successfully launch a zone managing the associated data.", + "Datum": { + "description": "A `Datum` is a single sampled data point from a metric.", "oneOf": [ { "type": "object", "properties": { + "datum": { + "type": "boolean" + }, "type": { "type": "string", "enum": [ - "cockroach_db" + "bool" ] } }, "required": [ + "datum", "type" ] }, { "type": "object", "properties": { + "datum": { + "type": "integer", + "format": "int8" + }, "type": { "type": "string", "enum": [ - "crucible" + "i8" ] } }, "required": 
[ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse_keeper" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "external_dns" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "internal_dns" - ] - } - }, - "required": [ - "type" - ] - } - ] - }, - "DatasetName": { - "type": "object", - "properties": { - "kind": { - "$ref": "#/components/schemas/DatasetKind" - }, - "pool_name": { - "$ref": "#/components/schemas/ZpoolName" - } - }, - "required": [ - "kind", - "pool_name" - ] - }, - "DatasetRequest": { - "description": "Describes a request to provision a specific dataset", - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "name": { - "$ref": "#/components/schemas/DatasetName" - }, - "service_address": { - "type": "string" - } - }, - "required": [ - "id", - "name", - "service_address" - ] - }, - "Datum": { - "description": "A `Datum` is a single sampled data point from a metric.", - "oneOf": [ - { - "type": "object", - "properties": { - "datum": { - "type": "boolean" - }, - "type": { - "type": "string", - "enum": [ - "bool" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "integer", - "format": "int8" - }, - "type": { - "type": "string", - "enum": [ - "i8" - ] - } - }, - "required": [ - "datum", + "datum", "type" ] }, @@ -3002,9 +3180,60 @@ "datum", "type" ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/MissingDatum" + }, + "type": { + "type": "string", + "enum": [ + "missing" + ] + } + }, + "required": [ + "datum", + "type" + ] } ] }, + "DatumType": { + "description": "The type of an individual datum of a metric.", + "type": "string", + "enum": [ + "bool", + "i8", + "u8", + "i16", + "u16", + "i32", + "u32", + "i64", + "u64", + "f32", + "f64", + "string", + "bytes", + "cumulative_i64", + "cumulative_u64", + "cumulative_f32", + "cumulative_f64", + "histogram_i8", + "histogram_u8", + "histogram_i16", + "histogram_u16", + "histogram_i32", + "histogram_u32", + "histogram_i64", + "histogram_u64", + "histogram_f32", + "histogram_f64" + ] + }, "DeleteVirtualNetworkInterfaceHost": { "description": "The data needed to identify a virtual IP for which a sled maintains an OPTE virtual-to-physical mapping such that that mapping can be deleted.", "type": "object", @@ -4305,18 +4534,23 @@ "$ref": "#/components/schemas/DiskRequest" } }, - "external_ips": { + "ephemeral_ip": { + "nullable": true, "description": "Zero or more external IP addresses (either floating or ephemeral), provided to an instance to allow inbound connectivity.", + "type": "string", + "format": "ip" + }, + "firewall_rules": { "type": "array", "items": { - "type": "string", - "format": "ip" + "$ref": "#/components/schemas/VpcFirewallRule" } }, - "firewall_rules": { + "floating_ips": { "type": "array", "items": { - "$ref": "#/components/schemas/VpcFirewallRule" + "type": "string", + "format": "ip" } }, "nics": { @@ -4335,8 +4569,8 @@ "required": [ "dhcp_config", "disks", - "external_ips", "firewall_rules", + "floating_ips", "nics", "properties", "source_nat" @@ -4923,7 +5157,89 @@ "content", "type" ] + 
}, + { + "type": "object", + "properties": { + "content": { + "type": "object", + "properties": { + "datum_type": { + "$ref": "#/components/schemas/DatumType" + } + }, + "required": [ + "datum_type" + ] + }, + "type": { + "type": "string", + "enum": [ + "missing_datum_requires_start_time" + ] + } + }, + "required": [ + "content", + "type" + ] + }, + { + "type": "object", + "properties": { + "content": { + "type": "object", + "properties": { + "datum_type": { + "$ref": "#/components/schemas/DatumType" + } + }, + "required": [ + "datum_type" + ] + }, + "type": { + "type": "string", + "enum": [ + "missing_datum_cannot_have_start_time" + ] + } + }, + "required": [ + "content", + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "invalid_timeseries_name" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, + "MissingDatum": { + "type": "object", + "properties": { + "datum_type": { + "$ref": "#/components/schemas/DatumType" + }, + "start_time": { + "nullable": true, + "type": "string", + "format": "date-time" } + }, + "required": [ + "datum_type" ] }, "Name": { @@ -5027,367 +5343,317 @@ } ] }, - "PortConfigV1": { + "OmicronZoneConfig": { + "description": "Describes one Omicron-managed zone running on a sled", "type": "object", "properties": { - "addresses": { - "description": "This port's addresses.", - "type": "array", - "items": { - "$ref": "#/components/schemas/IpNetwork" - } - }, - "bgp_peers": { - "description": "BGP peers on this port", - "type": "array", - "items": { - "$ref": "#/components/schemas/BgpPeerConfig" - } - }, - "port": { - "description": "Nmae of the port this config applies to.", - "type": "string" + "id": { + "type": "string", + "format": "uuid" }, - "routes": { - "description": "The set of routes associated with this port.", - "type": "array", - "items": { - "$ref": "#/components/schemas/RouteConfig" - } + "underlay_address": { + "type": "string", + "format": "ipv6" }, - "switch": { - "description": "Switch the port belongs to.", - "allOf": [ - { - "$ref": "#/components/schemas/SwitchLocation" - } - ] - }, - "uplink_port_fec": { - "description": "Port forward error correction type.", - "allOf": [ - { - "$ref": "#/components/schemas/PortFec" - } - ] - }, - "uplink_port_speed": { - "description": "Port speed.", - "allOf": [ - { - "$ref": "#/components/schemas/PortSpeed" - } - ] + "zone_type": { + "$ref": "#/components/schemas/OmicronZoneType" } }, "required": [ - "addresses", - "bgp_peers", - "port", - "routes", - "switch", - "uplink_port_fec", - "uplink_port_speed" - ] - }, - "PortFec": { - "description": "Switchport FEC options", - "type": "string", - "enum": [ - "firecode", - "none", - "rs" - ] - }, - "PortSpeed": { - "description": "Switchport Speed options", - "type": "string", - "enum": [ - "speed0_g", - "speed1_g", - "speed10_g", - "speed25_g", - "speed40_g", - "speed50_g", - "speed100_g", - "speed200_g", - "speed400_g" + "id", + "underlay_address", + "zone_type" ] }, - "PriorityDimension": { - "description": "A dimension along with bundles can be sorted, to determine priority.", - "oneOf": [ - { - "description": "Sorting by time, with older bundles with lower priority.", - "type": "string", - "enum": [ - "time" - ] - }, - { - "description": "Sorting by the cause for creating the bundle.", - "type": "string", - "enum": [ - "cause" - ] + "OmicronZoneDataset": { + "description": "Describes a persistent ZFS dataset associated with an Omicron zone", + "type": "object", + "properties": { + "pool_name": { + "$ref": 
"#/components/schemas/ZpoolName" } - ] - }, - "PriorityOrder": { - "description": "The priority order for bundles during cleanup.\n\nBundles are sorted along the dimensions in [`PriorityDimension`], with each dimension appearing exactly once. During cleanup, lesser-priority bundles are pruned first, to maintain the dataset quota. Note that bundles are sorted by each dimension in the order in which they appear, with each dimension having higher priority than the next.", - "type": "array", - "items": { - "$ref": "#/components/schemas/PriorityDimension" }, - "minItems": 2, - "maxItems": 2 + "required": [ + "pool_name" + ] }, - "ProducerResultsItem": { + "OmicronZoneType": { + "description": "Describes what kind of zone this is (i.e., what component is running in it) as well as any type-specific configuration", "oneOf": [ { "type": "object", "properties": { - "info": { + "address": { + "type": "string" + }, + "dns_servers": { "type": "array", "items": { - "$ref": "#/components/schemas/Sample" + "type": "string", + "format": "ip" } }, - "status": { + "domain": { + "nullable": true, + "type": "string" + }, + "nic": { + "description": "The service vNIC providing outbound connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "snat_cfg": { + "description": "The SNAT configuration for outbound connections.", + "allOf": [ + { + "$ref": "#/components/schemas/SourceNatConfig" + } + ] + }, + "type": { "type": "string", "enum": [ - "ok" + "boundary_ntp" ] } }, "required": [ - "info", - "status" + "address", + "dns_servers", + "nic", + "ntp_servers", + "snat_cfg", + "type" ] }, { "type": "object", "properties": { - "info": { - "$ref": "#/components/schemas/MetricsError" + "address": { + "type": "string" }, - "status": { + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "type": { "type": "string", "enum": [ - "err" + "clickhouse" ] } }, "required": [ - "info", - "status" + "address", + "dataset", + "type" ] - } - ] - }, - "QuantizationError": { - "description": "Errors occurring during quantizated bin generation.", - "oneOf": [ + }, { "type": "object", "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, "type": { "type": "string", "enum": [ - "overflow" + "clickhouse_keeper" ] } }, "required": [ + "address", + "dataset", "type" ] }, { "type": "object", "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, "type": { "type": "string", "enum": [ - "precision" + "cockroach_db" ] } }, "required": [ + "address", + "dataset", "type" ] }, { "type": "object", "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, "type": { "type": "string", "enum": [ - "invalid_base" + "crucible" ] } }, "required": [ + "address", + "dataset", "type" ] }, { "type": "object", "properties": { + "address": { + "type": "string" + }, "type": { "type": "string", "enum": [ - "invalid_steps" + "crucible_pantry" ] } }, "required": [ + "address", "type" ] }, { "type": "object", "properties": { + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "dns_address": { + "description": "The address at which the external DNS server is reachable.", + "type": "string" + }, + "http_address": { + "description": "The address at which the external DNS server API is 
reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, "type": { "type": "string", "enum": [ - "uneven_steps_for_base" + "external_dns" ] } }, "required": [ + "dataset", + "dns_address", + "http_address", + "nic", "type" ] }, { "type": "object", "properties": { + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "dns_address": { + "type": "string" + }, + "gz_address": { + "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", + "type": "string", + "format": "ipv6" + }, + "gz_address_index": { + "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "http_address": { + "type": "string" + }, "type": { "type": "string", "enum": [ - "powers_out_of_order" + "internal_dns" ] } }, "required": [ + "dataset", + "dns_address", + "gz_address", + "gz_address_index", + "http_address", "type" ] - } - ] - }, - "RackNetworkConfigV1": { - "description": "Initial network configuration", - "type": "object", - "properties": { - "bgp": { - "description": "BGP configurations for connecting the rack to external networks", - "type": "array", - "items": { - "$ref": "#/components/schemas/BgpConfig" - } }, - "infra_ip_first": { - "description": "First ip address to be used for configuring network infrastructure", - "type": "string", - "format": "ipv4" - }, - "infra_ip_last": { - "description": "Last ip address to be used for configuring network infrastructure", - "type": "string", - "format": "ipv4" - }, - "ports": { - "description": "Uplinks for connecting the rack to external networks", - "type": "array", - "items": { - "$ref": "#/components/schemas/PortConfigV1" - } - }, - "rack_subnet": { - "$ref": "#/components/schemas/Ipv6Network" - } - }, - "required": [ - "bgp", - "infra_ip_first", - "infra_ip_last", - "ports", - "rack_subnet" - ] - }, - "RouteConfig": { - "type": "object", - "properties": { - "destination": { - "description": "The destination of the route.", - "allOf": [ - { - "$ref": "#/components/schemas/IpNetwork" - } - ] - }, - "nexthop": { - "description": "The nexthop/gateway address.", - "type": "string", - "format": "ip" - } - }, - "required": [ - "destination", - "nexthop" - ] - }, - "Sample": { - "description": "A concrete type representing a single, timestamped measurement from a timeseries.", - "type": "object", - "properties": { - "measurement": { - "description": "The measured value of the metric at this sample", - "allOf": [ - { - "$ref": "#/components/schemas/Measurement" + { + "type": "object", + "properties": { + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "nullable": true, + "type": "string" + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "type": { + "type": "string", + "enum": [ + "internal_ntp" + ] } + }, + "required": [ + "address", + "dns_servers", + "ntp_servers", + "type" ] }, - "metric": { - "$ref": "#/components/schemas/FieldSet" - }, - "target": { - "$ref": "#/components/schemas/FieldSet" - }, - "timeseries_name": { - "description": 
"The name of the timeseries this sample belongs to", - "type": "string" - } - }, - "required": [ - "measurement", - "metric", - "target", - "timeseries_name" - ] - }, - "SemverVersion": { - "type": "string", - "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" - }, - "ServiceEnsureBody": { - "description": "Used to request that the Sled initialize multiple services.", - "type": "object", - "properties": { - "services": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ServiceZoneRequest" - } - } - }, - "required": [ - "services" - ] - }, - "ServiceType": { - "description": "Describes service-specific parameters.", - "oneOf": [ { "type": "object", "properties": { @@ -5439,331 +5705,396 @@ { "type": "object", "properties": { - "dns_address": { - "description": "The address at which the external DNS server is reachable.", - "type": "string" - }, - "http_address": { - "description": "The address at which the external DNS server API is reachable.", + "address": { "type": "string" }, - "nic": { - "description": "The service vNIC providing external connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/components/schemas/NetworkInterface" - } - ] - }, "type": { "type": "string", "enum": [ - "external_dns" + "oximeter" ] } }, "required": [ - "dns_address", - "http_address", - "nic", + "address", "type" ] + } + ] + }, + "OmicronZonesConfig": { + "description": "Describes the set of Omicron-managed zones running on a sled", + "type": "object", + "properties": { + "generation": { + "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). 
It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] }, - { - "type": "object", - "properties": { - "dns_address": { - "type": "string" - }, - "gz_address": { - "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", - "type": "string", - "format": "ipv6" - }, - "gz_address_index": { - "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "http_address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "internal_dns" - ] + "zones": { + "description": "list of running zones", + "type": "array", + "items": { + "$ref": "#/components/schemas/OmicronZoneConfig" + } + } + }, + "required": [ + "generation", + "zones" + ] + }, + "PortConfigV1": { + "type": "object", + "properties": { + "addresses": { + "description": "This port's addresses.", + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNetwork" + } + }, + "autoneg": { + "description": "Whether or not to set autonegotiation", + "default": false, + "type": "boolean" + }, + "bgp_peers": { + "description": "BGP peers on this port", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeerConfig" + } + }, + "port": { + "description": "Nmae of the port this config applies to.", + "type": "string" + }, + "routes": { + "description": "The set of routes associated with this port.", + "type": "array", + "items": { + "$ref": "#/components/schemas/RouteConfig" + } + }, + "switch": { + "description": "Switch the port belongs to.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" } - }, - "required": [ - "dns_address", - "gz_address", - "gz_address_index", - "http_address", - "type" ] }, + "uplink_port_fec": { + "description": "Port forward error correction type.", + "allOf": [ + { + "$ref": "#/components/schemas/PortFec" + } + ] + }, + "uplink_port_speed": { + "description": "Port speed.", + "allOf": [ + { + "$ref": "#/components/schemas/PortSpeed" + } + ] + } + }, + "required": [ + "addresses", + "bgp_peers", + "port", + "routes", + "switch", + "uplink_port_fec", + "uplink_port_speed" + ] + }, + "PortFec": { + "description": "Switchport FEC options", + "type": "string", + "enum": [ + "firecode", + "none", + "rs" + ] + }, + "PortSpeed": { + "description": "Switchport Speed options", + "type": "string", + "enum": [ + "speed0_g", + "speed1_g", + "speed10_g", + "speed25_g", + "speed40_g", + "speed50_g", + "speed100_g", + "speed200_g", + "speed400_g" + ] + }, + "PriorityDimension": { + "description": "A dimension along with bundles can be sorted, to determine priority.", + "oneOf": [ + { + "description": "Sorting by time, with older bundles with lower priority.", + "type": "string", + "enum": [ + "time" + ] + }, + { + "description": "Sorting by the cause for creating the bundle.", + "type": "string", + "enum": [ + "cause" + ] + } + ] + }, + "PriorityOrder": { + "description": "The priority order for bundles during cleanup.\n\nBundles are sorted along the dimensions in [`PriorityDimension`], with each dimension appearing exactly once. 
During cleanup, lesser-priority bundles are pruned first, to maintain the dataset quota. Note that bundles are sorted by each dimension in the order in which they appear, with each dimension having higher priority than the next.", + "type": "array", + "items": { + "$ref": "#/components/schemas/PriorityDimension" + }, + "minItems": 2, + "maxItems": 2 + }, + "ProducerResultsItem": { + "oneOf": [ { "type": "object", "properties": { - "address": { - "type": "string" + "info": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Sample" + } }, - "type": { + "status": { "type": "string", "enum": [ - "oximeter" + "ok" ] } }, "required": [ - "address", - "type" + "info", + "status" ] }, { "type": "object", "properties": { - "address": { - "type": "string" + "info": { + "$ref": "#/components/schemas/MetricsError" }, - "type": { + "status": { "type": "string", "enum": [ - "crucible_pantry" + "err" ] } }, "required": [ - "address", - "type" + "info", + "status" ] - }, + } + ] + }, + "QuantizationError": { + "description": "Errors occurring during quantizated bin generation.", + "oneOf": [ { "type": "object", "properties": { - "address": { - "type": "string" - }, - "dns_servers": { - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "domain": { - "nullable": true, - "type": "string" - }, - "nic": { - "description": "The service vNIC providing outbound connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/components/schemas/NetworkInterface" - } - ] - }, - "ntp_servers": { - "type": "array", - "items": { - "type": "string" - } - }, - "snat_cfg": { - "description": "The SNAT configuration for outbound connections.", - "allOf": [ - { - "$ref": "#/components/schemas/SourceNatConfig" - } - ] - }, "type": { "type": "string", "enum": [ - "boundary_ntp" + "overflow" ] } }, "required": [ - "address", - "dns_servers", - "nic", - "ntp_servers", - "snat_cfg", "type" ] }, { "type": "object", "properties": { - "address": { - "type": "string" - }, - "dns_servers": { - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "domain": { - "nullable": true, - "type": "string" - }, - "ntp_servers": { - "type": "array", - "items": { - "type": "string" - } - }, "type": { "type": "string", "enum": [ - "internal_ntp" + "precision" ] } }, "required": [ - "address", - "dns_servers", - "ntp_servers", "type" ] }, { "type": "object", "properties": { - "address": { - "type": "string" - }, "type": { "type": "string", "enum": [ - "clickhouse" + "invalid_base" ] } }, "required": [ - "address", "type" ] }, { "type": "object", "properties": { - "address": { - "type": "string" - }, "type": { "type": "string", "enum": [ - "clickhouse_keeper" + "invalid_steps" ] } }, "required": [ - "address", "type" ] }, { "type": "object", "properties": { - "address": { - "type": "string" - }, "type": { "type": "string", "enum": [ - "cockroach_db" + "uneven_steps_for_base" ] } }, "required": [ - "address", "type" ] }, { "type": "object", "properties": { - "address": { - "type": "string" - }, "type": { "type": "string", "enum": [ - "crucible" + "powers_out_of_order" ] } }, "required": [ - "address", "type" ] } ] }, - "ServiceZoneRequest": { - "description": "Describes a request to create a zone running one or more services.", + "RackNetworkConfigV1": { + "description": "Initial network configuration", "type": "object", "properties": { - "addresses": { + "bgp": { + "description": "BGP configurations for connecting the rack to external networks", "type": "array", "items": { - "type": 
"string", - "format": "ipv6" + "$ref": "#/components/schemas/BgpConfig" } }, - "dataset": { - "nullable": true, - "default": null, - "allOf": [ - { - "$ref": "#/components/schemas/DatasetRequest" - } - ] + "infra_ip_first": { + "description": "First ip address to be used for configuring network infrastructure", + "type": "string", + "format": "ipv4" }, - "id": { + "infra_ip_last": { + "description": "Last ip address to be used for configuring network infrastructure", "type": "string", - "format": "uuid" + "format": "ipv4" }, - "services": { + "ports": { + "description": "Uplinks for connecting the rack to external networks", "type": "array", "items": { - "$ref": "#/components/schemas/ServiceZoneService" + "$ref": "#/components/schemas/PortConfigV1" } }, - "zone_type": { - "$ref": "#/components/schemas/ZoneType" + "rack_subnet": { + "$ref": "#/components/schemas/Ipv6Network" } }, "required": [ - "addresses", - "id", - "services", - "zone_type" + "bgp", + "infra_ip_first", + "infra_ip_last", + "ports", + "rack_subnet" ] }, - "ServiceZoneService": { - "description": "Used to request that the Sled initialize a single service.", + "RouteConfig": { "type": "object", "properties": { - "details": { - "$ref": "#/components/schemas/ServiceType" + "destination": { + "description": "The destination of the route.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNetwork" + } + ] }, - "id": { + "nexthop": { + "description": "The nexthop/gateway address.", "type": "string", - "format": "uuid" + "format": "ip" } }, "required": [ - "details", - "id" + "destination", + "nexthop" + ] + }, + "Sample": { + "description": "A concrete type representing a single, timestamped measurement from a timeseries.", + "type": "object", + "properties": { + "measurement": { + "description": "The measured value of the metric at this sample", + "allOf": [ + { + "$ref": "#/components/schemas/Measurement" + } + ] + }, + "metric": { + "$ref": "#/components/schemas/FieldSet" + }, + "target": { + "$ref": "#/components/schemas/FieldSet" + }, + "timeseries_name": { + "description": "The name of the timeseries this sample belongs to", + "type": "string" + } + }, + "required": [ + "measurement", + "metric", + "target", + "timeseries_name" ] }, + "SemverVersion": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + }, "SetVirtualNetworkInterfaceHost": { "description": "A mapping from a virtual NIC to a physical host", "type": "object", @@ -6429,23 +6760,6 @@ "version" ] }, - "ZoneType": { - "description": "The type of zone which may be requested from Sled Agent", - "type": "string", - "enum": [ - "clickhouse", - "clickhouse_keeper", - "cockroach_db", - "crucible_pantry", - "crucible", - "external_dns", - "internal_dns", - "nexus", - "ntp", - "oximeter", - "switch" - ] - }, "Zpool": { "type": "object", "properties": { @@ -6467,6 +6781,14 @@ "description": "Zpool names are of the format ox{i,p}_. 
They are either Internal or External, and should be unique", "type": "string", "pattern": "^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + }, + "M2Slot": { + "description": "An M.2 slot that was written.", + "type": "string", + "enum": [ + "A", + "B" + ] } }, "responses": { diff --git a/openapi/wicketd.json b/openapi/wicketd.json index 60ad9a42df..804b2029c6 100644 --- a/openapi/wicketd.json +++ b/openapi/wicketd.json @@ -1545,6 +1545,11 @@ "$ref": "#/components/schemas/IpNetwork" } }, + "autoneg": { + "description": "Whether or not to set autonegotiation", + "default": false, + "type": "boolean" + }, "bgp_peers": { "description": "BGP peers on this port", "type": "array", diff --git a/oximeter/collector/src/agent.rs b/oximeter/collector/src/agent.rs index 23ff32ed66..4135125a48 100644 --- a/oximeter/collector/src/agent.rs +++ b/oximeter/collector/src/agent.rs @@ -648,6 +648,7 @@ mod tests { use hyper::Response; use hyper::Server; use hyper::StatusCode; + use omicron_common::api::internal::nexus::ProducerKind; use omicron_test_utils::dev::test_setup_log; use std::convert::Infallible; use std::net::Ipv6Addr; @@ -658,6 +659,24 @@ mod tests { use tokio::time::Instant; use uuid::Uuid; + // Interval on which oximeter collects from producers in these tests. + const COLLECTION_INTERVAL: Duration = Duration::from_secs(1); + + // Interval in calls to `tokio::time::advance`. This must be sufficiently + // small relative to `COLLECTION_INTERVAL` to ensure all ticks of internal + // timers complete as expected. + const TICK_INTERVAL: Duration = Duration::from_millis(10); + + // Total number of collection attempts. + const N_COLLECTIONS: u64 = 5; + + // Period these tests wait using `tokio::time::advance()` before checking + // their test conditions. + const TEST_WAIT_PERIOD: Duration = Duration::from_millis( + COLLECTION_INTERVAL.as_millis() as u64 * N_COLLECTIONS + + COLLECTION_INTERVAL.as_millis() as u64 / 2, + ); + // Test that we count successful collections from a target correctly. #[tokio::test] async fn test_self_stat_collection_count() { @@ -691,12 +710,12 @@ mod tests { let _task = tokio::task::spawn(server); // Register the dummy producer. - let interval = Duration::from_secs(1); let endpoint = ProducerEndpoint { id: Uuid::new_v4(), + kind: ProducerKind::Service, address, base_route: String::from("/"), - interval, + interval: COLLECTION_INTERVAL, }; collector .register_producer(endpoint) @@ -706,10 +725,8 @@ mod tests { // Step time until there has been exactly `N_COLLECTIONS` collections. tokio::time::pause(); let now = Instant::now(); - const N_COLLECTIONS: usize = 5; - let wait_for = interval * N_COLLECTIONS as u32 + interval / 2; - while now.elapsed() < wait_for { - tokio::time::advance(interval / 10).await; + while now.elapsed() < TEST_WAIT_PERIOD { + tokio::time::advance(TICK_INTERVAL).await; } // Request the statistics from the task itself. @@ -727,7 +744,7 @@ mod tests { .await .expect("failed to request statistics from task"); let stats = rx.await.expect("failed to receive statistics from task"); - assert_eq!(stats.collections.datum.value(), N_COLLECTIONS as u64); + assert_eq!(stats.collections.datum.value(), N_COLLECTIONS); assert!(stats.failed_collections.is_empty()); logctx.cleanup_successful(); } @@ -749,9 +766,9 @@ mod tests { // Register a bogus producer, which is equivalent to a producer that is // unreachable. 
- let interval = Duration::from_secs(1); let endpoint = ProducerEndpoint { id: Uuid::new_v4(), + kind: ProducerKind::Service, address: SocketAddr::V6(SocketAddrV6::new( Ipv6Addr::LOCALHOST, 0, @@ -759,7 +776,7 @@ mod tests { 0, )), base_route: String::from("/"), - interval, + interval: COLLECTION_INTERVAL, }; collector .register_producer(endpoint) @@ -769,10 +786,8 @@ mod tests { // Step time until there has been exactly `N_COLLECTIONS` collections. tokio::time::pause(); let now = Instant::now(); - const N_COLLECTIONS: usize = 5; - let wait_for = interval * N_COLLECTIONS as u32 + interval / 2; - while now.elapsed() < wait_for { - tokio::time::advance(interval / 10).await; + while now.elapsed() < TEST_WAIT_PERIOD { + tokio::time::advance(TICK_INTERVAL).await; } // Request the statistics from the task itself. @@ -798,7 +813,7 @@ mod tests { .unwrap() .datum .value(), - N_COLLECTIONS as u64 + N_COLLECTIONS, ); assert_eq!(stats.failed_collections.len(), 1); logctx.cleanup_successful(); @@ -837,12 +852,12 @@ mod tests { let _task = tokio::task::spawn(server); // Register the rather flaky producer. - let interval = Duration::from_secs(1); let endpoint = ProducerEndpoint { id: Uuid::new_v4(), + kind: ProducerKind::Service, address, base_route: String::from("/"), - interval, + interval: COLLECTION_INTERVAL, }; collector .register_producer(endpoint) @@ -852,10 +867,8 @@ mod tests { // Step time until there has been exactly `N_COLLECTIONS` collections. tokio::time::pause(); let now = Instant::now(); - const N_COLLECTIONS: usize = 5; - let wait_for = interval * N_COLLECTIONS as u32 + interval / 2; - while now.elapsed() < wait_for { - tokio::time::advance(interval / 10).await; + while now.elapsed() < TEST_WAIT_PERIOD { + tokio::time::advance(TICK_INTERVAL).await; } // Request the statistics from the task itself. 
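(The three tests above share one timing idiom worth spelling out: with the tokio clock paused, each `advance` wakes every timer whose deadline has passed, so stepping in increments much smaller than the collection interval and stopping half an interval past the N-th tick yields exactly `N_COLLECTIONS` collections — which is what the new `TICK_INTERVAL` and `TEST_WAIT_PERIOD` constants encode. A minimal standalone sketch of the same idiom, assuming only plain tokio with its `test-util` feature; the names here are illustrative, not oximeter's:

```
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::Duration;

// Requires tokio with the "test-util" feature for pause()/advance().
#[tokio::main(flavor = "current_thread")]
async fn main() {
    const PERIOD: Duration = Duration::from_secs(1);
    const STEP: Duration = Duration::from_millis(10);
    const N_TICKS: u64 = 5;

    tokio::time::pause();
    let count = Arc::new(AtomicU64::new(0));
    let counter = Arc::clone(&count);
    tokio::spawn(async move {
        let mut timer = tokio::time::interval(PERIOD);
        // An interval's first tick completes immediately; skip it so the
        // counter only reflects ticks separated by a full PERIOD.
        timer.tick().await;
        loop {
            timer.tick().await;
            counter.fetch_add(1, Ordering::SeqCst);
        }
    });

    // Step the paused clock to half a period past the N-th tick. Stopping
    // mid-period means it is never ambiguous whether the last timer fired.
    let start = tokio::time::Instant::now();
    let wait = PERIOD * N_TICKS as u32 + PERIOD / 2;
    while start.elapsed() < wait {
        tokio::time::advance(STEP).await;
    }
    assert_eq!(count.load(Ordering::SeqCst), N_TICKS);
}
```

The half-period slack in the deadline is the same trick `TEST_WAIT_PERIOD` uses: the loop can never stop exactly on a tick boundary, where whether the final collection happened would depend on scheduling order.)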
@@ -881,7 +894,7 @@ mod tests { .unwrap() .datum .value(), - N_COLLECTIONS as u64 + N_COLLECTIONS, ); assert_eq!(stats.failed_collections.len(), 1); logctx.cleanup_successful(); diff --git a/oximeter/collector/src/self_stats.rs b/oximeter/collector/src/self_stats.rs index dd1701203e..8d39e6e282 100644 --- a/oximeter/collector/src/self_stats.rs +++ b/oximeter/collector/src/self_stats.rs @@ -154,8 +154,15 @@ impl CollectionTaskStats { #[cfg(test)] mod tests { + use super::Collections; + use super::Cumulative; + use super::FailedCollections; use super::FailureReason; + use super::OximeterCollector; use super::StatusCode; + use oximeter::schema::SchemaSet; + use std::net::IpAddr; + use std::net::Ipv6Addr; #[test] fn test_failure_reason_serialization() { @@ -168,4 +175,50 @@ mod tests { assert_eq!(variant.to_string(), *as_str); } } + + const fn collector() -> OximeterCollector { + OximeterCollector { + collector_id: uuid::uuid!("cfebaa5f-3ba9-4bb5-9145-648d287df78a"), + collector_ip: IpAddr::V6(Ipv6Addr::LOCALHOST), + collector_port: 12345, + } + } + + fn collections() -> Collections { + Collections { + producer_id: uuid::uuid!("718452ab-7cca-42f6-b8b1-1aaaa1b09104"), + producer_ip: IpAddr::V6(Ipv6Addr::LOCALHOST), + producer_port: 12345, + base_route: String::from("/"), + datum: Cumulative::new(0), + } + } + + fn failed_collections() -> FailedCollections { + FailedCollections { + producer_id: uuid::uuid!("718452ab-7cca-42f6-b8b1-1aaaa1b09104"), + producer_ip: IpAddr::V6(Ipv6Addr::LOCALHOST), + producer_port: 12345, + base_route: String::from("/"), + reason: FailureReason::Unreachable.to_string(), + datum: Cumulative::new(0), + } + } + + // Check that the self-stat timeseries schema have not changed. + #[test] + fn test_no_schema_changes() { + let collector = collector(); + let collections = collections(); + let failed = failed_collections(); + let mut set = SchemaSet::default(); + assert!(set.insert_checked(&collector, &collections).is_none()); + assert!(set.insert_checked(&collector, &failed).is_none()); + + const PATH: &'static str = concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/output/self-stat-schema.json" + ); + set.assert_contents(PATH); + } } diff --git a/oximeter/collector/tests/output/self-stat-schema.json b/oximeter/collector/tests/output/self-stat-schema.json new file mode 100644 index 0000000000..0caf2d27e9 --- /dev/null +++ b/oximeter/collector/tests/output/self-stat-schema.json @@ -0,0 +1,91 @@ +{ + "oximeter_collector:collections": { + "timeseries_name": "oximeter_collector:collections", + "field_schema": [ + { + "name": "base_route", + "field_type": "string", + "source": "metric" + }, + { + "name": "collector_id", + "field_type": "uuid", + "source": "target" + }, + { + "name": "collector_ip", + "field_type": "ip_addr", + "source": "target" + }, + { + "name": "collector_port", + "field_type": "u16", + "source": "target" + }, + { + "name": "producer_id", + "field_type": "uuid", + "source": "metric" + }, + { + "name": "producer_ip", + "field_type": "ip_addr", + "source": "metric" + }, + { + "name": "producer_port", + "field_type": "u16", + "source": "metric" + } + ], + "datum_type": "cumulative_u64", + "created": "2023-12-04T17:49:47.797495948Z" + }, + "oximeter_collector:failed_collections": { + "timeseries_name": "oximeter_collector:failed_collections", + "field_schema": [ + { + "name": "base_route", + "field_type": "string", + "source": "metric" + }, + { + "name": "collector_id", + "field_type": "uuid", + "source": "target" + }, + { + "name": "collector_ip", + "field_type": 
"ip_addr", + "source": "target" + }, + { + "name": "collector_port", + "field_type": "u16", + "source": "target" + }, + { + "name": "producer_id", + "field_type": "uuid", + "source": "metric" + }, + { + "name": "producer_ip", + "field_type": "ip_addr", + "source": "metric" + }, + { + "name": "producer_port", + "field_type": "u16", + "source": "metric" + }, + { + "name": "reason", + "field_type": "string", + "source": "metric" + } + ], + "datum_type": "cumulative_u64", + "created": "2023-12-04T17:49:47.799970009Z" + } +} \ No newline at end of file diff --git a/oximeter/db/notes.txt b/oximeter/db/notes.txt deleted file mode 100644 index 66c3871d46..0000000000 --- a/oximeter/db/notes.txt +++ /dev/null @@ -1,232 +0,0 @@ -Some notes on querying - -For pagination: - -- Timeseries name is enough for paginated list timeseries endpoint. -It's just normal keyset pagination. - -- For the timeseries data, we'll be using limit/offset pagination. We'll -run the query to get the consistent timeseries keys each time. This is -the `ScanParams` part of the `WhichPage`. The `PageSelector` is the offset. - - -Now, how to run more complex queries? A good example is something like, -aggregating the timeseries across all but one field. For example, let's -look at the Nexus HTTP latency data. The fields are: - -- name (String) -- id (Uuid) -- route (String) -- method (String) -- status_code (I64) - -Imagine we wanted to look at the average latency by route, so averaged -across all methods and status codes. (Let's ingore name/id) - -We need to group the timeseries keys by route, to find the set of keys -consistent with each different route. ClickHouse provides the `groupArray` -function, which is an aggregate function that collects multiple values -into an array. So we can do: - -``` -SELECT - field_value, - groupArray(timeseries_key) -FROM fields_string -WHERE field_name = 'route' -GROUP BY field_value; - - -┌─field_value───────────────────────────────────────────┬─groupArray(timeseries_key)────────────────┐ -│ /metrics/producers │ [1916712826069192294,6228796576473532827] │ -│ /metrics/collectors │ [1500085842574282480] │ -│ /metrics/collect/e6bff1ff-24fb-49dc-a54e-c6a350cd4d6c │ [15389669872422126367] │ -│ /sled_agents/fb0f7546-4d46-40ca-9d56-cbb810684ca7 │ [1166666993114742619] │ -└───────────────────────────────────────────────────────┴───────────────────────────────────────────┘ -``` - -This gives an array of timeseries keys where the route is each of the values -on the left. - -So at a very high level, we can average all the timeseries values where the keys -are in each of these different arrays. - - -This kinda works. It produces an array of arrays, the counts for each of the -histograms, grouped by the field value. - -``` -SELECT - field_value, - groupArray(counts) -FROM -( - SELECT - field_value, - timeseries_key - FROM fields_string - WHERE field_name = 'route' -) AS f0 -INNER JOIN -( - SELECT * - FROM measurements_histogramf64 -) AS meas USING (timeseries_key) -GROUP BY field_value -``` - -We can extend this `groupArray(bins), groupArray(counts)` to get both. - - -Ok, we're getting somewhere. The aggregation "combinators" modify the behavior of -aggregations, in pretty suprising and powerful ways. 
For example: - -``` -SELECT - field_value, - sumForEach(counts) -FROM -( - SELECT - field_value, - timeseries_key - FROM fields_string - WHERE field_name = 'route' -) AS f0 -INNER JOIN -( - SELECT * - FROM measurements_histogramf64 -) AS meas USING (timeseries_key) -GROUP BY field_value -``` - -This applies the `-ForEach` combinator to the sum aggregation. This applies the -aggregation to corresponding elements of a sequence (table?) of arrays. We can -do this with any of the aggregations, `avg`, `min`, etc. - - -The `-Resample` combinator also looks interesting. It uses its arguments to create -a set of intervals, and applies the aggregation within each of those intervals. -So sort of a group-by interval or window function. - -Another useful method is `toStartOfInterval`. This takes a timestamp and an interval, -say 5 seconds, or 10 minutes, and returns the interval into which that timestamp -falls. Could be very helpful for aligning/binning data to time intervals. But -it does "round", in that the bins don't start at the first timestamp, but at -the rounded-down interval from that timestamp. - -It's possible to build intervals that start exactly at the first timestamp with: - -``` -SELECT - timestamp, - toStartOfInterval(timestamp, toIntervalMinute(1)) + ( - SELECT toSecond(min(timestamp)) - FROM measurements_histogramf64 - ) -FROM measurements_histogramf64 -``` - -Or some other rounding shenanigans. - - -Putting lots of this together: - -``` -SELECT - f0.field_name, - f0.field_value, - f1.field_name, - f1.field_value, - minForEach(bins), - avgForEach(counts) -FROM -( - SELECT - field_name, - field_value, - timeseries_key - FROM fields_string - WHERE field_name = 'route' -) AS f0 -INNER JOIN -( - SELECT - field_name, - field_value, - timeseries_key - FROM fields_i64 - WHERE field_name = 'status_code' -) AS f1 ON f0.timeseries_key = f1.timeseries_key -INNER JOIN -( - SELECT * - FROM measurements_histogramf64 -) AS meas ON f1.timeseries_key = meas.timeseries_key -GROUP BY - f0.field_name, - f0.field_value, - f1.field_name, - f1.field_value -``` - -This selects the field name/value, and the bin and average count for each -histogram, grouping by route and status code. - -These inner select statements look similar to the ones we already -implement in `field.as_query`. But in that case we select *, and here we -probably don't want to do that to avoid errors about things not being -in aggregations or group by's. - -This works (or is syntactically valid) for scalars, if we replace the -combinators with their non-combinator version: e.g, `avgForEach` -> `avg`. - - -Other rando thoughts. - -It'd be nice to have the query builder be able to handle all these, but -I'm not sure how worth it that is. For example, I don't even think we need -the timeseries keys in this query. For the fields where we are specifying -a condition, we have subqueries like: - -``` -SELECT * -FROM fields_{TYPE} -WHERE field_name = NAME -AND field_value OP VALUE; -``` - -For ones where we _don't_ care, we just have the first three lines: - -``` -SELECT * -FROM fields_{TYPE} -WHERE field_name = NAME; -``` - -We can join successive entries on timeseries keys. - -For straight SELECT queries, that's pretty much it, like we have currently. -For AGGREGATION queries, we need to - -- Have a group-by for each (field_name, field_value) pair. This is true -even when we're unselective on the field, because we are still taking that -field, and we still need to group the keys accordingly. -- Select the consistent timeseries keys. 
This is so we can correlate the -results of the aggregation back to the field names/values which we still -get from the key-select query. -- Apply the aggregation to the measurements. For scalars, this is just the -aggregation. For histograms, this is the `-Array` or `-ForEach` combinator -for that aggregation, depending on what we're applying. -- ??? to the timestamps? -- some alignment, grouping, subsampling? It seems -this has to come from the aggregation query, because there's not a useful -default. - -Speaking of defaults, how do these functions behave with missing data? -Or more subtly, what happens if two histograms (say) have the same number -of bins, but the actual bin edges are different? ClickHouse itself doesn't -deal with this AFAICT, which means we'd need to do that in the client. -Ah, but that is unlikely, since we're only aggregating data from the -same timeseries, with the same key. So far anyway. I'm not sure what'll -happen when we start correlating data between timeseries. diff --git a/oximeter/db/schema/replicated/4/up01.sql b/oximeter/db/schema/replicated/4/up01.sql new file mode 100644 index 0000000000..f36745ae2e --- /dev/null +++ b/oximeter/db/schema/replicated/4/up01.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_bool_local MODIFY COLUMN datum Nullable(UInt8) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up02.sql b/oximeter/db/schema/replicated/4/up02.sql new file mode 100644 index 0000000000..0f76398652 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up02.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_bool MODIFY COLUMN datum Nullable(UInt8) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up03.sql b/oximeter/db/schema/replicated/4/up03.sql new file mode 100644 index 0000000000..175b23d71b --- /dev/null +++ b/oximeter/db/schema/replicated/4/up03.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_i8_local MODIFY COLUMN datum Nullable(Int8) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up04.sql b/oximeter/db/schema/replicated/4/up04.sql new file mode 100644 index 0000000000..4c8f22d8e6 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up04.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_i8 MODIFY COLUMN datum Nullable(Int8) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up05.sql b/oximeter/db/schema/replicated/4/up05.sql new file mode 100644 index 0000000000..82490a81ca --- /dev/null +++ b/oximeter/db/schema/replicated/4/up05.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_u8_local MODIFY COLUMN datum Nullable(UInt8) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up06.sql b/oximeter/db/schema/replicated/4/up06.sql new file mode 100644 index 0000000000..c689682127 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up06.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_u8 MODIFY COLUMN datum Nullable(UInt8) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up07.sql b/oximeter/db/schema/replicated/4/up07.sql new file mode 100644 index 0000000000..43eb40515b --- /dev/null +++ b/oximeter/db/schema/replicated/4/up07.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_i16_local MODIFY COLUMN datum Nullable(Int16) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up08.sql b/oximeter/db/schema/replicated/4/up08.sql new file mode 100644 index 0000000000..1d983a3c83 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up08.sql @@ -0,0 +1 @@ +ALTER TABLE
oximeter.measurements_i16 MODIFY COLUMN datum Nullable(Int16) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up09.sql b/oximeter/db/schema/replicated/4/up09.sql new file mode 100644 index 0000000000..e52c2adf5f --- /dev/null +++ b/oximeter/db/schema/replicated/4/up09.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_u16_local MODIFY COLUMN datum Nullable(UInt16) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up10.sql b/oximeter/db/schema/replicated/4/up10.sql new file mode 100644 index 0000000000..d8a69fff1a --- /dev/null +++ b/oximeter/db/schema/replicated/4/up10.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_u16 MODIFY COLUMN datum Nullable(UInt16) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up11.sql b/oximeter/db/schema/replicated/4/up11.sql new file mode 100644 index 0000000000..b3c2d8de92 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up11.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_i32_local MODIFY COLUMN datum Nullable(Int32) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up12.sql b/oximeter/db/schema/replicated/4/up12.sql new file mode 100644 index 0000000000..65fca2e1b2 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up12.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_i32 MODIFY COLUMN datum Nullable(Int32) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up13.sql b/oximeter/db/schema/replicated/4/up13.sql new file mode 100644 index 0000000000..df7c520e35 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up13.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_u32_local MODIFY COLUMN datum Nullable(UInt32) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up14.sql b/oximeter/db/schema/replicated/4/up14.sql new file mode 100644 index 0000000000..a4cb43fb90 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up14.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_u32 MODIFY COLUMN datum Nullable(UInt32) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up15.sql b/oximeter/db/schema/replicated/4/up15.sql new file mode 100644 index 0000000000..f7583dbdee --- /dev/null +++ b/oximeter/db/schema/replicated/4/up15.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_i64_local MODIFY COLUMN datum Nullable(Int64) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up16.sql b/oximeter/db/schema/replicated/4/up16.sql new file mode 100644 index 0000000000..b458243d74 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up16.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_i64 MODIFY COLUMN datum Nullable(Int64) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up17.sql b/oximeter/db/schema/replicated/4/up17.sql new file mode 100644 index 0000000000..9229a97704 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up17.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_u64_local MODIFY COLUMN datum Nullable(UInt64) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up18.sql b/oximeter/db/schema/replicated/4/up18.sql new file mode 100644 index 0000000000..6e2a2a5191 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up18.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_u64 MODIFY COLUMN datum Nullable(UInt64) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up19.sql b/oximeter/db/schema/replicated/4/up19.sql new file mode 100644 index 0000000000..8f16b5d41e --- /dev/null +++ 
b/oximeter/db/schema/replicated/4/up19.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_f32_local MODIFY COLUMN datum Nullable(Float32) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up20.sql b/oximeter/db/schema/replicated/4/up20.sql new file mode 100644 index 0000000000..9263592740 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up20.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_f32 MODIFY COLUMN datum Nullable(Float32) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up21.sql b/oximeter/db/schema/replicated/4/up21.sql new file mode 100644 index 0000000000..72abba6216 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up21.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_f64_local MODIFY COLUMN datum Nullable(Float64) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up22.sql b/oximeter/db/schema/replicated/4/up22.sql new file mode 100644 index 0000000000..0d8522bc03 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up22.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_f64 MODIFY COLUMN datum Nullable(Float64) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up23.sql b/oximeter/db/schema/replicated/4/up23.sql new file mode 100644 index 0000000000..96b94c2895 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up23.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_cumulativei64_local MODIFY COLUMN datum Nullable(Int64) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up24.sql b/oximeter/db/schema/replicated/4/up24.sql new file mode 100644 index 0000000000..55df76c25f --- /dev/null +++ b/oximeter/db/schema/replicated/4/up24.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_cumulativei64 MODIFY COLUMN datum Nullable(Int64) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up25.sql b/oximeter/db/schema/replicated/4/up25.sql new file mode 100644 index 0000000000..fac7369482 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up25.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_cumulativeu64_local MODIFY COLUMN datum Nullable(UInt64) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up26.sql b/oximeter/db/schema/replicated/4/up26.sql new file mode 100644 index 0000000000..182b2b4704 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up26.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_cumulativeu64 MODIFY COLUMN datum Nullable(UInt64) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up27.sql b/oximeter/db/schema/replicated/4/up27.sql new file mode 100644 index 0000000000..b482d00f81 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up27.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_cumulativef32_local MODIFY COLUMN datum Nullable(Float32) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up28.sql b/oximeter/db/schema/replicated/4/up28.sql new file mode 100644 index 0000000000..cefbe56395 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up28.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_cumulativef32 MODIFY COLUMN datum Nullable(Float32) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up29.sql b/oximeter/db/schema/replicated/4/up29.sql new file mode 100644 index 0000000000..59e21f353d --- /dev/null +++ b/oximeter/db/schema/replicated/4/up29.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_cumulativef64_local MODIFY COLUMN datum Nullable(Float64) \ No newline at end of file diff --git 
a/oximeter/db/schema/replicated/4/up30.sql b/oximeter/db/schema/replicated/4/up30.sql new file mode 100644 index 0000000000..a609e6ad3c --- /dev/null +++ b/oximeter/db/schema/replicated/4/up30.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_cumulativef64 MODIFY COLUMN datum Nullable(Float64) \ No newline at end of file diff --git a/oximeter/db/schema/replicated/4/up31.sql b/oximeter/db/schema/replicated/4/up31.sql new file mode 100644 index 0000000000..3726895dd0 --- /dev/null +++ b/oximeter/db/schema/replicated/4/up31.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_string_local MODIFY COLUMN datum Nullable(String); diff --git a/oximeter/db/schema/replicated/4/up32.sql b/oximeter/db/schema/replicated/4/up32.sql new file mode 100644 index 0000000000..5a09705e7e --- /dev/null +++ b/oximeter/db/schema/replicated/4/up32.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_string MODIFY COLUMN datum Nullable(String); diff --git a/oximeter/db/schema/replicated/db-init.sql b/oximeter/db/schema/replicated/db-init.sql index 4429f41364..27df02b709 100644 --- a/oximeter/db/schema/replicated/db-init.sql +++ b/oximeter/db/schema/replicated/db-init.sql @@ -24,7 +24,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_bool_local ON CLUSTER oximeter_ timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt8 + datum Nullable(UInt8) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_bool_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -35,7 +35,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_bool ON CLUSTER oximeter_cluste timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt8 + datum Nullable(UInt8) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_bool_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -44,7 +44,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i8_local ON CLUSTER oximeter_cl timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Int8 + datum Nullable(Int8) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_i8_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -55,7 +55,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i8 ON CLUSTER oximeter_cluster timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Int8 + datum Nullable(Int8) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_i8_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -64,7 +64,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_u8_local ON CLUSTER oximeter_cl timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt8 + datum Nullable(UInt8) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_u8_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -75,7 +75,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_u8 ON CLUSTER oximeter_cluster timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt8 + datum Nullable(UInt8) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_u8_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -84,7 +84,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i16_local ON CLUSTER oximeter_c timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Int16 + datum Nullable(Int16) ) ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_i16_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -95,7 +95,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i16 ON CLUSTER oximeter_cluster timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Int16 + datum Nullable(Int16) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_i16_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -104,7 +104,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_u16_local ON CLUSTER oximeter_c timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt16 + datum Nullable(UInt16) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_u16_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -115,7 +115,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_u16 ON CLUSTER oximeter_cluster timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt16 + datum Nullable(UInt16) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_u16_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -124,7 +124,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i32_local ON CLUSTER oximeter_c timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Int32 + datum Nullable(Int32) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_i32_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -135,7 +135,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i32 ON CLUSTER oximeter_cluster timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Int32 + datum Nullable(Int32) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_i32_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -144,7 +144,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_u32_local ON CLUSTER oximeter_c timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt32 + datum Nullable(UInt32) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_u32_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -155,7 +155,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_u32 ON CLUSTER oximeter_cluster timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt32 + datum Nullable(UInt32) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_u32_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -164,7 +164,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i64_local ON CLUSTER oximeter_c timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Int64 + datum Nullable(Int64) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_i64_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -175,7 +175,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i64 ON CLUSTER oximeter_cluster timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Int64 + datum Nullable(Int64) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_i64_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -184,7 +184,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_u64_local ON CLUSTER oximeter_c timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum 
UInt64 + datum Nullable(UInt64) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_u64_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -195,7 +195,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_u64 ON CLUSTER oximeter_cluster timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt64 + datum Nullable(UInt64) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_u64_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -204,7 +204,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_f32_local ON CLUSTER oximeter_c timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Float32 + datum Nullable(Float32) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_f32_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -215,7 +215,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_f32 ON CLUSTER oximeter_cluster timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Float32 + datum Nullable(Float32) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_f32_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -224,7 +224,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_f64_local ON CLUSTER oximeter_c timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Float64 + datum Nullable(Float64) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_f64_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -235,7 +235,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_f64 ON CLUSTER oximeter_cluster timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Float64 + datum Nullable(Float64) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_f64_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -244,7 +244,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_string_local ON CLUSTER oximete timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum String + datum Nullable(String) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_string_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -255,7 +255,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_string ON CLUSTER oximeter_clus timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum String + datum Nullable(String) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_string_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -285,7 +285,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativei64_local ON CLUSTER timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), - datum Int64 + datum Nullable(Int64) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_cumulativei64_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) @@ -297,7 +297,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativei64 ON CLUSTER oximet timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), - datum Int64 + datum Nullable(Int64) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_cumulativei64_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -307,7 +307,7 @@ CREATE TABLE IF NOT EXISTS 
oximeter.measurements_cumulativeu64_local ON CLUSTER timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), - datum UInt64 + datum Nullable(UInt64) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_cumulativeu64_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) @@ -319,7 +319,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativeu64 ON CLUSTER oximet timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), - datum UInt64 + datum Nullable(UInt64) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_cumulativeu64_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -329,7 +329,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativef32_local ON CLUSTER timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), - datum Float32 + datum Nullable(Float32) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_cumulativef32_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) @@ -341,7 +341,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativef32 ON CLUSTER oximet timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), - datum Float32 + datum Nullable(Float32) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_cumulativef32_local', xxHash64(splitByChar(':', timeseries_name)[1])); @@ -351,7 +351,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativef64_local ON CLUSTER timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), - datum Float64 + datum Nullable(Float64) ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_cumulativef64_local', '{replica}') ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) @@ -363,7 +363,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativef64 ON CLUSTER oximet timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), - datum Float64 + datum Nullable(Float64) ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_cumulativef64_local', xxHash64(splitByChar(':', timeseries_name)[1])); diff --git a/oximeter/db/schema/single-node/4/up01.sql b/oximeter/db/schema/single-node/4/up01.sql new file mode 100644 index 0000000000..ccccc9c5fb --- /dev/null +++ b/oximeter/db/schema/single-node/4/up01.sql @@ -0,0 +1,9 @@ +/* + * To support missing measurements, we are making all scalar datum columns + * Nullable, so that a NULL value (None in Rust) represents a missing datum at + * the provided timestamp. + * + * Note that arrays cannot be made Nullable, so we need to use an empty array as + * the sentinel value implying a missing measurement. 
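+ * + * As an illustration, a missing boolean sample can then be inserted as a + * JSONEachRow row whose datum is null (the timeseries name, key, and + * timestamp here are hypothetical): + * + * {"timeseries_name": "foo:bar", "timeseries_key": 101, "timestamp": "2023-12-04 17:49:47.000000000", "datum": null}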
+ */ +ALTER TABLE oximeter.measurements_bool MODIFY COLUMN datum Nullable(UInt8) diff --git a/oximeter/db/schema/single-node/4/up02.sql b/oximeter/db/schema/single-node/4/up02.sql new file mode 100644 index 0000000000..4c8f22d8e6 --- /dev/null +++ b/oximeter/db/schema/single-node/4/up02.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_i8 MODIFY COLUMN datum Nullable(Int8) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up03.sql b/oximeter/db/schema/single-node/4/up03.sql new file mode 100644 index 0000000000..c689682127 --- /dev/null +++ b/oximeter/db/schema/single-node/4/up03.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_u8 MODIFY COLUMN datum Nullable(UInt8) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up04.sql b/oximeter/db/schema/single-node/4/up04.sql new file mode 100644 index 0000000000..1d983a3c83 --- /dev/null +++ b/oximeter/db/schema/single-node/4/up04.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_i16 MODIFY COLUMN datum Nullable(Int16) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up05.sql b/oximeter/db/schema/single-node/4/up05.sql new file mode 100644 index 0000000000..d8a69fff1a --- /dev/null +++ b/oximeter/db/schema/single-node/4/up05.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_u16 MODIFY COLUMN datum Nullable(UInt16) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up06.sql b/oximeter/db/schema/single-node/4/up06.sql new file mode 100644 index 0000000000..65fca2e1b2 --- /dev/null +++ b/oximeter/db/schema/single-node/4/up06.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_i32 MODIFY COLUMN datum Nullable(Int32) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up07.sql b/oximeter/db/schema/single-node/4/up07.sql new file mode 100644 index 0000000000..a4cb43fb90 --- /dev/null +++ b/oximeter/db/schema/single-node/4/up07.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_u32 MODIFY COLUMN datum Nullable(UInt32) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up08.sql b/oximeter/db/schema/single-node/4/up08.sql new file mode 100644 index 0000000000..b458243d74 --- /dev/null +++ b/oximeter/db/schema/single-node/4/up08.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_i64 MODIFY COLUMN datum Nullable(Int64) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up09.sql b/oximeter/db/schema/single-node/4/up09.sql new file mode 100644 index 0000000000..6e2a2a5191 --- /dev/null +++ b/oximeter/db/schema/single-node/4/up09.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_u64 MODIFY COLUMN datum Nullable(UInt64) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up10.sql b/oximeter/db/schema/single-node/4/up10.sql new file mode 100644 index 0000000000..9263592740 --- /dev/null +++ b/oximeter/db/schema/single-node/4/up10.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_f32 MODIFY COLUMN datum Nullable(Float32) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up11.sql b/oximeter/db/schema/single-node/4/up11.sql new file mode 100644 index 0000000000..0d8522bc03 --- /dev/null +++ b/oximeter/db/schema/single-node/4/up11.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_f64 MODIFY COLUMN datum Nullable(Float64) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up12.sql b/oximeter/db/schema/single-node/4/up12.sql new file mode 100644 index 0000000000..55df76c25f --- /dev/null +++ 
b/oximeter/db/schema/single-node/4/up12.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_cumulativei64 MODIFY COLUMN datum Nullable(Int64) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up13.sql b/oximeter/db/schema/single-node/4/up13.sql new file mode 100644 index 0000000000..182b2b4704 --- /dev/null +++ b/oximeter/db/schema/single-node/4/up13.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_cumulativeu64 MODIFY COLUMN datum Nullable(UInt64) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up14.sql b/oximeter/db/schema/single-node/4/up14.sql new file mode 100644 index 0000000000..cefbe56395 --- /dev/null +++ b/oximeter/db/schema/single-node/4/up14.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_cumulativef32 MODIFY COLUMN datum Nullable(Float32) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up15.sql b/oximeter/db/schema/single-node/4/up15.sql new file mode 100644 index 0000000000..a609e6ad3c --- /dev/null +++ b/oximeter/db/schema/single-node/4/up15.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_cumulativef64 MODIFY COLUMN datum Nullable(Float64) \ No newline at end of file diff --git a/oximeter/db/schema/single-node/4/up16.sql b/oximeter/db/schema/single-node/4/up16.sql new file mode 100644 index 0000000000..5a09705e7e --- /dev/null +++ b/oximeter/db/schema/single-node/4/up16.sql @@ -0,0 +1 @@ +ALTER TABLE oximeter.measurements_string MODIFY COLUMN datum Nullable(String); diff --git a/oximeter/db/schema/single-node/db-init.sql b/oximeter/db/schema/single-node/db-init.sql index ee5e91c4b7..510c1071c8 100644 --- a/oximeter/db/schema/single-node/db-init.sql +++ b/oximeter/db/schema/single-node/db-init.sql @@ -24,7 +24,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_bool timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt8 + datum Nullable(UInt8) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -35,7 +35,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i8 timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Int8 + datum Nullable(Int8) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -46,7 +46,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_u8 timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt8 + datum Nullable(UInt8) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -57,7 +57,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i16 timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Int16 + datum Nullable(Int16) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -68,7 +68,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_u16 timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt16 + datum Nullable(UInt16) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -79,7 +79,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i32 timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Int32 + datum Nullable(Int32) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -90,7 +90,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_u32 timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt32 + datum Nullable(UInt32) ) ENGINE = MergeTree() ORDER BY 
(timeseries_name, timeseries_key, timestamp) @@ -101,7 +101,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i64 timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Int64 + datum Nullable(Int64) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -112,7 +112,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_u64 timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum UInt64 + datum Nullable(UInt64) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -123,7 +123,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_f32 timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Float32 + datum Nullable(Float32) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -134,7 +134,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_f64 timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum Float64 + datum Nullable(Float64) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -145,7 +145,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_string timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), - datum String + datum Nullable(String) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) @@ -156,6 +156,13 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_bytes timeseries_name String, timeseries_key UInt64, timestamp DateTime64(9, 'UTC'), + /* + * NOTE: Right now we can't unambiguously record a nullable byte array. + * Arrays cannot be nested in `Nullable()` types, and encoding the array as + * a string isn't palatable for a few reasons. + * See: https://github.com/oxidecomputer/omicron/issues/4551 for more + * details. + */ datum Array(UInt8) ) ENGINE = MergeTree() @@ -168,7 +175,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativei64 timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), - datum Int64 + datum Nullable(Int64) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) @@ -180,7 +187,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativeu64 timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), - datum UInt64 + datum Nullable(UInt64) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) @@ -192,7 +199,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativef32 timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), - datum Float32 + datum Nullable(Float32) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) @@ -205,7 +212,7 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativef64 timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), - datum Float64 + datum Nullable(Float64) ) ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) @@ -217,6 +224,16 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami8 timeseries_key UInt64, start_time DateTime64(9, 'UTC'), timestamp DateTime64(9, 'UTC'), + /* + * NOTE: Array types cannot be Nullable, see + * https://clickhouse.com/docs/en/sql-reference/data-types/nullable + * for more details. + * + * This means we need to use empty arrays to indicate a missing value. 
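+ * For example, a missing histogram sample would be written with "bins": [] + * and "counts": [] in place of the populated arrays.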
This + * is unfortunate, and at this point relies on the fact that an + * `oximeter::Histogram` cannot have zero bins. If that changes, we'll need + * to figure out another way to represent missing samples here. + */ bins Array(Int8), counts Array(UInt64) ) diff --git a/oximeter/db/src/client.rs b/oximeter/db/src/client.rs index e1ed06554c..d295d0dcdf 100644 --- a/oximeter/db/src/client.rs +++ b/oximeter/db/src/client.rs @@ -710,7 +710,7 @@ impl Client { &self, sample: &Sample, ) -> Result, Error> { - let sample_schema = model::schema_for(sample); + let sample_schema = TimeseriesSchema::from(sample); let name = sample_schema.timeseries_name.clone(); let mut schema = self.schema.lock().await; @@ -1190,7 +1190,7 @@ mod tests { use super::*; use crate::query; use crate::query::field_table_name; - use crate::query::measurement_table_name; + use bytes::Bytes; use chrono::Utc; use omicron_test_utils::dev::clickhouse::{ ClickHouseCluster, ClickHouseInstance, @@ -1198,8 +1198,10 @@ mod tests { use omicron_test_utils::dev::test_setup_log; use oximeter::histogram::Histogram; use oximeter::test_util; + use oximeter::types::MissingDatum; use oximeter::Datum; use oximeter::FieldValue; + use oximeter::Measurement; use oximeter::Metric; use oximeter::Target; use std::net::Ipv6Addr; @@ -1871,7 +1873,7 @@ mod tests { client.insert_samples(&[sample.clone()]).await.unwrap(); // The internal map should now contain both the new timeseries schema - let actual_schema = model::schema_for(&sample); + let actual_schema = TimeseriesSchema::from(&sample); let timeseries_name = TimeseriesName::try_from(sample.timeseries_name.as_str()).unwrap(); let expected_schema = client @@ -2957,76 +2959,102 @@ mod tests { Ok(()) } + async fn test_recall_missing_scalar_measurement_impl( + measurement: Measurement, + client: &Client, + ) -> Result<(), Error> { + let start_time = if measurement.datum().is_cumulative() { + Some(Utc::now()) + } else { + None + }; + let missing_datum = Datum::from( + MissingDatum::new(measurement.datum_type(), start_time).unwrap(), + ); + let missing_measurement = Measurement::new(Utc::now(), missing_datum); + test_recall_measurement_impl(missing_measurement, client).await?; + Ok(()) + } + async fn recall_measurement_bool_test( client: &Client, ) -> Result<(), Error> { let datum = Datum::Bool(true); - let as_json = serde_json::Value::from(1_u64); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } async fn recall_measurement_i8_test(client: &Client) -> Result<(), Error> { let datum = Datum::I8(1); - let as_json = serde_json::Value::from(1_i8); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } async fn recall_measurement_u8_test(client: &Client) -> Result<(), Error> { let datum = Datum::U8(1); - let as_json = serde_json::Value::from(1_u8); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } async fn recall_measurement_i16_test(client: &Client) -> 
Result<(), Error> { let datum = Datum::I16(1); - let as_json = serde_json::Value::from(1_i16); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } async fn recall_measurement_u16_test(client: &Client) -> Result<(), Error> { let datum = Datum::U16(1); - let as_json = serde_json::Value::from(1_u16); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } async fn recall_measurement_i32_test(client: &Client) -> Result<(), Error> { let datum = Datum::I32(1); - let as_json = serde_json::Value::from(1_i32); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } async fn recall_measurement_u32_test(client: &Client) -> Result<(), Error> { let datum = Datum::U32(1); - let as_json = serde_json::Value::from(1_u32); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } async fn recall_measurement_i64_test(client: &Client) -> Result<(), Error> { let datum = Datum::I64(1); - let as_json = serde_json::Value::from(1_i64); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } async fn recall_measurement_u64_test(client: &Client) -> Result<(), Error> { let datum = Datum::U64(1); - let as_json = serde_json::Value::from(1_u64); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } @@ -3034,9 +3062,9 @@ mod tests { async fn recall_measurement_f32_test(client: &Client) -> Result<(), Error> { const VALUE: f32 = 1.1; let datum = Datum::F32(VALUE); - // NOTE: This is intentionally an f64. 
- let as_json = serde_json::Value::from(1.1_f64); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } @@ -3044,18 +3072,43 @@ mod tests { async fn recall_measurement_f64_test(client: &Client) -> Result<(), Error> { const VALUE: f64 = 1.1; let datum = Datum::F64(VALUE); - let as_json = serde_json::Value::from(VALUE); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } + async fn recall_measurement_string_test( + client: &Client, + ) -> Result<(), Error> { + let value = String::from("foo"); + let datum = Datum::String(value.clone()); + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) + .await?; + Ok(()) + } + + async fn recall_measurement_bytes_test( + client: &Client, + ) -> Result<(), Error> { + let value = Bytes::from(vec![0, 1, 2]); + let datum = Datum::Bytes(value.clone()); + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + // NOTE: We don't currently support missing byte array samples. + Ok(()) + } + async fn recall_measurement_cumulative_i64_test( client: &Client, ) -> Result<(), Error> { let datum = Datum::CumulativeI64(1.into()); - let as_json = serde_json::Value::from(1_i64); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } @@ -3064,8 +3117,9 @@ mod tests { client: &Client, ) -> Result<(), Error> { let datum = Datum::CumulativeU64(1.into()); - let as_json = serde_json::Value::from(1_u64); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } @@ -3074,8 +3128,9 @@ mod tests { client: &Client, ) -> Result<(), Error> { let datum = Datum::CumulativeF64(1.1.into()); - let as_json = serde_json::Value::from(1.1_f64); - test_recall_measurement_impl::(datum, None, as_json, client) + let measurement = Measurement::new(Utc::now(), datum); + test_recall_measurement_impl(measurement.clone(), client).await?; + test_recall_missing_scalar_measurement_impl(measurement, client) .await?; Ok(()) } @@ -3089,13 +3144,15 @@ mod tests { Datum: From>, serde_json::Value: From, { - let (bins, counts) = hist.to_arrays(); let datum = Datum::from(hist); - let as_json = serde_json::Value::Array( - counts.into_iter().map(Into::into).collect(), + let measurement = Measurement::new(Utc::now(), datum); + let missing_datum = Datum::Missing( + MissingDatum::new(measurement.datum_type(), Some(Utc::now())) + .unwrap(), ); - test_recall_measurement_impl(datum, Some(bins), as_json, client) - .await?; + let missing_measurement = Measurement::new(Utc::now(), missing_datum); + test_recall_measurement_impl(measurement, client).await?; + 
test_recall_measurement_impl(missing_measurement, client).await?; Ok(()) } @@ -3192,54 +3249,23 @@ mod tests { Ok(()) } - async fn test_recall_measurement_impl + Copy>( - datum: Datum, - maybe_bins: Option>, - json_datum: serde_json::Value, + async fn test_recall_measurement_impl( + measurement: Measurement, client: &Client, ) -> Result<(), Error> { // Insert a record from this datum. const TIMESERIES_NAME: &str = "foo:bar"; const TIMESERIES_KEY: u64 = 101; - let mut inserted_row = serde_json::Map::new(); - inserted_row - .insert("timeseries_name".to_string(), TIMESERIES_NAME.into()); - inserted_row - .insert("timeseries_key".to_string(), TIMESERIES_KEY.into()); - inserted_row.insert( - "timestamp".to_string(), - Utc::now() - .format(crate::DATABASE_TIMESTAMP_FORMAT) - .to_string() - .into(), - ); - - // Insert the start time and possibly bins. - if let Some(start_time) = datum.start_time() { - inserted_row.insert( - "start_time".to_string(), - start_time - .format(crate::DATABASE_TIMESTAMP_FORMAT) - .to_string() - .into(), - ); - } - if let Some(bins) = &maybe_bins { - let bins = serde_json::Value::Array( - bins.iter().copied().map(Into::into).collect(), + let (measurement_table, inserted_row) = + crate::model::unroll_measurement_row_impl( + TIMESERIES_NAME.to_string(), + TIMESERIES_KEY, + &measurement, ); - inserted_row.insert("bins".to_string(), bins); - inserted_row.insert("counts".to_string(), json_datum); - } else { - inserted_row.insert("datum".to_string(), json_datum); - } - let inserted_row = serde_json::Value::from(inserted_row); - - let measurement_table = measurement_table_name(datum.datum_type()); - let row = serde_json::to_string(&inserted_row).unwrap(); let insert_sql = format!( - "INSERT INTO oximeter.{measurement_table} FORMAT JSONEachRow {row}", + "INSERT INTO {measurement_table} FORMAT JSONEachRow {inserted_row}", ); + println!("Inserted row: {}", inserted_row); client .execute(insert_sql) .await @@ -3247,21 +3273,22 @@ mod tests { // Select it exactly back out. 
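+ // The query below filters on the exact timestamp we just wrote, so we + // read back only the row inserted above.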
let select_sql = format!( - "SELECT * FROM oximeter.{} LIMIT 2 FORMAT {};", + "SELECT * FROM {} WHERE timestamp = '{}' FORMAT {};", measurement_table, + measurement.timestamp().format(crate::DATABASE_TIMESTAMP_FORMAT), crate::DATABASE_SELECT_FORMAT, ); let body = client .execute_with_body(select_sql) .await .expect("Failed to select measurement row"); - println!("{}", body); - let actual_row: serde_json::Value = serde_json::from_str(&body) - .expect("Failed to parse measurement row JSON"); - println!("{actual_row:?}"); - println!("{inserted_row:?}"); + let (_, actual_row) = crate::model::parse_measurement_from_row( + &body, + measurement.datum_type(), + ); + println!("Actual row: {actual_row:?}"); assert_eq!( - actual_row, inserted_row, + actual_row, measurement, "Actual and expected measurement rows do not match" ); Ok(()) @@ -3311,6 +3338,10 @@ mod tests { recall_measurement_f64_test(&client).await.unwrap(); + recall_measurement_string_test(&client).await.unwrap(); + + recall_measurement_bytes_test(&client).await.unwrap(); + recall_measurement_cumulative_i64_test(&client).await.unwrap(); recall_measurement_cumulative_u64_test(&client).await.unwrap(); diff --git a/oximeter/db/src/lib.rs b/oximeter/db/src/lib.rs index 425c5189ee..9029319048 100644 --- a/oximeter/db/src/lib.rs +++ b/oximeter/db/src/lib.rs @@ -7,13 +7,23 @@ // Copyright 2023 Oxide Computer Company use crate::query::StringFieldSelector; -use chrono::{DateTime, Utc}; -use dropshot::{EmptyScanParams, PaginationParams}; -pub use oximeter::{DatumType, Field, FieldType, Measurement, Sample}; +use chrono::DateTime; +use chrono::Utc; +use dropshot::EmptyScanParams; +use dropshot::PaginationParams; +pub use oximeter::schema::FieldSchema; +pub use oximeter::schema::FieldSource; +pub use oximeter::schema::TimeseriesName; +pub use oximeter::schema::TimeseriesSchema; +pub use oximeter::DatumType; +pub use oximeter::Field; +pub use oximeter::FieldType; +pub use oximeter::Measurement; +pub use oximeter::Sample; use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; +use serde::Deserialize; +use serde::Serialize; use std::collections::BTreeMap; -use std::collections::BTreeSet; use std::convert::TryFrom; use std::io; use std::num::NonZeroU32; @@ -23,7 +33,8 @@ use thiserror::Error; mod client; pub mod model; pub mod query; -pub use client::{Client, DbWrite}; +pub use client::Client; +pub use client::DbWrite; pub use model::OXIMETER_VERSION; @@ -78,9 +89,6 @@ pub enum Error { #[error("The field comparison {op} is not valid for the type {ty}")] InvalidFieldCmp { op: String, ty: FieldType }, - #[error("Invalid timeseries name")] - InvalidTimeseriesName, - #[error("Query must resolve to a single timeseries if limit is specified")] InvalidLimitQuery, @@ -117,136 +125,6 @@ pub enum Error { NonSequentialSchemaVersions, } -/// A timeseries name. -/// -/// Timeseries are named by concatenating the names of their target and metric, joined with a -/// colon. 
-#[derive( - Debug, Clone, PartialEq, PartialOrd, Ord, Eq, Hash, Serialize, Deserialize, -)] -#[serde(try_from = "&str")] -pub struct TimeseriesName(String); - -impl JsonSchema for TimeseriesName { - fn schema_name() -> String { - "TimeseriesName".to_string() - } - - fn json_schema( - _: &mut schemars::gen::SchemaGenerator, - ) -> schemars::schema::Schema { - schemars::schema::SchemaObject { - metadata: Some(Box::new(schemars::schema::Metadata { - title: Some("The name of a timeseries".to_string()), - description: Some( - "Names are constructed by concatenating the target \ - and metric names with ':'. Target and metric \ - names must be lowercase alphanumeric characters \ - with '_' separating words." - .to_string(), - ), - ..Default::default() - })), - instance_type: Some(schemars::schema::InstanceType::String.into()), - string: Some(Box::new(schemars::schema::StringValidation { - pattern: Some(TIMESERIES_NAME_REGEX.to_string()), - ..Default::default() - })), - ..Default::default() - } - .into() - } -} - -impl std::ops::Deref for TimeseriesName { - type Target = String; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl std::fmt::Display for TimeseriesName { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -impl std::convert::TryFrom<&str> for TimeseriesName { - type Error = Error; - fn try_from(s: &str) -> Result { - validate_timeseries_name(s).map(|s| TimeseriesName(s.to_string())) - } -} - -impl std::convert::TryFrom for TimeseriesName { - type Error = Error; - fn try_from(s: String) -> Result { - validate_timeseries_name(&s)?; - Ok(TimeseriesName(s)) - } -} - -impl std::str::FromStr for TimeseriesName { - type Err = Error; - fn from_str(s: &str) -> Result { - s.try_into() - } -} - -impl PartialEq for TimeseriesName -where - T: AsRef, -{ - fn eq(&self, other: &T) -> bool { - self.0.eq(other.as_ref()) - } -} - -fn validate_timeseries_name(s: &str) -> Result<&str, Error> { - if regex::Regex::new(TIMESERIES_NAME_REGEX).unwrap().is_match(s) { - Ok(s) - } else { - Err(Error::InvalidTimeseriesName) - } -} - -/// The schema for a timeseries. -/// -/// This includes the name of the timeseries, as well as the datum type of its metric and the -/// schema for each field. -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct TimeseriesSchema { - pub timeseries_name: TimeseriesName, - pub field_schema: BTreeSet, - pub datum_type: DatumType, - pub created: DateTime, -} - -impl TimeseriesSchema { - /// Return the schema for the given field. - pub fn field_schema(&self, name: S) -> Option<&FieldSchema> - where - S: AsRef, - { - self.field_schema.iter().find(|field| field.name == name.as_ref()) - } - - /// Return the target and metric component names for this timeseries - pub fn component_names(&self) -> (&str, &str) { - self.timeseries_name - .split_once(':') - .expect("Incorrectly formatted timseries name") - } -} - -impl PartialEq for TimeseriesSchema { - fn eq(&self, other: &TimeseriesSchema) -> bool { - self.timeseries_name == other.timeseries_name - && self.datum_type == other.datum_type - && self.field_schema == other.field_schema - } -} - impl From for TimeseriesSchema { fn from(schema: model::DbTimeseriesSchema) -> TimeseriesSchema { TimeseriesSchema { @@ -285,25 +163,6 @@ pub struct Timeseries { pub measurements: Vec, } -/// The source from which a field is derived, the target or metric. 
-#[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Deserialize, - Serialize, - JsonSchema, -)] -#[serde(rename_all = "snake_case")] -pub enum FieldSource { - Target, - Metric, -} - #[derive( Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, )] @@ -329,24 +188,6 @@ impl From for DbFieldSource { } } -/// The name and type information for a field of a timeseries schema. -#[derive( - Clone, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Deserialize, - Serialize, - JsonSchema, -)] -pub struct FieldSchema { - pub name: String, - pub ty: FieldType, - pub source: FieldSource, -} - /// Type used to paginate request to list timeseries schema. pub type TimeseriesSchemaPaginationParams = PaginationParams; @@ -422,19 +263,6 @@ const DATABASE_NAME: &str = "oximeter"; // See https://clickhouse.com/docs/en/interfaces/formats/#jsoneachrow for details. const DATABASE_SELECT_FORMAT: &str = "JSONEachRow"; -// Regular expression describing valid timeseries names. -// -// Names are derived from the names of the Rust structs for the target and metric, converted to -// snake case. So the names must be valid identifiers, and generally: -// -// - Start with lowercase a-z -// - Any number of alphanumerics -// - Zero or more of the above, delimited by '-'. -// -// That describes the target/metric name, and the timeseries is two of those, joined with ':'. -const TIMESERIES_NAME_REGEX: &str = - "(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*):(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*)"; - #[cfg(test)] mod tests { use super::*; @@ -548,71 +376,16 @@ mod tests { ); } - // Test that we correctly order field across a target and metric. - // - // In an earlier commit, we switched from storing fields in an unordered Vec - // to using a BTree{Map,Set} to ensure ordering by name. However, the - // `TimeseriesSchema` type stored all its fields by chaining the sorted - // fields from the target and metric, without then sorting _across_ them. - // - // This was exacerbated by the error reporting, where we did in fact sort - // all fields across the target and metric, making it difficult to tell how - // the derived schema was different, if at all. - // - // This test generates a sample with a schema where the target and metric - // fields are sorted within them, but not across them. We check that the - // derived schema are actually equal, which means we've imposed that - // ordering when deriving the schema. 
- #[test] - fn test_schema_field_ordering_across_target_metric() { - let target_field = FieldSchema { - name: String::from("later"), - ty: FieldType::U64, - source: FieldSource::Target, - }; - let metric_field = FieldSchema { - name: String::from("earlier"), - ty: FieldType::U64, - source: FieldSource::Metric, - }; - let timeseries_name: TimeseriesName = "foo:bar".parse().unwrap(); - let datum_type = DatumType::U64; - let field_schema = - [target_field.clone(), metric_field.clone()].into_iter().collect(); - let expected_schema = TimeseriesSchema { - timeseries_name, - field_schema, - datum_type, - created: Utc::now(), - }; - - #[derive(oximeter::Target)] - struct Foo { - later: u64, - } - #[derive(oximeter::Metric)] - struct Bar { - earlier: u64, - datum: u64, - } - - let target = Foo { later: 1 }; - let metric = Bar { earlier: 2, datum: 10 }; - let sample = Sample::new(&target, &metric).unwrap(); - let derived_schema = model::schema_for(&sample); - assert_eq!(derived_schema, expected_schema); - } - #[test] fn test_unsorted_db_fields_are_sorted_on_read() { let target_field = FieldSchema { name: String::from("later"), - ty: FieldType::U64, + field_type: FieldType::U64, source: FieldSource::Target, }; let metric_field = FieldSchema { name: String::from("earlier"), - ty: FieldType::U64, + field_type: FieldType::U64, source: FieldSource::Metric, }; let timeseries_name: TimeseriesName = "foo:bar".parse().unwrap(); @@ -632,7 +405,10 @@ mod tests { // the extracted model type. let db_fields = DbFieldList { names: vec![target_field.name.clone(), metric_field.name.clone()], - types: vec![target_field.ty.into(), metric_field.ty.into()], + types: vec![ + target_field.field_type.into(), + metric_field.field_type.into(), + ], sources: vec![ target_field.source.into(), metric_field.source.into(), @@ -646,23 +422,4 @@ mod tests { }; assert_eq!(expected_schema, TimeseriesSchema::from(db_schema)); } - - #[test] - fn test_field_schema_ordering() { - let mut fields = BTreeSet::new(); - fields.insert(FieldSchema { - name: String::from("second"), - ty: FieldType::U64, - source: FieldSource::Target, - }); - fields.insert(FieldSchema { - name: String::from("first"), - ty: FieldType::U64, - source: FieldSource::Target, - }); - let mut iter = fields.iter(); - assert_eq!(iter.next().unwrap().name, "first"); - assert_eq!(iter.next().unwrap().name, "second"); - assert!(iter.next().is_none()); - } } diff --git a/oximeter/db/src/model.rs b/oximeter/db/src/model.rs index 715e025a04..b1b45eabc4 100644 --- a/oximeter/db/src/model.rs +++ b/oximeter/db/src/model.rs @@ -12,7 +12,6 @@ use crate::FieldSource; use crate::Metric; use crate::Target; use crate::TimeseriesKey; -use crate::TimeseriesName; use crate::TimeseriesSchema; use bytes::Bytes; use chrono::DateTime; @@ -26,6 +25,7 @@ use oximeter::types::Field; use oximeter::types::FieldType; use oximeter::types::FieldValue; use oximeter::types::Measurement; +use oximeter::types::MissingDatum; use oximeter::types::Sample; use serde::Deserialize; use serde::Serialize; @@ -43,7 +43,7 @@ use uuid::Uuid; /// - [`crate::Client::initialize_db_with_version`] /// - [`crate::Client::ensure_schema`] /// - The `clickhouse-schema-updater` binary in this crate -pub const OXIMETER_VERSION: u64 = 3; +pub const OXIMETER_VERSION: u64 = 4; // Wrapper type to represent a boolean in the database. 
// @@ -117,7 +117,7 @@ impl From for BTreeSet { .zip(list.sources) .map(|((name, ty), source)| FieldSchema { name, - ty: ty.into(), + field_type: ty.into(), source: source.into(), }) .collect() @@ -130,8 +130,8 @@ impl From> for DbFieldList { let mut types = Vec::with_capacity(list.len()); let mut sources = Vec::with_capacity(list.len()); for field in list.into_iter() { - names.push(field.name); - types.push(field.ty.into()); + names.push(field.name.to_string()); + types.push(field.field_type.into()); sources.push(field.source.into()); } DbFieldList { names, types, sources } @@ -212,6 +212,7 @@ impl From for DbFieldType { } } } + #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)] pub enum DbDatumType { Bool, @@ -402,7 +403,7 @@ macro_rules! declare_measurement_row { timeseries_key: TimeseriesKey, #[serde(with = "serde_timestamp")] timestamp: DateTime, - datum: $datum_type, + datum: Option<$datum_type>, } impl_table_name!{$name, "measurements", $data_type} @@ -433,7 +434,7 @@ macro_rules! declare_cumulative_measurement_row { start_time: DateTime, #[serde(with = "serde_timestamp")] timestamp: DateTime, - datum: $datum_type, + datum: Option<$datum_type>, } impl_table_name!{$name, "measurements", $data_type} @@ -456,6 +457,22 @@ struct DbHistogram { pub counts: Vec, } +// We use an empty histogram to indicate a missing sample. +// +// While ClickHouse supports nullable types, the inner type can't be a +// "composite", which includes arrays. I.e., `Nullable(Array(UInt8))` can't be +// used. This is unfortunate, but we are aided by the fact that it's not +// possible to have an `oximeter` histogram that contains zero bins right now. +// This is checked by a test in `oximeter::histogram`. +// +// That means we can currently use an empty array from the database as a +// sentinel for a missing sample. +impl DbHistogram { + fn null() -> Self { + Self { bins: vec![], counts: vec![] } + } +} + impl From<&Histogram> for DbHistogram where T: traits::HistogramSupport, @@ -647,334 +664,571 @@ pub(crate) fn unroll_measurement_row(sample: &Sample) -> (String, String) { let timeseries_name = sample.timeseries_name.clone(); let timeseries_key = crate::timeseries_key(sample); let measurement = &sample.measurement; + unroll_measurement_row_impl(timeseries_name, timeseries_key, measurement) +} + +/// Given a sample's measurement, return a table name and row to insert. +/// +/// This returns a tuple giving the name of the table, and the JSON +/// representation for the serialized row to be inserted into that table, +/// written out as a string. 
+pub(crate) fn unroll_measurement_row_impl( + timeseries_name: String, + timeseries_key: TimeseriesKey, + measurement: &Measurement, +) -> (String, String) { let timestamp = measurement.timestamp(); let extract_start_time = |measurement: &Measurement| { measurement .start_time() .expect("Cumulative measurements must have a start time") }; + match measurement.datum() { Datum::Bool(inner) => { + let datum = Some(DbBool::from(*inner)); let row = BoolMeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: DbBool::from(*inner), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::I8(inner) => { + let datum = Some(*inner); let row = I8MeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: *inner, + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::U8(inner) => { + let datum = Some(*inner); let row = U8MeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: *inner, + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::I16(inner) => { + let datum = Some(*inner); let row = I16MeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: *inner, + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::U16(inner) => { + let datum = Some(*inner); let row = U16MeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: *inner, + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::I32(inner) => { + let datum = Some(*inner); let row = I32MeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: *inner, + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::U32(inner) => { + let datum = Some(*inner); let row = U32MeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: *inner, + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::I64(inner) => { + let datum = Some(*inner); let row = I64MeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: *inner, + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::U64(inner) => { + let datum = Some(*inner); let row = U64MeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: *inner, + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::F32(inner) => { + let datum = Some(*inner); let row = F32MeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: *inner, + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::F64(inner) => { + let datum = Some(*inner); let row = F64MeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: *inner, + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - Datum::String(ref inner) => { + Datum::String(inner) => { + let datum = Some(inner.clone()); let row = StringMeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: inner.clone(), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - Datum::Bytes(ref inner) => { + Datum::Bytes(inner) => { + let datum = Some(inner.clone()); let row = BytesMeasurementRow { timeseries_name, timeseries_key, timestamp, - datum: inner.clone(), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::CumulativeI64(inner) => { + let datum = Some(inner.value()); let row = CumulativeI64MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: inner.value(), + datum, }; (row.table_name(), 
serde_json::to_string(&row).unwrap()) } Datum::CumulativeU64(inner) => { + let datum = Some(inner.value()); let row = CumulativeU64MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: inner.value(), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::CumulativeF32(inner) => { + let datum = Some(inner.value()); let row = CumulativeF32MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: inner.value(), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } Datum::CumulativeF64(inner) => { + let datum = Some(inner.value()); let row = CumulativeF64MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: inner.value(), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - Datum::HistogramI8(ref inner) => { + Datum::HistogramI8(inner) => { + let datum = DbHistogram::from(inner); let row = HistogramI8MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: DbHistogram::from(inner), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - Datum::HistogramU8(ref inner) => { + Datum::HistogramU8(inner) => { + let datum = DbHistogram::from(inner); let row = HistogramU8MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: DbHistogram::from(inner), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - Datum::HistogramI16(ref inner) => { + Datum::HistogramI16(inner) => { + let datum = DbHistogram::from(inner); let row = HistogramI16MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: DbHistogram::from(inner), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - Datum::HistogramU16(ref inner) => { + Datum::HistogramU16(inner) => { + let datum = DbHistogram::from(inner); let row = HistogramU16MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: DbHistogram::from(inner), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - Datum::HistogramI32(ref inner) => { + Datum::HistogramI32(inner) => { + let datum = DbHistogram::from(inner); let row = HistogramI32MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: DbHistogram::from(inner), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - Datum::HistogramU32(ref inner) => { + Datum::HistogramU32(inner) => { + let datum = DbHistogram::from(inner); let row = HistogramU32MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: DbHistogram::from(inner), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - Datum::HistogramI64(ref inner) => { + Datum::HistogramI64(inner) => { + let datum = DbHistogram::from(inner); let row = HistogramI64MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: DbHistogram::from(inner), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - Datum::HistogramU64(ref inner) => { + Datum::HistogramU64(inner) => { + let datum = DbHistogram::from(inner); let row = HistogramU64MeasurementRow { timeseries_name, timeseries_key, start_time: 
extract_start_time(measurement), timestamp, - datum: DbHistogram::from(inner), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - Datum::HistogramF32(ref inner) => { + Datum::HistogramF32(inner) => { + let datum = DbHistogram::from(inner); let row = HistogramF32MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: DbHistogram::from(inner), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - Datum::HistogramF64(ref inner) => { + Datum::HistogramF64(inner) => { + let datum = DbHistogram::from(inner); let row = HistogramF64MeasurementRow { timeseries_name, timeseries_key, start_time: extract_start_time(measurement), timestamp, - datum: DbHistogram::from(inner), + datum, }; (row.table_name(), serde_json::to_string(&row).unwrap()) } - } -} - -/// Return the schema for a `Sample`. -pub(crate) fn schema_for(sample: &Sample) -> TimeseriesSchema { - // The fields are iterated through whatever order the `Target` or `Metric` - // impl chooses. We'll store in a set ordered by field name, to ignore the - // declaration order. - let created = Utc::now(); - let field_schema = sample - .target_fields() - .map(|field| FieldSchema { - name: field.name.clone(), - ty: field.value.field_type(), - source: FieldSource::Target, - }) - .chain(sample.metric_fields().map(|field| FieldSchema { - name: field.name.clone(), - ty: field.value.field_type(), - source: FieldSource::Metric, - })) - .collect(); - TimeseriesSchema { - timeseries_name: TimeseriesName::try_from( - sample.timeseries_name.as_str(), - ) - .expect("Failed to parse timeseries name"), - field_schema, - datum_type: sample.measurement.datum_type(), - created, - } -} - -/// Return the schema for a `Target` and `Metric` -pub(crate) fn schema_for_parts(target: &T, metric: &M) -> TimeseriesSchema -where - T: traits::Target, - M: traits::Metric, -{ - let make_field_schema = |name: &str, - value: FieldValue, - source: FieldSource| { - FieldSchema { name: name.to_string(), ty: value.field_type(), source } - }; - let target_field_schema = - target.field_names().iter().zip(target.field_values()); - let metric_field_schema = - metric.field_names().iter().zip(metric.field_values()); - let field_schema = target_field_schema - .map(|(name, value)| { - make_field_schema(name, value, FieldSource::Target) - }) - .chain(metric_field_schema.map(|(name, value)| { - make_field_schema(name, value, FieldSource::Metric) - })) - .collect(); - TimeseriesSchema { - timeseries_name: TimeseriesName::try_from(oximeter::timeseries_name( - target, metric, - )) - .expect("Failed to parse timeseries name"), - field_schema, - datum_type: metric.datum_type(), - created: Utc::now(), + Datum::Missing(missing) => { + match missing.datum_type() { + DatumType::Bool => { + let row = BoolMeasurementRow { + timeseries_name, + timeseries_key, + timestamp, + datum: None, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + DatumType::I8 => { + let row = I8MeasurementRow { + timeseries_name, + timeseries_key, + timestamp, + datum: None, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + DatumType::U8 => { + let row = U8MeasurementRow { + timeseries_name, + timeseries_key, + timestamp, + datum: None, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + DatumType::I16 => { + let row = I16MeasurementRow { + timeseries_name, + timeseries_key, + timestamp, + datum: None, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + 
DatumType::U16 => {
+                    let row = U16MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        timestamp,
+                        datum: None,
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::I32 => {
+                    let row = I32MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        timestamp,
+                        datum: None,
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::U32 => {
+                    let row = U32MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        timestamp,
+                        datum: None,
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::I64 => {
+                    let row = I64MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        timestamp,
+                        datum: None,
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::U64 => {
+                    let row = U64MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        timestamp,
+                        datum: None,
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::F32 => {
+                    let row = F32MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        timestamp,
+                        datum: None,
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::F64 => {
+                    let row = F64MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        timestamp,
+                        datum: None,
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::String => {
+                    let row = StringMeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        timestamp,
+                        datum: None,
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::Bytes => {
+                    // See https://github.com/oxidecomputer/omicron/issues/4551.
+                    //
+                    // This is actually unreachable today because the constructor
+                    // for `oximeter::types::MissingDatum` fails when using a
+                    // `DatumType::Bytes`.
+                    unreachable!();
+                }
+                DatumType::CumulativeI64 => {
+                    let row = CumulativeI64MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        start_time: extract_start_time(measurement),
+                        timestamp,
+                        datum: None,
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::CumulativeU64 => {
+                    let row = CumulativeU64MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        start_time: extract_start_time(measurement),
+                        timestamp,
+                        datum: None,
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::CumulativeF32 => {
+                    let row = CumulativeF32MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        start_time: extract_start_time(measurement),
+                        timestamp,
+                        datum: None,
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::CumulativeF64 => {
+                    let row = CumulativeF64MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        start_time: extract_start_time(measurement),
+                        timestamp,
+                        datum: None,
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::HistogramI8 => {
+                    let row = HistogramI8MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        start_time: extract_start_time(measurement),
+                        timestamp,
+                        datum: DbHistogram::null(),
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::HistogramU8 => {
+                    let row = HistogramU8MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        start_time: extract_start_time(measurement),
+                        timestamp,
+                        datum: DbHistogram::null(),
+                    };
+                    (row.table_name(), serde_json::to_string(&row).unwrap())
+                }
+                DatumType::HistogramI16 => {
+                    let row = HistogramI16MeasurementRow {
+                        timeseries_name,
+                        timeseries_key,
+                        start_time: extract_start_time(measurement),
+                        timestamp,
+                        datum: DbHistogram::null(),
+                    };
+                    (row.table_name(),
serde_json::to_string(&row).unwrap()) + } + DatumType::HistogramU16 => { + let row = HistogramU16MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::null(), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + DatumType::HistogramI32 => { + let row = HistogramI32MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::null(), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + DatumType::HistogramU32 => { + let row = HistogramU32MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::null(), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + DatumType::HistogramI64 => { + let row = HistogramI64MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::null(), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + DatumType::HistogramU64 => { + let row = HistogramU64MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::null(), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + DatumType::HistogramF32 => { + let row = HistogramF32MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::null(), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + DatumType::HistogramF64 => { + let row = HistogramF64MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::null(), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + } + } } } @@ -984,7 +1238,7 @@ struct DbTimeseriesScalarGaugeSample { timeseries_key: TimeseriesKey, #[serde(with = "serde_timestamp")] timestamp: DateTime, - datum: T, + datum: Option, } // A scalar timestamped sample from a cumulative timeseries, as extracted from a query to the @@ -996,7 +1250,7 @@ struct DbTimeseriesScalarCumulativeSample { start_time: DateTime, #[serde(with = "serde_timestamp")] timestamp: DateTime, - datum: T, + datum: Option, } // A histogram timestamped sample from a timeseries, as extracted from a query to the database. 
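The `Option<T>` datum columns above are the crux of the missing-sample representation for scalars: serde serializes `None` as JSON `null`, which ClickHouse accepts and returns for a `Nullable(T)` column in the `JSONEachRow` format this crate uses. A minimal sketch of that round-trip, using a hypothetical stand-in for the private row types (the real rows also carry a timestamp serialized via the `serde_timestamp` module):

use serde::{Deserialize, Serialize};

// Hypothetical stand-in for the private *MeasurementRow types above.
#[derive(Debug, Serialize, Deserialize)]
struct Row {
    timeseries_key: u64,
    datum: Option<i64>,
}

fn main() {
    // A present datum serializes as a plain JSON value.
    let present = Row { timeseries_key: 1, datum: Some(42) };
    assert_eq!(
        serde_json::to_string(&present).unwrap(),
        r#"{"timeseries_key":1,"datum":42}"#
    );

    // A missing datum serializes as `null`, which maps onto a NULL in a
    // ClickHouse Nullable(Int64) column.
    let missing = Row { timeseries_key: 1, datum: None };
    assert_eq!(
        serde_json::to_string(&missing).unwrap(),
        r#"{"timeseries_key":1,"datum":null}"#
    );

    // On the read path, `null` deserializes back to `None`; the `From`
    // impls that follow convert that into `Datum::Missing` with the
    // appropriate `DatumType`.
    let parsed: Row =
        serde_json::from_str(r#"{"timeseries_key":1,"datum":null}"#).unwrap();
    assert!(parsed.datum.is_none());
}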
@@ -1014,9 +1268,15 @@ struct DbTimeseriesHistogramSample { impl From> for Measurement where Datum: From, + T: FromDbScalar, { fn from(sample: DbTimeseriesScalarGaugeSample) -> Measurement { - let datum = Datum::from(sample.datum); + let datum = match sample.datum { + Some(datum) => Datum::from(datum), + None => { + Datum::Missing(MissingDatum::new(T::DATUM_TYPE, None).unwrap()) + } + }; Measurement::new(sample.timestamp, datum) } } @@ -1024,12 +1284,19 @@ where impl From> for Measurement where Datum: From>, - T: traits::Cumulative, + T: traits::Cumulative + FromDbCumulative, { fn from(sample: DbTimeseriesScalarCumulativeSample) -> Measurement { - let cumulative = - Cumulative::with_start_time(sample.start_time, sample.datum); - let datum = Datum::from(cumulative); + let datum = match sample.datum { + Some(datum) => Datum::from(Cumulative::with_start_time( + sample.start_time, + datum, + )), + None => Datum::Missing( + MissingDatum::new(T::DATUM_TYPE, Some(sample.start_time)) + .unwrap(), + ), + }; Measurement::new(sample.timestamp, datum) } } @@ -1037,26 +1304,157 @@ where impl From> for Measurement where Datum: From>, - T: traits::HistogramSupport, + T: traits::HistogramSupport + FromDbHistogram, { fn from(sample: DbTimeseriesHistogramSample) -> Measurement { - let datum = Datum::from( - Histogram::from_arrays( - sample.start_time, - sample.bins, - sample.counts, + let datum = if sample.bins.is_empty() { + assert!(sample.counts.is_empty()); + Datum::Missing( + MissingDatum::new(T::DATUM_TYPE, Some(sample.start_time)) + .unwrap(), ) - .unwrap(), - ); + } else { + Datum::from( + Histogram::from_arrays( + sample.start_time, + sample.bins, + sample.counts, + ) + .unwrap(), + ) + }; Measurement::new(sample.timestamp, datum) } } +// Helper trait providing the DatumType for a corresponding scalar DB value. +// +// This is used in `parse_timeseries_scalar_gauge_measurement`. 
+trait FromDbScalar { + const DATUM_TYPE: DatumType; +} + +impl FromDbScalar for DbBool { + const DATUM_TYPE: DatumType = DatumType::Bool; +} + +impl FromDbScalar for i8 { + const DATUM_TYPE: DatumType = DatumType::I8; +} + +impl FromDbScalar for u8 { + const DATUM_TYPE: DatumType = DatumType::U8; +} + +impl FromDbScalar for i16 { + const DATUM_TYPE: DatumType = DatumType::I16; +} + +impl FromDbScalar for u16 { + const DATUM_TYPE: DatumType = DatumType::U16; +} + +impl FromDbScalar for i32 { + const DATUM_TYPE: DatumType = DatumType::I32; +} + +impl FromDbScalar for u32 { + const DATUM_TYPE: DatumType = DatumType::U32; +} + +impl FromDbScalar for i64 { + const DATUM_TYPE: DatumType = DatumType::I64; +} + +impl FromDbScalar for u64 { + const DATUM_TYPE: DatumType = DatumType::U64; +} + +impl FromDbScalar for f32 { + const DATUM_TYPE: DatumType = DatumType::F32; +} + +impl FromDbScalar for f64 { + const DATUM_TYPE: DatumType = DatumType::F64; +} + +impl FromDbScalar for String { + const DATUM_TYPE: DatumType = DatumType::String; +} + +impl FromDbScalar for Bytes { + const DATUM_TYPE: DatumType = DatumType::Bytes; +} + +trait FromDbCumulative { + const DATUM_TYPE: DatumType; +} + +impl FromDbCumulative for i64 { + const DATUM_TYPE: DatumType = DatumType::CumulativeI64; +} + +impl FromDbCumulative for u64 { + const DATUM_TYPE: DatumType = DatumType::CumulativeU64; +} + +impl FromDbCumulative for f32 { + const DATUM_TYPE: DatumType = DatumType::CumulativeF32; +} + +impl FromDbCumulative for f64 { + const DATUM_TYPE: DatumType = DatumType::CumulativeF64; +} + +trait FromDbHistogram { + const DATUM_TYPE: DatumType; +} + +impl FromDbHistogram for i8 { + const DATUM_TYPE: DatumType = DatumType::HistogramI8; +} + +impl FromDbHistogram for u8 { + const DATUM_TYPE: DatumType = DatumType::HistogramU8; +} + +impl FromDbHistogram for i16 { + const DATUM_TYPE: DatumType = DatumType::HistogramI16; +} + +impl FromDbHistogram for u16 { + const DATUM_TYPE: DatumType = DatumType::HistogramU16; +} + +impl FromDbHistogram for i32 { + const DATUM_TYPE: DatumType = DatumType::HistogramI32; +} + +impl FromDbHistogram for u32 { + const DATUM_TYPE: DatumType = DatumType::HistogramU32; +} + +impl FromDbHistogram for i64 { + const DATUM_TYPE: DatumType = DatumType::HistogramI64; +} + +impl FromDbHistogram for u64 { + const DATUM_TYPE: DatumType = DatumType::HistogramU64; +} + +impl FromDbHistogram for f32 { + const DATUM_TYPE: DatumType = DatumType::HistogramF32; +} + +impl FromDbHistogram for f64 { + const DATUM_TYPE: DatumType = DatumType::HistogramF64; +} + fn parse_timeseries_scalar_gauge_measurement<'a, T>( line: &'a str, ) -> (TimeseriesKey, Measurement) where - T: Deserialize<'a> + Into, + T: Deserialize<'a> + Into + FromDbScalar, Datum: From, { let sample = @@ -1068,7 +1466,7 @@ fn parse_timeseries_scalar_cumulative_measurement<'a, T>( line: &'a str, ) -> (TimeseriesKey, Measurement) where - T: Deserialize<'a> + traits::Cumulative, + T: Deserialize<'a> + traits::Cumulative + FromDbCumulative, Datum: From>, { let sample = @@ -1081,7 +1479,7 @@ fn parse_timeseries_histogram_measurement( line: &str, ) -> (TimeseriesKey, Measurement) where - T: Into + traits::HistogramSupport, + T: Into + traits::HistogramSupport + FromDbHistogram, Datum: From>, { let sample = @@ -1206,11 +1604,10 @@ pub(crate) fn parse_field_select_row( "Expected pairs of (field_name, field_value) from the field query" ); let (target_name, metric_name) = schema.component_names(); - let mut n_fields = 0; let mut target_fields = Vec::new(); let mut 
metric_fields = Vec::new(); let mut actual_fields = row.fields.values(); - while n_fields < schema.field_schema.len() { + for _ in 0..schema.field_schema.len() { // Extract the field name from the row and find a matching expected field. let actual_field_name = actual_fields .next() @@ -1219,7 +1616,7 @@ pub(crate) fn parse_field_select_row( .as_str() .expect("Expected a string field name") .to_string(); - let expected_field = schema.field_schema(&name).expect( + let expected_field = schema.schema_for_field(&name).expect( "Found field with name that is not part of the timeseries schema", ); @@ -1227,7 +1624,7 @@ pub(crate) fn parse_field_select_row( let actual_field_value = actual_fields .next() .expect("Missing a field value from a field select query"); - let value = match expected_field.ty { + let value = match expected_field.field_type { FieldType::Bool => { FieldValue::Bool(bool::from(DbBool::from( actual_field_value @@ -1334,7 +1731,6 @@ pub(crate) fn parse_field_select_row( FieldSource::Target => target_fields.push(field), FieldSource::Metric => metric_fields.push(field), } - n_fields += 1; } ( row.timeseries_key, @@ -1411,12 +1807,12 @@ mod tests { let list: BTreeSet<_> = [ FieldSchema { name: String::from("field0"), - ty: FieldType::I64, + field_type: FieldType::I64, source: FieldSource::Target, }, FieldSchema { name: String::from("field1"), - ty: FieldType::IpAddr, + field_type: FieldType::IpAddr, source: FieldSource::Metric, }, ] @@ -1459,6 +1855,27 @@ mod tests { } } + // Test that we correctly unroll a row when the measurement is missing its + // datum. + #[test] + fn test_unroll_missing_measurement_row() { + let sample = test_util::make_sample(); + let missing_sample = test_util::make_missing_sample(); + let (table_name, row) = unroll_measurement_row(&sample); + let (missing_table_name, missing_row) = + unroll_measurement_row(&missing_sample); + let row = serde_json::from_str::(&row).unwrap(); + let missing_row = + serde_json::from_str::(&missing_row).unwrap(); + println!("{row:#?}"); + println!("{missing_row:#?}"); + assert_eq!(table_name, missing_table_name); + assert_eq!(row.timeseries_name, missing_row.timeseries_name); + assert_eq!(row.timeseries_key, missing_row.timeseries_key); + assert!(row.datum.is_some()); + assert!(missing_row.datum.is_none()); + } + #[test] fn test_unroll_measurement_row() { let sample = test_util::make_hist_sample(); @@ -1473,14 +1890,13 @@ mod tests { ) .unwrap(); let measurement = &sample.measurement; - if let Datum::HistogramF64(hist) = measurement.datum() { - assert_eq!( - hist, &unpacked_hist, - "Unpacking histogram from database representation failed" - ); - } else { + let Datum::HistogramF64(hist) = measurement.datum() else { panic!("Expected a histogram measurement"); - } + }; + assert_eq!( + hist, &unpacked_hist, + "Unpacking histogram from database representation failed" + ); assert_eq!(unpacked.start_time, measurement.start_time().unwrap()); } @@ -1582,12 +1998,11 @@ mod tests { assert_eq!(key, 12); assert_eq!(measurement.start_time().unwrap(), start_time); assert_eq!(measurement.timestamp(), timestamp); - if let Datum::HistogramI64(hist) = measurement.datum() { - assert_eq!(hist.n_bins(), 3); - assert_eq!(hist.n_samples(), 2); - } else { + let Datum::HistogramI64(hist) = measurement.datum() else { panic!("Expected a histogram sample"); - } + }; + assert_eq!(hist.n_bins(), 3); + assert_eq!(hist.n_samples(), 2); } #[test] @@ -1624,4 +2039,14 @@ mod tests { "Histogram reconstructed from paired arrays is not correct" ); } + #[test] + fn 
test_parse_bytes_measurement() { + let s = r#"{"timeseries_key": 101, "timestamp": "2023-11-21 18:25:21.963714255", "datum": "\u0001\u0002\u0003"}"#; + let (_, meas) = parse_timeseries_scalar_gauge_measurement::(&s); + println!("{meas:?}"); + let Datum::Bytes(b) = meas.datum() else { + unreachable!(); + }; + assert_eq!(b.to_vec(), vec![1, 2, 3]); + } } diff --git a/oximeter/db/src/query.rs b/oximeter/db/src/query.rs index 6a55d3f518..2caefb24c3 100644 --- a/oximeter/db/src/query.rs +++ b/oximeter/db/src/query.rs @@ -101,7 +101,7 @@ impl SelectQueryBuilder { let field_name = field_name.as_ref().to_string(); let field_schema = self .timeseries_schema - .field_schema(&field_name) + .schema_for_field(&field_name) .ok_or_else(|| Error::NoSuchField { timeseries_name: self .timeseries_schema @@ -110,7 +110,7 @@ impl SelectQueryBuilder { field_name: field_name.clone(), })?; let field_value: FieldValue = field_value.into(); - let expected_type = field_schema.ty; + let expected_type = field_schema.field_type; let found_type = field_value.field_type(); if expected_type != found_type { return Err(Error::IncorrectFieldType { @@ -150,7 +150,7 @@ impl SelectQueryBuilder { ) -> Result { let field_schema = self .timeseries_schema - .field_schema(&selector.name) + .schema_for_field(&selector.name) .ok_or_else(|| Error::NoSuchField { timeseries_name: self .timeseries_schema @@ -158,13 +158,14 @@ impl SelectQueryBuilder { .to_string(), field_name: selector.name.clone(), })?; - if !selector.op.valid_for_type(field_schema.ty) { + let field_type = field_schema.field_type; + if !selector.op.valid_for_type(field_type) { return Err(Error::InvalidFieldCmp { op: format!("{:?}", selector.op), - ty: field_schema.ty, + ty: field_schema.field_type, }); } - let field_value = match field_schema.ty { + let field_value = match field_type { FieldType::String => FieldValue::from(&selector.value), FieldType::I8 => parse_selector_field_value::( &field_schema, @@ -214,9 +215,9 @@ impl SelectQueryBuilder { let comparison = FieldComparison { op: selector.op, value: field_value }; let selector = FieldSelector { - name: field_schema.name.clone(), + name: field_schema.name.to_string(), comparison: Some(comparison), - ty: field_schema.ty, + ty: field_type, }; self.field_selectors.insert(field_schema.clone(), selector); Ok(self) @@ -248,7 +249,7 @@ impl SelectQueryBuilder { T: Target, M: Metric, { - let schema = crate::model::schema_for_parts(target, metric); + let schema = TimeseriesSchema::new(target, metric); let mut builder = Self::new(&schema); let target_fields = target.field_names().iter().zip(target.field_values()); @@ -279,9 +280,9 @@ impl SelectQueryBuilder { for field in timeseries_schema.field_schema.iter() { let key = field.clone(); field_selectors.entry(key).or_insert_with(|| FieldSelector { - name: field.name.clone(), + name: field.name.to_string(), comparison: None, - ty: field.ty, + ty: field.field_type, }); } SelectQuery { @@ -309,8 +310,8 @@ where { Ok(FieldValue::from(s.parse::().map_err(|_| { Error::InvalidFieldValue { - field_name: field.name.clone(), - field_type: field.ty, + field_name: field.name.to_string(), + field_type: field.field_type, value: s.to_string(), } })?)) @@ -778,12 +779,12 @@ mod tests { field_schema: [ FieldSchema { name: "f0".to_string(), - ty: FieldType::I64, + field_type: FieldType::I64, source: FieldSource::Target, }, FieldSchema { name: "f1".to_string(), - ty: FieldType::Bool, + field_type: FieldType::Bool, source: FieldSource::Target, }, ] @@ -981,6 +982,7 @@ mod tests { "Expected an exact 
comparison when building a query from parts", ); + println!("{builder:#?}"); assert_eq!( builder.field_selector(FieldSource::Metric, "baz").unwrap(), &FieldSelector { @@ -1002,12 +1004,12 @@ mod tests { field_schema: [ FieldSchema { name: "f0".to_string(), - ty: FieldType::I64, + field_type: FieldType::I64, source: FieldSource::Target, }, FieldSchema { name: "f1".to_string(), - ty: FieldType::Bool, + field_type: FieldType::Bool, source: FieldSource::Target, }, ] @@ -1065,12 +1067,12 @@ mod tests { field_schema: [ FieldSchema { name: "f0".to_string(), - ty: FieldType::I64, + field_type: FieldType::I64, source: FieldSource::Target, }, FieldSchema { name: "f1".to_string(), - ty: FieldType::Bool, + field_type: FieldType::Bool, source: FieldSource::Target, }, ] @@ -1116,12 +1118,12 @@ mod tests { field_schema: [ FieldSchema { name: "f0".to_string(), - ty: FieldType::I64, + field_type: FieldType::I64, source: FieldSource::Target, }, FieldSchema { name: "f1".to_string(), - ty: FieldType::Bool, + field_type: FieldType::Bool, source: FieldSource::Target, }, ] diff --git a/oximeter/instruments/src/kstat/link.rs b/oximeter/instruments/src/kstat/link.rs index d22ac60378..03397c4108 100644 --- a/oximeter/instruments/src/kstat/link.rs +++ b/oximeter/instruments/src/kstat/link.rs @@ -268,8 +268,8 @@ mod tests { } impl TestEtherstub { - const PFEXEC: &str = "/usr/bin/pfexec"; - const DLADM: &str = "/usr/sbin/dladm"; + const PFEXEC: &'static str = "/usr/bin/pfexec"; + const DLADM: &'static str = "/usr/sbin/dladm"; fn new() -> Self { let name = format!( "kstest{}0", diff --git a/oximeter/oximeter/Cargo.toml b/oximeter/oximeter/Cargo.toml index 8a69494d5a..b545c697de 100644 --- a/oximeter/oximeter/Cargo.toml +++ b/oximeter/oximeter/Cargo.toml @@ -11,8 +11,10 @@ chrono.workspace = true num.workspace = true omicron-common.workspace = true oximeter-macro-impl.workspace = true +regex.workspace = true schemars = { workspace = true, features = [ "uuid1", "bytes", "chrono" ] } serde.workspace = true +serde_json.workspace = true strum.workspace = true thiserror.workspace = true uuid.workspace = true @@ -21,4 +23,5 @@ omicron-workspace-hack.workspace = true [dev-dependencies] approx.workspace = true rstest.workspace = true +serde_json.workspace = true trybuild.workspace = true diff --git a/oximeter/oximeter/src/histogram.rs b/oximeter/oximeter/src/histogram.rs index c399384ffa..aaf9297ca4 100644 --- a/oximeter/oximeter/src/histogram.rs +++ b/oximeter/oximeter/src/histogram.rs @@ -1353,13 +1353,10 @@ mod tests { } #[test] - fn test_foo() { - let bins: Vec = 10u16.bins(1, 3, 30.try_into().unwrap()).unwrap(); - println!("{bins:?}"); - dbg!(bins.len()); - let hist = Histogram::new(&bins).unwrap(); - for bin in hist.iter() { - println!("{}", bin.range); - } + fn test_empty_bins_not_supported() { + assert!(matches!( + Histogram::::new(&[]).unwrap_err(), + HistogramError::EmptyBins + )); } } diff --git a/oximeter/oximeter/src/lib.rs b/oximeter/oximeter/src/lib.rs index 2ced404eae..1855762abe 100644 --- a/oximeter/oximeter/src/lib.rs +++ b/oximeter/oximeter/src/lib.rs @@ -108,10 +108,14 @@ pub use oximeter_macro_impl::*; extern crate self as oximeter; pub mod histogram; +pub mod schema; pub mod test_util; pub mod traits; pub mod types; +pub use schema::FieldSchema; +pub use schema::TimeseriesName; +pub use schema::TimeseriesSchema; pub use traits::Metric; pub use traits::Producer; pub use traits::Target; diff --git a/oximeter/oximeter/src/schema.rs b/oximeter/oximeter/src/schema.rs new file mode 100644 index 
0000000000..b6953fda52 --- /dev/null +++ b/oximeter/oximeter/src/schema.rs @@ -0,0 +1,640 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Copyright 2023 Oxide Computer Company + +//! Tools for working with schema for fields and timeseries. + +use crate::types::DatumType; +use crate::types::FieldType; +use crate::types::MetricsError; +use crate::types::Sample; +use crate::Metric; +use crate::Target; +use chrono::DateTime; +use chrono::Utc; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; +use std::collections::BTreeSet; +use std::fmt::Write; +use std::path::Path; + +/// The name and type information for a field of a timeseries schema. +#[derive( + Clone, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Deserialize, + Serialize, + JsonSchema, +)] +pub struct FieldSchema { + pub name: String, + pub field_type: FieldType, + pub source: FieldSource, +} + +/// The source from which a field is derived, the target or metric. +#[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Deserialize, + Serialize, + JsonSchema, +)] +#[serde(rename_all = "snake_case")] +pub enum FieldSource { + Target, + Metric, +} + +/// A timeseries name. +/// +/// Timeseries are named by concatenating the names of their target and metric, joined with a +/// colon. +#[derive( + Debug, Clone, PartialEq, PartialOrd, Ord, Eq, Hash, Serialize, Deserialize, +)] +#[serde(try_from = "&str")] +pub struct TimeseriesName(String); + +impl JsonSchema for TimeseriesName { + fn schema_name() -> String { + "TimeseriesName".to_string() + } + + fn json_schema( + _: &mut schemars::gen::SchemaGenerator, + ) -> schemars::schema::Schema { + schemars::schema::SchemaObject { + metadata: Some(Box::new(schemars::schema::Metadata { + title: Some("The name of a timeseries".to_string()), + description: Some( + "Names are constructed by concatenating the target \ + and metric names with ':'. Target and metric \ + names must be lowercase alphanumeric characters \ + with '_' separating words." 
+ .to_string(), + ), + ..Default::default() + })), + instance_type: Some(schemars::schema::InstanceType::String.into()), + string: Some(Box::new(schemars::schema::StringValidation { + pattern: Some(TIMESERIES_NAME_REGEX.to_string()), + ..Default::default() + })), + ..Default::default() + } + .into() + } +} + +impl std::ops::Deref for TimeseriesName { + type Target = String; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::fmt::Display for TimeseriesName { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl std::convert::TryFrom<&str> for TimeseriesName { + type Error = MetricsError; + fn try_from(s: &str) -> Result { + validate_timeseries_name(s).map(|s| TimeseriesName(s.to_string())) + } +} + +impl std::convert::TryFrom for TimeseriesName { + type Error = MetricsError; + fn try_from(s: String) -> Result { + validate_timeseries_name(&s)?; + Ok(TimeseriesName(s)) + } +} + +impl std::str::FromStr for TimeseriesName { + type Err = MetricsError; + fn from_str(s: &str) -> Result { + s.try_into() + } +} + +impl PartialEq for TimeseriesName +where + T: AsRef, +{ + fn eq(&self, other: &T) -> bool { + self.0.eq(other.as_ref()) + } +} + +fn validate_timeseries_name(s: &str) -> Result<&str, MetricsError> { + if regex::Regex::new(TIMESERIES_NAME_REGEX).unwrap().is_match(s) { + Ok(s) + } else { + Err(MetricsError::InvalidTimeseriesName) + } +} + +/// The schema for a timeseries. +/// +/// This includes the name of the timeseries, as well as the datum type of its metric and the +/// schema for each field. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct TimeseriesSchema { + pub timeseries_name: TimeseriesName, + pub field_schema: BTreeSet, + pub datum_type: DatumType, + pub created: DateTime, +} + +impl From<&Sample> for TimeseriesSchema { + fn from(sample: &Sample) -> Self { + let timeseries_name = sample.timeseries_name.parse().unwrap(); + let mut field_schema = BTreeSet::new(); + for field in sample.target_fields() { + let schema = FieldSchema { + name: field.name.clone(), + field_type: field.value.field_type(), + source: FieldSource::Target, + }; + field_schema.insert(schema); + } + for field in sample.metric_fields() { + let schema = FieldSchema { + name: field.name.clone(), + field_type: field.value.field_type(), + source: FieldSource::Metric, + }; + field_schema.insert(schema); + } + let datum_type = sample.measurement.datum_type(); + Self { timeseries_name, field_schema, datum_type, created: Utc::now() } + } +} + +impl TimeseriesSchema { + /// Construct a timeseries schema from a target and metric. 
+    pub fn new<T, M>(target: &T, metric: &M) -> Self
+    where
+        T: Target,
+        M: Metric,
+    {
+        let timeseries_name =
+            TimeseriesName::try_from(crate::timeseries_name(target, metric))
+                .unwrap();
+        let mut field_schema = BTreeSet::new();
+        for field in target.fields() {
+            let schema = FieldSchema {
+                name: field.name.clone(),
+                field_type: field.value.field_type(),
+                source: FieldSource::Target,
+            };
+            field_schema.insert(schema);
+        }
+        for field in metric.fields() {
+            let schema = FieldSchema {
+                name: field.name.clone(),
+                field_type: field.value.field_type(),
+                source: FieldSource::Metric,
+            };
+            field_schema.insert(schema);
+        }
+        let datum_type = metric.datum_type();
+        Self { timeseries_name, field_schema, datum_type, created: Utc::now() }
+    }
+
+    /// Construct a timeseries schema from a sample.
+    pub fn from_sample(sample: &Sample) -> Self {
+        Self::from(sample)
+    }
+
+    /// Return the schema for the given field.
+    pub fn schema_for_field<S>(&self, name: S) -> Option<&FieldSchema>
+    where
+        S: AsRef<str>,
+    {
+        self.field_schema.iter().find(|field| field.name == name.as_ref())
+    }
+
+    /// Return the target and metric component names for this timeseries.
+    pub fn component_names(&self) -> (&str, &str) {
+        self.timeseries_name
+            .split_once(':')
+            .expect("Incorrectly formatted timeseries name")
+    }
+}
+
+impl PartialEq for TimeseriesSchema {
+    fn eq(&self, other: &TimeseriesSchema) -> bool {
+        self.timeseries_name == other.timeseries_name
+            && self.datum_type == other.datum_type
+            && self.field_schema == other.field_schema
+    }
+}
+
+// Regular expression describing valid timeseries names.
+//
+// Names are derived from the names of the Rust structs for the target and metric, converted to
+// snake case. So the names must be valid identifiers, and generally:
+//
+// - Start with lowercase a-z
+// - Any number of alphanumerics
+// - Zero or more of the above, delimited by '_'.
+//
+// That describes the target/metric name, and the timeseries is two of those, joined with ':'.
+const TIMESERIES_NAME_REGEX: &str =
+    "(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*):(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*)";
+
+/// A set of timeseries schema, useful for testing changes to targets or
+/// metrics.
+#[derive(Debug, Default, Deserialize, PartialEq, Serialize)]
+pub struct SchemaSet {
+    #[serde(flatten)]
+    inner: BTreeMap<TimeseriesName, TimeseriesSchema>,
+}
+
+impl SchemaSet {
+    /// Insert a timeseries schema, checking for conflicts.
+    ///
+    /// This inserts the schema derived from `target` and `metric`. If one
+    /// does _not_ already exist in `self` or a _matching_ one exists, `None`
+    /// is returned.
+    ///
+    /// If the derived schema _conflicts_ with one in `self`, the existing
+    /// schema is returned.
+    pub fn insert_checked<T, M>(
+        &mut self,
+        target: &T,
+        metric: &M,
+    ) -> Option<TimeseriesSchema>
+    where
+        T: Target,
+        M: Metric,
+    {
+        let new = TimeseriesSchema::new(target, metric);
+        let name = new.timeseries_name.clone();
+        match self.inner.entry(name) {
+            Entry::Vacant(entry) => {
+                entry.insert(new);
+                None
+            }
+            Entry::Occupied(entry) => {
+                let existing = entry.get();
+                if existing == &new {
+                    None
+                } else {
+                    Some(existing.clone())
+                }
+            }
+        }
+    }
+
+    /// Compare the set of schema against the contents of a file.
+    ///
+    /// This function loads a `SchemaSet` from the provided JSON file, and
+    /// asserts that the contained schema matches those in `self`. Note that
+    /// equality of `TimeseriesSchema` ignores creation timestamps, so this
+    /// compares the "identity" data: timeseries name, field names, field types,
+    /// and field sources.
+ /// + /// This is intentionally similar to `expectorate::assert_contents()`. If + /// the provided file doesn't exist, it's treated as empty. If it does, a + /// `SchemaSet` is deserialized from it and a comparison between that and + /// `self` is done. + /// + /// You can use `EXPECTORATE=overwrite` to overwrite the existing file, + /// rather than panicking. + pub fn assert_contents(&self, path: impl AsRef) { + let path = path.as_ref(); + let v = std::env::var_os("EXPECTORATE"); + let overwrite = + v.as_deref().and_then(std::ffi::OsStr::to_str) == Some("overwrite"); + let expected_contents = serde_json::to_string_pretty(self).unwrap(); + if overwrite { + if let Err(e) = std::fs::write(path, &expected_contents) { + panic!( + "Failed to write contents to '{}': {}", + path.display(), + e + ); + } + } else { + // If the file doesn't exist, it's just empty and we'll create an + // empty set of schema. + let contents = if !path.exists() { + String::from("{}") + } else { + match std::fs::read_to_string(path) { + Err(e) => { + panic!("Failed to read '{}': {}", path.display(), e) + } + Ok(c) => c, + } + }; + let other: Self = serde_json::from_str(&contents).unwrap(); + if self == &other { + return; + } + + let mut diffs = String::new(); + writeln!( + &mut diffs, + "Timeseries schema in \"{}\" do not match\n", + path.display() + ) + .unwrap(); + + // Print schema in self that are not in the file, or mismatched + // schema. + for (name, schema) in self.inner.iter() { + let Some(other_schema) = other.inner.get(name) else { + writeln!( + &mut diffs, + "File is missing timeseries \"{}\"", + name + ) + .unwrap(); + continue; + }; + if schema == other_schema { + continue; + } + writeln!(&mut diffs, "Timeseries \"{name}\" differs").unwrap(); + + // Print out any differences in the datum type. + if schema.datum_type != other_schema.datum_type { + writeln!( + &mut diffs, + " Expected datum type: {}", + schema.datum_type + ) + .unwrap(); + writeln!( + &mut diffs, + " Actual datum type: {}", + other_schema.datum_type + ) + .unwrap(); + } + + // Print fields in self that are not in other, or are mismatched + for field in schema.field_schema.iter() { + let Some(other_field) = + other_schema.field_schema.get(field) + else { + writeln!( + &mut diffs, + " File is missing {:?} field \"{}\"", + field.source, field.name, + ) + .unwrap(); + continue; + }; + if field == other_field { + continue; + } + + writeln!( + &mut diffs, + " File has mismatched field \"{}\"", + field.name + ) + .unwrap(); + writeln!( + &mut diffs, + " Expected type: {}", + field.field_type + ) + .unwrap(); + writeln!( + &mut diffs, + " Actual type: {}", + other_field.field_type + ) + .unwrap(); + writeln!( + &mut diffs, + " Expected source: {:?}", + field.source + ) + .unwrap(); + writeln!( + &mut diffs, + " Actual source: {:?}", + other_field.source + ) + .unwrap(); + } + + // Print fields in other that are not in self, fields that are + // in both but don't match are taken care of in the above loop. + for other_field in other_schema.field_schema.iter() { + if schema.field_schema.contains(other_field) { + continue; + } + + writeln!( + &mut diffs, + " Current set is missing {:?} field \"{}\"", + other_field.source, other_field.name, + ) + .unwrap(); + } + } + + // Print schema that are in the file, but not self. Those that don't + // match are handled in the above block. 
+ for key in other.inner.keys() { + if !self.inner.contains_key(key) { + writeln!( + &mut diffs, + " Current set is missing timeseries \"{}\"", + key + ) + .unwrap(); + } + } + panic!("{}", diffs); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::convert::TryFrom; + use uuid::Uuid; + + #[test] + fn test_timeseries_name() { + let name = TimeseriesName::try_from("foo:bar").unwrap(); + assert_eq!(format!("{}", name), "foo:bar"); + } + + #[test] + fn test_timeseries_name_from_str() { + assert!(TimeseriesName::try_from("a:b").is_ok()); + assert!(TimeseriesName::try_from("a_a:b_b").is_ok()); + assert!(TimeseriesName::try_from("a0:b0").is_ok()); + assert!(TimeseriesName::try_from("a_0:b_0").is_ok()); + + assert!(TimeseriesName::try_from("_:b").is_err()); + assert!(TimeseriesName::try_from("a_:b").is_err()); + assert!(TimeseriesName::try_from("0:b").is_err()); + assert!(TimeseriesName::try_from(":b").is_err()); + assert!(TimeseriesName::try_from("a:").is_err()); + assert!(TimeseriesName::try_from("123").is_err()); + } + + #[derive(Target)] + struct MyTarget { + id: Uuid, + name: String, + } + + const ID: Uuid = uuid::uuid!("ca565ef4-65dc-4ab0-8622-7be43ed72105"); + + impl Default for MyTarget { + fn default() -> Self { + Self { id: ID, name: String::from("name") } + } + } + + #[derive(Metric)] + struct MyMetric { + happy: bool, + datum: u64, + } + + impl Default for MyMetric { + fn default() -> Self { + Self { happy: true, datum: 0 } + } + } + + #[test] + fn test_timeseries_schema_from_parts() { + let target = MyTarget::default(); + let metric = MyMetric::default(); + let schema = TimeseriesSchema::new(&target, &metric); + + assert_eq!(schema.timeseries_name, "my_target:my_metric"); + let f = schema.schema_for_field("id").unwrap(); + assert_eq!(f.name, "id"); + assert_eq!(f.field_type, FieldType::Uuid); + assert_eq!(f.source, FieldSource::Target); + + let f = schema.schema_for_field("name").unwrap(); + assert_eq!(f.name, "name"); + assert_eq!(f.field_type, FieldType::String); + assert_eq!(f.source, FieldSource::Target); + + let f = schema.schema_for_field("happy").unwrap(); + assert_eq!(f.name, "happy"); + assert_eq!(f.field_type, FieldType::Bool); + assert_eq!(f.source, FieldSource::Metric); + assert_eq!(schema.datum_type, DatumType::U64); + } + + #[test] + fn test_timeseries_schema_from_sample() { + let target = MyTarget::default(); + let metric = MyMetric::default(); + let sample = Sample::new(&target, &metric).unwrap(); + let schema = TimeseriesSchema::new(&target, &metric); + let schema_from_sample = TimeseriesSchema::from(&sample); + assert_eq!(schema, schema_from_sample); + } + + // Test that we correctly order field across a target and metric. + // + // In an earlier commit, we switched from storing fields in an unordered Vec + // to using a BTree{Map,Set} to ensure ordering by name. However, the + // `TimeseriesSchema` type stored all its fields by chaining the sorted + // fields from the target and metric, without then sorting _across_ them. + // + // This was exacerbated by the error reporting, where we did in fact sort + // all fields across the target and metric, making it difficult to tell how + // the derived schema was different, if at all. + // + // This test generates a sample with a schema where the target and metric + // fields are sorted within them, but not across them. We check that the + // derived schema are actually equal, which means we've imposed that + // ordering when deriving the schema. 
+ #[test] + fn test_schema_field_ordering_across_target_metric() { + let target_field = FieldSchema { + name: String::from("later"), + field_type: FieldType::U64, + source: FieldSource::Target, + }; + let metric_field = FieldSchema { + name: String::from("earlier"), + field_type: FieldType::U64, + source: FieldSource::Metric, + }; + let timeseries_name: TimeseriesName = "foo:bar".parse().unwrap(); + let datum_type = DatumType::U64; + let field_schema = + [target_field.clone(), metric_field.clone()].into_iter().collect(); + let expected_schema = TimeseriesSchema { + timeseries_name, + field_schema, + datum_type, + created: Utc::now(), + }; + + #[derive(oximeter::Target)] + struct Foo { + later: u64, + } + #[derive(oximeter::Metric)] + struct Bar { + earlier: u64, + datum: u64, + } + + let target = Foo { later: 1 }; + let metric = Bar { earlier: 2, datum: 10 }; + let sample = Sample::new(&target, &metric).unwrap(); + let derived_schema = TimeseriesSchema::from(&sample); + assert_eq!(derived_schema, expected_schema); + } + + #[test] + fn test_field_schema_ordering() { + let mut fields = BTreeSet::new(); + fields.insert(FieldSchema { + name: String::from("second"), + field_type: FieldType::U64, + source: FieldSource::Target, + }); + fields.insert(FieldSchema { + name: String::from("first"), + field_type: FieldType::U64, + source: FieldSource::Target, + }); + let mut iter = fields.iter(); + assert_eq!(iter.next().unwrap().name, "first"); + assert_eq!(iter.next().unwrap().name, "second"); + assert!(iter.next().is_none()); + } +} diff --git a/oximeter/oximeter/src/test_util.rs b/oximeter/oximeter/src/test_util.rs index f3750d6d83..a9778d03bc 100644 --- a/oximeter/oximeter/src/test_util.rs +++ b/oximeter/oximeter/src/test_util.rs @@ -48,19 +48,27 @@ pub struct TestHistogram { pub datum: Histogram, } +const ID: Uuid = uuid::uuid!("e00ced4d-39d1-446a-ae85-a67f05c9750b"); + pub fn make_sample() -> Sample { let target = TestTarget::default(); - let metric = TestMetric { id: Uuid::new_v4(), good: true, datum: 1 }; + let metric = TestMetric { id: ID, good: true, datum: 1 }; Sample::new(&target, &metric).unwrap() } +pub fn make_missing_sample() -> Sample { + let target = TestTarget::default(); + let metric = TestMetric { id: ID, good: true, datum: 1 }; + Sample::new_missing(&target, &metric).unwrap() +} + pub fn make_hist_sample() -> Sample { let target = TestTarget::default(); let mut hist = histogram::Histogram::new(&[0.0, 5.0, 10.0]).unwrap(); hist.sample(1.0).unwrap(); hist.sample(2.0).unwrap(); hist.sample(6.0).unwrap(); - let metric = TestHistogram { id: Uuid::new_v4(), good: true, datum: hist }; + let metric = TestHistogram { id: ID, good: true, datum: hist }; Sample::new(&target, &metric).unwrap() } diff --git a/oximeter/oximeter/src/traits.rs b/oximeter/oximeter/src/traits.rs index 096abb8023..0934d231e3 100644 --- a/oximeter/oximeter/src/traits.rs +++ b/oximeter/oximeter/src/traits.rs @@ -30,8 +30,15 @@ use std::ops::AddAssign; /// definition can be thought of as a schema, and an instance of that struct as identifying an /// individual target. /// -/// Target fields may have one of a set of supported types: `bool`, `i64`, `String`, `IpAddr`, or -/// `Uuid`. Any number of fields greater than zero is supported. +/// Target fields may have one of a set of supported types: +/// +/// - `bool` +/// - any fixed-width integer, e.g., `u8` or `i64` +/// - `String` +/// - `IpAddr` +/// - `Uuid` +/// +/// Any number of fields greater than zero is supported. 
 ///
 /// Examples
 /// --------
@@ -105,9 +112,28 @@ pub trait Target {
 /// One field of the struct is special, describing the actual measured data that the metric
 /// represents. This should be a field named `datum`, or another field (with any name you choose)
 /// annotated with the `#[datum]` attribute. This field represents the underlying data for the
-/// metric, and must be one of the supported types, implementing the [`Datum`] trait. This can
-/// be any of: `i64`, `f64`, `bool`, `String`, or `Bytes` for gauges, and `Cumulative<T>` or
-/// `Histogram<T>` for cumulative metrics, where `T` is `i64` or `f64`.
+/// metric, and must be one of the supported types, implementing the [`Datum`] trait.
+///
+/// For gauge types, this can be any of:
+///
+/// - `bool`
+/// - a fixed-width integer, e.g. `u8` or `i64`
+/// - `f32` or `f64`
+/// - `String`
+/// - `Bytes`
+///
+/// Cumulative types can be any of `Cumulative<T>`, where `T` is
+///
+/// - `i64`
+/// - `u64`
+/// - `f32`
+/// - `f64`
+///
+/// Histogram types can be any `Histogram<T>`, where `T` is:
+///
+/// - a fixed-width integer, e.g. `u8` or `i64`
+/// - `f32`
+/// - `f64`
 ///
 /// The value of the metric's data is _measured_ by using the `measure()` method, which returns a
 /// [`Measurement`]. This describes a timestamped data point for the metric.
diff --git a/oximeter/oximeter/src/types.rs b/oximeter/oximeter/src/types.rs
index 325974781e..3d74bec72c 100644
--- a/oximeter/oximeter/src/types.rs
+++ b/oximeter/oximeter/src/types.rs
@@ -369,6 +369,7 @@ pub enum Datum {
 HistogramU64(histogram::Histogram<u64>),
 HistogramF32(histogram::Histogram<f32>),
 HistogramF64(histogram::Histogram<f64>),
+ Missing(MissingDatum),
 }

 impl Datum {
@@ -402,6 +403,7 @@ impl Datum {
 Datum::HistogramU64(_) => DatumType::HistogramU64,
 Datum::HistogramF32(_) => DatumType::HistogramF32,
 Datum::HistogramF64(_) => DatumType::HistogramF64,
+ Datum::Missing(ref inner) => inner.datum_type(),
 }
 }

@@ -440,6 +442,7 @@ impl Datum {
 Datum::HistogramU64(ref inner) => Some(inner.start_time()),
 Datum::HistogramF32(ref inner) => Some(inner.start_time()),
 Datum::HistogramF64(ref inner) => Some(inner.start_time()),
+ Datum::Missing(ref inner) => inner.start_time(),
 }
 }
 }
@@ -495,6 +498,60 @@ impl From<&str> for Datum {
 }
 }

+#[derive(Clone, Copy, Debug, Deserialize, JsonSchema, PartialEq, Serialize)]
+pub struct MissingDatum {
+ datum_type: DatumType,
+ start_time: Option<DateTime<Utc>>,
+}
+
+impl MissingDatum {
+ pub fn datum_type(&self) -> DatumType {
+ self.datum_type
+ }
+
+ pub fn start_time(&self) -> Option<DateTime<Utc>> {
+ self.start_time
+ }
+
+ pub fn new(
+ datum_type: DatumType,
+ start_time: Option<DateTime<Utc>>,
+ ) -> Result<Self, MetricsError> {
+ // See https://github.com/oxidecomputer/omicron/issues/4551.
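+ //
+ // A missing datum must still be well-formed: `Bytes` has no supported
+ // missing representation, cumulative datum types must carry their start
+ // time, and non-cumulative datum types must not have one.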
+ if datum_type == DatumType::Bytes {
+ return Err(MetricsError::DatumError(String::from(
+ "Missing samples from byte array types are not supported",
+ )));
+ }
+ if datum_type.is_cumulative() && start_time.is_none() {
+ return Err(MetricsError::MissingDatumRequiresStartTime {
+ datum_type,
+ });
+ }
+ if !datum_type.is_cumulative() && start_time.is_some() {
+ return Err(MetricsError::MissingDatumCannotHaveStartTime {
+ datum_type,
+ });
+ }
+ Ok(Self { datum_type, start_time })
+ }
+}
+
+impl From<MissingDatum> for Datum {
+ fn from(d: MissingDatum) -> Datum {
+ Datum::Missing(d)
+ }
+}
+
+impl<M: traits::Metric> From<&M> for MissingDatum {
+ fn from(metric: &M) -> Self {
+ MissingDatum {
+ datum_type: metric.datum_type(),
+ start_time: metric.start_time(),
+ }
+ }
+}
+
 /// A `Measurement` is a timestamped datum from a single metric
 #[derive(Clone, Debug, PartialEq, JsonSchema, Serialize, Deserialize)]
 pub struct Measurement {
@@ -516,6 +573,11 @@ impl Measurement {
 Self { timestamp, datum: datum.into() }
 }

+ /// Return true if this measurement represents a missing datum.
+ pub fn is_missing(&self) -> bool {
+ matches!(self.datum, Datum::Missing(_))
+ }
+
 /// Return the datum for this measurement
 pub fn datum(&self) -> &Datum {
 &self.datum
@@ -561,6 +623,14 @@ pub enum MetricsError {
 /// A field name is duplicated between the target and metric.
 #[error("Field '{name}' is duplicated between the target and metric")]
 DuplicateFieldName { name: String },
+
+ #[error("Missing datum of type {datum_type} requires a start time")]
+ MissingDatumRequiresStartTime { datum_type: DatumType },
+
+ #[error("Missing datum of type {datum_type} cannot have a start time")]
+ MissingDatumCannotHaveStartTime { datum_type: DatumType },
+ #[error("Invalid timeseries name")]
+ InvalidTimeseriesName,
 }

 impl From<MetricsError> for omicron_common::api::external::Error {
@@ -734,6 +804,29 @@ impl Sample {
 })
 }

+ /// Construct a new missing sample, recorded at the time of the supplied
+ /// timestamp.
+ pub fn new_missing_with_timestamp<T, M>(
+ timestamp: DateTime<Utc>,
+ target: &T,
+ metric: &M,
+ ) -> Result<Self, MetricsError>
+ where
+ T: traits::Target,
+ M: traits::Metric,
+ {
+ let target_fields = FieldSet::from_target(target);
+ let metric_fields = FieldSet::from_metric(metric);
+ Self::verify_field_names(&target_fields, &metric_fields)?;
+ let datum = Datum::Missing(MissingDatum::from(metric));
+ Ok(Self {
+ timeseries_name: crate::timeseries_name(target, metric),
+ target: target_fields,
+ metric: metric_fields,
+ measurement: Measurement { timestamp, datum },
+ })
+ }
+
 /// Construct a new sample, created at the time the function is called.
 ///
 /// This materializes the data from the target and metric, and stores that information along
@@ -746,6 +839,18 @@ impl Sample {
 Self::new_with_timestamp(Utc::now(), target, metric)
 }

+ /// Construct a new sample with a missing measurement.
+ pub fn new_missing<T, M>(
+ target: &T,
+ metric: &M,
+ ) -> Result<Self, MetricsError>
+ where
+ T: traits::Target,
+ M: traits::Metric,
+ {
+ Self::new_missing_with_timestamp(Utc::now(), target, metric)
+ }
+
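A quick sketch of the new missing-sample API in use (a hypothetical example: it reuses the `MyTarget` and `MyMetric` test types defined earlier in this diff, and assumes `Sample`'s `measurement` field remains public):

 // Report that this metric could not be collected in this round.
 let target = MyTarget::default();
 let metric = MyMetric::default();
 let sample = Sample::new_missing(&target, &metric).unwrap();
 assert!(sample.measurement.is_missing());

 /// Return the fields for this sample.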
/// /// This returns the target fields and metric fields, chained, although there is no distinction @@ -951,7 +1056,7 @@ mod tests { fn test_measurement() { let measurement = Measurement::new(chrono::Utc::now(), 0i64); assert_eq!(measurement.datum_type(), DatumType::I64); - assert_eq!(measurement.start_time(), None); + assert!(measurement.start_time().is_none()); let datum = Cumulative::new(0i64); let measurement = Measurement::new(chrono::Utc::now(), datum); diff --git a/oximeter/producer/examples/producer.rs b/oximeter/producer/examples/producer.rs index dd9722c80a..8dbe0b6ad9 100644 --- a/oximeter/producer/examples/producer.rs +++ b/oximeter/producer/examples/producer.rs @@ -15,6 +15,7 @@ use dropshot::ConfigLogging; use dropshot::ConfigLoggingLevel; use dropshot::HandlerTaskMode; use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::api::internal::nexus::ProducerKind; use oximeter::types::Cumulative; use oximeter::types::ProducerRegistry; use oximeter::types::Sample; @@ -124,6 +125,7 @@ async fn main() -> anyhow::Result<()> { registry.register_producer(producer).unwrap(); let server_info = ProducerEndpoint { id: registry.producer_id(), + kind: ProducerKind::Service, address: args.address, base_route: "/collect".to_string(), interval: Duration::from_secs(10), diff --git a/package-manifest.toml b/package-manifest.toml index ca96341f2a..8516a50e65 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -384,10 +384,10 @@ only_for_targets.image = "standard" # 3. Use source.type = "manual" instead of "prebuilt" source.type = "prebuilt" source.repo = "crucible" -source.commit = "51a3121c8318fc7ac97d74f917ce1d37962e785f" +source.commit = "fab27994d0bd12725c17d6b478a9bfc2673ad6f4" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible.sha256.txt -source.sha256 = "897d0fd6c0b82db42256a63a13c228152e1117434afa2681f649b291e3c6f46d" +source.sha256 = "850b468c308cf63ef9e10addee36a923a91b7ab64af0fa0836130c830fb42863" output.type = "zone" [package.crucible-pantry] @@ -395,10 +395,10 @@ service_name = "crucible_pantry" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "51a3121c8318fc7ac97d74f917ce1d37962e785f" +source.commit = "fab27994d0bd12725c17d6b478a9bfc2673ad6f4" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-pantry.sha256.txt -source.sha256 = "fe545de7ac4f15454d7827927149c5f0fc68ce9545b4f1ef96aac9ac8039805a" +source.sha256 = "893f845caa5d9b146137b503e80d5615cbd6e9d393745e81e772b10a9072b58b" output.type = "zone" # Refer to @@ -409,10 +409,10 @@ service_name = "propolis-server" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "54398875a2125227d13827d4236dce943c019b1c" +source.commit = "f1571ce141421cff3d3328f43e7722f5df96fdda" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "01b8563db6626f90ee3fb6d97e7921b0a680373d843c1bea7ebf46fcea4f7b28" +source.sha256 = "6e2607f103419a6338936434f3e67afb7cbe14d6397f2d01982ba94b8d0182a9" output.type = "zone" [package.mg-ddm-gz] @@ -425,7 +425,7 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. 
Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "12b392be94ff93abc3017bf2610a3b18e2174a2d" +source.commit = "2fd39b75df696961e5ea190c7d74dd91f4849cd3" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//maghemite.sha256.txt source.sha256 = "38851c79c85d53e997db748520fb27c82299ce7e58a550e35646a548498f1271" @@ -441,7 +441,7 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "12b392be94ff93abc3017bf2610a3b18e2174a2d" +source.commit = "2fd39b75df696961e5ea190c7d74dd91f4849cd3" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt source.sha256 = "8cd94e9a6f6175081ce78f0281085a08a5306cde453d8e21deb28050945b1d88" @@ -456,10 +456,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "12b392be94ff93abc3017bf2610a3b18e2174a2d" +source.commit = "2fd39b75df696961e5ea190c7d74dd91f4849cd3" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "c4a7a626c84a28de3d2c6bfd85592bda2abad8cf5b41b2ce90b9c03904ccd3df" +source.sha256 = "802636775fa77dc6eec193e65fde87e403f6a11531745d47ef5e7ff13b242890" output.type = "zone" output.intermediate_only = true @@ -476,8 +476,8 @@ only_for_targets.image = "standard" # 2. Copy dendrite.tar.gz from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "8ff834e7d0a6adb263240edd40537f2c0768f1a4" -source.sha256 = "c00e79f55e0bdf048069b2d18a4d009ddfef46e7e5d846887cf96e843a8884bd" +source.commit = "45e05b2a90203d84510e0c8e902d9449b09ffd9b" +source.sha256 = "b14e73c8091a004472f9825b9b81b2c685bc5a48801704380a80481499060ad9" output.type = "zone" output.intermediate_only = true @@ -501,8 +501,8 @@ only_for_targets.image = "standard" # 2. Copy the output zone image from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "8ff834e7d0a6adb263240edd40537f2c0768f1a4" -source.sha256 = "428cce1e9aa399b1b49c04e7fd0bc1cb0e3f3fae6fda96055892a42e010c9d6f" +source.commit = "45e05b2a90203d84510e0c8e902d9449b09ffd9b" +source.sha256 = "06575bea6173d16f6d206b580956ae2cdc72c65df2eb2f40dac01468ab49e336" output.type = "zone" output.intermediate_only = true @@ -519,8 +519,8 @@ only_for_targets.image = "standard" # 2. 
Copy dendrite.tar.gz from dendrite/out to omicron/out/dendrite-softnpu.tar.gz source.type = "prebuilt" source.repo = "dendrite" -source.commit = "8ff834e7d0a6adb263240edd40537f2c0768f1a4" -source.sha256 = "5dd3534bec5eb4f857d0bf3994b26650288f650d409eec6aaa29860a2f481c37" +source.commit = "45e05b2a90203d84510e0c8e902d9449b09ffd9b" +source.sha256 = "db2a398426fe59bd911eed91a3db7731a7a4d57e31dd357d89828d04b0891e2a" output.type = "zone" output.intermediate_only = true diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 804ff08cce..65ee8a9912 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -4,5 +4,5 @@ # # We choose a specific toolchain (rather than "stable") for repeatability. The # intent is to keep this up-to-date with recently-released stable Rust. -channel = "1.73.0" +channel = "1.74.0" profile = "default" diff --git a/schema/all-zone-requests.json b/schema/all-zone-requests.json index 468f00ee0c..4eb56d379d 100644 --- a/schema/all-zone-requests.json +++ b/schema/all-zone-requests.json @@ -1,6 +1,7 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "title": "AllZoneRequests", + "description": "A wrapper around `ZoneRequest` that allows it to be serialized to a JSON file.", "type": "object", "required": [ "generation", @@ -8,7 +9,12 @@ ], "properties": { "generation": { - "$ref": "#/definitions/Generation" + "description": "ledger generation (not an Omicron-provided generation)", + "allOf": [ + { + "$ref": "#/definitions/Generation" + } + ] }, "requests": { "type": "array", @@ -719,6 +725,7 @@ "minimum": 0.0 }, "ZoneRequest": { + "description": "This struct represents the combo of \"what zone did you ask for\" + \"where did we put it\".", "type": "object", "required": [ "root", @@ -734,7 +741,7 @@ } }, "ZoneType": { - "description": "The type of zone which may be requested from Sled Agent", + "description": "The type of zone that Sled Agent may run", "type": "string", "enum": [ "clickhouse", diff --git a/schema/all-zones-requests.json b/schema/all-zones-requests.json new file mode 100644 index 0000000000..0e43e9ee21 --- /dev/null +++ b/schema/all-zones-requests.json @@ -0,0 +1,632 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "OmicronZonesConfigLocal", + "description": "Combines the Nexus-provided `OmicronZonesConfig` (which describes what Nexus wants for all of its zones) with the locally-determined configuration for these zones.", + "type": "object", + "required": [ + "ledger_generation", + "omicron_generation", + "zones" + ], + "properties": { + "ledger_generation": { + "description": "ledger-managed generation number\n\nThis generation is managed by the ledger facility itself. It's bumped whenever we write a new ledger. In practice, we don't currently have any reason to bump this _for a given Omicron generation_ so it's somewhat redundant. In principle, if we needed to modify the ledgered configuration due to some event that doesn't change the Omicron config (e.g., if we wanted to move the root filesystem to a different path), we could do that by bumping this generation.", + "allOf": [ + { + "$ref": "#/definitions/Generation" + } + ] + }, + "omicron_generation": { + "description": "generation of the Omicron-provided part of the configuration\n\nThis generation number is outside of Sled Agent's control. 
We store exactly what we were given and use this number to decide when to fail requests to establish an outdated configuration.\n\nYou can think of this as a major version number, with `ledger_generation` being a minor version number. See `is_newer_than()`.", + "allOf": [ + { + "$ref": "#/definitions/Generation" + } + ] + }, + "zones": { + "type": "array", + "items": { + "$ref": "#/definitions/OmicronZoneConfigLocal" + } + } + }, + "definitions": { + "Generation": { + "description": "Generation numbers stored in the database, used for optimistic concurrency control", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "IpNet": { + "oneOf": [ + { + "title": "v4", + "allOf": [ + { + "$ref": "#/definitions/Ipv4Net" + } + ] + }, + { + "title": "v6", + "allOf": [ + { + "$ref": "#/definitions/Ipv6Net" + } + ] + } + ] + }, + "Ipv4Net": { + "title": "An IPv4 subnet", + "description": "An IPv4 subnet, including prefix and subnet mask", + "examples": [ + "192.168.1.0/24" + ], + "type": "string", + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" + }, + "Ipv6Net": { + "title": "An IPv6 subnet", + "description": "An IPv6 subnet, including prefix and subnet mask", + "examples": [ + "fd12:3456::/64" + ], + "type": "string", + "pattern": "^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" + }, + "MacAddr": { + "title": "A MAC address", + "description": "A Media Access Control address, in EUI-48 format", + "examples": [ + "ff:ff:ff:ff:ff:ff" + ], + "type": "string", + "maxLength": 17, + "minLength": 5, + "pattern": "^([0-9a-fA-F]{0,2}:){5}[0-9a-fA-F]{0,2}$" + }, + "Name": { + "title": "A name unique within the parent collection", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. 
Names cannot be a UUID though they may contain a UUID.", + "type": "string", + "maxLength": 63, + "minLength": 1, + "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z][a-z0-9-]*[a-zA-Z0-9]*$" + }, + "NetworkInterface": { + "description": "Information required to construct a virtual network interface", + "type": "object", + "required": [ + "id", + "ip", + "kind", + "mac", + "name", + "primary", + "slot", + "subnet", + "vni" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "$ref": "#/definitions/NetworkInterfaceKind" + }, + "mac": { + "$ref": "#/definitions/MacAddr" + }, + "name": { + "$ref": "#/definitions/Name" + }, + "primary": { + "type": "boolean" + }, + "slot": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "subnet": { + "$ref": "#/definitions/IpNet" + }, + "vni": { + "$ref": "#/definitions/Vni" + } + } + }, + "NetworkInterfaceKind": { + "description": "The type of network interface", + "oneOf": [ + { + "description": "A vNIC attached to a guest instance", + "type": "object", + "required": [ + "id", + "type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "instance" + ] + } + } + }, + { + "description": "A vNIC associated with an internal service", + "type": "object", + "required": [ + "id", + "type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "service" + ] + } + } + } + ] + }, + "OmicronZoneConfig": { + "description": "Describes one Omicron-managed zone running on a sled", + "type": "object", + "required": [ + "id", + "underlay_address", + "zone_type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "underlay_address": { + "type": "string", + "format": "ipv6" + }, + "zone_type": { + "$ref": "#/definitions/OmicronZoneType" + } + } + }, + "OmicronZoneConfigLocal": { + "description": "Combines the Nexus-provided `OmicronZoneConfig` (which describes what Nexus wants for this zone) with any locally-determined configuration (like the path to the root filesystem)", + "type": "object", + "required": [ + "root", + "zone" + ], + "properties": { + "root": { + "type": "string" + }, + "zone": { + "$ref": "#/definitions/OmicronZoneConfig" + } + } + }, + "OmicronZoneDataset": { + "description": "Describes a persistent ZFS dataset associated with an Omicron zone", + "type": "object", + "required": [ + "pool_name" + ], + "properties": { + "pool_name": { + "$ref": "#/definitions/ZpoolName" + } + } + }, + "OmicronZoneType": { + "description": "Describes what kind of zone this is (i.e., what component is running in it) as well as any type-specific configuration", + "oneOf": [ + { + "type": "object", + "required": [ + "address", + "dns_servers", + "nic", + "ntp_servers", + "snat_cfg", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "type": [ + "string", + "null" + ] + }, + "nic": { + "description": "The service vNIC providing outbound connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/definitions/NetworkInterface" + } + ] + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "snat_cfg": { + "description": "The SNAT configuration for outbound connections.", + "allOf": [ + { + "$ref": 
"#/definitions/SourceNatConfig" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "boundary_ntp" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse_keeper" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "cockroach_db" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "crucible" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "crucible_pantry" + ] + } + } + }, + { + "type": "object", + "required": [ + "dataset", + "dns_address", + "http_address", + "nic", + "type" + ], + "properties": { + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "dns_address": { + "description": "The address at which the external DNS server is reachable.", + "type": "string" + }, + "http_address": { + "description": "The address at which the external DNS server API is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/definitions/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "external_dns" + ] + } + } + }, + { + "type": "object", + "required": [ + "dataset", + "dns_address", + "gz_address", + "gz_address_index", + "http_address", + "type" + ], + "properties": { + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "dns_address": { + "type": "string" + }, + "gz_address": { + "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", + "type": "string", + "format": "ipv6" + }, + "gz_address_index": { + "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "http_address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "internal_dns" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dns_servers", + "ntp_servers", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "type": [ + "string", + "null" + ] + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "type": { + "type": "string", + "enum": [ + "internal_ntp" + ] + } + } + }, + { + "type": "object", + 
"required": [ + "external_dns_servers", + "external_ip", + "external_tls", + "internal_address", + "nic", + "type" + ], + "properties": { + "external_dns_servers": { + "description": "External DNS servers Nexus can use to resolve external hosts.", + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "external_ip": { + "description": "The address at which the external nexus server is reachable.", + "type": "string", + "format": "ip" + }, + "external_tls": { + "description": "Whether Nexus's external endpoint should use TLS", + "type": "boolean" + }, + "internal_address": { + "description": "The address at which the internal nexus server is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/definitions/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "nexus" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "oximeter" + ] + } + } + } + ] + }, + "SourceNatConfig": { + "description": "An IP address and port range used for source NAT, i.e., making outbound network connections from guests or services.", + "type": "object", + "required": [ + "first_port", + "ip", + "last_port" + ], + "properties": { + "first_port": { + "description": "The first port used for source NAT, inclusive.", + "type": "integer", + "format": "uint16", + "minimum": 0.0 + }, + "ip": { + "description": "The external address provided to the instance or service.", + "type": "string", + "format": "ip" + }, + "last_port": { + "description": "The last port used for source NAT, also inclusive.", + "type": "integer", + "format": "uint16", + "minimum": 0.0 + } + } + }, + "Vni": { + "description": "A Geneve Virtual Network Identifier", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "ZpoolName": { + "title": "The name of a Zpool", + "description": "Zpool names are of the format ox{i,p}_. 
They are either Internal or External, and should be unique",
+ "type": "string",
+ "pattern": "^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ }
+ }
+}
\ No newline at end of file
diff --git a/schema/crdb/11.0.0/README.md b/schema/crdb/10.0.0/README.md
similarity index 100%
rename from schema/crdb/11.0.0/README.md
rename to schema/crdb/10.0.0/README.md
diff --git a/schema/crdb/11.0.0/up01.sql b/schema/crdb/10.0.0/up01.sql
similarity index 100%
rename from schema/crdb/11.0.0/up01.sql
rename to schema/crdb/10.0.0/up01.sql
diff --git a/schema/crdb/11.0.0/up02.sql b/schema/crdb/10.0.0/up02.sql
similarity index 100%
rename from schema/crdb/11.0.0/up02.sql
rename to schema/crdb/10.0.0/up02.sql
diff --git a/schema/crdb/11.0.0/up03.sql b/schema/crdb/10.0.0/up03.sql
similarity index 100%
rename from schema/crdb/11.0.0/up03.sql
rename to schema/crdb/10.0.0/up03.sql
diff --git a/schema/crdb/11.0.0/up04.sql b/schema/crdb/10.0.0/up04.sql
similarity index 100%
rename from schema/crdb/11.0.0/up04.sql
rename to schema/crdb/10.0.0/up04.sql
diff --git a/schema/crdb/11.0.0/up05.sql b/schema/crdb/10.0.0/up05.sql
similarity index 100%
rename from schema/crdb/11.0.0/up05.sql
rename to schema/crdb/10.0.0/up05.sql
diff --git a/schema/crdb/11.0.0/up06.sql b/schema/crdb/10.0.0/up06.sql
similarity index 100%
rename from schema/crdb/11.0.0/up06.sql
rename to schema/crdb/10.0.0/up06.sql
diff --git a/schema/crdb/11.0.0/up07.sql b/schema/crdb/10.0.0/up07.sql
similarity index 100%
rename from schema/crdb/11.0.0/up07.sql
rename to schema/crdb/10.0.0/up07.sql
diff --git a/schema/crdb/11.0.0/up08.sql b/schema/crdb/10.0.0/up08.sql
similarity index 100%
rename from schema/crdb/11.0.0/up08.sql
rename to schema/crdb/10.0.0/up08.sql
diff --git a/schema/crdb/11.0.0/up09.sql b/schema/crdb/10.0.0/up09.sql
similarity index 100%
rename from schema/crdb/11.0.0/up09.sql
rename to schema/crdb/10.0.0/up09.sql
diff --git a/schema/crdb/11.0.0/up10.sql b/schema/crdb/10.0.0/up10.sql
similarity index 100%
rename from schema/crdb/11.0.0/up10.sql
rename to schema/crdb/10.0.0/up10.sql
diff --git a/schema/crdb/11.0.0/up11.sql b/schema/crdb/10.0.0/up11.sql
similarity index 100%
rename from schema/crdb/11.0.0/up11.sql
rename to schema/crdb/10.0.0/up11.sql
diff --git a/schema/crdb/11.0.0/up12.sql b/schema/crdb/10.0.0/up12.sql
similarity index 100%
rename from schema/crdb/11.0.0/up12.sql
rename to schema/crdb/10.0.0/up12.sql
diff --git a/schema/crdb/12.0.0/up01.sql b/schema/crdb/12.0.0/up01.sql
new file mode 100644
index 0000000000..36f2f810ca
--- /dev/null
+++ b/schema/crdb/12.0.0/up01.sql
@@ -0,0 +1,27 @@
+/*
+ * Drop the entire metric producer assignment table.
+ *
+ * Programs wishing to produce metrics need to register with Nexus. That creates
+ * an assignment of the producer to a collector, which is recorded in this
+ * table. That registration is idempotent, and every _current_ producer will
+ * register when it restarts. For example, `dpd` includes a task that registers
+ * with Nexus, so each time it (re)starts, that registration will happen.
+ *
+ * With that in mind, dropping this table is safe, because as of today, all
+ * software updates require that the whole control plane be offline. We know
+ * that these entries will be recreated shortly, as the services registering
+ * producers are restarted.
+ * + * The current metric producers are: + * + * - `dpd` + * - Each `nexus` instance + * - Each `sled-agent` instance + * - The Propolis server for each guest Instance + * + * Another reason we're dropping the table is because we will add a new column, + * `kind`, in a following update file, but we don't have a good way to backfill + * that value for existing rows. We also don't need to, because these services + * will soon reregister, and provide us with a value. + */ +DROP TABLE IF EXISTS omicron.public.metric_producer; diff --git a/schema/crdb/12.0.0/up02.sql b/schema/crdb/12.0.0/up02.sql new file mode 100644 index 0000000000..96c4c5d6b4 --- /dev/null +++ b/schema/crdb/12.0.0/up02.sql @@ -0,0 +1,11 @@ +/* + * The kind of metric producer each record corresponds to. + */ +CREATE TYPE IF NOT EXISTS omicron.public.producer_kind AS ENUM ( + -- A sled agent for an entry in the sled table. + 'sled_agent', + -- A service in the omicron.public.service table + 'service', + -- A Propolis VMM for an instance in the omicron.public.instance table + 'instance' +); diff --git a/schema/crdb/12.0.0/up03.sql b/schema/crdb/12.0.0/up03.sql new file mode 100644 index 0000000000..fc57667541 --- /dev/null +++ b/schema/crdb/12.0.0/up03.sql @@ -0,0 +1,17 @@ +/* + * Recreate the metric producer assignment table. + * + * Note that we're adding the `kind` column here, using the new enum in the + * previous update SQL file. + */ +CREATE TABLE IF NOT EXISTS omicron.public.metric_producer ( + id UUID PRIMARY KEY, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + kind omicron.public.producer_kind, + ip INET NOT NULL, + port INT4 CHECK (port BETWEEN 0 AND 65535) NOT NULL, + interval FLOAT NOT NULL, + base_route STRING(512) NOT NULL, + oximeter_id UUID NOT NULL +); diff --git a/schema/crdb/12.0.0/up04.sql b/schema/crdb/12.0.0/up04.sql new file mode 100644 index 0000000000..cad33ddcf2 --- /dev/null +++ b/schema/crdb/12.0.0/up04.sql @@ -0,0 +1,8 @@ +/* + * Recreate index to support looking up a producer by its assigned oximeter + * collector ID. 
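+ *
+ * The (oximeter_id, id) column ordering also lets us page through all of the
+ * producers assigned to a single collector.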
+ */ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_producer_by_oximeter ON omicron.public.metric_producer ( + oximeter_id, + id +); diff --git a/schema/crdb/13.0.0/up1.sql b/schema/crdb/13.0.0/up1.sql new file mode 100644 index 0000000000..c6ca3bcb13 --- /dev/null +++ b/schema/crdb/13.0.0/up1.sql @@ -0,0 +1,4 @@ +CREATE TABLE IF NOT EXISTS omicron.public.sw_root_of_trust_page ( + id UUID PRIMARY KEY, + data_base64 TEXT NOT NULL +); diff --git a/schema/crdb/13.0.0/up2.sql b/schema/crdb/13.0.0/up2.sql new file mode 100644 index 0000000000..5d8e775038 --- /dev/null +++ b/schema/crdb/13.0.0/up2.sql @@ -0,0 +1,2 @@ +CREATE UNIQUE INDEX IF NOT EXISTS root_of_trust_page_properties + on omicron.public.sw_root_of_trust_page (data_base64); diff --git a/schema/crdb/13.0.0/up3.sql b/schema/crdb/13.0.0/up3.sql new file mode 100644 index 0000000000..9fb407e7b9 --- /dev/null +++ b/schema/crdb/13.0.0/up3.sql @@ -0,0 +1,6 @@ +CREATE TYPE IF NOT EXISTS omicron.public.root_of_trust_page_which AS ENUM ( + 'cmpa', + 'cfpa_active', + 'cfpa_inactive', + 'cfpa_scratch' +); diff --git a/schema/crdb/13.0.0/up4.sql b/schema/crdb/13.0.0/up4.sql new file mode 100644 index 0000000000..9d227c7427 --- /dev/null +++ b/schema/crdb/13.0.0/up4.sql @@ -0,0 +1,17 @@ +CREATE TABLE IF NOT EXISTS omicron.public.inv_root_of_trust_page ( + -- where this observation came from + -- (foreign key into `inv_collection` table) + inv_collection_id UUID NOT NULL, + -- which system this SP reports it is part of + -- (foreign key into `hw_baseboard_id` table) + hw_baseboard_id UUID NOT NULL, + -- when this observation was made + time_collected TIMESTAMPTZ NOT NULL, + -- which MGS instance reported this data + source TEXT NOT NULL, + + which omicron.public.root_of_trust_page_which NOT NULL, + sw_root_of_trust_page_id UUID NOT NULL, + + PRIMARY KEY (inv_collection_id, hw_baseboard_id, which) +); diff --git a/schema/crdb/14.0.0/up1.sql b/schema/crdb/14.0.0/up1.sql new file mode 100644 index 0000000000..3bff831ceb --- /dev/null +++ b/schema/crdb/14.0.0/up1.sql @@ -0,0 +1,37 @@ +-- Table of all sled subnets allocated for sleds added to an already initialized +-- rack. The sleds in this table and their allocated subnets are created before +-- a sled is added to the `sled` table. Addition to the `sled` table occurs +-- after the sled is initialized and notifies Nexus about itself. +-- +-- For simplicity and space savings, this table doesn't actually contain the +-- full subnets for a given sled, but only the octet that extends a /56 rack +-- subnet to a /64 sled subnet. The rack subnet is maintained in the `rack` +-- table. +-- +-- This table does not include subnet octets allocated during RSS and therefore +-- all of the octets start at 33. This makes the data in this table purely additive +-- post-RSS, which also implies that we cannot re-use subnet octets if an original +-- sled that was part of RSS was removed from the cluster. +CREATE TABLE IF NOT EXISTS omicron.public.sled_underlay_subnet_allocation ( + -- The physical identity of the sled + -- (foreign key into `hw_baseboard_id` table) + hw_baseboard_id UUID PRIMARY KEY, + + -- The rack to which a sled is being added + -- (foreign key into `rack` table) + -- + -- We require this because the sled is not yet part of the sled table when + -- we first allocate a subnet for it. + rack_id UUID NOT NULL, + + -- The sled to which a subnet is being allocated + -- + -- Eventually will be a foreign key into the `sled` table when the sled notifies nexus + -- about itself after initialization. 
+ sled_id UUID NOT NULL,
+
+ -- The octet that extends a /56 rack subnet to a /64 sled subnet
+ --
+ -- Always between 33 and 255 inclusive
+ subnet_octet INT2 NOT NULL UNIQUE CHECK (subnet_octet BETWEEN 33 AND 255)
+);
diff --git a/schema/crdb/14.0.0/up2.sql b/schema/crdb/14.0.0/up2.sql
new file mode 100644
index 0000000000..c3e18fa166
--- /dev/null
+++ b/schema/crdb/14.0.0/up2.sql
@@ -0,0 +1,5 @@
+-- Add an index which allows pagination by {rack_id, sled_id} pairs.
+CREATE UNIQUE INDEX IF NOT EXISTS lookup_subnet_allocation_by_rack_and_sled ON omicron.public.sled_underlay_subnet_allocation (
+ rack_id,
+ sled_id
+);
diff --git a/schema/crdb/15.0.0/up1.sql b/schema/crdb/15.0.0/up1.sql
new file mode 100644
index 0000000000..04baa76370
--- /dev/null
+++ b/schema/crdb/15.0.0/up1.sql
@@ -0,0 +1,6 @@
+CREATE TYPE IF NOT EXISTS omicron.public.sled_provision_state AS ENUM (
+ -- New resources can be provisioned onto the sled
+ 'provisionable',
+ -- New resources must not be provisioned onto the sled
+ 'non_provisionable'
+);
diff --git a/schema/crdb/15.0.0/up2.sql b/schema/crdb/15.0.0/up2.sql
new file mode 100644
index 0000000000..e3ea2ba11c
--- /dev/null
+++ b/schema/crdb/15.0.0/up2.sql
@@ -0,0 +1,3 @@
+ALTER TABLE omicron.public.sled
+ ADD COLUMN IF NOT EXISTS provision_state omicron.public.sled_provision_state
+ NOT NULL DEFAULT 'provisionable';
diff --git a/schema/crdb/15.0.0/up3.sql b/schema/crdb/15.0.0/up3.sql
new file mode 100644
index 0000000000..aaa3feac20
--- /dev/null
+++ b/schema/crdb/15.0.0/up3.sql
@@ -0,0 +1,5 @@
+-- Drop the default column value for provision_state -- it should always be set
+-- by Nexus.
+ALTER TABLE omicron.public.sled
+ ALTER COLUMN provision_state
+ DROP DEFAULT;
diff --git a/schema/crdb/16.0.0/up01.sql b/schema/crdb/16.0.0/up01.sql
new file mode 100644
index 0000000000..f9806c5917
--- /dev/null
+++ b/schema/crdb/16.0.0/up01.sql
@@ -0,0 +1,14 @@
+/*
+ * Previous commits added the optional kind of a producer. In this version,
+ * we're making the value required and not nullable. We'll first delete all
+ * records with a NULL kind -- there should not be any, since all producers both
+ * in and out of tree have been updated. Nonetheless, this is safe because
+ * currently we're updating offline, and all producers should re-register when
+ * they are restarted.
+ *
+ * NOTE: Full table scans are disallowed; however, we don't have an index on
+ * producer kind (and don't currently need one). Allow full table scans for the
+ * context of this one statement.
+ */
+SET LOCAL disallow_full_table_scans = off;
+DELETE FROM omicron.public.metric_producer WHERE kind IS NULL;
diff --git a/schema/crdb/16.0.0/up02.sql b/schema/crdb/16.0.0/up02.sql
new file mode 100644
index 0000000000..9c1ad2ea47
--- /dev/null
+++ b/schema/crdb/16.0.0/up02.sql
@@ -0,0 +1,4 @@
+/*
+ * Next, we make the field itself required in the database.
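+ * This is safe to run without a backfill step because the previous file
+ * already deleted any rows with a NULL kind.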
+ */ +ALTER TABLE IF EXISTS omicron.public.metric_producer ALTER COLUMN kind SET NOT NULL; diff --git a/schema/crdb/17.0.0/up1.sql b/schema/crdb/17.0.0/up1.sql new file mode 100644 index 0000000000..d28d5ca4b5 --- /dev/null +++ b/schema/crdb/17.0.0/up1.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.switch_port_settings_link_config ADD COLUMN IF NOT EXISTS autoneg BOOL NOT NULL DEFAULT false; diff --git a/schema/crdb/18.0.0/up01.sql b/schema/crdb/18.0.0/up01.sql new file mode 100644 index 0000000000..018bb36dcb --- /dev/null +++ b/schema/crdb/18.0.0/up01.sql @@ -0,0 +1,4 @@ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_deleted_disk ON omicron.public.disk ( + id +) WHERE + time_deleted IS NOT NULL; diff --git a/schema/crdb/19.0.0/up01.sql b/schema/crdb/19.0.0/up01.sql new file mode 100644 index 0000000000..6cfa92f4c2 --- /dev/null +++ b/schema/crdb/19.0.0/up01.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.external_ip ADD COLUMN IF NOT EXISTS project_id UUID; diff --git a/schema/crdb/19.0.0/up02.sql b/schema/crdb/19.0.0/up02.sql new file mode 100644 index 0000000000..733c46b0dc --- /dev/null +++ b/schema/crdb/19.0.0/up02.sql @@ -0,0 +1,4 @@ +ALTER TABLE omicron.public.external_ip ADD CONSTRAINT IF NOT EXISTS null_project_id CHECK ( + (kind = 'floating' AND is_service = FALSE AND project_id IS NOT NULL) OR + ((kind != 'floating' OR is_service = TRUE) AND project_id IS NULL) +); diff --git a/schema/crdb/19.0.0/up03.sql b/schema/crdb/19.0.0/up03.sql new file mode 100644 index 0000000000..d3577edc12 --- /dev/null +++ b/schema/crdb/19.0.0/up03.sql @@ -0,0 +1,6 @@ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_floating_ip_by_name on omicron.public.external_ip ( + name +) WHERE + kind = 'floating' AND + time_deleted is NULL AND + project_id is NULL; diff --git a/schema/crdb/19.0.0/up04.sql b/schema/crdb/19.0.0/up04.sql new file mode 100644 index 0000000000..9a40dc99c5 --- /dev/null +++ b/schema/crdb/19.0.0/up04.sql @@ -0,0 +1,7 @@ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_floating_ip_by_name_and_project on omicron.public.external_ip ( + project_id, + name +) WHERE + kind = 'floating' AND + time_deleted is NULL AND + project_id is NOT NULL; diff --git a/schema/crdb/19.0.0/up05.sql b/schema/crdb/19.0.0/up05.sql new file mode 100644 index 0000000000..3e172e3e70 --- /dev/null +++ b/schema/crdb/19.0.0/up05.sql @@ -0,0 +1,19 @@ +CREATE VIEW IF NOT EXISTS omicron.public.floating_ip AS +SELECT + id, + name, + description, + time_created, + time_modified, + time_deleted, + ip_pool_id, + ip_pool_range_id, + is_service, + parent_id, + ip, + project_id +FROM + omicron.public.external_ip +WHERE + omicron.public.external_ip.kind = 'floating' AND + project_id IS NOT NULL; diff --git a/schema/crdb/19.0.0/up06.sql b/schema/crdb/19.0.0/up06.sql new file mode 100644 index 0000000000..30c0b3773a --- /dev/null +++ b/schema/crdb/19.0.0/up06.sql @@ -0,0 +1,3 @@ +ALTER TABLE omicron.public.external_ip ADD CONSTRAINT IF NOT EXISTS null_non_fip_parent_id CHECK ( + (kind != 'floating' AND parent_id is NOT NULL) OR (kind = 'floating') +); diff --git a/schema/crdb/README.adoc b/schema/crdb/README.adoc index fba36ed73b..5b9c2f6a10 100644 --- a/schema/crdb/README.adoc +++ b/schema/crdb/README.adoc @@ -14,9 +14,11 @@ We use the following conventions: appear in each file. More on this below. ** If there's only one statement required, we put it into `up.sql`. ** If more than one change is needed, any number of files starting with `up` - and ending with `.sql` may be used. 
These files will be sorted in
- lexicographic order before being executed. Each will be executed in a
- separate transaction.
+ and ending with `.sql` may be used. These files must follow a
+ numerically-increasing pattern starting with 1 (leading zeroes are allowed,
+ so `up1.sql`, `up2.sql`, ..., or `up01.sql`, `up02.sql`, etc.), and they will
+ be sorted numerically by these values. Each will be executed in a separate
+ transaction.
 ** CockroachDB documentation recommends the following: "Execute schema
 changes ... in an explicit transaction consisting of the single schema
 change statement.". Practically this means: If you want to change multiple
@@ -65,7 +67,8 @@ Process:

 * If only one SQL statement is necessary to get from `OLD_VERSION` to
 `NEW_VERSION`, put that statement into `schema/crdb/NEW_VERSION/up.sql`. If
 multiple statements are required, put each one into a separate file, naming
- these `schema/crdb/NEW_VERSION/upN.sql` for as many `N` as you need.
+ these `schema/crdb/NEW_VERSION/upN.sql` for as many `N` as you need, starting
+ with `N=1`.
 ** Each file should contain _either_ one schema-modifying statement _or_ some
 number of data-modifying statements. You can combine multiple
 data-modifying statements. But you should not mix schema-modifying statements and
diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index a74cabfe6e..0bf365a2f1 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -73,6 +73,13 @@ CREATE TABLE IF NOT EXISTS omicron.public.rack (
 * Sleds
 */

+CREATE TYPE IF NOT EXISTS omicron.public.sled_provision_state AS ENUM (
+ -- New resources can be provisioned onto the sled
+ 'provisionable',
+ -- New resources must not be provisioned onto the sled
+ 'non_provisionable'
+);
+
 CREATE TABLE IF NOT EXISTS omicron.public.sled (
 /* Identity metadata (asset) */
 id UUID PRIMARY KEY,
@@ -104,6 +111,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.sled (
 /* The last address allocated to an Oxide service on this sled. */
 last_used_address INET NOT NULL,

+ /* The state of whether resources should be provisioned onto the sled */
+ provision_state omicron.public.sled_provision_state NOT NULL,
+
 -- This constraint should be upheld, even for deleted disks
 -- in the fleet.
 CONSTRAINT serial_part_revision_unique UNIQUE (
@@ -158,6 +168,51 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_resource_by_sled ON omicron.public.sled
 id
 );

+
+-- Table of all sled subnets allocated for sleds added to an already initialized
+-- rack. The sleds in this table and their allocated subnets are created before
+-- a sled is added to the `sled` table. Addition to the `sled` table occurs
+-- after the sled is initialized and notifies Nexus about itself.
+--
+-- For simplicity and space savings, this table doesn't actually contain the
+-- full subnets for a given sled, but only the octet that extends a /56 rack
+-- subnet to a /64 sled subnet. The rack subnet is maintained in the `rack`
+-- table.
+--
+-- This table does not include subnet octets allocated during RSS and therefore
+-- all of the octets start at 33. This makes the data in this table purely additive
+-- post-RSS, which also implies that we cannot re-use subnet octets if an original
+-- sled that was part of RSS was removed from the cluster.
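+--
+-- As a worked example (with made-up addresses): if the rack subnet were
+-- fd00:1122:3344:0100::/56, then subnet_octet 33 (0x21) would correspond to
+-- the sled subnet fd00:1122:3344:0121::/64.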
+CREATE TABLE IF NOT EXISTS omicron.public.sled_underlay_subnet_allocation ( + -- The physical identity of the sled + -- (foreign key into `hw_baseboard_id` table) + hw_baseboard_id UUID PRIMARY KEY, + + -- The rack to which a sled is being added + -- (foreign key into `rack` table) + -- + -- We require this because the sled is not yet part of the sled table when + -- we first allocate a subnet for it. + rack_id UUID NOT NULL, + + -- The sled to which a subnet is being allocated + -- + -- Eventually will be a foreign key into the `sled` table when the sled notifies nexus + -- about itself after initialization. + sled_id UUID NOT NULL, + + -- The octet that extends a /56 rack subnet to a /64 sled subnet + -- + -- Always between 33 and 255 inclusive + subnet_octet INT2 NOT NULL UNIQUE CHECK (subnet_octet BETWEEN 33 AND 255) +); + +-- Add an index which allows pagination by {rack_id, sled_id} pairs. +CREATE UNIQUE INDEX IF NOT EXISTS lookup_subnet_allocation_by_rack_and_sled ON omicron.public.sled_underlay_subnet_allocation ( + rack_id, + sled_id +); + /* * Switches */ @@ -971,6 +1026,11 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_disk_by_instance ON omicron.public.disk ) WHERE time_deleted IS NULL AND attach_instance_id IS NOT NULL; +CREATE UNIQUE INDEX IF NOT EXISTS lookup_deleted_disk ON omicron.public.disk ( + id +) WHERE + time_deleted IS NOT NULL; + CREATE TABLE IF NOT EXISTS omicron.public.image ( /* Identity metadata (resource) */ id UUID PRIMARY KEY, @@ -1108,6 +1168,18 @@ CREATE TABLE IF NOT EXISTS omicron.public.oximeter ( port INT4 CHECK (port BETWEEN 0 AND 65535) NOT NULL ); +/* + * The kind of metric producer each record corresponds to. + */ +CREATE TYPE IF NOT EXISTS omicron.public.producer_kind AS ENUM ( + -- A sled agent for an entry in the sled table. + 'sled_agent', + -- A service in the omicron.public.service table + 'service', + -- A Propolis VMM for an instance in the omicron.public.instance table + 'instance' +); + /* * Information about registered metric producers. */ @@ -1115,6 +1187,7 @@ CREATE TABLE IF NOT EXISTS omicron.public.metric_producer ( id UUID PRIMARY KEY, time_created TIMESTAMPTZ NOT NULL, time_modified TIMESTAMPTZ NOT NULL, + kind omicron.public.producer_kind NOT NULL, ip INET NOT NULL, port INT4 CHECK (port BETWEEN 0 AND 65535) NOT NULL, interval FLOAT NOT NULL, @@ -1589,6 +1662,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.external_ip ( /* The last port in the allowed range, also inclusive. */ last_port INT4 NOT NULL, + /* FK to the `project` table. */ + project_id UUID, + /* The name must be non-NULL iff this is a floating IP. */ CONSTRAINT null_fip_name CHECK ( (kind != 'floating' AND name IS NULL) OR @@ -1601,6 +1677,14 @@ CREATE TABLE IF NOT EXISTS omicron.public.external_ip ( (kind = 'floating' AND description IS NOT NULL) ), + /* Only floating IPs can be attached to a project, and + * they must have a parent project if they are instance FIPs. + */ + CONSTRAINT null_project_id CHECK ( + (kind = 'floating' AND is_service = FALSE AND project_id is NOT NULL) OR + ((kind != 'floating' OR is_service = TRUE) AND project_id IS NULL) + ), + /* * Only nullable if this is a floating IP, which may exist not * attached to any instance or service yet. @@ -1644,6 +1728,43 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_external_ip_by_parent ON omicron.public ) WHERE parent_id IS NOT NULL AND time_deleted IS NULL; +/* Enforce name-uniqueness of floating (service) IPs at fleet level. 
*/ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_floating_ip_by_name on omicron.public.external_ip ( + name +) WHERE + kind = 'floating' AND + time_deleted is NULL AND + project_id is NULL; + +/* Enforce name-uniqueness of floating IPs at project level. */ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_floating_ip_by_name_and_project on omicron.public.external_ip ( + project_id, + name +) WHERE + kind = 'floating' AND + time_deleted is NULL AND + project_id is NOT NULL; + +CREATE VIEW IF NOT EXISTS omicron.public.floating_ip AS +SELECT + id, + name, + description, + time_created, + time_modified, + time_deleted, + ip_pool_id, + ip_pool_range_id, + is_service, + parent_id, + ip, + project_id +FROM + omicron.public.external_ip +WHERE + omicron.public.external_ip.kind = 'floating' AND + project_id IS NOT NULL; + /*******************************************************************/ /* @@ -2614,13 +2735,20 @@ CREATE TABLE IF NOT EXISTS omicron.public.sw_caboose ( board TEXT NOT NULL, git_commit TEXT NOT NULL, name TEXT NOT NULL, - -- The MGS response that provides this field indicates that it can be NULL. - -- But that's only to support old software that we no longer support. version TEXT NOT NULL ); CREATE UNIQUE INDEX IF NOT EXISTS caboose_properties on omicron.public.sw_caboose (board, git_commit, name, version); +/* root of trust pages: this table assigns unique ids to distinct RoT CMPA + and CFPA page contents, each of which is a 512-byte blob */ +CREATE TABLE IF NOT EXISTS omicron.public.sw_root_of_trust_page ( + id UUID PRIMARY KEY, + data_base64 TEXT NOT NULL +); +CREATE UNIQUE INDEX IF NOT EXISTS root_of_trust_page_properties + on omicron.public.sw_root_of_trust_page (data_base64); + /* Inventory Collections */ -- list of all collections @@ -2728,6 +2856,32 @@ CREATE TABLE IF NOT EXISTS omicron.public.inv_caboose ( PRIMARY KEY (inv_collection_id, hw_baseboard_id, which) ); +CREATE TYPE IF NOT EXISTS omicron.public.root_of_trust_page_which AS ENUM ( + 'cmpa', + 'cfpa_active', + 'cfpa_inactive', + 'cfpa_scratch' +); + +-- root of trust key signing pages found +CREATE TABLE IF NOT EXISTS omicron.public.inv_root_of_trust_page ( + -- where this observation came from + -- (foreign key into `inv_collection` table) + inv_collection_id UUID NOT NULL, + -- which system this SP reports it is part of + -- (foreign key into `hw_baseboard_id` table) + hw_baseboard_id UUID NOT NULL, + -- when this observation was made + time_collected TIMESTAMPTZ NOT NULL, + -- which MGS instance reported this data + source TEXT NOT NULL, + + which omicron.public.root_of_trust_page_which NOT NULL, + sw_root_of_trust_page_id UUID NOT NULL, + + PRIMARY KEY (inv_collection_id, hw_baseboard_id, which) +); + /*******************************************************************/ /* @@ -2899,6 +3053,8 @@ CREATE TABLE IF NOT EXISTS omicron.public.db_metadata ( CHECK (singleton = true) ); +ALTER TABLE omicron.public.switch_port_settings_link_config ADD COLUMN IF NOT EXISTS autoneg BOOL NOT NULL DEFAULT false; + INSERT INTO omicron.public.db_metadata ( singleton, time_created, @@ -2906,7 +3062,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '11.0.0', NULL) + ( TRUE, NOW(), NOW(), '19.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/rss-service-plan.json b/schema/rss-service-plan-v2.json similarity index 80% rename from schema/rss-service-plan.json rename to schema/rss-service-plan-v2.json index 725caf0900..0bcd27b9cc 100644 --- a/schema/rss-service-plan.json +++ 
b/schema/rss-service-plan-v2.json @@ -13,136 +13,11 @@ "services": { "type": "object", "additionalProperties": { - "$ref": "#/definitions/SledRequest" + "$ref": "#/definitions/SledConfig" } } }, "definitions": { - "DatasetKind": { - "description": "The type of a dataset, and an auxiliary information necessary to successfully launch a zone managing the associated data.", - "oneOf": [ - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "cockroach_db" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "crucible" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse_keeper" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "external_dns" - ] - } - } - }, - { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "internal_dns" - ] - } - } - } - ] - }, - "DatasetName": { - "type": "object", - "required": [ - "kind", - "pool_name" - ], - "properties": { - "kind": { - "$ref": "#/definitions/DatasetKind" - }, - "pool_name": { - "$ref": "#/definitions/ZpoolName" - } - } - }, - "DatasetRequest": { - "description": "Describes a request to provision a specific dataset", - "type": "object", - "required": [ - "id", - "name", - "service_address" - ], - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "name": { - "$ref": "#/definitions/DatasetName" - }, - "service_address": { - "type": "string" - } - } - }, "DnsConfigParams": { "type": "object", "required": [ @@ -399,53 +274,96 @@ } ] }, - "ServiceType": { - "description": "Describes service-specific parameters.", + "OmicronZoneConfig": { + "description": "Describes one Omicron-managed zone running on a sled", + "type": "object", + "required": [ + "id", + "underlay_address", + "zone_type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "underlay_address": { + "type": "string", + "format": "ipv6" + }, + "zone_type": { + "$ref": "#/definitions/OmicronZoneType" + } + } + }, + "OmicronZoneDataset": { + "description": "Describes a persistent ZFS dataset associated with an Omicron zone", + "type": "object", + "required": [ + "pool_name" + ], + "properties": { + "pool_name": { + "$ref": "#/definitions/ZpoolName" + } + } + }, + "OmicronZoneType": { + "description": "Describes what kind of zone this is (i.e., what component is running in it) as well as any type-specific configuration", "oneOf": [ { "type": "object", "required": [ - "external_dns_servers", - "external_ip", - "external_tls", - "internal_address", + "address", + "dns_servers", "nic", + "ntp_servers", + "snat_cfg", "type" ], "properties": { - "external_dns_servers": { - "description": "External DNS servers Nexus can use to resolve external hosts.", + "address": { + "type": "string" + }, + "dns_servers": { "type": "array", "items": { "type": "string", "format": "ip" } }, - "external_ip": { - "description": "The address at which the external nexus server is reachable.", - "type": "string", - "format": "ip" - }, - "external_tls": { - "description": "Whether Nexus's external endpoint should use TLS", - "type": "boolean" - }, - 
"internal_address": { - "description": "The address at which the internal nexus server is reachable.", - "type": "string" + "domain": { + "type": [ + "string", + "null" + ] }, "nic": { - "description": "The service vNIC providing external connectivity using OPTE.", + "description": "The service vNIC providing outbound connectivity using OPTE.", "allOf": [ { "$ref": "#/definitions/NetworkInterface" } ] }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "snat_cfg": { + "description": "The SNAT configuration for outbound connections.", + "allOf": [ + { + "$ref": "#/definitions/SourceNatConfig" + } + ] + }, "type": { "type": "string", "enum": [ - "nexus" + "boundary_ntp" ] } } @@ -453,32 +371,21 @@ { "type": "object", "required": [ - "dns_address", - "http_address", - "nic", + "address", + "dataset", "type" ], "properties": { - "dns_address": { - "description": "The address at which the external DNS server is reachable.", + "address": { "type": "string" }, - "http_address": { - "description": "The address at which the external DNS server API is reachable.", - "type": "string" - }, - "nic": { - "description": "The service vNIC providing external connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/definitions/NetworkInterface" - } - ] + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" }, "type": { "type": "string", "enum": [ - "external_dns" + "clickhouse" ] } } @@ -486,34 +393,43 @@ { "type": "object", "required": [ - "dns_address", - "gz_address", - "gz_address_index", - "http_address", + "address", + "dataset", "type" ], "properties": { - "dns_address": { + "address": { "type": "string" }, - "gz_address": { - "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", - "type": "string", - "format": "ipv6" - }, - "gz_address_index": { - "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", - "type": "integer", - "format": "uint32", - "minimum": 0.0 + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" }, - "http_address": { + "type": { + "type": "string", + "enum": [ + "clickhouse_keeper" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { "type": "string" }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, "type": { "type": "string", "enum": [ - "internal_dns" + "cockroach_db" ] } } @@ -522,16 +438,20 @@ "type": "object", "required": [ "address", + "dataset", "type" ], "properties": { "address": { "type": "string" }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, "type": { "type": "string", "enum": [ - "oximeter" + "crucible" ] } } @@ -557,56 +477,75 @@ { "type": "object", "required": [ - "address", - "dns_servers", + "dataset", + "dns_address", + "http_address", "nic", - "ntp_servers", - "snat_cfg", "type" ], "properties": { - "address": { - "type": "string" + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" }, - "dns_servers": { - "type": "array", - "items": { - "type": "string", - "format": "ip" - } + "dns_address": { + "description": "The address at which the external DNS server is reachable.", + "type": "string" }, - "domain": { - "type": [ - "string", - "null" - ] + "http_address": { + "description": "The address at which the external DNS server API 
is reachable.", + "type": "string" }, "nic": { - "description": "The service vNIC providing outbound connectivity using OPTE.", + "description": "The service vNIC providing external connectivity using OPTE.", "allOf": [ { "$ref": "#/definitions/NetworkInterface" } ] }, - "ntp_servers": { - "type": "array", - "items": { - "type": "string" - } - }, - "snat_cfg": { - "description": "The SNAT configuration for outbound connections.", - "allOf": [ - { - "$ref": "#/definitions/SourceNatConfig" - } + "type": { + "type": "string", + "enum": [ + "external_dns" ] + } + } + }, + { + "type": "object", + "required": [ + "dataset", + "dns_address", + "gz_address", + "gz_address_index", + "http_address", + "type" + ], + "properties": { + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "dns_address": { + "type": "string" + }, + "gz_address": { + "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", + "type": "string", + "format": "ipv6" + }, + "gz_address_index": { + "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "http_address": { + "type": "string" }, "type": { "type": "string", "enum": [ - "boundary_ntp" + "internal_dns" ] } } @@ -653,53 +592,47 @@ { "type": "object", "required": [ - "address", + "external_dns_servers", + "external_ip", + "external_tls", + "internal_address", + "nic", "type" ], "properties": { - "address": { - "type": "string" + "external_dns_servers": { + "description": "External DNS servers Nexus can use to resolve external hosts.", + "type": "array", + "items": { + "type": "string", + "format": "ip" + } }, - "type": { + "external_ip": { + "description": "The address at which the external nexus server is reachable.", "type": "string", - "enum": [ - "clickhouse" - ] - } - } - }, - { - "type": "object", - "required": [ - "address", - "type" - ], - "properties": { - "address": { + "format": "ip" + }, + "external_tls": { + "description": "Whether Nexus's external endpoint should use TLS", + "type": "boolean" + }, + "internal_address": { + "description": "The address at which the internal nexus server is reachable.", "type": "string" }, - "type": { - "type": "string", - "enum": [ - "clickhouse_keeper" + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/definitions/NetworkInterface" + } ] - } - } - }, - { - "type": "object", - "required": [ - "address", - "type" - ], - "properties": { - "address": { - "type": "string" }, "type": { "type": "string", "enum": [ - "cockroach_db" + "nexus" ] } } @@ -717,82 +650,24 @@ "type": { "type": "string", "enum": [ - "crucible" + "oximeter" ] } } } ] }, - "ServiceZoneRequest": { - "description": "Describes a request to create a zone running one or more services.", - "type": "object", - "required": [ - "addresses", - "id", - "services", - "zone_type" - ], - "properties": { - "addresses": { - "type": "array", - "items": { - "type": "string", - "format": "ipv6" - } - }, - "dataset": { - "default": null, - "anyOf": [ - { - "$ref": "#/definitions/DatasetRequest" - }, - { - "type": "null" - } - ] - }, - "id": { - "type": "string", - "format": "uuid" - }, - "services": { - "type": "array", - "items": { - "$ref": 
"#/definitions/ServiceZoneService" - } - }, - "zone_type": { - "$ref": "#/definitions/ZoneType" - } - } - }, - "ServiceZoneService": { - "description": "Used to request that the Sled initialize a single service.", + "SledConfig": { "type": "object", "required": [ - "details", - "id" + "zones" ], "properties": { - "details": { - "$ref": "#/definitions/ServiceType" - }, - "id": { - "type": "string", - "format": "uuid" - } - } - }, - "SledRequest": { - "type": "object", - "properties": { - "service": { - "description": "Services to be instantiated.", - "default": [], + "zones": { + "description": "zones configured for this sled", "type": "array", "items": { - "$ref": "#/definitions/ServiceZoneRequest" + "$ref": "#/definitions/OmicronZoneConfig" } } } @@ -860,23 +735,6 @@ "format": "uint32", "minimum": 0.0 }, - "ZoneType": { - "description": "The type of zone which may be requested from Sled Agent", - "type": "string", - "enum": [ - "clickhouse", - "clickhouse_keeper", - "cockroach_db", - "crucible_pantry", - "crucible", - "external_dns", - "internal_dns", - "nexus", - "ntp", - "oximeter", - "switch" - ] - }, "ZpoolName": { "title": "The name of a Zpool", "description": "Zpool names are of the format ox{i,p}_. They are either Internal or External, and should be unique", diff --git a/schema/rss-sled-plan.json b/schema/rss-sled-plan.json index 2ce8ae3bdc..2ef7a7b58a 100644 --- a/schema/rss-sled-plan.json +++ b/schema/rss-sled-plan.json @@ -381,6 +381,11 @@ "$ref": "#/definitions/IpNetwork" } }, + "autoneg": { + "description": "Whether or not to set autonegotiation", + "default": false, + "type": "boolean" + }, "bgp_peers": { "description": "BGP peers on this port", "type": "array", diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index 61e61709e1..b734248f32 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -25,14 +25,17 @@ derive_more.workspace = true dns-server.workspace = true dns-service-client.workspace = true dpd-client.workspace = true +display-error-chain.workspace = true dropshot.workspace = true flate2.workspace = true futures.workspace = true glob.workspace = true +hex.workspace = true http.workspace = true hyper-staticfile.workspace = true gateway-client.workspace = true illumos-utils.workspace = true +installinator-common.workspace = true internal-dns.workspace = true ipnetwork.workspace = true itertools.workspace = true @@ -53,6 +56,7 @@ reqwest = { workspace = true, features = ["rustls-tls", "stream"] } schemars = { workspace = true, features = [ "chrono", "uuid1" ] } semver.workspace = true serde.workspace = true +serde_human_bytes.workspace = true serde_json = {workspace = true, features = ["raw_value"]} sha3.workspace = true sled-agent-client.workspace = true @@ -81,6 +85,7 @@ opte-ioctl.workspace = true [dev-dependencies] assert_matches.workspace = true expectorate.workspace = true +guppy.workspace = true http.workspace = true hyper.workspace = true omicron-test-utils.workspace = true @@ -88,11 +93,12 @@ openapi-lint.workspace = true openapiv3.workspace = true pretty_assertions.workspace = true rcgen.workspace = true -serial_test.workspace = true subprocess.workspace = true slog-async.workspace = true slog-term.workspace = true tempfile.workspace = true +tokio-stream.workspace = true +tokio-util.workspace = true illumos-utils = { workspace = true, features = ["testing", "tmp_keypath"] } sled-storage = { workspace = true, features = ["testing"] } diff --git a/sled-agent/src/bin/services-ledger-check-migrate.rs 
b/sled-agent/src/bin/services-ledger-check-migrate.rs
new file mode 100644
index 0000000000..456fdc74b7
--- /dev/null
+++ b/sled-agent/src/bin/services-ledger-check-migrate.rs
@@ -0,0 +1,80 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Test-migrates one or more old-format services ledger files to new-format
+//! Omicron zones ledgers
+
+use anyhow::Context;
+use camino::Utf8PathBuf;
+use clap::Args;
+use clap::Parser;
+use omicron_common::cmd::fatal;
+use omicron_common::cmd::CmdError;
+use omicron_sled_agent::services::OmicronZonesConfigLocal;
+use omicron_sled_agent::services_migration::AllZoneRequests;
+
+#[tokio::main]
+async fn main() {
+    if let Err(message) = do_run().await {
+        fatal(CmdError::Failure(message));
+    }
+}
+
+#[derive(Debug, Parser)]
+#[clap(about = "Test conversion of old-format services ledgers to new-format \
+    zones ledgers")]
+enum Converter {
+    /// checks whether one or more ledger file(s) can be converted successfully
+    Check(CheckArgs),
+
+    /// for a given ledger file, prints the converted form
+    Show(ShowArgs),
+}
+
+#[derive(Debug, Args)]
+struct CheckArgs {
+    #[clap(action)]
+    files: Vec<Utf8PathBuf>,
+}
+
+#[derive(Debug, Args)]
+struct ShowArgs {
+    #[clap(action)]
+    file: Utf8PathBuf,
+}
+
+async fn do_run() -> Result<(), anyhow::Error> {
+    let args = Converter::parse();
+
+    let (files, do_show) = match args {
+        Converter::Check(CheckArgs { files }) => (files, false),
+        Converter::Show(ShowArgs { file }) => (vec![file], true),
+    };
+
+    for file_path in &files {
+        let contents = tokio::fs::read_to_string(file_path)
+            .await
+            .with_context(|| format!("read {:?}", &file_path))?;
+        let parsed: AllZoneRequests = serde_json::from_str(&contents)
+            .with_context(|| format!("parse {:?}", &file_path))?;
+        let converted = OmicronZonesConfigLocal::try_from(parsed)
+            .with_context(|| format!("convert contents of {:?}", &file_path))?;
+        if do_show {
+            println!(
+                "{:#}",
+                serde_json::to_string_pretty(&converted).with_context(
+                    || format!("print contents of {:?}", &file_path)
+                )?
+            );
+        }
+        eprintln!(
+            "{}: processed okay (zones: {})",
+            file_path,
+            converted.zones.len()
+        );
+    }
+
+    eprintln!("all files processed okay (files: {})", files.len());
+    Ok(())
+}
diff --git a/sled-agent/src/boot_disk_os_writer.rs b/sled-agent/src/boot_disk_os_writer.rs
new file mode 100644
index 0000000000..a0798ed174
--- /dev/null
+++ b/sled-agent/src/boot_disk_os_writer.rs
@@ -0,0 +1,1669 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! This module provides `BootDiskOsWriter`, via which sled-agent can write new
+//! OS images to its boot disks.
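+//!
+//! A brief sketch of the flow implemented below: an HTTP handler calls
+//! `BootDiskOsWriter::start_update` with the client's uploaded image stream;
+//! the image is staged in a temporary file and hashed, then written to the
+//! target disk block by block and read back for validation, while callers
+//! poll `BootDiskOsWriter::status` for progress.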
+
+use crate::http_entrypoints::BootDiskOsWriteProgress;
+use crate::http_entrypoints::BootDiskOsWriteStatus;
+use async_trait::async_trait;
+use bytes::Bytes;
+use camino::Utf8PathBuf;
+use display_error_chain::DisplayErrorChain;
+use dropshot::HttpError;
+use futures::Stream;
+use futures::TryStreamExt;
+use installinator_common::M2Slot;
+use installinator_common::RawDiskWriter;
+use sha3::Digest;
+use sha3::Sha3_256;
+use slog::Logger;
+use std::collections::btree_map::Entry;
+use std::collections::BTreeMap;
+use std::io;
+use std::io::Read;
+use std::path::Path;
+use std::sync::Arc;
+use std::sync::Mutex;
+use tokio::fs::File;
+use tokio::io::AsyncReadExt;
+use tokio::io::AsyncSeekExt;
+use tokio::io::AsyncWrite;
+use tokio::io::AsyncWriteExt;
+use tokio::io::BufReader;
+use tokio::sync::oneshot;
+use tokio::sync::oneshot::error::TryRecvError;
+use tokio::sync::watch;
+use uuid::Uuid;
+
+impl BootDiskOsWriteStatus {
+    fn from_result(
+        update_id: Uuid,
+        result: &Result<(), Arc<BootDiskOsWriteError>>,
+    ) -> Self {
+        match result {
+            Ok(()) => Self::Complete { update_id },
+            Err(err) => Self::Failed {
+                update_id,
+                message: DisplayErrorChain::new(err).to_string(),
+            },
+        }
+    }
+}
+
+#[derive(Debug, thiserror::Error)]
+pub(crate) enum BootDiskOsWriteError {
+    // This variant should be impossible in production, as we build with
+    // panic=abort, but may be constructed in tests (e.g., during tokio runtime
+    // shutdown).
+    #[error("internal error (task panic)")]
+    TaskPanic,
+    #[error("an update is still running ({0})")]
+    UpdateRunning(Uuid),
+    #[error("a previous update completed ({0}); clear its status before starting a new update")]
+    CannotStartWithoutClearingPreviousStatus(Uuid),
+    #[error("failed to create temporary file")]
+    FailedCreatingTempfile(#[source] io::Error),
+    #[error("failed writing to temporary file")]
+    FailedWritingTempfile(#[source] io::Error),
+    #[error("failed downloading image from HTTP client")]
+    FailedDownloadingImage(#[source] HttpError),
+    #[error("hash mismatch in image from HTTP client: expected {expected} but got {got}")]
+    UploadedImageHashMismatch { expected: String, got: String },
+    #[error("failed to open disk for writing {path}")]
+    FailedOpenDiskForWrite {
+        #[source]
+        error: io::Error,
+        path: Utf8PathBuf,
+    },
+    #[error("image size ({image_size}) is not a multiple of disk block size ({disk_block_size})")]
+    ImageSizeNotMultipleOfBlockSize {
+        image_size: usize,
+        disk_block_size: usize,
+    },
+    #[error("failed reading from temporary file")]
+    FailedReadingTempfile(#[source] io::Error),
+    #[error("failed writing to disk {path}")]
+    FailedWritingDisk {
+        #[source]
+        error: io::Error,
+        path: Utf8PathBuf,
+    },
+    #[error("failed to open disk for reading {path}")]
+    FailedOpenDiskForRead {
+        #[source]
+        error: io::Error,
+        path: Utf8PathBuf,
+    },
+    #[error("failed reading from disk {path}")]
+    FailedReadingDisk {
+        #[source]
+        error: io::Error,
+        path: Utf8PathBuf,
+    },
+    #[error("hash mismatch after writing disk {path}: expected {expected} but got {got}")]
+    WrittenImageHashMismatch {
+        path: Utf8PathBuf,
+        expected: String,
+        got: String,
+    },
+    #[error("unexpected update ID {0}: cannot clear status")]
+    WrongUpdateIdClearingStatus(Uuid),
+}
+
+impl From<&BootDiskOsWriteError> for HttpError {
+    fn from(error: &BootDiskOsWriteError) -> Self {
+        let message = DisplayErrorChain::new(error).to_string();
+        match error {
+            BootDiskOsWriteError::UpdateRunning(_)
+            | BootDiskOsWriteError::CannotStartWithoutClearingPreviousStatus(
+                _,
+            )
+            | BootDiskOsWriteError::FailedDownloadingImage(_)
+            | BootDiskOsWriteError::UploadedImageHashMismatch { .. }
+            | BootDiskOsWriteError::ImageSizeNotMultipleOfBlockSize {
+                ..
+            }
+            | BootDiskOsWriteError::WrongUpdateIdClearingStatus(_) => {
+                HttpError::for_bad_request(None, message)
+            }
+            BootDiskOsWriteError::TaskPanic
+            | BootDiskOsWriteError::FailedCreatingTempfile(_)
+            | BootDiskOsWriteError::FailedWritingTempfile(_)
+            | BootDiskOsWriteError::FailedReadingTempfile(_)
+            | BootDiskOsWriteError::FailedOpenDiskForWrite { .. }
+            | BootDiskOsWriteError::FailedOpenDiskForRead { .. }
+            | BootDiskOsWriteError::FailedWritingDisk { .. }
+            | BootDiskOsWriteError::FailedReadingDisk { .. }
+            | BootDiskOsWriteError::WrittenImageHashMismatch { .. } => {
+                HttpError {
+                    status_code: http::StatusCode::SERVICE_UNAVAILABLE,
+                    error_code: None,
+                    external_message: message.clone(),
+                    internal_message: message,
+                }
+            }
+        }
+    }
+}
+
+// Note to future maintainers: `installinator` uses the `update_engine` crate to
+// drive its process (which includes writing the boot disk). We could also use
+// `update_engine` inside `BootDiskOsWriter`; instead, we've hand-rolled a state
+// machine with manual progress reporting. The current implementation is
+// _probably_ simple enough that this was a reasonable choice, but if it becomes
+// more complex (or if additional work needs to be done that `update_engine`
+// would make easier), consider switching it over.
+#[derive(Debug)]
+pub(crate) struct BootDiskOsWriter {
+    // Note: We use a std Mutex here to avoid cancellation issues with tokio
+    // Mutex. We never need to keep the lock held longer than necessary to copy
+    // or replace the current writer state.
+    states: Mutex<BTreeMap<M2Slot, WriterState>>,
+    log: Logger,
+}
+
+impl BootDiskOsWriter {
+    pub(crate) fn new(log: &Logger) -> Self {
+        Self {
+            states: Mutex::default(),
+            log: log.new(slog::o!("component" => "BootDiskOsWriter")),
+        }
+    }
+
+    /// Attempt to start a new update to the given disk (identified by both its
+    /// slot and the path to its devfs device).
+    ///
+    /// This method will return after the `image_upload` stream has been saved
+    /// to a local temporary file, but before the update has completed. Callers
+    /// must poll `status()` to discover when the running update completes (or
+    /// fails).
+    ///
+    /// # Errors
+    ///
+    /// This method will return an error and not start an update if any of the
+    /// following are true:
+    ///
+    /// * A previously-started update of this same `boot_disk` is still running
+    /// * A previously-completed update has not had its status cleared
+    /// * The `image_upload` stream returns an error
+    /// * The hash of the data provided by `image_upload` does not match
+    ///   `sha3_256_digest`
+    /// * Any of a variety of I/O errors occurs while copying from
+    ///   `image_upload` to a temporary file
+    ///
+    /// In all but the first case, the error returned will also be saved and
+    /// returned when `status()` is called (until another update is started).
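+    ///
+    /// A sketch of the intended call pattern (hypothetical caller; the locals
+    /// shown are illustrative, not part of this API):
+    ///
+    /// ```ignore
+    /// let writer = BootDiskOsWriter::new(&log);
+    /// // `image_upload` implements `Stream<Item = Result<Bytes, HttpError>>`,
+    /// // e.g. a dropshot `StreamingBody` converted into a stream.
+    /// writer
+    ///     .start_update(M2Slot::A, devfs_path, update_id, digest, image_upload)
+    ///     .await?;
+    /// // The upload is staged; poll for a terminal status.
+    /// loop {
+    ///     match writer.status(M2Slot::A) {
+    ///         BootDiskOsWriteStatus::InProgress { .. } => continue,
+    ///         _terminal => break,
+    ///     }
+    /// }
+    /// ```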
+    pub(crate) async fn start_update<S>(
+        &self,
+        boot_disk: M2Slot,
+        disk_devfs_path: Utf8PathBuf,
+        update_id: Uuid,
+        sha3_256_digest: [u8; 32],
+        image_upload: S,
+    ) -> Result<(), Arc<BootDiskOsWriteError>>
+    where
+        S: Stream<Item = Result<Bytes, HttpError>> + Send + 'static,
+    {
+        self.start_update_impl(
+            boot_disk,
+            disk_devfs_path,
+            update_id,
+            sha3_256_digest,
+            image_upload,
+            RealDiskInterface {},
+        )
+        .await
+    }
+
+    async fn start_update_impl<S, Writer>(
+        &self,
+        boot_disk: M2Slot,
+        disk_devfs_path: Utf8PathBuf,
+        update_id: Uuid,
+        sha3_256_digest: [u8; 32],
+        image_upload: S,
+        disk_writer: Writer,
+    ) -> Result<(), Arc<BootDiskOsWriteError>>
+    where
+        S: Stream<Item = Result<Bytes, HttpError>> + Send + 'static,
+        Writer: DiskInterface + Send + Sync + 'static,
+    {
+        // Construct a closure that will spawn a task to drive this update, but
+        // don't actually start it yet: we only allow an update to start if
+        // there's not currently an update running targeting the same slot, so
+        // we'll spawn this after checking that below.
+        let spawn_update_task = || {
+            let (uploaded_image_tx, uploaded_image_rx) = oneshot::channel();
+            let (progress_tx, progress_rx) = watch::channel(
+                BootDiskOsWriteProgress::ReceivingUploadedImage {
+                    bytes_received: 0,
+                },
+            );
+            let (complete_tx, complete_rx) = oneshot::channel();
+            let task = BootDiskOsWriteTask {
+                log: self
+                    .log
+                    .new(slog::o!("update_id" => update_id.to_string())),
+                disk_devfs_path,
+                sha3_256_digest,
+                progress_tx,
+                disk_interface: disk_writer,
+            };
+            tokio::spawn(task.run(
+                image_upload,
+                uploaded_image_tx,
+                complete_tx,
+            ));
+            (
+                uploaded_image_rx,
+                TaskRunningState { update_id, progress_rx, complete_rx },
+            )
+        };
+
+        // Either call `spawn_update_task` and get back the handle to
+        // `uploaded_image_rx`, or return an error (if another update for this
+        // boot disk is still running).
+        let uploaded_image_rx = {
+            let mut states = self.states.lock().unwrap();
+            match states.entry(boot_disk) {
+                Entry::Vacant(slot) => {
+                    let (uploaded_image_rx, running) = spawn_update_task();
+                    slot.insert(WriterState::TaskRunning(running));
+                    uploaded_image_rx
+                }
+                Entry::Occupied(mut slot) => match slot.get_mut() {
+                    WriterState::TaskRunning(running) => {
+                        // It's possible this task is actually complete and a
+                        // result is sitting in the `running.complete_rx`
+                        // oneshot, but for the purposes of starting a new
+                        // update it doesn't matter either way: we'll refuse to
+                        // start. Return the "another update running" error; the
+                        // caller will have to check the `status()`, which will
+                        // trigger a "see if it's actually done after all"
+                        // check.
+                        return Err(Arc::new(
+                            BootDiskOsWriteError::UpdateRunning(
+                                running.update_id,
+                            ),
+                        ));
+                    }
+                    WriterState::Complete(complete) => {
+                        return Err(Arc::new(
+                            BootDiskOsWriteError::CannotStartWithoutClearingPreviousStatus(
+                                complete.update_id,
+                            )));
+                    }
+                },
+            }
+        };
+
+        // We've now spawned a task to drive the update, and we want to wait for
+        // it to finish copying from `image_upload`.
+        uploaded_image_rx.await.map_err(|_| BootDiskOsWriteError::TaskPanic)?
+    }
+
+    /// Clear the status of a finished or failed update with the given ID
+    /// targeting `boot_disk`.
+    ///
+    /// If no update has ever been started for this `boot_disk`, returns
+    /// `Ok(())`.
+    ///
+    /// # Errors
+    ///
+    /// Fails if an update to `boot_disk` is currently running; only terminal
+    /// statuses can be cleared. Fails if the most recent terminal status
+    /// targeting `boot_disk` had a different update ID.
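+    ///
+    /// For example (hypothetical caller), once `status()` has reported a
+    /// terminal `Complete { update_id }` or `Failed { update_id, .. }` for
+    /// `boot_disk`:
+    ///
+    /// ```ignore
+    /// writer.clear_terminal_status(boot_disk, update_id)?;
+    /// // A subsequent `start_update()` targeting `boot_disk` may now proceed.
+    /// ```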
+ pub(crate) fn clear_terminal_status( + &self, + boot_disk: M2Slot, + update_id: Uuid, + ) -> Result<(), BootDiskOsWriteError> { + let mut states = self.states.lock().unwrap(); + let mut slot = match states.entry(boot_disk) { + // No status; nothing to clear. + Entry::Vacant(_slot) => return Ok(()), + Entry::Occupied(slot) => slot, + }; + + match slot.get_mut() { + WriterState::Complete(complete) => { + if complete.update_id == update_id { + slot.remove(); + Ok(()) + } else { + Err(BootDiskOsWriteError::WrongUpdateIdClearingStatus( + complete.update_id, + )) + } + } + WriterState::TaskRunning(running) => { + // Check whether the task is _actually_ still running, + // or whether it's done and just waiting for us to + // realize it. + match running.complete_rx.try_recv() { + Ok(result) => { + if running.update_id == update_id { + // This is a little surprising but legal: we've been + // asked to clear the terminal status of this + // update_id, even though we just realized it + // finished. + slot.remove(); + Ok(()) + } else { + let running_update_id = running.update_id; + // A different update just finished; store the + // result we got from the oneshot and don't remove + // the status. + slot.insert(WriterState::Complete( + TaskCompleteState { + update_id: running_update_id, + result, + }, + )); + Err(BootDiskOsWriteError::WrongUpdateIdClearingStatus( + running_update_id + )) + } + } + Err(TryRecvError::Empty) => Err( + BootDiskOsWriteError::UpdateRunning(running.update_id), + ), + Err(TryRecvError::Closed) => { + Err(BootDiskOsWriteError::TaskPanic) + } + } + } + } + } + + /// Get the status of any update running that targets `boot_disk`. + pub(crate) fn status(&self, boot_disk: M2Slot) -> BootDiskOsWriteStatus { + let mut states = self.states.lock().unwrap(); + let mut slot = match states.entry(boot_disk) { + Entry::Vacant(_) => return BootDiskOsWriteStatus::NoUpdateStarted, + Entry::Occupied(slot) => slot, + }; + + match slot.get_mut() { + WriterState::TaskRunning(running) => { + // Is the task actually still running? Check and see if it's + // sent us a result that we just haven't noticed yet. + match running.complete_rx.try_recv() { + Ok(result) => { + let update_id = running.update_id; + let status = BootDiskOsWriteStatus::from_result( + update_id, &result, + ); + slot.insert(WriterState::Complete(TaskCompleteState { + update_id, + result, + })); + status + } + Err(TryRecvError::Empty) => { + let progress = *running.progress_rx.borrow_and_update(); + BootDiskOsWriteStatus::InProgress { + update_id: running.update_id, + progress, + } + } + Err(TryRecvError::Closed) => { + let update_id = running.update_id; + let result = + Err(Arc::new(BootDiskOsWriteError::TaskPanic)); + let status = BootDiskOsWriteStatus::from_result( + update_id, &result, + ); + slot.insert(WriterState::Complete(TaskCompleteState { + update_id, + result, + })); + status + } + } + } + WriterState::Complete(complete) => { + BootDiskOsWriteStatus::from_result( + complete.update_id, + &complete.result, + ) + } + } + } +} + +#[derive(Debug)] +enum WriterState { + /// A task is running to write a new image to a boot disk. + TaskRunning(TaskRunningState), + /// The result of the most recent write. 
+    Complete(TaskCompleteState),
+}
+
+#[derive(Debug)]
+struct TaskRunningState {
+    update_id: Uuid,
+    progress_rx: watch::Receiver<BootDiskOsWriteProgress>,
+    complete_rx: oneshot::Receiver<Result<(), Arc<BootDiskOsWriteError>>>,
+}
+
+#[derive(Debug)]
+struct TaskCompleteState {
+    update_id: Uuid,
+    result: Result<(), Arc<BootDiskOsWriteError>>,
+}
+
+#[derive(Debug)]
+struct BootDiskOsWriteTask<W> {
+    log: Logger,
+    sha3_256_digest: [u8; 32],
+    disk_devfs_path: Utf8PathBuf,
+    progress_tx: watch::Sender<BootDiskOsWriteProgress>,
+    disk_interface: W,
+}
+
+impl<W: DiskInterface> BootDiskOsWriteTask<W> {
+    async fn run<S>(
+        self,
+        image_upload: S,
+        uploaded_image_tx: oneshot::Sender<
+            Result<(), Arc<BootDiskOsWriteError>>,
+        >,
+        complete_tx: oneshot::Sender<Result<(), Arc<BootDiskOsWriteError>>>,
+    ) where
+        S: Stream<Item = Result<Bytes, HttpError>> + Send + 'static,
+    {
+        let result = self.run_impl(image_upload, uploaded_image_tx).await;
+
+        // It's possible (albeit unlikely) our caller has discarded the receive
+        // half of this channel; ignore any send error.
+        _ = complete_tx.send(result);
+    }
+
+    async fn run_impl<S>(
+        self,
+        image_upload: S,
+        uploaded_image_tx: oneshot::Sender<
+            Result<(), Arc<BootDiskOsWriteError>>,
+        >,
+    ) -> Result<(), Arc<BootDiskOsWriteError>>
+    where
+        S: Stream<Item = Result<Bytes, HttpError>> + Send + 'static,
+    {
+        // Copy from `image_upload` into a tempfile, then report the result on
+        // `uploaded_image_tx`. Our dropshot server will not respond to the
+        // client that requested this update until we finish this step and send
+        // a response on `uploaded_image_tx`, as `image_upload` is the
+        // `StreamingBody` attached to their request.
+        //
+        // If this step fails, we will send the error to the client who sent the
+        // request _and_ store a copy of the same error in our current update
+        // state.
+        let (image_tempfile, image_size) = match self
+            .download_body_to_tempfile(image_upload)
+            .await
+            .map_err(Arc::new)
+        {
+            Ok(tempfile) => {
+                _ = uploaded_image_tx.send(Ok(()));
+                tempfile
+            }
+            Err(err) => {
+                _ = uploaded_image_tx.send(Err(Arc::clone(&err)));
+                return Err(err);
+            }
+        };
+
+        let disk_block_size = self
+            .copy_tempfile_to_disk(image_tempfile, image_size)
+            .await
+            .map_err(Arc::new)?;
+
+        self.validate_written_image(image_size, disk_block_size)
+            .await
+            .map_err(Arc::new)?;
+
+        Ok(())
+    }
+
+    async fn download_body_to_tempfile<S>(
+        &self,
+        image_upload: S,
+    ) -> Result<(File, usize), BootDiskOsWriteError>
+    where
+        S: Stream<Item = Result<Bytes, HttpError>> + Send + 'static,
+    {
+        let tempfile = camino_tempfile::tempfile()
+            .map_err(BootDiskOsWriteError::FailedCreatingTempfile)?;
+
+        let mut tempfile =
+            tokio::io::BufWriter::new(tokio::fs::File::from_std(tempfile));
+
+        let mut image_upload = std::pin::pin!(image_upload);
+        let mut hasher = Sha3_256::default();
+        let mut bytes_received = 0;
+
+        // Stream the uploaded image into our tempfile.
+        while let Some(bytes) = image_upload
+            .try_next()
+            .await
+            .map_err(BootDiskOsWriteError::FailedDownloadingImage)?
+        {
+            hasher.update(&bytes);
+            tempfile
+                .write_all(&bytes)
+                .await
+                .map_err(BootDiskOsWriteError::FailedWritingTempfile)?;
+            bytes_received += bytes.len();
+            self.progress_tx.send_modify(|progress| {
+                *progress = BootDiskOsWriteProgress::ReceivingUploadedImage {
+                    bytes_received,
+                }
+            });
+        }
+
+        // Flush any remaining buffered data.
+        tempfile
+            .flush()
+            .await
+            .map_err(BootDiskOsWriteError::FailedWritingTempfile)?;
+
+        // Rewind the tempfile.
+        let mut tempfile = tempfile.into_inner();
+        tempfile
+            .rewind()
+            .await
+            .map_err(BootDiskOsWriteError::FailedWritingTempfile)?;
+
+        // Ensure the data the client sent us matches the hash they also sent
+        // us. A failure here means either the client lied or something has gone
+        // horribly wrong.
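+        // Compare the digest we computed incrementally above against the
+        // caller-supplied digest; `.into()` converts our `[u8; 32]` into the
+        // hasher's output array type so the two can be compared directly.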
+        let hash = hasher.finalize();
+        let expected_hash_str = hex::encode(&self.sha3_256_digest);
+        if hash == self.sha3_256_digest.into() {
+            info!(
+                self.log, "received uploaded image";
+                "bytes_received" => bytes_received,
+                "hash" => expected_hash_str,
+            );
+
+            Ok((tempfile, bytes_received))
+        } else {
+            let computed_hash_str = hex::encode(&hash);
+            error!(
+                self.log, "received uploaded image: incorrect hash";
+                "bytes_received" => bytes_received,
+                "computed_hash" => &computed_hash_str,
+                "expected_hash" => &expected_hash_str,
+            );
+
+            Err(BootDiskOsWriteError::UploadedImageHashMismatch {
+                expected: expected_hash_str,
+                got: computed_hash_str,
+            })
+        }
+    }
+
+    /// Copy from `image_tempfile` to the disk device at `self.disk_devfs_path`.
+    /// Returns the block size of that disk.
+    async fn copy_tempfile_to_disk(
+        &self,
+        image_tempfile: File,
+        image_size: usize,
+    ) -> Result<usize, BootDiskOsWriteError> {
+        let mut disk_writer = self
+            .disk_interface
+            .open_writer(self.disk_devfs_path.as_std_path())
+            .await
+            .map_err(|error| BootDiskOsWriteError::FailedOpenDiskForWrite {
+                error,
+                path: self.disk_devfs_path.clone(),
+            })?;
+
+        let disk_block_size = disk_writer.block_size();
+
+        if image_size % disk_block_size != 0 {
+            return Err(
+                BootDiskOsWriteError::ImageSizeNotMultipleOfBlockSize {
+                    image_size,
+                    disk_block_size,
+                },
+            );
+        }
+        let num_blocks = image_size / disk_block_size;
+
+        let mut buf = vec![0; disk_block_size];
+        let mut image_tempfile = BufReader::new(image_tempfile);
+
+        for block in 0..num_blocks {
+            image_tempfile
+                .read_exact(&mut buf)
+                .await
+                .map_err(BootDiskOsWriteError::FailedReadingTempfile)?;
+
+            disk_writer.write_all(&buf).await.map_err(|error| {
+                BootDiskOsWriteError::FailedWritingDisk {
+                    error,
+                    path: self.disk_devfs_path.clone(),
+                }
+            })?;
+
+            self.progress_tx.send_modify(|progress| {
+                *progress = BootDiskOsWriteProgress::WritingImageToDisk {
+                    bytes_written: (block + 1) * buf.len(),
+                }
+            });
+        }
+
+        disk_writer.finalize().await.map_err(|error| {
+            BootDiskOsWriteError::FailedWritingDisk {
+                error,
+                path: self.disk_devfs_path.clone(),
+            }
+        })?;
+
+        info!(
+            self.log, "copied OS image to disk";
+            "path" => %self.disk_devfs_path,
+            "bytes_written" => image_size,
+        );
+
+        Ok(disk_block_size)
+    }
+
+    async fn validate_written_image(
+        self,
+        image_size: usize,
+        disk_block_size: usize,
+    ) -> Result<(), BootDiskOsWriteError> {
+        // We're reading the OS image back from disk and hashing it; this can
+        // all be synchronous inside a spawn_blocking.
+        tokio::task::spawn_blocking(move || {
+            let mut f = self
+                .disk_interface
+                .open_reader(self.disk_devfs_path.as_std_path())
+                .map_err(|error| {
+                    BootDiskOsWriteError::FailedOpenDiskForRead {
+                        error,
+                        path: self.disk_devfs_path.clone(),
+                    }
+                })?;
+
+            let mut buf = vec![0; disk_block_size];
+            let mut hasher = Sha3_256::default();
+            let mut bytes_read = 0;
+
+            while bytes_read < image_size {
+                // We already confirmed while writing the image that the image
+                // size is an exact multiple of the disk block size, so we can
+                // always read a full `buf` here.
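+                // (E.g., with a 512-byte block size, a 1 MiB image is read
+                // back as exactly 2048 full blocks.)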
+                f.read_exact(&mut buf).map_err(|error| {
+                    BootDiskOsWriteError::FailedReadingDisk {
+                        error,
+                        path: self.disk_devfs_path.clone(),
+                    }
+                })?;
+
+                hasher.update(&buf);
+                bytes_read += buf.len();
+                self.progress_tx.send_modify(|progress| {
+                    *progress =
+                        BootDiskOsWriteProgress::ValidatingWrittenImage {
+                            bytes_read,
+                        };
+                });
+            }
+
+            let expected_hash_str = hex::encode(&self.sha3_256_digest);
+            let hash = hasher.finalize();
+            if hash == self.sha3_256_digest.into() {
+                info!(
+                    self.log, "validated OS image written to disk";
+                    "path" => %self.disk_devfs_path,
+                    "hash" => expected_hash_str,
+                );
+                Ok(())
+            } else {
+                let computed_hash_str = hex::encode(&hash);
+                error!(
+                    self.log, "failed to validate written OS image";
+                    "bytes_hashed" => image_size,
+                    "computed_hash" => &computed_hash_str,
+                    "expected_hash" => &expected_hash_str,
+                );
+                Err(BootDiskOsWriteError::WrittenImageHashMismatch {
+                    path: self.disk_devfs_path,
+                    expected: expected_hash_str,
+                    got: computed_hash_str,
+                })
+            }
+        })
+        .await
+        .expect("blocking task panicked")
+    }
+}
+
+// Utility traits to allow injecting an in-memory "disk" for unit tests.
+#[async_trait]
+trait DiskWriter: AsyncWrite + Send + Sized + Unpin {
+    fn block_size(&self) -> usize;
+    async fn finalize(self) -> io::Result<()>;
+}
+#[async_trait]
+trait DiskInterface: Send + Sync + 'static {
+    type Writer: DiskWriter;
+    type Reader: io::Read + Send;
+    async fn open_writer(&self, path: &Path) -> io::Result<Self::Writer>;
+    fn open_reader(&self, path: &Path) -> io::Result<Self::Reader>;
+}
+
+#[async_trait]
+impl DiskWriter for RawDiskWriter {
+    fn block_size(&self) -> usize {
+        RawDiskWriter::block_size(self)
+    }
+
+    async fn finalize(self) -> io::Result<()> {
+        RawDiskWriter::finalize(self).await
+    }
+}
+
+struct RealDiskInterface {}
+
+#[async_trait]
+impl DiskInterface for RealDiskInterface {
+    type Writer = RawDiskWriter;
+    type Reader = std::fs::File;
+
+    async fn open_writer(&self, path: &Path) -> io::Result<Self::Writer> {
+        RawDiskWriter::open(path).await
+    }
+
+    fn open_reader(&self, path: &Path) -> io::Result<Self::Reader> {
+        std::fs::File::open(path)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use futures::future;
+    use futures::stream;
+    use installinator_common::BlockSizeBufWriter;
+    use omicron_test_utils::dev::test_setup_log;
+    use rand::RngCore;
+    use std::mem;
+    use std::pin::Pin;
+    use std::task::ready;
+    use std::task::Context;
+    use std::task::Poll;
+    use std::time::Duration;
+    use tokio::sync::mpsc;
+    use tokio::sync::Semaphore;
+    use tokio_stream::wrappers::UnboundedReceiverStream;
+    use tokio_util::sync::PollSemaphore;
+
+    // Most of the tests below end up looping while calling
+    // `BootDiskOsWriter::status()` waiting for a specific status message to
+    // arrive. If we get that wrong (or the code under test is wrong!), that
+    // could end up looping forever, so we run all the relevant bits of the
+    // tests under a tokio timeout. We expect all the tests to complete very
+    // quickly in general (< 1 second), so we'll pick something
+    // outrageously-long-enough that if we hit it, we're almost certainly
+    // dealing with a hung test.
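+    // A hypothetical helper (shown only as a sketch, not part of this module)
+    // of the polling pattern each test below inlines by hand:
+    //
+    //     async fn wait_for(
+    //         writer: &BootDiskOsWriter,
+    //         boot_disk: M2Slot,
+    //         pred: impl Fn(&BootDiskOsWriteStatus) -> bool,
+    //     ) -> BootDiskOsWriteStatus {
+    //         loop {
+    //             let status = writer.status(boot_disk);
+    //             if pred(&status) {
+    //                 return status;
+    //             }
+    //             tokio::time::sleep(Duration::from_millis(50)).await;
+    //         }
+    //     }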
+    const TEST_TIMEOUT: Duration = Duration::from_secs(30);
+
+    #[derive(Debug, Clone, PartialEq, Eq)]
+    struct InMemoryDiskContents {
+        path: Utf8PathBuf,
+        data: Vec<u8>,
+    }
+
+    #[derive(Debug, Clone)]
+    struct InMemoryDiskInterface {
+        semaphore: Arc<Semaphore>,
+        finalized_writes: Arc<Mutex<Vec<InMemoryDiskContents>>>,
+    }
+
+    impl InMemoryDiskInterface {
+        const BLOCK_SIZE: usize = 16;
+
+        fn new(semaphore: Semaphore) -> Self {
+            Self {
+                semaphore: Arc::new(semaphore),
+                finalized_writes: Arc::default(),
+            }
+        }
+    }
+
+    #[async_trait]
+    impl DiskInterface for InMemoryDiskInterface {
+        type Writer = InMemoryDiskWriter;
+        type Reader = io::Cursor<Vec<u8>>;
+
+        async fn open_writer(&self, path: &Path) -> io::Result<Self::Writer> {
+            Ok(InMemoryDiskWriter {
+                opened_path: path
+                    .to_owned()
+                    .try_into()
+                    .expect("non-utf8 test path"),
+                data: BlockSizeBufWriter::with_block_size(
+                    Self::BLOCK_SIZE,
+                    Vec::new(),
+                ),
+                semaphore: PollSemaphore::new(Arc::clone(&self.semaphore)),
+                finalized_writes: Arc::clone(&self.finalized_writes),
+            })
+        }
+
+        fn open_reader(&self, path: &Path) -> io::Result<Self::Reader> {
+            let written_files = self.finalized_writes.lock().unwrap();
+            for contents in written_files.iter() {
+                if contents.path == path {
+                    return Ok(io::Cursor::new(contents.data.clone()));
+                }
+            }
+            Err(io::Error::new(
+                io::ErrorKind::Other,
+                format!("no written file for {}", path.display()),
+            ))
+        }
+    }
+
+    struct InMemoryDiskWriter {
+        opened_path: Utf8PathBuf,
+        data: BlockSizeBufWriter<Vec<u8>>,
+        semaphore: PollSemaphore,
+        finalized_writes: Arc<Mutex<Vec<InMemoryDiskContents>>>,
+    }
+
+    #[async_trait]
+    impl DiskWriter for InMemoryDiskWriter {
+        fn block_size(&self) -> usize {
+            self.data.block_size()
+        }
+
+        async fn finalize(mut self) -> io::Result<()> {
+            self.data.flush().await?;
+
+            let mut finalized = self.finalized_writes.lock().unwrap();
+            finalized.push(InMemoryDiskContents {
+                path: self.opened_path,
+                data: self.data.into_inner(),
+            });
+
+            Ok(())
+        }
+    }
+
+    impl AsyncWrite for InMemoryDiskWriter {
+        fn poll_write(
+            mut self: Pin<&mut Self>,
+            cx: &mut Context<'_>,
+            buf: &[u8],
+        ) -> Poll<io::Result<usize>> {
+            let permit = match ready!(self.semaphore.poll_acquire(cx)) {
+                Some(permit) => permit,
+                None => panic!("test semaphore closed"),
+            };
+            let result = Pin::new(&mut self.data).poll_write(cx, buf);
+            permit.forget();
+            result
+        }
+
+        fn poll_flush(
+            mut self: Pin<&mut Self>,
+            cx: &mut Context<'_>,
+        ) -> Poll<io::Result<()>> {
+            Pin::new(&mut self.data).poll_flush(cx)
+        }
+
+        fn poll_shutdown(
+            mut self: Pin<&mut Self>,
+            cx: &mut Context<'_>,
+        ) -> Poll<io::Result<()>> {
+            Pin::new(&mut self.data).poll_shutdown(cx)
+        }
+    }
+
+    fn expect_in_progress(
+        status: BootDiskOsWriteStatus,
+    ) -> BootDiskOsWriteProgress {
+        let BootDiskOsWriteStatus::InProgress { progress, .. } = status else {
+            panic!("expected Status::InProgress; got {status:?}");
+        };
+        progress
+    }
+
+    #[tokio::test]
+    async fn boot_disk_os_writer_delivers_upload_progress_and_rejects_bad_hashes(
+    ) {
+        let logctx =
+            test_setup_log("boot_disk_os_writer_delivers_upload_progress_and_rejects_bad_hashes");
+
+        let writer = Arc::new(BootDiskOsWriter::new(&logctx.log));
+        let boot_disk = M2Slot::A;
+
+        // We'll give the writer an intentionally-wrong sha3 digest and confirm
+        // it rejects the upload based on this.
+        let claimed_sha3_digest = [0; 32];
+
+        // Construct an in-memory stream around an mpsc channel as our client
+        // upload.
+        let (upload_tx, upload_rx) = mpsc::unbounded_channel();
+
+        // Spawn the `start_update` onto a background task; this won't end until
+        // we close (or send an error on) `upload_tx`.
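+        // (`UnboundedReceiverStream` adapts `upload_rx` into the
+        // `Stream<Item = Result<Bytes, HttpError>>` that `start_update()`
+        // expects.)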
+ let start_update_task = { + let writer = Arc::clone(&writer); + tokio::spawn(async move { + writer + .start_update( + boot_disk, + "/does-not-matter".into(), + Uuid::new_v4(), + claimed_sha3_digest, + UnboundedReceiverStream::new(upload_rx), + ) + .await + }) + }; + + // As we stream data in, we'll compute the actual hash to check against + // the error we expect to see. + let mut actual_data_hasher = Sha3_256::new(); + + // Run the rest of the test under a timeout to catch any incorrect + // assumptions that result in a hang. + tokio::time::timeout(TEST_TIMEOUT, async move { + // We're racing `writer`'s spawning of the actual update task; spin + // until we transition from "no update" to "receiving uploaded + // image". + loop { + match writer.status(boot_disk) { + BootDiskOsWriteStatus::NoUpdateStarted => { + tokio::time::sleep(Duration::from_millis(50)).await; + continue; + } + BootDiskOsWriteStatus::InProgress { progress, .. } => { + assert_eq!( + progress, + BootDiskOsWriteProgress::ReceivingUploadedImage { + bytes_received: 0 + } + ); + break; + } + status @ (BootDiskOsWriteStatus::Complete { .. } + | BootDiskOsWriteStatus::Failed { .. }) => { + panic!("unexpected status {status:?}") + } + } + } + + let mut prev_bytes_received = 0; + + // Send a few chunks of data. After each, we're racing with `writer` + // which has to copy that data to a temp file before the status will + // change, so loop until we see what we expect. Our TEST_TIMEOUT + // ensures we don't stay here forever if something goes wrong. + for i in 1..=10 { + let data_len = i * 100; + let chunk = vec![0; data_len]; + actual_data_hasher.update(&chunk); + upload_tx.send(Ok(Bytes::from(chunk))).unwrap(); + + loop { + let progress = expect_in_progress(writer.status(boot_disk)); + + // If we lost the race, the status is still what it was + // previously; sleep briefly and check again. + if progress + == (BootDiskOsWriteProgress::ReceivingUploadedImage { + bytes_received: prev_bytes_received, + }) + { + tokio::time::sleep(Duration::from_millis(50)).await; + continue; + } + + // It's not the old status; it should be exactly the new + // status. If it is, update our count and break out of this + // inner loop. + assert_eq!( + progress, + BootDiskOsWriteProgress::ReceivingUploadedImage { + bytes_received: prev_bytes_received + data_len + } + ); + prev_bytes_received += data_len; + println!("chunk {i}: got {progress:?}"); + break; + } + } + + // Close the channel; `writer` should recognize the upload is + // complete, then realize there's a hash mismatch and fail the + // request. + mem::drop(upload_tx); + + // We expect to see an upload hash mismatch error with these hex + // strings. + let expected_hash = hex::encode(claimed_sha3_digest); + let got_hash = hex::encode(actual_data_hasher.finalize()); + + let start_update_result = start_update_task.await.unwrap(); + let error = start_update_result.unwrap_err(); + match &*error { + BootDiskOsWriteError::UploadedImageHashMismatch { + expected, + got, + } => { + assert_eq!(*got, got_hash); + assert_eq!(*expected, expected_hash); + } + _ => panic!("unexpected error {error:?}"), + } + + // The same error should be present in the current update status. + let expected_error = + BootDiskOsWriteError::UploadedImageHashMismatch { + expected: expected_hash.clone(), + got: got_hash.clone(), + }; + let status = writer.status(boot_disk); + match status { + BootDiskOsWriteStatus::Failed { message, .. 
} => { + assert_eq!( + message, + DisplayErrorChain::new(&expected_error).to_string() + ); + } + BootDiskOsWriteStatus::NoUpdateStarted + | BootDiskOsWriteStatus::InProgress { .. } + | BootDiskOsWriteStatus::Complete { .. } => { + panic!("unexpected status {status:?}") + } + } + }) + .await + .unwrap(); + + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn boot_disk_os_writer_writes_data_to_disk() { + let logctx = test_setup_log("boot_disk_os_writer_writes_data_to_disk"); + + // generate a small, random "OS image" consisting of 10 "blocks" + let num_data_blocks = 10; + let data_len = num_data_blocks * InMemoryDiskInterface::BLOCK_SIZE; + let mut data = vec![0; data_len]; + rand::thread_rng().fill_bytes(&mut data); + let data_hash = Sha3_256::digest(&data); + + // generate a disk writer with a 0-permit semaphore; we'll inject + // permits in the main loop below to force single-stepping through + // writing the data + let inject_disk_interface = + InMemoryDiskInterface::new(Semaphore::new(0)); + let shared_semaphore = Arc::clone(&inject_disk_interface.semaphore); + + let writer = Arc::new(BootDiskOsWriter::new(&logctx.log)); + let boot_disk = M2Slot::A; + let disk_devfs_path = "/unit-test/disk"; + + writer + .start_update_impl( + boot_disk, + disk_devfs_path.into(), + Uuid::new_v4(), + data_hash.into(), + stream::once(future::ready(Ok(Bytes::from(data.clone())))), + inject_disk_interface, + ) + .await + .unwrap(); + + // Run the rest of the test under a timeout to catch any incorrect + // assumptions that result in a hang. + tokio::time::timeout(TEST_TIMEOUT, async move { + // Wait until `writer` has copied our data into a temp file + loop { + let progress = expect_in_progress(writer.status(boot_disk)); + match progress { + BootDiskOsWriteProgress::ReceivingUploadedImage { + bytes_received, + } => { + if bytes_received == data.len() { + break; + } else { + println!( + "got status with {} bytes received", + bytes_received + ); + } + } + _ => panic!("unexpected progress {progress:?}"), + } + } + + for i in 0..num_data_blocks { + // Add one permit to our shared semaphore, allowing one block of + // data to be written to the "disk". + shared_semaphore.add_permits(1); + + // Did we just release the write of the final block? If so, + // break; we'll wait for completion below. + if i + 1 == num_data_blocks { + break; + } + + // Wait until we see the status we expect for a not-yet-last + // block (i.e., that the disk is still being written). + loop { + let progress = expect_in_progress(writer.status(boot_disk)); + match progress { + BootDiskOsWriteProgress::WritingImageToDisk { + bytes_written, + } if (i + 1) * InMemoryDiskInterface::BLOCK_SIZE + == bytes_written => + { + println!("saw expected progress for block {i}"); + break; + } + _ => { + // This is not an error: we could still be in + // `ReceivingUploadedImage` or the previous + // block's `WritingImageToDisk` + println!("saw irrelevant progress {progress:?}"); + tokio::time::sleep(Duration::from_millis(50)).await; + continue; + } + } + } + } + + // The last block is being or has been written, and after that the + // writer will reread it to validate the hash. We won't bother + // repeating the same machinery to check each step of that process; + // we'll just wait for the eventual successful completion. + loop { + let status = writer.status(boot_disk); + match status { + BootDiskOsWriteStatus::Complete { .. } => break, + BootDiskOsWriteStatus::InProgress { .. 
} => { + println!("saw irrelevant progress {status:?}"); + tokio::time::sleep(Duration::from_millis(50)).await; + continue; + } + BootDiskOsWriteStatus::NoUpdateStarted + | BootDiskOsWriteStatus::Failed { .. } => { + panic!("unexpected status {status:?}") + } + } + } + }) + .await + .unwrap(); + + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn boot_disk_os_writer_fails_if_reading_from_disk_doesnt_match() { + let logctx = test_setup_log( + "boot_disk_os_writer_fails_if_reading_from_disk_doesnt_match", + ); + + // generate a small, random "OS image" consisting of 10 "blocks" + let num_data_blocks = 10; + let data_len = num_data_blocks * InMemoryDiskInterface::BLOCK_SIZE; + let mut data = vec![0; data_len]; + rand::thread_rng().fill_bytes(&mut data); + let original_data_hash = Sha3_256::digest(&data); + + // generate a disk writer with (effectively) unlimited semaphore + // permits, since we don't need to throttle the "disk writing" + let inject_disk_interface = + InMemoryDiskInterface::new(Semaphore::new(Semaphore::MAX_PERMITS)); + + let writer = Arc::new(BootDiskOsWriter::new(&logctx.log)); + let boot_disk = M2Slot::A; + let disk_devfs_path = "/unit-test/disk"; + + // copy the data and corrupt it, then stage this in + // `inject_disk_interface` so that it returns this corrupted data when + // "reading" the disk + let mut bad_data = data.clone(); + bad_data[0] ^= 1; // bit flip + let bad_data_hash = Sha3_256::digest(&bad_data); + inject_disk_interface.finalized_writes.lock().unwrap().push( + InMemoryDiskContents { + path: disk_devfs_path.into(), + data: bad_data, + }, + ); + + writer + .start_update_impl( + boot_disk, + disk_devfs_path.into(), + Uuid::new_v4(), + original_data_hash.into(), + stream::once(future::ready(Ok(Bytes::from(data.clone())))), + inject_disk_interface, + ) + .await + .unwrap(); + + // We expect the update to eventually fail; wait for it to do so. + let failure_message = tokio::time::timeout(TEST_TIMEOUT, async move { + loop { + let status = writer.status(boot_disk); + match status { + BootDiskOsWriteStatus::Failed { message, .. } => { + return message; + } + BootDiskOsWriteStatus::InProgress { .. } => { + println!("saw irrelevant status {status:?}"); + tokio::time::sleep(Duration::from_millis(50)).await; + continue; + } + BootDiskOsWriteStatus::Complete { .. } + | BootDiskOsWriteStatus::NoUpdateStarted => { + panic!("unexpected status {status:?}"); + } + } + } + }) + .await + .unwrap(); + + // Confirm that the update fails for the reason we expect: when + // re-reading what had been written to disk, it got our corrupt data + // (which hashes to `bad_data_hash`) instead of the expected + // `original_data_hash`. 
+ let expected_error = BootDiskOsWriteError::WrittenImageHashMismatch { + path: disk_devfs_path.into(), + expected: hex::encode(&original_data_hash), + got: hex::encode(&bad_data_hash), + }; + + assert_eq!( + failure_message, + DisplayErrorChain::new(&expected_error).to_string() + ); + + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn boot_disk_os_writer_can_update_both_slots_simultaneously() { + let logctx = test_setup_log( + "boot_disk_os_writer_can_update_both_slots_simultaneously", + ); + + // generate two small, random "OS image"s consisting of 10 "blocks" each + let num_data_blocks = 10; + let data_len = num_data_blocks * InMemoryDiskInterface::BLOCK_SIZE; + let mut data_a = vec![0; data_len]; + let mut data_b = vec![0; data_len]; + rand::thread_rng().fill_bytes(&mut data_a); + rand::thread_rng().fill_bytes(&mut data_b); + let data_hash_a = Sha3_256::digest(&data_a); + let data_hash_b = Sha3_256::digest(&data_b); + + // generate a disk writer with no semaphore permits so the updates block + // until we get a chance to start both of them + let inject_disk_interface = + InMemoryDiskInterface::new(Semaphore::new(0)); + let shared_semaphore = Arc::clone(&inject_disk_interface.semaphore); + + let writer = Arc::new(BootDiskOsWriter::new(&logctx.log)); + let disk_devfs_path_a = "/unit-test/disk/a"; + let disk_devfs_path_b = "/unit-test/disk/b"; + + let update_id_a = Uuid::new_v4(); + let update_id_b = Uuid::new_v4(); + + writer + .start_update_impl( + M2Slot::A, + disk_devfs_path_a.into(), + update_id_a, + data_hash_a.into(), + stream::once(future::ready(Ok(Bytes::from(data_a.clone())))), + inject_disk_interface.clone(), + ) + .await + .unwrap(); + + writer + .start_update_impl( + M2Slot::B, + disk_devfs_path_b.into(), + update_id_b, + data_hash_b.into(), + stream::once(future::ready(Ok(Bytes::from(data_b.clone())))), + inject_disk_interface.clone(), + ) + .await + .unwrap(); + + // Both updates have successfully started; unblock the "disks". + shared_semaphore.add_permits(Semaphore::MAX_PERMITS); + + // Wait for both updates to complete successfully. + for boot_disk in [M2Slot::A, M2Slot::B] { + tokio::time::timeout(TEST_TIMEOUT, async { + loop { + let status = writer.status(boot_disk); + match status { + BootDiskOsWriteStatus::InProgress { .. } => { + println!("saw irrelevant status {status:?}"); + tokio::time::sleep(Duration::from_millis(50)).await; + continue; + } + BootDiskOsWriteStatus::Complete { update_id } => { + match boot_disk { + M2Slot::A => assert_eq!(update_id, update_id_a), + M2Slot::B => assert_eq!(update_id, update_id_b), + } + break; + } + BootDiskOsWriteStatus::Failed { .. } + | BootDiskOsWriteStatus::NoUpdateStarted => { + panic!("unexpected status {status:?}"); + } + } + } + }) + .await + .unwrap(); + } + + // Ensure each "disk" saw the expected contents. 
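+        // (The two updates raced, so the order of `finalized_writes` isn't
+        // deterministic; check membership rather than exact order.)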
+ let expected_disks = [ + InMemoryDiskContents { + path: disk_devfs_path_a.into(), + data: data_a, + }, + InMemoryDiskContents { + path: disk_devfs_path_b.into(), + data: data_b, + }, + ]; + let written_disks = + inject_disk_interface.finalized_writes.lock().unwrap(); + assert_eq!(written_disks.len(), expected_disks.len()); + for expected in expected_disks { + assert!( + written_disks.contains(&expected), + "written disks missing expected contents for {}", + expected.path, + ); + } + + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn boot_disk_os_writer_rejects_new_updates_while_old_running() { + let logctx = test_setup_log( + "boot_disk_os_writer_rejects_new_updates_while_old_running", + ); + + // generate two small, random "OS image"s consisting of 10 "blocks" each + let num_data_blocks = 10; + let data_len = num_data_blocks * InMemoryDiskInterface::BLOCK_SIZE; + let mut data_a = vec![0; data_len]; + let mut data_b = vec![0; data_len]; + rand::thread_rng().fill_bytes(&mut data_a); + rand::thread_rng().fill_bytes(&mut data_b); + let data_hash_a = Sha3_256::digest(&data_a); + let data_hash_b = Sha3_256::digest(&data_b); + + // generate a disk writer with no semaphore permits so the updates block + // until we get a chance to (try to) start both of them + let inject_disk_interface = + InMemoryDiskInterface::new(Semaphore::new(0)); + let shared_semaphore = Arc::clone(&inject_disk_interface.semaphore); + + let writer = Arc::new(BootDiskOsWriter::new(&logctx.log)); + let disk_devfs_path = "/unit-test/disk"; + let boot_disk = M2Slot::A; + + let update_id_a = Uuid::new_v4(); + let update_id_b = Uuid::new_v4(); + + writer + .start_update_impl( + boot_disk, + disk_devfs_path.into(), + update_id_a, + data_hash_a.into(), + stream::once(future::ready(Ok(Bytes::from(data_a.clone())))), + inject_disk_interface.clone(), + ) + .await + .unwrap(); + + let error = writer + .start_update_impl( + boot_disk, + disk_devfs_path.into(), + update_id_b, + data_hash_b.into(), + stream::once(future::ready(Ok(Bytes::from(data_b.clone())))), + inject_disk_interface.clone(), + ) + .await + .unwrap_err(); + match &*error { + BootDiskOsWriteError::UpdateRunning(running_id) => { + assert_eq!(*running_id, update_id_a); + } + _ => panic!("unexpected error {error}"), + } + + // Both update attempts started; unblock the "disk". + shared_semaphore.add_permits(Semaphore::MAX_PERMITS); + + // Wait for the first update to complete successfully. + tokio::time::timeout(TEST_TIMEOUT, async { + loop { + let status = writer.status(boot_disk); + match status { + BootDiskOsWriteStatus::InProgress { .. } => { + println!("saw irrelevant status {status:?}"); + tokio::time::sleep(Duration::from_millis(50)).await; + continue; + } + BootDiskOsWriteStatus::Complete { update_id } => { + assert_eq!(update_id, update_id_a); + break; + } + BootDiskOsWriteStatus::Failed { .. } + | BootDiskOsWriteStatus::NoUpdateStarted => { + panic!("unexpected status {status:?}"); + } + } + } + }) + .await + .unwrap(); + + // Ensure we wrote the contents of the first update. 
+ let expected_disks = [InMemoryDiskContents { + path: disk_devfs_path.into(), + data: data_a, + }]; + let written_disks = + inject_disk_interface.finalized_writes.lock().unwrap(); + assert_eq!(*written_disks, expected_disks); + + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn boot_disk_os_writer_rejects_new_updates_while_old_completed() { + let logctx = test_setup_log( + "boot_disk_os_writer_rejects_new_updates_while_old_completed", + ); + + // generate two small, random "OS image"s consisting of 10 "blocks" each + let num_data_blocks = 10; + let data_len = num_data_blocks * InMemoryDiskInterface::BLOCK_SIZE; + let mut data_a = vec![0; data_len]; + let mut data_b = vec![0; data_len]; + rand::thread_rng().fill_bytes(&mut data_a); + rand::thread_rng().fill_bytes(&mut data_b); + let data_hash_a = Sha3_256::digest(&data_a); + let data_hash_b = Sha3_256::digest(&data_b); + + // generate a disk writer with effectively infinite semaphore permits + let inject_disk_interface = + InMemoryDiskInterface::new(Semaphore::new(Semaphore::MAX_PERMITS)); + + let writer = Arc::new(BootDiskOsWriter::new(&logctx.log)); + let disk_devfs_path = "/unit-test/disk"; + let boot_disk = M2Slot::A; + + let update_id_a = Uuid::new_v4(); + let update_id_b = Uuid::new_v4(); + + writer + .start_update_impl( + boot_disk, + disk_devfs_path.into(), + update_id_a, + data_hash_a.into(), + stream::once(future::ready(Ok(Bytes::from(data_a.clone())))), + inject_disk_interface.clone(), + ) + .await + .unwrap(); + + // Wait for the first update to complete successfully. + tokio::time::timeout(TEST_TIMEOUT, async { + loop { + let status = writer.status(boot_disk); + match status { + BootDiskOsWriteStatus::InProgress { update_id, .. } => { + assert_eq!(update_id, update_id_a); + println!("saw irrelevant status {status:?}"); + tokio::time::sleep(Duration::from_millis(50)).await; + continue; + } + BootDiskOsWriteStatus::Complete { update_id } => { + assert_eq!(update_id, update_id_a); + break; + } + BootDiskOsWriteStatus::Failed { .. } + | BootDiskOsWriteStatus::NoUpdateStarted => { + panic!("unexpected status {status:?}"); + } + } + } + }) + .await + .unwrap(); + + // Ensure we wrote the contents of the first update. + let expected_disks = [InMemoryDiskContents { + path: disk_devfs_path.into(), + data: data_a, + }]; + { + let mut written_disks = + inject_disk_interface.finalized_writes.lock().unwrap(); + assert_eq!(*written_disks, expected_disks); + written_disks.clear(); + } + + // Check that we get the expected error when attempting to start another + // update to this same disk. + let expected_error = + BootDiskOsWriteError::CannotStartWithoutClearingPreviousStatus( + update_id_a, + ); + let error = writer + .start_update_impl( + boot_disk, + disk_devfs_path.into(), + update_id_b, + data_hash_b.into(), + stream::once(future::ready(Ok(Bytes::from(data_b.clone())))), + inject_disk_interface.clone(), + ) + .await + .unwrap_err(); + assert_eq!(error.to_string(), expected_error.to_string()); + + // We should not be able to clear the status with an incorrect update + // ID. + let expected_error = + BootDiskOsWriteError::WrongUpdateIdClearingStatus(update_id_a); + let error = + writer.clear_terminal_status(boot_disk, update_id_b).unwrap_err(); + assert_eq!(error.to_string(), expected_error.to_string()); + + // We should be able to clear the status with the correct update ID, and + // then start the new one. 
+ writer.clear_terminal_status(boot_disk, update_id_a).unwrap(); + writer + .start_update_impl( + boot_disk, + disk_devfs_path.into(), + update_id_b, + data_hash_b.into(), + stream::once(future::ready(Ok(Bytes::from(data_b.clone())))), + inject_disk_interface.clone(), + ) + .await + .unwrap(); + + // Wait for the second update to complete successfully. + tokio::time::timeout(TEST_TIMEOUT, async { + loop { + let status = writer.status(boot_disk); + match status { + BootDiskOsWriteStatus::InProgress { update_id, .. } => { + assert_eq!(update_id, update_id_b); + println!("saw irrelevant status {status:?}"); + tokio::time::sleep(Duration::from_millis(50)).await; + continue; + } + BootDiskOsWriteStatus::Complete { update_id } => { + assert_eq!(update_id, update_id_b); + break; + } + BootDiskOsWriteStatus::Failed { .. } + | BootDiskOsWriteStatus::NoUpdateStarted => { + panic!("unexpected status {status:?}"); + } + } + } + }) + .await + .unwrap(); + + // Ensure we wrote the contents of the second update. + let expected_disks = [InMemoryDiskContents { + path: disk_devfs_path.into(), + data: data_b, + }]; + { + let mut written_disks = + inject_disk_interface.finalized_writes.lock().unwrap(); + assert_eq!(*written_disks, expected_disks); + written_disks.clear(); + } + + logctx.cleanup_successful(); + } +} diff --git a/sled-agent/src/bootstrap/early_networking.rs b/sled-agent/src/bootstrap/early_networking.rs index bec309dc27..75958a2f37 100644 --- a/sled-agent/src/bootstrap/early_networking.rs +++ b/sled-agent/src/bootstrap/early_networking.rs @@ -22,8 +22,8 @@ use mg_admin_client::Client as MgdClient; use omicron_common::address::{Ipv6Subnet, MGD_PORT, MGS_PORT}; use omicron_common::address::{DDMD_PORT, DENDRITE_PORT}; use omicron_common::api::internal::shared::{ - PortConfigV1, PortFec, PortSpeed, RackNetworkConfig, RackNetworkConfigV1, - SwitchLocation, UplinkConfig, + BgpConfig, PortConfigV1, PortFec, PortSpeed, RackNetworkConfig, + RackNetworkConfigV1, SwitchLocation, UplinkConfig, }; use omicron_common::backoff::{ retry_notify, retry_policy_local, BackoffError, ExponentialBackoff, @@ -472,23 +472,37 @@ impl<'a> EarlyNetworkSetup<'a> { )) })?; + let mut config: Option = None; + let mut bgp_peer_configs = HashMap::>::new(); + // Iterate through ports and apply BGP config. for port in &our_ports { - let mut bgp_peer_configs = Vec::new(); for peer in &port.bgp_peers { - let config = rack_network_config - .bgp - .iter() - .find(|x| x.asn == peer.asn) - .ok_or(EarlyNetworkSetupError::BgpConfigurationError( - format!( - "asn {} referenced by peer undefined", - peer.asn - ), - ))?; + if let Some(config) = &config { + if peer.asn != config.asn { + return Err(EarlyNetworkSetupError::BadConfig( + "only one ASN per switch is supported".into(), + )); + } + } else { + config = Some( + rack_network_config + .bgp + .iter() + .find(|x| x.asn == peer.asn) + .ok_or( + EarlyNetworkSetupError::BgpConfigurationError( + format!( + "asn {} referenced by peer undefined", + peer.asn + ), + ), + )? 
+                            .clone(),
+                    );
+                }
 
                 let bpc = BgpPeerConfig {
-                    asn: peer.asn,
                     name: format!("{}", peer.addr),
                     host: format!("{}:179", peer.addr),
                     hold_time: peer.hold_time.unwrap_or(6),
@@ -497,30 +511,41 @@ impl<'a> EarlyNetworkSetup<'a> {
                     connect_retry: peer.connect_retry.unwrap_or(3),
                     keepalive: peer.keepalive.unwrap_or(2),
                     resolution: BGP_SESSION_RESOLUTION,
-                    originate: config
-                        .originate
-                        .iter()
-                        .map(|x| Prefix4 { length: x.prefix(), value: x.ip() })
-                        .collect(),
+                    passive: false,
                 };
-                bgp_peer_configs.push(bpc);
+                match bgp_peer_configs.get_mut(&port.port) {
+                    Some(peers) => {
+                        peers.push(bpc);
+                    }
+                    None => {
+                        bgp_peer_configs.insert(port.port.clone(), vec![bpc]);
+                    }
+                }
             }
+        }
 
-            if bgp_peer_configs.is_empty() {
-                continue;
+        if !bgp_peer_configs.is_empty() {
+            if let Some(config) = &config {
+                mgd.inner
+                    .bgp_apply(&ApplyRequest {
+                        asn: config.asn,
+                        peers: bgp_peer_configs,
+                        originate: config
+                            .originate
+                            .iter()
+                            .map(|x| Prefix4 {
+                                length: x.prefix(),
+                                value: x.ip(),
+                            })
+                            .collect(),
+                    })
+                    .await
+                    .map_err(|e| {
+                        EarlyNetworkSetupError::BgpConfigurationError(format!(
+                            "BGP peer configuration failed: {e}",
+                        ))
+                    })?;
             }
-
-            mgd.inner
-                .bgp_apply(&ApplyRequest {
-                    peer_group: port.port.clone(),
-                    peers: bgp_peer_configs,
-                })
-                .await
-                .map_err(|e| {
-                    EarlyNetworkSetupError::BgpConfigurationError(format!(
-                        "BGP peer configuration failed: {e}",
-                    ))
-                })?;
         }
 
         Ok(our_ports)
@@ -548,23 +573,20 @@ impl<'a> EarlyNetworkSetup<'a> {
 
         let mut addrs = Vec::new();
         for a in &port_config.addresses {
+            // TODO We're discarding the `uplink_cidr.prefix()` here and only using
+            // the IP address; at some point we probably need to give the full CIDR
+            // to dendrite?
             addrs.push(a.ip());
         }
 
-        // TODO We're discarding the `uplink_cidr.prefix()` here and only using
-        // the IP address; at some point we probably need to give the full CIDR
-        // to dendrite?
         let link_settings = LinkSettings {
-            // TODO Allow user to configure link properties
-            // https://github.com/oxidecomputer/omicron/issues/3061
             params: LinkCreate {
-                autoneg: false,
-                kr: false,
+                autoneg: port_config.autoneg,
+                kr: false, //NOTE: kr does not apply to user configurable links.
                 fec: convert_fec(&port_config.uplink_port_fec),
                 speed: convert_speed(&port_config.uplink_port_speed),
                 lane: Some(LinkId(0)),
             },
-            //addrs: vec![addr],
             addrs,
         };
         dpd_port_settings.links.insert(link_id.to_string(), link_settings);
@@ -685,6 +707,65 @@ pub struct EarlyNetworkConfig {
     pub body: EarlyNetworkConfigBody,
 }
 
+impl EarlyNetworkConfig {
+    // Note: This currently only converts between v0 and v1 or deserializes v1 of
+    // `EarlyNetworkConfig`.
+    pub fn deserialize_bootstore_config(
+        log: &Logger,
+        config: &bootstore::NetworkConfig,
+    ) -> Result<Self, serde_json::Error> {
+        // Try to deserialize the latest version of the data structure (v1). If
+        // that succeeds we are done.
+        let v1_error =
+            match serde_json::from_slice::<EarlyNetworkConfig>(&config.blob) {
+                Ok(val) => return Ok(val),
+                Err(error) => {
+                    // Log this error and continue trying to deserialize older
+                    // versions.
+                    warn!(
+                        log,
+                        "Failed to deserialize EarlyNetworkConfig \
+                        as v1, trying next as v0: {}",
+                        error,
+                    );
+                    error
+                }
+            };
+
+        match serde_json::from_slice::<EarlyNetworkConfigV0>(&config.blob) {
+            Ok(val) => {
+                // Convert from v0 to v1
+                return Ok(EarlyNetworkConfig {
+                    generation: val.generation,
+                    schema_version: 1,
+                    body: EarlyNetworkConfigBody {
+                        ntp_servers: val.ntp_servers,
+                        rack_network_config: val.rack_network_config.map(
+                            |v0_config| {
+                                RackNetworkConfigV0::to_v1(
+                                    val.rack_subnet,
+                                    v0_config,
+                                )
+                            },
+                        ),
+                    },
+                });
+            }
+            Err(error) => {
+                // Log this error.
+                warn!(
+                    log,
+                    "Failed to deserialize EarlyNetworkConfig as v0: {}", error,
+                );
+            }
+        };
+
+        // Return the v1 error preferentially over the v0 error as it's more
+        // likely to be useful.
+        Err(v1_error)
+    }
+}
+
 /// This is the actual configuration of EarlyNetworking.
 ///
 /// We nest it below the "header" of `generation` and `schema_version` so that
@@ -714,39 +795,6 @@ impl From<EarlyNetworkConfig> for bootstore::NetworkConfig {
     }
 }
 
-// Note: This currently only converts between v0 and v1 or deserializes v1 of
-// `EarlyNetworkConfig`.
-impl TryFrom<bootstore::NetworkConfig> for EarlyNetworkConfig {
-    type Error = serde_json::Error;
-
-    fn try_from(
-        value: bootstore::NetworkConfig,
-    ) -> std::result::Result<Self, Self::Error> {
-        // Try to deserialize the latest version of the data structure (v1). If
-        // that succeeds we are done.
-        if let Ok(val) =
-            serde_json::from_slice::<EarlyNetworkConfig>(&value.blob)
-        {
-            return Ok(val);
-        }
-
-        // We don't have the latest version. Try to deserialize v0 and then
-        // convert it to the latest version.
-        let v0 = serde_json::from_slice::<EarlyNetworkConfigV0>(&value.blob)?;
-
-        Ok(EarlyNetworkConfig {
-            generation: v0.generation,
-            schema_version: 1,
-            body: EarlyNetworkConfigBody {
-                ntp_servers: v0.ntp_servers,
-                rack_network_config: v0.rack_network_config.map(|v0_config| {
-                    RackNetworkConfigV0::to_v1(v0.rack_subnet, v0_config)
-                }),
-            },
-        })
-    }
-}
-
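One consequence of the new fallback order is easy to state: a blob already in the v1 format round-trips unchanged, and the v0 upgrade path is only attempted after v1 deserialization fails. A minimal sketch of that property (illustrative values; assumes a test `logctx` like the one in the tests below, and relies on the `Serialize`/`PartialEq` behavior these types already exhibit elsewhere in this file):

let v1 = EarlyNetworkConfig {
    generation: 2,
    schema_version: 1,
    body: EarlyNetworkConfigBody {
        ntp_servers: vec!["ntp.example.com".to_string()],
        rack_network_config: None,
    },
};
let stored = bootstore::NetworkConfig {
    generation: 2,
    blob: serde_json::to_vec(&v1).unwrap(),
};
// Already v1: returned as-is, with no v0 conversion attempted.
let parsed =
    EarlyNetworkConfig::deserialize_bootstore_config(&logctx.log, &stored)
        .unwrap();
assert_eq!(parsed, v1);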
 /// Deprecated, use `RackNetworkConfig` instead. Cannot actually deprecate due to
 /// <https://github.com/serde-rs/serde/issues/2195>
 ///
@@ -818,9 +866,13 @@ fn convert_fec(fec: &PortFec) -> dpd_client::types::PortFec {
 mod tests {
     use super::*;
     use omicron_common::api::internal::shared::RouteConfig;
+    use omicron_test_utils::dev::test_setup_log;
 
     #[test]
     fn serialized_early_network_config_v0_to_v1_conversion() {
+        let logctx = test_setup_log(
+            "serialized_early_network_config_v0_to_v1_conversion",
+        );
         let v0 = EarlyNetworkConfigV0 {
             generation: 1,
             rack_subnet: Ipv6Addr::UNSPECIFIED,
@@ -844,7 +896,11 @@ mod tests {
         let bootstore_conf =
             bootstore::NetworkConfig { generation: 1, blob: v0_serialized };
 
-        let v1 = EarlyNetworkConfig::try_from(bootstore_conf).unwrap();
+        let v1 = EarlyNetworkConfig::deserialize_bootstore_config(
+            &logctx.log,
+            &bootstore_conf,
+        )
+        .unwrap();
         let v0_rack_network_config = v0.rack_network_config.unwrap();
         let uplink = v0_rack_network_config.uplinks[0].clone();
         let expected = EarlyNetworkConfig {
@@ -866,6 +922,7 @@ mod tests {
                     port: uplink.uplink_port,
                     uplink_port_speed: uplink.uplink_port_speed,
                     uplink_port_fec: uplink.uplink_port_fec,
+                    autoneg: false,
                     bgp_peers: vec![],
                 }],
                 bgp: vec![],
@@ -874,5 +931,7 @@ mod tests {
         };
 
         assert_eq!(expected, v1);
+
+        logctx.cleanup_successful();
     }
 }
diff --git a/sled-agent/src/bootstrap/params.rs b/sled-agent/src/bootstrap/params.rs
index ab85915dc1..79189e7f49 100644
--- a/sled-agent/src/bootstrap/params.rs
+++ b/sled-agent/src/bootstrap/params.rs
@@ -347,6 +347,17 @@ pub(super) mod version {
     pub(crate) const V1: u32 = 1;
 }
 
+#[cfg(test)]
+pub fn test_config() -> RackInitializeRequest {
+    let manifest = std::env::var("CARGO_MANIFEST_DIR")
+        .expect("Cannot access manifest directory");
+    let manifest = camino::Utf8PathBuf::from(manifest);
+    let path = manifest.join("../smf/sled-agent/non-gimlet/config-rss.toml");
+    let contents = std::fs::read_to_string(&path).unwrap();
+    toml::from_str(&contents)
+        .unwrap_or_else(|e| panic!("failed to parse {:?}: {}", &path, e))
+}
+
 #[cfg(test)]
 mod tests {
     use std::net::Ipv6Addr;
diff --git a/sled-agent/src/bootstrap/server.rs b/sled-agent/src/bootstrap/server.rs
index f4948de83b..999e4cc0c8 100644
--- a/sled-agent/src/bootstrap/server.rs
+++ b/sled-agent/src/bootstrap/server.rs
@@ -259,7 +259,7 @@ impl Server {
                 // we're responsible for, while continuing to handle hardware
                 // notifications. This cannot fail: we retry indefinitely until
                 // we're done loading services.
-                sled_agent.cold_boot_load_services().await;
+                sled_agent.load_services().await;
 
                 SledAgentState::ServerStarted(sled_agent_server)
             } else {
                 SledAgentState::Bootstrapping(
diff --git a/sled-agent/src/common/disk.rs b/sled-agent/src/common/disk.rs
index 57868937d0..54c56825eb 100644
--- a/sled-agent/src/common/disk.rs
+++ b/sled-agent/src/common/disk.rs
@@ -118,12 +118,10 @@ impl DiskStates {
                 | DiskState::ImportingFromBulkWrites
                 | DiskState::Destroyed
                 | DiskState::Faulted => {
-                    return Err(Error::InvalidRequest {
-                        message: format!(
-                            "cannot detach from {}",
-                            self.current.disk_state
-                        ),
-                    });
+                    return Err(Error::invalid_request(format!(
+                        "cannot detach from {}",
+                        self.current.disk_state
+                    )));
                 }
             };
         }
@@ -134,9 +132,9 @@ impl DiskStates {
             // (which is a no-op anyway).
DiskState::Attaching(id) | DiskState::Attached(id) => { if uuid != id { - return Err(Error::InvalidRequest { - message: "disk is already attached".to_string(), - }); + return Err(Error::invalid_request( + "disk is already attached", + )); } return Ok(None); } @@ -157,12 +155,10 @@ impl DiskStates { | DiskState::Detaching(_) | DiskState::Destroyed | DiskState::Faulted => { - return Err(Error::InvalidRequest { - message: format!( - "cannot attach from {}", - self.current.disk_state - ), - }); + return Err(Error::invalid_request(format!( + "cannot attach from {}", + self.current.disk_state + ))); } } } diff --git a/sled-agent/src/config.rs b/sled-agent/src/config.rs index a596cf83db..058f343e2a 100644 --- a/sled-agent/src/config.rs +++ b/sled-agent/src/config.rs @@ -6,6 +6,7 @@ use crate::updates::ConfigUpdates; use camino::{Utf8Path, Utf8PathBuf}; +use dropshot::ConfigDropshot; use dropshot::ConfigLogging; use illumos_utils::dladm::Dladm; use illumos_utils::dladm::FindPhysicalLinkError; @@ -44,6 +45,11 @@ pub struct SoftPortConfig { #[derive(Clone, Debug, Deserialize)] #[serde(deny_unknown_fields)] pub struct Config { + /// Configuration for the sled agent dropshot server + /// + /// If the `bind_address` is set, it will be ignored. The remaining fields + /// will be respected. + pub dropshot: ConfigDropshot, /// Configuration for the sled agent debug log pub log: ConfigLogging, /// The sled's mode of operation (auto detect or force gimlet/scrimlet). diff --git a/sled-agent/src/http_entrypoints.rs b/sled-agent/src/http_entrypoints.rs index 2d0e2c4001..8c8a5f2a03 100644 --- a/sled-agent/src/http_entrypoints.rs +++ b/sled-agent/src/http_entrypoints.rs @@ -10,7 +10,7 @@ use crate::bootstrap::params::AddSledRequest; use crate::params::{ CleanupContextUpdate, DiskEnsureBody, InstanceEnsureBody, InstancePutMigrationIdsBody, InstancePutStateBody, - InstancePutStateResponse, InstanceUnregisterResponse, ServiceEnsureBody, + InstancePutStateResponse, InstanceUnregisterResponse, OmicronZonesConfig, SledRole, TimeSync, VpcFirewallRulesEnsureBody, ZoneBundleId, ZoneBundleMetadata, Zpool, }; @@ -18,14 +18,17 @@ use crate::sled_agent::Error as SledAgentError; use crate::zone_bundle; use bootstore::schemes::v0::NetworkConfig; use camino::Utf8PathBuf; +use display_error_chain::DisplayErrorChain; use dropshot::{ endpoint, ApiDescription, FreeformBody, HttpError, HttpResponseCreated, HttpResponseDeleted, HttpResponseHeaders, HttpResponseOk, - HttpResponseUpdatedNoContent, Path, Query, RequestContext, TypedBody, + HttpResponseUpdatedNoContent, Path, Query, RequestContext, StreamingBody, + TypedBody, }; use illumos_utils::opte::params::{ DeleteVirtualNetworkInterfaceHost, SetVirtualNetworkInterfaceHost, }; +use installinator_common::M2Slot; use omicron_common::api::external::Error; use omicron_common::api::internal::nexus::{ DiskRuntimeState, SledInstanceState, UpdateArtifactId, @@ -36,6 +39,7 @@ use oximeter_producer::collect; use oximeter_producer::ProducerIdPathParams; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; +use sled_hardware::DiskVariant; use std::collections::BTreeMap; use uuid::Uuid; @@ -51,7 +55,8 @@ pub fn api() -> SledApiDescription { api.register(instance_put_state)?; api.register(instance_register)?; api.register(instance_unregister)?; - api.register(services_put)?; + api.register(omicron_zones_get)?; + api.register(omicron_zones_put)?; api.register(zones_list)?; api.register(zone_bundle_list)?; api.register(zone_bundle_list_all)?; @@ -74,6 +79,9 @@ pub fn api() -> 
SledApiDescription {
         api.register(write_network_bootstore_config)?;
         api.register(add_sled_to_initialized_rack)?;
         api.register(metrics_collect)?;
+        api.register(host_os_write_start)?;
+        api.register(host_os_write_status_get)?;
+        api.register(host_os_write_status_delete)?;
 
         Ok(())
     }
@@ -315,44 +323,28 @@ async fn zones_list(
     sa.zones_list().await.map(HttpResponseOk).map_err(HttpError::from)
 }
 
+#[endpoint {
+    method = GET,
+    path = "/omicron-zones",
+}]
+async fn omicron_zones_get(
+    rqctx: RequestContext<SledAgent>,
+) -> Result<HttpResponseOk<OmicronZonesConfig>, HttpError> {
+    let sa = rqctx.context();
+    Ok(HttpResponseOk(sa.omicron_zones_list().await?))
+}
+
 #[endpoint {
     method = PUT,
-    path = "/services",
+    path = "/omicron-zones",
 }]
-async fn services_put(
+async fn omicron_zones_put(
     rqctx: RequestContext<SledAgent>,
-    body: TypedBody<ServiceEnsureBody>,
+    body: TypedBody<OmicronZonesConfig>,
 ) -> Result<HttpResponseUpdatedNoContent, HttpError> {
-    let sa = rqctx.context().clone();
+    let sa = rqctx.context();
     let body_args = body.into_inner();
-
-    // Spawn a separate task to run `services_ensure`: cancellation of this
-    // endpoint's future (as might happen if the client abandons the request or
-    // times out) could result in leaving zones partially configured and the
-    // in-memory state of the service manager invalid. See:
-    // oxidecomputer/omicron#3098.
-    let handler = async move {
-        match sa.services_ensure(body_args).await {
-            Ok(()) => Ok(()),
-            Err(e) => {
-                // Log the error here to make things clear even if the client
-                // has already disconnected.
-                error!(sa.logger(), "failed to initialize services: {e}");
-                Err(e)
-            }
-        }
-    };
-    match tokio::spawn(handler).await {
-        Ok(result) => result.map_err(|e| Error::from(e))?,
-
-        Err(e) => {
-            return Err(HttpError::for_internal_error(format!(
-                "unexpected failure awaiting \"services_ensure\": {:#}",
-                e
-            )));
-        }
-    }
-
+    sa.omicron_zones_ensure(body_args).await?;
     Ok(HttpResponseUpdatedNoContent())
 }
 
@@ -674,7 +666,10 @@ async fn read_network_bootstore_config_cache(
     })?;
 
     let config = match config {
-        Some(config) => EarlyNetworkConfig::try_from(config).map_err(|e| {
+        Some(config) => EarlyNetworkConfig::deserialize_bootstore_config(
+            &rqctx.log, &config,
+        )
+        .map_err(|e| {
             HttpError::for_internal_error(format!(
                 "deserialize early network config: {e}"
             ))
@@ -767,3 +762,166 @@ async fn metrics_collect(
     let producer_id = path_params.into_inner().producer_id;
     collect(&sa.metrics_registry(), producer_id).await
 }
+
+#[derive(Clone, Copy, Debug, Deserialize, JsonSchema, Serialize)]
+pub struct BootDiskPathParams {
+    pub boot_disk: M2Slot,
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, JsonSchema, Serialize)]
+pub struct BootDiskUpdatePathParams {
+    pub boot_disk: M2Slot,
+    pub update_id: Uuid,
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, JsonSchema, Serialize)]
+pub struct BootDiskWriteStartQueryParams {
+    pub update_id: Uuid,
+    // TODO do we already have sha2-256 hashes of the OS images, and if so
+    // should we use that instead? Another option is to use the external API
+    // `Digest` type, although it predates `serde_human_bytes` so just stores
+    // the hash as a `String`.
+    #[serde(with = "serde_human_bytes::hex_array")]
+    #[schemars(schema_with = "omicron_common::hex_schema::<32>")]
+    pub sha3_256_digest: [u8; 32],
+}
+
+/// Write a new host OS image to the specified boot disk
+#[endpoint {
+    method = POST,
+    path = "/boot-disk/{boot_disk}/os/write",
+}]
+async fn host_os_write_start(
+    request_context: RequestContext<SledAgent>,
+    path_params: Path<BootDiskPathParams>,
+    query_params: Query<BootDiskWriteStartQueryParams>,
+    body: StreamingBody,
+) -> Result<HttpResponseUpdatedNoContent, HttpError> {
+    let sa = request_context.context();
+    let boot_disk = path_params.into_inner().boot_disk;
+
+    // Find our corresponding disk.
+    let maybe_disk_path =
+        sa.storage().get_latest_resources().await.disks().values().find_map(
+            |(disk, _pool)| {
+                // Synthetic disks panic if asked for their `slot()`, so filter
+                // them out first; additionally, filter out any non-M2 disks.
+                if disk.is_synthetic() || disk.variant() != DiskVariant::M2 {
+                    return None;
+                }
+
+                // Convert this M2 disk's slot to an M2Slot, and skip any that
+                // don't match the requested boot_disk.
+                let Ok(slot) = M2Slot::try_from(disk.slot()) else {
+                    return None;
+                };
+                if slot != boot_disk {
+                    return None;
+                }
+
+                let raw_devs_path = true;
+                Some(disk.boot_image_devfs_path(raw_devs_path))
+            },
+        );
+
+    let disk_path = match maybe_disk_path {
+        Some(Ok(path)) => path,
+        Some(Err(err)) => {
+            let message = format!(
+                "failed to find devfs path for {boot_disk:?}: {}",
+                DisplayErrorChain::new(&err)
+            );
+            return Err(HttpError {
+                status_code: http::StatusCode::SERVICE_UNAVAILABLE,
+                error_code: None,
+                external_message: message.clone(),
+                internal_message: message,
+            });
+        }
+        None => {
+            let message = format!("no disk found for slot {boot_disk:?}",);
+            return Err(HttpError {
+                status_code: http::StatusCode::SERVICE_UNAVAILABLE,
+                error_code: None,
+                external_message: message.clone(),
+                internal_message: message,
+            });
+        }
+    };
+
+    let BootDiskWriteStartQueryParams { update_id, sha3_256_digest } =
+        query_params.into_inner();
+    sa.boot_disk_os_writer()
+        .start_update(
+            boot_disk,
+            disk_path,
+            update_id,
+            sha3_256_digest,
+            body.into_stream(),
+        )
+        .await
+        .map_err(|err| HttpError::from(&*err))?;
+    Ok(HttpResponseUpdatedNoContent())
+}
+
+/// Current progress of an OS image being written to disk.
+#[derive(
+    Debug, Clone, Copy, PartialEq, Eq, Deserialize, JsonSchema, Serialize,
+)]
+#[serde(tag = "state", rename_all = "snake_case")]
+pub enum BootDiskOsWriteProgress {
+    /// The image is still being uploaded.
+    ReceivingUploadedImage { bytes_received: usize },
+    /// The image is being written to disk.
+    WritingImageToDisk { bytes_written: usize },
+    /// The image is being read back from disk for validation.
+    ValidatingWrittenImage { bytes_read: usize },
+}
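Putting the three endpoints together, a hypothetical update session looks like the transcript below. The values are invented; the URL paths come from the endpoint attributes in this file, the `progress` payload follows from `BootDiskOsWriteProgress` above, and the top-level shape from `BootDiskOsWriteStatus` just below (both are internally tagged per their serde attributes):

// POST   /boot-disk/{boot_disk}/os/write?update_id=<uuid>&sha3_256_digest=<64 hex chars>
//        (request body: the raw OS image, streamed)
// GET    /boot-disk/{boot_disk}/os/write/status
//        -> {"status": "in_progress", "update_id": "<uuid>",
//            "progress": {"state": "writing_image_to_disk", "bytes_written": 1048576}}
// GET    /boot-disk/{boot_disk}/os/write/status
//        -> {"status": "complete", "update_id": "<uuid>"}
// DELETE /boot-disk/{boot_disk}/os/write/status/<uuid>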
+/// Status of an update to a boot disk OS.
+#[derive(Debug, Clone, Deserialize, JsonSchema, Serialize)]
+#[serde(tag = "status", rename_all = "snake_case")]
+pub enum BootDiskOsWriteStatus {
+    /// No update has been started for this disk, or any previously-started
+    /// update has completed and had its status cleared.
+    NoUpdateStarted,
+    /// An update is currently running.
+    InProgress { update_id: Uuid, progress: BootDiskOsWriteProgress },
+    /// The most recent update completed successfully.
+    Complete { update_id: Uuid },
+    /// The most recent update failed.
+    Failed { update_id: Uuid, message: String },
+}
+
+/// Get the status of writing a new host OS
+#[endpoint {
+    method = GET,
+    path = "/boot-disk/{boot_disk}/os/write/status",
+}]
+async fn host_os_write_status_get(
+    request_context: RequestContext<SledAgent>,
+    path_params: Path<BootDiskPathParams>,
+) -> Result<HttpResponseOk<BootDiskOsWriteStatus>, HttpError> {
+    let sa = request_context.context();
+    let boot_disk = path_params.into_inner().boot_disk;
+    let status = sa.boot_disk_os_writer().status(boot_disk);
+    Ok(HttpResponseOk(status))
+}
+
+/// Clear the status of a completed write of a new host OS
+#[endpoint {
+    method = DELETE,
+    path = "/boot-disk/{boot_disk}/os/write/status/{update_id}",
+}]
+async fn host_os_write_status_delete(
+    request_context: RequestContext<SledAgent>,
+    path_params: Path<BootDiskUpdatePathParams>,
+) -> Result<HttpResponseUpdatedNoContent, HttpError> {
+    let sa = request_context.context();
+    let BootDiskUpdatePathParams { boot_disk, update_id } =
+        path_params.into_inner();
+    sa.boot_disk_os_writer()
+        .clear_terminal_status(boot_disk, update_id)
+        .map_err(|err| HttpError::from(&err))?;
+    Ok(HttpResponseUpdatedNoContent())
+}
diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs
index a6f022f5f2..057402c57a 100644
--- a/sled-agent/src/instance.rs
+++ b/sled-agent/src/instance.rs
@@ -26,7 +26,7 @@ use futures::lock::{Mutex, MutexGuard};
 use illumos_utils::dladm::Etherstub;
 use illumos_utils::link::VnicAllocator;
 use illumos_utils::opte::{DhcpCfg, PortManager};
-use illumos_utils::running_zone::{InstalledZone, RunningZone};
+use illumos_utils::running_zone::{RunningZone, ZoneBuilderFactory};
 use illumos_utils::svc::wait_for_service;
 use illumos_utils::zone::Zones;
 use illumos_utils::zone::PROPOLIS_ZONE_PREFIX;
@@ -208,7 +208,8 @@ struct InstanceInner {
     // Guest NIC and OPTE port information
     requested_nics: Vec<NetworkInterface>,
     source_nat: SourceNatConfig,
-    external_ips: Vec<IpAddr>,
+    ephemeral_ip: Option<IpAddr>,
+    floating_ips: Vec<IpAddr>,
     firewall_rules: Vec<VpcFirewallRule>,
     dhcp_config: DhcpCfg,
 
@@ -226,6 +227,9 @@ struct InstanceInner {
     // Storage resources
     storage: StorageHandle,
 
+    // Used to create propolis zones
+    zone_builder_factory: ZoneBuilderFactory,
+
     // Object used to collect zone bundles from this instance when terminated.
zone_bundler: ZoneBundler, @@ -611,6 +615,7 @@ impl Instance { port_manager, storage, zone_bundler, + zone_builder_factory, } = services; let mut dhcp_config = DhcpCfg { @@ -665,7 +670,8 @@ impl Instance { port_manager, requested_nics: hardware.nics, source_nat: hardware.source_nat, - external_ips: hardware.external_ips, + ephemeral_ip: hardware.ephemeral_ip, + floating_ips: hardware.floating_ips, firewall_rules: hardware.firewall_rules, dhcp_config, requested_disks: hardware.disks, @@ -678,6 +684,7 @@ impl Instance { running_state: None, nexus_client, storage, + zone_builder_factory, zone_bundler, instance_ticket: ticket, }; @@ -856,13 +863,11 @@ impl Instance { } return Err(Error::Transition( - omicron_common::api::external::Error::Conflict { - internal_message: format!( - "wrong instance state generation: expected {}, got {}", - inner.state.instance().gen, - old_runtime.gen - ), - }, + omicron_common::api::external::Error::conflict(format!( + "wrong instance state generation: expected {}, got {}", + inner.state.instance().gen, + old_runtime.gen + )), )); } @@ -877,15 +882,20 @@ impl Instance { // Create OPTE ports for the instance let mut opte_ports = Vec::with_capacity(inner.requested_nics.len()); for nic in inner.requested_nics.iter() { - let (snat, external_ips) = if nic.primary { - (Some(inner.source_nat), &inner.external_ips[..]) + let (snat, ephemeral_ip, floating_ips) = if nic.primary { + ( + Some(inner.source_nat), + inner.ephemeral_ip, + &inner.floating_ips[..], + ) } else { - (None, &[][..]) + (None, None, &[][..]) }; let port = inner.port_manager.create_port( nic, snat, - external_ips, + ephemeral_ip, + floating_ips, &inner.firewall_rules, inner.dhcp_config.clone(), )?; @@ -904,31 +914,28 @@ impl Instance { .choose(&mut rng) .ok_or_else(|| Error::U2NotFound)? 
.clone(); - let installed_zone = InstalledZone::install( - &inner.log, - &inner.vnic_allocator, - &root, - &["/opt/oxide".into()], - "propolis-server", - Some(*inner.propolis_id()), - // dataset= - &[], - // filesystems= - &[], - // data_links= - &[], - &[ + let installed_zone = inner + .zone_builder_factory + .builder() + .with_log(inner.log.clone()) + .with_underlay_vnic_allocator(&inner.vnic_allocator) + .with_zone_root_path(&root) + .with_zone_image_paths(&["/opt/oxide".into()]) + .with_zone_type("propolis-server") + .with_unique_name(*inner.propolis_id()) + .with_datasets(&[]) + .with_filesystems(&[]) + .with_data_links(&[]) + .with_devices(&[ zone::Device { name: "/dev/vmm/*".to_string() }, zone::Device { name: "/dev/vmmctl".to_string() }, zone::Device { name: "/dev/viona".to_string() }, - ], - opte_ports, - // physical_nic= - None, - vec![], - vec![], - ) - .await?; + ]) + .with_opte_ports(opte_ports) + .with_links(vec![]) + .with_limit_priv(vec![]) + .install() + .await?; let gateway = inner.port_manager.underlay_ip(); diff --git a/sled-agent/src/instance_manager.rs b/sled-agent/src/instance_manager.rs index fa40a876f0..c1b7e402a4 100644 --- a/sled-agent/src/instance_manager.rs +++ b/sled-agent/src/instance_manager.rs @@ -17,6 +17,7 @@ use crate::zone_bundle::ZoneBundler; use illumos_utils::dladm::Etherstub; use illumos_utils::link::VnicAllocator; use illumos_utils::opte::PortManager; +use illumos_utils::running_zone::ZoneBuilderFactory; use illumos_utils::vmm_reservoir; use omicron_common::api::external::ByteCount; use omicron_common::api::internal::nexus::InstanceRuntimeState; @@ -76,6 +77,7 @@ struct InstanceManagerInternal { port_manager: PortManager, storage: StorageHandle, zone_bundler: ZoneBundler, + zone_builder_factory: ZoneBuilderFactory, } pub(crate) struct InstanceManagerServices { @@ -84,6 +86,7 @@ pub(crate) struct InstanceManagerServices { pub port_manager: PortManager, pub storage: StorageHandle, pub zone_bundler: ZoneBundler, + pub zone_builder_factory: ZoneBuilderFactory, } /// All instances currently running on the sled. @@ -100,6 +103,7 @@ impl InstanceManager { port_manager: PortManager, storage: StorageHandle, zone_bundler: ZoneBundler, + zone_builder_factory: ZoneBuilderFactory, ) -> Result { Ok(InstanceManager { inner: Arc::new(InstanceManagerInternal { @@ -113,6 +117,7 @@ impl InstanceManager { port_manager, storage, zone_bundler, + zone_builder_factory, }), }) } @@ -266,6 +271,10 @@ impl InstanceManager { port_manager: self.inner.port_manager.clone(), storage: self.inner.storage.clone(), zone_bundler: self.inner.zone_bundler.clone(), + zone_builder_factory: self + .inner + .zone_builder_factory + .clone(), }; let state = crate::instance::InstanceInitialState { diff --git a/sled-agent/src/lib.rs b/sled-agent/src/lib.rs index 924fd4bd92..527b483ee8 100644 --- a/sled-agent/src/lib.rs +++ b/sled-agent/src/lib.rs @@ -18,6 +18,7 @@ pub mod common; // Modules for the non-simulated sled agent. 
mod backing_fs; +mod boot_disk_os_writer; pub mod bootstrap; pub mod config; pub(crate) mod dump_setup; @@ -32,7 +33,8 @@ pub mod params; mod profile; pub mod rack_setup; pub mod server; -mod services; +pub mod services; +pub mod services_migration; mod sled_agent; mod smf_helper; mod storage_monitor; diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs index b22bd84975..a7d91e2b93 100644 --- a/sled-agent/src/params.rs +++ b/sled-agent/src/params.rs @@ -9,6 +9,8 @@ pub use crate::zone_bundle::ZoneBundleMetadata; pub use illumos_utils::opte::params::DhcpConfig; pub use illumos_utils::opte::params::VpcFirewallRule; pub use illumos_utils::opte::params::VpcFirewallRulesEnsureBody; +use illumos_utils::zpool::ZpoolName; +use omicron_common::api::external::Generation; use omicron_common::api::internal::nexus::{ DiskRuntimeState, InstanceProperties, InstanceRuntimeState, SledInstanceState, VmmRuntimeState, @@ -18,13 +20,13 @@ use omicron_common::api::internal::shared::{ }; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use sled_hardware::Baseboard; pub use sled_hardware::DendriteAsic; +use sled_storage::dataset::DatasetKind; use sled_storage::dataset::DatasetName; use std::fmt::{Debug, Display, Formatter, Result as FormatResult}; use std::net::{IpAddr, Ipv6Addr, SocketAddr, SocketAddrV6}; +use std::str::FromStr; use std::time::Duration; -use thiserror::Error; use uuid::Uuid; /// Used to request a Disk state change @@ -68,7 +70,8 @@ pub struct InstanceHardware { pub source_nat: SourceNatConfig, /// Zero or more external IP addresses (either floating or ephemeral), /// provided to an instance to allow inbound connectivity. - pub external_ips: Vec, + pub ephemeral_ip: Option, + pub floating_ips: Vec, pub firewall_rules: Vec, pub dhcp_config: DhcpConfig, // TODO: replace `propolis_client::*` with locally-modeled request type @@ -229,252 +232,7 @@ pub struct Zpool { pub disk_type: DiskType, } -/// Describes service-specific parameters. -#[derive( - Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, -)] -#[serde(tag = "type", rename_all = "snake_case")] -pub enum ServiceType { - Nexus { - /// The address at which the internal nexus server is reachable. - internal_address: SocketAddrV6, - /// The address at which the external nexus server is reachable. - external_ip: IpAddr, - /// The service vNIC providing external connectivity using OPTE. - nic: NetworkInterface, - /// Whether Nexus's external endpoint should use TLS - external_tls: bool, - /// External DNS servers Nexus can use to resolve external hosts. - external_dns_servers: Vec, - }, - ExternalDns { - /// The address at which the external DNS server API is reachable. - http_address: SocketAddrV6, - /// The address at which the external DNS server is reachable. - dns_address: SocketAddr, - /// The service vNIC providing external connectivity using OPTE. - nic: NetworkInterface, - }, - InternalDns { - http_address: SocketAddrV6, - dns_address: SocketAddrV6, - /// The addresses in the global zone which should be created - /// - /// For the DNS service, which exists outside the sleds's typical subnet - adding an - /// address in the GZ is necessary to allow inter-zone traffic routing. - gz_address: Ipv6Addr, - - /// The address is also identified with an auxiliary bit of information - /// to ensure that the created global zone address can have a unique name. 
- gz_address_index: u32, - }, - Oximeter { - address: SocketAddrV6, - }, - // We should never receive external requests to start wicketd, MGS, sp-sim - // dendrite, tfport, or maghemite: these are all services running in the - // global zone or switch zone that we start autonomously. We tag them with - // `serde(skip)` both to omit them from our OpenAPI definition and to avoid - // needing their contained types to implement `JsonSchema + Deserialize + - // Serialize`. - #[serde(skip)] - ManagementGatewayService, - #[serde(skip)] - Wicketd { - baseboard: Baseboard, - }, - #[serde(skip)] - Dendrite { - asic: DendriteAsic, - }, - #[serde(skip)] - Tfport { - pkt_source: String, - asic: DendriteAsic, - }, - #[serde(skip)] - Uplink, - #[serde(skip)] - MgDdm { - mode: String, - }, - #[serde(skip)] - Mgd, - #[serde(skip)] - SpSim, - CruciblePantry { - address: SocketAddrV6, - }, - BoundaryNtp { - address: SocketAddrV6, - ntp_servers: Vec, - dns_servers: Vec, - domain: Option, - /// The service vNIC providing outbound connectivity using OPTE. - nic: NetworkInterface, - /// The SNAT configuration for outbound connections. - snat_cfg: SourceNatConfig, - }, - InternalNtp { - address: SocketAddrV6, - ntp_servers: Vec, - dns_servers: Vec, - domain: Option, - }, - Clickhouse { - address: SocketAddrV6, - }, - ClickhouseKeeper { - address: SocketAddrV6, - }, - CockroachDb { - address: SocketAddrV6, - }, - Crucible { - address: SocketAddrV6, - }, -} - -impl std::fmt::Display for ServiceType { - fn fmt(&self, f: &mut Formatter<'_>) -> FormatResult { - match self { - ServiceType::Nexus { .. } => write!(f, "nexus"), - ServiceType::ExternalDns { .. } => write!(f, "external_dns"), - ServiceType::InternalDns { .. } => write!(f, "internal_dns"), - ServiceType::Oximeter { .. } => write!(f, "oximeter"), - ServiceType::ManagementGatewayService => write!(f, "mgs"), - ServiceType::Wicketd { .. } => write!(f, "wicketd"), - ServiceType::Dendrite { .. } => write!(f, "dendrite"), - ServiceType::Tfport { .. } => write!(f, "tfport"), - ServiceType::Uplink { .. } => write!(f, "uplink"), - ServiceType::CruciblePantry { .. } => write!(f, "crucible/pantry"), - ServiceType::BoundaryNtp { .. } - | ServiceType::InternalNtp { .. } => write!(f, "ntp"), - ServiceType::MgDdm { .. } => write!(f, "mg-ddm"), - ServiceType::Mgd => write!(f, "mgd"), - ServiceType::SpSim => write!(f, "sp-sim"), - ServiceType::Clickhouse { .. } => write!(f, "clickhouse"), - ServiceType::ClickhouseKeeper { .. } => { - write!(f, "clickhouse_keeper") - } - ServiceType::CockroachDb { .. } => write!(f, "cockroachdb"), - ServiceType::Crucible { .. } => write!(f, "crucible"), - } - } -} - -impl crate::smf_helper::Service for ServiceType { - fn service_name(&self) -> String { - self.to_string() - } - fn smf_name(&self) -> String { - format!("svc:/oxide/{}", self.service_name()) - } - fn should_import(&self) -> bool { - true - } -} - -/// Error returned by attempting to convert an internal service (i.e., a service -/// started autonomously by sled-agent) into a -/// `sled_agent_client::types::ServiceType` to be sent to a remote sled-agent. 
-#[derive(Debug, Clone, Copy, Error)] -#[error("This service may only be started autonomously by sled-agent")] -pub struct AutonomousServiceOnlyError; - -impl TryFrom for sled_agent_client::types::ServiceType { - type Error = AutonomousServiceOnlyError; - - fn try_from(s: ServiceType) -> Result { - use sled_agent_client::types::ServiceType as AutoSt; - use ServiceType as St; - - match s { - St::Nexus { - internal_address, - external_ip, - nic, - external_tls, - external_dns_servers, - } => Ok(AutoSt::Nexus { - internal_address: internal_address.to_string(), - external_ip, - nic: nic.into(), - external_tls, - external_dns_servers, - }), - St::ExternalDns { http_address, dns_address, nic } => { - Ok(AutoSt::ExternalDns { - http_address: http_address.to_string(), - dns_address: dns_address.to_string(), - nic: nic.into(), - }) - } - St::InternalDns { - http_address, - dns_address, - gz_address, - gz_address_index, - } => Ok(AutoSt::InternalDns { - http_address: http_address.to_string(), - dns_address: dns_address.to_string(), - gz_address, - gz_address_index, - }), - St::Oximeter { address } => { - Ok(AutoSt::Oximeter { address: address.to_string() }) - } - St::CruciblePantry { address } => { - Ok(AutoSt::CruciblePantry { address: address.to_string() }) - } - St::BoundaryNtp { - address, - ntp_servers, - dns_servers, - domain, - nic, - snat_cfg, - } => Ok(AutoSt::BoundaryNtp { - address: address.to_string(), - ntp_servers, - dns_servers, - domain, - nic: nic.into(), - snat_cfg: snat_cfg.into(), - }), - St::InternalNtp { address, ntp_servers, dns_servers, domain } => { - Ok(AutoSt::InternalNtp { - address: address.to_string(), - ntp_servers, - dns_servers, - domain, - }) - } - St::Clickhouse { address } => { - Ok(AutoSt::Clickhouse { address: address.to_string() }) - } - St::ClickhouseKeeper { address } => { - Ok(AutoSt::ClickhouseKeeper { address: address.to_string() }) - } - St::CockroachDb { address } => { - Ok(AutoSt::CockroachDb { address: address.to_string() }) - } - St::Crucible { address } => { - Ok(AutoSt::Crucible { address: address.to_string() }) - } - St::ManagementGatewayService - | St::SpSim - | St::Wicketd { .. } - | St::Dendrite { .. } - | St::Tfport { .. } - | St::Uplink - | St::Mgd - | St::MgDdm { .. } => Err(AutonomousServiceOnlyError), - } - } -} - -/// The type of zone which may be requested from Sled Agent +/// The type of zone that Sled Agent may run #[derive( Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, )] @@ -493,24 +251,6 @@ pub enum ZoneType { Switch, } -impl From for sled_agent_client::types::ZoneType { - fn from(zt: ZoneType) -> Self { - match zt { - ZoneType::Clickhouse => Self::Clickhouse, - ZoneType::ClickhouseKeeper => Self::ClickhouseKeeper, - ZoneType::CockroachDb => Self::CockroachDb, - ZoneType::Crucible => Self::Crucible, - ZoneType::CruciblePantry => Self::CruciblePantry, - ZoneType::InternalDns => Self::InternalDns, - ZoneType::ExternalDns => Self::ExternalDns, - ZoneType::Nexus => Self::Nexus, - ZoneType::Ntp => Self::Ntp, - ZoneType::Oximeter => Self::Oximeter, - ZoneType::Switch => Self::Switch, - } - } -} - impl std::fmt::Display for ZoneType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { use ZoneType::*; @@ -531,280 +271,516 @@ impl std::fmt::Display for ZoneType { } } -/// Describes a request to provision a specific dataset +/// Generation 1 of `OmicronZonesConfig` is always the set of no zones. 
+pub const OMICRON_ZONES_CONFIG_INITIAL_GENERATION: u32 = 1; + +/// Describes the set of Omicron-managed zones running on a sled #[derive( Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, )] -pub struct DatasetRequest { - pub id: Uuid, - pub name: DatasetName, - pub service_address: SocketAddrV6, -} - -impl From for sled_agent_client::types::DatasetRequest { - fn from(d: DatasetRequest) -> Self { +pub struct OmicronZonesConfig { + /// generation number of this configuration + /// + /// This generation number is owned by the control plane (i.e., RSS or + /// Nexus, depending on whether RSS-to-Nexus handoff has happened). It + /// should not be bumped within Sled Agent. + /// + /// Sled Agent rejects attempts to set the configuration to a generation + /// older than the one it's currently running. + pub generation: Generation, + + /// list of running zones + pub zones: Vec, +} + +impl From for sled_agent_client::types::OmicronZonesConfig { + fn from(local: OmicronZonesConfig) -> Self { Self { - id: d.id, - name: d.name.into(), - service_address: d.service_address.to_string(), + generation: local.generation.into(), + zones: local.zones.into_iter().map(|s| s.into()).collect(), } } } -/// Describes a request to create a zone running one or more services. +/// Describes one Omicron-managed zone running on a sled #[derive( Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, )] -pub struct ServiceZoneRequest { - // The UUID of the zone to be initialized. - // TODO: Should this be removed? If we have UUIDs on the services, what's - // the point of this? +pub struct OmicronZoneConfig { pub id: Uuid, - // The type of the zone to be created. - pub zone_type: ZoneType, - // The addresses on which the service should listen for requests. - pub addresses: Vec, - // Datasets which should be managed by this service. - #[serde(default)] - pub dataset: Option, - // Services that should be run in the zone - pub services: Vec, -} - -impl ServiceZoneRequest { - // The full name of the zone, if it was to be created as a zone. - pub fn zone_name(&self) -> String { - illumos_utils::running_zone::InstalledZone::get_zone_name( - &self.zone_type.to_string(), - self.zone_name_unique_identifier(), - ) - } + pub underlay_address: Ipv6Addr, + pub zone_type: OmicronZoneType, +} - // The name of a unique identifier for the zone, if one is necessary. - pub fn zone_name_unique_identifier(&self) -> Option { - match &self.zone_type { - // The switch zone is necessarily a singleton. - ZoneType::Switch => None, - // All other zones should be identified by their zone UUID. - ZoneType::Clickhouse - | ZoneType::ClickhouseKeeper - | ZoneType::CockroachDb - | ZoneType::Crucible - | ZoneType::ExternalDns - | ZoneType::InternalDns - | ZoneType::Nexus - | ZoneType::CruciblePantry - | ZoneType::Ntp - | ZoneType::Oximeter => Some(self.id), +impl From for sled_agent_client::types::OmicronZoneConfig { + fn from(local: OmicronZoneConfig) -> Self { + Self { + id: local.id, + underlay_address: local.underlay_address, + zone_type: local.zone_type.into(), } } } -impl TryFrom - for sled_agent_client::types::ServiceZoneRequest -{ - type Error = AutonomousServiceOnlyError; +impl OmicronZoneConfig { + /// If this kind of zone has an associated dataset, returns the dataset's + /// name. Othrwise, returns `None`. 
+ pub fn dataset_name(&self) -> Option { + self.zone_type.dataset_name() + } - fn try_from(s: ServiceZoneRequest) -> Result { - let mut services = Vec::with_capacity(s.services.len()); - for service in s.services { - services.push(service.try_into()?); - } + /// If this kind of zone has an associated dataset, return the dataset's + /// name and the associated "service address". Otherwise, returns `None`. + pub fn dataset_name_and_address( + &self, + ) -> Option<(DatasetName, SocketAddrV6)> { + self.zone_type.dataset_name_and_address() + } - Ok(Self { - id: s.id, - zone_type: s.zone_type.into(), - addresses: s.addresses, - dataset: s.dataset.map(|d| d.into()), - services, - }) + /// Returns the name that is (or will be) used for the illumos zone + /// associated with this zone + pub fn zone_name(&self) -> String { + illumos_utils::running_zone::InstalledZone::get_zone_name( + &self.zone_type.zone_type_str(), + Some(self.id), + ) } -} -impl ServiceZoneRequest { - pub fn into_nexus_service_req( + /// Returns the structure that describes this zone to Nexus during rack + /// initialization + pub fn to_nexus_service_req( &self, sled_id: Uuid, - ) -> Result< - Vec, - AutonomousServiceOnlyError, - > { + ) -> nexus_client::types::ServicePutRequest { use nexus_client::types as NexusTypes; - let mut services = vec![]; - for svc in &self.services { - let service_id = svc.id; - let zone_id = Some(self.id); - match &svc.details { - ServiceType::Nexus { - external_ip, - internal_address, - nic, - .. - } => { - services.push(NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: internal_address.to_string(), - kind: NexusTypes::ServiceKind::Nexus { - external_address: *external_ip, - nic: NexusTypes::ServiceNic { - id: nic.id, - name: nic.name.clone(), - ip: nic.ip, - mac: nic.mac, - }, - }, - }); - } - ServiceType::ExternalDns { http_address, dns_address, nic } => { - services.push(NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: http_address.to_string(), - kind: NexusTypes::ServiceKind::ExternalDns { - external_address: dns_address.ip(), - nic: NexusTypes::ServiceNic { - id: nic.id, - name: nic.name.clone(), - ip: nic.ip, - mac: nic.mac, - }, - }, - }); - } - ServiceType::InternalDns { http_address, .. } => { - services.push(NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: http_address.to_string(), - kind: NexusTypes::ServiceKind::InternalDns, - }); + let service_id = self.id; + let zone_id = Some(self.id); + match &self.zone_type { + OmicronZoneType::Nexus { + external_ip, + internal_address, + nic, + .. + } => NexusTypes::ServicePutRequest { + service_id, + zone_id, + sled_id, + address: internal_address.to_string(), + kind: NexusTypes::ServiceKind::Nexus { + external_address: *external_ip, + nic: NexusTypes::ServiceNic { + id: nic.id, + name: nic.name.clone(), + ip: nic.ip, + mac: nic.mac, + }, + }, + }, + OmicronZoneType::ExternalDns { + http_address, + dns_address, + nic, + .. + } => NexusTypes::ServicePutRequest { + service_id, + zone_id, + sled_id, + address: http_address.to_string(), + kind: NexusTypes::ServiceKind::ExternalDns { + external_address: dns_address.ip(), + nic: NexusTypes::ServiceNic { + id: nic.id, + name: nic.name.clone(), + ip: nic.ip, + mac: nic.mac, + }, + }, + }, + OmicronZoneType::InternalDns { http_address, .. 
} => { + NexusTypes::ServicePutRequest { + service_id, + zone_id, + sled_id, + address: http_address.to_string(), + kind: NexusTypes::ServiceKind::InternalDns, } - ServiceType::Oximeter { address } => { - services.push(NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::Oximeter, - }); + } + OmicronZoneType::Oximeter { address } => { + NexusTypes::ServicePutRequest { + service_id, + zone_id, + sled_id, + address: address.to_string(), + kind: NexusTypes::ServiceKind::Oximeter, } - ServiceType::CruciblePantry { address } => { - services.push(NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::CruciblePantry, - }); + } + OmicronZoneType::CruciblePantry { address } => { + NexusTypes::ServicePutRequest { + service_id, + zone_id, + sled_id, + address: address.to_string(), + kind: NexusTypes::ServiceKind::CruciblePantry, } - ServiceType::BoundaryNtp { address, snat_cfg, nic, .. } => { - services.push(NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::BoundaryNtp { - snat: snat_cfg.into(), - nic: NexusTypes::ServiceNic { - id: nic.id, - name: nic.name.clone(), - ip: nic.ip, - mac: nic.mac, - }, + } + OmicronZoneType::BoundaryNtp { address, snat_cfg, nic, .. } => { + NexusTypes::ServicePutRequest { + service_id, + zone_id, + sled_id, + address: address.to_string(), + kind: NexusTypes::ServiceKind::BoundaryNtp { + snat: snat_cfg.into(), + nic: NexusTypes::ServiceNic { + id: nic.id, + name: nic.name.clone(), + ip: nic.ip, + mac: nic.mac, }, - }); + }, } - ServiceType::InternalNtp { address, .. } => { - services.push(NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::InternalNtp, - }); - } - ServiceType::Clickhouse { address } => { - services.push(NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::Clickhouse, - }); + } + OmicronZoneType::InternalNtp { address, .. } => { + NexusTypes::ServicePutRequest { + service_id, + zone_id, + sled_id, + address: address.to_string(), + kind: NexusTypes::ServiceKind::InternalNtp, } - ServiceType::ClickhouseKeeper { address } => { - services.push(NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::ClickhouseKeeper, - }); + } + OmicronZoneType::Clickhouse { address, .. } => { + NexusTypes::ServicePutRequest { + service_id, + zone_id, + sled_id, + address: address.to_string(), + kind: NexusTypes::ServiceKind::Clickhouse, } - ServiceType::Crucible { address } => { - services.push(NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::Crucible, - }); + } + OmicronZoneType::ClickhouseKeeper { address, .. } => { + NexusTypes::ServicePutRequest { + service_id, + zone_id, + sled_id, + address: address.to_string(), + kind: NexusTypes::ServiceKind::ClickhouseKeeper, } - ServiceType::CockroachDb { address } => { - services.push(NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::Cockroach, - }); + } + OmicronZoneType::Crucible { address, .. 
} => { + NexusTypes::ServicePutRequest { + service_id, + zone_id, + sled_id, + address: address.to_string(), + kind: NexusTypes::ServiceKind::Crucible, } - ServiceType::ManagementGatewayService - | ServiceType::SpSim - | ServiceType::Wicketd { .. } - | ServiceType::Dendrite { .. } - | ServiceType::MgDdm { .. } - | ServiceType::Mgd - | ServiceType::Tfport { .. } - | ServiceType::Uplink => { - return Err(AutonomousServiceOnlyError); + } + OmicronZoneType::CockroachDb { address, .. } => { + NexusTypes::ServicePutRequest { + service_id, + zone_id, + sled_id, + address: address.to_string(), + kind: NexusTypes::ServiceKind::Cockroach, } } } + } +} - Ok(services) +/// Describes a persistent ZFS dataset associated with an Omicron zone +#[derive( + Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, +)] +pub struct OmicronZoneDataset { + pub pool_name: ZpoolName, +} + +impl From for sled_agent_client::types::OmicronZoneDataset { + fn from(local: OmicronZoneDataset) -> Self { + Self { + pool_name: sled_agent_client::types::ZpoolName::from_str( + &local.pool_name.to_string(), + ) + .unwrap(), + } } } -/// Used to request that the Sled initialize a single service. +/// Describes what kind of zone this is (i.e., what component is running in it) +/// as well as any type-specific configuration #[derive( Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, )] -pub struct ServiceZoneService { - pub id: Uuid, - pub details: ServiceType, +#[serde(tag = "type", rename_all = "snake_case")] +pub enum OmicronZoneType { + BoundaryNtp { + address: SocketAddrV6, + ntp_servers: Vec, + dns_servers: Vec, + domain: Option, + /// The service vNIC providing outbound connectivity using OPTE. + nic: NetworkInterface, + /// The SNAT configuration for outbound connections. + snat_cfg: SourceNatConfig, + }, + + Clickhouse { + address: SocketAddrV6, + dataset: OmicronZoneDataset, + }, + + ClickhouseKeeper { + address: SocketAddrV6, + dataset: OmicronZoneDataset, + }, + CockroachDb { + address: SocketAddrV6, + dataset: OmicronZoneDataset, + }, + + Crucible { + address: SocketAddrV6, + dataset: OmicronZoneDataset, + }, + CruciblePantry { + address: SocketAddrV6, + }, + ExternalDns { + dataset: OmicronZoneDataset, + /// The address at which the external DNS server API is reachable. + http_address: SocketAddrV6, + /// The address at which the external DNS server is reachable. + dns_address: SocketAddr, + /// The service vNIC providing external connectivity using OPTE. + nic: NetworkInterface, + }, + InternalDns { + dataset: OmicronZoneDataset, + http_address: SocketAddrV6, + dns_address: SocketAddrV6, + /// The addresses in the global zone which should be created + /// + /// For the DNS service, which exists outside the sleds's typical subnet + /// - adding an address in the GZ is necessary to allow inter-zone + /// traffic routing. + gz_address: Ipv6Addr, + + /// The address is also identified with an auxiliary bit of information + /// to ensure that the created global zone address can have a unique + /// name. + gz_address_index: u32, + }, + InternalNtp { + address: SocketAddrV6, + ntp_servers: Vec, + dns_servers: Vec, + domain: Option, + }, + Nexus { + /// The address at which the internal nexus server is reachable. + internal_address: SocketAddrV6, + /// The address at which the external nexus server is reachable. + external_ip: IpAddr, + /// The service vNIC providing external connectivity using OPTE. 
+ nic: NetworkInterface, + /// Whether Nexus's external endpoint should use TLS + external_tls: bool, + /// External DNS servers Nexus can use to resolve external hosts. + external_dns_servers: Vec, + }, + Oximeter { + address: SocketAddrV6, + }, } -impl TryFrom - for sled_agent_client::types::ServiceZoneService -{ - type Error = AutonomousServiceOnlyError; +impl OmicronZoneType { + /// Returns a canonical string identifying the type of zone this is + /// + /// This is used to construct zone names, SMF service names, etc. + pub fn zone_type_str(&self) -> String { + match self { + OmicronZoneType::BoundaryNtp { .. } + | OmicronZoneType::InternalNtp { .. } => ZoneType::Ntp, + + OmicronZoneType::Clickhouse { .. } => ZoneType::Clickhouse, + OmicronZoneType::ClickhouseKeeper { .. } => { + ZoneType::ClickhouseKeeper + } + OmicronZoneType::CockroachDb { .. } => ZoneType::CockroachDb, + OmicronZoneType::Crucible { .. } => ZoneType::Crucible, + OmicronZoneType::CruciblePantry { .. } => ZoneType::CruciblePantry, + OmicronZoneType::ExternalDns { .. } => ZoneType::ExternalDns, + OmicronZoneType::InternalDns { .. } => ZoneType::InternalDns, + OmicronZoneType::Nexus { .. } => ZoneType::Nexus, + OmicronZoneType::Oximeter { .. } => ZoneType::Oximeter, + } + .to_string() + } + + /// If this kind of zone has an associated dataset, returns the dataset's + /// name. Othrwise, returns `None`. + pub fn dataset_name(&self) -> Option { + self.dataset_name_and_address().map(|d| d.0) + } + + /// If this kind of zone has an associated dataset, return the dataset's + /// name and the associated "service address". Otherwise, returns `None`. + pub fn dataset_name_and_address( + &self, + ) -> Option<(DatasetName, SocketAddrV6)> { + let (dataset, dataset_kind, address) = match self { + OmicronZoneType::BoundaryNtp { .. } + | OmicronZoneType::InternalNtp { .. } + | OmicronZoneType::Nexus { .. } + | OmicronZoneType::Oximeter { .. } + | OmicronZoneType::CruciblePantry { .. } => None, + OmicronZoneType::Clickhouse { dataset, address, .. } => { + Some((dataset, DatasetKind::Clickhouse, address)) + } + OmicronZoneType::ClickhouseKeeper { dataset, address, .. } => { + Some((dataset, DatasetKind::ClickhouseKeeper, address)) + } + OmicronZoneType::CockroachDb { dataset, address, .. } => { + Some((dataset, DatasetKind::CockroachDb, address)) + } + OmicronZoneType::Crucible { dataset, address, .. } => { + Some((dataset, DatasetKind::Crucible, address)) + } + OmicronZoneType::ExternalDns { dataset, http_address, .. } => { + Some((dataset, DatasetKind::ExternalDns, http_address)) + } + OmicronZoneType::InternalDns { dataset, http_address, .. } => { + Some((dataset, DatasetKind::InternalDns, http_address)) + } + }?; - fn try_from(s: ServiceZoneService) -> Result { - let details = s.details.try_into()?; - Ok(Self { id: s.id, details }) + Some(( + DatasetName::new(dataset.pool_name.clone(), dataset_kind), + *address, + )) } } -/// Used to request that the Sled initialize multiple services. -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] -pub struct ServiceEnsureBody { - pub services: Vec, +impl crate::smf_helper::Service for OmicronZoneType { + fn service_name(&self) -> String { + // For historical reasons, crucible-pantry is the only zone type whose + // SMF service does not match the canonical name that we use for the + // zone. + match self { + OmicronZoneType::CruciblePantry { .. 
} => { + "crucible/pantry".to_owned() + } + _ => self.zone_type_str(), + } + } + fn smf_name(&self) -> String { + format!("svc:/oxide/{}", self.service_name()) + } + fn should_import(&self) -> bool { + true + } +} + +impl From for sled_agent_client::types::OmicronZoneType { + fn from(local: OmicronZoneType) -> Self { + use sled_agent_client::types::OmicronZoneType as Other; + match local { + OmicronZoneType::BoundaryNtp { + address, + ntp_servers, + dns_servers, + domain, + nic, + snat_cfg, + } => Other::BoundaryNtp { + address: address.to_string(), + dns_servers, + domain, + ntp_servers, + snat_cfg: snat_cfg.into(), + nic: nic.into(), + }, + OmicronZoneType::Clickhouse { address, dataset } => { + Other::Clickhouse { + address: address.to_string(), + dataset: dataset.into(), + } + } + OmicronZoneType::ClickhouseKeeper { address, dataset } => { + Other::ClickhouseKeeper { + address: address.to_string(), + dataset: dataset.into(), + } + } + OmicronZoneType::CockroachDb { address, dataset } => { + Other::CockroachDb { + address: address.to_string(), + dataset: dataset.into(), + } + } + OmicronZoneType::Crucible { address, dataset } => Other::Crucible { + address: address.to_string(), + dataset: dataset.into(), + }, + OmicronZoneType::CruciblePantry { address } => { + Other::CruciblePantry { address: address.to_string() } + } + OmicronZoneType::ExternalDns { + dataset, + http_address, + dns_address, + nic, + } => Other::ExternalDns { + dataset: dataset.into(), + http_address: http_address.to_string(), + dns_address: dns_address.to_string(), + nic: nic.into(), + }, + OmicronZoneType::InternalDns { + dataset, + http_address, + dns_address, + gz_address, + gz_address_index, + } => Other::InternalDns { + dataset: dataset.into(), + http_address: http_address.to_string(), + dns_address: dns_address.to_string(), + gz_address, + gz_address_index, + }, + OmicronZoneType::InternalNtp { + address, + ntp_servers, + dns_servers, + domain, + } => Other::InternalNtp { + address: address.to_string(), + ntp_servers, + dns_servers, + domain, + }, + OmicronZoneType::Nexus { + internal_address, + external_ip, + nic, + external_tls, + external_dns_servers, + } => Other::Nexus { + external_dns_servers, + external_ip, + external_tls, + internal_address: internal_address.to_string(), + nic: nic.into(), + }, + OmicronZoneType::Oximeter { address } => { + Other::Oximeter { address: address.to_string() } + } + } + } } #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index 980f5b6ebd..441c7fd842 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -5,10 +5,7 @@ //! Plan generation for "where should services be initialized". use crate::bootstrap::params::StartSledAgentRequest; -use crate::params::{ - DatasetRequest, ServiceType, ServiceZoneRequest, ServiceZoneService, - ZoneType, -}; +use crate::params::{OmicronZoneConfig, OmicronZoneDataset, OmicronZoneType}; use crate::rack_setup::config::SetupServiceConfig as Config; use camino::Utf8PathBuf; use dns_service_client::types::DnsConfigParams; @@ -97,20 +94,20 @@ pub enum PlanError { #[error("Ran out of sleds / U2 storage pools")] NotEnoughSleds, + + #[error("Found only v1 service plan")] + FoundV1, } -#[derive( - Clone, Debug, Default, Deserialize, Serialize, PartialEq, JsonSchema, -)] -pub struct SledRequest { - /// Services to be instantiated. 
- #[serde(default, rename = "service")] - pub services: Vec, +#[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema)] +pub struct SledConfig { + /// zones configured for this sled + pub zones: Vec, } #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] pub struct Plan { - pub services: HashMap, + pub services: HashMap, pub dns_config: DnsConfigParams, } @@ -120,7 +117,8 @@ impl Ledgerable for Plan { } fn generation_bump(&mut self) {} } -const RSS_SERVICE_PLAN_FILENAME: &str = "rss-service-plan.json"; +const RSS_SERVICE_PLAN_V1_FILENAME: &str = "rss-service-plan.json"; +const RSS_SERVICE_PLAN_FILENAME: &str = "rss-service-plan-v2.json"; impl Plan { pub async fn load( @@ -142,11 +140,60 @@ impl Plan { if let Some(ledger) = ledger { info!(log, "RSS plan already created, loading from file"); Ok(Some(ledger.data().clone())) + } else if Self::has_v1(storage_manager).await.map_err(|err| { + PlanError::Io { + message: String::from("looking for v1 RSS plan"), + err, + } + })? { + // If we found no current-version service plan, but we _do_ find + // a v1 plan present, bail out. We do not expect to ever see this + // in practice because that would indicate that: + // + // - We ran RSS previously on this same system using an older + // version of the software that generates v1 service plans and it + // got far enough through RSS to have written the v1 service plan. + // - That means it must have finished initializing all sled agents, + // including itself, causing it to record a + // `StartSledAgentRequest`s in its ledger -- while still running + // the older RSS. + // - But we're currently running software that knows about v2 + // service plans. Thus, this process started some time after that + // ledger was written. + // - But the bootstrap agent refuses to execute RSS if it has a + // local `StartSledAgentRequest` ledgered. So we shouldn't get + // here if all of the above happened. + // + // This sounds like a complicated set of assumptions. If we got + // this wrong, we'll fail spuriously here and we'll have to figure + // out what happened. But the alternative is doing extra work to + // support a condition that we do not believe can ever happen in any + // system. + Err(PlanError::FoundV1) } else { Ok(None) } } + async fn has_v1( + storage_manager: &StorageHandle, + ) -> Result { + let paths = storage_manager + .get_latest_resources() + .await + .all_m2_mountpoints(CONFIG_DATASET) + .into_iter() + .map(|p| p.join(RSS_SERVICE_PLAN_V1_FILENAME)); + + for p in paths { + if p.try_exists()? { + return Ok(true); + } + } + + Ok(false) + } + async fn is_sled_scrimlet( log: &Logger, address: SocketAddrV6, @@ -235,41 +282,13 @@ impl Plan { Ok(u2_zpools) } - pub async fn create( - log: &Logger, + pub fn create_transient( config: &Config, - storage_manager: &StorageHandle, - sleds: &HashMap, + mut sled_info: Vec, ) -> Result { let mut dns_builder = internal_dns::DnsConfigBuilder::new(); let mut svc_port_builder = ServicePortBuilder::new(config); - // Load the information we need about each Sled to be able to allocate - // components on it. 
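Stepping back to the `Plan::load`/`has_v1` changes at the top of this file, the load path now has three outcomes, summarized here (file names from the two constants above; this summary is editorial, not part of the patch):

// Outcomes of Plan::load, keyed on which ledger file exists in the
// CONFIG_DATASET mountpoints:
//
//   rss-service-plan-v2.json present         -> Ok(Some(plan))   (reuse it)
//   only rss-service-plan.json (v1) present  -> Err(PlanError::FoundV1)
//   neither present                          -> Ok(None)         (generate a new plan)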
- let mut sled_info = { - let result: Result, PlanError> = - futures::future::try_join_all(sleds.values().map( - |sled_request| async { - let subnet = sled_request.body.subnet; - let sled_address = get_sled_address(subnet); - let u2_zpools = - Self::get_u2_zpools_from_sled(log, sled_address) - .await?; - let is_scrimlet = - Self::is_sled_scrimlet(log, sled_address).await?; - Ok(SledInfo::new( - sled_request.body.id, - subnet, - sled_address, - u2_zpools, - is_scrimlet, - )) - }, - )) - .await; - result? - }; - // Scrimlets get DNS records for running Dendrite. let scrimlets: Vec<_> = sled_info.iter().filter(|s| s.is_scrimlet).collect(); @@ -348,24 +367,18 @@ impl Plan { let dataset_name = sled.alloc_from_u2_zpool(DatasetKind::InternalDns)?; - sled.request.services.push(ServiceZoneRequest { + sled.request.zones.push(OmicronZoneConfig { id, - zone_type: ZoneType::InternalDns, - addresses: vec![ip], - dataset: Some(DatasetRequest { - id, - name: dataset_name, - service_address: http_address, - }), - services: vec![ServiceZoneService { - id, - details: ServiceType::InternalDns { - http_address, - dns_address, - gz_address: dns_subnet.gz_address().ip(), - gz_address_index: i.try_into().expect("Giant indices?"), + underlay_address: ip, + zone_type: OmicronZoneType::InternalDns { + dataset: OmicronZoneDataset { + pool_name: dataset_name.pool().clone(), }, - }], + http_address, + dns_address, + gz_address: dns_subnet.gz_address().ip(), + gz_address_index: i.try_into().expect("Giant indices?"), + }, }); } @@ -386,19 +399,15 @@ impl Plan { .unwrap(); let dataset_name = sled.alloc_from_u2_zpool(DatasetKind::CockroachDb)?; - sled.request.services.push(ServiceZoneRequest { + sled.request.zones.push(OmicronZoneConfig { id, - zone_type: ZoneType::CockroachDb, - addresses: vec![ip], - dataset: Some(DatasetRequest { - id, - name: dataset_name, - service_address: address, - }), - services: vec![ServiceZoneService { - id, - details: ServiceType::CockroachDb { address }, - }], + underlay_address: ip, + zone_type: OmicronZoneType::CockroachDb { + dataset: OmicronZoneDataset { + pool_name: dataset_name.pool().clone(), + }, + address, + }, }); } @@ -433,23 +442,17 @@ impl Plan { let dataset_kind = DatasetKind::ExternalDns; let dataset_name = sled.alloc_from_u2_zpool(dataset_kind)?; - sled.request.services.push(ServiceZoneRequest { + sled.request.zones.push(OmicronZoneConfig { id, - zone_type: ZoneType::ExternalDns, - addresses: vec![*http_address.ip()], - dataset: Some(DatasetRequest { - id, - name: dataset_name, - service_address: http_address, - }), - services: vec![ServiceZoneService { - id, - details: ServiceType::ExternalDns { - http_address, - dns_address, - nic, + underlay_address: *http_address.ip(), + zone_type: OmicronZoneType::ExternalDns { + dataset: OmicronZoneDataset { + pool_name: dataset_name.pool().clone(), }, - }], + http_address, + dns_address, + nic, + }, }); } @@ -471,33 +474,28 @@ impl Plan { ) .unwrap(); let (nic, external_ip) = svc_port_builder.next_nexus(id)?; - sled.request.services.push(ServiceZoneRequest { + sled.request.zones.push(OmicronZoneConfig { id, - zone_type: ZoneType::Nexus, - addresses: vec![address], - dataset: None, - services: vec![ServiceZoneService { - id, - details: ServiceType::Nexus { - internal_address: SocketAddrV6::new( - address, - omicron_common::address::NEXUS_INTERNAL_PORT, - 0, - 0, - ), - external_ip, - nic, - // Tell Nexus to use TLS if and only if the caller - // provided TLS certificates. 
This effectively - // determines the status of TLS for the lifetime of - // the rack. In production-like deployments, we'd - // always expect TLS to be enabled. It's only in - // development that it might not be. - external_tls: !config.external_certificates.is_empty(), - external_dns_servers: config.dns_servers.clone(), - }, - }], - }) + underlay_address: address, + zone_type: OmicronZoneType::Nexus { + internal_address: SocketAddrV6::new( + address, + omicron_common::address::NEXUS_INTERNAL_PORT, + 0, + 0, + ), + external_ip, + nic, + // Tell Nexus to use TLS if and only if the caller + // provided TLS certificates. This effectively + // determines the status of TLS for the lifetime of + // the rack. In production-like deployments, we'd + // always expect TLS to be enabled. It's only in + // development that it might not be. + external_tls: !config.external_certificates.is_empty(), + external_dns_servers: config.dns_servers.clone(), + }, + }); } // Provision Oximeter zones, continuing to stripe across sleds. @@ -518,22 +516,17 @@ impl Plan { omicron_common::address::OXIMETER_PORT, ) .unwrap(); - sled.request.services.push(ServiceZoneRequest { + sled.request.zones.push(OmicronZoneConfig { id, - zone_type: ZoneType::Oximeter, - addresses: vec![address], - dataset: None, - services: vec![ServiceZoneService { - id, - details: ServiceType::Oximeter { - address: SocketAddrV6::new( - address, - omicron_common::address::OXIMETER_PORT, - 0, - 0, - ), - }, - }], + underlay_address: address, + zone_type: OmicronZoneType::Oximeter { + address: SocketAddrV6::new( + address, + omicron_common::address::OXIMETER_PORT, + 0, + 0, + ), + }, }) } @@ -555,19 +548,15 @@ impl Plan { .unwrap(); let dataset_name = sled.alloc_from_u2_zpool(DatasetKind::Clickhouse)?; - sled.request.services.push(ServiceZoneRequest { + sled.request.zones.push(OmicronZoneConfig { id, - zone_type: ZoneType::Clickhouse, - addresses: vec![ip], - dataset: Some(DatasetRequest { - id, - name: dataset_name, - service_address: address, - }), - services: vec![ServiceZoneService { - id, - details: ServiceType::Clickhouse { address }, - }], + underlay_address: ip, + zone_type: OmicronZoneType::Clickhouse { + address, + dataset: OmicronZoneDataset { + pool_name: dataset_name.pool().clone(), + }, + }, }); } @@ -595,19 +584,15 @@ impl Plan { .unwrap(); let dataset_name = sled.alloc_from_u2_zpool(DatasetKind::ClickhouseKeeper)?; - sled.request.services.push(ServiceZoneRequest { + sled.request.zones.push(OmicronZoneConfig { id, - zone_type: ZoneType::ClickhouseKeeper, - addresses: vec![ip], - dataset: Some(DatasetRequest { - id, - name: dataset_name, - service_address: address, - }), - services: vec![ServiceZoneService { - id, - details: ServiceType::ClickhouseKeeper { address }, - }], + underlay_address: ip, + zone_type: OmicronZoneType::ClickhouseKeeper { + address, + dataset: OmicronZoneDataset { + pool_name: dataset_name.pool().clone(), + }, + }, }); } @@ -626,18 +611,13 @@ impl Plan { dns_builder .service_backend_zone(ServiceName::CruciblePantry, &zone, port) .unwrap(); - sled.request.services.push(ServiceZoneRequest { + sled.request.zones.push(OmicronZoneConfig { id, - zone_type: ZoneType::CruciblePantry, - addresses: vec![address], - dataset: None, - services: vec![ServiceZoneService { - id, - details: ServiceType::CruciblePantry { - address: SocketAddrV6::new(address, port, 0, 0), - }, - }], - }) + underlay_address: address, + zone_type: OmicronZoneType::CruciblePantry { + address: SocketAddrV6::new(address, port, 0, 0), + }, + }); } // 
Provision a Crucible zone on every zpool on every Sled. @@ -657,22 +637,13 @@ impl Plan { ) .unwrap(); - sled.request.services.push(ServiceZoneRequest { + sled.request.zones.push(OmicronZoneConfig { id, - zone_type: ZoneType::Crucible, - addresses: vec![ip], - dataset: Some(DatasetRequest { - id, - name: DatasetName::new( - pool.clone(), - DatasetKind::Crucible, - ), - service_address: address, - }), - services: vec![ServiceZoneService { - id, - details: ServiceType::Crucible { address }, - }], + underlay_address: ip, + zone_type: OmicronZoneType::Crucible { + address, + dataset: OmicronZoneDataset { pool_name: pool.clone() }, + }, }); } } @@ -685,47 +656,40 @@ impl Plan { let id = Uuid::new_v4(); let address = sled.addr_alloc.next().expect("Not enough addrs"); let zone = dns_builder.host_zone(id, address).unwrap(); + let ntp_address = SocketAddrV6::new(address, NTP_PORT, 0, 0); - let (services, svcname) = if idx < BOUNDARY_NTP_COUNT { + let (zone_type, svcname) = if idx < BOUNDARY_NTP_COUNT { boundary_ntp_servers.push(format!("{}.host.{}", id, DNS_ZONE)); let (nic, snat_cfg) = svc_port_builder.next_snat(id)?; ( - vec![ServiceZoneService { - id, - details: ServiceType::BoundaryNtp { - address: SocketAddrV6::new(address, NTP_PORT, 0, 0), - ntp_servers: config.ntp_servers.clone(), - dns_servers: config.dns_servers.clone(), - domain: None, - nic, - snat_cfg, - }, - }], + OmicronZoneType::BoundaryNtp { + address: ntp_address, + ntp_servers: config.ntp_servers.clone(), + dns_servers: config.dns_servers.clone(), + domain: None, + nic, + snat_cfg, + }, ServiceName::BoundaryNtp, ) } else { ( - vec![ServiceZoneService { - id, - details: ServiceType::InternalNtp { - address: SocketAddrV6::new(address, NTP_PORT, 0, 0), - ntp_servers: boundary_ntp_servers.clone(), - dns_servers: rack_dns_servers.clone(), - domain: None, - }, - }], + OmicronZoneType::InternalNtp { + address: ntp_address, + ntp_servers: boundary_ntp_servers.clone(), + dns_servers: rack_dns_servers.clone(), + domain: None, + }, ServiceName::InternalNtp, ) }; dns_builder.service_backend_zone(svcname, &zone, NTP_PORT).unwrap(); - sled.request.services.push(ServiceZoneRequest { + sled.request.zones.push(OmicronZoneConfig { id, - zone_type: ZoneType::Ntp, - addresses: vec![address], - dataset: None, - services, + underlay_address: address, + zone_type, }); } @@ -735,7 +699,42 @@ impl Plan { .collect(); let dns_config = dns_builder.build(); - let plan = Self { services, dns_config }; + Ok(Self { services, dns_config }) + } + + pub async fn create( + log: &Logger, + config: &Config, + storage_manager: &StorageHandle, + sleds: &HashMap, + ) -> Result { + // Load the information we need about each Sled to be able to allocate + // components on it. + let sled_info = { + let result: Result, PlanError> = + futures::future::try_join_all(sleds.values().map( + |sled_request| async { + let subnet = sled_request.body.subnet; + let sled_address = get_sled_address(subnet); + let u2_zpools = + Self::get_u2_zpools_from_sled(log, sled_address) + .await?; + let is_scrimlet = + Self::is_sled_scrimlet(log, sled_address).await?; + Ok(SledInfo::new( + sled_request.body.id, + subnet, + sled_address, + u2_zpools, + is_scrimlet, + )) + }, + )) + .await; + result? + }; + + let plan = Self::create_transient(config, sled_info)?; // Once we've constructed a plan, write it down to durable storage. 
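A condensed sketch of the persistence step that follows, using the same multi-mountpoint pattern as `has_v1` above (`plan` and `log` stand in for the surrounding locals; error handling is elided):

    let paths: Vec<Utf8PathBuf> = storage_manager
        .get_latest_resources()
        .await
        .all_m2_mountpoints(CONFIG_DATASET)
        .into_iter()
        .map(|p| p.join(RSS_SERVICE_PLAN_FILENAME))
        .collect();
    // The ledger writes the same JSON blob to every M.2 config dataset,
    // which is what lets a later `load` find the plan even if one device
    // is lost.
    let mut ledger = Ledger::<Self>::new_with(log, paths, plan.clone());
    ledger.commit().await?;
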
let paths: Vec = storage_manager @@ -773,13 +772,13 @@ impl AddressBumpAllocator { } /// Wraps up the information used to allocate components to a Sled -struct SledInfo { +pub struct SledInfo { /// unique id for the sled agent - sled_id: Uuid, + pub sled_id: Uuid, /// the sled's unique IPv6 subnet subnet: Ipv6Subnet, /// the address of the Sled Agent on the sled's subnet - sled_address: SocketAddrV6, + pub sled_address: SocketAddrV6, /// the list of zpools on the Sled u2_zpools: Vec, /// spreads components across a Sled's zpools @@ -789,12 +788,12 @@ struct SledInfo { is_scrimlet: bool, /// allocator for addresses in this Sled's subnet addr_alloc: AddressBumpAllocator, - /// under-construction list of services being deployed to a Sled - request: SledRequest, + /// under-construction list of Omicron zones being deployed to a Sled + request: SledConfig, } impl SledInfo { - fn new( + pub fn new( sled_id: Uuid, subnet: Ipv6Subnet, sled_address: SocketAddrV6, @@ -1209,10 +1208,10 @@ mod tests { } #[test] - fn test_rss_service_plan_schema() { + fn test_rss_service_plan_v2_schema() { let schema = schemars::schema_for!(Plan); expectorate::assert_contents( - "../schema/rss-service-plan.json", + "../schema/rss-service-plan-v2.json", &serde_json::to_string_pretty(&schema).unwrap(), ); } diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index 7dcbfa7045..8038658fb1 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -11,15 +11,25 @@ //! - DNS records for those services //! - Handoff to Nexus, for control of Control Plane management //! -//! # Phases and Configuration Files +//! # Phases, state files, and restart behavior //! -//! Rack setup occurs in distinct phases which are denoted by the prescence of -//! configuration files. +//! Rack setup occurs in distinct phases that are denoted by the presence of +//! state files that get generated as RSS executes: //! //! - /pool/int/UUID/config/rss-sled-plan.json (Sled Plan) -//! - /pool/int/UUID/config/rss-service-plan.json (Service Plan) +//! - /pool/int/UUID/config/rss-service-plan-v2.json (Service Plan) //! - /pool/int/UUID/config/rss-plan-completed.marker (Plan Execution Complete) //! +//! These phases are described below. As each phase completes, a corresponding +//! state file is written. This mechanism is designed so that if RSS restarts +//! (e.g., after a crash) then it will resume execution using the same plans. +//! +//! The service plan file has "-v2" in the filename because its structure +//! changed in omicron#4466. It is possible that on startup, RSS finds an +//! older-form service plan. In that case, it fails altogether. We do not +//! expect this condition to happen in practice. See the implementation for +//! details. +//! //! ## Sled Plan //! //! 
RSS should start as a service executing on a Sidecar-attached Gimlet @@ -65,8 +75,8 @@ use crate::bootstrap::params::StartSledAgentRequest; use crate::bootstrap::rss_handle::BootstrapAgentHandle; use crate::nexus::{d2n_params, ConvertInto}; use crate::params::{ - AutonomousServiceOnlyError, ServiceType, ServiceZoneRequest, - ServiceZoneService, TimeSync, ZoneType, + OmicronZoneType, OmicronZonesConfig, TimeSync, + OMICRON_ZONES_CONFIG_INITIAL_GENERATION, }; use crate::rack_setup::plan::service::{ Plan as ServicePlan, PlanError as ServicePlanError, @@ -83,6 +93,7 @@ use nexus_client::{ types as NexusTypes, Client as NexusClient, Error as NexusError, }; use omicron_common::address::get_sled_address; +use omicron_common::api::external::Generation; use omicron_common::api::internal::shared::ExternalPortDiscovery; use omicron_common::backoff::{ retry_notify, retry_policy_internal_service_aggressive, BackoffError, @@ -257,45 +268,75 @@ impl ServiceInner { ServiceInner { log } } - async fn initialize_services_on_sled( + /// Requests that the specified sled configure zones as described by + /// `zones_config` + /// + /// This function succeeds even if the sled fails to apply the configuration + /// if the reason is that the sled is already running a newer configuration. + /// This might sound oddly specific but it's what our sole caller wants. + /// In particular, the caller is going to call this function a few times + /// with successive generation numbers. If we crash and go through the + /// process again, we might run into this case, and it's simplest to just + /// ignore it and proceed. + async fn initialize_zones_on_sled( &self, sled_address: SocketAddrV6, - services: &Vec, + zones_config: &OmicronZonesConfig, ) -> Result<(), SetupServiceError> { let dur = std::time::Duration::from_secs(60); let client = reqwest::ClientBuilder::new() .connect_timeout(dur) .build() .map_err(SetupServiceError::HttpClient)?; + let log = self.log.new(o!("sled_address" => sled_address.to_string())); let client = SledAgentClient::new_with_client( &format!("http://{}", sled_address), client, - self.log.new(o!("SledAgentClient" => sled_address.to_string())), + log.clone(), ); - let services = services - .iter() - .map(|s| s.clone().try_into()) - .collect::, AutonomousServiceOnlyError>>() - .map_err(|err| { - SetupServiceError::SledInitialization(err.to_string()) - })?; - - info!(self.log, "sending service requests..."); let services_put = || async { - info!(self.log, "initializing sled services: {:?}", services); - client - .services_put(&SledAgentTypes::ServiceEnsureBody { - services: services.clone(), - }) - .await - .map_err(BackoffError::transient)?; - Ok::<(), BackoffError>>(()) + info!( + log, + "attempting to set up sled's Omicron zones: {:?}", zones_config + ); + let result = + client.omicron_zones_put(&zones_config.clone().into()).await; + let Err(error) = result else { + return Ok::< + (), + BackoffError>, + >(()); + }; + + if let sled_agent_client::Error::ErrorResponse(response) = &error { + if response.status() == http::StatusCode::CONFLICT { + warn!( + log, + "ignoring attempt to initialize zones because \ + the server seems to be newer"; + "attempted_generation" => + i64::from(&zones_config.generation), + "req_id" => &response.request_id, + "server_message" => &response.message, + ); + + // If we attempt to initialize zones at generation X, and + // the server refuses because it's at some generation newer + // than X, then we treat that as success. See the doc + // comment on this function. 
+ return Ok(()); + } + } + + // TODO Many other codes here should not be retried. See + // omicron#4578. + return Err(BackoffError::transient(error)); }; let log_failure = |error, delay| { warn!( - self.log, - "failed to initialize services"; + log, + "failed to initialize Omicron zones"; "error" => ?error, "retry_after" => ?delay, ); @@ -310,41 +351,26 @@ impl ServiceInner { Ok(()) } - // Ensure that all services of a particular type are running. + // Ensure that all services for a particular version are running. // // This is useful in a rack-setup context, where initial boot ordering // can matter for first-time-setup. // // Note that after first-time setup, the initialization order of // services should not matter. - async fn ensure_all_services_of_type( + // + // Further, it's possible that the target sled is already running a newer + // version. That's not an error here. + async fn ensure_zone_config_at_least( &self, - service_plan: &ServicePlan, - zone_types: &HashSet, + configs: &HashMap, ) -> Result<(), SetupServiceError> { - futures::future::join_all(service_plan.services.iter().map( - |(sled_address, services_request)| async move { - let services: Vec<_> = services_request - .services - .iter() - .filter_map(|service| { - if zone_types.contains(&service.zone_type) { - Some(service.clone()) - } else { - None - } - }) - .collect(); - if !services.is_empty() { - self.initialize_services_on_sled(*sled_address, &services) - .await?; - } - Ok(()) + cancel_safe_futures::future::join_all_then_try(configs.iter().map( + |(sled_address, zones_config)| async move { + self.initialize_zones_on_sled(*sled_address, zones_config).await }, )) - .await - .into_iter() - .collect::>()?; + .await?; Ok(()) } @@ -360,17 +386,15 @@ impl ServiceInner { let dns_server_ips = // iterate sleds service_plan.services.iter().filter_map( - |(_, services_request)| { - // iterate services for this sled - let dns_addrs: Vec = services_request - .services + |(_, sled_config)| { + // iterate zones for this sled + let dns_addrs: Vec = sled_config + .zones .iter() - .filter_map(|service| { - match &service.services[0] { - ServiceZoneService { - details: ServiceType::InternalDns { http_address, .. }, - .. - } => { + .filter_map(|zone_config| { + match &zone_config.zone_type { + OmicronZoneType::InternalDns { http_address, .. } + => { Some(*http_address) }, _ => None, @@ -546,25 +570,25 @@ impl ServiceInner { // a format which can be processed by Nexus. 
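Distilled, the response handling in `initialize_zones_on_sled` above reduces to: success and CONFLICT both settle the request; everything else is retried. A minimal sketch of that policy as a pure function (hypothetical helper, not part of this change; only the `http` crate is assumed, which the code above already uses for `StatusCode::CONFLICT`):

    use http::StatusCode;

    /// Outcome of one attempt to push an OmicronZonesConfig to a sled.
    enum PutOutcome {
        /// The sled accepted our generation -- or already runs a newer
        /// one, which counts as success for our sole caller.
        Settled,
        /// Transient failure; retry with backoff.
        Retry,
    }

    fn classify(status: StatusCode) -> PutOutcome {
        if status.is_success() || status == StatusCode::CONFLICT {
            PutOutcome::Settled
        } else {
            // TODO(omicron#4578): several other statuses should not be
            // retried either.
            PutOutcome::Retry
        }
    }
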
let mut services: Vec = vec![]; let mut datasets: Vec = vec![]; - for (addr, service_request) in service_plan.services.iter() { + for (addr, sled_config) in service_plan.services.iter() { let sled_id = *id_map .get(addr) .expect("Sled address in service plan, but not sled plan"); - for zone in &service_request.services { - services.extend(zone.into_nexus_service_req(sled_id).map_err( - |err| SetupServiceError::BadConfig(err.to_string()), - )?); + for zone in &sled_config.zones { + services.push(zone.to_nexus_service_req(sled_id)); } - for service in service_request.services.iter() { - if let Some(dataset) = &service.dataset { + for zone in &sled_config.zones { + if let Some((dataset_name, dataset_address)) = + zone.dataset_name_and_address() + { datasets.push(NexusTypes::DatasetCreateRequest { - zpool_id: dataset.name.pool().id(), - dataset_id: dataset.id, + zpool_id: dataset_name.pool().id(), + dataset_id: zone.id, request: NexusTypes::DatasetPutRequest { - address: dataset.service_address.to_string(), - kind: dataset.name.dataset().clone().convert(), + address: dataset_address.to_string(), + kind: dataset_name.dataset().clone().convert(), }, }) } @@ -598,14 +622,9 @@ impl ServiceInner { .collect(), addresses: config.addresses.clone(), switch: config.switch.into(), - uplink_port_speed: config - .uplink_port_speed - .clone() - .into(), - uplink_port_fec: config - .uplink_port_fec - .clone() - .into(), + uplink_port_speed: config.uplink_port_speed.into(), + uplink_port_fec: config.uplink_port_fec.into(), + autoneg: config.autoneg, bgp_peers: config .bgp_peers .iter() @@ -705,20 +724,22 @@ impl ServiceInner { ) -> Result<(), SetupServiceError> { // Now that datasets and zones have started for CockroachDB, // perform one-time initialization of the cluster. - let sled_address = - service_plan - .services - .iter() - .find_map(|(sled_address, sled_request)| { - if sled_request.services.iter().any(|service| { - service.zone_type == ZoneType::CockroachDb - }) { - Some(sled_address) - } else { - None - } - }) - .expect("Should not create service plans without CockroachDb"); + let sled_address = service_plan + .services + .iter() + .find_map(|(sled_address, sled_config)| { + if sled_config.zones.iter().any(|zone_config| { + matches!( + &zone_config.zone_type, + OmicronZoneType::CockroachDb { .. } + ) + }) { + Some(sled_address) + } else { + None + } + }) + .expect("Should not create service plans without CockroachDb"); let dur = std::time::Duration::from_secs(60); let client = reqwest::ClientBuilder::new() .connect_timeout(dur) @@ -758,8 +779,8 @@ impl ServiceInner { // time, it creates an allocation plan to provision subnets to an initial // set of sleds. // - // 2. SLED ALLOCATION PLAN EXECUTION. The RSS then carries out this plan, making - // requests to the sleds enumerated within the "allocation plan". + // 2. SLED ALLOCATION PLAN EXECUTION. The RSS then carries out this plan, + // making requests to the sleds enumerated within the "allocation plan". // // 3. SERVICE ALLOCATION PLAN CREATION. Now that Sled Agents are executing // on their respective subnets, they can be queried to create an @@ -770,7 +791,8 @@ impl ServiceInner { // // 5. MARKING SETUP COMPLETE. Once the RSS has successfully initialized the // rack, a marker file is created at "rss_completed_marker_path()". This - // indicates that the plan executed successfully, and no work remains. + // indicates that the plan executed successfully, and the only work + // remaining is to handoff to Nexus. 
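`run()` below drives zone deployment through monotonically increasing generation numbers, relying on each sled agent to enforce the ordering on its end. A hedged sketch of that sled-side guard, mirroring the `RequestedConfigOutdated` and `RequestedConfigConflicts` errors added to services.rs later in this diff (the function name and the `zones_match` flag are illustrative):

    use omicron_common::api::external::Generation;

    fn check_requested_generation(
        requested: Generation,
        current: Generation,
        zones_match: bool,
    ) -> Result<(), String> {
        if requested < current {
            // A stale writer -- e.g., RSS re-running an earlier phase
            // after a crash while this sled has already moved on.
            Err(format!(
                "requested ({requested}) is older than current ({current})"
            ))
        } else if requested == current && !zones_match {
            // Re-sending the same generation is fine, but only with the
            // same zones.
            Err(format!(
                "requested generation {requested} with different zones"
            ))
        } else {
            Ok(())
        }
    }

This guard is also why the RSS side can treat CONFLICT as success in `initialize_zones_on_sled`: configurations are cumulative (each version includes all zones from the previous one), so a sled at a newer generation already has at least everything the rejected phase would have added.
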
async fn run( &self, config: &Config, @@ -951,11 +973,49 @@ impl ServiceInner { .await? }; + // The service plan describes all the zones that we will eventually + // deploy on each sled. But we cannot currently just deploy them all + // concurrently. We'll do it in a few stages, each corresponding to a + // version of each sled's configuration. + // + // - version 1: no services running + // (We don't have to do anything for this. But we do + // reserve this version number for "no services running" so + // that sled agents can begin with an initial, valid + // OmicronZonesConfig before they've got anything running.) + // - version 2: internal DNS only + // - version 3: internal DNS + NTP servers + // - version 4: internal DNS + NTP servers + CockroachDB + // - version 5: everything + // + // At each stage, we're specifying a complete configuration of what + // should be running on the sled -- including this version number. + // And Sled Agents will reject requests for versions older than the + // one they're currently running. Thus, the version number is a piece + // of global, distributed state. + // + // For now, we hardcode the requests we make to use specific version + // numbers. + let version1_nothing = + Generation::from(OMICRON_ZONES_CONFIG_INITIAL_GENERATION); + let version2_dns_only = version1_nothing.next(); + let version3_dns_and_ntp = version2_dns_only.next(); + let version4_cockroachdb = version3_dns_and_ntp.next(); + let version5_everything = version4_cockroachdb.next(); + // Set up internal DNS services first and write the initial // DNS configuration to the internal DNS servers. - let mut zone_types = HashSet::new(); - zone_types.insert(ZoneType::InternalDns); - self.ensure_all_services_of_type(&service_plan, &zone_types).await?; + let v1generator = OmicronZonesConfigGenerator::initial_version( + &service_plan, + version1_nothing, + ); + let v2generator = v1generator.new_version_with( + version2_dns_only, + &|zone_type: &OmicronZoneType| { + matches!(zone_type, OmicronZoneType::InternalDns { .. }) + }, + ); + self.ensure_zone_config_at_least(v2generator.sled_configs()).await?; self.initialize_internal_dns_records(&service_plan).await?; // Ask MGS in each switch zone which switch it is. @@ -964,10 +1024,17 @@ impl ServiceInner { .await; // Next start up the NTP services. - // Note we also specify internal DNS services again because it - // can ony be additive. - zone_types.insert(ZoneType::Ntp); - self.ensure_all_services_of_type(&service_plan, &zone_types).await?; + let v3generator = v2generator.new_version_with( + version3_dns_and_ntp, + &|zone_type: &OmicronZoneType| { + matches!( + zone_type, + OmicronZoneType::BoundaryNtp { .. } + | OmicronZoneType::InternalNtp { .. } + ) + }, + ); + self.ensure_zone_config_at_least(v3generator.sled_configs()).await?; // Wait until time is synchronized on all sleds before proceeding. self.wait_for_timesync(&sled_addresses).await?; @@ -975,35 +1042,22 @@ impl ServiceInner { info!(self.log, "Finished setting up Internal DNS and NTP"); // Wait until Cockroach has been initialized before running Nexus. - zone_types.insert(ZoneType::CockroachDb); - self.ensure_all_services_of_type(&service_plan, &zone_types).await?; + let v4generator = v3generator.new_version_with( + version4_cockroachdb, + &|zone_type: &OmicronZoneType| { + matches!(zone_type, OmicronZoneType::CockroachDb { .. 
}) + }, + ); + self.ensure_zone_config_at_least(v4generator.sled_configs()).await?; // Now that datasets and zones have started for CockroachDB, // perform one-time initialization of the cluster. self.initialize_cockroach(&service_plan).await?; - // Issue service initialization requests. - futures::future::join_all(service_plan.services.iter().map( - |(sled_address, services_request)| async move { - // With the current implementation of "initialize_services_on_sled", - // we must provide the set of *all* services that should be - // executing on a sled. - // - // This means re-requesting the DNS and NTP services, even if - // they are already running - this is fine, however, as the - // receiving sled agent doesn't modify the already-running - // service. - self.initialize_services_on_sled( - *sled_address, - &services_request.services, - ) - .await?; - Ok(()) - }, - )) - .await - .into_iter() - .collect::, SetupServiceError>>()?; + // Issue the rest of the zone initialization requests. + let v5generator = + v4generator.new_version_with(version5_everything, &|_| true); + self.ensure_zone_config_at_least(v5generator.sled_configs()).await?; info!(self.log, "Finished setting up services"); @@ -1036,3 +1090,272 @@ impl ServiceInner { Ok(()) } } + +/// Facilitates creating a sequence of OmicronZonesConfig objects for each sled +/// in a service plan to enable phased rollout of services +/// +/// The service plan itself defines which zones should appear on every sled. +/// However, we want to deploy these zones in phases: first internal DNS, then +/// NTP, then CockroachDB, etc. This interface generates sled configs for each +/// phase and enforces that: +/// +/// - each version includes all zones deployed in the previous iteration +/// - each sled's version number increases with each iteration +/// +struct OmicronZonesConfigGenerator<'a> { + service_plan: &'a ServicePlan, + last_configs: HashMap, +} + +impl<'a> OmicronZonesConfigGenerator<'a> { + /// Make a set of sled configurations for an initial version where each sled + /// has nothing deployed on it + fn initial_version( + service_plan: &'a ServicePlan, + initial_version: Generation, + ) -> Self { + let last_configs = service_plan + .services + .keys() + .map(|sled_address| { + ( + *sled_address, + OmicronZonesConfig { + generation: initial_version, + zones: vec![], + }, + ) + }) + .collect(); + Self { service_plan, last_configs } + } + + /// Returns the set of sled configurations produced for this version + fn sled_configs(&self) -> &HashMap { + &self.last_configs + } + + /// Produces a new set of configs for each sled based on the current set of + /// configurations, adding zones from the service plan matching + /// `zone_filter`. 
+ /// + /// # Panics + /// + /// If `version` is not larger than the current version + fn new_version_with( + self, + version: Generation, + zone_filter: &(dyn Fn(&OmicronZoneType) -> bool + Send + Sync), + ) -> OmicronZonesConfigGenerator<'a> { + let last_configs = self + .service_plan + .services + .iter() + .map(|(sled_address, sled_config)| { + let mut zones = match self.last_configs.get(sled_address) { + Some(config) => { + assert!(version > config.generation); + config.zones.clone() + } + None => Vec::new(), + }; + + let zones_already = + zones.iter().map(|z| z.id).collect::>(); + zones.extend( + sled_config + .zones + .iter() + .filter(|z| { + !zones_already.contains(&z.id) + && zone_filter(&z.zone_type) + }) + .cloned(), + ); + + let config = OmicronZonesConfig { generation: version, zones }; + (*sled_address, config) + }) + .collect(); + Self { service_plan: self.service_plan, last_configs } + } +} + +#[cfg(test)] +mod test { + use super::OmicronZonesConfigGenerator; + use crate::{ + params::OmicronZoneType, + rack_setup::plan::service::{Plan as ServicePlan, SledInfo}, + }; + use illumos_utils::zpool::ZpoolName; + use omicron_common::{address::Ipv6Subnet, api::external::Generation}; + + fn make_test_service_plan() -> ServicePlan { + let rss_config = crate::bootstrap::params::test_config(); + let fake_sleds = vec![ + SledInfo::new( + "d4ba4bbe-8542-4907-bc8f-48df53eb5089".parse().unwrap(), + Ipv6Subnet::new("fd00:1122:3344:101::1".parse().unwrap()), + "[fd00:1122:3344:101::1]:80".parse().unwrap(), + vec![ + ZpoolName::new_internal( + "c5885278-0ae2-4f1e-9223-07f2ada818e1".parse().unwrap(), + ), + ZpoolName::new_internal( + "57465977-8275-43aa-a320-b6cd5cb20ca6".parse().unwrap(), + ), + ZpoolName::new_external( + "886f9fe7-bf70-4ddd-ae92-764dc3ed14ab".parse().unwrap(), + ), + ZpoolName::new_external( + "4c9061b1-345b-4985-8cbd-a2a899f15b68".parse().unwrap(), + ), + ZpoolName::new_external( + "b2bd488e-b187-42a0-b157-9ab0f70d91a8".parse().unwrap(), + ), + ], + true, + ), + SledInfo::new( + "b4359dea-665d-41ca-a681-f55912f2d5d0".parse().unwrap(), + Ipv6Subnet::new("fd00:1122:3344:102::1".parse().unwrap()), + "[fd00:1122:3344:102::1]:80".parse().unwrap(), + vec![ + ZpoolName::new_internal( + "34d6b5e5-a09f-4e96-a599-fa306ce6d983".parse().unwrap(), + ), + ZpoolName::new_internal( + "e9b8d1ea-da29-4b61-a493-c0ed319098da".parse().unwrap(), + ), + ZpoolName::new_external( + "37f8e903-2adb-4613-b78c-198122c289f0".parse().unwrap(), + ), + ZpoolName::new_external( + "b50f787c-97b3-4b91-a5bd-99d11fc86fb8".parse().unwrap(), + ), + ZpoolName::new_external( + "809e50c8-930e-413a-950c-69a540b688e2".parse().unwrap(), + ), + ], + true, + ), + ]; + let service_plan = + ServicePlan::create_transient(&rss_config, fake_sleds) + .expect("failed to create service plan"); + + service_plan + } + + #[test] + fn test_omicron_zone_configs() { + let service_plan = make_test_service_plan(); + + // Verify the initial state. + let g1 = Generation::new(); + let v1 = + OmicronZonesConfigGenerator::initial_version(&service_plan, g1); + assert_eq!( + service_plan.services.keys().len(), + v1.sled_configs().keys().len() + ); + for (_, configs) in v1.sled_configs() { + assert_eq!(configs.generation, g1); + assert!(configs.zones.is_empty()); + } + + // Verify that we can add a bunch of zones of a given type. + let g2 = g1.next(); + let v2 = v1.new_version_with(g2, &|zone_type| { + matches!(zone_type, OmicronZoneType::InternalDns { .. 
}) + }); + let mut v2_nfound = 0; + for (_, config) in v2.sled_configs() { + assert_eq!(config.generation, g2); + v2_nfound += config.zones.len(); + for z in &config.zones { + // The only zones we should find are the Internal DNS ones. + assert!(matches!( + &z.zone_type, + OmicronZoneType::InternalDns { .. } + )); + } + } + // There should have been at least one InternalDns zone. + assert!(v2_nfound > 0); + + // Try again to add zones of the same type. This should be a no-op. + let g3 = g2.next(); + let v3 = v2.new_version_with(g3, &|zone_type| { + matches!(zone_type, OmicronZoneType::InternalDns { .. }) + }); + let mut v3_nfound = 0; + for (_, config) in v3.sled_configs() { + assert_eq!(config.generation, g3); + v3_nfound += config.zones.len(); + for z in &config.zones { + // The only zones we should find are the Internal DNS ones. + assert!(matches!( + &z.zone_type, + OmicronZoneType::InternalDns { .. } + )); + } + } + assert_eq!(v2_nfound, v3_nfound); + + // Now try adding zones of a different type. We should still have all + // the Internal DNS ones, plus a few more. + let g4 = g3.next(); + let v4 = v3.new_version_with(g4, &|zone_type| { + matches!(zone_type, OmicronZoneType::Nexus { .. }) + }); + let mut v4_nfound_dns = 0; + let mut v4_nfound = 0; + for (_, config) in v4.sled_configs() { + assert_eq!(config.generation, g4); + v4_nfound += config.zones.len(); + for z in &config.zones { + match &z.zone_type { + OmicronZoneType::InternalDns { .. } => v4_nfound_dns += 1, + OmicronZoneType::Nexus { .. } => (), + _ => panic!("unexpectedly found a wrong zone type"), + } + } + } + assert_eq!(v4_nfound_dns, v3_nfound); + assert!(v4_nfound > v3_nfound); + + // Now try adding zones that match no filter. Again, this should be a + // no-op but we should still have all the same zones we had before. + let g5 = g4.next(); + let v5 = v4.new_version_with(g5, &|_| false); + let mut v5_nfound = 0; + for (_, config) in v5.sled_configs() { + assert_eq!(config.generation, g5); + v5_nfound += config.zones.len(); + for z in &config.zones { + assert!(matches!( + &z.zone_type, + OmicronZoneType::InternalDns { .. } + | OmicronZoneType::Nexus { .. } + )); + } + } + assert_eq!(v4_nfound, v5_nfound); + + // Finally, try adding the rest of the zones. + let g6 = g5.next(); + let v6 = v5.new_version_with(g6, &|_| true); + let mut v6_nfound = 0; + for (sled_address, config) in v6.sled_configs() { + assert_eq!(config.generation, g6); + v6_nfound += config.zones.len(); + assert_eq!( + config.zones.len(), + service_plan.services.get(sled_address).unwrap().zones.len() + ); + } + assert!(v6_nfound > v5_nfound); + } +} diff --git a/sled-agent/src/server.rs b/sled-agent/src/server.rs index 903c8dabaa..b93ad0721c 100644 --- a/sled-agent/src/server.rs +++ b/sled-agent/src/server.rs @@ -70,9 +70,10 @@ impl Server { .await .map_err(|e| e.to_string())?; - let mut dropshot_config = dropshot::ConfigDropshot::default(); - dropshot_config.request_body_max_bytes = 1024 * 1024; - dropshot_config.bind_address = SocketAddr::V6(sled_address); + let dropshot_config = dropshot::ConfigDropshot { + bind_address: SocketAddr::V6(sled_address), + ..config.dropshot + }; let dropshot_log = log.new(o!("component" => "dropshot (SledAgent)")); let http_server = dropshot::HttpServerStarter::new( &dropshot_config, diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index b87c91768b..837c2a05df 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -20,10 +20,10 @@ //! 
of what other services Nexus wants to have executing on the sled. //! //! To accomplish this, the following interfaces are exposed: -//! - [ServiceManager::ensure_all_services_persistent] exposes an API to request -//! a set of services that should persist beyond reboot. +//! - [ServiceManager::ensure_all_omicron_zones_persistent] exposes an API to +//! request a set of Omicron zones that should persist beyond reboot. //! - [ServiceManager::activate_switch] exposes an API to specifically enable -//! or disable (via [ServiceManager::deactivate_switch]) the switch zone. +//! or disable (via [ServiceManager::deactivate_switch]) the switch zone. use crate::bootstrap::early_networking::{ EarlyNetworkSetup, EarlyNetworkSetupError, @@ -31,11 +31,11 @@ use crate::bootstrap::early_networking::{ use crate::bootstrap::BootstrapNetworking; use crate::config::SidecarRevision; use crate::params::{ - DendriteAsic, ServiceEnsureBody, ServiceType, ServiceZoneRequest, - ServiceZoneService, TimeSync, ZoneBundleCause, ZoneBundleMetadata, - ZoneType, + DendriteAsic, OmicronZoneConfig, OmicronZoneType, OmicronZonesConfig, + TimeSync, ZoneBundleCause, ZoneBundleMetadata, ZoneType, }; use crate::profile::*; +use crate::services_migration::{AllZoneRequests, SERVICES_LEDGER_FILENAME}; use crate::smf_helper::Service; use crate::smf_helper::SmfHelper; use crate::zone_bundle::BundleError; @@ -53,7 +53,7 @@ use illumos_utils::dladm::{ use illumos_utils::link::{Link, VnicAllocator}; use illumos_utils::opte::{DhcpCfg, Port, PortManager, PortTicket}; use illumos_utils::running_zone::{ - InstalledZone, RunCommandError, RunningZone, + InstalledZone, RunCommandError, RunningZone, ZoneBuilderFactory, }; use illumos_utils::zfs::ZONE_ZFS_RAMDISK_DATASET_MOUNTPOINT; use illumos_utils::zone::AddressRequest; @@ -89,28 +89,26 @@ use omicron_common::nexus_config::{ }; use once_cell::sync::OnceCell; use rand::prelude::SliceRandom; -use rand::SeedableRng; use sled_hardware::is_gimlet; use sled_hardware::underlay; use sled_hardware::underlay::BOOTSTRAP_PREFIX; use sled_hardware::Baseboard; use sled_hardware::SledMode; -use sled_storage::dataset::{CONFIG_DATASET, INSTALL_DATASET, ZONE_DATASET}; +use sled_storage::dataset::{ + DatasetKind, DatasetName, CONFIG_DATASET, INSTALL_DATASET, ZONE_DATASET, +}; use sled_storage::manager::StorageHandle; use slog::Logger; use std::collections::BTreeMap; use std::collections::HashSet; -use std::iter; use std::iter::FromIterator; use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; -use std::time::{SystemTime, UNIX_EPOCH}; use tokio::io::AsyncWriteExt; -use tokio::sync::oneshot; use tokio::sync::Mutex; -use tokio::sync::MutexGuard; +use tokio::sync::{oneshot, MutexGuard}; use tokio::task::JoinHandle; use uuid::Uuid; @@ -198,9 +196,6 @@ pub enum Error { #[error("Could not initialize service {service} as requested: {message}")] BadServiceRequest { service: String, message: String }, - #[error("Services already configured for this Sled Agent")] - ServicesAlreadyConfigured, - #[error("Failed to get address: {0}")] GetAddressFailure(#[from] illumos_utils::zone::GetAddressError), @@ -224,6 +219,17 @@ pub enum Error { #[error("Error querying simnet devices")] Simnet(#[from] GetSimnetError), + + #[error( + "Requested generation ({requested}) is older than current ({current})" + )] + RequestedConfigOutdated { requested: Generation, current: Generation }, + + #[error("Requested generation {0} with different zones than before")] 
+ RequestedConfigConflicts(Generation), + + #[error("Error migrating old-format services ledger: {0:#}")] + ServicesMigration(anyhow::Error), } impl Error { @@ -237,8 +243,18 @@ impl Error { impl From for omicron_common::api::external::Error { fn from(err: Error) -> Self { - omicron_common::api::external::Error::InternalError { - internal_message: err.to_string(), + match err { + err @ Error::RequestedConfigConflicts(_) => { + omicron_common::api::external::Error::invalid_request( + &err.to_string(), + ) + } + err @ Error::RequestedConfigOutdated { .. } => { + omicron_common::api::external::Error::conflict(&err.to_string()) + } + _ => omicron_common::api::external::Error::InternalError { + internal_message: err.to_string(), + }, } } } @@ -274,42 +290,176 @@ impl Config { } // The filename of the ledger, within the provided directory. -const SERVICES_LEDGER_FILENAME: &str = "services.json"; - -// A wrapper around `ZoneRequest`, which allows it to be serialized -// to a JSON file. -#[derive(Clone, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] -struct AllZoneRequests { - generation: Generation, - requests: Vec, +const ZONES_LEDGER_FILENAME: &str = "omicron-zones.json"; + +/// Combines the Nexus-provided `OmicronZonesConfig` (which describes what Nexus +/// wants for all of its zones) with the locally-determined configuration for +/// these zones. +#[derive( + Clone, Debug, serde::Serialize, serde::Deserialize, schemars::JsonSchema, +)] +pub struct OmicronZonesConfigLocal { + /// generation of the Omicron-provided part of the configuration + /// + /// This generation number is outside of Sled Agent's control. We store + /// exactly what we were given and use this number to decide when to + /// fail requests to establish an outdated configuration. + /// + /// You can think of this as a major version number, with + /// `ledger_generation` being a minor version number. See + /// `is_newer_than()`. + pub omicron_generation: Generation, + + /// ledger-managed generation number + /// + /// This generation is managed by the ledger facility itself. It's bumped + /// whenever we write a new ledger. In practice, we don't currently have + /// any reason to bump this _for a given Omicron generation_ so it's + /// somewhat redundant. In principle, if we needed to modify the ledgered + /// configuration due to some event that doesn't change the Omicron config + /// (e.g., if we wanted to move the root filesystem to a different path), we + /// could do that by bumping this generation. 
+ pub ledger_generation: Generation, + pub zones: Vec, } -impl Default for AllZoneRequests { - fn default() -> Self { - Self { generation: Generation::new(), requests: vec![] } +impl Ledgerable for OmicronZonesConfigLocal { + fn is_newer_than(&self, other: &OmicronZonesConfigLocal) -> bool { + self.omicron_generation > other.omicron_generation + || (self.omicron_generation == other.omicron_generation + && self.ledger_generation >= other.ledger_generation) + } + + fn generation_bump(&mut self) { + self.ledger_generation = self.ledger_generation.next(); } } -impl Ledgerable for AllZoneRequests { - fn is_newer_than(&self, other: &AllZoneRequests) -> bool { - self.generation >= other.generation +impl OmicronZonesConfigLocal { + /// Returns the initial configuration for generation 1, which has no zones + pub fn initial() -> OmicronZonesConfigLocal { + OmicronZonesConfigLocal { + omicron_generation: Generation::new(), + ledger_generation: Generation::new(), + zones: vec![], + } } - fn generation_bump(&mut self) { - self.generation = self.generation.next(); + pub fn to_omicron_zones_config(self) -> OmicronZonesConfig { + OmicronZonesConfig { + generation: self.omicron_generation, + zones: self.zones.into_iter().map(|z| z.zone).collect(), + } } } -// This struct represents the combo of "what zone did you ask for" + "where did -// we put it". -#[derive(Clone, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] -struct ZoneRequest { - zone: ServiceZoneRequest, - // TODO: Consider collapsing "root" into ServiceZoneRequest +/// Combines the Nexus-provided `OmicronZoneConfig` (which describes what Nexus +/// wants for this zone) with any locally-determined configuration (like the +/// path to the root filesystem) +#[derive( + Clone, Debug, serde::Serialize, serde::Deserialize, schemars::JsonSchema, +)] +pub struct OmicronZoneConfigLocal { + pub zone: OmicronZoneConfig, #[schemars(with = "String")] + pub root: Utf8PathBuf, +} + +/// Describes how we want a switch zone to be configured +/// +/// This is analogous to `OmicronZoneConfig`, but for the switch zone (which is +/// operated autonomously by the Sled Agent, not managed by Omicron). +#[derive(Clone)] +struct SwitchZoneConfig { + id: Uuid, + addresses: Vec, + services: Vec, +} + +/// Describes one of several services that may be deployed in a switch zone +/// +/// Some of these are only present in certain configurations (e.g., with a real +/// Tofino vs. SoftNPU) or are configured differently depending on the +/// configuration. +#[derive(Clone)] +enum SwitchService { + ManagementGatewayService, + Wicketd { baseboard: Baseboard }, + Dendrite { asic: DendriteAsic }, + Tfport { pkt_source: String, asic: DendriteAsic }, + Uplink, + MgDdm { mode: String }, + Mgd, + SpSim, +} + +impl crate::smf_helper::Service for SwitchService { + fn service_name(&self) -> String { + match self { + SwitchService::ManagementGatewayService => "mgs", + SwitchService::Wicketd { .. } => "wicketd", + SwitchService::Dendrite { .. } => "dendrite", + SwitchService::Tfport { .. } => "tfport", + SwitchService::Uplink { .. } => "uplink", + SwitchService::MgDdm { .. 
} => "mg-ddm", + SwitchService::Mgd => "mgd", + SwitchService::SpSim => "sp-sim", + } + .to_owned() + } + fn smf_name(&self) -> String { + format!("svc:/oxide/{}", self.service_name()) + } + fn should_import(&self) -> bool { + true + } +} + +/// Combines the generic `SwitchZoneConfig` with other locally-determined +/// configuration +/// +/// This is analogous to `OmicronZoneConfigLocal`, but for the switch zone. +struct SwitchZoneConfigLocal { + zone: SwitchZoneConfig, root: Utf8PathBuf, } +/// Describes either an Omicron-managed zone or the switch zone, used for +/// functions that operate on either one or the other +enum ZoneArgs<'a> { + Omicron(&'a OmicronZoneConfigLocal), + Switch(&'a SwitchZoneConfigLocal), +} + +impl<'a> ZoneArgs<'a> { + /// If this is an Omicron zone, return its type + pub fn omicron_type(&self) -> Option<&'a OmicronZoneType> { + match self { + ZoneArgs::Omicron(zone_config) => Some(&zone_config.zone.zone_type), + ZoneArgs::Switch(_) => None, + } + } + + /// If this is a sled-local (switch) zone, iterate over the services it's + /// supposed to be running + pub fn sled_local_services( + &self, + ) -> Box + 'a> { + match self { + ZoneArgs::Omicron(_) => Box::new(std::iter::empty()), + ZoneArgs::Switch(request) => Box::new(request.zone.services.iter()), + } + } + + /// Return the root filesystem path for this zone + pub fn root(&self) -> &Utf8Path { + match self { + ZoneArgs::Omicron(zone_config) => &zone_config.root, + ZoneArgs::Switch(zone_request) => &zone_request.root, + } + } +} + struct Task { // A signal for the initializer task to terminate exit_tx: oneshot::Sender<()>, @@ -335,7 +485,7 @@ enum SledLocalZone { // of certain links. Initializing { // The request for the zone - request: ServiceZoneRequest, + request: SwitchZoneConfig, // A background task which keeps looping until the zone is initialized worker: Option, // Filesystems for the switch zone to mount @@ -348,7 +498,7 @@ enum SledLocalZone { // The Zone is currently running. Running { // The original request for the zone - request: ServiceZoneRequest, + request: SwitchZoneConfig, // The currently running zone zone: RunningZone, }, @@ -485,6 +635,173 @@ impl ServiceManager { .collect() } + async fn all_omicron_zone_ledgers(&self) -> Vec { + if let Some(dir) = self.inner.ledger_directory_override.get() { + return vec![dir.join(ZONES_LEDGER_FILENAME)]; + } + let resources = self.inner.storage.get_latest_resources().await; + resources + .all_m2_mountpoints(CONFIG_DATASET) + .into_iter() + .map(|p| p.join(ZONES_LEDGER_FILENAME)) + .collect() + } + + // Loads persistent configuration about any Omicron-managed zones that we're + // supposed to be running. + // + // For historical reasons, there are two possible places this configuration + // could live, each with its own format. This function first checks the + // newer one. If no configuration was found there, it checks the older + // one. If only the older one was found, it is converted into the new form + // so that future calls will only look at the new form. + async fn load_ledgered_zones( + &self, + // This argument attempts to ensure that the caller holds the right + // lock. + _map: &MutexGuard<'_, BTreeMap>, + ) -> Result>, Error> { + // First, try to load the current software's zone ledger. If that + // works, we're done. 
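One property of the `Ledgerable` impl above is worth keeping in mind while reading the loading code that follows: `is_newer_than` is lexicographic on `(omicron_generation, ledger_generation)`, and a tie on both fields resolves in favor of `self`, so a just-committed ledger supersedes the on-disk copy it was read from. A minimal property sketch, with plain integers standing in for `Generation`:

    #[test]
    fn zones_ledger_generations_compare_lexicographically() {
        // Mirrors OmicronZonesConfigLocal::is_newer_than with the two
        // generations flattened into a tuple.
        fn is_newer_than(a: (u64, u64), b: (u64, u64)) -> bool {
            a.0 > b.0 || (a.0 == b.0 && a.1 >= b.1)
        }
        assert!(is_newer_than((2, 1), (1, 9))); // omicron generation dominates
        assert!(is_newer_than((2, 3), (2, 3))); // ties go to `self`
        assert!(!is_newer_than((2, 2), (2, 3))); // higher ledger generation wins
    }
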
+ let log = &self.inner.log; + let ledger_paths = self.all_omicron_zone_ledgers().await; + info!(log, "Loading Omicron zones from: {ledger_paths:?}"); + let maybe_ledger = + Ledger::::new(log, ledger_paths.clone()) + .await; + + if let Some(ledger) = maybe_ledger { + info!( + log, + "Loaded Omicron zones"; + "zones_config" => ?ledger.data() + ); + return Ok(Some(ledger)); + } + + // Now look for the ledger used by previous versions. If we find it, + // we'll convert it and write out a new ledger used by the current + // software. + info!( + log, + "Loading Omicron zones - No zones detected \ + (will look for old-format services)" + ); + let services_ledger_paths = self.all_service_ledgers().await; + info!( + log, + "Loading old-format services from: {services_ledger_paths:?}" + ); + + let maybe_ledger = + Ledger::::new(log, services_ledger_paths.clone()) + .await; + let maybe_converted = match maybe_ledger { + None => { + // The ledger ignores all errors attempting to load files. That + // might be fine most of the time. In this case, we want to + // raise a big red flag if we find an old-format ledger that we + // can't process. + if services_ledger_paths.iter().any(|p| p.exists()) { + Err(Error::ServicesMigration(anyhow!( + "failed to read or parse old-format ledger, \ + but one exists" + ))) + } else { + // There was no old-format ledger at all. + return Ok(None); + } + } + Some(ledger) => { + let all_services = ledger.into_inner(); + OmicronZonesConfigLocal::try_from(all_services) + .map_err(Error::ServicesMigration) + } + }; + + match maybe_converted { + Err(error) => { + // We've tried to test thoroughly so that this should never + // happen. If for some reason it does happen, engineering + // intervention is likely to be required to figure out how to + // proceed. The current software does not directly support + // whatever was in the ledger, and it's not safe to just come up + // with no zones when we're supposed to be running stuff. We'll + // need to figure out what's unexpected about what we found in + // the ledger and figure out how to fix the + // conversion. + error!( + log, + "Loading Omicron zones - found services but failed \ + to convert them (support intervention required): \ + {:#}", + error + ); + return Err(error); + } + Ok(new_config) => { + // We've successfully converted the old ledger. Write a new + // one. + info!( + log, + "Successfully migrated old-format services ledger to \ + zones ledger" + ); + let mut ledger = Ledger::::new_with( + log, + ledger_paths.clone(), + new_config, + ); + + ledger.commit().await?; + + // We could consider removing the old ledger here. That would + // not guarantee that it would be gone, though, because we could + // crash during `ledger.commit()` above having written at least + // one of the new ledgers. In that case, we won't go through + // this code path again on restart. If we wanted to ensure the + // old-format ledger was gone after the migration, we could + // consider unconditionally removing the old ledger paths in the + // caller, after we've got a copy of the new-format ledger. + // + // Should we? In principle, it shouldn't matter either way + // because we will never look at the old-format ledger unless we + // don't have a new-format one, and we should now have a + // new-format one forever now. + // + // When might it matter? Two cases: + // + // (1) If the sled agent is downgraded to a previous version + // that doesn't know about the new-format ledger. Do we + // want that sled agent to use the old-format one? 
It + // depends. If that downgrade happens immediately because + // the upgrade to the first new-format version was a + // disaster, then we'd probably rather the downgraded sled + // agent _did_ start its zones. If the downgrade happens + // months later, potentially after various additional + // reconfigurations, then that old-format ledger is probably + // out of date and shouldn't be used. There's no way to + // really know which case we're in, but the latter seems + // quite unlikely (why would we downgrade so far back after + // so long?). So that's a reason to keep the old-format + // ledger. + // + // (2) Suppose a developer or Oxide support engineer removes the + // new ledger for some reason, maybe thinking sled agent + // would come up with no zones running. They'll be + // surprised to discover that it actually starts running a + // potentially old set of zones. This probably only matters + // on a production system, and even then, it probably + // shouldn't happen. + // + // Given these cases, we're left ambivalent. We choose to keep + // the old ledger around. If nothing else, if something goes + // wrong, we'll have a copy of its last contents! + Ok(Some(ledger)) + } + } + } + // TODO(https://github.com/oxidecomputer/omicron/issues/2973): // // The sled agent retries this function indefinitely at the call-site, but @@ -495,65 +812,60 @@ impl ServiceManager { // more clearly. pub async fn load_services(&self) -> Result<(), Error> { let log = &self.inner.log; - let ledger_paths = self.all_service_ledgers().await; - info!(log, "Loading services from: {ledger_paths:?}"); - let mut existing_zones = self.inner.zones.lock().await; let Some(mut ledger) = - Ledger::::new(log, ledger_paths).await + self.load_ledgered_zones(&existing_zones).await? else { - info!(log, "Loading services - No services detected"); + // Nothing found -- nothing to do. + info!( + log, + "Loading Omicron zones - \ + no zones nor old-format services found" + ); return Ok(()); }; - let services = ledger.data_mut(); + + let zones_config = ledger.data_mut(); + info!( + log, + "Loaded Omicron zones"; + "zones_config" => ?zones_config + ); + let omicron_zones_config = + zones_config.clone().to_omicron_zones_config(); // Initialize internal DNS only first: we need it to look up the // boundary switch addresses. This dependency is implicit: when we call - // `ensure_all_services` below, we eventually land in + // `ensure_all_omicron_zones` below, we eventually land in // `opte_ports_needed()`, which for some service types (including Ntp // but _not_ including InternalDns), we perform internal DNS lookups. let all_zones_request = self - .ensure_all_services( + .ensure_all_omicron_zones( &mut existing_zones, - &AllZoneRequests::default(), - ServiceEnsureBody { - services: services - .requests - .clone() - .into_iter() - .filter(|svc| { - matches!( - svc.zone.zone_type, - ZoneType::InternalDns | ZoneType::Ntp - ) - }) - .map(|zone_request| zone_request.zone) - .collect(), + None, + omicron_zones_config.clone(), + |z: &OmicronZoneConfig| { + matches!(z.zone_type, OmicronZoneType::InternalDns { .. }) }, ) .await?; // Initialize NTP services next as they are required for time // synchronization, which is a pre-requisite for the other services. We - // keep `ZoneType::InternalDns` because `ensure_all_services` is - // additive. + // keep `OmicronZoneType::InternalDns` because + // `ensure_all_omicron_zones` is additive. 
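The staged bring-up here mirrors what RSS does over the network: three passes with strictly widening filters, where each later pass re-requests everything the earlier ones did. A compact sketch of the sequence (predicates shown over `OmicronZoneType` for brevity; the real call sites match on the `zone_type` field of an `OmicronZoneConfig`):

    use crate::params::OmicronZoneType;

    fn stage1(z: &OmicronZoneType) -> bool {
        matches!(z, OmicronZoneType::InternalDns { .. })
    }
    // stage2 calls stage1, making the superset relationship -- and hence
    // the additivity of repeated ensure_all_omicron_zones calls -- explicit.
    fn stage2(z: &OmicronZoneType) -> bool {
        stage1(z)
            || matches!(
                z,
                OmicronZoneType::BoundaryNtp { .. }
                    | OmicronZoneType::InternalNtp { .. }
            )
    }
    fn stage3(_: &OmicronZoneType) -> bool {
        true
    }
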
let all_zones_request = self - .ensure_all_services( + .ensure_all_omicron_zones( &mut existing_zones, - &all_zones_request, - ServiceEnsureBody { - services: services - .requests - .clone() - .into_iter() - .filter(|svc| { - matches!( - svc.zone.zone_type, - ZoneType::InternalDns | ZoneType::Ntp - ) - }) - .map(|zone_request| zone_request.zone) - .collect(), + Some(&all_zones_request), + omicron_zones_config.clone(), + |z: &OmicronZoneConfig| { + matches!( + z.zone_type, + OmicronZoneType::InternalDns { .. } + | OmicronZoneType::BoundaryNtp { .. } + | OmicronZoneType::InternalNtp { .. } + ) }, ) .await?; @@ -595,17 +907,11 @@ impl ServiceManager { let mut existing_zones = self.inner.zones.lock().await; // Initialize all remaining services - self.ensure_all_services( + self.ensure_all_omicron_zones( &mut existing_zones, - &all_zones_request, - ServiceEnsureBody { - services: services - .requests - .clone() - .into_iter() - .map(|zone_request| zone_request.zone) - .collect(), - }, + Some(&all_zones_request), + omicron_zones_config, + |_| true, ) .await?; Ok(()) @@ -661,11 +967,11 @@ impl ServiceManager { // Check the services intended to run in the zone to determine whether any // physical devices need to be mapped into the zone when it is created. - fn devices_needed(req: &ServiceZoneRequest) -> Result, Error> { + fn devices_needed(zone_args: &ZoneArgs<'_>) -> Result, Error> { let mut devices = vec![]; - for svc in &req.services { - match &svc.details { - ServiceType::Dendrite { asic: DendriteAsic::TofinoAsic } => { + for svc_details in zone_args.sled_local_services() { + match svc_details { + SwitchService::Dendrite { asic: DendriteAsic::TofinoAsic } => { if let Ok(Some(n)) = tofino::get_tofino() { if let Ok(device_path) = n.device_path() { devices.push(device_path); @@ -676,7 +982,7 @@ impl ServiceManager { device: "tofino".to_string(), }); } - ServiceType::Dendrite { + SwitchService::Dendrite { asic: DendriteAsic::SoftNpuPropolisDevice, } => { devices.push("/dev/tty03".into()); @@ -700,18 +1006,17 @@ impl ServiceManager { // bootstrap address. fn bootstrap_address_needed( &self, - req: &ServiceZoneRequest, + zone_args: &ZoneArgs<'_>, ) -> Result, Error> { - match req.zone_type { - ZoneType::Switch => { - let link = self - .inner - .bootstrap_vnic_allocator - .new_bootstrap() - .map_err(Error::SledLocalVnicCreation)?; - Ok(Some((link, self.inner.switch_zone_bootstrap_address))) - } - _ => Ok(None), + if let ZoneArgs::Switch(_) = zone_args { + let link = self + .inner + .bootstrap_vnic_allocator + .new_bootstrap() + .map_err(Error::SledLocalVnicCreation)?; + Ok(Some((link, self.inner.switch_zone_bootstrap_address))) + } else { + Ok(None) } } @@ -736,7 +1041,7 @@ impl ServiceManager { // local addresses in the zone. fn links_needed( &self, - req: &ServiceZoneRequest, + zone_args: &ZoneArgs<'_>, ) -> Result, Error> { let mut links: Vec<(Link, bool)> = Vec::new(); @@ -744,12 +1049,12 @@ impl ServiceManager { Error::Underlay(underlay::Error::SystemDetection(e)) })?; - for svc in &req.services { - match &svc.details { - ServiceType::Tfport { pkt_source, asic: _ } => { - // The tfport service requires a MAC device to/from which sidecar - // packets may be multiplexed. If the link isn't present, don't - // bother trying to start the zone. + for svc_details in zone_args.sled_local_services() { + match &svc_details { + SwitchService::Tfport { pkt_source, asic: _ } => { + // The tfport service requires a MAC device to/from which + // sidecar packets may be multiplexed. 
+                    // present, don't bother trying to start the zone.
                    match Dladm::verify_link(pkt_source) {
                        Ok(link) => {
                            // It's important that tfpkt does **not** receive a
@@ -765,7 +1070,7 @@ impl ServiceManager {
                        }
                    }
                }
-                ServiceType::MgDdm { .. } => {
+                SwitchService::MgDdm { .. } => {
                    // If on a non-gimlet, sled-agent can be configured to map
                    // links into the switch zone. Validate those links here.
                    for link in &self.inner.switch_zone_maghemite_links {
@@ -796,15 +1101,18 @@ impl ServiceManager {
    }

    // Check the services intended to run in the zone to determine whether any
-    // OPTE ports need to be created and mapped into the zone when it is created.
+    // OPTE ports need to be created and mapped into the zone when it is
+    // created.
    async fn opte_ports_needed(
        &self,
-        req: &ServiceZoneRequest,
+        zone_args: &ZoneArgs<'_>,
    ) -> Result<Vec<(Port, PortTicket)>, Error> {
        // Only some services currently need OPTE ports
        if !matches!(
-            req.zone_type,
-            ZoneType::ExternalDns | ZoneType::Nexus | ZoneType::Ntp
+            zone_args.omicron_type(),
+            Some(OmicronZoneType::ExternalDns { .. })
+                | Some(OmicronZoneType::Nexus { .. })
+                | Some(OmicronZoneType::BoundaryNtp { .. })
        ) {
            return Ok(vec![]);
        }
@@ -851,100 +1159,122 @@ impl ServiceManager {
            })
            .collect();

-        let mut ports = vec![];
-        for svc in &req.services {
-            let external_ip;
-            let (nic, snat, external_ips) = match &svc.details {
-                ServiceType::Nexus { external_ip, nic, .. } => {
-                    (nic, None, std::slice::from_ref(external_ip))
-                }
-                ServiceType::ExternalDns { dns_address, nic, .. } => {
-                    external_ip = dns_address.ip();
-                    (nic, None, std::slice::from_ref(&external_ip))
-                }
-                ServiceType::BoundaryNtp { nic, snat_cfg, .. } => {
-                    (nic, Some(*snat_cfg), &[][..])
-                }
-                _ => continue,
-            };
-
-            // Create the OPTE port for the service.
-            // Note we don't plumb any firewall rules at this point,
-            // Nexus will plumb them down later but the default OPTE
-            // config allows outbound access which is enough for
-            // Boundary NTP which needs to come up before Nexus.
-            let port = port_manager
-                .create_port(nic, snat, external_ips, &[], DhcpCfg::default())
-                .map_err(|err| Error::ServicePortCreation {
-                    service: svc.details.to_string(),
-                    err: Box::new(err),
-                })?;
+        let external_ip;
+        let (zone_type_str, nic, snat, floating_ips) = match &zone_args
            .omicron_type()
        {
+            Some(
+                zone_type @ OmicronZoneType::Nexus { external_ip, nic, .. },
+            ) => (
+                zone_type.zone_type_str(),
+                nic,
+                None,
+                std::slice::from_ref(external_ip),
+            ),
+            Some(
+                zone_type @ OmicronZoneType::ExternalDns {
+                    dns_address,
+                    nic,
+                    ..
+                },
+            ) => {
+                external_ip = dns_address.ip();
+                (
+                    zone_type.zone_type_str(),
+                    nic,
+                    None,
+                    std::slice::from_ref(&external_ip),
+                )
+            }
+            Some(
+                zone_type @ OmicronZoneType::BoundaryNtp {
+                    nic, snat_cfg, ..
+                },
+            ) => (zone_type.zone_type_str(), nic, Some(*snat_cfg), &[][..]),
+            _ => unreachable!("unexpected zone type"),
+        };

-            // We also need to update the switch with the NAT mappings
-            let (target_ip, first_port, last_port) = match snat {
-                Some(s) => (s.ip, s.first_port, s.last_port),
-                None => (external_ips[0], 0, u16::MAX),
-            };
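// [Editor's sketch] For the switch NAT mapping created below: a boundary
// NTP zone only maps its SNAT port slice back to the zone, while Nexus and
// external DNS own the full port range of a floating IP. A hedged,
// standalone version of that selection (the `SnatCfg` shape here is
// hypothetical; the real fields come from the NIC/SNAT config used above):
use std::net::IpAddr;

struct SnatCfg { ip: IpAddr, first_port: u16, last_port: u16 }

fn nat_target(snat: Option<&SnatCfg>, floating_ips: &[IpAddr]) -> (IpAddr, u16, u16) {
    match snat {
        // SNAT: only the allocated slice of source ports maps to this zone.
        Some(s) => (s.ip, s.first_port, s.last_port),
        // Floating IP: the zone owns every port on the address.
        None => (floating_ips[0], 0, u16::MAX),
    }
}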
+        // Create the OPTE port for the service.
+        // Note we don't plumb any firewall rules at this point;
+        // Nexus will plumb them down later, but the default OPTE
+        // config allows outbound access, which is enough for
+        // Boundary NTP, which needs to come up before Nexus.
+        let port = port_manager
+            .create_port(nic, snat, None, floating_ips, &[], DhcpCfg::default())
+            .map_err(|err| Error::ServicePortCreation {
+                service: zone_type_str.clone(),
+                err: Box::new(err),
+            })?;

-            for dpd_client in &dpd_clients {
-                // TODO-correctness(#2933): If we fail part-way we need to
-                // clean up previous entries instead of leaking them.
-                let nat_create = || async {
-                    info!(
-                        self.inner.log, "creating NAT entry for service";
-                        "service" => ?svc,
-                    );
+        // We also need to update the switch with the NAT mappings
+        // XXX: need to revisit if any services get more than one
+        // address.
+        let (target_ip, first_port, last_port) = match snat {
+            Some(s) => (s.ip, s.first_port, s.last_port),
+            None => (floating_ips[0], 0, u16::MAX),
+        };

-                    dpd_client
-                        .ensure_nat_entry(
-                            &self.inner.log,
-                            target_ip.into(),
-                            dpd_client::types::MacAddr {
-                                a: port.0.mac().into_array(),
-                            },
-                            first_port,
-                            last_port,
-                            port.0.vni().as_u32(),
-                            underlay_address,
-                        )
-                        .await
-                        .map_err(BackoffError::transient)?;
+        for dpd_client in &dpd_clients {
+            // TODO-correctness(#2933): If we fail part-way we need to
+            // clean up previous entries instead of leaking them.
+            let nat_create = || async {
+                info!(
+                    self.inner.log, "creating NAT entry for service";
+                    "zone_type" => &zone_type_str,
+                );

-                    Ok::<(), BackoffError<DpdError<types::Error>>>(())
-                };
-                let log_failure = |error, _| {
-                    warn!(
-                        self.inner.log, "failed to create NAT entry for service";
-                        "error" => ?error,
-                        "service" => ?svc,
-                    );
-                };
-                retry_notify(
-                    retry_policy_internal_service_aggressive(),
-                    nat_create,
-                    log_failure,
-                )
-                .await?;
-            }
+                dpd_client
+                    .ensure_nat_entry(
+                        &self.inner.log,
+                        target_ip.into(),
+                        dpd_client::types::MacAddr {
+                            a: port.0.mac().into_array(),
+                        },
+                        first_port,
+                        last_port,
+                        port.0.vni().as_u32(),
+                        underlay_address,
+                    )
+                    .await
+                    .map_err(BackoffError::transient)?;

-            ports.push(port);
+                Ok::<(), BackoffError<DpdError<types::Error>>>(())
+            };
+            let log_failure = |error, _| {
+                warn!(
+                    self.inner.log, "failed to create NAT entry for service";
+                    "error" => ?error,
+                    "zone_type" => &zone_type_str,
+                );
+            };
+            retry_notify(
+                retry_policy_internal_service_aggressive(),
+                nat_create,
+                log_failure,
+            )
+            .await?;
        }
-
-        Ok(ports)
+        Ok(vec![port])
    }

    // Check the services intended to run in the zone to determine whether any
    // additional privileges need to be enabled for the zone.
-    fn privs_needed(req: &ServiceZoneRequest) -> Vec<String> {
+    fn privs_needed(zone_args: &ZoneArgs<'_>) -> Vec<String> {
        let mut needed = Vec::new();
-        for svc in &req.services {
-            match &svc.details {
-                ServiceType::Tfport { .. } => {
+        for svc_details in zone_args.sled_local_services() {
+            match svc_details {
+                SwitchService::Tfport { .. } => {
                    needed.push("default".to_string());
                    needed.push("sys_dl_config".to_string());
                }
-                ServiceType::BoundaryNtp { .. }
-                | ServiceType::InternalNtp { .. } => {
+                _ => (),
+            }
+        }
+
+        if let Some(omicron_zone_type) = zone_args.omicron_type() {
+            match omicron_zone_type {
+                OmicronZoneType::BoundaryNtp { .. }
+                | OmicronZoneType::InternalNtp { .. } => {
                    needed.push("default".to_string());
                    needed.push("sys_time".to_string());
                    needed.push("proc_priocntl".to_string());
@@ -1048,13 +1378,13 @@ impl ServiceManager {

    async fn initialize_zone(
        &self,
-        request: &ZoneRequest,
+        request: ZoneArgs<'_>,
        filesystems: &[zone::Fs],
        data_links: &[String],
    ) -> Result<RunningZone, Error> {
-        let device_names = Self::devices_needed(&request.zone)?;
+        let device_names = Self::devices_needed(&request)?;
        let (bootstrap_vnic, bootstrap_name_and_address) =
-            match self.bootstrap_address_needed(&request.zone)? {
+            match self.bootstrap_address_needed(&request)? {
                Some((vnic, address)) => {
                    let name = vnic.name().to_string();
                    (Some(vnic), Some((name, address)))
@@ -1067,20 +1397,26 @@ impl ServiceManager {
        let links: Vec<Link>;
        let links_need_link_local: Vec<bool>;
        (links, links_need_link_local) =
-            self.links_needed(&request.zone)?.into_iter().unzip();
-        let opte_ports = self.opte_ports_needed(&request.zone).await?;
-        let limit_priv = Self::privs_needed(&request.zone);
+            self.links_needed(&request)?.into_iter().unzip();
+        let opte_ports = self.opte_ports_needed(&request).await?;
+        let limit_priv = Self::privs_needed(&request);

        // If the zone is managing a particular dataset, plumb that
        // dataset into the zone. Additionally, construct a "unique enough" name
        // so we can create multiple zones of this type without collision.
-        let unique_name = request.zone.zone_name_unique_identifier();
-        let datasets = request
-            .zone
-            .dataset
-            .iter()
-            .map(|d| zone::Dataset { name: d.name.full() })
-            .collect::<Vec<_>>();
+        let unique_name = match &request {
+            ZoneArgs::Omicron(zone_config) => Some(zone_config.zone.id),
+            ZoneArgs::Switch(_) => None,
+        };
+        let datasets: Vec<_> = match &request {
+            ZoneArgs::Omicron(zone_config) => zone_config
+                .zone
+                .dataset_name()
+                .map(|n| zone::Dataset { name: n.full() })
+                .into_iter()
+                .collect(),
+            ZoneArgs::Switch(_) => vec![],
+        };

        let devices: Vec<zone::Device> = device_names
            .iter()
@@ -1103,30 +1439,50 @@ impl ServiceManager {
                .push(boot_zpool.dataset_mountpoint(INSTALL_DATASET));
        }

-        let installed_zone = InstalledZone::install(
-            &self.inner.log,
-            &self.inner.underlay_vnic_allocator,
-            &request.root,
-            zone_image_paths.as_slice(),
-            &request.zone.zone_type.to_string(),
-            unique_name,
-            datasets.as_slice(),
-            &filesystems,
-            &data_links,
-            &devices,
-            opte_ports,
-            bootstrap_vnic,
-            links,
-            limit_priv,
-        )
-        .await?;
+        let zone_type_str = match &request {
+            ZoneArgs::Omicron(zone_config) => {
+                zone_config.zone.zone_type.zone_type_str()
+            }
+            ZoneArgs::Switch(_) => "switch".to_string(),
+        };
+
+        let mut zone_builder = ZoneBuilderFactory::default().builder();
+        if let Some(uuid) = unique_name {
+            zone_builder = zone_builder.with_unique_name(uuid);
+        }
+        if let Some(vnic) = bootstrap_vnic {
+            zone_builder = zone_builder.with_bootstrap_vnic(vnic);
+        }
+        let installed_zone = zone_builder
+            .with_log(self.inner.log.clone())
+            .with_underlay_vnic_allocator(&self.inner.underlay_vnic_allocator)
+            .with_zone_root_path(&request.root())
+            .with_zone_image_paths(zone_image_paths.as_slice())
+            .with_zone_type(&zone_type_str)
+            .with_datasets(datasets.as_slice())
+            .with_filesystems(&filesystems)
+            .with_data_links(&data_links)
+            .with_devices(&devices)
+            .with_opte_ports(opte_ports)
+            .with_links(links)
+            .with_limit_priv(limit_priv)
+            .install()
+            .await?;

        // TODO(https://github.com/oxidecomputer/omicron/issues/1898):
        //
        // These zones are self-assembling -- after they boot, there should
        // be no "zlogin" necessary to initialize.
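// [Editor's sketch] The `ZoneBuilderFactory` change above replaces a
// fourteen-argument positional `InstalledZone::install` call with named
// setters, where per-variant options (`with_unique_name`,
// `with_bootstrap_vnic`) are applied only when present. The shape of that
// pattern, with hypothetical fields standing in for the real builder's:
#[derive(Default)]
struct SketchZoneBuilder {
    zone_type: Option<String>,
    unique_name: Option<u128>, // stand-in for the zone's Uuid
}

impl SketchZoneBuilder {
    fn with_zone_type(mut self, t: &str) -> Self {
        self.zone_type = Some(t.to_string());
        self
    }
    // Optional: Omicron zones set this; the switch zone does not.
    fn with_unique_name(mut self, id: u128) -> Self {
        self.unique_name = Some(id);
        self
    }
    fn install(self) -> Result<String, &'static str> {
        // Required fields are validated at install time, not at compile time.
        let t = self.zone_type.ok_or("zone_type is required")?;
        Ok(match self.unique_name {
            Some(id) => format!("oxz_{t}_{id:x}"),
            None => format!("oxz_{t}"),
        })
    }
}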
- match request.zone.zone_type { - ZoneType::Clickhouse => { + match &request { + ZoneArgs::Omicron(OmicronZoneConfigLocal { + zone: + OmicronZoneConfig { + zone_type: OmicronZoneType::Clickhouse { .. }, + underlay_address, + .. + }, + .. + }) => { let Some(info) = self.inner.sled_info.get() else { return Err(Error::SledAgentNotReady); }; @@ -1135,8 +1491,7 @@ impl ServiceManager { let datalink = installed_zone.get_control_vnic_name(); let gateway = &info.underlay_address.to_string(); - assert_eq!(request.zone.addresses.len(), 1); - let listen_addr = &request.zone.addresses[0].to_string(); + let listen_addr = &underlay_address.to_string(); let listen_port = &CLICKHOUSE_PORT.to_string(); let config = PropertyGroupBuilder::new("config") @@ -1162,7 +1517,16 @@ impl ServiceManager { })?; return Ok(RunningZone::boot(installed_zone).await?); } - ZoneType::ClickhouseKeeper => { + + ZoneArgs::Omicron(OmicronZoneConfigLocal { + zone: + OmicronZoneConfig { + zone_type: OmicronZoneType::ClickhouseKeeper { .. }, + underlay_address, + .. + }, + .. + }) => { let Some(info) = self.inner.sled_info.get() else { return Err(Error::SledAgentNotReady); }; @@ -1171,8 +1535,7 @@ impl ServiceManager { let datalink = installed_zone.get_control_vnic_name(); let gateway = &info.underlay_address.to_string(); - assert_eq!(request.zone.addresses.len(), 1); - let listen_addr = &request.zone.addresses[0].to_string(); + let listen_addr = &underlay_address.to_string(); let listen_port = &CLICKHOUSE_KEEPER_PORT.to_string(); let config = PropertyGroupBuilder::new("config") @@ -1201,7 +1564,16 @@ impl ServiceManager { })?; return Ok(RunningZone::boot(installed_zone).await?); } - ZoneType::CockroachDb => { + + ZoneArgs::Omicron(OmicronZoneConfigLocal { + zone: + OmicronZoneConfig { + zone_type: OmicronZoneType::CockroachDb { .. }, + underlay_address, + .. + }, + .. + }) => { let Some(info) = self.inner.sled_info.get() else { return Err(Error::SledAgentNotReady); }; @@ -1211,9 +1583,8 @@ impl ServiceManager { // Configure the CockroachDB service. let datalink = installed_zone.get_control_vnic_name(); let gateway = &info.underlay_address.to_string(); - assert_eq!(request.zone.addresses.len(), 1); let address = SocketAddr::new( - IpAddr::V6(request.zone.addresses[0]), + IpAddr::V6(*underlay_address), COCKROACH_PORT, ); let listen_addr = &address.ip().to_string(); @@ -1242,22 +1613,29 @@ impl ServiceManager { })?; return Ok(RunningZone::boot(installed_zone).await?); } - ZoneType::Crucible => { + + ZoneArgs::Omicron(OmicronZoneConfigLocal { + zone: + OmicronZoneConfig { + zone_type: OmicronZoneType::Crucible { dataset, .. }, + underlay_address, + .. + }, + .. 
+ }) => { let Some(info) = self.inner.sled_info.get() else { return Err(Error::SledAgentNotReady); }; let datalink = installed_zone.get_control_vnic_name(); let gateway = &info.underlay_address.to_string(); - assert_eq!(request.zone.addresses.len(), 1); - let listen_addr = &request.zone.addresses[0].to_string(); + let listen_addr = &underlay_address.to_string(); let listen_port = &CRUCIBLE_PORT.to_string(); - let dataset_name = request - .zone - .dataset - .as_ref() - .map(|d| d.name.full()) - .expect("Crucible requires dataset"); + let dataset_name = DatasetName::new( + dataset.pool_name.clone(), + DatasetKind::Crucible, + ) + .full(); let uuid = &Uuid::new_v4().to_string(); let config = PropertyGroupBuilder::new("config") .add_property("datalink", "astring", datalink) @@ -1282,15 +1660,23 @@ impl ServiceManager { })?; return Ok(RunningZone::boot(installed_zone).await?); } - ZoneType::CruciblePantry => { + + ZoneArgs::Omicron(OmicronZoneConfigLocal { + zone: + OmicronZoneConfig { + zone_type: OmicronZoneType::CruciblePantry { .. }, + underlay_address, + .. + }, + .. + }) => { let Some(info) = self.inner.sled_info.get() else { return Err(Error::SledAgentNotReady); }; let datalink = installed_zone.get_control_vnic_name(); let gateway = &info.underlay_address.to_string(); - assert_eq!(request.zone.addresses.len(), 1); - let listen_addr = &request.zone.addresses[0].to_string(); + let listen_addr = &underlay_address.to_string(); let listen_port = &CRUCIBLE_PANTRY_PORT.to_string(); let config = PropertyGroupBuilder::new("config") @@ -1312,6 +1698,7 @@ impl ServiceManager { let running_zone = RunningZone::boot(installed_zone).await?; return Ok(running_zone); } + _ => {} } @@ -1342,7 +1729,7 @@ impl ServiceManager { self.inner.log, "Ensuring bootstrap address {} exists in {} zone", bootstrap_address.to_string(), - request.zone.zone_type.to_string() + &zone_type_str, ); running_zone.ensure_bootstrap_address(*bootstrap_address).await?; info!( @@ -1363,7 +1750,14 @@ impl ServiceManager { })?; } - for addr in &request.zone.addresses { + let addresses = match &request { + ZoneArgs::Omicron(OmicronZoneConfigLocal { + zone: OmicronZoneConfig { underlay_address, .. }, + .. + }) => std::slice::from_ref(underlay_address), + ZoneArgs::Switch(req) => &req.zone.addresses, + }; + for addr in addresses { if *addr == Ipv6Addr::LOCALHOST { continue; } @@ -1388,9 +1782,7 @@ impl ServiceManager { let sled_underlay_subnet = Ipv6Subnet::::new(info.underlay_address); - if request - .zone - .addresses + if addresses .iter() .any(|ip| sled_underlay_subnet.net().contains(*ip)) { @@ -1422,661 +1814,744 @@ impl ServiceManager { })?; } - for service in &request.zone.services { - // TODO: Related to - // https://github.com/oxidecomputer/omicron/pull/1124 , should we - // avoid importing this manifest? - debug!(self.inner.log, "importing manifest"); - - let smfh = SmfHelper::new(&running_zone, &service.details); - smfh.import_manifest()?; - - match &service.details { - ServiceType::Nexus { - internal_address, - external_tls, - external_dns_servers, - .. - } => { - info!(self.inner.log, "Setting up Nexus service"); - - let sled_info = self - .inner - .sled_info - .get() - .ok_or(Error::SledAgentNotReady)?; - - // While Nexus will be reachable via `external_ip`, it communicates - // atop an OPTE port which operates on a VPC private IP. OPTE will - // map the private IP to the external IP automatically. - let port_ip = running_zone - .ensure_address_for_port("public", 0) - .await? 
- .ip(); - - // Nexus takes a separate config file for parameters which - // cannot be known at packaging time. - let nexus_port = if *external_tls { 443 } else { 80 }; - let deployment_config = NexusDeploymentConfig { - id: request.zone.id, - rack_id: sled_info.rack_id, - techport_external_server_port: - NEXUS_TECHPORT_EXTERNAL_PORT, - - dropshot_external: ConfigDropshotWithTls { - tls: *external_tls, - dropshot: dropshot::ConfigDropshot { - bind_address: SocketAddr::new( - port_ip, nexus_port, - ), - // This has to be large enough to support: - // - bulk writes to disks - request_body_max_bytes: 8192 * 1024, + match &request { + ZoneArgs::Omicron(zone_config) => { + // TODO: Related to + // https://github.com/oxidecomputer/omicron/pull/1124 , should we + // avoid importing this manifest? + debug!(self.inner.log, "importing manifest"); + + let smfh = + SmfHelper::new(&running_zone, &zone_config.zone.zone_type); + smfh.import_manifest()?; + + match &zone_config.zone.zone_type { + OmicronZoneType::Nexus { + internal_address, + external_tls, + external_dns_servers, + .. + } => { + info!(self.inner.log, "Setting up Nexus service"); + + let sled_info = self + .inner + .sled_info + .get() + .ok_or(Error::SledAgentNotReady)?; + + // While Nexus will be reachable via `external_ip`, it + // communicates atop an OPTE port which operates on a + // VPC private IP. OPTE will map the private IP to the + // external IP automatically. + let port_ip = running_zone + .ensure_address_for_port("public", 0) + .await? + .ip(); + + // Nexus takes a separate config file for parameters + // which cannot be known at packaging time. + let nexus_port = if *external_tls { 443 } else { 80 }; + let deployment_config = NexusDeploymentConfig { + id: zone_config.zone.id, + rack_id: sled_info.rack_id, + techport_external_server_port: + NEXUS_TECHPORT_EXTERNAL_PORT, + + dropshot_external: ConfigDropshotWithTls { + tls: *external_tls, + dropshot: dropshot::ConfigDropshot { + bind_address: SocketAddr::new( + port_ip, nexus_port, + ), + // This has to be large enough to support: + // - bulk writes to disks + request_body_max_bytes: 8192 * 1024, + default_handler_task_mode: + HandlerTaskMode::Detached, + }, + }, + dropshot_internal: dropshot::ConfigDropshot { + bind_address: (*internal_address).into(), + // This has to be large enough to support, among + // other things, the initial list of TLS + // certificates provided by the customer during + // rack setup. + request_body_max_bytes: 10 * 1024 * 1024, default_handler_task_mode: HandlerTaskMode::Detached, }, - }, - dropshot_internal: dropshot::ConfigDropshot { - bind_address: (*internal_address).into(), - // This has to be large enough to support, among - // other things, the initial list of TLS - // certificates provided by the customer during rack - // setup. - request_body_max_bytes: 10 * 1024 * 1024, - default_handler_task_mode: - HandlerTaskMode::Detached, - }, - internal_dns: nexus_config::InternalDns::FromSubnet { - subnet: Ipv6Subnet::::new( - sled_info.underlay_address, + internal_dns: + nexus_config::InternalDns::FromSubnet { + subnet: Ipv6Subnet::::new( + sled_info.underlay_address, + ), + }, + database: nexus_config::Database::FromDns, + external_dns_servers: external_dns_servers.clone(), + }; + + // Copy the partial config file to the expected + // location. + let config_dir = Utf8PathBuf::from(format!( + "{}/var/svc/manifest/site/nexus", + running_zone.root() + )); + // The filename of a half-completed config, in need of + // parameters supplied at runtime. 
+ const PARTIAL_LEDGER_FILENAME: &str = + "config-partial.toml"; + // The filename of a completed config, merging the + // partial config with additional appended parameters + // known at runtime. + const COMPLETE_LEDGER_FILENAME: &str = "config.toml"; + let partial_config_path = + config_dir.join(PARTIAL_LEDGER_FILENAME); + let config_path = + config_dir.join(COMPLETE_LEDGER_FILENAME); + tokio::fs::copy(partial_config_path, &config_path) + .await + .map_err(|err| Error::io_path(&config_path, err))?; + + // Serialize the configuration and append it into the + // file. + let serialized_cfg = + toml::Value::try_from(&deployment_config) + .expect("Cannot serialize config"); + let mut map = toml::map::Map::new(); + map.insert("deployment".to_string(), serialized_cfg); + let config_str = + toml::to_string(&map).map_err(|err| { + Error::TomlSerialize { + path: config_path.clone(), + err, + } + })?; + let mut file = tokio::fs::OpenOptions::new() + .append(true) + .open(&config_path) + .await + .map_err(|err| Error::io_path(&config_path, err))?; + file.write_all(b"\n\n") + .await + .map_err(|err| Error::io_path(&config_path, err))?; + file.write_all(config_str.as_bytes()) + .await + .map_err(|err| Error::io_path(&config_path, err))?; + } + + OmicronZoneType::ExternalDns { + http_address, + dns_address, + .. + } => { + info!( + self.inner.log, + "Setting up external-dns service" + ); + + // Like Nexus, we need to be reachable externally via + // `dns_address` but we don't listen on that address + // directly but instead on a VPC private IP. OPTE will + // en/decapsulate as appropriate. + let port_ip = running_zone + .ensure_address_for_port("public", 0) + .await? + .ip(); + let dns_address = + SocketAddr::new(port_ip, dns_address.port()); + + smfh.setprop( + "config/http_address", + format!( + "[{}]:{}", + http_address.ip(), + http_address.port(), ), - }, - database: nexus_config::Database::FromDns, - external_dns_servers: external_dns_servers.clone(), - }; + )?; + smfh.setprop( + "config/dns_address", + dns_address.to_string(), + )?; - // Copy the partial config file to the expected location. - let config_dir = Utf8PathBuf::from(format!( - "{}/var/svc/manifest/site/nexus", - running_zone.root() - )); - // The filename of a half-completed config, in need of parameters supplied at - // runtime. - const PARTIAL_LEDGER_FILENAME: &str = "config-partial.toml"; - // The filename of a completed config, merging the partial config with - // additional appended parameters known at runtime. - const COMPLETE_LEDGER_FILENAME: &str = "config.toml"; - let partial_config_path = - config_dir.join(PARTIAL_LEDGER_FILENAME); - let config_path = config_dir.join(COMPLETE_LEDGER_FILENAME); - tokio::fs::copy(partial_config_path, &config_path) - .await - .map_err(|err| Error::io_path(&config_path, err))?; - - // Serialize the configuration and append it into the file. 
- let serialized_cfg = - toml::Value::try_from(&deployment_config) - .expect("Cannot serialize config"); - let mut map = toml::map::Map::new(); - map.insert("deployment".to_string(), serialized_cfg); - let config_str = toml::to_string(&map).map_err(|err| { - Error::TomlSerialize { path: config_path.clone(), err } - })?; - let mut file = tokio::fs::OpenOptions::new() - .append(true) - .open(&config_path) - .await - .map_err(|err| Error::io_path(&config_path, err))?; - file.write_all(b"\n\n") - .await - .map_err(|err| Error::io_path(&config_path, err))?; - file.write_all(config_str.as_bytes()) - .await - .map_err(|err| Error::io_path(&config_path, err))?; - } - ServiceType::ExternalDns { - http_address, dns_address, .. - } => { - info!(self.inner.log, "Setting up external-dns service"); - - // Like Nexus, we need to be reachable externally via - // `dns_address` but we don't listen on that address - // directly but instead on a VPC private IP. OPTE will - // en/decapsulate as appropriate. - let port_ip = running_zone - .ensure_address_for_port("public", 0) - .await? - .ip(); - let dns_address = - SocketAddr::new(port_ip, dns_address.port()); - - smfh.setprop( - "config/http_address", - format!( - "[{}]:{}", - http_address.ip(), - http_address.port(), - ), - )?; - smfh.setprop( - "config/dns_address", - dns_address.to_string(), - )?; + // Refresh the manifest with the new properties we set, + // so they become "effective" properties when the + // service is enabled. + smfh.refresh()?; + } - // Refresh the manifest with the new properties we set, so - // they become "effective" properties when the service is - // enabled. - smfh.refresh()?; - } - ServiceType::InternalDns { - http_address, - dns_address, - gz_address, - gz_address_index, - } => { - info!(self.inner.log, "Setting up internal-dns service"); - - // Internal DNS zones require a special route through the - // global zone, since they are not on the same part of the - // underlay as most other services on this sled (the sled's - // subnet). - // - // We create an IP address in the dedicated portion of the - // underlay used for internal DNS servers, but we *also* - // add a number ("which DNS server is this") to ensure - // these addresses are given unique names. In the unlikely - // case that two internal DNS servers end up on the same - // machine (which is effectively a developer-only - // environment -- we wouldn't want this in prod!), they need - // to be given distinct names. - let addr_name = format!("internaldns{gz_address_index}"); - Zones::ensure_has_global_zone_v6_address( - self.inner.underlay_vnic.clone(), - *gz_address, - &addr_name, - ) - .map_err(|err| Error::GzAddress { - message: format!( + OmicronZoneType::InternalDns { + http_address, + dns_address, + gz_address, + gz_address_index, + .. + } => { + info!( + self.inner.log, + "Setting up internal-dns service" + ); + + // Internal DNS zones require a special route through + // the global zone, since they are not on the same part + // of the underlay as most other services on this sled + // (the sled's subnet). + // + // We create an IP address in the dedicated portion of + // the underlay used for internal DNS servers, but we + // *also* add a number ("which DNS server is this") to + // ensure these addresses are given unique names. In the + // unlikely case that two internal DNS servers end up on + // the same machine (which is effectively a + // developer-only environment -- we wouldn't want this + // in prod!), they need to be given distinct names. 
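// [Editor's aside] The per-server index matters because illumos address
// object names must be unique on a link: a second internal DNS zone on the
// same sled would otherwise try to reuse the same global-zone address name.
// Under that assumption, the naming is simply:
fn internal_dns_addr_name(gz_address_index: u32) -> String {
    // "internaldns0", "internaldns1", ... -- one per DNS zone on this sled.
    format!("internaldns{gz_address_index}")
}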
+ let addr_name = + format!("internaldns{gz_address_index}"); + Zones::ensure_has_global_zone_v6_address( + self.inner.underlay_vnic.clone(), + *gz_address, + &addr_name, + ) + .map_err(|err| { + Error::GzAddress { + message: format!( "Failed to create address {} for Internal DNS zone", addr_name ), - err, - })?; - // If this address is in a new ipv6 prefix, notify maghemite so - // it can advertise it to other sleds. - self.advertise_prefix_of_address(*gz_address).await; - - running_zone.add_default_route(*gz_address).map_err( - |err| Error::ZoneCommand { - intent: "Adding Route".to_string(), - err, - }, - )?; + err, + } + })?; + // If this address is in a new ipv6 prefix, notify + // maghemite so it can advertise it to other sleds. + self.advertise_prefix_of_address(*gz_address).await; + + running_zone.add_default_route(*gz_address).map_err( + |err| Error::ZoneCommand { + intent: "Adding Route".to_string(), + err, + }, + )?; - smfh.setprop( - "config/http_address", - format!( - "[{}]:{}", - http_address.ip(), - http_address.port(), - ), - )?; - smfh.setprop( - "config/dns_address", - &format!( - "[{}]:{}", - dns_address.ip(), - dns_address.port(), - ), - )?; + smfh.setprop( + "config/http_address", + format!( + "[{}]:{}", + http_address.ip(), + http_address.port(), + ), + )?; + smfh.setprop( + "config/dns_address", + &format!( + "[{}]:{}", + dns_address.ip(), + dns_address.port(), + ), + )?; - // Refresh the manifest with the new properties we set, so - // they become "effective" properties when the service is - // enabled. - smfh.refresh()?; - } - ServiceType::Oximeter { address } => { - info!(self.inner.log, "Setting up oximeter service"); - smfh.setprop("config/id", request.zone.id)?; - smfh.setprop("config/address", address.to_string())?; - smfh.refresh()?; - } - ServiceType::ManagementGatewayService => { - info!(self.inner.log, "Setting up MGS service"); - smfh.setprop("config/id", request.zone.id)?; - - // Always tell MGS to listen on localhost so wicketd can - // contact it even before we have an underlay network. - smfh.addpropvalue( - "config/address", - &format!("[::1]:{MGS_PORT}"), - )?; + // Refresh the manifest with the new properties we set, + // so they become "effective" properties when the + // service is enabled. + smfh.refresh()?; + } - if let Some(address) = request.zone.addresses.get(0) { - // Don't use localhost twice - if *address != Ipv6Addr::LOCALHOST { - smfh.addpropvalue( - "config/address", - &format!("[{address}]:{MGS_PORT}"), - )?; - } + OmicronZoneType::Oximeter { address } => { + info!(self.inner.log, "Setting up oximeter service"); + smfh.setprop("config/id", zone_config.zone.id)?; + smfh.setprop("config/address", address.to_string())?; + smfh.refresh()?; } - if let Some(info) = self.inner.sled_info.get() { - smfh.setprop("config/rack_id", info.rack_id)?; + OmicronZoneType::BoundaryNtp { + ntp_servers, + dns_servers, + domain, + .. } + | OmicronZoneType::InternalNtp { + ntp_servers, + dns_servers, + domain, + .. + } => { + let boundary = matches!( + &zone_config.zone.zone_type, + OmicronZoneType::BoundaryNtp { .. 
} + ); + info!( + self.inner.log, + "Set up NTP service boundary={}, Servers={:?}", + boundary, + ntp_servers + ); - smfh.refresh()?; - } - ServiceType::SpSim => { - info!(self.inner.log, "Setting up Simulated SP service"); - } - ServiceType::Wicketd { baseboard } => { - info!(self.inner.log, "Setting up wicketd service"); + let sled_info = + if let Some(info) = self.inner.sled_info.get() { + info + } else { + return Err(Error::SledAgentNotReady); + }; - smfh.setprop( - "config/address", - &format!("[::1]:{WICKETD_PORT}"), - )?; + let rack_net = Ipv6Subnet::::new( + sled_info.underlay_address, + ) + .net(); - // If we're launching the switch zone, we'll have a - // bootstrap_address based on our call to - // `self.bootstrap_address_needed` (which always gives us an - // address for the switch zone. If we _don't_ have a - // bootstrap address, someone has requested wicketd in a - // non-switch zone; return an error. - let Some((_, bootstrap_address)) = - bootstrap_name_and_address - else { - return Err(Error::BadServiceRequest { - service: "wicketd".to_string(), - message: concat!( - "missing bootstrap address: ", - "wicketd can only be started in the ", - "switch zone", - ) - .to_string(), - }); - }; - smfh.setprop( - "config/artifact-address", - &format!( - "[{bootstrap_address}]:{BOOTSTRAP_ARTIFACT_PORT}" - ), - )?; - - smfh.setprop( - "config/mgs-address", - &format!("[::1]:{MGS_PORT}"), - )?; - - // We intentionally bind `nexus-proxy-address` to `::` so - // wicketd will serve this on all interfaces, particularly - // the tech port interfaces, allowing external clients to - // connect to this Nexus proxy. - smfh.setprop( - "config/nexus-proxy-address", - &format!("[::]:{WICKETD_NEXUS_PROXY_PORT}"), - )?; - if let Some(underlay_address) = self - .inner - .sled_info - .get() - .map(|info| info.underlay_address) - { - let rack_subnet = - Ipv6Subnet::::new(underlay_address); + smfh.setprop("config/allow", &format!("{}", rack_net))?; smfh.setprop( - "config/rack-subnet", - &rack_subnet.net().ip().to_string(), + "config/boundary", + if boundary { "true" } else { "false" }, )?; - } - let serialized_baseboard = - serde_json::to_string_pretty(&baseboard)?; - let serialized_baseboard_path = Utf8PathBuf::from(format!( - "{}/opt/oxide/baseboard.json", - running_zone.root() - )); - tokio::fs::write( - &serialized_baseboard_path, - &serialized_baseboard, - ) - .await - .map_err(|err| { - Error::io_path(&serialized_baseboard_path, err) - })?; - smfh.setprop( - "config/baseboard-file", - String::from("/opt/oxide/baseboard.json"), - )?; + if boundary { + // Configure OPTE port for boundary NTP + running_zone + .ensure_address_for_port("public", 0) + .await?; + } - smfh.refresh()?; - } - ServiceType::Dendrite { asic } => { - info!(self.inner.log, "Setting up dendrite service"); + smfh.delpropvalue("config/server", "*")?; + for server in ntp_servers { + smfh.addpropvalue("config/server", server)?; + } + self.configure_dns_client( + &running_zone, + &dns_servers, + &domain, + ) + .await?; - if let Some(info) = self.inner.sled_info.get() { - smfh.setprop("config/rack_id", info.rack_id)?; - smfh.setprop("config/sled_id", info.config.sled_id)?; - } else { - info!( - self.inner.log, - "no rack_id/sled_id available yet" - ); + smfh.refresh()?; + } + OmicronZoneType::Clickhouse { .. } + | OmicronZoneType::ClickhouseKeeper { .. } + | OmicronZoneType::CockroachDb { .. } + | OmicronZoneType::Crucible { .. } + | OmicronZoneType::CruciblePantry { .. 
} => { + panic!( + "{} is a service which exists as part of a \ + self-assembling zone", + &zone_config.zone.zone_type.zone_type_str(), + ) } + }; - smfh.delpropvalue("config/address", "*")?; - smfh.delpropvalue("config/dns_server", "*")?; - for address in &request.zone.addresses { - smfh.addpropvalue( - "config/address", - &format!("[{}]:{}", address, DENDRITE_PORT), - )?; - if *address != Ipv6Addr::LOCALHOST { - let az_prefix = - Ipv6Subnet::::new(*address); - for addr in Resolver::servers_from_subnet(az_prefix) + debug!(self.inner.log, "enabling service"); + smfh.enable()?; + } + ZoneArgs::Switch(request) => { + for service in &request.zone.services { + // TODO: Related to + // https://github.com/oxidecomputer/omicron/pull/1124 , should we + // avoid importing this manifest? + debug!(self.inner.log, "importing manifest"); + + let smfh = SmfHelper::new(&running_zone, service); + smfh.import_manifest()?; + + match service { + SwitchService::ManagementGatewayService => { + info!(self.inner.log, "Setting up MGS service"); + smfh.setprop("config/id", request.zone.id)?; + + // Always tell MGS to listen on localhost so wicketd + // can contact it even before we have an underlay + // network. + smfh.addpropvalue( + "config/address", + &format!("[::1]:{MGS_PORT}"), + )?; + + if let Some(address) = request.zone.addresses.get(0) { - smfh.addpropvalue( - "config/dns_server", - &format!("{addr}"), - )?; + // Don't use localhost twice + if *address != Ipv6Addr::LOCALHOST { + smfh.addpropvalue( + "config/address", + &format!("[{address}]:{MGS_PORT}"), + )?; + } + } + + if let Some(info) = self.inner.sled_info.get() { + smfh.setprop("config/rack_id", info.rack_id)?; } + + smfh.refresh()?; } - } - match asic { - DendriteAsic::TofinoAsic => { - // There should be exactly one device_name - // associated with this zone: the /dev path for - // the tofino ASIC. - let dev_cnt = device_names.len(); - if dev_cnt == 1 { + SwitchService::SpSim => { + info!( + self.inner.log, + "Setting up Simulated SP service" + ); + } + SwitchService::Wicketd { baseboard } => { + info!(self.inner.log, "Setting up wicketd service"); + + smfh.setprop( + "config/address", + &format!("[::1]:{WICKETD_PORT}"), + )?; + + // If we're launching the switch zone, we'll have a + // bootstrap_address based on our call to + // `self.bootstrap_address_needed` (which always + // gives us an address for the switch zone. If we + // _don't_ have a bootstrap address, someone has + // requested wicketd in a non-switch zone; return an + // error. + let Some((_, bootstrap_address)) = + bootstrap_name_and_address + else { + return Err(Error::BadServiceRequest { + service: "wicketd".to_string(), + message: concat!( + "missing bootstrap address: ", + "wicketd can only be started in the ", + "switch zone", + ) + .to_string(), + }); + }; + smfh.setprop( + "config/artifact-address", + &format!( + "[{bootstrap_address}]:{BOOTSTRAP_ARTIFACT_PORT}" + ), + )?; + + smfh.setprop( + "config/mgs-address", + &format!("[::1]:{MGS_PORT}"), + )?; + + // We intentionally bind `nexus-proxy-address` to + // `::` so wicketd will serve this on all + // interfaces, particularly the tech port + // interfaces, allowing external clients to connect + // to this Nexus proxy. 
+ smfh.setprop( + "config/nexus-proxy-address", + &format!("[::]:{WICKETD_NEXUS_PROXY_PORT}"), + )?; + if let Some(underlay_address) = self + .inner + .sled_info + .get() + .map(|info| info.underlay_address) + { + let rack_subnet = Ipv6Subnet::::new( + underlay_address, + ); smfh.setprop( - "config/dev_path", - device_names[0].clone(), + "config/rack-subnet", + &rack_subnet.net().ip().to_string(), )?; - } else { - return Err(Error::SledLocalZone( - anyhow::anyhow!( - "{dev_cnt} devices needed for tofino asic" - ), - )); } + + let serialized_baseboard = + serde_json::to_string_pretty(&baseboard)?; + let serialized_baseboard_path = + Utf8PathBuf::from(format!( + "{}/opt/oxide/baseboard.json", + running_zone.root() + )); + tokio::fs::write( + &serialized_baseboard_path, + &serialized_baseboard, + ) + .await + .map_err(|err| { + Error::io_path(&serialized_baseboard_path, err) + })?; smfh.setprop( - "config/port_config", - "/opt/oxide/dendrite/misc/sidecar_config.toml", + "config/baseboard-file", + String::from("/opt/oxide/baseboard.json"), )?; - let sidecar_revision = - match self.inner.sidecar_revision { - SidecarRevision::Physical(ref rev) => rev, - _ => { - return Err(Error::SidecarRevision( - anyhow::anyhow!( - "expected physical sidecar revision" - ), - )) - } - }; - smfh.setprop("config/board_rev", sidecar_revision)?; + + smfh.refresh()?; } - DendriteAsic::TofinoStub => smfh.setprop( - "config/port_config", - "/opt/oxide/dendrite/misc/model_config.toml", - )?, - asic @ (DendriteAsic::SoftNpuZone - | DendriteAsic::SoftNpuPropolisDevice) => { - if asic == &DendriteAsic::SoftNpuZone { - smfh.setprop("config/mgmt", "uds")?; + SwitchService::Dendrite { asic } => { + info!( + self.inner.log, + "Setting up dendrite service" + ); + + if let Some(info) = self.inner.sled_info.get() { + smfh.setprop("config/rack_id", info.rack_id)?; smfh.setprop( - "config/uds_path", - "/opt/softnpu/stuff", + "config/sled_id", + info.config.sled_id, )?; + } else { + info!( + self.inner.log, + "no rack_id/sled_id available yet" + ); } - if asic == &DendriteAsic::SoftNpuPropolisDevice { - smfh.setprop("config/mgmt", "uart")?; + + smfh.delpropvalue("config/address", "*")?; + smfh.delpropvalue("config/dns_server", "*")?; + for address in &request.zone.addresses { + smfh.addpropvalue( + "config/address", + &format!("[{}]:{}", address, DENDRITE_PORT), + )?; + if *address != Ipv6Addr::LOCALHOST { + let az_prefix = + Ipv6Subnet::::new(*address); + for addr in + Resolver::servers_from_subnet(az_prefix) + { + smfh.addpropvalue( + "config/dns_server", + &format!("{addr}"), + )?; + } + } } - let s = match self.inner.sidecar_revision { - SidecarRevision::SoftZone(ref s) => s, - SidecarRevision::SoftPropolis(ref s) => s, - _ => { - return Err(Error::SidecarRevision( - anyhow::anyhow!( - "expected soft sidecar revision" - ), - )) + match asic { + DendriteAsic::TofinoAsic => { + // There should be exactly one device_name + // associated with this zone: the /dev path + // for the tofino ASIC. 
+ let dev_cnt = device_names.len(); + if dev_cnt == 1 { + smfh.setprop( + "config/dev_path", + device_names[0].clone(), + )?; + } else { + return Err(Error::SledLocalZone( + anyhow::anyhow!( + "{dev_cnt} devices needed \ + for tofino asic" + ), + )); + } + smfh.setprop( + "config/port_config", + "/opt/oxide/dendrite/misc/sidecar_config.toml", + )?; + let sidecar_revision = + match self.inner.sidecar_revision { + SidecarRevision::Physical(ref rev) => rev, + _ => { + return Err(Error::SidecarRevision( + anyhow::anyhow!( + "expected physical \ + sidecar revision" + ), + )) + } + }; + smfh.setprop("config/board_rev", sidecar_revision)?; + } + DendriteAsic::TofinoStub => smfh.setprop( + "config/port_config", + "/opt/oxide/dendrite/misc/model_config.toml", + )?, + asic @ (DendriteAsic::SoftNpuZone + | DendriteAsic::SoftNpuPropolisDevice) => { + if asic == &DendriteAsic::SoftNpuZone { + smfh.setprop("config/mgmt", "uds")?; + smfh.setprop( + "config/uds_path", + "/opt/softnpu/stuff", + )?; + } + if asic == &DendriteAsic::SoftNpuPropolisDevice { + smfh.setprop("config/mgmt", "uart")?; + } + let s = match self.inner.sidecar_revision { + SidecarRevision::SoftZone(ref s) => s, + SidecarRevision::SoftPropolis(ref s) => s, + _ => { + return Err(Error::SidecarRevision( + anyhow::anyhow!( + "expected soft sidecar \ + revision" + ), + )) + } + }; + smfh.setprop( + "config/front_ports", + &s.front_port_count.to_string(), + )?; + smfh.setprop( + "config/rear_ports", + &s.rear_port_count.to_string(), + )?; + smfh.setprop( + "config/port_config", + "/opt/oxide/dendrite/misc/softnpu_single_sled_config.toml", + )? } }; - smfh.setprop( - "config/front_ports", - &s.front_port_count.to_string(), - )?; - smfh.setprop( - "config/rear_ports", - &s.rear_port_count.to_string(), - )?; - smfh.setprop( - "config/port_config", - "/opt/oxide/dendrite/misc/softnpu_single_sled_config.toml", - )? + smfh.refresh()?; } - }; - smfh.refresh()?; - } - ServiceType::Tfport { pkt_source, asic } => { - info!(self.inner.log, "Setting up tfport service"); + SwitchService::Tfport { pkt_source, asic } => { + info!(self.inner.log, "Setting up tfport service"); - let is_gimlet = is_gimlet().map_err(|e| { - Error::Underlay(underlay::Error::SystemDetection(e)) - })?; + let is_gimlet = is_gimlet().map_err(|e| { + Error::Underlay( + underlay::Error::SystemDetection(e), + ) + })?; - if is_gimlet { - // Collect the prefixes for each techport. - let techport_prefixes = match bootstrap_name_and_address - .as_ref() - { - Some((_, addr)) => { - Self::bootstrap_addr_to_techport_prefixes(addr) + if is_gimlet { + // Collect the prefixes for each techport. + let nameaddr = + bootstrap_name_and_address.as_ref(); + let techport_prefixes = match nameaddr { + Some((_, addr)) => { + Self::bootstrap_addr_to_techport_prefixes(addr) + } + None => { + return Err(Error::BadServiceRequest { + service: "tfport".into(), + message: "bootstrap addr missing" + .into(), + }); + } + }; + + for (i, prefix) in + techport_prefixes.into_iter().enumerate() + { + // Each `prefix` is an `Ipv6Subnet` + // including a netmask. Stringify just the + // network address, without the mask. 
+ smfh.setprop( + format!("config/techport{i}_prefix"), + prefix.net().network().to_string(), + )?; + } + smfh.setprop("config/pkt_source", pkt_source)?; } - None => { - return Err(Error::BadServiceRequest { - service: "tfport".into(), - message: "bootstrap addr missing".into(), - }); + if asic == &DendriteAsic::SoftNpuZone { + smfh.setprop("config/flags", "--sync-only")?; + } + if asic == &DendriteAsic::SoftNpuPropolisDevice { + smfh.setprop("config/pkt_source", pkt_source)?; } - }; - - for (i, prefix) in - techport_prefixes.into_iter().enumerate() - { - // Each `prefix` is an `Ipv6Subnet` including a netmask. - // Stringify just the network address, without the mask. smfh.setprop( - format!("config/techport{i}_prefix"), - prefix.net().network().to_string(), + "config/host", + &format!("[{}]", Ipv6Addr::LOCALHOST), + )?; + smfh.setprop( + "config/port", + &format!("{}", DENDRITE_PORT), )?; - } - smfh.setprop("config/pkt_source", pkt_source)?; - } - if asic == &DendriteAsic::SoftNpuZone { - smfh.setprop("config/flags", "--sync-only")?; - } - if asic == &DendriteAsic::SoftNpuPropolisDevice { - smfh.setprop("config/pkt_source", pkt_source)?; - } - smfh.setprop( - "config/host", - &format!("[{}]", Ipv6Addr::LOCALHOST), - )?; - smfh.setprop("config/port", &format!("{}", DENDRITE_PORT))?; - - smfh.refresh()?; - } - ServiceType::BoundaryNtp { - ntp_servers, - dns_servers, - domain, - .. - } - | ServiceType::InternalNtp { - ntp_servers, - dns_servers, - domain, - .. - } => { - let boundary = matches!( - service.details, - ServiceType::BoundaryNtp { .. } - ); - info!( - self.inner.log, - "Set up NTP service boundary={}, Servers={:?}", - boundary, - ntp_servers - ); - - let sled_info = - if let Some(info) = self.inner.sled_info.get() { - info - } else { - return Err(Error::SledAgentNotReady); - }; - - let rack_net = Ipv6Subnet::::new( - sled_info.underlay_address, - ) - .net(); - - smfh.setprop("config/allow", &format!("{}", rack_net))?; - smfh.setprop( - "config/boundary", - if boundary { "true" } else { "false" }, - )?; - - if boundary { - // Configure OPTE port for boundary NTP - running_zone - .ensure_address_for_port("public", 0) - .await?; - } - - smfh.delpropvalue("config/server", "*")?; - for server in ntp_servers { - smfh.addpropvalue("config/server", server)?; - } - self.configure_dns_client( - &running_zone, - dns_servers, - &domain, - ) - .await?; - - smfh.refresh()?; - } - ServiceType::Uplink => { - // Nothing to do here - this service is special and - // configured in `ensure_switch_zone_uplinks_configured` - } - ServiceType::Mgd => { - info!(self.inner.log, "Setting up mgd service"); - smfh.setprop("config/admin_host", "::")?; - smfh.refresh()?; - } - ServiceType::MgDdm { mode } => { - info!(self.inner.log, "Setting up mg-ddm service"); - smfh.setprop("config/mode", &mode)?; - smfh.setprop("config/admin_host", "::")?; + smfh.refresh()?; + } + SwitchService::Uplink => { + // Nothing to do here - this service is special and + // configured in + // `ensure_switch_zone_uplinks_configured` + } + SwitchService::Mgd => { + info!(self.inner.log, "Setting up mgd service"); + smfh.setprop("config/admin_host", "::")?; + smfh.refresh()?; + } + SwitchService::MgDdm { mode } => { + info!(self.inner.log, "Setting up mg-ddm service"); - let is_gimlet = is_gimlet().map_err(|e| { - Error::Underlay(underlay::Error::SystemDetection(e)) - })?; + smfh.setprop("config/mode", &mode)?; + smfh.setprop("config/admin_host", "::")?; - let maghemite_interfaces: Vec = if is_gimlet { - (0..32) - .map(|i| { - // See 
the `tfport_name` function for how - // tfportd names the addrconf it creates. - // Right now, that's `tfportrear[0-31]_0` - // for all rear ports, which is what we're - // directing ddmd to listen for - // advertisements on. - // - // This may grow in a multi-rack future to - // include a subset of "front" ports too, - // when racks are cabled together. - AddrObject::new( - &format!("tfportrear{}_0", i), - IPV6_LINK_LOCAL_NAME, + let is_gimlet = is_gimlet().map_err(|e| { + Error::Underlay( + underlay::Error::SystemDetection(e), ) - .unwrap() - }) - .collect() - } else { - self.inner - .switch_zone_maghemite_links - .iter() - .map(|i| { - AddrObject::new( - &i.to_string(), - IPV6_LINK_LOCAL_NAME, - ) - .unwrap() - }) - .collect() - }; + })?; + + let maghemite_interfaces: Vec = + if is_gimlet { + (0..32) + .map(|i| { + // See the `tfport_name` function + // for how tfportd names the + // addrconf it creates. Right now, + // that's `tfportrear[0-31]_0` for + // all rear ports, which is what + // we're directing ddmd to listen + // for advertisements on. + // + // This may grow in a multi-rack + // future to include a subset of + // "front" ports too, when racks are + // cabled together. + AddrObject::new( + &format!("tfportrear{}_0", i), + IPV6_LINK_LOCAL_NAME, + ) + .unwrap() + }) + .collect() + } else { + self.inner + .switch_zone_maghemite_links + .iter() + .map(|i| { + AddrObject::new( + &i.to_string(), + IPV6_LINK_LOCAL_NAME, + ) + .unwrap() + }) + .collect() + }; - smfh.setprop( - "config/interfaces", - // `svccfg setprop` requires a list of values to be - // enclosed in `()`, and each string value to be - // enclosed in `""`. Note that we do _not_ need to - // escape the parentheses, since this is not passed - // through a shell, but directly to `exec(2)` in the - // zone. - format!( - "({})", - maghemite_interfaces - .iter() - .map(|interface| format!(r#""{}""#, interface)) - .join(" "), - ), - )?; + smfh.setprop( + "config/interfaces", + // `svccfg setprop` requires a list of values to + // be enclosed in `()`, and each string value to + // be enclosed in `""`. Note that we do _not_ + // need to escape the parentheses, since this is + // not passed through a shell, but directly to + // `exec(2)` in the zone. + format!( + "({})", + maghemite_interfaces + .iter() + .map(|interface| format!( + r#""{}""#, + interface + )) + .join(" "), + ), + )?; + + if is_gimlet { + // Ddm for a scrimlet needs to be configured to + // talk to dendrite + smfh.setprop("config/dpd_host", "[::1]")?; + smfh.setprop("config/dpd_port", DENDRITE_PORT)?; + } + smfh.setprop("config/dendrite", "true")?; - if is_gimlet { - // Ddm for a scrimlet needs to be configured to talk to - // dendrite - smfh.setprop("config/dpd_host", "[::1]")?; - smfh.setprop("config/dpd_port", DENDRITE_PORT)?; + smfh.refresh()?; + } } - smfh.setprop("config/dendrite", "true")?; - smfh.refresh()?; - } - ServiceType::Crucible { .. } - | ServiceType::CruciblePantry { .. } - | ServiceType::CockroachDb { .. } - | ServiceType::Clickhouse { .. } - | ServiceType::ClickhouseKeeper { .. } => { - panic!( - "{} is a service which exists as part of a self-assembling zone", - service.details, - ) + debug!(self.inner.log, "enabling service"); + smfh.enable()?; } } - - debug!(self.inner.log, "enabling service"); - smfh.enable()?; - } + }; Ok(running_zone) } // Populates `existing_zones` according to the requests in `services`. 
-    async fn initialize_services_locked(
+    async fn initialize_omicron_zones_locked(
        &self,
        existing_zones: &mut BTreeMap<String, RunningZone>,
-        requests: &Vec<ZoneRequest>,
+        requests: &Vec<OmicronZoneConfigLocal>,
    ) -> Result<(), Error> {
        if let Some(name) = requests
            .iter()
@@ -2093,7 +2568,7 @@ impl ServiceManager {
        let futures = requests.iter().map(|request| {
            async move {
                self.initialize_zone(
-                    request,
+                    ZoneArgs::Omicron(request),
                    // filesystems=
                    &[],
                    // data_links=
@@ -2151,74 +2626,133 @@ impl ServiceManager {
        Err(BundleError::NoSuchZone { name: name.to_string() })
    }

-    /// Ensures that particular services should be initialized.
+    /// Returns the current Omicron zone configuration
+    pub async fn omicron_zones_list(
+        &self,
+    ) -> Result<OmicronZonesConfig, Error> {
+        let log = &self.inner.log;
+
+        // We need to take the lock in order for the information in the ledger
+        // to be up-to-date.
+        let _existing_zones = self.inner.zones.lock().await;
+
+        // Read the existing set of services from the ledger.
+        let zone_ledger_paths = self.all_omicron_zone_ledgers().await;
+        let ledger_data = match Ledger::<OmicronZonesConfigLocal>::new(
+            log,
+            zone_ledger_paths.clone(),
+        )
+        .await
+        {
+            Some(ledger) => ledger.data().clone(),
+            None => OmicronZonesConfigLocal::initial(),
+        };
+
+        Ok(ledger_data.to_omicron_zones_config())
+    }
+
+    /// Ensures that particular Omicron zones are running
    ///
    /// These services will be instantiated by this function, and will be
    /// recorded to a local file to ensure they start automatically on next
    /// boot.
-    pub async fn ensure_all_services_persistent(
+    pub async fn ensure_all_omicron_zones_persistent(
        &self,
-        request: ServiceEnsureBody,
+        request: OmicronZonesConfig,
    ) -> Result<(), Error> {
        let log = &self.inner.log;
        let mut existing_zones = self.inner.zones.lock().await;

        // Read the existing set of services from the ledger.
-        let service_paths = self.all_service_ledgers().await;
-        let mut ledger =
-            match Ledger::<AllZoneRequests>::new(log, service_paths.clone())
-                .await
-            {
-                Some(ledger) => ledger,
-                None => Ledger::<AllZoneRequests>::new_with(
-                    log,
-                    service_paths.clone(),
-                    AllZoneRequests::default(),
-                ),
-            };
-        let ledger_zone_requests = ledger.data_mut();
+        let zone_ledger_paths = self.all_omicron_zone_ledgers().await;
+        let mut ledger = match Ledger::<OmicronZonesConfigLocal>::new(
+            log,
+            zone_ledger_paths.clone(),
+        )
+        .await
+        {
+            Some(ledger) => ledger,
+            None => Ledger::<OmicronZonesConfigLocal>::new_with(
+                log,
+                zone_ledger_paths.clone(),
+                OmicronZonesConfigLocal::initial(),
+            ),
+        };
+
+        let ledger_zone_config = ledger.data_mut();
+        debug!(log, "ensure_all_omicron_zones_persistent";
+            "request_generation" => request.generation.to_string(),
+            "ledger_generation" =>
+                ledger_zone_config.omicron_generation.to_string(),
+        );
+
+        // Absolutely refuse to downgrade the configuration.
+        if ledger_zone_config.omicron_generation > request.generation {
+            return Err(Error::RequestedConfigOutdated {
+                requested: request.generation,
+                current: ledger_zone_config.omicron_generation,
+            });
+        }
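// [Editor's sketch] The ledger protection added here reduces to two rules
// keyed on the generation number: never accept an older generation, and
// treat "same generation, different contents" as a conflict. In isolation
// (simplified types; the real code returns the new `RequestedConfigOutdated`
// and `RequestedConfigConflicts` error variants):
fn check_ledger_generation(
    current_gen: u64,
    current_zones: &[String],
    requested_gen: u64,
    requested_zones: &[String],
) -> Result<(), String> {
    if current_gen > requested_gen {
        // Absolutely refuse to downgrade the configuration.
        return Err(format!(
            "requested generation {requested_gen} is older than {current_gen}"
        ));
    }
    if current_gen == requested_gen && current_zones != requested_zones {
        // The same generation must describe the same set of zones.
        return Err(format!("conflicting contents for generation {requested_gen}"));
    }
    Ok(())
}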
-
+        // If the generation is the same as what we're running, but the contents
+        // aren't, that's a problem, too.
+        if ledger_zone_config.omicron_generation == request.generation
+            && ledger_zone_config.clone().to_omicron_zones_config().zones
+                != request.zones
+        {
+            return Err(Error::RequestedConfigConflicts(request.generation));
+        }
+
+        let new_config = self
+            .ensure_all_omicron_zones(
                &mut existing_zones,
-                ledger_zone_requests,
+                Some(ledger_zone_config),
                request,
+                |_| true,
            )
            .await?;

-        // Update the services in the ledger and write it back to both M.2s
-        ledger_zone_requests.requests.clear();
-        ledger_zone_requests.requests.append(&mut zone_requests.requests);
+        // Update the zones in the ledger and write it back to both M.2s
+        *ledger_zone_config = new_config;
        ledger.commit().await?;

        Ok(())
    }

-    // Ensures that only the following services are running.
+    // Ensures that only the following Omicron zones are running.
    //
    // Does not record any information such that these services are
    // re-instantiated on boot.
-    async fn ensure_all_services(
+    async fn ensure_all_omicron_zones<F>(
        &self,
+        // The MutexGuard here attempts to ensure that the caller has the right
+        // lock held when calling this function.
        existing_zones: &mut MutexGuard<'_, BTreeMap<String, RunningZone>>,
-        old_request: &AllZoneRequests,
-        request: ServiceEnsureBody,
-    ) -> Result<AllZoneRequests, Error> {
+        old_config: Option<&OmicronZonesConfigLocal>,
+        new_request: OmicronZonesConfig,
+        filter: F,
+    ) -> Result<OmicronZonesConfigLocal, Error>
+    where
+        F: Fn(&OmicronZoneConfig) -> bool,
    {
        let log = &self.inner.log;

        // Do some data-normalization to ensure we can compare the "requested
        // set" vs the "existing set" as HashSets.
-        let old_services_set: HashSet<ServiceZoneRequest> = HashSet::from_iter(
-            old_request.requests.iter().map(|r| r.zone.clone()),
-        );
-        let requested_services_set =
-            HashSet::from_iter(request.services.into_iter());
+        let old_zones_set: HashSet<OmicronZoneConfig> = old_config
+            .map(|old_config| {
+                HashSet::from_iter(
+                    old_config.zones.iter().map(|z| z.zone.clone()),
+                )
+            })
+            .unwrap_or_else(HashSet::new);
+        let requested_zones_set =
+            HashSet::from_iter(new_request.zones.into_iter().filter(filter));

        let zones_to_be_removed =
-            old_services_set.difference(&requested_services_set);
-        let zones_to_be_added =
-            requested_services_set.difference(&old_services_set);
+            old_zones_set.difference(&requested_zones_set);
+        let zones_to_be_added = requested_zones_set.difference(&old_zones_set);

        // Destroy zones that should not be running
        for zone in zones_to_be_removed {
@@ -2251,13 +2785,13 @@ impl ServiceManager {
        }

        // Create zones that should be running
-        let mut zone_requests = AllZoneRequests::default();
        let all_u2_roots = self
            .inner
            .storage
            .get_latest_resources()
            .await
            .all_u2_mountpoints(ZONE_DATASET);
+        let mut new_zones = Vec::new();
        for zone in zones_to_be_added {
            // Check if we think the zone should already be running
            let name = zone.zone_name();
@@ -2289,6 +2823,7 @@ impl ServiceManager {
                }
            }
        }
+
        // For each new zone request, we pick an arbitrary U.2 to store
        // the zone filesystem. Note: This isn't known to Nexus right now,
        // so it's a local-to-sled decision.
@@ -2301,22 +2836,27 @@ impl ServiceManager {
                .ok_or_else(|| Error::U2NotFound)?
                .clone();
-            zone_requests
-                .requests
-                .push(ZoneRequest { zone: zone.clone(), root });
+            new_zones.push(OmicronZoneConfigLocal { zone: zone.clone(), root });
        }

-        self.initialize_services_locked(
-            existing_zones,
-            &zone_requests.requests,
-        )
-        .await?;

-        for old_zone in &old_request.requests {
-            if requested_services_set.contains(&old_zone.zone) {
-                zone_requests.requests.push(old_zone.clone());
+        self.initialize_omicron_zones_locked(existing_zones, &new_zones)
            .await?;
+
+        if let Some(old_config) = old_config {
+            for old_zone in &old_config.zones {
+                if requested_zones_set.contains(&old_zone.zone) {
+                    new_zones.push(old_zone.clone());
+                }
            }
        }
-        Ok(zone_requests)
+
+        Ok(OmicronZonesConfigLocal {
+            omicron_generation: new_request.generation,
+            ledger_generation: old_config
+                .map(|c| c.ledger_generation)
+                .unwrap_or_else(Generation::new),
+            zones: new_zones,
+        })
    }

    pub async fn cockroachdb_initialize(&self) -> Result<(), Error> {
@@ -2384,10 +2924,7 @@ impl ServiceManager {
        Ok(())
    }

-    pub fn boottime_rewrite<'a>(
-        &self,
-        zones: impl Iterator<Item = &'a RunningZone>,
-    ) {
+    pub fn boottime_rewrite(&self) {
        if self
            .inner
            .time_synced
@@ -2398,33 +2935,13 @@ impl ServiceManager {
            return;
        }

-        let now = SystemTime::now()
-            .duration_since(UNIX_EPOCH)
-            .expect("SystemTime before UNIX EPOCH");
-
-        info!(self.inner.log, "Setting boot time to {:?}", now);
-
-        let files: Vec<Utf8PathBuf> = zones
-            .map(|z| z.root())
-            .chain(iter::once(Utf8PathBuf::from("/")))
-            .flat_map(|r| [r.join("var/adm/utmpx"), r.join("var/adm/wtmpx")])
-            .collect();
-
-        for file in files {
-            let mut command = std::process::Command::new(PFEXEC);
-            let cmd = command.args(&[
-                "/usr/platform/oxide/bin/tmpx",
-                &format!("{}", now.as_secs()),
-                &file.as_str(),
-            ]);
-            match execute(cmd) {
-                Err(e) => {
-                    warn!(self.inner.log, "Updating {} failed: {}", &file, e);
-                }
-                Ok(_) => {
-                    info!(self.inner.log, "Updated {}", &file);
-                }
-            }
+        // Call out to the 'tmpx' utility program which will rewrite the wtmpx
+        // and utmpx databases in every zone, including the global zone, to
+        // reflect the adjusted system boot time.
+        let mut command = std::process::Command::new(PFEXEC);
+        let cmd = command.args(&["/usr/platform/oxide/bin/tmpx", "-Z"]);
+        if let Err(e) = execute(cmd) {
+            warn!(self.inner.log, "Updating [wu]tmpx databases failed: {}", e);
        }
    }

@@ -2433,7 +2950,7 @@ impl ServiceManager {
        if let Some(true) = self.inner.skip_timesync {
            info!(self.inner.log, "Configured to skip timesync checks");
-            self.boottime_rewrite(existing_zones.values());
+            self.boottime_rewrite();
            return Ok(TimeSync {
                sync: true,
                ref_id: 0,
@@ -2487,7 +3004,7 @@ impl ServiceManager {
            && correction.abs() <= 0.05;

        if sync {
-            self.boottime_rewrite(existing_zones.values());
+            self.boottime_rewrite();
        }

        Ok(TimeSync {
@@ -2522,7 +3039,8 @@ impl ServiceManager {
        let mut data_links: Vec<String> = vec![];
        let services = match self.inner.sled_mode {
-            // A pure gimlet sled should not be trying to activate a switch zone.
+            // A pure gimlet sled should not be trying to activate a switch
+            // zone.
@@ -2522,7 +3039,8 @@
         let mut data_links: Vec<String> = vec![];
 
         let services = match self.inner.sled_mode {
-            // A pure gimlet sled should not be trying to activate a switch zone.
+            // A pure gimlet sled should not be trying to activate a switch
+            // zone.
             SledMode::Gimlet => {
                 return Err(Error::SledLocalZone(anyhow::anyhow!(
                     "attempted to activate switch zone on non-scrimlet sled"
                 )))
             }
 
             SledMode::Auto
             | SledMode::Scrimlet { asic: DendriteAsic::TofinoAsic } => {
                 vec![
-                    ServiceType::Dendrite { asic: DendriteAsic::TofinoAsic },
-                    ServiceType::ManagementGatewayService,
-                    ServiceType::Tfport {
+                    SwitchService::Dendrite { asic: DendriteAsic::TofinoAsic },
+                    SwitchService::ManagementGatewayService,
+                    SwitchService::Tfport {
                         pkt_source: "tfpkt0".to_string(),
                         asic: DendriteAsic::TofinoAsic,
                     },
-                    ServiceType::Uplink,
-                    ServiceType::Wicketd { baseboard },
-                    ServiceType::Mgd,
-                    ServiceType::MgDdm { mode: "transit".to_string() },
+                    SwitchService::Uplink,
+                    SwitchService::Wicketd { baseboard },
+                    SwitchService::Mgd,
+                    SwitchService::MgDdm { mode: "transit".to_string() },
                 ]
             }
 
@@ -2551,17 +3069,17 @@
             } => {
                 data_links = vec!["vioif0".to_owned()];
                 vec![
-                    ServiceType::Dendrite { asic },
-                    ServiceType::ManagementGatewayService,
-                    ServiceType::Uplink,
-                    ServiceType::Wicketd { baseboard },
-                    ServiceType::Mgd,
-                    ServiceType::MgDdm { mode: "transit".to_string() },
-                    ServiceType::Tfport {
+                    SwitchService::Dendrite { asic },
+                    SwitchService::ManagementGatewayService,
+                    SwitchService::Uplink,
+                    SwitchService::Wicketd { baseboard },
+                    SwitchService::Mgd,
+                    SwitchService::MgDdm { mode: "transit".to_string() },
+                    SwitchService::Tfport {
                         pkt_source: "vioif0".to_string(),
                         asic,
                     },
-                    ServiceType::SpSim,
+                    SwitchService::SpSim,
                 ]
             }
 
@@ -2581,17 +3099,17 @@
                     data_links = Dladm::get_simulated_tfports()?;
                 }
                 vec![
-                    ServiceType::Dendrite { asic },
-                    ServiceType::ManagementGatewayService,
-                    ServiceType::Uplink,
-                    ServiceType::Wicketd { baseboard },
-                    ServiceType::Mgd,
-                    ServiceType::MgDdm { mode: "transit".to_string() },
-                    ServiceType::Tfport {
+                    SwitchService::Dendrite { asic },
+                    SwitchService::ManagementGatewayService,
+                    SwitchService::Uplink,
+                    SwitchService::Wicketd { baseboard },
+                    SwitchService::Mgd,
+                    SwitchService::MgDdm { mode: "transit".to_string() },
+                    SwitchService::Tfport {
                         pkt_source: "tfpkt0".to_string(),
                         asic,
                     },
-                    ServiceType::SpSim,
+                    SwitchService::SpSim,
                 ]
             }
         };
@@ -2600,19 +3118,10 @@
             if let Some((ip, _)) = underlay_info { vec![ip] } else { vec![] };
         addresses.push(Ipv6Addr::LOCALHOST);
 
-        let request = ServiceZoneRequest {
-            id: Uuid::new_v4(),
-            zone_type: ZoneType::Switch,
-            addresses,
-            dataset: None,
-            services: services
-                .into_iter()
-                .map(|s| ServiceZoneService { id: Uuid::new_v4(), details: s })
-                .collect(),
-        };
+        let request =
+            SwitchZoneConfig { id: Uuid::new_v4(), addresses, services };
 
         self.ensure_zone(
-            ZoneType::Switch,
             // request=
             Some(request),
             // filesystems=
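[Editorial note: with the `ZoneType` parameter removed, activating and deactivating the switch zone reduce to passing `Some(request)` or `None` to `ensure_zone`. Below is a simplified, hypothetical sketch of that shape, using stand-in types and ignoring filesystems, data links, and the reconfiguration cases the real code handles.]

```rust
// Stand-ins for the real SwitchZoneConfig / SledLocalZone types.
struct SwitchZoneConfig {
    id: uuid::Uuid,
    addresses: Vec<std::net::Ipv6Addr>,
    services: Vec<&'static str>,
}

enum SledLocalZone {
    Disabled,
    Running { request: SwitchZoneConfig },
}

fn ensure_zone(state: &mut SledLocalZone, request: Option<SwitchZoneConfig>) {
    let currently_running = matches!(state, SledLocalZone::Running { .. });
    match (currently_running, request) {
        // Activate: a request arrives while the zone is disabled.
        (false, Some(request)) => {
            *state = SledLocalZone::Running { request };
        }
        // Deactivate: `None` tears the zone down.
        (true, None) => {
            *state = SledLocalZone::Disabled;
        }
        // Other combinations reconfigure or are no-ops in the real sled agent.
        _ => {}
    }
}

fn main() {
    let mut zone = SledLocalZone::Disabled;
    let request = SwitchZoneConfig {
        id: uuid::Uuid::new_v4(),
        addresses: vec![std::net::Ipv6Addr::LOCALHOST],
        services: vec!["dendrite", "wicketd"],
    };
    ensure_zone(&mut zone, Some(request)); // activate
    ensure_zone(&mut zone, None); // deactivate
    assert!(matches!(zone, SledLocalZone::Disabled));
}
```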
@@ -2680,7 +3189,7 @@
             }
         };
 
-        let smfh = SmfHelper::new(&zone, &ServiceType::Uplink);
+        let smfh = SmfHelper::new(&zone, &SwitchService::Uplink);
 
         // We want to delete all the properties in the `uplinks` group, but we
         // don't know their names, so instead we'll delete and recreate the
@@ -2705,7 +3214,6 @@
     /// Ensures that no switch zone is active.
     pub async fn deactivate_switch(&self) -> Result<(), Error> {
         self.ensure_zone(
-            ZoneType::Switch,
             // request=
             None,
             // filesystems=
@@ -2722,12 +3230,11 @@
     fn start_zone(
         self,
         zone: &mut SledLocalZone,
-        request: ServiceZoneRequest,
+        request: SwitchZoneConfig,
         filesystems: Vec<zone::Fs>,
         data_links: Vec<String>,
     ) {
         let (exit_tx, exit_rx) = oneshot::channel();
-        let zone_type = request.zone_type.clone();
         *zone = SledLocalZone::Initializing {
             request,
             filesystems,
@@ -2735,7 +3242,7 @@
             worker: Some(Task {
                 exit_tx,
                 initializer: tokio::task::spawn(async move {
-                    self.initialize_zone_loop(zone_type, exit_rx).await
+                    self.initialize_zone_loop(exit_rx).await
                 }),
             }),
         };
@@ -2744,21 +3251,14 @@
     // Moves the current state to align with the "request".
     async fn ensure_zone(
         &self,
-        zone_type: ZoneType,
-        request: Option<ServiceZoneRequest>,
+        request: Option<SwitchZoneConfig>,
         filesystems: Vec<zone::Fs>,
         data_links: Vec<String>,
     ) -> Result<(), Error> {
         let log = &self.inner.log;
 
-        let mut sled_zone;
-        match zone_type {
-            ZoneType::Switch => {
-                sled_zone = self.inner.switch_zone.lock().await;
-            }
-            _ => panic!("Unhandled zone type"),
-        }
-        let zone_typestr = zone_type.to_string();
+        let mut sled_zone = self.inner.switch_zone.lock().await;
+        let zone_typestr = "switch";
 
         match (&mut *sled_zone, request) {
             (SledLocalZone::Disabled, Some(request)) => {
@@ -2827,10 +3327,10 @@
             }
 
             for service in &request.services {
-                let smfh = SmfHelper::new(&zone, &service.details);
+                let smfh = SmfHelper::new(&zone, service);
 
-                match &service.details {
-                    ServiceType::ManagementGatewayService => {
+                match service {
+                    SwitchService::ManagementGatewayService => {
                         // Remove any existing `config/address` values
                         // without deleting the property itself.
                         smfh.delpropvalue("config/address", "*")?;
@@ -2848,8 +3348,9 @@
                             &format!("[{address}]:{MGS_PORT}"),
                         )?;
 
-                        // It should be impossible for the `sled_info` not to be set here,
-                        // as the underlay is set at the same time.
+                        // It should be impossible for the `sled_info` not
+                        // to be set here, as the underlay is set at the
+                        // same time.
                         if let Some(info) = self.inner.sled_info.get() {
                             smfh.setprop("config/rack_id", info.rack_id)?;
                         } else {
@@ -2864,7 +3365,7 @@
 
                         smfh.refresh()?;
                     }
-                    ServiceType::Dendrite { .. } => {
+                    SwitchService::Dendrite { .. } => {
                         info!(self.inner.log, "configuring dendrite zone");
                         if let Some(info) = self.inner.sled_info.get() {
                             smfh.setprop("config/rack_id", info.rack_id)?;
@@ -2900,7 +3401,7 @@
                         }
                         smfh.refresh()?;
                     }
-                    ServiceType::Wicketd { .. } => {
+                    SwitchService::Wicketd { .. } => {
                         if let Some(&address) = first_address {
                             let rack_subnet =
                                 Ipv6Subnet::<AZ_PREFIX>::new(address);
@@ -2923,15 +3424,16 @@
                             );
                         }
                     }
-                    ServiceType::Tfport { .. } => {
+                    SwitchService::Tfport { .. } => {
                         // Since tfport and dpd communicate using localhost,
-                        // the tfport service shouldn't need to be restarted.
+                        // the tfport service shouldn't need to be
+                        // restarted.
                     }
-                    ServiceType::Uplink { .. } => {
+                    SwitchService::Uplink { .. } => {
                         // Only configured in
                         // `ensure_switch_zone_uplinks_configured`
                     }
-                    ServiceType::MgDdm { mode } => {
+                    SwitchService::MgDdm { mode } => {
                         smfh.delpropvalue("config/mode", "*")?;
                         smfh.addpropvalue("config/mode", &mode)?;
                         smfh.refresh()?;
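[Editorial note: the `start_zone`/`initialize_zone_loop` pair above uses a common tokio pattern: a background task retries initialization until it succeeds or a oneshot "exit" message arrives. Here is a minimal, self-contained sketch of that pattern, with the retry logic and all zone specifics stubbed out.]

```rust
use std::time::Duration;
use tokio::sync::oneshot;

// Stand-in for the real zone-initialization attempt.
async fn try_initialize() -> Result<(), String> {
    Err("not ready yet".to_string())
}

async fn initialize_zone_loop(mut exit_rx: oneshot::Receiver<()>) {
    loop {
        match try_initialize().await {
            Ok(()) => return,
            Err(e) => eprintln!("failed to initialize switch zone: {e}"),
        }
        // Wait a bit before retrying, but bail out immediately if the exit
        // signal arrives (or if the sender side was dropped).
        tokio::select! {
            _ = tokio::time::sleep(Duration::from_secs(1)) => {}
            _ = &mut exit_rx => return,
        }
    }
}

#[tokio::main]
async fn main() {
    let (exit_tx, exit_rx) = oneshot::channel();
    let worker = tokio::task::spawn(initialize_zone_loop(exit_rx));
    // Later, e.g. in `deactivate_switch`, tell the worker to stop.
    let _ = exit_tx.send(());
    let _ = worker.await;
}
```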
@@ -2980,52 +3482,28 @@
         // switch zone were on a U.2 device we would not be able to run RSS, as
         // we could not create the U.2 disks due to lack of encryption. To break
         // the cycle we put the switch zone root fs on the ramdisk.
-        let root = if request.zone_type == ZoneType::Switch {
-            Utf8PathBuf::from(ZONE_ZFS_RAMDISK_DATASET_MOUNTPOINT)
-        } else {
-            let all_u2_roots = self
-                .inner
-                .storage
-                .get_latest_resources()
-                .await
-                .all_u2_mountpoints(ZONE_DATASET);
-            let mut rng = rand::rngs::StdRng::from_entropy();
-            all_u2_roots
-                .choose(&mut rng)
-                .ok_or_else(|| Error::U2NotFound)?
-                .clone()
-        };
-
-        let request = ZoneRequest { zone: request.clone(), root };
+        let root = Utf8PathBuf::from(ZONE_ZFS_RAMDISK_DATASET_MOUNTPOINT);
+        let zone_request =
+            SwitchZoneConfigLocal { root, zone: request.clone() };
+        let zone_args = ZoneArgs::Switch(&zone_request);
 
         let zone =
-            self.initialize_zone(&request, filesystems, data_links).await?;
-        *sled_zone =
-            SledLocalZone::Running { request: request.zone.clone(), zone };
+            self.initialize_zone(zone_args, filesystems, data_links).await?;
+        *sled_zone = SledLocalZone::Running { request: request.clone(), zone };
         Ok(())
     }
 
     // Body of a tokio task responsible for running until the switch zone is
     // initialized, or it has been told to stop.
-    async fn initialize_zone_loop(
-        &self,
-        zone_type: ZoneType,
-        mut exit_rx: oneshot::Receiver<()>,
-    ) {
+    async fn initialize_zone_loop(&self, mut exit_rx: oneshot::Receiver<()>) {
         loop {
             {
-                let mut sled_zone;
-                match zone_type {
-                    ZoneType::Switch => {
-                        sled_zone = self.inner.switch_zone.lock().await;
-                    }
-                    _ => panic!("Unhandled zone type"),
-                }
+                let mut sled_zone = self.inner.switch_zone.lock().await;
                 match self.try_initialize_sled_local_zone(&mut sled_zone).await
                 {
                     Ok(()) => return,
                     Err(e) => warn!(
                         self.inner.log,
-                        "Failed to initialize {zone_type}: {e}"
+                        "Failed to initialize switch zone: {e}"
                     ),
                 }
             }
@@ -3045,7 +3523,6 @@
 #[cfg(test)]
 mod test {
     use super::*;
-    use crate::params::{ServiceZoneService, ZoneType};
     use illumos_utils::zpool::ZpoolName;
     use illumos_utils::{
         dladm::{
@@ -3149,27 +3626,96 @@
         ]
     }
 
+    // Configures our mock implementations to work for cases where we configure
+    // multiple zones in one `ensure_all_omicron_zones_persistent()` call.
+    //
+    // This is looser than the expectations created by ensure_new_service()
+    // because these functions may return any number of times.
+    fn expect_new_services() -> Vec<Box<dyn std::any::Any>> {
+        illumos_utils::USE_MOCKS.store(true, Ordering::SeqCst);
+        // Create a VNIC
+        let create_vnic_ctx = MockDladm::create_vnic_context();
+        create_vnic_ctx.expect().returning(
+            |physical_link: &Etherstub, _, _, _, _| {
+                assert_eq!(&physical_link.0, &UNDERLAY_ETHERSTUB_NAME);
+                Ok(())
+            },
+        );
+
+        // Install the Omicron Zone
+        let install_ctx = MockZones::install_omicron_zone_context();
+        install_ctx.expect().returning(|_, _, name, _, _, _, _, _, _| {
+            assert!(name.starts_with(EXPECTED_ZONE_NAME_PREFIX));
+            Ok(())
+        });
+
+        // Boot the zone.
+        let boot_ctx = MockZones::boot_context();
+        boot_ctx.expect().returning(|name| {
+            assert!(name.starts_with(EXPECTED_ZONE_NAME_PREFIX));
+            Ok(())
+        });
+
+        // After calling `MockZones::boot`, `RunningZone::boot` will then look
+        // up the zone ID for the booted zone. This goes through
+        // `MockZone::id` to find the zone and get its ID.
+ let id_ctx = MockZones::id_context(); + let id = Arc::new(std::sync::Mutex::new(1)); + id_ctx.expect().returning(move |name| { + assert!(name.starts_with(EXPECTED_ZONE_NAME_PREFIX)); + let mut value = id.lock().unwrap(); + let rv = *value; + *value = rv + 1; + Ok(Some(rv)) + }); + + // Ensure the address exists + let ensure_address_ctx = MockZones::ensure_address_context(); + ensure_address_ctx.expect().returning(|_, _, _| { + Ok(ipnetwork::IpNetwork::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 64) + .unwrap()) + }); + + // Wait for the networking service. + let wait_ctx = svc::wait_for_service_context(); + wait_ctx.expect().returning(|_, _, _| Ok(())); + + // Import the manifest, enable the service + let execute_ctx = illumos_utils::execute_helper_context(); + execute_ctx.expect().times(..).returning(|_| { + Ok(std::process::Output { + status: std::process::ExitStatus::from_raw(0), + stdout: vec![], + stderr: vec![], + }) + }); + + vec![ + Box::new(create_vnic_ctx), + Box::new(install_ctx), + Box::new(boot_ctx), + Box::new(id_ctx), + Box::new(ensure_address_ctx), + Box::new(wait_ctx), + Box::new(execute_ctx), + ] + } + // Prepare to call "ensure" for a new service, then actually call "ensure". - async fn ensure_new_service(mgr: &ServiceManager, id: Uuid) { + async fn ensure_new_service( + mgr: &ServiceManager, + id: Uuid, + generation: Generation, + ) { let _expectations = expect_new_service(); - - mgr.ensure_all_services_persistent(ServiceEnsureBody { - services: vec![ServiceZoneRequest { + let address = + SocketAddrV6::new(Ipv6Addr::LOCALHOST, OXIMETER_PORT, 0, 0); + mgr.ensure_all_omicron_zones_persistent(OmicronZonesConfig { + generation, + zones: vec![OmicronZoneConfig { id, - zone_type: ZoneType::Oximeter, - addresses: vec![Ipv6Addr::LOCALHOST], - dataset: None, - services: vec![ServiceZoneService { - id, - details: ServiceType::Oximeter { - address: SocketAddrV6::new( - Ipv6Addr::LOCALHOST, - OXIMETER_PORT, - 0, - 0, - ), - }, - }], + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: OmicronZoneType::Oximeter { address }, }], }) .await @@ -3178,24 +3724,19 @@ mod test { // Prepare to call "ensure" for a service which already exists. We should // return the service without actually installing a new zone. 
- async fn ensure_existing_service(mgr: &ServiceManager, id: Uuid) { - mgr.ensure_all_services_persistent(ServiceEnsureBody { - services: vec![ServiceZoneRequest { + async fn ensure_existing_service( + mgr: &ServiceManager, + id: Uuid, + generation: Generation, + ) { + let address = + SocketAddrV6::new(Ipv6Addr::LOCALHOST, OXIMETER_PORT, 0, 0); + mgr.ensure_all_omicron_zones_persistent(OmicronZonesConfig { + generation, + zones: vec![OmicronZoneConfig { id, - zone_type: ZoneType::Oximeter, - addresses: vec![Ipv6Addr::LOCALHOST], - dataset: None, - services: vec![ServiceZoneService { - id, - details: ServiceType::Oximeter { - address: SocketAddrV6::new( - Ipv6Addr::LOCALHOST, - OXIMETER_PORT, - 0, - 0, - ), - }, - }], + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: OmicronZoneType::Oximeter { address }, }], }) .await @@ -3271,179 +3812,165 @@ mod test { handle } + #[derive(Clone)] + struct LedgerTestHelper<'a> { + log: slog::Logger, + ddmd_client: DdmAdminClient, + storage_handle: StorageHandle, + zone_bundler: ZoneBundler, + test_config: &'a TestConfig, + } + + impl<'a> LedgerTestHelper<'a> { + async fn new( + log: slog::Logger, + test_config: &'a TestConfig, + ) -> LedgerTestHelper { + let ddmd_client = DdmAdminClient::localhost(&log).unwrap(); + let storage_handle = setup_storage().await; + let zone_bundler = ZoneBundler::new( + log.clone(), + storage_handle.clone(), + Default::default(), + ); + + LedgerTestHelper { + log, + ddmd_client, + storage_handle, + zone_bundler, + test_config, + } + } + + fn new_service_manager(self) -> ServiceManager { + let log = &self.log; + let mgr = ServiceManager::new( + log, + self.ddmd_client, + make_bootstrap_networking_config(), + SledMode::Auto, + Some(true), + SidecarRevision::Physical("rev-test".to_string()), + vec![], + self.storage_handle, + self.zone_bundler, + ); + self.test_config.override_paths(&mgr); + mgr + } + + fn sled_agent_started( + log: &slog::Logger, + test_config: &TestConfig, + mgr: &ServiceManager, + ) { + let port_manager = PortManager::new( + log.new(o!("component" => "PortManager")), + Ipv6Addr::new( + 0xfd00, 0x1de, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + ), + ); + + mgr.sled_agent_started( + test_config.make_config(), + port_manager, + Ipv6Addr::LOCALHOST, + Uuid::new_v4(), + None, + ) + .unwrap(); + } + } + #[tokio::test] - #[serial_test::serial] async fn test_ensure_service() { let logctx = omicron_test_utils::dev::test_setup_log("test_ensure_service"); - let log = logctx.log.clone(); let test_config = TestConfig::new().await; + let helper = + LedgerTestHelper::new(logctx.log.clone(), &test_config).await; + let mgr = helper.new_service_manager(); + LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); + + let v1 = Generation::new(); + let found = + mgr.omicron_zones_list().await.expect("failed to list zones"); + assert_eq!(found.generation, v1); + assert!(found.zones.is_empty()); + + let v2 = v1.next(); + let id = Uuid::new_v4(); + ensure_new_service(&mgr, id, v2).await; - let storage_handle = setup_storage().await; - let zone_bundler = ZoneBundler::new( - log.clone(), - storage_handle.clone(), - Default::default(), - ); - let mgr = ServiceManager::new( - &log, - DdmAdminClient::localhost(&log).unwrap(), - make_bootstrap_networking_config(), - SledMode::Auto, - Some(true), - SidecarRevision::Physical("rev-test".to_string()), - vec![], - storage_handle, - zone_bundler, - ); - test_config.override_paths(&mgr); + let found = + mgr.omicron_zones_list().await.expect("failed to list zones"); + 
assert_eq!(found.generation, v2); + assert_eq!(found.zones.len(), 1); + assert_eq!(found.zones[0].id, id); - let port_manager = PortManager::new( - logctx.log.new(o!("component" => "PortManager")), - Ipv6Addr::new(0xfd00, 0x1de, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01), - ); - mgr.sled_agent_started( - test_config.make_config(), - port_manager, - Ipv6Addr::LOCALHOST, - Uuid::new_v4(), - None, - ) - .unwrap(); - - let id = Uuid::new_v4(); - ensure_new_service(&mgr, id).await; drop_service_manager(mgr); logctx.cleanup_successful(); } #[tokio::test] - #[serial_test::serial] async fn test_ensure_service_which_already_exists() { let logctx = omicron_test_utils::dev::test_setup_log( "test_ensure_service_which_already_exists", ); - let log = logctx.log.clone(); let test_config = TestConfig::new().await; + let helper = + LedgerTestHelper::new(logctx.log.clone(), &test_config).await; + let mgr = helper.new_service_manager(); + LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); - let storage_handle = setup_storage().await; - let zone_bundler = ZoneBundler::new( - log.clone(), - storage_handle.clone(), - Default::default(), - ); - let mgr = ServiceManager::new( - &log, - DdmAdminClient::localhost(&log).unwrap(), - make_bootstrap_networking_config(), - SledMode::Auto, - Some(true), - SidecarRevision::Physical("rev-test".to_string()), - vec![], - storage_handle, - zone_bundler, - ); - test_config.override_paths(&mgr); - - let port_manager = PortManager::new( - logctx.log.new(o!("component" => "PortManager")), - Ipv6Addr::new(0xfd00, 0x1de, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01), - ); - mgr.sled_agent_started( - test_config.make_config(), - port_manager, - Ipv6Addr::LOCALHOST, - Uuid::new_v4(), - None, - ) - .unwrap(); - + let v2 = Generation::new().next(); let id = Uuid::new_v4(); - ensure_new_service(&mgr, id).await; - ensure_existing_service(&mgr, id).await; + ensure_new_service(&mgr, id, v2).await; + let v3 = v2.next(); + ensure_existing_service(&mgr, id, v3).await; + let found = + mgr.omicron_zones_list().await.expect("failed to list zones"); + assert_eq!(found.generation, v3); + assert_eq!(found.zones.len(), 1); + assert_eq!(found.zones[0].id, id); + drop_service_manager(mgr); logctx.cleanup_successful(); } #[tokio::test] - #[serial_test::serial] async fn test_services_are_recreated_on_reboot() { let logctx = omicron_test_utils::dev::test_setup_log( "test_services_are_recreated_on_reboot", ); - let log = logctx.log.clone(); let test_config = TestConfig::new().await; - let ddmd_client = DdmAdminClient::localhost(&log).unwrap(); - let bootstrap_networking = make_bootstrap_networking_config(); - - // First, spin up a ServiceManager, create a new service, and tear it - // down. 
- let storage_handle = setup_storage().await; - let zone_bundler = ZoneBundler::new( - log.clone(), - storage_handle.clone(), - Default::default(), - ); - let mgr = ServiceManager::new( - &log, - ddmd_client.clone(), - bootstrap_networking.clone(), - SledMode::Auto, - Some(true), - SidecarRevision::Physical("rev-test".to_string()), - vec![], - storage_handle.clone(), - zone_bundler.clone(), - ); - test_config.override_paths(&mgr); + let helper = + LedgerTestHelper::new(logctx.log.clone(), &test_config).await; - let port_manager = PortManager::new( - log.new(o!("component" => "PortManager")), - Ipv6Addr::new(0xfd00, 0x1de, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01), - ); - mgr.sled_agent_started( - test_config.make_config(), - port_manager, - Ipv6Addr::LOCALHOST, - Uuid::new_v4(), - None, - ) - .unwrap(); + // First, spin up a ServiceManager, create a new zone, and then tear + // down the ServiceManager. + let mgr = helper.clone().new_service_manager(); + LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); + let v2 = Generation::new().next(); let id = Uuid::new_v4(); - ensure_new_service(&mgr, id).await; + ensure_new_service(&mgr, id, v2).await; drop_service_manager(mgr); // Before we re-create the service manager - notably, using the same // config file! - expect that a service gets initialized. let _expectations = expect_new_service(); - let mgr = ServiceManager::new( - &log, - ddmd_client, - bootstrap_networking, - SledMode::Auto, - Some(true), - SidecarRevision::Physical("rev-test".to_string()), - vec![], - storage_handle.clone(), - zone_bundler.clone(), - ); - test_config.override_paths(&mgr); + let mgr = helper.new_service_manager(); + LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); - let port_manager = PortManager::new( - log.new(o!("component" => "PortManager")), - Ipv6Addr::new(0xfd00, 0x1de, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01), - ); - mgr.sled_agent_started( - test_config.make_config(), - port_manager, - Ipv6Addr::LOCALHOST, - Uuid::new_v4(), - None, - ) - .unwrap(); + let found = + mgr.omicron_zones_list().await.expect("failed to list zones"); + assert_eq!(found.generation, v2); + assert_eq!(found.zones.len(), 1); + assert_eq!(found.zones[0].id, id); drop_service_manager(mgr); @@ -3451,90 +3978,325 @@ mod test { } #[tokio::test] - #[serial_test::serial] async fn test_services_do_not_persist_without_config() { let logctx = omicron_test_utils::dev::test_setup_log( "test_services_do_not_persist_without_config", ); - let log = logctx.log.clone(); let test_config = TestConfig::new().await; - let ddmd_client = DdmAdminClient::localhost(&log).unwrap(); - let bootstrap_networking = make_bootstrap_networking_config(); - - // First, spin up a ServiceManager, create a new service, and tear it - // down. 
- let storage_handle = setup_storage().await; - let zone_bundler = ZoneBundler::new( - log.clone(), - storage_handle.clone(), - Default::default(), - ); - let mgr = ServiceManager::new( - &log, - ddmd_client.clone(), - bootstrap_networking.clone(), - SledMode::Auto, - Some(true), - SidecarRevision::Physical("rev-test".to_string()), - vec![], - storage_handle.clone(), - zone_bundler.clone(), - ); - test_config.override_paths(&mgr); + let helper = + LedgerTestHelper::new(logctx.log.clone(), &test_config).await; - let port_manager = PortManager::new( - log.new(o!("component" => "PortManager")), - Ipv6Addr::new(0xfd00, 0x1de, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01), - ); - mgr.sled_agent_started( - test_config.make_config(), - port_manager, - Ipv6Addr::LOCALHOST, - Uuid::new_v4(), - None, - ) - .unwrap(); + // First, spin up a ServiceManager, create a new zone, and then tear + // down the ServiceManager. + let mgr = helper.clone().new_service_manager(); + LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); + let v1 = Generation::new(); + let v2 = v1.next(); let id = Uuid::new_v4(); - ensure_new_service(&mgr, id).await; + ensure_new_service(&mgr, id, v2).await; drop_service_manager(mgr); - // Next, delete the ledger. This means the service we just created will - // not be remembered on the next initialization. + // Next, delete the ledger. This means the zone we just created will not + // be remembered on the next initialization. std::fs::remove_file( - test_config.config_dir.path().join(SERVICES_LEDGER_FILENAME), + test_config.config_dir.path().join(ZONES_LEDGER_FILENAME), ) .unwrap(); // Observe that the old service is not re-initialized. - let mgr = ServiceManager::new( - &log, - ddmd_client, - bootstrap_networking, - SledMode::Auto, - Some(true), - SidecarRevision::Physical("rev-test".to_string()), - vec![], - storage_handle, - zone_bundler.clone(), - ); - test_config.override_paths(&mgr); + let mgr = helper.new_service_manager(); + LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); + + let found = + mgr.omicron_zones_list().await.expect("failed to list zones"); + assert_eq!(found.generation, v1); + assert!(found.zones.is_empty()); + + drop_service_manager(mgr); + + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_bad_generations() { + // Start like the normal tests. + let logctx = + omicron_test_utils::dev::test_setup_log("test_bad_generations"); + let test_config = TestConfig::new().await; + let helper = + LedgerTestHelper::new(logctx.log.clone(), &test_config).await; + let mgr = helper.new_service_manager(); + LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); + + // Like the normal tests, set up a generation with one zone in it. + let v1 = Generation::new(); + let v2 = v1.next(); + let id1 = Uuid::new_v4(); + + let _expectations = expect_new_services(); + let address = + SocketAddrV6::new(Ipv6Addr::LOCALHOST, OXIMETER_PORT, 0, 0); + let mut zones = vec![OmicronZoneConfig { + id: id1, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: OmicronZoneType::Oximeter { address }, + }]; + mgr.ensure_all_omicron_zones_persistent(OmicronZonesConfig { + generation: v2, + zones: zones.clone(), + }) + .await + .unwrap(); + + let found = + mgr.omicron_zones_list().await.expect("failed to list zones"); + assert_eq!(found.generation, v2); + assert_eq!(found.zones.len(), 1); + assert_eq!(found.zones[0].id, id1); + + // Make a new list of zones that we're going to try with a bunch of + // different generation numbers. 
+ let id2 = Uuid::new_v4(); + zones.push(OmicronZoneConfig { + id: id2, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: OmicronZoneType::Oximeter { address }, + }); + + // Now try to apply that list with an older generation number. This + // shouldn't work and the reported state should be unchanged. + let error = mgr + .ensure_all_omicron_zones_persistent(OmicronZonesConfig { + generation: v1, + zones: zones.clone(), + }) + .await + .expect_err("unexpectedly went backwards in zones generation"); + assert!(matches!( + error, + Error::RequestedConfigOutdated { requested, current } + if requested == v1 && current == v2 + )); + let found2 = + mgr.omicron_zones_list().await.expect("failed to list zones"); + assert_eq!(found, found2); + + // Now try to apply that list with the same generation number that we + // used before. This shouldn't work either. + let error = mgr + .ensure_all_omicron_zones_persistent(OmicronZonesConfig { + generation: v2, + zones: zones.clone(), + }) + .await + .expect_err("unexpectedly changed a single zone generation"); + assert!(matches!( + error, + Error::RequestedConfigConflicts(vr) if vr == v2 + )); + let found3 = + mgr.omicron_zones_list().await.expect("failed to list zones"); + assert_eq!(found, found3); + + // But we should be able to apply this new list of zones as long as we + // advance the generation number. + let v3 = v2.next(); + mgr.ensure_all_omicron_zones_persistent(OmicronZonesConfig { + generation: v3, + zones: zones.clone(), + }) + .await + .expect("failed to remove all zones in a new generation"); + let found4 = + mgr.omicron_zones_list().await.expect("failed to list zones"); + assert_eq!(found4.generation, v3); + let mut our_zones = zones; + our_zones.sort_by(|a, b| a.id.cmp(&b.id)); + let mut found_zones = found4.zones; + found_zones.sort_by(|a, b| a.id.cmp(&b.id)); + assert_eq!(our_zones, found_zones); + + drop_service_manager(mgr); - let port_manager = PortManager::new( - log.new(o!("component" => "PortManager")), - Ipv6Addr::new(0xfd00, 0x1de, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01), + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_old_ledger_migration() { + let logctx = omicron_test_utils::dev::test_setup_log( + "test_old_ledger_migration", ); - mgr.sled_agent_started( - test_config.make_config(), - port_manager, - Ipv6Addr::LOCALHOST, - Uuid::new_v4(), - None, + let test_config = TestConfig::new().await; + + // Before we start the service manager, stuff one of our old-format + // service ledgers into place. + let contents = + include_str!("../tests/old-service-ledgers/rack2-sled10.json"); + std::fs::write( + test_config.config_dir.path().join(SERVICES_LEDGER_FILENAME), + contents, + ) + .expect("failed to copy example old-format services ledger into place"); + + // Now start the service manager. + let helper = + LedgerTestHelper::new(logctx.log.clone(), &test_config).await; + let mgr = helper.clone().new_service_manager(); + LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); + + // Trigger the migration code. (Yes, it's hokey that we create this + // fake argument.) + let unused = Mutex::new(BTreeMap::new()); + let migrated_ledger = mgr + .load_ledgered_zones(&unused.lock().await) + .await + .expect("failed to load ledgered zones") + .unwrap(); + + // As a quick check, the migrated ledger should have some zones. 
+ let migrated_config = migrated_ledger.data(); + assert!(!migrated_config.zones.is_empty()); + + // The ServiceManager should now report the migrated zones, meaning that + // they've been copied into the new-format ledger. + let found = + mgr.omicron_zones_list().await.expect("failed to list zones"); + assert_eq!(found, migrated_config.clone().to_omicron_zones_config()); + // They should both match the expected converted output. + let expected: OmicronZonesConfigLocal = serde_json::from_str( + include_str!("../tests/output/new-zones-ledgers/rack2-sled10.json"), ) .unwrap(); + let expected_config = expected.to_omicron_zones_config(); + assert_eq!(found, expected_config); + // Just to be sure, shut down the manager and create a new one without + // triggering migration again. It should also report the same zones. drop_service_manager(mgr); + let mgr = helper.new_service_manager(); + LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); + + let found = + mgr.omicron_zones_list().await.expect("failed to list zones"); + assert_eq!(found, expected_config); + + drop_service_manager(mgr); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_old_ledger_migration_continue() { + // This test is just like "test_old_ledger_migration", except that we + // deploy a new zone after migration and before shutting down the + // service manager. This tests that new changes modify the new, + // migrated config. + let logctx = omicron_test_utils::dev::test_setup_log( + "test_old_ledger_migration_continue", + ); + let test_config = TestConfig::new().await; + + // Before we start the service manager, stuff one of our old-format + // service ledgers into place. + let contents = + include_str!("../tests/old-service-ledgers/rack2-sled10.json"); + std::fs::write( + test_config.config_dir.path().join(SERVICES_LEDGER_FILENAME), + contents, + ) + .expect("failed to copy example old-format services ledger into place"); + + // Now start the service manager. + let helper = + LedgerTestHelper::new(logctx.log.clone(), &test_config).await; + let mgr = helper.clone().new_service_manager(); + LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); + + // Trigger the migration code. + let unused = Mutex::new(BTreeMap::new()); + let migrated_ledger = mgr + .load_ledgered_zones(&unused.lock().await) + .await + .expect("failed to load ledgered zones") + .unwrap(); + + // The other test verified that migration has happened normally so let's + // assume it has. Now provision a new zone. + let vv = migrated_ledger.data().omicron_generation.next(); + let id = Uuid::new_v4(); + + let _expectations = expect_new_services(); + let address = + SocketAddrV6::new(Ipv6Addr::LOCALHOST, OXIMETER_PORT, 0, 0); + let mut zones = + migrated_ledger.data().clone().to_omicron_zones_config().zones; + zones.push(OmicronZoneConfig { + id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: OmicronZoneType::Oximeter { address }, + }); + mgr.ensure_all_omicron_zones_persistent(OmicronZonesConfig { + generation: vv, + zones, + }) + .await + .expect("failed to add new zone after migration"); + let found = + mgr.omicron_zones_list().await.expect("failed to list zones"); + assert_eq!(found.generation, vv); + assert_eq!(found.zones.len(), migrated_ledger.data().zones.len() + 1); + + // Just to be sure, shut down the manager and create a new one without + // triggering migration again. It should now report one more zone than + // was migrated earlier. 
+ drop_service_manager(mgr); + + let mgr = helper.new_service_manager(); + LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); + let found = + mgr.omicron_zones_list().await.expect("failed to list zones"); + assert_eq!(found.generation, vv); + assert_eq!(found.zones.len(), migrated_ledger.data().zones.len() + 1); + + drop_service_manager(mgr); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_old_ledger_migration_bad() { + let logctx = omicron_test_utils::dev::test_setup_log( + "test_old_ledger_migration_bad", + ); + let test_config = TestConfig::new().await; + let helper = + LedgerTestHelper::new(logctx.log.clone(), &test_config).await; + + // Before we start things, stuff a broken ledger into place. For this + // to test what we want, it needs to be a valid ledger that we simply + // failed to convert. + std::fs::write( + test_config.config_dir.path().join(SERVICES_LEDGER_FILENAME), + "{", + ) + .expect("failed to copy example old-format services ledger into place"); + + // Start the service manager. + let mgr = helper.new_service_manager(); + LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); + + // Trigger the migration code. + let unused = Mutex::new(BTreeMap::new()); + let error = mgr + .load_ledgered_zones(&unused.lock().await) + .await + .expect_err("succeeded in loading bogus ledgered zones"); + assert_eq!( + "Error migrating old-format services ledger: failed to read or \ + parse old-format ledger, but one exists", + format!("{:#}", error) + ); + logctx.cleanup_successful(); } @@ -3552,19 +4314,19 @@ mod test { } #[test] - fn test_all_zone_requests_schema() { - let schema = schemars::schema_for!(AllZoneRequests); + fn test_zone_bundle_metadata_schema() { + let schema = schemars::schema_for!(ZoneBundleMetadata); expectorate::assert_contents( - "../schema/all-zone-requests.json", + "../schema/zone-bundle-metadata.json", &serde_json::to_string_pretty(&schema).unwrap(), ); } #[test] - fn test_zone_bundle_metadata_schema() { - let schema = schemars::schema_for!(ZoneBundleMetadata); + fn test_all_zones_requests_schema() { + let schema = schemars::schema_for!(OmicronZonesConfigLocal); expectorate::assert_contents( - "../schema/zone-bundle-metadata.json", + "../schema/all-zones-requests.json", &serde_json::to_string_pretty(&schema).unwrap(), ); } diff --git a/sled-agent/src/services_migration.rs b/sled-agent/src/services_migration.rs new file mode 100644 index 0000000000..bedd4759c8 --- /dev/null +++ b/sled-agent/src/services_migration.rs @@ -0,0 +1,624 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Sled Agents are responsible for running zones that make up much of the +//! control plane (Omicron). Configuration for these zones is owned by the +//! control plane, but that configuration must be persisted locally in order to +//! support cold boot of the control plane. (The control plane can't very well +//! tell sled agents what to run if it's not online yet!) +//! +//! Historically, these configurations were represented as an +//! `AllZonesRequests`, which contains a bunch of `ZoneRequest`s, each +//! containing a `ServiceZoneRequest`. This last structure was quite general +//! and made it possible to express a world of configurations that are not +//! actually valid. To avoid spreading extra complexity, these structures were +//! 
replaced with `OmicronZonesConfigLocal` and `OmicronZonesConfig`,
+//! respectively. Upgrading production systems across this change requires
+//! migrating any locally-stored configuration in the old format into the new
+//! one.
+//!
+//! This file defines these old-format types and functions to convert them to
+//! the new types, solely to perform that migration. We can remove all this
+//! when we're satisfied that all deployed systems that we care about have
+//! moved past this change.
+
+use crate::params::{
+    OmicronZoneConfig, OmicronZoneDataset, OmicronZoneType, ZoneType,
+    OMICRON_ZONES_CONFIG_INITIAL_GENERATION,
+};
+use crate::services::{OmicronZoneConfigLocal, OmicronZonesConfigLocal};
+use anyhow::{anyhow, ensure, Context};
+use camino::Utf8PathBuf;
+use omicron_common::api::external::Generation;
+use omicron_common::api::internal::shared::{
+    NetworkInterface, SourceNatConfig,
+};
+use omicron_common::ledger::Ledgerable;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+use sled_storage::dataset::{DatasetKind, DatasetName};
+use std::fmt::Debug;
+use std::net::{IpAddr, Ipv6Addr, SocketAddr, SocketAddrV6};
+use uuid::Uuid;
+
+/// The filename of the ledger containing this old-format configuration.
+pub const SERVICES_LEDGER_FILENAME: &str = "services.json";
+
+/// A wrapper around `ZoneRequest` that allows it to be serialized to a JSON
+/// file.
+#[derive(Clone, serde::Serialize, serde::Deserialize, schemars::JsonSchema)]
+pub struct AllZoneRequests {
+    /// ledger generation (not an Omicron-provided generation)
+    generation: Generation,
+    requests: Vec<ZoneRequest>,
+}
+
+impl Default for AllZoneRequests {
+    fn default() -> Self {
+        Self { generation: Generation::new(), requests: vec![] }
+    }
+}
+
+impl Ledgerable for AllZoneRequests {
+    fn is_newer_than(&self, other: &AllZoneRequests) -> bool {
+        self.generation >= other.generation
+    }
+
+    fn generation_bump(&mut self) {
+        self.generation = self.generation.next();
+    }
+}
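[Editorial note: `Ledgerable` is what lets the generic ledger code arbitrate between the copies stored on the two M.2 devices. Below is a simplified, hypothetical sketch of how a ledger might pick the authoritative copy; the real `Ledger` in `omicron_common::ledger` also handles I/O, serialization, and error cases.]

```rust
// Simplified stand-in for omicron_common::ledger::Ledgerable.
trait Ledgerable {
    fn is_newer_than(&self, other: &Self) -> bool;
    fn generation_bump(&mut self);
}

#[derive(Clone, Debug, PartialEq)]
struct Config {
    generation: u64,
    payload: String,
}

impl Ledgerable for Config {
    fn is_newer_than(&self, other: &Self) -> bool {
        self.generation >= other.generation
    }
    fn generation_bump(&mut self) {
        self.generation += 1;
    }
}

/// Picks the authoritative copy from whatever could be read off disk.
fn select<T: Ledgerable>(copies: Vec<T>) -> Option<T> {
    copies.into_iter().reduce(|best, candidate| {
        if candidate.is_newer_than(&best) { candidate } else { best }
    })
}

fn main() {
    let m2_a = Config { generation: 4, payload: "old".into() };
    let m2_b = Config { generation: 5, payload: "new".into() };
    let mut chosen = select(vec![m2_a, m2_b]).unwrap();
    assert_eq!(chosen.generation, 5);
    // A commit bumps the ledger generation before writing both copies back.
    chosen.generation_bump();
    assert_eq!(chosen.generation, 6);
}
```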
+impl TryFrom<AllZoneRequests> for OmicronZonesConfigLocal {
+    type Error = anyhow::Error;
+
+    fn try_from(input: AllZoneRequests) -> Result<Self, Self::Error> {
+        // The Omicron generation number that we choose here (2) deserves some
+        // explanation.
+        //
+        // This is supposed to be the control-plane-issued generation number
+        // for this configuration. But any configuration that we're converting
+        // here predates the point where the control plane issued generation
+        // numbers at all. So what should we assign it? Well, what are the
+        // constraints?
+        //
+        // - It must be newer than generation 1 because generation 1
+        //   canonically represents the initial state of having no zones
+        //   deployed. If we used generation 1 here, any code could ignore
+        //   this configuration on the grounds that it's no newer than what it
+        //   already has. (The contents of a given generation are supposed to
+        //   be immutable.)
+        //
+        // - It should be older than anything else that the control plane
+        //   might try to send us so that if the control plane wants to change
+        //   anything, we won't ignore its request because we think this
+        //   configuration is newer. But really this has to be the control
+        //   plane's responsibility, not ours. That is: Nexus needs to ask us
+        //   what our generation number is and subsequent configurations
+        //   should use newer generation numbers. It's not a great plan for it
+        //   to assume anything about the generation numbers deployed on sleds
+        //   whose configurations it's never seen. (In practice, newly
+        //   deployed systems currently wind up with generation 5, so it
+        //   _could_ choose something like 6 to start with -- or some larger
+        //   number to leave some buffer.)
+        //
+        // In summary, 2 seems fine.
+        let omicron_generation =
+            Generation::from(OMICRON_ZONES_CONFIG_INITIAL_GENERATION).next();
+
+        // The ledger generation doesn't really matter. In case it's useful,
+        // we pick the generation from the ledger that we loaded.
+        let ledger_generation = input.generation;
+
+        let ndatasets_input =
+            input.requests.iter().filter(|r| r.zone.dataset.is_some()).count();
+
+        let zones = input
+            .requests
+            .into_iter()
+            .map(OmicronZoneConfigLocal::try_from)
+            .collect::<Result<Vec<_>, _>>()
+            .context(
+                "mapping `AllZoneRequests` to `OmicronZonesConfigLocal`",
+            )?;
+
+        // As a quick check, the number of datasets in the old and new
+        // generations ought to be the same.
+        let ndatasets_output =
+            zones.iter().filter(|r| r.zone.dataset_name().is_some()).count();
+        ensure!(
+            ndatasets_input == ndatasets_output,
+            "conversion produced a different number of datasets"
+        );
+
+        Ok(OmicronZonesConfigLocal {
+            omicron_generation,
+            ledger_generation,
+            zones,
+        })
+    }
+}
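[Editorial note: the conversion above leans on a convenient property of Rust's `TryFrom` and iterator machinery: a per-element fallible conversion can be collected straight into a `Result` of the whole collection, short-circuiting on the first failure. A tiny, self-contained illustration with hypothetical stand-in types:]

```rust
use anyhow::{anyhow, Context};

// Hypothetical old- and new-format records.
struct OldRecord {
    id: u32,
    addresses: Vec<String>,
}

struct NewRecord {
    id: u32,
    address: String,
}

impl TryFrom<OldRecord> for NewRecord {
    type Error = anyhow::Error;

    fn try_from(old: OldRecord) -> Result<Self, Self::Error> {
        // Like the real conversion, reject shapes the new schema forbids.
        if old.addresses.len() != 1 {
            return Err(anyhow!(
                "expected exactly one address, found {}",
                old.addresses.len()
            ));
        }
        Ok(NewRecord {
            id: old.id,
            address: old.addresses.into_iter().next().unwrap(),
        })
    }
}

fn convert_all(old: Vec<OldRecord>) -> anyhow::Result<Vec<NewRecord>> {
    // `collect::<Result<Vec<_>, _>>()` stops at the first failing element.
    old.into_iter()
        .map(NewRecord::try_from)
        .collect::<Result<Vec<_>, _>>()
        .context("mapping old records to new records")
}

fn main() {
    let ok = convert_all(vec![OldRecord {
        id: 1,
        addresses: vec!["::1".into()],
    }]);
    assert!(ok.is_ok());

    let bad = convert_all(vec![OldRecord { id: 2, addresses: vec![] }]);
    assert!(bad.is_err());
}
```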
+/// This struct represents the combo of "what zone did you ask for" + "where
+/// did we put it".
+#[derive(Clone, serde::Serialize, serde::Deserialize, schemars::JsonSchema)]
+struct ZoneRequest {
+    zone: ServiceZoneRequest,
+    #[schemars(with = "String")]
+    root: Utf8PathBuf,
+}
+
+impl TryFrom<ZoneRequest> for OmicronZoneConfigLocal {
+    type Error = anyhow::Error;
+
+    fn try_from(input: ZoneRequest) -> Result<Self, Self::Error> {
+        Ok(OmicronZoneConfigLocal {
+            zone: OmicronZoneConfig::try_from(input.zone)?,
+            root: input.root,
+        })
+    }
+}
+
+/// Describes a request to create a zone running one or more services.
+#[derive(
+    Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash,
+)]
+struct ServiceZoneRequest {
+    // The UUID of the zone to be initialized.
+    id: Uuid,
+    // The type of the zone to be created.
+    zone_type: ZoneType,
+    // The addresses on which the service should listen for requests.
+    addresses: Vec<Ipv6Addr>,
+    // Datasets which should be managed by this service.
+    #[serde(default)]
+    dataset: Option<DatasetRequest>,
+    // Services that should be run in the zone
+    services: Vec<ServiceZoneService>,
+}
+
+impl TryFrom<ServiceZoneRequest> for OmicronZoneConfig {
+    type Error = anyhow::Error;
+
+    fn try_from(input: ServiceZoneRequest) -> Result<Self, Self::Error> {
+        let error_context = || {
+            format!(
+                "zone {} (type {:?})",
+                input.id,
+                input.zone_type.to_string()
+            )
+        };
+
+        // Historically, this type was used to describe two distinct kinds of
+        // thing:
+        //
+        // 1. an "Omicron" zone: Clickhouse, CockroachDb, Nexus, etc. We call
+        //    these Omicron zones because they're managed by the control plane
+        //    (Omicron). Nexus knows about these, stores information in
+        //    CockroachDB about them, and is responsible for using Sled Agent
+        //    APIs to configure these zones.
+        //
+        // 2. a "sled-local" zone. The only such zone is the "switch" zone.
+        //    This is not really known to Nexus nor exposed outside Sled
+        //    Agent. It's configured either based on Sled Agent's config file
+        //    or else autodetection of whether this system _is_ a Scrimlet.
+        //
+        // All of the types in this file describe the ledgered configuration
+        // of the Omicron zones. We don't care about the switch zone here.
+        // Even for Omicron zones, the `ServiceZoneRequest` type is much more
+        // general than was strictly necessary to represent the kinds of zones
+        // we defined in practice. The more constrained schema is described by
+        // `OmicronZoneConfig`. This function verifies that the structures we
+        // find conform to that more constrained schema.
+        //
+        // Many of these properties were determined by code inspection. They
+        // could be wrong! But we've tried hard to make sure we're not wrong.
+
+        match input.zone_type {
+            ZoneType::Clickhouse
+            | ZoneType::ClickhouseKeeper
+            | ZoneType::CockroachDb
+            | ZoneType::CruciblePantry
+            | ZoneType::Crucible
+            | ZoneType::ExternalDns
+            | ZoneType::InternalDns
+            | ZoneType::Nexus
+            | ZoneType::Ntp
+            | ZoneType::Oximeter => (),
+            ZoneType::Switch => {
+                return Err(anyhow!("unsupported zone type"))
+                    .with_context(error_context)
+            }
+        }
+
+        let id = input.id;
+
+        // In production systems, Omicron zones only ever had exactly one
+        // address here. Multiple addresses were used for the "switch" zone,
+        // which cannot appear here.
+        if input.addresses.len() != 1 {
+            return Err(anyhow!(
+                "expected exactly one address, found {}",
+                input.addresses.len()
+            ))
+            .with_context(error_context);
+        }
+
+        let underlay_address = input.addresses[0];
+
+        // In production systems, Omicron zones only ever had exactly one
+        // "service" inside them. (Multiple services were only supported for
+        // the "switch" zone and for Omicron zones in pre-release versions of
+        // Omicron, neither of which we expect to see here.)
+        if input.services.len() != 1 {
+            return Err(anyhow!(
+                "expected exactly one service, found {}",
+                input.services.len(),
+            ))
+            .with_context(error_context);
+        }
+
+        let service = input.services.into_iter().next().unwrap();
+
+        // The id for the one service we found must match the overall request
+        // id.
+        if service.id != input.id {
+            return Err(anyhow!(
+                "expected service id ({}) to match id ({})",
+                service.id,
+                input.id,
+            ))
+            .with_context(error_context);
+        }
+
+        // If there's a dataset, its id must match the overall request id.
+ let dataset_request = input + .dataset + .ok_or_else(|| anyhow!("missing dataset")) + .with_context(error_context); + let has_dataset = dataset_request.is_ok(); + if let Ok(dataset) = &dataset_request { + if dataset.id != input.id { + return Err(anyhow!( + "expected dataset id ({}) to match id ({})", + dataset.id, + input.id, + )) + .with_context(error_context); + } + } + + let zone_type = match service.details { + ServiceType::Nexus { + internal_address, + external_ip, + nic, + external_tls, + external_dns_servers, + } => OmicronZoneType::Nexus { + internal_address, + external_ip, + nic, + external_tls, + external_dns_servers, + }, + ServiceType::ExternalDns { http_address, dns_address, nic } => { + OmicronZoneType::ExternalDns { + dataset: dataset_request?.to_omicron_zone_dataset( + DatasetKind::ExternalDns, + http_address, + )?, + http_address, + dns_address, + nic, + } + } + ServiceType::InternalDns { + http_address, + dns_address, + gz_address, + gz_address_index, + } => OmicronZoneType::InternalDns { + dataset: dataset_request?.to_omicron_zone_dataset( + DatasetKind::InternalDns, + http_address, + )?, + http_address, + dns_address, + gz_address, + gz_address_index, + }, + ServiceType::Oximeter { address } => { + OmicronZoneType::Oximeter { address } + } + ServiceType::CruciblePantry { address } => { + OmicronZoneType::CruciblePantry { address } + } + ServiceType::BoundaryNtp { + address, + ntp_servers, + dns_servers, + domain, + nic, + snat_cfg, + } => OmicronZoneType::BoundaryNtp { + address, + ntp_servers, + dns_servers, + domain, + nic, + snat_cfg, + }, + ServiceType::InternalNtp { + address, + ntp_servers, + dns_servers, + domain, + } => OmicronZoneType::InternalNtp { + address, + ntp_servers, + dns_servers, + domain, + }, + ServiceType::Clickhouse { address } => { + OmicronZoneType::Clickhouse { + address, + dataset: dataset_request?.to_omicron_zone_dataset( + DatasetKind::Clickhouse, + address, + )?, + } + } + ServiceType::ClickhouseKeeper { address } => { + OmicronZoneType::ClickhouseKeeper { + address, + dataset: dataset_request?.to_omicron_zone_dataset( + DatasetKind::ClickhouseKeeper, + address, + )?, + } + } + ServiceType::CockroachDb { address } => { + OmicronZoneType::CockroachDb { + address, + dataset: dataset_request?.to_omicron_zone_dataset( + DatasetKind::CockroachDb, + address, + )?, + } + } + ServiceType::Crucible { address } => OmicronZoneType::Crucible { + address, + dataset: dataset_request? + .to_omicron_zone_dataset(DatasetKind::Crucible, address)?, + }, + }; + + if zone_type.dataset_name().is_none() && has_dataset { + // This indicates that the legacy form specified a dataset for a + // zone type that we do not (today) believe should have one. This + // should be impossible. If it happens, we need to re-evaluate our + // assumptions in designing `OmicronZoneType`. + return Err(anyhow!("found dataset that went unused")) + .with_context(error_context); + } + + Ok(OmicronZoneConfig { id, underlay_address, zone_type }) + } +} + +/// Used to request that the Sled initialize a single service. +#[derive( + Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, +)] +struct ServiceZoneService { + id: Uuid, + details: ServiceType, +} + +/// Describes service-specific parameters. +#[derive( + Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, +)] +#[serde(tag = "type", rename_all = "snake_case")] +enum ServiceType { + Nexus { + /// The address at which the internal nexus server is reachable. 
+        internal_address: SocketAddrV6,
+        /// The address at which the external nexus server is reachable.
+        external_ip: IpAddr,
+        /// The service vNIC providing external connectivity using OPTE.
+        nic: NetworkInterface,
+        /// Whether Nexus's external endpoint should use TLS
+        external_tls: bool,
+        /// External DNS servers Nexus can use to resolve external hosts.
+        external_dns_servers: Vec<IpAddr>,
+    },
+    ExternalDns {
+        /// The address at which the external DNS server API is reachable.
+        http_address: SocketAddrV6,
+        /// The address at which the external DNS server is reachable.
+        dns_address: SocketAddr,
+        /// The service vNIC providing external connectivity using OPTE.
+        nic: NetworkInterface,
+    },
+    InternalDns {
+        http_address: SocketAddrV6,
+        dns_address: SocketAddrV6,
+        /// The addresses in the global zone which should be created
+        ///
+        /// For the DNS service, which exists outside the sled's typical
+        /// subnet, adding an address in the GZ is necessary to allow
+        /// inter-zone traffic routing.
+        gz_address: Ipv6Addr,
+
+        /// The address is also identified with an auxiliary bit of information
+        /// to ensure that the created global zone address can have a unique
+        /// name.
+        gz_address_index: u32,
+    },
+    Oximeter {
+        address: SocketAddrV6,
+    },
+    CruciblePantry {
+        address: SocketAddrV6,
+    },
+    BoundaryNtp {
+        address: SocketAddrV6,
+        ntp_servers: Vec<String>,
+        dns_servers: Vec<IpAddr>,
+        domain: Option<String>,
+        /// The service vNIC providing outbound connectivity using OPTE.
+        nic: NetworkInterface,
+        /// The SNAT configuration for outbound connections.
+        snat_cfg: SourceNatConfig,
+    },
+    InternalNtp {
+        address: SocketAddrV6,
+        ntp_servers: Vec<String>,
+        dns_servers: Vec<IpAddr>,
+        domain: Option<String>,
+    },
+    Clickhouse {
+        address: SocketAddrV6,
+    },
+    ClickhouseKeeper {
+        address: SocketAddrV6,
+    },
+    CockroachDb {
+        address: SocketAddrV6,
+    },
+    Crucible {
+        address: SocketAddrV6,
+    },
+}
+
+/// Describes a request to provision a specific dataset
+#[derive(
+    Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash,
+)]
+struct DatasetRequest {
+    id: Uuid,
+    name: DatasetName,
+    service_address: SocketAddrV6,
+}
+
+impl DatasetRequest {
+    fn to_omicron_zone_dataset(
+        self,
+        kind: DatasetKind,
+        service_address: SocketAddrV6,
+    ) -> Result<OmicronZoneDataset, anyhow::Error> {
+        ensure!(
+            kind == *self.name.dataset(),
+            "expected dataset kind {:?}, found {:?}",
+            kind,
+            self.name.dataset(),
+        );
+
+        ensure!(
+            self.service_address == service_address,
+            "expected dataset kind {:?} service address to be {}, found {}",
+            kind,
+            service_address,
+            self.service_address,
+        );
+
+        Ok(OmicronZoneDataset { pool_name: self.name.pool().clone() })
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::AllZoneRequests;
+    use crate::services::OmicronZonesConfigLocal;
+    use camino::Utf8PathBuf;
+
+    /// Verifies that our understanding of this old-format ledger has not
+    /// changed. (If you need to change this for some reason, you must figure
+    /// out how that affects systems with old-format ledgers and update this
+    /// test accordingly.)
+    #[test]
+    fn test_all_services_requests_schema() {
+        let schema = schemars::schema_for!(AllZoneRequests);
+        expectorate::assert_contents(
+            "../schema/all-zone-requests.json",
+            &serde_json::to_string_pretty(&schema).unwrap(),
+        );
+    }
+
+    /// Verifies that we can successfully convert a corpus of known old-format
+    /// ledgers. These came from two racks operated by Oxide. In practice
+    /// there probably aren't many different configurations represented here,
+    /// but it's easy enough to just check them all.
+ /// + /// In terms of verifying the output: all we have done by hand in + /// constructing this test is verify that the code successfully converts + /// them. The conversion code does some basic sanity checks as well, like + /// that we produced the same number of zones and datasets. + #[test] + fn test_convert_known_ledgers() { + let known_ledgers = &[ + /* rack2 */ + "rack2-sled8.json", + "rack2-sled9.json", + "rack2-sled10.json", + "rack2-sled11.json", + "rack2-sled12.json", + "rack2-sled14.json", + "rack2-sled16.json", + "rack2-sled17.json", + "rack2-sled21.json", + "rack2-sled23.json", + "rack2-sled25.json", + /* rack3 (no sled 10) */ + "rack3-sled0.json", + "rack3-sled1.json", + "rack3-sled2.json", + "rack3-sled3.json", + "rack3-sled4.json", + "rack3-sled5.json", + "rack3-sled6.json", + "rack3-sled7.json", + "rack3-sled8.json", + "rack3-sled9.json", + "rack3-sled11.json", + "rack3-sled12.json", + "rack3-sled13.json", + "rack3-sled14.json", + "rack3-sled15.json", + "rack3-sled16.json", + "rack3-sled17.json", + "rack3-sled18.json", + "rack3-sled19.json", + "rack3-sled20.json", + "rack3-sled21.json", + "rack3-sled22.json", + "rack3-sled23.json", + "rack3-sled24.json", + "rack3-sled25.json", + "rack3-sled26.json", + "rack3-sled27.json", + "rack3-sled28.json", + "rack3-sled29.json", + "rack3-sled30.json", + "rack3-sled31.json", + ]; + + let path = Utf8PathBuf::from("tests/old-service-ledgers"); + let out_path = Utf8PathBuf::from("tests/output/new-zones-ledgers"); + for ledger_basename in known_ledgers { + println!("checking {:?}", ledger_basename); + let contents = std::fs::read_to_string(path.join(ledger_basename)) + .expect("failed to read file"); + let parsed: AllZoneRequests = + serde_json::from_str(&contents).expect("failed to parse file"); + let converted = OmicronZonesConfigLocal::try_from(parsed) + .expect("failed to convert file"); + expectorate::assert_contents( + out_path.join(ledger_basename), + &serde_json::to_string_pretty(&converted).unwrap(), + ); + } + } +} diff --git a/sled-agent/src/sim/collection.rs b/sled-agent/src/sim/collection.rs index bd6ed4aa90..8dae31863c 100644 --- a/sled-agent/src/sim/collection.rs +++ b/sled-agent/src/sim/collection.rs @@ -777,7 +777,7 @@ mod test { let error = disk.transition(DiskStateRequested::Attached(id2)).unwrap_err(); if let Error::InvalidRequest { message } = error { - assert_eq!("disk is already attached", message); + assert_eq!("disk is already attached", message.external_message()); } else { panic!("unexpected error type"); } @@ -829,7 +829,10 @@ mod test { let error = disk.transition(DiskStateRequested::Attached(id)).unwrap_err(); if let Error::InvalidRequest { message } = error { - assert_eq!("cannot attach from detaching", message); + assert_eq!( + "cannot attach from detaching", + message.external_message() + ); } else { panic!("unexpected error type"); } diff --git a/sled-agent/src/sim/disk.rs b/sled-agent/src/sim/disk.rs index 0f08289b74..fc388f6ce2 100644 --- a/sled-agent/src/sim/disk.rs +++ b/sled-agent/src/sim/disk.rs @@ -17,6 +17,7 @@ use omicron_common::api::external::Generation; use omicron_common::api::external::ResourceType; use omicron_common::api::internal::nexus::DiskRuntimeState; use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::api::internal::nexus::ProducerKind; use oximeter_producer::LogConfig; use oximeter_producer::Server as ProducerServer; use propolis_client::types::DiskAttachmentState as PropolisDiskState; @@ -168,6 +169,7 @@ impl SimDisk { let producer_address = 
             SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 0);
         let server_info = ProducerEndpoint {
             id,
+            kind: ProducerKind::SledAgent,
             address: producer_address,
             base_route: "/collect".to_string(),
             interval: Duration::from_millis(200),
diff --git a/sled-agent/src/sim/http_entrypoints_pantry.rs b/sled-agent/src/sim/http_entrypoints_pantry.rs
index 64b26a83a4..8f572b46a0 100644
--- a/sled-agent/src/sim/http_entrypoints_pantry.rs
+++ b/sled-agent/src/sim/http_entrypoints_pantry.rs
@@ -96,7 +96,7 @@ struct JobPollResponse {
 /// Poll to see if a Pantry background job is done
 #[endpoint {
     method = GET,
-    path = "/crucible/pantry/0/job/{id}/is_finished",
+    path = "/crucible/pantry/0/job/{id}/is-finished",
 }]
 async fn is_job_finished(
     rc: RequestContext<Arc<Pantry>>,
@@ -139,6 +139,7 @@ async fn job_result_ok(
 }
 
 #[derive(Debug, Deserialize, JsonSchema)]
+#[serde(rename_all = "snake_case")]
 pub enum ExpectedDigest {
     Sha256(String),
 }
@@ -157,7 +158,7 @@ struct ImportFromUrlResponse {
 /// Import data from a URL into a volume
 #[endpoint {
     method = POST,
-    path = "/crucible/pantry/0/volume/{id}/import_from_url",
+    path = "/crucible/pantry/0/volume/{id}/import-from-url",
 }]
 async fn import_from_url(
     rc: RequestContext<Arc<Pantry>>,
@@ -213,7 +214,7 @@ struct BulkWriteRequest {
 /// Bulk write data into a volume at a specified offset
 #[endpoint {
     method = POST,
-    path = "/crucible/pantry/0/volume/{id}/bulk_write",
+    path = "/crucible/pantry/0/volume/{id}/bulk-write",
 }]
 async fn bulk_write(
     rc: RequestContext<Arc<Pantry>>,
@@ -279,3 +280,101 @@ async fn detach(
 
     Ok(HttpResponseDeleted())
 }
+
+#[cfg(test)]
+mod tests {
+    use guppy::graph::ExternalSource;
+    use guppy::graph::GitReq;
+    use guppy::graph::PackageGraph;
+    use guppy::MetadataCommand;
+    use serde_json::Value;
+    use std::path::Path;
+
+    fn load_real_api_as_json() -> serde_json::Value {
+        let manifest_path = Path::new(env!("CARGO_MANIFEST_DIR"))
+            .parent()
+            .unwrap()
+            .join("Cargo.toml");
+        let mut cmd = MetadataCommand::new();
+        cmd.manifest_path(&manifest_path);
+        let graph = PackageGraph::from_command(&mut cmd).unwrap();
+        let package = graph
+            .packages()
+            .find(|pkg| pkg.name() == "crucible-pantry-client")
+            .unwrap();
+        let ExternalSource::Git { req, .. } =
+            package.source().parse_external().unwrap()
+        else {
+            panic!("This should be a Git dependency");
+        };
+        let part = match req {
+            GitReq::Branch(inner) => inner,
+            GitReq::Rev(inner) => inner,
+            GitReq::Tag(inner) => inner,
+            GitReq::Default => "main",
+            _ => unreachable!(),
+        };
+        let raw_url = format!(
+            "https://raw.githubusercontent.com/oxidecomputer/crucible/{part}/openapi/crucible-pantry.json",
+        );
+        let raw_json =
+            reqwest::blocking::get(&raw_url).unwrap().text().unwrap();
+        serde_json::from_str(&raw_json).unwrap()
+    }
+
+    // Regression test for https://github.com/oxidecomputer/omicron/issues/4599.
+    #[test]
+    fn test_simulated_api_matches_real() {
+        let real_api = load_real_api_as_json();
+        let Value::String(ref title) = real_api["info"]["title"] else {
+            unreachable!();
+        };
+        let Value::String(ref version) = real_api["info"]["version"] else {
+            unreachable!();
+        };
+        let sim_api = super::api().openapi(title, version).json().unwrap();
+
+        // We'll assert that anything which appears in the simulated API must
+        // appear exactly as-is in the real API. I.e., the simulated API is a
+        // subset (possibly non-strict) of the real API.
+ compare_json_values(&sim_api, &real_api, String::new()); + } + + fn compare_json_values(lhs: &Value, rhs: &Value, path: String) { + match lhs { + Value::Array(values) => { + let Value::Array(rhs_values) = &rhs else { + panic!( + "Expected an array in the real API JSON at \ + path \"{path}\", found {rhs:?}", + ); + }; + assert_eq!(values.len(), rhs_values.len()); + for (i, (left, right)) in + values.iter().zip(rhs_values.iter()).enumerate() + { + let new_path = format!("{path}[{i}]"); + compare_json_values(left, right, new_path); + } + } + Value::Object(map) => { + let Value::Object(rhs_map) = &rhs else { + panic!( + "Expected a map in the real API JSON at \ + path \"{path}\", found {rhs:?}", + ); + }; + for (key, value) in map.iter() { + let new_path = format!("{path}/{key}"); + let rhs_value = rhs_map.get(key).unwrap_or_else(|| { + panic!("Real API JSON missing key: \"{new_path}\"") + }); + compare_json_values(value, rhs_value, new_path); + } + } + _ => { + assert_eq!(lhs, rhs, "Mismatched keys at JSON path \"{path}\"") + } + } + } +} diff --git a/sled-agent/src/sim/instance.rs b/sled-agent/src/sim/instance.rs index 15ff83c969..8b00adce60 100644 --- a/sled-agent/src/sim/instance.rs +++ b/sled-agent/src/sim/instance.rs @@ -362,13 +362,11 @@ impl SimInstanceInner { } if self.state.instance().gen != old_runtime.gen { - return Err(Error::InvalidRequest { - message: format!( - "wrong Propolis ID generation: expected {}, got {}", - self.state.instance().gen, - old_runtime.gen - ), - }); + return Err(Error::invalid_request(format!( + "wrong Propolis ID generation: expected {}, got {}", + self.state.instance().gen, + old_runtime.gen + ))); } self.state.set_migration_ids(ids, Utc::now()); diff --git a/sled-agent/src/sim/storage.rs b/sled-agent/src/sim/storage.rs index 2528a258d7..101228934d 100644 --- a/sled-agent/src/sim/storage.rs +++ b/sled-agent/src/sim/storage.rs @@ -40,6 +40,7 @@ struct CrucibleDataInner { running_snapshots: HashMap>, on_create: Option, region_creation_error: bool, + region_deletion_error: bool, creating_a_running_snapshot_should_fail: bool, next_port: u16, } @@ -53,6 +54,7 @@ impl CrucibleDataInner { running_snapshots: HashMap::new(), on_create: None, region_creation_error: false, + region_deletion_error: false, creating_a_running_snapshot_should_fail: false, next_port: crucible_port, } @@ -129,6 +131,10 @@ impl CrucibleDataInner { ); } + if self.region_deletion_error { + bail!("region deletion error!"); + } + let id = Uuid::from_str(&id.0).unwrap(); if let Some(region) = self.regions.get_mut(&id) { if region.state == State::Failed { @@ -229,6 +235,10 @@ impl CrucibleDataInner { self.region_creation_error = value; } + fn set_region_deletion_error(&mut self, value: bool) { + self.region_deletion_error = value; + } + fn create_running_snapshot( &mut self, id: &RegionId, @@ -391,6 +401,10 @@ impl CrucibleData { self.inner.lock().await.set_region_creation_error(value); } + pub async fn set_region_deletion_error(&self, value: bool) { + self.inner.lock().await.set_region_deletion_error(value); + } + pub async fn create_running_snapshot( &self, id: &RegionId, diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index 9826a987d4..5f278b7f38 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -4,6 +4,7 @@ //! 
Sled agent implementation +use crate::boot_disk_os_writer::BootDiskOsWriter; use crate::bootstrap::config::BOOTSTRAP_AGENT_RACK_INIT_PORT; use crate::bootstrap::early_networking::{ EarlyNetworkConfig, EarlyNetworkSetupError, @@ -17,7 +18,7 @@ use crate::nexus::{ConvertInto, NexusClientWithResolver, NexusRequestQueue}; use crate::params::{ DiskStateRequested, InstanceHardware, InstanceMigrationSourceParams, InstancePutStateResponse, InstanceStateRequested, - InstanceUnregisterResponse, ServiceEnsureBody, SledRole, TimeSync, + InstanceUnregisterResponse, OmicronZonesConfig, SledRole, TimeSync, VpcFirewallRule, ZoneBundleMetadata, Zpool, }; use crate::services::{self, ServiceManager}; @@ -43,6 +44,7 @@ use omicron_common::address::{ }; use omicron_common::api::external::Vni; use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::api::internal::nexus::ProducerKind; use omicron_common::api::internal::nexus::{ SledInstanceState, VmmRuntimeState, }; @@ -67,6 +69,7 @@ use std::sync::Arc; use tokio::sync::oneshot; use uuid::Uuid; +use illumos_utils::running_zone::ZoneBuilderFactory; #[cfg(not(test))] use illumos_utils::{dladm::Dladm, zone::Zones}; #[cfg(test)] @@ -262,6 +265,9 @@ struct SledAgentInner { // Object handling production of metrics for oximeter. metrics_manager: MetricsManager, + + // Handle to the traffic manager for writing OS updates to our boot disks. + boot_disk_os_writer: BootDiskOsWriter, } impl SledAgentInner { @@ -381,6 +387,7 @@ impl SledAgent { port_manager.clone(), storage_manager.clone(), long_running_task_handles.zone_bundler.clone(), + ZoneBuilderFactory::default(), )?; // Configure the VMM reservoir as either a percentage of DRAM or as an @@ -442,8 +449,11 @@ impl SledAgent { })?; let early_network_config = - EarlyNetworkConfig::try_from(serialized_config) - .map_err(|err| BackoffError::transient(err.to_string()))?; + EarlyNetworkConfig::deserialize_bootstore_config( + &log, + &serialized_config, + ) + .map_err(|err| BackoffError::transient(err.to_string()))?; Ok(early_network_config.body.rack_network_config) }; @@ -504,6 +514,7 @@ impl SledAgent { // Nexus. This should not block progress here. let endpoint = ProducerEndpoint { id: request.body.id, + kind: ProducerKind::SledAgent, address: sled_address.into(), base_route: String::from("/metrics/collect"), interval: crate::metrics::METRIC_COLLECTION_INTERVAL, @@ -538,6 +549,7 @@ impl SledAgent { zone_bundler: long_running_task_handles.zone_bundler.clone(), bootstore: long_running_task_handles.bootstore.clone(), metrics_manager, + boot_disk_os_writer: BootDiskOsWriter::new(&parent_log), }), log: log.clone(), }; @@ -551,12 +563,11 @@ impl SledAgent { Ok(sled_agent) } - /// Load services for which we're responsible; only meaningful to call - /// during a cold boot. + /// Load services for which we're responsible. /// /// Blocks until all services have started, retrying indefinitely on /// failure. - pub(crate) async fn cold_boot_load_services(&self) { + pub(crate) async fn load_services(&self) { info!(self.log, "Loading cold boot services"); retry_notify( retry_policy_internal_service_aggressive(), @@ -799,36 +810,40 @@ impl SledAgent { self.inner.zone_bundler.cleanup().await.map_err(Error::from) } - /// Ensures that particular services should be initialized. - /// - /// These services will be instantiated by this function, will be recorded - /// to a local file to ensure they start automatically on next boot. 
- pub async fn services_ensure( + /// List the Omicron zone configuration that's currently running + pub async fn omicron_zones_list( &self, - requested_services: ServiceEnsureBody, - ) -> Result<(), Error> { - let datasets: Vec<_> = requested_services - .services - .iter() - .filter_map(|service| service.dataset.clone()) - .collect(); + ) -> Result { + Ok(self.inner.services.omicron_zones_list().await?) + } + /// Ensures that the specific set of Omicron zones are running as configured + /// (and that no other zones are running) + pub async fn omicron_zones_ensure( + &self, + requested_zones: OmicronZonesConfig, + ) -> Result<(), Error> { // TODO: // - If these are the set of filesystems, we should also consider // removing the ones which are not listed here. // - It's probably worth sending a bulk request to the storage system, // rather than requesting individual datasets. - for dataset in &datasets { + for zone in &requested_zones.zones { + let Some(dataset_name) = zone.dataset_name() else { + continue; + }; + // First, ensure the dataset exists + let dataset_id = zone.id; self.inner .storage - .upsert_filesystem(dataset.id, dataset.name.clone()) + .upsert_filesystem(dataset_id, dataset_name) .await?; } self.inner .services - .ensure_all_services_persistent(requested_services) + .ensure_all_omicron_zones_persistent(requested_zones) .await?; Ok(()) } @@ -1033,6 +1048,14 @@ impl SledAgent { pub fn metrics_registry(&self) -> &ProducerRegistry { self.inner.metrics_manager.registry() } + + pub(crate) fn storage(&self) -> &StorageHandle { + &self.inner.storage + } + + pub(crate) fn boot_disk_os_writer(&self) -> &BootDiskOsWriter { + &self.inner.boot_disk_os_writer + } } async fn register_metric_producer_with_nexus( diff --git a/sled-agent/tests/data/early_network_blobs.txt b/sled-agent/tests/data/early_network_blobs.txt new file mode 100644 index 0000000000..c968d4010b --- /dev/null +++ b/sled-agent/tests/data/early_network_blobs.txt @@ -0,0 +1,2 @@ +2023-11-30 mupdate failing blob,{"generation":15,"schema_version":1,"body":{"ntp_servers":[],"rack_network_config":{"rack_subnet":"fd00:1122:3344:100::/56","infra_ip_first":"0.0.0.0","infra_ip_last":"0.0.0.0","ports":[{"routes":[],"addresses":[],"switch":"switch1","port":"qsfp0","uplink_port_speed":"speed100_g","uplink_port_fec":"none","bgp_peers":[]},{"routes":[],"addresses":["172.20.15.53/29"],"switch":"switch1","port":"qsfp18","uplink_port_speed":"speed100_g","uplink_port_fec":"rs","bgp_peers":[{"asn":65002,"port":"qsfp18","addr":"172.20.15.51","hold_time":6,"idle_hold_time":6,"delay_open":0,"connect_retry":3,"keepalive":2}]},{"routes":[],"addresses":["172.20.15.45/29"],"switch":"switch0","port":"qsfp18","uplink_port_speed":"speed100_g","uplink_port_fec":"rs","bgp_peers":[{"asn":65002,"port":"qsfp18","addr":"172.20.15.43","hold_time":6,"idle_hold_time":6,"delay_open":0,"connect_retry":3,"keepalive":2}]},{"routes":[],"addresses":[],"switch":"switch0","port":"qsfp0","uplink_port_speed":"speed100_g","uplink_port_fec":"none","bgp_peers":[]}],"bgp":[{"asn":65002,"originate":["172.20.26.0/24"]},{"asn":65002,"originate":["172.20.26.0/24"]}]}}} +2023-12-06 
config,{"generation":20,"schema_version":1,"body":{"ntp_servers":["ntp.example.com"],"rack_network_config":{"rack_subnet":"ff01::/32","infra_ip_first":"127.0.0.1","infra_ip_last":"127.1.0.1","ports":[{"routes":[{"destination":"10.1.9.32/16","nexthop":"10.1.9.32"}],"addresses":["2001:db8::/96"],"switch":"switch0","port":"foo","uplink_port_speed":"speed200_g","uplink_port_fec":"firecode","bgp_peers":[{"asn":65000,"port":"bar","addr":"1.2.3.4","hold_time":20,"idle_hold_time":50,"delay_open":null,"connect_retry":30,"keepalive":10}],"autoneg":true}],"bgp":[{"asn":20000,"originate":["192.168.0.0/24"]}]}}}
diff --git a/sled-agent/tests/integration_tests/early_network.rs b/sled-agent/tests/integration_tests/early_network.rs
new file mode 100644
index 0000000000..c3a4a53ebf
--- /dev/null
+++ b/sled-agent/tests/integration_tests/early_network.rs
@@ -0,0 +1,154 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Tests that EarlyNetworkConfig deserializes across versions.
+
+use std::net::Ipv4Addr;
+
+use bootstore::schemes::v0 as bootstore;
+use omicron_common::api::{
+    external::SwitchLocation,
+    internal::shared::{
+        BgpConfig, BgpPeerConfig, PortConfigV1, PortFec, PortSpeed,
+        RackNetworkConfig, RouteConfig,
+    },
+};
+use omicron_sled_agent::bootstrap::early_networking::{
+    EarlyNetworkConfig, EarlyNetworkConfigBody,
+};
+use omicron_test_utils::dev::test_setup_log;
+
+const BLOB_PATH: &str = "tests/data/early_network_blobs.txt";
+
+/// Test that previous and current versions of `EarlyNetworkConfig` blobs
+/// deserialize correctly.
+#[test]
+fn early_network_blobs_deserialize() {
+    let logctx = test_setup_log("early_network_blobs_deserialize");
+
+    let (current_desc, current_config) = current_config_example();
+    assert!(
+        !current_desc.contains(',') && !current_desc.contains('\n'),
+        "current_desc must not contain commas or newlines"
+    );
+
+    // Read old blobs as newline-delimited JSON.
+    let mut known_blobs = std::fs::read_to_string(BLOB_PATH)
+        .expect("error reading early_network_blobs.txt");
+    let mut current_blob_is_known = false;
+    for (blob_idx, line) in known_blobs.lines().enumerate() {
+        let blob_lineno = blob_idx + 1;
+        let (blob_desc, blob_json) =
+            line.split_once(',').unwrap_or_else(|| {
+                panic!(
+                    "error parsing early_network_blobs.txt \
+                     line {blob_lineno}: missing comma",
+                );
+            });
+
+        // Attempt to deserialize this blob.
+        let config = serde_json::from_str::<EarlyNetworkConfig>(blob_json)
+            .unwrap_or_else(|error| {
+                panic!(
+                    "error deserializing early_network_blobs.txt \
+                     \"{blob_desc}\" (line {blob_lineno}): {error}",
+                );
+            });
+
+        // Does this config match the current config?
+        if blob_desc == current_desc {
+            assert_eq!(
+                config, current_config,
+                "early_network_blobs.txt line {}: {} does not match current config",
+                blob_lineno, blob_desc
+            );
+            current_blob_is_known = true;
+        }
+
+        // Now attempt to put this blob into a bootstore config, and deserialize that.
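+        // (Editor's aside, not part of the original patch: the wrapper built
+        // below pairs the blob's generation number with its raw JSON bytes,
+        // roughly `NetworkConfig { generation, blob: b"{...}".to_vec() }`,
+        // so this round-trip also exercises the path where the config is
+        // read back out of the bootstore rather than parsed from the file
+        // directly.)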
+        let network_config = bootstore::NetworkConfig {
+            generation: config.generation,
+            blob: blob_json.to_owned().into(),
+        };
+        let config2 = EarlyNetworkConfig::deserialize_bootstore_config(
+            &logctx.log,
+            &network_config,
+        )
+        .unwrap_or_else(|error| {
+            panic!(
+                "error deserializing early_network_blobs.txt \
+                 \"{blob_desc}\" (line {blob_lineno}) as bootstore config: {error}",
+            );
+        });
+
+        assert_eq!(
+            config, config2,
+            "early_network_blobs.txt line {}: {} does not match deserialization \
+             as bootstore config",
+            blob_lineno, blob_desc
+        );
+    }
+
+    // If the current blob was not covered, add it to the list of known blobs.
+    if !current_blob_is_known {
+        let current_blob_json = serde_json::to_string(&current_config).unwrap();
+        let current_blob = format!("{},{}", current_desc, current_blob_json);
+        known_blobs.push_str(&current_blob);
+        known_blobs.push('\n');
+    }
+
+    expectorate::assert_contents(BLOB_PATH, &known_blobs);
+
+    logctx.cleanup_successful();
+}
+
+/// Returns a current version of the EarlyNetworkConfig blob, along with a
+/// short description of the current version. The values can be arbitrary, but
+/// this should be a nontrivial blob where no vectors are empty.
+///
+/// The goal is that if the definition of `EarlyNetworkConfig` changes in the
+/// future, older blobs can still be deserialized correctly.
+fn current_config_example() -> (&'static str, EarlyNetworkConfig) {
+    // NOTE: the description must not contain commas or newlines.
+    let description = "2023-12-06 config";
+    let config = EarlyNetworkConfig {
+        generation: 20,
+        schema_version: 1,
+        body: EarlyNetworkConfigBody {
+            ntp_servers: vec!["ntp.example.com".to_owned()],
+            rack_network_config: Some(RackNetworkConfig {
+                rack_subnet: "ff01::0/32".parse().unwrap(),
+                infra_ip_first: Ipv4Addr::new(127, 0, 0, 1),
+                infra_ip_last: Ipv4Addr::new(127, 1, 0, 1),
+                ports: vec![PortConfigV1 {
+                    routes: vec![RouteConfig {
+                        destination: "10.1.9.32/16".parse().unwrap(),
+                        nexthop: "10.1.9.32".parse().unwrap(),
+                    }],
+                    addresses: vec!["2001:db8::/96".parse().unwrap()],
+                    switch: SwitchLocation::Switch0,
+                    port: "foo".to_owned(),
+                    uplink_port_speed: PortSpeed::Speed200G,
+                    uplink_port_fec: PortFec::Firecode,
+                    bgp_peers: vec![BgpPeerConfig {
+                        asn: 65000,
+                        port: "bar".to_owned(),
+                        addr: Ipv4Addr::new(1, 2, 3, 4),
+                        hold_time: Some(20),
+                        idle_hold_time: Some(50),
+                        delay_open: None,
+                        connect_retry: Some(30),
+                        keepalive: Some(10),
+                    }],
+                    autoneg: true,
+                }],
+                bgp: vec![BgpConfig {
+                    asn: 20000,
+                    originate: vec!["192.168.0.0/24".parse().unwrap()],
+                }],
+            }),
+        },
+    };
+
+    (description, config)
+}
diff --git a/sled-agent/tests/integration_tests/mod.rs b/sled-agent/tests/integration_tests/mod.rs
index 1bf43dc00c..13e38077ea 100644
--- a/sled-agent/tests/integration_tests/mod.rs
+++ b/sled-agent/tests/integration_tests/mod.rs
@@ -3,3 +3,4 @@
 // file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod commands; +mod early_network; diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled10.json b/sled-agent/tests/old-service-ledgers/rack2-sled10.json new file mode 100644 index 0000000000..b92a2bf4a0 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack2-sled10.json @@ -0,0 +1 @@ +{"generation":4,"requests":[{"zone":{"id":"04eef8aa-055c-42ab-bdb6-c982f63c9be0","zone_type":"crucible","addresses":["fd00:1122:3344:107::d"],"dataset":{"id":"04eef8aa-055c-42ab-bdb6-c982f63c9be0","name":{"pool_name":"oxp_845ff39a-3205-416f-8bda-e35829107c8a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::d]:32345"},"services":[{"id":"04eef8aa-055c-42ab-bdb6-c982f63c9be0","details":{"type":"crucible","address":"[fd00:1122:3344:107::d]:32345"}}]},"root":"/pool/ext/43efdd6d-7419-437a-a282-fc45bfafd042/crypt/zone"},{"zone":{"id":"8568c997-fbbb-46a8-8549-b78284530ffc","zone_type":"crucible","addresses":["fd00:1122:3344:107::5"],"dataset":{"id":"8568c997-fbbb-46a8-8549-b78284530ffc","name":{"pool_name":"oxp_0e485ad3-04e6-404b-b619-87d4fea9f5ae","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::5]:32345"},"services":[{"id":"8568c997-fbbb-46a8-8549-b78284530ffc","details":{"type":"crucible","address":"[fd00:1122:3344:107::5]:32345"}}]},"root":"/pool/ext/9b61d4b2-66f6-459f-86f4-13d0b8c5d6cf/crypt/zone"},{"zone":{"id":"6cec1d60-5c1a-4c1b-9632-2b4bc76bd37c","zone_type":"crucible","addresses":["fd00:1122:3344:107::e"],"dataset":{"id":"6cec1d60-5c1a-4c1b-9632-2b4bc76bd37c","name":{"pool_name":"oxp_62a4c68a-2073-42d0-8e49-01f5e8b90cd4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::e]:32345"},"services":[{"id":"6cec1d60-5c1a-4c1b-9632-2b4bc76bd37c","details":{"type":"crucible","address":"[fd00:1122:3344:107::e]:32345"}}]},"root":"/pool/ext/845ff39a-3205-416f-8bda-e35829107c8a/crypt/zone"},{"zone":{"id":"aa646c82-c6d7-4d0c-8401-150130927759","zone_type":"clickhouse","addresses":["fd00:1122:3344:107::4"],"dataset":{"id":"aa646c82-c6d7-4d0c-8401-150130927759","name":{"pool_name":"oxp_0e485ad3-04e6-404b-b619-87d4fea9f5ae","kind":{"type":"clickhouse"}},"service_address":"[fd00:1122:3344:107::4]:8123"},"services":[{"id":"aa646c82-c6d7-4d0c-8401-150130927759","details":{"type":"clickhouse","address":"[fd00:1122:3344:107::4]:8123"}}]},"root":"/pool/ext/fd82dcc7-00dd-4d01-826a-937a7d8238fb/crypt/zone"},{"zone":{"id":"2f294ca1-7a4f-468f-8966-2b7915804729","zone_type":"crucible","addresses":["fd00:1122:3344:107::7"],"dataset":{"id":"2f294ca1-7a4f-468f-8966-2b7915804729","name":{"pool_name":"oxp_43efdd6d-7419-437a-a282-fc45bfafd042","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::7]:32345"},"services":[{"id":"2f294ca1-7a4f-468f-8966-2b7915804729","details":{"type":"crucible","address":"[fd00:1122:3344:107::7]:32345"}}]},"root":"/pool/ext/fd82dcc7-00dd-4d01-826a-937a7d8238fb/crypt/zone"},{"zone":{"id":"1a77bd1d-4fd4-4d6c-a105-17f942d94ba6","zone_type":"crucible","addresses":["fd00:1122:3344:107::c"],"dataset":{"id":"1a77bd1d-4fd4-4d6c-a105-17f942d94ba6","name":{"pool_name":"oxp_b6bdfdaf-9c0d-4b74-926c-49ff3ed05562","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::c]:32345"},"services":[{"id":"1a77bd1d-4fd4-4d6c-a105-17f942d94ba6","details":{"type":"crucible","address":"[fd00:1122:3344:107::c]:32345"}}]},"root":"/pool/ext/9b61d4b2-66f6-459f-86f4-13d0b8c5d6cf/crypt/zone"},{"zone":{"id":"f65a6668-1aea-4deb-81ed-191fbe469328","zone_type":"crucible","addresses":["fd00:1122:3344:107::9"],"dataset":{"id":"f65a6668-1aea-4deb-81ed-191
fbe469328","name":{"pool_name":"oxp_9b61d4b2-66f6-459f-86f4-13d0b8c5d6cf","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::9]:32345"},"services":[{"id":"f65a6668-1aea-4deb-81ed-191fbe469328","details":{"type":"crucible","address":"[fd00:1122:3344:107::9]:32345"}}]},"root":"/pool/ext/d0584f4a-20ba-436d-a75b-7709e80deb79/crypt/zone"},{"zone":{"id":"ee8bce67-8f8e-4221-97b0-85f1860d66d0","zone_type":"crucible","addresses":["fd00:1122:3344:107::8"],"dataset":{"id":"ee8bce67-8f8e-4221-97b0-85f1860d66d0","name":{"pool_name":"oxp_b252b176-3974-436a-915b-60382b21eb76","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::8]:32345"},"services":[{"id":"ee8bce67-8f8e-4221-97b0-85f1860d66d0","details":{"type":"crucible","address":"[fd00:1122:3344:107::8]:32345"}}]},"root":"/pool/ext/b6bdfdaf-9c0d-4b74-926c-49ff3ed05562/crypt/zone"},{"zone":{"id":"cf3b2d54-5e36-4c93-b44f-8bf36ac98071","zone_type":"crucible","addresses":["fd00:1122:3344:107::b"],"dataset":{"id":"cf3b2d54-5e36-4c93-b44f-8bf36ac98071","name":{"pool_name":"oxp_d0584f4a-20ba-436d-a75b-7709e80deb79","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::b]:32345"},"services":[{"id":"cf3b2d54-5e36-4c93-b44f-8bf36ac98071","details":{"type":"crucible","address":"[fd00:1122:3344:107::b]:32345"}}]},"root":"/pool/ext/4c157f35-865d-4310-9d81-c6259cb69293/crypt/zone"},{"zone":{"id":"5c8c244c-00dc-4b16-aa17-6d9eb4827fab","zone_type":"crucible","addresses":["fd00:1122:3344:107::a"],"dataset":{"id":"5c8c244c-00dc-4b16-aa17-6d9eb4827fab","name":{"pool_name":"oxp_4c157f35-865d-4310-9d81-c6259cb69293","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::a]:32345"},"services":[{"id":"5c8c244c-00dc-4b16-aa17-6d9eb4827fab","details":{"type":"crucible","address":"[fd00:1122:3344:107::a]:32345"}}]},"root":"/pool/ext/845ff39a-3205-416f-8bda-e35829107c8a/crypt/zone"},{"zone":{"id":"7d5e942b-926c-442d-937a-76cc4aa72bf3","zone_type":"crucible","addresses":["fd00:1122:3344:107::6"],"dataset":{"id":"7d5e942b-926c-442d-937a-76cc4aa72bf3","name":{"pool_name":"oxp_fd82dcc7-00dd-4d01-826a-937a7d8238fb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::6]:32345"},"services":[{"id":"7d5e942b-926c-442d-937a-76cc4aa72bf3","details":{"type":"crucible","address":"[fd00:1122:3344:107::6]:32345"}}]},"root":"/pool/ext/b252b176-3974-436a-915b-60382b21eb76/crypt/zone"},{"zone":{"id":"a3628a56-6f85-43b5-be50-71d8f0e04877","zone_type":"cockroach_db","addresses":["fd00:1122:3344:107::3"],"dataset":{"id":"a3628a56-6f85-43b5-be50-71d8f0e04877","name":{"pool_name":"oxp_0e485ad3-04e6-404b-b619-87d4fea9f5ae","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:107::3]:32221"},"services":[{"id":"a3628a56-6f85-43b5-be50-71d8f0e04877","details":{"type":"cockroach_db","address":"[fd00:1122:3344:107::3]:32221"}}]},"root":"/pool/ext/4c157f35-865d-4310-9d81-c6259cb69293/crypt/zone"},{"zone":{"id":"7529be1c-ca8b-441a-89aa-37166cc450df","zone_type":"ntp","addresses":["fd00:1122:3344:107::f"],"dataset":null,"services":[{"id":"7529be1c-ca8b-441a-89aa-37166cc450df","details":{"type":"internal_ntp","address":"[fd00:1122:3344:107::f]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/fd82dcc7-00dd-4d01-826a-937a7d8238fb/crypt/zone"}]} \ No newline at end of file diff --git 
a/sled-agent/tests/old-service-ledgers/rack2-sled11.json b/sled-agent/tests/old-service-ledgers/rack2-sled11.json new file mode 100644 index 0000000000..3833bed5c9 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack2-sled11.json @@ -0,0 +1 @@ +{"generation":4,"requests":[{"zone":{"id":"605be8b9-c652-4a5f-94ca-068ec7a39472","zone_type":"crucible","addresses":["fd00:1122:3344:106::a"],"dataset":{"id":"605be8b9-c652-4a5f-94ca-068ec7a39472","name":{"pool_name":"oxp_cf14d1b9-b4db-4594-b3ab-a9957e770ce9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::a]:32345"},"services":[{"id":"605be8b9-c652-4a5f-94ca-068ec7a39472","details":{"type":"crucible","address":"[fd00:1122:3344:106::a]:32345"}}]},"root":"/pool/ext/cf5f8849-0c5a-475b-8683-6d17da88d1d1/crypt/zone"},{"zone":{"id":"af8a8712-457c-4ea7-a8b6-aecb04761c1b","zone_type":"crucible","addresses":["fd00:1122:3344:106::9"],"dataset":{"id":"af8a8712-457c-4ea7-a8b6-aecb04761c1b","name":{"pool_name":"oxp_cf5f8849-0c5a-475b-8683-6d17da88d1d1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::9]:32345"},"services":[{"id":"af8a8712-457c-4ea7-a8b6-aecb04761c1b","details":{"type":"crucible","address":"[fd00:1122:3344:106::9]:32345"}}]},"root":"/pool/ext/7f778610-7328-4554-98f6-b17f74f551c7/crypt/zone"},{"zone":{"id":"0022703b-dcfc-44d4-897a-b42f6f53b433","zone_type":"crucible","addresses":["fd00:1122:3344:106::c"],"dataset":{"id":"0022703b-dcfc-44d4-897a-b42f6f53b433","name":{"pool_name":"oxp_025725fa-9e40-4b46-b018-c420408394ef","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::c]:32345"},"services":[{"id":"0022703b-dcfc-44d4-897a-b42f6f53b433","details":{"type":"crucible","address":"[fd00:1122:3344:106::c]:32345"}}]},"root":"/pool/ext/025725fa-9e40-4b46-b018-c420408394ef/crypt/zone"},{"zone":{"id":"fffddf56-10ca-4b62-9be3-5b3764a5f682","zone_type":"crucible","addresses":["fd00:1122:3344:106::d"],"dataset":{"id":"fffddf56-10ca-4b62-9be3-5b3764a5f682","name":{"pool_name":"oxp_4d2f5aaf-eb14-4b1e-aa99-ae38ec844605","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::d]:32345"},"services":[{"id":"fffddf56-10ca-4b62-9be3-5b3764a5f682","details":{"type":"crucible","address":"[fd00:1122:3344:106::d]:32345"}}]},"root":"/pool/ext/834c9aad-c53b-4357-bc3f-f422efa63848/crypt/zone"},{"zone":{"id":"9b8194ee-917d-4abc-a55c-94cea6cdaea1","zone_type":"crucible","addresses":["fd00:1122:3344:106::6"],"dataset":{"id":"9b8194ee-917d-4abc-a55c-94cea6cdaea1","name":{"pool_name":"oxp_d7665e0d-9354-4341-a76f-965d7c49f277","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::6]:32345"},"services":[{"id":"9b8194ee-917d-4abc-a55c-94cea6cdaea1","details":{"type":"crucible","address":"[fd00:1122:3344:106::6]:32345"}}]},"root":"/pool/ext/cf5f8849-0c5a-475b-8683-6d17da88d1d1/crypt/zone"},{"zone":{"id":"b369e133-485c-4d98-8fee-83542d1fd94d","zone_type":"crucible","addresses":["fd00:1122:3344:106::4"],"dataset":{"id":"b369e133-485c-4d98-8fee-83542d1fd94d","name":{"pool_name":"oxp_4366f80d-3902-4b93-8f2d-380008e805fc","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::4]:32345"},"services":[{"id":"b369e133-485c-4d98-8fee-83542d1fd94d","details":{"type":"crucible","address":"[fd00:1122:3344:106::4]:32345"}}]},"root":"/pool/ext/025725fa-9e40-4b46-b018-c420408394ef/crypt/zone"},{"zone":{"id":"edd99650-5df1-4241-815d-253e4ef2399c","zone_type":"external_dns","addresses":["fd00:1122:3344:106::3"],"dataset":{"id":"edd99650-5df1-4241-815d-253e4ef2399c","name":{"pool_name":"oxp_4366f80d-
3902-4b93-8f2d-380008e805fc","kind":{"type":"external_dns"}},"service_address":"[fd00:1122:3344:106::3]:5353"},"services":[{"id":"edd99650-5df1-4241-815d-253e4ef2399c","details":{"type":"external_dns","http_address":"[fd00:1122:3344:106::3]:5353","dns_address":"172.20.26.1:53","nic":{"id":"99b759fc-8e2e-44b7-aca8-93c3b201974d","kind":{"type":"service","id":"edd99650-5df1-4241-815d-253e4ef2399c"},"name":"external-dns-edd99650-5df1-4241-815d-253e4ef2399c","ip":"172.30.1.5","mac":"A8:40:25:FF:B0:9C","subnet":"172.30.1.0/24","vni":100,"primary":true,"slot":0}}}]},"root":"/pool/ext/7f778610-7328-4554-98f6-b17f74f551c7/crypt/zone"},{"zone":{"id":"46d1afcc-cc3f-4b17-aafc-054dd4862d15","zone_type":"crucible","addresses":["fd00:1122:3344:106::5"],"dataset":{"id":"46d1afcc-cc3f-4b17-aafc-054dd4862d15","name":{"pool_name":"oxp_7f778610-7328-4554-98f6-b17f74f551c7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::5]:32345"},"services":[{"id":"46d1afcc-cc3f-4b17-aafc-054dd4862d15","details":{"type":"crucible","address":"[fd00:1122:3344:106::5]:32345"}}]},"root":"/pool/ext/cf5f8849-0c5a-475b-8683-6d17da88d1d1/crypt/zone"},{"zone":{"id":"12afe1c3-bfe6-4278-8240-91d401347d36","zone_type":"crucible","addresses":["fd00:1122:3344:106::8"],"dataset":{"id":"12afe1c3-bfe6-4278-8240-91d401347d36","name":{"pool_name":"oxp_534bcd4b-502f-4109-af6e-4b28a22c20f1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::8]:32345"},"services":[{"id":"12afe1c3-bfe6-4278-8240-91d401347d36","details":{"type":"crucible","address":"[fd00:1122:3344:106::8]:32345"}}]},"root":"/pool/ext/4366f80d-3902-4b93-8f2d-380008e805fc/crypt/zone"},{"zone":{"id":"c33b5912-9985-43ed-98f2-41297e2b796a","zone_type":"crucible","addresses":["fd00:1122:3344:106::b"],"dataset":{"id":"c33b5912-9985-43ed-98f2-41297e2b796a","name":{"pool_name":"oxp_834c9aad-c53b-4357-bc3f-f422efa63848","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::b]:32345"},"services":[{"id":"c33b5912-9985-43ed-98f2-41297e2b796a","details":{"type":"crucible","address":"[fd00:1122:3344:106::b]:32345"}}]},"root":"/pool/ext/d7665e0d-9354-4341-a76f-965d7c49f277/crypt/zone"},{"zone":{"id":"65b3db59-9361-4100-9cee-04e32a8c67d3","zone_type":"crucible","addresses":["fd00:1122:3344:106::7"],"dataset":{"id":"65b3db59-9361-4100-9cee-04e32a8c67d3","name":{"pool_name":"oxp_32b5303f-f667-4345-84d2-c7eec63b91b2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::7]:32345"},"services":[{"id":"65b3db59-9361-4100-9cee-04e32a8c67d3","details":{"type":"crucible","address":"[fd00:1122:3344:106::7]:32345"}}]},"root":"/pool/ext/d7665e0d-9354-4341-a76f-965d7c49f277/crypt/zone"},{"zone":{"id":"82500cc9-f33d-4d59-9e6e-d70ea6133077","zone_type":"ntp","addresses":["fd00:1122:3344:106::e"],"dataset":null,"services":[{"id":"82500cc9-f33d-4d59-9e6e-d70ea6133077","details":{"type":"internal_ntp","address":"[fd00:1122:3344:106::e]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/cf14d1b9-b4db-4594-b3ab-a9957e770ce9/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled12.json b/sled-agent/tests/old-service-ledgers/rack2-sled12.json new file mode 100644 index 0000000000..5126c007f3 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack2-sled12.json @@ -0,0 +1 
@@ +{"generation":5,"requests":[{"zone":{"id":"a76b3357-b690-43b8-8352-3300568ffc2b","zone_type":"crucible","addresses":["fd00:1122:3344:104::a"],"dataset":{"id":"a76b3357-b690-43b8-8352-3300568ffc2b","name":{"pool_name":"oxp_05715ad8-59a1-44ab-ad5f-0cdffb46baab","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::a]:32345"},"services":[{"id":"a76b3357-b690-43b8-8352-3300568ffc2b","details":{"type":"crucible","address":"[fd00:1122:3344:104::a]:32345"}}]},"root":"/pool/ext/2ec2a731-3340-4777-b1bb-4a906c598174/crypt/zone"},{"zone":{"id":"8d202759-ca06-4383-b50f-7f3ec4062bf7","zone_type":"crucible","addresses":["fd00:1122:3344:104::4"],"dataset":{"id":"8d202759-ca06-4383-b50f-7f3ec4062bf7","name":{"pool_name":"oxp_56e32a8f-0877-4437-9cab-94a4928b1495","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::4]:32345"},"services":[{"id":"8d202759-ca06-4383-b50f-7f3ec4062bf7","details":{"type":"crucible","address":"[fd00:1122:3344:104::4]:32345"}}]},"root":"/pool/ext/613b58fc-5a80-42dc-a61c-b143cf220fb5/crypt/zone"},{"zone":{"id":"fcdda266-fc6a-4518-89db-aec007a4b682","zone_type":"crucible","addresses":["fd00:1122:3344:104::b"],"dataset":{"id":"fcdda266-fc6a-4518-89db-aec007a4b682","name":{"pool_name":"oxp_7e1293ad-b903-4054-aeae-2182d5e4a785","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::b]:32345"},"services":[{"id":"fcdda266-fc6a-4518-89db-aec007a4b682","details":{"type":"crucible","address":"[fd00:1122:3344:104::b]:32345"}}]},"root":"/pool/ext/416fd29e-d3b5-4fdf-8101-d0d163fa0706/crypt/zone"},{"zone":{"id":"167cf6a2-ec51-4de2-bc6c-7785bbc0e436","zone_type":"crucible","addresses":["fd00:1122:3344:104::c"],"dataset":{"id":"167cf6a2-ec51-4de2-bc6c-7785bbc0e436","name":{"pool_name":"oxp_f96c8d49-fdf7-4bd6-84f6-c282202d1abc","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::c]:32345"},"services":[{"id":"167cf6a2-ec51-4de2-bc6c-7785bbc0e436","details":{"type":"crucible","address":"[fd00:1122:3344:104::c]:32345"}}]},"root":"/pool/ext/56e32a8f-0877-4437-9cab-94a4928b1495/crypt/zone"},{"zone":{"id":"c6fde82d-8dae-4ef0-b557-6c3d094d9454","zone_type":"crucible","addresses":["fd00:1122:3344:104::9"],"dataset":{"id":"c6fde82d-8dae-4ef0-b557-6c3d094d9454","name":{"pool_name":"oxp_416fd29e-d3b5-4fdf-8101-d0d163fa0706","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::9]:32345"},"services":[{"id":"c6fde82d-8dae-4ef0-b557-6c3d094d9454","details":{"type":"crucible","address":"[fd00:1122:3344:104::9]:32345"}}]},"root":"/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone"},{"zone":{"id":"650f5da7-86a0-4ade-af0f-bc96e021ded0","zone_type":"crucible","addresses":["fd00:1122:3344:104::5"],"dataset":{"id":"650f5da7-86a0-4ade-af0f-bc96e021ded0","name":{"pool_name":"oxp_b4a71d3d-1ecd-418a-9a52-8d118f82082b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::5]:32345"},"services":[{"id":"650f5da7-86a0-4ade-af0f-bc96e021ded0","details":{"type":"crucible","address":"[fd00:1122:3344:104::5]:32345"}}]},"root":"/pool/ext/613b58fc-5a80-42dc-a61c-b143cf220fb5/crypt/zone"},{"zone":{"id":"7ce9a2c5-2d37-4188-b7b5-a9db819396c3","zone_type":"crucible","addresses":["fd00:1122:3344:104::d"],"dataset":{"id":"7ce9a2c5-2d37-4188-b7b5-a9db819396c3","name":{"pool_name":"oxp_c87d16b8-e814-4159-8562-f8d7fdd19d13","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::d]:32345"},"services":[{"id":"7ce9a2c5-2d37-4188-b7b5-a9db819396c3","details":{"type":"crucible","address":"[fd00:1122:3344:104::d]:32345"}}]},"root":"/pool/
ext/416fd29e-d3b5-4fdf-8101-d0d163fa0706/crypt/zone"},{"zone":{"id":"23e1cf01-70ab-422f-997b-6216158965c3","zone_type":"crucible","addresses":["fd00:1122:3344:104::8"],"dataset":{"id":"23e1cf01-70ab-422f-997b-6216158965c3","name":{"pool_name":"oxp_3af01cc4-1f16-47d9-a489-abafcb91c2db","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::8]:32345"},"services":[{"id":"23e1cf01-70ab-422f-997b-6216158965c3","details":{"type":"crucible","address":"[fd00:1122:3344:104::8]:32345"}}]},"root":"/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone"},{"zone":{"id":"50209816-89fb-48ed-9595-16899d114844","zone_type":"crucible","addresses":["fd00:1122:3344:104::6"],"dataset":{"id":"50209816-89fb-48ed-9595-16899d114844","name":{"pool_name":"oxp_2ec2a731-3340-4777-b1bb-4a906c598174","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::6]:32345"},"services":[{"id":"50209816-89fb-48ed-9595-16899d114844","details":{"type":"crucible","address":"[fd00:1122:3344:104::6]:32345"}}]},"root":"/pool/ext/416fd29e-d3b5-4fdf-8101-d0d163fa0706/crypt/zone"},{"zone":{"id":"20b100d0-84c3-4119-aa9b-0c632b0b6a3a","zone_type":"nexus","addresses":["fd00:1122:3344:104::3"],"dataset":null,"services":[{"id":"20b100d0-84c3-4119-aa9b-0c632b0b6a3a","details":{"type":"nexus","internal_address":"[fd00:1122:3344:104::3]:12221","external_ip":"172.20.26.4","nic":{"id":"364b0ecd-bf08-4cac-a993-bbf4a70564c7","kind":{"type":"service","id":"20b100d0-84c3-4119-aa9b-0c632b0b6a3a"},"name":"nexus-20b100d0-84c3-4119-aa9b-0c632b0b6a3a","ip":"172.30.2.6","mac":"A8:40:25:FF:B4:C1","subnet":"172.30.2.0/24","vni":100,"primary":true,"slot":0},"external_tls":true,"external_dns_servers":["1.1.1.1","9.9.9.9"]}}]},"root":"/pool/ext/c87d16b8-e814-4159-8562-f8d7fdd19d13/crypt/zone"},{"zone":{"id":"8bc0f29e-0c20-437e-b8ca-7b9844acda22","zone_type":"crucible","addresses":["fd00:1122:3344:104::7"],"dataset":{"id":"8bc0f29e-0c20-437e-b8ca-7b9844acda22","name":{"pool_name":"oxp_613b58fc-5a80-42dc-a61c-b143cf220fb5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::7]:32345"},"services":[{"id":"8bc0f29e-0c20-437e-b8ca-7b9844acda22","details":{"type":"crucible","address":"[fd00:1122:3344:104::7]:32345"}}]},"root":"/pool/ext/56e32a8f-0877-4437-9cab-94a4928b1495/crypt/zone"},{"zone":{"id":"c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55","zone_type":"ntp","addresses":["fd00:1122:3344:104::e"],"dataset":null,"services":[{"id":"c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55","details":{"type":"boundary_ntp","address":"[fd00:1122:3344:104::e]:123","ntp_servers":["ntp.eng.oxide.computer"],"dns_servers":["1.1.1.1","9.9.9.9"],"domain":null,"nic":{"id":"a4b9bacf-6c04-431a-81ad-9bf0302af96e","kind":{"type":"service","id":"c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55"},"name":"ntp-c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55","ip":"172.30.3.5","mac":"A8:40:25:FF:B2:52","subnet":"172.30.3.0/24","vni":100,"primary":true,"slot":0},"snat_cfg":{"ip":"172.20.26.6","first_port":0,"last_port":16383}}}]},"root":"/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone"},{"zone":{"id":"51c9ad09-7814-4643-8ad4-689ccbe53fbd","zone_type":"internal_dns","addresses":["fd00:1122:3344:1::1"],"dataset":{"id":"51c9ad09-7814-4643-8ad4-689ccbe53fbd","name":{"pool_name":"oxp_56e32a8f-0877-4437-9cab-94a4928b1495","kind":{"type":"internal_dns"}},"service_address":"[fd00:1122:3344:1::1]:5353"},"services":[{"id":"51c9ad09-7814-4643-8ad4-689ccbe53fbd","details":{"type":"internal_dns","http_address":"[fd00:1122:3344:1::1]:5353","dns_address":"[fd00:1122:3344:1::1]:53","gz_address":"fd00:1
122:3344:1::2","gz_address_index":0}}]},"root":"/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled14.json b/sled-agent/tests/old-service-ledgers/rack2-sled14.json new file mode 100644 index 0000000000..421e21d84d --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack2-sled14.json @@ -0,0 +1 @@ +{"generation":4,"requests":[{"zone":{"id":"ee8b2cfa-87fe-46a6-98ef-23640b80a968","zone_type":"crucible","addresses":["fd00:1122:3344:10b::d"],"dataset":{"id":"ee8b2cfa-87fe-46a6-98ef-23640b80a968","name":{"pool_name":"oxp_4a624324-003a-4255-98e8-546a90b5b7fa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::d]:32345"},"services":[{"id":"ee8b2cfa-87fe-46a6-98ef-23640b80a968","details":{"type":"crucible","address":"[fd00:1122:3344:10b::d]:32345"}}]},"root":"/pool/ext/6b9ec5f1-859f-459c-9c06-6a51ba87786f/crypt/zone"},{"zone":{"id":"9228f8ca-2a83-439f-9cb7-f2801b5fea27","zone_type":"crucible","addresses":["fd00:1122:3344:10b::6"],"dataset":{"id":"9228f8ca-2a83-439f-9cb7-f2801b5fea27","name":{"pool_name":"oxp_6b9ec5f1-859f-459c-9c06-6a51ba87786f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::6]:32345"},"services":[{"id":"9228f8ca-2a83-439f-9cb7-f2801b5fea27","details":{"type":"crucible","address":"[fd00:1122:3344:10b::6]:32345"}}]},"root":"/pool/ext/6b9ec5f1-859f-459c-9c06-6a51ba87786f/crypt/zone"},{"zone":{"id":"ee44cdde-7ac9-4469-9f1d-e8bcfeb5cc46","zone_type":"crucible","addresses":["fd00:1122:3344:10b::e"],"dataset":{"id":"ee44cdde-7ac9-4469-9f1d-e8bcfeb5cc46","name":{"pool_name":"oxp_11b02ce7-7e50-486f-86c2-de8af9575a45","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::e]:32345"},"services":[{"id":"ee44cdde-7ac9-4469-9f1d-e8bcfeb5cc46","details":{"type":"crucible","address":"[fd00:1122:3344:10b::e]:32345"}}]},"root":"/pool/ext/11b02ce7-7e50-486f-86c2-de8af9575a45/crypt/zone"},{"zone":{"id":"96bac0b1-8b34-4c81-9e76-6404d2c37630","zone_type":"crucible_pantry","addresses":["fd00:1122:3344:10b::4"],"dataset":null,"services":[{"id":"96bac0b1-8b34-4c81-9e76-6404d2c37630","details":{"type":"crucible_pantry","address":"[fd00:1122:3344:10b::4]:17000"}}]},"root":"/pool/ext/350b2814-7b7f-40f1-9bf6-9818a1ef49bb/crypt/zone"},{"zone":{"id":"d4e1e554-7b98-4413-809e-4a42561c3d0c","zone_type":"crucible","addresses":["fd00:1122:3344:10b::a"],"dataset":{"id":"d4e1e554-7b98-4413-809e-4a42561c3d0c","name":{"pool_name":"oxp_e6d2fe1d-c74d-40cd-8fae-bc7d06bdaac8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::a]:32345"},"services":[{"id":"d4e1e554-7b98-4413-809e-4a42561c3d0c","details":{"type":"crucible","address":"[fd00:1122:3344:10b::a]:32345"}}]},"root":"/pool/ext/6b9ec5f1-859f-459c-9c06-6a51ba87786f/crypt/zone"},{"zone":{"id":"1dd69b02-a032-46c3-8e2a-5012e8314455","zone_type":"crucible","addresses":["fd00:1122:3344:10b::b"],"dataset":{"id":"1dd69b02-a032-46c3-8e2a-5012e8314455","name":{"pool_name":"oxp_350b2814-7b7f-40f1-9bf6-9818a1ef49bb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::b]:32345"},"services":[{"id":"1dd69b02-a032-46c3-8e2a-5012e8314455","details":{"type":"crucible","address":"[fd00:1122:3344:10b::b]:32345"}}]},"root":"/pool/ext/350b2814-7b7f-40f1-9bf6-9818a1ef49bb/crypt/zone"},{"zone":{"id":"921f7752-d2f3-40df-a739-5cb1390abc2c","zone_type":"crucible","addresses":["fd00:1122:3344:10b::8"],"dataset":{"id":"921f7752-d2f3-40df-a739-5cb1390abc2c","name":{"pool_name":"oxp_2d1ebe24-6deb-4f81-8450-6842de28
126c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::8]:32345"},"services":[{"id":"921f7752-d2f3-40df-a739-5cb1390abc2c","details":{"type":"crucible","address":"[fd00:1122:3344:10b::8]:32345"}}]},"root":"/pool/ext/91ea7bb6-2be7-4498-9b0d-a0521509ec00/crypt/zone"},{"zone":{"id":"609b25e8-9750-4308-ae6f-7202907a3675","zone_type":"crucible","addresses":["fd00:1122:3344:10b::9"],"dataset":{"id":"609b25e8-9750-4308-ae6f-7202907a3675","name":{"pool_name":"oxp_91ea7bb6-2be7-4498-9b0d-a0521509ec00","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::9]:32345"},"services":[{"id":"609b25e8-9750-4308-ae6f-7202907a3675","details":{"type":"crucible","address":"[fd00:1122:3344:10b::9]:32345"}}]},"root":"/pool/ext/2d1ebe24-6deb-4f81-8450-6842de28126c/crypt/zone"},{"zone":{"id":"a232eba2-e94f-4592-a5a6-ec23f9be3296","zone_type":"crucible","addresses":["fd00:1122:3344:10b::5"],"dataset":{"id":"a232eba2-e94f-4592-a5a6-ec23f9be3296","name":{"pool_name":"oxp_e12f29b8-1ab8-431e-bc96-1c1298947980","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::5]:32345"},"services":[{"id":"a232eba2-e94f-4592-a5a6-ec23f9be3296","details":{"type":"crucible","address":"[fd00:1122:3344:10b::5]:32345"}}]},"root":"/pool/ext/021afd19-2f87-4def-9284-ab7add1dd6ae/crypt/zone"},{"zone":{"id":"800d1758-9312-4b1a-8f02-dc6d644c2a9b","zone_type":"crucible","addresses":["fd00:1122:3344:10b::c"],"dataset":{"id":"800d1758-9312-4b1a-8f02-dc6d644c2a9b","name":{"pool_name":"oxp_b6932bb0-bab8-4876-914a-9c75a600e794","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::c]:32345"},"services":[{"id":"800d1758-9312-4b1a-8f02-dc6d644c2a9b","details":{"type":"crucible","address":"[fd00:1122:3344:10b::c]:32345"}}]},"root":"/pool/ext/b6932bb0-bab8-4876-914a-9c75a600e794/crypt/zone"},{"zone":{"id":"668a4d4a-96dc-4b45-866b-bed3d64c26ec","zone_type":"crucible","addresses":["fd00:1122:3344:10b::7"],"dataset":{"id":"668a4d4a-96dc-4b45-866b-bed3d64c26ec","name":{"pool_name":"oxp_021afd19-2f87-4def-9284-ab7add1dd6ae","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::7]:32345"},"services":[{"id":"668a4d4a-96dc-4b45-866b-bed3d64c26ec","details":{"type":"crucible","address":"[fd00:1122:3344:10b::7]:32345"}}]},"root":"/pool/ext/91ea7bb6-2be7-4498-9b0d-a0521509ec00/crypt/zone"},{"zone":{"id":"8bbea076-ff60-4330-8302-383e18140ef3","zone_type":"cockroach_db","addresses":["fd00:1122:3344:10b::3"],"dataset":{"id":"8bbea076-ff60-4330-8302-383e18140ef3","name":{"pool_name":"oxp_e12f29b8-1ab8-431e-bc96-1c1298947980","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:10b::3]:32221"},"services":[{"id":"8bbea076-ff60-4330-8302-383e18140ef3","details":{"type":"cockroach_db","address":"[fd00:1122:3344:10b::3]:32221"}}]},"root":"/pool/ext/4a624324-003a-4255-98e8-546a90b5b7fa/crypt/zone"},{"zone":{"id":"3ccea933-89f2-4ce5-8367-efb0afeffe97","zone_type":"ntp","addresses":["fd00:1122:3344:10b::f"],"dataset":null,"services":[{"id":"3ccea933-89f2-4ce5-8367-efb0afeffe97","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10b::f]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/4a624324-003a-4255-98e8-546a90b5b7fa/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled16.json 
b/sled-agent/tests/old-service-ledgers/rack2-sled16.json new file mode 100644 index 0000000000..c928e004b2 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack2-sled16.json @@ -0,0 +1 @@ +{"generation":4,"requests":[{"zone":{"id":"b12aa520-a769-4eac-b56b-09960550a831","zone_type":"crucible","addresses":["fd00:1122:3344:108::7"],"dataset":{"id":"b12aa520-a769-4eac-b56b-09960550a831","name":{"pool_name":"oxp_34dadf3f-f60c-4acc-b82b-4b0c82224222","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::7]:32345"},"services":[{"id":"b12aa520-a769-4eac-b56b-09960550a831","details":{"type":"crucible","address":"[fd00:1122:3344:108::7]:32345"}}]},"root":"/pool/ext/8be8c577-23ac-452e-a205-6d9c95088f61/crypt/zone"},{"zone":{"id":"9bdc40ee-ccba-4d18-9efb-a30596e2d290","zone_type":"crucible","addresses":["fd00:1122:3344:108::d"],"dataset":{"id":"9bdc40ee-ccba-4d18-9efb-a30596e2d290","name":{"pool_name":"oxp_eb81728c-3b83-42fb-8133-ac32a0bdf70f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::d]:32345"},"services":[{"id":"9bdc40ee-ccba-4d18-9efb-a30596e2d290","details":{"type":"crucible","address":"[fd00:1122:3344:108::d]:32345"}}]},"root":"/pool/ext/8be8c577-23ac-452e-a205-6d9c95088f61/crypt/zone"},{"zone":{"id":"c9a367c7-64d7-48e4-b484-9ecb4e8faea7","zone_type":"crucible","addresses":["fd00:1122:3344:108::9"],"dataset":{"id":"c9a367c7-64d7-48e4-b484-9ecb4e8faea7","name":{"pool_name":"oxp_76ab5a67-e20f-4bf0-87b3-01fcc4144bd2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::9]:32345"},"services":[{"id":"c9a367c7-64d7-48e4-b484-9ecb4e8faea7","details":{"type":"crucible","address":"[fd00:1122:3344:108::9]:32345"}}]},"root":"/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone"},{"zone":{"id":"bc5124d8-65e8-4879-bfac-64d59003d482","zone_type":"crucible","addresses":["fd00:1122:3344:108::a"],"dataset":{"id":"bc5124d8-65e8-4879-bfac-64d59003d482","name":{"pool_name":"oxp_5fac7a1d-e855-46e1-b8c2-dd848ac4fee6","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::a]:32345"},"services":[{"id":"bc5124d8-65e8-4879-bfac-64d59003d482","details":{"type":"crucible","address":"[fd00:1122:3344:108::a]:32345"}}]},"root":"/pool/ext/0c4ef358-5533-43db-ad38-a8eff716e53a/crypt/zone"},{"zone":{"id":"5cc7c840-8e6b-48c8-ac4b-f4297f8cf61a","zone_type":"crucible","addresses":["fd00:1122:3344:108::c"],"dataset":{"id":"5cc7c840-8e6b-48c8-ac4b-f4297f8cf61a","name":{"pool_name":"oxp_0c4ef358-5533-43db-ad38-a8eff716e53a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::c]:32345"},"services":[{"id":"5cc7c840-8e6b-48c8-ac4b-f4297f8cf61a","details":{"type":"crucible","address":"[fd00:1122:3344:108::c]:32345"}}]},"root":"/pool/ext/6d3e9cc6-f03b-4055-9785-05711d5e4fdc/crypt/zone"},{"zone":{"id":"3b767edf-a72d-4d80-a0fc-65d6801ed0e0","zone_type":"crucible","addresses":["fd00:1122:3344:108::e"],"dataset":{"id":"3b767edf-a72d-4d80-a0fc-65d6801ed0e0","name":{"pool_name":"oxp_f522118c-5dcd-4116-8044-07f0cceec52e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::e]:32345"},"services":[{"id":"3b767edf-a72d-4d80-a0fc-65d6801ed0e0","details":{"type":"crucible","address":"[fd00:1122:3344:108::e]:32345"}}]},"root":"/pool/ext/5fac7a1d-e855-46e1-b8c2-dd848ac4fee6/crypt/zone"},{"zone":{"id":"f3c02ed6-fbc5-45c3-a030-409f74b450fd","zone_type":"crucible_pantry","addresses":["fd00:1122:3344:108::4"],"dataset":null,"services":[{"id":"f3c02ed6-fbc5-45c3-a030-409f74b450fd","details":{"type":"crucible_pantry","address":"[fd00:1122:3344:108::4]:
17000"}}]},"root":"/pool/ext/eb81728c-3b83-42fb-8133-ac32a0bdf70f/crypt/zone"},{"zone":{"id":"85bd9bdb-1ec5-4a8d-badb-8b5d502546a1","zone_type":"crucible","addresses":["fd00:1122:3344:108::5"],"dataset":{"id":"85bd9bdb-1ec5-4a8d-badb-8b5d502546a1","name":{"pool_name":"oxp_416232c1-bc8f-403f-bacb-28403dd8fced","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::5]:32345"},"services":[{"id":"85bd9bdb-1ec5-4a8d-badb-8b5d502546a1","details":{"type":"crucible","address":"[fd00:1122:3344:108::5]:32345"}}]},"root":"/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone"},{"zone":{"id":"d2f1c3df-d4e0-4469-b50e-f1871da86ebf","zone_type":"crucible","addresses":["fd00:1122:3344:108::6"],"dataset":{"id":"d2f1c3df-d4e0-4469-b50e-f1871da86ebf","name":{"pool_name":"oxp_6d3e9cc6-f03b-4055-9785-05711d5e4fdc","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::6]:32345"},"services":[{"id":"d2f1c3df-d4e0-4469-b50e-f1871da86ebf","details":{"type":"crucible","address":"[fd00:1122:3344:108::6]:32345"}}]},"root":"/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone"},{"zone":{"id":"88fe3c12-4c55-47df-b4ee-ed26b795439d","zone_type":"crucible","addresses":["fd00:1122:3344:108::8"],"dataset":{"id":"88fe3c12-4c55-47df-b4ee-ed26b795439d","name":{"pool_name":"oxp_8be8c577-23ac-452e-a205-6d9c95088f61","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::8]:32345"},"services":[{"id":"88fe3c12-4c55-47df-b4ee-ed26b795439d","details":{"type":"crucible","address":"[fd00:1122:3344:108::8]:32345"}}]},"root":"/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone"},{"zone":{"id":"4d20175a-588b-44b8-8b9c-b16c6c3a97a0","zone_type":"crucible","addresses":["fd00:1122:3344:108::b"],"dataset":{"id":"4d20175a-588b-44b8-8b9c-b16c6c3a97a0","name":{"pool_name":"oxp_a726cacd-fa35-4ed2-ade6-31ad928b24cb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::b]:32345"},"services":[{"id":"4d20175a-588b-44b8-8b9c-b16c6c3a97a0","details":{"type":"crucible","address":"[fd00:1122:3344:108::b]:32345"}}]},"root":"/pool/ext/0c4ef358-5533-43db-ad38-a8eff716e53a/crypt/zone"},{"zone":{"id":"e86845b5-eabd-49f5-9a10-6dfef9066209","zone_type":"cockroach_db","addresses":["fd00:1122:3344:108::3"],"dataset":{"id":"e86845b5-eabd-49f5-9a10-6dfef9066209","name":{"pool_name":"oxp_416232c1-bc8f-403f-bacb-28403dd8fced","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:108::3]:32221"},"services":[{"id":"e86845b5-eabd-49f5-9a10-6dfef9066209","details":{"type":"cockroach_db","address":"[fd00:1122:3344:108::3]:32221"}}]},"root":"/pool/ext/416232c1-bc8f-403f-bacb-28403dd8fced/crypt/zone"},{"zone":{"id":"209b6213-588b-43b6-a89b-19ee5c84ffba","zone_type":"ntp","addresses":["fd00:1122:3344:108::f"],"dataset":null,"services":[{"id":"209b6213-588b-43b6-a89b-19ee5c84ffba","details":{"type":"internal_ntp","address":"[fd00:1122:3344:108::f]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/416232c1-bc8f-403f-bacb-28403dd8fced/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled17.json b/sled-agent/tests/old-service-ledgers/rack2-sled17.json new file mode 100644 index 0000000000..93872adf13 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack2-sled17.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"90b53c3d-42fa-4ca9-bbfc-96fff245b508","zone_type":"crucible","addresses":["fd00:1122:3344:109::4"],"dataset":{"id":"90b53c3d-42fa-4ca9-bbfc-96fff245b508","name":{"pool_name":"oxp_ae56280b-17ce-4266-8573-e1da9db6c6bb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::4]:32345"},"services":[{"id":"90b53c3d-42fa-4ca9-bbfc-96fff245b508","details":{"type":"crucible","address":"[fd00:1122:3344:109::4]:32345"}}]},"root":"/pool/ext/b0e1a261-b932-47c4-81e9-1977275ae9d9/crypt/zone"},{"zone":{"id":"4f9f2e1d-be04-4e8b-a50b-ffb18557a650","zone_type":"crucible","addresses":["fd00:1122:3344:109::5"],"dataset":{"id":"4f9f2e1d-be04-4e8b-a50b-ffb18557a650","name":{"pool_name":"oxp_d5b07362-64db-4b18-a3e9-8d7cbabae2d5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::5]:32345"},"services":[{"id":"4f9f2e1d-be04-4e8b-a50b-ffb18557a650","details":{"type":"crucible","address":"[fd00:1122:3344:109::5]:32345"}}]},"root":"/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone"},{"zone":{"id":"2fa5671d-3109-4f11-ae70-1280f4fa3b89","zone_type":"crucible","addresses":["fd00:1122:3344:109::6"],"dataset":{"id":"2fa5671d-3109-4f11-ae70-1280f4fa3b89","name":{"pool_name":"oxp_9ba7bfbf-b9a2-4237-a142-94c1e68de984","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::6]:32345"},"services":[{"id":"2fa5671d-3109-4f11-ae70-1280f4fa3b89","details":{"type":"crucible","address":"[fd00:1122:3344:109::6]:32345"}}]},"root":"/pool/ext/3cafbb47-c194-4a42-99ff-34dfeab999ed/crypt/zone"},{"zone":{"id":"b63c6882-ca90-4156-b561-4781ab4a0962","zone_type":"crucible","addresses":["fd00:1122:3344:109::7"],"dataset":{"id":"b63c6882-ca90-4156-b561-4781ab4a0962","name":{"pool_name":"oxp_b0e1a261-b932-47c4-81e9-1977275ae9d9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::7]:32345"},"services":[{"id":"b63c6882-ca90-4156-b561-4781ab4a0962","details":{"type":"crucible","address":"[fd00:1122:3344:109::7]:32345"}}]},"root":"/pool/ext/d5b07362-64db-4b18-a3e9-8d7cbabae2d5/crypt/zone"},{"zone":{"id":"f71344eb-f7e2-439d-82a0-9941e6868fb6","zone_type":"crucible","addresses":["fd00:1122:3344:109::9"],"dataset":{"id":"f71344eb-f7e2-439d-82a0-9941e6868fb6","name":{"pool_name":"oxp_027a82e8-daa3-4fa6-8205-ed03445e1086","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::9]:32345"},"services":[{"id":"f71344eb-f7e2-439d-82a0-9941e6868fb6","details":{"type":"crucible","address":"[fd00:1122:3344:109::9]:32345"}}]},"root":"/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone"},{"zone":{"id":"a60cf0d7-12d5-43cb-aa3f-7a9e84de08fb","zone_type":"crucible","addresses":["fd00:1122:3344:109::a"],"dataset":{"id":"a60cf0d7-12d5-43cb-aa3f-7a9e84de08fb","name":{"pool_name":"oxp_8736aaf9-4d72-42b1-8e4f-07644d999c8b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::a]:32345"},"services":[{"id":"a60cf0d7-12d5-43cb-aa3f-7a9e84de08fb","details":{"type":"crucible","address":"[fd00:1122:3344:109::a]:32345"}}]},"root":"/pool/ext/8736aaf9-4d72-42b1-8e4f-07644d999c8b/crypt/zone"},{"zone":{"id":"5d0e03b2-8958-4c43-8851-bf819f102958","zone_type":"crucible","addresses":["fd00:1122:3344:109::8"],"dataset":{"id":"5d0e03b2-8958-4c43-8851-bf819f102958","name":{"pool_name":"oxp_62426615-7832-49e7-9426-e39ffeb42c69","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::8]:32345"},"services":[{"id":"5d0e03b2-8958-4c43-8851-bf819f102958","details":{"type":"crucible","address":"[fd00:1122:3344:109::8]:32345"}}]},"root":"/pool/ext
/07fc8ec9-1216-4d98-be34-c2970b585e61/crypt/zone"},{"zone":{"id":"accc05a2-ec80-4856-a825-ec6b7f700eaa","zone_type":"crucible","addresses":["fd00:1122:3344:109::d"],"dataset":{"id":"accc05a2-ec80-4856-a825-ec6b7f700eaa","name":{"pool_name":"oxp_dc083c53-7014-4482-8a79-f338ba2b0fb4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::d]:32345"},"services":[{"id":"accc05a2-ec80-4856-a825-ec6b7f700eaa","details":{"type":"crucible","address":"[fd00:1122:3344:109::d]:32345"}}]},"root":"/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone"},{"zone":{"id":"2e32fdcc-737a-4430-8290-cb7028ea4d50","zone_type":"crucible","addresses":["fd00:1122:3344:109::b"],"dataset":{"id":"2e32fdcc-737a-4430-8290-cb7028ea4d50","name":{"pool_name":"oxp_3cafbb47-c194-4a42-99ff-34dfeab999ed","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::b]:32345"},"services":[{"id":"2e32fdcc-737a-4430-8290-cb7028ea4d50","details":{"type":"crucible","address":"[fd00:1122:3344:109::b]:32345"}}]},"root":"/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone"},{"zone":{"id":"a97c6ae2-37f6-4d93-a66e-cb5cd3c6aaa2","zone_type":"crucible","addresses":["fd00:1122:3344:109::c"],"dataset":{"id":"a97c6ae2-37f6-4d93-a66e-cb5cd3c6aaa2","name":{"pool_name":"oxp_07fc8ec9-1216-4d98-be34-c2970b585e61","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::c]:32345"},"services":[{"id":"a97c6ae2-37f6-4d93-a66e-cb5cd3c6aaa2","details":{"type":"crucible","address":"[fd00:1122:3344:109::c]:32345"}}]},"root":"/pool/ext/07fc8ec9-1216-4d98-be34-c2970b585e61/crypt/zone"},{"zone":{"id":"3237a532-acaa-4ebe-bf11-dde794fea739","zone_type":"cockroach_db","addresses":["fd00:1122:3344:109::3"],"dataset":{"id":"3237a532-acaa-4ebe-bf11-dde794fea739","name":{"pool_name":"oxp_ae56280b-17ce-4266-8573-e1da9db6c6bb","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:109::3]:32221"},"services":[{"id":"3237a532-acaa-4ebe-bf11-dde794fea739","details":{"type":"cockroach_db","address":"[fd00:1122:3344:109::3]:32221"}}]},"root":"/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone"},{"zone":{"id":"83257100-5590-484a-b72a-a079389d8da6","zone_type":"ntp","addresses":["fd00:1122:3344:109::e"],"dataset":null,"services":[{"id":"83257100-5590-484a-b72a-a079389d8da6","details":{"type":"internal_ntp","address":"[fd00:1122:3344:109::e]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/3cafbb47-c194-4a42-99ff-34dfeab999ed/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled21.json b/sled-agent/tests/old-service-ledgers/rack2-sled21.json new file mode 100644 index 0000000000..78e003f79e --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack2-sled21.json @@ -0,0 +1 @@ 
+{"generation":5,"requests":[{"zone":{"id":"0437b69d-73a8-4231-86f9-6b5556e7e7ef","zone_type":"crucible","addresses":["fd00:1122:3344:102::5"],"dataset":{"id":"0437b69d-73a8-4231-86f9-6b5556e7e7ef","name":{"pool_name":"oxp_aa0ffe35-76db-42ab-adf2-ceb072bdf811","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::5]:32345"},"services":[{"id":"0437b69d-73a8-4231-86f9-6b5556e7e7ef","details":{"type":"crucible","address":"[fd00:1122:3344:102::5]:32345"}}]},"root":"/pool/ext/0d2805da-6d24-4e57-a700-0c3865c05544/crypt/zone"},{"zone":{"id":"47234ca5-305f-436a-9e9a-36bca9667680","zone_type":"crucible","addresses":["fd00:1122:3344:102::b"],"dataset":{"id":"47234ca5-305f-436a-9e9a-36bca9667680","name":{"pool_name":"oxp_0d2805da-6d24-4e57-a700-0c3865c05544","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::b]:32345"},"services":[{"id":"47234ca5-305f-436a-9e9a-36bca9667680","details":{"type":"crucible","address":"[fd00:1122:3344:102::b]:32345"}}]},"root":"/pool/ext/160691d8-33a1-4d7d-a48a-c3fd27d76822/crypt/zone"},{"zone":{"id":"2898657e-4141-4c05-851b-147bffc6bbbd","zone_type":"nexus","addresses":["fd00:1122:3344:102::3"],"dataset":null,"services":[{"id":"2898657e-4141-4c05-851b-147bffc6bbbd","details":{"type":"nexus","internal_address":"[fd00:1122:3344:102::3]:12221","external_ip":"172.20.26.5","nic":{"id":"2e9a412e-c79a-48fe-8fa4-f5a6afed1040","kind":{"type":"service","id":"2898657e-4141-4c05-851b-147bffc6bbbd"},"name":"nexus-2898657e-4141-4c05-851b-147bffc6bbbd","ip":"172.30.2.7","mac":"A8:40:25:FF:C6:59","subnet":"172.30.2.0/24","vni":100,"primary":true,"slot":0},"external_tls":true,"external_dns_servers":["1.1.1.1","9.9.9.9"]}}]},"root":"/pool/ext/c0b4ecc1-a145-443f-90d1-2e8136b007bc/crypt/zone"},{"zone":{"id":"cf98c4d6-4a7b-49c0-9b14-48a8adf52ce9","zone_type":"crucible","addresses":["fd00:1122:3344:102::c"],"dataset":{"id":"cf98c4d6-4a7b-49c0-9b14-48a8adf52ce9","name":{"pool_name":"oxp_c0b4ecc1-a145-443f-90d1-2e8136b007bc","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::c]:32345"},"services":[{"id":"cf98c4d6-4a7b-49c0-9b14-48a8adf52ce9","details":{"type":"crucible","address":"[fd00:1122:3344:102::c]:32345"}}]},"root":"/pool/ext/f6acd70a-d6cb-464d-a460-dd5c60301562/crypt/zone"},{"zone":{"id":"13c1e91e-bfcc-4eea-8185-412fc37fdea3","zone_type":"crucible","addresses":["fd00:1122:3344:102::9"],"dataset":{"id":"13c1e91e-bfcc-4eea-8185-412fc37fdea3","name":{"pool_name":"oxp_e9b0a2e4-8060-41bd-a3b5-d0642246d06d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::9]:32345"},"services":[{"id":"13c1e91e-bfcc-4eea-8185-412fc37fdea3","details":{"type":"crucible","address":"[fd00:1122:3344:102::9]:32345"}}]},"root":"/pool/ext/c0b4ecc1-a145-443f-90d1-2e8136b007bc/crypt/zone"},{"zone":{"id":"c9cb60af-9e0e-4b3b-b971-53138a9b8d27","zone_type":"crucible","addresses":["fd00:1122:3344:102::4"],"dataset":{"id":"c9cb60af-9e0e-4b3b-b971-53138a9b8d27","name":{"pool_name":"oxp_77749ec7-39a9-489d-904b-87f7223c4e3c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::4]:32345"},"services":[{"id":"c9cb60af-9e0e-4b3b-b971-53138a9b8d27","details":{"type":"crucible","address":"[fd00:1122:3344:102::4]:32345"}}]},"root":"/pool/ext/77749ec7-39a9-489d-904b-87f7223c4e3c/crypt/zone"},{"zone":{"id":"32995cfa-47ec-4b84-8514-7c1c8a86c19d","zone_type":"crucible","addresses":["fd00:1122:3344:102::8"],"dataset":{"id":"32995cfa-47ec-4b84-8514-7c1c8a86c19d","name":{"pool_name":"oxp_eac83f81-eb51-4f3e-874e-82f55dd952ba","kind":{"type":"crucible"}},"service_
address":"[fd00:1122:3344:102::8]:32345"},"services":[{"id":"32995cfa-47ec-4b84-8514-7c1c8a86c19d","details":{"type":"crucible","address":"[fd00:1122:3344:102::8]:32345"}}]},"root":"/pool/ext/0d2805da-6d24-4e57-a700-0c3865c05544/crypt/zone"},{"zone":{"id":"b93d2e2d-d54b-4503-85c3-9878e3cee9c7","zone_type":"crucible","addresses":["fd00:1122:3344:102::a"],"dataset":{"id":"b93d2e2d-d54b-4503-85c3-9878e3cee9c7","name":{"pool_name":"oxp_160691d8-33a1-4d7d-a48a-c3fd27d76822","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::a]:32345"},"services":[{"id":"b93d2e2d-d54b-4503-85c3-9878e3cee9c7","details":{"type":"crucible","address":"[fd00:1122:3344:102::a]:32345"}}]},"root":"/pool/ext/138663ad-a382-4595-baf0-08f6b0276a67/crypt/zone"},{"zone":{"id":"2ebbac4f-7b0f-43eb-99fd-dd6ff7f9e097","zone_type":"crucible","addresses":["fd00:1122:3344:102::6"],"dataset":{"id":"2ebbac4f-7b0f-43eb-99fd-dd6ff7f9e097","name":{"pool_name":"oxp_138663ad-a382-4595-baf0-08f6b0276a67","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::6]:32345"},"services":[{"id":"2ebbac4f-7b0f-43eb-99fd-dd6ff7f9e097","details":{"type":"crucible","address":"[fd00:1122:3344:102::6]:32345"}}]},"root":"/pool/ext/e9b0a2e4-8060-41bd-a3b5-d0642246d06d/crypt/zone"},{"zone":{"id":"d0eea3b2-e5ac-42bf-97b7-531b78fa06d1","zone_type":"crucible","addresses":["fd00:1122:3344:102::7"],"dataset":{"id":"d0eea3b2-e5ac-42bf-97b7-531b78fa06d1","name":{"pool_name":"oxp_69f0b863-f73f-42b2-9822-b2cb99f09003","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::7]:32345"},"services":[{"id":"d0eea3b2-e5ac-42bf-97b7-531b78fa06d1","details":{"type":"crucible","address":"[fd00:1122:3344:102::7]:32345"}}]},"root":"/pool/ext/138663ad-a382-4595-baf0-08f6b0276a67/crypt/zone"},{"zone":{"id":"2b34cd1d-ea7d-41a1-82b9-75550fdf6eb0","zone_type":"crucible","addresses":["fd00:1122:3344:102::d"],"dataset":{"id":"2b34cd1d-ea7d-41a1-82b9-75550fdf6eb0","name":{"pool_name":"oxp_f6acd70a-d6cb-464d-a460-dd5c60301562","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::d]:32345"},"services":[{"id":"2b34cd1d-ea7d-41a1-82b9-75550fdf6eb0","details":{"type":"crucible","address":"[fd00:1122:3344:102::d]:32345"}}]},"root":"/pool/ext/c0b4ecc1-a145-443f-90d1-2e8136b007bc/crypt/zone"},{"zone":{"id":"6ea2684c-115e-48a6-8453-ab52d1cecd73","zone_type":"ntp","addresses":["fd00:1122:3344:102::e"],"dataset":null,"services":[{"id":"6ea2684c-115e-48a6-8453-ab52d1cecd73","details":{"type":"boundary_ntp","address":"[fd00:1122:3344:102::e]:123","ntp_servers":["ntp.eng.oxide.computer"],"dns_servers":["1.1.1.1","9.9.9.9"],"domain":null,"nic":{"id":"4effd079-ed4e-4cf6-8545-bb9574f516d2","kind":{"type":"service","id":"6ea2684c-115e-48a6-8453-ab52d1cecd73"},"name":"ntp-6ea2684c-115e-48a6-8453-ab52d1cecd73","ip":"172.30.3.6","mac":"A8:40:25:FF:A0:F9","subnet":"172.30.3.0/24","vni":100,"primary":true,"slot":0},"snat_cfg":{"ip":"172.20.26.7","first_port":16384,"last_port":32767}}}]},"root":"/pool/ext/aa0ffe35-76db-42ab-adf2-ceb072bdf811/crypt/zone"},{"zone":{"id":"3a1ea15f-06a4-4afd-959a-c3a00b2bdd80","zone_type":"internal_dns","addresses":["fd00:1122:3344:2::1"],"dataset":{"id":"3a1ea15f-06a4-4afd-959a-c3a00b2bdd80","name":{"pool_name":"oxp_77749ec7-39a9-489d-904b-87f7223c4e3c","kind":{"type":"internal_dns"}},"service_address":"[fd00:1122:3344:2::1]:5353"},"services":[{"id":"3a1ea15f-06a4-4afd-959a-c3a00b2bdd80","details":{"type":"internal_dns","http_address":"[fd00:1122:3344:2::1]:5353","dns_address":"[fd00:1122:3344:2::1]:53","gz_address":"fd00:
1122:3344:2::2","gz_address_index":1}}]},"root":"/pool/ext/69f0b863-f73f-42b2-9822-b2cb99f09003/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled23.json b/sled-agent/tests/old-service-ledgers/rack2-sled23.json new file mode 100644 index 0000000000..29b8c455d3 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack2-sled23.json @@ -0,0 +1 @@ +{"generation":5,"requests":[{"zone":{"id":"1876cdcf-b2e7-4b79-ad2e-67df716e1860","zone_type":"crucible","addresses":["fd00:1122:3344:10a::8"],"dataset":{"id":"1876cdcf-b2e7-4b79-ad2e-67df716e1860","name":{"pool_name":"oxp_d4c6bdc6-5e99-4f6c-b57a-9bfcb9a76be4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::8]:32345"},"services":[{"id":"1876cdcf-b2e7-4b79-ad2e-67df716e1860","details":{"type":"crucible","address":"[fd00:1122:3344:10a::8]:32345"}}]},"root":"/pool/ext/86c58ea3-1413-4af3-9aff-9c0a3d758459/crypt/zone"},{"zone":{"id":"0e708ee3-b7a6-4993-a88a-4489add33e29","zone_type":"crucible","addresses":["fd00:1122:3344:10a::d"],"dataset":{"id":"0e708ee3-b7a6-4993-a88a-4489add33e29","name":{"pool_name":"oxp_718ad834-b415-4abb-934d-9f987cde0a96","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::d]:32345"},"services":[{"id":"0e708ee3-b7a6-4993-a88a-4489add33e29","details":{"type":"crucible","address":"[fd00:1122:3344:10a::d]:32345"}}]},"root":"/pool/ext/30f7d236-c835-46cc-bc27-9099a6826f67/crypt/zone"},{"zone":{"id":"4e1b9a65-848f-4649-b360-1df0d135b44d","zone_type":"crucible","addresses":["fd00:1122:3344:10a::c"],"dataset":{"id":"4e1b9a65-848f-4649-b360-1df0d135b44d","name":{"pool_name":"oxp_88ee08c6-1c0f-44c2-9110-b8d5a7589ebb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::c]:32345"},"services":[{"id":"4e1b9a65-848f-4649-b360-1df0d135b44d","details":{"type":"crucible","address":"[fd00:1122:3344:10a::c]:32345"}}]},"root":"/pool/ext/30f7d236-c835-46cc-bc27-9099a6826f67/crypt/zone"},{"zone":{"id":"da510a57-3af1-4d2b-b2ed-2e8849f27d8b","zone_type":"oximeter","addresses":["fd00:1122:3344:10a::3"],"dataset":null,"services":[{"id":"da510a57-3af1-4d2b-b2ed-2e8849f27d8b","details":{"type":"oximeter","address":"[fd00:1122:3344:10a::3]:12223"}}]},"root":"/pool/ext/718ad834-b415-4abb-934d-9f987cde0a96/crypt/zone"},{"zone":{"id":"d4d9acc8-3e0b-4fab-a0a2-d21920fabd7e","zone_type":"crucible","addresses":["fd00:1122:3344:10a::6"],"dataset":{"id":"d4d9acc8-3e0b-4fab-a0a2-d21920fabd7e","name":{"pool_name":"oxp_9dfe424f-cba6-4bfb-a3dd-e8bd7fdea57d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::6]:32345"},"services":[{"id":"d4d9acc8-3e0b-4fab-a0a2-d21920fabd7e","details":{"type":"crucible","address":"[fd00:1122:3344:10a::6]:32345"}}]},"root":"/pool/ext/30f7d236-c835-46cc-bc27-9099a6826f67/crypt/zone"},{"zone":{"id":"fcb75972-836b-4f55-ba21-9722832cf5c2","zone_type":"crucible","addresses":["fd00:1122:3344:10a::7"],"dataset":{"id":"fcb75972-836b-4f55-ba21-9722832cf5c2","name":{"pool_name":"oxp_9005671f-3d90-4ed1-be15-ad65b9a65bd5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::7]:32345"},"services":[{"id":"fcb75972-836b-4f55-ba21-9722832cf5c2","details":{"type":"crucible","address":"[fd00:1122:3344:10a::7]:32345"}}]},"root":"/pool/ext/d4c6bdc6-5e99-4f6c-b57a-9bfcb9a76be4/crypt/zone"},{"zone":{"id":"624beba0-7dcd-4d55-af05-4670c6fcb1fb","zone_type":"crucible","addresses":["fd00:1122:3344:10a::4"],"dataset":{"id":"624beba0-7dcd-4d55-af05-4670c6fcb1fb","name":{"pool_name":"oxp_93867156-a43d-4c03-a899-1535e566c8bd","kind":
{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::4]:32345"},"services":[{"id":"624beba0-7dcd-4d55-af05-4670c6fcb1fb","details":{"type":"crucible","address":"[fd00:1122:3344:10a::4]:32345"}}]},"root":"/pool/ext/93867156-a43d-4c03-a899-1535e566c8bd/crypt/zone"},{"zone":{"id":"26fb3830-898e-4086-afaf-8f9654716b8c","zone_type":"crucible","addresses":["fd00:1122:3344:10a::b"],"dataset":{"id":"26fb3830-898e-4086-afaf-8f9654716b8c","name":{"pool_name":"oxp_86c58ea3-1413-4af3-9aff-9c0a3d758459","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::b]:32345"},"services":[{"id":"26fb3830-898e-4086-afaf-8f9654716b8c","details":{"type":"crucible","address":"[fd00:1122:3344:10a::b]:32345"}}]},"root":"/pool/ext/93867156-a43d-4c03-a899-1535e566c8bd/crypt/zone"},{"zone":{"id":"a3ef7eba-c08e-48ef-ae7a-89e2fcb49b66","zone_type":"crucible","addresses":["fd00:1122:3344:10a::a"],"dataset":{"id":"a3ef7eba-c08e-48ef-ae7a-89e2fcb49b66","name":{"pool_name":"oxp_cd3fdbae-a9d9-4db7-866a-bca36f6dd634","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::a]:32345"},"services":[{"id":"a3ef7eba-c08e-48ef-ae7a-89e2fcb49b66","details":{"type":"crucible","address":"[fd00:1122:3344:10a::a]:32345"}}]},"root":"/pool/ext/718ad834-b415-4abb-934d-9f987cde0a96/crypt/zone"},{"zone":{"id":"5c1d4a02-f33b-433a-81f5-5c149e3433bd","zone_type":"crucible","addresses":["fd00:1122:3344:10a::5"],"dataset":{"id":"5c1d4a02-f33b-433a-81f5-5c149e3433bd","name":{"pool_name":"oxp_9adfc865-2eef-4880-a6e3-9d2f88c8efd0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::5]:32345"},"services":[{"id":"5c1d4a02-f33b-433a-81f5-5c149e3433bd","details":{"type":"crucible","address":"[fd00:1122:3344:10a::5]:32345"}}]},"root":"/pool/ext/cd3fdbae-a9d9-4db7-866a-bca36f6dd634/crypt/zone"},{"zone":{"id":"ee77efe9-81d0-4395-a237-15e30c2c2d04","zone_type":"crucible","addresses":["fd00:1122:3344:10a::9"],"dataset":{"id":"ee77efe9-81d0-4395-a237-15e30c2c2d04","name":{"pool_name":"oxp_30f7d236-c835-46cc-bc27-9099a6826f67","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::9]:32345"},"services":[{"id":"ee77efe9-81d0-4395-a237-15e30c2c2d04","details":{"type":"crucible","address":"[fd00:1122:3344:10a::9]:32345"}}]},"root":"/pool/ext/88ee08c6-1c0f-44c2-9110-b8d5a7589ebb/crypt/zone"},{"zone":{"id":"71ab91b7-48d4-4d31-b47e-59f29f419116","zone_type":"ntp","addresses":["fd00:1122:3344:10a::e"],"dataset":null,"services":[{"id":"71ab91b7-48d4-4d31-b47e-59f29f419116","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10a::e]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/cd3fdbae-a9d9-4db7-866a-bca36f6dd634/crypt/zone"},{"zone":{"id":"46ccd8fe-4e3c-4307-97ae-1f7ac505082a","zone_type":"internal_dns","addresses":["fd00:1122:3344:3::1"],"dataset":{"id":"46ccd8fe-4e3c-4307-97ae-1f7ac505082a","name":{"pool_name":"oxp_93867156-a43d-4c03-a899-1535e566c8bd","kind":{"type":"internal_dns"}},"service_address":"[fd00:1122:3344:3::1]:5353"},"services":[{"id":"46ccd8fe-4e3c-4307-97ae-1f7ac505082a","details":{"type":"internal_dns","http_address":"[fd00:1122:3344:3::1]:5353","dns_address":"[fd00:1122:3344:3::1]:53","gz_address":"fd00:1122:3344:3::2","gz_address_index":2}}]},"root":"/pool/ext/9dfe424f-cba6-4bfb-a3dd-e8bd7fdea57d/crypt/zone"}]} \ No newline at end of file diff 
--git a/sled-agent/tests/old-service-ledgers/rack2-sled25.json b/sled-agent/tests/old-service-ledgers/rack2-sled25.json new file mode 100644 index 0000000000..e48ef68faa --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack2-sled25.json @@ -0,0 +1 @@ +{"generation":4,"requests":[{"zone":{"id":"180d466d-eb36-4546-8922-e52c4c076823","zone_type":"crucible","addresses":["fd00:1122:3344:101::5"],"dataset":{"id":"180d466d-eb36-4546-8922-e52c4c076823","name":{"pool_name":"oxp_ac789935-fa42-4d00-8967-df0d96dbb74e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::5]:32345"},"services":[{"id":"180d466d-eb36-4546-8922-e52c4c076823","details":{"type":"crucible","address":"[fd00:1122:3344:101::5]:32345"}}]},"root":"/pool/ext/d732addc-cfe8-4c2c-8028-72eb4481b04e/crypt/zone"},{"zone":{"id":"b5af0303-bc03-40a3-b733-0396d705dfbf","zone_type":"crucible","addresses":["fd00:1122:3344:101::7"],"dataset":{"id":"b5af0303-bc03-40a3-b733-0396d705dfbf","name":{"pool_name":"oxp_d732addc-cfe8-4c2c-8028-72eb4481b04e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::7]:32345"},"services":[{"id":"b5af0303-bc03-40a3-b733-0396d705dfbf","details":{"type":"crucible","address":"[fd00:1122:3344:101::7]:32345"}}]},"root":"/pool/ext/677b0057-3a80-461b-aca8-c2cb501a7278/crypt/zone"},{"zone":{"id":"9c7c805a-f5ed-4e48-86e3-7aa81a718881","zone_type":"crucible","addresses":["fd00:1122:3344:101::c"],"dataset":{"id":"9c7c805a-f5ed-4e48-86e3-7aa81a718881","name":{"pool_name":"oxp_923c930c-80f8-448d-8321-cebfc6c41760","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::c]:32345"},"services":[{"id":"9c7c805a-f5ed-4e48-86e3-7aa81a718881","details":{"type":"crucible","address":"[fd00:1122:3344:101::c]:32345"}}]},"root":"/pool/ext/ac789935-fa42-4d00-8967-df0d96dbb74e/crypt/zone"},{"zone":{"id":"4e49c83c-2d4a-491a-91ac-4ab022026dcf","zone_type":"crucible","addresses":["fd00:1122:3344:101::4"],"dataset":{"id":"4e49c83c-2d4a-491a-91ac-4ab022026dcf","name":{"pool_name":"oxp_c99e6032-1d4f-47d2-9efe-ae2b2479554e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::4]:32345"},"services":[{"id":"4e49c83c-2d4a-491a-91ac-4ab022026dcf","details":{"type":"crucible","address":"[fd00:1122:3344:101::4]:32345"}}]},"root":"/pool/ext/653065d2-ab70-47c9-b832-34238fdc95ef/crypt/zone"},{"zone":{"id":"0e38475e-b8b2-4813-bf80-3c170081081a","zone_type":"crucible","addresses":["fd00:1122:3344:101::d"],"dataset":{"id":"0e38475e-b8b2-4813-bf80-3c170081081a","name":{"pool_name":"oxp_653065d2-ab70-47c9-b832-34238fdc95ef","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::d]:32345"},"services":[{"id":"0e38475e-b8b2-4813-bf80-3c170081081a","details":{"type":"crucible","address":"[fd00:1122:3344:101::d]:32345"}}]},"root":"/pool/ext/4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca/crypt/zone"},{"zone":{"id":"75123e60-1116-4b8d-a466-7302220127da","zone_type":"crucible","addresses":["fd00:1122:3344:101::8"],"dataset":{"id":"75123e60-1116-4b8d-a466-7302220127da","name":{"pool_name":"oxp_c764a8ae-6862-4eec-9db0-cc6ea478e4a7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::8]:32345"},"services":[{"id":"75123e60-1116-4b8d-a466-7302220127da","details":{"type":"crucible","address":"[fd00:1122:3344:101::8]:32345"}}]},"root":"/pool/ext/c764a8ae-6862-4eec-9db0-cc6ea478e4a7/crypt/zone"},{"zone":{"id":"fbd0379c-97fa-49ea-8980-17ae30ffff3c","zone_type":"crucible","addresses":["fd00:1122:3344:101::b"],"dataset":{"id":"fbd0379c-97fa-49ea-8980-17ae30ffff3c","name":{"pool_name":"oxp_fcb0e4c
7-e046-4cf5-ad35-3ad90e1eb90c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::b]:32345"},"services":[{"id":"fbd0379c-97fa-49ea-8980-17ae30ffff3c","details":{"type":"crucible","address":"[fd00:1122:3344:101::b]:32345"}}]},"root":"/pool/ext/4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca/crypt/zone"},{"zone":{"id":"ec635326-cd1d-4f73-b8e6-c3a36a7020db","zone_type":"crucible","addresses":["fd00:1122:3344:101::a"],"dataset":{"id":"ec635326-cd1d-4f73-b8e6-c3a36a7020db","name":{"pool_name":"oxp_6bfb4120-488d-4f3d-90ef-e9bfa523b388","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::a]:32345"},"services":[{"id":"ec635326-cd1d-4f73-b8e6-c3a36a7020db","details":{"type":"crucible","address":"[fd00:1122:3344:101::a]:32345"}}]},"root":"/pool/ext/c99e6032-1d4f-47d2-9efe-ae2b2479554e/crypt/zone"},{"zone":{"id":"f500d564-c40a-4eca-ac8a-a26b435f2037","zone_type":"external_dns","addresses":["fd00:1122:3344:101::3"],"dataset":{"id":"f500d564-c40a-4eca-ac8a-a26b435f2037","name":{"pool_name":"oxp_c99e6032-1d4f-47d2-9efe-ae2b2479554e","kind":{"type":"external_dns"}},"service_address":"[fd00:1122:3344:101::3]:5353"},"services":[{"id":"f500d564-c40a-4eca-ac8a-a26b435f2037","details":{"type":"external_dns","http_address":"[fd00:1122:3344:101::3]:5353","dns_address":"172.20.26.2:53","nic":{"id":"b0b42776-3914-4a69-889f-4831dc72327c","kind":{"type":"service","id":"f500d564-c40a-4eca-ac8a-a26b435f2037"},"name":"external-dns-f500d564-c40a-4eca-ac8a-a26b435f2037","ip":"172.30.1.6","mac":"A8:40:25:FF:D0:B4","subnet":"172.30.1.0/24","vni":100,"primary":true,"slot":0}}}]},"root":"/pool/ext/ac789935-fa42-4d00-8967-df0d96dbb74e/crypt/zone"},{"zone":{"id":"56d4dbcc-3b4a-4ed0-8795-7734aadcc4c0","zone_type":"crucible","addresses":["fd00:1122:3344:101::9"],"dataset":{"id":"56d4dbcc-3b4a-4ed0-8795-7734aadcc4c0","name":{"pool_name":"oxp_4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::9]:32345"},"services":[{"id":"56d4dbcc-3b4a-4ed0-8795-7734aadcc4c0","details":{"type":"crucible","address":"[fd00:1122:3344:101::9]:32345"}}]},"root":"/pool/ext/4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca/crypt/zone"},{"zone":{"id":"0d3a1bd5-f6fe-49cb-807a-190dabc90103","zone_type":"crucible","addresses":["fd00:1122:3344:101::6"],"dataset":{"id":"0d3a1bd5-f6fe-49cb-807a-190dabc90103","name":{"pool_name":"oxp_677b0057-3a80-461b-aca8-c2cb501a7278","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::6]:32345"},"services":[{"id":"0d3a1bd5-f6fe-49cb-807a-190dabc90103","details":{"type":"crucible","address":"[fd00:1122:3344:101::6]:32345"}}]},"root":"/pool/ext/6bfb4120-488d-4f3d-90ef-e9bfa523b388/crypt/zone"},{"zone":{"id":"d34c7184-5d4e-4cb5-8f91-df74a343ffbc","zone_type":"ntp","addresses":["fd00:1122:3344:101::e"],"dataset":null,"services":[{"id":"d34c7184-5d4e-4cb5-8f91-df74a343ffbc","details":{"type":"internal_ntp","address":"[fd00:1122:3344:101::e]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/ac789935-fa42-4d00-8967-df0d96dbb74e/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled8.json b/sled-agent/tests/old-service-ledgers/rack2-sled8.json new file mode 100644 index 0000000000..7d52980d9f --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack2-sled8.json @@ -0,0 
+1 @@ +{"generation":4,"requests":[{"zone":{"id":"7153983f-8fd7-4fb9-92ac-0f07a07798b4","zone_type":"crucible","addresses":["fd00:1122:3344:103::a"],"dataset":{"id":"7153983f-8fd7-4fb9-92ac-0f07a07798b4","name":{"pool_name":"oxp_bf428719-1b16-4503-99f4-ad95846d916f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::a]:32345"},"services":[{"id":"7153983f-8fd7-4fb9-92ac-0f07a07798b4","details":{"type":"crucible","address":"[fd00:1122:3344:103::a]:32345"}}]},"root":"/pool/ext/26e698bb-006d-4208-94b9-d1bc279111fa/crypt/zone"},{"zone":{"id":"7d44ba36-4a69-490a-bc40-f6f90a4208d4","zone_type":"crucible","addresses":["fd00:1122:3344:103::c"],"dataset":{"id":"7d44ba36-4a69-490a-bc40-f6f90a4208d4","name":{"pool_name":"oxp_414e235b-55c3-4dc1-a568-8adf4ea1a052","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::c]:32345"},"services":[{"id":"7d44ba36-4a69-490a-bc40-f6f90a4208d4","details":{"type":"crucible","address":"[fd00:1122:3344:103::c]:32345"}}]},"root":"/pool/ext/cf940e15-dbc5-481b-866a-4de4b018898e/crypt/zone"},{"zone":{"id":"65a11c18-7f59-41ac-b9e7-680627f996e7","zone_type":"nexus","addresses":["fd00:1122:3344:103::3"],"dataset":null,"services":[{"id":"65a11c18-7f59-41ac-b9e7-680627f996e7","details":{"type":"nexus","internal_address":"[fd00:1122:3344:103::3]:12221","external_ip":"172.20.26.3","nic":{"id":"a3e13dde-a2bc-4170-ad84-aad8085b6034","kind":{"type":"service","id":"65a11c18-7f59-41ac-b9e7-680627f996e7"},"name":"nexus-65a11c18-7f59-41ac-b9e7-680627f996e7","ip":"172.30.2.5","mac":"A8:40:25:FF:A6:83","subnet":"172.30.2.0/24","vni":100,"primary":true,"slot":0},"external_tls":true,"external_dns_servers":["1.1.1.1","9.9.9.9"]}}]},"root":"/pool/ext/e126ddcc-8bee-46ba-8199-2a74df0ba040/crypt/zone"},{"zone":{"id":"072fdae8-2adf-4fd2-94ce-e9b0663b91e7","zone_type":"crucible","addresses":["fd00:1122:3344:103::b"],"dataset":{"id":"072fdae8-2adf-4fd2-94ce-e9b0663b91e7","name":{"pool_name":"oxp_26e698bb-006d-4208-94b9-d1bc279111fa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::b]:32345"},"services":[{"id":"072fdae8-2adf-4fd2-94ce-e9b0663b91e7","details":{"type":"crucible","address":"[fd00:1122:3344:103::b]:32345"}}]},"root":"/pool/ext/bf428719-1b16-4503-99f4-ad95846d916f/crypt/zone"},{"zone":{"id":"01f93020-7e7d-4185-93fb-6ca234056c82","zone_type":"crucible","addresses":["fd00:1122:3344:103::5"],"dataset":{"id":"01f93020-7e7d-4185-93fb-6ca234056c82","name":{"pool_name":"oxp_7b24095a-72df-45e3-984f-2b795e052ac7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::5]:32345"},"services":[{"id":"01f93020-7e7d-4185-93fb-6ca234056c82","details":{"type":"crucible","address":"[fd00:1122:3344:103::5]:32345"}}]},"root":"/pool/ext/7b24095a-72df-45e3-984f-2b795e052ac7/crypt/zone"},{"zone":{"id":"e238116d-e5cc-43d4-9c8a-6f138ae8a15d","zone_type":"crucible","addresses":["fd00:1122:3344:103::6"],"dataset":{"id":"e238116d-e5cc-43d4-9c8a-6f138ae8a15d","name":{"pool_name":"oxp_e126ddcc-8bee-46ba-8199-2a74df0ba040","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::6]:32345"},"services":[{"id":"e238116d-e5cc-43d4-9c8a-6f138ae8a15d","details":{"type":"crucible","address":"[fd00:1122:3344:103::6]:32345"}}]},"root":"/pool/ext/7b24095a-72df-45e3-984f-2b795e052ac7/crypt/zone"},{"zone":{"id":"585cd8c5-c41e-4be4-beb8-bfbef9b53856","zone_type":"crucible","addresses":["fd00:1122:3344:103::7"],"dataset":{"id":"585cd8c5-c41e-4be4-beb8-bfbef9b53856","name":{"pool_name":"oxp_6340805e-c5af-418d-8bd1-fc0085667f33","kind":{"type":"crucible"}},"se
rvice_address":"[fd00:1122:3344:103::7]:32345"},"services":[{"id":"585cd8c5-c41e-4be4-beb8-bfbef9b53856","details":{"type":"crucible","address":"[fd00:1122:3344:103::7]:32345"}}]},"root":"/pool/ext/414e235b-55c3-4dc1-a568-8adf4ea1a052/crypt/zone"},{"zone":{"id":"0b41c560-3b20-42f4-82ad-92f5bb575d6b","zone_type":"crucible","addresses":["fd00:1122:3344:103::9"],"dataset":{"id":"0b41c560-3b20-42f4-82ad-92f5bb575d6b","name":{"pool_name":"oxp_b93f880e-c55b-4d6c-9a16-939d84b628fc","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::9]:32345"},"services":[{"id":"0b41c560-3b20-42f4-82ad-92f5bb575d6b","details":{"type":"crucible","address":"[fd00:1122:3344:103::9]:32345"}}]},"root":"/pool/ext/6340805e-c5af-418d-8bd1-fc0085667f33/crypt/zone"},{"zone":{"id":"0ccf27c0-e32d-4b52-a2c5-6db0c64a26f9","zone_type":"crucible","addresses":["fd00:1122:3344:103::d"],"dataset":{"id":"0ccf27c0-e32d-4b52-a2c5-6db0c64a26f9","name":{"pool_name":"oxp_2115b084-be0f-4fba-941b-33a659798a9e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::d]:32345"},"services":[{"id":"0ccf27c0-e32d-4b52-a2c5-6db0c64a26f9","details":{"type":"crucible","address":"[fd00:1122:3344:103::d]:32345"}}]},"root":"/pool/ext/414e235b-55c3-4dc1-a568-8adf4ea1a052/crypt/zone"},{"zone":{"id":"a6ba8273-0320-4dab-b801-281f041b0c50","zone_type":"crucible","addresses":["fd00:1122:3344:103::4"],"dataset":{"id":"a6ba8273-0320-4dab-b801-281f041b0c50","name":{"pool_name":"oxp_8a199f12-4f5c-483a-8aca-f97856658a35","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::4]:32345"},"services":[{"id":"a6ba8273-0320-4dab-b801-281f041b0c50","details":{"type":"crucible","address":"[fd00:1122:3344:103::4]:32345"}}]},"root":"/pool/ext/b93f880e-c55b-4d6c-9a16-939d84b628fc/crypt/zone"},{"zone":{"id":"b9b7b4c2-284a-4ec1-80ea-75b7a43b71c4","zone_type":"crucible","addresses":["fd00:1122:3344:103::8"],"dataset":{"id":"b9b7b4c2-284a-4ec1-80ea-75b7a43b71c4","name":{"pool_name":"oxp_cf940e15-dbc5-481b-866a-4de4b018898e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::8]:32345"},"services":[{"id":"b9b7b4c2-284a-4ec1-80ea-75b7a43b71c4","details":{"type":"crucible","address":"[fd00:1122:3344:103::8]:32345"}}]},"root":"/pool/ext/cf940e15-dbc5-481b-866a-4de4b018898e/crypt/zone"},{"zone":{"id":"7a85d50e-b524-41c1-a052-118027eb77db","zone_type":"ntp","addresses":["fd00:1122:3344:103::e"],"dataset":null,"services":[{"id":"7a85d50e-b524-41c1-a052-118027eb77db","details":{"type":"internal_ntp","address":"[fd00:1122:3344:103::e]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/b93f880e-c55b-4d6c-9a16-939d84b628fc/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack2-sled9.json b/sled-agent/tests/old-service-ledgers/rack2-sled9.json new file mode 100644 index 0000000000..36af68759b --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack2-sled9.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"912346a2-d7e6-427e-b373-e8dcbe4fcea9","zone_type":"crucible","addresses":["fd00:1122:3344:105::5"],"dataset":{"id":"912346a2-d7e6-427e-b373-e8dcbe4fcea9","name":{"pool_name":"oxp_b358fb1e-f52a-4a63-9aab-170225509b37","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::5]:32345"},"services":[{"id":"912346a2-d7e6-427e-b373-e8dcbe4fcea9","details":{"type":"crucible","address":"[fd00:1122:3344:105::5]:32345"}}]},"root":"/pool/ext/0ae29053-29a2-489e-a1e6-6aec0ecd05f8/crypt/zone"},{"zone":{"id":"3d420dff-c616-4c7d-bab1-0f9c2b5396bf","zone_type":"crucible","addresses":["fd00:1122:3344:105::a"],"dataset":{"id":"3d420dff-c616-4c7d-bab1-0f9c2b5396bf","name":{"pool_name":"oxp_4eb2e4eb-41d8-496c-9a5a-687d7e004aa4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::a]:32345"},"services":[{"id":"3d420dff-c616-4c7d-bab1-0f9c2b5396bf","details":{"type":"crucible","address":"[fd00:1122:3344:105::a]:32345"}}]},"root":"/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone"},{"zone":{"id":"9c5d88c9-8ff1-4f23-9438-7b81322eaf68","zone_type":"crucible","addresses":["fd00:1122:3344:105::b"],"dataset":{"id":"9c5d88c9-8ff1-4f23-9438-7b81322eaf68","name":{"pool_name":"oxp_aadf48eb-6ff0-40b5-a092-1fdd06c03e11","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::b]:32345"},"services":[{"id":"9c5d88c9-8ff1-4f23-9438-7b81322eaf68","details":{"type":"crucible","address":"[fd00:1122:3344:105::b]:32345"}}]},"root":"/pool/ext/4358f47f-f21e-4cc8-829e-0c7fc2400a59/crypt/zone"},{"zone":{"id":"f9c1deca-1898-429e-8c93-254c7aa7bae6","zone_type":"crucible","addresses":["fd00:1122:3344:105::8"],"dataset":{"id":"f9c1deca-1898-429e-8c93-254c7aa7bae6","name":{"pool_name":"oxp_d1cb6b7d-2b92-4b7d-8a4d-551987f0277e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::8]:32345"},"services":[{"id":"f9c1deca-1898-429e-8c93-254c7aa7bae6","details":{"type":"crucible","address":"[fd00:1122:3344:105::8]:32345"}}]},"root":"/pool/ext/f8b11629-ced6-412a-9c3f-d169b99ee996/crypt/zone"},{"zone":{"id":"ce8563f3-4a93-45ff-b727-cbfbee6aa413","zone_type":"crucible","addresses":["fd00:1122:3344:105::9"],"dataset":{"id":"ce8563f3-4a93-45ff-b727-cbfbee6aa413","name":{"pool_name":"oxp_4358f47f-f21e-4cc8-829e-0c7fc2400a59","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::9]:32345"},"services":[{"id":"ce8563f3-4a93-45ff-b727-cbfbee6aa413","details":{"type":"crucible","address":"[fd00:1122:3344:105::9]:32345"}}]},"root":"/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone"},{"zone":{"id":"9470ea7d-1920-4b4b-8fca-e7659a1ef733","zone_type":"crucible","addresses":["fd00:1122:3344:105::c"],"dataset":{"id":"9470ea7d-1920-4b4b-8fca-e7659a1ef733","name":{"pool_name":"oxp_17eff217-f0b1-4353-b133-0f68bbd5ceaa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::c]:32345"},"services":[{"id":"9470ea7d-1920-4b4b-8fca-e7659a1ef733","details":{"type":"crucible","address":"[fd00:1122:3344:105::c]:32345"}}]},"root":"/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone"},{"zone":{"id":"375296e5-0a23-466c-b605-4204080f8103","zone_type":"crucible_pantry","addresses":["fd00:1122:3344:105::4"],"dataset":null,"services":[{"id":"375296e5-0a23-466c-b605-4204080f8103","details":{"type":"crucible_pantry","address":"[fd00:1122:3344:105::4]:17000"}}]},"root":"/pool/ext/4eb2e4eb-41d8-496c-9a5a-687d7e004aa4/crypt/zone"},{"zone":{"id":"f9940969-b0e8-4e8c-86c7-4bc49cd15a5f","zone_type":"crucible","addresses":["fd00:1122:3344:105::7"],"da
taset":{"id":"f9940969-b0e8-4e8c-86c7-4bc49cd15a5f","name":{"pool_name":"oxp_f8b11629-ced6-412a-9c3f-d169b99ee996","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::7]:32345"},"services":[{"id":"f9940969-b0e8-4e8c-86c7-4bc49cd15a5f","details":{"type":"crucible","address":"[fd00:1122:3344:105::7]:32345"}}]},"root":"/pool/ext/17eff217-f0b1-4353-b133-0f68bbd5ceaa/crypt/zone"},{"zone":{"id":"23dca27d-c79b-4930-a817-392e8aeaa4c1","zone_type":"crucible","addresses":["fd00:1122:3344:105::e"],"dataset":{"id":"23dca27d-c79b-4930-a817-392e8aeaa4c1","name":{"pool_name":"oxp_57650e05-36ff-4de8-865f-b9562bdb67f5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::e]:32345"},"services":[{"id":"23dca27d-c79b-4930-a817-392e8aeaa4c1","details":{"type":"crucible","address":"[fd00:1122:3344:105::e]:32345"}}]},"root":"/pool/ext/0ae29053-29a2-489e-a1e6-6aec0ecd05f8/crypt/zone"},{"zone":{"id":"92d3e4e9-0768-4772-83c1-23cce52190e9","zone_type":"crucible","addresses":["fd00:1122:3344:105::6"],"dataset":{"id":"92d3e4e9-0768-4772-83c1-23cce52190e9","name":{"pool_name":"oxp_eb1234a5-fdf7-4977-94d5-2eef25ce56a1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::6]:32345"},"services":[{"id":"92d3e4e9-0768-4772-83c1-23cce52190e9","details":{"type":"crucible","address":"[fd00:1122:3344:105::6]:32345"}}]},"root":"/pool/ext/b358fb1e-f52a-4a63-9aab-170225509b37/crypt/zone"},{"zone":{"id":"b3e9fee2-24d2-44e7-8539-a6918e85cf2b","zone_type":"crucible","addresses":["fd00:1122:3344:105::d"],"dataset":{"id":"b3e9fee2-24d2-44e7-8539-a6918e85cf2b","name":{"pool_name":"oxp_0ae29053-29a2-489e-a1e6-6aec0ecd05f8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::d]:32345"},"services":[{"id":"b3e9fee2-24d2-44e7-8539-a6918e85cf2b","details":{"type":"crucible","address":"[fd00:1122:3344:105::d]:32345"}}]},"root":"/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone"},{"zone":{"id":"4c3ef132-ec83-4b1b-9574-7c7d3035f9e9","zone_type":"cockroach_db","addresses":["fd00:1122:3344:105::3"],"dataset":{"id":"4c3ef132-ec83-4b1b-9574-7c7d3035f9e9","name":{"pool_name":"oxp_b358fb1e-f52a-4a63-9aab-170225509b37","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:105::3]:32221"},"services":[{"id":"4c3ef132-ec83-4b1b-9574-7c7d3035f9e9","details":{"type":"cockroach_db","address":"[fd00:1122:3344:105::3]:32221"}}]},"root":"/pool/ext/d1cb6b7d-2b92-4b7d-8a4d-551987f0277e/crypt/zone"},{"zone":{"id":"76b79b96-eaa2-4341-9aba-e77cfc92e0a9","zone_type":"ntp","addresses":["fd00:1122:3344:105::f"],"dataset":null,"services":[{"id":"76b79b96-eaa2-4341-9aba-e77cfc92e0a9","details":{"type":"internal_ntp","address":"[fd00:1122:3344:105::f]:123","ntp_servers":["c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal","6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/0ae29053-29a2-489e-a1e6-6aec0ecd05f8/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled0.json b/sled-agent/tests/old-service-ledgers/rack3-sled0.json new file mode 100644 index 0000000000..a853a525bc --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled0.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"0710ecea-dbc4-417f-a6f7-1b97c3045db1","zone_type":"crucible","addresses":["fd00:1122:3344:116::6"],"dataset":{"id":"0710ecea-dbc4-417f-a6f7-1b97c3045db1","name":{"pool_name":"oxp_d5313ef5-019c-4c47-bc5e-63794107a1bb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::6]:32345"},"services":[{"id":"0710ecea-dbc4-417f-a6f7-1b97c3045db1","details":{"type":"crucible","address":"[fd00:1122:3344:116::6]:32345"}}]},"root":"/pool/ext/904e93a9-d175-4a20-9006-8c1e847aecf7/crypt/zone"},{"zone":{"id":"28b29d14-d55f-4b55-bbc1-f66e46ae3e70","zone_type":"crucible","addresses":["fd00:1122:3344:116::9"],"dataset":{"id":"28b29d14-d55f-4b55-bbc1-f66e46ae3e70","name":{"pool_name":"oxp_60755ffe-e9ee-4619-a751-8b3ea6405e67","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::9]:32345"},"services":[{"id":"28b29d14-d55f-4b55-bbc1-f66e46ae3e70","details":{"type":"crucible","address":"[fd00:1122:3344:116::9]:32345"}}]},"root":"/pool/ext/d5313ef5-019c-4c47-bc5e-63794107a1bb/crypt/zone"},{"zone":{"id":"6f8f9fd2-b139-4069-a7e2-8d40efd58f6c","zone_type":"crucible","addresses":["fd00:1122:3344:116::d"],"dataset":{"id":"6f8f9fd2-b139-4069-a7e2-8d40efd58f6c","name":{"pool_name":"oxp_ccd2cb0b-782f-4026-a160-6d1192f04ca3","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::d]:32345"},"services":[{"id":"6f8f9fd2-b139-4069-a7e2-8d40efd58f6c","details":{"type":"crucible","address":"[fd00:1122:3344:116::d]:32345"}}]},"root":"/pool/ext/d5313ef5-019c-4c47-bc5e-63794107a1bb/crypt/zone"},{"zone":{"id":"450308ad-bf4d-40ff-ba62-f3290f7fffaf","zone_type":"crucible","addresses":["fd00:1122:3344:116::4"],"dataset":{"id":"450308ad-bf4d-40ff-ba62-f3290f7fffaf","name":{"pool_name":"oxp_46b09442-65ba-4d59-9121-9803fe3b724b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::4]:32345"},"services":[{"id":"450308ad-bf4d-40ff-ba62-f3290f7fffaf","details":{"type":"crucible","address":"[fd00:1122:3344:116::4]:32345"}}]},"root":"/pool/ext/54d901cc-f75e-417d-8a9f-24363136d0ef/crypt/zone"},{"zone":{"id":"9a22bbaa-eab4-4a32-8546-9882dc029483","zone_type":"crucible","addresses":["fd00:1122:3344:116::8"],"dataset":{"id":"9a22bbaa-eab4-4a32-8546-9882dc029483","name":{"pool_name":"oxp_93e3f350-75a0-4af0-bdac-baf9b423926f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::8]:32345"},"services":[{"id":"9a22bbaa-eab4-4a32-8546-9882dc029483","details":{"type":"crucible","address":"[fd00:1122:3344:116::8]:32345"}}]},"root":"/pool/ext/d5313ef5-019c-4c47-bc5e-63794107a1bb/crypt/zone"},{"zone":{"id":"63a9dc49-0b5b-4483-95ed-553b545dc202","zone_type":"crucible","addresses":["fd00:1122:3344:116::a"],"dataset":{"id":"63a9dc49-0b5b-4483-95ed-553b545dc202","name":{"pool_name":"oxp_e3532845-76c0-42a9-903b-a07f7992e937","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::a]:32345"},"services":[{"id":"63a9dc49-0b5b-4483-95ed-553b545dc202","details":{"type":"crucible","address":"[fd00:1122:3344:116::a]:32345"}}]},"root":"/pool/ext/60755ffe-e9ee-4619-a751-8b3ea6405e67/crypt/zone"},{"zone":{"id":"1fef5b6c-78e4-4ad9-9973-9d8c78f1e232","zone_type":"crucible","addresses":["fd00:1122:3344:116::7"],"dataset":{"id":"1fef5b6c-78e4-4ad9-9973-9d8c78f1e232","name":{"pool_name":"oxp_54d901cc-f75e-417d-8a9f-24363136d0ef","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::7]:32345"},"services":[{"id":"1fef5b6c-78e4-4ad9-9973-9d8c78f1e232","details":{"type":"crucible","address":"[fd00:1122:3344:116::7]:32345"}}]},"root":"/pool/ext
/90d7b6f9-3e28-48b0-86ac-0486728075cf/crypt/zone"},{"zone":{"id":"b2aab21a-cccd-4aa9-977f-a32090e6eaa7","zone_type":"crucible","addresses":["fd00:1122:3344:116::5"],"dataset":{"id":"b2aab21a-cccd-4aa9-977f-a32090e6eaa7","name":{"pool_name":"oxp_90d7b6f9-3e28-48b0-86ac-0486728075cf","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::5]:32345"},"services":[{"id":"b2aab21a-cccd-4aa9-977f-a32090e6eaa7","details":{"type":"crucible","address":"[fd00:1122:3344:116::5]:32345"}}]},"root":"/pool/ext/46b09442-65ba-4d59-9121-9803fe3b724b/crypt/zone"},{"zone":{"id":"fc1bbf28-24f3-4c1f-b367-2bc8231eb7d4","zone_type":"crucible","addresses":["fd00:1122:3344:116::b"],"dataset":{"id":"fc1bbf28-24f3-4c1f-b367-2bc8231eb7d4","name":{"pool_name":"oxp_0a7bb0d3-408b-42b1-8846-76cf106a9580","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::b]:32345"},"services":[{"id":"fc1bbf28-24f3-4c1f-b367-2bc8231eb7d4","details":{"type":"crucible","address":"[fd00:1122:3344:116::b]:32345"}}]},"root":"/pool/ext/e3532845-76c0-42a9-903b-a07f7992e937/crypt/zone"},{"zone":{"id":"bcb7617a-f76a-4912-8ccc-802d2a697e3c","zone_type":"crucible","addresses":["fd00:1122:3344:116::c"],"dataset":{"id":"bcb7617a-f76a-4912-8ccc-802d2a697e3c","name":{"pool_name":"oxp_904e93a9-d175-4a20-9006-8c1e847aecf7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:116::c]:32345"},"services":[{"id":"bcb7617a-f76a-4912-8ccc-802d2a697e3c","details":{"type":"crucible","address":"[fd00:1122:3344:116::c]:32345"}}]},"root":"/pool/ext/ccd2cb0b-782f-4026-a160-6d1192f04ca3/crypt/zone"},{"zone":{"id":"371fba3a-658b-469b-b675-c90cc0d39254","zone_type":"cockroach_db","addresses":["fd00:1122:3344:116::3"],"dataset":{"id":"371fba3a-658b-469b-b675-c90cc0d39254","name":{"pool_name":"oxp_46b09442-65ba-4d59-9121-9803fe3b724b","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:116::3]:32221"},"services":[{"id":"371fba3a-658b-469b-b675-c90cc0d39254","details":{"type":"cockroach_db","address":"[fd00:1122:3344:116::3]:32221"}}]},"root":"/pool/ext/46b09442-65ba-4d59-9121-9803fe3b724b/crypt/zone"},{"zone":{"id":"5a4d89f5-49e0-4566-a99c-342d1bb26b1c","zone_type":"ntp","addresses":["fd00:1122:3344:116::e"],"dataset":null,"services":[{"id":"5a4d89f5-49e0-4566-a99c-342d1bb26b1c","details":{"type":"internal_ntp","address":"[fd00:1122:3344:116::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/60755ffe-e9ee-4619-a751-8b3ea6405e67/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled1.json b/sled-agent/tests/old-service-ledgers/rack3-sled1.json new file mode 100644 index 0000000000..bd735e5e64 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled1.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"f401d06c-46fc-42f8-aa51-7515a51355ce","zone_type":"crucible","addresses":["fd00:1122:3344:11c::8"],"dataset":{"id":"f401d06c-46fc-42f8-aa51-7515a51355ce","name":{"pool_name":"oxp_8a88768a-2dd5-43b7-bd40-0db77be4d3a8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::8]:32345"},"services":[{"id":"f401d06c-46fc-42f8-aa51-7515a51355ce","details":{"type":"crucible","address":"[fd00:1122:3344:11c::8]:32345"}}]},"root":"/pool/ext/19d23d27-6a33-4203-b8c1-4b0df4ac791f/crypt/zone"},{"zone":{"id":"721c96ea-08d4-4c89-828f-600e7e344916","zone_type":"crucible","addresses":["fd00:1122:3344:11c::6"],"dataset":{"id":"721c96ea-08d4-4c89-828f-600e7e344916","name":{"pool_name":"oxp_15259003-fb04-4547-b4a9-b4511893c0fd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::6]:32345"},"services":[{"id":"721c96ea-08d4-4c89-828f-600e7e344916","details":{"type":"crucible","address":"[fd00:1122:3344:11c::6]:32345"}}]},"root":"/pool/ext/d2a8ed82-22ef-46d8-ad40-e1cb2cecebee/crypt/zone"},{"zone":{"id":"ca17bdf9-51c5-4e1e-b822-856609070ec6","zone_type":"crucible","addresses":["fd00:1122:3344:11c::5"],"dataset":{"id":"ca17bdf9-51c5-4e1e-b822-856609070ec6","name":{"pool_name":"oxp_d2a8ed82-22ef-46d8-ad40-e1cb2cecebee","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::5]:32345"},"services":[{"id":"ca17bdf9-51c5-4e1e-b822-856609070ec6","details":{"type":"crucible","address":"[fd00:1122:3344:11c::5]:32345"}}]},"root":"/pool/ext/15259003-fb04-4547-b4a9-b4511893c0fd/crypt/zone"},{"zone":{"id":"5825447e-1b5b-4960-b202-e75853d3d250","zone_type":"crucible","addresses":["fd00:1122:3344:11c::9"],"dataset":{"id":"5825447e-1b5b-4960-b202-e75853d3d250","name":{"pool_name":"oxp_04e94454-cbd4-4cee-ad69-42372bcbabd5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::9]:32345"},"services":[{"id":"5825447e-1b5b-4960-b202-e75853d3d250","details":{"type":"crucible","address":"[fd00:1122:3344:11c::9]:32345"}}]},"root":"/pool/ext/542e0fb3-552c-4d3b-b853-da1f13b581a0/crypt/zone"},{"zone":{"id":"b937d3f0-1352-47a2-b9d1-a9ccf9c82b16","zone_type":"crucible","addresses":["fd00:1122:3344:11c::c"],"dataset":{"id":"b937d3f0-1352-47a2-b9d1-a9ccf9c82b16","name":{"pool_name":"oxp_542e0fb3-552c-4d3b-b853-da1f13b581a0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::c]:32345"},"services":[{"id":"b937d3f0-1352-47a2-b9d1-a9ccf9c82b16","details":{"type":"crucible","address":"[fd00:1122:3344:11c::c]:32345"}}]},"root":"/pool/ext/eedd1d58-4892-456f-aaf7-9d650c7921ca/crypt/zone"},{"zone":{"id":"d63a677b-8dac-44ee-89a2-cc4cb151254d","zone_type":"crucible","addresses":["fd00:1122:3344:11c::3"],"dataset":{"id":"d63a677b-8dac-44ee-89a2-cc4cb151254d","name":{"pool_name":"oxp_45b5f1ee-7b66-4d74-8364-54fa0c73775f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::3]:32345"},"services":[{"id":"d63a677b-8dac-44ee-89a2-cc4cb151254d","details":{"type":"crucible","address":"[fd00:1122:3344:11c::3]:32345"}}]},"root":"/pool/ext/8a88768a-2dd5-43b7-bd40-0db77be4d3a8/crypt/zone"},{"zone":{"id":"abcb92ea-9f17-4cd8-897b-9d0d1ef7903a","zone_type":"crucible","addresses":["fd00:1122:3344:11c::4"],"dataset":{"id":"abcb92ea-9f17-4cd8-897b-9d0d1ef7903a","name":{"pool_name":"oxp_341d49db-c06a-416d-90e1-b0a3426ed02e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::4]:32345"},"services":[{"id":"abcb92ea-9f17-4cd8-897b-9d0d1ef7903a","details":{"type":"crucible","address":"[fd00:1122:3344:11c::4]:32345"}}]},"root":"/pool/ext
/eedd1d58-4892-456f-aaf7-9d650c7921ca/crypt/zone"},{"zone":{"id":"000ac89d-db07-47ae-83cf-d9cafef013de","zone_type":"crucible","addresses":["fd00:1122:3344:11c::b"],"dataset":{"id":"000ac89d-db07-47ae-83cf-d9cafef013de","name":{"pool_name":"oxp_eedd1d58-4892-456f-aaf7-9d650c7921ca","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::b]:32345"},"services":[{"id":"000ac89d-db07-47ae-83cf-d9cafef013de","details":{"type":"crucible","address":"[fd00:1122:3344:11c::b]:32345"}}]},"root":"/pool/ext/04e94454-cbd4-4cee-ad69-42372bcbabd5/crypt/zone"},{"zone":{"id":"29e1e2e4-695e-4c05-8f0c-c16a0a61d390","zone_type":"crucible","addresses":["fd00:1122:3344:11c::7"],"dataset":{"id":"29e1e2e4-695e-4c05-8f0c-c16a0a61d390","name":{"pool_name":"oxp_19d23d27-6a33-4203-b8c1-4b0df4ac791f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::7]:32345"},"services":[{"id":"29e1e2e4-695e-4c05-8f0c-c16a0a61d390","details":{"type":"crucible","address":"[fd00:1122:3344:11c::7]:32345"}}]},"root":"/pool/ext/d2a8ed82-22ef-46d8-ad40-e1cb2cecebee/crypt/zone"},{"zone":{"id":"9fa7d7be-a6de-4d36-b56b-d1cc5ca7c82c","zone_type":"crucible","addresses":["fd00:1122:3344:11c::a"],"dataset":{"id":"9fa7d7be-a6de-4d36-b56b-d1cc5ca7c82c","name":{"pool_name":"oxp_0fd7a0b1-ed4b-4dc6-8c44-a49c9628c7e1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11c::a]:32345"},"services":[{"id":"9fa7d7be-a6de-4d36-b56b-d1cc5ca7c82c","details":{"type":"crucible","address":"[fd00:1122:3344:11c::a]:32345"}}]},"root":"/pool/ext/d2a8ed82-22ef-46d8-ad40-e1cb2cecebee/crypt/zone"},{"zone":{"id":"249db5f1-45e2-4a5c-a91f-cc51dbd87040","zone_type":"ntp","addresses":["fd00:1122:3344:11c::d"],"dataset":null,"services":[{"id":"249db5f1-45e2-4a5c-a91f-cc51dbd87040","details":{"type":"internal_ntp","address":"[fd00:1122:3344:11c::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/542e0fb3-552c-4d3b-b853-da1f13b581a0/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled11.json b/sled-agent/tests/old-service-ledgers/rack3-sled11.json new file mode 100644 index 0000000000..2918c74c4b --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled11.json @@ -0,0 +1 @@ 
+{"generation":5,"requests":[{"zone":{"id":"7ddd0738-59df-4b67-a41e-7f0de9827187","zone_type":"crucible","addresses":["fd00:1122:3344:11e::4"],"dataset":{"id":"7ddd0738-59df-4b67-a41e-7f0de9827187","name":{"pool_name":"oxp_09af632a-6b1b-4a18-8c91-d392da38b02f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::4]:32345"},"services":[{"id":"7ddd0738-59df-4b67-a41e-7f0de9827187","details":{"type":"crucible","address":"[fd00:1122:3344:11e::4]:32345"}}]},"root":"/pool/ext/09af632a-6b1b-4a18-8c91-d392da38b02f/crypt/zone"},{"zone":{"id":"9706189f-713a-4394-b5dc-45dcf67dc46e","zone_type":"crucible","addresses":["fd00:1122:3344:11e::9"],"dataset":{"id":"9706189f-713a-4394-b5dc-45dcf67dc46e","name":{"pool_name":"oxp_4e1837c8-91ab-4d1d-abfd-f5144d88535e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::9]:32345"},"services":[{"id":"9706189f-713a-4394-b5dc-45dcf67dc46e","details":{"type":"crucible","address":"[fd00:1122:3344:11e::9]:32345"}}]},"root":"/pool/ext/2f0d47cb-28d1-4350-8656-60c6121f773b/crypt/zone"},{"zone":{"id":"7bdd841b-5e34-4c19-9066-b12578651446","zone_type":"crucible","addresses":["fd00:1122:3344:11e::a"],"dataset":{"id":"7bdd841b-5e34-4c19-9066-b12578651446","name":{"pool_name":"oxp_78d1e7f7-8d11-4fed-8b1e-be58908aea2f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::a]:32345"},"services":[{"id":"7bdd841b-5e34-4c19-9066-b12578651446","details":{"type":"crucible","address":"[fd00:1122:3344:11e::a]:32345"}}]},"root":"/pool/ext/62c23f4b-8e7b-4cd8-9055-19c1d8bd5ac8/crypt/zone"},{"zone":{"id":"74c0f60b-de5f-4456-a85f-f992a6e10424","zone_type":"crucible","addresses":["fd00:1122:3344:11e::b"],"dataset":{"id":"74c0f60b-de5f-4456-a85f-f992a6e10424","name":{"pool_name":"oxp_3b81d709-bf10-4dd7-a2c0-759d8acc2da0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::b]:32345"},"services":[{"id":"74c0f60b-de5f-4456-a85f-f992a6e10424","details":{"type":"crucible","address":"[fd00:1122:3344:11e::b]:32345"}}]},"root":"/pool/ext/09af632a-6b1b-4a18-8c91-d392da38b02f/crypt/zone"},{"zone":{"id":"da81ce6f-bd38-440e-b966-8a743092fa21","zone_type":"crucible","addresses":["fd00:1122:3344:11e::6"],"dataset":{"id":"da81ce6f-bd38-440e-b966-8a743092fa21","name":{"pool_name":"oxp_62c23f4b-8e7b-4cd8-9055-19c1d8bd5ac8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::6]:32345"},"services":[{"id":"da81ce6f-bd38-440e-b966-8a743092fa21","details":{"type":"crucible","address":"[fd00:1122:3344:11e::6]:32345"}}]},"root":"/pool/ext/215dd02b-0de6-488a-9e65-5e588cd079fb/crypt/zone"},{"zone":{"id":"febbca37-5279-400f-a2e9-6b5271b2d2fc","zone_type":"crucible","addresses":["fd00:1122:3344:11e::7"],"dataset":{"id":"febbca37-5279-400f-a2e9-6b5271b2d2fc","name":{"pool_name":"oxp_fb33e773-fb93-41a0-8078-b653b9078dda","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::7]:32345"},"services":[{"id":"febbca37-5279-400f-a2e9-6b5271b2d2fc","details":{"type":"crucible","address":"[fd00:1122:3344:11e::7]:32345"}}]},"root":"/pool/ext/2f0d47cb-28d1-4350-8656-60c6121f773b/crypt/zone"},{"zone":{"id":"5100e222-5ea4-4e67-9040-679137e666c8","zone_type":"crucible","addresses":["fd00:1122:3344:11e::5"],"dataset":{"id":"5100e222-5ea4-4e67-9040-679137e666c8","name":{"pool_name":"oxp_23767587-2253-431b-8944-18b9bfefcb3d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::5]:32345"},"services":[{"id":"5100e222-5ea4-4e67-9040-679137e666c8","details":{"type":"crucible","address":"[fd00:1122:3344:11e::5]:32345"}}]},"root":"/pool/ext
/3b81d709-bf10-4dd7-a2c0-759d8acc2da0/crypt/zone"},{"zone":{"id":"c7ec3bc8-08ca-4901-a45e-0d68db72c6a7","zone_type":"crucible","addresses":["fd00:1122:3344:11e::3"],"dataset":{"id":"c7ec3bc8-08ca-4901-a45e-0d68db72c6a7","name":{"pool_name":"oxp_2f0d47cb-28d1-4350-8656-60c6121f773b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::3]:32345"},"services":[{"id":"c7ec3bc8-08ca-4901-a45e-0d68db72c6a7","details":{"type":"crucible","address":"[fd00:1122:3344:11e::3]:32345"}}]},"root":"/pool/ext/215dd02b-0de6-488a-9e65-5e588cd079fb/crypt/zone"},{"zone":{"id":"1fc80dd3-0fd9-4403-96bd-5bbf9eb0f15a","zone_type":"crucible","addresses":["fd00:1122:3344:11e::c"],"dataset":{"id":"1fc80dd3-0fd9-4403-96bd-5bbf9eb0f15a","name":{"pool_name":"oxp_2c932d54-41fb-4ffe-a57f-0479b9e5841e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::c]:32345"},"services":[{"id":"1fc80dd3-0fd9-4403-96bd-5bbf9eb0f15a","details":{"type":"crucible","address":"[fd00:1122:3344:11e::c]:32345"}}]},"root":"/pool/ext/3b81d709-bf10-4dd7-a2c0-759d8acc2da0/crypt/zone"},{"zone":{"id":"4eacc68d-5699-440a-ab33-c75f259e4cc3","zone_type":"crucible","addresses":["fd00:1122:3344:11e::8"],"dataset":{"id":"4eacc68d-5699-440a-ab33-c75f259e4cc3","name":{"pool_name":"oxp_215dd02b-0de6-488a-9e65-5e588cd079fb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11e::8]:32345"},"services":[{"id":"4eacc68d-5699-440a-ab33-c75f259e4cc3","details":{"type":"crucible","address":"[fd00:1122:3344:11e::8]:32345"}}]},"root":"/pool/ext/4e1837c8-91ab-4d1d-abfd-f5144d88535e/crypt/zone"},{"zone":{"id":"cb901d3e-8811-4c4c-a274-a44130501ecf","zone_type":"ntp","addresses":["fd00:1122:3344:11e::d"],"dataset":null,"services":[{"id":"cb901d3e-8811-4c4c-a274-a44130501ecf","details":{"type":"boundary_ntp","address":"[fd00:1122:3344:11e::d]:123","ntp_servers":["time.cloudflare.com"],"dns_servers":["1.1.1.1","8.8.8.8"],"domain":null,"nic":{"id":"bcf9d9eb-b4ba-4fd5-91e0-55a3414ae049","kind":{"type":"service","id":"cb901d3e-8811-4c4c-a274-a44130501ecf"},"name":"ntp-cb901d3e-8811-4c4c-a274-a44130501ecf","ip":"172.30.3.6","mac":"A8:40:25:FF:D5:2F","subnet":"172.30.3.0/24","vni":100,"primary":true,"slot":0},"snat_cfg":{"ip":"45.154.216.39","first_port":16384,"last_port":32767}}}]},"root":"/pool/ext/23767587-2253-431b-8944-18b9bfefcb3d/crypt/zone"},{"zone":{"id":"be4aada9-d160-401d-a630-a0764c039702","zone_type":"internal_dns","addresses":["fd00:1122:3344:2::1"],"dataset":{"id":"be4aada9-d160-401d-a630-a0764c039702","name":{"pool_name":"oxp_2f0d47cb-28d1-4350-8656-60c6121f773b","kind":{"type":"internal_dns"}},"service_address":"[fd00:1122:3344:2::1]:5353"},"services":[{"id":"be4aada9-d160-401d-a630-a0764c039702","details":{"type":"internal_dns","http_address":"[fd00:1122:3344:2::1]:5353","dns_address":"[fd00:1122:3344:2::1]:53","gz_address":"fd00:1122:3344:2::2","gz_address_index":1}}]},"root":"/pool/ext/78d1e7f7-8d11-4fed-8b1e-be58908aea2f/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled12.json b/sled-agent/tests/old-service-ledgers/rack3-sled12.json new file mode 100644 index 0000000000..c81f586e01 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled12.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"d8f1b9d2-fa2e-4f03-bbea-2039448d7792","zone_type":"crucible","addresses":["fd00:1122:3344:112::5"],"dataset":{"id":"d8f1b9d2-fa2e-4f03-bbea-2039448d7792","name":{"pool_name":"oxp_7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::5]:32345"},"services":[{"id":"d8f1b9d2-fa2e-4f03-bbea-2039448d7792","details":{"type":"crucible","address":"[fd00:1122:3344:112::5]:32345"}}]},"root":"/pool/ext/78d9f0ae-8e7f-450e-abc2-76b983efa5cd/crypt/zone"},{"zone":{"id":"2074a935-c0b3-4c4f-aae5-a29adae3e1ac","zone_type":"crucible","addresses":["fd00:1122:3344:112::8"],"dataset":{"id":"2074a935-c0b3-4c4f-aae5-a29adae3e1ac","name":{"pool_name":"oxp_ac663368-45fb-447c-811e-561c68e37bdd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::8]:32345"},"services":[{"id":"2074a935-c0b3-4c4f-aae5-a29adae3e1ac","details":{"type":"crucible","address":"[fd00:1122:3344:112::8]:32345"}}]},"root":"/pool/ext/ac663368-45fb-447c-811e-561c68e37bdd/crypt/zone"},{"zone":{"id":"2885d3c7-ad7d-445c-8630-dc6c81f8caa0","zone_type":"crucible","addresses":["fd00:1122:3344:112::a"],"dataset":{"id":"2885d3c7-ad7d-445c-8630-dc6c81f8caa0","name":{"pool_name":"oxp_8e82e8da-e1c5-4867-bc1c-b5441f9c1010","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::a]:32345"},"services":[{"id":"2885d3c7-ad7d-445c-8630-dc6c81f8caa0","details":{"type":"crucible","address":"[fd00:1122:3344:112::a]:32345"}}]},"root":"/pool/ext/8e82e8da-e1c5-4867-bc1c-b5441f9c1010/crypt/zone"},{"zone":{"id":"1eca241b-6868-4c59-876b-58356654f3b5","zone_type":"crucible","addresses":["fd00:1122:3344:112::c"],"dataset":{"id":"1eca241b-6868-4c59-876b-58356654f3b5","name":{"pool_name":"oxp_fde16c69-aa47-4a15-bb3f-3a5861ae45bd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::c]:32345"},"services":[{"id":"1eca241b-6868-4c59-876b-58356654f3b5","details":{"type":"crucible","address":"[fd00:1122:3344:112::c]:32345"}}]},"root":"/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone"},{"zone":{"id":"cc656f2e-8542-4986-8524-2f55984939c1","zone_type":"crucible","addresses":["fd00:1122:3344:112::d"],"dataset":{"id":"cc656f2e-8542-4986-8524-2f55984939c1","name":{"pool_name":"oxp_21e6d0f9-887e-4d6f-9a00-4cd61139eea6","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::d]:32345"},"services":[{"id":"cc656f2e-8542-4986-8524-2f55984939c1","details":{"type":"crucible","address":"[fd00:1122:3344:112::d]:32345"}}]},"root":"/pool/ext/21e6d0f9-887e-4d6f-9a00-4cd61139eea6/crypt/zone"},{"zone":{"id":"dfb1ebce-a4c7-4b50-9435-9a79b884c1af","zone_type":"clickhouse","addresses":["fd00:1122:3344:112::3"],"dataset":{"id":"dfb1ebce-a4c7-4b50-9435-9a79b884c1af","name":{"pool_name":"oxp_4f045315-de51-46ed-a011-16496615278f","kind":{"type":"clickhouse"}},"service_address":"[fd00:1122:3344:112::3]:8123"},"services":[{"id":"dfb1ebce-a4c7-4b50-9435-9a79b884c1af","details":{"type":"clickhouse","address":"[fd00:1122:3344:112::3]:8123"}}]},"root":"/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone"},{"zone":{"id":"a95d90ed-b2b1-4a5d-8d0d-4195b34bc764","zone_type":"crucible","addresses":["fd00:1122:3344:112::6"],"dataset":{"id":"a95d90ed-b2b1-4a5d-8d0d-4195b34bc764","name":{"pool_name":"oxp_d2c77c69-14d7-442e-8b47-a0d7af5a0e7e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::6]:32345"},"services":[{"id":"a95d90ed-b2b1-4a5d-8d0d-4195b34bc764","details":{"type":"crucible","address":"[fd00:1122:3344:112::6]:32345"}}]},"root":"/pool
/ext/fad56ff1-ad9f-4215-b584-522eab18cf7b/crypt/zone"},{"zone":{"id":"1d3ebc90-d5a5-4cb0-ae90-50bb2163ae13","zone_type":"crucible","addresses":["fd00:1122:3344:112::b"],"dataset":{"id":"1d3ebc90-d5a5-4cb0-ae90-50bb2163ae13","name":{"pool_name":"oxp_fad56ff1-ad9f-4215-b584-522eab18cf7b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::b]:32345"},"services":[{"id":"1d3ebc90-d5a5-4cb0-ae90-50bb2163ae13","details":{"type":"crucible","address":"[fd00:1122:3344:112::b]:32345"}}]},"root":"/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone"},{"zone":{"id":"7af9f38b-0c7a-402e-8db3-7c7fb50b4665","zone_type":"crucible","addresses":["fd00:1122:3344:112::9"],"dataset":{"id":"7af9f38b-0c7a-402e-8db3-7c7fb50b4665","name":{"pool_name":"oxp_d0693580-5c5a-449f-803f-ce7188ebc580","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::9]:32345"},"services":[{"id":"7af9f38b-0c7a-402e-8db3-7c7fb50b4665","details":{"type":"crucible","address":"[fd00:1122:3344:112::9]:32345"}}]},"root":"/pool/ext/d2c77c69-14d7-442e-8b47-a0d7af5a0e7e/crypt/zone"},{"zone":{"id":"94d9bb0a-ecd2-4501-b960-60982f55ad12","zone_type":"crucible","addresses":["fd00:1122:3344:112::7"],"dataset":{"id":"94d9bb0a-ecd2-4501-b960-60982f55ad12","name":{"pool_name":"oxp_78d9f0ae-8e7f-450e-abc2-76b983efa5cd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::7]:32345"},"services":[{"id":"94d9bb0a-ecd2-4501-b960-60982f55ad12","details":{"type":"crucible","address":"[fd00:1122:3344:112::7]:32345"}}]},"root":"/pool/ext/ac663368-45fb-447c-811e-561c68e37bdd/crypt/zone"},{"zone":{"id":"277c1105-576e-4ec1-8e2c-cbae2f5ac9f6","zone_type":"crucible","addresses":["fd00:1122:3344:112::4"],"dataset":{"id":"277c1105-576e-4ec1-8e2c-cbae2f5ac9f6","name":{"pool_name":"oxp_4f045315-de51-46ed-a011-16496615278f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:112::4]:32345"},"services":[{"id":"277c1105-576e-4ec1-8e2c-cbae2f5ac9f6","details":{"type":"crucible","address":"[fd00:1122:3344:112::4]:32345"}}]},"root":"/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone"},{"zone":{"id":"555c3407-a76c-4ea4-a17a-a670d85a59b0","zone_type":"ntp","addresses":["fd00:1122:3344:112::e"],"dataset":null,"services":[{"id":"555c3407-a76c-4ea4-a17a-a670d85a59b0","details":{"type":"internal_ntp","address":"[fd00:1122:3344:112::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/8e82e8da-e1c5-4867-bc1c-b5441f9c1010/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled13.json b/sled-agent/tests/old-service-ledgers/rack3-sled13.json
new file mode 100644
index 0000000000..ab151a828e
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled13.json
@@ -0,0 +1 @@
+{"generation":5,"requests":[{"zone":{"id":"fbcf51c9-a732-4a03-8c19-cfb5b819cb7a","zone_type":"crucible","addresses":["fd00:1122:3344:104::5"],"dataset":{"id":"fbcf51c9-a732-4a03-8c19-cfb5b819cb7a","name":{"pool_name":"oxp_382a2961-cd27-4a9c-901d-468a45ff5708","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::5]:32345"},"services":[{"id":"fbcf51c9-a732-4a03-8c19-cfb5b819cb7a","details":{"type":"crucible","address":"[fd00:1122:3344:104::5]:32345"}}]},"root":"/pool/ext/e99994ae-61ca-4742-a02c-eb0a8a5b69ff/crypt/zone"},{"zone":{"id":"7f8a5026-1f1d-4ab3-8c04-077bfda2f815","zone_type":"crucible","addresses":["fd00:1122:3344:104::4"],"dataset":{"id":"7f8a5026-1f1d-4ab3-8c04-077bfda2f815","name":{"pool_name":"oxp_9c99b9b6-8018-455e-a58a-c048ddd3e11b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::4]:32345"},"services":[{"id":"7f8a5026-1f1d-4ab3-8c04-077bfda2f815","details":{"type":"crucible","address":"[fd00:1122:3344:104::4]:32345"}}]},"root":"/pool/ext/22c79e54-37ef-4ad2-a6cb-a7ee3e4f7167/crypt/zone"},{"zone":{"id":"6d45d856-0e49-4eb7-ad76-989a9ae636a2","zone_type":"crucible","addresses":["fd00:1122:3344:104::3"],"dataset":{"id":"6d45d856-0e49-4eb7-ad76-989a9ae636a2","name":{"pool_name":"oxp_b74a84fa-b4c8-4c5f-92f4-f4e62a0a311d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::3]:32345"},"services":[{"id":"6d45d856-0e49-4eb7-ad76-989a9ae636a2","details":{"type":"crucible","address":"[fd00:1122:3344:104::3]:32345"}}]},"root":"/pool/ext/9c99b9b6-8018-455e-a58a-c048ddd3e11b/crypt/zone"},{"zone":{"id":"c8dc7fff-72c8-49eb-a552-d605f8655134","zone_type":"crucible","addresses":["fd00:1122:3344:104::6"],"dataset":{"id":"c8dc7fff-72c8-49eb-a552-d605f8655134","name":{"pool_name":"oxp_22c79e54-37ef-4ad2-a6cb-a7ee3e4f7167","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::6]:32345"},"services":[{"id":"c8dc7fff-72c8-49eb-a552-d605f8655134","details":{"type":"crucible","address":"[fd00:1122:3344:104::6]:32345"}}]},"root":"/pool/ext/22c79e54-37ef-4ad2-a6cb-a7ee3e4f7167/crypt/zone"},{"zone":{"id":"128a90f5-8889-4665-8343-2c7098f2922c","zone_type":"crucible","addresses":["fd00:1122:3344:104::7"],"dataset":{"id":"128a90f5-8889-4665-8343-2c7098f2922c","name":{"pool_name":"oxp_8b3d0b51-c6a5-4d2c-827a-0d0d1471136d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::7]:32345"},"services":[{"id":"128a90f5-8889-4665-8343-2c7098f2922c","details":{"type":"crucible","address":"[fd00:1122:3344:104::7]:32345"}}]},"root":"/pool/ext/29cd042b-e772-4d26-ac85-ef16009950bd/crypt/zone"},{"zone":{"id":"a72f1878-3b03-4267-9024-5df5ebae69de","zone_type":"crucible","addresses":["fd00:1122:3344:104::a"],"dataset":{"id":"a72f1878-3b03-4267-9024-5df5ebae69de","name":{"pool_name":"oxp_e99994ae-61ca-4742-a02c-eb0a8a5b69ff","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::a]:32345"},"services":[{"id":"a72f1878-3b03-4267-9024-5df5ebae69de","details":{"type":"crucible","address":"[fd00:1122:3344:104::a]:32345"}}]},"root":"/pool/ext/8b3d0b51-c6a5-4d2c-827a-0d0d1471136d/crypt/zone"},{"zone":{"id":"6a9165a2-9b66-485a-aaf0-70d89d60bb6c","zone_type":"crucible","addresses":["fd00:1122:3344:104::b"],"dataset":{"id":"6a9165a2-9b66-485a-aaf0-70d89d60bb6c","name":{"pool_name":"oxp_6a02f05f-e400-4c80-8df8-89aaecb6c12b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::b]:32345"},"services":[{"id":"6a9165a2-9b66-485a-aaf0-70d89d60bb6c","details":{"type":"crucible","address":"[fd00:1122:3344:104::b]:32345"}}]},"root":"/pool/ext
/9c99b9b6-8018-455e-a58a-c048ddd3e11b/crypt/zone"},{"zone":{"id":"9677c4ed-96bc-4dcb-ae74-f7a3e9d2b5e2","zone_type":"crucible","addresses":["fd00:1122:3344:104::c"],"dataset":{"id":"9677c4ed-96bc-4dcb-ae74-f7a3e9d2b5e2","name":{"pool_name":"oxp_7c30978f-ee87-4e53-8fdf-3455e5e851b7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::c]:32345"},"services":[{"id":"9677c4ed-96bc-4dcb-ae74-f7a3e9d2b5e2","details":{"type":"crucible","address":"[fd00:1122:3344:104::c]:32345"}}]},"root":"/pool/ext/29cd042b-e772-4d26-ac85-ef16009950bd/crypt/zone"},{"zone":{"id":"179039e7-3ffd-4b76-9379-bef41d42a5ff","zone_type":"crucible","addresses":["fd00:1122:3344:104::8"],"dataset":{"id":"179039e7-3ffd-4b76-9379-bef41d42a5ff","name":{"pool_name":"oxp_4db7e002-e112-4bfc-a41e-8ae26991b01e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::8]:32345"},"services":[{"id":"179039e7-3ffd-4b76-9379-bef41d42a5ff","details":{"type":"crucible","address":"[fd00:1122:3344:104::8]:32345"}}]},"root":"/pool/ext/8b3d0b51-c6a5-4d2c-827a-0d0d1471136d/crypt/zone"},{"zone":{"id":"6067e31e-b6a3-4114-9e49-0296adc8e7af","zone_type":"crucible","addresses":["fd00:1122:3344:104::9"],"dataset":{"id":"6067e31e-b6a3-4114-9e49-0296adc8e7af","name":{"pool_name":"oxp_29cd042b-e772-4d26-ac85-ef16009950bd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:104::9]:32345"},"services":[{"id":"6067e31e-b6a3-4114-9e49-0296adc8e7af","details":{"type":"crucible","address":"[fd00:1122:3344:104::9]:32345"}}]},"root":"/pool/ext/9c99b9b6-8018-455e-a58a-c048ddd3e11b/crypt/zone"},{"zone":{"id":"440dd615-e11f-4a5d-aeb4-dcf88bb314de","zone_type":"ntp","addresses":["fd00:1122:3344:104::d"],"dataset":null,"services":[{"id":"440dd615-e11f-4a5d-aeb4-dcf88bb314de","details":{"type":"boundary_ntp","address":"[fd00:1122:3344:104::d]:123","ntp_servers":["time.cloudflare.com"],"dns_servers":["1.1.1.1","8.8.8.8"],"domain":null,"nic":{"id":"0b52fe1b-f4cc-43b1-9ac3-4ebb4ab60133","kind":{"type":"service","id":"440dd615-e11f-4a5d-aeb4-dcf88bb314de"},"name":"ntp-440dd615-e11f-4a5d-aeb4-dcf88bb314de","ip":"172.30.3.5","mac":"A8:40:25:FF:85:1E","subnet":"172.30.3.0/24","vni":100,"primary":true,"slot":0},"snat_cfg":{"ip":"45.154.216.38","first_port":0,"last_port":16383}}}]},"root":"/pool/ext/382a2961-cd27-4a9c-901d-468a45ff5708/crypt/zone"},{"zone":{"id":"06e2de03-bd92-404c-a8ea-a13185539d24","zone_type":"internal_dns","addresses":["fd00:1122:3344:1::1"],"dataset":{"id":"06e2de03-bd92-404c-a8ea-a13185539d24","name":{"pool_name":"oxp_b74a84fa-b4c8-4c5f-92f4-f4e62a0a311d","kind":{"type":"internal_dns"}},"service_address":"[fd00:1122:3344:1::1]:5353"},"services":[{"id":"06e2de03-bd92-404c-a8ea-a13185539d24","details":{"type":"internal_dns","http_address":"[fd00:1122:3344:1::1]:5353","dns_address":"[fd00:1122:3344:1::1]:53","gz_address":"fd00:1122:3344:1::2","gz_address_index":0}}]},"root":"/pool/ext/e99994ae-61ca-4742-a02c-eb0a8a5b69ff/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled14.json b/sled-agent/tests/old-service-ledgers/rack3-sled14.json
new file mode 100644
index 0000000000..89c12a015f
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled14.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"ac35afab-a312-43c3-a42d-04b8e99fcbde","zone_type":"crucible","addresses":["fd00:1122:3344:111::4"],"dataset":{"id":"ac35afab-a312-43c3-a42d-04b8e99fcbde","name":{"pool_name":"oxp_6601065c-c172-4118-81b4-16adde7e9401","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::4]:32345"},"services":[{"id":"ac35afab-a312-43c3-a42d-04b8e99fcbde","details":{"type":"crucible","address":"[fd00:1122:3344:111::4]:32345"}}]},"root":"/pool/ext/24d7e250-9fc6-459e-8155-30f8e8ccb28c/crypt/zone"},{"zone":{"id":"6cd94da2-35b9-4683-a931-29ad4a5ed0ef","zone_type":"crucible","addresses":["fd00:1122:3344:111::c"],"dataset":{"id":"6cd94da2-35b9-4683-a931-29ad4a5ed0ef","name":{"pool_name":"oxp_58276eba-a53c-4ef3-b374-4cdcde4d6e12","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::c]:32345"},"services":[{"id":"6cd94da2-35b9-4683-a931-29ad4a5ed0ef","details":{"type":"crucible","address":"[fd00:1122:3344:111::c]:32345"}}]},"root":"/pool/ext/24d7e250-9fc6-459e-8155-30f8e8ccb28c/crypt/zone"},{"zone":{"id":"41f07d39-fcc0-4796-8b7c-7cfcd9135f78","zone_type":"crucible","addresses":["fd00:1122:3344:111::9"],"dataset":{"id":"41f07d39-fcc0-4796-8b7c-7cfcd9135f78","name":{"pool_name":"oxp_4b90abdc-3348-4158-bedc-5bcd56e281d8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::9]:32345"},"services":[{"id":"41f07d39-fcc0-4796-8b7c-7cfcd9135f78","details":{"type":"crucible","address":"[fd00:1122:3344:111::9]:32345"}}]},"root":"/pool/ext/8e955f54-fbef-4021-9eec-457825468813/crypt/zone"},{"zone":{"id":"44c35566-dd64-4e4a-896e-c50aaa3df14f","zone_type":"nexus","addresses":["fd00:1122:3344:111::3"],"dataset":null,"services":[{"id":"44c35566-dd64-4e4a-896e-c50aaa3df14f","details":{"type":"nexus","internal_address":"[fd00:1122:3344:111::3]:12221","external_ip":"45.154.216.37","nic":{"id":"6f824d20-6ce0-4e8b-9ce3-b12dd2b59913","kind":{"type":"service","id":"44c35566-dd64-4e4a-896e-c50aaa3df14f"},"name":"nexus-44c35566-dd64-4e4a-896e-c50aaa3df14f","ip":"172.30.2.7","mac":"A8:40:25:FF:E8:5F","subnet":"172.30.2.0/24","vni":100,"primary":true,"slot":0},"external_tls":true,"external_dns_servers":["1.1.1.1","8.8.8.8"]}}]},"root":"/pool/ext/435d7a1b-2865-4d49-903f-a68f464ade4d/crypt/zone"},{"zone":{"id":"e5020d24-8652-456b-bf92-cd7d255a34c5","zone_type":"crucible","addresses":["fd00:1122:3344:111::6"],"dataset":{"id":"e5020d24-8652-456b-bf92-cd7d255a34c5","name":{"pool_name":"oxp_f6925045-363d-4e18-9bde-ee2987b33d21","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::6]:32345"},"services":[{"id":"e5020d24-8652-456b-bf92-cd7d255a34c5","details":{"type":"crucible","address":"[fd00:1122:3344:111::6]:32345"}}]},"root":"/pool/ext/6601065c-c172-4118-81b4-16adde7e9401/crypt/zone"},{"zone":{"id":"8f25f258-afd7-4351-83e4-24220ec0c251","zone_type":"crucible","addresses":["fd00:1122:3344:111::8"],"dataset":{"id":"8f25f258-afd7-4351-83e4-24220ec0c251","name":{"pool_name":"oxp_8e955f54-fbef-4021-9eec-457825468813","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::8]:32345"},"services":[{"id":"8f25f258-afd7-4351-83e4-24220ec0c251","details":{"type":"crucible","address":"[fd00:1122:3344:111::8]:32345"}}]},"root":"/pool/ext/6601065c-c172-4118-81b4-16adde7e9401/crypt/zone"},{"zone":{"id":"26aa50ec-d70a-47ea-85fc-e55c62a2e0c6","zone_type":"crucible","addresses":["fd00:1122:3344:111::5"],"dataset":{"id":"26aa50ec-d70a-47ea-85fc-e55c62a2e0c6","name":{"pool_name":"oxp_24d7e250-9fc6-459e-8155-30f8e8ccb28c","kind":{"type":"crucible"}},"servic
e_address":"[fd00:1122:3344:111::5]:32345"},"services":[{"id":"26aa50ec-d70a-47ea-85fc-e55c62a2e0c6","details":{"type":"crucible","address":"[fd00:1122:3344:111::5]:32345"}}]},"root":"/pool/ext/435d7a1b-2865-4d49-903f-a68f464ade4d/crypt/zone"},{"zone":{"id":"68dc212f-a96a-420f-8334-b11ee5d7cb95","zone_type":"crucible","addresses":["fd00:1122:3344:111::7"],"dataset":{"id":"68dc212f-a96a-420f-8334-b11ee5d7cb95","name":{"pool_name":"oxp_4353b00b-937e-4d07-aea6-014c57b6f12c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::7]:32345"},"services":[{"id":"68dc212f-a96a-420f-8334-b11ee5d7cb95","details":{"type":"crucible","address":"[fd00:1122:3344:111::7]:32345"}}]},"root":"/pool/ext/24d7e250-9fc6-459e-8155-30f8e8ccb28c/crypt/zone"},{"zone":{"id":"475140fa-a5dc-4ec1-876d-751c48adfc37","zone_type":"crucible","addresses":["fd00:1122:3344:111::a"],"dataset":{"id":"475140fa-a5dc-4ec1-876d-751c48adfc37","name":{"pool_name":"oxp_ee55b053-6874-4e20-86b5-2e105e64c068","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::a]:32345"},"services":[{"id":"475140fa-a5dc-4ec1-876d-751c48adfc37","details":{"type":"crucible","address":"[fd00:1122:3344:111::a]:32345"}}]},"root":"/pool/ext/ee55b053-6874-4e20-86b5-2e105e64c068/crypt/zone"},{"zone":{"id":"09d5a8c9-00db-4914-a2c6-7ae3d2da4558","zone_type":"crucible","addresses":["fd00:1122:3344:111::d"],"dataset":{"id":"09d5a8c9-00db-4914-a2c6-7ae3d2da4558","name":{"pool_name":"oxp_9ab5aba5-47dc-4bc4-8f6d-7cbe0f98a9a2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::d]:32345"},"services":[{"id":"09d5a8c9-00db-4914-a2c6-7ae3d2da4558","details":{"type":"crucible","address":"[fd00:1122:3344:111::d]:32345"}}]},"root":"/pool/ext/8e955f54-fbef-4021-9eec-457825468813/crypt/zone"},{"zone":{"id":"014f6a39-ad64-4f0a-9fef-01ca0d184cbf","zone_type":"crucible","addresses":["fd00:1122:3344:111::b"],"dataset":{"id":"014f6a39-ad64-4f0a-9fef-01ca0d184cbf","name":{"pool_name":"oxp_435d7a1b-2865-4d49-903f-a68f464ade4d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:111::b]:32345"},"services":[{"id":"014f6a39-ad64-4f0a-9fef-01ca0d184cbf","details":{"type":"crucible","address":"[fd00:1122:3344:111::b]:32345"}}]},"root":"/pool/ext/f6925045-363d-4e18-9bde-ee2987b33d21/crypt/zone"},{"zone":{"id":"aceaf348-ba07-4965-a543-63a800826fe8","zone_type":"ntp","addresses":["fd00:1122:3344:111::e"],"dataset":null,"services":[{"id":"aceaf348-ba07-4965-a543-63a800826fe8","details":{"type":"internal_ntp","address":"[fd00:1122:3344:111::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/8e955f54-fbef-4021-9eec-457825468813/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled15.json b/sled-agent/tests/old-service-ledgers/rack3-sled15.json new file mode 100644 index 0000000000..880f29409e --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled15.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"09a9ecee-1e7c-4819-b27a-73bb61099ce7","zone_type":"external_dns","addresses":["fd00:1122:3344:114::3"],"dataset":{"id":"09a9ecee-1e7c-4819-b27a-73bb61099ce7","name":{"pool_name":"oxp_b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e","kind":{"type":"external_dns"}},"service_address":"[fd00:1122:3344:114::3]:5353"},"services":[{"id":"09a9ecee-1e7c-4819-b27a-73bb61099ce7","details":{"type":"external_dns","http_address":"[fd00:1122:3344:114::3]:5353","dns_address":"45.154.216.33:53","nic":{"id":"400ca77b-7fee-47d5-8f17-1f4b9c729f27","kind":{"type":"service","id":"09a9ecee-1e7c-4819-b27a-73bb61099ce7"},"name":"external-dns-09a9ecee-1e7c-4819-b27a-73bb61099ce7","ip":"172.30.1.5","mac":"A8:40:25:FF:B7:C7","subnet":"172.30.1.0/24","vni":100,"primary":true,"slot":0}}}]},"root":"/pool/ext/9e878b1e-bf92-4155-8162-640851c2f5d5/crypt/zone"},{"zone":{"id":"1792e003-55f7-49b8-906c-4160db91bc23","zone_type":"crucible","addresses":["fd00:1122:3344:114::5"],"dataset":{"id":"1792e003-55f7-49b8-906c-4160db91bc23","name":{"pool_name":"oxp_7f3a760f-a4c0-456f-8a22-2d06ecac1022","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::5]:32345"},"services":[{"id":"1792e003-55f7-49b8-906c-4160db91bc23","details":{"type":"crucible","address":"[fd00:1122:3344:114::5]:32345"}}]},"root":"/pool/ext/76f09ad5-c96c-4748-bbe4-71afaea7bc5e/crypt/zone"},{"zone":{"id":"73bc7c0e-1034-449f-8920-4a1f418653ff","zone_type":"crucible","addresses":["fd00:1122:3344:114::8"],"dataset":{"id":"73bc7c0e-1034-449f-8920-4a1f418653ff","name":{"pool_name":"oxp_e87037be-1cdf-4c6e-a8a3-c27b830eaef9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::8]:32345"},"services":[{"id":"73bc7c0e-1034-449f-8920-4a1f418653ff","details":{"type":"crucible","address":"[fd00:1122:3344:114::8]:32345"}}]},"root":"/pool/ext/b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e/crypt/zone"},{"zone":{"id":"06dc6619-6251-4543-9a10-da1698af49d5","zone_type":"crucible","addresses":["fd00:1122:3344:114::9"],"dataset":{"id":"06dc6619-6251-4543-9a10-da1698af49d5","name":{"pool_name":"oxp_ee34c530-ce70-4f1a-8c97-d0ebb77ccfc8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::9]:32345"},"services":[{"id":"06dc6619-6251-4543-9a10-da1698af49d5","details":{"type":"crucible","address":"[fd00:1122:3344:114::9]:32345"}}]},"root":"/pool/ext/9e878b1e-bf92-4155-8162-640851c2f5d5/crypt/zone"},{"zone":{"id":"0d796c52-37ca-490d-b42f-dcc22fe5fd6b","zone_type":"crucible","addresses":["fd00:1122:3344:114::c"],"dataset":{"id":"0d796c52-37ca-490d-b42f-dcc22fe5fd6b","name":{"pool_name":"oxp_9ec2b893-d486-4b24-a077-1a297f9eb15f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::c]:32345"},"services":[{"id":"0d796c52-37ca-490d-b42f-dcc22fe5fd6b","details":{"type":"crucible","address":"[fd00:1122:3344:114::c]:32345"}}]},"root":"/pool/ext/9e72c0e2-4895-4791-b606-2f18e432fb69/crypt/zone"},{"zone":{"id":"91d0011f-de44-4823-bc26-a447affa39bc","zone_type":"crucible","addresses":["fd00:1122:3344:114::a"],"dataset":{"id":"91d0011f-de44-4823-bc26-a447affa39bc","name":{"pool_name":"oxp_85e81a14-031d-4a63-a91f-981c64e91f60","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::a]:32345"},"services":[{"id":"91d0011f-de44-4823-bc26-a447affa39bc","details":{"type":"crucible","address":"[fd00:1122:3344:114::a]:32345"}}]},"root":"/pool/ext/b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e/crypt/zone"},{"zone":{"id":"0c44a2f1-559a-459c-9931-e0e7964d41c6","zone_type":"crucible","addresses":["fd00:1122:3344:114::b"],"dataset":{"id
":"0c44a2f1-559a-459c-9931-e0e7964d41c6","name":{"pool_name":"oxp_76f09ad5-c96c-4748-bbe4-71afaea7bc5e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::b]:32345"},"services":[{"id":"0c44a2f1-559a-459c-9931-e0e7964d41c6","details":{"type":"crucible","address":"[fd00:1122:3344:114::b]:32345"}}]},"root":"/pool/ext/e87037be-1cdf-4c6e-a8a3-c27b830eaef9/crypt/zone"},{"zone":{"id":"ea363819-96f6-4fb6-a203-f18414f1c60e","zone_type":"crucible","addresses":["fd00:1122:3344:114::4"],"dataset":{"id":"ea363819-96f6-4fb6-a203-f18414f1c60e","name":{"pool_name":"oxp_b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::4]:32345"},"services":[{"id":"ea363819-96f6-4fb6-a203-f18414f1c60e","details":{"type":"crucible","address":"[fd00:1122:3344:114::4]:32345"}}]},"root":"/pool/ext/b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e/crypt/zone"},{"zone":{"id":"21592c39-da6b-4527-842e-edeeceffafa1","zone_type":"crucible","addresses":["fd00:1122:3344:114::6"],"dataset":{"id":"21592c39-da6b-4527-842e-edeeceffafa1","name":{"pool_name":"oxp_9e72c0e2-4895-4791-b606-2f18e432fb69","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::6]:32345"},"services":[{"id":"21592c39-da6b-4527-842e-edeeceffafa1","details":{"type":"crucible","address":"[fd00:1122:3344:114::6]:32345"}}]},"root":"/pool/ext/7aff8429-b65d-4a53-a796-7221ac7581a9/crypt/zone"},{"zone":{"id":"f33b1263-f1b2-43a6-a8aa-5f8570dd4e72","zone_type":"crucible","addresses":["fd00:1122:3344:114::7"],"dataset":{"id":"f33b1263-f1b2-43a6-a8aa-5f8570dd4e72","name":{"pool_name":"oxp_9e878b1e-bf92-4155-8162-640851c2f5d5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::7]:32345"},"services":[{"id":"f33b1263-f1b2-43a6-a8aa-5f8570dd4e72","details":{"type":"crucible","address":"[fd00:1122:3344:114::7]:32345"}}]},"root":"/pool/ext/7f3a760f-a4c0-456f-8a22-2d06ecac1022/crypt/zone"},{"zone":{"id":"6f42b469-5a36-4048-a152-e884f7e8a206","zone_type":"crucible","addresses":["fd00:1122:3344:114::d"],"dataset":{"id":"6f42b469-5a36-4048-a152-e884f7e8a206","name":{"pool_name":"oxp_7aff8429-b65d-4a53-a796-7221ac7581a9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:114::d]:32345"},"services":[{"id":"6f42b469-5a36-4048-a152-e884f7e8a206","details":{"type":"crucible","address":"[fd00:1122:3344:114::d]:32345"}}]},"root":"/pool/ext/9e72c0e2-4895-4791-b606-2f18e432fb69/crypt/zone"},{"zone":{"id":"ad77d594-8f78-4d33-a5e4-59887060178e","zone_type":"ntp","addresses":["fd00:1122:3344:114::e"],"dataset":null,"services":[{"id":"ad77d594-8f78-4d33-a5e4-59887060178e","details":{"type":"internal_ntp","address":"[fd00:1122:3344:114::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/85e81a14-031d-4a63-a91f-981c64e91f60/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled16.json b/sled-agent/tests/old-service-ledgers/rack3-sled16.json new file mode 100644 index 0000000000..3a1cbeb411 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled16.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"dcb9a4ae-2c89-4a74-905b-b7936ff49c19","zone_type":"crucible","addresses":["fd00:1122:3344:11f::9"],"dataset":{"id":"dcb9a4ae-2c89-4a74-905b-b7936ff49c19","name":{"pool_name":"oxp_af509039-d27f-4095-bc9d-cecbc5c606db","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::9]:32345"},"services":[{"id":"dcb9a4ae-2c89-4a74-905b-b7936ff49c19","details":{"type":"crucible","address":"[fd00:1122:3344:11f::9]:32345"}}]},"root":"/pool/ext/44ee0fb4-6034-44e8-b3de-b3a44457ffca/crypt/zone"},{"zone":{"id":"dbd46f71-ec39-4b72-a77d-9d281ccb37e0","zone_type":"crucible","addresses":["fd00:1122:3344:11f::b"],"dataset":{"id":"dbd46f71-ec39-4b72-a77d-9d281ccb37e0","name":{"pool_name":"oxp_44ee0fb4-6034-44e8-b3de-b3a44457ffca","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::b]:32345"},"services":[{"id":"dbd46f71-ec39-4b72-a77d-9d281ccb37e0","details":{"type":"crucible","address":"[fd00:1122:3344:11f::b]:32345"}}]},"root":"/pool/ext/5e32c0a3-1210-402b-91fb-256946eeac2b/crypt/zone"},{"zone":{"id":"a1f30569-a5c6-4a6d-922e-241966aea142","zone_type":"crucible","addresses":["fd00:1122:3344:11f::6"],"dataset":{"id":"a1f30569-a5c6-4a6d-922e-241966aea142","name":{"pool_name":"oxp_d2133e8b-51cc-455e-89d0-5454fd4fe109","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::6]:32345"},"services":[{"id":"a1f30569-a5c6-4a6d-922e-241966aea142","details":{"type":"crucible","address":"[fd00:1122:3344:11f::6]:32345"}}]},"root":"/pool/ext/3f57835b-1469-499a-8757-7cc56acc5d49/crypt/zone"},{"zone":{"id":"a33e25ae-4e41-40f4-843d-3d12f62d8cb6","zone_type":"crucible","addresses":["fd00:1122:3344:11f::8"],"dataset":{"id":"a33e25ae-4e41-40f4-843d-3d12f62d8cb6","name":{"pool_name":"oxp_c8e4a7f4-1ae6-4683-8397-ea53475a53e8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::8]:32345"},"services":[{"id":"a33e25ae-4e41-40f4-843d-3d12f62d8cb6","details":{"type":"crucible","address":"[fd00:1122:3344:11f::8]:32345"}}]},"root":"/pool/ext/5e32c0a3-1210-402b-91fb-256946eeac2b/crypt/zone"},{"zone":{"id":"65ed75c2-2d80-4de5-a6f6-adfa6516c7cf","zone_type":"crucible","addresses":["fd00:1122:3344:11f::c"],"dataset":{"id":"65ed75c2-2d80-4de5-a6f6-adfa6516c7cf","name":{"pool_name":"oxp_3f57835b-1469-499a-8757-7cc56acc5d49","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::c]:32345"},"services":[{"id":"65ed75c2-2d80-4de5-a6f6-adfa6516c7cf","details":{"type":"crucible","address":"[fd00:1122:3344:11f::c]:32345"}}]},"root":"/pool/ext/cd8cd75c-632b-4527-889a-7ca0c080fe2c/crypt/zone"},{"zone":{"id":"bc6ccf18-6b9b-4687-8b70-c7917d972ae0","zone_type":"crucible","addresses":["fd00:1122:3344:11f::a"],"dataset":{"id":"bc6ccf18-6b9b-4687-8b70-c7917d972ae0","name":{"pool_name":"oxp_cd8cd75c-632b-4527-889a-7ca0c080fe2c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::a]:32345"},"services":[{"id":"bc6ccf18-6b9b-4687-8b70-c7917d972ae0","details":{"type":"crucible","address":"[fd00:1122:3344:11f::a]:32345"}}]},"root":"/pool/ext/5e32c0a3-1210-402b-91fb-256946eeac2b/crypt/zone"},{"zone":{"id":"06233bfe-a857-4819-aefe-212af9eeb90f","zone_type":"crucible","addresses":["fd00:1122:3344:11f::5"],"dataset":{"id":"06233bfe-a857-4819-aefe-212af9eeb90f","name":{"pool_name":"oxp_c8a1aaf1-d27c-45fd-9f8d-80ac6bf6865d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::5]:32345"},"services":[{"id":"06233bfe-a857-4819-aefe-212af9eeb90f","details":{"type":"crucible","address":"[fd00:1122:3344:11f::5]:32345"}}]},"root":"/pool/ext
/af509039-d27f-4095-bc9d-cecbc5c606db/crypt/zone"},{"zone":{"id":"0bbfef71-9eae-43b6-b5e7-0060ce9269dd","zone_type":"crucible","addresses":["fd00:1122:3344:11f::4"],"dataset":{"id":"0bbfef71-9eae-43b6-b5e7-0060ce9269dd","name":{"pool_name":"oxp_5e32c0a3-1210-402b-91fb-256946eeac2b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::4]:32345"},"services":[{"id":"0bbfef71-9eae-43b6-b5e7-0060ce9269dd","details":{"type":"crucible","address":"[fd00:1122:3344:11f::4]:32345"}}]},"root":"/pool/ext/af509039-d27f-4095-bc9d-cecbc5c606db/crypt/zone"},{"zone":{"id":"550e10ee-24d1-444f-80be-2744dd321e0f","zone_type":"crucible","addresses":["fd00:1122:3344:11f::7"],"dataset":{"id":"550e10ee-24d1-444f-80be-2744dd321e0f","name":{"pool_name":"oxp_f437ce0e-eb45-4be8-b1fe-33ed2656eb01","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11f::7]:32345"},"services":[{"id":"550e10ee-24d1-444f-80be-2744dd321e0f","details":{"type":"crucible","address":"[fd00:1122:3344:11f::7]:32345"}}]},"root":"/pool/ext/44ee0fb4-6034-44e8-b3de-b3a44457ffca/crypt/zone"},{"zone":{"id":"86d768f3-ece2-4956-983f-999bdb23a983","zone_type":"cockroach_db","addresses":["fd00:1122:3344:11f::3"],"dataset":{"id":"86d768f3-ece2-4956-983f-999bdb23a983","name":{"pool_name":"oxp_5e32c0a3-1210-402b-91fb-256946eeac2b","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:11f::3]:32221"},"services":[{"id":"86d768f3-ece2-4956-983f-999bdb23a983","details":{"type":"cockroach_db","address":"[fd00:1122:3344:11f::3]:32221"}}]},"root":"/pool/ext/c8a1aaf1-d27c-45fd-9f8d-80ac6bf6865d/crypt/zone"},{"zone":{"id":"2f358812-f72c-4838-a5ea-7d78d0954be0","zone_type":"ntp","addresses":["fd00:1122:3344:11f::d"],"dataset":null,"services":[{"id":"2f358812-f72c-4838-a5ea-7d78d0954be0","details":{"type":"internal_ntp","address":"[fd00:1122:3344:11f::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/f437ce0e-eb45-4be8-b1fe-33ed2656eb01/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled17.json b/sled-agent/tests/old-service-ledgers/rack3-sled17.json
new file mode 100644
index 0000000000..4063fed2e2
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled17.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"525a19a2-d4ac-418d-bdcf-2ce26e7abe70","zone_type":"crucible","addresses":["fd00:1122:3344:107::a"],"dataset":{"id":"525a19a2-d4ac-418d-bdcf-2ce26e7abe70","name":{"pool_name":"oxp_cb774d2f-ff86-4fd7-866b-17a6b10e61f0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::a]:32345"},"services":[{"id":"525a19a2-d4ac-418d-bdcf-2ce26e7abe70","details":{"type":"crucible","address":"[fd00:1122:3344:107::a]:32345"}}]},"root":"/pool/ext/e17b68b5-f50c-4fc3-b55a-80d284c6c32d/crypt/zone"},{"zone":{"id":"7af188e1-6175-4769-9e4f-2ca7a98b76f6","zone_type":"crucible","addresses":["fd00:1122:3344:107::4"],"dataset":{"id":"7af188e1-6175-4769-9e4f-2ca7a98b76f6","name":{"pool_name":"oxp_0cbbcf22-770d-4e75-9148-e6109b129093","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::4]:32345"},"services":[{"id":"7af188e1-6175-4769-9e4f-2ca7a98b76f6","details":{"type":"crucible","address":"[fd00:1122:3344:107::4]:32345"}}]},"root":"/pool/ext/b998e8df-ea69-4bdd-84cb-b7f17075b060/crypt/zone"},{"zone":{"id":"2544540f-6ffc-46c0-84bf-f42a110c02d7","zone_type":"crucible","addresses":["fd00:1122:3344:107::6"],"dataset":{"id":"2544540f-6ffc-46c0-84bf-f42a110c02d7","name":{"pool_name":"oxp_e17b68b5-f50c-4fc3-b55a-80d284c6c32d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::6]:32345"},"services":[{"id":"2544540f-6ffc-46c0-84bf-f42a110c02d7","details":{"type":"crucible","address":"[fd00:1122:3344:107::6]:32345"}}]},"root":"/pool/ext/521fa477-4d83-49a8-a5cf-c267b7f0c409/crypt/zone"},{"zone":{"id":"cfc20f72-cac2-4681-a6d8-e5a0accafbb7","zone_type":"crucible","addresses":["fd00:1122:3344:107::7"],"dataset":{"id":"cfc20f72-cac2-4681-a6d8-e5a0accafbb7","name":{"pool_name":"oxp_b998e8df-ea69-4bdd-84cb-b7f17075b060","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::7]:32345"},"services":[{"id":"cfc20f72-cac2-4681-a6d8-e5a0accafbb7","details":{"type":"crucible","address":"[fd00:1122:3344:107::7]:32345"}}]},"root":"/pool/ext/0cbbcf22-770d-4e75-9148-e6109b129093/crypt/zone"},{"zone":{"id":"e24be791-5773-425e-a3df-e35ca81570c7","zone_type":"crucible","addresses":["fd00:1122:3344:107::9"],"dataset":{"id":"e24be791-5773-425e-a3df-e35ca81570c7","name":{"pool_name":"oxp_7849c221-dc7f-43ac-ac47-bc51864e083b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::9]:32345"},"services":[{"id":"e24be791-5773-425e-a3df-e35ca81570c7","details":{"type":"crucible","address":"[fd00:1122:3344:107::9]:32345"}}]},"root":"/pool/ext/7849c221-dc7f-43ac-ac47-bc51864e083b/crypt/zone"},{"zone":{"id":"170856ee-21cf-4780-8903-175d558bc7cc","zone_type":"crucible","addresses":["fd00:1122:3344:107::3"],"dataset":{"id":"170856ee-21cf-4780-8903-175d558bc7cc","name":{"pool_name":"oxp_618e21e5-77d4-40ba-9f8e-7960e9ad92e2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::3]:32345"},"services":[{"id":"170856ee-21cf-4780-8903-175d558bc7cc","details":{"type":"crucible","address":"[fd00:1122:3344:107::3]:32345"}}]},"root":"/pool/ext/aa7a37fb-2f03-4d5c-916b-db3a4fc269ac/crypt/zone"},{"zone":{"id":"604278ff-525a-4d41-82ff-07aef3174d38","zone_type":"crucible","addresses":["fd00:1122:3344:107::5"],"dataset":{"id":"604278ff-525a-4d41-82ff-07aef3174d38","name":{"pool_name":"oxp_521fa477-4d83-49a8-a5cf-c267b7f0c409","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::5]:32345"},"services":[{"id":"604278ff-525a-4d41-82ff-07aef3174d38","details":{"type":"crucible","address":"[fd00:1122:3344:107::5]:32345"}}]},"root":"/pool/ext
/0cbbcf22-770d-4e75-9148-e6109b129093/crypt/zone"},{"zone":{"id":"d0d4fcc0-6ed0-410a-99c7-5daf34014421","zone_type":"crucible","addresses":["fd00:1122:3344:107::b"],"dataset":{"id":"d0d4fcc0-6ed0-410a-99c7-5daf34014421","name":{"pool_name":"oxp_aa7a37fb-2f03-4d5c-916b-db3a4fc269ac","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::b]:32345"},"services":[{"id":"d0d4fcc0-6ed0-410a-99c7-5daf34014421","details":{"type":"crucible","address":"[fd00:1122:3344:107::b]:32345"}}]},"root":"/pool/ext/aa7a37fb-2f03-4d5c-916b-db3a4fc269ac/crypt/zone"},{"zone":{"id":"c935df7b-2629-48ee-bc10-20508301905d","zone_type":"crucible","addresses":["fd00:1122:3344:107::c"],"dataset":{"id":"c935df7b-2629-48ee-bc10-20508301905d","name":{"pool_name":"oxp_793fd018-5fdc-4e54-9c45-f8023fa3ea18","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::c]:32345"},"services":[{"id":"c935df7b-2629-48ee-bc10-20508301905d","details":{"type":"crucible","address":"[fd00:1122:3344:107::c]:32345"}}]},"root":"/pool/ext/7849c221-dc7f-43ac-ac47-bc51864e083b/crypt/zone"},{"zone":{"id":"4ba5f3b6-8be5-4a85-bc57-a5e3b0b867d8","zone_type":"crucible","addresses":["fd00:1122:3344:107::8"],"dataset":{"id":"4ba5f3b6-8be5-4a85-bc57-a5e3b0b867d8","name":{"pool_name":"oxp_e80e7996-c572-481e-8c22-61c16c6e47f4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:107::8]:32345"},"services":[{"id":"4ba5f3b6-8be5-4a85-bc57-a5e3b0b867d8","details":{"type":"crucible","address":"[fd00:1122:3344:107::8]:32345"}}]},"root":"/pool/ext/e17b68b5-f50c-4fc3-b55a-80d284c6c32d/crypt/zone"},{"zone":{"id":"395c9d6e-3bd0-445e-9269-46c3260edb83","zone_type":"ntp","addresses":["fd00:1122:3344:107::d"],"dataset":null,"services":[{"id":"395c9d6e-3bd0-445e-9269-46c3260edb83","details":{"type":"internal_ntp","address":"[fd00:1122:3344:107::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/0cbbcf22-770d-4e75-9148-e6109b129093/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled18.json b/sled-agent/tests/old-service-ledgers/rack3-sled18.json
new file mode 100644
index 0000000000..f47e912424
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled18.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"c7096dd4-e429-4a6f-9725-041a77ef2513","zone_type":"crucible","addresses":["fd00:1122:3344:11a::6"],"dataset":{"id":"c7096dd4-e429-4a6f-9725-041a77ef2513","name":{"pool_name":"oxp_dcf62af6-c0f9-4eb5-9b23-9424ef8f3d32","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::6]:32345"},"services":[{"id":"c7096dd4-e429-4a6f-9725-041a77ef2513","details":{"type":"crucible","address":"[fd00:1122:3344:11a::6]:32345"}}]},"root":"/pool/ext/b869e463-c8b9-4c12-a6b9-13175b3896dd/crypt/zone"},{"zone":{"id":"09dd367f-b32f-43f3-aa53-11ccec1cd0c9","zone_type":"crucible","addresses":["fd00:1122:3344:11a::9"],"dataset":{"id":"09dd367f-b32f-43f3-aa53-11ccec1cd0c9","name":{"pool_name":"oxp_d7d00317-42c7-4d1e-a04c-85491fb230cd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::9]:32345"},"services":[{"id":"09dd367f-b32f-43f3-aa53-11ccec1cd0c9","details":{"type":"crucible","address":"[fd00:1122:3344:11a::9]:32345"}}]},"root":"/pool/ext/d7d00317-42c7-4d1e-a04c-85491fb230cd/crypt/zone"},{"zone":{"id":"fb2f85f1-05b3-432f-9bb5-63fb27a762b1","zone_type":"crucible","addresses":["fd00:1122:3344:11a::5"],"dataset":{"id":"fb2f85f1-05b3-432f-9bb5-63fb27a762b1","name":{"pool_name":"oxp_db4a9949-68da-4c1c-9a1c-49083eba14fe","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::5]:32345"},"services":[{"id":"fb2f85f1-05b3-432f-9bb5-63fb27a762b1","details":{"type":"crucible","address":"[fd00:1122:3344:11a::5]:32345"}}]},"root":"/pool/ext/db4a9949-68da-4c1c-9a1c-49083eba14fe/crypt/zone"},{"zone":{"id":"5b89425e-69e4-4305-8f33-dc5768a1849e","zone_type":"crucible","addresses":["fd00:1122:3344:11a::a"],"dataset":{"id":"5b89425e-69e4-4305-8f33-dc5768a1849e","name":{"pool_name":"oxp_64a1bad7-d1b1-4e39-a3f3-9b8d73c4709e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::a]:32345"},"services":[{"id":"5b89425e-69e4-4305-8f33-dc5768a1849e","details":{"type":"crucible","address":"[fd00:1122:3344:11a::a]:32345"}}]},"root":"/pool/ext/64a1bad7-d1b1-4e39-a3f3-9b8d73c4709e/crypt/zone"},{"zone":{"id":"a5156db4-273a-4f8b-b8d8-df77062a6c63","zone_type":"crucible","addresses":["fd00:1122:3344:11a::4"],"dataset":{"id":"a5156db4-273a-4f8b-b8d8-df77062a6c63","name":{"pool_name":"oxp_b869e463-c8b9-4c12-a6b9-13175b3896dd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::4]:32345"},"services":[{"id":"a5156db4-273a-4f8b-b8d8-df77062a6c63","details":{"type":"crucible","address":"[fd00:1122:3344:11a::4]:32345"}}]},"root":"/pool/ext/dcf62af6-c0f9-4eb5-9b23-9424ef8f3d32/crypt/zone"},{"zone":{"id":"1f2d2f86-b69b-4130-bb9b-e62ba0cb6802","zone_type":"crucible","addresses":["fd00:1122:3344:11a::b"],"dataset":{"id":"1f2d2f86-b69b-4130-bb9b-e62ba0cb6802","name":{"pool_name":"oxp_153ffee4-5d7a-4786-ad33-d5567b434fe0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::b]:32345"},"services":[{"id":"1f2d2f86-b69b-4130-bb9b-e62ba0cb6802","details":{"type":"crucible","address":"[fd00:1122:3344:11a::b]:32345"}}]},"root":"/pool/ext/174a067d-1c5a-49f7-a29f-1e62ab1c3796/crypt/zone"},{"zone":{"id":"1e249cc9-52e7-4d66-b713-8ace1392e991","zone_type":"crucible","addresses":["fd00:1122:3344:11a::7"],"dataset":{"id":"1e249cc9-52e7-4d66-b713-8ace1392e991","name":{"pool_name":"oxp_04b6215e-9651-4a3c-ba1b-b8a1e67b3d89","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::7]:32345"},"services":[{"id":"1e249cc9-52e7-4d66-b713-8ace1392e991","details":{"type":"crucible","address":"[fd00:1122:3344:11a::7]:32345"}}]},"root":"/pool/ext
/db4a9949-68da-4c1c-9a1c-49083eba14fe/crypt/zone"},{"zone":{"id":"eb779538-2b1b-4d1d-8c7e-b15f04db6e53","zone_type":"crucible","addresses":["fd00:1122:3344:11a::3"],"dataset":{"id":"eb779538-2b1b-4d1d-8c7e-b15f04db6e53","name":{"pool_name":"oxp_aacb8524-3562-4f97-a616-9023230d6efa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::3]:32345"},"services":[{"id":"eb779538-2b1b-4d1d-8c7e-b15f04db6e53","details":{"type":"crucible","address":"[fd00:1122:3344:11a::3]:32345"}}]},"root":"/pool/ext/174a067d-1c5a-49f7-a29f-1e62ab1c3796/crypt/zone"},{"zone":{"id":"b575d52d-be7d-46af-814b-91e6d18f3464","zone_type":"crucible","addresses":["fd00:1122:3344:11a::8"],"dataset":{"id":"b575d52d-be7d-46af-814b-91e6d18f3464","name":{"pool_name":"oxp_174a067d-1c5a-49f7-a29f-1e62ab1c3796","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::8]:32345"},"services":[{"id":"b575d52d-be7d-46af-814b-91e6d18f3464","details":{"type":"crucible","address":"[fd00:1122:3344:11a::8]:32345"}}]},"root":"/pool/ext/64a1bad7-d1b1-4e39-a3f3-9b8d73c4709e/crypt/zone"},{"zone":{"id":"274200bc-eac7-47d7-8a57-4b7be794caba","zone_type":"crucible","addresses":["fd00:1122:3344:11a::c"],"dataset":{"id":"274200bc-eac7-47d7-8a57-4b7be794caba","name":{"pool_name":"oxp_2e7644e4-7d46-42bf-8e7a-9c3f39085b3f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11a::c]:32345"},"services":[{"id":"274200bc-eac7-47d7-8a57-4b7be794caba","details":{"type":"crucible","address":"[fd00:1122:3344:11a::c]:32345"}}]},"root":"/pool/ext/2e7644e4-7d46-42bf-8e7a-9c3f39085b3f/crypt/zone"},{"zone":{"id":"bc20ba3a-df62-4a62-97c2-75b5653f84b4","zone_type":"ntp","addresses":["fd00:1122:3344:11a::d"],"dataset":null,"services":[{"id":"bc20ba3a-df62-4a62-97c2-75b5653f84b4","details":{"type":"internal_ntp","address":"[fd00:1122:3344:11a::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/04b6215e-9651-4a3c-ba1b-b8a1e67b3d89/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled19.json b/sled-agent/tests/old-service-ledgers/rack3-sled19.json
new file mode 100644
index 0000000000..c450320a73
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled19.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"9c73abb9-edb8-4aa2-835b-c25ebe4466d9","zone_type":"crucible","addresses":["fd00:1122:3344:109::7"],"dataset":{"id":"9c73abb9-edb8-4aa2-835b-c25ebe4466d9","name":{"pool_name":"oxp_b7a3032f-7b8c-4a6a-9fa2-e5773bfdbc94","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::7]:32345"},"services":[{"id":"9c73abb9-edb8-4aa2-835b-c25ebe4466d9","details":{"type":"crucible","address":"[fd00:1122:3344:109::7]:32345"}}]},"root":"/pool/ext/46d21f3d-23be-4361-b5c5-9d0f6ece5b8c/crypt/zone"},{"zone":{"id":"ca576bda-cbdd-4bb9-9d75-ce06d569e926","zone_type":"crucible","addresses":["fd00:1122:3344:109::a"],"dataset":{"id":"ca576bda-cbdd-4bb9-9d75-ce06d569e926","name":{"pool_name":"oxp_863c4bc4-9c7e-453c-99d8-a3d509f49f3e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::a]:32345"},"services":[{"id":"ca576bda-cbdd-4bb9-9d75-ce06d569e926","details":{"type":"crucible","address":"[fd00:1122:3344:109::a]:32345"}}]},"root":"/pool/ext/7e67cb32-0c00-4090-9647-eb7bae75deeb/crypt/zone"},{"zone":{"id":"f010978d-346e-49cd-b265-7607a25685f9","zone_type":"crucible","addresses":["fd00:1122:3344:109::c"],"dataset":{"id":"f010978d-346e-49cd-b265-7607a25685f9","name":{"pool_name":"oxp_9bc1dab8-2d2a-4f92-bdfb-94ebca7881f1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::c]:32345"},"services":[{"id":"f010978d-346e-49cd-b265-7607a25685f9","details":{"type":"crucible","address":"[fd00:1122:3344:109::c]:32345"}}]},"root":"/pool/ext/9bc1dab8-2d2a-4f92-bdfb-94ebca7881f1/crypt/zone"},{"zone":{"id":"daff4162-cc81-4586-a457-91d767b8f1d9","zone_type":"crucible","addresses":["fd00:1122:3344:109::6"],"dataset":{"id":"daff4162-cc81-4586-a457-91d767b8f1d9","name":{"pool_name":"oxp_b9b5b50c-e823-41ae-9585-01b818883521","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::6]:32345"},"services":[{"id":"daff4162-cc81-4586-a457-91d767b8f1d9","details":{"type":"crucible","address":"[fd00:1122:3344:109::6]:32345"}}]},"root":"/pool/ext/de682b18-afaf-4d53-b62e-934f6bd4a1f8/crypt/zone"},{"zone":{"id":"9f300d3d-e698-4cc8-be4c-1f81ac8c927f","zone_type":"crucible","addresses":["fd00:1122:3344:109::d"],"dataset":{"id":"9f300d3d-e698-4cc8-be4c-1f81ac8c927f","name":{"pool_name":"oxp_f1d82c22-ad7d-4cda-9ab0-8f5f496d90ce","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::d]:32345"},"services":[{"id":"9f300d3d-e698-4cc8-be4c-1f81ac8c927f","details":{"type":"crucible","address":"[fd00:1122:3344:109::d]:32345"}}]},"root":"/pool/ext/de682b18-afaf-4d53-b62e-934f6bd4a1f8/crypt/zone"},{"zone":{"id":"8db7c7be-da40-4a1c-9681-4d02606a7eb7","zone_type":"crucible","addresses":["fd00:1122:3344:109::9"],"dataset":{"id":"8db7c7be-da40-4a1c-9681-4d02606a7eb7","name":{"pool_name":"oxp_46d21f3d-23be-4361-b5c5-9d0f6ece5b8c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::9]:32345"},"services":[{"id":"8db7c7be-da40-4a1c-9681-4d02606a7eb7","details":{"type":"crucible","address":"[fd00:1122:3344:109::9]:32345"}}]},"root":"/pool/ext/b7a3032f-7b8c-4a6a-9fa2-e5773bfdbc94/crypt/zone"},{"zone":{"id":"b990911b-805a-4f9d-bd83-e977f5b19a35","zone_type":"crucible","addresses":["fd00:1122:3344:109::4"],"dataset":{"id":"b990911b-805a-4f9d-bd83-e977f5b19a35","name":{"pool_name":"oxp_7e67cb32-0c00-4090-9647-eb7bae75deeb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::4]:32345"},"services":[{"id":"b990911b-805a-4f9d-bd83-e977f5b19a35","details":{"type":"crucible","address":"[fd00:1122:3344:109::4]:32345"}}]},"root":"/pool/ext
/de682b18-afaf-4d53-b62e-934f6bd4a1f8/crypt/zone"},{"zone":{"id":"c99392f5-8f30-41ac-9eeb-12d7f4b707f1","zone_type":"crucible","addresses":["fd00:1122:3344:109::b"],"dataset":{"id":"c99392f5-8f30-41ac-9eeb-12d7f4b707f1","name":{"pool_name":"oxp_de682b18-afaf-4d53-b62e-934f6bd4a1f8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::b]:32345"},"services":[{"id":"c99392f5-8f30-41ac-9eeb-12d7f4b707f1","details":{"type":"crucible","address":"[fd00:1122:3344:109::b]:32345"}}]},"root":"/pool/ext/46d21f3d-23be-4361-b5c5-9d0f6ece5b8c/crypt/zone"},{"zone":{"id":"7f6cb339-9eb1-4866-8a4f-383bad25b36f","zone_type":"crucible","addresses":["fd00:1122:3344:109::5"],"dataset":{"id":"7f6cb339-9eb1-4866-8a4f-383bad25b36f","name":{"pool_name":"oxp_458cbfa3-3752-415d-8a3b-fb64e88468e1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::5]:32345"},"services":[{"id":"7f6cb339-9eb1-4866-8a4f-383bad25b36f","details":{"type":"crucible","address":"[fd00:1122:3344:109::5]:32345"}}]},"root":"/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone"},{"zone":{"id":"11946372-f253-4648-b00c-c7874a7b2888","zone_type":"crucible","addresses":["fd00:1122:3344:109::8"],"dataset":{"id":"11946372-f253-4648-b00c-c7874a7b2888","name":{"pool_name":"oxp_d73332f5-b2a5-46c0-94cf-c5c5712abfe8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:109::8]:32345"},"services":[{"id":"11946372-f253-4648-b00c-c7874a7b2888","details":{"type":"crucible","address":"[fd00:1122:3344:109::8]:32345"}}]},"root":"/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone"},{"zone":{"id":"58ece9e1-387f-4d2f-a42f-69cd34f9f380","zone_type":"cockroach_db","addresses":["fd00:1122:3344:109::3"],"dataset":{"id":"58ece9e1-387f-4d2f-a42f-69cd34f9f380","name":{"pool_name":"oxp_7e67cb32-0c00-4090-9647-eb7bae75deeb","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:109::3]:32221"},"services":[{"id":"58ece9e1-387f-4d2f-a42f-69cd34f9f380","details":{"type":"cockroach_db","address":"[fd00:1122:3344:109::3]:32221"}}]},"root":"/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone"},{"zone":{"id":"f016a25a-deb5-4f20-bdb0-2425c00d41a6","zone_type":"ntp","addresses":["fd00:1122:3344:109::e"],"dataset":null,"services":[{"id":"f016a25a-deb5-4f20-bdb0-2425c00d41a6","details":{"type":"internal_ntp","address":"[fd00:1122:3344:109::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled2.json b/sled-agent/tests/old-service-ledgers/rack3-sled2.json
new file mode 100644
index 0000000000..6c420c989d
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled2.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"dd799dd4-03f9-451d-85e2-844155753a03","zone_type":"crucible","addresses":["fd00:1122:3344:10a::7"],"dataset":{"id":"dd799dd4-03f9-451d-85e2-844155753a03","name":{"pool_name":"oxp_7dcf3acc-bde9-4306-bb46-4c6a6cbbb7ba","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::7]:32345"},"services":[{"id":"dd799dd4-03f9-451d-85e2-844155753a03","details":{"type":"crucible","address":"[fd00:1122:3344:10a::7]:32345"}}]},"root":"/pool/ext/7dcf3acc-bde9-4306-bb46-4c6a6cbbb7ba/crypt/zone"},{"zone":{"id":"dbf9346d-b46d-4402-bb44-92ce20fb5290","zone_type":"crucible","addresses":["fd00:1122:3344:10a::9"],"dataset":{"id":"dbf9346d-b46d-4402-bb44-92ce20fb5290","name":{"pool_name":"oxp_9275d50f-da2c-4f84-9775-598a364309ad","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::9]:32345"},"services":[{"id":"dbf9346d-b46d-4402-bb44-92ce20fb5290","details":{"type":"crucible","address":"[fd00:1122:3344:10a::9]:32345"}}]},"root":"/pool/ext/d83e36ef-dd7a-4cc2-be19-379b1114c031/crypt/zone"},{"zone":{"id":"9a55ebdd-eeef-4954-b0a1-e32b04837f14","zone_type":"crucible","addresses":["fd00:1122:3344:10a::4"],"dataset":{"id":"9a55ebdd-eeef-4954-b0a1-e32b04837f14","name":{"pool_name":"oxp_7f30f77e-5998-4676-a226-b433b5940e77","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::4]:32345"},"services":[{"id":"9a55ebdd-eeef-4954-b0a1-e32b04837f14","details":{"type":"crucible","address":"[fd00:1122:3344:10a::4]:32345"}}]},"root":"/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone"},{"zone":{"id":"bc2935f8-e4fa-4015-968e-f90985533a6a","zone_type":"crucible","addresses":["fd00:1122:3344:10a::6"],"dataset":{"id":"bc2935f8-e4fa-4015-968e-f90985533a6a","name":{"pool_name":"oxp_022c9d58-e91f-480d-bda6-0cf32ce3b1f5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::6]:32345"},"services":[{"id":"bc2935f8-e4fa-4015-968e-f90985533a6a","details":{"type":"crucible","address":"[fd00:1122:3344:10a::6]:32345"}}]},"root":"/pool/ext/c395dcc3-6ece-4b3f-b143-e111a54ef7da/crypt/zone"},{"zone":{"id":"63f8c861-fa1d-4121-92d9-7efa5ef7f5a0","zone_type":"crucible","addresses":["fd00:1122:3344:10a::a"],"dataset":{"id":"63f8c861-fa1d-4121-92d9-7efa5ef7f5a0","name":{"pool_name":"oxp_3c805784-f403-4d01-9eb0-4f77d0821980","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::a]:32345"},"services":[{"id":"63f8c861-fa1d-4121-92d9-7efa5ef7f5a0","details":{"type":"crucible","address":"[fd00:1122:3344:10a::a]:32345"}}]},"root":"/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone"},{"zone":{"id":"4996dcf9-78de-4f69-94fa-c09cc86a8d3c","zone_type":"crucible","addresses":["fd00:1122:3344:10a::b"],"dataset":{"id":"4996dcf9-78de-4f69-94fa-c09cc86a8d3c","name":{"pool_name":"oxp_f9fe9ce6-be0d-4974-bc30-78a8f1330496","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::b]:32345"},"services":[{"id":"4996dcf9-78de-4f69-94fa-c09cc86a8d3c","details":{"type":"crucible","address":"[fd00:1122:3344:10a::b]:32345"}}]},"root":"/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone"},{"zone":{"id":"36b9a4bf-7b30-4fe7-903d-3b722c79fa86","zone_type":"crucible","addresses":["fd00:1122:3344:10a::c"],"dataset":{"id":"36b9a4bf-7b30-4fe7-903d-3b722c79fa86","name":{"pool_name":"oxp_cb1052e0-4c70-4d37-b979-dd55e6a25f08","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::c]:32345"},"services":[{"id":"36b9a4bf-7b30-4fe7-903d-3b722c79fa86","details":{"type":"crucible","address":"[fd00:1122:3344:10a::c]:32345"}}]},"root":"/pool/ext
/3c805784-f403-4d01-9eb0-4f77d0821980/crypt/zone"},{"zone":{"id":"a109a902-6a27-41b6-a881-c353e28e5389","zone_type":"crucible","addresses":["fd00:1122:3344:10a::8"],"dataset":{"id":"a109a902-6a27-41b6-a881-c353e28e5389","name":{"pool_name":"oxp_d83e36ef-dd7a-4cc2-be19-379b1114c031","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::8]:32345"},"services":[{"id":"a109a902-6a27-41b6-a881-c353e28e5389","details":{"type":"crucible","address":"[fd00:1122:3344:10a::8]:32345"}}]},"root":"/pool/ext/d83e36ef-dd7a-4cc2-be19-379b1114c031/crypt/zone"},{"zone":{"id":"d2a9a0bc-ea12-44e3-ac4a-904c76120d11","zone_type":"crucible","addresses":["fd00:1122:3344:10a::3"],"dataset":{"id":"d2a9a0bc-ea12-44e3-ac4a-904c76120d11","name":{"pool_name":"oxp_c395dcc3-6ece-4b3f-b143-e111a54ef7da","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::3]:32345"},"services":[{"id":"d2a9a0bc-ea12-44e3-ac4a-904c76120d11","details":{"type":"crucible","address":"[fd00:1122:3344:10a::3]:32345"}}]},"root":"/pool/ext/9898a289-2f0d-43a6-b053-850f6e784e9a/crypt/zone"},{"zone":{"id":"b3c3e53b-d9ec-4dd8-bd2c-bd811319aa44","zone_type":"crucible","addresses":["fd00:1122:3344:10a::5"],"dataset":{"id":"b3c3e53b-d9ec-4dd8-bd2c-bd811319aa44","name":{"pool_name":"oxp_9898a289-2f0d-43a6-b053-850f6e784e9a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10a::5]:32345"},"services":[{"id":"b3c3e53b-d9ec-4dd8-bd2c-bd811319aa44","details":{"type":"crucible","address":"[fd00:1122:3344:10a::5]:32345"}}]},"root":"/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone"},{"zone":{"id":"7b445d3b-fd25-4538-ac3f-f439c66d1223","zone_type":"ntp","addresses":["fd00:1122:3344:10a::d"],"dataset":null,"services":[{"id":"7b445d3b-fd25-4538-ac3f-f439c66d1223","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10a::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/f9fe9ce6-be0d-4974-bc30-78a8f1330496/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled20.json b/sled-agent/tests/old-service-ledgers/rack3-sled20.json
new file mode 100644
index 0000000000..20c9d60624
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled20.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"4b49e669-264d-4bfb-8ab1-555b520b679c","zone_type":"crucible","addresses":["fd00:1122:3344:108::c"],"dataset":{"id":"4b49e669-264d-4bfb-8ab1-555b520b679c","name":{"pool_name":"oxp_799a1c86-9e1a-4626-91e2-a19f7ff5356e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::c]:32345"},"services":[{"id":"4b49e669-264d-4bfb-8ab1-555b520b679c","details":{"type":"crucible","address":"[fd00:1122:3344:108::c]:32345"}}]},"root":"/pool/ext/d2478613-b7c9-4bd3-856f-1fe8e9c903c2/crypt/zone"},{"zone":{"id":"d802baae-9c3f-437a-85fe-cd72653b6db1","zone_type":"crucible","addresses":["fd00:1122:3344:108::5"],"dataset":{"id":"d802baae-9c3f-437a-85fe-cd72653b6db1","name":{"pool_name":"oxp_d2478613-b7c9-4bd3-856f-1fe8e9c903c2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::5]:32345"},"services":[{"id":"d802baae-9c3f-437a-85fe-cd72653b6db1","details":{"type":"crucible","address":"[fd00:1122:3344:108::5]:32345"}}]},"root":"/pool/ext/116f216c-e151-410f-82bf-8913904cf7b4/crypt/zone"},{"zone":{"id":"e5f69e60-3421-49a4-8c1d-2db8cbb6a5e9","zone_type":"crucible","addresses":["fd00:1122:3344:108::b"],"dataset":{"id":"e5f69e60-3421-49a4-8c1d-2db8cbb6a5e9","name":{"pool_name":"oxp_116f216c-e151-410f-82bf-8913904cf7b4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::b]:32345"},"services":[{"id":"e5f69e60-3421-49a4-8c1d-2db8cbb6a5e9","details":{"type":"crucible","address":"[fd00:1122:3344:108::b]:32345"}}]},"root":"/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone"},{"zone":{"id":"3e598962-ef8c-4cb6-bdfe-ec8563939d6a","zone_type":"crucible","addresses":["fd00:1122:3344:108::4"],"dataset":{"id":"3e598962-ef8c-4cb6-bdfe-ec8563939d6a","name":{"pool_name":"oxp_ababce44-01d1-4c50-b389-f60464c5dde9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::4]:32345"},"services":[{"id":"3e598962-ef8c-4cb6-bdfe-ec8563939d6a","details":{"type":"crucible","address":"[fd00:1122:3344:108::4]:32345"}}]},"root":"/pool/ext/ababce44-01d1-4c50-b389-f60464c5dde9/crypt/zone"},{"zone":{"id":"25355c9f-cc2b-4b24-8eaa-65190f8936a8","zone_type":"crucible","addresses":["fd00:1122:3344:108::d"],"dataset":{"id":"25355c9f-cc2b-4b24-8eaa-65190f8936a8","name":{"pool_name":"oxp_fed46d41-136d-4462-8782-359014efba59","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::d]:32345"},"services":[{"id":"25355c9f-cc2b-4b24-8eaa-65190f8936a8","details":{"type":"crucible","address":"[fd00:1122:3344:108::d]:32345"}}]},"root":"/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone"},{"zone":{"id":"efb2f16c-ebad-4192-b575-dcb4d9b1d5cd","zone_type":"crucible","addresses":["fd00:1122:3344:108::a"],"dataset":{"id":"efb2f16c-ebad-4192-b575-dcb4d9b1d5cd","name":{"pool_name":"oxp_bf509067-0165-456d-98ae-72c86378e626","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::a]:32345"},"services":[{"id":"efb2f16c-ebad-4192-b575-dcb4d9b1d5cd","details":{"type":"crucible","address":"[fd00:1122:3344:108::a]:32345"}}]},"root":"/pool/ext/95220093-e3b8-4f7f-9f5a-cb32cb75180a/crypt/zone"},{"zone":{"id":"89191f0d-4e0b-47fa-9a9e-fbe2a6db1385","zone_type":"crucible","addresses":["fd00:1122:3344:108::8"],"dataset":{"id":"89191f0d-4e0b-47fa-9a9e-fbe2a6db1385","name":{"pool_name":"oxp_eea15142-4635-4e40-b0b4-b0c4f13eca3c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::8]:32345"},"services":[{"id":"89191f0d-4e0b-47fa-9a9e-fbe2a6db1385","details":{"type":"crucible","address":"[fd00:1122:3344:108::8]:32345"}}]},"root":"/pool/ext
/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone"},{"zone":{"id":"e4589324-c528-49c7-9141-35e0a7af6947","zone_type":"crucible","addresses":["fd00:1122:3344:108::6"],"dataset":{"id":"e4589324-c528-49c7-9141-35e0a7af6947","name":{"pool_name":"oxp_95220093-e3b8-4f7f-9f5a-cb32cb75180a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::6]:32345"},"services":[{"id":"e4589324-c528-49c7-9141-35e0a7af6947","details":{"type":"crucible","address":"[fd00:1122:3344:108::6]:32345"}}]},"root":"/pool/ext/ababce44-01d1-4c50-b389-f60464c5dde9/crypt/zone"},{"zone":{"id":"95ebe94d-0e68-421d-9260-c30bd7fe4bd6","zone_type":"nexus","addresses":["fd00:1122:3344:108::3"],"dataset":null,"services":[{"id":"95ebe94d-0e68-421d-9260-c30bd7fe4bd6","details":{"type":"nexus","internal_address":"[fd00:1122:3344:108::3]:12221","external_ip":"45.154.216.35","nic":{"id":"301aa595-f072-4da3-a533-99647b44a66a","kind":{"type":"service","id":"95ebe94d-0e68-421d-9260-c30bd7fe4bd6"},"name":"nexus-95ebe94d-0e68-421d-9260-c30bd7fe4bd6","ip":"172.30.2.5","mac":"A8:40:25:FF:F1:30","subnet":"172.30.2.0/24","vni":100,"primary":true,"slot":0},"external_tls":true,"external_dns_servers":["1.1.1.1","8.8.8.8"]}}]},"root":"/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone"},{"zone":{"id":"4b7a7052-f8e8-4196-8d6b-315943986ce6","zone_type":"crucible","addresses":["fd00:1122:3344:108::7"],"dataset":{"id":"4b7a7052-f8e8-4196-8d6b-315943986ce6","name":{"pool_name":"oxp_a549421c-2f12-45cc-b691-202f0a9bfa8b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::7]:32345"},"services":[{"id":"4b7a7052-f8e8-4196-8d6b-315943986ce6","details":{"type":"crucible","address":"[fd00:1122:3344:108::7]:32345"}}]},"root":"/pool/ext/bf509067-0165-456d-98ae-72c86378e626/crypt/zone"},{"zone":{"id":"71b8ff53-c781-47bb-8ddc-2c7129680542","zone_type":"crucible","addresses":["fd00:1122:3344:108::9"],"dataset":{"id":"71b8ff53-c781-47bb-8ddc-2c7129680542","name":{"pool_name":"oxp_9d19f891-a3d9-4c6e-b1e1-6b0b085a9440","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:108::9]:32345"},"services":[{"id":"71b8ff53-c781-47bb-8ddc-2c7129680542","details":{"type":"crucible","address":"[fd00:1122:3344:108::9]:32345"}}]},"root":"/pool/ext/fed46d41-136d-4462-8782-359014efba59/crypt/zone"},{"zone":{"id":"eaf7bf77-f4c2-4016-9909-4b88a27e9d9a","zone_type":"ntp","addresses":["fd00:1122:3344:108::e"],"dataset":null,"services":[{"id":"eaf7bf77-f4c2-4016-9909-4b88a27e9d9a","details":{"type":"internal_ntp","address":"[fd00:1122:3344:108::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/ababce44-01d1-4c50-b389-f60464c5dde9/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled21.json b/sled-agent/tests/old-service-ledgers/rack3-sled21.json
new file mode 100644
index 0000000000..4f69e01c7f
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled21.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"a91e4af3-5d18-4b08-8cb6-0583db8f8842","zone_type":"crucible","addresses":["fd00:1122:3344:117::a"],"dataset":{"id":"a91e4af3-5d18-4b08-8cb6-0583db8f8842","name":{"pool_name":"oxp_4b2896b8-5f0e-42fb-a474-658b28421e65","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::a]:32345"},"services":[{"id":"a91e4af3-5d18-4b08-8cb6-0583db8f8842","details":{"type":"crucible","address":"[fd00:1122:3344:117::a]:32345"}}]},"root":"/pool/ext/23393ed9-acee-4686-861f-7fc825af1249/crypt/zone"},{"zone":{"id":"1ce74512-ce3a-4125-95f1-12c86e0275d5","zone_type":"crucible","addresses":["fd00:1122:3344:117::8"],"dataset":{"id":"1ce74512-ce3a-4125-95f1-12c86e0275d5","name":{"pool_name":"oxp_46ece76f-ef00-4dd0-9f73-326c63959470","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::8]:32345"},"services":[{"id":"1ce74512-ce3a-4125-95f1-12c86e0275d5","details":{"type":"crucible","address":"[fd00:1122:3344:117::8]:32345"}}]},"root":"/pool/ext/1bd5955e-14a9-463f-adeb-f12bcb45a6c1/crypt/zone"},{"zone":{"id":"fef5d35f-9622-4dee-8635-d26e9f7f6869","zone_type":"crucible","addresses":["fd00:1122:3344:117::4"],"dataset":{"id":"fef5d35f-9622-4dee-8635-d26e9f7f6869","name":{"pool_name":"oxp_e4d7c2e8-016b-4617-afb5-38a2d9c1b508","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::4]:32345"},"services":[{"id":"fef5d35f-9622-4dee-8635-d26e9f7f6869","details":{"type":"crucible","address":"[fd00:1122:3344:117::4]:32345"}}]},"root":"/pool/ext/e372bba3-ef60-466f-b819-a3d5b9acbe77/crypt/zone"},{"zone":{"id":"4f024a31-cd38-4219-8381-9f1af70d1d54","zone_type":"crucible","addresses":["fd00:1122:3344:117::c"],"dataset":{"id":"4f024a31-cd38-4219-8381-9f1af70d1d54","name":{"pool_name":"oxp_7cb2a3c2-9d33-4c6a-af57-669f251cf4cf","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::c]:32345"},"services":[{"id":"4f024a31-cd38-4219-8381-9f1af70d1d54","details":{"type":"crucible","address":"[fd00:1122:3344:117::c]:32345"}}]},"root":"/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone"},{"zone":{"id":"d00e1d0b-e12f-420a-a4df-21e4cac176f6","zone_type":"crucible","addresses":["fd00:1122:3344:117::b"],"dataset":{"id":"d00e1d0b-e12f-420a-a4df-21e4cac176f6","name":{"pool_name":"oxp_e372bba3-ef60-466f-b819-a3d5b9acbe77","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::b]:32345"},"services":[{"id":"d00e1d0b-e12f-420a-a4df-21e4cac176f6","details":{"type":"crucible","address":"[fd00:1122:3344:117::b]:32345"}}]},"root":"/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone"},{"zone":{"id":"1598058a-6064-449e-b39c-1e3d345ed793","zone_type":"crucible","addresses":["fd00:1122:3344:117::5"],"dataset":{"id":"1598058a-6064-449e-b39c-1e3d345ed793","name":{"pool_name":"oxp_022a8d67-1e00-49f3-81ed-a0a1bc187cfa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::5]:32345"},"services":[{"id":"1598058a-6064-449e-b39c-1e3d345ed793","details":{"type":"crucible","address":"[fd00:1122:3344:117::5]:32345"}}]},"root":"/pool/ext/022a8d67-1e00-49f3-81ed-a0a1bc187cfa/crypt/zone"},{"zone":{"id":"c723c4b8-3031-4b25-8c16-fe08bc0b5f00","zone_type":"crucible","addresses":["fd00:1122:3344:117::7"],"dataset":{"id":"c723c4b8-3031-4b25-8c16-fe08bc0b5f00","name":{"pool_name":"oxp_23393ed9-acee-4686-861f-7fc825af1249","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::7]:32345"},"services":[{"id":"c723c4b8-3031-4b25-8c16-fe08bc0b5f00","details":{"type":"crucible","address":"[fd00:1122:3344:117::7]:32345"}}]},"root":"/pool/ext
/1bd5955e-14a9-463f-adeb-f12bcb45a6c1/crypt/zone"},{"zone":{"id":"7751b307-888f-46c8-8787-75d2f3fdaef3","zone_type":"crucible","addresses":["fd00:1122:3344:117::9"],"dataset":{"id":"7751b307-888f-46c8-8787-75d2f3fdaef3","name":{"pool_name":"oxp_e54e53d4-f68f-4b19-b8c1-9d5ab42e51c1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::9]:32345"},"services":[{"id":"7751b307-888f-46c8-8787-75d2f3fdaef3","details":{"type":"crucible","address":"[fd00:1122:3344:117::9]:32345"}}]},"root":"/pool/ext/e372bba3-ef60-466f-b819-a3d5b9acbe77/crypt/zone"},{"zone":{"id":"89413ff1-d5de-4931-8389-e84e7ea321af","zone_type":"crucible","addresses":["fd00:1122:3344:117::6"],"dataset":{"id":"89413ff1-d5de-4931-8389-e84e7ea321af","name":{"pool_name":"oxp_1bd5955e-14a9-463f-adeb-f12bcb45a6c1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::6]:32345"},"services":[{"id":"89413ff1-d5de-4931-8389-e84e7ea321af","details":{"type":"crucible","address":"[fd00:1122:3344:117::6]:32345"}}]},"root":"/pool/ext/1bd5955e-14a9-463f-adeb-f12bcb45a6c1/crypt/zone"},{"zone":{"id":"287b0b24-72aa-41b5-a597-8523d84225ef","zone_type":"crucible","addresses":["fd00:1122:3344:117::3"],"dataset":{"id":"287b0b24-72aa-41b5-a597-8523d84225ef","name":{"pool_name":"oxp_cfbd185d-e185-4aaa-a598-9216124ceec4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:117::3]:32345"},"services":[{"id":"287b0b24-72aa-41b5-a597-8523d84225ef","details":{"type":"crucible","address":"[fd00:1122:3344:117::3]:32345"}}]},"root":"/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone"},{"zone":{"id":"4728253e-c534-4a5b-b707-c64ac9a8eb8c","zone_type":"ntp","addresses":["fd00:1122:3344:117::d"],"dataset":null,"services":[{"id":"4728253e-c534-4a5b-b707-c64ac9a8eb8c","details":{"type":"internal_ntp","address":"[fd00:1122:3344:117::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled22.json b/sled-agent/tests/old-service-ledgers/rack3-sled22.json
new file mode 100644
index 0000000000..dc98c0390c
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled22.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"49f20cd1-a8a3-4fa8-9209-59da60cd8f9b","zone_type":"crucible","addresses":["fd00:1122:3344:103::5"],"dataset":{"id":"49f20cd1-a8a3-4fa8-9209-59da60cd8f9b","name":{"pool_name":"oxp_13a9ef4a-f33a-4781-8f83-712c07a79b1f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::5]:32345"},"services":[{"id":"49f20cd1-a8a3-4fa8-9209-59da60cd8f9b","details":{"type":"crucible","address":"[fd00:1122:3344:103::5]:32345"}}]},"root":"/pool/ext/711eff4e-736c-478e-83aa-ae86f5efbf1d/crypt/zone"},{"zone":{"id":"896fd564-f94e-496b-9fcf-ddfbfcfac9f7","zone_type":"crucible","addresses":["fd00:1122:3344:103::c"],"dataset":{"id":"896fd564-f94e-496b-9fcf-ddfbfcfac9f7","name":{"pool_name":"oxp_0944c0a2-0fb7-4f51-bced-52cc257cd2f6","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::c]:32345"},"services":[{"id":"896fd564-f94e-496b-9fcf-ddfbfcfac9f7","details":{"type":"crucible","address":"[fd00:1122:3344:103::c]:32345"}}]},"root":"/pool/ext/bc54d8c5-955d-429d-84e0-a20a4e5e27a3/crypt/zone"},{"zone":{"id":"911fb8b3-05c2-4af7-8974-6c74a61d94ad","zone_type":"crucible","addresses":["fd00:1122:3344:103::9"],"dataset":{"id":"911fb8b3-05c2-4af7-8974-6c74a61d94ad","name":{"pool_name":"oxp_29f59fce-a867-4571-9d2e-b03fa5c13510","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::9]:32345"},"services":[{"id":"911fb8b3-05c2-4af7-8974-6c74a61d94ad","details":{"type":"crucible","address":"[fd00:1122:3344:103::9]:32345"}}]},"root":"/pool/ext/711eff4e-736c-478e-83aa-ae86f5efbf1d/crypt/zone"},{"zone":{"id":"682b34db-0b06-4770-a8fe-74437cf184d6","zone_type":"crucible","addresses":["fd00:1122:3344:103::6"],"dataset":{"id":"682b34db-0b06-4770-a8fe-74437cf184d6","name":{"pool_name":"oxp_094d11d2-8049-4138-bcf4-562f5f8e77c0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::6]:32345"},"services":[{"id":"682b34db-0b06-4770-a8fe-74437cf184d6","details":{"type":"crucible","address":"[fd00:1122:3344:103::6]:32345"}}]},"root":"/pool/ext/0944c0a2-0fb7-4f51-bced-52cc257cd2f6/crypt/zone"},{"zone":{"id":"d8d20365-ecd3-4fd5-9495-c0670e3bd5d9","zone_type":"crucible","addresses":["fd00:1122:3344:103::a"],"dataset":{"id":"d8d20365-ecd3-4fd5-9495-c0670e3bd5d9","name":{"pool_name":"oxp_fb97ff7b-0225-400c-a137-3b38a786c0a0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::a]:32345"},"services":[{"id":"d8d20365-ecd3-4fd5-9495-c0670e3bd5d9","details":{"type":"crucible","address":"[fd00:1122:3344:103::a]:32345"}}]},"root":"/pool/ext/094d11d2-8049-4138-bcf4-562f5f8e77c0/crypt/zone"},{"zone":{"id":"673620b6-44d9-4310-8e17-3024ac84e708","zone_type":"crucible","addresses":["fd00:1122:3344:103::7"],"dataset":{"id":"673620b6-44d9-4310-8e17-3024ac84e708","name":{"pool_name":"oxp_711eff4e-736c-478e-83aa-ae86f5efbf1d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::7]:32345"},"services":[{"id":"673620b6-44d9-4310-8e17-3024ac84e708","details":{"type":"crucible","address":"[fd00:1122:3344:103::7]:32345"}}]},"root":"/pool/ext/fb97ff7b-0225-400c-a137-3b38a786c0a0/crypt/zone"},{"zone":{"id":"bf6dfc04-4d4c-41b6-a011-40ffc3bc5080","zone_type":"crucible","addresses":["fd00:1122:3344:103::8"],"dataset":{"id":"bf6dfc04-4d4c-41b6-a011-40ffc3bc5080","name":{"pool_name":"oxp_f815f1b6-48ef-436d-8768-eb08227e2386","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::8]:32345"},"services":[{"id":"bf6dfc04-4d4c-41b6-a011-40ffc3bc5080","details":{"type":"crucible","address":"[fd00:1122:3344:103::8]:32345"}}]},"root":"/pool/ext
/13a9ef4a-f33a-4781-8f83-712c07a79b1f/crypt/zone"},{"zone":{"id":"ac8a82a8-fb6f-4635-a9a9-d98617eab390","zone_type":"crucible","addresses":["fd00:1122:3344:103::3"],"dataset":{"id":"ac8a82a8-fb6f-4635-a9a9-d98617eab390","name":{"pool_name":"oxp_97d6c860-4e2f-496e-974b-2e293fee6af9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::3]:32345"},"services":[{"id":"ac8a82a8-fb6f-4635-a9a9-d98617eab390","details":{"type":"crucible","address":"[fd00:1122:3344:103::3]:32345"}}]},"root":"/pool/ext/0944c0a2-0fb7-4f51-bced-52cc257cd2f6/crypt/zone"},{"zone":{"id":"4ed66558-4815-4b85-9b94-9edf3ee69ead","zone_type":"crucible","addresses":["fd00:1122:3344:103::4"],"dataset":{"id":"4ed66558-4815-4b85-9b94-9edf3ee69ead","name":{"pool_name":"oxp_bc54d8c5-955d-429d-84e0-a20a4e5e27a3","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::4]:32345"},"services":[{"id":"4ed66558-4815-4b85-9b94-9edf3ee69ead","details":{"type":"crucible","address":"[fd00:1122:3344:103::4]:32345"}}]},"root":"/pool/ext/13a9ef4a-f33a-4781-8f83-712c07a79b1f/crypt/zone"},{"zone":{"id":"8a71c6ee-b08d-4c3d-b13c-c9cebc4c328a","zone_type":"crucible","addresses":["fd00:1122:3344:103::b"],"dataset":{"id":"8a71c6ee-b08d-4c3d-b13c-c9cebc4c328a","name":{"pool_name":"oxp_2bdfa429-09bd-4fa1-aa20-eea99f0d2b85","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:103::b]:32345"},"services":[{"id":"8a71c6ee-b08d-4c3d-b13c-c9cebc4c328a","details":{"type":"crucible","address":"[fd00:1122:3344:103::b]:32345"}}]},"root":"/pool/ext/29f59fce-a867-4571-9d2e-b03fa5c13510/crypt/zone"},{"zone":{"id":"7e6b8962-7a1e-4d7b-b7ea-49e64a51d98d","zone_type":"ntp","addresses":["fd00:1122:3344:103::d"],"dataset":null,"services":[{"id":"7e6b8962-7a1e-4d7b-b7ea-49e64a51d98d","details":{"type":"internal_ntp","address":"[fd00:1122:3344:103::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/2bdfa429-09bd-4fa1-aa20-eea99f0d2b85/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled23.json b/sled-agent/tests/old-service-ledgers/rack3-sled23.json
new file mode 100644
index 0000000000..ade2144287
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled23.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"6b7e931d-4b91-4dc6-9a7b-4c19ac669e5d","zone_type":"crucible","addresses":["fd00:1122:3344:105::4"],"dataset":{"id":"6b7e931d-4b91-4dc6-9a7b-4c19ac669e5d","name":{"pool_name":"oxp_24dab7f5-164a-47f3-a878-f32ab1e68cce","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::4]:32345"},"services":[{"id":"6b7e931d-4b91-4dc6-9a7b-4c19ac669e5d","details":{"type":"crucible","address":"[fd00:1122:3344:105::4]:32345"}}]},"root":"/pool/ext/ad493851-2d11-4c2d-8d75-989579d9616a/crypt/zone"},{"zone":{"id":"6c58e7aa-71e1-4868-9d4b-e12c7ef40303","zone_type":"crucible","addresses":["fd00:1122:3344:105::a"],"dataset":{"id":"6c58e7aa-71e1-4868-9d4b-e12c7ef40303","name":{"pool_name":"oxp_d664c9e8-bc81-4225-a618-a8ae2d057186","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::a]:32345"},"services":[{"id":"6c58e7aa-71e1-4868-9d4b-e12c7ef40303","details":{"type":"crucible","address":"[fd00:1122:3344:105::a]:32345"}}]},"root":"/pool/ext/ad493851-2d11-4c2d-8d75-989579d9616a/crypt/zone"},{"zone":{"id":"51c6dc8d-b1a4-454a-9b19-01e45eb0b599","zone_type":"crucible","addresses":["fd00:1122:3344:105::d"],"dataset":{"id":"51c6dc8d-b1a4-454a-9b19-01e45eb0b599","name":{"pool_name":"oxp_f5f85537-eb25-4d0e-8e94-b775c41abd73","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::d]:32345"},"services":[{"id":"51c6dc8d-b1a4-454a-9b19-01e45eb0b599","details":{"type":"crucible","address":"[fd00:1122:3344:105::d]:32345"}}]},"root":"/pool/ext/4f1eafe9-b28d-49d3-83e2-ceac8721d6b5/crypt/zone"},{"zone":{"id":"8cbffa61-0bd0-4ad2-bd7d-30fe0dd57469","zone_type":"crucible","addresses":["fd00:1122:3344:105::9"],"dataset":{"id":"8cbffa61-0bd0-4ad2-bd7d-30fe0dd57469","name":{"pool_name":"oxp_88abca38-3f61-4d4b-80a1-4ea3e4827f84","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::9]:32345"},"services":[{"id":"8cbffa61-0bd0-4ad2-bd7d-30fe0dd57469","details":{"type":"crucible","address":"[fd00:1122:3344:105::9]:32345"}}]},"root":"/pool/ext/88abca38-3f61-4d4b-80a1-4ea3e4827f84/crypt/zone"},{"zone":{"id":"2177f37f-2ac9-4e66-bf74-a10bd91f4d33","zone_type":"crucible","addresses":["fd00:1122:3344:105::6"],"dataset":{"id":"2177f37f-2ac9-4e66-bf74-a10bd91f4d33","name":{"pool_name":"oxp_59e20871-4670-40d6-8ff4-aa97899fc991","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::6]:32345"},"services":[{"id":"2177f37f-2ac9-4e66-bf74-a10bd91f4d33","details":{"type":"crucible","address":"[fd00:1122:3344:105::6]:32345"}}]},"root":"/pool/ext/4f1eafe9-b28d-49d3-83e2-ceac8721d6b5/crypt/zone"},{"zone":{"id":"e4e43855-4879-4910-a2ba-40f625c1cc2d","zone_type":"crucible","addresses":["fd00:1122:3344:105::b"],"dataset":{"id":"e4e43855-4879-4910-a2ba-40f625c1cc2d","name":{"pool_name":"oxp_967d2f05-b141-44f5-837d-9b2aa67ee128","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::b]:32345"},"services":[{"id":"e4e43855-4879-4910-a2ba-40f625c1cc2d","details":{"type":"crucible","address":"[fd00:1122:3344:105::b]:32345"}}]},"root":"/pool/ext/6b6f34cd-6d3d-4832-a4e6-3df112c97133/crypt/zone"},{"zone":{"id":"8d2517e1-f9ad-40f2-abb9-2f5122839910","zone_type":"crucible","addresses":["fd00:1122:3344:105::7"],"dataset":{"id":"8d2517e1-f9ad-40f2-abb9-2f5122839910","name":{"pool_name":"oxp_ad493851-2d11-4c2d-8d75-989579d9616a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::7]:32345"},"services":[{"id":"8d2517e1-f9ad-40f2-abb9-2f5122839910","details":{"type":"crucible","address":"[fd00:1122:3344:105::7]:32345"}}]},"root":"/pool/ext
/88abca38-3f61-4d4b-80a1-4ea3e4827f84/crypt/zone"},{"zone":{"id":"44cb3698-a7b1-4388-9165-ac76082ec8bc","zone_type":"crucible","addresses":["fd00:1122:3344:105::5"],"dataset":{"id":"44cb3698-a7b1-4388-9165-ac76082ec8bc","name":{"pool_name":"oxp_4292a83c-8c1f-4b2e-9120-72e0c510bf3c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::5]:32345"},"services":[{"id":"44cb3698-a7b1-4388-9165-ac76082ec8bc","details":{"type":"crucible","address":"[fd00:1122:3344:105::5]:32345"}}]},"root":"/pool/ext/24dab7f5-164a-47f3-a878-f32ab1e68cce/crypt/zone"},{"zone":{"id":"931b5c86-9d72-4518-bfd6-97863152ac65","zone_type":"crucible","addresses":["fd00:1122:3344:105::c"],"dataset":{"id":"931b5c86-9d72-4518-bfd6-97863152ac65","name":{"pool_name":"oxp_6b6f34cd-6d3d-4832-a4e6-3df112c97133","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::c]:32345"},"services":[{"id":"931b5c86-9d72-4518-bfd6-97863152ac65","details":{"type":"crucible","address":"[fd00:1122:3344:105::c]:32345"}}]},"root":"/pool/ext/ad493851-2d11-4c2d-8d75-989579d9616a/crypt/zone"},{"zone":{"id":"ac568073-1889-463e-8cc4-cfed16ce2a34","zone_type":"crucible","addresses":["fd00:1122:3344:105::8"],"dataset":{"id":"ac568073-1889-463e-8cc4-cfed16ce2a34","name":{"pool_name":"oxp_4f1eafe9-b28d-49d3-83e2-ceac8721d6b5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:105::8]:32345"},"services":[{"id":"ac568073-1889-463e-8cc4-cfed16ce2a34","details":{"type":"crucible","address":"[fd00:1122:3344:105::8]:32345"}}]},"root":"/pool/ext/4292a83c-8c1f-4b2e-9120-72e0c510bf3c/crypt/zone"},{"zone":{"id":"e8f86fbb-864e-4d5a-961c-b50b54ae853e","zone_type":"cockroach_db","addresses":["fd00:1122:3344:105::3"],"dataset":{"id":"e8f86fbb-864e-4d5a-961c-b50b54ae853e","name":{"pool_name":"oxp_24dab7f5-164a-47f3-a878-f32ab1e68cce","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:105::3]:32221"},"services":[{"id":"e8f86fbb-864e-4d5a-961c-b50b54ae853e","details":{"type":"cockroach_db","address":"[fd00:1122:3344:105::3]:32221"}}]},"root":"/pool/ext/4f1eafe9-b28d-49d3-83e2-ceac8721d6b5/crypt/zone"},{"zone":{"id":"c79caea0-37b1-49d6-ae6e-8cf849d91374","zone_type":"ntp","addresses":["fd00:1122:3344:105::e"],"dataset":null,"services":[{"id":"c79caea0-37b1-49d6-ae6e-8cf849d91374","details":{"type":"internal_ntp","address":"[fd00:1122:3344:105::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/24dab7f5-164a-47f3-a878-f32ab1e68cce/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled24.json b/sled-agent/tests/old-service-ledgers/rack3-sled24.json
new file mode 100644
index 0000000000..e7bd3050d6
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled24.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"d2b1e468-bc3c-4d08-b855-ae3327465375","zone_type":"crucible","addresses":["fd00:1122:3344:106::3"],"dataset":{"id":"d2b1e468-bc3c-4d08-b855-ae3327465375","name":{"pool_name":"oxp_9db196bf-828d-4e55-a2c1-dd9d579d3908","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::3]:32345"},"services":[{"id":"d2b1e468-bc3c-4d08-b855-ae3327465375","details":{"type":"crucible","address":"[fd00:1122:3344:106::3]:32345"}}]},"root":"/pool/ext/74df4c92-edbb-4431-a770-1d015110e66b/crypt/zone"},{"zone":{"id":"61f94a16-79fd-42e3-b225-a4dc67228437","zone_type":"crucible","addresses":["fd00:1122:3344:106::6"],"dataset":{"id":"61f94a16-79fd-42e3-b225-a4dc67228437","name":{"pool_name":"oxp_d77d5b08-5f70-496a-997b-b38804dc3b8a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::6]:32345"},"services":[{"id":"61f94a16-79fd-42e3-b225-a4dc67228437","details":{"type":"crucible","address":"[fd00:1122:3344:106::6]:32345"}}]},"root":"/pool/ext/daf9e3cd-5a40-4eba-a0f6-4f94dab37dae/crypt/zone"},{"zone":{"id":"7d32ef34-dec5-4fd8-899e-20bbc473a3ee","zone_type":"crucible","addresses":["fd00:1122:3344:106::7"],"dataset":{"id":"7d32ef34-dec5-4fd8-899e-20bbc473a3ee","name":{"pool_name":"oxp_50c1b653-6231-41fe-b3cf-b7ba709a0746","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::7]:32345"},"services":[{"id":"7d32ef34-dec5-4fd8-899e-20bbc473a3ee","details":{"type":"crucible","address":"[fd00:1122:3344:106::7]:32345"}}]},"root":"/pool/ext/9db196bf-828d-4e55-a2c1-dd9d579d3908/crypt/zone"},{"zone":{"id":"c34b7ae5-26b9-4651-a3c4-20bba2bd0d2c","zone_type":"crucible","addresses":["fd00:1122:3344:106::5"],"dataset":{"id":"c34b7ae5-26b9-4651-a3c4-20bba2bd0d2c","name":{"pool_name":"oxp_88aea92c-ab92-44c1-9471-eb8e30e075d3","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::5]:32345"},"services":[{"id":"c34b7ae5-26b9-4651-a3c4-20bba2bd0d2c","details":{"type":"crucible","address":"[fd00:1122:3344:106::5]:32345"}}]},"root":"/pool/ext/8da316d4-6b18-4980-a0a8-6e76e72cc40d/crypt/zone"},{"zone":{"id":"36472be8-9a70-4c14-bd02-439b725cec1a","zone_type":"crucible","addresses":["fd00:1122:3344:106::8"],"dataset":{"id":"36472be8-9a70-4c14-bd02-439b725cec1a","name":{"pool_name":"oxp_54544b3a-1513-4db2-911e-7c1eb4b12385","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::8]:32345"},"services":[{"id":"36472be8-9a70-4c14-bd02-439b725cec1a","details":{"type":"crucible","address":"[fd00:1122:3344:106::8]:32345"}}]},"root":"/pool/ext/54544b3a-1513-4db2-911e-7c1eb4b12385/crypt/zone"},{"zone":{"id":"2548f8ab-5255-4334-a1fb-5d7d95213129","zone_type":"crucible","addresses":["fd00:1122:3344:106::9"],"dataset":{"id":"2548f8ab-5255-4334-a1fb-5d7d95213129","name":{"pool_name":"oxp_08050450-967f-431c-9a12-0d051aff020e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::9]:32345"},"services":[{"id":"2548f8ab-5255-4334-a1fb-5d7d95213129","details":{"type":"crucible","address":"[fd00:1122:3344:106::9]:32345"}}]},"root":"/pool/ext/08050450-967f-431c-9a12-0d051aff020e/crypt/zone"},{"zone":{"id":"1455c069-853c-49cd-853a-3ea81b89acd4","zone_type":"crucible","addresses":["fd00:1122:3344:106::c"],"dataset":{"id":"1455c069-853c-49cd-853a-3ea81b89acd4","name":{"pool_name":"oxp_8da316d4-6b18-4980-a0a8-6e76e72cc40d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::c]:32345"},"services":[{"id":"1455c069-853c-49cd-853a-3ea81b89acd4","details":{"type":"crucible","address":"[fd00:1122:3344:106::c]:32345"}}]},"root":"/pool/ext
/08050450-967f-431c-9a12-0d051aff020e/crypt/zone"},{"zone":{"id":"27c0244b-f91a-46c3-bc96-e8eec009371e","zone_type":"crucible","addresses":["fd00:1122:3344:106::b"],"dataset":{"id":"27c0244b-f91a-46c3-bc96-e8eec009371e","name":{"pool_name":"oxp_daf9e3cd-5a40-4eba-a0f6-4f94dab37dae","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::b]:32345"},"services":[{"id":"27c0244b-f91a-46c3-bc96-e8eec009371e","details":{"type":"crucible","address":"[fd00:1122:3344:106::b]:32345"}}]},"root":"/pool/ext/74df4c92-edbb-4431-a770-1d015110e66b/crypt/zone"},{"zone":{"id":"9e46d837-1e0f-42b6-a352-84e6946b8734","zone_type":"crucible","addresses":["fd00:1122:3344:106::4"],"dataset":{"id":"9e46d837-1e0f-42b6-a352-84e6946b8734","name":{"pool_name":"oxp_74df4c92-edbb-4431-a770-1d015110e66b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::4]:32345"},"services":[{"id":"9e46d837-1e0f-42b6-a352-84e6946b8734","details":{"type":"crucible","address":"[fd00:1122:3344:106::4]:32345"}}]},"root":"/pool/ext/15f94c39-d48c-41f6-a913-cc1d04aef1a2/crypt/zone"},{"zone":{"id":"b972fcd4-c1b3-4b3c-9e24-f59c7a7cb192","zone_type":"crucible","addresses":["fd00:1122:3344:106::a"],"dataset":{"id":"b972fcd4-c1b3-4b3c-9e24-f59c7a7cb192","name":{"pool_name":"oxp_15f94c39-d48c-41f6-a913-cc1d04aef1a2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:106::a]:32345"},"services":[{"id":"b972fcd4-c1b3-4b3c-9e24-f59c7a7cb192","details":{"type":"crucible","address":"[fd00:1122:3344:106::a]:32345"}}]},"root":"/pool/ext/74df4c92-edbb-4431-a770-1d015110e66b/crypt/zone"},{"zone":{"id":"e1c8c655-1950-42d5-ae1f-a4ce84854bbc","zone_type":"ntp","addresses":["fd00:1122:3344:106::d"],"dataset":null,"services":[{"id":"e1c8c655-1950-42d5-ae1f-a4ce84854bbc","details":{"type":"internal_ntp","address":"[fd00:1122:3344:106::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/15f94c39-d48c-41f6-a913-cc1d04aef1a2/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled25.json b/sled-agent/tests/old-service-ledgers/rack3-sled25.json
new file mode 100644
index 0000000000..642657bbce
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled25.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"10b80058-9b2e-4d6c-8a1a-a61a8258c12f","zone_type":"crucible","addresses":["fd00:1122:3344:118::9"],"dataset":{"id":"10b80058-9b2e-4d6c-8a1a-a61a8258c12f","name":{"pool_name":"oxp_953c19bb-9fff-4488-8a7b-29de9994a948","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::9]:32345"},"services":[{"id":"10b80058-9b2e-4d6c-8a1a-a61a8258c12f","details":{"type":"crucible","address":"[fd00:1122:3344:118::9]:32345"}}]},"root":"/pool/ext/a78caf97-6145-4908-83b5-a03a6d2e0ac4/crypt/zone"},{"zone":{"id":"f58fef96-7b5e-40c2-9482-669088a19209","zone_type":"crucible","addresses":["fd00:1122:3344:118::d"],"dataset":{"id":"f58fef96-7b5e-40c2-9482-669088a19209","name":{"pool_name":"oxp_d7976706-d6ed-4465-8b04-450c96d8feec","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::d]:32345"},"services":[{"id":"f58fef96-7b5e-40c2-9482-669088a19209","details":{"type":"crucible","address":"[fd00:1122:3344:118::d]:32345"}}]},"root":"/pool/ext/d7976706-d6ed-4465-8b04-450c96d8feec/crypt/zone"},{"zone":{"id":"624f1168-47b6-4aa1-84da-e20a0d74d783","zone_type":"crucible","addresses":["fd00:1122:3344:118::b"],"dataset":{"id":"624f1168-47b6-4aa1-84da-e20a0d74d783","name":{"pool_name":"oxp_a78caf97-6145-4908-83b5-a03a6d2e0ac4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::b]:32345"},"services":[{"id":"624f1168-47b6-4aa1-84da-e20a0d74d783","details":{"type":"crucible","address":"[fd00:1122:3344:118::b]:32345"}}]},"root":"/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone"},{"zone":{"id":"8ea85412-19b4-45c1-a53c-027ddd629296","zone_type":"crucible","addresses":["fd00:1122:3344:118::6"],"dataset":{"id":"8ea85412-19b4-45c1-a53c-027ddd629296","name":{"pool_name":"oxp_d5f4c903-155a-4c91-aadd-6039a4f64821","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::6]:32345"},"services":[{"id":"8ea85412-19b4-45c1-a53c-027ddd629296","details":{"type":"crucible","address":"[fd00:1122:3344:118::6]:32345"}}]},"root":"/pool/ext/7d2a7685-c1c9-4d2d-a2bb-df65d96ea3e2/crypt/zone"},{"zone":{"id":"fd226b82-71d7-4719-b32c-a6c7abe28a2a","zone_type":"external_dns","addresses":["fd00:1122:3344:118::3"],"dataset":{"id":"fd226b82-71d7-4719-b32c-a6c7abe28a2a","name":{"pool_name":"oxp_84a80b58-70e9-439c-9558-5b343d9a4b53","kind":{"type":"external_dns"}},"service_address":"[fd00:1122:3344:118::3]:5353"},"services":[{"id":"fd226b82-71d7-4719-b32c-a6c7abe28a2a","details":{"type":"external_dns","http_address":"[fd00:1122:3344:118::3]:5353","dns_address":"45.154.216.34:53","nic":{"id":"7f72b6fd-1120-44dc-b3a7-f727502ba47c","kind":{"type":"service","id":"fd226b82-71d7-4719-b32c-a6c7abe28a2a"},"name":"external-dns-fd226b82-71d7-4719-b32c-a6c7abe28a2a","ip":"172.30.1.6","mac":"A8:40:25:FF:9E:D1","subnet":"172.30.1.0/24","vni":100,"primary":true,"slot":0}}}]},"root":"/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone"},{"zone":{"id":"08d0c38d-f0d9-45b9-856d-b85059fe5f07","zone_type":"crucible","addresses":["fd00:1122:3344:118::4"],"dataset":{"id":"08d0c38d-f0d9-45b9-856d-b85059fe5f07","name":{"pool_name":"oxp_84a80b58-70e9-439c-9558-5b343d9a4b53","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::4]:32345"},"services":[{"id":"08d0c38d-f0d9-45b9-856d-b85059fe5f07","details":{"type":"crucible","address":"[fd00:1122:3344:118::4]:32345"}}]},"root":"/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone"},{"zone":{"id":"5de7d3fd-4a3f-4fdd-b6b2-d1186e16dce5","zone_type":"crucible","addresses":["fd00:1122:3344:118::7"],"dataset":{"id
":"5de7d3fd-4a3f-4fdd-b6b2-d1186e16dce5","name":{"pool_name":"oxp_d76e058f-2d1e-4b15-b3a0-e5509a246876","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::7]:32345"},"services":[{"id":"5de7d3fd-4a3f-4fdd-b6b2-d1186e16dce5","details":{"type":"crucible","address":"[fd00:1122:3344:118::7]:32345"}}]},"root":"/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone"},{"zone":{"id":"5d0f5cad-10b3-497c-903b-eeeabce920e2","zone_type":"crucible","addresses":["fd00:1122:3344:118::8"],"dataset":{"id":"5d0f5cad-10b3-497c-903b-eeeabce920e2","name":{"pool_name":"oxp_3a3ad639-8800-4951-bc2a-201d269e47a2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::8]:32345"},"services":[{"id":"5d0f5cad-10b3-497c-903b-eeeabce920e2","details":{"type":"crucible","address":"[fd00:1122:3344:118::8]:32345"}}]},"root":"/pool/ext/3a3ad639-8800-4951-bc2a-201d269e47a2/crypt/zone"},{"zone":{"id":"39f9cefa-801c-4843-9fb9-05446ffbdd1a","zone_type":"crucible","addresses":["fd00:1122:3344:118::a"],"dataset":{"id":"39f9cefa-801c-4843-9fb9-05446ffbdd1a","name":{"pool_name":"oxp_7d2a7685-c1c9-4d2d-a2bb-df65d96ea3e2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::a]:32345"},"services":[{"id":"39f9cefa-801c-4843-9fb9-05446ffbdd1a","details":{"type":"crucible","address":"[fd00:1122:3344:118::a]:32345"}}]},"root":"/pool/ext/a78caf97-6145-4908-83b5-a03a6d2e0ac4/crypt/zone"},{"zone":{"id":"0711e710-7fdd-4e68-94c8-294b8677e804","zone_type":"crucible","addresses":["fd00:1122:3344:118::5"],"dataset":{"id":"0711e710-7fdd-4e68-94c8-294b8677e804","name":{"pool_name":"oxp_a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::5]:32345"},"services":[{"id":"0711e710-7fdd-4e68-94c8-294b8677e804","details":{"type":"crucible","address":"[fd00:1122:3344:118::5]:32345"}}]},"root":"/pool/ext/3a3ad639-8800-4951-bc2a-201d269e47a2/crypt/zone"},{"zone":{"id":"318a62cc-5c6c-4805-9fb6-c0f6a75ce31c","zone_type":"crucible","addresses":["fd00:1122:3344:118::c"],"dataset":{"id":"318a62cc-5c6c-4805-9fb6-c0f6a75ce31c","name":{"pool_name":"oxp_1d5f0ba3-6b31-4cea-a9a9-2065a538887d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:118::c]:32345"},"services":[{"id":"318a62cc-5c6c-4805-9fb6-c0f6a75ce31c","details":{"type":"crucible","address":"[fd00:1122:3344:118::c]:32345"}}]},"root":"/pool/ext/d7976706-d6ed-4465-8b04-450c96d8feec/crypt/zone"},{"zone":{"id":"463d0498-85b9-40eb-af96-d99af58a587c","zone_type":"ntp","addresses":["fd00:1122:3344:118::e"],"dataset":null,"services":[{"id":"463d0498-85b9-40eb-af96-d99af58a587c","details":{"type":"internal_ntp","address":"[fd00:1122:3344:118::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/d5f4c903-155a-4c91-aadd-6039a4f64821/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled26.json b/sled-agent/tests/old-service-ledgers/rack3-sled26.json new file mode 100644 index 0000000000..0978cb9e45 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled26.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"d8b3de97-cc79-48f6-83ad-02017c21223b","zone_type":"crucible_pantry","addresses":["fd00:1122:3344:119::3"],"dataset":null,"services":[{"id":"d8b3de97-cc79-48f6-83ad-02017c21223b","details":{"type":"crucible_pantry","address":"[fd00:1122:3344:119::3]:17000"}}]},"root":"/pool/ext/e0faea44-8b5c-40b0-bb75-a1aec1a10377/crypt/zone"},{"zone":{"id":"adba1a3b-5bac-44d5-aa5a-879dc6eadb5f","zone_type":"crucible","addresses":["fd00:1122:3344:119::c"],"dataset":{"id":"adba1a3b-5bac-44d5-aa5a-879dc6eadb5f","name":{"pool_name":"oxp_21c339c3-6461-4bdb-8b0e-c0f9f08ee10b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::c]:32345"},"services":[{"id":"adba1a3b-5bac-44d5-aa5a-879dc6eadb5f","details":{"type":"crucible","address":"[fd00:1122:3344:119::c]:32345"}}]},"root":"/pool/ext/f5c73c28-2168-4321-b737-4ca6663155c9/crypt/zone"},{"zone":{"id":"42bb9833-5c39-4aba-b2c4-da2ca1287728","zone_type":"crucible","addresses":["fd00:1122:3344:119::a"],"dataset":{"id":"42bb9833-5c39-4aba-b2c4-da2ca1287728","name":{"pool_name":"oxp_1f91451d-a466-4c9a-a6e6-0abd7985595f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::a]:32345"},"services":[{"id":"42bb9833-5c39-4aba-b2c4-da2ca1287728","details":{"type":"crucible","address":"[fd00:1122:3344:119::a]:32345"}}]},"root":"/pool/ext/21c339c3-6461-4bdb-8b0e-c0f9f08ee10b/crypt/zone"},{"zone":{"id":"197695e1-d949-4982-b679-6e5c9ab4bcc7","zone_type":"crucible","addresses":["fd00:1122:3344:119::b"],"dataset":{"id":"197695e1-d949-4982-b679-6e5c9ab4bcc7","name":{"pool_name":"oxp_e0faea44-8b5c-40b0-bb75-a1aec1a10377","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::b]:32345"},"services":[{"id":"197695e1-d949-4982-b679-6e5c9ab4bcc7","details":{"type":"crucible","address":"[fd00:1122:3344:119::b]:32345"}}]},"root":"/pool/ext/b31e1815-cae0-4145-940c-874fff63bdd5/crypt/zone"},{"zone":{"id":"bf99d4f8-edf1-4de5-98d4-8e6a24965005","zone_type":"crucible","addresses":["fd00:1122:3344:119::8"],"dataset":{"id":"bf99d4f8-edf1-4de5-98d4-8e6a24965005","name":{"pool_name":"oxp_ef2c3afb-6962-4f6b-b567-14766bbd9ec0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::8]:32345"},"services":[{"id":"bf99d4f8-edf1-4de5-98d4-8e6a24965005","details":{"type":"crucible","address":"[fd00:1122:3344:119::8]:32345"}}]},"root":"/pool/ext/21c339c3-6461-4bdb-8b0e-c0f9f08ee10b/crypt/zone"},{"zone":{"id":"390d1853-8be9-4987-b8b6-f022999bf4e7","zone_type":"crucible","addresses":["fd00:1122:3344:119::7"],"dataset":{"id":"390d1853-8be9-4987-b8b6-f022999bf4e7","name":{"pool_name":"oxp_06eed00a-d8d3-4b9d-84c9-23fce535f63e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::7]:32345"},"services":[{"id":"390d1853-8be9-4987-b8b6-f022999bf4e7","details":{"type":"crucible","address":"[fd00:1122:3344:119::7]:32345"}}]},"root":"/pool/ext/ef2c3afb-6962-4f6b-b567-14766bbd9ec0/crypt/zone"},{"zone":{"id":"76fe2161-90df-41b5-9c94-067de9c29db1","zone_type":"crucible","addresses":["fd00:1122:3344:119::4"],"dataset":{"id":"76fe2161-90df-41b5-9c94-067de9c29db1","name":{"pool_name":"oxp_f5c73c28-2168-4321-b737-4ca6663155c9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::4]:32345"},"services":[{"id":"76fe2161-90df-41b5-9c94-067de9c29db1","details":{"type":"crucible","address":"[fd00:1122:3344:119::4]:32345"}}]},"root":"/pool/ext/ef2c3afb-6962-4f6b-b567-14766bbd9ec0/crypt/zone"},{"zone":{"id":"f49dc522-2b13-4055-964c-8315671096aa","zone_type":"crucible","addresses":["fd00:1122:3344:119::d"],"da
taset":{"id":"f49dc522-2b13-4055-964c-8315671096aa","name":{"pool_name":"oxp_662c278b-7f5f-4c7e-91ff-70207e8a307b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::d]:32345"},"services":[{"id":"f49dc522-2b13-4055-964c-8315671096aa","details":{"type":"crucible","address":"[fd00:1122:3344:119::d]:32345"}}]},"root":"/pool/ext/1f91451d-a466-4c9a-a6e6-0abd7985595f/crypt/zone"},{"zone":{"id":"08cc7bd6-368e-4d16-a619-28b17eff35af","zone_type":"crucible","addresses":["fd00:1122:3344:119::9"],"dataset":{"id":"08cc7bd6-368e-4d16-a619-28b17eff35af","name":{"pool_name":"oxp_5516b9ac-b139-40da-aa3b-f094568ba095","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::9]:32345"},"services":[{"id":"08cc7bd6-368e-4d16-a619-28b17eff35af","details":{"type":"crucible","address":"[fd00:1122:3344:119::9]:32345"}}]},"root":"/pool/ext/06eed00a-d8d3-4b9d-84c9-23fce535f63e/crypt/zone"},{"zone":{"id":"74b0613f-bce8-4922-93e0-b5bfccfc8443","zone_type":"crucible","addresses":["fd00:1122:3344:119::5"],"dataset":{"id":"74b0613f-bce8-4922-93e0-b5bfccfc8443","name":{"pool_name":"oxp_b31e1815-cae0-4145-940c-874fff63bdd5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::5]:32345"},"services":[{"id":"74b0613f-bce8-4922-93e0-b5bfccfc8443","details":{"type":"crucible","address":"[fd00:1122:3344:119::5]:32345"}}]},"root":"/pool/ext/21c339c3-6461-4bdb-8b0e-c0f9f08ee10b/crypt/zone"},{"zone":{"id":"55fcfc62-8435-475f-a2aa-29373901b993","zone_type":"crucible","addresses":["fd00:1122:3344:119::6"],"dataset":{"id":"55fcfc62-8435-475f-a2aa-29373901b993","name":{"pool_name":"oxp_eadf6a03-1028-4d48-ac0d-0d27ef2c8c0f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:119::6]:32345"},"services":[{"id":"55fcfc62-8435-475f-a2aa-29373901b993","details":{"type":"crucible","address":"[fd00:1122:3344:119::6]:32345"}}]},"root":"/pool/ext/1f91451d-a466-4c9a-a6e6-0abd7985595f/crypt/zone"},{"zone":{"id":"d52ccea3-6d7f-43a6-a19f-e0409f4e9cdc","zone_type":"ntp","addresses":["fd00:1122:3344:119::e"],"dataset":null,"services":[{"id":"d52ccea3-6d7f-43a6-a19f-e0409f4e9cdc","details":{"type":"internal_ntp","address":"[fd00:1122:3344:119::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/f5c73c28-2168-4321-b737-4ca6663155c9/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled27.json b/sled-agent/tests/old-service-ledgers/rack3-sled27.json new file mode 100644 index 0000000000..0b2db29c4a --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled27.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"095e612f-e218-4a16-aa6e-98c3d69a470a","zone_type":"crucible","addresses":["fd00:1122:3344:10d::a"],"dataset":{"id":"095e612f-e218-4a16-aa6e-98c3d69a470a","name":{"pool_name":"oxp_9f657858-623f-4d78-9841-6e620b5ede30","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::a]:32345"},"services":[{"id":"095e612f-e218-4a16-aa6e-98c3d69a470a","details":{"type":"crucible","address":"[fd00:1122:3344:10d::a]:32345"}}]},"root":"/pool/ext/2d086b51-2b77-4bc7-adc6-43586ea38ce9/crypt/zone"},{"zone":{"id":"de818730-0e3b-4567-94e7-344bd9b6f564","zone_type":"crucible","addresses":["fd00:1122:3344:10d::3"],"dataset":{"id":"de818730-0e3b-4567-94e7-344bd9b6f564","name":{"pool_name":"oxp_ba6ab301-07e1-4d35-80ac-59612f2c2bdb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::3]:32345"},"services":[{"id":"de818730-0e3b-4567-94e7-344bd9b6f564","details":{"type":"crucible","address":"[fd00:1122:3344:10d::3]:32345"}}]},"root":"/pool/ext/7cee2806-e898-47d8-b568-e276a6e271f8/crypt/zone"},{"zone":{"id":"6a21dc3c-3a9d-4520-9a91-7d8f2737bcd4","zone_type":"crucible","addresses":["fd00:1122:3344:10d::4"],"dataset":{"id":"6a21dc3c-3a9d-4520-9a91-7d8f2737bcd4","name":{"pool_name":"oxp_7cee2806-e898-47d8-b568-e276a6e271f8","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::4]:32345"},"services":[{"id":"6a21dc3c-3a9d-4520-9a91-7d8f2737bcd4","details":{"type":"crucible","address":"[fd00:1122:3344:10d::4]:32345"}}]},"root":"/pool/ext/cef23d87-31ed-40d5-99b8-12d7be8e46e7/crypt/zone"},{"zone":{"id":"e01b7f45-b8d7-4944-ba5b-41fb699889a9","zone_type":"crucible","addresses":["fd00:1122:3344:10d::b"],"dataset":{"id":"e01b7f45-b8d7-4944-ba5b-41fb699889a9","name":{"pool_name":"oxp_d9af8878-50bd-4425-95d9-e6556ce92cfa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::b]:32345"},"services":[{"id":"e01b7f45-b8d7-4944-ba5b-41fb699889a9","details":{"type":"crucible","address":"[fd00:1122:3344:10d::b]:32345"}}]},"root":"/pool/ext/6fe9bcaa-88cb-451d-b086-24a3ad53fa22/crypt/zone"},{"zone":{"id":"4271ef62-d319-4e80-b157-915321cec8c7","zone_type":"crucible","addresses":["fd00:1122:3344:10d::c"],"dataset":{"id":"4271ef62-d319-4e80-b157-915321cec8c7","name":{"pool_name":"oxp_ba8ee7dd-cdfb-48bd-92ce-4dc45e070930","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::c]:32345"},"services":[{"id":"4271ef62-d319-4e80-b157-915321cec8c7","details":{"type":"crucible","address":"[fd00:1122:3344:10d::c]:32345"}}]},"root":"/pool/ext/9f657858-623f-4d78-9841-6e620b5ede30/crypt/zone"},{"zone":{"id":"6bdcc159-aeb9-4903-9486-dd8b43a3dc16","zone_type":"crucible","addresses":["fd00:1122:3344:10d::8"],"dataset":{"id":"6bdcc159-aeb9-4903-9486-dd8b43a3dc16","name":{"pool_name":"oxp_5b03a5dc-bb5a-4bf4-bc21-0af849cd1dab","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::8]:32345"},"services":[{"id":"6bdcc159-aeb9-4903-9486-dd8b43a3dc16","details":{"type":"crucible","address":"[fd00:1122:3344:10d::8]:32345"}}]},"root":"/pool/ext/d9af8878-50bd-4425-95d9-e6556ce92cfa/crypt/zone"},{"zone":{"id":"85540e54-cdd7-4baa-920c-5cf54cbc1f83","zone_type":"crucible","addresses":["fd00:1122:3344:10d::7"],"dataset":{"id":"85540e54-cdd7-4baa-920c-5cf54cbc1f83","name":{"pool_name":"oxp_ee24f9a6-84ab-49a5-a28f-e394abfcaa95","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::7]:32345"},"services":[{"id":"85540e54-cdd7-4baa-920c-5cf54cbc1f83","details":{"type":"crucible","address":"[fd00:1122:3344:10d::7]:32345"}}]},"root":"/pool/ext
/9f657858-623f-4d78-9841-6e620b5ede30/crypt/zone"},{"zone":{"id":"750d1a0b-6a14-46c5-9a0b-a504caefb198","zone_type":"crucible","addresses":["fd00:1122:3344:10d::9"],"dataset":{"id":"750d1a0b-6a14-46c5-9a0b-a504caefb198","name":{"pool_name":"oxp_cef23d87-31ed-40d5-99b8-12d7be8e46e7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::9]:32345"},"services":[{"id":"750d1a0b-6a14-46c5-9a0b-a504caefb198","details":{"type":"crucible","address":"[fd00:1122:3344:10d::9]:32345"}}]},"root":"/pool/ext/ba8ee7dd-cdfb-48bd-92ce-4dc45e070930/crypt/zone"},{"zone":{"id":"b5996893-1a9a-434e-a257-d702694f058b","zone_type":"crucible","addresses":["fd00:1122:3344:10d::6"],"dataset":{"id":"b5996893-1a9a-434e-a257-d702694f058b","name":{"pool_name":"oxp_2d086b51-2b77-4bc7-adc6-43586ea38ce9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::6]:32345"},"services":[{"id":"b5996893-1a9a-434e-a257-d702694f058b","details":{"type":"crucible","address":"[fd00:1122:3344:10d::6]:32345"}}]},"root":"/pool/ext/7cee2806-e898-47d8-b568-e276a6e271f8/crypt/zone"},{"zone":{"id":"8b36686a-b98d-451a-9124-a3583000a83a","zone_type":"crucible","addresses":["fd00:1122:3344:10d::5"],"dataset":{"id":"8b36686a-b98d-451a-9124-a3583000a83a","name":{"pool_name":"oxp_6fe9bcaa-88cb-451d-b086-24a3ad53fa22","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10d::5]:32345"},"services":[{"id":"8b36686a-b98d-451a-9124-a3583000a83a","details":{"type":"crucible","address":"[fd00:1122:3344:10d::5]:32345"}}]},"root":"/pool/ext/9f657858-623f-4d78-9841-6e620b5ede30/crypt/zone"},{"zone":{"id":"88d695a2-c8c1-41af-85b0-77424f4d650d","zone_type":"ntp","addresses":["fd00:1122:3344:10d::d"],"dataset":null,"services":[{"id":"88d695a2-c8c1-41af-85b0-77424f4d650d","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10d::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/ba6ab301-07e1-4d35-80ac-59612f2c2bdb/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled28.json b/sled-agent/tests/old-service-ledgers/rack3-sled28.json
new file mode 100644
index 0000000000..ec137c18fa
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled28.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"a126365d-f459-43bf-9f99-dbe1c4cdecf8","zone_type":"crucible","addresses":["fd00:1122:3344:113::4"],"dataset":{"id":"a126365d-f459-43bf-9f99-dbe1c4cdecf8","name":{"pool_name":"oxp_c99eabb2-6815-416a-9660-87e2609b357a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::4]:32345"},"services":[{"id":"a126365d-f459-43bf-9f99-dbe1c4cdecf8","details":{"type":"crucible","address":"[fd00:1122:3344:113::4]:32345"}}]},"root":"/pool/ext/6461a450-f043-4d1e-bc03-4a68ed5fe94a/crypt/zone"},{"zone":{"id":"52f57ef8-546a-43bd-a0f3-8c42b99c37a6","zone_type":"crucible","addresses":["fd00:1122:3344:113::3"],"dataset":{"id":"52f57ef8-546a-43bd-a0f3-8c42b99c37a6","name":{"pool_name":"oxp_f6530e9c-6d64-44fa-93d5-ae427916fbf1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::3]:32345"},"services":[{"id":"52f57ef8-546a-43bd-a0f3-8c42b99c37a6","details":{"type":"crucible","address":"[fd00:1122:3344:113::3]:32345"}}]},"root":"/pool/ext/97662260-6b62-450f-9d7e-42f7dee5d568/crypt/zone"},{"zone":{"id":"3ee87855-9423-43ff-800a-fa4fdbf1d956","zone_type":"crucible","addresses":["fd00:1122:3344:113::a"],"dataset":{"id":"3ee87855-9423-43ff-800a-fa4fdbf1d956","name":{"pool_name":"oxp_6461a450-f043-4d1e-bc03-4a68ed5fe94a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::a]:32345"},"services":[{"id":"3ee87855-9423-43ff-800a-fa4fdbf1d956","details":{"type":"crucible","address":"[fd00:1122:3344:113::a]:32345"}}]},"root":"/pool/ext/9515dc86-fe62-4d4f-b38d-b3461cc042fc/crypt/zone"},{"zone":{"id":"55d0ddf9-9b24-4a7a-b97f-248e240f9ba6","zone_type":"crucible","addresses":["fd00:1122:3344:113::5"],"dataset":{"id":"55d0ddf9-9b24-4a7a-b97f-248e240f9ba6","name":{"pool_name":"oxp_97662260-6b62-450f-9d7e-42f7dee5d568","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::5]:32345"},"services":[{"id":"55d0ddf9-9b24-4a7a-b97f-248e240f9ba6","details":{"type":"crucible","address":"[fd00:1122:3344:113::5]:32345"}}]},"root":"/pool/ext/9515dc86-fe62-4d4f-b38d-b3461cc042fc/crypt/zone"},{"zone":{"id":"014cad37-56a7-4b2a-9c9e-505b15b4de85","zone_type":"crucible","addresses":["fd00:1122:3344:113::b"],"dataset":{"id":"014cad37-56a7-4b2a-9c9e-505b15b4de85","name":{"pool_name":"oxp_8529ce8e-21d2-4b23-b9fd-6b90c7ae4f90","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::b]:32345"},"services":[{"id":"014cad37-56a7-4b2a-9c9e-505b15b4de85","details":{"type":"crucible","address":"[fd00:1122:3344:113::b]:32345"}}]},"root":"/pool/ext/6461a450-f043-4d1e-bc03-4a68ed5fe94a/crypt/zone"},{"zone":{"id":"e14fb192-aaab-42ab-aa86-c85f13955940","zone_type":"crucible","addresses":["fd00:1122:3344:113::6"],"dataset":{"id":"e14fb192-aaab-42ab-aa86-c85f13955940","name":{"pool_name":"oxp_5a9455ca-fb01-4549-9a70-7579c031779d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::6]:32345"},"services":[{"id":"e14fb192-aaab-42ab-aa86-c85f13955940","details":{"type":"crucible","address":"[fd00:1122:3344:113::6]:32345"}}]},"root":"/pool/ext/f6530e9c-6d64-44fa-93d5-ae427916fbf1/crypt/zone"},{"zone":{"id":"14540609-9371-442b-8486-88c244e97cd4","zone_type":"crucible","addresses":["fd00:1122:3344:113::8"],"dataset":{"id":"14540609-9371-442b-8486-88c244e97cd4","name":{"pool_name":"oxp_2916d6f3-8775-4887-a6d3-f9723982756f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::8]:32345"},"services":[{"id":"14540609-9371-442b-8486-88c244e97cd4","details":{"type":"crucible","address":"[fd00:1122:3344:113::8]:32345"}}]},"root":"/pool/ext
/8529ce8e-21d2-4b23-b9fd-6b90c7ae4f90/crypt/zone"},{"zone":{"id":"97a6b35f-0af9-41eb-93a1-f8bc5dbba357","zone_type":"crucible","addresses":["fd00:1122:3344:113::7"],"dataset":{"id":"97a6b35f-0af9-41eb-93a1-f8bc5dbba357","name":{"pool_name":"oxp_9515dc86-fe62-4d4f-b38d-b3461cc042fc","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::7]:32345"},"services":[{"id":"97a6b35f-0af9-41eb-93a1-f8bc5dbba357","details":{"type":"crucible","address":"[fd00:1122:3344:113::7]:32345"}}]},"root":"/pool/ext/8529ce8e-21d2-4b23-b9fd-6b90c7ae4f90/crypt/zone"},{"zone":{"id":"5734aa24-cb66-4b0a-9eb2-564646f8d729","zone_type":"crucible","addresses":["fd00:1122:3344:113::9"],"dataset":{"id":"5734aa24-cb66-4b0a-9eb2-564646f8d729","name":{"pool_name":"oxp_9f889a6c-17b1-4edd-9659-458d91439dc1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::9]:32345"},"services":[{"id":"5734aa24-cb66-4b0a-9eb2-564646f8d729","details":{"type":"crucible","address":"[fd00:1122:3344:113::9]:32345"}}]},"root":"/pool/ext/a5074e7f-8d3b-40e0-a79e-dbd9af9d5693/crypt/zone"},{"zone":{"id":"ba86eca1-1427-4540-b4a6-1d9a0e1bc656","zone_type":"crucible","addresses":["fd00:1122:3344:113::c"],"dataset":{"id":"ba86eca1-1427-4540-b4a6-1d9a0e1bc656","name":{"pool_name":"oxp_a5074e7f-8d3b-40e0-a79e-dbd9af9d5693","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:113::c]:32345"},"services":[{"id":"ba86eca1-1427-4540-b4a6-1d9a0e1bc656","details":{"type":"crucible","address":"[fd00:1122:3344:113::c]:32345"}}]},"root":"/pool/ext/2916d6f3-8775-4887-a6d3-f9723982756f/crypt/zone"},{"zone":{"id":"6634dbc4-d22f-40a4-8cd3-4f271d781fa1","zone_type":"ntp","addresses":["fd00:1122:3344:113::d"],"dataset":null,"services":[{"id":"6634dbc4-d22f-40a4-8cd3-4f271d781fa1","details":{"type":"internal_ntp","address":"[fd00:1122:3344:113::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/a5074e7f-8d3b-40e0-a79e-dbd9af9d5693/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled29.json b/sled-agent/tests/old-service-ledgers/rack3-sled29.json
new file mode 100644
index 0000000000..2618364e4f
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled29.json
@@ -0,0 +1 @@
+{"generation":5,"requests":[{"zone":{"id":"1cdd1ebf-9321-4f2d-914c-1e617f60b41a","zone_type":"crucible","addresses":["fd00:1122:3344:120::8"],"dataset":{"id":"1cdd1ebf-9321-4f2d-914c-1e617f60b41a","name":{"pool_name":"oxp_74046573-78a2-46b4-86dc-40bb2ee29dd5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::8]:32345"},"services":[{"id":"1cdd1ebf-9321-4f2d-914c-1e617f60b41a","details":{"type":"crucible","address":"[fd00:1122:3344:120::8]:32345"}}]},"root":"/pool/ext/c1f0a9e4-ea10-4fd9-8b6d-79a2bacfec5e/crypt/zone"},{"zone":{"id":"720a0d08-d1c0-43ba-af86-f2dac1a53639","zone_type":"crucible","addresses":["fd00:1122:3344:120::c"],"dataset":{"id":"720a0d08-d1c0-43ba-af86-f2dac1a53639","name":{"pool_name":"oxp_068d2790-1044-41ed-97a5-b493490b14d1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::c]:32345"},"services":[{"id":"720a0d08-d1c0-43ba-af86-f2dac1a53639","details":{"type":"crucible","address":"[fd00:1122:3344:120::c]:32345"}}]},"root":"/pool/ext/86cd16cf-d00d-40bc-b14a-8220b1e11476/crypt/zone"},{"zone":{"id":"d9f0b97b-2cef-4155-b45f-7db89263e4cf","zone_type":"crucible","addresses":["fd00:1122:3344:120::9"],"dataset":{"id":"d9f0b97b-2cef-4155-b45f-7db89263e4cf","name":{"pool_name":"oxp_8171bf0d-e61e-43f9-87d6-ec8833b80102","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::9]:32345"},"services":[{"id":"d9f0b97b-2cef-4155-b45f-7db89263e4cf","details":{"type":"crucible","address":"[fd00:1122:3344:120::9]:32345"}}]},"root":"/pool/ext/86cd16cf-d00d-40bc-b14a-8220b1e11476/crypt/zone"},{"zone":{"id":"018edff1-0d95-45a3-9a01-39c419bec55a","zone_type":"crucible","addresses":["fd00:1122:3344:120::b"],"dataset":{"id":"018edff1-0d95-45a3-9a01-39c419bec55a","name":{"pool_name":"oxp_0b11e026-f265-49a0-935f-7b234c19c789","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::b]:32345"},"services":[{"id":"018edff1-0d95-45a3-9a01-39c419bec55a","details":{"type":"crucible","address":"[fd00:1122:3344:120::b]:32345"}}]},"root":"/pool/ext/35db8700-d6a7-498c-9d2c-08eb9ab41b7c/crypt/zone"},{"zone":{"id":"f8cc1c1e-a556-436c-836d-42052101c38a","zone_type":"crucible","addresses":["fd00:1122:3344:120::3"],"dataset":{"id":"f8cc1c1e-a556-436c-836d-42052101c38a","name":{"pool_name":"oxp_ed8e5a26-5591-405a-b792-408f5b16e444","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::3]:32345"},"services":[{"id":"f8cc1c1e-a556-436c-836d-42052101c38a","details":{"type":"crucible","address":"[fd00:1122:3344:120::3]:32345"}}]},"root":"/pool/ext/1069bdee-fe5a-4164-a856-ff8ae56c07fb/crypt/zone"},{"zone":{"id":"f9600313-fac0-45a1-a1b5-02dd6af468b9","zone_type":"crucible","addresses":["fd00:1122:3344:120::4"],"dataset":{"id":"f9600313-fac0-45a1-a1b5-02dd6af468b9","name":{"pool_name":"oxp_c1f0a9e4-ea10-4fd9-8b6d-79a2bacfec5e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::4]:32345"},"services":[{"id":"f9600313-fac0-45a1-a1b5-02dd6af468b9","details":{"type":"crucible","address":"[fd00:1122:3344:120::4]:32345"}}]},"root":"/pool/ext/74046573-78a2-46b4-86dc-40bb2ee29dd5/crypt/zone"},{"zone":{"id":"869e4f7c-5312-4b98-bacc-1508f236bf5a","zone_type":"crucible","addresses":["fd00:1122:3344:120::6"],"dataset":{"id":"869e4f7c-5312-4b98-bacc-1508f236bf5a","name":{"pool_name":"oxp_04aea8dc-4316-432f-a13a-d7d9b2efa3f2","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::6]:32345"},"services":[{"id":"869e4f7c-5312-4b98-bacc-1508f236bf5a","details":{"type":"crucible","address":"[fd00:1122:3344:120::6]:32345"}}]},"root":"/pool/ext
/0b11e026-f265-49a0-935f-7b234c19c789/crypt/zone"},{"zone":{"id":"31ed5a0c-7caf-4825-b730-85ee94fe27f1","zone_type":"crucible","addresses":["fd00:1122:3344:120::a"],"dataset":{"id":"31ed5a0c-7caf-4825-b730-85ee94fe27f1","name":{"pool_name":"oxp_86cd16cf-d00d-40bc-b14a-8220b1e11476","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::a]:32345"},"services":[{"id":"31ed5a0c-7caf-4825-b730-85ee94fe27f1","details":{"type":"crucible","address":"[fd00:1122:3344:120::a]:32345"}}]},"root":"/pool/ext/04aea8dc-4316-432f-a13a-d7d9b2efa3f2/crypt/zone"},{"zone":{"id":"7e5a3c39-152a-4270-b01e-9e144cca4aaa","zone_type":"crucible","addresses":["fd00:1122:3344:120::5"],"dataset":{"id":"7e5a3c39-152a-4270-b01e-9e144cca4aaa","name":{"pool_name":"oxp_1069bdee-fe5a-4164-a856-ff8ae56c07fb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::5]:32345"},"services":[{"id":"7e5a3c39-152a-4270-b01e-9e144cca4aaa","details":{"type":"crucible","address":"[fd00:1122:3344:120::5]:32345"}}]},"root":"/pool/ext/04aea8dc-4316-432f-a13a-d7d9b2efa3f2/crypt/zone"},{"zone":{"id":"9a03a386-7304-4a86-bee8-153ef643195e","zone_type":"crucible","addresses":["fd00:1122:3344:120::7"],"dataset":{"id":"9a03a386-7304-4a86-bee8-153ef643195e","name":{"pool_name":"oxp_35db8700-d6a7-498c-9d2c-08eb9ab41b7c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:120::7]:32345"},"services":[{"id":"9a03a386-7304-4a86-bee8-153ef643195e","details":{"type":"crucible","address":"[fd00:1122:3344:120::7]:32345"}}]},"root":"/pool/ext/068d2790-1044-41ed-97a5-b493490b14d1/crypt/zone"},{"zone":{"id":"a800d0a7-1020-481c-8be8-ecfd28b7a2be","zone_type":"ntp","addresses":["fd00:1122:3344:120::d"],"dataset":null,"services":[{"id":"a800d0a7-1020-481c-8be8-ecfd28b7a2be","details":{"type":"internal_ntp","address":"[fd00:1122:3344:120::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/c1f0a9e4-ea10-4fd9-8b6d-79a2bacfec5e/crypt/zone"},{"zone":{"id":"be469efd-8e07-4b8e-bcee-6fd33373cdef","zone_type":"internal_dns","addresses":["fd00:1122:3344:3::1"],"dataset":{"id":"be469efd-8e07-4b8e-bcee-6fd33373cdef","name":{"pool_name":"oxp_ed8e5a26-5591-405a-b792-408f5b16e444","kind":{"type":"internal_dns"}},"service_address":"[fd00:1122:3344:3::1]:5353"},"services":[{"id":"be469efd-8e07-4b8e-bcee-6fd33373cdef","details":{"type":"internal_dns","http_address":"[fd00:1122:3344:3::1]:5353","dns_address":"[fd00:1122:3344:3::1]:53","gz_address":"fd00:1122:3344:3::2","gz_address_index":2}}]},"root":"/pool/ext/068d2790-1044-41ed-97a5-b493490b14d1/crypt/zone"}]}
\ No newline at end of file
diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled3.json b/sled-agent/tests/old-service-ledgers/rack3-sled3.json
new file mode 100644
index 0000000000..6bcb626cf6
--- /dev/null
+++ b/sled-agent/tests/old-service-ledgers/rack3-sled3.json
@@ -0,0 +1 @@
+{"generation":4,"requests":[{"zone":{"id":"19d091b8-e005-4ff4-97e1-026de95e3667","zone_type":"crucible","addresses":["fd00:1122:3344:10f::c"],"dataset":{"id":"19d091b8-e005-4ff4-97e1-026de95e3667","name":{"pool_name":"oxp_11a63469-4f57-4976-8620-0055bf82dc97","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::c]:32345"},"services":[{"id":"19d091b8-e005-4ff4-97e1-026de95e3667","details":{"type":"crucible","address":"[fd00:1122:3344:10f::c]:32345"}}]},"root":"/pool/ext/6a73a62c-c636-4557-af45-042cb287aee6/crypt/zone"},{"zone":{"id":"57d77171-104e-4977-b2f9-9b529ee7f8a0","zone_type":"crucible","addresses":["fd00:1122:3344:10f::8"],"dataset":{"id":"57d77171-104e-4977-b2f9-9b529ee7f8a0","name":{"pool_name":"oxp_7f3060af-058f-4f52-ab80-902bd13e7ef4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::8]:32345"},"services":[{"id":"57d77171-104e-4977-b2f9-9b529ee7f8a0","details":{"type":"crucible","address":"[fd00:1122:3344:10f::8]:32345"}}]},"root":"/pool/ext/7f3060af-058f-4f52-ab80-902bd13e7ef4/crypt/zone"},{"zone":{"id":"b0371ccf-67da-4562-baf2-eaabe5243e9b","zone_type":"crucible","addresses":["fd00:1122:3344:10f::7"],"dataset":{"id":"b0371ccf-67da-4562-baf2-eaabe5243e9b","name":{"pool_name":"oxp_58ae04cb-26ff-4e30-a20d-9f847bafba4d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::7]:32345"},"services":[{"id":"b0371ccf-67da-4562-baf2-eaabe5243e9b","details":{"type":"crucible","address":"[fd00:1122:3344:10f::7]:32345"}}]},"root":"/pool/ext/125ddcda-f94b-46bc-a10a-94e9acf40265/crypt/zone"},{"zone":{"id":"ae3791ff-2657-4252-bd61-58ec5dc237cd","zone_type":"crucible","addresses":["fd00:1122:3344:10f::9"],"dataset":{"id":"ae3791ff-2657-4252-bd61-58ec5dc237cd","name":{"pool_name":"oxp_125ddcda-f94b-46bc-a10a-94e9acf40265","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::9]:32345"},"services":[{"id":"ae3791ff-2657-4252-bd61-58ec5dc237cd","details":{"type":"crucible","address":"[fd00:1122:3344:10f::9]:32345"}}]},"root":"/pool/ext/58ae04cb-26ff-4e30-a20d-9f847bafba4d/crypt/zone"},{"zone":{"id":"73f865dc-5db7-48c6-9dc4-dff56dd8c045","zone_type":"crucible_pantry","addresses":["fd00:1122:3344:10f::3"],"dataset":null,"services":[{"id":"73f865dc-5db7-48c6-9dc4-dff56dd8c045","details":{"type":"crucible_pantry","address":"[fd00:1122:3344:10f::3]:17000"}}]},"root":"/pool/ext/11a63469-4f57-4976-8620-0055bf82dc97/crypt/zone"},{"zone":{"id":"e5d0170a-0d60-4c51-8f72-4c301979690e","zone_type":"crucible","addresses":["fd00:1122:3344:10f::6"],"dataset":{"id":"e5d0170a-0d60-4c51-8f72-4c301979690e","name":{"pool_name":"oxp_efe4cbab-2a39-4d7d-ae6c-83eb3ab8d4b5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::6]:32345"},"services":[{"id":"e5d0170a-0d60-4c51-8f72-4c301979690e","details":{"type":"crucible","address":"[fd00:1122:3344:10f::6]:32345"}}]},"root":"/pool/ext/6a73a62c-c636-4557-af45-042cb287aee6/crypt/zone"},{"zone":{"id":"ea6894de-c575-43bc-86e9-65b8a58499ff","zone_type":"crucible","addresses":["fd00:1122:3344:10f::a"],"dataset":{"id":"ea6894de-c575-43bc-86e9-65b8a58499ff","name":{"pool_name":"oxp_a87dc882-8b88-4a99-9628-5db79072cffa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::a]:32345"},"services":[{"id":"ea6894de-c575-43bc-86e9-65b8a58499ff","details":{"type":"crucible","address":"[fd00:1122:3344:10f::a]:32345"}}]},"root":"/pool/ext/11a63469-4f57-4976-8620-0055bf82dc97/crypt/zone"},{"zone":{"id":"3081dc99-4fa9-4238-adfa-b9ca381c1f7b","zone_type":"crucible","addresses":["fd00:1122:3344:10f::b"],"da
taset":{"id":"3081dc99-4fa9-4238-adfa-b9ca381c1f7b","name":{"pool_name":"oxp_6a73a62c-c636-4557-af45-042cb287aee6","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::b]:32345"},"services":[{"id":"3081dc99-4fa9-4238-adfa-b9ca381c1f7b","details":{"type":"crucible","address":"[fd00:1122:3344:10f::b]:32345"}}]},"root":"/pool/ext/a87dc882-8b88-4a99-9628-5db79072cffa/crypt/zone"},{"zone":{"id":"b4a3d7c8-487d-4d76-ae4e-a6a51595a5a6","zone_type":"crucible","addresses":["fd00:1122:3344:10f::d"],"dataset":{"id":"b4a3d7c8-487d-4d76-ae4e-a6a51595a5a6","name":{"pool_name":"oxp_a12f87ee-9918-4269-9de4-4bad4fb41caa","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::d]:32345"},"services":[{"id":"b4a3d7c8-487d-4d76-ae4e-a6a51595a5a6","details":{"type":"crucible","address":"[fd00:1122:3344:10f::d]:32345"}}]},"root":"/pool/ext/a12f87ee-9918-4269-9de4-4bad4fb41caa/crypt/zone"},{"zone":{"id":"5ebcee26-f76c-4206-8d81-584ac138d3b9","zone_type":"crucible","addresses":["fd00:1122:3344:10f::4"],"dataset":{"id":"5ebcee26-f76c-4206-8d81-584ac138d3b9","name":{"pool_name":"oxp_27f1917e-fb69-496a-9d40-8ef0d0c0ee55","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::4]:32345"},"services":[{"id":"5ebcee26-f76c-4206-8d81-584ac138d3b9","details":{"type":"crucible","address":"[fd00:1122:3344:10f::4]:32345"}}]},"root":"/pool/ext/58ae04cb-26ff-4e30-a20d-9f847bafba4d/crypt/zone"},{"zone":{"id":"90b2bc57-3a2a-4117-bb6d-7eda7542329a","zone_type":"crucible","addresses":["fd00:1122:3344:10f::5"],"dataset":{"id":"90b2bc57-3a2a-4117-bb6d-7eda7542329a","name":{"pool_name":"oxp_a222e405-40f6-4fdd-9146-94f7d94ed08a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10f::5]:32345"},"services":[{"id":"90b2bc57-3a2a-4117-bb6d-7eda7542329a","details":{"type":"crucible","address":"[fd00:1122:3344:10f::5]:32345"}}]},"root":"/pool/ext/a12f87ee-9918-4269-9de4-4bad4fb41caa/crypt/zone"},{"zone":{"id":"0fb540af-58d3-4abc-bfad-e49765c2b1ee","zone_type":"ntp","addresses":["fd00:1122:3344:10f::e"],"dataset":null,"services":[{"id":"0fb540af-58d3-4abc-bfad-e49765c2b1ee","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10f::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/58ae04cb-26ff-4e30-a20d-9f847bafba4d/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled30.json b/sled-agent/tests/old-service-ledgers/rack3-sled30.json new file mode 100644 index 0000000000..e919de3488 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled30.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"dda0f1c6-84a5-472c-b350-a799c8d3d0eb","zone_type":"crucible","addresses":["fd00:1122:3344:115::8"],"dataset":{"id":"dda0f1c6-84a5-472c-b350-a799c8d3d0eb","name":{"pool_name":"oxp_028b6c9e-5a0e-43d2-a8ed-a5946cf62924","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::8]:32345"},"services":[{"id":"dda0f1c6-84a5-472c-b350-a799c8d3d0eb","details":{"type":"crucible","address":"[fd00:1122:3344:115::8]:32345"}}]},"root":"/pool/ext/b8d84b9c-a65e-4c86-8196-69da5317ae63/crypt/zone"},{"zone":{"id":"157672f9-113f-48b7-9808-dff3c3e67dcd","zone_type":"crucible","addresses":["fd00:1122:3344:115::a"],"dataset":{"id":"157672f9-113f-48b7-9808-dff3c3e67dcd","name":{"pool_name":"oxp_4fdca201-b37e-4072-a1cc-3cb7705954eb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::a]:32345"},"services":[{"id":"157672f9-113f-48b7-9808-dff3c3e67dcd","details":{"type":"crucible","address":"[fd00:1122:3344:115::a]:32345"}}]},"root":"/pool/ext/b8d84b9c-a65e-4c86-8196-69da5317ae63/crypt/zone"},{"zone":{"id":"5a7d4f67-a70f-4d8b-8d35-4dc600991fb5","zone_type":"crucible","addresses":["fd00:1122:3344:115::5"],"dataset":{"id":"5a7d4f67-a70f-4d8b-8d35-4dc600991fb5","name":{"pool_name":"oxp_11a991e5-19a9-48b0-8186-34249ef67957","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::5]:32345"},"services":[{"id":"5a7d4f67-a70f-4d8b-8d35-4dc600991fb5","details":{"type":"crucible","address":"[fd00:1122:3344:115::5]:32345"}}]},"root":"/pool/ext/1e9c9764-aaa4-4681-b110-a937b4c52748/crypt/zone"},{"zone":{"id":"c7036645-b680-4816-834f-8ae1af24c159","zone_type":"crucible","addresses":["fd00:1122:3344:115::b"],"dataset":{"id":"c7036645-b680-4816-834f-8ae1af24c159","name":{"pool_name":"oxp_0780be56-c13d-4c6a-a1ac-37753a0da820","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::b]:32345"},"services":[{"id":"c7036645-b680-4816-834f-8ae1af24c159","details":{"type":"crucible","address":"[fd00:1122:3344:115::b]:32345"}}]},"root":"/pool/ext/80a8d756-ee22-4c88-8b5b-4a46f7eca249/crypt/zone"},{"zone":{"id":"45e47e4b-708f-40b5-a8c8-fbfd73696d45","zone_type":"crucible","addresses":["fd00:1122:3344:115::7"],"dataset":{"id":"45e47e4b-708f-40b5-a8c8-fbfd73696d45","name":{"pool_name":"oxp_80a8d756-ee22-4c88-8b5b-4a46f7eca249","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::7]:32345"},"services":[{"id":"45e47e4b-708f-40b5-a8c8-fbfd73696d45","details":{"type":"crucible","address":"[fd00:1122:3344:115::7]:32345"}}]},"root":"/pool/ext/4fdca201-b37e-4072-a1cc-3cb7705954eb/crypt/zone"},{"zone":{"id":"e805b0c1-3f80-49da-8dc1-caaf843e5003","zone_type":"crucible","addresses":["fd00:1122:3344:115::c"],"dataset":{"id":"e805b0c1-3f80-49da-8dc1-caaf843e5003","name":{"pool_name":"oxp_d54e1ed7-e589-4413-a487-6e9a257104e7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::c]:32345"},"services":[{"id":"e805b0c1-3f80-49da-8dc1-caaf843e5003","details":{"type":"crucible","address":"[fd00:1122:3344:115::c]:32345"}}]},"root":"/pool/ext/d54e1ed7-e589-4413-a487-6e9a257104e7/crypt/zone"},{"zone":{"id":"e47d3f81-3df6-4c35-bec6-41277bc74c07","zone_type":"crucible","addresses":["fd00:1122:3344:115::4"],"dataset":{"id":"e47d3f81-3df6-4c35-bec6-41277bc74c07","name":{"pool_name":"oxp_b8d84b9c-a65e-4c86-8196-69da5317ae63","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::4]:32345"},"services":[{"id":"e47d3f81-3df6-4c35-bec6-41277bc74c07","details":{"type":"crucible","address":"[fd00:1122:3344:115::4]:32345"}}]},"root":"/pool/ext
/772b3aaa-3501-4dc7-9b3d-048b8b1f7970/crypt/zone"},{"zone":{"id":"2a796a69-b061-44c7-b2df-35bc611f10f5","zone_type":"crucible","addresses":["fd00:1122:3344:115::6"],"dataset":{"id":"2a796a69-b061-44c7-b2df-35bc611f10f5","name":{"pool_name":"oxp_73abe9e0-d38e-48fc-bdec-b094bfa5670d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::6]:32345"},"services":[{"id":"2a796a69-b061-44c7-b2df-35bc611f10f5","details":{"type":"crucible","address":"[fd00:1122:3344:115::6]:32345"}}]},"root":"/pool/ext/028b6c9e-5a0e-43d2-a8ed-a5946cf62924/crypt/zone"},{"zone":{"id":"4e1d2af1-8ef4-4762-aa80-b08da08b45bb","zone_type":"crucible","addresses":["fd00:1122:3344:115::3"],"dataset":{"id":"4e1d2af1-8ef4-4762-aa80-b08da08b45bb","name":{"pool_name":"oxp_772b3aaa-3501-4dc7-9b3d-048b8b1f7970","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::3]:32345"},"services":[{"id":"4e1d2af1-8ef4-4762-aa80-b08da08b45bb","details":{"type":"crucible","address":"[fd00:1122:3344:115::3]:32345"}}]},"root":"/pool/ext/d54e1ed7-e589-4413-a487-6e9a257104e7/crypt/zone"},{"zone":{"id":"fb1b10d5-b7cb-416d-98fc-b5d3bc02d495","zone_type":"crucible","addresses":["fd00:1122:3344:115::9"],"dataset":{"id":"fb1b10d5-b7cb-416d-98fc-b5d3bc02d495","name":{"pool_name":"oxp_1e9c9764-aaa4-4681-b110-a937b4c52748","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:115::9]:32345"},"services":[{"id":"fb1b10d5-b7cb-416d-98fc-b5d3bc02d495","details":{"type":"crucible","address":"[fd00:1122:3344:115::9]:32345"}}]},"root":"/pool/ext/b8d84b9c-a65e-4c86-8196-69da5317ae63/crypt/zone"},{"zone":{"id":"5155463c-8a09-45a5-ad1b-817f2e93b284","zone_type":"ntp","addresses":["fd00:1122:3344:115::d"],"dataset":null,"services":[{"id":"5155463c-8a09-45a5-ad1b-817f2e93b284","details":{"type":"internal_ntp","address":"[fd00:1122:3344:115::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/772b3aaa-3501-4dc7-9b3d-048b8b1f7970/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled31.json b/sled-agent/tests/old-service-ledgers/rack3-sled31.json new file mode 100644 index 0000000000..d984227227 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled31.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"a0eae689-8e6b-4297-bb3d-8b7ffc5c4a07","zone_type":"crucible","addresses":["fd00:1122:3344:102::c"],"dataset":{"id":"a0eae689-8e6b-4297-bb3d-8b7ffc5c4a07","name":{"pool_name":"oxp_274cb567-fd74-4e00-b9c7-6ca367b3fda4","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::c]:32345"},"services":[{"id":"a0eae689-8e6b-4297-bb3d-8b7ffc5c4a07","details":{"type":"crucible","address":"[fd00:1122:3344:102::c]:32345"}}]},"root":"/pool/ext/1443b190-de16-42b0-b881-e87e875dd507/crypt/zone"},{"zone":{"id":"9cea406d-451e-4328-9052-b58487f799a5","zone_type":"crucible","addresses":["fd00:1122:3344:102::b"],"dataset":{"id":"9cea406d-451e-4328-9052-b58487f799a5","name":{"pool_name":"oxp_89c7f72e-632c-462b-a515-01cd80683711","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::b]:32345"},"services":[{"id":"9cea406d-451e-4328-9052-b58487f799a5","details":{"type":"crucible","address":"[fd00:1122:3344:102::b]:32345"}}]},"root":"/pool/ext/274cb567-fd74-4e00-b9c7-6ca367b3fda4/crypt/zone"},{"zone":{"id":"9c7dad7e-7f60-4bf4-8efc-0883a17e7cf6","zone_type":"crucible","addresses":["fd00:1122:3344:102::6"],"dataset":{"id":"9c7dad7e-7f60-4bf4-8efc-0883a17e7cf6","name":{"pool_name":"oxp_2c8e5637-b989-4b8f-82ac-ff2e9102b560","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::6]:32345"},"services":[{"id":"9c7dad7e-7f60-4bf4-8efc-0883a17e7cf6","details":{"type":"crucible","address":"[fd00:1122:3344:102::6]:32345"}}]},"root":"/pool/ext/1443b190-de16-42b0-b881-e87e875dd507/crypt/zone"},{"zone":{"id":"73015cba-79c6-4a67-97d8-fa0819cbf750","zone_type":"crucible","addresses":["fd00:1122:3344:102::a"],"dataset":{"id":"73015cba-79c6-4a67-97d8-fa0819cbf750","name":{"pool_name":"oxp_fa62108e-f7bb-4f6d-86f3-8094a1ea8352","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::a]:32345"},"services":[{"id":"73015cba-79c6-4a67-97d8-fa0819cbf750","details":{"type":"crucible","address":"[fd00:1122:3344:102::a]:32345"}}]},"root":"/pool/ext/2c8e5637-b989-4b8f-82ac-ff2e9102b560/crypt/zone"},{"zone":{"id":"f9ca3097-072e-4e7f-9f50-eb7c7ae39b6f","zone_type":"crucible","addresses":["fd00:1122:3344:102::5"],"dataset":{"id":"f9ca3097-072e-4e7f-9f50-eb7c7ae39b6f","name":{"pool_name":"oxp_42c6602c-2ccf-48ce-8344-693c832fd693","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::5]:32345"},"services":[{"id":"f9ca3097-072e-4e7f-9f50-eb7c7ae39b6f","details":{"type":"crucible","address":"[fd00:1122:3344:102::5]:32345"}}]},"root":"/pool/ext/2c8e5637-b989-4b8f-82ac-ff2e9102b560/crypt/zone"},{"zone":{"id":"e7855e05-a125-4a80-ac2c-8a2db96e1bf8","zone_type":"crucible","addresses":["fd00:1122:3344:102::7"],"dataset":{"id":"e7855e05-a125-4a80-ac2c-8a2db96e1bf8","name":{"pool_name":"oxp_1f72afd3-d2aa-46a8-b81a-54dbcc2f6317","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::7]:32345"},"services":[{"id":"e7855e05-a125-4a80-ac2c-8a2db96e1bf8","details":{"type":"crucible","address":"[fd00:1122:3344:102::7]:32345"}}]},"root":"/pool/ext/42c6602c-2ccf-48ce-8344-693c832fd693/crypt/zone"},{"zone":{"id":"e5de9bc9-e996-4fea-8318-ad7a8a6be4a3","zone_type":"crucible","addresses":["fd00:1122:3344:102::4"],"dataset":{"id":"e5de9bc9-e996-4fea-8318-ad7a8a6be4a3","name":{"pool_name":"oxp_1443b190-de16-42b0-b881-e87e875dd507","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::4]:32345"},"services":[{"id":"e5de9bc9-e996-4fea-8318-ad7a8a6be4a3","details":{"type":"crucible","address":"[fd00:1122:3344:102::4]:32345"}}]},"root":"/pool/ext
/89c7f72e-632c-462b-a515-01cd80683711/crypt/zone"},{"zone":{"id":"cd0d0aac-44ff-4566-9260-a64ae6cecef4","zone_type":"crucible","addresses":["fd00:1122:3344:102::8"],"dataset":{"id":"cd0d0aac-44ff-4566-9260-a64ae6cecef4","name":{"pool_name":"oxp_92c0d1f6-cb4d-4ddb-b5ba-979fb3491812","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::8]:32345"},"services":[{"id":"cd0d0aac-44ff-4566-9260-a64ae6cecef4","details":{"type":"crucible","address":"[fd00:1122:3344:102::8]:32345"}}]},"root":"/pool/ext/89c7f72e-632c-462b-a515-01cd80683711/crypt/zone"},{"zone":{"id":"a8230592-0e7a-46c8-a653-7587a27f05bf","zone_type":"crucible","addresses":["fd00:1122:3344:102::9"],"dataset":{"id":"a8230592-0e7a-46c8-a653-7587a27f05bf","name":{"pool_name":"oxp_1b7873de-99fd-454f-b576-bff695524133","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::9]:32345"},"services":[{"id":"a8230592-0e7a-46c8-a653-7587a27f05bf","details":{"type":"crucible","address":"[fd00:1122:3344:102::9]:32345"}}]},"root":"/pool/ext/92c0d1f6-cb4d-4ddb-b5ba-979fb3491812/crypt/zone"},{"zone":{"id":"c19ffbb1-4dc1-4825-a3cf-080e9b543b16","zone_type":"crucible","addresses":["fd00:1122:3344:102::d"],"dataset":{"id":"c19ffbb1-4dc1-4825-a3cf-080e9b543b16","name":{"pool_name":"oxp_67823df7-511c-4984-b98c-7a8f5c40c22d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:102::d]:32345"},"services":[{"id":"c19ffbb1-4dc1-4825-a3cf-080e9b543b16","details":{"type":"crucible","address":"[fd00:1122:3344:102::d]:32345"}}]},"root":"/pool/ext/1443b190-de16-42b0-b881-e87e875dd507/crypt/zone"},{"zone":{"id":"ff30fe7c-51f3-43b9-a788-d8f94a7bb028","zone_type":"cockroach_db","addresses":["fd00:1122:3344:102::3"],"dataset":{"id":"ff30fe7c-51f3-43b9-a788-d8f94a7bb028","name":{"pool_name":"oxp_1443b190-de16-42b0-b881-e87e875dd507","kind":{"type":"cockroach_db"}},"service_address":"[fd00:1122:3344:102::3]:32221"},"services":[{"id":"ff30fe7c-51f3-43b9-a788-d8f94a7bb028","details":{"type":"cockroach_db","address":"[fd00:1122:3344:102::3]:32221"}}]},"root":"/pool/ext/fa62108e-f7bb-4f6d-86f3-8094a1ea8352/crypt/zone"},{"zone":{"id":"16b50c55-8117-4efd-aabf-0273677b89d5","zone_type":"ntp","addresses":["fd00:1122:3344:102::e"],"dataset":null,"services":[{"id":"16b50c55-8117-4efd-aabf-0273677b89d5","details":{"type":"internal_ntp","address":"[fd00:1122:3344:102::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/fa62108e-f7bb-4f6d-86f3-8094a1ea8352/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled4.json b/sled-agent/tests/old-service-ledgers/rack3-sled4.json new file mode 100644 index 0000000000..e9e5ce5569 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled4.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"22452953-ee80-4659-a555-8e027bf205b0","zone_type":"crucible","addresses":["fd00:1122:3344:10c::4"],"dataset":{"id":"22452953-ee80-4659-a555-8e027bf205b0","name":{"pool_name":"oxp_92ba1667-a6f7-4913-9b00-14825384c7bf","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::4]:32345"},"services":[{"id":"22452953-ee80-4659-a555-8e027bf205b0","details":{"type":"crucible","address":"[fd00:1122:3344:10c::4]:32345"}}]},"root":"/pool/ext/ab62b941-5f84-42c7-929d-295b20efffe7/crypt/zone"},{"zone":{"id":"9a5a2fcf-44a0-4468-979a-a71686cef627","zone_type":"crucible","addresses":["fd00:1122:3344:10c::3"],"dataset":{"id":"9a5a2fcf-44a0-4468-979a-a71686cef627","name":{"pool_name":"oxp_dbfdc981-1b81-4d7d-9449-9530890b199a","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::3]:32345"},"services":[{"id":"9a5a2fcf-44a0-4468-979a-a71686cef627","details":{"type":"crucible","address":"[fd00:1122:3344:10c::3]:32345"}}]},"root":"/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone"},{"zone":{"id":"a014f12e-2636-4258-af76-e01d9b8d1c1f","zone_type":"crucible","addresses":["fd00:1122:3344:10c::b"],"dataset":{"id":"a014f12e-2636-4258-af76-e01d9b8d1c1f","name":{"pool_name":"oxp_ab62b941-5f84-42c7-929d-295b20efffe7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::b]:32345"},"services":[{"id":"a014f12e-2636-4258-af76-e01d9b8d1c1f","details":{"type":"crucible","address":"[fd00:1122:3344:10c::b]:32345"}}]},"root":"/pool/ext/a624a843-1c4e-41c3-a1d2-4be7a6c57e9b/crypt/zone"},{"zone":{"id":"431768b8-26ba-4ab4-b616-9e183bb79b8b","zone_type":"crucible","addresses":["fd00:1122:3344:10c::7"],"dataset":{"id":"431768b8-26ba-4ab4-b616-9e183bb79b8b","name":{"pool_name":"oxp_7c121177-3210-4457-9b42-3657add6e166","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::7]:32345"},"services":[{"id":"431768b8-26ba-4ab4-b616-9e183bb79b8b","details":{"type":"crucible","address":"[fd00:1122:3344:10c::7]:32345"}}]},"root":"/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone"},{"zone":{"id":"22992c56-bd5a-4d0f-86c5-d6f8e87b7bbb","zone_type":"crucible","addresses":["fd00:1122:3344:10c::9"],"dataset":{"id":"22992c56-bd5a-4d0f-86c5-d6f8e87b7bbb","name":{"pool_name":"oxp_842bdd28-196e-4b18-83db-68bd81176a44","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::9]:32345"},"services":[{"id":"22992c56-bd5a-4d0f-86c5-d6f8e87b7bbb","details":{"type":"crucible","address":"[fd00:1122:3344:10c::9]:32345"}}]},"root":"/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone"},{"zone":{"id":"de376149-aa45-4660-9ae6-15e8ba4a4233","zone_type":"crucible","addresses":["fd00:1122:3344:10c::5"],"dataset":{"id":"de376149-aa45-4660-9ae6-15e8ba4a4233","name":{"pool_name":"oxp_25856a84-6707-4b94-81d1-b43d5bc990d7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::5]:32345"},"services":[{"id":"de376149-aa45-4660-9ae6-15e8ba4a4233","details":{"type":"crucible","address":"[fd00:1122:3344:10c::5]:32345"}}]},"root":"/pool/ext/7c121177-3210-4457-9b42-3657add6e166/crypt/zone"},{"zone":{"id":"ceeba69d-8c0a-47df-a37b-7f1b90f23016","zone_type":"crucible","addresses":["fd00:1122:3344:10c::a"],"dataset":{"id":"ceeba69d-8c0a-47df-a37b-7f1b90f23016","name":{"pool_name":"oxp_a624a843-1c4e-41c3-a1d2-4be7a6c57e9b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::a]:32345"},"services":[{"id":"ceeba69d-8c0a-47df-a37b-7f1b90f23016","details":{"type":"crucible","address":"[fd00:1122:3344:10c::a]:32345"}}]},"root":"/pool/ext
/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone"},{"zone":{"id":"65293ce4-2e63-4336-9207-3c61f58667f9","zone_type":"crucible","addresses":["fd00:1122:3344:10c::c"],"dataset":{"id":"65293ce4-2e63-4336-9207-3c61f58667f9","name":{"pool_name":"oxp_74ac4da9-cdae-4c08-8431-11211184aa09","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::c]:32345"},"services":[{"id":"65293ce4-2e63-4336-9207-3c61f58667f9","details":{"type":"crucible","address":"[fd00:1122:3344:10c::c]:32345"}}]},"root":"/pool/ext/842bdd28-196e-4b18-83db-68bd81176a44/crypt/zone"},{"zone":{"id":"e8f55a5d-65f9-436c-bc25-1d1a7070e876","zone_type":"crucible","addresses":["fd00:1122:3344:10c::6"],"dataset":{"id":"e8f55a5d-65f9-436c-bc25-1d1a7070e876","name":{"pool_name":"oxp_9bfe385c-16dd-4209-bc0b-f28ae75d58e3","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::6]:32345"},"services":[{"id":"e8f55a5d-65f9-436c-bc25-1d1a7070e876","details":{"type":"crucible","address":"[fd00:1122:3344:10c::6]:32345"}}]},"root":"/pool/ext/92ba1667-a6f7-4913-9b00-14825384c7bf/crypt/zone"},{"zone":{"id":"2dfbd4c6-afbf-4c8c-bf40-764f02727852","zone_type":"crucible","addresses":["fd00:1122:3344:10c::8"],"dataset":{"id":"2dfbd4c6-afbf-4c8c-bf40-764f02727852","name":{"pool_name":"oxp_55eb093d-6b6f-418c-9767-09afe4c51fff","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10c::8]:32345"},"services":[{"id":"2dfbd4c6-afbf-4c8c-bf40-764f02727852","details":{"type":"crucible","address":"[fd00:1122:3344:10c::8]:32345"}}]},"root":"/pool/ext/dbfdc981-1b81-4d7d-9449-9530890b199a/crypt/zone"},{"zone":{"id":"8c73baf7-1a58-4e2c-b4d1-966c89a18d03","zone_type":"ntp","addresses":["fd00:1122:3344:10c::d"],"dataset":null,"services":[{"id":"8c73baf7-1a58-4e2c-b4d1-966c89a18d03","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10c::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/842bdd28-196e-4b18-83db-68bd81176a44/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled5.json b/sled-agent/tests/old-service-ledgers/rack3-sled5.json new file mode 100644 index 0000000000..ea7b5ec40a --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled5.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"2f488e7b-fd93-48a6-8b2b-61f6e8336268","zone_type":"crucible","addresses":["fd00:1122:3344:101::b"],"dataset":{"id":"2f488e7b-fd93-48a6-8b2b-61f6e8336268","name":{"pool_name":"oxp_5840a3b7-f765-45d3-8a41-7f543f936bee","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::b]:32345"},"services":[{"id":"2f488e7b-fd93-48a6-8b2b-61f6e8336268","details":{"type":"crucible","address":"[fd00:1122:3344:101::b]:32345"}}]},"root":"/pool/ext/dd084b76-1130-4ad3-9196-6b02be607fe9/crypt/zone"},{"zone":{"id":"1ed5fd3f-933a-4921-a91f-5c286823f8d4","zone_type":"crucible","addresses":["fd00:1122:3344:101::a"],"dataset":{"id":"1ed5fd3f-933a-4921-a91f-5c286823f8d4","name":{"pool_name":"oxp_c1e807e7-b64a-4dbd-b845-ffed0b9a54f1","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::a]:32345"},"services":[{"id":"1ed5fd3f-933a-4921-a91f-5c286823f8d4","details":{"type":"crucible","address":"[fd00:1122:3344:101::a]:32345"}}]},"root":"/pool/ext/be06ea9c-df86-4fec-b5dd-8809710893af/crypt/zone"},{"zone":{"id":"0f8f1013-465d-4b49-b55d-f0b9bf6f789a","zone_type":"crucible","addresses":["fd00:1122:3344:101::6"],"dataset":{"id":"0f8f1013-465d-4b49-b55d-f0b9bf6f789a","name":{"pool_name":"oxp_4dfa7003-0305-47f5-b23d-88a228c1e12e","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::6]:32345"},"services":[{"id":"0f8f1013-465d-4b49-b55d-f0b9bf6f789a","details":{"type":"crucible","address":"[fd00:1122:3344:101::6]:32345"}}]},"root":"/pool/ext/be06ea9c-df86-4fec-b5dd-8809710893af/crypt/zone"},{"zone":{"id":"2e4ef017-6c62-40bc-bab5-f2e01addad22","zone_type":"crucible","addresses":["fd00:1122:3344:101::7"],"dataset":{"id":"2e4ef017-6c62-40bc-bab5-f2e01addad22","name":{"pool_name":"oxp_d94e9c58-e6d1-444b-b7d8-19ac17dea042","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::7]:32345"},"services":[{"id":"2e4ef017-6c62-40bc-bab5-f2e01addad22","details":{"type":"crucible","address":"[fd00:1122:3344:101::7]:32345"}}]},"root":"/pool/ext/c1e807e7-b64a-4dbd-b845-ffed0b9a54f1/crypt/zone"},{"zone":{"id":"6a0baf13-a80b-4778-a0ab-a69cd851de2d","zone_type":"crucible","addresses":["fd00:1122:3344:101::9"],"dataset":{"id":"6a0baf13-a80b-4778-a0ab-a69cd851de2d","name":{"pool_name":"oxp_be06ea9c-df86-4fec-b5dd-8809710893af","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::9]:32345"},"services":[{"id":"6a0baf13-a80b-4778-a0ab-a69cd851de2d","details":{"type":"crucible","address":"[fd00:1122:3344:101::9]:32345"}}]},"root":"/pool/ext/a9d419d4-5915-4a40-baa3-3512785de034/crypt/zone"},{"zone":{"id":"391ec257-fd47-4cc8-9bfa-49a0747a9a67","zone_type":"crucible","addresses":["fd00:1122:3344:101::8"],"dataset":{"id":"391ec257-fd47-4cc8-9bfa-49a0747a9a67","name":{"pool_name":"oxp_a9d419d4-5915-4a40-baa3-3512785de034","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::8]:32345"},"services":[{"id":"391ec257-fd47-4cc8-9bfa-49a0747a9a67","details":{"type":"crucible","address":"[fd00:1122:3344:101::8]:32345"}}]},"root":"/pool/ext/709d5d04-5dff-4558-8b5d-fbc2a7d83036/crypt/zone"},{"zone":{"id":"fd8e615a-f170-4da9-b8d0-2a5a123d8682","zone_type":"crucible_pantry","addresses":["fd00:1122:3344:101::3"],"dataset":null,"services":[{"id":"fd8e615a-f170-4da9-b8d0-2a5a123d8682","details":{"type":"crucible_pantry","address":"[fd00:1122:3344:101::3]:17000"}}]},"root":"/pool/ext/dd084b76-1130-4ad3-9196-6b02be607fe9/crypt/zone"},{"zone":{"id":"f8a793f4-cd08-49ec-8fee-6bcd37092fdc","zone_type":"crucible","addresses":["fd00:1122:3344:101::c"],"da
taset":{"id":"f8a793f4-cd08-49ec-8fee-6bcd37092fdc","name":{"pool_name":"oxp_709d5d04-5dff-4558-8b5d-fbc2a7d83036","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::c]:32345"},"services":[{"id":"f8a793f4-cd08-49ec-8fee-6bcd37092fdc","details":{"type":"crucible","address":"[fd00:1122:3344:101::c]:32345"}}]},"root":"/pool/ext/d94e9c58-e6d1-444b-b7d8-19ac17dea042/crypt/zone"},{"zone":{"id":"c67d44be-d6b8-4a08-a7e0-3ab300749ad6","zone_type":"crucible","addresses":["fd00:1122:3344:101::4"],"dataset":{"id":"c67d44be-d6b8-4a08-a7e0-3ab300749ad6","name":{"pool_name":"oxp_231cd696-2839-4a9a-ae42-6d875a98a797","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::4]:32345"},"services":[{"id":"c67d44be-d6b8-4a08-a7e0-3ab300749ad6","details":{"type":"crucible","address":"[fd00:1122:3344:101::4]:32345"}}]},"root":"/pool/ext/709d5d04-5dff-4558-8b5d-fbc2a7d83036/crypt/zone"},{"zone":{"id":"e91b4957-8165-451d-9fa5-090c3a39f199","zone_type":"crucible","addresses":["fd00:1122:3344:101::d"],"dataset":{"id":"e91b4957-8165-451d-9fa5-090c3a39f199","name":{"pool_name":"oxp_dd084b76-1130-4ad3-9196-6b02be607fe9","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::d]:32345"},"services":[{"id":"e91b4957-8165-451d-9fa5-090c3a39f199","details":{"type":"crucible","address":"[fd00:1122:3344:101::d]:32345"}}]},"root":"/pool/ext/5840a3b7-f765-45d3-8a41-7f543f936bee/crypt/zone"},{"zone":{"id":"5e737b6e-d33d-4a2c-b8c0-3cad9d05a68f","zone_type":"crucible","addresses":["fd00:1122:3344:101::5"],"dataset":{"id":"5e737b6e-d33d-4a2c-b8c0-3cad9d05a68f","name":{"pool_name":"oxp_8fa4f837-c6f3-4c65-88d4-21eb3cd7ffee","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:101::5]:32345"},"services":[{"id":"5e737b6e-d33d-4a2c-b8c0-3cad9d05a68f","details":{"type":"crucible","address":"[fd00:1122:3344:101::5]:32345"}}]},"root":"/pool/ext/dd084b76-1130-4ad3-9196-6b02be607fe9/crypt/zone"},{"zone":{"id":"7e6b7816-b1a6-40f3-894a-a5d5c0571dbb","zone_type":"ntp","addresses":["fd00:1122:3344:101::e"],"dataset":null,"services":[{"id":"7e6b7816-b1a6-40f3-894a-a5d5c0571dbb","details":{"type":"internal_ntp","address":"[fd00:1122:3344:101::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/be06ea9c-df86-4fec-b5dd-8809710893af/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled6.json b/sled-agent/tests/old-service-ledgers/rack3-sled6.json new file mode 100644 index 0000000000..2c499813cd --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled6.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"eafffae7-69fd-49e1-9541-7cf237ab12b3","zone_type":"crucible","addresses":["fd00:1122:3344:110::3"],"dataset":{"id":"eafffae7-69fd-49e1-9541-7cf237ab12b3","name":{"pool_name":"oxp_929404cd-2522-4440-b21c-91d466a9a7e0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::3]:32345"},"services":[{"id":"eafffae7-69fd-49e1-9541-7cf237ab12b3","details":{"type":"crucible","address":"[fd00:1122:3344:110::3]:32345"}}]},"root":"/pool/ext/aff390ed-8d70-49fa-9000-5420b54ab118/crypt/zone"},{"zone":{"id":"f4bccf15-d69f-402d-9bd2-7959a4cb2823","zone_type":"crucible","addresses":["fd00:1122:3344:110::9"],"dataset":{"id":"f4bccf15-d69f-402d-9bd2-7959a4cb2823","name":{"pool_name":"oxp_f80f96be-a3d7-490a-96a7-faf7da80a579","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::9]:32345"},"services":[{"id":"f4bccf15-d69f-402d-9bd2-7959a4cb2823","details":{"type":"crucible","address":"[fd00:1122:3344:110::9]:32345"}}]},"root":"/pool/ext/6bcd54c8-d4a8-429d-8f17-cf02615eb063/crypt/zone"},{"zone":{"id":"82e51c9d-c187-4baa-8307-e46eeafc5ff2","zone_type":"crucible","addresses":["fd00:1122:3344:110::5"],"dataset":{"id":"82e51c9d-c187-4baa-8307-e46eeafc5ff2","name":{"pool_name":"oxp_37d86199-6834-49d9-888a-88ff6f281b29","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::5]:32345"},"services":[{"id":"82e51c9d-c187-4baa-8307-e46eeafc5ff2","details":{"type":"crucible","address":"[fd00:1122:3344:110::5]:32345"}}]},"root":"/pool/ext/d2e27e2a-2deb-42ae-84a7-c2d06f3aeb4f/crypt/zone"},{"zone":{"id":"cf667caf-304c-40c4-acce-f0eb05d011ef","zone_type":"crucible","addresses":["fd00:1122:3344:110::8"],"dataset":{"id":"cf667caf-304c-40c4-acce-f0eb05d011ef","name":{"pool_name":"oxp_625c0110-644e-4d63-8321-b85ab5642260","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::8]:32345"},"services":[{"id":"cf667caf-304c-40c4-acce-f0eb05d011ef","details":{"type":"crucible","address":"[fd00:1122:3344:110::8]:32345"}}]},"root":"/pool/ext/d2e27e2a-2deb-42ae-84a7-c2d06f3aeb4f/crypt/zone"},{"zone":{"id":"14e60912-108e-4dd3-984e-2332a183b346","zone_type":"crucible","addresses":["fd00:1122:3344:110::b"],"dataset":{"id":"14e60912-108e-4dd3-984e-2332a183b346","name":{"pool_name":"oxp_fa6470f5-0a4c-4fef-b0b1-57c8749c6cca","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::b]:32345"},"services":[{"id":"14e60912-108e-4dd3-984e-2332a183b346","details":{"type":"crucible","address":"[fd00:1122:3344:110::b]:32345"}}]},"root":"/pool/ext/6c5ab641-3bd4-4d8c-96f4-4f56c1045142/crypt/zone"},{"zone":{"id":"1aacf923-c96f-4bab-acb0-63f28e86eef6","zone_type":"crucible","addresses":["fd00:1122:3344:110::c"],"dataset":{"id":"1aacf923-c96f-4bab-acb0-63f28e86eef6","name":{"pool_name":"oxp_21b0f3ed-d27f-4996-968b-bf2b494d9308","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::c]:32345"},"services":[{"id":"1aacf923-c96f-4bab-acb0-63f28e86eef6","details":{"type":"crucible","address":"[fd00:1122:3344:110::c]:32345"}}]},"root":"/pool/ext/625c0110-644e-4d63-8321-b85ab5642260/crypt/zone"},{"zone":{"id":"b9db0845-04d3-4dc1-84ba-224749562a6c","zone_type":"crucible","addresses":["fd00:1122:3344:110::6"],"dataset":{"id":"b9db0845-04d3-4dc1-84ba-224749562a6c","name":{"pool_name":"oxp_d2e27e2a-2deb-42ae-84a7-c2d06f3aeb4f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::6]:32345"},"services":[{"id":"b9db0845-04d3-4dc1-84ba-224749562a6c","details":{"type":"crucible","address":"[fd00:1122:3344:110::6]:32345"}}]},"root":"/pool/ext
/aff390ed-8d70-49fa-9000-5420b54ab118/crypt/zone"},{"zone":{"id":"38b51865-ee80-4e1b-a40b-3452951f9022","zone_type":"crucible","addresses":["fd00:1122:3344:110::7"],"dataset":{"id":"38b51865-ee80-4e1b-a40b-3452951f9022","name":{"pool_name":"oxp_6bcd54c8-d4a8-429d-8f17-cf02615eb063","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::7]:32345"},"services":[{"id":"38b51865-ee80-4e1b-a40b-3452951f9022","details":{"type":"crucible","address":"[fd00:1122:3344:110::7]:32345"}}]},"root":"/pool/ext/37d86199-6834-49d9-888a-88ff6f281b29/crypt/zone"},{"zone":{"id":"4bc441f6-f7e5-4d68-8751-53ef1e251c47","zone_type":"crucible","addresses":["fd00:1122:3344:110::a"],"dataset":{"id":"4bc441f6-f7e5-4d68-8751-53ef1e251c47","name":{"pool_name":"oxp_6c5ab641-3bd4-4d8c-96f4-4f56c1045142","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::a]:32345"},"services":[{"id":"4bc441f6-f7e5-4d68-8751-53ef1e251c47","details":{"type":"crucible","address":"[fd00:1122:3344:110::a]:32345"}}]},"root":"/pool/ext/21b0f3ed-d27f-4996-968b-bf2b494d9308/crypt/zone"},{"zone":{"id":"d2c20cf8-ed4c-4815-add9-45996364f721","zone_type":"crucible","addresses":["fd00:1122:3344:110::4"],"dataset":{"id":"d2c20cf8-ed4c-4815-add9-45996364f721","name":{"pool_name":"oxp_aff390ed-8d70-49fa-9000-5420b54ab118","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:110::4]:32345"},"services":[{"id":"d2c20cf8-ed4c-4815-add9-45996364f721","details":{"type":"crucible","address":"[fd00:1122:3344:110::4]:32345"}}]},"root":"/pool/ext/6c5ab641-3bd4-4d8c-96f4-4f56c1045142/crypt/zone"},{"zone":{"id":"1bb548cb-889a-411e-8c67-d1b785225180","zone_type":"ntp","addresses":["fd00:1122:3344:110::d"],"dataset":null,"services":[{"id":"1bb548cb-889a-411e-8c67-d1b785225180","details":{"type":"internal_ntp","address":"[fd00:1122:3344:110::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/6bcd54c8-d4a8-429d-8f17-cf02615eb063/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled7.json b/sled-agent/tests/old-service-ledgers/rack3-sled7.json new file mode 100644 index 0000000000..fb701a2bdb --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled7.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"2eb74fa3-71ec-484c-8ffa-3daeab0e4c78","zone_type":"crucible","addresses":["fd00:1122:3344:11d::3"],"dataset":{"id":"2eb74fa3-71ec-484c-8ffa-3daeab0e4c78","name":{"pool_name":"oxp_c6b63fea-e3e2-4806-b8dc-bdfe7b5c3d89","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::3]:32345"},"services":[{"id":"2eb74fa3-71ec-484c-8ffa-3daeab0e4c78","details":{"type":"crucible","address":"[fd00:1122:3344:11d::3]:32345"}}]},"root":"/pool/ext/9f20cbae-7a63-4c31-9386-2ac3cbe12030/crypt/zone"},{"zone":{"id":"9f92bfcf-7435-44a6-8e77-0597f93cd0b4","zone_type":"crucible","addresses":["fd00:1122:3344:11d::7"],"dataset":{"id":"9f92bfcf-7435-44a6-8e77-0597f93cd0b4","name":{"pool_name":"oxp_9fa336f1-2b69-4ebf-9553-e3bab7e3e6ef","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::7]:32345"},"services":[{"id":"9f92bfcf-7435-44a6-8e77-0597f93cd0b4","details":{"type":"crucible","address":"[fd00:1122:3344:11d::7]:32345"}}]},"root":"/pool/ext/e05a6264-63f2-4961-bc14-57b4f65614c0/crypt/zone"},{"zone":{"id":"1bf9aed4-9fd3-4d87-b8e7-7f066d25ec1d","zone_type":"crucible","addresses":["fd00:1122:3344:11d::b"],"dataset":{"id":"1bf9aed4-9fd3-4d87-b8e7-7f066d25ec1d","name":{"pool_name":"oxp_a5a52f47-9c9a-4519-83dc-abc56619495d","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::b]:32345"},"services":[{"id":"1bf9aed4-9fd3-4d87-b8e7-7f066d25ec1d","details":{"type":"crucible","address":"[fd00:1122:3344:11d::b]:32345"}}]},"root":"/pool/ext/cbcad26e-5e52-41b7-9875-1a84d30d8a15/crypt/zone"},{"zone":{"id":"2a722aa7-cd8a-445d-83fe-57fc9b9a8249","zone_type":"crucible","addresses":["fd00:1122:3344:11d::8"],"dataset":{"id":"2a722aa7-cd8a-445d-83fe-57fc9b9a8249","name":{"pool_name":"oxp_1f4b71eb-505f-4706-912c-b13dd3f2eafb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::8]:32345"},"services":[{"id":"2a722aa7-cd8a-445d-83fe-57fc9b9a8249","details":{"type":"crucible","address":"[fd00:1122:3344:11d::8]:32345"}}]},"root":"/pool/ext/a5a52f47-9c9a-4519-83dc-abc56619495d/crypt/zone"},{"zone":{"id":"76af5b23-d833-435c-b848-2a09d9fad9a1","zone_type":"crucible","addresses":["fd00:1122:3344:11d::c"],"dataset":{"id":"76af5b23-d833-435c-b848-2a09d9fad9a1","name":{"pool_name":"oxp_cbcad26e-5e52-41b7-9875-1a84d30d8a15","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::c]:32345"},"services":[{"id":"76af5b23-d833-435c-b848-2a09d9fad9a1","details":{"type":"crucible","address":"[fd00:1122:3344:11d::c]:32345"}}]},"root":"/pool/ext/9f20cbae-7a63-4c31-9386-2ac3cbe12030/crypt/zone"},{"zone":{"id":"3a412bf4-a385-4e66-9ada-a87f6536d6ca","zone_type":"crucible","addresses":["fd00:1122:3344:11d::4"],"dataset":{"id":"3a412bf4-a385-4e66-9ada-a87f6536d6ca","name":{"pool_name":"oxp_e05a6264-63f2-4961-bc14-57b4f65614c0","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::4]:32345"},"services":[{"id":"3a412bf4-a385-4e66-9ada-a87f6536d6ca","details":{"type":"crucible","address":"[fd00:1122:3344:11d::4]:32345"}}]},"root":"/pool/ext/e05a6264-63f2-4961-bc14-57b4f65614c0/crypt/zone"},{"zone":{"id":"99a25fa7-8231-4a46-a6ec-ffc5281db1f8","zone_type":"crucible","addresses":["fd00:1122:3344:11d::5"],"dataset":{"id":"99a25fa7-8231-4a46-a6ec-ffc5281db1f8","name":{"pool_name":"oxp_722494ab-9a2b-481b-ac11-292fded682a5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::5]:32345"},"services":[{"id":"99a25fa7-8231-4a46-a6ec-ffc5281db1f8","details":{"type":"crucible","address":"[fd00:1122:3344:11d::5]:32345"}}]},"root":"/pool/ext
/e05a6264-63f2-4961-bc14-57b4f65614c0/crypt/zone"},{"zone":{"id":"06c7ddc8-9b3e-48ef-9874-0c40874e9877","zone_type":"crucible","addresses":["fd00:1122:3344:11d::a"],"dataset":{"id":"06c7ddc8-9b3e-48ef-9874-0c40874e9877","name":{"pool_name":"oxp_8c3972d1-5b17-4479-88cc-1c33e4344160","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::a]:32345"},"services":[{"id":"06c7ddc8-9b3e-48ef-9874-0c40874e9877","details":{"type":"crucible","address":"[fd00:1122:3344:11d::a]:32345"}}]},"root":"/pool/ext/8c3972d1-5b17-4479-88cc-1c33e4344160/crypt/zone"},{"zone":{"id":"1212b2dc-157d-4bd3-94af-fb5db1d91f24","zone_type":"crucible","addresses":["fd00:1122:3344:11d::9"],"dataset":{"id":"1212b2dc-157d-4bd3-94af-fb5db1d91f24","name":{"pool_name":"oxp_9f20cbae-7a63-4c31-9386-2ac3cbe12030","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::9]:32345"},"services":[{"id":"1212b2dc-157d-4bd3-94af-fb5db1d91f24","details":{"type":"crucible","address":"[fd00:1122:3344:11d::9]:32345"}}]},"root":"/pool/ext/977aa6c3-2026-4178-9948-e09f78008575/crypt/zone"},{"zone":{"id":"b1fb5f2e-b20d-4f4c-9f6f-bbeb1a98dd50","zone_type":"crucible","addresses":["fd00:1122:3344:11d::6"],"dataset":{"id":"b1fb5f2e-b20d-4f4c-9f6f-bbeb1a98dd50","name":{"pool_name":"oxp_977aa6c3-2026-4178-9948-e09f78008575","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:11d::6]:32345"},"services":[{"id":"b1fb5f2e-b20d-4f4c-9f6f-bbeb1a98dd50","details":{"type":"crucible","address":"[fd00:1122:3344:11d::6]:32345"}}]},"root":"/pool/ext/722494ab-9a2b-481b-ac11-292fded682a5/crypt/zone"},{"zone":{"id":"e68dde0f-0647-46db-ae1c-711835c13e25","zone_type":"ntp","addresses":["fd00:1122:3344:11d::d"],"dataset":null,"services":[{"id":"e68dde0f-0647-46db-ae1c-711835c13e25","details":{"type":"internal_ntp","address":"[fd00:1122:3344:11d::d]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/1f4b71eb-505f-4706-912c-b13dd3f2eafb/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled8.json b/sled-agent/tests/old-service-ledgers/rack3-sled8.json new file mode 100644 index 0000000000..cf96f8ae81 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled8.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"85c18b7c-a100-458c-b18d-ecfdacaefac4","zone_type":"crucible","addresses":["fd00:1122:3344:10e::5"],"dataset":{"id":"85c18b7c-a100-458c-b18d-ecfdacaefac4","name":{"pool_name":"oxp_07b266bc-86c3-4a76-9522-8b34ba1ae78c","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::5]:32345"},"services":[{"id":"85c18b7c-a100-458c-b18d-ecfdacaefac4","details":{"type":"crucible","address":"[fd00:1122:3344:10e::5]:32345"}}]},"root":"/pool/ext/5b88e44e-f886-4de8-8a6b-48ea5ed9d70b/crypt/zone"},{"zone":{"id":"db303465-7879-4d86-8da8-a0c7162e5184","zone_type":"crucible","addresses":["fd00:1122:3344:10e::4"],"dataset":{"id":"db303465-7879-4d86-8da8-a0c7162e5184","name":{"pool_name":"oxp_e9488a32-880d-44a2-8948-db0b7e3a35b5","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::4]:32345"},"services":[{"id":"db303465-7879-4d86-8da8-a0c7162e5184","details":{"type":"crucible","address":"[fd00:1122:3344:10e::4]:32345"}}]},"root":"/pool/ext/8d798756-7200-4db4-9faf-f41b75106a63/crypt/zone"},{"zone":{"id":"c44ce6be-512d-4104-9260-a5b8fe373937","zone_type":"crucible","addresses":["fd00:1122:3344:10e::9"],"dataset":{"id":"c44ce6be-512d-4104-9260-a5b8fe373937","name":{"pool_name":"oxp_025dfc06-5aeb-407f-adc8-ba18dc9bba35","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::9]:32345"},"services":[{"id":"c44ce6be-512d-4104-9260-a5b8fe373937","details":{"type":"crucible","address":"[fd00:1122:3344:10e::9]:32345"}}]},"root":"/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone"},{"zone":{"id":"1cfdb5b6-e568-436a-a85f-7fecf1b8eef2","zone_type":"nexus","addresses":["fd00:1122:3344:10e::3"],"dataset":null,"services":[{"id":"1cfdb5b6-e568-436a-a85f-7fecf1b8eef2","details":{"type":"nexus","internal_address":"[fd00:1122:3344:10e::3]:12221","external_ip":"45.154.216.36","nic":{"id":"569754a2-a5e0-4aa8-90a7-2fa65f43b667","kind":{"type":"service","id":"1cfdb5b6-e568-436a-a85f-7fecf1b8eef2"},"name":"nexus-1cfdb5b6-e568-436a-a85f-7fecf1b8eef2","ip":"172.30.2.6","mac":"A8:40:25:FF:EC:6B","subnet":"172.30.2.0/24","vni":100,"primary":true,"slot":0},"external_tls":true,"external_dns_servers":["1.1.1.1","8.8.8.8"]}}]},"root":"/pool/ext/025dfc06-5aeb-407f-adc8-ba18dc9bba35/crypt/zone"},{"zone":{"id":"44a68792-ca14-442e-b7a9-11970d50ba0e","zone_type":"crucible","addresses":["fd00:1122:3344:10e::a"],"dataset":{"id":"44a68792-ca14-442e-b7a9-11970d50ba0e","name":{"pool_name":"oxp_2a492098-7df3-4409-9466-561edb7aa99b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::a]:32345"},"services":[{"id":"44a68792-ca14-442e-b7a9-11970d50ba0e","details":{"type":"crucible","address":"[fd00:1122:3344:10e::a]:32345"}}]},"root":"/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone"},{"zone":{"id":"514cf0ca-6d23-434e-9785-446b83b2f029","zone_type":"crucible","addresses":["fd00:1122:3344:10e::7"],"dataset":{"id":"514cf0ca-6d23-434e-9785-446b83b2f029","name":{"pool_name":"oxp_5b88e44e-f886-4de8-8a6b-48ea5ed9d70b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::7]:32345"},"services":[{"id":"514cf0ca-6d23-434e-9785-446b83b2f029","details":{"type":"crucible","address":"[fd00:1122:3344:10e::7]:32345"}}]},"root":"/pool/ext/5b88e44e-f886-4de8-8a6b-48ea5ed9d70b/crypt/zone"},{"zone":{"id":"bc6d8347-8f64-4031-912c-932349df07fe","zone_type":"crucible","addresses":["fd00:1122:3344:10e::6"],"dataset":{"id":"bc6d8347-8f64-4031-912c-932349df07fe","name":{"pool_name":"oxp_1544ce68-3544-4cba-b3b6-1927d08b78a5","kind":{"type":"crucible"}},"servic
e_address":"[fd00:1122:3344:10e::6]:32345"},"services":[{"id":"bc6d8347-8f64-4031-912c-932349df07fe","details":{"type":"crucible","address":"[fd00:1122:3344:10e::6]:32345"}}]},"root":"/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone"},{"zone":{"id":"1ab0a4f5-99ad-4341-8c89-7fd03e5ccb08","zone_type":"crucible","addresses":["fd00:1122:3344:10e::b"],"dataset":{"id":"1ab0a4f5-99ad-4341-8c89-7fd03e5ccb08","name":{"pool_name":"oxp_033eb462-968f-42ce-9c29-377bd40a3014","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::b]:32345"},"services":[{"id":"1ab0a4f5-99ad-4341-8c89-7fd03e5ccb08","details":{"type":"crucible","address":"[fd00:1122:3344:10e::b]:32345"}}]},"root":"/pool/ext/9e1a0803-7453-4eac-91c9-d7891ecd634f/crypt/zone"},{"zone":{"id":"d6f2520b-3d04-44d9-bd46-6ffccfcb46d2","zone_type":"crucible","addresses":["fd00:1122:3344:10e::8"],"dataset":{"id":"d6f2520b-3d04-44d9-bd46-6ffccfcb46d2","name":{"pool_name":"oxp_36e8d29c-1e88-4c2b-8f59-f312201067c3","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::8]:32345"},"services":[{"id":"d6f2520b-3d04-44d9-bd46-6ffccfcb46d2","details":{"type":"crucible","address":"[fd00:1122:3344:10e::8]:32345"}}]},"root":"/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone"},{"zone":{"id":"d6da9d13-bfcf-469d-a99e-faeb5e30be32","zone_type":"crucible","addresses":["fd00:1122:3344:10e::c"],"dataset":{"id":"d6da9d13-bfcf-469d-a99e-faeb5e30be32","name":{"pool_name":"oxp_9e1a0803-7453-4eac-91c9-d7891ecd634f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::c]:32345"},"services":[{"id":"d6da9d13-bfcf-469d-a99e-faeb5e30be32","details":{"type":"crucible","address":"[fd00:1122:3344:10e::c]:32345"}}]},"root":"/pool/ext/8d798756-7200-4db4-9faf-f41b75106a63/crypt/zone"},{"zone":{"id":"a1dc59c2-5883-4fb8-83be-ac2d95d255d1","zone_type":"crucible","addresses":["fd00:1122:3344:10e::d"],"dataset":{"id":"a1dc59c2-5883-4fb8-83be-ac2d95d255d1","name":{"pool_name":"oxp_8d798756-7200-4db4-9faf-f41b75106a63","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10e::d]:32345"},"services":[{"id":"a1dc59c2-5883-4fb8-83be-ac2d95d255d1","details":{"type":"crucible","address":"[fd00:1122:3344:10e::d]:32345"}}]},"root":"/pool/ext/36e8d29c-1e88-4c2b-8f59-f312201067c3/crypt/zone"},{"zone":{"id":"48f25dba-7392-44ce-9bb0-28489ebc44bc","zone_type":"ntp","addresses":["fd00:1122:3344:10e::e"],"dataset":null,"services":[{"id":"48f25dba-7392-44ce-9bb0-28489ebc44bc","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10e::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/5b88e44e-f886-4de8-8a6b-48ea5ed9d70b/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/old-service-ledgers/rack3-sled9.json b/sled-agent/tests/old-service-ledgers/rack3-sled9.json new file mode 100644 index 0000000000..c225f50081 --- /dev/null +++ b/sled-agent/tests/old-service-ledgers/rack3-sled9.json @@ -0,0 +1 @@ 
+{"generation":4,"requests":[{"zone":{"id":"b452e5e1-ab4c-4994-9679-ef21b3b4fee9","zone_type":"crucible","addresses":["fd00:1122:3344:10b::6"],"dataset":{"id":"b452e5e1-ab4c-4994-9679-ef21b3b4fee9","name":{"pool_name":"oxp_d63a297d-ae6a-4072-9dca-dda404044989","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::6]:32345"},"services":[{"id":"b452e5e1-ab4c-4994-9679-ef21b3b4fee9","details":{"type":"crucible","address":"[fd00:1122:3344:10b::6]:32345"}}]},"root":"/pool/ext/7c204111-31df-4c32-9a3e-780411f700fd/crypt/zone"},{"zone":{"id":"e9826cdc-6d3a-4eff-b1b5-ec4364ebe6b9","zone_type":"oximeter","addresses":["fd00:1122:3344:10b::3"],"dataset":null,"services":[{"id":"e9826cdc-6d3a-4eff-b1b5-ec4364ebe6b9","details":{"type":"oximeter","address":"[fd00:1122:3344:10b::3]:12223"}}]},"root":"/pool/ext/7c204111-31df-4c32-9a3e-780411f700fd/crypt/zone"},{"zone":{"id":"b0cde4a8-f27c-46e8-8355-756be9045afc","zone_type":"crucible","addresses":["fd00:1122:3344:10b::b"],"dataset":{"id":"b0cde4a8-f27c-46e8-8355-756be9045afc","name":{"pool_name":"oxp_07c1a8e7-51f5-4f12-a43d-734719fef92b","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::b]:32345"},"services":[{"id":"b0cde4a8-f27c-46e8-8355-756be9045afc","details":{"type":"crucible","address":"[fd00:1122:3344:10b::b]:32345"}}]},"root":"/pool/ext/1f6adf64-c9b9-4ed7-b3e2-37fb25624646/crypt/zone"},{"zone":{"id":"e2f70cf6-e285-4212-9b01-77ebf2ca9219","zone_type":"crucible","addresses":["fd00:1122:3344:10b::d"],"dataset":{"id":"e2f70cf6-e285-4212-9b01-77ebf2ca9219","name":{"pool_name":"oxp_a809f28a-7f25-4362-bc56-0cbdd72af2cb","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::d]:32345"},"services":[{"id":"e2f70cf6-e285-4212-9b01-77ebf2ca9219","details":{"type":"crucible","address":"[fd00:1122:3344:10b::d]:32345"}}]},"root":"/pool/ext/92a1bd39-6e8a-4226-b9d0-e3e8a9b8504f/crypt/zone"},{"zone":{"id":"b0949c9d-4aa1-4bc4-9cb3-5875b9166885","zone_type":"crucible","addresses":["fd00:1122:3344:10b::a"],"dataset":{"id":"b0949c9d-4aa1-4bc4-9cb3-5875b9166885","name":{"pool_name":"oxp_af0cc12b-43c5-473a-89a7-28351fbbb430","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::a]:32345"},"services":[{"id":"b0949c9d-4aa1-4bc4-9cb3-5875b9166885","details":{"type":"crucible","address":"[fd00:1122:3344:10b::a]:32345"}}]},"root":"/pool/ext/cf1594ed-7c0c-467c-b0af-a689dcb427a3/crypt/zone"},{"zone":{"id":"7cea4d59-a8ca-4826-901d-8d5bd935dc09","zone_type":"crucible","addresses":["fd00:1122:3344:10b::9"],"dataset":{"id":"7cea4d59-a8ca-4826-901d-8d5bd935dc09","name":{"pool_name":"oxp_d75dae09-4992-4a61-ab7d-5ae1d2b068ba","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::9]:32345"},"services":[{"id":"7cea4d59-a8ca-4826-901d-8d5bd935dc09","details":{"type":"crucible","address":"[fd00:1122:3344:10b::9]:32345"}}]},"root":"/pool/ext/a809f28a-7f25-4362-bc56-0cbdd72af2cb/crypt/zone"},{"zone":{"id":"08adaeee-c3b5-4cd8-8fbd-ac371b3101c9","zone_type":"crucible","addresses":["fd00:1122:3344:10b::4"],"dataset":{"id":"08adaeee-c3b5-4cd8-8fbd-ac371b3101c9","name":{"pool_name":"oxp_d9f23187-fbf9-4ea5-a103-bc112263a9a7","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::4]:32345"},"services":[{"id":"08adaeee-c3b5-4cd8-8fbd-ac371b3101c9","details":{"type":"crucible","address":"[fd00:1122:3344:10b::4]:32345"}}]},"root":"/pool/ext/7c204111-31df-4c32-9a3e-780411f700fd/crypt/zone"},{"zone":{"id":"3da1ade5-3fcb-4e64-aa08-81ee8a9ef723","zone_type":"crucible","addresses":["fd00:1122:3344:10b::8"],"dataset":{"id":"
3da1ade5-3fcb-4e64-aa08-81ee8a9ef723","name":{"pool_name":"oxp_1f6adf64-c9b9-4ed7-b3e2-37fb25624646","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::8]:32345"},"services":[{"id":"3da1ade5-3fcb-4e64-aa08-81ee8a9ef723","details":{"type":"crucible","address":"[fd00:1122:3344:10b::8]:32345"}}]},"root":"/pool/ext/07c1a8e7-51f5-4f12-a43d-734719fef92b/crypt/zone"},{"zone":{"id":"816f26a7-4c28-4a39-b9ad-a036678520ab","zone_type":"crucible","addresses":["fd00:1122:3344:10b::7"],"dataset":{"id":"816f26a7-4c28-4a39-b9ad-a036678520ab","name":{"pool_name":"oxp_92a1bd39-6e8a-4226-b9d0-e3e8a9b8504f","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::7]:32345"},"services":[{"id":"816f26a7-4c28-4a39-b9ad-a036678520ab","details":{"type":"crucible","address":"[fd00:1122:3344:10b::7]:32345"}}]},"root":"/pool/ext/d9f23187-fbf9-4ea5-a103-bc112263a9a7/crypt/zone"},{"zone":{"id":"839f9839-409f-45d3-b8a6-7085507b90f6","zone_type":"crucible","addresses":["fd00:1122:3344:10b::c"],"dataset":{"id":"839f9839-409f-45d3-b8a6-7085507b90f6","name":{"pool_name":"oxp_7c204111-31df-4c32-9a3e-780411f700fd","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::c]:32345"},"services":[{"id":"839f9839-409f-45d3-b8a6-7085507b90f6","details":{"type":"crucible","address":"[fd00:1122:3344:10b::c]:32345"}}]},"root":"/pool/ext/af0cc12b-43c5-473a-89a7-28351fbbb430/crypt/zone"},{"zone":{"id":"c717c81f-a228-4412-a34e-90f8c491d847","zone_type":"crucible","addresses":["fd00:1122:3344:10b::5"],"dataset":{"id":"c717c81f-a228-4412-a34e-90f8c491d847","name":{"pool_name":"oxp_cf1594ed-7c0c-467c-b0af-a689dcb427a3","kind":{"type":"crucible"}},"service_address":"[fd00:1122:3344:10b::5]:32345"},"services":[{"id":"c717c81f-a228-4412-a34e-90f8c491d847","details":{"type":"crucible","address":"[fd00:1122:3344:10b::5]:32345"}}]},"root":"/pool/ext/d63a297d-ae6a-4072-9dca-dda404044989/crypt/zone"},{"zone":{"id":"e1fa2023-6c86-40a4-ae59-a0de112cf7a9","zone_type":"ntp","addresses":["fd00:1122:3344:10b::e"],"dataset":null,"services":[{"id":"e1fa2023-6c86-40a4-ae59-a0de112cf7a9","details":{"type":"internal_ntp","address":"[fd00:1122:3344:10b::e]:123","ntp_servers":["440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal","cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal"],"dns_servers":["fd00:1122:3344:1::1","fd00:1122:3344:2::1","fd00:1122:3344:3::1"],"domain":null}}]},"root":"/pool/ext/d9f23187-fbf9-4ea5-a103-bc112263a9a7/crypt/zone"}]} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled10.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled10.json new file mode 100644 index 0000000000..c00a65e8ea --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack2-sled10.json @@ -0,0 +1,195 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "04eef8aa-055c-42ab-bdb6-c982f63c9be0", + "underlay_address": "fd00:1122:3344:107::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::d]:32345", + "dataset": { + "pool_name": "oxp_845ff39a-3205-416f-8bda-e35829107c8a" + } + } + }, + "root": "/pool/ext/43efdd6d-7419-437a-a282-fc45bfafd042/crypt/zone" + }, + { + "zone": { + "id": "8568c997-fbbb-46a8-8549-b78284530ffc", + "underlay_address": "fd00:1122:3344:107::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::5]:32345", + "dataset": { + "pool_name": "oxp_0e485ad3-04e6-404b-b619-87d4fea9f5ae" + } + } + }, + "root": 
"/pool/ext/9b61d4b2-66f6-459f-86f4-13d0b8c5d6cf/crypt/zone" + }, + { + "zone": { + "id": "6cec1d60-5c1a-4c1b-9632-2b4bc76bd37c", + "underlay_address": "fd00:1122:3344:107::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::e]:32345", + "dataset": { + "pool_name": "oxp_62a4c68a-2073-42d0-8e49-01f5e8b90cd4" + } + } + }, + "root": "/pool/ext/845ff39a-3205-416f-8bda-e35829107c8a/crypt/zone" + }, + { + "zone": { + "id": "aa646c82-c6d7-4d0c-8401-150130927759", + "underlay_address": "fd00:1122:3344:107::4", + "zone_type": { + "type": "clickhouse", + "address": "[fd00:1122:3344:107::4]:8123", + "dataset": { + "pool_name": "oxp_0e485ad3-04e6-404b-b619-87d4fea9f5ae" + } + } + }, + "root": "/pool/ext/fd82dcc7-00dd-4d01-826a-937a7d8238fb/crypt/zone" + }, + { + "zone": { + "id": "2f294ca1-7a4f-468f-8966-2b7915804729", + "underlay_address": "fd00:1122:3344:107::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::7]:32345", + "dataset": { + "pool_name": "oxp_43efdd6d-7419-437a-a282-fc45bfafd042" + } + } + }, + "root": "/pool/ext/fd82dcc7-00dd-4d01-826a-937a7d8238fb/crypt/zone" + }, + { + "zone": { + "id": "1a77bd1d-4fd4-4d6c-a105-17f942d94ba6", + "underlay_address": "fd00:1122:3344:107::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::c]:32345", + "dataset": { + "pool_name": "oxp_b6bdfdaf-9c0d-4b74-926c-49ff3ed05562" + } + } + }, + "root": "/pool/ext/9b61d4b2-66f6-459f-86f4-13d0b8c5d6cf/crypt/zone" + }, + { + "zone": { + "id": "f65a6668-1aea-4deb-81ed-191fbe469328", + "underlay_address": "fd00:1122:3344:107::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::9]:32345", + "dataset": { + "pool_name": "oxp_9b61d4b2-66f6-459f-86f4-13d0b8c5d6cf" + } + } + }, + "root": "/pool/ext/d0584f4a-20ba-436d-a75b-7709e80deb79/crypt/zone" + }, + { + "zone": { + "id": "ee8bce67-8f8e-4221-97b0-85f1860d66d0", + "underlay_address": "fd00:1122:3344:107::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::8]:32345", + "dataset": { + "pool_name": "oxp_b252b176-3974-436a-915b-60382b21eb76" + } + } + }, + "root": "/pool/ext/b6bdfdaf-9c0d-4b74-926c-49ff3ed05562/crypt/zone" + }, + { + "zone": { + "id": "cf3b2d54-5e36-4c93-b44f-8bf36ac98071", + "underlay_address": "fd00:1122:3344:107::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::b]:32345", + "dataset": { + "pool_name": "oxp_d0584f4a-20ba-436d-a75b-7709e80deb79" + } + } + }, + "root": "/pool/ext/4c157f35-865d-4310-9d81-c6259cb69293/crypt/zone" + }, + { + "zone": { + "id": "5c8c244c-00dc-4b16-aa17-6d9eb4827fab", + "underlay_address": "fd00:1122:3344:107::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::a]:32345", + "dataset": { + "pool_name": "oxp_4c157f35-865d-4310-9d81-c6259cb69293" + } + } + }, + "root": "/pool/ext/845ff39a-3205-416f-8bda-e35829107c8a/crypt/zone" + }, + { + "zone": { + "id": "7d5e942b-926c-442d-937a-76cc4aa72bf3", + "underlay_address": "fd00:1122:3344:107::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::6]:32345", + "dataset": { + "pool_name": "oxp_fd82dcc7-00dd-4d01-826a-937a7d8238fb" + } + } + }, + "root": "/pool/ext/b252b176-3974-436a-915b-60382b21eb76/crypt/zone" + }, + { + "zone": { + "id": "a3628a56-6f85-43b5-be50-71d8f0e04877", + "underlay_address": "fd00:1122:3344:107::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:107::3]:32221", + "dataset": { + "pool_name": "oxp_0e485ad3-04e6-404b-b619-87d4fea9f5ae" + 
} + } + }, + "root": "/pool/ext/4c157f35-865d-4310-9d81-c6259cb69293/crypt/zone" + }, + { + "zone": { + "id": "7529be1c-ca8b-441a-89aa-37166cc450df", + "underlay_address": "fd00:1122:3344:107::f", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:107::f]:123", + "ntp_servers": [ + "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", + "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/fd82dcc7-00dd-4d01-826a-937a7d8238fb/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled11.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled11.json new file mode 100644 index 0000000000..79aae3e8c1 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack2-sled11.json @@ -0,0 +1,196 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "605be8b9-c652-4a5f-94ca-068ec7a39472", + "underlay_address": "fd00:1122:3344:106::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::a]:32345", + "dataset": { + "pool_name": "oxp_cf14d1b9-b4db-4594-b3ab-a9957e770ce9" + } + } + }, + "root": "/pool/ext/cf5f8849-0c5a-475b-8683-6d17da88d1d1/crypt/zone" + }, + { + "zone": { + "id": "af8a8712-457c-4ea7-a8b6-aecb04761c1b", + "underlay_address": "fd00:1122:3344:106::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::9]:32345", + "dataset": { + "pool_name": "oxp_cf5f8849-0c5a-475b-8683-6d17da88d1d1" + } + } + }, + "root": "/pool/ext/7f778610-7328-4554-98f6-b17f74f551c7/crypt/zone" + }, + { + "zone": { + "id": "0022703b-dcfc-44d4-897a-b42f6f53b433", + "underlay_address": "fd00:1122:3344:106::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::c]:32345", + "dataset": { + "pool_name": "oxp_025725fa-9e40-4b46-b018-c420408394ef" + } + } + }, + "root": "/pool/ext/025725fa-9e40-4b46-b018-c420408394ef/crypt/zone" + }, + { + "zone": { + "id": "fffddf56-10ca-4b62-9be3-5b3764a5f682", + "underlay_address": "fd00:1122:3344:106::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::d]:32345", + "dataset": { + "pool_name": "oxp_4d2f5aaf-eb14-4b1e-aa99-ae38ec844605" + } + } + }, + "root": "/pool/ext/834c9aad-c53b-4357-bc3f-f422efa63848/crypt/zone" + }, + { + "zone": { + "id": "9b8194ee-917d-4abc-a55c-94cea6cdaea1", + "underlay_address": "fd00:1122:3344:106::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::6]:32345", + "dataset": { + "pool_name": "oxp_d7665e0d-9354-4341-a76f-965d7c49f277" + } + } + }, + "root": "/pool/ext/cf5f8849-0c5a-475b-8683-6d17da88d1d1/crypt/zone" + }, + { + "zone": { + "id": "b369e133-485c-4d98-8fee-83542d1fd94d", + "underlay_address": "fd00:1122:3344:106::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::4]:32345", + "dataset": { + "pool_name": "oxp_4366f80d-3902-4b93-8f2d-380008e805fc" + } + } + }, + "root": "/pool/ext/025725fa-9e40-4b46-b018-c420408394ef/crypt/zone" + }, + { + "zone": { + "id": "edd99650-5df1-4241-815d-253e4ef2399c", + "underlay_address": "fd00:1122:3344:106::3", + "zone_type": { + "type": "external_dns", + "dataset": { + "pool_name": "oxp_4366f80d-3902-4b93-8f2d-380008e805fc" + }, + "http_address": "[fd00:1122:3344:106::3]:5353", + "dns_address": "172.20.26.1:53", + "nic": { + "id": 
"99b759fc-8e2e-44b7-aca8-93c3b201974d", + "kind": { + "type": "service", + "id": "edd99650-5df1-4241-815d-253e4ef2399c" + }, + "name": "external-dns-edd99650-5df1-4241-815d-253e4ef2399c", + "ip": "172.30.1.5", + "mac": "A8:40:25:FF:B0:9C", + "subnet": "172.30.1.0/24", + "vni": 100, + "primary": true, + "slot": 0 + } + } + }, + "root": "/pool/ext/7f778610-7328-4554-98f6-b17f74f551c7/crypt/zone" + }, + { + "zone": { + "id": "46d1afcc-cc3f-4b17-aafc-054dd4862d15", + "underlay_address": "fd00:1122:3344:106::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::5]:32345", + "dataset": { + "pool_name": "oxp_7f778610-7328-4554-98f6-b17f74f551c7" + } + } + }, + "root": "/pool/ext/cf5f8849-0c5a-475b-8683-6d17da88d1d1/crypt/zone" + }, + { + "zone": { + "id": "12afe1c3-bfe6-4278-8240-91d401347d36", + "underlay_address": "fd00:1122:3344:106::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::8]:32345", + "dataset": { + "pool_name": "oxp_534bcd4b-502f-4109-af6e-4b28a22c20f1" + } + } + }, + "root": "/pool/ext/4366f80d-3902-4b93-8f2d-380008e805fc/crypt/zone" + }, + { + "zone": { + "id": "c33b5912-9985-43ed-98f2-41297e2b796a", + "underlay_address": "fd00:1122:3344:106::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::b]:32345", + "dataset": { + "pool_name": "oxp_834c9aad-c53b-4357-bc3f-f422efa63848" + } + } + }, + "root": "/pool/ext/d7665e0d-9354-4341-a76f-965d7c49f277/crypt/zone" + }, + { + "zone": { + "id": "65b3db59-9361-4100-9cee-04e32a8c67d3", + "underlay_address": "fd00:1122:3344:106::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::7]:32345", + "dataset": { + "pool_name": "oxp_32b5303f-f667-4345-84d2-c7eec63b91b2" + } + } + }, + "root": "/pool/ext/d7665e0d-9354-4341-a76f-965d7c49f277/crypt/zone" + }, + { + "zone": { + "id": "82500cc9-f33d-4d59-9e6e-d70ea6133077", + "underlay_address": "fd00:1122:3344:106::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:106::e]:123", + "ntp_servers": [ + "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", + "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/cf14d1b9-b4db-4594-b3ab-a9957e770ce9/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled12.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled12.json new file mode 100644 index 0000000000..39ebad3183 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack2-sled12.json @@ -0,0 +1,232 @@ +{ + "omicron_generation": 2, + "ledger_generation": 5, + "zones": [ + { + "zone": { + "id": "a76b3357-b690-43b8-8352-3300568ffc2b", + "underlay_address": "fd00:1122:3344:104::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::a]:32345", + "dataset": { + "pool_name": "oxp_05715ad8-59a1-44ab-ad5f-0cdffb46baab" + } + } + }, + "root": "/pool/ext/2ec2a731-3340-4777-b1bb-4a906c598174/crypt/zone" + }, + { + "zone": { + "id": "8d202759-ca06-4383-b50f-7f3ec4062bf7", + "underlay_address": "fd00:1122:3344:104::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::4]:32345", + "dataset": { + "pool_name": "oxp_56e32a8f-0877-4437-9cab-94a4928b1495" + } + } + }, + "root": "/pool/ext/613b58fc-5a80-42dc-a61c-b143cf220fb5/crypt/zone" + }, + { + "zone": { + "id": 
"fcdda266-fc6a-4518-89db-aec007a4b682", + "underlay_address": "fd00:1122:3344:104::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::b]:32345", + "dataset": { + "pool_name": "oxp_7e1293ad-b903-4054-aeae-2182d5e4a785" + } + } + }, + "root": "/pool/ext/416fd29e-d3b5-4fdf-8101-d0d163fa0706/crypt/zone" + }, + { + "zone": { + "id": "167cf6a2-ec51-4de2-bc6c-7785bbc0e436", + "underlay_address": "fd00:1122:3344:104::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::c]:32345", + "dataset": { + "pool_name": "oxp_f96c8d49-fdf7-4bd6-84f6-c282202d1abc" + } + } + }, + "root": "/pool/ext/56e32a8f-0877-4437-9cab-94a4928b1495/crypt/zone" + }, + { + "zone": { + "id": "c6fde82d-8dae-4ef0-b557-6c3d094d9454", + "underlay_address": "fd00:1122:3344:104::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::9]:32345", + "dataset": { + "pool_name": "oxp_416fd29e-d3b5-4fdf-8101-d0d163fa0706" + } + } + }, + "root": "/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone" + }, + { + "zone": { + "id": "650f5da7-86a0-4ade-af0f-bc96e021ded0", + "underlay_address": "fd00:1122:3344:104::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::5]:32345", + "dataset": { + "pool_name": "oxp_b4a71d3d-1ecd-418a-9a52-8d118f82082b" + } + } + }, + "root": "/pool/ext/613b58fc-5a80-42dc-a61c-b143cf220fb5/crypt/zone" + }, + { + "zone": { + "id": "7ce9a2c5-2d37-4188-b7b5-a9db819396c3", + "underlay_address": "fd00:1122:3344:104::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::d]:32345", + "dataset": { + "pool_name": "oxp_c87d16b8-e814-4159-8562-f8d7fdd19d13" + } + } + }, + "root": "/pool/ext/416fd29e-d3b5-4fdf-8101-d0d163fa0706/crypt/zone" + }, + { + "zone": { + "id": "23e1cf01-70ab-422f-997b-6216158965c3", + "underlay_address": "fd00:1122:3344:104::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::8]:32345", + "dataset": { + "pool_name": "oxp_3af01cc4-1f16-47d9-a489-abafcb91c2db" + } + } + }, + "root": "/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone" + }, + { + "zone": { + "id": "50209816-89fb-48ed-9595-16899d114844", + "underlay_address": "fd00:1122:3344:104::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::6]:32345", + "dataset": { + "pool_name": "oxp_2ec2a731-3340-4777-b1bb-4a906c598174" + } + } + }, + "root": "/pool/ext/416fd29e-d3b5-4fdf-8101-d0d163fa0706/crypt/zone" + }, + { + "zone": { + "id": "20b100d0-84c3-4119-aa9b-0c632b0b6a3a", + "underlay_address": "fd00:1122:3344:104::3", + "zone_type": { + "type": "nexus", + "internal_address": "[fd00:1122:3344:104::3]:12221", + "external_ip": "172.20.26.4", + "nic": { + "id": "364b0ecd-bf08-4cac-a993-bbf4a70564c7", + "kind": { + "type": "service", + "id": "20b100d0-84c3-4119-aa9b-0c632b0b6a3a" + }, + "name": "nexus-20b100d0-84c3-4119-aa9b-0c632b0b6a3a", + "ip": "172.30.2.6", + "mac": "A8:40:25:FF:B4:C1", + "subnet": "172.30.2.0/24", + "vni": 100, + "primary": true, + "slot": 0 + }, + "external_tls": true, + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ] + } + }, + "root": "/pool/ext/c87d16b8-e814-4159-8562-f8d7fdd19d13/crypt/zone" + }, + { + "zone": { + "id": "8bc0f29e-0c20-437e-b8ca-7b9844acda22", + "underlay_address": "fd00:1122:3344:104::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::7]:32345", + "dataset": { + "pool_name": "oxp_613b58fc-5a80-42dc-a61c-b143cf220fb5" + } + } + }, + "root": 
"/pool/ext/56e32a8f-0877-4437-9cab-94a4928b1495/crypt/zone" + }, + { + "zone": { + "id": "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55", + "underlay_address": "fd00:1122:3344:104::e", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:104::e]:123", + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "domain": null, + "nic": { + "id": "a4b9bacf-6c04-431a-81ad-9bf0302af96e", + "kind": { + "type": "service", + "id": "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55" + }, + "name": "ntp-c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55", + "ip": "172.30.3.5", + "mac": "A8:40:25:FF:B2:52", + "subnet": "172.30.3.0/24", + "vni": 100, + "primary": true, + "slot": 0 + }, + "snat_cfg": { + "ip": "172.20.26.6", + "first_port": 0, + "last_port": 16383 + } + } + }, + "root": "/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone" + }, + { + "zone": { + "id": "51c9ad09-7814-4643-8ad4-689ccbe53fbd", + "underlay_address": "fd00:1122:3344:1::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_56e32a8f-0877-4437-9cab-94a4928b1495" + }, + "http_address": "[fd00:1122:3344:1::1]:5353", + "dns_address": "[fd00:1122:3344:1::1]:53", + "gz_address": "fd00:1122:3344:1::2", + "gz_address_index": 0 + } + }, + "root": "/pool/ext/3af01cc4-1f16-47d9-a489-abafcb91c2db/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled14.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled14.json new file mode 100644 index 0000000000..25dfb72a78 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack2-sled14.json @@ -0,0 +1,192 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "ee8b2cfa-87fe-46a6-98ef-23640b80a968", + "underlay_address": "fd00:1122:3344:10b::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::d]:32345", + "dataset": { + "pool_name": "oxp_4a624324-003a-4255-98e8-546a90b5b7fa" + } + } + }, + "root": "/pool/ext/6b9ec5f1-859f-459c-9c06-6a51ba87786f/crypt/zone" + }, + { + "zone": { + "id": "9228f8ca-2a83-439f-9cb7-f2801b5fea27", + "underlay_address": "fd00:1122:3344:10b::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::6]:32345", + "dataset": { + "pool_name": "oxp_6b9ec5f1-859f-459c-9c06-6a51ba87786f" + } + } + }, + "root": "/pool/ext/6b9ec5f1-859f-459c-9c06-6a51ba87786f/crypt/zone" + }, + { + "zone": { + "id": "ee44cdde-7ac9-4469-9f1d-e8bcfeb5cc46", + "underlay_address": "fd00:1122:3344:10b::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::e]:32345", + "dataset": { + "pool_name": "oxp_11b02ce7-7e50-486f-86c2-de8af9575a45" + } + } + }, + "root": "/pool/ext/11b02ce7-7e50-486f-86c2-de8af9575a45/crypt/zone" + }, + { + "zone": { + "id": "96bac0b1-8b34-4c81-9e76-6404d2c37630", + "underlay_address": "fd00:1122:3344:10b::4", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:10b::4]:17000" + } + }, + "root": "/pool/ext/350b2814-7b7f-40f1-9bf6-9818a1ef49bb/crypt/zone" + }, + { + "zone": { + "id": "d4e1e554-7b98-4413-809e-4a42561c3d0c", + "underlay_address": "fd00:1122:3344:10b::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::a]:32345", + "dataset": { + "pool_name": "oxp_e6d2fe1d-c74d-40cd-8fae-bc7d06bdaac8" + } + } + }, + "root": "/pool/ext/6b9ec5f1-859f-459c-9c06-6a51ba87786f/crypt/zone" + }, + { + "zone": { + "id": "1dd69b02-a032-46c3-8e2a-5012e8314455", + "underlay_address": 
"fd00:1122:3344:10b::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::b]:32345", + "dataset": { + "pool_name": "oxp_350b2814-7b7f-40f1-9bf6-9818a1ef49bb" + } + } + }, + "root": "/pool/ext/350b2814-7b7f-40f1-9bf6-9818a1ef49bb/crypt/zone" + }, + { + "zone": { + "id": "921f7752-d2f3-40df-a739-5cb1390abc2c", + "underlay_address": "fd00:1122:3344:10b::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::8]:32345", + "dataset": { + "pool_name": "oxp_2d1ebe24-6deb-4f81-8450-6842de28126c" + } + } + }, + "root": "/pool/ext/91ea7bb6-2be7-4498-9b0d-a0521509ec00/crypt/zone" + }, + { + "zone": { + "id": "609b25e8-9750-4308-ae6f-7202907a3675", + "underlay_address": "fd00:1122:3344:10b::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::9]:32345", + "dataset": { + "pool_name": "oxp_91ea7bb6-2be7-4498-9b0d-a0521509ec00" + } + } + }, + "root": "/pool/ext/2d1ebe24-6deb-4f81-8450-6842de28126c/crypt/zone" + }, + { + "zone": { + "id": "a232eba2-e94f-4592-a5a6-ec23f9be3296", + "underlay_address": "fd00:1122:3344:10b::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::5]:32345", + "dataset": { + "pool_name": "oxp_e12f29b8-1ab8-431e-bc96-1c1298947980" + } + } + }, + "root": "/pool/ext/021afd19-2f87-4def-9284-ab7add1dd6ae/crypt/zone" + }, + { + "zone": { + "id": "800d1758-9312-4b1a-8f02-dc6d644c2a9b", + "underlay_address": "fd00:1122:3344:10b::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::c]:32345", + "dataset": { + "pool_name": "oxp_b6932bb0-bab8-4876-914a-9c75a600e794" + } + } + }, + "root": "/pool/ext/b6932bb0-bab8-4876-914a-9c75a600e794/crypt/zone" + }, + { + "zone": { + "id": "668a4d4a-96dc-4b45-866b-bed3d64c26ec", + "underlay_address": "fd00:1122:3344:10b::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::7]:32345", + "dataset": { + "pool_name": "oxp_021afd19-2f87-4def-9284-ab7add1dd6ae" + } + } + }, + "root": "/pool/ext/91ea7bb6-2be7-4498-9b0d-a0521509ec00/crypt/zone" + }, + { + "zone": { + "id": "8bbea076-ff60-4330-8302-383e18140ef3", + "underlay_address": "fd00:1122:3344:10b::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:10b::3]:32221", + "dataset": { + "pool_name": "oxp_e12f29b8-1ab8-431e-bc96-1c1298947980" + } + } + }, + "root": "/pool/ext/4a624324-003a-4255-98e8-546a90b5b7fa/crypt/zone" + }, + { + "zone": { + "id": "3ccea933-89f2-4ce5-8367-efb0afeffe97", + "underlay_address": "fd00:1122:3344:10b::f", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:10b::f]:123", + "ntp_servers": [ + "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", + "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/4a624324-003a-4255-98e8-546a90b5b7fa/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled16.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled16.json new file mode 100644 index 0000000000..905742e678 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack2-sled16.json @@ -0,0 +1,192 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "b12aa520-a769-4eac-b56b-09960550a831", + "underlay_address": "fd00:1122:3344:108::7", + "zone_type": { + "type": "crucible", + "address": 
"[fd00:1122:3344:108::7]:32345", + "dataset": { + "pool_name": "oxp_34dadf3f-f60c-4acc-b82b-4b0c82224222" + } + } + }, + "root": "/pool/ext/8be8c577-23ac-452e-a205-6d9c95088f61/crypt/zone" + }, + { + "zone": { + "id": "9bdc40ee-ccba-4d18-9efb-a30596e2d290", + "underlay_address": "fd00:1122:3344:108::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::d]:32345", + "dataset": { + "pool_name": "oxp_eb81728c-3b83-42fb-8133-ac32a0bdf70f" + } + } + }, + "root": "/pool/ext/8be8c577-23ac-452e-a205-6d9c95088f61/crypt/zone" + }, + { + "zone": { + "id": "c9a367c7-64d7-48e4-b484-9ecb4e8faea7", + "underlay_address": "fd00:1122:3344:108::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::9]:32345", + "dataset": { + "pool_name": "oxp_76ab5a67-e20f-4bf0-87b3-01fcc4144bd2" + } + } + }, + "root": "/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone" + }, + { + "zone": { + "id": "bc5124d8-65e8-4879-bfac-64d59003d482", + "underlay_address": "fd00:1122:3344:108::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::a]:32345", + "dataset": { + "pool_name": "oxp_5fac7a1d-e855-46e1-b8c2-dd848ac4fee6" + } + } + }, + "root": "/pool/ext/0c4ef358-5533-43db-ad38-a8eff716e53a/crypt/zone" + }, + { + "zone": { + "id": "5cc7c840-8e6b-48c8-ac4b-f4297f8cf61a", + "underlay_address": "fd00:1122:3344:108::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::c]:32345", + "dataset": { + "pool_name": "oxp_0c4ef358-5533-43db-ad38-a8eff716e53a" + } + } + }, + "root": "/pool/ext/6d3e9cc6-f03b-4055-9785-05711d5e4fdc/crypt/zone" + }, + { + "zone": { + "id": "3b767edf-a72d-4d80-a0fc-65d6801ed0e0", + "underlay_address": "fd00:1122:3344:108::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::e]:32345", + "dataset": { + "pool_name": "oxp_f522118c-5dcd-4116-8044-07f0cceec52e" + } + } + }, + "root": "/pool/ext/5fac7a1d-e855-46e1-b8c2-dd848ac4fee6/crypt/zone" + }, + { + "zone": { + "id": "f3c02ed6-fbc5-45c3-a030-409f74b450fd", + "underlay_address": "fd00:1122:3344:108::4", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:108::4]:17000" + } + }, + "root": "/pool/ext/eb81728c-3b83-42fb-8133-ac32a0bdf70f/crypt/zone" + }, + { + "zone": { + "id": "85bd9bdb-1ec5-4a8d-badb-8b5d502546a1", + "underlay_address": "fd00:1122:3344:108::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::5]:32345", + "dataset": { + "pool_name": "oxp_416232c1-bc8f-403f-bacb-28403dd8fced" + } + } + }, + "root": "/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone" + }, + { + "zone": { + "id": "d2f1c3df-d4e0-4469-b50e-f1871da86ebf", + "underlay_address": "fd00:1122:3344:108::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::6]:32345", + "dataset": { + "pool_name": "oxp_6d3e9cc6-f03b-4055-9785-05711d5e4fdc" + } + } + }, + "root": "/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone" + }, + { + "zone": { + "id": "88fe3c12-4c55-47df-b4ee-ed26b795439d", + "underlay_address": "fd00:1122:3344:108::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::8]:32345", + "dataset": { + "pool_name": "oxp_8be8c577-23ac-452e-a205-6d9c95088f61" + } + } + }, + "root": "/pool/ext/34dadf3f-f60c-4acc-b82b-4b0c82224222/crypt/zone" + }, + { + "zone": { + "id": "4d20175a-588b-44b8-8b9c-b16c6c3a97a0", + "underlay_address": "fd00:1122:3344:108::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::b]:32345", + "dataset": { + 
"pool_name": "oxp_a726cacd-fa35-4ed2-ade6-31ad928b24cb" + } + } + }, + "root": "/pool/ext/0c4ef358-5533-43db-ad38-a8eff716e53a/crypt/zone" + }, + { + "zone": { + "id": "e86845b5-eabd-49f5-9a10-6dfef9066209", + "underlay_address": "fd00:1122:3344:108::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:108::3]:32221", + "dataset": { + "pool_name": "oxp_416232c1-bc8f-403f-bacb-28403dd8fced" + } + } + }, + "root": "/pool/ext/416232c1-bc8f-403f-bacb-28403dd8fced/crypt/zone" + }, + { + "zone": { + "id": "209b6213-588b-43b6-a89b-19ee5c84ffba", + "underlay_address": "fd00:1122:3344:108::f", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:108::f]:123", + "ntp_servers": [ + "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", + "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/416232c1-bc8f-403f-bacb-28403dd8fced/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled17.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled17.json new file mode 100644 index 0000000000..1cccd0467b --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack2-sled17.json @@ -0,0 +1,181 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "90b53c3d-42fa-4ca9-bbfc-96fff245b508", + "underlay_address": "fd00:1122:3344:109::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::4]:32345", + "dataset": { + "pool_name": "oxp_ae56280b-17ce-4266-8573-e1da9db6c6bb" + } + } + }, + "root": "/pool/ext/b0e1a261-b932-47c4-81e9-1977275ae9d9/crypt/zone" + }, + { + "zone": { + "id": "4f9f2e1d-be04-4e8b-a50b-ffb18557a650", + "underlay_address": "fd00:1122:3344:109::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::5]:32345", + "dataset": { + "pool_name": "oxp_d5b07362-64db-4b18-a3e9-8d7cbabae2d5" + } + } + }, + "root": "/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone" + }, + { + "zone": { + "id": "2fa5671d-3109-4f11-ae70-1280f4fa3b89", + "underlay_address": "fd00:1122:3344:109::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::6]:32345", + "dataset": { + "pool_name": "oxp_9ba7bfbf-b9a2-4237-a142-94c1e68de984" + } + } + }, + "root": "/pool/ext/3cafbb47-c194-4a42-99ff-34dfeab999ed/crypt/zone" + }, + { + "zone": { + "id": "b63c6882-ca90-4156-b561-4781ab4a0962", + "underlay_address": "fd00:1122:3344:109::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::7]:32345", + "dataset": { + "pool_name": "oxp_b0e1a261-b932-47c4-81e9-1977275ae9d9" + } + } + }, + "root": "/pool/ext/d5b07362-64db-4b18-a3e9-8d7cbabae2d5/crypt/zone" + }, + { + "zone": { + "id": "f71344eb-f7e2-439d-82a0-9941e6868fb6", + "underlay_address": "fd00:1122:3344:109::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::9]:32345", + "dataset": { + "pool_name": "oxp_027a82e8-daa3-4fa6-8205-ed03445e1086" + } + } + }, + "root": "/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone" + }, + { + "zone": { + "id": "a60cf0d7-12d5-43cb-aa3f-7a9e84de08fb", + "underlay_address": "fd00:1122:3344:109::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::a]:32345", + "dataset": { + "pool_name": "oxp_8736aaf9-4d72-42b1-8e4f-07644d999c8b" + } + } + }, + "root": 
"/pool/ext/8736aaf9-4d72-42b1-8e4f-07644d999c8b/crypt/zone" + }, + { + "zone": { + "id": "5d0e03b2-8958-4c43-8851-bf819f102958", + "underlay_address": "fd00:1122:3344:109::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::8]:32345", + "dataset": { + "pool_name": "oxp_62426615-7832-49e7-9426-e39ffeb42c69" + } + } + }, + "root": "/pool/ext/07fc8ec9-1216-4d98-be34-c2970b585e61/crypt/zone" + }, + { + "zone": { + "id": "accc05a2-ec80-4856-a825-ec6b7f700eaa", + "underlay_address": "fd00:1122:3344:109::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::d]:32345", + "dataset": { + "pool_name": "oxp_dc083c53-7014-4482-8a79-f338ba2b0fb4" + } + } + }, + "root": "/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone" + }, + { + "zone": { + "id": "2e32fdcc-737a-4430-8290-cb7028ea4d50", + "underlay_address": "fd00:1122:3344:109::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::b]:32345", + "dataset": { + "pool_name": "oxp_3cafbb47-c194-4a42-99ff-34dfeab999ed" + } + } + }, + "root": "/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone" + }, + { + "zone": { + "id": "a97c6ae2-37f6-4d93-a66e-cb5cd3c6aaa2", + "underlay_address": "fd00:1122:3344:109::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::c]:32345", + "dataset": { + "pool_name": "oxp_07fc8ec9-1216-4d98-be34-c2970b585e61" + } + } + }, + "root": "/pool/ext/07fc8ec9-1216-4d98-be34-c2970b585e61/crypt/zone" + }, + { + "zone": { + "id": "3237a532-acaa-4ebe-bf11-dde794fea739", + "underlay_address": "fd00:1122:3344:109::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:109::3]:32221", + "dataset": { + "pool_name": "oxp_ae56280b-17ce-4266-8573-e1da9db6c6bb" + } + } + }, + "root": "/pool/ext/027a82e8-daa3-4fa6-8205-ed03445e1086/crypt/zone" + }, + { + "zone": { + "id": "83257100-5590-484a-b72a-a079389d8da6", + "underlay_address": "fd00:1122:3344:109::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:109::e]:123", + "ntp_servers": [ + "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", + "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/3cafbb47-c194-4a42-99ff-34dfeab999ed/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled21.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled21.json new file mode 100644 index 0000000000..35caa638e8 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack2-sled21.json @@ -0,0 +1,232 @@ +{ + "omicron_generation": 2, + "ledger_generation": 5, + "zones": [ + { + "zone": { + "id": "0437b69d-73a8-4231-86f9-6b5556e7e7ef", + "underlay_address": "fd00:1122:3344:102::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::5]:32345", + "dataset": { + "pool_name": "oxp_aa0ffe35-76db-42ab-adf2-ceb072bdf811" + } + } + }, + "root": "/pool/ext/0d2805da-6d24-4e57-a700-0c3865c05544/crypt/zone" + }, + { + "zone": { + "id": "47234ca5-305f-436a-9e9a-36bca9667680", + "underlay_address": "fd00:1122:3344:102::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::b]:32345", + "dataset": { + "pool_name": "oxp_0d2805da-6d24-4e57-a700-0c3865c05544" + } + } + }, + "root": "/pool/ext/160691d8-33a1-4d7d-a48a-c3fd27d76822/crypt/zone" + }, + { + "zone": { + "id": 
"2898657e-4141-4c05-851b-147bffc6bbbd", + "underlay_address": "fd00:1122:3344:102::3", + "zone_type": { + "type": "nexus", + "internal_address": "[fd00:1122:3344:102::3]:12221", + "external_ip": "172.20.26.5", + "nic": { + "id": "2e9a412e-c79a-48fe-8fa4-f5a6afed1040", + "kind": { + "type": "service", + "id": "2898657e-4141-4c05-851b-147bffc6bbbd" + }, + "name": "nexus-2898657e-4141-4c05-851b-147bffc6bbbd", + "ip": "172.30.2.7", + "mac": "A8:40:25:FF:C6:59", + "subnet": "172.30.2.0/24", + "vni": 100, + "primary": true, + "slot": 0 + }, + "external_tls": true, + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ] + } + }, + "root": "/pool/ext/c0b4ecc1-a145-443f-90d1-2e8136b007bc/crypt/zone" + }, + { + "zone": { + "id": "cf98c4d6-4a7b-49c0-9b14-48a8adf52ce9", + "underlay_address": "fd00:1122:3344:102::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::c]:32345", + "dataset": { + "pool_name": "oxp_c0b4ecc1-a145-443f-90d1-2e8136b007bc" + } + } + }, + "root": "/pool/ext/f6acd70a-d6cb-464d-a460-dd5c60301562/crypt/zone" + }, + { + "zone": { + "id": "13c1e91e-bfcc-4eea-8185-412fc37fdea3", + "underlay_address": "fd00:1122:3344:102::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::9]:32345", + "dataset": { + "pool_name": "oxp_e9b0a2e4-8060-41bd-a3b5-d0642246d06d" + } + } + }, + "root": "/pool/ext/c0b4ecc1-a145-443f-90d1-2e8136b007bc/crypt/zone" + }, + { + "zone": { + "id": "c9cb60af-9e0e-4b3b-b971-53138a9b8d27", + "underlay_address": "fd00:1122:3344:102::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::4]:32345", + "dataset": { + "pool_name": "oxp_77749ec7-39a9-489d-904b-87f7223c4e3c" + } + } + }, + "root": "/pool/ext/77749ec7-39a9-489d-904b-87f7223c4e3c/crypt/zone" + }, + { + "zone": { + "id": "32995cfa-47ec-4b84-8514-7c1c8a86c19d", + "underlay_address": "fd00:1122:3344:102::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::8]:32345", + "dataset": { + "pool_name": "oxp_eac83f81-eb51-4f3e-874e-82f55dd952ba" + } + } + }, + "root": "/pool/ext/0d2805da-6d24-4e57-a700-0c3865c05544/crypt/zone" + }, + { + "zone": { + "id": "b93d2e2d-d54b-4503-85c3-9878e3cee9c7", + "underlay_address": "fd00:1122:3344:102::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::a]:32345", + "dataset": { + "pool_name": "oxp_160691d8-33a1-4d7d-a48a-c3fd27d76822" + } + } + }, + "root": "/pool/ext/138663ad-a382-4595-baf0-08f6b0276a67/crypt/zone" + }, + { + "zone": { + "id": "2ebbac4f-7b0f-43eb-99fd-dd6ff7f9e097", + "underlay_address": "fd00:1122:3344:102::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::6]:32345", + "dataset": { + "pool_name": "oxp_138663ad-a382-4595-baf0-08f6b0276a67" + } + } + }, + "root": "/pool/ext/e9b0a2e4-8060-41bd-a3b5-d0642246d06d/crypt/zone" + }, + { + "zone": { + "id": "d0eea3b2-e5ac-42bf-97b7-531b78fa06d1", + "underlay_address": "fd00:1122:3344:102::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::7]:32345", + "dataset": { + "pool_name": "oxp_69f0b863-f73f-42b2-9822-b2cb99f09003" + } + } + }, + "root": "/pool/ext/138663ad-a382-4595-baf0-08f6b0276a67/crypt/zone" + }, + { + "zone": { + "id": "2b34cd1d-ea7d-41a1-82b9-75550fdf6eb0", + "underlay_address": "fd00:1122:3344:102::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::d]:32345", + "dataset": { + "pool_name": "oxp_f6acd70a-d6cb-464d-a460-dd5c60301562" + } + } + }, + "root": 
"/pool/ext/c0b4ecc1-a145-443f-90d1-2e8136b007bc/crypt/zone" + }, + { + "zone": { + "id": "6ea2684c-115e-48a6-8453-ab52d1cecd73", + "underlay_address": "fd00:1122:3344:102::e", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:102::e]:123", + "ntp_servers": [ + "ntp.eng.oxide.computer" + ], + "dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ], + "domain": null, + "nic": { + "id": "4effd079-ed4e-4cf6-8545-bb9574f516d2", + "kind": { + "type": "service", + "id": "6ea2684c-115e-48a6-8453-ab52d1cecd73" + }, + "name": "ntp-6ea2684c-115e-48a6-8453-ab52d1cecd73", + "ip": "172.30.3.6", + "mac": "A8:40:25:FF:A0:F9", + "subnet": "172.30.3.0/24", + "vni": 100, + "primary": true, + "slot": 0 + }, + "snat_cfg": { + "ip": "172.20.26.7", + "first_port": 16384, + "last_port": 32767 + } + } + }, + "root": "/pool/ext/aa0ffe35-76db-42ab-adf2-ceb072bdf811/crypt/zone" + }, + { + "zone": { + "id": "3a1ea15f-06a4-4afd-959a-c3a00b2bdd80", + "underlay_address": "fd00:1122:3344:2::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_77749ec7-39a9-489d-904b-87f7223c4e3c" + }, + "http_address": "[fd00:1122:3344:2::1]:5353", + "dns_address": "[fd00:1122:3344:2::1]:53", + "gz_address": "fd00:1122:3344:2::2", + "gz_address_index": 1 + } + }, + "root": "/pool/ext/69f0b863-f73f-42b2-9822-b2cb99f09003/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled23.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled23.json new file mode 100644 index 0000000000..94fcb3a327 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack2-sled23.json @@ -0,0 +1,195 @@ +{ + "omicron_generation": 2, + "ledger_generation": 5, + "zones": [ + { + "zone": { + "id": "1876cdcf-b2e7-4b79-ad2e-67df716e1860", + "underlay_address": "fd00:1122:3344:10a::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::8]:32345", + "dataset": { + "pool_name": "oxp_d4c6bdc6-5e99-4f6c-b57a-9bfcb9a76be4" + } + } + }, + "root": "/pool/ext/86c58ea3-1413-4af3-9aff-9c0a3d758459/crypt/zone" + }, + { + "zone": { + "id": "0e708ee3-b7a6-4993-a88a-4489add33e29", + "underlay_address": "fd00:1122:3344:10a::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::d]:32345", + "dataset": { + "pool_name": "oxp_718ad834-b415-4abb-934d-9f987cde0a96" + } + } + }, + "root": "/pool/ext/30f7d236-c835-46cc-bc27-9099a6826f67/crypt/zone" + }, + { + "zone": { + "id": "4e1b9a65-848f-4649-b360-1df0d135b44d", + "underlay_address": "fd00:1122:3344:10a::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::c]:32345", + "dataset": { + "pool_name": "oxp_88ee08c6-1c0f-44c2-9110-b8d5a7589ebb" + } + } + }, + "root": "/pool/ext/30f7d236-c835-46cc-bc27-9099a6826f67/crypt/zone" + }, + { + "zone": { + "id": "da510a57-3af1-4d2b-b2ed-2e8849f27d8b", + "underlay_address": "fd00:1122:3344:10a::3", + "zone_type": { + "type": "oximeter", + "address": "[fd00:1122:3344:10a::3]:12223" + } + }, + "root": "/pool/ext/718ad834-b415-4abb-934d-9f987cde0a96/crypt/zone" + }, + { + "zone": { + "id": "d4d9acc8-3e0b-4fab-a0a2-d21920fabd7e", + "underlay_address": "fd00:1122:3344:10a::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::6]:32345", + "dataset": { + "pool_name": "oxp_9dfe424f-cba6-4bfb-a3dd-e8bd7fdea57d" + } + } + }, + "root": "/pool/ext/30f7d236-c835-46cc-bc27-9099a6826f67/crypt/zone" + }, + { + "zone": { + "id": "fcb75972-836b-4f55-ba21-9722832cf5c2", + "underlay_address": "fd00:1122:3344:10a::7", 
+ "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::7]:32345", + "dataset": { + "pool_name": "oxp_9005671f-3d90-4ed1-be15-ad65b9a65bd5" + } + } + }, + "root": "/pool/ext/d4c6bdc6-5e99-4f6c-b57a-9bfcb9a76be4/crypt/zone" + }, + { + "zone": { + "id": "624beba0-7dcd-4d55-af05-4670c6fcb1fb", + "underlay_address": "fd00:1122:3344:10a::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::4]:32345", + "dataset": { + "pool_name": "oxp_93867156-a43d-4c03-a899-1535e566c8bd" + } + } + }, + "root": "/pool/ext/93867156-a43d-4c03-a899-1535e566c8bd/crypt/zone" + }, + { + "zone": { + "id": "26fb3830-898e-4086-afaf-8f9654716b8c", + "underlay_address": "fd00:1122:3344:10a::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::b]:32345", + "dataset": { + "pool_name": "oxp_86c58ea3-1413-4af3-9aff-9c0a3d758459" + } + } + }, + "root": "/pool/ext/93867156-a43d-4c03-a899-1535e566c8bd/crypt/zone" + }, + { + "zone": { + "id": "a3ef7eba-c08e-48ef-ae7a-89e2fcb49b66", + "underlay_address": "fd00:1122:3344:10a::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::a]:32345", + "dataset": { + "pool_name": "oxp_cd3fdbae-a9d9-4db7-866a-bca36f6dd634" + } + } + }, + "root": "/pool/ext/718ad834-b415-4abb-934d-9f987cde0a96/crypt/zone" + }, + { + "zone": { + "id": "5c1d4a02-f33b-433a-81f5-5c149e3433bd", + "underlay_address": "fd00:1122:3344:10a::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::5]:32345", + "dataset": { + "pool_name": "oxp_9adfc865-2eef-4880-a6e3-9d2f88c8efd0" + } + } + }, + "root": "/pool/ext/cd3fdbae-a9d9-4db7-866a-bca36f6dd634/crypt/zone" + }, + { + "zone": { + "id": "ee77efe9-81d0-4395-a237-15e30c2c2d04", + "underlay_address": "fd00:1122:3344:10a::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::9]:32345", + "dataset": { + "pool_name": "oxp_30f7d236-c835-46cc-bc27-9099a6826f67" + } + } + }, + "root": "/pool/ext/88ee08c6-1c0f-44c2-9110-b8d5a7589ebb/crypt/zone" + }, + { + "zone": { + "id": "71ab91b7-48d4-4d31-b47e-59f29f419116", + "underlay_address": "fd00:1122:3344:10a::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:10a::e]:123", + "ntp_servers": [ + "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", + "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/cd3fdbae-a9d9-4db7-866a-bca36f6dd634/crypt/zone" + }, + { + "zone": { + "id": "46ccd8fe-4e3c-4307-97ae-1f7ac505082a", + "underlay_address": "fd00:1122:3344:3::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_93867156-a43d-4c03-a899-1535e566c8bd" + }, + "http_address": "[fd00:1122:3344:3::1]:5353", + "dns_address": "[fd00:1122:3344:3::1]:53", + "gz_address": "fd00:1122:3344:3::2", + "gz_address_index": 2 + } + }, + "root": "/pool/ext/9dfe424f-cba6-4bfb-a3dd-e8bd7fdea57d/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled25.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled25.json new file mode 100644 index 0000000000..09a07149cf --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack2-sled25.json @@ -0,0 +1,196 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "180d466d-eb36-4546-8922-e52c4c076823", + "underlay_address": 
"fd00:1122:3344:101::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::5]:32345", + "dataset": { + "pool_name": "oxp_ac789935-fa42-4d00-8967-df0d96dbb74e" + } + } + }, + "root": "/pool/ext/d732addc-cfe8-4c2c-8028-72eb4481b04e/crypt/zone" + }, + { + "zone": { + "id": "b5af0303-bc03-40a3-b733-0396d705dfbf", + "underlay_address": "fd00:1122:3344:101::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::7]:32345", + "dataset": { + "pool_name": "oxp_d732addc-cfe8-4c2c-8028-72eb4481b04e" + } + } + }, + "root": "/pool/ext/677b0057-3a80-461b-aca8-c2cb501a7278/crypt/zone" + }, + { + "zone": { + "id": "9c7c805a-f5ed-4e48-86e3-7aa81a718881", + "underlay_address": "fd00:1122:3344:101::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::c]:32345", + "dataset": { + "pool_name": "oxp_923c930c-80f8-448d-8321-cebfc6c41760" + } + } + }, + "root": "/pool/ext/ac789935-fa42-4d00-8967-df0d96dbb74e/crypt/zone" + }, + { + "zone": { + "id": "4e49c83c-2d4a-491a-91ac-4ab022026dcf", + "underlay_address": "fd00:1122:3344:101::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::4]:32345", + "dataset": { + "pool_name": "oxp_c99e6032-1d4f-47d2-9efe-ae2b2479554e" + } + } + }, + "root": "/pool/ext/653065d2-ab70-47c9-b832-34238fdc95ef/crypt/zone" + }, + { + "zone": { + "id": "0e38475e-b8b2-4813-bf80-3c170081081a", + "underlay_address": "fd00:1122:3344:101::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::d]:32345", + "dataset": { + "pool_name": "oxp_653065d2-ab70-47c9-b832-34238fdc95ef" + } + } + }, + "root": "/pool/ext/4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca/crypt/zone" + }, + { + "zone": { + "id": "75123e60-1116-4b8d-a466-7302220127da", + "underlay_address": "fd00:1122:3344:101::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::8]:32345", + "dataset": { + "pool_name": "oxp_c764a8ae-6862-4eec-9db0-cc6ea478e4a7" + } + } + }, + "root": "/pool/ext/c764a8ae-6862-4eec-9db0-cc6ea478e4a7/crypt/zone" + }, + { + "zone": { + "id": "fbd0379c-97fa-49ea-8980-17ae30ffff3c", + "underlay_address": "fd00:1122:3344:101::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::b]:32345", + "dataset": { + "pool_name": "oxp_fcb0e4c7-e046-4cf5-ad35-3ad90e1eb90c" + } + } + }, + "root": "/pool/ext/4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca/crypt/zone" + }, + { + "zone": { + "id": "ec635326-cd1d-4f73-b8e6-c3a36a7020db", + "underlay_address": "fd00:1122:3344:101::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::a]:32345", + "dataset": { + "pool_name": "oxp_6bfb4120-488d-4f3d-90ef-e9bfa523b388" + } + } + }, + "root": "/pool/ext/c99e6032-1d4f-47d2-9efe-ae2b2479554e/crypt/zone" + }, + { + "zone": { + "id": "f500d564-c40a-4eca-ac8a-a26b435f2037", + "underlay_address": "fd00:1122:3344:101::3", + "zone_type": { + "type": "external_dns", + "dataset": { + "pool_name": "oxp_c99e6032-1d4f-47d2-9efe-ae2b2479554e" + }, + "http_address": "[fd00:1122:3344:101::3]:5353", + "dns_address": "172.20.26.2:53", + "nic": { + "id": "b0b42776-3914-4a69-889f-4831dc72327c", + "kind": { + "type": "service", + "id": "f500d564-c40a-4eca-ac8a-a26b435f2037" + }, + "name": "external-dns-f500d564-c40a-4eca-ac8a-a26b435f2037", + "ip": "172.30.1.6", + "mac": "A8:40:25:FF:D0:B4", + "subnet": "172.30.1.0/24", + "vni": 100, + "primary": true, + "slot": 0 + } + } + }, + "root": "/pool/ext/ac789935-fa42-4d00-8967-df0d96dbb74e/crypt/zone" + }, + { + "zone": { + "id": 
"56d4dbcc-3b4a-4ed0-8795-7734aadcc4c0", + "underlay_address": "fd00:1122:3344:101::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::9]:32345", + "dataset": { + "pool_name": "oxp_4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca" + } + } + }, + "root": "/pool/ext/4c7ad252-55c2-4a1a-9d93-9dfcdfdfacca/crypt/zone" + }, + { + "zone": { + "id": "0d3a1bd5-f6fe-49cb-807a-190dabc90103", + "underlay_address": "fd00:1122:3344:101::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::6]:32345", + "dataset": { + "pool_name": "oxp_677b0057-3a80-461b-aca8-c2cb501a7278" + } + } + }, + "root": "/pool/ext/6bfb4120-488d-4f3d-90ef-e9bfa523b388/crypt/zone" + }, + { + "zone": { + "id": "d34c7184-5d4e-4cb5-8f91-df74a343ffbc", + "underlay_address": "fd00:1122:3344:101::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:101::e]:123", + "ntp_servers": [ + "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", + "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/ac789935-fa42-4d00-8967-df0d96dbb74e/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled8.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled8.json new file mode 100644 index 0000000000..669889b3c5 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack2-sled8.json @@ -0,0 +1,198 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "7153983f-8fd7-4fb9-92ac-0f07a07798b4", + "underlay_address": "fd00:1122:3344:103::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::a]:32345", + "dataset": { + "pool_name": "oxp_bf428719-1b16-4503-99f4-ad95846d916f" + } + } + }, + "root": "/pool/ext/26e698bb-006d-4208-94b9-d1bc279111fa/crypt/zone" + }, + { + "zone": { + "id": "7d44ba36-4a69-490a-bc40-f6f90a4208d4", + "underlay_address": "fd00:1122:3344:103::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::c]:32345", + "dataset": { + "pool_name": "oxp_414e235b-55c3-4dc1-a568-8adf4ea1a052" + } + } + }, + "root": "/pool/ext/cf940e15-dbc5-481b-866a-4de4b018898e/crypt/zone" + }, + { + "zone": { + "id": "65a11c18-7f59-41ac-b9e7-680627f996e7", + "underlay_address": "fd00:1122:3344:103::3", + "zone_type": { + "type": "nexus", + "internal_address": "[fd00:1122:3344:103::3]:12221", + "external_ip": "172.20.26.3", + "nic": { + "id": "a3e13dde-a2bc-4170-ad84-aad8085b6034", + "kind": { + "type": "service", + "id": "65a11c18-7f59-41ac-b9e7-680627f996e7" + }, + "name": "nexus-65a11c18-7f59-41ac-b9e7-680627f996e7", + "ip": "172.30.2.5", + "mac": "A8:40:25:FF:A6:83", + "subnet": "172.30.2.0/24", + "vni": 100, + "primary": true, + "slot": 0 + }, + "external_tls": true, + "external_dns_servers": [ + "1.1.1.1", + "9.9.9.9" + ] + } + }, + "root": "/pool/ext/e126ddcc-8bee-46ba-8199-2a74df0ba040/crypt/zone" + }, + { + "zone": { + "id": "072fdae8-2adf-4fd2-94ce-e9b0663b91e7", + "underlay_address": "fd00:1122:3344:103::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::b]:32345", + "dataset": { + "pool_name": "oxp_26e698bb-006d-4208-94b9-d1bc279111fa" + } + } + }, + "root": "/pool/ext/bf428719-1b16-4503-99f4-ad95846d916f/crypt/zone" + }, + { + "zone": { + "id": "01f93020-7e7d-4185-93fb-6ca234056c82", + "underlay_address": 
"fd00:1122:3344:103::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::5]:32345", + "dataset": { + "pool_name": "oxp_7b24095a-72df-45e3-984f-2b795e052ac7" + } + } + }, + "root": "/pool/ext/7b24095a-72df-45e3-984f-2b795e052ac7/crypt/zone" + }, + { + "zone": { + "id": "e238116d-e5cc-43d4-9c8a-6f138ae8a15d", + "underlay_address": "fd00:1122:3344:103::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::6]:32345", + "dataset": { + "pool_name": "oxp_e126ddcc-8bee-46ba-8199-2a74df0ba040" + } + } + }, + "root": "/pool/ext/7b24095a-72df-45e3-984f-2b795e052ac7/crypt/zone" + }, + { + "zone": { + "id": "585cd8c5-c41e-4be4-beb8-bfbef9b53856", + "underlay_address": "fd00:1122:3344:103::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::7]:32345", + "dataset": { + "pool_name": "oxp_6340805e-c5af-418d-8bd1-fc0085667f33" + } + } + }, + "root": "/pool/ext/414e235b-55c3-4dc1-a568-8adf4ea1a052/crypt/zone" + }, + { + "zone": { + "id": "0b41c560-3b20-42f4-82ad-92f5bb575d6b", + "underlay_address": "fd00:1122:3344:103::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::9]:32345", + "dataset": { + "pool_name": "oxp_b93f880e-c55b-4d6c-9a16-939d84b628fc" + } + } + }, + "root": "/pool/ext/6340805e-c5af-418d-8bd1-fc0085667f33/crypt/zone" + }, + { + "zone": { + "id": "0ccf27c0-e32d-4b52-a2c5-6db0c64a26f9", + "underlay_address": "fd00:1122:3344:103::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::d]:32345", + "dataset": { + "pool_name": "oxp_2115b084-be0f-4fba-941b-33a659798a9e" + } + } + }, + "root": "/pool/ext/414e235b-55c3-4dc1-a568-8adf4ea1a052/crypt/zone" + }, + { + "zone": { + "id": "a6ba8273-0320-4dab-b801-281f041b0c50", + "underlay_address": "fd00:1122:3344:103::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::4]:32345", + "dataset": { + "pool_name": "oxp_8a199f12-4f5c-483a-8aca-f97856658a35" + } + } + }, + "root": "/pool/ext/b93f880e-c55b-4d6c-9a16-939d84b628fc/crypt/zone" + }, + { + "zone": { + "id": "b9b7b4c2-284a-4ec1-80ea-75b7a43b71c4", + "underlay_address": "fd00:1122:3344:103::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::8]:32345", + "dataset": { + "pool_name": "oxp_cf940e15-dbc5-481b-866a-4de4b018898e" + } + } + }, + "root": "/pool/ext/cf940e15-dbc5-481b-866a-4de4b018898e/crypt/zone" + }, + { + "zone": { + "id": "7a85d50e-b524-41c1-a052-118027eb77db", + "underlay_address": "fd00:1122:3344:103::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:103::e]:123", + "ntp_servers": [ + "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", + "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/b93f880e-c55b-4d6c-9a16-939d84b628fc/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack2-sled9.json b/sled-agent/tests/output/new-zones-ledgers/rack2-sled9.json new file mode 100644 index 0000000000..d4a429f9b0 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack2-sled9.json @@ -0,0 +1,192 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "912346a2-d7e6-427e-b373-e8dcbe4fcea9", + "underlay_address": "fd00:1122:3344:105::5", + "zone_type": { + "type": "crucible", + "address": 
"[fd00:1122:3344:105::5]:32345", + "dataset": { + "pool_name": "oxp_b358fb1e-f52a-4a63-9aab-170225509b37" + } + } + }, + "root": "/pool/ext/0ae29053-29a2-489e-a1e6-6aec0ecd05f8/crypt/zone" + }, + { + "zone": { + "id": "3d420dff-c616-4c7d-bab1-0f9c2b5396bf", + "underlay_address": "fd00:1122:3344:105::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::a]:32345", + "dataset": { + "pool_name": "oxp_4eb2e4eb-41d8-496c-9a5a-687d7e004aa4" + } + } + }, + "root": "/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone" + }, + { + "zone": { + "id": "9c5d88c9-8ff1-4f23-9438-7b81322eaf68", + "underlay_address": "fd00:1122:3344:105::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::b]:32345", + "dataset": { + "pool_name": "oxp_aadf48eb-6ff0-40b5-a092-1fdd06c03e11" + } + } + }, + "root": "/pool/ext/4358f47f-f21e-4cc8-829e-0c7fc2400a59/crypt/zone" + }, + { + "zone": { + "id": "f9c1deca-1898-429e-8c93-254c7aa7bae6", + "underlay_address": "fd00:1122:3344:105::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::8]:32345", + "dataset": { + "pool_name": "oxp_d1cb6b7d-2b92-4b7d-8a4d-551987f0277e" + } + } + }, + "root": "/pool/ext/f8b11629-ced6-412a-9c3f-d169b99ee996/crypt/zone" + }, + { + "zone": { + "id": "ce8563f3-4a93-45ff-b727-cbfbee6aa413", + "underlay_address": "fd00:1122:3344:105::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::9]:32345", + "dataset": { + "pool_name": "oxp_4358f47f-f21e-4cc8-829e-0c7fc2400a59" + } + } + }, + "root": "/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone" + }, + { + "zone": { + "id": "9470ea7d-1920-4b4b-8fca-e7659a1ef733", + "underlay_address": "fd00:1122:3344:105::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::c]:32345", + "dataset": { + "pool_name": "oxp_17eff217-f0b1-4353-b133-0f68bbd5ceaa" + } + } + }, + "root": "/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone" + }, + { + "zone": { + "id": "375296e5-0a23-466c-b605-4204080f8103", + "underlay_address": "fd00:1122:3344:105::4", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:105::4]:17000" + } + }, + "root": "/pool/ext/4eb2e4eb-41d8-496c-9a5a-687d7e004aa4/crypt/zone" + }, + { + "zone": { + "id": "f9940969-b0e8-4e8c-86c7-4bc49cd15a5f", + "underlay_address": "fd00:1122:3344:105::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::7]:32345", + "dataset": { + "pool_name": "oxp_f8b11629-ced6-412a-9c3f-d169b99ee996" + } + } + }, + "root": "/pool/ext/17eff217-f0b1-4353-b133-0f68bbd5ceaa/crypt/zone" + }, + { + "zone": { + "id": "23dca27d-c79b-4930-a817-392e8aeaa4c1", + "underlay_address": "fd00:1122:3344:105::e", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::e]:32345", + "dataset": { + "pool_name": "oxp_57650e05-36ff-4de8-865f-b9562bdb67f5" + } + } + }, + "root": "/pool/ext/0ae29053-29a2-489e-a1e6-6aec0ecd05f8/crypt/zone" + }, + { + "zone": { + "id": "92d3e4e9-0768-4772-83c1-23cce52190e9", + "underlay_address": "fd00:1122:3344:105::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::6]:32345", + "dataset": { + "pool_name": "oxp_eb1234a5-fdf7-4977-94d5-2eef25ce56a1" + } + } + }, + "root": "/pool/ext/b358fb1e-f52a-4a63-9aab-170225509b37/crypt/zone" + }, + { + "zone": { + "id": "b3e9fee2-24d2-44e7-8539-a6918e85cf2b", + "underlay_address": "fd00:1122:3344:105::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::d]:32345", + "dataset": { + 
"pool_name": "oxp_0ae29053-29a2-489e-a1e6-6aec0ecd05f8" + } + } + }, + "root": "/pool/ext/eb1234a5-fdf7-4977-94d5-2eef25ce56a1/crypt/zone" + }, + { + "zone": { + "id": "4c3ef132-ec83-4b1b-9574-7c7d3035f9e9", + "underlay_address": "fd00:1122:3344:105::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:105::3]:32221", + "dataset": { + "pool_name": "oxp_b358fb1e-f52a-4a63-9aab-170225509b37" + } + } + }, + "root": "/pool/ext/d1cb6b7d-2b92-4b7d-8a4d-551987f0277e/crypt/zone" + }, + { + "zone": { + "id": "76b79b96-eaa2-4341-9aba-e77cfc92e0a9", + "underlay_address": "fd00:1122:3344:105::f", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:105::f]:123", + "ntp_servers": [ + "c3ec3d1a-3172-4d36-bfd3-f54a04d5ba55.host.control-plane.oxide.internal", + "6ea2684c-115e-48a6-8453-ab52d1cecd73.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/0ae29053-29a2-489e-a1e6-6aec0ecd05f8/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled0.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled0.json new file mode 100644 index 0000000000..db6c55f556 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled0.json @@ -0,0 +1,181 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "0710ecea-dbc4-417f-a6f7-1b97c3045db1", + "underlay_address": "fd00:1122:3344:116::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:116::6]:32345", + "dataset": { + "pool_name": "oxp_d5313ef5-019c-4c47-bc5e-63794107a1bb" + } + } + }, + "root": "/pool/ext/904e93a9-d175-4a20-9006-8c1e847aecf7/crypt/zone" + }, + { + "zone": { + "id": "28b29d14-d55f-4b55-bbc1-f66e46ae3e70", + "underlay_address": "fd00:1122:3344:116::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:116::9]:32345", + "dataset": { + "pool_name": "oxp_60755ffe-e9ee-4619-a751-8b3ea6405e67" + } + } + }, + "root": "/pool/ext/d5313ef5-019c-4c47-bc5e-63794107a1bb/crypt/zone" + }, + { + "zone": { + "id": "6f8f9fd2-b139-4069-a7e2-8d40efd58f6c", + "underlay_address": "fd00:1122:3344:116::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:116::d]:32345", + "dataset": { + "pool_name": "oxp_ccd2cb0b-782f-4026-a160-6d1192f04ca3" + } + } + }, + "root": "/pool/ext/d5313ef5-019c-4c47-bc5e-63794107a1bb/crypt/zone" + }, + { + "zone": { + "id": "450308ad-bf4d-40ff-ba62-f3290f7fffaf", + "underlay_address": "fd00:1122:3344:116::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:116::4]:32345", + "dataset": { + "pool_name": "oxp_46b09442-65ba-4d59-9121-9803fe3b724b" + } + } + }, + "root": "/pool/ext/54d901cc-f75e-417d-8a9f-24363136d0ef/crypt/zone" + }, + { + "zone": { + "id": "9a22bbaa-eab4-4a32-8546-9882dc029483", + "underlay_address": "fd00:1122:3344:116::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:116::8]:32345", + "dataset": { + "pool_name": "oxp_93e3f350-75a0-4af0-bdac-baf9b423926f" + } + } + }, + "root": "/pool/ext/d5313ef5-019c-4c47-bc5e-63794107a1bb/crypt/zone" + }, + { + "zone": { + "id": "63a9dc49-0b5b-4483-95ed-553b545dc202", + "underlay_address": "fd00:1122:3344:116::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:116::a]:32345", + "dataset": { + "pool_name": "oxp_e3532845-76c0-42a9-903b-a07f7992e937" + } + } + }, + "root": 
"/pool/ext/60755ffe-e9ee-4619-a751-8b3ea6405e67/crypt/zone" + }, + { + "zone": { + "id": "1fef5b6c-78e4-4ad9-9973-9d8c78f1e232", + "underlay_address": "fd00:1122:3344:116::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:116::7]:32345", + "dataset": { + "pool_name": "oxp_54d901cc-f75e-417d-8a9f-24363136d0ef" + } + } + }, + "root": "/pool/ext/90d7b6f9-3e28-48b0-86ac-0486728075cf/crypt/zone" + }, + { + "zone": { + "id": "b2aab21a-cccd-4aa9-977f-a32090e6eaa7", + "underlay_address": "fd00:1122:3344:116::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:116::5]:32345", + "dataset": { + "pool_name": "oxp_90d7b6f9-3e28-48b0-86ac-0486728075cf" + } + } + }, + "root": "/pool/ext/46b09442-65ba-4d59-9121-9803fe3b724b/crypt/zone" + }, + { + "zone": { + "id": "fc1bbf28-24f3-4c1f-b367-2bc8231eb7d4", + "underlay_address": "fd00:1122:3344:116::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:116::b]:32345", + "dataset": { + "pool_name": "oxp_0a7bb0d3-408b-42b1-8846-76cf106a9580" + } + } + }, + "root": "/pool/ext/e3532845-76c0-42a9-903b-a07f7992e937/crypt/zone" + }, + { + "zone": { + "id": "bcb7617a-f76a-4912-8ccc-802d2a697e3c", + "underlay_address": "fd00:1122:3344:116::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:116::c]:32345", + "dataset": { + "pool_name": "oxp_904e93a9-d175-4a20-9006-8c1e847aecf7" + } + } + }, + "root": "/pool/ext/ccd2cb0b-782f-4026-a160-6d1192f04ca3/crypt/zone" + }, + { + "zone": { + "id": "371fba3a-658b-469b-b675-c90cc0d39254", + "underlay_address": "fd00:1122:3344:116::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:116::3]:32221", + "dataset": { + "pool_name": "oxp_46b09442-65ba-4d59-9121-9803fe3b724b" + } + } + }, + "root": "/pool/ext/46b09442-65ba-4d59-9121-9803fe3b724b/crypt/zone" + }, + { + "zone": { + "id": "5a4d89f5-49e0-4566-a99c-342d1bb26b1c", + "underlay_address": "fd00:1122:3344:116::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:116::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/60755ffe-e9ee-4619-a751-8b3ea6405e67/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled1.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled1.json new file mode 100644 index 0000000000..ae3e3d8f4a --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled1.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "f401d06c-46fc-42f8-aa51-7515a51355ce", + "underlay_address": "fd00:1122:3344:11c::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11c::8]:32345", + "dataset": { + "pool_name": "oxp_8a88768a-2dd5-43b7-bd40-0db77be4d3a8" + } + } + }, + "root": "/pool/ext/19d23d27-6a33-4203-b8c1-4b0df4ac791f/crypt/zone" + }, + { + "zone": { + "id": "721c96ea-08d4-4c89-828f-600e7e344916", + "underlay_address": "fd00:1122:3344:11c::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11c::6]:32345", + "dataset": { + "pool_name": "oxp_15259003-fb04-4547-b4a9-b4511893c0fd" + } + } + }, + "root": "/pool/ext/d2a8ed82-22ef-46d8-ad40-e1cb2cecebee/crypt/zone" + }, + { + "zone": { + "id": 
"ca17bdf9-51c5-4e1e-b822-856609070ec6", + "underlay_address": "fd00:1122:3344:11c::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11c::5]:32345", + "dataset": { + "pool_name": "oxp_d2a8ed82-22ef-46d8-ad40-e1cb2cecebee" + } + } + }, + "root": "/pool/ext/15259003-fb04-4547-b4a9-b4511893c0fd/crypt/zone" + }, + { + "zone": { + "id": "5825447e-1b5b-4960-b202-e75853d3d250", + "underlay_address": "fd00:1122:3344:11c::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11c::9]:32345", + "dataset": { + "pool_name": "oxp_04e94454-cbd4-4cee-ad69-42372bcbabd5" + } + } + }, + "root": "/pool/ext/542e0fb3-552c-4d3b-b853-da1f13b581a0/crypt/zone" + }, + { + "zone": { + "id": "b937d3f0-1352-47a2-b9d1-a9ccf9c82b16", + "underlay_address": "fd00:1122:3344:11c::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11c::c]:32345", + "dataset": { + "pool_name": "oxp_542e0fb3-552c-4d3b-b853-da1f13b581a0" + } + } + }, + "root": "/pool/ext/eedd1d58-4892-456f-aaf7-9d650c7921ca/crypt/zone" + }, + { + "zone": { + "id": "d63a677b-8dac-44ee-89a2-cc4cb151254d", + "underlay_address": "fd00:1122:3344:11c::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11c::3]:32345", + "dataset": { + "pool_name": "oxp_45b5f1ee-7b66-4d74-8364-54fa0c73775f" + } + } + }, + "root": "/pool/ext/8a88768a-2dd5-43b7-bd40-0db77be4d3a8/crypt/zone" + }, + { + "zone": { + "id": "abcb92ea-9f17-4cd8-897b-9d0d1ef7903a", + "underlay_address": "fd00:1122:3344:11c::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11c::4]:32345", + "dataset": { + "pool_name": "oxp_341d49db-c06a-416d-90e1-b0a3426ed02e" + } + } + }, + "root": "/pool/ext/eedd1d58-4892-456f-aaf7-9d650c7921ca/crypt/zone" + }, + { + "zone": { + "id": "000ac89d-db07-47ae-83cf-d9cafef013de", + "underlay_address": "fd00:1122:3344:11c::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11c::b]:32345", + "dataset": { + "pool_name": "oxp_eedd1d58-4892-456f-aaf7-9d650c7921ca" + } + } + }, + "root": "/pool/ext/04e94454-cbd4-4cee-ad69-42372bcbabd5/crypt/zone" + }, + { + "zone": { + "id": "29e1e2e4-695e-4c05-8f0c-c16a0a61d390", + "underlay_address": "fd00:1122:3344:11c::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11c::7]:32345", + "dataset": { + "pool_name": "oxp_19d23d27-6a33-4203-b8c1-4b0df4ac791f" + } + } + }, + "root": "/pool/ext/d2a8ed82-22ef-46d8-ad40-e1cb2cecebee/crypt/zone" + }, + { + "zone": { + "id": "9fa7d7be-a6de-4d36-b56b-d1cc5ca7c82c", + "underlay_address": "fd00:1122:3344:11c::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11c::a]:32345", + "dataset": { + "pool_name": "oxp_0fd7a0b1-ed4b-4dc6-8c44-a49c9628c7e1" + } + } + }, + "root": "/pool/ext/d2a8ed82-22ef-46d8-ad40-e1cb2cecebee/crypt/zone" + }, + { + "zone": { + "id": "249db5f1-45e2-4a5c-a91f-cc51dbd87040", + "underlay_address": "fd00:1122:3344:11c::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:11c::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/542e0fb3-552c-4d3b-b853-da1f13b581a0/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled11.json 
b/sled-agent/tests/output/new-zones-ledgers/rack3-sled11.json new file mode 100644 index 0000000000..c94417ffb8 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled11.json @@ -0,0 +1,201 @@ +{ + "omicron_generation": 2, + "ledger_generation": 5, + "zones": [ + { + "zone": { + "id": "7ddd0738-59df-4b67-a41e-7f0de9827187", + "underlay_address": "fd00:1122:3344:11e::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11e::4]:32345", + "dataset": { + "pool_name": "oxp_09af632a-6b1b-4a18-8c91-d392da38b02f" + } + } + }, + "root": "/pool/ext/09af632a-6b1b-4a18-8c91-d392da38b02f/crypt/zone" + }, + { + "zone": { + "id": "9706189f-713a-4394-b5dc-45dcf67dc46e", + "underlay_address": "fd00:1122:3344:11e::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11e::9]:32345", + "dataset": { + "pool_name": "oxp_4e1837c8-91ab-4d1d-abfd-f5144d88535e" + } + } + }, + "root": "/pool/ext/2f0d47cb-28d1-4350-8656-60c6121f773b/crypt/zone" + }, + { + "zone": { + "id": "7bdd841b-5e34-4c19-9066-b12578651446", + "underlay_address": "fd00:1122:3344:11e::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11e::a]:32345", + "dataset": { + "pool_name": "oxp_78d1e7f7-8d11-4fed-8b1e-be58908aea2f" + } + } + }, + "root": "/pool/ext/62c23f4b-8e7b-4cd8-9055-19c1d8bd5ac8/crypt/zone" + }, + { + "zone": { + "id": "74c0f60b-de5f-4456-a85f-f992a6e10424", + "underlay_address": "fd00:1122:3344:11e::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11e::b]:32345", + "dataset": { + "pool_name": "oxp_3b81d709-bf10-4dd7-a2c0-759d8acc2da0" + } + } + }, + "root": "/pool/ext/09af632a-6b1b-4a18-8c91-d392da38b02f/crypt/zone" + }, + { + "zone": { + "id": "da81ce6f-bd38-440e-b966-8a743092fa21", + "underlay_address": "fd00:1122:3344:11e::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11e::6]:32345", + "dataset": { + "pool_name": "oxp_62c23f4b-8e7b-4cd8-9055-19c1d8bd5ac8" + } + } + }, + "root": "/pool/ext/215dd02b-0de6-488a-9e65-5e588cd079fb/crypt/zone" + }, + { + "zone": { + "id": "febbca37-5279-400f-a2e9-6b5271b2d2fc", + "underlay_address": "fd00:1122:3344:11e::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11e::7]:32345", + "dataset": { + "pool_name": "oxp_fb33e773-fb93-41a0-8078-b653b9078dda" + } + } + }, + "root": "/pool/ext/2f0d47cb-28d1-4350-8656-60c6121f773b/crypt/zone" + }, + { + "zone": { + "id": "5100e222-5ea4-4e67-9040-679137e666c8", + "underlay_address": "fd00:1122:3344:11e::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11e::5]:32345", + "dataset": { + "pool_name": "oxp_23767587-2253-431b-8944-18b9bfefcb3d" + } + } + }, + "root": "/pool/ext/3b81d709-bf10-4dd7-a2c0-759d8acc2da0/crypt/zone" + }, + { + "zone": { + "id": "c7ec3bc8-08ca-4901-a45e-0d68db72c6a7", + "underlay_address": "fd00:1122:3344:11e::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11e::3]:32345", + "dataset": { + "pool_name": "oxp_2f0d47cb-28d1-4350-8656-60c6121f773b" + } + } + }, + "root": "/pool/ext/215dd02b-0de6-488a-9e65-5e588cd079fb/crypt/zone" + }, + { + "zone": { + "id": "1fc80dd3-0fd9-4403-96bd-5bbf9eb0f15a", + "underlay_address": "fd00:1122:3344:11e::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11e::c]:32345", + "dataset": { + "pool_name": "oxp_2c932d54-41fb-4ffe-a57f-0479b9e5841e" + } + } + }, + "root": "/pool/ext/3b81d709-bf10-4dd7-a2c0-759d8acc2da0/crypt/zone" + }, + { + "zone": { + "id": 
"4eacc68d-5699-440a-ab33-c75f259e4cc3", + "underlay_address": "fd00:1122:3344:11e::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11e::8]:32345", + "dataset": { + "pool_name": "oxp_215dd02b-0de6-488a-9e65-5e588cd079fb" + } + } + }, + "root": "/pool/ext/4e1837c8-91ab-4d1d-abfd-f5144d88535e/crypt/zone" + }, + { + "zone": { + "id": "cb901d3e-8811-4c4c-a274-a44130501ecf", + "underlay_address": "fd00:1122:3344:11e::d", + "zone_type": { + "type": "boundary_ntp", + "address": "[fd00:1122:3344:11e::d]:123", + "ntp_servers": [ + "time.cloudflare.com" + ], + "dns_servers": [ + "1.1.1.1", + "8.8.8.8" + ], + "domain": null, + "nic": { + "id": "bcf9d9eb-b4ba-4fd5-91e0-55a3414ae049", + "kind": { + "type": "service", + "id": "cb901d3e-8811-4c4c-a274-a44130501ecf" + }, + "name": "ntp-cb901d3e-8811-4c4c-a274-a44130501ecf", + "ip": "172.30.3.6", + "mac": "A8:40:25:FF:D5:2F", + "subnet": "172.30.3.0/24", + "vni": 100, + "primary": true, + "slot": 0 + }, + "snat_cfg": { + "ip": "45.154.216.39", + "first_port": 16384, + "last_port": 32767 + } + } + }, + "root": "/pool/ext/23767587-2253-431b-8944-18b9bfefcb3d/crypt/zone" + }, + { + "zone": { + "id": "be4aada9-d160-401d-a630-a0764c039702", + "underlay_address": "fd00:1122:3344:2::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_2f0d47cb-28d1-4350-8656-60c6121f773b" + }, + "http_address": "[fd00:1122:3344:2::1]:5353", + "dns_address": "[fd00:1122:3344:2::1]:53", + "gz_address": "fd00:1122:3344:2::2", + "gz_address_index": 1 + } + }, + "root": "/pool/ext/78d1e7f7-8d11-4fed-8b1e-be58908aea2f/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled12.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled12.json new file mode 100644 index 0000000000..bfc30cf160 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled12.json @@ -0,0 +1,181 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "d8f1b9d2-fa2e-4f03-bbea-2039448d7792", + "underlay_address": "fd00:1122:3344:112::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:112::5]:32345", + "dataset": { + "pool_name": "oxp_7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1" + } + } + }, + "root": "/pool/ext/78d9f0ae-8e7f-450e-abc2-76b983efa5cd/crypt/zone" + }, + { + "zone": { + "id": "2074a935-c0b3-4c4f-aae5-a29adae3e1ac", + "underlay_address": "fd00:1122:3344:112::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:112::8]:32345", + "dataset": { + "pool_name": "oxp_ac663368-45fb-447c-811e-561c68e37bdd" + } + } + }, + "root": "/pool/ext/ac663368-45fb-447c-811e-561c68e37bdd/crypt/zone" + }, + { + "zone": { + "id": "2885d3c7-ad7d-445c-8630-dc6c81f8caa0", + "underlay_address": "fd00:1122:3344:112::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:112::a]:32345", + "dataset": { + "pool_name": "oxp_8e82e8da-e1c5-4867-bc1c-b5441f9c1010" + } + } + }, + "root": "/pool/ext/8e82e8da-e1c5-4867-bc1c-b5441f9c1010/crypt/zone" + }, + { + "zone": { + "id": "1eca241b-6868-4c59-876b-58356654f3b5", + "underlay_address": "fd00:1122:3344:112::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:112::c]:32345", + "dataset": { + "pool_name": "oxp_fde16c69-aa47-4a15-bb3f-3a5861ae45bd" + } + } + }, + "root": "/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone" + }, + { + "zone": { + "id": "cc656f2e-8542-4986-8524-2f55984939c1", + "underlay_address": "fd00:1122:3344:112::d", + 
"zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:112::d]:32345", + "dataset": { + "pool_name": "oxp_21e6d0f9-887e-4d6f-9a00-4cd61139eea6" + } + } + }, + "root": "/pool/ext/21e6d0f9-887e-4d6f-9a00-4cd61139eea6/crypt/zone" + }, + { + "zone": { + "id": "dfb1ebce-a4c7-4b50-9435-9a79b884c1af", + "underlay_address": "fd00:1122:3344:112::3", + "zone_type": { + "type": "clickhouse", + "address": "[fd00:1122:3344:112::3]:8123", + "dataset": { + "pool_name": "oxp_4f045315-de51-46ed-a011-16496615278f" + } + } + }, + "root": "/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone" + }, + { + "zone": { + "id": "a95d90ed-b2b1-4a5d-8d0d-4195b34bc764", + "underlay_address": "fd00:1122:3344:112::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:112::6]:32345", + "dataset": { + "pool_name": "oxp_d2c77c69-14d7-442e-8b47-a0d7af5a0e7e" + } + } + }, + "root": "/pool/ext/fad56ff1-ad9f-4215-b584-522eab18cf7b/crypt/zone" + }, + { + "zone": { + "id": "1d3ebc90-d5a5-4cb0-ae90-50bb2163ae13", + "underlay_address": "fd00:1122:3344:112::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:112::b]:32345", + "dataset": { + "pool_name": "oxp_fad56ff1-ad9f-4215-b584-522eab18cf7b" + } + } + }, + "root": "/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone" + }, + { + "zone": { + "id": "7af9f38b-0c7a-402e-8db3-7c7fb50b4665", + "underlay_address": "fd00:1122:3344:112::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:112::9]:32345", + "dataset": { + "pool_name": "oxp_d0693580-5c5a-449f-803f-ce7188ebc580" + } + } + }, + "root": "/pool/ext/d2c77c69-14d7-442e-8b47-a0d7af5a0e7e/crypt/zone" + }, + { + "zone": { + "id": "94d9bb0a-ecd2-4501-b960-60982f55ad12", + "underlay_address": "fd00:1122:3344:112::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:112::7]:32345", + "dataset": { + "pool_name": "oxp_78d9f0ae-8e7f-450e-abc2-76b983efa5cd" + } + } + }, + "root": "/pool/ext/ac663368-45fb-447c-811e-561c68e37bdd/crypt/zone" + }, + { + "zone": { + "id": "277c1105-576e-4ec1-8e2c-cbae2f5ac9f6", + "underlay_address": "fd00:1122:3344:112::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:112::4]:32345", + "dataset": { + "pool_name": "oxp_4f045315-de51-46ed-a011-16496615278f" + } + } + }, + "root": "/pool/ext/7d7ed1b7-7b77-4f0a-abb1-27de7cb584d1/crypt/zone" + }, + { + "zone": { + "id": "555c3407-a76c-4ea4-a17a-a670d85a59b0", + "underlay_address": "fd00:1122:3344:112::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:112::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/8e82e8da-e1c5-4867-bc1c-b5441f9c1010/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled13.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled13.json new file mode 100644 index 0000000000..66c04be148 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled13.json @@ -0,0 +1,201 @@ +{ + "omicron_generation": 2, + "ledger_generation": 5, + "zones": [ + { + "zone": { + "id": "fbcf51c9-a732-4a03-8c19-cfb5b819cb7a", + "underlay_address": "fd00:1122:3344:104::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::5]:32345", + "dataset": 
{ + "pool_name": "oxp_382a2961-cd27-4a9c-901d-468a45ff5708" + } + } + }, + "root": "/pool/ext/e99994ae-61ca-4742-a02c-eb0a8a5b69ff/crypt/zone" + }, + { + "zone": { + "id": "7f8a5026-1f1d-4ab3-8c04-077bfda2f815", + "underlay_address": "fd00:1122:3344:104::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::4]:32345", + "dataset": { + "pool_name": "oxp_9c99b9b6-8018-455e-a58a-c048ddd3e11b" + } + } + }, + "root": "/pool/ext/22c79e54-37ef-4ad2-a6cb-a7ee3e4f7167/crypt/zone" + }, + { + "zone": { + "id": "6d45d856-0e49-4eb7-ad76-989a9ae636a2", + "underlay_address": "fd00:1122:3344:104::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::3]:32345", + "dataset": { + "pool_name": "oxp_b74a84fa-b4c8-4c5f-92f4-f4e62a0a311d" + } + } + }, + "root": "/pool/ext/9c99b9b6-8018-455e-a58a-c048ddd3e11b/crypt/zone" + }, + { + "zone": { + "id": "c8dc7fff-72c8-49eb-a552-d605f8655134", + "underlay_address": "fd00:1122:3344:104::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::6]:32345", + "dataset": { + "pool_name": "oxp_22c79e54-37ef-4ad2-a6cb-a7ee3e4f7167" + } + } + }, + "root": "/pool/ext/22c79e54-37ef-4ad2-a6cb-a7ee3e4f7167/crypt/zone" + }, + { + "zone": { + "id": "128a90f5-8889-4665-8343-2c7098f2922c", + "underlay_address": "fd00:1122:3344:104::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::7]:32345", + "dataset": { + "pool_name": "oxp_8b3d0b51-c6a5-4d2c-827a-0d0d1471136d" + } + } + }, + "root": "/pool/ext/29cd042b-e772-4d26-ac85-ef16009950bd/crypt/zone" + }, + { + "zone": { + "id": "a72f1878-3b03-4267-9024-5df5ebae69de", + "underlay_address": "fd00:1122:3344:104::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::a]:32345", + "dataset": { + "pool_name": "oxp_e99994ae-61ca-4742-a02c-eb0a8a5b69ff" + } + } + }, + "root": "/pool/ext/8b3d0b51-c6a5-4d2c-827a-0d0d1471136d/crypt/zone" + }, + { + "zone": { + "id": "6a9165a2-9b66-485a-aaf0-70d89d60bb6c", + "underlay_address": "fd00:1122:3344:104::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::b]:32345", + "dataset": { + "pool_name": "oxp_6a02f05f-e400-4c80-8df8-89aaecb6c12b" + } + } + }, + "root": "/pool/ext/9c99b9b6-8018-455e-a58a-c048ddd3e11b/crypt/zone" + }, + { + "zone": { + "id": "9677c4ed-96bc-4dcb-ae74-f7a3e9d2b5e2", + "underlay_address": "fd00:1122:3344:104::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::c]:32345", + "dataset": { + "pool_name": "oxp_7c30978f-ee87-4e53-8fdf-3455e5e851b7" + } + } + }, + "root": "/pool/ext/29cd042b-e772-4d26-ac85-ef16009950bd/crypt/zone" + }, + { + "zone": { + "id": "179039e7-3ffd-4b76-9379-bef41d42a5ff", + "underlay_address": "fd00:1122:3344:104::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::8]:32345", + "dataset": { + "pool_name": "oxp_4db7e002-e112-4bfc-a41e-8ae26991b01e" + } + } + }, + "root": "/pool/ext/8b3d0b51-c6a5-4d2c-827a-0d0d1471136d/crypt/zone" + }, + { + "zone": { + "id": "6067e31e-b6a3-4114-9e49-0296adc8e7af", + "underlay_address": "fd00:1122:3344:104::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:104::9]:32345", + "dataset": { + "pool_name": "oxp_29cd042b-e772-4d26-ac85-ef16009950bd" + } + } + }, + "root": "/pool/ext/9c99b9b6-8018-455e-a58a-c048ddd3e11b/crypt/zone" + }, + { + "zone": { + "id": "440dd615-e11f-4a5d-aeb4-dcf88bb314de", + "underlay_address": "fd00:1122:3344:104::d", + "zone_type": { + "type": "boundary_ntp", + "address": 
"[fd00:1122:3344:104::d]:123", + "ntp_servers": [ + "time.cloudflare.com" + ], + "dns_servers": [ + "1.1.1.1", + "8.8.8.8" + ], + "domain": null, + "nic": { + "id": "0b52fe1b-f4cc-43b1-9ac3-4ebb4ab60133", + "kind": { + "type": "service", + "id": "440dd615-e11f-4a5d-aeb4-dcf88bb314de" + }, + "name": "ntp-440dd615-e11f-4a5d-aeb4-dcf88bb314de", + "ip": "172.30.3.5", + "mac": "A8:40:25:FF:85:1E", + "subnet": "172.30.3.0/24", + "vni": 100, + "primary": true, + "slot": 0 + }, + "snat_cfg": { + "ip": "45.154.216.38", + "first_port": 0, + "last_port": 16383 + } + } + }, + "root": "/pool/ext/382a2961-cd27-4a9c-901d-468a45ff5708/crypt/zone" + }, + { + "zone": { + "id": "06e2de03-bd92-404c-a8ea-a13185539d24", + "underlay_address": "fd00:1122:3344:1::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_b74a84fa-b4c8-4c5f-92f4-f4e62a0a311d" + }, + "http_address": "[fd00:1122:3344:1::1]:5353", + "dns_address": "[fd00:1122:3344:1::1]:53", + "gz_address": "fd00:1122:3344:1::2", + "gz_address_index": 0 + } + }, + "root": "/pool/ext/e99994ae-61ca-4742-a02c-eb0a8a5b69ff/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled14.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled14.json new file mode 100644 index 0000000000..e8d061dbfd --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled14.json @@ -0,0 +1,198 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "ac35afab-a312-43c3-a42d-04b8e99fcbde", + "underlay_address": "fd00:1122:3344:111::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:111::4]:32345", + "dataset": { + "pool_name": "oxp_6601065c-c172-4118-81b4-16adde7e9401" + } + } + }, + "root": "/pool/ext/24d7e250-9fc6-459e-8155-30f8e8ccb28c/crypt/zone" + }, + { + "zone": { + "id": "6cd94da2-35b9-4683-a931-29ad4a5ed0ef", + "underlay_address": "fd00:1122:3344:111::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:111::c]:32345", + "dataset": { + "pool_name": "oxp_58276eba-a53c-4ef3-b374-4cdcde4d6e12" + } + } + }, + "root": "/pool/ext/24d7e250-9fc6-459e-8155-30f8e8ccb28c/crypt/zone" + }, + { + "zone": { + "id": "41f07d39-fcc0-4796-8b7c-7cfcd9135f78", + "underlay_address": "fd00:1122:3344:111::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:111::9]:32345", + "dataset": { + "pool_name": "oxp_4b90abdc-3348-4158-bedc-5bcd56e281d8" + } + } + }, + "root": "/pool/ext/8e955f54-fbef-4021-9eec-457825468813/crypt/zone" + }, + { + "zone": { + "id": "44c35566-dd64-4e4a-896e-c50aaa3df14f", + "underlay_address": "fd00:1122:3344:111::3", + "zone_type": { + "type": "nexus", + "internal_address": "[fd00:1122:3344:111::3]:12221", + "external_ip": "45.154.216.37", + "nic": { + "id": "6f824d20-6ce0-4e8b-9ce3-b12dd2b59913", + "kind": { + "type": "service", + "id": "44c35566-dd64-4e4a-896e-c50aaa3df14f" + }, + "name": "nexus-44c35566-dd64-4e4a-896e-c50aaa3df14f", + "ip": "172.30.2.7", + "mac": "A8:40:25:FF:E8:5F", + "subnet": "172.30.2.0/24", + "vni": 100, + "primary": true, + "slot": 0 + }, + "external_tls": true, + "external_dns_servers": [ + "1.1.1.1", + "8.8.8.8" + ] + } + }, + "root": "/pool/ext/435d7a1b-2865-4d49-903f-a68f464ade4d/crypt/zone" + }, + { + "zone": { + "id": "e5020d24-8652-456b-bf92-cd7d255a34c5", + "underlay_address": "fd00:1122:3344:111::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:111::6]:32345", + "dataset": { + "pool_name": 
"oxp_f6925045-363d-4e18-9bde-ee2987b33d21" + } + } + }, + "root": "/pool/ext/6601065c-c172-4118-81b4-16adde7e9401/crypt/zone" + }, + { + "zone": { + "id": "8f25f258-afd7-4351-83e4-24220ec0c251", + "underlay_address": "fd00:1122:3344:111::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:111::8]:32345", + "dataset": { + "pool_name": "oxp_8e955f54-fbef-4021-9eec-457825468813" + } + } + }, + "root": "/pool/ext/6601065c-c172-4118-81b4-16adde7e9401/crypt/zone" + }, + { + "zone": { + "id": "26aa50ec-d70a-47ea-85fc-e55c62a2e0c6", + "underlay_address": "fd00:1122:3344:111::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:111::5]:32345", + "dataset": { + "pool_name": "oxp_24d7e250-9fc6-459e-8155-30f8e8ccb28c" + } + } + }, + "root": "/pool/ext/435d7a1b-2865-4d49-903f-a68f464ade4d/crypt/zone" + }, + { + "zone": { + "id": "68dc212f-a96a-420f-8334-b11ee5d7cb95", + "underlay_address": "fd00:1122:3344:111::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:111::7]:32345", + "dataset": { + "pool_name": "oxp_4353b00b-937e-4d07-aea6-014c57b6f12c" + } + } + }, + "root": "/pool/ext/24d7e250-9fc6-459e-8155-30f8e8ccb28c/crypt/zone" + }, + { + "zone": { + "id": "475140fa-a5dc-4ec1-876d-751c48adfc37", + "underlay_address": "fd00:1122:3344:111::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:111::a]:32345", + "dataset": { + "pool_name": "oxp_ee55b053-6874-4e20-86b5-2e105e64c068" + } + } + }, + "root": "/pool/ext/ee55b053-6874-4e20-86b5-2e105e64c068/crypt/zone" + }, + { + "zone": { + "id": "09d5a8c9-00db-4914-a2c6-7ae3d2da4558", + "underlay_address": "fd00:1122:3344:111::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:111::d]:32345", + "dataset": { + "pool_name": "oxp_9ab5aba5-47dc-4bc4-8f6d-7cbe0f98a9a2" + } + } + }, + "root": "/pool/ext/8e955f54-fbef-4021-9eec-457825468813/crypt/zone" + }, + { + "zone": { + "id": "014f6a39-ad64-4f0a-9fef-01ca0d184cbf", + "underlay_address": "fd00:1122:3344:111::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:111::b]:32345", + "dataset": { + "pool_name": "oxp_435d7a1b-2865-4d49-903f-a68f464ade4d" + } + } + }, + "root": "/pool/ext/f6925045-363d-4e18-9bde-ee2987b33d21/crypt/zone" + }, + { + "zone": { + "id": "aceaf348-ba07-4965-a543-63a800826fe8", + "underlay_address": "fd00:1122:3344:111::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:111::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/8e955f54-fbef-4021-9eec-457825468813/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled15.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled15.json new file mode 100644 index 0000000000..e3b3dba86a --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled15.json @@ -0,0 +1,196 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "09a9ecee-1e7c-4819-b27a-73bb61099ce7", + "underlay_address": "fd00:1122:3344:114::3", + "zone_type": { + "type": "external_dns", + "dataset": { + "pool_name": "oxp_b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e" + }, + "http_address": "[fd00:1122:3344:114::3]:5353", + "dns_address": "45.154.216.33:53", + 
"nic": { + "id": "400ca77b-7fee-47d5-8f17-1f4b9c729f27", + "kind": { + "type": "service", + "id": "09a9ecee-1e7c-4819-b27a-73bb61099ce7" + }, + "name": "external-dns-09a9ecee-1e7c-4819-b27a-73bb61099ce7", + "ip": "172.30.1.5", + "mac": "A8:40:25:FF:B7:C7", + "subnet": "172.30.1.0/24", + "vni": 100, + "primary": true, + "slot": 0 + } + } + }, + "root": "/pool/ext/9e878b1e-bf92-4155-8162-640851c2f5d5/crypt/zone" + }, + { + "zone": { + "id": "1792e003-55f7-49b8-906c-4160db91bc23", + "underlay_address": "fd00:1122:3344:114::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:114::5]:32345", + "dataset": { + "pool_name": "oxp_7f3a760f-a4c0-456f-8a22-2d06ecac1022" + } + } + }, + "root": "/pool/ext/76f09ad5-c96c-4748-bbe4-71afaea7bc5e/crypt/zone" + }, + { + "zone": { + "id": "73bc7c0e-1034-449f-8920-4a1f418653ff", + "underlay_address": "fd00:1122:3344:114::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:114::8]:32345", + "dataset": { + "pool_name": "oxp_e87037be-1cdf-4c6e-a8a3-c27b830eaef9" + } + } + }, + "root": "/pool/ext/b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e/crypt/zone" + }, + { + "zone": { + "id": "06dc6619-6251-4543-9a10-da1698af49d5", + "underlay_address": "fd00:1122:3344:114::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:114::9]:32345", + "dataset": { + "pool_name": "oxp_ee34c530-ce70-4f1a-8c97-d0ebb77ccfc8" + } + } + }, + "root": "/pool/ext/9e878b1e-bf92-4155-8162-640851c2f5d5/crypt/zone" + }, + { + "zone": { + "id": "0d796c52-37ca-490d-b42f-dcc22fe5fd6b", + "underlay_address": "fd00:1122:3344:114::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:114::c]:32345", + "dataset": { + "pool_name": "oxp_9ec2b893-d486-4b24-a077-1a297f9eb15f" + } + } + }, + "root": "/pool/ext/9e72c0e2-4895-4791-b606-2f18e432fb69/crypt/zone" + }, + { + "zone": { + "id": "91d0011f-de44-4823-bc26-a447affa39bc", + "underlay_address": "fd00:1122:3344:114::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:114::a]:32345", + "dataset": { + "pool_name": "oxp_85e81a14-031d-4a63-a91f-981c64e91f60" + } + } + }, + "root": "/pool/ext/b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e/crypt/zone" + }, + { + "zone": { + "id": "0c44a2f1-559a-459c-9931-e0e7964d41c6", + "underlay_address": "fd00:1122:3344:114::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:114::b]:32345", + "dataset": { + "pool_name": "oxp_76f09ad5-c96c-4748-bbe4-71afaea7bc5e" + } + } + }, + "root": "/pool/ext/e87037be-1cdf-4c6e-a8a3-c27b830eaef9/crypt/zone" + }, + { + "zone": { + "id": "ea363819-96f6-4fb6-a203-f18414f1c60e", + "underlay_address": "fd00:1122:3344:114::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:114::4]:32345", + "dataset": { + "pool_name": "oxp_b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e" + } + } + }, + "root": "/pool/ext/b7fbb6db-aa4a-4a6d-8206-b7bdc000d56e/crypt/zone" + }, + { + "zone": { + "id": "21592c39-da6b-4527-842e-edeeceffafa1", + "underlay_address": "fd00:1122:3344:114::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:114::6]:32345", + "dataset": { + "pool_name": "oxp_9e72c0e2-4895-4791-b606-2f18e432fb69" + } + } + }, + "root": "/pool/ext/7aff8429-b65d-4a53-a796-7221ac7581a9/crypt/zone" + }, + { + "zone": { + "id": "f33b1263-f1b2-43a6-a8aa-5f8570dd4e72", + "underlay_address": "fd00:1122:3344:114::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:114::7]:32345", + "dataset": { + "pool_name": "oxp_9e878b1e-bf92-4155-8162-640851c2f5d5" + } + } + 
}, + "root": "/pool/ext/7f3a760f-a4c0-456f-8a22-2d06ecac1022/crypt/zone" + }, + { + "zone": { + "id": "6f42b469-5a36-4048-a152-e884f7e8a206", + "underlay_address": "fd00:1122:3344:114::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:114::d]:32345", + "dataset": { + "pool_name": "oxp_7aff8429-b65d-4a53-a796-7221ac7581a9" + } + } + }, + "root": "/pool/ext/9e72c0e2-4895-4791-b606-2f18e432fb69/crypt/zone" + }, + { + "zone": { + "id": "ad77d594-8f78-4d33-a5e4-59887060178e", + "underlay_address": "fd00:1122:3344:114::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:114::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/85e81a14-031d-4a63-a91f-981c64e91f60/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled16.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled16.json new file mode 100644 index 0000000000..3cd727e1bc --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled16.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "dcb9a4ae-2c89-4a74-905b-b7936ff49c19", + "underlay_address": "fd00:1122:3344:11f::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11f::9]:32345", + "dataset": { + "pool_name": "oxp_af509039-d27f-4095-bc9d-cecbc5c606db" + } + } + }, + "root": "/pool/ext/44ee0fb4-6034-44e8-b3de-b3a44457ffca/crypt/zone" + }, + { + "zone": { + "id": "dbd46f71-ec39-4b72-a77d-9d281ccb37e0", + "underlay_address": "fd00:1122:3344:11f::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11f::b]:32345", + "dataset": { + "pool_name": "oxp_44ee0fb4-6034-44e8-b3de-b3a44457ffca" + } + } + }, + "root": "/pool/ext/5e32c0a3-1210-402b-91fb-256946eeac2b/crypt/zone" + }, + { + "zone": { + "id": "a1f30569-a5c6-4a6d-922e-241966aea142", + "underlay_address": "fd00:1122:3344:11f::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11f::6]:32345", + "dataset": { + "pool_name": "oxp_d2133e8b-51cc-455e-89d0-5454fd4fe109" + } + } + }, + "root": "/pool/ext/3f57835b-1469-499a-8757-7cc56acc5d49/crypt/zone" + }, + { + "zone": { + "id": "a33e25ae-4e41-40f4-843d-3d12f62d8cb6", + "underlay_address": "fd00:1122:3344:11f::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11f::8]:32345", + "dataset": { + "pool_name": "oxp_c8e4a7f4-1ae6-4683-8397-ea53475a53e8" + } + } + }, + "root": "/pool/ext/5e32c0a3-1210-402b-91fb-256946eeac2b/crypt/zone" + }, + { + "zone": { + "id": "65ed75c2-2d80-4de5-a6f6-adfa6516c7cf", + "underlay_address": "fd00:1122:3344:11f::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11f::c]:32345", + "dataset": { + "pool_name": "oxp_3f57835b-1469-499a-8757-7cc56acc5d49" + } + } + }, + "root": "/pool/ext/cd8cd75c-632b-4527-889a-7ca0c080fe2c/crypt/zone" + }, + { + "zone": { + "id": "bc6ccf18-6b9b-4687-8b70-c7917d972ae0", + "underlay_address": "fd00:1122:3344:11f::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11f::a]:32345", + "dataset": { + "pool_name": "oxp_cd8cd75c-632b-4527-889a-7ca0c080fe2c" + } + } + }, + "root": "/pool/ext/5e32c0a3-1210-402b-91fb-256946eeac2b/crypt/zone" + }, + { + "zone": { + 
"id": "06233bfe-a857-4819-aefe-212af9eeb90f", + "underlay_address": "fd00:1122:3344:11f::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11f::5]:32345", + "dataset": { + "pool_name": "oxp_c8a1aaf1-d27c-45fd-9f8d-80ac6bf6865d" + } + } + }, + "root": "/pool/ext/af509039-d27f-4095-bc9d-cecbc5c606db/crypt/zone" + }, + { + "zone": { + "id": "0bbfef71-9eae-43b6-b5e7-0060ce9269dd", + "underlay_address": "fd00:1122:3344:11f::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11f::4]:32345", + "dataset": { + "pool_name": "oxp_5e32c0a3-1210-402b-91fb-256946eeac2b" + } + } + }, + "root": "/pool/ext/af509039-d27f-4095-bc9d-cecbc5c606db/crypt/zone" + }, + { + "zone": { + "id": "550e10ee-24d1-444f-80be-2744dd321e0f", + "underlay_address": "fd00:1122:3344:11f::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11f::7]:32345", + "dataset": { + "pool_name": "oxp_f437ce0e-eb45-4be8-b1fe-33ed2656eb01" + } + } + }, + "root": "/pool/ext/44ee0fb4-6034-44e8-b3de-b3a44457ffca/crypt/zone" + }, + { + "zone": { + "id": "86d768f3-ece2-4956-983f-999bdb23a983", + "underlay_address": "fd00:1122:3344:11f::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:11f::3]:32221", + "dataset": { + "pool_name": "oxp_5e32c0a3-1210-402b-91fb-256946eeac2b" + } + } + }, + "root": "/pool/ext/c8a1aaf1-d27c-45fd-9f8d-80ac6bf6865d/crypt/zone" + }, + { + "zone": { + "id": "2f358812-f72c-4838-a5ea-7d78d0954be0", + "underlay_address": "fd00:1122:3344:11f::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:11f::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/f437ce0e-eb45-4be8-b1fe-33ed2656eb01/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled17.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled17.json new file mode 100644 index 0000000000..09981ecacc --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled17.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "525a19a2-d4ac-418d-bdcf-2ce26e7abe70", + "underlay_address": "fd00:1122:3344:107::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::a]:32345", + "dataset": { + "pool_name": "oxp_cb774d2f-ff86-4fd7-866b-17a6b10e61f0" + } + } + }, + "root": "/pool/ext/e17b68b5-f50c-4fc3-b55a-80d284c6c32d/crypt/zone" + }, + { + "zone": { + "id": "7af188e1-6175-4769-9e4f-2ca7a98b76f6", + "underlay_address": "fd00:1122:3344:107::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::4]:32345", + "dataset": { + "pool_name": "oxp_0cbbcf22-770d-4e75-9148-e6109b129093" + } + } + }, + "root": "/pool/ext/b998e8df-ea69-4bdd-84cb-b7f17075b060/crypt/zone" + }, + { + "zone": { + "id": "2544540f-6ffc-46c0-84bf-f42a110c02d7", + "underlay_address": "fd00:1122:3344:107::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::6]:32345", + "dataset": { + "pool_name": "oxp_e17b68b5-f50c-4fc3-b55a-80d284c6c32d" + } + } + }, + "root": "/pool/ext/521fa477-4d83-49a8-a5cf-c267b7f0c409/crypt/zone" + }, + { + "zone": { + "id": "cfc20f72-cac2-4681-a6d8-e5a0accafbb7", + "underlay_address": "fd00:1122:3344:107::7", 
+ "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::7]:32345", + "dataset": { + "pool_name": "oxp_b998e8df-ea69-4bdd-84cb-b7f17075b060" + } + } + }, + "root": "/pool/ext/0cbbcf22-770d-4e75-9148-e6109b129093/crypt/zone" + }, + { + "zone": { + "id": "e24be791-5773-425e-a3df-e35ca81570c7", + "underlay_address": "fd00:1122:3344:107::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::9]:32345", + "dataset": { + "pool_name": "oxp_7849c221-dc7f-43ac-ac47-bc51864e083b" + } + } + }, + "root": "/pool/ext/7849c221-dc7f-43ac-ac47-bc51864e083b/crypt/zone" + }, + { + "zone": { + "id": "170856ee-21cf-4780-8903-175d558bc7cc", + "underlay_address": "fd00:1122:3344:107::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::3]:32345", + "dataset": { + "pool_name": "oxp_618e21e5-77d4-40ba-9f8e-7960e9ad92e2" + } + } + }, + "root": "/pool/ext/aa7a37fb-2f03-4d5c-916b-db3a4fc269ac/crypt/zone" + }, + { + "zone": { + "id": "604278ff-525a-4d41-82ff-07aef3174d38", + "underlay_address": "fd00:1122:3344:107::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::5]:32345", + "dataset": { + "pool_name": "oxp_521fa477-4d83-49a8-a5cf-c267b7f0c409" + } + } + }, + "root": "/pool/ext/0cbbcf22-770d-4e75-9148-e6109b129093/crypt/zone" + }, + { + "zone": { + "id": "d0d4fcc0-6ed0-410a-99c7-5daf34014421", + "underlay_address": "fd00:1122:3344:107::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::b]:32345", + "dataset": { + "pool_name": "oxp_aa7a37fb-2f03-4d5c-916b-db3a4fc269ac" + } + } + }, + "root": "/pool/ext/aa7a37fb-2f03-4d5c-916b-db3a4fc269ac/crypt/zone" + }, + { + "zone": { + "id": "c935df7b-2629-48ee-bc10-20508301905d", + "underlay_address": "fd00:1122:3344:107::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::c]:32345", + "dataset": { + "pool_name": "oxp_793fd018-5fdc-4e54-9c45-f8023fa3ea18" + } + } + }, + "root": "/pool/ext/7849c221-dc7f-43ac-ac47-bc51864e083b/crypt/zone" + }, + { + "zone": { + "id": "4ba5f3b6-8be5-4a85-bc57-a5e3b0b867d8", + "underlay_address": "fd00:1122:3344:107::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:107::8]:32345", + "dataset": { + "pool_name": "oxp_e80e7996-c572-481e-8c22-61c16c6e47f4" + } + } + }, + "root": "/pool/ext/e17b68b5-f50c-4fc3-b55a-80d284c6c32d/crypt/zone" + }, + { + "zone": { + "id": "395c9d6e-3bd0-445e-9269-46c3260edb83", + "underlay_address": "fd00:1122:3344:107::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:107::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/0cbbcf22-770d-4e75-9148-e6109b129093/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled18.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled18.json new file mode 100644 index 0000000000..708019883e --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled18.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "c7096dd4-e429-4a6f-9725-041a77ef2513", + "underlay_address": "fd00:1122:3344:11a::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11a::6]:32345", + "dataset": 
{ + "pool_name": "oxp_dcf62af6-c0f9-4eb5-9b23-9424ef8f3d32" + } + } + }, + "root": "/pool/ext/b869e463-c8b9-4c12-a6b9-13175b3896dd/crypt/zone" + }, + { + "zone": { + "id": "09dd367f-b32f-43f3-aa53-11ccec1cd0c9", + "underlay_address": "fd00:1122:3344:11a::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11a::9]:32345", + "dataset": { + "pool_name": "oxp_d7d00317-42c7-4d1e-a04c-85491fb230cd" + } + } + }, + "root": "/pool/ext/d7d00317-42c7-4d1e-a04c-85491fb230cd/crypt/zone" + }, + { + "zone": { + "id": "fb2f85f1-05b3-432f-9bb5-63fb27a762b1", + "underlay_address": "fd00:1122:3344:11a::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11a::5]:32345", + "dataset": { + "pool_name": "oxp_db4a9949-68da-4c1c-9a1c-49083eba14fe" + } + } + }, + "root": "/pool/ext/db4a9949-68da-4c1c-9a1c-49083eba14fe/crypt/zone" + }, + { + "zone": { + "id": "5b89425e-69e4-4305-8f33-dc5768a1849e", + "underlay_address": "fd00:1122:3344:11a::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11a::a]:32345", + "dataset": { + "pool_name": "oxp_64a1bad7-d1b1-4e39-a3f3-9b8d73c4709e" + } + } + }, + "root": "/pool/ext/64a1bad7-d1b1-4e39-a3f3-9b8d73c4709e/crypt/zone" + }, + { + "zone": { + "id": "a5156db4-273a-4f8b-b8d8-df77062a6c63", + "underlay_address": "fd00:1122:3344:11a::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11a::4]:32345", + "dataset": { + "pool_name": "oxp_b869e463-c8b9-4c12-a6b9-13175b3896dd" + } + } + }, + "root": "/pool/ext/dcf62af6-c0f9-4eb5-9b23-9424ef8f3d32/crypt/zone" + }, + { + "zone": { + "id": "1f2d2f86-b69b-4130-bb9b-e62ba0cb6802", + "underlay_address": "fd00:1122:3344:11a::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11a::b]:32345", + "dataset": { + "pool_name": "oxp_153ffee4-5d7a-4786-ad33-d5567b434fe0" + } + } + }, + "root": "/pool/ext/174a067d-1c5a-49f7-a29f-1e62ab1c3796/crypt/zone" + }, + { + "zone": { + "id": "1e249cc9-52e7-4d66-b713-8ace1392e991", + "underlay_address": "fd00:1122:3344:11a::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11a::7]:32345", + "dataset": { + "pool_name": "oxp_04b6215e-9651-4a3c-ba1b-b8a1e67b3d89" + } + } + }, + "root": "/pool/ext/db4a9949-68da-4c1c-9a1c-49083eba14fe/crypt/zone" + }, + { + "zone": { + "id": "eb779538-2b1b-4d1d-8c7e-b15f04db6e53", + "underlay_address": "fd00:1122:3344:11a::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11a::3]:32345", + "dataset": { + "pool_name": "oxp_aacb8524-3562-4f97-a616-9023230d6efa" + } + } + }, + "root": "/pool/ext/174a067d-1c5a-49f7-a29f-1e62ab1c3796/crypt/zone" + }, + { + "zone": { + "id": "b575d52d-be7d-46af-814b-91e6d18f3464", + "underlay_address": "fd00:1122:3344:11a::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11a::8]:32345", + "dataset": { + "pool_name": "oxp_174a067d-1c5a-49f7-a29f-1e62ab1c3796" + } + } + }, + "root": "/pool/ext/64a1bad7-d1b1-4e39-a3f3-9b8d73c4709e/crypt/zone" + }, + { + "zone": { + "id": "274200bc-eac7-47d7-8a57-4b7be794caba", + "underlay_address": "fd00:1122:3344:11a::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11a::c]:32345", + "dataset": { + "pool_name": "oxp_2e7644e4-7d46-42bf-8e7a-9c3f39085b3f" + } + } + }, + "root": "/pool/ext/2e7644e4-7d46-42bf-8e7a-9c3f39085b3f/crypt/zone" + }, + { + "zone": { + "id": "bc20ba3a-df62-4a62-97c2-75b5653f84b4", + "underlay_address": "fd00:1122:3344:11a::d", + "zone_type": { + "type": "internal_ntp", + "address": 
"[fd00:1122:3344:11a::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/04b6215e-9651-4a3c-ba1b-b8a1e67b3d89/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled19.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled19.json new file mode 100644 index 0000000000..197df304e3 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled19.json @@ -0,0 +1,181 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "9c73abb9-edb8-4aa2-835b-c25ebe4466d9", + "underlay_address": "fd00:1122:3344:109::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::7]:32345", + "dataset": { + "pool_name": "oxp_b7a3032f-7b8c-4a6a-9fa2-e5773bfdbc94" + } + } + }, + "root": "/pool/ext/46d21f3d-23be-4361-b5c5-9d0f6ece5b8c/crypt/zone" + }, + { + "zone": { + "id": "ca576bda-cbdd-4bb9-9d75-ce06d569e926", + "underlay_address": "fd00:1122:3344:109::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::a]:32345", + "dataset": { + "pool_name": "oxp_863c4bc4-9c7e-453c-99d8-a3d509f49f3e" + } + } + }, + "root": "/pool/ext/7e67cb32-0c00-4090-9647-eb7bae75deeb/crypt/zone" + }, + { + "zone": { + "id": "f010978d-346e-49cd-b265-7607a25685f9", + "underlay_address": "fd00:1122:3344:109::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::c]:32345", + "dataset": { + "pool_name": "oxp_9bc1dab8-2d2a-4f92-bdfb-94ebca7881f1" + } + } + }, + "root": "/pool/ext/9bc1dab8-2d2a-4f92-bdfb-94ebca7881f1/crypt/zone" + }, + { + "zone": { + "id": "daff4162-cc81-4586-a457-91d767b8f1d9", + "underlay_address": "fd00:1122:3344:109::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::6]:32345", + "dataset": { + "pool_name": "oxp_b9b5b50c-e823-41ae-9585-01b818883521" + } + } + }, + "root": "/pool/ext/de682b18-afaf-4d53-b62e-934f6bd4a1f8/crypt/zone" + }, + { + "zone": { + "id": "9f300d3d-e698-4cc8-be4c-1f81ac8c927f", + "underlay_address": "fd00:1122:3344:109::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::d]:32345", + "dataset": { + "pool_name": "oxp_f1d82c22-ad7d-4cda-9ab0-8f5f496d90ce" + } + } + }, + "root": "/pool/ext/de682b18-afaf-4d53-b62e-934f6bd4a1f8/crypt/zone" + }, + { + "zone": { + "id": "8db7c7be-da40-4a1c-9681-4d02606a7eb7", + "underlay_address": "fd00:1122:3344:109::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::9]:32345", + "dataset": { + "pool_name": "oxp_46d21f3d-23be-4361-b5c5-9d0f6ece5b8c" + } + } + }, + "root": "/pool/ext/b7a3032f-7b8c-4a6a-9fa2-e5773bfdbc94/crypt/zone" + }, + { + "zone": { + "id": "b990911b-805a-4f9d-bd83-e977f5b19a35", + "underlay_address": "fd00:1122:3344:109::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::4]:32345", + "dataset": { + "pool_name": "oxp_7e67cb32-0c00-4090-9647-eb7bae75deeb" + } + } + }, + "root": "/pool/ext/de682b18-afaf-4d53-b62e-934f6bd4a1f8/crypt/zone" + }, + { + "zone": { + "id": "c99392f5-8f30-41ac-9eeb-12d7f4b707f1", + "underlay_address": "fd00:1122:3344:109::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::b]:32345", + "dataset": { + "pool_name": 
"oxp_de682b18-afaf-4d53-b62e-934f6bd4a1f8" + } + } + }, + "root": "/pool/ext/46d21f3d-23be-4361-b5c5-9d0f6ece5b8c/crypt/zone" + }, + { + "zone": { + "id": "7f6cb339-9eb1-4866-8a4f-383bad25b36f", + "underlay_address": "fd00:1122:3344:109::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::5]:32345", + "dataset": { + "pool_name": "oxp_458cbfa3-3752-415d-8a3b-fb64e88468e1" + } + } + }, + "root": "/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone" + }, + { + "zone": { + "id": "11946372-f253-4648-b00c-c7874a7b2888", + "underlay_address": "fd00:1122:3344:109::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:109::8]:32345", + "dataset": { + "pool_name": "oxp_d73332f5-b2a5-46c0-94cf-c5c5712abfe8" + } + } + }, + "root": "/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone" + }, + { + "zone": { + "id": "58ece9e1-387f-4d2f-a42f-69cd34f9f380", + "underlay_address": "fd00:1122:3344:109::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:109::3]:32221", + "dataset": { + "pool_name": "oxp_7e67cb32-0c00-4090-9647-eb7bae75deeb" + } + } + }, + "root": "/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone" + }, + { + "zone": { + "id": "f016a25a-deb5-4f20-bdb0-2425c00d41a6", + "underlay_address": "fd00:1122:3344:109::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:109::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/b9b5b50c-e823-41ae-9585-01b818883521/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled2.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled2.json new file mode 100644 index 0000000000..ba6ab6f915 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled2.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "dd799dd4-03f9-451d-85e2-844155753a03", + "underlay_address": "fd00:1122:3344:10a::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::7]:32345", + "dataset": { + "pool_name": "oxp_7dcf3acc-bde9-4306-bb46-4c6a6cbbb7ba" + } + } + }, + "root": "/pool/ext/7dcf3acc-bde9-4306-bb46-4c6a6cbbb7ba/crypt/zone" + }, + { + "zone": { + "id": "dbf9346d-b46d-4402-bb44-92ce20fb5290", + "underlay_address": "fd00:1122:3344:10a::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::9]:32345", + "dataset": { + "pool_name": "oxp_9275d50f-da2c-4f84-9775-598a364309ad" + } + } + }, + "root": "/pool/ext/d83e36ef-dd7a-4cc2-be19-379b1114c031/crypt/zone" + }, + { + "zone": { + "id": "9a55ebdd-eeef-4954-b0a1-e32b04837f14", + "underlay_address": "fd00:1122:3344:10a::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::4]:32345", + "dataset": { + "pool_name": "oxp_7f30f77e-5998-4676-a226-b433b5940e77" + } + } + }, + "root": "/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone" + }, + { + "zone": { + "id": "bc2935f8-e4fa-4015-968e-f90985533a6a", + "underlay_address": "fd00:1122:3344:10a::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::6]:32345", + "dataset": { + "pool_name": "oxp_022c9d58-e91f-480d-bda6-0cf32ce3b1f5" + } + } + }, + "root": 
"/pool/ext/c395dcc3-6ece-4b3f-b143-e111a54ef7da/crypt/zone" + }, + { + "zone": { + "id": "63f8c861-fa1d-4121-92d9-7efa5ef7f5a0", + "underlay_address": "fd00:1122:3344:10a::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::a]:32345", + "dataset": { + "pool_name": "oxp_3c805784-f403-4d01-9eb0-4f77d0821980" + } + } + }, + "root": "/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone" + }, + { + "zone": { + "id": "4996dcf9-78de-4f69-94fa-c09cc86a8d3c", + "underlay_address": "fd00:1122:3344:10a::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::b]:32345", + "dataset": { + "pool_name": "oxp_f9fe9ce6-be0d-4974-bc30-78a8f1330496" + } + } + }, + "root": "/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone" + }, + { + "zone": { + "id": "36b9a4bf-7b30-4fe7-903d-3b722c79fa86", + "underlay_address": "fd00:1122:3344:10a::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::c]:32345", + "dataset": { + "pool_name": "oxp_cb1052e0-4c70-4d37-b979-dd55e6a25f08" + } + } + }, + "root": "/pool/ext/3c805784-f403-4d01-9eb0-4f77d0821980/crypt/zone" + }, + { + "zone": { + "id": "a109a902-6a27-41b6-a881-c353e28e5389", + "underlay_address": "fd00:1122:3344:10a::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::8]:32345", + "dataset": { + "pool_name": "oxp_d83e36ef-dd7a-4cc2-be19-379b1114c031" + } + } + }, + "root": "/pool/ext/d83e36ef-dd7a-4cc2-be19-379b1114c031/crypt/zone" + }, + { + "zone": { + "id": "d2a9a0bc-ea12-44e3-ac4a-904c76120d11", + "underlay_address": "fd00:1122:3344:10a::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::3]:32345", + "dataset": { + "pool_name": "oxp_c395dcc3-6ece-4b3f-b143-e111a54ef7da" + } + } + }, + "root": "/pool/ext/9898a289-2f0d-43a6-b053-850f6e784e9a/crypt/zone" + }, + { + "zone": { + "id": "b3c3e53b-d9ec-4dd8-bd2c-bd811319aa44", + "underlay_address": "fd00:1122:3344:10a::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10a::5]:32345", + "dataset": { + "pool_name": "oxp_9898a289-2f0d-43a6-b053-850f6e784e9a" + } + } + }, + "root": "/pool/ext/9275d50f-da2c-4f84-9775-598a364309ad/crypt/zone" + }, + { + "zone": { + "id": "7b445d3b-fd25-4538-ac3f-f439c66d1223", + "underlay_address": "fd00:1122:3344:10a::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:10a::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/f9fe9ce6-be0d-4974-bc30-78a8f1330496/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled20.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled20.json new file mode 100644 index 0000000000..f02f1f05e5 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled20.json @@ -0,0 +1,198 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "4b49e669-264d-4bfb-8ab1-555b520b679c", + "underlay_address": "fd00:1122:3344:108::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::c]:32345", + "dataset": { + "pool_name": "oxp_799a1c86-9e1a-4626-91e2-a19f7ff5356e" + } + } + }, + "root": "/pool/ext/d2478613-b7c9-4bd3-856f-1fe8e9c903c2/crypt/zone" + }, + { + "zone": { + "id": 
"d802baae-9c3f-437a-85fe-cd72653b6db1", + "underlay_address": "fd00:1122:3344:108::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::5]:32345", + "dataset": { + "pool_name": "oxp_d2478613-b7c9-4bd3-856f-1fe8e9c903c2" + } + } + }, + "root": "/pool/ext/116f216c-e151-410f-82bf-8913904cf7b4/crypt/zone" + }, + { + "zone": { + "id": "e5f69e60-3421-49a4-8c1d-2db8cbb6a5e9", + "underlay_address": "fd00:1122:3344:108::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::b]:32345", + "dataset": { + "pool_name": "oxp_116f216c-e151-410f-82bf-8913904cf7b4" + } + } + }, + "root": "/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone" + }, + { + "zone": { + "id": "3e598962-ef8c-4cb6-bdfe-ec8563939d6a", + "underlay_address": "fd00:1122:3344:108::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::4]:32345", + "dataset": { + "pool_name": "oxp_ababce44-01d1-4c50-b389-f60464c5dde9" + } + } + }, + "root": "/pool/ext/ababce44-01d1-4c50-b389-f60464c5dde9/crypt/zone" + }, + { + "zone": { + "id": "25355c9f-cc2b-4b24-8eaa-65190f8936a8", + "underlay_address": "fd00:1122:3344:108::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::d]:32345", + "dataset": { + "pool_name": "oxp_fed46d41-136d-4462-8782-359014efba59" + } + } + }, + "root": "/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone" + }, + { + "zone": { + "id": "efb2f16c-ebad-4192-b575-dcb4d9b1d5cd", + "underlay_address": "fd00:1122:3344:108::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::a]:32345", + "dataset": { + "pool_name": "oxp_bf509067-0165-456d-98ae-72c86378e626" + } + } + }, + "root": "/pool/ext/95220093-e3b8-4f7f-9f5a-cb32cb75180a/crypt/zone" + }, + { + "zone": { + "id": "89191f0d-4e0b-47fa-9a9e-fbe2a6db1385", + "underlay_address": "fd00:1122:3344:108::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::8]:32345", + "dataset": { + "pool_name": "oxp_eea15142-4635-4e40-b0b4-b0c4f13eca3c" + } + } + }, + "root": "/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone" + }, + { + "zone": { + "id": "e4589324-c528-49c7-9141-35e0a7af6947", + "underlay_address": "fd00:1122:3344:108::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::6]:32345", + "dataset": { + "pool_name": "oxp_95220093-e3b8-4f7f-9f5a-cb32cb75180a" + } + } + }, + "root": "/pool/ext/ababce44-01d1-4c50-b389-f60464c5dde9/crypt/zone" + }, + { + "zone": { + "id": "95ebe94d-0e68-421d-9260-c30bd7fe4bd6", + "underlay_address": "fd00:1122:3344:108::3", + "zone_type": { + "type": "nexus", + "internal_address": "[fd00:1122:3344:108::3]:12221", + "external_ip": "45.154.216.35", + "nic": { + "id": "301aa595-f072-4da3-a533-99647b44a66a", + "kind": { + "type": "service", + "id": "95ebe94d-0e68-421d-9260-c30bd7fe4bd6" + }, + "name": "nexus-95ebe94d-0e68-421d-9260-c30bd7fe4bd6", + "ip": "172.30.2.5", + "mac": "A8:40:25:FF:F1:30", + "subnet": "172.30.2.0/24", + "vni": 100, + "primary": true, + "slot": 0 + }, + "external_tls": true, + "external_dns_servers": [ + "1.1.1.1", + "8.8.8.8" + ] + } + }, + "root": "/pool/ext/eea15142-4635-4e40-b0b4-b0c4f13eca3c/crypt/zone" + }, + { + "zone": { + "id": "4b7a7052-f8e8-4196-8d6b-315943986ce6", + "underlay_address": "fd00:1122:3344:108::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::7]:32345", + "dataset": { + "pool_name": "oxp_a549421c-2f12-45cc-b691-202f0a9bfa8b" + } + } + }, + "root": 
"/pool/ext/bf509067-0165-456d-98ae-72c86378e626/crypt/zone" + }, + { + "zone": { + "id": "71b8ff53-c781-47bb-8ddc-2c7129680542", + "underlay_address": "fd00:1122:3344:108::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:108::9]:32345", + "dataset": { + "pool_name": "oxp_9d19f891-a3d9-4c6e-b1e1-6b0b085a9440" + } + } + }, + "root": "/pool/ext/fed46d41-136d-4462-8782-359014efba59/crypt/zone" + }, + { + "zone": { + "id": "eaf7bf77-f4c2-4016-9909-4b88a27e9d9a", + "underlay_address": "fd00:1122:3344:108::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:108::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/ababce44-01d1-4c50-b389-f60464c5dde9/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled21.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled21.json new file mode 100644 index 0000000000..d6c19b96ed --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled21.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "a91e4af3-5d18-4b08-8cb6-0583db8f8842", + "underlay_address": "fd00:1122:3344:117::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:117::a]:32345", + "dataset": { + "pool_name": "oxp_4b2896b8-5f0e-42fb-a474-658b28421e65" + } + } + }, + "root": "/pool/ext/23393ed9-acee-4686-861f-7fc825af1249/crypt/zone" + }, + { + "zone": { + "id": "1ce74512-ce3a-4125-95f1-12c86e0275d5", + "underlay_address": "fd00:1122:3344:117::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:117::8]:32345", + "dataset": { + "pool_name": "oxp_46ece76f-ef00-4dd0-9f73-326c63959470" + } + } + }, + "root": "/pool/ext/1bd5955e-14a9-463f-adeb-f12bcb45a6c1/crypt/zone" + }, + { + "zone": { + "id": "fef5d35f-9622-4dee-8635-d26e9f7f6869", + "underlay_address": "fd00:1122:3344:117::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:117::4]:32345", + "dataset": { + "pool_name": "oxp_e4d7c2e8-016b-4617-afb5-38a2d9c1b508" + } + } + }, + "root": "/pool/ext/e372bba3-ef60-466f-b819-a3d5b9acbe77/crypt/zone" + }, + { + "zone": { + "id": "4f024a31-cd38-4219-8381-9f1af70d1d54", + "underlay_address": "fd00:1122:3344:117::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:117::c]:32345", + "dataset": { + "pool_name": "oxp_7cb2a3c2-9d33-4c6a-af57-669f251cf4cf" + } + } + }, + "root": "/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone" + }, + { + "zone": { + "id": "d00e1d0b-e12f-420a-a4df-21e4cac176f6", + "underlay_address": "fd00:1122:3344:117::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:117::b]:32345", + "dataset": { + "pool_name": "oxp_e372bba3-ef60-466f-b819-a3d5b9acbe77" + } + } + }, + "root": "/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone" + }, + { + "zone": { + "id": "1598058a-6064-449e-b39c-1e3d345ed793", + "underlay_address": "fd00:1122:3344:117::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:117::5]:32345", + "dataset": { + "pool_name": "oxp_022a8d67-1e00-49f3-81ed-a0a1bc187cfa" + } + } + }, + "root": "/pool/ext/022a8d67-1e00-49f3-81ed-a0a1bc187cfa/crypt/zone" + }, + { + "zone": { + "id": 
"c723c4b8-3031-4b25-8c16-fe08bc0b5f00", + "underlay_address": "fd00:1122:3344:117::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:117::7]:32345", + "dataset": { + "pool_name": "oxp_23393ed9-acee-4686-861f-7fc825af1249" + } + } + }, + "root": "/pool/ext/1bd5955e-14a9-463f-adeb-f12bcb45a6c1/crypt/zone" + }, + { + "zone": { + "id": "7751b307-888f-46c8-8787-75d2f3fdaef3", + "underlay_address": "fd00:1122:3344:117::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:117::9]:32345", + "dataset": { + "pool_name": "oxp_e54e53d4-f68f-4b19-b8c1-9d5ab42e51c1" + } + } + }, + "root": "/pool/ext/e372bba3-ef60-466f-b819-a3d5b9acbe77/crypt/zone" + }, + { + "zone": { + "id": "89413ff1-d5de-4931-8389-e84e7ea321af", + "underlay_address": "fd00:1122:3344:117::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:117::6]:32345", + "dataset": { + "pool_name": "oxp_1bd5955e-14a9-463f-adeb-f12bcb45a6c1" + } + } + }, + "root": "/pool/ext/1bd5955e-14a9-463f-adeb-f12bcb45a6c1/crypt/zone" + }, + { + "zone": { + "id": "287b0b24-72aa-41b5-a597-8523d84225ef", + "underlay_address": "fd00:1122:3344:117::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:117::3]:32345", + "dataset": { + "pool_name": "oxp_cfbd185d-e185-4aaa-a598-9216124ceec4" + } + } + }, + "root": "/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone" + }, + { + "zone": { + "id": "4728253e-c534-4a5b-b707-c64ac9a8eb8c", + "underlay_address": "fd00:1122:3344:117::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:117::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/cfbd185d-e185-4aaa-a598-9216124ceec4/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled22.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled22.json new file mode 100644 index 0000000000..1cd6fed362 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled22.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "49f20cd1-a8a3-4fa8-9209-59da60cd8f9b", + "underlay_address": "fd00:1122:3344:103::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::5]:32345", + "dataset": { + "pool_name": "oxp_13a9ef4a-f33a-4781-8f83-712c07a79b1f" + } + } + }, + "root": "/pool/ext/711eff4e-736c-478e-83aa-ae86f5efbf1d/crypt/zone" + }, + { + "zone": { + "id": "896fd564-f94e-496b-9fcf-ddfbfcfac9f7", + "underlay_address": "fd00:1122:3344:103::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::c]:32345", + "dataset": { + "pool_name": "oxp_0944c0a2-0fb7-4f51-bced-52cc257cd2f6" + } + } + }, + "root": "/pool/ext/bc54d8c5-955d-429d-84e0-a20a4e5e27a3/crypt/zone" + }, + { + "zone": { + "id": "911fb8b3-05c2-4af7-8974-6c74a61d94ad", + "underlay_address": "fd00:1122:3344:103::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::9]:32345", + "dataset": { + "pool_name": "oxp_29f59fce-a867-4571-9d2e-b03fa5c13510" + } + } + }, + "root": "/pool/ext/711eff4e-736c-478e-83aa-ae86f5efbf1d/crypt/zone" + }, + { + "zone": { + "id": "682b34db-0b06-4770-a8fe-74437cf184d6", + "underlay_address": "fd00:1122:3344:103::6", + 
"zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::6]:32345", + "dataset": { + "pool_name": "oxp_094d11d2-8049-4138-bcf4-562f5f8e77c0" + } + } + }, + "root": "/pool/ext/0944c0a2-0fb7-4f51-bced-52cc257cd2f6/crypt/zone" + }, + { + "zone": { + "id": "d8d20365-ecd3-4fd5-9495-c0670e3bd5d9", + "underlay_address": "fd00:1122:3344:103::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::a]:32345", + "dataset": { + "pool_name": "oxp_fb97ff7b-0225-400c-a137-3b38a786c0a0" + } + } + }, + "root": "/pool/ext/094d11d2-8049-4138-bcf4-562f5f8e77c0/crypt/zone" + }, + { + "zone": { + "id": "673620b6-44d9-4310-8e17-3024ac84e708", + "underlay_address": "fd00:1122:3344:103::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::7]:32345", + "dataset": { + "pool_name": "oxp_711eff4e-736c-478e-83aa-ae86f5efbf1d" + } + } + }, + "root": "/pool/ext/fb97ff7b-0225-400c-a137-3b38a786c0a0/crypt/zone" + }, + { + "zone": { + "id": "bf6dfc04-4d4c-41b6-a011-40ffc3bc5080", + "underlay_address": "fd00:1122:3344:103::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::8]:32345", + "dataset": { + "pool_name": "oxp_f815f1b6-48ef-436d-8768-eb08227e2386" + } + } + }, + "root": "/pool/ext/13a9ef4a-f33a-4781-8f83-712c07a79b1f/crypt/zone" + }, + { + "zone": { + "id": "ac8a82a8-fb6f-4635-a9a9-d98617eab390", + "underlay_address": "fd00:1122:3344:103::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::3]:32345", + "dataset": { + "pool_name": "oxp_97d6c860-4e2f-496e-974b-2e293fee6af9" + } + } + }, + "root": "/pool/ext/0944c0a2-0fb7-4f51-bced-52cc257cd2f6/crypt/zone" + }, + { + "zone": { + "id": "4ed66558-4815-4b85-9b94-9edf3ee69ead", + "underlay_address": "fd00:1122:3344:103::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::4]:32345", + "dataset": { + "pool_name": "oxp_bc54d8c5-955d-429d-84e0-a20a4e5e27a3" + } + } + }, + "root": "/pool/ext/13a9ef4a-f33a-4781-8f83-712c07a79b1f/crypt/zone" + }, + { + "zone": { + "id": "8a71c6ee-b08d-4c3d-b13c-c9cebc4c328a", + "underlay_address": "fd00:1122:3344:103::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:103::b]:32345", + "dataset": { + "pool_name": "oxp_2bdfa429-09bd-4fa1-aa20-eea99f0d2b85" + } + } + }, + "root": "/pool/ext/29f59fce-a867-4571-9d2e-b03fa5c13510/crypt/zone" + }, + { + "zone": { + "id": "7e6b8962-7a1e-4d7b-b7ea-49e64a51d98d", + "underlay_address": "fd00:1122:3344:103::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:103::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/2bdfa429-09bd-4fa1-aa20-eea99f0d2b85/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled23.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled23.json new file mode 100644 index 0000000000..ab171ad8cd --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled23.json @@ -0,0 +1,181 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "6b7e931d-4b91-4dc6-9a7b-4c19ac669e5d", + "underlay_address": "fd00:1122:3344:105::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::4]:32345", + "dataset": { 
+ "pool_name": "oxp_24dab7f5-164a-47f3-a878-f32ab1e68cce" + } + } + }, + "root": "/pool/ext/ad493851-2d11-4c2d-8d75-989579d9616a/crypt/zone" + }, + { + "zone": { + "id": "6c58e7aa-71e1-4868-9d4b-e12c7ef40303", + "underlay_address": "fd00:1122:3344:105::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::a]:32345", + "dataset": { + "pool_name": "oxp_d664c9e8-bc81-4225-a618-a8ae2d057186" + } + } + }, + "root": "/pool/ext/ad493851-2d11-4c2d-8d75-989579d9616a/crypt/zone" + }, + { + "zone": { + "id": "51c6dc8d-b1a4-454a-9b19-01e45eb0b599", + "underlay_address": "fd00:1122:3344:105::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::d]:32345", + "dataset": { + "pool_name": "oxp_f5f85537-eb25-4d0e-8e94-b775c41abd73" + } + } + }, + "root": "/pool/ext/4f1eafe9-b28d-49d3-83e2-ceac8721d6b5/crypt/zone" + }, + { + "zone": { + "id": "8cbffa61-0bd0-4ad2-bd7d-30fe0dd57469", + "underlay_address": "fd00:1122:3344:105::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::9]:32345", + "dataset": { + "pool_name": "oxp_88abca38-3f61-4d4b-80a1-4ea3e4827f84" + } + } + }, + "root": "/pool/ext/88abca38-3f61-4d4b-80a1-4ea3e4827f84/crypt/zone" + }, + { + "zone": { + "id": "2177f37f-2ac9-4e66-bf74-a10bd91f4d33", + "underlay_address": "fd00:1122:3344:105::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::6]:32345", + "dataset": { + "pool_name": "oxp_59e20871-4670-40d6-8ff4-aa97899fc991" + } + } + }, + "root": "/pool/ext/4f1eafe9-b28d-49d3-83e2-ceac8721d6b5/crypt/zone" + }, + { + "zone": { + "id": "e4e43855-4879-4910-a2ba-40f625c1cc2d", + "underlay_address": "fd00:1122:3344:105::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::b]:32345", + "dataset": { + "pool_name": "oxp_967d2f05-b141-44f5-837d-9b2aa67ee128" + } + } + }, + "root": "/pool/ext/6b6f34cd-6d3d-4832-a4e6-3df112c97133/crypt/zone" + }, + { + "zone": { + "id": "8d2517e1-f9ad-40f2-abb9-2f5122839910", + "underlay_address": "fd00:1122:3344:105::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::7]:32345", + "dataset": { + "pool_name": "oxp_ad493851-2d11-4c2d-8d75-989579d9616a" + } + } + }, + "root": "/pool/ext/88abca38-3f61-4d4b-80a1-4ea3e4827f84/crypt/zone" + }, + { + "zone": { + "id": "44cb3698-a7b1-4388-9165-ac76082ec8bc", + "underlay_address": "fd00:1122:3344:105::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::5]:32345", + "dataset": { + "pool_name": "oxp_4292a83c-8c1f-4b2e-9120-72e0c510bf3c" + } + } + }, + "root": "/pool/ext/24dab7f5-164a-47f3-a878-f32ab1e68cce/crypt/zone" + }, + { + "zone": { + "id": "931b5c86-9d72-4518-bfd6-97863152ac65", + "underlay_address": "fd00:1122:3344:105::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::c]:32345", + "dataset": { + "pool_name": "oxp_6b6f34cd-6d3d-4832-a4e6-3df112c97133" + } + } + }, + "root": "/pool/ext/ad493851-2d11-4c2d-8d75-989579d9616a/crypt/zone" + }, + { + "zone": { + "id": "ac568073-1889-463e-8cc4-cfed16ce2a34", + "underlay_address": "fd00:1122:3344:105::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:105::8]:32345", + "dataset": { + "pool_name": "oxp_4f1eafe9-b28d-49d3-83e2-ceac8721d6b5" + } + } + }, + "root": "/pool/ext/4292a83c-8c1f-4b2e-9120-72e0c510bf3c/crypt/zone" + }, + { + "zone": { + "id": "e8f86fbb-864e-4d5a-961c-b50b54ae853e", + "underlay_address": "fd00:1122:3344:105::3", + "zone_type": { + "type": "cockroach_db", + "address": 
"[fd00:1122:3344:105::3]:32221", + "dataset": { + "pool_name": "oxp_24dab7f5-164a-47f3-a878-f32ab1e68cce" + } + } + }, + "root": "/pool/ext/4f1eafe9-b28d-49d3-83e2-ceac8721d6b5/crypt/zone" + }, + { + "zone": { + "id": "c79caea0-37b1-49d6-ae6e-8cf849d91374", + "underlay_address": "fd00:1122:3344:105::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:105::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/24dab7f5-164a-47f3-a878-f32ab1e68cce/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled24.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled24.json new file mode 100644 index 0000000000..9968abe6d9 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled24.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "d2b1e468-bc3c-4d08-b855-ae3327465375", + "underlay_address": "fd00:1122:3344:106::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::3]:32345", + "dataset": { + "pool_name": "oxp_9db196bf-828d-4e55-a2c1-dd9d579d3908" + } + } + }, + "root": "/pool/ext/74df4c92-edbb-4431-a770-1d015110e66b/crypt/zone" + }, + { + "zone": { + "id": "61f94a16-79fd-42e3-b225-a4dc67228437", + "underlay_address": "fd00:1122:3344:106::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::6]:32345", + "dataset": { + "pool_name": "oxp_d77d5b08-5f70-496a-997b-b38804dc3b8a" + } + } + }, + "root": "/pool/ext/daf9e3cd-5a40-4eba-a0f6-4f94dab37dae/crypt/zone" + }, + { + "zone": { + "id": "7d32ef34-dec5-4fd8-899e-20bbc473a3ee", + "underlay_address": "fd00:1122:3344:106::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::7]:32345", + "dataset": { + "pool_name": "oxp_50c1b653-6231-41fe-b3cf-b7ba709a0746" + } + } + }, + "root": "/pool/ext/9db196bf-828d-4e55-a2c1-dd9d579d3908/crypt/zone" + }, + { + "zone": { + "id": "c34b7ae5-26b9-4651-a3c4-20bba2bd0d2c", + "underlay_address": "fd00:1122:3344:106::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::5]:32345", + "dataset": { + "pool_name": "oxp_88aea92c-ab92-44c1-9471-eb8e30e075d3" + } + } + }, + "root": "/pool/ext/8da316d4-6b18-4980-a0a8-6e76e72cc40d/crypt/zone" + }, + { + "zone": { + "id": "36472be8-9a70-4c14-bd02-439b725cec1a", + "underlay_address": "fd00:1122:3344:106::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::8]:32345", + "dataset": { + "pool_name": "oxp_54544b3a-1513-4db2-911e-7c1eb4b12385" + } + } + }, + "root": "/pool/ext/54544b3a-1513-4db2-911e-7c1eb4b12385/crypt/zone" + }, + { + "zone": { + "id": "2548f8ab-5255-4334-a1fb-5d7d95213129", + "underlay_address": "fd00:1122:3344:106::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::9]:32345", + "dataset": { + "pool_name": "oxp_08050450-967f-431c-9a12-0d051aff020e" + } + } + }, + "root": "/pool/ext/08050450-967f-431c-9a12-0d051aff020e/crypt/zone" + }, + { + "zone": { + "id": "1455c069-853c-49cd-853a-3ea81b89acd4", + "underlay_address": "fd00:1122:3344:106::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::c]:32345", + "dataset": { + "pool_name": 
"oxp_8da316d4-6b18-4980-a0a8-6e76e72cc40d" + } + } + }, + "root": "/pool/ext/08050450-967f-431c-9a12-0d051aff020e/crypt/zone" + }, + { + "zone": { + "id": "27c0244b-f91a-46c3-bc96-e8eec009371e", + "underlay_address": "fd00:1122:3344:106::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::b]:32345", + "dataset": { + "pool_name": "oxp_daf9e3cd-5a40-4eba-a0f6-4f94dab37dae" + } + } + }, + "root": "/pool/ext/74df4c92-edbb-4431-a770-1d015110e66b/crypt/zone" + }, + { + "zone": { + "id": "9e46d837-1e0f-42b6-a352-84e6946b8734", + "underlay_address": "fd00:1122:3344:106::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::4]:32345", + "dataset": { + "pool_name": "oxp_74df4c92-edbb-4431-a770-1d015110e66b" + } + } + }, + "root": "/pool/ext/15f94c39-d48c-41f6-a913-cc1d04aef1a2/crypt/zone" + }, + { + "zone": { + "id": "b972fcd4-c1b3-4b3c-9e24-f59c7a7cb192", + "underlay_address": "fd00:1122:3344:106::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:106::a]:32345", + "dataset": { + "pool_name": "oxp_15f94c39-d48c-41f6-a913-cc1d04aef1a2" + } + } + }, + "root": "/pool/ext/74df4c92-edbb-4431-a770-1d015110e66b/crypt/zone" + }, + { + "zone": { + "id": "e1c8c655-1950-42d5-ae1f-a4ce84854bbc", + "underlay_address": "fd00:1122:3344:106::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:106::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/15f94c39-d48c-41f6-a913-cc1d04aef1a2/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled25.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled25.json new file mode 100644 index 0000000000..8deca6b56a --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled25.json @@ -0,0 +1,196 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "10b80058-9b2e-4d6c-8a1a-a61a8258c12f", + "underlay_address": "fd00:1122:3344:118::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:118::9]:32345", + "dataset": { + "pool_name": "oxp_953c19bb-9fff-4488-8a7b-29de9994a948" + } + } + }, + "root": "/pool/ext/a78caf97-6145-4908-83b5-a03a6d2e0ac4/crypt/zone" + }, + { + "zone": { + "id": "f58fef96-7b5e-40c2-9482-669088a19209", + "underlay_address": "fd00:1122:3344:118::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:118::d]:32345", + "dataset": { + "pool_name": "oxp_d7976706-d6ed-4465-8b04-450c96d8feec" + } + } + }, + "root": "/pool/ext/d7976706-d6ed-4465-8b04-450c96d8feec/crypt/zone" + }, + { + "zone": { + "id": "624f1168-47b6-4aa1-84da-e20a0d74d783", + "underlay_address": "fd00:1122:3344:118::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:118::b]:32345", + "dataset": { + "pool_name": "oxp_a78caf97-6145-4908-83b5-a03a6d2e0ac4" + } + } + }, + "root": "/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone" + }, + { + "zone": { + "id": "8ea85412-19b4-45c1-a53c-027ddd629296", + "underlay_address": "fd00:1122:3344:118::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:118::6]:32345", + "dataset": { + "pool_name": "oxp_d5f4c903-155a-4c91-aadd-6039a4f64821" + } + } + }, + "root": 
"/pool/ext/7d2a7685-c1c9-4d2d-a2bb-df65d96ea3e2/crypt/zone" + }, + { + "zone": { + "id": "fd226b82-71d7-4719-b32c-a6c7abe28a2a", + "underlay_address": "fd00:1122:3344:118::3", + "zone_type": { + "type": "external_dns", + "dataset": { + "pool_name": "oxp_84a80b58-70e9-439c-9558-5b343d9a4b53" + }, + "http_address": "[fd00:1122:3344:118::3]:5353", + "dns_address": "45.154.216.34:53", + "nic": { + "id": "7f72b6fd-1120-44dc-b3a7-f727502ba47c", + "kind": { + "type": "service", + "id": "fd226b82-71d7-4719-b32c-a6c7abe28a2a" + }, + "name": "external-dns-fd226b82-71d7-4719-b32c-a6c7abe28a2a", + "ip": "172.30.1.6", + "mac": "A8:40:25:FF:9E:D1", + "subnet": "172.30.1.0/24", + "vni": 100, + "primary": true, + "slot": 0 + } + } + }, + "root": "/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone" + }, + { + "zone": { + "id": "08d0c38d-f0d9-45b9-856d-b85059fe5f07", + "underlay_address": "fd00:1122:3344:118::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:118::4]:32345", + "dataset": { + "pool_name": "oxp_84a80b58-70e9-439c-9558-5b343d9a4b53" + } + } + }, + "root": "/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone" + }, + { + "zone": { + "id": "5de7d3fd-4a3f-4fdd-b6b2-d1186e16dce5", + "underlay_address": "fd00:1122:3344:118::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:118::7]:32345", + "dataset": { + "pool_name": "oxp_d76e058f-2d1e-4b15-b3a0-e5509a246876" + } + } + }, + "root": "/pool/ext/a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d/crypt/zone" + }, + { + "zone": { + "id": "5d0f5cad-10b3-497c-903b-eeeabce920e2", + "underlay_address": "fd00:1122:3344:118::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:118::8]:32345", + "dataset": { + "pool_name": "oxp_3a3ad639-8800-4951-bc2a-201d269e47a2" + } + } + }, + "root": "/pool/ext/3a3ad639-8800-4951-bc2a-201d269e47a2/crypt/zone" + }, + { + "zone": { + "id": "39f9cefa-801c-4843-9fb9-05446ffbdd1a", + "underlay_address": "fd00:1122:3344:118::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:118::a]:32345", + "dataset": { + "pool_name": "oxp_7d2a7685-c1c9-4d2d-a2bb-df65d96ea3e2" + } + } + }, + "root": "/pool/ext/a78caf97-6145-4908-83b5-a03a6d2e0ac4/crypt/zone" + }, + { + "zone": { + "id": "0711e710-7fdd-4e68-94c8-294b8677e804", + "underlay_address": "fd00:1122:3344:118::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:118::5]:32345", + "dataset": { + "pool_name": "oxp_a5b16ffe-a834-4a83-a4e9-487d4cbb7e3d" + } + } + }, + "root": "/pool/ext/3a3ad639-8800-4951-bc2a-201d269e47a2/crypt/zone" + }, + { + "zone": { + "id": "318a62cc-5c6c-4805-9fb6-c0f6a75ce31c", + "underlay_address": "fd00:1122:3344:118::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:118::c]:32345", + "dataset": { + "pool_name": "oxp_1d5f0ba3-6b31-4cea-a9a9-2065a538887d" + } + } + }, + "root": "/pool/ext/d7976706-d6ed-4465-8b04-450c96d8feec/crypt/zone" + }, + { + "zone": { + "id": "463d0498-85b9-40eb-af96-d99af58a587c", + "underlay_address": "fd00:1122:3344:118::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:118::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/d5f4c903-155a-4c91-aadd-6039a4f64821/crypt/zone" + } + ] +} \ No newline at end of file 
diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled26.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled26.json new file mode 100644 index 0000000000..a3c5d97b53 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled26.json @@ -0,0 +1,178 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "d8b3de97-cc79-48f6-83ad-02017c21223b", + "underlay_address": "fd00:1122:3344:119::3", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:119::3]:17000" + } + }, + "root": "/pool/ext/e0faea44-8b5c-40b0-bb75-a1aec1a10377/crypt/zone" + }, + { + "zone": { + "id": "adba1a3b-5bac-44d5-aa5a-879dc6eadb5f", + "underlay_address": "fd00:1122:3344:119::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:119::c]:32345", + "dataset": { + "pool_name": "oxp_21c339c3-6461-4bdb-8b0e-c0f9f08ee10b" + } + } + }, + "root": "/pool/ext/f5c73c28-2168-4321-b737-4ca6663155c9/crypt/zone" + }, + { + "zone": { + "id": "42bb9833-5c39-4aba-b2c4-da2ca1287728", + "underlay_address": "fd00:1122:3344:119::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:119::a]:32345", + "dataset": { + "pool_name": "oxp_1f91451d-a466-4c9a-a6e6-0abd7985595f" + } + } + }, + "root": "/pool/ext/21c339c3-6461-4bdb-8b0e-c0f9f08ee10b/crypt/zone" + }, + { + "zone": { + "id": "197695e1-d949-4982-b679-6e5c9ab4bcc7", + "underlay_address": "fd00:1122:3344:119::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:119::b]:32345", + "dataset": { + "pool_name": "oxp_e0faea44-8b5c-40b0-bb75-a1aec1a10377" + } + } + }, + "root": "/pool/ext/b31e1815-cae0-4145-940c-874fff63bdd5/crypt/zone" + }, + { + "zone": { + "id": "bf99d4f8-edf1-4de5-98d4-8e6a24965005", + "underlay_address": "fd00:1122:3344:119::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:119::8]:32345", + "dataset": { + "pool_name": "oxp_ef2c3afb-6962-4f6b-b567-14766bbd9ec0" + } + } + }, + "root": "/pool/ext/21c339c3-6461-4bdb-8b0e-c0f9f08ee10b/crypt/zone" + }, + { + "zone": { + "id": "390d1853-8be9-4987-b8b6-f022999bf4e7", + "underlay_address": "fd00:1122:3344:119::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:119::7]:32345", + "dataset": { + "pool_name": "oxp_06eed00a-d8d3-4b9d-84c9-23fce535f63e" + } + } + }, + "root": "/pool/ext/ef2c3afb-6962-4f6b-b567-14766bbd9ec0/crypt/zone" + }, + { + "zone": { + "id": "76fe2161-90df-41b5-9c94-067de9c29db1", + "underlay_address": "fd00:1122:3344:119::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:119::4]:32345", + "dataset": { + "pool_name": "oxp_f5c73c28-2168-4321-b737-4ca6663155c9" + } + } + }, + "root": "/pool/ext/ef2c3afb-6962-4f6b-b567-14766bbd9ec0/crypt/zone" + }, + { + "zone": { + "id": "f49dc522-2b13-4055-964c-8315671096aa", + "underlay_address": "fd00:1122:3344:119::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:119::d]:32345", + "dataset": { + "pool_name": "oxp_662c278b-7f5f-4c7e-91ff-70207e8a307b" + } + } + }, + "root": "/pool/ext/1f91451d-a466-4c9a-a6e6-0abd7985595f/crypt/zone" + }, + { + "zone": { + "id": "08cc7bd6-368e-4d16-a619-28b17eff35af", + "underlay_address": "fd00:1122:3344:119::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:119::9]:32345", + "dataset": { + "pool_name": "oxp_5516b9ac-b139-40da-aa3b-f094568ba095" + } + } + }, + "root": "/pool/ext/06eed00a-d8d3-4b9d-84c9-23fce535f63e/crypt/zone" + }, + { + "zone": { + "id": 
"74b0613f-bce8-4922-93e0-b5bfccfc8443", + "underlay_address": "fd00:1122:3344:119::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:119::5]:32345", + "dataset": { + "pool_name": "oxp_b31e1815-cae0-4145-940c-874fff63bdd5" + } + } + }, + "root": "/pool/ext/21c339c3-6461-4bdb-8b0e-c0f9f08ee10b/crypt/zone" + }, + { + "zone": { + "id": "55fcfc62-8435-475f-a2aa-29373901b993", + "underlay_address": "fd00:1122:3344:119::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:119::6]:32345", + "dataset": { + "pool_name": "oxp_eadf6a03-1028-4d48-ac0d-0d27ef2c8c0f" + } + } + }, + "root": "/pool/ext/1f91451d-a466-4c9a-a6e6-0abd7985595f/crypt/zone" + }, + { + "zone": { + "id": "d52ccea3-6d7f-43a6-a19f-e0409f4e9cdc", + "underlay_address": "fd00:1122:3344:119::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:119::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/f5c73c28-2168-4321-b737-4ca6663155c9/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled27.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled27.json new file mode 100644 index 0000000000..193df7a567 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled27.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "095e612f-e218-4a16-aa6e-98c3d69a470a", + "underlay_address": "fd00:1122:3344:10d::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10d::a]:32345", + "dataset": { + "pool_name": "oxp_9f657858-623f-4d78-9841-6e620b5ede30" + } + } + }, + "root": "/pool/ext/2d086b51-2b77-4bc7-adc6-43586ea38ce9/crypt/zone" + }, + { + "zone": { + "id": "de818730-0e3b-4567-94e7-344bd9b6f564", + "underlay_address": "fd00:1122:3344:10d::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10d::3]:32345", + "dataset": { + "pool_name": "oxp_ba6ab301-07e1-4d35-80ac-59612f2c2bdb" + } + } + }, + "root": "/pool/ext/7cee2806-e898-47d8-b568-e276a6e271f8/crypt/zone" + }, + { + "zone": { + "id": "6a21dc3c-3a9d-4520-9a91-7d8f2737bcd4", + "underlay_address": "fd00:1122:3344:10d::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10d::4]:32345", + "dataset": { + "pool_name": "oxp_7cee2806-e898-47d8-b568-e276a6e271f8" + } + } + }, + "root": "/pool/ext/cef23d87-31ed-40d5-99b8-12d7be8e46e7/crypt/zone" + }, + { + "zone": { + "id": "e01b7f45-b8d7-4944-ba5b-41fb699889a9", + "underlay_address": "fd00:1122:3344:10d::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10d::b]:32345", + "dataset": { + "pool_name": "oxp_d9af8878-50bd-4425-95d9-e6556ce92cfa" + } + } + }, + "root": "/pool/ext/6fe9bcaa-88cb-451d-b086-24a3ad53fa22/crypt/zone" + }, + { + "zone": { + "id": "4271ef62-d319-4e80-b157-915321cec8c7", + "underlay_address": "fd00:1122:3344:10d::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10d::c]:32345", + "dataset": { + "pool_name": "oxp_ba8ee7dd-cdfb-48bd-92ce-4dc45e070930" + } + } + }, + "root": "/pool/ext/9f657858-623f-4d78-9841-6e620b5ede30/crypt/zone" + }, + { + "zone": { + "id": "6bdcc159-aeb9-4903-9486-dd8b43a3dc16", + "underlay_address": "fd00:1122:3344:10d::8", + 
"zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10d::8]:32345", + "dataset": { + "pool_name": "oxp_5b03a5dc-bb5a-4bf4-bc21-0af849cd1dab" + } + } + }, + "root": "/pool/ext/d9af8878-50bd-4425-95d9-e6556ce92cfa/crypt/zone" + }, + { + "zone": { + "id": "85540e54-cdd7-4baa-920c-5cf54cbc1f83", + "underlay_address": "fd00:1122:3344:10d::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10d::7]:32345", + "dataset": { + "pool_name": "oxp_ee24f9a6-84ab-49a5-a28f-e394abfcaa95" + } + } + }, + "root": "/pool/ext/9f657858-623f-4d78-9841-6e620b5ede30/crypt/zone" + }, + { + "zone": { + "id": "750d1a0b-6a14-46c5-9a0b-a504caefb198", + "underlay_address": "fd00:1122:3344:10d::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10d::9]:32345", + "dataset": { + "pool_name": "oxp_cef23d87-31ed-40d5-99b8-12d7be8e46e7" + } + } + }, + "root": "/pool/ext/ba8ee7dd-cdfb-48bd-92ce-4dc45e070930/crypt/zone" + }, + { + "zone": { + "id": "b5996893-1a9a-434e-a257-d702694f058b", + "underlay_address": "fd00:1122:3344:10d::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10d::6]:32345", + "dataset": { + "pool_name": "oxp_2d086b51-2b77-4bc7-adc6-43586ea38ce9" + } + } + }, + "root": "/pool/ext/7cee2806-e898-47d8-b568-e276a6e271f8/crypt/zone" + }, + { + "zone": { + "id": "8b36686a-b98d-451a-9124-a3583000a83a", + "underlay_address": "fd00:1122:3344:10d::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10d::5]:32345", + "dataset": { + "pool_name": "oxp_6fe9bcaa-88cb-451d-b086-24a3ad53fa22" + } + } + }, + "root": "/pool/ext/9f657858-623f-4d78-9841-6e620b5ede30/crypt/zone" + }, + { + "zone": { + "id": "88d695a2-c8c1-41af-85b0-77424f4d650d", + "underlay_address": "fd00:1122:3344:10d::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:10d::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/ba6ab301-07e1-4d35-80ac-59612f2c2bdb/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled28.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled28.json new file mode 100644 index 0000000000..210b388a19 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled28.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "a126365d-f459-43bf-9f99-dbe1c4cdecf8", + "underlay_address": "fd00:1122:3344:113::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:113::4]:32345", + "dataset": { + "pool_name": "oxp_c99eabb2-6815-416a-9660-87e2609b357a" + } + } + }, + "root": "/pool/ext/6461a450-f043-4d1e-bc03-4a68ed5fe94a/crypt/zone" + }, + { + "zone": { + "id": "52f57ef8-546a-43bd-a0f3-8c42b99c37a6", + "underlay_address": "fd00:1122:3344:113::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:113::3]:32345", + "dataset": { + "pool_name": "oxp_f6530e9c-6d64-44fa-93d5-ae427916fbf1" + } + } + }, + "root": "/pool/ext/97662260-6b62-450f-9d7e-42f7dee5d568/crypt/zone" + }, + { + "zone": { + "id": "3ee87855-9423-43ff-800a-fa4fdbf1d956", + "underlay_address": "fd00:1122:3344:113::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:113::a]:32345", + "dataset": { 
+ "pool_name": "oxp_6461a450-f043-4d1e-bc03-4a68ed5fe94a" + } + } + }, + "root": "/pool/ext/9515dc86-fe62-4d4f-b38d-b3461cc042fc/crypt/zone" + }, + { + "zone": { + "id": "55d0ddf9-9b24-4a7a-b97f-248e240f9ba6", + "underlay_address": "fd00:1122:3344:113::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:113::5]:32345", + "dataset": { + "pool_name": "oxp_97662260-6b62-450f-9d7e-42f7dee5d568" + } + } + }, + "root": "/pool/ext/9515dc86-fe62-4d4f-b38d-b3461cc042fc/crypt/zone" + }, + { + "zone": { + "id": "014cad37-56a7-4b2a-9c9e-505b15b4de85", + "underlay_address": "fd00:1122:3344:113::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:113::b]:32345", + "dataset": { + "pool_name": "oxp_8529ce8e-21d2-4b23-b9fd-6b90c7ae4f90" + } + } + }, + "root": "/pool/ext/6461a450-f043-4d1e-bc03-4a68ed5fe94a/crypt/zone" + }, + { + "zone": { + "id": "e14fb192-aaab-42ab-aa86-c85f13955940", + "underlay_address": "fd00:1122:3344:113::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:113::6]:32345", + "dataset": { + "pool_name": "oxp_5a9455ca-fb01-4549-9a70-7579c031779d" + } + } + }, + "root": "/pool/ext/f6530e9c-6d64-44fa-93d5-ae427916fbf1/crypt/zone" + }, + { + "zone": { + "id": "14540609-9371-442b-8486-88c244e97cd4", + "underlay_address": "fd00:1122:3344:113::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:113::8]:32345", + "dataset": { + "pool_name": "oxp_2916d6f3-8775-4887-a6d3-f9723982756f" + } + } + }, + "root": "/pool/ext/8529ce8e-21d2-4b23-b9fd-6b90c7ae4f90/crypt/zone" + }, + { + "zone": { + "id": "97a6b35f-0af9-41eb-93a1-f8bc5dbba357", + "underlay_address": "fd00:1122:3344:113::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:113::7]:32345", + "dataset": { + "pool_name": "oxp_9515dc86-fe62-4d4f-b38d-b3461cc042fc" + } + } + }, + "root": "/pool/ext/8529ce8e-21d2-4b23-b9fd-6b90c7ae4f90/crypt/zone" + }, + { + "zone": { + "id": "5734aa24-cb66-4b0a-9eb2-564646f8d729", + "underlay_address": "fd00:1122:3344:113::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:113::9]:32345", + "dataset": { + "pool_name": "oxp_9f889a6c-17b1-4edd-9659-458d91439dc1" + } + } + }, + "root": "/pool/ext/a5074e7f-8d3b-40e0-a79e-dbd9af9d5693/crypt/zone" + }, + { + "zone": { + "id": "ba86eca1-1427-4540-b4a6-1d9a0e1bc656", + "underlay_address": "fd00:1122:3344:113::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:113::c]:32345", + "dataset": { + "pool_name": "oxp_a5074e7f-8d3b-40e0-a79e-dbd9af9d5693" + } + } + }, + "root": "/pool/ext/2916d6f3-8775-4887-a6d3-f9723982756f/crypt/zone" + }, + { + "zone": { + "id": "6634dbc4-d22f-40a4-8cd3-4f271d781fa1", + "underlay_address": "fd00:1122:3344:113::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:113::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/a5074e7f-8d3b-40e0-a79e-dbd9af9d5693/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled29.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled29.json new file mode 100644 index 0000000000..ccd1bd65be --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled29.json @@ -0,0 +1,184 @@ +{ + 
"omicron_generation": 2, + "ledger_generation": 5, + "zones": [ + { + "zone": { + "id": "1cdd1ebf-9321-4f2d-914c-1e617f60b41a", + "underlay_address": "fd00:1122:3344:120::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:120::8]:32345", + "dataset": { + "pool_name": "oxp_74046573-78a2-46b4-86dc-40bb2ee29dd5" + } + } + }, + "root": "/pool/ext/c1f0a9e4-ea10-4fd9-8b6d-79a2bacfec5e/crypt/zone" + }, + { + "zone": { + "id": "720a0d08-d1c0-43ba-af86-f2dac1a53639", + "underlay_address": "fd00:1122:3344:120::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:120::c]:32345", + "dataset": { + "pool_name": "oxp_068d2790-1044-41ed-97a5-b493490b14d1" + } + } + }, + "root": "/pool/ext/86cd16cf-d00d-40bc-b14a-8220b1e11476/crypt/zone" + }, + { + "zone": { + "id": "d9f0b97b-2cef-4155-b45f-7db89263e4cf", + "underlay_address": "fd00:1122:3344:120::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:120::9]:32345", + "dataset": { + "pool_name": "oxp_8171bf0d-e61e-43f9-87d6-ec8833b80102" + } + } + }, + "root": "/pool/ext/86cd16cf-d00d-40bc-b14a-8220b1e11476/crypt/zone" + }, + { + "zone": { + "id": "018edff1-0d95-45a3-9a01-39c419bec55a", + "underlay_address": "fd00:1122:3344:120::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:120::b]:32345", + "dataset": { + "pool_name": "oxp_0b11e026-f265-49a0-935f-7b234c19c789" + } + } + }, + "root": "/pool/ext/35db8700-d6a7-498c-9d2c-08eb9ab41b7c/crypt/zone" + }, + { + "zone": { + "id": "f8cc1c1e-a556-436c-836d-42052101c38a", + "underlay_address": "fd00:1122:3344:120::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:120::3]:32345", + "dataset": { + "pool_name": "oxp_ed8e5a26-5591-405a-b792-408f5b16e444" + } + } + }, + "root": "/pool/ext/1069bdee-fe5a-4164-a856-ff8ae56c07fb/crypt/zone" + }, + { + "zone": { + "id": "f9600313-fac0-45a1-a1b5-02dd6af468b9", + "underlay_address": "fd00:1122:3344:120::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:120::4]:32345", + "dataset": { + "pool_name": "oxp_c1f0a9e4-ea10-4fd9-8b6d-79a2bacfec5e" + } + } + }, + "root": "/pool/ext/74046573-78a2-46b4-86dc-40bb2ee29dd5/crypt/zone" + }, + { + "zone": { + "id": "869e4f7c-5312-4b98-bacc-1508f236bf5a", + "underlay_address": "fd00:1122:3344:120::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:120::6]:32345", + "dataset": { + "pool_name": "oxp_04aea8dc-4316-432f-a13a-d7d9b2efa3f2" + } + } + }, + "root": "/pool/ext/0b11e026-f265-49a0-935f-7b234c19c789/crypt/zone" + }, + { + "zone": { + "id": "31ed5a0c-7caf-4825-b730-85ee94fe27f1", + "underlay_address": "fd00:1122:3344:120::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:120::a]:32345", + "dataset": { + "pool_name": "oxp_86cd16cf-d00d-40bc-b14a-8220b1e11476" + } + } + }, + "root": "/pool/ext/04aea8dc-4316-432f-a13a-d7d9b2efa3f2/crypt/zone" + }, + { + "zone": { + "id": "7e5a3c39-152a-4270-b01e-9e144cca4aaa", + "underlay_address": "fd00:1122:3344:120::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:120::5]:32345", + "dataset": { + "pool_name": "oxp_1069bdee-fe5a-4164-a856-ff8ae56c07fb" + } + } + }, + "root": "/pool/ext/04aea8dc-4316-432f-a13a-d7d9b2efa3f2/crypt/zone" + }, + { + "zone": { + "id": "9a03a386-7304-4a86-bee8-153ef643195e", + "underlay_address": "fd00:1122:3344:120::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:120::7]:32345", + "dataset": { + "pool_name": "oxp_35db8700-d6a7-498c-9d2c-08eb9ab41b7c" + } + } 
+ }, + "root": "/pool/ext/068d2790-1044-41ed-97a5-b493490b14d1/crypt/zone" + }, + { + "zone": { + "id": "a800d0a7-1020-481c-8be8-ecfd28b7a2be", + "underlay_address": "fd00:1122:3344:120::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:120::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/c1f0a9e4-ea10-4fd9-8b6d-79a2bacfec5e/crypt/zone" + }, + { + "zone": { + "id": "be469efd-8e07-4b8e-bcee-6fd33373cdef", + "underlay_address": "fd00:1122:3344:3::1", + "zone_type": { + "type": "internal_dns", + "dataset": { + "pool_name": "oxp_ed8e5a26-5591-405a-b792-408f5b16e444" + }, + "http_address": "[fd00:1122:3344:3::1]:5353", + "dns_address": "[fd00:1122:3344:3::1]:53", + "gz_address": "fd00:1122:3344:3::2", + "gz_address_index": 2 + } + }, + "root": "/pool/ext/068d2790-1044-41ed-97a5-b493490b14d1/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled3.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled3.json new file mode 100644 index 0000000000..5da6d95389 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled3.json @@ -0,0 +1,178 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "19d091b8-e005-4ff4-97e1-026de95e3667", + "underlay_address": "fd00:1122:3344:10f::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10f::c]:32345", + "dataset": { + "pool_name": "oxp_11a63469-4f57-4976-8620-0055bf82dc97" + } + } + }, + "root": "/pool/ext/6a73a62c-c636-4557-af45-042cb287aee6/crypt/zone" + }, + { + "zone": { + "id": "57d77171-104e-4977-b2f9-9b529ee7f8a0", + "underlay_address": "fd00:1122:3344:10f::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10f::8]:32345", + "dataset": { + "pool_name": "oxp_7f3060af-058f-4f52-ab80-902bd13e7ef4" + } + } + }, + "root": "/pool/ext/7f3060af-058f-4f52-ab80-902bd13e7ef4/crypt/zone" + }, + { + "zone": { + "id": "b0371ccf-67da-4562-baf2-eaabe5243e9b", + "underlay_address": "fd00:1122:3344:10f::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10f::7]:32345", + "dataset": { + "pool_name": "oxp_58ae04cb-26ff-4e30-a20d-9f847bafba4d" + } + } + }, + "root": "/pool/ext/125ddcda-f94b-46bc-a10a-94e9acf40265/crypt/zone" + }, + { + "zone": { + "id": "ae3791ff-2657-4252-bd61-58ec5dc237cd", + "underlay_address": "fd00:1122:3344:10f::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10f::9]:32345", + "dataset": { + "pool_name": "oxp_125ddcda-f94b-46bc-a10a-94e9acf40265" + } + } + }, + "root": "/pool/ext/58ae04cb-26ff-4e30-a20d-9f847bafba4d/crypt/zone" + }, + { + "zone": { + "id": "73f865dc-5db7-48c6-9dc4-dff56dd8c045", + "underlay_address": "fd00:1122:3344:10f::3", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:10f::3]:17000" + } + }, + "root": "/pool/ext/11a63469-4f57-4976-8620-0055bf82dc97/crypt/zone" + }, + { + "zone": { + "id": "e5d0170a-0d60-4c51-8f72-4c301979690e", + "underlay_address": "fd00:1122:3344:10f::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10f::6]:32345", + "dataset": { + "pool_name": "oxp_efe4cbab-2a39-4d7d-ae6c-83eb3ab8d4b5" + } + } + }, + "root": 
"/pool/ext/6a73a62c-c636-4557-af45-042cb287aee6/crypt/zone" + }, + { + "zone": { + "id": "ea6894de-c575-43bc-86e9-65b8a58499ff", + "underlay_address": "fd00:1122:3344:10f::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10f::a]:32345", + "dataset": { + "pool_name": "oxp_a87dc882-8b88-4a99-9628-5db79072cffa" + } + } + }, + "root": "/pool/ext/11a63469-4f57-4976-8620-0055bf82dc97/crypt/zone" + }, + { + "zone": { + "id": "3081dc99-4fa9-4238-adfa-b9ca381c1f7b", + "underlay_address": "fd00:1122:3344:10f::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10f::b]:32345", + "dataset": { + "pool_name": "oxp_6a73a62c-c636-4557-af45-042cb287aee6" + } + } + }, + "root": "/pool/ext/a87dc882-8b88-4a99-9628-5db79072cffa/crypt/zone" + }, + { + "zone": { + "id": "b4a3d7c8-487d-4d76-ae4e-a6a51595a5a6", + "underlay_address": "fd00:1122:3344:10f::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10f::d]:32345", + "dataset": { + "pool_name": "oxp_a12f87ee-9918-4269-9de4-4bad4fb41caa" + } + } + }, + "root": "/pool/ext/a12f87ee-9918-4269-9de4-4bad4fb41caa/crypt/zone" + }, + { + "zone": { + "id": "5ebcee26-f76c-4206-8d81-584ac138d3b9", + "underlay_address": "fd00:1122:3344:10f::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10f::4]:32345", + "dataset": { + "pool_name": "oxp_27f1917e-fb69-496a-9d40-8ef0d0c0ee55" + } + } + }, + "root": "/pool/ext/58ae04cb-26ff-4e30-a20d-9f847bafba4d/crypt/zone" + }, + { + "zone": { + "id": "90b2bc57-3a2a-4117-bb6d-7eda7542329a", + "underlay_address": "fd00:1122:3344:10f::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10f::5]:32345", + "dataset": { + "pool_name": "oxp_a222e405-40f6-4fdd-9146-94f7d94ed08a" + } + } + }, + "root": "/pool/ext/a12f87ee-9918-4269-9de4-4bad4fb41caa/crypt/zone" + }, + { + "zone": { + "id": "0fb540af-58d3-4abc-bfad-e49765c2b1ee", + "underlay_address": "fd00:1122:3344:10f::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:10f::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/58ae04cb-26ff-4e30-a20d-9f847bafba4d/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled30.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled30.json new file mode 100644 index 0000000000..c92a638b85 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled30.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "dda0f1c6-84a5-472c-b350-a799c8d3d0eb", + "underlay_address": "fd00:1122:3344:115::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:115::8]:32345", + "dataset": { + "pool_name": "oxp_028b6c9e-5a0e-43d2-a8ed-a5946cf62924" + } + } + }, + "root": "/pool/ext/b8d84b9c-a65e-4c86-8196-69da5317ae63/crypt/zone" + }, + { + "zone": { + "id": "157672f9-113f-48b7-9808-dff3c3e67dcd", + "underlay_address": "fd00:1122:3344:115::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:115::a]:32345", + "dataset": { + "pool_name": "oxp_4fdca201-b37e-4072-a1cc-3cb7705954eb" + } + } + }, + "root": "/pool/ext/b8d84b9c-a65e-4c86-8196-69da5317ae63/crypt/zone" + }, + { + "zone": { + "id": 
"5a7d4f67-a70f-4d8b-8d35-4dc600991fb5", + "underlay_address": "fd00:1122:3344:115::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:115::5]:32345", + "dataset": { + "pool_name": "oxp_11a991e5-19a9-48b0-8186-34249ef67957" + } + } + }, + "root": "/pool/ext/1e9c9764-aaa4-4681-b110-a937b4c52748/crypt/zone" + }, + { + "zone": { + "id": "c7036645-b680-4816-834f-8ae1af24c159", + "underlay_address": "fd00:1122:3344:115::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:115::b]:32345", + "dataset": { + "pool_name": "oxp_0780be56-c13d-4c6a-a1ac-37753a0da820" + } + } + }, + "root": "/pool/ext/80a8d756-ee22-4c88-8b5b-4a46f7eca249/crypt/zone" + }, + { + "zone": { + "id": "45e47e4b-708f-40b5-a8c8-fbfd73696d45", + "underlay_address": "fd00:1122:3344:115::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:115::7]:32345", + "dataset": { + "pool_name": "oxp_80a8d756-ee22-4c88-8b5b-4a46f7eca249" + } + } + }, + "root": "/pool/ext/4fdca201-b37e-4072-a1cc-3cb7705954eb/crypt/zone" + }, + { + "zone": { + "id": "e805b0c1-3f80-49da-8dc1-caaf843e5003", + "underlay_address": "fd00:1122:3344:115::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:115::c]:32345", + "dataset": { + "pool_name": "oxp_d54e1ed7-e589-4413-a487-6e9a257104e7" + } + } + }, + "root": "/pool/ext/d54e1ed7-e589-4413-a487-6e9a257104e7/crypt/zone" + }, + { + "zone": { + "id": "e47d3f81-3df6-4c35-bec6-41277bc74c07", + "underlay_address": "fd00:1122:3344:115::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:115::4]:32345", + "dataset": { + "pool_name": "oxp_b8d84b9c-a65e-4c86-8196-69da5317ae63" + } + } + }, + "root": "/pool/ext/772b3aaa-3501-4dc7-9b3d-048b8b1f7970/crypt/zone" + }, + { + "zone": { + "id": "2a796a69-b061-44c7-b2df-35bc611f10f5", + "underlay_address": "fd00:1122:3344:115::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:115::6]:32345", + "dataset": { + "pool_name": "oxp_73abe9e0-d38e-48fc-bdec-b094bfa5670d" + } + } + }, + "root": "/pool/ext/028b6c9e-5a0e-43d2-a8ed-a5946cf62924/crypt/zone" + }, + { + "zone": { + "id": "4e1d2af1-8ef4-4762-aa80-b08da08b45bb", + "underlay_address": "fd00:1122:3344:115::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:115::3]:32345", + "dataset": { + "pool_name": "oxp_772b3aaa-3501-4dc7-9b3d-048b8b1f7970" + } + } + }, + "root": "/pool/ext/d54e1ed7-e589-4413-a487-6e9a257104e7/crypt/zone" + }, + { + "zone": { + "id": "fb1b10d5-b7cb-416d-98fc-b5d3bc02d495", + "underlay_address": "fd00:1122:3344:115::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:115::9]:32345", + "dataset": { + "pool_name": "oxp_1e9c9764-aaa4-4681-b110-a937b4c52748" + } + } + }, + "root": "/pool/ext/b8d84b9c-a65e-4c86-8196-69da5317ae63/crypt/zone" + }, + { + "zone": { + "id": "5155463c-8a09-45a5-ad1b-817f2e93b284", + "underlay_address": "fd00:1122:3344:115::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:115::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/772b3aaa-3501-4dc7-9b3d-048b8b1f7970/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled31.json 
b/sled-agent/tests/output/new-zones-ledgers/rack3-sled31.json new file mode 100644 index 0000000000..5e38262740 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled31.json @@ -0,0 +1,181 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "a0eae689-8e6b-4297-bb3d-8b7ffc5c4a07", + "underlay_address": "fd00:1122:3344:102::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::c]:32345", + "dataset": { + "pool_name": "oxp_274cb567-fd74-4e00-b9c7-6ca367b3fda4" + } + } + }, + "root": "/pool/ext/1443b190-de16-42b0-b881-e87e875dd507/crypt/zone" + }, + { + "zone": { + "id": "9cea406d-451e-4328-9052-b58487f799a5", + "underlay_address": "fd00:1122:3344:102::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::b]:32345", + "dataset": { + "pool_name": "oxp_89c7f72e-632c-462b-a515-01cd80683711" + } + } + }, + "root": "/pool/ext/274cb567-fd74-4e00-b9c7-6ca367b3fda4/crypt/zone" + }, + { + "zone": { + "id": "9c7dad7e-7f60-4bf4-8efc-0883a17e7cf6", + "underlay_address": "fd00:1122:3344:102::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::6]:32345", + "dataset": { + "pool_name": "oxp_2c8e5637-b989-4b8f-82ac-ff2e9102b560" + } + } + }, + "root": "/pool/ext/1443b190-de16-42b0-b881-e87e875dd507/crypt/zone" + }, + { + "zone": { + "id": "73015cba-79c6-4a67-97d8-fa0819cbf750", + "underlay_address": "fd00:1122:3344:102::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::a]:32345", + "dataset": { + "pool_name": "oxp_fa62108e-f7bb-4f6d-86f3-8094a1ea8352" + } + } + }, + "root": "/pool/ext/2c8e5637-b989-4b8f-82ac-ff2e9102b560/crypt/zone" + }, + { + "zone": { + "id": "f9ca3097-072e-4e7f-9f50-eb7c7ae39b6f", + "underlay_address": "fd00:1122:3344:102::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::5]:32345", + "dataset": { + "pool_name": "oxp_42c6602c-2ccf-48ce-8344-693c832fd693" + } + } + }, + "root": "/pool/ext/2c8e5637-b989-4b8f-82ac-ff2e9102b560/crypt/zone" + }, + { + "zone": { + "id": "e7855e05-a125-4a80-ac2c-8a2db96e1bf8", + "underlay_address": "fd00:1122:3344:102::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::7]:32345", + "dataset": { + "pool_name": "oxp_1f72afd3-d2aa-46a8-b81a-54dbcc2f6317" + } + } + }, + "root": "/pool/ext/42c6602c-2ccf-48ce-8344-693c832fd693/crypt/zone" + }, + { + "zone": { + "id": "e5de9bc9-e996-4fea-8318-ad7a8a6be4a3", + "underlay_address": "fd00:1122:3344:102::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::4]:32345", + "dataset": { + "pool_name": "oxp_1443b190-de16-42b0-b881-e87e875dd507" + } + } + }, + "root": "/pool/ext/89c7f72e-632c-462b-a515-01cd80683711/crypt/zone" + }, + { + "zone": { + "id": "cd0d0aac-44ff-4566-9260-a64ae6cecef4", + "underlay_address": "fd00:1122:3344:102::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::8]:32345", + "dataset": { + "pool_name": "oxp_92c0d1f6-cb4d-4ddb-b5ba-979fb3491812" + } + } + }, + "root": "/pool/ext/89c7f72e-632c-462b-a515-01cd80683711/crypt/zone" + }, + { + "zone": { + "id": "a8230592-0e7a-46c8-a653-7587a27f05bf", + "underlay_address": "fd00:1122:3344:102::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::9]:32345", + "dataset": { + "pool_name": "oxp_1b7873de-99fd-454f-b576-bff695524133" + } + } + }, + "root": "/pool/ext/92c0d1f6-cb4d-4ddb-b5ba-979fb3491812/crypt/zone" + }, + { + "zone": { + "id": 
"c19ffbb1-4dc1-4825-a3cf-080e9b543b16", + "underlay_address": "fd00:1122:3344:102::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:102::d]:32345", + "dataset": { + "pool_name": "oxp_67823df7-511c-4984-b98c-7a8f5c40c22d" + } + } + }, + "root": "/pool/ext/1443b190-de16-42b0-b881-e87e875dd507/crypt/zone" + }, + { + "zone": { + "id": "ff30fe7c-51f3-43b9-a788-d8f94a7bb028", + "underlay_address": "fd00:1122:3344:102::3", + "zone_type": { + "type": "cockroach_db", + "address": "[fd00:1122:3344:102::3]:32221", + "dataset": { + "pool_name": "oxp_1443b190-de16-42b0-b881-e87e875dd507" + } + } + }, + "root": "/pool/ext/fa62108e-f7bb-4f6d-86f3-8094a1ea8352/crypt/zone" + }, + { + "zone": { + "id": "16b50c55-8117-4efd-aabf-0273677b89d5", + "underlay_address": "fd00:1122:3344:102::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:102::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/fa62108e-f7bb-4f6d-86f3-8094a1ea8352/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled4.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled4.json new file mode 100644 index 0000000000..7c1d269d61 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled4.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "22452953-ee80-4659-a555-8e027bf205b0", + "underlay_address": "fd00:1122:3344:10c::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10c::4]:32345", + "dataset": { + "pool_name": "oxp_92ba1667-a6f7-4913-9b00-14825384c7bf" + } + } + }, + "root": "/pool/ext/ab62b941-5f84-42c7-929d-295b20efffe7/crypt/zone" + }, + { + "zone": { + "id": "9a5a2fcf-44a0-4468-979a-a71686cef627", + "underlay_address": "fd00:1122:3344:10c::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10c::3]:32345", + "dataset": { + "pool_name": "oxp_dbfdc981-1b81-4d7d-9449-9530890b199a" + } + } + }, + "root": "/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone" + }, + { + "zone": { + "id": "a014f12e-2636-4258-af76-e01d9b8d1c1f", + "underlay_address": "fd00:1122:3344:10c::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10c::b]:32345", + "dataset": { + "pool_name": "oxp_ab62b941-5f84-42c7-929d-295b20efffe7" + } + } + }, + "root": "/pool/ext/a624a843-1c4e-41c3-a1d2-4be7a6c57e9b/crypt/zone" + }, + { + "zone": { + "id": "431768b8-26ba-4ab4-b616-9e183bb79b8b", + "underlay_address": "fd00:1122:3344:10c::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10c::7]:32345", + "dataset": { + "pool_name": "oxp_7c121177-3210-4457-9b42-3657add6e166" + } + } + }, + "root": "/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone" + }, + { + "zone": { + "id": "22992c56-bd5a-4d0f-86c5-d6f8e87b7bbb", + "underlay_address": "fd00:1122:3344:10c::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10c::9]:32345", + "dataset": { + "pool_name": "oxp_842bdd28-196e-4b18-83db-68bd81176a44" + } + } + }, + "root": "/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone" + }, + { + "zone": { + "id": "de376149-aa45-4660-9ae6-15e8ba4a4233", + "underlay_address": "fd00:1122:3344:10c::5", + 
"zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10c::5]:32345", + "dataset": { + "pool_name": "oxp_25856a84-6707-4b94-81d1-b43d5bc990d7" + } + } + }, + "root": "/pool/ext/7c121177-3210-4457-9b42-3657add6e166/crypt/zone" + }, + { + "zone": { + "id": "ceeba69d-8c0a-47df-a37b-7f1b90f23016", + "underlay_address": "fd00:1122:3344:10c::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10c::a]:32345", + "dataset": { + "pool_name": "oxp_a624a843-1c4e-41c3-a1d2-4be7a6c57e9b" + } + } + }, + "root": "/pool/ext/74ac4da9-cdae-4c08-8431-11211184aa09/crypt/zone" + }, + { + "zone": { + "id": "65293ce4-2e63-4336-9207-3c61f58667f9", + "underlay_address": "fd00:1122:3344:10c::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10c::c]:32345", + "dataset": { + "pool_name": "oxp_74ac4da9-cdae-4c08-8431-11211184aa09" + } + } + }, + "root": "/pool/ext/842bdd28-196e-4b18-83db-68bd81176a44/crypt/zone" + }, + { + "zone": { + "id": "e8f55a5d-65f9-436c-bc25-1d1a7070e876", + "underlay_address": "fd00:1122:3344:10c::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10c::6]:32345", + "dataset": { + "pool_name": "oxp_9bfe385c-16dd-4209-bc0b-f28ae75d58e3" + } + } + }, + "root": "/pool/ext/92ba1667-a6f7-4913-9b00-14825384c7bf/crypt/zone" + }, + { + "zone": { + "id": "2dfbd4c6-afbf-4c8c-bf40-764f02727852", + "underlay_address": "fd00:1122:3344:10c::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10c::8]:32345", + "dataset": { + "pool_name": "oxp_55eb093d-6b6f-418c-9767-09afe4c51fff" + } + } + }, + "root": "/pool/ext/dbfdc981-1b81-4d7d-9449-9530890b199a/crypt/zone" + }, + { + "zone": { + "id": "8c73baf7-1a58-4e2c-b4d1-966c89a18d03", + "underlay_address": "fd00:1122:3344:10c::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:10c::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/842bdd28-196e-4b18-83db-68bd81176a44/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled5.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled5.json new file mode 100644 index 0000000000..acbfa17eda --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled5.json @@ -0,0 +1,178 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "2f488e7b-fd93-48a6-8b2b-61f6e8336268", + "underlay_address": "fd00:1122:3344:101::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::b]:32345", + "dataset": { + "pool_name": "oxp_5840a3b7-f765-45d3-8a41-7f543f936bee" + } + } + }, + "root": "/pool/ext/dd084b76-1130-4ad3-9196-6b02be607fe9/crypt/zone" + }, + { + "zone": { + "id": "1ed5fd3f-933a-4921-a91f-5c286823f8d4", + "underlay_address": "fd00:1122:3344:101::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::a]:32345", + "dataset": { + "pool_name": "oxp_c1e807e7-b64a-4dbd-b845-ffed0b9a54f1" + } + } + }, + "root": "/pool/ext/be06ea9c-df86-4fec-b5dd-8809710893af/crypt/zone" + }, + { + "zone": { + "id": "0f8f1013-465d-4b49-b55d-f0b9bf6f789a", + "underlay_address": "fd00:1122:3344:101::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::6]:32345", + "dataset": { + 
"pool_name": "oxp_4dfa7003-0305-47f5-b23d-88a228c1e12e" + } + } + }, + "root": "/pool/ext/be06ea9c-df86-4fec-b5dd-8809710893af/crypt/zone" + }, + { + "zone": { + "id": "2e4ef017-6c62-40bc-bab5-f2e01addad22", + "underlay_address": "fd00:1122:3344:101::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::7]:32345", + "dataset": { + "pool_name": "oxp_d94e9c58-e6d1-444b-b7d8-19ac17dea042" + } + } + }, + "root": "/pool/ext/c1e807e7-b64a-4dbd-b845-ffed0b9a54f1/crypt/zone" + }, + { + "zone": { + "id": "6a0baf13-a80b-4778-a0ab-a69cd851de2d", + "underlay_address": "fd00:1122:3344:101::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::9]:32345", + "dataset": { + "pool_name": "oxp_be06ea9c-df86-4fec-b5dd-8809710893af" + } + } + }, + "root": "/pool/ext/a9d419d4-5915-4a40-baa3-3512785de034/crypt/zone" + }, + { + "zone": { + "id": "391ec257-fd47-4cc8-9bfa-49a0747a9a67", + "underlay_address": "fd00:1122:3344:101::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::8]:32345", + "dataset": { + "pool_name": "oxp_a9d419d4-5915-4a40-baa3-3512785de034" + } + } + }, + "root": "/pool/ext/709d5d04-5dff-4558-8b5d-fbc2a7d83036/crypt/zone" + }, + { + "zone": { + "id": "fd8e615a-f170-4da9-b8d0-2a5a123d8682", + "underlay_address": "fd00:1122:3344:101::3", + "zone_type": { + "type": "crucible_pantry", + "address": "[fd00:1122:3344:101::3]:17000" + } + }, + "root": "/pool/ext/dd084b76-1130-4ad3-9196-6b02be607fe9/crypt/zone" + }, + { + "zone": { + "id": "f8a793f4-cd08-49ec-8fee-6bcd37092fdc", + "underlay_address": "fd00:1122:3344:101::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::c]:32345", + "dataset": { + "pool_name": "oxp_709d5d04-5dff-4558-8b5d-fbc2a7d83036" + } + } + }, + "root": "/pool/ext/d94e9c58-e6d1-444b-b7d8-19ac17dea042/crypt/zone" + }, + { + "zone": { + "id": "c67d44be-d6b8-4a08-a7e0-3ab300749ad6", + "underlay_address": "fd00:1122:3344:101::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::4]:32345", + "dataset": { + "pool_name": "oxp_231cd696-2839-4a9a-ae42-6d875a98a797" + } + } + }, + "root": "/pool/ext/709d5d04-5dff-4558-8b5d-fbc2a7d83036/crypt/zone" + }, + { + "zone": { + "id": "e91b4957-8165-451d-9fa5-090c3a39f199", + "underlay_address": "fd00:1122:3344:101::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::d]:32345", + "dataset": { + "pool_name": "oxp_dd084b76-1130-4ad3-9196-6b02be607fe9" + } + } + }, + "root": "/pool/ext/5840a3b7-f765-45d3-8a41-7f543f936bee/crypt/zone" + }, + { + "zone": { + "id": "5e737b6e-d33d-4a2c-b8c0-3cad9d05a68f", + "underlay_address": "fd00:1122:3344:101::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:101::5]:32345", + "dataset": { + "pool_name": "oxp_8fa4f837-c6f3-4c65-88d4-21eb3cd7ffee" + } + } + }, + "root": "/pool/ext/dd084b76-1130-4ad3-9196-6b02be607fe9/crypt/zone" + }, + { + "zone": { + "id": "7e6b7816-b1a6-40f3-894a-a5d5c0571dbb", + "underlay_address": "fd00:1122:3344:101::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:101::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/be06ea9c-df86-4fec-b5dd-8809710893af/crypt/zone" + } + ] +} \ No newline at end of file diff --git 
a/sled-agent/tests/output/new-zones-ledgers/rack3-sled6.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled6.json new file mode 100644 index 0000000000..ce4b6f03cd --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled6.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "eafffae7-69fd-49e1-9541-7cf237ab12b3", + "underlay_address": "fd00:1122:3344:110::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:110::3]:32345", + "dataset": { + "pool_name": "oxp_929404cd-2522-4440-b21c-91d466a9a7e0" + } + } + }, + "root": "/pool/ext/aff390ed-8d70-49fa-9000-5420b54ab118/crypt/zone" + }, + { + "zone": { + "id": "f4bccf15-d69f-402d-9bd2-7959a4cb2823", + "underlay_address": "fd00:1122:3344:110::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:110::9]:32345", + "dataset": { + "pool_name": "oxp_f80f96be-a3d7-490a-96a7-faf7da80a579" + } + } + }, + "root": "/pool/ext/6bcd54c8-d4a8-429d-8f17-cf02615eb063/crypt/zone" + }, + { + "zone": { + "id": "82e51c9d-c187-4baa-8307-e46eeafc5ff2", + "underlay_address": "fd00:1122:3344:110::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:110::5]:32345", + "dataset": { + "pool_name": "oxp_37d86199-6834-49d9-888a-88ff6f281b29" + } + } + }, + "root": "/pool/ext/d2e27e2a-2deb-42ae-84a7-c2d06f3aeb4f/crypt/zone" + }, + { + "zone": { + "id": "cf667caf-304c-40c4-acce-f0eb05d011ef", + "underlay_address": "fd00:1122:3344:110::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:110::8]:32345", + "dataset": { + "pool_name": "oxp_625c0110-644e-4d63-8321-b85ab5642260" + } + } + }, + "root": "/pool/ext/d2e27e2a-2deb-42ae-84a7-c2d06f3aeb4f/crypt/zone" + }, + { + "zone": { + "id": "14e60912-108e-4dd3-984e-2332a183b346", + "underlay_address": "fd00:1122:3344:110::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:110::b]:32345", + "dataset": { + "pool_name": "oxp_fa6470f5-0a4c-4fef-b0b1-57c8749c6cca" + } + } + }, + "root": "/pool/ext/6c5ab641-3bd4-4d8c-96f4-4f56c1045142/crypt/zone" + }, + { + "zone": { + "id": "1aacf923-c96f-4bab-acb0-63f28e86eef6", + "underlay_address": "fd00:1122:3344:110::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:110::c]:32345", + "dataset": { + "pool_name": "oxp_21b0f3ed-d27f-4996-968b-bf2b494d9308" + } + } + }, + "root": "/pool/ext/625c0110-644e-4d63-8321-b85ab5642260/crypt/zone" + }, + { + "zone": { + "id": "b9db0845-04d3-4dc1-84ba-224749562a6c", + "underlay_address": "fd00:1122:3344:110::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:110::6]:32345", + "dataset": { + "pool_name": "oxp_d2e27e2a-2deb-42ae-84a7-c2d06f3aeb4f" + } + } + }, + "root": "/pool/ext/aff390ed-8d70-49fa-9000-5420b54ab118/crypt/zone" + }, + { + "zone": { + "id": "38b51865-ee80-4e1b-a40b-3452951f9022", + "underlay_address": "fd00:1122:3344:110::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:110::7]:32345", + "dataset": { + "pool_name": "oxp_6bcd54c8-d4a8-429d-8f17-cf02615eb063" + } + } + }, + "root": "/pool/ext/37d86199-6834-49d9-888a-88ff6f281b29/crypt/zone" + }, + { + "zone": { + "id": "4bc441f6-f7e5-4d68-8751-53ef1e251c47", + "underlay_address": "fd00:1122:3344:110::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:110::a]:32345", + "dataset": { + "pool_name": "oxp_6c5ab641-3bd4-4d8c-96f4-4f56c1045142" + } + } + }, + "root": "/pool/ext/21b0f3ed-d27f-4996-968b-bf2b494d9308/crypt/zone" + }, + { 
+ "zone": { + "id": "d2c20cf8-ed4c-4815-add9-45996364f721", + "underlay_address": "fd00:1122:3344:110::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:110::4]:32345", + "dataset": { + "pool_name": "oxp_aff390ed-8d70-49fa-9000-5420b54ab118" + } + } + }, + "root": "/pool/ext/6c5ab641-3bd4-4d8c-96f4-4f56c1045142/crypt/zone" + }, + { + "zone": { + "id": "1bb548cb-889a-411e-8c67-d1b785225180", + "underlay_address": "fd00:1122:3344:110::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:110::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/6bcd54c8-d4a8-429d-8f17-cf02615eb063/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled7.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled7.json new file mode 100644 index 0000000000..62653d0767 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled7.json @@ -0,0 +1,167 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "2eb74fa3-71ec-484c-8ffa-3daeab0e4c78", + "underlay_address": "fd00:1122:3344:11d::3", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11d::3]:32345", + "dataset": { + "pool_name": "oxp_c6b63fea-e3e2-4806-b8dc-bdfe7b5c3d89" + } + } + }, + "root": "/pool/ext/9f20cbae-7a63-4c31-9386-2ac3cbe12030/crypt/zone" + }, + { + "zone": { + "id": "9f92bfcf-7435-44a6-8e77-0597f93cd0b4", + "underlay_address": "fd00:1122:3344:11d::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11d::7]:32345", + "dataset": { + "pool_name": "oxp_9fa336f1-2b69-4ebf-9553-e3bab7e3e6ef" + } + } + }, + "root": "/pool/ext/e05a6264-63f2-4961-bc14-57b4f65614c0/crypt/zone" + }, + { + "zone": { + "id": "1bf9aed4-9fd3-4d87-b8e7-7f066d25ec1d", + "underlay_address": "fd00:1122:3344:11d::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11d::b]:32345", + "dataset": { + "pool_name": "oxp_a5a52f47-9c9a-4519-83dc-abc56619495d" + } + } + }, + "root": "/pool/ext/cbcad26e-5e52-41b7-9875-1a84d30d8a15/crypt/zone" + }, + { + "zone": { + "id": "2a722aa7-cd8a-445d-83fe-57fc9b9a8249", + "underlay_address": "fd00:1122:3344:11d::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11d::8]:32345", + "dataset": { + "pool_name": "oxp_1f4b71eb-505f-4706-912c-b13dd3f2eafb" + } + } + }, + "root": "/pool/ext/a5a52f47-9c9a-4519-83dc-abc56619495d/crypt/zone" + }, + { + "zone": { + "id": "76af5b23-d833-435c-b848-2a09d9fad9a1", + "underlay_address": "fd00:1122:3344:11d::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11d::c]:32345", + "dataset": { + "pool_name": "oxp_cbcad26e-5e52-41b7-9875-1a84d30d8a15" + } + } + }, + "root": "/pool/ext/9f20cbae-7a63-4c31-9386-2ac3cbe12030/crypt/zone" + }, + { + "zone": { + "id": "3a412bf4-a385-4e66-9ada-a87f6536d6ca", + "underlay_address": "fd00:1122:3344:11d::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11d::4]:32345", + "dataset": { + "pool_name": "oxp_e05a6264-63f2-4961-bc14-57b4f65614c0" + } + } + }, + "root": "/pool/ext/e05a6264-63f2-4961-bc14-57b4f65614c0/crypt/zone" + }, + { + "zone": { + "id": "99a25fa7-8231-4a46-a6ec-ffc5281db1f8", + "underlay_address": 
"fd00:1122:3344:11d::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11d::5]:32345", + "dataset": { + "pool_name": "oxp_722494ab-9a2b-481b-ac11-292fded682a5" + } + } + }, + "root": "/pool/ext/e05a6264-63f2-4961-bc14-57b4f65614c0/crypt/zone" + }, + { + "zone": { + "id": "06c7ddc8-9b3e-48ef-9874-0c40874e9877", + "underlay_address": "fd00:1122:3344:11d::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11d::a]:32345", + "dataset": { + "pool_name": "oxp_8c3972d1-5b17-4479-88cc-1c33e4344160" + } + } + }, + "root": "/pool/ext/8c3972d1-5b17-4479-88cc-1c33e4344160/crypt/zone" + }, + { + "zone": { + "id": "1212b2dc-157d-4bd3-94af-fb5db1d91f24", + "underlay_address": "fd00:1122:3344:11d::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11d::9]:32345", + "dataset": { + "pool_name": "oxp_9f20cbae-7a63-4c31-9386-2ac3cbe12030" + } + } + }, + "root": "/pool/ext/977aa6c3-2026-4178-9948-e09f78008575/crypt/zone" + }, + { + "zone": { + "id": "b1fb5f2e-b20d-4f4c-9f6f-bbeb1a98dd50", + "underlay_address": "fd00:1122:3344:11d::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:11d::6]:32345", + "dataset": { + "pool_name": "oxp_977aa6c3-2026-4178-9948-e09f78008575" + } + } + }, + "root": "/pool/ext/722494ab-9a2b-481b-ac11-292fded682a5/crypt/zone" + }, + { + "zone": { + "id": "e68dde0f-0647-46db-ae1c-711835c13e25", + "underlay_address": "fd00:1122:3344:11d::d", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:11d::d]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/1f4b71eb-505f-4706-912c-b13dd3f2eafb/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled8.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled8.json new file mode 100644 index 0000000000..b848826231 --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled8.json @@ -0,0 +1,198 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "85c18b7c-a100-458c-b18d-ecfdacaefac4", + "underlay_address": "fd00:1122:3344:10e::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10e::5]:32345", + "dataset": { + "pool_name": "oxp_07b266bc-86c3-4a76-9522-8b34ba1ae78c" + } + } + }, + "root": "/pool/ext/5b88e44e-f886-4de8-8a6b-48ea5ed9d70b/crypt/zone" + }, + { + "zone": { + "id": "db303465-7879-4d86-8da8-a0c7162e5184", + "underlay_address": "fd00:1122:3344:10e::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10e::4]:32345", + "dataset": { + "pool_name": "oxp_e9488a32-880d-44a2-8948-db0b7e3a35b5" + } + } + }, + "root": "/pool/ext/8d798756-7200-4db4-9faf-f41b75106a63/crypt/zone" + }, + { + "zone": { + "id": "c44ce6be-512d-4104-9260-a5b8fe373937", + "underlay_address": "fd00:1122:3344:10e::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10e::9]:32345", + "dataset": { + "pool_name": "oxp_025dfc06-5aeb-407f-adc8-ba18dc9bba35" + } + } + }, + "root": "/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone" + }, + { + "zone": { + "id": "1cfdb5b6-e568-436a-a85f-7fecf1b8eef2", + "underlay_address": "fd00:1122:3344:10e::3", + "zone_type": { + "type": "nexus", + "internal_address": 
"[fd00:1122:3344:10e::3]:12221", + "external_ip": "45.154.216.36", + "nic": { + "id": "569754a2-a5e0-4aa8-90a7-2fa65f43b667", + "kind": { + "type": "service", + "id": "1cfdb5b6-e568-436a-a85f-7fecf1b8eef2" + }, + "name": "nexus-1cfdb5b6-e568-436a-a85f-7fecf1b8eef2", + "ip": "172.30.2.6", + "mac": "A8:40:25:FF:EC:6B", + "subnet": "172.30.2.0/24", + "vni": 100, + "primary": true, + "slot": 0 + }, + "external_tls": true, + "external_dns_servers": [ + "1.1.1.1", + "8.8.8.8" + ] + } + }, + "root": "/pool/ext/025dfc06-5aeb-407f-adc8-ba18dc9bba35/crypt/zone" + }, + { + "zone": { + "id": "44a68792-ca14-442e-b7a9-11970d50ba0e", + "underlay_address": "fd00:1122:3344:10e::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10e::a]:32345", + "dataset": { + "pool_name": "oxp_2a492098-7df3-4409-9466-561edb7aa99b" + } + } + }, + "root": "/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone" + }, + { + "zone": { + "id": "514cf0ca-6d23-434e-9785-446b83b2f029", + "underlay_address": "fd00:1122:3344:10e::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10e::7]:32345", + "dataset": { + "pool_name": "oxp_5b88e44e-f886-4de8-8a6b-48ea5ed9d70b" + } + } + }, + "root": "/pool/ext/5b88e44e-f886-4de8-8a6b-48ea5ed9d70b/crypt/zone" + }, + { + "zone": { + "id": "bc6d8347-8f64-4031-912c-932349df07fe", + "underlay_address": "fd00:1122:3344:10e::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10e::6]:32345", + "dataset": { + "pool_name": "oxp_1544ce68-3544-4cba-b3b6-1927d08b78a5" + } + } + }, + "root": "/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone" + }, + { + "zone": { + "id": "1ab0a4f5-99ad-4341-8c89-7fd03e5ccb08", + "underlay_address": "fd00:1122:3344:10e::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10e::b]:32345", + "dataset": { + "pool_name": "oxp_033eb462-968f-42ce-9c29-377bd40a3014" + } + } + }, + "root": "/pool/ext/9e1a0803-7453-4eac-91c9-d7891ecd634f/crypt/zone" + }, + { + "zone": { + "id": "d6f2520b-3d04-44d9-bd46-6ffccfcb46d2", + "underlay_address": "fd00:1122:3344:10e::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10e::8]:32345", + "dataset": { + "pool_name": "oxp_36e8d29c-1e88-4c2b-8f59-f312201067c3" + } + } + }, + "root": "/pool/ext/1544ce68-3544-4cba-b3b6-1927d08b78a5/crypt/zone" + }, + { + "zone": { + "id": "d6da9d13-bfcf-469d-a99e-faeb5e30be32", + "underlay_address": "fd00:1122:3344:10e::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10e::c]:32345", + "dataset": { + "pool_name": "oxp_9e1a0803-7453-4eac-91c9-d7891ecd634f" + } + } + }, + "root": "/pool/ext/8d798756-7200-4db4-9faf-f41b75106a63/crypt/zone" + }, + { + "zone": { + "id": "a1dc59c2-5883-4fb8-83be-ac2d95d255d1", + "underlay_address": "fd00:1122:3344:10e::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10e::d]:32345", + "dataset": { + "pool_name": "oxp_8d798756-7200-4db4-9faf-f41b75106a63" + } + } + }, + "root": "/pool/ext/36e8d29c-1e88-4c2b-8f59-f312201067c3/crypt/zone" + }, + { + "zone": { + "id": "48f25dba-7392-44ce-9bb0-28489ebc44bc", + "underlay_address": "fd00:1122:3344:10e::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:10e::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } 
+ }, + "root": "/pool/ext/5b88e44e-f886-4de8-8a6b-48ea5ed9d70b/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-agent/tests/output/new-zones-ledgers/rack3-sled9.json b/sled-agent/tests/output/new-zones-ledgers/rack3-sled9.json new file mode 100644 index 0000000000..62d45a2f5a --- /dev/null +++ b/sled-agent/tests/output/new-zones-ledgers/rack3-sled9.json @@ -0,0 +1,178 @@ +{ + "omicron_generation": 2, + "ledger_generation": 4, + "zones": [ + { + "zone": { + "id": "b452e5e1-ab4c-4994-9679-ef21b3b4fee9", + "underlay_address": "fd00:1122:3344:10b::6", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::6]:32345", + "dataset": { + "pool_name": "oxp_d63a297d-ae6a-4072-9dca-dda404044989" + } + } + }, + "root": "/pool/ext/7c204111-31df-4c32-9a3e-780411f700fd/crypt/zone" + }, + { + "zone": { + "id": "e9826cdc-6d3a-4eff-b1b5-ec4364ebe6b9", + "underlay_address": "fd00:1122:3344:10b::3", + "zone_type": { + "type": "oximeter", + "address": "[fd00:1122:3344:10b::3]:12223" + } + }, + "root": "/pool/ext/7c204111-31df-4c32-9a3e-780411f700fd/crypt/zone" + }, + { + "zone": { + "id": "b0cde4a8-f27c-46e8-8355-756be9045afc", + "underlay_address": "fd00:1122:3344:10b::b", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::b]:32345", + "dataset": { + "pool_name": "oxp_07c1a8e7-51f5-4f12-a43d-734719fef92b" + } + } + }, + "root": "/pool/ext/1f6adf64-c9b9-4ed7-b3e2-37fb25624646/crypt/zone" + }, + { + "zone": { + "id": "e2f70cf6-e285-4212-9b01-77ebf2ca9219", + "underlay_address": "fd00:1122:3344:10b::d", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::d]:32345", + "dataset": { + "pool_name": "oxp_a809f28a-7f25-4362-bc56-0cbdd72af2cb" + } + } + }, + "root": "/pool/ext/92a1bd39-6e8a-4226-b9d0-e3e8a9b8504f/crypt/zone" + }, + { + "zone": { + "id": "b0949c9d-4aa1-4bc4-9cb3-5875b9166885", + "underlay_address": "fd00:1122:3344:10b::a", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::a]:32345", + "dataset": { + "pool_name": "oxp_af0cc12b-43c5-473a-89a7-28351fbbb430" + } + } + }, + "root": "/pool/ext/cf1594ed-7c0c-467c-b0af-a689dcb427a3/crypt/zone" + }, + { + "zone": { + "id": "7cea4d59-a8ca-4826-901d-8d5bd935dc09", + "underlay_address": "fd00:1122:3344:10b::9", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::9]:32345", + "dataset": { + "pool_name": "oxp_d75dae09-4992-4a61-ab7d-5ae1d2b068ba" + } + } + }, + "root": "/pool/ext/a809f28a-7f25-4362-bc56-0cbdd72af2cb/crypt/zone" + }, + { + "zone": { + "id": "08adaeee-c3b5-4cd8-8fbd-ac371b3101c9", + "underlay_address": "fd00:1122:3344:10b::4", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::4]:32345", + "dataset": { + "pool_name": "oxp_d9f23187-fbf9-4ea5-a103-bc112263a9a7" + } + } + }, + "root": "/pool/ext/7c204111-31df-4c32-9a3e-780411f700fd/crypt/zone" + }, + { + "zone": { + "id": "3da1ade5-3fcb-4e64-aa08-81ee8a9ef723", + "underlay_address": "fd00:1122:3344:10b::8", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::8]:32345", + "dataset": { + "pool_name": "oxp_1f6adf64-c9b9-4ed7-b3e2-37fb25624646" + } + } + }, + "root": "/pool/ext/07c1a8e7-51f5-4f12-a43d-734719fef92b/crypt/zone" + }, + { + "zone": { + "id": "816f26a7-4c28-4a39-b9ad-a036678520ab", + "underlay_address": "fd00:1122:3344:10b::7", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::7]:32345", + "dataset": { + "pool_name": "oxp_92a1bd39-6e8a-4226-b9d0-e3e8a9b8504f" + } + } + }, + "root": 
"/pool/ext/d9f23187-fbf9-4ea5-a103-bc112263a9a7/crypt/zone" + }, + { + "zone": { + "id": "839f9839-409f-45d3-b8a6-7085507b90f6", + "underlay_address": "fd00:1122:3344:10b::c", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::c]:32345", + "dataset": { + "pool_name": "oxp_7c204111-31df-4c32-9a3e-780411f700fd" + } + } + }, + "root": "/pool/ext/af0cc12b-43c5-473a-89a7-28351fbbb430/crypt/zone" + }, + { + "zone": { + "id": "c717c81f-a228-4412-a34e-90f8c491d847", + "underlay_address": "fd00:1122:3344:10b::5", + "zone_type": { + "type": "crucible", + "address": "[fd00:1122:3344:10b::5]:32345", + "dataset": { + "pool_name": "oxp_cf1594ed-7c0c-467c-b0af-a689dcb427a3" + } + } + }, + "root": "/pool/ext/d63a297d-ae6a-4072-9dca-dda404044989/crypt/zone" + }, + { + "zone": { + "id": "e1fa2023-6c86-40a4-ae59-a0de112cf7a9", + "underlay_address": "fd00:1122:3344:10b::e", + "zone_type": { + "type": "internal_ntp", + "address": "[fd00:1122:3344:10b::e]:123", + "ntp_servers": [ + "440dd615-e11f-4a5d-aeb4-dcf88bb314de.host.control-plane.oxide.internal", + "cb901d3e-8811-4c4c-a274-a44130501ecf.host.control-plane.oxide.internal" + ], + "dns_servers": [ + "fd00:1122:3344:1::1", + "fd00:1122:3344:2::1", + "fd00:1122:3344:3::1" + ], + "domain": null + } + }, + "root": "/pool/ext/d9f23187-fbf9-4ea5-a103-bc112263a9a7/crypt/zone" + } + ] +} \ No newline at end of file diff --git a/sled-hardware/Cargo.toml b/sled-hardware/Cargo.toml index 36ba633067..66ecbf9d64 100644 --- a/sled-hardware/Cargo.toml +++ b/sled-hardware/Cargo.toml @@ -31,4 +31,3 @@ libefi-illumos = { git = "https://github.com/oxidecomputer/libefi-illumos", bran [dev-dependencies] illumos-utils = { workspace = true, features = ["testing"] } omicron-test-utils.workspace = true -serial_test.workspace = true diff --git a/sled-hardware/src/illumos/partitions.rs b/sled-hardware/src/illumos/partitions.rs index 4b7e69057d..de62e25cfe 100644 --- a/sled-hardware/src/illumos/partitions.rs +++ b/sled-hardware/src/illumos/partitions.rs @@ -207,7 +207,6 @@ mod test { } #[test] - #[serial_test::serial] fn ensure_partition_layout_u2_format_with_dev_path() { let logctx = test_setup_log("ensure_partition_layout_u2_format_with_dev_path"); diff --git a/smf/cockroachdb/method_script.sh b/smf/cockroachdb/method_script.sh index ee42ab1891..e5ab4e8eaa 100755 --- a/smf/cockroachdb/method_script.sh +++ b/smf/cockroachdb/method_script.sh @@ -44,6 +44,7 @@ fi args=( '--insecure' '--listen-addr' "[$LISTEN_ADDR]:$LISTEN_PORT" + '--http-addr' '127.0.0.1:8080' '--store' "$DATASTORE" '--join' "$JOIN_ADDRS" ) diff --git a/smf/nexus/multi-sled/config-partial.toml b/smf/nexus/multi-sled/config-partial.toml index 94c8f5572e..d330f32ab6 100644 --- a/smf/nexus/multi-sled/config-partial.toml +++ b/smf/nexus/multi-sled/config-partial.toml @@ -46,6 +46,7 @@ inventory.period_secs = 600 inventory.nkeep = 3 # Disable inventory collection altogether (for emergencies) inventory.disable = false +phantom_disks.period_secs = 30 [default_region_allocation_strategy] # by default, allocate across 3 distinct sleds diff --git a/smf/nexus/single-sled/config-partial.toml b/smf/nexus/single-sled/config-partial.toml index fcaa6176a8..cbd4851613 100644 --- a/smf/nexus/single-sled/config-partial.toml +++ b/smf/nexus/single-sled/config-partial.toml @@ -46,6 +46,7 @@ inventory.period_secs = 600 inventory.nkeep = 3 # Disable inventory collection altogether (for emergencies) inventory.disable = false +phantom_disks.period_secs = 30 [default_region_allocation_strategy] # by default, allocate 
without requirement for distinct sleds. diff --git a/smf/sled-agent/gimlet-standalone/config-rss.toml b/smf/sled-agent/gimlet-standalone/config-rss.toml index 29a7a79eba..f7a93260e3 100644 --- a/smf/sled-agent/gimlet-standalone/config-rss.toml +++ b/smf/sled-agent/gimlet-standalone/config-rss.toml @@ -110,6 +110,8 @@ port = "qsfp0" uplink_port_speed = "40G" # The forward error correction mode for this port. uplink_port_fec="none" +# Do not use autonegotiation +autoneg = false # Switch to use for the uplink. For single-rack deployments this can be # "switch0" (upper slot) or "switch1" (lower slot). For single-node softnpu # and dendrite stub environments, use "switch0" diff --git a/smf/sled-agent/gimlet-standalone/config.toml b/smf/sled-agent/gimlet-standalone/config.toml index e714504311..4d06895453 100644 --- a/smf/sled-agent/gimlet-standalone/config.toml +++ b/smf/sled-agent/gimlet-standalone/config.toml @@ -41,6 +41,11 @@ swap_device_size_gb = 256 data_links = ["net0", "net1"] +[dropshot] +# Host OS images are just over 800 MiB currently; set this to 2 GiB to give some +# breathing room. +request_body_max_bytes = 2_147_483_648 + [log] level = "info" mode = "file" diff --git a/smf/sled-agent/gimlet/config.toml b/smf/sled-agent/gimlet/config.toml index 442e76b393..666d55f359 100644 --- a/smf/sled-agent/gimlet/config.toml +++ b/smf/sled-agent/gimlet/config.toml @@ -37,6 +37,11 @@ swap_device_size_gb = 256 data_links = ["cxgbe0", "cxgbe1"] +[dropshot] +# Host OS images are just over 800 MiB currently; set this to 2 GiB to give some +# breathing room. +request_body_max_bytes = 2_147_483_648 + [log] level = "info" mode = "file" diff --git a/smf/sled-agent/non-gimlet/config-rss.toml b/smf/sled-agent/non-gimlet/config-rss.toml index fea3cfa5d8..fdc81c0f8f 100644 --- a/smf/sled-agent/non-gimlet/config-rss.toml +++ b/smf/sled-agent/non-gimlet/config-rss.toml @@ -109,7 +109,9 @@ port = "qsfp0" # The speed of this port. uplink_port_speed = "40G" # The forward error correction mode for this port. -uplink_port_fec="none" +uplink_port_fec = "none" +# Do not use autonegotiation +autoneg = false # Switch to use for the uplink. For single-rack deployments this can be # "switch0" (upper slot) or "switch1" (lower slot). For single-node softnpu # and dendrite stub environments, use "switch0" diff --git a/smf/sled-agent/non-gimlet/config.toml b/smf/sled-agent/non-gimlet/config.toml index 176f4002a5..432652c50b 100644 --- a/smf/sled-agent/non-gimlet/config.toml +++ b/smf/sled-agent/non-gimlet/config.toml @@ -76,6 +76,11 @@ switch_zone_maghemite_links = ["tfportrear0_0"] data_links = ["net0", "net1"] +[dropshot] +# Host OS images are just over 800 MiB currently; set this to 2 GiB to give some +# breathing room. +request_body_max_bytes = 2_147_483_648 + [log] level = "info" mode = "file" diff --git a/smf/wicketd/manifest.xml b/smf/wicketd/manifest.xml index 778a7abf2d..b45ff1544b 100644 --- a/smf/wicketd/manifest.xml +++ b/smf/wicketd/manifest.xml @@ -32,7 +32,7 @@ it expected https). 
-->

diff --git a/sp-sim/src/gimlet.rs b/sp-sim/src/gimlet.rs
index be8d903d3f..5cfad94c86 100644
--- a/sp-sim/src/gimlet.rs
+++ b/sp-sim/src/gimlet.rs
@@ -14,6 +14,7 @@ use crate::server::UdpServer;
 use crate::update::SimSpUpdate;
 use crate::Responsiveness;
 use crate::SimulatedSp;
+use crate::SIM_ROT_BOARD;
 use anyhow::{anyhow, bail, Context, Result};
 use async_trait::async_trait;
 use futures::future;
@@ -21,8 +22,11 @@ use futures::Future;
 use gateway_messages::ignition::{self, LinkEvents};
 use gateway_messages::sp_impl::SpHandler;
 use gateway_messages::sp_impl::{BoundsChecked, DeviceDescription};
+use gateway_messages::CfpaPage;
 use gateway_messages::ComponentAction;
 use gateway_messages::Header;
+use gateway_messages::RotRequest;
+use gateway_messages::RotResponse;
 use gateway_messages::RotSlotId;
 use gateway_messages::SpComponent;
 use gateway_messages::SpError;
@@ -107,10 +111,25 @@ impl SimulatedSp for Gimlet {
         self.rot.lock().unwrap().handle_deserialized(request)
     }

-    async fn last_update_data(&self) -> Option<Vec<u8>> {
+    async fn last_sp_update_data(&self) -> Option<Vec<u8>> {
         let handler = self.handler.as_ref()?;
         let handler = handler.lock().await;
-        handler.update_state.last_update_data()
+        handler.update_state.last_sp_update_data()
+    }
+
+    async fn last_rot_update_data(&self) -> Option<Vec<u8>> {
+        let handler = self.handler.as_ref()?;
+        let handler = handler.lock().await;
+        handler.update_state.last_rot_update_data()
+    }
+
+    async fn last_host_phase1_update_data(
+        &self,
+        slot: u16,
+    ) -> Option<Vec<u8>> {
+        let handler = self.handler.as_ref()?;
+        let handler = handler.lock().await;
+        handler.update_state.last_host_phase1_update_data(slot)
     }

     async fn current_update_status(&self) -> gateway_messages::UpdateStatus {
@@ -573,7 +592,7 @@ struct Handler {
     power_state: PowerState,
     startup_options: StartupOptions,
     update_state: SimSpUpdate,
-    reset_pending: bool,
+    reset_pending: Option<SpComponent>,

     // To simulate an SP reset, we should (after doing whatever housekeeping we
     // need to track the reset) intentionally _fail_ to respond to the request,
@@ -615,7 +634,7 @@ impl Handler {
             power_state: PowerState::A2,
             startup_options: StartupOptions::empty(),
             update_state: SimSpUpdate::default(),
-            reset_pending: false,
+            reset_pending: None,
             should_fail_to_respond_signal: None,
         }
     }
@@ -1065,8 +1084,9 @@ impl SpHandler for Handler {
             "port" => ?port,
             "component" => ?component,
         );
-        if component == SpComponent::SP_ITSELF {
-            self.reset_pending = true;
+        if component == SpComponent::SP_ITSELF || component == SpComponent::ROT
+        {
+            self.reset_pending = Some(component);
             Ok(())
         } else {
             Err(SpError::RequestUnsupportedForComponent)
@@ -1086,9 +1106,9 @@
             "component" => ?component,
         );
         if component == SpComponent::SP_ITSELF {
-            if self.reset_pending {
+            if self.reset_pending == Some(SpComponent::SP_ITSELF) {
                 self.update_state.sp_reset();
-                self.reset_pending = false;
+                self.reset_pending = None;
                 if let Some(signal) = self.should_fail_to_respond_signal.take()
                 {
                     // Instruct `server::handle_request()` to _not_ respond to
@@ -1099,6 +1119,14 @@
             } else {
                 Err(SpError::ResetComponentTriggerWithoutPrepare)
             }
+        } else if component == SpComponent::ROT {
+            if self.reset_pending == Some(SpComponent::ROT) {
+                self.update_state.rot_reset();
+                self.reset_pending = None;
+                Ok(())
+            } else {
+                Err(SpError::ResetComponentTriggerWithoutPrepare)
+            }
         } else {
             Err(SpError::RequestUnsupportedForComponent)
         }
@@ -1169,7 +1197,7 @@ impl SpHandler for Handler {
         port: SpPort,
         component: SpComponent,
     ) -> Result<u16, SpError> {
-        warn!(
+        debug!(
             &self.log,
             "asked for component active slot";
             "sender" => %sender,
             "port" => ?port,
@@ -1192,7 +1220,7 @@
         slot: u16,
         persist: bool,
     ) -> Result<(), SpError> {
-        warn!(
+        debug!(
             &self.log,
             "asked to set component active slot";
             "sender" => %sender,
             "port" => ?port,
@@ -1203,9 +1231,12 @@
         if component == SpComponent::ROT {
             self.rot_active_slot = rot_slot_id_from_u16(slot)?;
             Ok(())
+        } else if component == SpComponent::HOST_CPU_BOOT_FLASH {
+            self.update_state.set_active_host_slot(slot);
+            Ok(())
         } else {
             // The real SP returns `RequestUnsupportedForComponent` for anything
-            // other than the RoT, including SP_ITSELF.
+            // other than the RoT and host boot flash, including SP_ITSELF.
             Err(SpError::RequestUnsupportedForComponent)
         }
     }
@@ -1322,7 +1353,7 @@ impl SpHandler for Handler {
         static SP_VERS: &[u8] = b"0.0.1";

         static ROT_GITC: &[u8] = b"eeeeeeee";
-        static ROT_BORD: &[u8] = b"SimGimletRot";
+        static ROT_BORD: &[u8] = SIM_ROT_BOARD.as_bytes();
         static ROT_NAME: &[u8] = b"SimGimlet";
         static ROT_VERS: &[u8] = b"0.0.1";

@@ -1355,10 +1386,18 @@
     fn read_rot(
         &mut self,
-        _request: gateway_messages::RotRequest,
-        _buf: &mut [u8],
-    ) -> std::result::Result<RotResponse, SpError> {
-        Err(SpError::RequestUnsupportedForSp)
+        request: RotRequest,
+        buf: &mut [u8],
+    ) -> std::result::Result<RotResponse, SpError> {
+        let dummy_page = match request {
+            RotRequest::ReadCmpa => "gimlet-cmpa",
+            RotRequest::ReadCfpa(CfpaPage::Active) => "gimlet-cfpa-active",
+            RotRequest::ReadCfpa(CfpaPage::Inactive) => "gimlet-cfpa-inactive",
+            RotRequest::ReadCfpa(CfpaPage::Scratch) => "gimlet-cfpa-scratch",
+        };
+        buf[..dummy_page.len()].copy_from_slice(dummy_page.as_bytes());
+        buf[dummy_page.len()..].fill(0);
+        Ok(RotResponse::Ok)
     }
 }
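A note on the read_rot change above: the simulator now answers CMPA/CFPA read requests with fixed marker strings instead of an error, zero-padding the rest of the page. A standalone sketch of that fill pattern; fill_page and its caller are illustrative, not part of this change, and assume the marker fits in the buffer:

    // Copy the marker to the front of the page buffer, zero the remainder.
    fn fill_page(buf: &mut [u8], marker: &str) {
        buf[..marker.len()].copy_from_slice(marker.as_bytes());
        buf[marker.len()..].fill(0);
    }

    fn main() {
        let mut page = [0xffu8; 32];
        fill_page(&mut page, "gimlet-cmpa");
        // The marker is readable at the front; the tail is zeroed.
        assert!(page.starts_with(b"gimlet-cmpa"));
        assert!(page[b"gimlet-cmpa".len()..].iter().all(|&b| b == 0));
    }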
diff --git a/sp-sim/src/lib.rs b/sp-sim/src/lib.rs
index 668c7c3311..87643af9a8 100644
--- a/sp-sim/src/lib.rs
+++ b/sp-sim/src/lib.rs
@@ -28,6 +28,8 @@ use std::net::SocketAddrV6;
 use tokio::sync::mpsc;
 use tokio::sync::watch;

+pub const SIM_ROT_BOARD: &str = "SimRot";
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum Responsiveness {
     Responsive,
@@ -58,8 +60,19 @@ pub trait SimulatedSp {
     /// Get the last completed update delivered to this simulated SP.
     ///
-    /// Only returns data after a simulated reset.
-    async fn last_update_data(&self) -> Option<Vec<u8>>;
+    /// Only returns data after a simulated reset of the SP.
+    async fn last_sp_update_data(&self) -> Option<Vec<u8>>;
+
+    /// Get the last completed update delivered to this simulated RoT.
+    ///
+    /// Only returns data after a simulated reset of the RoT.
+    async fn last_rot_update_data(&self) -> Option<Vec<u8>>;
+
+    /// Get the last completed update delivered to the host phase1 flash slot.
+    async fn last_host_phase1_update_data(
+        &self,
+        slot: u16,
+    ) -> Option<Vec<u8>>;

     /// Get the current update status, just as would be returned by an MGS
     /// request to get the update status.
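The SimulatedSp trait above replaces the single last_update_data accessor with one accessor per update target (SP, RoT, host phase-1 slot). A hypothetical test helper built on the new methods; the helper itself is not in this diff:

    use sp_sim::SimulatedSp;

    // Assert that the bytes delivered in an RoT update were recorded; per the
    // doc comment above, this only returns Some(_) after a simulated RoT reset.
    async fn assert_rot_update_recorded(sp: &impl SimulatedSp, expected: &[u8]) {
        let data = sp
            .last_rot_update_data()
            .await
            .expect("RoT update should be recorded after a simulated reset");
        assert_eq!(&data[..], expected);
    }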
diff --git a/sp-sim/src/sidecar.rs b/sp-sim/src/sidecar.rs
index c8fb4c5481..1bd6fe4964 100644
--- a/sp-sim/src/sidecar.rs
+++ b/sp-sim/src/sidecar.rs
@@ -16,6 +16,7 @@ use crate::server::UdpServer;
 use crate::update::SimSpUpdate;
 use crate::Responsiveness;
 use crate::SimulatedSp;
+use crate::SIM_ROT_BOARD;
 use anyhow::Result;
 use async_trait::async_trait;
 use futures::future;
@@ -26,6 +27,7 @@ use gateway_messages::ignition::LinkEvents;
 use gateway_messages::sp_impl::BoundsChecked;
 use gateway_messages::sp_impl::DeviceDescription;
 use gateway_messages::sp_impl::SpHandler;
+use gateway_messages::CfpaPage;
 use gateway_messages::ComponentAction;
 use gateway_messages::ComponentDetails;
 use gateway_messages::DiscoverResponse;
@@ -33,6 +35,8 @@ use gateway_messages::IgnitionCommand;
 use gateway_messages::IgnitionState;
 use gateway_messages::MgsError;
 use gateway_messages::PowerState;
+use gateway_messages::RotRequest;
+use gateway_messages::RotResponse;
 use gateway_messages::RotSlotId;
 use gateway_messages::SpComponent;
 use gateway_messages::SpError;
@@ -118,10 +122,24 @@ impl SimulatedSp for Sidecar {
         self.rot.lock().unwrap().handle_deserialized(request)
     }

-    async fn last_update_data(&self) -> Option<Vec<u8>> {
+    async fn last_sp_update_data(&self) -> Option<Vec<u8>> {
         let handler = self.handler.as_ref()?;
         let handler = handler.lock().await;
-        handler.update_state.last_update_data()
+        handler.update_state.last_sp_update_data()
+    }
+
+    async fn last_rot_update_data(&self) -> Option<Vec<u8>> {
+        let handler = self.handler.as_ref()?;
+        let handler = handler.lock().await;
+        handler.update_state.last_rot_update_data()
+    }
+
+    async fn last_host_phase1_update_data(
+        &self,
+        _slot: u16,
+    ) -> Option<Vec<u8>> {
+        // sidecars do not have attached hosts
+        None
     }

     async fn current_update_status(&self) -> gateway_messages::UpdateStatus {
@@ -380,7 +398,7 @@ struct Handler {
     power_state: PowerState,
     update_state: SimSpUpdate,
-    reset_pending: bool,
+    reset_pending: Option<SpComponent>,

     // To simulate an SP reset, we should (after doing whatever housekeeping we
     // need to track the reset) intentionally _fail_ to respond to the request,
@@ -419,7 +437,7 @@
             rot_active_slot: RotSlotId::A,
             power_state: PowerState::A2,
             update_state: SimSpUpdate::default(),
-            reset_pending: false,
+            reset_pending: None,
             should_fail_to_respond_signal: None,
         }
     }
@@ -846,8 +864,9 @@
             "port" => ?port,
             "component" => ?component,
         );
-        if component == SpComponent::SP_ITSELF {
-            self.reset_pending = true;
+        if component == SpComponent::SP_ITSELF || component == SpComponent::ROT
+        {
+            self.reset_pending = Some(component);
             Ok(())
         } else {
             Err(SpError::RequestUnsupportedForComponent)
@@ -867,9 +886,9 @@
             "component" => ?component,
         );
         if component == SpComponent::SP_ITSELF {
-            if self.reset_pending {
+            if self.reset_pending == Some(SpComponent::SP_ITSELF) {
                 self.update_state.sp_reset();
-                self.reset_pending = false;
+                self.reset_pending = None;
                 if let Some(signal) = self.should_fail_to_respond_signal.take()
                 {
                     // Instruct `server::handle_request()` to _not_ respond to
@@ -880,6 +899,14 @@
             } else {
                 Err(SpError::ResetComponentTriggerWithoutPrepare)
             }
+        } else if component == SpComponent::ROT {
+            if self.reset_pending == Some(SpComponent::ROT) {
+                self.update_state.rot_reset();
+                self.reset_pending = None;
+                Ok(())
+            } else {
+                Err(SpError::ResetComponentTriggerWithoutPrepare)
+            }
         } else {
             Err(SpError::RequestUnsupportedForComponent)
         }
@@ -1101,7 +1128,7 @@
         static SP_VERS: &[u8] = b"0.0.1";

         static ROT_GITC: &[u8] = b"eeeeeeee";
-        static ROT_BORD: &[u8] = b"SimSidecarRot";
+        static ROT_BORD: &[u8] = SIM_ROT_BOARD.as_bytes();
         static ROT_NAME: &[u8] = b"SimSidecar";
         static ROT_VERS: &[u8] = b"0.0.1";

@@ -1134,10 +1161,18 @@
     fn read_rot(
         &mut self,
-        _request: gateway_messages::RotRequest,
-        _buf: &mut [u8],
-    ) -> std::result::Result<RotResponse, SpError> {
-        Err(SpError::RequestUnsupportedForSp)
+        request: RotRequest,
+        buf: &mut [u8],
+    ) -> std::result::Result<RotResponse, SpError> {
+        let dummy_page = match request {
+            RotRequest::ReadCmpa => "sidecar-cmpa",
+            RotRequest::ReadCfpa(CfpaPage::Active) => "sidecar-cfpa-active",
+            RotRequest::ReadCfpa(CfpaPage::Inactive) => "sidecar-cfpa-inactive",
+            RotRequest::ReadCfpa(CfpaPage::Scratch) => "sidecar-cfpa-scratch",
+        };
+        buf[..dummy_page.len()].copy_from_slice(dummy_page.as_bytes());
+        buf[dummy_page.len()..].fill(0);
+        Ok(RotResponse::Ok)
     }
 }
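Both simulators now record which component a reset was prepared for, so a trigger against a different component fails. A minimal standalone sketch of that prepare/trigger pairing, with stand-in types for the gateway_messages originals:

    #[derive(Clone, Copy, PartialEq, Eq)]
    enum Component {
        SpItself,
        Rot,
    }

    struct ResetState {
        pending: Option<Component>,
    }

    impl ResetState {
        fn prepare(&mut self, c: Component) {
            self.pending = Some(c);
        }

        // A trigger succeeds only if the same component was prepared first,
        // mirroring the ResetComponentTriggerWithoutPrepare error above.
        fn trigger(&mut self, c: Component) -> Result<(), &'static str> {
            if self.pending == Some(c) {
                self.pending = None;
                Ok(())
            } else {
                Err("ResetComponentTriggerWithoutPrepare")
            }
        }
    }

    fn main() {
        let mut state = ResetState { pending: None };
        assert!(state.trigger(Component::Rot).is_err()); // no prepare yet
        state.prepare(Component::Rot);
        assert!(state.trigger(Component::Rot).is_ok());
        assert!(state.trigger(Component::SpItself).is_err()); // consumed
    }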
diff --git a/sp-sim/src/update.rs b/sp-sim/src/update.rs
index e57659ca1a..0efa730a26 100644
--- a/sp-sim/src/update.rs
+++ b/sp-sim/src/update.rs
@@ -2,6 +2,7 @@
 // License, v. 2.0. If a copy of the MPL was not distributed with this
 // file, You can obtain one at https://mozilla.org/MPL/2.0/.

+use std::collections::BTreeMap;
 use std::io::Cursor;
 use std::mem;

@@ -13,12 +14,26 @@ use gateway_messages::UpdateInProgressStatus;

 pub(crate) struct SimSpUpdate {
     state: UpdateState,
-    last_update_data: Option<Box<[u8]>>,
+    last_sp_update_data: Option<Box<[u8]>>,
+    last_rot_update_data: Option<Box<[u8]>>,
+    last_host_phase1_update_data: BTreeMap<u16, Box<[u8]>>,
+    active_host_slot: Option<u16>,
 }

 impl Default for SimSpUpdate {
     fn default() -> Self {
-        Self { state: UpdateState::NotPrepared, last_update_data: None }
+        Self {
+            state: UpdateState::NotPrepared,
+            last_sp_update_data: None,
+            last_rot_update_data: None,
+            last_host_phase1_update_data: BTreeMap::new(),
+
+            // In the real SP, there is always _some_ active host slot. We could
+            // emulate that by always defaulting to slot 0, but instead we'll
+            // ensure any tests that expect to read or write a particular slot
+            // set that slot as active first.
+            active_host_slot: None,
+        }
     }
 }

@@ -38,9 +53,20 @@
             UpdateState::NotPrepared
             | UpdateState::Aborted(_)
             | UpdateState::Completed { .. } => {
+                let slot = if component == SpComponent::HOST_CPU_BOOT_FLASH {
+                    match self.active_host_slot {
+                        Some(slot) => slot,
+                        None => return Err(SpError::InvalidSlotForComponent),
+                    }
+                } else {
+                    // We don't manage SP or RoT slots, so just use 0
+                    0
+                };
+
                 self.state = UpdateState::Prepared {
                     component,
                     id,
+                    slot,
                     data: Cursor::new(vec![0u8; total_size].into_boxed_slice()),
                 };
                 Ok(())
@@ -58,7 +84,7 @@
         chunk_data: &[u8],
     ) -> Result<(), SpError> {
         match &mut self.state {
-            UpdateState::Prepared { component, id, data } => {
+            UpdateState::Prepared { component, id, slot, data } => {
                 // Ensure that the update ID and target component are correct.
                 if chunk.id != *id || chunk.component != *component {
                     return Err(SpError::InvalidUpdateId { sp_update_id: *id });
@@ -79,9 +105,17 @@
                 if data.position() == data.get_ref().len() as u64 {
                     let mut stolen = Cursor::new(Box::default());
                     mem::swap(data, &mut stolen);
+                    let data = stolen.into_inner();
+
+                    if *component == SpComponent::HOST_CPU_BOOT_FLASH {
+                        self.last_host_phase1_update_data
+                            .insert(*slot, data.clone());
+                    }
+
                     self.state = UpdateState::Completed {
+                        component: *component,
                         id: *id,
-                        data: stolen.into_inner(),
+                        data,
                     };
                 }

@@ -112,16 +146,48 @@
     }

     pub(crate) fn sp_reset(&mut self) {
-        self.last_update_data = match &self.state {
-            UpdateState::Completed { data, .. } => Some(data.clone()),
+        match &self.state {
+            UpdateState::Completed { data, component, .. } => {
+                if *component == SpComponent::SP_ITSELF {
+                    self.last_sp_update_data = Some(data.clone());
+                }
+            }
             UpdateState::NotPrepared
             | UpdateState::Prepared { .. }
-            | UpdateState::Aborted(_) => None,
-        };
+            | UpdateState::Aborted(_) => (),
+        }
     }

-    pub(crate) fn last_update_data(&self) -> Option<Box<[u8]>> {
-        self.last_update_data.clone()
+    pub(crate) fn rot_reset(&mut self) {
+        match &self.state {
+            UpdateState::Completed { data, component, .. } => {
+                if *component == SpComponent::ROT {
+                    self.last_rot_update_data = Some(data.clone());
+                }
+            }
+            UpdateState::NotPrepared
+            | UpdateState::Prepared { .. }
+            | UpdateState::Aborted(_) => (),
+        }
+    }
+
+    pub(crate) fn last_sp_update_data(&self) -> Option<Box<[u8]>> {
+        self.last_sp_update_data.clone()
+    }
+
+    pub(crate) fn last_rot_update_data(&self) -> Option<Box<[u8]>> {
+        self.last_rot_update_data.clone()
+    }
+
+    pub(crate) fn last_host_phase1_update_data(
+        &self,
+        slot: u16,
+    ) -> Option<Box<[u8]>> {
+        self.last_host_phase1_update_data.get(&slot).cloned()
+    }
+
+    pub(crate) fn set_active_host_slot(&mut self, slot: u16) {
+        self.active_host_slot = Some(slot);
     }
 }

@@ -130,6 +196,7 @@ enum UpdateState {
     Prepared {
         component: SpComponent,
         id: UpdateId,
+        slot: u16,
         // data would ordinarily be a Cursor<Vec<u8>>, but that can grow and
        // reallocate. We want to ensure that we don't receive any more data
        // than originally promised, so use a Cursor<Box<[u8]>> to ensure that
@@ -138,6 +205,7 @@
     },
     Aborted(UpdateId),
     Completed {
+        component: SpComponent,
         id: UpdateId,
         data: Box<[u8]>,
     },
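Completed host boot flash updates are now remembered per slot, so a test can flash one slot and read it back without disturbing the other. A standalone sketch of that BTreeMap bookkeeping; this is not the crate's actual API surface, just the data-structure idea:

    use std::collections::BTreeMap;

    fn main() {
        // Keyed by host phase-1 slot, as in last_host_phase1_update_data above.
        let mut last: BTreeMap<u16, Box<[u8]>> = BTreeMap::new();
        last.insert(1, vec![0xde, 0xad].into_boxed_slice());

        // Slot 0 was never written, so a lookup misses; slot 1 round-trips.
        assert!(last.get(&0).is_none());
        assert_eq!(last.get(&1).map(|d| d.len()), Some(2));
    }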
diff --git a/test-utils/src/certificates.rs b/test-utils/src/certificates.rs
index ab84f30b15..54da013e0c 100644
--- a/test-utils/src/certificates.rs
+++ b/test-utils/src/certificates.rs
@@ -79,10 +79,10 @@ impl CertificateChain {
 fn tls_cert_to_pem(certs: &Vec<rustls::Certificate>) -> String {
     let mut serialized_certs = String::new();
     for cert in certs {
-        let encoded_cert = pem::encode(&pem::Pem {
-            tag: "CERTIFICATE".to_string(),
-            contents: cert.0.clone(),
-        });
+        let encoded_cert = pem::encode(&pem::Pem::new(
+            "CERTIFICATE".to_string(),
+            cert.0.clone(),
+        ));

         serialized_certs.push_str(&encoded_cert);
     }
diff --git a/tools/ci_download_softnpu_machinery b/tools/ci_download_softnpu_machinery
index 3efb030063..e147238673 100755
--- a/tools/ci_download_softnpu_machinery
+++ b/tools/ci_download_softnpu_machinery
@@ -15,7 +15,7 @@ OUT_DIR="out/npuzone"

 # Pinned commit for softnpu ASIC simulator
 SOFTNPU_REPO="softnpu"
-SOFTNPU_COMMIT="dec63e67156fe6e958991bbfa090629868115ab5"
+SOFTNPU_COMMIT="dbab082dfa89da5db5ca2325c257089d2f130092"

 # This is the softnpu ASIC simulator
 echo "fetching npuzone"
diff --git a/tools/console_version b/tools/console_version
index 811620e9e7..725bda0ee9 100644
--- a/tools/console_version
+++ b/tools/console_version
@@ -1,2 +1,2 @@
-COMMIT="ae8218df707360a902133f4a96b48a3b5a62a09e"
-SHA2="ae35b991d3ff835a59b59126298790cb7431a282b25ba4add4e7fb6ea6b98989"
+COMMIT="1802c2859f40712017ab89e72740e39bfd59320b"
+SHA2="34768a895f187a6ed263c0050c42084f3907c331b547362871c2ce330e9d08d1"
diff --git a/tools/create_virtual_hardware.sh b/tools/create_virtual_hardware.sh
index 884d356222..7721fb1c0f 100755
--- a/tools/create_virtual_hardware.sh
+++ b/tools/create_virtual_hardware.sh
@@ -63,8 +63,8 @@ function ensure_softnpu_zone {
             --omicron-zone \
             --ports sc0_0,tfportrear0_0 \
             --ports sc0_1,tfportqsfp0_0 \
-            --sidecar-lite-commit f0585a29fb0285f7a1220c1118856b0e5c1f75c5 \
-            --softnpu-commit dec63e67156fe6e958991bbfa090629868115ab5
+            --sidecar-lite-commit 45ed98fea5824feb4d42f45bbf218e597dc9fc58 \
+            --softnpu-commit dbab082dfa89da5db5ca2325c257089d2f130092
     }
     "$SOURCE_DIR"/scrimlet/softnpu-init.sh
     success "softnpu zone exists"
diff --git a/tools/dendrite_openapi_version b/tools/dendrite_openapi_version
index ba4b5a5722..c2afe5ca87 100644
--- a/tools/dendrite_openapi_version
+++ b/tools/dendrite_openapi_version
@@ -1,2 +1,2 @@
-COMMIT="8ff834e7d0a6adb263240edd40537f2c0768f1a4"
+COMMIT="45e05b2a90203d84510e0c8e902d9449b09ffd9b"
 SHA2="07d115bfa8498a8015ca2a8447efeeac32e24aeb25baf3d5e2313216e11293c0"
diff --git a/tools/dendrite_stub_checksums b/tools/dendrite_stub_checksums
index 619a6bf287..2b4f0e7555 100644
--- a/tools/dendrite_stub_checksums
+++ b/tools/dendrite_stub_checksums
@@ -1,3 +1,3 @@
-CIDL_SHA256_ILLUMOS="c00e79f55e0bdf048069b2d18a4d009ddfef46e7e5d846887cf96e843a8884bd"
-CIDL_SHA256_LINUX_DPD="b5d829b4628759ac374106f3c56c29074b29577fd0ff72f61c3b8289fea430fe"
-CIDL_SHA256_LINUX_SWADM="afc68828f54dc57b32dc1556fc588baeab12341c30e96cc0fadb49f401b4b48f"
+CIDL_SHA256_ILLUMOS="b14e73c8091a004472f9825b9b81b2c685bc5a48801704380a80481499060ad9"
+CIDL_SHA256_LINUX_DPD="a0d92b5007826b119c68fdaef753e33b125740ec7b3e771bfa6b3aa8d9fcb8cc"
+CIDL_SHA256_LINUX_SWADM="13387460db5b57e6ffad6c0b8877af32cc6d53fecc4a1a0910143c0446d39a38"
diff --git a/tools/generate-wicketd-api.sh b/tools/generate-wicketd-api.sh
new file mode 100755
index 0000000000..f1af33aecc
--- /dev/null
+++ b/tools/generate-wicketd-api.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+./target/debug/wicketd openapi > openapi/wicketd.json
diff --git a/tools/install_builder_prerequisites.sh b/tools/install_builder_prerequisites.sh
index d3ecd8eaa8..1ce133dff3 100755
--- a/tools/install_builder_prerequisites.sh
+++ b/tools/install_builder_prerequisites.sh
@@ -131,6 +131,8 @@ function install_packages {
       "library/libxmlsec1"
       # "bindgen leverages libclang to preprocess, parse, and type check C and C++ header files."
       "pkg:/ooce/developer/clang-$CLANGVER"
+      "system/library/gcc-runtime"
+      "system/library/g++-runtime"
     )

     # Install/update the set of packages.
diff --git a/tools/maghemite_ddm_openapi_version b/tools/maghemite_ddm_openapi_version
index 76bdb9ca92..37c099d7f5 100644
--- a/tools/maghemite_ddm_openapi_version
+++ b/tools/maghemite_ddm_openapi_version
@@ -1,2 +1,2 @@
-COMMIT="12b392be94ff93abc3017bf2610a3b18e2174a2d"
+COMMIT="2fd39b75df696961e5ea190c7d74dd91f4849cd3"
 SHA2="9737906555a60911636532f00f1dc2866dc7cd6553beb106e9e57beabad41cdf"
diff --git a/tools/maghemite_mg_openapi_version b/tools/maghemite_mg_openapi_version
index d6d1788cbc..329c05fc42 100644
--- a/tools/maghemite_mg_openapi_version
+++ b/tools/maghemite_mg_openapi_version
@@ -1,2 +1,2 @@
-COMMIT="12b392be94ff93abc3017bf2610a3b18e2174a2d"
-SHA2="6c1fab8d5028b52a161d8bf02aae47844699cdc5f7b28e1ac519fc4ec1ab3971"
+COMMIT="2fd39b75df696961e5ea190c7d74dd91f4849cd3"
+SHA2="931efa310d972b1f8afba2308751fc6a2035afbaebba77b3a40a8358c123ba3c"
diff --git a/tools/maghemite_mgd_checksums b/tools/maghemite_mgd_checksums
index 9657147159..1d3cf98f94 100644
--- a/tools/maghemite_mgd_checksums
+++ b/tools/maghemite_mgd_checksums
@@ -1,2 +1,2 @@
-CIDL_SHA256="c4a7a626c84a28de3d2c6bfd85592bda2abad8cf5b41b2ce90b9c03904ccd3df"
-MGD_LINUX_SHA256="81231b30872fa1c581aa22c101f32d11f33f335758ac1fd2653436fbc7aab93f"
\ No newline at end of file
+CIDL_SHA256="802636775fa77dc6eec193e65fde87e403f6a11531745d47ef5e7ff13b242890"
+MGD_LINUX_SHA256="1bcadfd700902e3640843e0bb53d3defdbcd8d86c3279efa0953ae8d6437e2b0"
\ No newline at end of file
diff --git a/tools/opte_version b/tools/opte_version
index 0a79a6aba9..fa0ef8d768 100644
--- a/tools/opte_version
+++ b/tools/opte_version
@@ -1 +1 @@
-0.25.183
+0.27.199
diff --git a/tufaceous-lib/Cargo.toml b/tufaceous-lib/Cargo.toml
index bcfcee6b9c..aa9a26e3bb 100644
--- a/tufaceous-lib/Cargo.toml
+++ b/tufaceous-lib/Cargo.toml
@@ -7,6 +7,7 @@ publish = false

 [dependencies]
 anyhow = { workspace = true, features = ["backtrace"] }
+async-trait.workspace = true
 buf-list.workspace = true
 bytes.workspace = true
 bytesize = { workspace = true, features = ["serde"] }
@@ -16,6 +17,7 @@ chrono.workspace = true
 debug-ignore.workspace = true
 flate2.workspace = true
 fs-err.workspace = true
+futures.workspace = true
 hex.workspace = true
 hubtools.workspace = true
 itertools.workspace = true
@@ -28,11 +30,13 @@ serde_path_to_error.workspace = true
 sha2.workspace = true
 slog.workspace = true
 tar.workspace = true
+tokio.workspace = true
 toml.workspace = true
 tough.workspace = true
-url = "2.4.1"
+url = "2.5.0"
 zip.workspace = true
 omicron-workspace-hack.workspace = true

 [dev-dependencies]
 omicron-test-utils.workspace = true
+tokio = { workspace = true, features = ["test-util"] }
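The dependency additions above (async-trait, futures, tokio) support the move to tough's async API in the tufaceous-lib files that follow. A minimal sketch of driving the now-async repository entry points from a tokio runtime; the repo path is hypothetical, and OmicronRepo is assumed to be importable from the crate root:

    use camino::Utf8Path;
    use tufaceous_lib::OmicronRepo;

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        let log = slog::Logger::root(slog::Discard, slog::o!());
        // Both loading the repo and reading its artifacts document now await
        // tough's async I/O (see repository.rs below).
        let repo =
            OmicronRepo::load_untrusted(&log, Utf8Path::new("/tmp/repo")).await?;
        let _artifacts = repo.read_artifacts().await?;
        Ok(())
    }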
diff --git a/tufaceous-lib/src/artifact.rs b/tufaceous-lib/src/artifact.rs
index 56f3e34ecb..23cf31e8c3 100644
--- a/tufaceous-lib/src/artifact.rs
+++ b/tufaceous-lib/src/artifact.rs
@@ -127,7 +127,7 @@ pub struct HostPhaseImages {
 }

 impl HostPhaseImages {
-    pub fn extract<R: io::Read>(reader: R) -> Result<Self> {
+    pub fn extract<R: io::BufRead>(reader: R) -> Result<Self> {
         let mut phase_1 = Vec::new();
         let mut phase_2 = Vec::new();
         Self::extract_into(
@@ -138,13 +138,12 @@
         Ok(Self { phase_1: phase_1.into(), phase_2: phase_2.into() })
     }

-    pub fn extract_into<R: io::Read, W: io::Write>(
+    pub fn extract_into<R: io::BufRead, W: io::Write>(
         reader: R,
         phase_1: W,
         phase_2: W,
     ) -> Result<()> {
-        let uncompressed =
-            flate2::bufread::GzDecoder::new(BufReader::new(reader));
+        let uncompressed = flate2::bufread::GzDecoder::new(reader);
         let mut archive = tar::Archive::new(uncompressed);

         let mut oxide_json_found = false;
@@ -248,7 +247,7 @@ pub struct RotArchives {
 }

 impl RotArchives {
-    pub fn extract<R: io::Read>(reader: R) -> Result<Self> {
+    pub fn extract<R: io::BufRead>(reader: R) -> Result<Self> {
         let mut archive_a = Vec::new();
         let mut archive_b = Vec::new();
         Self::extract_into(
@@ -259,13 +258,12 @@
         Ok(Self { archive_a: archive_a.into(), archive_b: archive_b.into() })
     }

-    pub fn extract_into<R: io::Read, W: io::Write>(
+    pub fn extract_into<R: io::BufRead, W: io::Write>(
         reader: R,
         archive_a: W,
         archive_b: W,
     ) -> Result<()> {
-        let uncompressed =
-            flate2::bufread::GzDecoder::new(BufReader::new(reader));
+        let uncompressed = flate2::bufread::GzDecoder::new(reader);
         let mut archive = tar::Archive::new(uncompressed);

         let mut oxide_json_found = false;
diff --git a/tufaceous-lib/src/assemble/build.rs b/tufaceous-lib/src/assemble/build.rs
index 081e2e10d5..4cb636c9d3 100644
--- a/tufaceous-lib/src/assemble/build.rs
+++ b/tufaceous-lib/src/assemble/build.rs
@@ -44,7 +44,7 @@ impl OmicronRepoAssembler {
         self
     }

-    pub fn build(&self) -> Result<()> {
+    pub async fn build(&self) -> Result<()> {
         let (build_dir, is_temp) = match &self.build_dir {
             Some(dir) => (dir.clone(), false),
             None => {
@@ -61,7 +61,7 @@
         slog::info!(self.log, "assembling repository in `{build_dir}`");

-        match self.build_impl(&build_dir) {
+        match self.build_impl(&build_dir).await {
             Ok(()) => {
                 if is_temp {
                     slog::debug!(self.log, "assembly successful, cleaning up");
@@ -92,15 +92,17 @@
         Ok(())
     }

-    fn build_impl(&self, build_dir: &Utf8Path) -> Result<()> {
+    async fn build_impl(&self, build_dir: &Utf8Path) -> Result<()> {
         let mut repository = OmicronRepo::initialize(
             &self.log,
             build_dir,
             self.manifest.system_version.clone(),
             self.keys.clone(),
             self.expiry,
-        )?
-        .into_editor()?;
+        )
+        .await?
+        .into_editor()
+        .await?;

         // Add all the artifacts.
         for (kind, entries) in &self.manifest.artifacts {
@@ -118,10 +120,11 @@
         }

         // Write out the repository.
-        repository.sign_and_finish(self.keys.clone(), self.expiry)?;
+        repository.sign_and_finish(self.keys.clone(), self.expiry).await?;

         // Now reopen the repository to archive it into a zip file.
         let repo2 = OmicronRepo::load_untrusted(&self.log, build_dir)
+            .await
             .context("error reopening repository to archive")?;
         repo2
             .archive(&self.output_path)
diff --git a/tufaceous-lib/src/key.rs b/tufaceous-lib/src/key.rs
index 8a5054b331..96282ee377 100644
--- a/tufaceous-lib/src/key.rs
+++ b/tufaceous-lib/src/key.rs
@@ -5,6 +5,7 @@ use ring::rand::SecureRandom;
 use ring::signature::Ed25519KeyPair;
 use std::fmt::Display;
 use std::str::FromStr;
+use tough::async_trait;
 use tough::key_source::KeySource;
 use tough::sign::{Sign, SignKeyPair};

@@ -38,30 +39,32 @@ impl Key {
     }
 }

+#[async_trait]
 impl Sign for Key {
     fn tuf_key(&self) -> tough::schema::key::Key {
         self.as_sign().tuf_key()
     }

-    fn sign(
+    async fn sign(
         &self,
         msg: &[u8],
-        rng: &dyn SecureRandom,
+        rng: &(dyn SecureRandom + Sync),
     ) -> Result<Vec<u8>, Box<dyn std::error::Error + Send + Sync + 'static>> {
-        self.as_sign().sign(msg, rng)
+        self.as_sign().sign(msg, rng).await
     }
 }

+#[async_trait]
 impl KeySource for Key {
-    fn as_sign(
+    async fn as_sign(
         &self,
     ) -> Result<Box<dyn Sign>, Box<dyn std::error::Error + Send + Sync + 'static>> {
         Ok(Box::new(self.clone()))
     }

-    fn write(
+    async fn write(
         &self,
         _value: &str,
         _key_id_hex: &str,
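key.rs above adopts tough's now-async Sign and KeySource traits via #[async_trait]. A minimal self-contained illustration of that pattern; the Signer trait here is invented for the example and is not tough's actual trait:

    use async_trait::async_trait;

    // The macro rewrites async trait methods into boxed futures, so trait
    // objects (like tough's Box<dyn Sign>) keep working.
    #[async_trait]
    trait Signer {
        async fn sign(&self, msg: &[u8]) -> Vec<u8>;
    }

    struct Passthrough;

    #[async_trait]
    impl Signer for Passthrough {
        async fn sign(&self, msg: &[u8]) -> Vec<u8> {
            msg.to_vec()
        }
    }

    #[tokio::main]
    async fn main() {
        let s: Box<dyn Signer + Send + Sync> = Box::new(Passthrough);
        assert_eq!(s.sign(b"tuf").await, b"tuf".to_vec());
    }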
-    pub fn load_untrusted_ignore_expiration(
+    pub async fn load_untrusted_ignore_expiration(
         log: &slog::Logger,
         repo_path: &Utf8Path,
     ) -> Result<Self> {
         Self::load_untrusted_impl(log, repo_path, ExpirationEnforcement::Unsafe)
+            .await
     }
 
-    fn load_untrusted_impl(
+    async fn load_untrusted_impl(
         log: &slog::Logger,
         repo_path: &Utf8Path,
         exp: ExpirationEnforcement,
     ) -> Result<Self> {
         let log = log.new(slog::o!("component" => "OmicronRepo"));
         let repo_path = repo_path.canonicalize_utf8()?;
+        let root_json = repo_path.join("metadata").join("1.root.json");
+        let root = tokio::fs::read(&root_json)
+            .await
+            .with_context(|| format!("error reading from {root_json}"))?;
 
         let repo = RepositoryLoader::new(
-            File::open(repo_path.join("metadata").join("1.root.json"))?,
+            &root,
             Url::from_file_path(repo_path.join("metadata"))
                 .expect("the canonical path is not absolute?"),
             Url::from_file_path(repo_path.join("targets"))
                 .expect("the canonical path is not absolute?"),
         )
         .expiration_enforcement(exp)
-        .load()?;
+        .load()
+        .await?;
 
         Ok(Self { log, repo, repo_path })
     }
@@ -107,12 +118,17 @@ impl OmicronRepo {
     }
 
     /// Reads the artifacts document from the repo.
-    pub fn read_artifacts(&self) -> Result<ArtifactsDocument> {
+    pub async fn read_artifacts(&self) -> Result<ArtifactsDocument> {
         let reader = self
             .repo
-            .read_target(&"artifacts.json".try_into()?)?
+            .read_target(&"artifacts.json".try_into()?)
+            .await?
             .ok_or_else(|| anyhow!("artifacts.json should be present"))?;
-        serde_json::from_reader(reader)
+        let buf_list = reader
+            .try_collect::<BufList>()
+            .await
+            .context("error reading from artifacts.json")?;
+        serde_json::from_reader(buf_list::Cursor::new(&buf_list))
             .context("error deserializing artifacts.json")
     }
 
@@ -177,8 +193,8 @@ impl OmicronRepo {
 
     /// Converts `self` into an `OmicronRepoEditor`, which can be used to perform
     /// modifications to the repository.
-    pub fn into_editor(self) -> Result<OmicronRepoEditor> {
-        OmicronRepoEditor::new(self)
+    pub async fn into_editor(self) -> Result<OmicronRepoEditor> {
+        OmicronRepoEditor::new(self).await
     }
 
     /// Prepends the target digest to the name if using consistent snapshots. Returns both the
@@ -210,8 +226,8 @@ pub struct OmicronRepoEditor {
 }
 
 impl OmicronRepoEditor {
-    fn new(repo: OmicronRepo) -> Result<Self> {
-        let artifacts = repo.read_artifacts()?;
+    async fn new(repo: OmicronRepo) -> Result<Self> {
+        let artifacts = repo.read_artifacts().await?;
 
         let existing_target_names = repo
             .repo
@@ -226,7 +242,8 @@ impl OmicronRepoEditor {
                 .join("metadata")
                 .join(format!("{}.root.json", repo.repo.root().signed.version)),
             repo.repo,
-        )?;
+        )
+        .await?;
 
         Ok(Self {
             editor,
@@ -236,7 +253,7 @@ impl OmicronRepoEditor {
         })
     }
 
-    fn initialize(
+    async fn initialize(
         repo_path: Utf8PathBuf,
         root: SignedRole<Root>,
         system_version: SemverVersion,
@@ -250,7 +267,7 @@ impl OmicronRepoEditor {
         fs::create_dir_all(&targets_dir)?;
         fs::write(&root_path, root.buffer())?;
 
-        let editor = RepositoryEditor::new(&root_path)?;
+        let editor = RepositoryEditor::new(&root_path).await?;
 
         Ok(Self {
             editor,
@@ -297,7 +314,7 @@ impl OmicronRepoEditor {
     }
 
     /// Consumes self, signing the repository and writing out this repository to disk.
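Since `read_target` now yields an optional stream of `Bytes` chunks rather than a blocking reader, the artifacts document is collected into a `buf_list::BufList` and deserialized through a cursor over the chunk list, avoiding a copy into one contiguous buffer. The same pattern in isolation (a sketch; `collect_json` is hypothetical and uses `std::io::Error` where the real code has tough's error type):

    use buf_list::BufList;
    use bytes::Bytes;
    use futures::{Stream, TryStreamExt};

    // Hypothetical helper: collect a chunked byte stream, then deserialize.
    async fn collect_json<T: serde::de::DeserializeOwned>(
        stream: impl Stream<Item = std::io::Result<Bytes>>,
    ) -> anyhow::Result<T> {
        // BufList keeps the chunks as-is; no copy into one contiguous Vec.
        let buf_list = stream.try_collect::<BufList>().await?;
        // buf_list::Cursor implements std::io::Read over the chunk list.
        Ok(serde_json::from_reader(buf_list::Cursor::new(&buf_list))?)
    }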
- pub fn sign_and_finish( + pub async fn sign_and_finish( mut self, keys: Vec, expiry: DateTime, @@ -313,9 +330,11 @@ impl OmicronRepoEditor { let signed = self .editor .sign(&crate::key::boxed_keys(keys)) + .await .context("error signing keys")?; signed .write(self.repo_path.join("metadata")) + .await .context("error writing repository")?; Ok(()) } @@ -346,8 +365,8 @@ mod tests { use chrono::Days; use omicron_test_utils::dev::test_setup_log; - #[test] - fn reject_artifacts_with_the_same_filename() { + #[tokio::test] + async fn reject_artifacts_with_the_same_filename() { let logctx = test_setup_log("reject_artifacts_with_the_same_filename"); let tempdir = Utf8TempDir::new().unwrap(); let mut repo = OmicronRepo::initialize( @@ -357,8 +376,10 @@ mod tests { vec![Key::generate_ed25519()], Utc::now() + Days::new(1), ) + .await .unwrap() .into_editor() + .await .unwrap(); // Targets are uniquely identified by their kind/name/version triple; diff --git a/tufaceous-lib/src/root.rs b/tufaceous-lib/src/root.rs index 8ecd1cdf9d..cf5f7129c5 100644 --- a/tufaceous-lib/src/root.rs +++ b/tufaceous-lib/src/root.rs @@ -8,7 +8,7 @@ use tough::editor::signed::SignedRole; use tough::schema::{KeyHolder, RoleKeys, RoleType, Root}; use tough::sign::Sign; -pub(crate) fn new_root( +pub(crate) async fn new_root( keys: Vec, expires: DateTime, ) -> Result> { @@ -47,5 +47,6 @@ pub(crate) fn new_root( &KeyHolder::Root(root), &keys, &SystemRandom::new(), - )?) + ) + .await?) } diff --git a/tufaceous/Cargo.toml b/tufaceous/Cargo.toml index e48513e24c..81248af57d 100644 --- a/tufaceous/Cargo.toml +++ b/tufaceous/Cargo.toml @@ -17,6 +17,7 @@ slog.workspace = true slog-async.workspace = true slog-envlogger.workspace = true slog-term.workspace = true +tokio.workspace = true tufaceous-lib.workspace = true omicron-workspace-hack.workspace = true @@ -27,6 +28,7 @@ fs-err.workspace = true omicron-test-utils.workspace = true predicates.workspace = true tempfile.workspace = true +tokio = { workspace = true, features = ["test-util"] } [[test]] name = "manifest-tests" diff --git a/tufaceous/src/dispatch.rs b/tufaceous/src/dispatch.rs index ea0db63fce..fc86c948df 100644 --- a/tufaceous/src/dispatch.rs +++ b/tufaceous/src/dispatch.rs @@ -36,7 +36,7 @@ pub struct Args { impl Args { /// Executes these arguments. - pub fn exec(self, log: &slog::Logger) -> Result<()> { + pub async fn exec(self, log: &slog::Logger) -> Result<()> { let repo_path = match self.repo { Some(repo) => repo, None => std::env::current_dir()?.try_into()?, @@ -52,7 +52,8 @@ impl Args { system_version, keys, self.expiry, - )?; + ) + .await?; slog::info!( log, "Initialized TUF repository in {}", @@ -87,8 +88,9 @@ impl Args { let repo = OmicronRepo::load_untrusted_ignore_expiration( &log, &repo_path, - )?; - let mut editor = repo.into_editor()?; + ) + .await?; + let mut editor = repo.into_editor().await?; let new_artifact = AddArtifact::from_path(kind, name, version, path)?; @@ -96,7 +98,7 @@ impl Args { editor .add_artifact(&new_artifact) .context("error adding artifact")?; - editor.sign_and_finish(self.keys, self.expiry)?; + editor.sign_and_finish(self.keys, self.expiry).await?; println!( "added {} {}, version {}", new_artifact.kind(), @@ -113,7 +115,8 @@ impl Args { let repo = OmicronRepo::load_untrusted_ignore_expiration( &log, &repo_path, - )?; + ) + .await?; repo.archive(&output_path)?; Ok(()) @@ -124,13 +127,14 @@ impl Args { // Now load the repository and ensure it's valid. 
let repo = OmicronRepo::load_untrusted(&log, &dest) + .await .with_context(|| { format!( "error loading extracted repository at `{dest}` \ (extracted files are still available)" ) })?; - repo.read_artifacts().with_context(|| { + repo.read_artifacts().await.with_context(|| { format!( "error loading artifacts.json from extracted archive \ at `{dest}`" @@ -169,7 +173,7 @@ impl Args { assembler.set_build_dir(dir); } - assembler.build()?; + assembler.build().await?; Ok(()) } diff --git a/tufaceous/src/main.rs b/tufaceous/src/main.rs index 30832cffbf..014817ee53 100644 --- a/tufaceous/src/main.rs +++ b/tufaceous/src/main.rs @@ -7,10 +7,11 @@ use clap::Parser; use slog::Drain; use tufaceous::Args; -fn main() -> Result<()> { +#[tokio::main] +async fn main() -> Result<()> { let log = setup_log(); let args = Args::parse(); - args.exec(&log) + args.exec(&log).await } fn setup_log() -> slog::Logger { diff --git a/tufaceous/tests/integration-tests/command_tests.rs b/tufaceous/tests/integration-tests/command_tests.rs index 73c94572eb..72c3a1a13a 100644 --- a/tufaceous/tests/integration-tests/command_tests.rs +++ b/tufaceous/tests/integration-tests/command_tests.rs @@ -14,8 +14,8 @@ use omicron_test_utils::dev::test_setup_log; use predicates::prelude::*; use tufaceous_lib::{Key, OmicronRepo}; -#[test] -fn test_init_and_add() -> Result<()> { +#[tokio::test] +async fn test_init_and_add() -> Result<()> { let logctx = test_setup_log("test_init_and_add"); let tempdir = tempfile::tempdir().unwrap(); let key = Key::generate_ed25519(); @@ -54,9 +54,9 @@ fn test_init_and_add() -> Result<()> { // Now read the repository and ensure the list of expected artifacts. let repo_path: Utf8PathBuf = tempdir.path().join("repo").try_into()?; - let repo = OmicronRepo::load_untrusted(&logctx.log, &repo_path)?; + let repo = OmicronRepo::load_untrusted(&logctx.log, &repo_path).await?; - let artifacts = repo.read_artifacts()?; + let artifacts = repo.read_artifacts().await?; assert_eq!( artifacts.artifacts.len(), 2, diff --git a/update-engine/examples/update-engine-basic/display.rs b/update-engine/examples/update-engine-basic/display.rs index 122777211b..891bdce6d3 100644 --- a/update-engine/examples/update-engine-basic/display.rs +++ b/update-engine/examples/update-engine-basic/display.rs @@ -88,6 +88,7 @@ async fn display_group( slog::info!(log, "setting up display"); let mut display = GroupDisplay::new( + log, [ (GroupDisplayKey::Example, "example"), (GroupDisplayKey::Other, "other"), diff --git a/update-engine/src/buffer.rs b/update-engine/src/buffer.rs index 6e0e66d6d0..36a0626963 100644 --- a/update-engine/src/buffer.rs +++ b/update-engine/src/buffer.rs @@ -1627,6 +1627,16 @@ pub enum TerminalKind { Aborted, } +impl fmt::Display for TerminalKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Completed => write!(f, "completed"), + Self::Failed => write!(f, "failed"), + Self::Aborted => write!(f, "aborted"), + } + } +} + impl ExecutionStatus { /// Returns the terminal status and the total amount of time elapsed, or /// None if the execution has not reached a terminal state. 
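With `Args::exec` now async, the binary needs a runtime, and `#[tokio::main]` is shorthand for building one and blocking on the async body. Approximately what the attribute expands to (a sketch, not the generated code verbatim):

    fn main() -> anyhow::Result<()> {
        // Roughly what #[tokio::main] generates: build a multi-threaded
        // runtime and block on the async body.
        tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()?
            .block_on(async {
                // `args.exec(&log).await` lives here in the real binary.
                Ok(())
            })
    }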
@@ -1671,17 +1681,13 @@ mod tests { use std::collections::HashSet; use anyhow::{bail, ensure, Context}; - use futures::StreamExt; use indexmap::IndexSet; use omicron_test_utils::dev::test_setup_log; use serde::{de::IntoDeserializer, Deserialize}; - use tokio::sync::mpsc; - use tokio_stream::wrappers::ReceiverStream; use crate::{ - events::{ProgressCounter, ProgressUnits, StepProgress}, - test_utils::TestSpec, - StepContext, StepSuccess, UpdateEngine, + events::ProgressCounter, + test_utils::{generate_test_events, GenerateTestEventsKind, TestSpec}, }; use super::*; @@ -1689,108 +1695,11 @@ mod tests { #[tokio::test] async fn test_buffer() { let logctx = test_setup_log("test_buffer"); - // The channel is big enough to contain all possible events. - let (sender, receiver) = mpsc::channel(512); - let engine: UpdateEngine = - UpdateEngine::new(&logctx.log, sender); - - engine - .new_step("foo".to_owned(), 1, "Step 1", move |_cx| async move { - StepSuccess::new(()).into() - }) - .register(); - - engine - .new_step("bar".to_owned(), 2, "Step 2", move |cx| async move { - for _ in 0..20 { - cx.send_progress(StepProgress::with_current_and_total( - 5, - 20, - ProgressUnits::BYTES, - Default::default(), - )) - .await; - - cx.send_progress(StepProgress::reset( - Default::default(), - "reset step 2", - )) - .await; - - cx.send_progress(StepProgress::retry("retry step 2")).await; - } - StepSuccess::new(()).into() - }) - .register(); - - engine - .new_step( - "nested".to_owned(), - 3, - "Step 3 (this is nested)", - move |parent_cx| async move { - parent_cx - .with_nested_engine(|engine| { - define_nested_engine(&parent_cx, engine); - Ok(()) - }) - .await - .expect_err("this is expected to fail"); - - StepSuccess::new(()).into() - }, - ) - .register(); - - let log = logctx.log.clone(); - engine - .new_step( - "remote-nested".to_owned(), - 20, - "Step 4 (remote nested)", - move |cx| async move { - let (sender, mut receiver) = mpsc::channel(16); - let mut engine = UpdateEngine::new(&log, sender); - define_remote_nested_engine(&mut engine, 20); - - let mut buffer = EventBuffer::default(); - - let mut execute_fut = std::pin::pin!(engine.execute()); - let mut execute_done = false; - loop { - tokio::select! { - res = &mut execute_fut, if !execute_done => { - res.expect("remote nested engine completed successfully"); - execute_done = true; - } - Some(event) = receiver.recv() => { - // Generate complete reports to ensure deduping - // happens within StepContexts. - buffer.add_event(event); - cx.send_nested_report(buffer.generate_report()).await?; - } - else => { - break; - } - } - } - - StepSuccess::new(()).into() - }, - ) - .register(); - - // The step index here (100) is large enough to be higher than all nested - // steps. 
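The test bodies removed here are not deleted outright: they reappear in `test_utils.rs` as `generate_test_events` (see that diff further down), and this test keeps only the call plus a drain of the event channel. The channel-draining idiom both versions rely on, in isolation (a toy sketch, not the engine's types):

    use futures::StreamExt;
    use tokio::sync::mpsc;
    use tokio_stream::wrappers::ReceiverStream;

    #[tokio::main]
    async fn main() {
        let (sender, receiver) = mpsc::channel::<u32>(16);
        tokio::spawn(async move {
            for i in 0..4 {
                sender.send(i).await.unwrap();
            }
            // Dropping the sender here is what terminates the stream below.
        });
        // ReceiverStream adapts the receiver so StreamExt::collect can drain
        // every event that was sent before the channel closed.
        let events: Vec<u32> = ReceiverStream::new(receiver).collect().await;
        assert_eq!(events, vec![0, 1, 2, 3]);
    }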
- engine - .new_step("baz".to_owned(), 100, "Step 5", move |_cx| async move { - StepSuccess::new(()).into() - }) - .register(); - - engine.execute().await.expect("execution successful"); - let generated_events: Vec<_> = - ReceiverStream::new(receiver).collect().await; + let generated_events = generate_test_events( + &logctx.log, + GenerateTestEventsKind::Completed, + ) + .await; let test_cx = BufferTestContext::new(generated_events); @@ -2417,71 +2326,6 @@ mod tests { } } - fn define_nested_engine<'a>( - parent_cx: &'a StepContext, - engine: &mut UpdateEngine<'a, TestSpec>, - ) { - engine - .new_step( - "nested-foo".to_owned(), - 4, - "Nested step 1", - move |cx| async move { - parent_cx - .send_progress(StepProgress::with_current_and_total( - 1, - 3, - "steps", - Default::default(), - )) - .await; - cx.send_progress( - StepProgress::progress(Default::default()), - ) - .await; - StepSuccess::new(()).into() - }, - ) - .register(); - - engine - .new_step::<_, _, ()>( - "nested-bar".to_owned(), - 5, - "Nested step 2 (fails)", - move |cx| async move { - // This is used by NestedProgressCheck below. - parent_cx - .send_progress(StepProgress::with_current_and_total( - 2, - 3, - "steps", - Default::default(), - )) - .await; - - cx.send_progress(StepProgress::with_current( - 50, - "units", - Default::default(), - )) - .await; - - parent_cx - .send_progress(StepProgress::with_current_and_total( - 3, - 3, - "steps", - Default::default(), - )) - .await; - - bail!("failing step") - }, - ) - .register(); - } - #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum NestedProgressCheck { Initial, @@ -2530,42 +2374,4 @@ mod tests { ); } } - - fn define_remote_nested_engine( - engine: &mut UpdateEngine<'_, TestSpec>, - start_id: usize, - ) { - engine - .new_step( - "nested-foo".to_owned(), - start_id + 1, - "Nested step 1", - move |cx| async move { - cx.send_progress( - StepProgress::progress(Default::default()), - ) - .await; - StepSuccess::new(()).into() - }, - ) - .register(); - - engine - .new_step::<_, _, ()>( - "nested-bar".to_owned(), - start_id + 2, - "Nested step 2", - move |cx| async move { - cx.send_progress(StepProgress::with_current( - 20, - "units", - Default::default(), - )) - .await; - - StepSuccess::new(()).into() - }, - ) - .register(); - } } diff --git a/update-engine/src/display/group_display.rs b/update-engine/src/display/group_display.rs index 0d50489a9f..0e04361ce4 100644 --- a/update-engine/src/display/group_display.rs +++ b/update-engine/src/display/group_display.rs @@ -30,6 +30,7 @@ use super::{ pub struct GroupDisplay { // We don't need to add any buffering here because we already write data to // the writer in a line-buffered fashion (see Self::write_events). + log: slog::Logger, writer: W, max_width: usize, // This is set to the highest value of root_total_elapsed seen from any event reports. @@ -45,6 +46,7 @@ impl GroupDisplay { /// /// The function passed in is expected to create a writer. pub fn new( + log: &slog::Logger, keys_and_prefixes: impl IntoIterator, writer: W, ) -> Self @@ -70,6 +72,7 @@ impl GroupDisplay { let not_started = single_states.len(); Self { + log: log.new(slog::o!("component" => "GroupDisplay")), writer, max_width, // This creates the stopwatch in the stopped state with duration 0 -- i.e. a minimal @@ -84,6 +87,7 @@ impl GroupDisplay { /// Creates a new `GroupDisplay` with the provided report keys, using the /// `Display` impl to obtain the respective prefixes. 
pub fn new_with_display( + log: &slog::Logger, keys: impl IntoIterator, writer: W, ) -> Self @@ -91,6 +95,7 @@ impl GroupDisplay { K: fmt::Display, { Self::new( + log, keys.into_iter().map(|k| { let prefix = k.to_string(); (k, prefix) @@ -144,7 +149,30 @@ impl GroupDisplay { TokioSw::with_elapsed_started(root_total_elapsed); } } + self.stats.apply_result(result); + + if result.before != result.after { + slog::debug!( + self.log, + "add_event_report caused state transition"; + "prefix" => &state.prefix, + "before" => %result.before, + "after" => %result.after, + "current_stats" => ?self.stats, + "root_total_elapsed" => ?result.root_total_elapsed, + ); + } else { + slog::trace!( + self.log, + "add_event_report called, state did not change"; + "prefix" => &state.prefix, + "state" => %result.before, + "current_stats" => ?self.stats, + "root_total_elapsed" => ?result.root_total_elapsed, + ); + } + Ok(()) } else { Err(UnknownReportKey {}) @@ -179,7 +207,7 @@ impl GroupDisplay { } } -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct GroupDisplayStats { /// The total number of reports. pub total: usize, @@ -236,18 +264,9 @@ impl GroupDisplayStats { } fn apply_result(&mut self, result: AddEventReportResult) { - // Process result.after first to avoid integer underflow. - match result.after { - SingleStateTag::NotStarted => self.not_started += 1, - SingleStateTag::Running => self.running += 1, - SingleStateTag::Terminal(TerminalKind::Completed) => { - self.completed += 1 - } - SingleStateTag::Terminal(TerminalKind::Failed) => self.failed += 1, - SingleStateTag::Terminal(TerminalKind::Aborted) => { - self.aborted += 1 - } - SingleStateTag::Overwritten => self.overwritten += 1, + if result.before == result.after { + // Nothing to do. + return; } match result.before { @@ -262,6 +281,19 @@ impl GroupDisplayStats { } SingleStateTag::Overwritten => self.overwritten -= 1, } + + match result.after { + SingleStateTag::NotStarted => self.not_started += 1, + SingleStateTag::Running => self.running += 1, + SingleStateTag::Terminal(TerminalKind::Completed) => { + self.completed += 1 + } + SingleStateTag::Terminal(TerminalKind::Failed) => self.failed += 1, + SingleStateTag::Terminal(TerminalKind::Aborted) => { + self.aborted += 1 + } + SingleStateTag::Overwritten => self.overwritten += 1, + } } fn format_line( @@ -336,92 +368,139 @@ impl SingleState { &mut self, event_report: EventReport, ) -> AddEventReportResult { - let before = match &self.kind { + match &mut self.kind { SingleStateKind::NotStarted { .. } => { - self.kind = SingleStateKind::Running { - event_buffer: EventBuffer::new(8), + // We're starting a new update. + let before = SingleStateTag::NotStarted; + let mut event_buffer = EventBuffer::default(); + let (after, root_total_elapsed) = + match Self::apply_report(&mut event_buffer, event_report) { + ApplyReportResult::NotStarted => { + // This means that the event report was empty. Don't + // update `self.kind`. 
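The rewritten `apply_result` above is easier to reason about as a toy model: a no-op transition returns before touching any counter, and a real transition always decrements a `before` bucket that currently counts the report being moved, so the old "process `result.after` first" underflow caveat disappears. A sketch with stand-in types (not the real `GroupDisplayStats`):

    #[derive(Clone, Copy, PartialEq, Eq)]
    enum Tag {
        NotStarted,
        Running,
        Completed,
    }

    #[derive(Debug, Default)]
    struct Stats {
        not_started: usize,
        running: usize,
        completed: usize,
    }

    impl Stats {
        fn bucket(&mut self, tag: Tag) -> &mut usize {
            match tag {
                Tag::NotStarted => &mut self.not_started,
                Tag::Running => &mut self.running,
                Tag::Completed => &mut self.completed,
            }
        }

        // Move one report from `before` to `after`.
        fn apply(&mut self, before: Tag, after: Tag) {
            if before == after {
                // No state change: leave the counters alone entirely.
                return;
            }
            // The `before` bucket counts the report being moved, so this
            // decrement cannot underflow regardless of ordering.
            *self.bucket(before) -= 1;
            *self.bucket(after) += 1;
        }
    }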
+ (SingleStateTag::NotStarted, None) + } + ApplyReportResult::Running(root_total_elapsed) => { + self.kind = + SingleStateKind::Running { event_buffer }; + (SingleStateTag::Running, Some(root_total_elapsed)) + } + ApplyReportResult::Terminal(info) => { + let terminal_kind = info.kind; + let root_total_elapsed = info.root_total_elapsed; + + self.kind = SingleStateKind::Terminal { + info, + pending_event_buffer: Some(event_buffer), + }; + ( + SingleStateTag::Terminal(terminal_kind), + root_total_elapsed, + ) + } + ApplyReportResult::Overwritten => { + self.kind = SingleStateKind::Overwritten { + displayed: false, + }; + (SingleStateTag::Overwritten, None) + } + }; + + AddEventReportResult { before, after, root_total_elapsed } + } + SingleStateKind::Running { event_buffer } => { + // We're in the middle of an update. + let before = SingleStateTag::Running; + let (after, root_total_elapsed) = match Self::apply_report( + event_buffer, + event_report, + ) { + ApplyReportResult::NotStarted => { + // This is an illegal state transition: once a + // non-empty event report has been received, the + // event buffer never goes back to the NotStarted + // state. + unreachable!("illegal state transition from Running to NotStarted") + } + ApplyReportResult::Running(root_total_elapsed) => { + (SingleStateTag::Running, Some(root_total_elapsed)) + } + ApplyReportResult::Terminal(info) => { + let terminal_kind = info.kind; + let root_total_elapsed = info.root_total_elapsed; + + // Grab the event buffer so we can store it in the + // Terminal state below. + let event_buffer = std::mem::replace( + event_buffer, + EventBuffer::new(0), + ); + + self.kind = SingleStateKind::Terminal { + info, + pending_event_buffer: Some(event_buffer), + }; + ( + SingleStateTag::Terminal(terminal_kind), + root_total_elapsed, + ) + } + ApplyReportResult::Overwritten => { + self.kind = + SingleStateKind::Overwritten { displayed: false }; + (SingleStateTag::Overwritten, None) + } }; - SingleStateTag::NotStarted + AddEventReportResult { before, after, root_total_elapsed } } - SingleStateKind::Running { .. } => SingleStateTag::Running, - SingleStateKind::Terminal { info, .. } => { // Once we've reached a terminal state, we don't record any more // events. - return AddEventReportResult::unchanged( + AddEventReportResult::unchanged( SingleStateTag::Terminal(info.kind), info.root_total_elapsed, - ); + ) } SingleStateKind::Overwritten { .. } => { // This update has already completed -- assume that the event // buffer is for a new update, which we don't show. - return AddEventReportResult::unchanged( + AddEventReportResult::unchanged( SingleStateTag::Overwritten, None, - ); + ) } - }; - - let SingleStateKind::Running { event_buffer } = &mut self.kind else { - unreachable!("other branches were handled above"); - }; + } + } + /// The internal logic used by [`Self::add_event_report`]. + fn apply_report( + event_buffer: &mut EventBuffer, + event_report: EventReport, + ) -> ApplyReportResult { if let Some(root_execution_id) = event_buffer.root_execution_id() { if event_report.root_execution_id != Some(root_execution_id) { // The report is for a different execution ID -- assume that // this event is completed and mark our current execution as // completed. 
- self.kind = SingleStateKind::Overwritten { displayed: false }; - return AddEventReportResult { - before, - after: SingleStateTag::Overwritten, - root_total_elapsed: None, - }; + return ApplyReportResult::Overwritten; } } event_buffer.add_event_report(event_report); - let (after, max_total_elapsed) = - match event_buffer.root_execution_summary() { - Some(summary) => { - match summary.execution_status { - ExecutionStatus::NotStarted => { - (SingleStateTag::NotStarted, None) - } - ExecutionStatus::Running { - root_total_elapsed: max_total_elapsed, - .. - } => (SingleStateTag::Running, Some(max_total_elapsed)), - ExecutionStatus::Terminal(info) => { - // Grab the event buffer to store it in the terminal state. - let event_buffer = std::mem::replace( - event_buffer, - EventBuffer::new(0), - ); - let terminal_kind = info.kind; - let root_total_elapsed = info.root_total_elapsed; - self.kind = SingleStateKind::Terminal { - info, - pending_event_buffer: Some(event_buffer), - }; - ( - SingleStateTag::Terminal(terminal_kind), - root_total_elapsed, - ) - } - } + match event_buffer.root_execution_summary() { + Some(summary) => match summary.execution_status { + ExecutionStatus::NotStarted => ApplyReportResult::NotStarted, + ExecutionStatus::Running { root_total_elapsed, .. } => { + ApplyReportResult::Running(root_total_elapsed) } - None => { - // We don't have a summary yet. - (SingleStateTag::NotStarted, None) + ExecutionStatus::Terminal(info) => { + ApplyReportResult::Terminal(info) } - }; - - AddEventReportResult { - before, - after, - root_total_elapsed: max_total_elapsed, + }, + None => { + // We don't have a summary yet. + ApplyReportResult::NotStarted + } } } @@ -488,6 +567,7 @@ enum SingleStateKind { }, } +#[derive(Clone, Copy, Debug, Eq, PartialEq)] struct AddEventReportResult { before: SingleStateTag, after: SingleStateTag, @@ -503,10 +583,238 @@ impl AddEventReportResult { } } -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, Eq, PartialEq)] enum SingleStateTag { NotStarted, Running, Terminal(TerminalKind), Overwritten, } + +impl fmt::Display for SingleStateTag { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::NotStarted => write!(f, "not started"), + Self::Running => write!(f, "running"), + Self::Terminal(kind) => write!(f, "{kind}"), + Self::Overwritten => write!(f, "overwritten"), + } + } +} + +#[derive(Clone, Debug)] +enum ApplyReportResult { + NotStarted, + Running(Duration), + Terminal(ExecutionTerminalInfo), + Overwritten, +} + +#[cfg(test)] +mod tests { + use omicron_test_utils::dev::test_setup_log; + + use super::*; + + use crate::test_utils::{generate_test_events, GenerateTestEventsKind}; + + #[tokio::test] + async fn test_stats() { + let logctx = test_setup_log("test_stats"); + // Generate three sets of events, one for each kind. + let generated_completed = generate_test_events( + &logctx.log, + GenerateTestEventsKind::Completed, + ) + .await; + let generated_failed = + generate_test_events(&logctx.log, GenerateTestEventsKind::Failed) + .await; + let generated_aborted = + generate_test_events(&logctx.log, GenerateTestEventsKind::Aborted) + .await; + + // Set up a `GroupDisplay` with three keys. 
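The `Display` impls for `TerminalKind` (earlier in this diff) and `SingleStateTag` exist largely so the new slog lines can use the `%` sigil, which captures a value via `Display` at log time (where `?` would use `Debug`). A minimal sketch of that interaction, assuming a plain synchronous terminal drain and string stand-ins for the tags:

    use slog::Drain;

    fn main() {
        let decorator = slog_term::TermDecorator::new().build();
        let drain = slog_term::FullFormat::new(decorator).build().fuse();
        // A Mutex makes the synchronous drain usable as a root drain
        // without pulling in slog-async.
        let drain = std::sync::Mutex::new(drain).fuse();
        let log = slog::Logger::root(drain, slog::o!());

        let before = "not started"; // stand-ins for SingleStateTag values
        let after = "running";
        // `%x` logs via Display; `?x` would log via Debug.
        slog::debug!(log, "state transition"; "before" => %before, "after" => %after);
    }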
+ let mut group_display = GroupDisplay::new_with_display( + &logctx.log, + vec![ + GroupDisplayKey::Completed, + GroupDisplayKey::Failed, + GroupDisplayKey::Aborted, + GroupDisplayKey::Overwritten, + ], + std::io::stdout(), + ); + + let mut expected_stats = GroupDisplayStats { + total: 4, + not_started: 4, + running: 0, + completed: 0, + failed: 0, + aborted: 0, + overwritten: 0, + }; + assert_eq!(group_display.stats(), &expected_stats); + assert!(!expected_stats.is_terminal()); + assert!(!expected_stats.has_failures()); + + // Pass in an empty EventReport -- ensure that this doesn't move it to + // a Running state. + + group_display + .add_event_report( + &GroupDisplayKey::Completed, + EventReport::default(), + ) + .unwrap(); + assert_eq!(group_display.stats(), &expected_stats); + + // Pass in events one by one -- ensure that we're always in the running + // state until we've completed. + { + expected_stats.not_started -= 1; + expected_stats.running += 1; + + let n = generated_completed.len(); + + let mut buffer = EventBuffer::default(); + let mut last_seen = None; + + for (i, event) in + generated_completed.clone().into_iter().enumerate() + { + buffer.add_event(event); + let report = buffer.generate_report_since(&mut last_seen); + group_display + .add_event_report(&GroupDisplayKey::Completed, report) + .unwrap(); + if i == n - 1 { + // The last event should have moved us to the completed + // state. + expected_stats.running -= 1; + expected_stats.completed += 1; + } else { + // We should still be in the running state. + } + assert_eq!(group_display.stats(), &expected_stats); + assert!(!expected_stats.is_terminal()); + assert!(!expected_stats.has_failures()); + } + } + + // Pass in failed events, this time using buffer.generate_report() + // rather than buffer.generate_report_since(). + { + expected_stats.not_started -= 1; + expected_stats.running += 1; + + let n = generated_failed.len(); + + let mut buffer = EventBuffer::default(); + for (i, event) in generated_failed.clone().into_iter().enumerate() { + buffer.add_event(event); + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Failed, report) + .unwrap(); + if i == n - 1 { + // The last event should have moved us to the failed state. + expected_stats.running -= 1; + expected_stats.failed += 1; + assert!(expected_stats.has_failures()); + } else { + // We should still be in the running state. + assert!(!expected_stats.has_failures()); + } + assert_eq!(group_display.stats(), &expected_stats); + } + } + + // Pass in aborted events all at once. + { + expected_stats.not_started -= 1; + expected_stats.running += 1; + + let mut buffer = EventBuffer::default(); + for event in generated_aborted { + buffer.add_event(event); + } + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Aborted, report) + .unwrap(); + // The aborted events should have moved us to the aborted state. + expected_stats.running -= 1; + expected_stats.aborted += 1; + assert_eq!(group_display.stats(), &expected_stats); + + // Try passing in one of the events that, if we were running, would + // cause us to move to an overwritten state. Ensure that that does + // not happen (i.e. 
expected_stats stays the same) + let mut buffer = EventBuffer::default(); + buffer.add_event(generated_failed.first().unwrap().clone()); + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Aborted, report) + .unwrap(); + assert_eq!(group_display.stats(), &expected_stats); + } + + // For the overwritten state, pass in half of the completed events, and + // then pass in all of the failed events. + + { + expected_stats.not_started -= 1; + expected_stats.running += 1; + + let mut buffer = EventBuffer::default(); + let n = generated_completed.len() / 2; + for event in generated_completed.into_iter().take(n) { + buffer.add_event(event); + } + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Overwritten, report) + .unwrap(); + assert_eq!(group_display.stats(), &expected_stats); + + // Now pass in a single failed event, which has a different + // execution ID. + let mut buffer = EventBuffer::default(); + buffer.add_event(generated_failed.first().unwrap().clone()); + let report = buffer.generate_report(); + group_display + .add_event_report(&GroupDisplayKey::Overwritten, report) + .unwrap(); + // The overwritten event should have moved us to the overwritten + // state. + expected_stats.running -= 1; + expected_stats.overwritten += 1; + } + + assert!(expected_stats.has_failures()); + assert!(expected_stats.is_terminal()); + + logctx.cleanup_successful(); + } + + #[derive(Debug, Eq, PartialEq, Ord, PartialOrd)] + enum GroupDisplayKey { + Completed, + Failed, + Aborted, + Overwritten, + } + + impl fmt::Display for GroupDisplayKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Completed => write!(f, "completed"), + Self::Failed => write!(f, "failed"), + Self::Aborted => write!(f, "aborted"), + Self::Overwritten => write!(f, "overwritten"), + } + } + } +} diff --git a/update-engine/src/test_utils.rs b/update-engine/src/test_utils.rs index 0bacfbeb8d..b943d1ddfe 100644 --- a/update-engine/src/test_utils.rs +++ b/update-engine/src/test_utils.rs @@ -4,9 +4,16 @@ // Copyright 2023 Oxide Computer Company +use anyhow::bail; +use futures::StreamExt; use schemars::JsonSchema; +use tokio::sync::{mpsc, oneshot}; +use tokio_stream::wrappers::ReceiverStream; -use crate::{ExecutionId, StepSpec}; +use crate::{ + events::{Event, ProgressUnits, StepProgress}, + EventBuffer, ExecutionId, StepContext, StepSpec, StepSuccess, UpdateEngine, +}; #[derive(JsonSchema)] pub(crate) enum TestSpec {} @@ -27,3 +34,278 @@ pub(crate) static TEST_EXECUTION_UUID: &str = pub fn test_execution_id() -> ExecutionId { ExecutionId(TEST_EXECUTION_UUID.parse().expect("valid UUID")) } + +#[derive(Copy, Clone, Debug)] +pub(crate) enum GenerateTestEventsKind { + Completed, + Failed, + Aborted, +} + +pub(crate) async fn generate_test_events( + log: &slog::Logger, + kind: GenerateTestEventsKind, +) -> Vec> { + // The channel is big enough to contain all possible events. 
+ let (sender, receiver) = mpsc::channel(512); + let engine = UpdateEngine::new(log, sender); + + match kind { + GenerateTestEventsKind::Completed => { + define_test_steps(log, &engine, LastStepOutcome::Completed); + engine.execute().await.expect("execution successful"); + } + GenerateTestEventsKind::Failed => { + define_test_steps(log, &engine, LastStepOutcome::Failed); + engine.execute().await.expect_err("execution failed"); + } + GenerateTestEventsKind::Aborted => { + // In this case, the last step signals that it has been reached via + // sending a message over this channel, and then waits forever. We + // abort execution by calling into the AbortHandle. + let (sender, receiver) = oneshot::channel(); + define_test_steps(log, &engine, LastStepOutcome::Aborted(sender)); + let abort_handle = engine.abort_handle(); + let mut execute_fut = std::pin::pin!(engine.execute()); + let mut receiver = std::pin::pin!(receiver); + let mut receiver_done = false; + loop { + tokio::select! { + res = &mut execute_fut => { + res.expect_err("execution should have been aborted, but completed successfully"); + break; + } + _ = &mut receiver, if !receiver_done => { + receiver_done = true; + abort_handle + .abort("test engine deliberately aborted") + .expect("engine should still be alive"); + } + } + } + } + } + + ReceiverStream::new(receiver).collect().await +} + +#[derive(Debug)] +enum LastStepOutcome { + Completed, + Failed, + Aborted(oneshot::Sender<()>), +} + +#[derive(Debug)] +enum Never {} + +fn define_test_steps( + log: &slog::Logger, + engine: &UpdateEngine, + last_step_outcome: LastStepOutcome, +) { + engine + .new_step("foo".to_owned(), 1, "Step 1", move |_cx| async move { + StepSuccess::new(()).into() + }) + .register(); + + engine + .new_step("bar".to_owned(), 2, "Step 2", move |cx| async move { + for _ in 0..20 { + cx.send_progress(StepProgress::with_current_and_total( + 5, + 20, + ProgressUnits::BYTES, + Default::default(), + )) + .await; + + cx.send_progress(StepProgress::reset( + Default::default(), + "reset step 2", + )) + .await; + + cx.send_progress(StepProgress::retry("retry step 2")).await; + } + StepSuccess::new(()).into() + }) + .register(); + + engine + .new_step( + "nested".to_owned(), + 3, + "Step 3 (this is nested)", + move |parent_cx| async move { + parent_cx + .with_nested_engine(|engine| { + define_nested_engine(&parent_cx, engine); + Ok(()) + }) + .await + .expect_err("this is expected to fail"); + + StepSuccess::new(()).into() + }, + ) + .register(); + + let log = log.clone(); + engine + .new_step( + "remote-nested".to_owned(), + 20, + "Step 4 (remote nested)", + move |cx| async move { + let (sender, mut receiver) = mpsc::channel(16); + let mut engine = UpdateEngine::new(&log, sender); + define_remote_nested_engine(&mut engine, 20); + + let mut buffer = EventBuffer::default(); + + let mut execute_fut = std::pin::pin!(engine.execute()); + let mut execute_done = false; + loop { + tokio::select! { + res = &mut execute_fut, if !execute_done => { + res.expect("remote nested engine completed successfully"); + execute_done = true; + } + Some(event) = receiver.recv() => { + // Generate complete reports to ensure deduping + // happens within StepContexts. + buffer.add_event(event); + cx.send_nested_report(buffer.generate_report()).await?; + } + else => { + break; + } + } + } + + StepSuccess::new(()).into() + }, + ) + .register(); + + // The step index here (100) is large enough to be higher than all nested + // steps. 
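The abort choreography above distills to a reusable tokio shape: poll the main future and a readiness signal in one `select!` loop, and only fire the abort after the signal arrives. Stripped of the engine types (a sketch; `work` stands in for `engine.execute()` and the break stands in for the abort call):

    use tokio::sync::oneshot;

    #[tokio::main]
    async fn main() {
        let (ready_tx, ready_rx) = oneshot::channel::<()>();

        // Stand-in for engine.execute(): signals readiness, then parks.
        let work = async move {
            ready_tx.send(()).expect("receiver alive");
            std::future::pending::<()>().await;
        };

        let mut work = std::pin::pin!(work);
        let mut ready_rx = std::pin::pin!(ready_rx);
        let mut ready = false;
        loop {
            tokio::select! {
                _ = &mut work => {
                    unreachable!("work never completes on its own");
                }
                _ = &mut ready_rx, if !ready => {
                    ready = true;
                    // The real code calls abort_handle.abort(...) here; the
                    // sketch just stops driving the future.
                    break;
                }
            }
        }
    }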
+ engine + .new_step("baz".to_owned(), 100, "Step 5", move |_cx| async move { + match last_step_outcome { + LastStepOutcome::Completed => StepSuccess::new(()).into(), + LastStepOutcome::Failed => { + bail!("last step failed") + } + LastStepOutcome::Aborted(sender) => { + sender.send(()).expect("receiver should be alive"); + // The driver of the engine is responsible for aborting it + // at this point. + std::future::pending::().await; + unreachable!("pending future can never resolve"); + } + } + }) + .register(); +} + +fn define_nested_engine<'a>( + parent_cx: &'a StepContext, + engine: &mut UpdateEngine<'a, TestSpec>, +) { + engine + .new_step( + "nested-foo".to_owned(), + 4, + "Nested step 1", + move |cx| async move { + parent_cx + .send_progress(StepProgress::with_current_and_total( + 1, + 3, + "steps", + Default::default(), + )) + .await; + cx.send_progress(StepProgress::progress(Default::default())) + .await; + StepSuccess::new(()).into() + }, + ) + .register(); + + engine + .new_step::<_, _, ()>( + "nested-bar".to_owned(), + 5, + "Nested step 2 (fails)", + move |cx| async move { + // This is used by NestedProgressCheck below. + parent_cx + .send_progress(StepProgress::with_current_and_total( + 2, + 3, + "steps", + Default::default(), + )) + .await; + + cx.send_progress(StepProgress::with_current( + 50, + "units", + Default::default(), + )) + .await; + + parent_cx + .send_progress(StepProgress::with_current_and_total( + 3, + 3, + "steps", + Default::default(), + )) + .await; + + bail!("failing step") + }, + ) + .register(); +} + +fn define_remote_nested_engine( + engine: &mut UpdateEngine<'_, TestSpec>, + start_id: usize, +) { + engine + .new_step( + "nested-foo".to_owned(), + start_id + 1, + "Nested step 1", + move |cx| async move { + cx.send_progress(StepProgress::progress(Default::default())) + .await; + StepSuccess::new(()).into() + }, + ) + .register(); + + engine + .new_step::<_, _, ()>( + "nested-bar".to_owned(), + start_id + 2, + "Nested step 2", + move |cx| async move { + cx.send_progress(StepProgress::with_current( + 20, + "units", + Default::default(), + )) + .await; + + StepSuccess::new(()).into() + }, + ) + .register(); +} diff --git a/wicket-common/src/update_events.rs b/wicket-common/src/update_events.rs index e0f9d4b228..fe92887646 100644 --- a/wicket-common/src/update_events.rs +++ b/wicket-common/src/update_events.rs @@ -10,6 +10,7 @@ use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use std::fmt; +use std::sync::Arc; use thiserror::Error; use update_engine::errors::NestedEngineError; use update_engine::StepSpec; @@ -197,12 +198,13 @@ pub enum UpdateTerminalError { #[source] error: gateway_client::Error, }, - #[error("failed to upload trampoline phase 2 to MGS (was a new TUF repo uploaded?)")] - // Currently, the only way this error variant can be produced is if the - // upload task died or was replaced because a new TUF repository was - // uploaded. In the future, we may want to produce errors here if the upload - // to MGS fails too many times, for example. 
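Splitting the old unit variant into a failure variant carrying a source plus a separate cancelled variant (next hunk) follows a common thiserror shape: an `Arc`-wrapped `#[source]` lets a single underlying failure be shared by every task that observed it while still participating in the `source()` chain. A sketch with a stand-in error type (`std::io::Error`, not the real wicketd upload error):

    use std::sync::Arc;
    use thiserror::Error;

    #[derive(Debug, Error)]
    enum UploadError {
        // Arc lets one underlying failure be cloned to every waiter that
        // observed it, while staying visible to error-chain reporting.
        #[error("uploading artifact failed")]
        Failed {
            #[source]
            error: Arc<std::io::Error>,
        },
        #[error("upload cancelled (a newer artifact replaced it)")]
        Cancelled,
    }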
- TrampolinePhase2UploadFailed, + #[error("uploading trampoline phase 2 to MGS failed")] + TrampolinePhase2UploadFailed { + #[source] + error: Arc, + }, + #[error("uploading trampoline phase 2 to MGS cancelled (was a new TUF repo uploaded?)")] + TrampolinePhase2UploadCancelled, #[error("downloading installinator failed")] DownloadingInstallinatorFailed { #[source] diff --git a/wicket/src/cli/rack_setup/config_template.toml b/wicket/src/cli/rack_setup/config_template.toml index 617b61fadc..2886fa01d7 100644 --- a/wicket/src/cli/rack_setup/config_template.toml +++ b/wicket/src/cli/rack_setup/config_template.toml @@ -65,6 +65,9 @@ uplink_port_speed = "" # `none`, `firecode`, or `rs` uplink_port_fec = "" +# `true` or `false` +autoneg = "" + # A list of bgp peers # { addr = "1.7.0.1", asn = 47, port = "qsfp0" } bgp_peers = [] diff --git a/wicket/src/cli/rack_setup/config_toml.rs b/wicket/src/cli/rack_setup/config_toml.rs index 9b1a25a50e..5a8e8a560e 100644 --- a/wicket/src/cli/rack_setup/config_toml.rs +++ b/wicket/src/cli/rack_setup/config_toml.rs @@ -229,6 +229,12 @@ fn populate_network_table( ); _last_key = Some(property); } + uplink.insert( + "autoneg", + Item::Value(Value::Boolean(Formatted::new( + cfg.autoneg, + ))), + ); let mut routes = Array::new(); for r in &cfg.routes { @@ -449,6 +455,7 @@ mod tests { PortFec::None => InternalPortFec::None, PortFec::Rs => InternalPortFec::Rs, }, + autoneg: config.autoneg, switch: match config.switch { SwitchLocation::Switch0 => { InternalSwitchLocation::Switch0 @@ -529,6 +536,7 @@ mod tests { }], uplink_port_speed: PortSpeed::Speed400G, uplink_port_fec: PortFec::Firecode, + autoneg: true, port: "port0".into(), switch: SwitchLocation::Switch0, }], diff --git a/wicket/src/cli/rack_update.rs b/wicket/src/cli/rack_update.rs index fa41fa7b8c..cac0f09ee5 100644 --- a/wicket/src/cli/rack_update.rs +++ b/wicket/src/cli/rack_update.rs @@ -174,6 +174,7 @@ async fn do_attach_to_updates( output: CommandOutput<'_>, ) -> Result<()> { let mut display = GroupDisplay::new_with_display( + &log, update_ids.iter().copied(), output.stderr, ); diff --git a/wicket/src/ui/widgets/popup.rs b/wicket/src/ui/widgets/popup.rs index 19d7aa18b1..fb8c0f1f24 100644 --- a/wicket/src/ui/widgets/popup.rs +++ b/wicket/src/ui/widgets/popup.rs @@ -464,7 +464,7 @@ pub fn draw_buttons( let button_rects = Layout::default() .direction(Direction::Horizontal) .horizontal_margin(2) - .constraints(constraints.as_ref()) + .constraints(constraints) .split(rect); let block = Block::default() diff --git a/wicketd/Cargo.toml b/wicketd/Cargo.toml index db1ac9c04a..97550342d0 100644 --- a/wicketd/Cargo.toml +++ b/wicketd/Cargo.toml @@ -8,6 +8,7 @@ license = "MPL-2.0" anyhow.workspace = true async-trait.workspace = true base64.workspace = true +buf-list.workspace = true bytes.workspace = true camino.workspace = true camino-tempfile.workspace = true @@ -57,6 +58,7 @@ sled-hardware.workspace = true tufaceous-lib.workspace = true update-engine.workspace = true wicket-common.workspace = true +wicketd-client.workspace = true omicron-workspace-hack.workspace = true [[bin]] @@ -82,4 +84,3 @@ tar.workspace = true tokio = { workspace = true, features = ["test-util"] } tufaceous.workspace = true wicket.workspace = true -wicketd-client.workspace = true diff --git a/wicketd/src/artifacts/artifacts_with_plan.rs b/wicketd/src/artifacts/artifacts_with_plan.rs index 331aecfc70..d3319d7f6b 100644 --- a/wicketd/src/artifacts/artifacts_with_plan.rs +++ b/wicketd/src/artifacts/artifacts_with_plan.rs @@ -50,7 +50,7 @@ 
pub(super) struct ArtifactsWithPlan { } impl ArtifactsWithPlan { - pub(super) fn from_zip( + pub(super) async fn from_zip( zip_data: T, log: &Logger, ) -> Result @@ -68,10 +68,12 @@ impl ArtifactsWithPlan { // anyone can sign the repositories and this code will accept that. let repository = OmicronRepo::load_untrusted_ignore_expiration(log, dir.path()) + .await .map_err(RepositoryError::LoadRepository)?; let artifacts = repository .read_artifacts() + .await .map_err(RepositoryError::ReadArtifactsDocument)?; // Create another temporary directory where we'll "permanently" (as long @@ -132,9 +134,10 @@ impl ArtifactsWithPlan { .map_err(RepositoryError::TargetHashLength)?, ); - let reader = repository + let stream = repository .repo() .read_target(&target_name) + .await .map_err(|error| RepositoryError::LocateTarget { target: artifact.target.clone(), error: Box::new(error), @@ -143,13 +146,15 @@ impl ArtifactsWithPlan { RepositoryError::MissingTarget(artifact.target.clone()) })?; - plan_builder.add_artifact( - artifact.into_id(), - artifact_hash, - io::BufReader::new(reader), - &mut by_id, - &mut by_hash, - )?; + plan_builder + .add_artifact( + artifact.into_id(), + artifact_hash, + stream, + &mut by_id, + &mut by_hash, + ) + .await?; } // Ensure we know how to apply updates from this set of artifacts; we'll @@ -218,8 +223,11 @@ mod tests { /// Test that `ArtifactsWithPlan` can extract the fake repository generated /// by tufaceous. - #[test] - fn test_extract_fake() -> Result<()> { + /// + /// See documentation for extract_nested_artifact_pair in update_plan.rs + /// for why multi_thread is required. + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_extract_fake() -> Result<()> { let logctx = test_setup_log("test_extract_fake"); let temp_dir = Utf8TempDir::new()?; let archive_path = temp_dir.path().join("archive.zip"); @@ -233,12 +241,15 @@ mod tests { ]) .context("error parsing args")?; - args.exec(&logctx.log).context("error executing assemble command")?; + args.exec(&logctx.log) + .await + .context("error executing assemble command")?; // Now check that it can be read by the archive extractor. let zip_bytes = std::fs::File::open(&archive_path) .context("error opening archive.zip")?; let plan = ArtifactsWithPlan::from_zip(zip_bytes, &logctx.log) + .await .context("error reading archive.zip")?; // Check that all known artifact kinds are present in the map. let by_id_kinds: BTreeSet<_> = diff --git a/wicketd/src/artifacts/error.rs b/wicketd/src/artifacts/error.rs index ef81ec66f3..ada8fbe011 100644 --- a/wicketd/src/artifacts/error.rs +++ b/wicketd/src/artifacts/error.rs @@ -57,6 +57,13 @@ pub(super) enum RepositoryError { )] MissingTarget(String), + #[error("error reading artifact of kind `{kind}` from repository")] + ReadArtifact { + kind: ArtifactKind, + #[source] + error: Box, + }, + #[error("error copying artifact of kind `{kind}` from repository")] CopyExtractedArtifact { kind: ArtifactKind, @@ -160,6 +167,7 @@ impl RepositoryError { | RepositoryError::LoadRepository(_) | RepositoryError::ReadArtifactsDocument(_) | RepositoryError::TargetHashRead { .. } + | RepositoryError::ReadArtifact { .. } | RepositoryError::CopyExtractedArtifact { .. 
} => { HttpError::for_bad_request(None, message) } diff --git a/wicketd/src/artifacts/extracted_artifacts.rs b/wicketd/src/artifacts/extracted_artifacts.rs index b796201936..5683cd1c13 100644 --- a/wicketd/src/artifacts/extracted_artifacts.rs +++ b/wicketd/src/artifacts/extracted_artifacts.rs @@ -7,6 +7,8 @@ use anyhow::Context; use camino::Utf8PathBuf; use camino_tempfile::NamedUtf8TempFile; use camino_tempfile::Utf8TempDir; +use futures::Stream; +use futures::StreamExt; use omicron_common::update::ArtifactHash; use omicron_common::update::ArtifactHashId; use omicron_common::update::ArtifactKind; @@ -14,13 +16,11 @@ use sha2::Digest; use sha2::Sha256; use slog::info; use slog::Logger; -use std::fs::File; use std::io; -use std::io::BufWriter; -use std::io::Read; use std::io::Write; use std::sync::Arc; use tokio::io::AsyncRead; +use tokio::io::AsyncWriteExt; use tokio_util::io::ReaderStream; /// Handle to the data of an extracted artifact. @@ -123,17 +123,18 @@ impl ExtractedArtifacts { self.tempdir.path().join(format!("{}", artifact_hash_id.hash)) } - /// Copy from `reader` into our temp directory, returning a handle to the + /// Copy from `stream` into our temp directory, returning a handle to the /// extracted artifact on success. - pub(super) fn store( + pub(super) async fn store( &mut self, artifact_hash_id: ArtifactHashId, - mut reader: impl Read, + stream: impl Stream>, ) -> Result { let output_path = self.path_for_artifact(&artifact_hash_id); - let mut writer = BufWriter::new( - File::create(&output_path) + let mut writer = tokio::io::BufWriter::new( + tokio::fs::File::create(&output_path) + .await .with_context(|| { format!("failed to create temp file {output_path}") }) @@ -143,15 +144,29 @@ impl ExtractedArtifacts { })?, ); - let file_size = io::copy(&mut reader, &mut writer) - .with_context(|| format!("failed writing to {output_path}")) - .map_err(|error| RepositoryError::CopyExtractedArtifact { + let mut stream = std::pin::pin!(stream); + + let mut file_size = 0; + + while let Some(res) = stream.next().await { + let chunk = res.map_err(|error| RepositoryError::ReadArtifact { kind: artifact_hash_id.kind.clone(), - error, - })? 
as usize; + error: Box::new(error), + })?; + file_size += chunk.len(); + writer + .write_all(&chunk) + .await + .with_context(|| format!("failed writing to {output_path}")) + .map_err(|error| RepositoryError::CopyExtractedArtifact { + kind: artifact_hash_id.kind.clone(), + error, + })?; + } writer .flush() + .await .with_context(|| format!("failed flushing {output_path}")) .map_err(|error| RepositoryError::CopyExtractedArtifact { kind: artifact_hash_id.kind.clone(), diff --git a/wicketd/src/artifacts/store.rs b/wicketd/src/artifacts/store.rs index 29e1ecef0a..2a7b4a646b 100644 --- a/wicketd/src/artifacts/store.rs +++ b/wicketd/src/artifacts/store.rs @@ -42,12 +42,9 @@ impl WicketdArtifactStore { slog::debug!(self.log, "adding repository"); let log = self.log.clone(); - let new_artifacts = tokio::task::spawn_blocking(move || { - ArtifactsWithPlan::from_zip(data, &log) - .map_err(|error| error.to_http_error()) - }) - .await - .unwrap()?; + let new_artifacts = ArtifactsWithPlan::from_zip(data, &log) + .await + .map_err(|error| error.to_http_error())?; self.replace(new_artifacts); Ok(()) diff --git a/wicketd/src/artifacts/update_plan.rs b/wicketd/src/artifacts/update_plan.rs index 5d7bee629a..c6db7c1b65 100644 --- a/wicketd/src/artifacts/update_plan.rs +++ b/wicketd/src/artifacts/update_plan.rs @@ -14,7 +14,10 @@ use super::extracted_artifacts::HashingNamedUtf8TempFile; use super::ArtifactIdData; use super::Board; use super::ExtractedArtifactDataHandle; -use anyhow::anyhow; +use bytes::Bytes; +use futures::Stream; +use futures::StreamExt; +use futures::TryStreamExt; use hubtools::RawHubrisArchive; use omicron_common::api::external::SemverVersion; use omicron_common::api::internal::nexus::KnownArtifactKind; @@ -28,7 +31,6 @@ use std::collections::btree_map; use std::collections::BTreeMap; use std::collections::HashMap; use std::io; -use std::io::Read; use tufaceous_lib::HostPhaseImages; use tufaceous_lib::RotArchives; @@ -143,24 +145,26 @@ impl<'a> UpdatePlanBuilder<'a> { }) } - pub(super) fn add_artifact( + pub(super) async fn add_artifact( &mut self, artifact_id: ArtifactId, artifact_hash: ArtifactHash, - reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { // If we don't know this artifact kind, we'll still serve it up by hash, // but we don't do any further processing on it. 
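The new `store` body above is the standard chunk pump: pin the stream, write each `Bytes` chunk through a `tokio::io::BufWriter`, and flush explicitly at the end. The same loop in isolation (a sketch; `write_stream_to_file` is hypothetical and uses `std::io::Error` where the real code maps tough's error type):

    use bytes::Bytes;
    use futures::{Stream, StreamExt};
    use tokio::io::AsyncWriteExt;

    async fn write_stream_to_file(
        path: &str,
        stream: impl Stream<Item = std::io::Result<Bytes>>,
    ) -> std::io::Result<usize> {
        let mut writer =
            tokio::io::BufWriter::new(tokio::fs::File::create(path).await?);
        let mut stream = std::pin::pin!(stream);
        let mut file_size = 0;
        while let Some(chunk) = stream.next().await {
            let chunk = chunk?;
            file_size += chunk.len();
            writer.write_all(&chunk).await?;
        }
        // BufWriter buffers in memory; flush before dropping, or the tail
        // of the file can be silently lost.
        writer.flush().await?;
        Ok(file_size)
    }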
let Some(artifact_kind) = artifact_id.kind.to_known() else { - return self.add_unknown_artifact( - artifact_id, - artifact_hash, - reader, - by_id, - by_hash, - ); + return self + .add_unknown_artifact( + artifact_id, + artifact_hash, + stream, + by_id, + by_hash, + ) + .await; }; // If we do know the artifact kind, we may have additional work to do, @@ -170,48 +174,57 @@ impl<'a> UpdatePlanBuilder<'a> { match artifact_kind { KnownArtifactKind::GimletSp | KnownArtifactKind::PscSp - | KnownArtifactKind::SwitchSp => self.add_sp_artifact( - artifact_id, - artifact_kind, - artifact_hash, - reader, - by_id, - by_hash, - ), + | KnownArtifactKind::SwitchSp => { + self.add_sp_artifact( + artifact_id, + artifact_kind, + artifact_hash, + stream, + by_id, + by_hash, + ) + .await + } KnownArtifactKind::GimletRot | KnownArtifactKind::PscRot - | KnownArtifactKind::SwitchRot => self.add_rot_artifact( - artifact_id, - artifact_kind, - reader, - by_id, - by_hash, - ), + | KnownArtifactKind::SwitchRot => { + self.add_rot_artifact( + artifact_id, + artifact_kind, + stream, + by_id, + by_hash, + ) + .await + } KnownArtifactKind::Host => { - self.add_host_artifact(artifact_id, reader, by_id, by_hash) + self.add_host_artifact(artifact_id, stream, by_id, by_hash) } KnownArtifactKind::Trampoline => self.add_trampoline_artifact( artifact_id, - reader, - by_id, - by_hash, - ), - KnownArtifactKind::ControlPlane => self.add_control_plane_artifact( - artifact_id, - artifact_hash, - reader, + stream, by_id, by_hash, ), + KnownArtifactKind::ControlPlane => { + self.add_control_plane_artifact( + artifact_id, + artifact_hash, + stream, + by_id, + by_hash, + ) + .await + } } } - fn add_sp_artifact( + async fn add_sp_artifact( &mut self, artifact_id: ArtifactId, artifact_kind: KnownArtifactKind, artifact_hash: ArtifactHash, - mut reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { @@ -228,15 +241,18 @@ impl<'a> UpdatePlanBuilder<'a> { | KnownArtifactKind::SwitchRot => unreachable!(), }; + let mut stream = std::pin::pin!(stream); + // SP images are small, and hubtools wants a `&[u8]` to parse, so we'll // read the whole thing into memory. 
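Once the SP image has been slurped into memory this way, handing it back to the stream-based `store` API is just a one-chunk stream, which is what the next hunk's `futures::stream::iter([Ok(Bytes::from(data))])` does. The adapter in isolation (a sketch; the error type is a stand-in for whatever the consuming API expects):

    use bytes::Bytes;
    use futures::stream;
    use futures::Stream;

    // A buffer becomes a single-chunk stream, so in-memory data can reuse
    // an API written against streams.
    fn one_chunk(data: Vec<u8>) -> impl Stream<Item = std::io::Result<Bytes>> {
        stream::iter([Ok(Bytes::from(data))])
    }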
let mut data = Vec::new(); - reader.read_to_end(&mut data).map_err(|error| { - RepositoryError::CopyExtractedArtifact { + while let Some(res) = stream.next().await { + let chunk = res.map_err(|error| RepositoryError::ReadArtifact { kind: artifact_kind.into(), - error: anyhow!(error), - } - })?; + error: Box::new(error), + })?; + data.extend_from_slice(&chunk); + } let (artifact_id, board) = read_hubris_board_from_archive(artifact_id, data.clone())?; @@ -255,7 +271,11 @@ impl<'a> UpdatePlanBuilder<'a> { ArtifactHashId { kind: artifact_kind.into(), hash: artifact_hash }; let data = self .extracted_artifacts - .store(artifact_hash_id, io::Cursor::new(&data))?; + .store( + artifact_hash_id, + futures::stream::iter([Ok(Bytes::from(data))]), + ) + .await?; slot.insert(ArtifactIdData { id: artifact_id.clone(), data: data.clone(), @@ -273,11 +293,11 @@ impl<'a> UpdatePlanBuilder<'a> { Ok(()) } - fn add_rot_artifact( + async fn add_rot_artifact( &mut self, artifact_id: ArtifactId, artifact_kind: KnownArtifactKind, - reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { @@ -310,9 +330,12 @@ impl<'a> UpdatePlanBuilder<'a> { }; let (rot_a_data, rot_b_data) = Self::extract_nested_artifact_pair( + stream, &mut self.extracted_artifacts, artifact_kind, - |out_a, out_b| RotArchives::extract_into(reader, out_a, out_b), + |reader, out_a, out_b| { + RotArchives::extract_into(reader, out_a, out_b) + }, )?; // Technically we've done all we _need_ to do with the RoT images. We @@ -358,7 +381,7 @@ impl<'a> UpdatePlanBuilder<'a> { fn add_host_artifact( &mut self, artifact_id: ArtifactId, - reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { @@ -369,9 +392,12 @@ impl<'a> UpdatePlanBuilder<'a> { } let (phase_1_data, phase_2_data) = Self::extract_nested_artifact_pair( + stream, &mut self.extracted_artifacts, KnownArtifactKind::Host, - |out_1, out_2| HostPhaseImages::extract_into(reader, out_1, out_2), + |reader, out_1, out_2| { + HostPhaseImages::extract_into(reader, out_1, out_2) + }, )?; // Similarly to the RoT, we need to create new, non-conflicting artifact @@ -409,7 +435,7 @@ impl<'a> UpdatePlanBuilder<'a> { fn add_trampoline_artifact( &mut self, artifact_id: ArtifactId, - reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { @@ -422,9 +448,12 @@ impl<'a> UpdatePlanBuilder<'a> { } let (phase_1_data, phase_2_data) = Self::extract_nested_artifact_pair( + stream, &mut self.extracted_artifacts, KnownArtifactKind::Trampoline, - |out_1, out_2| HostPhaseImages::extract_into(reader, out_1, out_2), + |reader, out_1, out_2| { + HostPhaseImages::extract_into(reader, out_1, out_2) + }, )?; // Similarly to the RoT, we need to create new, non-conflicting artifact @@ -466,11 +495,11 @@ impl<'a> UpdatePlanBuilder<'a> { Ok(()) } - fn add_control_plane_artifact( + async fn add_control_plane_artifact( &mut self, artifact_id: ArtifactId, artifact_hash: ArtifactHash, - reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { @@ -487,7 +516,8 @@ impl<'a> UpdatePlanBuilder<'a> { hash: artifact_hash, }; - let data = self.extracted_artifacts.store(artifact_hash_id, reader)?; + let data = + self.extracted_artifacts.store(artifact_hash_id, stream).await?; self.control_plane_hash = Some(data.hash()); @@ -503,11 +533,11 @@ 
impl<'a> UpdatePlanBuilder<'a> { Ok(()) } - fn add_unknown_artifact( + async fn add_unknown_artifact( &mut self, artifact_id: ArtifactId, artifact_hash: ArtifactHash, - reader: io::BufReader, + stream: impl Stream> + Send, by_id: &mut BTreeMap>, by_hash: &mut HashMap, ) -> Result<(), RepositoryError> { @@ -515,7 +545,8 @@ impl<'a> UpdatePlanBuilder<'a> { let artifact_hash_id = ArtifactHashId { kind: artifact_kind.clone(), hash: artifact_hash }; - let data = self.extracted_artifacts.store(artifact_hash_id, reader)?; + let data = + self.extracted_artifacts.store(artifact_hash_id, stream).await?; record_extracted_artifact( artifact_id, @@ -529,11 +560,80 @@ impl<'a> UpdatePlanBuilder<'a> { Ok(()) } - // RoT, host OS, and trampoline OS artifacts all contain a pair of artifacts - // we actually care about (RoT: A/B images; host/trampoline: phase1/phase2 - // images). This method is a helper that converts a single artifact `reader` - // into a pair of extracted artifacts. + /// A helper that converts a single artifact `stream` into a pair of + /// extracted artifacts. + /// + /// RoT, host OS, and trampoline OS artifacts all contain a pair of + /// artifacts we actually care about (RoT: A/B images; host/trampoline: + /// phase1/phase2 images). This method is a helper to extract that. + /// + /// This method uses a `block_in_place` into synchronous code, because the + /// value of changing tufaceous to do async tarball extraction is honestly + /// pretty dubious. + /// + /// The main costs of this are that: + /// 1. This code can only be used with multithreaded Tokio executors. (This + /// is OK for production, but does require that our tests use `flavor = + /// "multi_thread`.) + /// 2. Parallelizing extraction is harder if we ever want to do that in the + /// future. (It can be done using the async-scoped crate, though.) + /// + /// Depending on how things shake out, we may want to revisit this in the + /// future. fn extract_nested_artifact_pair( + stream: impl Stream> + Send, + extracted_artifacts: &mut ExtractedArtifacts, + kind: KnownArtifactKind, + extract: F, + ) -> Result< + (ExtractedArtifactDataHandle, ExtractedArtifactDataHandle), + RepositoryError, + > + where + F: FnOnce( + &mut dyn io::BufRead, + &mut HashingNamedUtf8TempFile, + &mut HashingNamedUtf8TempFile, + ) -> anyhow::Result<()> + + Send, + { + // Since stream isn't guaranteed to be 'static, we have to use + // block_in_place here, not spawn_blocking. This does mean that the + // current task is taken over, and that this function can only be used + // from a multithreaded Tokio runtime. + // + // An alternative would be to use the `async-scoped` crate. However: + // + // - We would only spawn one task there. + // - The only safe use of async-scoped is with the `scope_and_block` + // call, which uses `tokio::task::block_in_place` anyway. + // - async-scoped also requires a multithreaded Tokio runtime. + // + // If we ever want to parallelize extraction across all the different + // artifacts, `async-scoped` would be a good fit. + tokio::task::block_in_place(|| { + let stream = std::pin::pin!(stream); + let reader = + tokio_util::io::StreamReader::new(stream.map_err(|error| { + // StreamReader requires a conversion from tough's errors to + // std::io::Error. + std::io::Error::new(io::ErrorKind::Other, error) + })); + + // RotArchives::extract_into takes a synchronous reader, so we need + // to use this bridge. The bridge can only be used from a blocking + // context. 
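The `StreamReader` + `SyncIoBridge` + `block_in_place` combination described above, reduced to a self-contained sketch (the `read_sync` helper is illustrative; like the real code, it must be called from a multi-threaded tokio runtime, since `block_in_place` panics on a current-thread runtime):

    use bytes::Bytes;
    use futures::Stream;
    use std::io::Read;

    // Feed an async byte stream to code that insists on std::io::Read.
    fn read_sync<S>(stream: S) -> std::io::Result<Vec<u8>>
    where
        S: Stream<Item = std::io::Result<Bytes>> + Send,
    {
        tokio::task::block_in_place(|| {
            let stream = std::pin::pin!(stream);
            // StreamReader turns a stream of Buf chunks into an AsyncRead.
            let reader = tokio_util::io::StreamReader::new(stream);
            // SyncIoBridge blocks the current thread on each read, which is
            // exactly why block_in_place is wrapped around it.
            let mut reader = tokio_util::io::SyncIoBridge::new(reader);
            let mut out = Vec::new();
            reader.read_to_end(&mut out)?;
            Ok(out)
        })
    }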
+ let mut reader = tokio_util::io::SyncIoBridge::new(reader); + + Self::extract_nested_artifact_pair_impl( + extracted_artifacts, + kind, + |out_a, out_b| extract(&mut reader, out_a, out_b), + ) + }) + } + + fn extract_nested_artifact_pair_impl( extracted_artifacts: &mut ExtractedArtifacts, kind: KnownArtifactKind, extract: F, @@ -838,7 +938,9 @@ mod tests { builder.build_to_vec().unwrap() } - #[tokio::test] + // See documentation for extract_nested_artifact_pair for why multi_thread + // is required. + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_update_plan_from_artifacts() { const VERSION_0: SemverVersion = SemverVersion::new(0, 0, 0); @@ -867,10 +969,11 @@ mod tests { .add_artifact( id, hash, - io::BufReader::new(io::Cursor::new(&data)), + futures::stream::iter([Ok(Bytes::from(data))]), &mut by_id, &mut by_hash, ) + .await .unwrap(); } @@ -889,10 +992,11 @@ mod tests { .add_artifact( id, hash, - io::BufReader::new(io::Cursor::new(&data)), + futures::stream::iter([Ok(Bytes::from(data))]), &mut by_id, &mut by_hash, ) + .await .unwrap(); } @@ -917,10 +1021,11 @@ mod tests { .add_artifact( id, hash, - io::BufReader::new(io::Cursor::new(&data)), + futures::stream::iter([Ok(Bytes::from(data))]), &mut by_id, &mut by_hash, ) + .await .unwrap(); } } @@ -945,10 +1050,11 @@ mod tests { .add_artifact( id, hash, - io::BufReader::new(io::Cursor::new(data)), + futures::stream::iter([Ok(data.clone())]), &mut by_id, &mut by_hash, ) + .await .unwrap(); } @@ -972,10 +1078,11 @@ mod tests { .add_artifact( id, hash, - io::BufReader::new(io::Cursor::new(data)), + futures::stream::iter([Ok(data.clone())]), &mut by_id, &mut by_hash, ) + .await .unwrap(); } diff --git a/wicketd/src/bin/wicketd.rs b/wicketd/src/bin/wicketd.rs index 887ac496e0..24fa802c79 100644 --- a/wicketd/src/bin/wicketd.rs +++ b/wicketd/src/bin/wicketd.rs @@ -5,6 +5,7 @@ //! 
Executable for wicketd: technician port based management service use anyhow::{anyhow, Context}; +use camino::Utf8PathBuf; use clap::Parser; use omicron_common::{ address::Ipv6Subnet, @@ -24,9 +25,9 @@ enum Args { /// Start a wicketd server Run { #[clap(name = "CONFIG_FILE_PATH", action)] - config_file_path: PathBuf, + config_file_path: Utf8PathBuf, - /// The address for the technician port + /// The address on which the main wicketd dropshot server should listen #[clap(short, long, action)] address: SocketAddrV6, @@ -57,6 +58,19 @@ enum Args { #[clap(long, action, conflicts_with("read_smf_config"))] rack_subnet: Option, }, + + /// Instruct a running wicketd server to refresh its config + /// + /// Mechanically, this hits a specific endpoint served by wicketd's dropshot + /// server + RefreshConfig { + #[clap(name = "CONFIG_FILE_PATH", action)] + config_file_path: Utf8PathBuf, + + /// The address of the server to refresh + #[clap(short, long, action)] + address: SocketAddrV6, + }, } #[tokio::main] @@ -104,9 +118,7 @@ async fn do_run() -> Result<(), CmdError> { }; let config = Config::from_file(&config_file_path) - .with_context(|| { - format!("failed to parse {}", config_file_path.display()) - }) + .with_context(|| format!("failed to parse {config_file_path}")) .map_err(CmdError::Failure)?; let rack_subnet = match rack_subnet { @@ -140,5 +152,24 @@ async fn do_run() -> Result<(), CmdError> { .await .map_err(|err| CmdError::Failure(anyhow!(err))) } + Args::RefreshConfig { config_file_path, address } => { + let config = Config::from_file(&config_file_path) + .with_context(|| format!("failed to parse {config_file_path}")) + .map_err(CmdError::Failure)?; + + let log = config + .log + .to_logger("wicketd") + .context("failed to initialize logger") + .map_err(CmdError::Failure)?; + + // When run via `svcadm refresh ...`, we need to respect the special + // [SMF exit codes](https://illumos.org/man/7/smf_method). Returning + // an error from main exits with code 1 (from libc::EXIT_FAILURE), + // which does not collide with any special SMF codes. 
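For reference, the CLI surface this adds is small. A trimmed-down sketch of the new subcommand (abbreviated, not the full wicketd argument set):

```rust
use camino::Utf8PathBuf;
use clap::Parser;
use std::net::SocketAddrV6;

// Sketch of the `refresh-config` subcommand shape: it takes the same
// config path and server address that `run` does.
#[derive(Parser)]
enum Args {
    /// Instruct a running wicketd server to refresh its config
    RefreshConfig {
        #[clap(name = "CONFIG_FILE_PATH")]
        config_file_path: Utf8PathBuf,

        /// The address of the server to refresh
        #[clap(short, long)]
        address: SocketAddrV6,
    },
}

fn main() {
    match Args::parse() {
        Args::RefreshConfig { config_file_path, address } => {
            println!("would refresh {address} using {config_file_path}");
        }
    }
}
```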
+ Server::refresh_config(log, address) + .await + .map_err(CmdError::Failure) + } } } diff --git a/wicketd/src/lib.rs b/wicketd/src/lib.rs index ada1902654..32188d77de 100644 --- a/wicketd/src/lib.rs +++ b/wicketd/src/lib.rs @@ -16,11 +16,12 @@ mod preflight_check; mod rss_config; mod update_tracker; -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, Context, Result}; use artifacts::{WicketdArtifactServer, WicketdArtifactStore}; use bootstrap_addrs::BootstrapPeers; pub use config::Config; pub(crate) use context::ServerContext; +use display_error_chain::DisplayErrorChain; use dropshot::{ConfigDropshot, HandlerTaskMode, HttpServer}; pub use installinator_progress::{IprUpdateTracker, RunningUpdateState}; use internal_dns::resolver::Resolver; @@ -34,6 +35,7 @@ use preflight_check::PreflightCheckerHandler; use sled_hardware::Baseboard; use slog::{debug, error, o, Drain}; use std::sync::{Mutex, OnceLock}; +use std::time::Duration; use std::{ net::{SocketAddr, SocketAddrV6}, sync::Arc, @@ -70,7 +72,6 @@ pub struct SmfConfigValues { impl SmfConfigValues { #[cfg(target_os = "illumos")] pub fn read_current() -> Result { - use anyhow::Context; use illumos_utils::scf::ScfHandle; const CONFIG_PG: &str = "config"; @@ -259,11 +260,70 @@ impl Server { res = self.artifact_server => { match res { Ok(()) => Err("artifact server exited unexpectedly".to_owned()), - // The artifact server returns an anyhow::Error, which has a `Debug` impl that - // prints out the chain of errors. + // The artifact server returns an anyhow::Error, which has a + // `Debug` impl that prints out the chain of errors. Err(err) => Err(format!("running artifact server: {err:?}")), } } } } + + /// Instruct a running server at the specified address to reload its config + /// parameters + pub async fn refresh_config( + log: slog::Logger, + address: SocketAddrV6, + ) -> Result<()> { + // It's possible we're being told to refresh a server's config before + // it's ready to receive such a request, so we'll give it a healthy + // amount of time before we give up: we'll set a client timeout and also + // retry a few times. See + // https://github.com/oxidecomputer/omicron/issues/4604. + const CLIENT_TIMEOUT: Duration = Duration::from_secs(5); + const SLEEP_BETWEEN_RETRIES: Duration = Duration::from_secs(10); + const NUM_RETRIES: usize = 3; + + let client = reqwest::Client::builder() + .connect_timeout(CLIENT_TIMEOUT) + .timeout(CLIENT_TIMEOUT) + .build() + .context("failed to construct reqwest Client")?; + + let client = wicketd_client::Client::new_with_client( + &format!("http://{address}"), + client, + log, + ); + let log = client.inner(); + + let mut attempt = 0; + loop { + attempt += 1; + + // If we succeed, we're done. + let Err(err) = client.post_reload_config().await else { + return Ok(()); + }; + + // If we failed, either warn+sleep and try again, or fail. 
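The `refresh_config` plumbing reduces to two pieces: a short-timeout `reqwest` client and a bounded retry loop (the diff's loop body continues just below). A condensed sketch, with a `ping` closure standing in for the generated client's `post_reload_config` call:

```rust
use anyhow::{Context, Result};
use std::time::Duration;

const CLIENT_TIMEOUT: Duration = Duration::from_secs(5);
const SLEEP_BETWEEN_RETRIES: Duration = Duration::from_secs(10);
const NUM_RETRIES: usize = 3;

// Short-timeout client: both the connect phase and the whole request are
// capped, so a wedged server can't stall the refresh indefinitely.
fn build_client() -> Result<reqwest::Client> {
    reqwest::Client::builder()
        .connect_timeout(CLIENT_TIMEOUT)
        .timeout(CLIENT_TIMEOUT)
        .build()
        .context("failed to construct reqwest Client")
}

// Bounded retry loop; `ping` stands in for the real request made through
// the generated wicketd client.
async fn refresh_with_retries<F, Fut>(mut ping: F) -> Result<()>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<()>>,
{
    let mut attempt = 0;
    loop {
        attempt += 1;
        // If we succeed, we're done.
        let Err(err) = ping().await else { return Ok(()) };
        if attempt < NUM_RETRIES {
            tokio::time::sleep(SLEEP_BETWEEN_RETRIES).await;
        } else {
            return Err(err).context("failed to contact server");
        }
    }
}
```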
+ if attempt < NUM_RETRIES { + slog::warn!( + log, + "failed to refresh wicketd config \ + (attempt {attempt} of {NUM_RETRIES}); \ + will retry after {SLEEP_BETWEEN_RETRIES:?}"; + "err" => %DisplayErrorChain::new(&err), + ); + tokio::time::sleep(SLEEP_BETWEEN_RETRIES).await; + } else { + slog::error!( + log, + "failed to refresh wicketd config \ + (tried {NUM_RETRIES} times)"; + "err" => %DisplayErrorChain::new(&err), + ); + return Err(err).context("failed to contact wicketd"); + } + } + } } diff --git a/wicketd/src/preflight_check/uplink.rs b/wicketd/src/preflight_check/uplink.rs index d94baf1995..25411f17a5 100644 --- a/wicketd/src/preflight_check/uplink.rs +++ b/wicketd/src/preflight_check/uplink.rs @@ -775,10 +775,8 @@ fn build_port_settings( LinkSettings { addrs, params: LinkCreate { - // TODO we should take these parameters too - // https://github.com/oxidecomputer/omicron/issues/3061 - autoneg: false, - kr: false, + autoneg: uplink.autoneg, + kr: false, // NOTE: kr does not apply to user-configurable links fec, speed, lane: Some(LinkId(0)), diff --git a/wicketd/src/rss_config.rs b/wicketd/src/rss_config.rs index 0aaea427f3..f654597d81 100644 --- a/wicketd/src/rss_config.rs +++ b/wicketd/src/rss_config.rs @@ -548,6 +548,7 @@ fn validate_rack_network_config( PortFec::None => BaPortFec::None, PortFec::Rs => BaPortFec::Rs, }, + autoneg: config.autoneg, }) .collect(), bgp: config diff --git a/wicketd/src/update_tracker.rs b/wicketd/src/update_tracker.rs index f4b5db2476..7faaa08a28 100644 --- a/wicketd/src/update_tracker.rs +++ b/wicketd/src/update_tracker.rs @@ -41,7 +41,6 @@ use installinator_common::InstallinatorSpec; use installinator_common::M2Slot; use installinator_common::WriteOutput; use omicron_common::api::external::SemverVersion; -use omicron_common::backoff; use omicron_common::update::ArtifactHash; use slog::error; use slog::info; @@ -103,12 +102,22 @@ struct SpUpdateData { } #[derive(Debug)] -struct UploadTrampolinePhase2ToMgsStatus { - hash: ArtifactHash, - // The upload task retries forever until it succeeds, so we don't need to - // keep a "tried but failed" variant here; we just need to know the ID of - // the uploaded image once it's done. - uploaded_image_id: Option, +enum UploadTrampolinePhase2ToMgsStatus { + Running { hash: ArtifactHash }, + Done { hash: ArtifactHash, uploaded_image_id: HostPhase2RecoveryImageId }, + Failed(Arc), +} + +impl UploadTrampolinePhase2ToMgsStatus { + fn hash(&self) -> Option { + match self { + UploadTrampolinePhase2ToMgsStatus::Running { hash } + | UploadTrampolinePhase2ToMgsStatus::Done { hash, .. } => { + Some(*hash) + } + UploadTrampolinePhase2ToMgsStatus::Failed(_) => None, + } + } } #[derive(Debug)] @@ -172,14 +181,19 @@ impl UpdateTracker { } /// Starts a fake update that doesn't perform any steps, but simply waits - /// for a watch receiver to resolve. + /// for a receiver to resolve. + /// + /// The inner sender will be signalled once the update is completed. 
#[doc(hidden)] pub async fn start_fake_update( &self, sps: BTreeSet, - watch_receiver: watch::Receiver<()>, + fake_step_receiver: oneshot::Receiver>, ) -> Result<(), Vec> { - let imp = FakeUpdateDriver { watch_receiver, log: self.log.clone() }; + let imp = FakeUpdateDriver { + fake_step_receiver: Some(fake_step_receiver), + log: self.log.clone(), + }; self.start_impl(sps, Some(imp)).await } @@ -308,9 +322,8 @@ impl UpdateTracker { ) -> UploadTrampolinePhase2ToMgs { let artifact = plan.trampoline_phase_2.clone(); let (status_tx, status_rx) = - watch::channel(UploadTrampolinePhase2ToMgsStatus { + watch::channel(UploadTrampolinePhase2ToMgsStatus::Running { hash: artifact.data.hash(), - uploaded_image_id: None, }); let task = tokio::spawn(upload_trampoline_phase_2_to_mgs( self.mgs_client.clone(), @@ -426,8 +439,8 @@ impl<'tr> SpawnUpdateDriver for RealSpawnUpdateDriver<'tr> { // this artifact? If not, cancel the old task (which // might still be trying to upload) and start a new one // with our current image. - if prev.status.borrow().hash - != plan.trampoline_phase_2.data.hash() + if prev.status.borrow().hash() + != Some(plan.trampoline_phase_2.data.hash()) { // It does _not_ match - we have a new plan with a // different trampoline image. If the old task is @@ -507,7 +520,7 @@ impl<'tr> SpawnUpdateDriver for RealSpawnUpdateDriver<'tr> { /// waits for a [`watch::Receiver`] to resolve. #[derive(Debug)] struct FakeUpdateDriver { - watch_receiver: watch::Receiver<()>, + fake_step_receiver: Option>>, log: Logger, } @@ -531,22 +544,24 @@ impl SpawnUpdateDriver for FakeUpdateDriver { let engine = UpdateEngine::new(&log, sender); let abort_handle = engine.abort_handle(); - let mut watch_receiver = self.watch_receiver.clone(); + let fake_step_receiver = self + .fake_step_receiver + .take() + .expect("fake step receiver is only taken once"); let task = tokio::spawn(async move { // The step component and ID have been chosen arbitrarily here -- // they aren't important. - engine + let final_sender_handle = engine .new_step( UpdateComponent::Host, UpdateStepId::RunningInstallinator, "Fake step that waits for receiver to resolve", move |_cx| async move { - // This will resolve as soon as the watch sender - // (typically a test) sends a value over the watch - // channel. - _ = watch_receiver.changed().await; - StepSuccess::new(()).into() + // This will resolve as soon as the sender (typically a + // test) sends a value over the channel. + let ret = fake_step_receiver.await; + StepSuccess::new(ret).into() }, ) .register(); @@ -558,16 +573,36 @@ impl SpawnUpdateDriver for FakeUpdateDriver { } }); - match engine.execute().await { - Ok(_cx) => (), - Err(err) => { - error!(log, "update failed"; "err" => %err); - } - } + let engine_res = engine.execute().await; // Wait for all events to be received and written to the event // buffer. event_receiving_task.await.expect("event receiving task panicked"); + + // Finally, notify the receiving end of the inner sender: this + // indicates that the update is done. + match engine_res { + Ok(cx) => { + info!(log, "fake update completed successfully"); + let final_sender = + final_sender_handle.into_value(cx.token()).await; + match final_sender { + Ok(sender) => { + if let Err(_) = sender.send(()) { + warn!(log, "failed to send final value"); + } + } + Err(error) => { + // This occurs if the fake_step_receiver's sender + // side was closed. Nothing to do here but warn. 
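The fake driver's channel dance is easier to see in isolation: the test hands the fake step a `oneshot::Sender`, and the driver fires it when the engine finishes. A minimal sketch (hypothetical `main`, not the test's actual harness):

```rust
use tokio::sync::oneshot;

// Two-stage handshake: the test unblocks the fake step by sending it a
// second sender, and the driver replies on that sender once it is done.
#[tokio::main(flavor = "multi_thread")]
async fn main() {
    let (step_tx, step_rx) = oneshot::channel::<oneshot::Sender<()>>();

    // "Driver" side: wait to be unblocked, then report completion.
    let driver = tokio::spawn(async move {
        let final_tx = step_rx.await.expect("test kept its sender open");
        // ... the real driver runs the update engine here ...
        let _ = final_tx.send(());
    });

    // "Test" side: unblock the step, then wait for completion.
    let (final_tx, final_rx) = oneshot::channel();
    assert!(step_tx.send(final_tx).is_ok(), "driver dropped the receiver");
    final_rx.await.expect("driver reported completion");
    driver.await.unwrap();
}
```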
+ warn!(log, "failed to get final sender: {}", error); + } + } + } + Err(error) => { + error!(log, "fake update failed: {}", error); + } + } }); SpUpdateData { task, abort_handle, event_buffer } @@ -1147,19 +1182,38 @@ impl UpdateDriver { // We expect this loop to run just once, but iterate just in // case the image ID doesn't get populated the first time. loop { + match &*upload_trampoline_phase_2_to_mgs.borrow_and_update() + { + UploadTrampolinePhase2ToMgsStatus::Running { .. } => { + // fall through to `.changed()` below + }, + UploadTrampolinePhase2ToMgsStatus::Done { + uploaded_image_id, + .. + } => { + return StepSuccess::new( + uploaded_image_id.clone(), + ).into(); + } + UploadTrampolinePhase2ToMgsStatus::Failed(error) => { + let error = Arc::clone(error); + return Err(UpdateTerminalError::TrampolinePhase2UploadFailed { + error, + }); + } + } + + // `upload_trampoline_phase_2_to_mgs` holds onto the sending + // half of this channel until all receivers are gone, so the + // only way we can fail to receive here is if that task + // panicked (which would abort our process) or was cancelled + // (because a new TUF repo has been uploaded), in which case + // we should fail the current update. upload_trampoline_phase_2_to_mgs.changed().await.map_err( |_recv_err| { - UpdateTerminalError::TrampolinePhase2UploadFailed + UpdateTerminalError::TrampolinePhase2UploadCancelled } )?; - - if let Some(image_id) = upload_trampoline_phase_2_to_mgs - .borrow() - .uploaded_image_id - .as_ref() - { - return StepSuccess::new(image_id.clone()).into(); - } } }, ).register(); @@ -2149,59 +2203,68 @@ async fn upload_trampoline_phase_2_to_mgs( status: watch::Sender, log: Logger, ) { - let data = artifact.data; - let hash = data.hash(); - let upload_task = move || { - let mgs_client = mgs_client.clone(); - let data = data.clone(); - - async move { - let image_stream = data.reader_stream().await.map_err(|e| { - // TODO-correctness If we get an I/O error opening the file - // associated with `data`, is it actually a transient error? If - // we change this to `permanent` we'll have to do some different - // error handling below and at our call site to retry. We - // _shouldn't_ get errors from `reader_stream()` in general, so - // it's probably okay either way? - backoff::BackoffError::transient(format!("{e:#}")) - })?; - mgs_client - .recovery_host_phase2_upload(reqwest::Body::wrap_stream( - image_stream, - )) - .await - .map_err(|e| backoff::BackoffError::transient(e.to_string())) - } - }; + // We make at most 3 attempts to upload the trampoline to our local MGS, + // sleeping briefly between attempts if we fail. 
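The three-variant status enum is consumed by a check-then-wait loop over a watch channel, as in the driver hunk above. The pattern in miniature, with `Option<u32>` standing in for the real status type:

```rust
use tokio::sync::watch;

// Check the current value first (borrow_and_update marks it seen), and
// only await changed() while the value is still non-terminal; the same
// shape as the trampoline-status loop above.
async fn wait_for_done(
    mut rx: watch::Receiver<Option<u32>>,
) -> Result<u32, &'static str> {
    loop {
        if let Some(value) = *rx.borrow_and_update() {
            return Ok(value);
        }
        // changed() errors only if the sender was dropped, i.e. the
        // producing task went away before reaching a terminal state.
        rx.changed().await.map_err(|_| "sender dropped")?;
    }
}
```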
+ const MAX_ATTEMPTS: usize = 3; + const SLEEP_BETWEEN_ATTEMPTS: Duration = Duration::from_secs(1); + + let mut attempt = 1; + let final_status = loop { + let image_stream = match artifact.data.reader_stream().await { + Ok(stream) => stream, + Err(err) => { + error!( + log, "failed to read trampoline phase 2"; + "err" => #%err, + ); + break UploadTrampolinePhase2ToMgsStatus::Failed(Arc::new( + err.context("failed to read trampoline phase 2"), + )); + } + }; - let log_failure = move |err, delay| { - warn!( - log, - "failed to upload trampoline phase 2 to MGS, will retry in {:?}", - delay; - "err" => %err, - ); + match mgs_client + .recovery_host_phase2_upload(reqwest::Body::wrap_stream( + image_stream, + )) + .await + { + Ok(response) => { + break UploadTrampolinePhase2ToMgsStatus::Done { + hash: artifact.data.hash(), + uploaded_image_id: response.into_inner(), + }; + } + Err(err) => { + if attempt < MAX_ATTEMPTS { + error!( + log, "failed to upload trampoline phase 2 to MGS; \ + will retry after {SLEEP_BETWEEN_ATTEMPTS:?}"; + "attempt" => attempt, + "err" => %DisplayErrorChain::new(&err), + ); + tokio::time::sleep(SLEEP_BETWEEN_ATTEMPTS).await; + attempt += 1; + continue; + } else { + error!( + log, "failed to upload trampoline phase 2 to MGS; \ + giving up"; + "attempt" => attempt, + "err" => %DisplayErrorChain::new(&err), + ); + break UploadTrampolinePhase2ToMgsStatus::Failed(Arc::new( + anyhow::Error::new(err) + .context("failed to upload trampoline phase 2"), + )); + } + } + } }; - // retry_policy_internal_service_aggressive() retries forever, so we can - // unwrap this call to retry_notify - let uploaded_image_id = backoff::retry_notify( - backoff::retry_policy_internal_service_aggressive(), - upload_task, - log_failure, - ) - .await - .unwrap() - .into_inner(); - - // Notify all receivers that we've uploaded the image. - _ = status.send(UploadTrampolinePhase2ToMgsStatus { - hash, - uploaded_image_id: Some(uploaded_image_id), - }); - - // Wait for all receivers to be gone before we exit, so they don't get recv - // errors unless we're cancelled. + // Send our final status, then wait for all receivers to be gone before we + // exit, so they don't get recv errors unless we're cancelled. + status.send_replace(final_status); status.closed().await; } diff --git a/wicketd/tests/integration_tests/updates.rs b/wicketd/tests/integration_tests/updates.rs index fb1637f44e..611d81c7f5 100644 --- a/wicketd/tests/integration_tests/updates.rs +++ b/wicketd/tests/integration_tests/updates.rs @@ -17,7 +17,7 @@ use omicron_common::{ api::internal::nexus::KnownArtifactKind, update::{ArtifactHashId, ArtifactKind}, }; -use tokio::sync::watch; +use tokio::sync::oneshot; use update_engine::NestedError; use uuid::Uuid; use wicket::OutputKind; @@ -31,7 +31,9 @@ use wicketd_client::types::{ StartUpdateParams, }; -#[tokio::test] +// See documentation for extract_nested_artifact_pair in update_plan.rs for why +// multi_thread is required. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_updates() { let gateway = gateway_setup::test_setup("test_updates", SpPort::One).await; let wicketd_testctx = WicketdTestContext::setup(gateway).await; @@ -48,7 +50,7 @@ async fn test_updates() { ]) .expect("args parsed correctly"); - args.exec(log).expect("assemble command completed successfully"); + args.exec(log).await.expect("assemble command completed successfully"); // Read the archive and upload it to the server. 
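On the producer side, the rewritten upload task lands on exactly one terminal value and publishes it once. A sketch of that shape, with a placeholder `u64` image ID and `do_upload` standing in for the MGS call:

```rust
use std::sync::Arc;
use tokio::sync::watch;

#[derive(Debug)]
enum Status {
    Running,
    Done { image_id: u64 }, // stand-in for the real image ID type
    Failed(Arc<anyhow::Error>),
}

// Drive the operation to a terminal state, publish it, then stay alive
// until every receiver has gone away so none of them see a recv error.
async fn run(status: watch::Sender<Status>) {
    let final_status = match do_upload().await {
        Ok(image_id) => Status::Done { image_id },
        Err(err) => Status::Failed(Arc::new(err)),
    };
    // send_replace publishes even if no receiver is currently watching.
    status.send_replace(final_status);
    status.closed().await;
}

async fn do_upload() -> anyhow::Result<u64> {
    Ok(42) // placeholder for the recovery_host_phase2_upload call
}
```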
let zip_bytes = @@ -175,15 +177,15 @@ async fn test_updates() { StepEventKind::ExecutionFailed { failed_step, .. } => { // TODO: obviously we shouldn't stop here, get past more of the // update process in this test. - assert_eq!(failed_step.info.component, UpdateComponent::Rot); + assert_eq!(failed_step.info.component, UpdateComponent::Host); } other => { panic!("unexpected terminal event kind: {other:?}"); } } - // Try starting the update again -- this should fail because we require that update state is - // cleared before starting a new one. + // Try starting the update again -- this should fail because we require that + // update state is cleared before starting a new one. { let error = wicketd_testctx .wicketd_client @@ -195,8 +197,8 @@ async fn test_updates() { ); let error_str = error.to_string(); assert!( - // Errors lose type information across the OpenAPI boundary, so sadly we have to match on - // the error string. + // Errors lose type information across the OpenAPI boundary, so + // sadly we have to match on the error string. error_str.contains("existing update data found"), "unexpected error: {error_str}" ); @@ -258,7 +260,9 @@ async fn test_updates() { wicketd_testctx.teardown().await; } -#[tokio::test] +// See documentation for extract_nested_artifact_pair in update_plan.rs for why +// multi_thread is required. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_installinator_fetch() { let gateway = gateway_setup::test_setup("test_updates", SpPort::One).await; let wicketd_testctx = WicketdTestContext::setup(gateway).await; @@ -275,7 +279,7 @@ async fn test_installinator_fetch() { ]) .expect("args parsed correctly"); - args.exec(log).expect("assemble command completed successfully"); + args.exec(log).await.expect("assemble command completed successfully"); // Read the archive and upload it to the server. let zip_bytes = @@ -391,7 +395,9 @@ async fn test_installinator_fetch() { wicketd_testctx.teardown().await; } -#[tokio::test] +// See documentation for extract_nested_artifact_pair in update_plan.rs for why +// multi_thread is required. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_update_races() { let gateway = gateway_setup::test_setup( "test_artifact_upload_while_updating", @@ -412,7 +418,7 @@ async fn test_update_races() { ]) .expect("args parsed correctly"); - args.exec(log).expect("assemble command completed successfully"); + args.exec(log).await.expect("assemble command completed successfully"); // Read the archive and upload it to the server. let zip_bytes = @@ -430,7 +436,7 @@ async fn test_update_races() { }; let sps: BTreeSet<_> = vec![sp].into_iter().collect(); - let (sender, receiver) = watch::channel(()); + let (sender, receiver) = oneshot::channel(); wicketd_testctx .server .update_tracker @@ -449,7 +455,7 @@ async fn test_update_races() { // Also try starting another fake update, which should fail -- we don't let updates be started // if there's current update state. { - let (_, receiver) = watch::channel(()); + let (_, receiver) = oneshot::channel(); let err = wicketd_testctx .server .update_tracker @@ -464,9 +470,10 @@ async fn test_update_races() { } // Unblock the update, letting it run to completion. 
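The new test attributes are not cosmetic: `block_in_place` panics on the default current-thread test runtime, so any test whose call graph reaches the extraction path must opt into the multi-threaded flavor. For instance:

```rust
// block_in_place panics with "can call blocking only when running on the
// multi-threaded runtime" under #[tokio::test] without a flavor override.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn requires_multi_thread() {
    tokio::task::block_in_place(|| {
        // Synchronous extraction work would happen here.
    });
}
```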
- sender.send(()).expect("receiver kept open by update engine"); + let (final_sender, final_receiver) = oneshot::channel(); + sender.send(final_sender).expect("receiver kept open by update engine"); + final_receiver.await.expect("update engine completed successfully"); - // Ensure that the event buffer indicates completion. let event_buffer = wicketd_testctx .wicketd_client .get_update_sp(&SpType::Sled, 0) diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 4d416eca02..f462fd5b6d 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -14,7 +14,9 @@ publish = false ### BEGIN HAKARI SECTION [dependencies] +ahash = { version = "0.8.6" } anyhow = { version = "1.0.75", features = ["backtrace"] } +base16ct = { version = "0.2.0", default-features = false, features = ["alloc"] } bit-set = { version = "0.5.3" } bit-vec = { version = "0.6.3" } bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } @@ -33,9 +35,12 @@ crossbeam-epoch = { version = "0.9.15" } crossbeam-utils = { version = "0.8.16" } crossterm = { version = "0.27.0", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } +der = { version = "0.7.8", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } diesel = { version = "2.1.4", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } digest = { version = "0.10.7", features = ["mac", "oid", "std"] } either = { version = "1.9.0" } +elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } +ff = { version = "0.13.0", default-features = false, features = ["alloc"] } flate2 = { version = "1.0.28" } futures = { version = "0.3.29" } futures-channel = { version = "0.3.29", features = ["sink"] } @@ -47,8 +52,10 @@ futures-util = { version = "0.3.29", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "2739c18e80697aa6bc235c935176d14b4d757ee9", features = ["std"] } generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } +group = { version = "0.13.0", default-features = false, features = ["alloc"] } hashbrown = { version = "0.13.2" } hex = { version = "0.4.3", features = ["serde"] } +hmac = { version = "0.12.1", default-features = false, features = ["reset"] } hyper = { version = "0.14.27", features = ["full"] } indexmap = { version = "2.1.0", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } @@ -64,7 +71,8 @@ num-bigint = { version = "0.4.4", features = ["rand"] } num-integer = { version = "0.1.45", features = ["i128"] } num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } num-traits = { version = "0.2.16", features = ["i128", "libm"] } -openapiv3 = { version = "2.0.0-rc.1", default-features = false, features = ["skip_serializing_defaults"] } +openapiv3 = { version = "2.0.0", default-features = false, features = ["skip_serializing_defaults"] } +pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] } petgraph = { version = "0.6.4", features = ["serde-1"] } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] 
} ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } @@ -76,24 +84,25 @@ regex = { version = "1.10.2" } regex-automata = { version = "0.4.3", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8.2" } reqwest = { version = "0.11.22", features = ["blocking", "json", "rustls-tls", "stream"] } -ring = { version = "0.16.20", features = ["std"] } +ring = { version = "0.17.7", features = ["std"] } schemars = { version = "0.8.13", features = ["bytes", "chrono", "uuid1"] } semver = { version = "1.0.20", features = ["serde"] } -serde = { version = "1.0.192", features = ["alloc", "derive", "rc"] } -serde_json = { version = "1.0.108", features = ["raw_value"] } +serde = { version = "1.0.193", features = ["alloc", "derive", "rc"] } +serde_json = { version = "1.0.108", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } -signature = { version = "2.1.0", default-features = false, features = ["digest", "rand_core", "std"] } -similar = { version = "2.2.1", features = ["inline", "unicode"] } +similar = { version = "2.3.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } +snafu = { version = "0.7.5", features = ["futures"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.5.0" } syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extra-traits", "fold", "full", "visit"] } syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.32", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } -tokio = { version = "1.33.0", features = ["full", "test-util"] } +tokio = { version = "1.35.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.14", features = ["net"] } +tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } toml = { version = "0.7.8" } toml_edit-647d43efb71741da = { package = "toml_edit", version = "0.21.0", features = ["serde"] } tracing = { version = "0.1.37", features = ["log"] } @@ -101,13 +110,16 @@ trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.13" } unicode-normalization = { version = "0.1.22" } usdt = { version = "0.3.5" } -uuid = { version = "1.5.0", features = ["serde", "v4"] } +uuid = { version = "1.6.1", features = ["serde", "v4"] } yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } -zeroize = { version = "1.6.0", features = ["std", "zeroize_derive"] } +zerocopy = { version = "0.7.26", features = ["derive", "simd"] } +zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [build-dependencies] +ahash = { version = "0.8.6" } anyhow = { version = "1.0.75", features = ["backtrace"] } +base16ct = { version = "0.2.0", default-features = false, features = ["alloc"] } bit-set = { version = "0.5.3" } bit-vec = { version = "0.6.3" } bitflags-dff4ba8e3ae991db = { package = "bitflags", version = "1.3.2" } @@ -126,9 +138,12 @@ crossbeam-epoch = { version = "0.9.15" } crossbeam-utils = { version = "0.8.16" } crossterm = { version = 
"0.27.0", features = ["event-stream", "serde"] } crypto-common = { version = "0.1.6", default-features = false, features = ["getrandom", "std"] } +der = { version = "0.7.8", default-features = false, features = ["derive", "flagset", "oid", "pem", "std"] } diesel = { version = "2.1.4", features = ["chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", "network-address", "postgres", "r2d2", "serde_json", "uuid"] } digest = { version = "0.10.7", features = ["mac", "oid", "std"] } either = { version = "1.9.0" } +elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } +ff = { version = "0.13.0", default-features = false, features = ["alloc"] } flate2 = { version = "1.0.28" } futures = { version = "0.3.29" } futures-channel = { version = "0.3.29", features = ["sink"] } @@ -140,8 +155,10 @@ futures-util = { version = "0.3.29", features = ["channel", "io", "sink"] } gateway-messages = { git = "https://github.com/oxidecomputer/management-gateway-service", rev = "2739c18e80697aa6bc235c935176d14b4d757ee9", features = ["std"] } generic-array = { version = "0.14.7", default-features = false, features = ["more_lengths", "zeroize"] } getrandom = { version = "0.2.10", default-features = false, features = ["js", "rdrand", "std"] } +group = { version = "0.13.0", default-features = false, features = ["alloc"] } hashbrown = { version = "0.13.2" } hex = { version = "0.4.3", features = ["serde"] } +hmac = { version = "0.12.1", default-features = false, features = ["reset"] } hyper = { version = "0.14.27", features = ["full"] } indexmap = { version = "2.1.0", features = ["serde"] } inout = { version = "0.1.3", default-features = false, features = ["std"] } @@ -157,7 +174,8 @@ num-bigint = { version = "0.4.4", features = ["rand"] } num-integer = { version = "0.1.45", features = ["i128"] } num-iter = { version = "0.1.43", default-features = false, features = ["i128"] } num-traits = { version = "0.2.16", features = ["i128", "libm"] } -openapiv3 = { version = "2.0.0-rc.1", default-features = false, features = ["skip_serializing_defaults"] } +openapiv3 = { version = "2.0.0", default-features = false, features = ["skip_serializing_defaults"] } +pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] } petgraph = { version = "0.6.4", features = ["serde-1"] } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } @@ -169,15 +187,15 @@ regex = { version = "1.10.2" } regex-automata = { version = "0.4.3", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8.2" } reqwest = { version = "0.11.22", features = ["blocking", "json", "rustls-tls", "stream"] } -ring = { version = "0.16.20", features = ["std"] } +ring = { version = "0.17.7", features = ["std"] } schemars = { version = "0.8.13", features = ["bytes", "chrono", "uuid1"] } semver = { version = "1.0.20", features = ["serde"] } -serde = { version = "1.0.192", features = ["alloc", "derive", "rc"] } -serde_json = { version = "1.0.108", features = ["raw_value"] } +serde = { version = "1.0.193", features = ["alloc", "derive", "rc"] } +serde_json = { version = "1.0.108", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } -signature = { version = "2.1.0", default-features = false, 
features = ["digest", "rand_core", "std"] } -similar = { version = "2.2.1", features = ["inline", "unicode"] } +similar = { version = "2.3.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } +snafu = { version = "0.7.5", features = ["futures"] } spin = { version = "0.9.8" } string_cache = { version = "0.8.7" } subtle = { version = "2.5.0" } @@ -185,9 +203,10 @@ syn-dff4ba8e3ae991db = { package = "syn", version = "1.0.109", features = ["extr syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.32", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time = { version = "0.3.27", features = ["formatting", "local-offset", "macros", "parsing"] } time-macros = { version = "0.2.13", default-features = false, features = ["formatting", "parsing"] } -tokio = { version = "1.33.0", features = ["full", "test-util"] } +tokio = { version = "1.35.0", features = ["full", "test-util"] } tokio-postgres = { version = "0.7.10", features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } tokio-stream = { version = "0.1.14", features = ["net"] } +tokio-util = { version = "0.7.10", features = ["codec", "io-util"] } toml = { version = "0.7.8" } toml_edit-647d43efb71741da = { package = "toml_edit", version = "0.21.0", features = ["serde"] } tracing = { version = "0.1.37", features = ["log"] } @@ -195,68 +214,75 @@ trust-dns-proto = { version = "0.22.0" } unicode-bidi = { version = "0.3.13" } unicode-normalization = { version = "0.1.22" } usdt = { version = "0.3.5" } -uuid = { version = "1.5.0", features = ["serde", "v4"] } +uuid = { version = "1.6.1", features = ["serde", "v4"] } yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } -zeroize = { version = "1.6.0", features = ["std", "zeroize_derive"] } +zerocopy = { version = "0.7.26", features = ["derive", "simd"] } +zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [target.x86_64-unknown-linux-gnu.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } -once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-unknown-linux-gnu.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } -once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", 
features = ["net", "os-ext"] } -once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } -once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.aarch64-apple-darwin.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } -once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.aarch64-apple-darwin.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } -once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } [target.x86_64-unknown-illumos.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } -once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = ["fs", "termios"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] } [target.x86_64-unknown-illumos.build-dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.0", default-features = false, features = ["std"] } +errno = { version = "0.3.2", default-features = false, features = ["std"] } hyper-rustls = { version = "0.24.2" } -mio = { version = "0.8.8", features = ["net", "os-ext"] } -once_cell = { version = "1.18.0", features = ["unstable"] } -rustix = { version = "0.38.9", features = 
["fs", "termios"] } +mio = { version = "0.8.9", features = ["net", "os-ext"] } +once_cell = { version = "1.19.0", features = ["unstable"] } +rustix = { version = "0.38.25", features = ["fs", "termios"] } toml_datetime = { version = "0.6.5", default-features = false, features = ["serde"] } toml_edit-cdcf2f9584511fe6 = { package = "toml_edit", version = "0.19.15", features = ["serde"] }