diff --git a/.github/buildomat/jobs/ci-tools.sh b/.github/buildomat/jobs/ci-tools.sh deleted file mode 100755 index 4c58731e249..00000000000 --- a/.github/buildomat/jobs/ci-tools.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash -#: -#: name = "helios / CI tools" -#: variety = "basic" -#: target = "helios-2.0" -#: rust_toolchain = "1.72.1" -#: output_rules = [ -#: "=/work/end-to-end-tests/*.gz", -#: "=/work/caboose-util.gz", -#: "=/work/tufaceous.gz", -#: "=/work/commtest", -#: "=/work/permslip.gz", -#: ] -#: access_repos = [ -#: "oxidecomputer/permission-slip", -#: "oxidecomputer/sshauth" -#: ] - -set -o errexit -set -o pipefail -set -o xtrace - -cargo --version -rustc --version - -ptime -m ./tools/install_builder_prerequisites.sh -yp - -########## end-to-end-tests ########## - -banner end-to-end-tests - -# -# Reduce debuginfo just to line tables. -# -export CARGO_PROFILE_DEV_DEBUG=1 -export CARGO_PROFILE_TEST_DEBUG=1 -export CARGO_INCREMENTAL=0 - -ptime -m cargo build --locked -p end-to-end-tests --tests --bin bootstrap \ - --message-format json-render-diagnostics >/tmp/output.end-to-end.json - -mkdir -p /work -ptime -m cargo build --locked -p end-to-end-tests --tests --bin commtest -cp target/debug/commtest /work/commtest - -mkdir -p /work/end-to-end-tests -for p in target/debug/bootstrap $(/opt/ooce/bin/jq -r 'select(.profile.test) | .executable' /tmp/output.end-to-end.json); do - # shellcheck disable=SC2094 - ptime -m gzip < "$p" > /work/end-to-end-tests/"$(basename "$p").gz" -done - -########## caboose-util ########## - -banner caboose-util - -ptime -m cargo build --locked -p caboose-util --release -ptime -m gzip < target/release/caboose-util > /work/caboose-util.gz - -########## tufaceous ########## - -banner tufaceous - -ptime -m cargo build --locked -p tufaceous --release -ptime -m gzip < target/release/tufaceous > /work/tufaceous.gz - -########## permission-slip ########## - -banner permission-slip - -source "./tools/permslip_commit" -git init /work/permission-slip-build -pushd /work/permission-slip-build -git remote add origin https://github.com/oxidecomputer/permission-slip.git -ptime -m git fetch --depth 1 origin "$COMMIT" -git checkout FETCH_HEAD -ptime -m cargo build --locked -p permission-slip-client --release -ptime -m gzip < target/release/permslip > /work/permslip.gz diff --git a/.github/buildomat/jobs/deploy.sh b/.github/buildomat/jobs/deploy.sh index 8d3e94cd5ec..c947a05e10a 100755 --- a/.github/buildomat/jobs/deploy.sh +++ b/.github/buildomat/jobs/deploy.sh @@ -20,8 +20,6 @@ #: [dependencies.package] #: job = "helios / package" #: -#: [dependencies.ci-tools] -#: job = "helios / CI tools" set -o errexit set -o pipefail @@ -144,13 +142,6 @@ pfexec chown build:build /opt/oxide/work cd /opt/oxide/work ptime -m tar xvzf /input/package/work/package.tar.gz -cp /input/package/work/zones/* out/ -mv out/nexus-single-sled.tar.gz out/nexus.tar.gz -mkdir tests -for p in /input/ci-tools/work/end-to-end-tests/*.gz; do - ptime -m gunzip < "$p" > "tests/$(basename "${p%.gz}")" - chmod a+x "tests/$(basename "${p%.gz}")" -done # Ask buildomat for the range of extra addresses that we're allowed to use, and # break them up into the ranges we need. 
@@ -354,7 +345,7 @@ echo "Waited for nexus: ${retry}s" export RUST_BACKTRACE=1 export E2E_TLS_CERT IPPOOL_START IPPOOL_END -eval "$(./tests/bootstrap)" +eval "$(./target/debug/bootstrap)" export OXIDE_HOST OXIDE_TOKEN # @@ -387,7 +378,6 @@ done /usr/oxide/oxide --resolve "$OXIDE_RESOLVE" --cacert "$E2E_TLS_CERT" \ image promote --project images --image debian11 -rm ./tests/bootstrap for test_bin in tests/*; do ./"$test_bin" done diff --git a/.github/buildomat/jobs/host-image.sh b/.github/buildomat/jobs/host-image.sh deleted file mode 100755 index 2f4d146a488..00000000000 --- a/.github/buildomat/jobs/host-image.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/bash -#: -#: name = "helios / build OS images" -#: variety = "basic" -#: target = "helios-2.0" -#: rust_toolchain = "1.72.1" -#: output_rules = [ -#: "=/work/helios/upload/os-host.tar.gz", -#: "=/work/helios/upload/os-trampoline.tar.gz", -#: ] -#: access_repos = [ -#: "oxidecomputer/amd-apcb", -#: "oxidecomputer/amd-efs", -#: "oxidecomputer/amd-firmware", -#: "oxidecomputer/amd-flash", -#: "oxidecomputer/amd-host-image-builder", -#: "oxidecomputer/boot-image-tools", -#: "oxidecomputer/chelsio-t6-roms", -#: "oxidecomputer/compliance-pilot", -#: "oxidecomputer/facade", -#: "oxidecomputer/helios", -#: "oxidecomputer/helios-omicron-brand", -#: "oxidecomputer/helios-omnios-build", -#: "oxidecomputer/helios-omnios-extra", -#: "oxidecomputer/nanobl-rs", -#: ] -#: -#: [dependencies.package] -#: job = "helios / package" -#: -#: [[publish]] -#: series = "image" -#: name = "os.tar.gz" -#: from_output = "/work/helios/image/output/os.tar.gz" -#: - -set -o errexit -set -o pipefail -set -o xtrace - -cargo --version -rustc --version - -TOP=$PWD - -source "$TOP/tools/include/force-git-over-https.sh" - -# Check out helios into /work/helios -HELIOSDIR=/work/helios -git clone https://github.com/oxidecomputer/helios.git "$HELIOSDIR" -cd "$HELIOSDIR" -# Record the branch and commit in the output -git status --branch --porcelain=2 -# Setting BUILD_OS to no makes setup skip repositories we don't need for -# building the OS itself (we are just building an image from already built OS). -BUILD_OS=no gmake setup - -# Commands that "helios-build" would ask us to run (either explicitly or -# implicitly, to avoid an error). -rc=0 -pfexec pkg install -q /system/zones/brand/omicron1/tools || rc=$? -case $rc in - # `man pkg` notes that exit code 4 means no changes were made because - # there is nothing to do; that's fine. Any other exit code is an error. - 0 | 4) ;; - *) exit $rc ;; -esac - -pfexec zfs create -p "rpool/images/$USER" - - -# TODO: Consider importing zones here too? 
- -cd "$TOP" -OUTPUTDIR="$HELIOSDIR/upload" -mkdir "$OUTPUTDIR" - -banner OS -./tools/build-host-image.sh -B \ - -S /input/package/work/zones/switch-asic.tar.gz \ - "$HELIOSDIR" \ - /input/package/work/global-zone-packages.tar.gz - -mv "$HELIOSDIR/image/output/os.tar.gz" "$OUTPUTDIR/os-host.tar.gz" - -banner Trampoline - -./tools/build-host-image.sh -R \ - "$HELIOSDIR" \ - /input/package/work/trampoline-global-zone-packages.tar.gz - -mv "$HELIOSDIR/image/output/os.tar.gz" "$OUTPUTDIR/os-trampoline.tar.gz" - diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index 11a5a1a0eef..63e5e1ce716 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -3,24 +3,11 @@ #: name = "helios / package" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.1" +#: rust_toolchain = "1.77.2" #: output_rules = [ -#: "=/work/version.txt", #: "=/work/package.tar.gz", -#: "=/work/global-zone-packages.tar.gz", -#: "=/work/trampoline-global-zone-packages.tar.gz", -#: "=/work/zones/*.tar.gz", #: ] #: -#: [[publish]] -#: series = "image" -#: name = "global-zone-packages" -#: from_output = "/work/global-zone-packages.tar.gz" -#: -#: [[publish]] -#: series = "image" -#: name = "trampoline-global-zone-packages" -#: from_output = "/work/trampoline-global-zone-packages.tar.gz" set -o errexit set -o pipefail @@ -32,17 +19,6 @@ rustc --version WORK=/work pfexec mkdir -p $WORK && pfexec chown $USER $WORK -# -# Generate the version for control plane artifacts here. We use `0.git` as the -# prerelease field because it comes before `alpha`. -# -# In this job, we stamp the version into packages installed in the host and -# trampoline global zone images. -# -COMMIT=$(git rev-parse HEAD) -VERSION="8.0.0-0.ci+git${COMMIT:0:11}" -echo "$VERSION" >/work/version.txt - ptime -m ./tools/install_builder_prerequisites.sh -yp ptime -m ./tools/ci_download_softnpu_machinery @@ -52,88 +28,33 @@ ptime -m cargo run --locked --release --bin omicron-package -- \ -t test target create -i standard -m non-gimlet -s softnpu -r single-sled ptime -m cargo run --locked --release --bin omicron-package -- \ -t test package +mapfile -t packages \ + < <(cargo run --locked --release --bin omicron-package -- -t test list-outputs) # Build the xtask binary used by the deploy job ptime -m cargo build --locked --release -p xtask -# Assemble some utilities into a tarball that can be used by deployment -# phases of buildomat. +# Build the end-to-end tests +# Reduce debuginfo just to line tables. +export CARGO_PROFILE_DEV_DEBUG=line-tables-only +export CARGO_PROFILE_TEST_DEBUG=line-tables-only +ptime -m cargo build --locked -p end-to-end-tests --tests --bin bootstrap \ + --message-format json-render-diagnostics >/tmp/output.end-to-end.json +mkdir tests +/opt/ooce/bin/jq -r 'select(.profile.test) | .executable' /tmp/output.end-to-end.json \ + | xargs -I {} -t cp {} tests/ + +# Assemble these outputs and some utilities into a tarball that can be used by +# deployment phases of buildomat. files=( - out/*.tar out/target/test out/npuzone/* package-manifest.toml smf/sled-agent/non-gimlet/config.toml target/release/omicron-package target/release/xtask + target/debug/bootstrap + tests/* ) - -ptime -m tar cvzf $WORK/package.tar.gz "${files[@]}" - -tarball_src_dir="$(pwd)/out/versioned" -stamp_packages() { - for package in "$@"; do - cargo run --locked --release --bin omicron-package -- stamp "$package" "$VERSION" - done -} - -# Keep the single-sled Nexus zone around for the deploy job. 
(The global zone -# build below overwrites the file.) -mv out/nexus.tar.gz out/nexus-single-sled.tar.gz - -# Build necessary for the global zone -ptime -m cargo run --locked --release --bin omicron-package -- \ - -t host target create -i standard -m gimlet -s asic -r multi-sled -ptime -m cargo run --locked --release --bin omicron-package -- \ - -t host package -stamp_packages omicron-sled-agent mg-ddm-gz propolis-server overlay oxlog pumpkind-gz - -# Create global zone package @ $WORK/global-zone-packages.tar.gz -ptime -m ./tools/build-global-zone-packages.sh "$tarball_src_dir" $WORK - -# Non-Global Zones - -# Assemble Zone Images into their respective output locations. -# -# Zones that are included into another are intentionally omitted from this list -# (e.g., the switch zone tarballs contain several other zone tarballs: dendrite, -# mg-ddm, etc.). -# -# Note that when building for a real gimlet, `propolis-server` and `switch-*` -# should be included in the OS ramdisk. -mkdir -p $WORK/zones -zones=( - out/clickhouse.tar.gz - out/clickhouse_keeper.tar.gz - out/cockroachdb.tar.gz - out/crucible-pantry-zone.tar.gz - out/crucible-zone.tar.gz - out/external-dns.tar.gz - out/internal-dns.tar.gz - out/nexus.tar.gz - out/nexus-single-sled.tar.gz - out/oximeter.tar.gz - out/propolis-server.tar.gz - out/switch-*.tar.gz - out/ntp.tar.gz - out/omicron-gateway-softnpu.tar.gz - out/omicron-gateway-asic.tar.gz - out/overlay.tar.gz - out/probe.tar.gz -) -cp "${zones[@]}" $WORK/zones/ - -# -# Global Zone files for Trampoline image -# - -# Build necessary for the trampoline image -ptime -m cargo run --locked --release --bin omicron-package -- \ - -t recovery target create -i trampoline -ptime -m cargo run --locked --release --bin omicron-package -- \ - -t recovery package -stamp_packages installinator mg-ddm-gz - -# Create trampoline global zone package @ $WORK/trampoline-global-zone-packages.tar.gz -ptime -m ./tools/build-trampoline-global-zone-packages.sh "$tarball_src_dir" $WORK +ptime -m tar cvzf $WORK/package.tar.gz "${files[@]}" "${packages[@]}" diff --git a/.github/buildomat/jobs/tuf-repo.sh b/.github/buildomat/jobs/tuf-repo.sh index 89928a0030a..2ed1ae08c34 100755 --- a/.github/buildomat/jobs/tuf-repo.sh +++ b/.github/buildomat/jobs/tuf-repo.sh @@ -3,20 +3,29 @@ #: name = "helios / build TUF repo" #: variety = "basic" #: target = "helios-2.0" +#: rust_toolchain = "1.77.2" #: output_rules = [ -#: "=/work/manifest*.toml", -#: "=/work/repo-*.zip", -#: "=/work/repo-*.zip.sha256.txt", +#: "=/work/manifest.toml", +#: "=/work/repo.zip", +#: "=/work/repo.zip.sha256.txt", +#: "%/work/*.log", +#: ] +#: access_repos = [ +#: "oxidecomputer/amd-apcb", +#: "oxidecomputer/amd-efs", +#: "oxidecomputer/amd-firmware", +#: "oxidecomputer/amd-flash", +#: "oxidecomputer/amd-host-image-builder", +#: "oxidecomputer/boot-image-tools", +#: "oxidecomputer/chelsio-t6-roms", +#: "oxidecomputer/compliance-pilot", +#: "oxidecomputer/facade", +#: "oxidecomputer/helios", +#: "oxidecomputer/helios-omicron-brand", +#: "oxidecomputer/helios-omnios-build", +#: "oxidecomputer/helios-omnios-extra", +#: "oxidecomputer/nanobl-rs", #: ] -#: -#: [dependencies.ci-tools] -#: job = "helios / CI tools" -#: -#: [dependencies.package] -#: job = "helios / package" -#: -#: [dependencies.host] -#: job = "helios / build OS images" #: #: [[publish]] #: series = "rot-all" @@ -26,105 +35,34 @@ #: [[publish]] #: series = "rot-all" #: name = "repo.zip" -#: from_output = "/work/repo-rot-all.zip" +#: from_output = "/work/repo.zip" #: #: [[publish]] #: 
series = "rot-all" #: name = "repo.zip.sha256.txt" -#: from_output = "/work/repo-rot-all.zip.sha256.txt" +#: from_output = "/work/repo.zip.sha256.txt" #: set -o errexit set -o pipefail set -o xtrace -TOP=$PWD -VERSION=$(< /input/package/work/version.txt) - -for bin in caboose-util tufaceous permslip; do - ptime -m gunzip < /input/ci-tools/work/$bin.gz > /work/$bin - chmod a+x /work/$bin -done - -# -# We do two things here: -# 1. Run `omicron-package stamp` on all the zones. -# 2. Run `omicron-package unpack` to switch from "package-name.tar.gz" to "service_name.tar.gz". -# -mkdir /work/package -pushd /work/package -tar xf /input/package/work/package.tar.gz out package-manifest.toml target/release/omicron-package -target/release/omicron-package -t default target create -i standard -m gimlet -s asic -r multi-sled -ln -s /input/package/work/zones/* out/ -rm out/switch-softnpu.tar.gz # not used when target switch=asic -rm out/omicron-gateway-softnpu.tar.gz # not used when target switch=asic -rm out/nexus-single-sled.tar.gz # only used for deploy tests -for zone in out/*.tar.gz; do - target/release/omicron-package stamp "$(basename "${zone%.tar.gz}")" "$VERSION" -done -mv out/versioned/* out/ -OMICRON_NO_UNINSTALL=1 target/release/omicron-package unpack --out install -popd - -# Generate a throwaway repository key. -python3 -c 'import secrets; open("/work/key.txt", "w").write("ed25519:%s\n" % secrets.token_hex(32))' -read -r TUFACEOUS_KEY /work/manifest.toml <>/work/manifest.toml <>/work/manifest.toml <> /work/manifest.toml - done < $TOP/tools/permslip_$name - popd -} +rc=0 +pfexec pkg install -q /system/zones/brand/omicron1/tools || rc=$? +case $rc in + # `man pkg` notes that exit code 4 means no changes were made because + # there is nothing to do; that's fine. Any other exit code is an error. 
+ 0 | 4) ;; + *) exit $rc ;; +esac -mkdir /work/hubris -pushd /work/hubris -download_region_manifests https://permslip-staging.corp.oxide.computer staging -download_region_manifests https://signer-us-west.corp.oxide.computer production -popd +pfexec zfs create -p "rpool/images/$USER/host" +pfexec zfs create -p "rpool/images/$USER/recovery" -/work/tufaceous assemble --no-generate-key /work/manifest.toml /work/repo-rot-all.zip -digest -a sha256 /work/repo-rot-all.zip > /work/repo-rot-all.zip.sha256.txt +cargo run --release --bin omicron-releng -- --output-dir /work diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 45cc926d3c3..d6207dc0f06 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@c2927f0c5b5adc6a76bc4a7847bc6e0503754bed # v2 + uses: taiki-e/install-action@2f990e9c484f0590cb76a07296e9677b417493e9 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date diff --git a/Cargo.lock b/Cargo.lock index 8f90846cf9e..667a4392399 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -785,20 +785,11 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "caboose-util" -version = "0.1.0" -dependencies = [ - "anyhow", - "hubtools", - "omicron-workspace-hack", -] - [[package]] name = "camino" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" dependencies = [ "serde", ] @@ -843,7 +834,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -2547,6 +2538,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" dependencies = [ "autocfg", + "tokio", ] [[package]] @@ -2903,7 +2895,7 @@ dependencies = [ "once_cell", "pathdiff", "petgraph", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "smallvec 1.13.1", @@ -4548,7 +4540,7 @@ dependencies = [ "rand 0.8.5", "ref-cast", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "sled-agent-client", @@ -4625,7 +4617,7 @@ dependencies = [ "rustls 0.22.4", "samael", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "serde_urlencoded", @@ -4899,6 +4891,7 @@ dependencies = [ "base64 0.22.1", "chrono", "clap", + "derive-where", "dns-service-client", "futures", "gateway-client", @@ -4911,6 +4904,7 @@ dependencies = [ "omicron-workspace-hack", "openssl", "parse-display", + "proptest", "schemars", "serde", "serde_json", @@ -4921,6 +4915,7 @@ dependencies = [ "steno", "strum", "tabled", + "test-strategy", "thiserror", "uuid", ] @@ -5238,7 +5233,7 @@ dependencies = [ "regress", "reqwest", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_human_bytes", "serde_json", @@ -5452,7 +5447,7 @@ dependencies = [ "rustls-pemfile 2.1.2", "samael", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "serde_urlencoded", @@ -5557,7 +5552,7 @@ dependencies = [ "rayon", "reqwest", "ring 0.17.8", - "semver 1.0.22", + "semver 1.0.23", "serde", "sled-hardware", "slog", @@ -5589,6 +5584,37 @@ dependencies = [ "thiserror", ] +[[package]] +name = "omicron-releng" +version = 
"0.1.0" +dependencies = [ + "anyhow", + "camino", + "camino-tempfile", + "cargo_metadata", + "chrono", + "clap", + "fs-err", + "futures", + "hex", + "omicron-common", + "omicron-workspace-hack", + "omicron-zone-package", + "once_cell", + "reqwest", + "semver 1.0.23", + "serde", + "sha2", + "shell-words", + "slog", + "slog-async", + "slog-term", + "tar", + "tokio", + "toml 0.8.12", + "tufaceous-lib", +] + [[package]] name = "omicron-rpaths" version = "0.1.0" @@ -5662,7 +5688,7 @@ dependencies = [ "rcgen", "reqwest", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_human_bytes", "serde_json", @@ -5775,6 +5801,7 @@ dependencies = [ "elliptic-curve", "ff", "flate2", + "fs-err", "futures", "futures-channel", "futures-core", @@ -5812,11 +5839,8 @@ dependencies = [ "pem-rfc7468", "petgraph", "postgres-types", - "ppv-lite86", "predicates", "proc-macro2", - "rand 0.8.5", - "rand_chacha 0.3.1", "regex", "regex-automata 0.4.5", "regex-syntax 0.8.2", @@ -5825,7 +5849,7 @@ dependencies = [ "rustix", "schemars", "scopeguard", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "sha2", @@ -5856,7 +5880,6 @@ dependencies = [ "yasna", "zerocopy 0.7.32", "zeroize", - "zip", ] [[package]] @@ -5878,7 +5901,7 @@ dependencies = [ "hex", "reqwest", "ring 0.16.20", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_derive", "serde_json", @@ -6607,9 +6630,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", "indexmap 2.2.6", @@ -6926,9 +6949,9 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ac2cf0f2e4f42b49f5ffd07dae8d746508ef7526c13940e5f524012ae6c6550" +checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", "syn 2.0.60", @@ -6979,9 +7002,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" +checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" dependencies = [ "unicode-ident", ] @@ -7512,18 +7535,18 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4846d4c50d1721b1a3bef8af76924eef20d5e723647333798c1b519b3a9473f" +checksum = "ccf0a6f84d5f1d581da8b41b47ec8600871962f2a528115b542b362d4b744931" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc" +checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", @@ -7919,7 +7942,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.22", + "semver 1.0.23", ] [[package]] @@ -8279,9 +8302,9 @@ checksum = 
"d4f410fedcf71af0345d7607d246e7ad15faaadd49d240ee3b24e5dc21a820ac" [[package]] name = "semver" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -8366,9 +8389,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" dependencies = [ "itoa", "ryu", @@ -9901,7 +9924,7 @@ dependencies = [ "home", "once_cell", "regex", - "semver 1.0.22", + "semver 1.0.23", "walkdir", ] diff --git a/Cargo.toml b/Cargo.toml index c217d4df056..f0ea4811e9b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,6 @@ members = [ "api_identity", "bootstore", - "caboose-util", "certificates", "clients/bootstrap-agent-client", "clients/ddm-admin-client", @@ -21,6 +20,7 @@ members = [ "dev-tools/omicron-dev", "dev-tools/oxlog", "dev-tools/reconfigurator-cli", + "dev-tools/releng", "dev-tools/xtask", "dns-server", "end-to-end-tests", @@ -84,7 +84,6 @@ members = [ default-members = [ "bootstore", - "caboose-util", "certificates", "clients/bootstrap-agent-client", "clients/ddm-admin-client", @@ -103,6 +102,7 @@ default-members = [ "dev-tools/omicron-dev", "dev-tools/oxlog", "dev-tools/reconfigurator-cli", + "dev-tools/releng", # Do not include xtask in the list of default members, because this causes # hakari to not work as well and build times to be longer. # See omicron#4392. @@ -228,6 +228,7 @@ bytes = "1.6.0" camino = { version = "1.1", features = ["serde1"] } camino-tempfile = "1.1.1" cancel-safe-futures = "0.1.5" +cargo_metadata = "0.18.1" chacha20poly1305 = "0.10.1" ciborium = "0.2.2" cfg-if = "1.0" @@ -371,12 +372,12 @@ paste = "1.0.15" percent-encoding = "2.3.1" peg = "0.8.3" pem = "3.0" -petgraph = "0.6.4" +petgraph = "0.6.5" postgres-protocol = "0.6.6" predicates = "3.1.0" pretty_assertions = "1.4.0" pretty-hex = "0.4.1" -prettyplease = { version = "0.2.19", features = ["verbatim"] } +prettyplease = { version = "0.2.20", features = ["verbatim"] } proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } @@ -406,10 +407,10 @@ rustyline = "14.0.0" samael = { version = "0.0.15", features = ["xmlsec"] } schemars = "0.8.16" secrecy = "0.8.0" -semver = { version = "1.0.22", features = ["std", "serde"] } +semver = { version = "1.0.23", features = ["std", "serde"] } serde = { version = "1.0", default-features = false, features = [ "derive", "rc" ] } serde_human_bytes = { git = "http://github.com/oxidecomputer/serde_human_bytes", branch = "main" } -serde_json = "1.0.116" +serde_json = "1.0.117" serde_path_to_error = "0.1.16" serde_tokenstream = "0.2" serde_urlencoded = "0.7.1" @@ -491,7 +492,7 @@ wicket-common = { path = "wicket-common" } wicketd-client = { path = "clients/wicketd-client" } zeroize = { version = "1.7.0", features = ["zeroize_derive", "std"] } zip = { version = "0.6.6", default-features = false, features = ["deflate","bzip2"] } -zone = { version = "0.3", default-features = false, features = ["async", "sync"] } +zone = { version = "0.3", default-features = false, features = 
["async"] } # newtype-uuid is set to default-features = false because we don't want to # depend on std in omicron-uuid-kinds (in case a no-std library wants to access diff --git a/caboose-util/Cargo.toml b/caboose-util/Cargo.toml deleted file mode 100644 index ceff70b41db..00000000000 --- a/caboose-util/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "caboose-util" -version = "0.1.0" -edition = "2021" -license = "MPL-2.0" - -[lints] -workspace = true - -[dependencies] -anyhow.workspace = true -hubtools.workspace = true -omicron-workspace-hack.workspace = true diff --git a/caboose-util/src/main.rs b/caboose-util/src/main.rs deleted file mode 100644 index 36851cd36d4..00000000000 --- a/caboose-util/src/main.rs +++ /dev/null @@ -1,32 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -// Copyright 2023 Oxide Computer Company - -use anyhow::{bail, Context, Result}; -use hubtools::{Caboose, RawHubrisArchive}; - -fn main() -> Result<()> { - let mut args = std::env::args().skip(1); - match args.next().context("subcommand required")?.as_str() { - "read-board" => { - let caboose = read_caboose(args.next())?; - println!("{}", std::str::from_utf8(caboose.board()?)?); - Ok(()) - } - "read-version" => { - let caboose = read_caboose(args.next())?; - println!("{}", std::str::from_utf8(caboose.version()?)?); - Ok(()) - } - unknown => bail!("unknown command {}", unknown), - } -} - -fn read_caboose(path: Option) -> Result { - let archive = RawHubrisArchive::load( - &path.context("path to hubris archive required")?, - )?; - Ok(archive.read_caboose()?) -} diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 93c9727b0a8..f385c0b4fac 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -1234,6 +1234,13 @@ impl DiskState { pub struct Ipv4Net(pub ipnetwork::Ipv4Network); impl Ipv4Net { + /// Constructs a new `Ipv4Net` representing a single IP. + pub fn single(ip: Ipv4Addr) -> Self { + Ipv4Net( + ipnetwork::Ipv4Network::new(ip, 32).expect("32 is within range"), + ) + } + /// Return `true` if this IPv4 subnetwork is from an RFC 1918 private /// address space. pub fn is_private(&self) -> bool { @@ -1301,6 +1308,13 @@ impl Ipv6Net { /// The prefix length for all VPC Sunets pub const VPC_SUBNET_IPV6_PREFIX_LENGTH: u8 = 64; + /// Constructs a new `Ipv6Net` representing a single IPv6 address. + pub fn single(ip: Ipv6Addr) -> Self { + Ipv6Net( + ipnetwork::Ipv6Network::new(ip, 128).expect("128 is within range"), + ) + } + /// Return `true` if this subnetwork is in the IPv6 Unique Local Address /// range defined in RFC 4193, e.g., `fd00:/8` pub fn is_unique_local(&self) -> bool { @@ -1436,6 +1450,14 @@ pub enum IpNet { } impl IpNet { + /// Constructs a new `IpNet` representing a single IP. + pub fn single(ip: IpAddr) -> Self { + match ip { + IpAddr::V4(ip) => IpNet::V4(Ipv4Net::single(ip)), + IpAddr::V6(ip) => IpNet::V6(Ipv6Net::single(ip)), + } + } + /// Return the underlying address. pub fn ip(&self) -> IpAddr { match self { @@ -1508,39 +1530,22 @@ impl From for IpNet { } } +// NOTE: We deliberately do *NOT* implement `From for IpNet`. +// This is because there are many ways to convert an address into a network. +// See https://github.com/oxidecomputer/omicron/issues/5687. 
+
 impl From<Ipv4Net> for IpNet {
     fn from(n: Ipv4Net) -> IpNet {
         IpNet::V4(n)
     }
 }
 
-impl From<Ipv4Addr> for IpNet {
-    fn from(n: Ipv4Addr) -> IpNet {
-        IpNet::V4(Ipv4Net(ipnetwork::Ipv4Network::from(n)))
-    }
-}
-
 impl From<Ipv6Net> for IpNet {
     fn from(n: Ipv6Net) -> IpNet {
         IpNet::V6(n)
     }
 }
 
-impl From<Ipv6Addr> for IpNet {
-    fn from(n: Ipv6Addr) -> IpNet {
-        IpNet::V6(Ipv6Net(ipnetwork::Ipv6Network::from(n)))
-    }
-}
-
-impl From<IpAddr> for IpNet {
-    fn from(n: IpAddr) -> IpNet {
-        match n {
-            IpAddr::V4(v4) => IpNet::from(v4),
-            IpAddr::V6(v6) => IpNet::from(v6),
-        }
-    }
-}
-
 impl std::fmt::Display for IpNet {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs
index 6bd40d3ff0a..9d9ff083e43 100644
--- a/common/src/api/internal/shared.rs
+++ b/common/src/api/internal/shared.rs
@@ -608,7 +608,7 @@ mod tests {
         assert_eq!(
             parsed,
             AllowedSourceIps::try_from(vec![
-                IpNet::from(Ipv4Addr::LOCALHOST),
+                IpNet::V4(Ipv4Net::single(Ipv4Addr::LOCALHOST)),
                 IpNet::V4(Ipv4Net(
                     Ipv4Network::new(Ipv4Addr::new(10, 0, 0, 0), 24).unwrap()
                 )),
diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out
index 0aa47f27126..8c68b0f4315 100644
--- a/dev-tools/omdb/tests/successes.out
+++ b/dev-tools/omdb/tests/successes.out
@@ -426,7 +426,7 @@ task: "metrics_producer_gc"
   currently executing: no
   last completed activation: , triggered by an explicit signal
     started at (s ago) and ran for ms
-    last completion reported error: metric producer gc disabled (omicron#5284)
+warning: unknown background task: "metrics_producer_gc" (don't know how to interpret details: Object {"expiration": String(""), "pruned": Array []})
 
 task: "phantom_disks"
   configured period: every 30s
diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out
index 3ffe579a233..15fc9d322e0 100644
--- a/dev-tools/omdb/tests/usage_errors.out
+++ b/dev-tools/omdb/tests/usage_errors.out
@@ -279,6 +279,8 @@ Options:
 
           Possible values:
           - commissioned:   All sleds that are currently part of the control plane cluster
+          - decommissioned: All sleds that were previously part of the control plane cluster
+                            but have been decommissioned
           - discretionary:  Sleds that are eligible for discretionary services
           - in-service:     Sleds that are in service (even if they might not be eligible for
                             discretionary services)
diff --git a/dev-tools/reconfigurator-cli/src/main.rs b/dev-tools/reconfigurator-cli/src/main.rs
index 72add6ce8c4..f088c9d97d7 100644
--- a/dev-tools/reconfigurator-cli/src/main.rs
+++ b/dev-tools/reconfigurator-cli/src/main.rs
@@ -32,7 +32,9 @@ use nexus_types::inventory::SledRole;
 use omicron_common::api::external::Generation;
 use omicron_common::api::external::Name;
 use omicron_uuid_kinds::CollectionUuid;
+use omicron_uuid_kinds::GenericUuid;
 use omicron_uuid_kinds::SledUuid;
+use omicron_uuid_kinds::VnicUuid;
 use reedline::{Reedline, Signal};
 use std::collections::BTreeMap;
 use std::io::BufRead;
@@ -146,7 +148,8 @@ impl ReconfiguratorSim {
             .add_omicron_zone_external_ip(zone.id, external_ip)
             .context("adding omicron zone external IP")?;
         let nic = OmicronZoneNic {
-            id: nic.id,
+            // TODO-cleanup use `TypedUuid` everywhere
+            id: VnicUuid::from_untyped_uuid(nic.id),
             mac: nic.mac,
             ip: nic.ip,
             slot: nic.slot,
diff --git a/dev-tools/reconfigurator-cli/tests/test_basic.rs b/dev-tools/reconfigurator-cli/tests/test_basic.rs
index a8fd91f1563..1ae78487a38 100644
--- a/dev-tools/reconfigurator-cli/tests/test_basic.rs
+++ b/dev-tools/reconfigurator-cli/tests/test_basic.rs
@@ -56,7 +56,7 @@ type ControlPlaneTestContext =
 #[nexus_test]
 async fn test_blueprint_edit(cptestctx: &ControlPlaneTestContext) {
     // Setup
-    let nexus = &cptestctx.server.apictx().nexus;
+    let nexus = &cptestctx.server.server_context().nexus;
     let datastore = nexus.datastore();
     let log = &cptestctx.logctx.log;
     let opctx = OpContext::for_background(
diff --git a/dev-tools/releng/Cargo.toml b/dev-tools/releng/Cargo.toml
new file mode 100644
index 00000000000..19ede6c24db
--- /dev/null
+++ b/dev-tools/releng/Cargo.toml
@@ -0,0 +1,35 @@
+[package]
+name = "omicron-releng"
+version = "0.1.0"
+edition = "2021"
+license = "MPL-2.0"
+
+[dependencies]
+anyhow.workspace = true
+camino.workspace = true
+camino-tempfile.workspace = true
+cargo_metadata.workspace = true
+chrono.workspace = true
+clap.workspace = true
+fs-err = { workspace = true, features = ["tokio"] }
+futures.workspace = true
+hex.workspace = true
+omicron-common.workspace = true
+omicron-workspace-hack.workspace = true
+omicron-zone-package.workspace = true
+once_cell.workspace = true
+reqwest.workspace = true
+semver.workspace = true
+serde.workspace = true
+sha2.workspace = true
+shell-words.workspace = true
+slog.workspace = true
+slog-async.workspace = true
+slog-term.workspace = true
+tar.workspace = true
+tokio = { workspace = true, features = ["full"] }
+toml.workspace = true
+tufaceous-lib.workspace = true
+
+[lints]
+workspace = true
diff --git a/dev-tools/releng/src/cmd.rs b/dev-tools/releng/src/cmd.rs
new file mode 100644
index 00000000000..198eabf99ff
--- /dev/null
+++ b/dev-tools/releng/src/cmd.rs
@@ -0,0 +1,167 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use std::ffi::OsStr;
+use std::path::Path;
+use std::process::ExitStatus;
+use std::process::Output;
+use std::process::Stdio;
+use std::time::Instant;
+
+use anyhow::ensure;
+use anyhow::Context;
+use anyhow::Result;
+use slog::debug;
+use slog::Logger;
+
+/// Wrapper for `tokio::process::Command` where the builder methods take/return
+/// `self`, plus a number of convenience methods.
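(Editor's note: because the builder methods above consume and return `self`, call sites chain directly into the async convenience methods. A fragment illustrating the intended usage, modeled on how `main.rs` later in this diff shells out to git; the `workspace` and `logger` bindings are assumed to exist:)

```rust
// Assumed context: `workspace: Utf8PathBuf` and `logger: slog::Logger`.
let commit = Command::new("git")
    .args(["rev-parse", "HEAD"])
    .current_dir(&workspace)
    .ensure_stdout(&logger) // runs the command, requiring exit status 0
    .await?
    .trim()
    .to_owned();
```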
+pub(crate) struct Command {
+    inner: tokio::process::Command,
+}
+
+impl Command {
+    pub(crate) fn new(program: impl AsRef<OsStr>) -> Command {
+        Command { inner: tokio::process::Command::new(program) }
+    }
+
+    pub(crate) fn arg(mut self, arg: impl AsRef<OsStr>) -> Command {
+        self.inner.arg(arg);
+        self
+    }
+
+    pub(crate) fn args(
+        mut self,
+        args: impl IntoIterator<Item = impl AsRef<OsStr>>,
+    ) -> Command {
+        self.inner.args(args);
+        self
+    }
+
+    pub(crate) fn current_dir(mut self, dir: impl AsRef<Path>) -> Command {
+        self.inner.current_dir(dir);
+        self
+    }
+
+    pub(crate) fn env(
+        mut self,
+        key: impl AsRef<OsStr>,
+        value: impl AsRef<OsStr>,
+    ) -> Command {
+        self.inner.env(key, value);
+        self
+    }
+
+    pub(crate) fn env_remove(mut self, key: impl AsRef<OsStr>) -> Command {
+        self.inner.env_remove(key);
+        self
+    }
+
+    pub(crate) async fn is_success(mut self, logger: &Logger) -> Result<bool> {
+        self.inner
+            .stdin(Stdio::null())
+            .stdout(Stdio::inherit())
+            .stderr(Stdio::inherit());
+        Ok(xtrace(&mut self, logger).await?.status.success())
+    }
+
+    pub(crate) async fn ensure_success(
+        mut self,
+        logger: &Logger,
+    ) -> Result<()> {
+        self.inner
+            .stdin(Stdio::null())
+            .stdout(Stdio::inherit())
+            .stderr(Stdio::inherit());
+        let status = xtrace(&mut self, logger).await?.status;
+        check_status(self, status)
+    }
+
+    pub(crate) async fn ensure_stdout(
+        mut self,
+        logger: &Logger,
+    ) -> Result<String> {
+        self.inner
+            .stdin(Stdio::null())
+            .stdout(Stdio::piped())
+            .stderr(Stdio::inherit());
+        let output = xtrace(&mut self, logger).await?;
+        check_status(self, output.status)?;
+        String::from_utf8(output.stdout).context("command stdout was not UTF-8")
+    }
+
+    pub(crate) fn into_parts(self) -> (Description, tokio::process::Command) {
+        (Description { str: self.to_string() }, self.inner)
+    }
+}
+
+impl std::fmt::Display for Command {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let command = self.inner.as_std();
+        for (name, value) in command.get_envs() {
+            if let Some(value) = value {
+                write!(
+                    f,
+                    "{}={} ",
+                    shell_words::quote(&name.to_string_lossy()),
+                    shell_words::quote(&value.to_string_lossy())
+                )?;
+            }
+        }
+        write!(
+            f,
+            "{}",
+            shell_words::quote(&command.get_program().to_string_lossy())
+        )?;
+        for arg in command.get_args() {
+            write!(f, " {}", shell_words::quote(&arg.to_string_lossy()))?;
+        }
+        Ok(())
+    }
+}
+
+/// Returned from [`Command::into_parts`] for use in the `job` module.
+pub(crate) struct Description {
+    str: String,
+}
+
+impl Description {
+    pub(crate) fn check_status(&self, status: ExitStatus) -> Result<()> {
+        check_status(self, status)
+    }
+}
+
+impl std::fmt::Display for Description {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.str)
+    }
+}
+
+fn check_status(
+    command: impl std::fmt::Display,
+    status: ExitStatus,
+) -> Result<()> {
+    ensure!(status.success(), "command `{}` exited with {}", command, status);
+    Ok(())
+}
+
+async fn xtrace(command: &mut Command, logger: &Logger) -> Result<Output> {
+    command.inner.stdin(Stdio::null()).kill_on_drop(true);
+    debug!(logger, "running: {}", command);
+    let start = Instant::now();
+    let output = command
+        .inner
+        .spawn()
+        .with_context(|| format!("failed to exec `{}`", command))?
+ .wait_with_output() + .await + .with_context(|| format!("failed to wait on `{}`", command))?; + debug!( + logger, + "process exited with {} ({:?})", + output.status, + Instant::now().saturating_duration_since(start) + ); + Ok(output) +} diff --git a/dev-tools/releng/src/hubris.rs b/dev-tools/releng/src/hubris.rs new file mode 100644 index 00000000000..685a729a9f2 --- /dev/null +++ b/dev-tools/releng/src/hubris.rs @@ -0,0 +1,148 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::collections::BTreeMap; +use std::collections::HashMap; + +use anyhow::Context; +use anyhow::Result; +use camino::Utf8PathBuf; +use fs_err::tokio as fs; +use futures::future::TryFutureExt; +use omicron_common::api::external::SemverVersion; +use omicron_common::api::internal::nexus::KnownArtifactKind; +use semver::Version; +use serde::Deserialize; +use tufaceous_lib::assemble::DeserializedArtifactData; +use tufaceous_lib::assemble::DeserializedArtifactSource; +use tufaceous_lib::assemble::DeserializedFileArtifactSource; +use tufaceous_lib::assemble::DeserializedManifest; + +pub(crate) async fn fetch_hubris_artifacts( + base_url: &'static str, + client: reqwest::Client, + manifest_list: Utf8PathBuf, + output_dir: Utf8PathBuf, +) -> Result<()> { + macro_rules! zip { + ($expr:expr) => { + output_dir.join(format!("{}.zip", $expr)) + }; + } + + fs::create_dir_all(&output_dir).await?; + + // This could be parallelized with FuturesUnordered but in practice this + // takes less time than OS builds. + + let mut manifest = DeserializedManifest { + system_version: SemverVersion(Version::new(0, 0, 0)), + artifacts: BTreeMap::new(), + }; + + for line in fs::read_to_string(manifest_list).await?.lines() { + if let Some(hash) = line.split_whitespace().next() { + let data = fetch_hash(base_url, &client, hash).await?; + let str = String::from_utf8(data).with_context(|| { + format!("hubris artifact manifest {} was not UTF-8", hash) + })?; + let hash_manifest: Manifest = + toml::from_str(&str).with_context(|| { + format!( + "failed to deserialize hubris artifact manifest {}", + hash + ) + })?; + for (kind, artifacts) in hash_manifest.artifacts { + for artifact in artifacts { + let (source, hashes) = match artifact.source { + Source::File(file) => ( + DeserializedArtifactSource::File { + path: zip!(file.hash), + }, + vec![file.hash], + ), + Source::CompositeRot { archive_a, archive_b } => ( + DeserializedArtifactSource::CompositeRot { + archive_a: + DeserializedFileArtifactSource::File { + path: zip!(archive_a.hash), + }, + archive_b: + DeserializedFileArtifactSource::File { + path: zip!(archive_b.hash), + }, + }, + vec![archive_a.hash, archive_b.hash], + ), + }; + manifest.artifacts.entry(kind).or_default().push( + DeserializedArtifactData { + name: artifact.name, + version: artifact.version, + source, + }, + ); + for hash in hashes { + let data = fetch_hash(base_url, &client, &hash).await?; + fs::write(output_dir.join(zip!(hash)), data).await?; + } + } + } + } + } + + fs::write( + output_dir.join("manifest.toml"), + toml::to_string_pretty(&manifest)?.into_bytes(), + ) + .await?; + Ok(()) +} + +async fn fetch_hash( + base_url: &'static str, + client: &reqwest::Client, + hash: &str, +) -> Result> { + client + .get(format!("{}/artifact/{}", base_url, hash)) + .send() + .and_then(|response| response.json()) + .await + .with_context(|| { + format!( + "failed to fetch 
hubris artifact {} from {}", + hash, base_url + ) + }) +} + +// These structs are similar to `DeserializeManifest` and friends from +// tufaceous-lib, except that the source is a hash instead of a file path. This +// hash is used to download the artifact from Permission Slip. +#[derive(Deserialize)] +struct Manifest { + #[serde(rename = "artifact")] + artifacts: HashMap>, +} + +#[derive(Deserialize)] +struct Artifact { + name: String, + version: SemverVersion, + source: Source, +} + +#[derive(Deserialize)] +#[serde(tag = "kind", rename_all = "kebab-case")] +enum Source { + File(FileSource), + CompositeRot { archive_a: FileSource, archive_b: FileSource }, +} + +#[derive(Deserialize)] +struct FileSource { + hash: String, +} diff --git a/dev-tools/releng/src/job.rs b/dev-tools/releng/src/job.rs new file mode 100644 index 00000000000..dcb58a0b920 --- /dev/null +++ b/dev-tools/releng/src/job.rs @@ -0,0 +1,305 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! A quick-and-dirty job runner. +//! +//! Jobs are async functions given a name. All jobs must be described before the +//! jobs can be run (`Jobs::run_all` consumes the job runner). Jobs can depend +//! on other jobs, which is implemented via `tokio::sync::oneshot` channels; a +//! completed job sends a message to all registered receivers, which are waiting +//! on the messages in order to run. This essentially creates a DAG, except +//! instead of us having to keep track of it, we make it Tokio's problem. +//! +//! A `tokio::sync::Semaphore` is used to restrict the number of jobs to +//! `std::thread::available_parallelism`, except for a hardcoded list of +//! prioritized job names that are allowed to ignore this. + +use std::collections::HashMap; +use std::future::Future; +use std::process::Stdio; +use std::sync::Arc; +use std::time::Instant; + +use anyhow::anyhow; +use anyhow::Context; +use anyhow::Result; +use camino::Utf8Path; +use camino::Utf8PathBuf; +use fs_err::tokio::File; +use futures::future::BoxFuture; +use futures::future::FutureExt; +use futures::stream::FuturesUnordered; +use futures::stream::TryStreamExt; +use slog::info; +use slog::Logger; +use tokio::io::AsyncBufReadExt; +use tokio::io::AsyncRead; +use tokio::io::AsyncWrite; +use tokio::io::AsyncWriteExt; +use tokio::io::BufReader; +use tokio::sync::oneshot; +use tokio::sync::oneshot::error::RecvError; +use tokio::sync::Semaphore; + +use crate::cmd::Command; + +// We want these two jobs to run without delay because they take the longest +// amount of time, so we allow them to run without taking a permit first. 
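(Editor's note: to make the oneshot-channel DAG in the module comment concrete, here is a minimal self-contained sketch, not taken from this diff, of two jobs ordered through a channel:)

```rust
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    let (tx, rx) = oneshot::channel::<()>();
    let a = tokio::spawn(async move {
        // ... job A's work ...
        tx.send(()).ok(); // ignore failure if B was cancelled
    });
    let b = tokio::spawn(async move {
        // B parks here until A signals completion (or is dropped).
        if rx.await.is_ok() {
            // ... job B's work ...
        }
    });
    let _ = tokio::join!(a, b);
}
```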
+const PERMIT_NOT_REQUIRED: [&str; 2] = ["host-package", "host-image"];
+
+pub(crate) struct Jobs {
+    logger: Logger,
+    permits: Arc<Semaphore>,
+    log_dir: Utf8PathBuf,
+    map: HashMap<String, Job>,
+}
+
+struct Job {
+    future: BoxFuture<'static, Result<()>>,
+    wait_for: Vec<oneshot::Receiver<()>>,
+    notify: Vec<oneshot::Sender<()>>,
+}
+
+pub(crate) struct Selector<'a> {
+    jobs: &'a mut Jobs,
+    name: String,
+}
+
+impl Jobs {
+    pub(crate) fn new(
+        logger: &Logger,
+        permits: Arc<Semaphore>,
+        log_dir: &Utf8Path,
+    ) -> Jobs {
+        Jobs {
+            logger: logger.clone(),
+            permits,
+            log_dir: log_dir.to_owned(),
+            map: HashMap::new(),
+        }
+    }
+
+    pub(crate) fn push(
+        &mut self,
+        name: impl AsRef<str>,
+        future: impl Future<Output = Result<()>> + Send + 'static,
+    ) -> Selector<'_> {
+        let name = name.as_ref().to_owned();
+        assert!(!self.map.contains_key(&name), "duplicate job name {}", name);
+        self.map.insert(
+            name.clone(),
+            Job {
+                future: run_job(
+                    self.logger.clone(),
+                    self.permits.clone(),
+                    name.clone(),
+                    future,
+                )
+                .boxed(),
+                wait_for: Vec::new(),
+                notify: Vec::new(),
+            },
+        );
+        Selector { jobs: self, name }
+    }
+
+    pub(crate) fn push_command(
+        &mut self,
+        name: impl AsRef<str>,
+        command: Command,
+    ) -> Selector<'_> {
+        let name = name.as_ref().to_owned();
+        assert!(!self.map.contains_key(&name), "duplicate job name {}", name);
+        self.map.insert(
+            name.clone(),
+            Job {
+                future: spawn_with_output(
+                    command,
+                    self.logger.clone(),
+                    self.permits.clone(),
+                    name.clone(),
+                    self.log_dir.join(&name).with_extension("log"),
+                )
+                .boxed(),
+                wait_for: Vec::new(),
+                notify: Vec::new(),
+            },
+        );
+        Selector { jobs: self, name }
+    }
+
+    pub(crate) fn select(&mut self, name: impl AsRef<str>) -> Selector<'_> {
+        Selector { jobs: self, name: name.as_ref().to_owned() }
+    }
+
+    pub(crate) async fn run_all(self) -> Result<()> {
+        self.map
+            .into_values()
+            .map(Job::run)
+            .collect::<FuturesUnordered<_>>()
+            .try_collect::<()>()
+            .await
+    }
+}
+
+impl Job {
+    async fn run(self) -> Result<()> {
+        let result: Result<(), RecvError> = self
+            .wait_for
+            .into_iter()
+            .collect::<FuturesUnordered<_>>()
+            .try_collect::<()>()
+            .await;
+        result.map_err(|_| anyhow!("dependency failed"))?;
+
+        self.future.await?;
+        for sender in self.notify {
+            // Ignore the error here -- the only reason we should fail to send
+            // our message is if a task has failed or the user hit Ctrl-C, at
+            // which point a bunch of error logging is not particularly useful.
+            sender.send(()).ok();
+        }
+        Ok(())
+    }
+}
+
+impl<'a> Selector<'a> {
+    #[track_caller]
+    pub(crate) fn after(self, other: impl AsRef<str>) -> Self {
+        let (sender, receiver) = oneshot::channel();
+        self.jobs
+            .map
+            .get_mut(&self.name)
+            .expect("invalid job name")
+            .wait_for
+            .push(receiver);
+        self.jobs
+            .map
+            .get_mut(other.as_ref())
+            .expect("invalid job name")
+            .notify
+            .push(sender);
+        self
+    }
+}
+
+macro_rules! 
info_or_error { + ($logger:expr, $result:expr, $($tt:tt)*) => { + if $result.is_ok() { + ::slog::info!($logger, $($tt)*); + } else { + ::slog::error!($logger, $($tt)*); + } + }; +} + +async fn run_job( + logger: Logger, + permits: Arc, + name: String, + future: impl Future> + Send + 'static, +) -> Result<()> { + if !PERMIT_NOT_REQUIRED.contains(&name.as_str()) { + let _ = permits.acquire_owned().await?; + } + + info!(logger, "[{}] running task", name); + let start = Instant::now(); + let result = tokio::spawn(future).await?; + let duration = Instant::now().saturating_duration_since(start); + info_or_error!( + logger, + result, + "[{}] task {} ({:?})", + name, + if result.is_ok() { "succeeded" } else { "failed" }, + duration + ); + result +} + +async fn spawn_with_output( + command: Command, + logger: Logger, + permits: Arc, + name: String, + log_path: Utf8PathBuf, +) -> Result<()> { + if !PERMIT_NOT_REQUIRED.contains(&name.as_str()) { + let _ = permits.acquire_owned().await?; + } + + let (command_desc, mut command) = command.into_parts(); + + let log_file_1 = File::create(log_path).await?; + let log_file_2 = log_file_1.try_clone().await?; + + info!(logger, "[{}] running: {}", name, command_desc); + let start = Instant::now(); + let mut child = command + .kill_on_drop(true) + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .with_context(|| format!("failed to exec `{}`", command_desc))?; + + let stdout = spawn_reader( + format!("[{:>16}] ", name), + child.stdout.take().unwrap(), + tokio::io::stdout(), + log_file_1, + ); + let stderr = spawn_reader( + format!("[{:>16}] ", name), + child.stderr.take().unwrap(), + tokio::io::stderr(), + log_file_2, + ); + + let status = child.wait().await.with_context(|| { + format!("I/O error while waiting for job {:?} to complete", name) + })?; + let result = command_desc.check_status(status); + info_or_error!( + logger, + result, + "[{}] process exited with {} ({:?})", + name, + status, + Instant::now().saturating_duration_since(start) + ); + + // bubble up any errors from `spawn_reader` + stdout.await??; + stderr.await??; + + result +} + +fn spawn_reader( + prefix: String, + reader: impl AsyncRead + Send + Unpin + 'static, + mut terminal_writer: impl AsyncWrite + Send + Unpin + 'static, + logfile_writer: File, +) -> tokio::task::JoinHandle> { + let mut reader = BufReader::new(reader); + let mut logfile_writer = tokio::fs::File::from(logfile_writer); + let mut buf = prefix.into_bytes(); + let prefix_len = buf.len(); + tokio::spawn(async move { + loop { + buf.truncate(prefix_len); + // We have no particular control over the output from the child + // processes we run, so we read until a newline character without + // relying on valid UTF-8 output. + let size = reader.read_until(b'\n', &mut buf).await?; + if size == 0 { + return Ok(()); + } + terminal_writer.write_all(&buf).await?; + logfile_writer.write_all(&buf[prefix_len..]).await?; + } + }) +} diff --git a/dev-tools/releng/src/main.rs b/dev-tools/releng/src/main.rs new file mode 100644 index 00000000000..0fa43829313 --- /dev/null +++ b/dev-tools/releng/src/main.rs @@ -0,0 +1,734 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +mod cmd; +mod hubris; +mod job; +mod tuf; + +use std::sync::Arc; +use std::time::Duration; +use std::time::Instant; + +use anyhow::bail; +use anyhow::Context; +use anyhow::Result; +use camino::Utf8PathBuf; +use chrono::Utc; +use clap::Parser; +use fs_err::tokio as fs; +use omicron_zone_package::config::Config; +use once_cell::sync::Lazy; +use semver::Version; +use slog::debug; +use slog::error; +use slog::info; +use slog::Drain; +use slog::Logger; +use slog_term::FullFormat; +use slog_term::TermDecorator; +use tokio::sync::Semaphore; + +use crate::cmd::Command; +use crate::job::Jobs; + +/// The base version we're currently building. Build information is appended to +/// this later on. +/// +/// Under current policy, each new release is a major version bump, and +/// generally referred to only by the major version (e.g. 8.0.0 is referred +/// to as "v8", "version 8", or "release 8" to customers). The use of semantic +/// versioning is mostly to hedge for perhaps wanting something more granular in +/// the future. +const BASE_VERSION: Version = Version::new(8, 0, 0); + +#[derive(Debug, Clone, Copy)] +enum InstallMethod { + /// Unpack the tarball to `/opt/oxide/`, and install + /// `pkg/manifest.xml` (if it exists) to + /// `/lib/svc/manifest/site/.xml`. + Install, + /// Copy the tarball to `/opt/oxide/.tar.gz`. + Bundle, +} + +/// Packages to install or bundle in the host OS image. +const HOST_IMAGE_PACKAGES: [(&str, InstallMethod); 7] = [ + ("mg-ddm-gz", InstallMethod::Install), + ("omicron-sled-agent", InstallMethod::Install), + ("overlay", InstallMethod::Bundle), + ("oxlog", InstallMethod::Install), + ("propolis-server", InstallMethod::Bundle), + ("pumpkind-gz", InstallMethod::Install), + ("switch-asic", InstallMethod::Bundle), +]; +/// Packages to install or bundle in the recovery (trampoline) OS image. +const RECOVERY_IMAGE_PACKAGES: [(&str, InstallMethod); 2] = [ + ("installinator", InstallMethod::Install), + ("mg-ddm-gz", InstallMethod::Install), +]; +/// Packages to ship with the TUF repo. +const TUF_PACKAGES: [&str; 11] = [ + "clickhouse_keeper", + "clickhouse", + "cockroachdb", + "crucible-pantry-zone", + "crucible-zone", + "external-dns", + "internal-dns", + "nexus", + "ntp", + "oximeter", + "probe", +]; + +const HELIOS_REPO: &str = "https://pkg.oxide.computer/helios/2/dev/"; + +static WORKSPACE_DIR: Lazy = Lazy::new(|| { + // $CARGO_MANIFEST_DIR is at `.../omicron/dev-tools/releng` + let mut dir = + Utf8PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").expect( + "$CARGO_MANIFEST_DIR is not set; run this via `cargo xtask releng`", + )); + dir.pop(); + dir.pop(); + dir +}); + +#[derive(Parser)] +/// Run the Oxide release engineering process and produce a TUF repo that can be +/// used to update a rack. +/// +/// For more information, see `docs/releng.adoc` in the Omicron repository. +/// +/// Note that `--host-dataset` and `--recovery-dataset` must be set to different +/// values to build the two OS images in parallel. This is strongly recommended. 
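(Editor's note: the version policy documented above is assembled into a concrete `semver::Version` inside `main()` further down. A small illustration of the resulting shape; the commit hash here is made up:)

```rust
use semver::Version;

fn main() {
    let mut version = Version::new(8, 0, 0); // BASE_VERSION
    version.pre = "0.ci".parse().unwrap(); // or "0.local" outside CI
    version.build = "git0123456789a".parse().unwrap(); // "git" + 11 commit chars
    assert_eq!(version.to_string(), "8.0.0-0.ci+git0123456789a");
}
```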
+struct Args { + /// ZFS dataset to use for `helios-build` when building the host image + #[clap(long, default_value_t = Self::default_dataset("host"))] + host_dataset: String, + + /// ZFS dataset to use for `helios-build` when building the recovery + /// (trampoline) image + #[clap(long, default_value_t = Self::default_dataset("recovery"))] + recovery_dataset: String, + + /// Path to a Helios repository checkout (default: "helios" in the same + /// directory as "omicron") + #[clap(long, default_value_t = Self::default_helios_dir())] + helios_dir: Utf8PathBuf, + + /// Ignore the current HEAD of the Helios repository checkout + #[clap(long)] + ignore_helios_origin: bool, + + /// Output dir for TUF repo and log files + #[clap(long, default_value_t = Self::default_output_dir())] + output_dir: Utf8PathBuf, + + /// Path to the directory containing the rustup proxy `bin/cargo` (usually + /// set by Cargo) + #[clap(long, env = "CARGO_HOME")] + cargo_home: Option, + + /// Path to the git binary + #[clap(long, env = "GIT", default_value = "git")] + git_bin: Utf8PathBuf, + + /// Path to a pre-built omicron-package binary (skips building if set) + #[clap(long, env = "OMICRON_PACKAGE")] + omicron_package_bin: Option, +} + +impl Args { + fn default_dataset(name: &str) -> String { + format!( + "rpool/images/{}/{}", + std::env::var("LOGNAME").expect("$LOGNAME is not set"), + name + ) + } + + fn default_helios_dir() -> Utf8PathBuf { + WORKSPACE_DIR + .parent() + .expect("omicron is presumably not cloned at /") + .join("helios") + } + + fn default_output_dir() -> Utf8PathBuf { + WORKSPACE_DIR.join("out/releng") + } +} + +#[tokio::main] +async fn main() -> Result<()> { + let args = Args::parse(); + + let decorator = TermDecorator::new().build(); + let drain = FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).build().fuse(); + let logger = Logger::root(drain, slog::o!()); + + // Change the working directory to the workspace root. + debug!(logger, "changing working directory to {}", *WORKSPACE_DIR); + std::env::set_current_dir(&*WORKSPACE_DIR) + .context("failed to change working directory to workspace root")?; + + // Determine the target directory. + let target_dir = cargo_metadata::MetadataCommand::new() + .no_deps() + .exec() + .context("failed to get cargo metadata")? + .target_directory; + + // We build everything in Omicron with $CARGO, but we need to use the rustup + // proxy for Cargo when outside Omicron. + let rustup_cargo = match &args.cargo_home { + Some(path) => path.join("bin/cargo"), + None => Utf8PathBuf::from("cargo"), + }; + // `var_os` here is deliberate: if CARGO is set to a non-UTF-8 path we + // shouldn't do something confusing as a fallback. + let cargo = match std::env::var_os("CARGO") { + Some(path) => Utf8PathBuf::try_from(std::path::PathBuf::from(path)) + .context("$CARGO is not valid UTF-8")?, + None => rustup_cargo.clone(), + }; + + let permits = Arc::new(Semaphore::new( + std::thread::available_parallelism() + .context("couldn't get available parallelism")? + .into(), + )); + + let commit = Command::new(&args.git_bin) + .args(["rev-parse", "HEAD"]) + .ensure_stdout(&logger) + .await? + .trim() + .to_owned(); + + let mut version = BASE_VERSION.clone(); + // Differentiate between CI and local builds. We use `0.word` as the + // prerelease field because it comes before `alpha`. + version.pre = + if std::env::var_os("CI").is_some() { "0.ci" } else { "0.local" } + .parse()?; + // Set the build metadata to the current commit hash. 
+ let mut build = String::with_capacity(14); + build.push_str("git"); + build.extend(commit.chars().take(11)); + version.build = build.parse()?; + let version_str = version.to_string(); + info!(logger, "version: {}", version_str); + + let manifest = Arc::new(omicron_zone_package::config::parse_manifest( + &fs::read_to_string(WORKSPACE_DIR.join("package-manifest.toml")) + .await?, + )?); + let opte_version = + fs::read_to_string(WORKSPACE_DIR.join("tools/opte_version")).await?; + + let client = reqwest::ClientBuilder::new() + .connect_timeout(Duration::from_secs(15)) + .timeout(Duration::from_secs(15)) + .build() + .context("failed to build reqwest client")?; + + // PREFLIGHT ============================================================== + let mut preflight_ok = true; + + for package in HOST_IMAGE_PACKAGES + .into_iter() + .chain(RECOVERY_IMAGE_PACKAGES) + .map(|(package, _)| package) + .chain(TUF_PACKAGES) + { + if !manifest.packages.contains_key(package) { + error!( + logger, + "package {} to be installed in the OS image \ + is not listed in the package manifest", + package + ); + preflight_ok = false; + } + } + + // Ensure the Helios checkout exists + if args.helios_dir.exists() { + if !args.ignore_helios_origin { + // check that our helios clone is up to date + Command::new(&args.git_bin) + .arg("-C") + .arg(&args.helios_dir) + .args(["fetch", "--no-write-fetch-head", "origin", "master"]) + .ensure_success(&logger) + .await?; + let stdout = Command::new(&args.git_bin) + .arg("-C") + .arg(&args.helios_dir) + .args(["rev-parse", "HEAD", "origin/master"]) + .ensure_stdout(&logger) + .await?; + let mut lines = stdout.lines(); + let first = + lines.next().context("git-rev-parse output was empty")?; + if !lines.all(|line| line == first) { + error!( + logger, + "helios checkout at {0} is out-of-date; run \ + `git pull -C {0}`, or run omicron-releng with \ + --ignore-helios-origin or --helios-path", + shell_words::quote(args.helios_dir.as_str()) + ); + preflight_ok = false; + } + } + } else { + info!(logger, "cloning helios to {}", args.helios_dir); + Command::new(&args.git_bin) + .args(["clone", "https://github.com/oxidecomputer/helios.git"]) + .arg(&args.helios_dir) + .ensure_success(&logger) + .await?; + } + // Record the branch and commit in the output + Command::new(&args.git_bin) + .arg("-C") + .arg(&args.helios_dir) + .args(["status", "--branch", "--porcelain=2"]) + .ensure_success(&logger) + .await?; + + // Check that the omicron1 brand is installed + if !Command::new("pkg") + .args(["verify", "-q", "/system/zones/brand/omicron1/tools"]) + .is_success(&logger) + .await? + { + error!( + logger, + "the omicron1 brand is not installed; install it with \ + `pfexec pkg install /system/zones/brand/omicron1/tools`" + ); + preflight_ok = false; + } + + // Check that the datasets for helios-image to use exist + for (dataset, option) in [ + (&args.host_dataset, "--host-dataset"), + (&args.recovery_dataset, "--recovery-dataset"), + ] { + if !Command::new("zfs") + .arg("list") + .arg(dataset) + .is_success(&logger) + .await? 
+ { + error!( + logger, + "the dataset {0} does not exist; run `pfexec zfs create \ + -p {0}`, or specify a different one with {1}", + shell_words::quote(dataset), + option + ); + preflight_ok = false; + } + } + + if !preflight_ok { + bail!("some preflight checks failed"); + } + + fs::create_dir_all(&args.output_dir).await?; + + // DEFINE JOBS ============================================================ + let tempdir = camino_tempfile::tempdir() + .context("failed to create temporary directory")?; + let mut jobs = Jobs::new(&logger, permits.clone(), &args.output_dir); + + jobs.push_command( + "helios-setup", + Command::new("ptime") + .args(["-m", "gmake", "setup"]) + .current_dir(&args.helios_dir) + // ?!?! + // somehow, the Makefile does not see a new `$(PWD)` without this. + .env("PWD", &args.helios_dir) + // Setting `BUILD_OS` to no makes setup skip repositories we don't + // need for building the OS itself (we are just building an image + // from an already-built OS). + .env("BUILD_OS", "no") + .env_remove("CARGO") + .env_remove("RUSTUP_TOOLCHAIN"), + ); + + // Download the toolchain for phbl before we get to the image build steps. + // (This is possibly a micro-optimization.) + jobs.push_command( + "phbl-toolchain", + Command::new(&rustup_cargo) + .arg("--version") + .current_dir(args.helios_dir.join("projects/phbl")) + .env_remove("CARGO") + .env_remove("RUSTUP_TOOLCHAIN"), + ) + .after("helios-setup"); + + let omicron_package = if let Some(path) = &args.omicron_package_bin { + // omicron-package is provided, so don't build it. + jobs.push("omicron-package", std::future::ready(Ok(()))); + path.clone() + } else { + jobs.push_command( + "omicron-package", + Command::new("ptime").args([ + "-m", + cargo.as_str(), + "build", + "--locked", + "--release", + "--bin", + "omicron-package", + ]), + ); + target_dir.join("release/omicron-package") + }; + + // Generate `omicron-package stamp` jobs for a list of packages as a nested + // `Jobs`. Returns the selector for the outer job. + // + // (This could be a function but the resulting function would have too many + // confusable arguments.) + macro_rules! 
stamp_packages { + ($name:expr, $target:expr, $packages:expr) => {{ + let mut stamp_jobs = + Jobs::new(&logger, permits.clone(), &args.output_dir); + for package in $packages { + stamp_jobs.push_command( + format!("stamp-{}", package), + Command::new(&omicron_package) + .args([ + "--target", + $target.as_str(), + "--artifacts", + $target.artifacts_path(&args).as_str(), + "stamp", + package, + &version_str, + ]) + .env_remove("CARGO_MANIFEST_DIR"), + ); + } + jobs.push($name, stamp_jobs.run_all()) + }}; + } + + for target in [Target::Host, Target::Recovery] { + let artifacts_path = target.artifacts_path(&args); + + // omicron-package target create + jobs.push_command( + format!("{}-target", target), + Command::new(&omicron_package) + .args([ + "--target", + target.as_str(), + "--artifacts", + artifacts_path.as_str(), + "target", + "create", + ]) + .args(target.target_args()) + .env_remove("CARGO_MANIFEST_DIR"), + ) + .after("omicron-package"); + + // omicron-package package + jobs.push_command( + format!("{}-package", target), + Command::new(&omicron_package) + .args([ + "--target", + target.as_str(), + "--artifacts", + artifacts_path.as_str(), + "package", + ]) + .env_remove("CARGO_MANIFEST_DIR"), + ) + .after(format!("{}-target", target)); + + // omicron-package stamp + stamp_packages!( + format!("{}-stamp", target), + target, + target.proto_package_names() + ) + .after(format!("{}-package", target)); + + // [build proto dir, to be overlaid into disk image] + let proto_dir = tempdir.path().join("proto").join(target.as_str()); + jobs.push( + format!("{}-proto", target), + build_proto_area( + artifacts_path, + proto_dir.clone(), + target.proto_packages(), + manifest.clone(), + ), + ) + .after(format!("{}-stamp", target)); + + // The ${os_short_commit} token will be expanded by `helios-build` + let image_name = format!( + "{} {}/${{os_short_commit}} {}", + target.image_prefix(), + commit.chars().take(7).collect::(), + Utc::now().format("%Y-%m-%d %H:%M") + ); + + // helios-build experiment-image + jobs.push_command( + format!("{}-image", target), + Command::new("ptime") + .arg("-m") + .arg(args.helios_dir.join("helios-build")) + .arg("experiment-image") + .arg("-o") // output directory for image + .arg(args.output_dir.join(format!("os-{}", target))) + .arg("-p") // use an external package repository + .arg(format!("helios-dev={}", HELIOS_REPO)) + .arg("-F") // pass extra image builder features + .arg(format!("optever={}", opte_version.trim())) + .arg("-P") // include all files from extra proto area + .arg(proto_dir.join("root")) + .arg("-N") // image name + .arg(image_name) + .arg("-s") // tempdir name suffix + .arg(target.as_str()) + .args(target.image_build_args()) + .current_dir(&args.helios_dir) + .env( + "IMAGE_DATASET", + match target { + Target::Host => &args.host_dataset, + Target::Recovery => &args.recovery_dataset, + }, + ) + .env_remove("CARGO") + .env_remove("RUSTUP_TOOLCHAIN"), + ) + .after("helios-setup") + .after(format!("{}-proto", target)); + } + // Build the recovery target after we build the host target. Only one + // of these will build at a time since Cargo locks its target directory; + // since host-package and host-image both take longer than their recovery + // counterparts, this should be the fastest option to go first. + jobs.select("recovery-package").after("host-package"); + if args.host_dataset == args.recovery_dataset { + // If the datasets are the same, we can't parallelize these. 
+        jobs.select("recovery-image").after("host-image");
+    }
+
+    // Set up /root/.profile in the host OS image.
+    jobs.push(
+        "host-profile",
+        host_add_root_profile(tempdir.path().join("proto/host/root/root")),
+    )
+    .after("host-proto");
+    jobs.select("host-image").after("host-profile");
+
+    stamp_packages!("tuf-stamp", Target::Host, TUF_PACKAGES)
+        .after("host-stamp");
+
+    for (name, base_url) in [
+        ("staging", "https://permslip-staging.corp.oxide.computer"),
+        ("production", "https://signer-us-west.corp.oxide.computer"),
+    ] {
+        jobs.push(
+            format!("hubris-{}", name),
+            hubris::fetch_hubris_artifacts(
+                base_url,
+                client.clone(),
+                WORKSPACE_DIR.join(format!("tools/permslip_{}", name)),
+                args.output_dir.join(format!("hubris-{}", name)),
+            ),
+        );
+    }
+
+    jobs.push(
+        "tuf-repo",
+        tuf::build_tuf_repo(
+            logger.clone(),
+            args.output_dir.clone(),
+            version,
+            manifest,
+        ),
+    )
+    .after("tuf-stamp")
+    .after("host-image")
+    .after("recovery-image")
+    .after("hubris-staging")
+    .after("hubris-production");
+
+    // RUN JOBS ===============================================================
+    let start = Instant::now();
+    jobs.run_all().await?;
+    info!(
+        logger,
+        "all jobs completed in {:?}",
+        Instant::now().saturating_duration_since(start)
+    );
+    Ok(())
+}
+
+#[derive(Clone, Copy)]
+enum Target {
+    Host,
+    Recovery,
+}
+
+impl Target {
+    fn as_str(self) -> &'static str {
+        match self {
+            Target::Host => "host",
+            Target::Recovery => "recovery",
+        }
+    }
+
+    fn artifacts_path(self, args: &Args) -> Utf8PathBuf {
+        match self {
+            Target::Host => WORKSPACE_DIR.join("out"),
+            Target::Recovery => {
+                args.output_dir.join(format!("artifacts-{}", self))
+            }
+        }
+    }
+
+    fn target_args(self) -> &'static [&'static str] {
+        match self {
+            Target::Host => &[
+                "--image",
+                "standard",
+                "--machine",
+                "gimlet",
+                "--switch",
+                "asic",
+                "--rack-topology",
+                "multi-sled",
+            ],
+            Target::Recovery => &["--image", "trampoline"],
+        }
+    }
+
+    fn proto_packages(self) -> &'static [(&'static str, InstallMethod)] {
+        match self {
+            Target::Host => &HOST_IMAGE_PACKAGES,
+            Target::Recovery => &RECOVERY_IMAGE_PACKAGES,
+        }
+    }
+
+    fn proto_package_names(self) -> impl Iterator<Item = &'static str> {
+        self.proto_packages().iter().map(|(name, _)| *name)
+    }
+
+    fn image_prefix(self) -> &'static str {
+        match self {
+            Target::Host => "ci",
+            Target::Recovery => "recovery",
+        }
+    }
+
+    fn image_build_args(self) -> &'static [&'static str] {
+        match self {
+            Target::Host => &[
+                "-B", // include omicron1 brand
+            ],
+            Target::Recovery => &[
+                "-R", // recovery image
+            ],
+        }
+    }
+}
+
+impl std::fmt::Display for Target {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.as_str())
+    }
+}
+
+async fn build_proto_area(
+    mut package_dir: Utf8PathBuf,
+    proto_dir: Utf8PathBuf,
+    packages: &'static [(&'static str, InstallMethod)],
+    manifest: Arc<Config>,
+) -> Result<()> {
+    let opt_oxide = proto_dir.join("root/opt/oxide");
+    let manifest_site = proto_dir.join("root/lib/svc/manifest/site");
+    fs::create_dir_all(&opt_oxide).await?;
+
+    // use the stamped packages
+    package_dir.push("versioned");
+
+    for &(package_name, method) in packages {
+        let package =
+            manifest.packages.get(package_name).expect("checked in preflight");
+        match method {
+            InstallMethod::Install => {
+                let path = opt_oxide.join(&package.service_name);
+                fs::create_dir(&path).await?;
+
+                let cloned_path = path.clone();
+                let cloned_package_dir = package_dir.to_owned();
+                tokio::task::spawn_blocking(move || -> Result<()> {
+                    let mut archive =
tar::Archive::new(std::fs::File::open(
+                        cloned_package_dir
+                            .join(package_name)
+                            .with_extension("tar"),
+                    )?);
+                    archive.unpack(cloned_path).with_context(|| {
+                        format!("failed to extract {}.tar", package_name)
+                    })?;
+                    Ok(())
+                })
+                .await??;
+
+                let smf_manifest = path.join("pkg").join("manifest.xml");
+                if smf_manifest.exists() {
+                    fs::create_dir_all(&manifest_site).await?;
+                    fs::rename(
+                        smf_manifest,
+                        manifest_site
+                            .join(&package.service_name)
+                            .with_extension("xml"),
+                    )
+                    .await?;
+                }
+            }
+            InstallMethod::Bundle => {
+                fs::copy(
+                    package_dir.join(format!("{}.tar.gz", package_name)),
+                    opt_oxide.join(format!("{}.tar.gz", package.service_name)),
+                )
+                .await?;
+            }
+        }
+    }
+
+    Ok(())
+}
+
+async fn host_add_root_profile(host_proto_root: Utf8PathBuf) -> Result<()> {
+    fs::create_dir_all(&host_proto_root).await?;
+    fs::write(
+        host_proto_root.join(".profile"),
+        "# Add opteadm, ddmadm, oxlog to PATH\n\
+        export PATH=$PATH:/opt/oxide/opte/bin:/opt/oxide/mg-ddm:/opt/oxide/oxlog\n",
+    ).await?;
+    Ok(())
+}
diff --git a/dev-tools/releng/src/tuf.rs b/dev-tools/releng/src/tuf.rs
new file mode 100644
index 00000000000..2a880210ebb
--- /dev/null
+++ b/dev-tools/releng/src/tuf.rs
@@ -0,0 +1,149 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use std::sync::Arc;
+
+use anyhow::Context;
+use anyhow::Result;
+use camino::Utf8PathBuf;
+use chrono::Duration;
+use chrono::Timelike;
+use chrono::Utc;
+use fs_err::tokio as fs;
+use fs_err::tokio::File;
+use omicron_common::api::external::SemverVersion;
+use omicron_common::api::internal::nexus::KnownArtifactKind;
+use omicron_zone_package::config::Config;
+use semver::Version;
+use sha2::Digest;
+use sha2::Sha256;
+use slog::Logger;
+use tokio::io::AsyncReadExt;
+use tufaceous_lib::assemble::ArtifactManifest;
+use tufaceous_lib::assemble::DeserializedArtifactData;
+use tufaceous_lib::assemble::DeserializedArtifactSource;
+use tufaceous_lib::assemble::DeserializedControlPlaneZoneSource;
+use tufaceous_lib::assemble::DeserializedManifest;
+use tufaceous_lib::assemble::OmicronRepoAssembler;
+use tufaceous_lib::Key;
+
+pub(crate) async fn build_tuf_repo(
+    logger: Logger,
+    output_dir: Utf8PathBuf,
+    version: Version,
+    package_manifest: Arc<Config>,
+) -> Result<()> {
+    // We currently go about this somewhat strangely; the old release
+    // engineering process produced a Tufaceous manifest, and (the now very many
+    // copies of) the TUF repo download-and-unpack script we use expects to be
+    // able to download a manifest. So we build up a `DeserializedManifest`,
+    // write it to disk, and then turn it into an `ArtifactManifest` to actually
+    // build the repo.
+
+    // Start a new manifest by loading the Hubris staging manifest.
+    let mut manifest = DeserializedManifest::from_path(
+        &output_dir.join("hubris-staging/manifest.toml"),
+    )
+    .context("failed to open intermediate hubris staging manifest")?;
+    // Set the version.
+    manifest.system_version = SemverVersion(version);
+
+    // Load the Hubris production manifest and merge it in.
+    let hubris_production = DeserializedManifest::from_path(
+        &output_dir.join("hubris-production/manifest.toml"),
+    )
+    .context("failed to open intermediate hubris production manifest")?;
+    for (kind, artifacts) in hubris_production.artifacts {
+        manifest.artifacts.entry(kind).or_default().extend(artifacts);
+    }
+
+    // Add the OS images.
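+    // (The os.tar.gz paths below match the `-o` output directories passed to
+    // `helios-build experiment-image` in main.rs: `os-host` and `os-recovery`.)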
+ manifest.artifacts.insert( + KnownArtifactKind::Host, + vec![DeserializedArtifactData { + name: "host".to_string(), + version: manifest.system_version.clone(), + source: DeserializedArtifactSource::File { + path: output_dir.join("os-host/os.tar.gz"), + }, + }], + ); + manifest.artifacts.insert( + KnownArtifactKind::Trampoline, + vec![DeserializedArtifactData { + name: "trampoline".to_string(), + version: manifest.system_version.clone(), + source: DeserializedArtifactSource::File { + path: output_dir.join("os-recovery/os.tar.gz"), + }, + }], + ); + + // Add the control plane zones. + let mut zones = Vec::new(); + for package in crate::TUF_PACKAGES { + zones.push(DeserializedControlPlaneZoneSource::File { + file_name: Some(format!( + "{}.tar.gz", + package_manifest + .packages + .get(package) + .expect("checked in preflight") + .service_name + )), + path: crate::WORKSPACE_DIR + .join("out/versioned") + .join(format!("{}.tar.gz", package)), + }); + } + manifest.artifacts.insert( + KnownArtifactKind::ControlPlane, + vec![DeserializedArtifactData { + name: "control-plane".to_string(), + version: manifest.system_version.clone(), + source: DeserializedArtifactSource::CompositeControlPlane { zones }, + }], + ); + + // Serialize the manifest out. + fs::write( + output_dir.join("manifest.toml"), + toml::to_string_pretty(&manifest)?.into_bytes(), + ) + .await?; + + // Convert the manifest. + let manifest = ArtifactManifest::from_deserialized(&output_dir, manifest)?; + manifest.verify_all_present()?; + // Assemble the repo. + let keys = vec![Key::generate_ed25519()]; + let expiry = Utc::now().with_nanosecond(0).unwrap() + Duration::weeks(1); + OmicronRepoAssembler::new( + &logger, + manifest, + keys, + expiry, + output_dir.join("repo.zip"), + ) + .build() + .await?; + // Generate the checksum file. + let mut hasher = Sha256::new(); + let mut buf = [0; 8192]; + let mut file = File::open(output_dir.join("repo.zip")).await?; + loop { + let n = file.read(&mut buf).await?; + if n == 0 { + break; + } + hasher.update(&buf[..n]); + } + fs::write( + output_dir.join("repo.zip.sha256.txt"), + format!("{}\n", hex::encode(&hasher.finalize())), + ) + .await?; + + Ok(()) +} diff --git a/dev-tools/xtask/Cargo.toml b/dev-tools/xtask/Cargo.toml index 11fcf405bd0..2aecde57e53 100644 --- a/dev-tools/xtask/Cargo.toml +++ b/dev-tools/xtask/Cargo.toml @@ -11,7 +11,7 @@ workspace = true anyhow.workspace = true camino.workspace = true cargo_toml = "0.20" -cargo_metadata = "0.18" +cargo_metadata.workspace = true clap.workspace = true macaddr.workspace = true serde.workspace = true diff --git a/docs/releng.adoc b/docs/releng.adoc new file mode 100644 index 00000000000..31252c9a89c --- /dev/null +++ b/docs/releng.adoc @@ -0,0 +1,81 @@ +:showtitle: +:numbered: +:toc: left + += Oxide Release Engineering + +Omicron is the Oxide control plane, and thus brings together all of the +various components outside of this repo that make up the software on the +product. 
This includes (but definitely isn't limited to):
+
+- https://github.com/oxidecomputer/propolis[Propolis], our hypervisor
+- https://github.com/oxidecomputer/helios[Helios], our host operating
+  system
+- https://github.com/oxidecomputer/crucible[Crucible], our block storage
+  service
+- https://github.com/oxidecomputer/maghemite[Maghemite], our switch
+  control software and routing protocol
+- https://github.com/oxidecomputer/hubris[Hubris], our embedded
+  microcontroller operating system used on the root of trust and service
+  processors
+- https://github.com/oxidecomputer/console[The web console]
+
+Each of these has its own build process that produces some sort of
+usable artifact, whether that is an illumos zone or a tarball of static
+assets.
+
+The release engineering process builds the control plane and combines
+it with the many external artifacts into a final artifact -- a Zip
+archive of a TUF repository -- that contains everything necessary for
+the product to operate. This process is run on each commit to ensure it
+is always functional. You can also run the process locally with
+`cargo xtask releng`.
+
+== Process overview
+
+`cargo xtask releng` performs all of these steps in parallel (with
+the temporary exception of artifact downloads handled by
+`tools/install_builder_prerequisites.sh`):
+
+. `tools/install_builder_prerequisites.sh` downloads several artifacts
+  (via the `tools/ci_*` scripts) that are necessary to build Omicron;
+  many of these are ultimately packaged by `omicron-package`. These
+  scripts are generally controlled by the `tools/*_version` and
+  `tools/*_checksums` files.
+. `cargo xtask releng` downloads the current root of trust and
+  service processor images built by the Hubris release engineering
+  process, which are signed in https://github.com/oxidecomputer/permission-slip[Permission Slip].
+  This is controlled by the `tools/permslip_production` and
+  `tools/permslip_staging` files.
+. `omicron-package` is the heart of the release engineering process; it
+  reads the manifest from `package-manifest.toml`, runs an appropriate
+  `cargo build` command, downloads any additional artifacts, and
+  packages them into a series of illumos zones and tarballs. (It can
+  also manage installation and uninstallation of these zones; see
+  how-to-run.adoc.)
+. Some of the illumos zones are distributed with the OS images (because
+  they are reliant on OS-specific APIs), and some are distributed
+  separately. `cargo xtask releng` unpacks the zones for the OS image
+  into a temporary directory that is overlaid onto the OS image in the
+  next step.
+. `helios-build` from the https://github.com/oxidecomputer/helios[Helios]
+  repository then builds two images: the *host* image, which is used
+  during normal operation, and the *trampoline* (or *recovery*) image,
+  which is used to update the host image.
+. Finally, `cargo xtask releng` generates a Zip archive of a
+  https://theupdateframework.io/[TUF] repository, which contains the
+  host and trampoline OS images, the ROT and SP images, and all the
+  illumos zones that are not installed into the OS images. This archive
+  can be uploaded to Wicket to perform an upgrade of the rack while the
+  control plane is not running.
+
+== Beyond `cargo xtask releng`
+
+Currently we use TUF repos generated in CI (by `cargo xtask releng`)
+directly. These repositories use a generated throwaway key to sign
+the TUF metadata.
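+
+For example, after a local run you can confirm the archive matches the
+generated checksum file (paths assume the default `out/releng` output
+directory; on illumos, `digest -a sha256` can stand in for `sha256sum`):
+
+----
+$ cd out/releng
+$ diff <(sha256sum repo.zip | cut -d' ' -f1) repo.zip.sha256.txt && echo OK
+OK
+----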
+
+In the limit, we will have a process to sign release
+builds of these TUF repositories, which will be available as a Zip
+archive for an operator to upload to Nexus or Wicket, as well as an
+HTTPS repository for racks connected to the internet or with access to
+a proxy to perform automatic updates. The exact nature of the PKI and
+trust policies for each of these update flows is under discussion.
diff --git a/illumos-utils/src/dumpadm.rs b/illumos-utils/src/dumpadm.rs
index e37874f795d..5a8247041b6 100644
--- a/illumos-utils/src/dumpadm.rs
+++ b/illumos-utils/src/dumpadm.rs
@@ -1,11 +1,10 @@
 use crate::{execute, ExecutionError};
-use byteorder::{LittleEndian, ReadBytesExt};
 use camino::Utf8PathBuf;
 use std::ffi::OsString;
-use std::fs::File;
-use std::io::{Seek, SeekFrom};
 use std::os::unix::ffi::OsStringExt;
 use std::process::Command;
+use tokio::fs::File;
+use tokio::io::{AsyncReadExt, AsyncSeekExt, SeekFrom};
 
 pub const DUMPADM: &str = "/usr/sbin/dumpadm";
 pub const SAVECORE: &str = "/usr/bin/savecore";
@@ -48,11 +47,11 @@ pub enum DumpHdrError {
 /// been a core written there at all, Err(DumpHdrError::InvalidVersion) if the
 /// dumphdr isn't the one we know how to handle (10), or other variants of
 /// DumpHdrError if there are I/O failures while reading the block device.
-pub fn dump_flag_is_valid(
+pub async fn dump_flag_is_valid(
     dump_slice: &Utf8PathBuf,
 ) -> Result<bool, DumpHdrError> {
-    let mut f = File::open(dump_slice).map_err(DumpHdrError::OpenRaw)?;
-    f.seek(SeekFrom::Start(DUMP_OFFSET)).map_err(DumpHdrError::Seek)?;
+    let mut f = File::open(dump_slice).await.map_err(DumpHdrError::OpenRaw)?;
+    f.seek(SeekFrom::Start(DUMP_OFFSET)).await.map_err(DumpHdrError::Seek)?;
 
     // read the first few fields of dumphdr.
     // typedef struct dumphdr {
@@ -62,21 +61,18 @@ pub fn dump_flag_is_valid(
     //     /* [...] */
     // }
 
-    let magic =
-        f.read_u32::<LittleEndian>().map_err(DumpHdrError::ReadMagic)?;
-    if magic != DUMP_MAGIC {
+    let magic = f.read_u32().await.map_err(DumpHdrError::ReadMagic)?;
+    if magic != DUMP_MAGIC.to_be() {
         return Err(DumpHdrError::InvalidMagic(magic));
     }
 
-    let version =
-        f.read_u32::<LittleEndian>().map_err(DumpHdrError::ReadVersion)?;
-    if version != DUMP_VERSION {
+    let version = f.read_u32().await.map_err(DumpHdrError::ReadVersion)?;
+    if version != DUMP_VERSION.to_be() {
         return Err(DumpHdrError::InvalidVersion(version));
     }
 
-    let flags =
-        f.read_u32::<LittleEndian>().map_err(DumpHdrError::ReadFlags)?;
-    Ok((flags & DF_VALID) != 0)
+    let flags = f.read_u32().await.map_err(DumpHdrError::ReadFlags)?;
+    Ok((flags & DF_VALID.to_be()) != 0)
 }
 
 pub enum DumpContentType {
diff --git a/nexus/db-model/src/network_interface.rs b/nexus/db-model/src/network_interface.rs
index ff774699d61..8520afdb762 100644
--- a/nexus/db-model/src/network_interface.rs
+++ b/nexus/db-model/src/network_interface.rs
@@ -17,7 +17,9 @@ use ipnetwork::NetworkSize;
 use nexus_types::external_api::params;
 use nexus_types::identity::Resource;
 use omicron_common::api::{external, internal};
+use omicron_uuid_kinds::GenericUuid;
 use omicron_uuid_kinds::OmicronZoneUuid;
+use omicron_uuid_kinds::VnicUuid;
 use sled_agent_client::ZoneKind;
 use uuid::Uuid;
 
@@ -207,7 +209,7 @@ impl TryFrom<&'_ ServiceNetworkInterface>
             });
         }
         Ok(Self {
-            id: nic.id(),
+            id: VnicUuid::from_untyped_uuid(nic.id()),
             mac: *nic.mac,
             ip: nic.ip.ip(),
             slot: *nic.slot,
diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs
index 377a85f01b9..9ada1f7b052 100644
--- a/nexus/db-model/src/schema_versions.rs
+++ b/nexus/db-model/src/schema_versions.rs
@@ -17,7 +17,7 @@ use std::collections::BTreeMap;
 ///
 /// This must be updated when you change the database schema. Refer to
 /// schema/crdb/README.adoc in the root of this repository for details.
-pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(62, 0, 0);
+pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(63, 0, 0);
 
 /// List of all past database schema versions, in *reverse* order
 ///
@@ -29,7 +29,8 @@ static KNOWN_VERSIONS: Lazy<Vec<KnownVersion>> = Lazy::new(|| {
     // |  leaving the first copy as an example for the next person.
    // v
    // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"),
-    KnownVersion::new(62, "vpc-subnet-routing"),
+    KnownVersion::new(63, "vpc-subnet-routing"),
+    KnownVersion::new(62, "allocate-subnet-decommissioned-sleds"),
     KnownVersion::new(61, "blueprint-add-sled-state"),
     KnownVersion::new(60, "add-lookup-vmm-by-sled-id-index"),
     KnownVersion::new(59, "enforce-first-as-default"),
diff --git a/nexus/db-model/src/sled_underlay_subnet_allocation.rs b/nexus/db-model/src/sled_underlay_subnet_allocation.rs
index 8dae9da4b82..3cb9579f1b6 100644
--- a/nexus/db-model/src/sled_underlay_subnet_allocation.rs
+++ b/nexus/db-model/src/sled_underlay_subnet_allocation.rs
@@ -8,7 +8,7 @@ use omicron_uuid_kinds::SledKind;
 use uuid::Uuid;
 
 /// Underlay allocation for a sled added to an initialized rack
-#[derive(Queryable, Insertable, Debug, Clone, Selectable)]
+#[derive(Queryable, Insertable, Debug, Clone, PartialEq, Eq, Selectable)]
 #[diesel(table_name = sled_underlay_subnet_allocation)]
 pub struct SledUnderlaySubnetAllocation {
     pub rack_id: Uuid,
diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs
index 7359f1725b6..09bc2eef0f8 100644
--- a/nexus/db-queries/src/db/datastore/deployment.rs
+++ b/nexus/db-queries/src/db/datastore/deployment.rs
@@ -1346,6 +1346,7 @@ mod tests {
     use omicron_uuid_kinds::PhysicalDiskUuid;
     use omicron_uuid_kinds::SledUuid;
     use omicron_uuid_kinds::ZpoolUuid;
+    use once_cell::sync::Lazy;
     use pretty_assertions::assert_eq;
     use rand::thread_rng;
     use rand::Rng;
@@ -1353,8 +1354,8 @@ mod tests {
     use std::mem;
     use std::net::Ipv6Addr;
 
-    static EMPTY_PLANNING_INPUT: PlanningInput =
-        PlanningInputBuilder::empty_input();
+    static EMPTY_PLANNING_INPUT: Lazy<PlanningInput> =
+        Lazy::new(|| PlanningInputBuilder::empty_input());
 
     // This is a not-super-future-maintainer-friendly helper to check that all
     // the subtables related to blueprints have been pruned of a specific
diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs
index 8e8913f7bdb..04901c77855 100644
--- a/nexus/db-queries/src/db/datastore/rack.rs
+++ b/nexus/db-queries/src/db/datastore/rack.rs
@@ -21,6 +21,7 @@ use crate::db::fixed_data::vpc_subnet::DNS_VPC_SUBNET;
 use crate::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET;
 use crate::db::fixed_data::vpc_subnet::NTP_VPC_SUBNET;
 use crate::db::identity::Asset;
+use crate::db::lookup::LookupPath;
 use crate::db::model::Dataset;
 use crate::db::model::IncompleteExternalIp;
 use crate::db::model::PhysicalDisk;
@@ -41,6 +42,7 @@ use nexus_db_model::InitialDnsGroup;
 use nexus_db_model::PasswordHashString;
 use nexus_db_model::SiloUser;
 use nexus_db_model::SiloUserPasswordHash;
+use nexus_db_model::SledState;
 use nexus_db_model::SledUnderlaySubnetAllocation;
 use nexus_types::deployment::blueprint_zone_type;
 use nexus_types::deployment::Blueprint;
@@ -183,8 +185,8 @@ impl From<RackInitError> for Error {
 pub enum SledUnderlayAllocationResult {
     /// A new allocation was created
     New(SledUnderlaySubnetAllocation),
-    /// A prior allocation was found
-    Existing(SledUnderlaySubnetAllocation),
+    /// A prior allocation associated with a commissioned sled was found
+    CommissionedSled(SledUnderlaySubnetAllocation),
 }
 
 impl DataStore {
@@ -327,8 +329,44 @@ impl DataStore {
         };
         for allocation in allocations {
             if allocation.hw_baseboard_id == new_allocation.hw_baseboard_id {
-                // We already have an allocation for this sled.
- return Ok(SledUnderlayAllocationResult::Existing(allocation)); + // We already have an allocation for this sled, but we need to + // check whether this allocation matches a sled that has been + // decommissioned. (The same physical sled, tracked by + // `hw_baseboard_id`, can be logically removed from the control + // plane via decommissioning, then added back again later, which + // requires allocating a new subnet.) + match LookupPath::new(opctx, self) + .sled_id(allocation.sled_id.into_untyped_uuid()) + .optional_fetch_for(authz::Action::Read) + .await? + .map(|(_, sled)| sled.state()) + { + Some(SledState::Active) => { + // This allocation is for an active sled; return the + // existing allocation. + return Ok( + SledUnderlayAllocationResult::CommissionedSled( + allocation, + ), + ); + } + Some(SledState::Decommissioned) => { + // This allocation was for a now-decommissioned sled; + // ignore it and keep searching. + } + None => { + // This allocation is still "new" in the sense that it + // is assigned to a sled that has not yet upserted + // itself to join the control plane. We must return + // `::New(_)` here to ensure idempotence of allocation + // (e.g., if we allocate a sled, but its sled-agent + // crashes before it can upsert itself, we need to be + // able to get the same allocation back again). + return Ok(SledUnderlayAllocationResult::New( + allocation, + )); + } + } } if allocation.subnet_octet == new_allocation.subnet_octet { bail_unless!( @@ -962,7 +1000,6 @@ mod test { }; use crate::db::datastore::test_utils::datastore_test; use crate::db::datastore::Discoverability; - use crate::db::lookup::LookupPath; use crate::db::model::ExternalIp; use crate::db::model::IpKind; use crate::db::model::IpPoolRange; @@ -1190,8 +1227,7 @@ mod test { logctx.cleanup_successful(); } - async fn create_test_sled(db: &DataStore) -> Sled { - let sled_id = Uuid::new_v4(); + async fn create_test_sled(db: &DataStore, sled_id: Uuid) -> Sled { let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); let sled_update = SledUpdate::new( sled_id, @@ -1270,9 +1306,9 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled1 = create_test_sled(&datastore).await; - let sled2 = create_test_sled(&datastore).await; - let sled3 = create_test_sled(&datastore).await; + let sled1 = create_test_sled(&datastore, Uuid::new_v4()).await; + let sled2 = create_test_sled(&datastore, Uuid::new_v4()).await; + let sled3 = create_test_sled(&datastore, Uuid::new_v4()).await; let service_ip_pool_ranges = vec![IpRange::try_from(( Ipv4Addr::new(1, 2, 3, 4), @@ -1621,7 +1657,7 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled = create_test_sled(&datastore).await; + let sled = create_test_sled(&datastore, Uuid::new_v4()).await; // Ask for two Nexus services, with different external IPs. 
let nexus_ip_start = Ipv4Addr::new(1, 2, 3, 4); @@ -1904,7 +1940,7 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled = create_test_sled(&datastore).await; + let sled = create_test_sled(&datastore, Uuid::new_v4()).await; let mut system = SystemDescription::new(); system @@ -2000,7 +2036,7 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled = create_test_sled(&datastore).await; + let sled = create_test_sled(&datastore, Uuid::new_v4()).await; let ip = IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)); let service_ip_pool_ranges = vec![IpRange::from(ip)]; @@ -2256,7 +2292,9 @@ mod test { SledUnderlayAllocationResult::New(allocation) => { allocation.subnet_octet } - SledUnderlayAllocationResult::Existing(allocation) => { + SledUnderlayAllocationResult::CommissionedSled( + allocation, + ) => { panic!("unexpected allocation {allocation:?}"); } }, @@ -2276,9 +2314,9 @@ mod test { ); // If we attempt to insert the same baseboards again, we should get the - // existing allocations back. - for (hw_baseboard_id, expected_octet) in - hw_baseboard_ids.into_iter().zip(expected) + // same new allocations back. + for (&hw_baseboard_id, prev_allocation) in + hw_baseboard_ids.iter().zip(&allocations) { match datastore .allocate_sled_underlay_subnet_octets( @@ -2288,17 +2326,134 @@ mod test { ) .await .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => { + assert_eq!(allocation, *prev_allocation); + } + SledUnderlayAllocationResult::CommissionedSled(allocation) => { + panic!("unexpected allocation {allocation:?}"); + } + } + } + + // Pick one of the hw_baseboard_ids and insert a sled record. We should + // get back the `CommissionedSled` allocation result if we retry + // allocation of that baseboard. + create_test_sled( + &datastore, + allocations[0].sled_id.into_untyped_uuid(), + ) + .await; + match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + hw_baseboard_ids[0], + ) + .await + .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => { + panic!("unexpected allocation {allocation:?}"); + } + SledUnderlayAllocationResult::CommissionedSled(allocation) => { + assert_eq!(allocation, allocations[0]); + } + } + + // If we attempt to insert the same baseboard again and that baseboard + // is only assigned to decommissioned sleds, we should get a new + // allocation. We'll pick one hw baseboard ID, create a `Sled` for it, + // decommission that sled, and confirm we get a new octet, five times in + // a loop (to emulate the same sled being added and decommissioned + // multiple times). + let mut next_expected_octet = *expected.last().unwrap() + 1; + let mut prior_allocation = allocations.last().unwrap().clone(); + let target_hw_baseboard_id = *hw_baseboard_ids.last().unwrap(); + for _ in 0..5 { + // Commission the sled. + let sled = create_test_sled( + &datastore, + prior_allocation.sled_id.into_untyped_uuid(), + ) + .await; + + // If we attempt this same baseboard again, we get the existing + // allocation back. 
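+            // (i.e., the `CommissionedSled` variant, since the sled record
+            // for this allocation now exists)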
+ match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + target_hw_baseboard_id, + ) + .await + .unwrap() { SledUnderlayAllocationResult::New(allocation) => { panic!("unexpected allocation {allocation:?}"); } - SledUnderlayAllocationResult::Existing(allocation) => { - assert_eq!( - allocation.subnet_octet, expected_octet, - "unexpected octet for {allocation:?}" - ); + SledUnderlayAllocationResult::CommissionedSled(existing) => { + assert_eq!(existing, prior_allocation); } } + + // Decommission the sled. + let (authz_sled,) = LookupPath::new(&opctx, &datastore) + .sled_id(sled.id()) + .lookup_for(authz::Action::Modify) + .await + .expect("found target sled ID"); + datastore + .sled_set_policy_to_expunged(&opctx, &authz_sled) + .await + .expect("expunged sled"); + datastore + .sled_set_state_to_decommissioned(&opctx, &authz_sled) + .await + .expect("decommissioned sled"); + + // Attempt a new allocation for the same hw_baseboard_id. + let allocation = match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + target_hw_baseboard_id, + ) + .await + .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => allocation, + SledUnderlayAllocationResult::CommissionedSled(allocation) => { + panic!("unexpected existing allocation {allocation:?}"); + } + }; + + // We should get the next octet with a new sled ID. + assert_eq!(allocation.subnet_octet, next_expected_octet); + assert_ne!(allocation.sled_id.into_untyped_uuid(), sled.id()); + prior_allocation = allocation; + + // Ensure if we attempt this same baseboard again, we get the + // same allocation back (the sled hasn't been commissioned yet). + match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + target_hw_baseboard_id, + ) + .await + .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => { + assert_eq!(prior_allocation, allocation); + } + SledUnderlayAllocationResult::CommissionedSled(existing) => { + panic!("unexpected allocation {existing:?}"); + } + } + + // Bump our expectations for the next iteration. + next_expected_octet += 1; } db.cleanup().await.unwrap(); diff --git a/nexus/networking/src/firewall_rules.rs b/nexus/networking/src/firewall_rules.rs index dc67ce5937a..623c5457020 100644 --- a/nexus/networking/src/firewall_rules.rs +++ b/nexus/networking/src/firewall_rules.rs @@ -353,7 +353,7 @@ pub async fn resolve_firewall_rules_for_sled_agent( .unwrap_or(&no_interfaces) { host_addrs.push( - HostIdentifier::Ip(IpNet::from( + HostIdentifier::Ip(IpNet::single( interface.ip, )) .into(), @@ -373,7 +373,7 @@ pub async fn resolve_firewall_rules_for_sled_agent( } external::VpcFirewallRuleHostFilter::Ip(addr) => { host_addrs.push( - HostIdentifier::Ip(IpNet::from(*addr)).into(), + HostIdentifier::Ip(IpNet::single(*addr)).into(), ) } external::VpcFirewallRuleHostFilter::IpNet(net) => { diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 6e4286f9db0..c4f5cbae822 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -147,7 +147,7 @@ mod tests { const TEST_NAME: &str = "test_ensure_crucible_dataset_records_exist"; // Set up. 
- let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index 1760421dee1..4223652b002 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -1126,7 +1126,7 @@ mod test { async fn test_silos_external_dns_end_to_end( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let log = &cptestctx.logctx.log; let opctx = OpContext::for_background( diff --git a/nexus/reconfigurator/execution/src/external_networking.rs b/nexus/reconfigurator/execution/src/external_networking.rs index 40ad65816ef..cff912c1370 100644 --- a/nexus/reconfigurator/execution/src/external_networking.rs +++ b/nexus/reconfigurator/execution/src/external_networking.rs @@ -883,7 +883,7 @@ mod tests { cptestctx: &ControlPlaneTestContext, ) { // Set up. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), @@ -1141,7 +1141,7 @@ mod tests { cptestctx: &ControlPlaneTestContext, ) { // Set up. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs index 89287713c29..ab0c5cab45a 100644 --- a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs +++ b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs @@ -149,7 +149,7 @@ mod test { #[nexus_test] async fn test_deploy_omicron_disks(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/reconfigurator/execution/src/omicron_zones.rs b/nexus/reconfigurator/execution/src/omicron_zones.rs index 32482691751..68c1455ee40 100644 --- a/nexus/reconfigurator/execution/src/omicron_zones.rs +++ b/nexus/reconfigurator/execution/src/omicron_zones.rs @@ -139,7 +139,7 @@ mod test { #[nexus_test] async fn test_deploy_omicron_zones(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/reconfigurator/execution/src/sled_state.rs b/nexus/reconfigurator/execution/src/sled_state.rs index aaa5b6bc268..fafc1c2e44c 100644 --- a/nexus/reconfigurator/execution/src/sled_state.rs +++ b/nexus/reconfigurator/execution/src/sled_state.rs @@ -90,7 +90,7 @@ mod tests { async fn test_decommission_is_idempotent( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs index 
24dbbd15acd..f8748be7582 100644
--- a/nexus/reconfigurator/planning/src/example.rs
+++ b/nexus/reconfigurator/planning/src/example.rs
@@ -13,7 +13,9 @@ use nexus_types::deployment::OmicronZoneNic;
 use nexus_types::deployment::PlanningInput;
 use nexus_types::deployment::SledFilter;
 use nexus_types::inventory::Collection;
+use omicron_uuid_kinds::GenericUuid;
 use omicron_uuid_kinds::SledKind;
+use omicron_uuid_kinds::VnicUuid;
 use typed_rng::TypedUuidRng;
 
 pub struct ExampleSystem {
@@ -105,7 +107,8 @@ impl ExampleSystem {
                 .add_omicron_zone_nic(
                     service_id,
                     OmicronZoneNic {
-                        id: nic.id,
+                        // TODO-cleanup use `TypedUuid` everywhere
+                        id: VnicUuid::from_untyped_uuid(nic.id),
                         mac: nic.mac,
                         ip: nic.ip,
                         slot: nic.slot,
diff --git a/nexus/src/app/allow_list.rs b/nexus/src/app/allow_list.rs
index 8f53db68a23..6b32f0c6f35 100644
--- a/nexus/src/app/allow_list.rs
+++ b/nexus/src/app/allow_list.rs
@@ -13,6 +13,8 @@ use omicron_common::api::external;
 use omicron_common::api::external::Error;
 use std::net::IpAddr;
 
+use crate::context::ServerKind;
+
 impl super::Nexus {
     /// Fetch the allowlist of source IPs that can reach user-facing services.
     pub async fn allow_list_view(
         &self,
         opctx: &OpContext,
     ) -> Result<AllowList, Error> {
@@ -30,6 +32,7 @@ impl super::Nexus {
         &self,
         opctx: &OpContext,
         remote_addr: IpAddr,
+        server_kind: ServerKind,
         params: params::AllowListUpdate,
     ) -> Result<AllowList, Error> {
         if let external::AllowedSourceIps::List(list) = &params.allowed_ips {
@@ -50,6 +53,14 @@
             // the request came from is on the allowlist. This is our only real
             // guardrail to prevent accidentally preventing any future access to
             // the rack!
+            //
+            // Note that we elide this check when handling a request proxied
+            // from `wicketd`. This is intentional and used as a safety
+            // mechanism in the event of lockout or other recovery scenarios.
+            let check_remote_addr = match server_kind {
+                ServerKind::External => true,
+                ServerKind::Techport | ServerKind::Internal => false,
+            };
             let mut contains_remote = false;
             for entry in list.iter() {
                 contains_remote |= entry.contains(remote_addr);
@@ -67,7 +78,7 @@
                 ));
             }
         }
-        if !contains_remote {
+        if check_remote_addr && !contains_remote {
             return Err(Error::invalid_request(
                 "The source IP allow list would prevent access \
                 from the current client! Ensure that the allowlist \
diff --git a/nexus/src/app/background/blueprint_execution.rs b/nexus/src/app/background/blueprint_execution.rs
index 1291e72a9b2..2ac1b3fd355 100644
--- a/nexus/src/app/background/blueprint_execution.rs
+++ b/nexus/src/app/background/blueprint_execution.rs
@@ -178,7 +178,7 @@ mod test {
     #[nexus_test(server = crate::Server)]
     async fn test_deploy_omicron_zones(cptestctx: &ControlPlaneTestContext) {
         // Set up the test.
- let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_background( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/blueprint_load.rs b/nexus/src/app/background/blueprint_load.rs index 8334abecb56..cda1d07fcb0 100644 --- a/nexus/src/app/background/blueprint_load.rs +++ b/nexus/src/app/background/blueprint_load.rs @@ -233,7 +233,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_load_blueprints(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/common.rs b/nexus/src/app/background/common.rs index 0fbfa9938d0..da595dc4e16 100644 --- a/nexus/src/app/background/common.rs +++ b/nexus/src/app/background/common.rs @@ -533,7 +533,7 @@ mod test { // activated #[nexus_test(server = crate::Server)] async fn test_driver_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), @@ -698,7 +698,7 @@ mod test { // activated. #[nexus_test(server = crate::Server)] async fn test_activation_in_progress(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), @@ -843,7 +843,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_saga_request_flow(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/dns_config.rs b/nexus/src/app/background/dns_config.rs index be18ac36129..71e0a812a76 100644 --- a/nexus/src/app/background/dns_config.rs +++ b/nexus/src/app/background/dns_config.rs @@ -175,7 +175,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/dns_propagation.rs b/nexus/src/app/background/dns_propagation.rs index cf7a399999f..7d650f6f275 100644 --- a/nexus/src/app/background/dns_propagation.rs +++ b/nexus/src/app/background/dns_propagation.rs @@ -196,7 +196,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/external_endpoints.rs b/nexus/src/app/background/external_endpoints.rs index ed530e0775f..1a587298d5e 100644 --- a/nexus/src/app/background/external_endpoints.rs +++ b/nexus/src/app/background/external_endpoints.rs @@ -131,7 +131,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_basic(cptestctx: &ControlPlaneTestContext) { - 
let nexus = &cptestctx.server.apictx().nexus;
+        let nexus = &cptestctx.server.server_context().nexus;
         let datastore = nexus.datastore();
         let opctx = OpContext::for_tests(
             cptestctx.logctx.log.clone(),
diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs
index 9d9a65c23b1..d2f940018d3 100644
--- a/nexus/src/app/background/init.rs
+++ b/nexus/src/app/background/init.rs
@@ -503,7 +503,7 @@ pub mod test {
     // the new DNS configuration
     #[nexus_test(server = crate::Server)]
     async fn test_dns_propagation_basic(cptestctx: &ControlPlaneTestContext) {
-        let nexus = &cptestctx.server.apictx().nexus;
+        let nexus = &cptestctx.server.server_context().nexus;
         let datastore = nexus.datastore();
         let opctx = OpContext::for_tests(
             cptestctx.logctx.log.clone(),
diff --git a/nexus/src/app/background/inventory_collection.rs b/nexus/src/app/background/inventory_collection.rs
index 7455a14afbe..52ee8f6e133 100644
--- a/nexus/src/app/background/inventory_collection.rs
+++ b/nexus/src/app/background/inventory_collection.rs
@@ -214,7 +214,7 @@ mod test {
     // collections, too.
     #[nexus_test(server = crate::Server)]
     async fn test_basic(cptestctx: &ControlPlaneTestContext) {
-        let nexus = &cptestctx.server.apictx().nexus;
+        let nexus = &cptestctx.server.server_context().nexus;
         let datastore = nexus.datastore();
         let opctx = OpContext::for_tests(
             cptestctx.logctx.log.clone(),
@@ -328,7 +328,7 @@ mod test {
 
     #[nexus_test(server = crate::Server)]
     async fn test_db_sled_enumerator(cptestctx: &ControlPlaneTestContext) {
-        let nexus = &cptestctx.server.apictx().nexus;
+        let nexus = &cptestctx.server.server_context().nexus;
         let datastore = nexus.datastore();
         let opctx = OpContext::for_tests(
             cptestctx.logctx.log.clone(),
diff --git a/nexus/src/app/background/metrics_producer_gc.rs b/nexus/src/app/background/metrics_producer_gc.rs
index 7bd2bd6c8c4..a3e5aaab32d 100644
--- a/nexus/src/app/background/metrics_producer_gc.rs
+++ b/nexus/src/app/background/metrics_producer_gc.rs
@@ -22,33 +22,14 @@ use std::time::Duration;
 
 pub struct MetricProducerGc {
     datastore: Arc<DataStore>,
     lease_duration: Duration,
-    disabled: bool,
 }
 
 impl MetricProducerGc {
     pub fn new(datastore: Arc<DataStore>, lease_duration: Duration) -> Self {
-        Self {
-            datastore,
-            lease_duration,
-            // TODO We should turn this task on as a part of landing the rest of
-            // the move to metric producer leases. For now, we leave it disabled
-            // to avoid pruning producers that don't know to renew leases, but
-            // make this a boolean so our unit test can enable it.
-            disabled: true,
-        }
+        Self { datastore, lease_duration }
     }
 
     async fn activate(&mut self, opctx: &OpContext) -> serde_json::Value {
-        if self.disabled {
-            warn!(
-                opctx.log,
-                "Metric producer GC: statically disabled pending omicron#5284"
-            );
-            return json!({
-                "error": "metric producer gc disabled (omicron#5284)",
-            });
-        }
-
         let Some(expiration) = TimeDelta::from_std(self.lease_duration)
             .ok()
             .and_then(|delta| Utc::now().checked_sub_signed(delta))
@@ -170,7 +151,7 @@ mod tests {
 
     #[nexus_test(server = crate::Server)]
     async fn test_pruning(cptestctx: &ControlPlaneTestContext) {
-        let nexus = &cptestctx.server.apictx().nexus;
+        let nexus = &cptestctx.server.server_context().nexus;
         let datastore = nexus.datastore();
         let opctx = OpContext::for_tests(
             cptestctx.logctx.log.clone(),
@@ -219,25 +200,13 @@ mod tests {
         .await
         .expect("failed to insert producer");
 
-        // Activate the task. It should immediately return because our GC is
-        // currently statically disabled (remove this check once that is no
-        // longer true!).
+        // Create the task and activate it. Technically this is racy in that it
+        // could prune the producer we just added, but if it's been an hour
+        // since then, we have bigger problems. This should _not_ prune the
+        // producer, since it's been active within the last hour.
         let mut gc =
             MetricProducerGc::new(datastore.clone(), Duration::from_secs(3600));
         let value = gc.activate(&opctx).await;
-        assert_eq!(
-            value,
-            json!({
-                "error": "metric producer gc disabled (omicron#5284)",
-            })
-        );
-
-        // Enable the task and activate it. Technically this is racy, but if
-        // it's been an hour since we inserted the producer in the previous
-        // statement, we have bigger problems. This should _not_ prune the
-        // producer, since it's been active within the last hour.
-        gc.disabled = false;
-        let value = gc.activate(&opctx).await;
         let value = value.as_object().expect("non-object");
         assert!(!value.contains_key("failures"));
         assert!(value.contains_key("expiration"));
diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs
index 2286d2f1838..78ae002dd31 100644
--- a/nexus/src/app/disk.rs
+++ b/nexus/src/app/disk.rs
@@ -488,10 +488,10 @@ impl super::Nexus {
         // that user's program can act accordingly. In a way, the user's
         // program is an externally driven saga instead.
 
-        let client = crucible_pantry_client::Client::new(&format!(
-            "http://{}",
-            endpoint
-        ));
+        let client = crucible_pantry_client::Client::new_with_client(
+            &format!("http://{}", endpoint),
+            self.reqwest_client.clone(),
+        );
         let request = crucible_pantry_client::types::BulkWriteRequest {
             offset: param.offset,
             base64_encoded_data: param.base64_encoded_data,
diff --git a/nexus/src/app/external_endpoints.rs b/nexus/src/app/external_endpoints.rs
index db87632bbfd..18d2399eb59 100644
--- a/nexus/src/app/external_endpoints.rs
+++ b/nexus/src/app/external_endpoints.rs
@@ -26,7 +26,7 @@
 //! "certificate resolver" object that impls
 //! [`rustls::server::ResolvesServerCert`]. See [`NexusCertResolver`].
 
-use crate::ServerContext;
+use crate::context::ApiContext;
 use anyhow::anyhow;
 use anyhow::bail;
 use anyhow::Context;
@@ -674,7 +674,7 @@ impl super::Nexus {
     /// case, we'll choose an arbitrary Silo.
     pub fn endpoint_for_request(
         &self,
-        rqctx: &dropshot::RequestContext<Arc<ServerContext>>,
+        rqctx: &dropshot::RequestContext<ApiContext>,
     ) -> Result<Arc<ExternalEndpoint>, Error> {
         let log = &rqctx.log;
         let rqinfo = &rqctx.request;
diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs
index a22fad0c81e..4b77788c965 100644
--- a/nexus/src/app/mod.rs
+++ b/nexus/src/app/mod.rs
@@ -152,6 +152,13 @@ pub struct Nexus {
     /// The metric producer server from which oximeter collects metric data.
     producer_server: std::sync::Mutex<Option<ProducerServer>>,
 
+    /// Reusable `reqwest::Client`, to be cloned and used with the Progenitor-
+    /// generated `Client::new_with_client`.
+    ///
+    /// (This does not need to be in an `Arc` because `reqwest::Client` uses
+    /// `Arc` internally.)
+    reqwest_client: reqwest::Client,
+
     /// Client to the timeseries database.
     timeseries_client: LazyTimeseriesClient,
 
@@ -343,6 +350,12 @@ impl Nexus {
             }
         }
 
+        let reqwest_client = reqwest::ClientBuilder::new()
+            .connect_timeout(std::time::Duration::from_secs(15))
+            .timeout(std::time::Duration::from_secs(15))
+            .build()
+            .map_err(|e| e.to_string())?;
+
         // Connect to clickhouse - but do so lazily.
         // Clickhouse may not be executing when Nexus starts.
let timeseries_client = if let Some(address) =
@@ -412,6 +425,7 @@ impl Nexus {
             internal_server: std::sync::Mutex::new(None),
             producer_server: std::sync::Mutex::new(None),
             populate_status,
+            reqwest_client,
             timeseries_client,
             updates_config: config.pkg.updates.clone(),
             tunables: config.pkg.tunables.clone(),
@@ -520,6 +534,11 @@ impl Nexus {
         &self.id
     }
 
+    /// Return the rack ID for this Nexus instance.
+    pub fn rack_id(&self) -> Uuid {
+        self.rack_id
+    }
+
     /// Return the tunable configuration parameters, e.g. for use in tests.
     pub fn tunables(&self) -> &Tunables {
         &self.tunables
@@ -643,6 +662,16 @@ impl Nexus {
             .map(|server| server.local_addr())
     }
 
+    pub(crate) async fn get_techport_server_address(
+        &self,
+    ) -> Option<SocketAddr> {
+        self.techport_external_server
+            .lock()
+            .unwrap()
+            .as_ref()
+            .map(|server| server.local_addr())
+    }
+
     pub(crate) async fn get_internal_server_address(
         &self,
     ) -> Option<SocketAddr> {
diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs
index 25c0824ce64..c766446f38f 100644
--- a/nexus/src/app/rack.rs
+++ b/nexus/src/app/rack.rs
@@ -790,11 +790,11 @@ impl super::Nexus {
             .await?
         {
             SledUnderlayAllocationResult::New(allocation) => allocation,
-            SledUnderlayAllocationResult::Existing(allocation) => {
+            SledUnderlayAllocationResult::CommissionedSled(allocation) => {
                 return Err(Error::ObjectAlreadyExists {
                     type_name: ResourceType::Sled,
                     object_name: format!(
-                        "{} / {} ({})",
+                        "{} ({}): {}",
                         sled.serial, sled.part, allocation.sled_id
                     ),
                 });
diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs
index 165bf7573c9..5c4f5bf1eee 100644
--- a/nexus/src/app/sagas/disk_create.rs
+++ b/nexus/src/app/sagas/disk_create.rs
@@ -882,7 +882,7 @@ pub(crate) mod test {
     pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext {
         OpContext::for_tests(
             cptestctx.logctx.log.new(o!()),
-            cptestctx.server.apictx().nexus.datastore().clone(),
+            cptestctx.server.server_context().nexus.datastore().clone(),
         )
     }
 
@@ -893,7 +893,7 @@
         DiskTest::new(cptestctx).await;
 
         let client = &cptestctx.external_client;
-        let nexus = &cptestctx.server.apictx().nexus;
+        let nexus = &cptestctx.server.server_context().nexus;
         let project_id =
             create_project(&client, PROJECT_NAME).await.identity.id;
 
@@ -1033,7 +1033,7 @@
         test: &DiskTest,
     ) {
         let sled_agent = &cptestctx.sled_agent.sled_agent;
-        let datastore = cptestctx.server.apictx().nexus.datastore();
+        let datastore = cptestctx.server.server_context().nexus.datastore();
 
         crate::app::sagas::test_helpers::assert_no_failed_undo_steps(
             &cptestctx.logctx.log,
@@ -1063,7 +1063,7 @@
         let log = &cptestctx.logctx.log;
 
         let client = &cptestctx.external_client;
-        let nexus = &cptestctx.server.apictx().nexus;
+        let nexus = &cptestctx.server.server_context().nexus;
         let project_id =
             create_project(&client, PROJECT_NAME).await.identity.id;
         let opctx = test_opctx(cptestctx);
@@ -1093,7 +1093,7 @@
         let log = &cptestctx.logctx.log;
 
         let client = &cptestctx.external_client;
-        let nexus = &cptestctx.server.apictx.nexus;
+        let nexus = &cptestctx.server.server_context().nexus;
         let project_id =
             create_project(&client, PROJECT_NAME).await.identity.id;
         let opctx = test_opctx(&cptestctx);
@@ -1111,7 +1111,7 @@
     }
 
     async fn destroy_disk(cptestctx: &ControlPlaneTestContext) {
-        let nexus = &cptestctx.server.apictx.nexus;
+        let nexus = &cptestctx.server.server_context().nexus;
        let opctx = test_opctx(&cptestctx);
        let disk_selector = params::DiskSelector {
project: Some( @@ -1134,7 +1134,7 @@ pub(crate) mod test { let test = DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(&client, PROJECT_NAME).await.identity.id; diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index 333e6c16725..24cf331a347 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -201,12 +201,12 @@ pub(crate) mod test { pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx.nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } async fn create_disk(cptestctx: &ControlPlaneTestContext) -> Disk { - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let project_selector = params::ProjectSelector { @@ -232,7 +232,7 @@ pub(crate) mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(client, PROJECT_NAME).await.identity.id; let disk = create_disk(&cptestctx).await; @@ -258,7 +258,7 @@ pub(crate) mod test { let test = DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(client, PROJECT_NAME).await.identity.id; let disk = create_disk(&cptestctx).await; diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 73fe910c764..a6df7183d19 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -1137,7 +1137,7 @@ pub mod test { ) { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters @@ -1264,7 +1264,7 @@ pub mod test { cptestctx: &ControlPlaneTestContext, ) { let sled_agent = &cptestctx.sled_agent.sled_agent; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); // Check that no partial artifacts of instance creation exist assert!(no_instance_records_exist(datastore).await); @@ -1300,7 +1300,7 @@ pub mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters @@ -1329,7 +1329,7 @@ pub mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_project_and_disk(&client).await; let opctx = test_helpers::test_opctx(&cptestctx); @@ -1353,7 +1353,7 @@ pub mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let 
project_id = create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters diff --git a/nexus/src/app/sagas/instance_delete.rs b/nexus/src/app/sagas/instance_delete.rs index 0e253913b0d..d93c1455adc 100644 --- a/nexus/src/app/sagas/instance_delete.rs +++ b/nexus/src/app/sagas/instance_delete.rs @@ -210,7 +210,7 @@ mod test { instance_id: Uuid, ) -> Params { let opctx = test_opctx(&cptestctx); - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); let (.., authz_instance, instance) = LookupPath::new(&opctx, &datastore) @@ -253,7 +253,7 @@ mod test { pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -263,7 +263,7 @@ mod test { ) { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters @@ -290,7 +290,7 @@ mod test { cptestctx: &ControlPlaneTestContext, params: params::InstanceCreate, ) -> db::model::Instance { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let project_selector = params::ProjectSelector { @@ -304,7 +304,8 @@ mod test { .await .unwrap(); - let datastore = cptestctx.server.apictx().nexus.datastore().clone(); + let datastore = + cptestctx.server.server_context().nexus.datastore().clone(); let (.., db_instance) = LookupPath::new(&opctx, &datastore) .instance_id(instance_state.instance().id()) .fetch() @@ -321,7 +322,7 @@ mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters diff --git a/nexus/src/app/sagas/instance_ip_attach.rs b/nexus/src/app/sagas/instance_ip_attach.rs index 3cd6ac1c468..3332b712747 100644 --- a/nexus/src/app/sagas/instance_ip_attach.rs +++ b/nexus/src/app/sagas/instance_ip_attach.rs @@ -410,7 +410,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let sled_agent = &cptestctx.sled_agent.sled_agent; @@ -460,7 +460,7 @@ pub(crate) mod test { use nexus_db_queries::db::schema::external_ip::dsl; let sled_agent = &cptestctx.sled_agent.sled_agent; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); @@ -500,7 +500,7 @@ pub(crate) mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); @@ -526,7 +526,7 @@ pub(crate) mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let 
nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); @@ -555,7 +555,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); diff --git a/nexus/src/app/sagas/instance_ip_detach.rs b/nexus/src/app/sagas/instance_ip_detach.rs index 7a718243765..2f1d76c8535 100644 --- a/nexus/src/app/sagas/instance_ip_detach.rs +++ b/nexus/src/app/sagas/instance_ip_detach.rs @@ -381,7 +381,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let sled_agent = &cptestctx.sled_agent.sled_agent; @@ -425,7 +425,7 @@ pub(crate) mod test { let opctx = test_helpers::test_opctx(cptestctx); let sled_agent = &cptestctx.sled_agent.sled_agent; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); @@ -475,7 +475,7 @@ pub(crate) mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); @@ -503,7 +503,7 @@ pub(crate) mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); @@ -534,7 +534,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); diff --git a/nexus/src/app/sagas/instance_migrate.rs b/nexus/src/app/sagas/instance_migrate.rs index a727debbeab..1cfd170faf2 100644 --- a/nexus/src/app/sagas/instance_migrate.rs +++ b/nexus/src/app/sagas/instance_migrate.rs @@ -614,7 +614,7 @@ mod tests { ) { let other_sleds = add_sleds(cptestctx, 1).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); @@ -658,7 +658,7 @@ mod tests { let log = &cptestctx.logctx.log; let other_sleds = add_sleds(cptestctx, 1).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); diff --git a/nexus/src/app/sagas/instance_start.rs b/nexus/src/app/sagas/instance_start.rs index 55c00d87072..b76bc2e37d3 100644 --- a/nexus/src/app/sagas/instance_start.rs +++ b/nexus/src/app/sagas/instance_start.rs @@ -767,7 +767,7 @@ mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = 
test_helpers::test_opctx(cptestctx); let instance = create_instance(client).await; @@ -806,7 +806,7 @@ mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); let instance = create_instance(client).await; @@ -868,7 +868,7 @@ mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); let instance = create_instance(client).await; @@ -910,7 +910,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_ensure_running_unwind(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); let instance = create_instance(client).await; diff --git a/nexus/src/app/sagas/project_create.rs b/nexus/src/app/sagas/project_create.rs index b31dd821f03..68935905196 100644 --- a/nexus/src/app/sagas/project_create.rs +++ b/nexus/src/app/sagas/project_create.rs @@ -188,7 +188,7 @@ mod test { fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -258,7 +258,7 @@ mod test { async fn test_saga_basic_usage_succeeds( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; // Before running the test, confirm we have no records of any projects. 
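// ---------------------------------------------------------------------
// Editor's aside -- a hypothetical, self-contained sketch (std types only)
// of the accessor pattern these test hunks migrate to: tests stop calling
// `apictx()` or poking the `apictx` field and instead go through
// `server_context()`, a single method lending out the shared server state.
fn sketch_server_context_accessor() {
    use std::sync::Arc;

    struct Nexus { name: &'static str }
    struct ServerContext { nexus: Arc<Nexus> }
    struct Server { context: Arc<ServerContext> }

    impl Server {
        // Analogous to cptestctx.server.server_context(): same shared
        // allocation on every call, no reaching into private fields.
        fn server_context(&self) -> &Arc<ServerContext> {
            &self.context
        }
    }

    let server = Server {
        context: Arc::new(ServerContext {
            nexus: Arc::new(Nexus { name: "nexus" }),
        }),
    };
    let nexus = &server.server_context().nexus;
    assert_eq!(nexus.name, "nexus");
}
// ---------------------------------------------------------------------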
verify_clean_slate(nexus.datastore()).await; @@ -279,7 +279,7 @@ mod test { cptestctx: &ControlPlaneTestContext, ) { let log = &cptestctx.logctx.log; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); crate::app::sagas::test_helpers::action_failure_can_unwind::< SagaProjectCreate, diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index ff57470a5f5..53e06e310d7 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1876,7 +1876,7 @@ mod test { pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -1889,7 +1889,7 @@ mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters @@ -1976,7 +1976,7 @@ mod test { // Verifies: // - No snapshot records exist // - No region snapshot records exist - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); assert!(no_snapshot_records_exist(datastore).await); assert!(no_region_snapshot_records_exist(datastore).await); } @@ -2016,7 +2016,7 @@ mod test { // Read out the instance's assigned sled, then poke the instance to get // it from the Starting state to the Running state so the test disk can // be snapshotted. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let (.., authz_instance) = LookupPath::new(&opctx, nexus.datastore()) .instance_id(instance.identity.id) @@ -2080,7 +2080,7 @@ mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters @@ -2219,7 +2219,7 @@ mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters @@ -2324,7 +2324,7 @@ mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters diff --git a/nexus/src/app/sagas/test_helpers.rs b/nexus/src/app/sagas/test_helpers.rs index 1b383d27bb3..bacd0f1c9d7 100644 --- a/nexus/src/app/sagas/test_helpers.rs +++ b/nexus/src/app/sagas/test_helpers.rs @@ -34,7 +34,7 @@ type ControlPlaneTestContext = pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -42,7 +42,7 @@ pub(crate) async fn instance_start( 
cptestctx: &ControlPlaneTestContext, id: &Uuid, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -62,7 +62,7 @@ pub(crate) async fn instance_stop( cptestctx: &ControlPlaneTestContext, id: &Uuid, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -83,7 +83,7 @@ pub(crate) async fn instance_stop_by_name( name: &str, project_name: &str, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -104,7 +104,7 @@ pub(crate) async fn instance_delete_by_name( name: &str, project_name: &str, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -126,7 +126,7 @@ pub(crate) async fn instance_simulate( ) { info!(&cptestctx.logctx.log, "Poking simulated instance"; "instance_id" => %instance_id); - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let sa = nexus .instance_sled_by_id(instance_id) .await @@ -145,7 +145,7 @@ pub(crate) async fn instance_simulate_by_name( "instance_name" => %name, "project_name" => %project_name); - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -168,7 +168,7 @@ pub async fn instance_fetch( cptestctx: &ControlPlaneTestContext, instance_id: Uuid, ) -> InstanceAndActiveVmm { - let datastore = cptestctx.server.apictx().nexus.datastore().clone(); + let datastore = cptestctx.server.server_context().nexus.datastore().clone(); let opctx = test_opctx(&cptestctx); let (.., authz_instance) = LookupPath::new(&opctx, &datastore) .instance_id(instance_id) @@ -194,7 +194,7 @@ pub async fn no_virtual_provisioning_resource_records_exist( use nexus_db_queries::db::model::VirtualProvisioningResource; use nexus_db_queries::db::schema::virtual_provisioning_resource::dsl; - let datastore = cptestctx.server.apictx().nexus.datastore().clone(); + let datastore = cptestctx.server.server_context().nexus.datastore().clone(); let conn = datastore.pool_connection_for_tests().await.unwrap(); datastore @@ -223,7 +223,7 @@ pub async fn no_virtual_provisioning_collection_records_using_instances( use nexus_db_queries::db::model::VirtualProvisioningCollection; use nexus_db_queries::db::schema::virtual_provisioning_collection::dsl; - let datastore = cptestctx.server.apictx().nexus.datastore().clone(); + let datastore = cptestctx.server.server_context().nexus.datastore().clone(); let conn = datastore.pool_connection_for_tests().await.unwrap(); datastore diff --git a/nexus/src/app/sagas/test_saga.rs b/nexus/src/app/sagas/test_saga.rs index 0520a17602f..9ccdc4aebc1 100644 --- a/nexus/src/app/sagas/test_saga.rs +++ b/nexus/src/app/sagas/test_saga.rs @@ -75,7 +75,7 @@ type ControlPlaneTestContext = #[nexus_test(server = crate::Server)] async fn test_saga_stuck(cptestctx: &ControlPlaneTestContext) { - let nexus = 
&cptestctx.server.apictx().nexus;
+    let nexus = &cptestctx.server.server_context().nexus;
     let params = Params {};
     let dag = create_saga_dag::(params).unwrap();
     let runnable_saga = nexus.create_runnable_saga(dag.clone()).await.unwrap();
diff --git a/nexus/src/app/sagas/vpc_create.rs b/nexus/src/app/sagas/vpc_create.rs
index 9f5f94c53c6..cc62d9315dc 100644
--- a/nexus/src/app/sagas/vpc_create.rs
+++ b/nexus/src/app/sagas/vpc_create.rs
@@ -543,7 +543,7 @@ pub(crate) mod test {
     fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext {
         OpContext::for_tests(
             cptestctx.logctx.log.new(o!()),
-            cptestctx.server.apictx().nexus.datastore().clone(),
+            cptestctx.server.server_context().nexus.datastore().clone(),
         )
     }
 
@@ -552,7 +552,7 @@ pub(crate) mod test {
         project_id: Uuid,
         action: authz::Action,
     ) -> authz::Project {
-        let nexus = &cptestctx.server.apictx().nexus;
+        let nexus = &cptestctx.server.server_context().nexus;
         let project_selector =
             params::ProjectSelector { project: NameOrId::Id(project_id) };
         let opctx = test_opctx(&cptestctx);
@@ -570,7 +570,7 @@ pub(crate) mod test {
         project_id: Uuid,
     ) {
         let opctx = test_opctx(&cptestctx);
-        let datastore = cptestctx.server.apictx().nexus.datastore();
+        let datastore = cptestctx.server.server_context().nexus.datastore();
 
         let default_name = Name::try_from("default".to_string()).unwrap();
         let system_name = Name::try_from("system".to_string()).unwrap();
@@ -757,7 +757,7 @@ pub(crate) mod test {
         cptestctx: &ControlPlaneTestContext,
     ) {
         let client = &cptestctx.external_client;
-        let nexus = &cptestctx.server.apictx().nexus;
+        let nexus = &cptestctx.server.server_context().nexus;
         let project_id = create_org_and_project(&client).await;
 
         delete_project_vpc_defaults(&cptestctx, project_id).await;
@@ -787,7 +787,7 @@ pub(crate) mod test {
 
         let log = &cptestctx.logctx.log;
         let client = &cptestctx.external_client;
-        let nexus = &cptestctx.server.apictx().nexus;
+        let nexus = &cptestctx.server.server_context().nexus;
         let project_id = create_org_and_project(&client).await;
 
         delete_project_vpc_defaults(&cptestctx, project_id).await;
diff --git a/nexus/src/context.rs b/nexus/src/context.rs
index cf2b9d6f173..72ecd6b8ac2 100644
--- a/nexus/src/context.rs
+++ b/nexus/src/context.rs
@@ -29,6 +29,61 @@ use std::str::FromStr;
 use std::sync::Arc;
 use uuid::Uuid;
 
+/// Indicates the kind of HTTP server.
+#[derive(Clone, Copy)]
+pub enum ServerKind {
+    /// This serves the internal API.
+    Internal,
+    /// This serves the external API over the normal public network.
+    External,
+    /// This serves the external API proxied over the technician port.
+    Techport,
+}
+
+/// The API context for each distinct Dropshot server.
+///
+/// This packages up the main server context, which is shared by all API servers
+/// (e.g., internal, external, and techport). It also includes the
+/// [`ServerKind`], which makes it possible to know which server is handling any
+/// particular request.
+#[derive(Clone)]
+pub struct ApiContext {
+    /// The kind of server.
+    pub kind: ServerKind,
+    /// Shared state available to all endpoint handlers.
+    pub context: Arc<ServerContext>,
+}
+
+impl ApiContext {
+    /// Create a new context with a rack ID and logger. This creates the
+    /// underlying `Nexus` as well.
+    pub async fn for_internal(
+        rack_id: Uuid,
+        log: Logger,
+        config: &NexusConfig,
+    ) -> Result<Self, String> {
+        ServerContext::new(rack_id, log, config)
+            .await
+            .map(|context| Self { kind: ServerKind::Internal, context })
+    }
+
+    /// Clone self for use by the external Dropshot server.
+    pub fn for_external(&self) -> Self {
+        Self { kind: ServerKind::External, context: self.context.clone() }
+    }
+
+    /// Clone self for use by the techport Dropshot server.
+    pub fn for_techport(&self) -> Self {
+        Self { kind: ServerKind::Techport, context: self.context.clone() }
+    }
+}
+
+impl std::borrow::Borrow<ServerContext> for ApiContext {
+    fn borrow(&self) -> &ServerContext {
+        &self.context
+    }
+}
+
 /// Shared state available to all API request handlers
 pub struct ServerContext {
     /// reference to the underlying nexus
@@ -262,18 +317,19 @@ impl ServerContext {
 /// Authenticates an incoming request to the external API and produces a new
 /// operation context for it
 pub(crate) async fn op_context_for_external_api(
-    rqctx: &dropshot::RequestContext<Arc<ServerContext>>,
+    rqctx: &dropshot::RequestContext<ApiContext>,
 ) -> Result<OpContext, HttpError> {
     let apictx = rqctx.context();
     OpContext::new_async(
         &rqctx.log,
         async {
-            let authn =
-                Arc::new(apictx.external_authn.authn_request(rqctx).await?);
-            let datastore = Arc::clone(apictx.nexus.datastore());
+            let authn = Arc::new(
+                apictx.context.external_authn.authn_request(rqctx).await?,
+            );
+            let datastore = Arc::clone(apictx.context.nexus.datastore());
             let authz = authz::Context::new(
                 Arc::clone(&authn),
-                Arc::clone(&apictx.authz),
+                Arc::clone(&apictx.context.authz),
                 datastore,
             );
             Ok((authn, authz))
@@ -285,17 +341,17 @@
 }
 
 pub(crate) async fn op_context_for_internal_api(
-    rqctx: &dropshot::RequestContext<Arc<ServerContext>>,
+    rqctx: &dropshot::RequestContext<ApiContext>,
 ) -> OpContext {
-    let apictx = rqctx.context();
+    let apictx = &rqctx.context();
     OpContext::new_async(
         &rqctx.log,
         async {
-            let authn = Arc::clone(&apictx.internal_authn);
-            let datastore = Arc::clone(apictx.nexus.datastore());
+            let authn = Arc::clone(&apictx.context.internal_authn);
+            let datastore = Arc::clone(apictx.context.nexus.datastore());
             let authz = authz::Context::new(
                 Arc::clone(&authn),
-                Arc::clone(&apictx.authz),
+                Arc::clone(&apictx.context.authz),
                 datastore,
             );
             Ok::<_, std::convert::Infallible>((authn, authz))
diff --git a/nexus/src/external_api/console_api.rs b/nexus/src/external_api/console_api.rs
index d49e7f3be42..caff195047f 100644
--- a/nexus/src/external_api/console_api.rs
+++ b/nexus/src/external_api/console_api.rs
@@ -21,7 +21,7 @@
 // toolchain; we can remove this attribute then.
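// ---------------------------------------------------------------------
// Editor's sketch (hypothetical, std-only) of how the new ApiContext from
// nexus/src/context.rs above is meant to be used: one ServerContext
// allocation is shared by the internal, external, and techport Dropshot
// servers, and only the ServerKind tag differs per server.
fn sketch_api_context_sharing() {
    use std::sync::Arc;

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum ServerKind { Internal, External, Techport }

    // Stand-in; the real ServerContext holds nexus, authn state, latency
    // trackers, console config, and so on.
    struct ServerContext;

    #[derive(Clone)]
    struct ApiContext { kind: ServerKind, context: Arc<ServerContext> }

    impl ApiContext {
        fn for_external(&self) -> Self {
            Self { kind: ServerKind::External, context: self.context.clone() }
        }
        fn for_techport(&self) -> Self {
            Self { kind: ServerKind::Techport, context: self.context.clone() }
        }
    }

    let internal = ApiContext {
        kind: ServerKind::Internal,
        context: Arc::new(ServerContext),
    };
    let external = internal.for_external();
    let techport = internal.for_techport();

    // Three handles, one shared allocation.
    assert_eq!(Arc::strong_count(&internal.context), 3);
    assert_eq!(external.kind, ServerKind::External);
    assert_eq!(techport.kind, ServerKind::Techport);
}
// ---------------------------------------------------------------------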
#![allow(clippy::declare_interior_mutable_const)] -use crate::ServerContext; +use crate::context::ApiContext; use anyhow::Context; use camino::{Utf8Path, Utf8PathBuf}; use dropshot::{ @@ -56,7 +56,6 @@ use serde_urlencoded; use std::collections::HashMap; use std::num::NonZeroU32; use std::str::FromStr; -use std::sync::Arc; use tokio::fs::File; use tokio_util::codec::{BytesCodec, FramedRead}; @@ -239,7 +238,7 @@ impl RelayState { unpublished = true, }] pub(crate) async fn login_saml_begin( - rqctx: RequestContext>, + rqctx: RequestContext, _path_params: Path, _query_params: Query, ) -> Result, HttpError> { @@ -258,13 +257,13 @@ pub(crate) async fn login_saml_begin( unpublished = true, }] pub(crate) async fn login_saml_redirect( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path_params = path_params.into_inner(); // Use opctx_external_authn because this request will be @@ -303,7 +302,11 @@ pub(crate) async fn login_saml_redirect( } }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Authenticate a user via SAML @@ -313,13 +316,13 @@ pub(crate) async fn login_saml_redirect( tags = ["login"], }] pub(crate) async fn login_saml( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, body_bytes: dropshot::UntypedBody, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path_params = path_params.into_inner(); // By definition, this request is not authenticated. These operations @@ -379,14 +382,18 @@ pub(crate) async fn login_saml( // use absolute timeout even though session might idle out first. 
// browser expiration is mostly for convenience, as the API will // reject requests with an expired session regardless - apictx.session_absolute_timeout(), - apictx.external_tls_enabled, + apictx.context.session_absolute_timeout(), + apictx.context.external_tls_enabled, )?; headers.append(header::SET_COOKIE, cookie); } Ok(response) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Deserialize, JsonSchema)] @@ -401,14 +408,14 @@ pub struct LoginPathParam { unpublished = true, }] pub(crate) async fn login_local_begin( - rqctx: RequestContext>, + rqctx: RequestContext, _path_params: Path, _query_params: Query, ) -> Result, HttpError> { // TODO: figure out why instrumenting doesn't work // let apictx = rqctx.context(); // let handler = async { serve_console_index(rqctx.context()).await }; - // apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + // apictx.context.external_latencies.instrument_dropshot_handler(&rqctx, handler).await serve_console_index(rqctx).await } @@ -419,13 +426,13 @@ pub(crate) async fn login_local_begin( tags = ["login"], }] pub(crate) async fn login_local( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, credentials: dropshot::TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let credentials = credentials.into_inner(); let silo = path.silo_name.into(); @@ -448,22 +455,26 @@ pub(crate) async fn login_local( // use absolute timeout even though session might idle out first. // browser expiration is mostly for convenience, as the API will // reject requests with an expired session regardless - apictx.session_absolute_timeout(), - apictx.external_tls_enabled, + apictx.context.session_absolute_timeout(), + apictx.context.external_tls_enabled, )?; headers.append(header::SET_COOKIE, cookie); } Ok(response) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } async fn create_session( opctx: &OpContext, - apictx: &ServerContext, + apictx: &ApiContext, user: Option, ) -> Result { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let session = match user { Some(user) => nexus.session_create(&opctx, user.id()).await?, None => Err(Error::Unauthenticated { @@ -483,12 +494,12 @@ async fn create_session( tags = ["hidden"], }] pub(crate) async fn logout( - rqctx: RequestContext>, + rqctx: RequestContext, cookies: Cookies, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await; let token = cookies.get(SESSION_COOKIE_COOKIE_NAME); @@ -513,14 +524,20 @@ pub(crate) async fn logout( let headers = response.headers_mut(); headers.append( header::SET_COOKIE, - clear_session_cookie_header_value(apictx.external_tls_enabled)?, + clear_session_cookie_header_value( + apictx.context.external_tls_enabled, + )?, ); }; Ok(response) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Deserialize, JsonSchema)] @@ -574,10 +591,10 @@ pub struct 
LoginUrlQuery { /// `redirect_uri` represents the URL to send the user back to after successful /// login, and is included in `state` query param if present async fn get_login_url( - rqctx: &RequestContext>, + rqctx: &RequestContext, redirect_uri: Option, ) -> Result { - let nexus = &rqctx.context().nexus; + let nexus = &rqctx.context().context.nexus; let endpoint = nexus.endpoint_for_request(rqctx)?; let silo = endpoint.silo(); @@ -643,7 +660,7 @@ async fn get_login_url( unpublished = true, }] pub(crate) async fn login_begin( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result { let apictx = rqctx.context(); @@ -652,11 +669,15 @@ pub(crate) async fn login_begin( let login_url = get_login_url(&rqctx, query.redirect_uri).await?; http_response_found(login_url) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } pub(crate) async fn console_index_or_login_redirect( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { let opctx = crate::context::op_context_for_external_api(&rqctx).await; @@ -692,7 +713,7 @@ macro_rules! console_page { ($name:ident, $path:literal) => { #[endpoint { method = GET, path = $path, unpublished = true, }] pub(crate) async fn $name( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await } @@ -704,7 +725,7 @@ macro_rules! console_page_wildcard { ($name:ident, $path:literal) => { #[endpoint { method = GET, path = $path, unpublished = true, }] pub(crate) async fn $name( - rqctx: RequestContext>, + rqctx: RequestContext, _path_params: Path, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await @@ -784,12 +805,13 @@ const WEB_SECURITY_HEADERS: [(HeaderName, HeaderValue); 3] = [ /// file is present in the directory and `gzip` is listed in the request's /// `Accept-Encoding` header. async fn serve_static( - rqctx: RequestContext>, + rqctx: RequestContext, path: &Utf8Path, cache_control: HeaderValue, ) -> Result, HttpError> { let apictx = rqctx.context(); let static_dir = apictx + .context .console_config .static_dir .as_deref() @@ -854,7 +876,7 @@ async fn serve_static( unpublished = true, }] pub(crate) async fn asset( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { // asset URLs contain hashes, so cache for 1 year @@ -868,7 +890,7 @@ pub(crate) async fn asset( /// Serve `/index.html` via [`serve_static`]. Disallow caching. 
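// ---------------------------------------------------------------------
// Editor's sketch (hypothetical): the shape of the console_page! and
// console_page_wildcard! macros above. They stamp out many endpoint
// handlers that differ only in name and path, all delegating to
// console_index_or_login_redirect(rqctx); a stub body stands in here.
fn sketch_console_page_macro() {
    macro_rules! console_page {
        ($name:ident, $path:literal) => {
            fn $name() -> &'static str {
                // real handlers: console_index_or_login_redirect(rqctx).await
                $path
            }
        };
    }

    console_page!(projects_page, "/projects");
    console_page!(settings_page, "/settings");
    assert_eq!(projects_page(), "/projects");
    assert_eq!(settings_page(), "/settings");
}
// ---------------------------------------------------------------------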
pub(crate) async fn serve_console_index( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { // do not cache this response in browser const CACHE_CONTROL: HeaderValue = HeaderValue::from_static("no-store"); diff --git a/nexus/src/external_api/device_auth.rs b/nexus/src/external_api/device_auth.rs index 1697722f6f6..2aa1965e791 100644 --- a/nexus/src/external_api/device_auth.rs +++ b/nexus/src/external_api/device_auth.rs @@ -12,7 +12,7 @@ use super::console_api::console_index_or_login_redirect; use super::views::DeviceAccessTokenGrant; use crate::app::external_endpoints::authority_for_request; -use crate::ServerContext; +use crate::ApiContext; use dropshot::{ endpoint, HttpError, HttpResponseUpdatedNoContent, RequestContext, TypedBody, @@ -23,7 +23,6 @@ use nexus_db_queries::db::model::DeviceAccessToken; use omicron_common::api::external::InternalContext; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use std::sync::Arc; use uuid::Uuid; // Token granting à la RFC 8628 (OAuth 2.0 Device Authorization Grant) @@ -64,11 +63,11 @@ pub struct DeviceAuthRequest { tags = ["hidden"], // "token" }] pub(crate) async fn device_auth_request( - rqctx: RequestContext>, + rqctx: RequestContext, params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = params.into_inner(); let handler = async { let opctx = nexus.opctx_external_authn(); @@ -116,7 +115,7 @@ pub struct DeviceAuthVerify { unpublished = true, }] pub(crate) async fn device_auth_verify( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await } @@ -127,7 +126,7 @@ pub(crate) async fn device_auth_verify( unpublished = true, }] pub(crate) async fn device_auth_success( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await } @@ -143,11 +142,11 @@ pub(crate) async fn device_auth_success( tags = ["hidden"], // "token" }] pub(crate) async fn device_auth_confirm( - rqctx: RequestContext>, + rqctx: RequestContext, params: TypedBody, ) -> Result { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -163,7 +162,11 @@ pub(crate) async fn device_auth_confirm( .await?; Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] @@ -192,11 +195,11 @@ pub enum DeviceAccessTokenResponse { tags = ["hidden"], // "token" }] pub(crate) async fn device_access_token( - rqctx: RequestContext>, + rqctx: RequestContext, params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = params.into_inner(); let handler = async { // RFC 8628 §3.4 diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index d3cbcd432a4..2678768b482 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -13,8 +13,7 @@ use super::{ Utilization, Vpc, VpcRouter, VpcSubnet, }, }; -use crate::external_api::shared; -use crate::ServerContext; +use 
crate::{context::ApiContext, external_api::shared}; use dropshot::HttpError; use dropshot::HttpResponseAccepted; use dropshot::HttpResponseCreated; @@ -95,10 +94,9 @@ use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use std::net::IpAddr; -use std::sync::Arc; use uuid::Uuid; -type NexusApiDescription = ApiDescription>; +type NexusApiDescription = ApiDescription; /// Returns a description of the external nexus API pub(crate) fn external_api() -> NexusApiDescription { @@ -372,9 +370,9 @@ pub(crate) fn external_api() -> NexusApiDescription { endpoint: T, ) -> Result<(), String> where - T: Into>>, + T: Into>, { - let mut ep: ApiEndpoint> = endpoint.into(); + let mut ep: ApiEndpoint = endpoint.into(); // only one tag is allowed ep.tags = vec![String::from("hidden")]; ep.path = String::from("/experimental") + &ep.path; @@ -450,7 +448,7 @@ pub(crate) fn external_api() -> NexusApiDescription { tags = ["system/status"], }] async fn ping( - _rqctx: RequestContext>, + _rqctx: RequestContext, ) -> Result, HttpError> { Ok(HttpResponseOk(views::Ping { status: views::PingStatus::Ok })) } @@ -462,16 +460,20 @@ async fn ping( tags = ["policy"], }] async fn system_policy_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let policy = nexus.fleet_fetch_policy(&opctx).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update top-level IAM policy @@ -481,12 +483,12 @@ async fn system_policy_view( tags = ["policy"], }] async fn system_policy_update( - rqctx: RequestContext>, + rqctx: RequestContext, new_policy: TypedBody>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let new_policy = new_policy.into_inner(); let nasgns = new_policy.role_assignments.len(); // This should have been validated during parsing. 
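// ---------------------------------------------------------------------
// Editor's sketch (hypothetical, std-only): what the repeated rewrite to
// `apictx.context.external_latencies.instrument_dropshot_handler(...)` in
// the hunks below amounts to -- the handler itself is unchanged; only the
// path to the shared latency tracker gained a `.context` hop. A
// synchronous closure stands in for the handler future here.
fn sketch_latency_instrumentation() {
    use std::time::Instant;

    struct LatencyTracker;
    impl LatencyTracker {
        fn instrument<T, E>(
            &self,
            endpoint: &str,
            handler: impl FnOnce() -> Result<T, E>,
        ) -> Result<T, E> {
            let start = Instant::now();
            let result = handler();
            // The real tracker records per-endpoint, per-status latencies
            // into the server's metrics; printing is just for the sketch.
            println!("{endpoint}: {:?} ok={}", start.elapsed(), result.is_ok());
            result
        }
    }

    let external_latencies = LatencyTracker;
    let out: Result<u32, ()> =
        external_latencies.instrument("system_policy_view", || Ok(42));
    assert_eq!(out, Ok(42));
}
// ---------------------------------------------------------------------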
@@ -495,7 +497,11 @@ async fn system_policy_update( let policy = nexus.fleet_update_policy(&opctx, &new_policy).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch current silo's IAM policy @@ -505,11 +511,11 @@ async fn system_policy_update( tags = ["silos"], }] pub(crate) async fn policy_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let silo: NameOrId = opctx .authn @@ -522,7 +528,11 @@ pub(crate) async fn policy_view( let policy = nexus.silo_fetch_policy(&opctx, &silo_lookup).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update current silo's IAM policy @@ -532,12 +542,12 @@ pub(crate) async fn policy_view( tags = ["silos"], }] async fn policy_update( - rqctx: RequestContext>, + rqctx: RequestContext, new_policy: TypedBody>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let new_policy = new_policy.into_inner(); let nasgns = new_policy.role_assignments.len(); // This should have been validated during parsing. @@ -554,7 +564,11 @@ async fn policy_update( nexus.silo_update_policy(&opctx, &silo_lookup, &new_policy).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch resource utilization for user's current silo @@ -564,11 +578,11 @@ async fn policy_update( tags = ["silos"], }] async fn utilization_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let silo_lookup = nexus.current_silo_lookup(&opctx)?; let utilization = @@ -576,7 +590,11 @@ async fn utilization_view( Ok(HttpResponseOk(utilization.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch current utilization for given silo @@ -586,12 +604,12 @@ async fn utilization_view( tags = ["system/silos"], }] async fn silo_utilization_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let silo_lookup = @@ -600,7 +618,11 @@ async fn silo_utilization_view( Ok(HttpResponseOk(quotas.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List current utilization state for all silos #[endpoint { @@ -609,12 +631,12 @@ async fn silo_utilization_view( tags = ["system/silos"], 
}] async fn silo_utilization_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; @@ -635,7 +657,11 @@ async fn silo_utilization_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Lists resource quotas for all silos @@ -645,12 +671,12 @@ async fn silo_utilization_list( tags = ["system/silos"], }] async fn system_quotas_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; @@ -669,7 +695,11 @@ async fn system_quotas_list( &|_, quota: &SiloQuotas| quota.silo_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch resource quotas for silo @@ -679,12 +709,12 @@ async fn system_quotas_list( tags = ["system/silos"], }] async fn silo_quotas_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let silo_lookup = @@ -692,7 +722,11 @@ async fn silo_quotas_view( let quota = nexus.silo_quotas_view(&opctx, &silo_lookup).await?; Ok(HttpResponseOk(quota.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update resource quotas for silo @@ -704,13 +738,13 @@ async fn silo_quotas_view( tags = ["system/silos"], }] async fn silo_quotas_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_quota: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let silo_lookup = @@ -720,7 +754,11 @@ async fn silo_quotas_update( .await?; Ok(HttpResponseOk(quota.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List silos @@ -732,12 +770,12 @@ async fn silo_quotas_update( tags = ["system/silos"], }] async fn silo_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -755,7 +793,11 @@ async fn silo_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + 
.instrument_dropshot_handler(&rqctx, handler) + .await } /// Create a silo @@ -765,18 +807,22 @@ async fn silo_list( tags = ["system/silos"], }] async fn silo_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_silo_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let silo = nexus.silo_create(&opctx, new_silo_params.into_inner()).await?; Ok(HttpResponseCreated(silo.try_into()?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch silo @@ -788,19 +834,23 @@ async fn silo_create( tags = ["system/silos"], }] async fn silo_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; let (.., silo) = silo_lookup.fetch().await?; Ok(HttpResponseOk(silo.try_into()?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List IP pools linked to silo @@ -814,14 +864,14 @@ async fn silo_view( tags = ["system/silos"], }] async fn silo_ip_pool_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); @@ -846,7 +896,11 @@ async fn silo_ip_pool_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete a silo @@ -858,19 +912,23 @@ async fn silo_ip_pool_list( tags = ["system/silos"], }] async fn silo_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = path_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, params.silo)?; nexus.silo_delete(&opctx, &silo_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch silo IAM policy @@ -880,19 +938,23 @@ async fn silo_delete( tags = ["system/silos"], }] async fn silo_policy_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; let policy 
= nexus.silo_fetch_policy(&opctx, &silo_lookup).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update silo IAM policy @@ -902,7 +964,7 @@ async fn silo_policy_view( tags = ["system/silos"], }] async fn silo_policy_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_policy: TypedBody>, ) -> Result>, HttpError> { @@ -913,14 +975,18 @@ async fn silo_policy_update( // This should have been validated during parsing. bail_unless!(nasgns <= shared::MAX_ROLE_ASSIGNMENTS_PER_RESOURCE); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; let policy = nexus.silo_update_policy(&opctx, &silo_lookup, &new_policy).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Silo-specific user endpoints @@ -932,13 +998,13 @@ async fn silo_policy_update( tags = ["system/silos"], }] async fn silo_user_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanById::from_query(&query)?; @@ -956,7 +1022,11 @@ async fn silo_user_list( &|_, user: &User| user.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Path parameters for Silo User requests @@ -973,14 +1043,14 @@ struct UserParam { tags = ["system/silos"], }] async fn silo_user_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; @@ -988,7 +1058,11 @@ async fn silo_user_view( nexus.silo_user_fetch(&opctx, &silo_lookup, path.user_id).await?; Ok(HttpResponseOk(user.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Silo identity providers @@ -1000,13 +1074,13 @@ async fn silo_user_view( tags = ["system/silos"], }] async fn silo_identity_provider_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -1025,7 +1099,11 @@ 
async fn silo_identity_provider_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Silo SAML identity providers @@ -1037,14 +1115,14 @@ async fn silo_identity_provider_list( tags = ["system/silos"], }] async fn saml_identity_provider_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_provider: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; let provider = nexus @@ -1056,7 +1134,11 @@ async fn saml_identity_provider_create( .await?; Ok(HttpResponseCreated(provider.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch SAML IdP @@ -1066,14 +1148,14 @@ async fn saml_identity_provider_create( tags = ["system/silos"], }] async fn saml_identity_provider_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let saml_identity_provider_selector = @@ -1090,7 +1172,11 @@ async fn saml_identity_provider_view( .await?; Ok(HttpResponseOk(provider.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // TODO: no DELETE for identity providers? 
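// ---------------------------------------------------------------------
// Editor's sketch (hypothetical): why this PR adds
// `impl std::borrow::Borrow<ServerContext> for ApiContext` -- presumably so
// code that only needs a &ServerContext can stay generic and accept the new
// context type unchanged. Minimal stand-ins:
fn sketch_borrow_server_context() {
    use std::borrow::Borrow;
    use std::sync::Arc;

    struct ServerContext { name: &'static str }
    struct ApiContext { context: Arc<ServerContext> }

    // Mirrors the Borrow impl added in nexus/src/context.rs.
    impl Borrow<ServerContext> for ApiContext {
        fn borrow(&self) -> &ServerContext {
            &self.context
        }
    }

    // Generic over anything that can lend out a &ServerContext.
    fn context_name<C: Borrow<ServerContext>>(ctx: &C) -> &'static str {
        ctx.borrow().name
    }

    let api = ApiContext { context: Arc::new(ServerContext { name: "nexus" }) };
    assert_eq!(context_name(&api), "nexus");
}
// ---------------------------------------------------------------------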
@@ -1108,14 +1194,14 @@ async fn saml_identity_provider_view( tags = ["system/silos"], }] async fn local_idp_user_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_user_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; let user = nexus @@ -1127,7 +1213,11 @@ async fn local_idp_user_create( .await?; Ok(HttpResponseCreated(user.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete user @@ -1137,21 +1227,25 @@ async fn local_idp_user_create( tags = ["system/silos"], }] async fn local_idp_user_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; nexus.local_idp_delete_user(&opctx, &silo_lookup, path.user_id).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Set or invalidate user's password @@ -1164,7 +1258,7 @@ async fn local_idp_user_delete( tags = ["system/silos"], }] async fn local_idp_user_set_password( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, update: TypedBody, @@ -1172,7 +1266,7 @@ async fn local_idp_user_set_password( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; @@ -1186,7 +1280,11 @@ async fn local_idp_user_set_password( .await?; Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List projects @@ -1196,12 +1294,12 @@ async fn local_idp_user_set_password( tags = ["projects"], }] async fn project_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -1219,7 +1317,11 @@ async fn project_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create project @@ -1229,18 +1331,22 @@ async fn project_list( tags = ["projects"], }] async fn project_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_project: 
TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let project = nexus.project_create(&opctx, &new_project.into_inner()).await?; Ok(HttpResponseCreated(project.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch project @@ -1250,11 +1356,11 @@ async fn project_create( tags = ["projects"], }] async fn project_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -1264,7 +1370,11 @@ async fn project_view( nexus.project_lookup(&opctx, project_selector)?.fetch().await?; Ok(HttpResponseOk(project.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete project @@ -1274,11 +1384,11 @@ async fn project_view( tags = ["projects"], }] async fn project_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -1288,7 +1398,11 @@ async fn project_delete( nexus.project_delete(&opctx, &project_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // TODO-correctness: Is it valid for PUT to accept application/json that's a @@ -1303,12 +1417,12 @@ async fn project_delete( tags = ["projects"], }] async fn project_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, updated_project: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let updated_project = updated_project.into_inner(); let handler = async { @@ -1321,7 +1435,11 @@ async fn project_update( .await?; Ok(HttpResponseOk(project.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch project's IAM policy @@ -1331,11 +1449,11 @@ async fn project_update( tags = ["projects"], }] async fn project_policy_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result>, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -1346,7 +1464,11 @@ async fn project_policy_view( nexus.project_fetch_policy(&opctx, &project_lookup).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + 
.instrument_dropshot_handler(&rqctx, handler) + .await } /// Update project's IAM policy @@ -1356,12 +1478,12 @@ async fn project_policy_view( tags = ["projects"], }] async fn project_policy_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_policy: TypedBody>, ) -> Result>, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let new_policy = new_policy.into_inner(); let handler = async { @@ -1374,7 +1496,11 @@ async fn project_policy_update( .await?; Ok(HttpResponseOk(new_policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // IP Pools @@ -1386,12 +1512,12 @@ async fn project_policy_update( tags = ["projects"], }] async fn project_ip_pool_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -1412,7 +1538,11 @@ async fn project_ip_pool_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch IP pool @@ -1422,13 +1552,13 @@ async fn project_ip_pool_list( tags = ["projects"], }] async fn project_ip_pool_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let pool_selector = path_params.into_inner().pool; let (pool, silo_link) = nexus.silo_ip_pool_fetch(&opctx, &pool_selector).await?; @@ -1437,7 +1567,11 @@ async fn project_ip_pool_view( is_default: silo_link.is_default, })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List IP pools @@ -1447,12 +1581,12 @@ async fn project_ip_pool_view( tags = ["system/networking"], }] async fn ip_pool_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -1470,7 +1604,11 @@ async fn ip_pool_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Deserialize, JsonSchema)] @@ -1485,18 +1623,22 @@ pub struct IpPoolPathParam { tags = ["system/networking"], }] async fn ip_pool_create( - rqctx: RequestContext>, + rqctx: RequestContext, pool_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let pool_params = pool_params.into_inner(); let handler = async { let opctx = 
crate::context::op_context_for_external_api(&rqctx).await?; let pool = nexus.ip_pool_create(&opctx, &pool_params).await?; Ok(HttpResponseCreated(IpPool::from(pool))) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch IP pool @@ -1506,13 +1648,13 @@ async fn ip_pool_create( tags = ["system/networking"], }] async fn ip_pool_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let pool_selector = path_params.into_inner().pool; // We do not prevent the service pool from being fetched by name or ID // like we do for update, delete, associate. @@ -1520,7 +1662,11 @@ async fn ip_pool_view( nexus.ip_pool_lookup(&opctx, &pool_selector)?.fetch().await?; Ok(HttpResponseOk(IpPool::from(pool))) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete IP pool @@ -1530,19 +1676,23 @@ async fn ip_pool_view( tags = ["system/networking"], }] async fn ip_pool_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; nexus.ip_pool_delete(&opctx, &pool_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update IP pool @@ -1552,21 +1702,25 @@ async fn ip_pool_delete( tags = ["system/networking"], }] async fn ip_pool_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, updates: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let updates = updates.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; let pool = nexus.ip_pool_update(&opctx, &pool_lookup, &updates).await?; Ok(HttpResponseOk(pool.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch IP pool utilization @@ -1576,13 +1730,13 @@ async fn ip_pool_update( tags = ["system/networking"], }] async fn ip_pool_utilization_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let pool_selector = path_params.into_inner().pool; // We do not prevent the service pool from being fetched by name or ID // like we do for update, delete, associate. 
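Every hunk above applies the same two mechanical changes: the handler's context type parameter moves from the bare server state to a wrapper (so handlers read `apictx.context.nexus` instead of `apictx.nexus`), and the single-line `instrument_dropshot_handler` call is reflowed across multiple lines. A minimal self-contained sketch of what such a wrapper and instrumentation hook could look like; all type names below are stand-ins inferred from the diff, not the real Nexus definitions:

use std::future::Future;
use std::sync::Arc;
use std::time::Instant;

// Stand-ins for the real Nexus server state (assumptions for illustration).
pub struct Nexus;
pub struct LatencyTracker;

impl LatencyTracker {
    /// Time a handler future and record its latency before returning the
    /// result, in the spirit of `instrument_dropshot_handler`.
    pub async fn instrument<F, T, E>(&self, handler: F) -> Result<T, E>
    where
        F: Future<Output = Result<T, E>>,
    {
        let start = Instant::now();
        let result = handler.await;
        let _elapsed = start.elapsed(); // a real tracker would emit a metric here
        result
    }
}

pub struct ServerContext {
    pub nexus: Nexus,
    pub external_latencies: LatencyTracker,
}

/// The wrapper this diff threads through every endpoint: handlers receive
/// it as their request context and reach server state via `.context`.
#[derive(Clone)]
pub struct ApiContext {
    pub context: Arc<ServerContext>,
}

With that shape, the repeated edit in each handler is just `&apictx.nexus` becoming `&apictx.context.nexus`; behavior is otherwise unchanged.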
@@ -1591,7 +1745,11 @@ async fn ip_pool_utilization_view( nexus.ip_pool_utilization_view(&opctx, &pool_lookup).await?; Ok(HttpResponseOk(utilization.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List IP pool's linked silos @@ -1601,7 +1759,7 @@ async fn ip_pool_utilization_view( tags = ["system/networking"], }] async fn ip_pool_silo_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, // paginating by resource_id because they're unique per pool. most robust // option would be to paginate by a composite key representing the (pool, @@ -1618,7 +1776,7 @@ async fn ip_pool_silo_list( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; @@ -1639,7 +1797,11 @@ async fn ip_pool_silo_list( &|_, x: &views::IpPoolSiloLink| x.silo_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Link IP pool to silo @@ -1653,14 +1815,14 @@ async fn ip_pool_silo_list( tags = ["system/networking"], }] async fn ip_pool_silo_link( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, resource_assoc: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let resource_assoc = resource_assoc.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; @@ -1669,7 +1831,11 @@ async fn ip_pool_silo_link( .await?; Ok(HttpResponseCreated(assoc.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Unlink IP pool from silo @@ -1681,20 +1847,24 @@ async fn ip_pool_silo_link( tags = ["system/networking"], }] async fn ip_pool_silo_unlink( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; nexus.ip_pool_unlink_silo(&opctx, &pool_lookup, &silo_lookup).await?; Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Make IP pool default for silo @@ -1709,14 +1879,14 @@ async fn ip_pool_silo_unlink( tags = ["system/networking"], }] async fn ip_pool_silo_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, update: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = 
path_params.into_inner(); let update = update.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; @@ -1726,7 +1896,11 @@ async fn ip_pool_silo_update( .await?; Ok(HttpResponseOk(assoc.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch Oxide service IP pool @@ -1736,16 +1910,20 @@ async fn ip_pool_silo_update( tags = ["system/networking"], }] async fn ip_pool_service_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let pool = nexus.ip_pool_service_fetch(&opctx).await?; Ok(HttpResponseOk(IpPool::from(pool))) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } type IpPoolRangePaginationParams = PaginationParams; @@ -1759,14 +1937,14 @@ type IpPoolRangePaginationParams = PaginationParams; tags = ["system/networking"], }] async fn ip_pool_range_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let path = path_params.into_inner(); let marker = match query.page { @@ -1793,7 +1971,11 @@ async fn ip_pool_range_list( }, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Add range to IP pool @@ -1805,21 +1987,25 @@ async fn ip_pool_range_list( tags = ["system/networking"], }] async fn ip_pool_range_add( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, range_params: TypedBody, ) -> Result, HttpError> { let apictx = &rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let range = range_params.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; let out = nexus.ip_pool_add_range(&opctx, &pool_lookup, &range).await?; Ok(HttpResponseCreated(out.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Remove range from IP pool @@ -1829,21 +2015,25 @@ async fn ip_pool_range_add( tags = ["system/networking"], }] async fn ip_pool_range_remove( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, range_params: TypedBody, ) -> Result { let apictx = &rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let range = range_params.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; nexus.ip_pool_delete_range(&opctx, &pool_lookup, &range).await?; Ok(HttpResponseUpdatedNoContent()) }; - 
apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List IP ranges for the Oxide service pool @@ -1855,13 +2045,13 @@ async fn ip_pool_range_remove( tags = ["system/networking"], }] async fn ip_pool_service_range_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let marker = match query.page { WhichPage::First(_) => None, @@ -1886,7 +2076,11 @@ async fn ip_pool_service_range_list( }, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Add IP range to Oxide service pool @@ -1898,18 +2092,22 @@ async fn ip_pool_service_range_list( tags = ["system/networking"], }] async fn ip_pool_service_range_add( - rqctx: RequestContext>, + rqctx: RequestContext, range_params: TypedBody, ) -> Result, HttpError> { let apictx = &rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let range = range_params.into_inner(); let out = nexus.ip_pool_service_add_range(&opctx, &range).await?; Ok(HttpResponseCreated(out.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Remove IP range from Oxide service pool @@ -1919,18 +2117,22 @@ async fn ip_pool_service_range_add( tags = ["system/networking"], }] async fn ip_pool_service_range_remove( - rqctx: RequestContext>, + rqctx: RequestContext, range_params: TypedBody, ) -> Result { let apictx = &rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let range = range_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus.ip_pool_service_delete_range(&opctx, &range).await?; Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Floating IP Addresses @@ -1942,12 +2144,12 @@ async fn ip_pool_service_range_remove( tags = ["floating-ips"], }] async fn floating_ip_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; @@ -1964,7 +2166,11 @@ async fn floating_ip_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create floating IP @@ -1974,12 +2180,12 @@ async fn floating_ip_list( tags = ["floating-ips"], }] async fn floating_ip_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, 
floating_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let floating_params = floating_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -1990,7 +2196,11 @@ async fn floating_ip_create( .await?; Ok(HttpResponseCreated(ip)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update floating IP @@ -2000,14 +2210,14 @@ async fn floating_ip_create( tags = ["floating-ips"], }] async fn floating_ip_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, updated_floating_ip: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let updated_floating_ip_params = updated_floating_ip.into_inner(); @@ -2027,7 +2237,11 @@ async fn floating_ip_update( .await?; Ok(HttpResponseOk(floating_ip)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete floating IP @@ -2037,14 +2251,14 @@ async fn floating_ip_update( tags = ["floating-ips"], }] async fn floating_ip_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let floating_ip_selector = params::FloatingIpSelector { @@ -2057,7 +2271,11 @@ async fn floating_ip_delete( nexus.floating_ip_delete(&opctx, fip_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch floating IP @@ -2067,14 +2285,14 @@ async fn floating_ip_delete( tags = ["floating-ips"] }] async fn floating_ip_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let floating_ip_selector = params::FloatingIpSelector { @@ -2087,7 +2305,11 @@ async fn floating_ip_view( .await?; Ok(HttpResponseOk(fip.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Attach floating IP @@ -2099,7 +2321,7 @@ async fn floating_ip_view( tags = ["floating-ips"], }] async fn floating_ip_attach( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, target: TypedBody, @@ -2107,7 +2329,7 @@ async fn floating_ip_attach( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let 
nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let floating_ip_selector = params::FloatingIpSelector { @@ -2123,7 +2345,11 @@ async fn floating_ip_attach( .await?; Ok(HttpResponseAccepted(ip)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Detach floating IP @@ -2135,14 +2361,14 @@ async fn floating_ip_attach( tags = ["floating-ips"], }] async fn floating_ip_detach( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let floating_ip_selector = params::FloatingIpSelector { @@ -2154,7 +2380,11 @@ async fn floating_ip_detach( let ip = nexus.floating_ip_detach(&opctx, fip_lookup).await?; Ok(HttpResponseAccepted(ip)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Disks @@ -2166,13 +2396,13 @@ async fn floating_ip_detach( tags = ["disks"], }] async fn disk_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -2191,7 +2421,11 @@ async fn disk_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // TODO-correctness See note about instance create. This should be async. 
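The list endpoints in these hunks (`project_ip_pool_list`, `ip_pool_list`, `floating_ip_list`, `disk_list`, and the rest) share one pagination recipe: parse the query into page parameters, pick a name-or-ID scan order, fetch one page, and return it with a marker for the next request. A rough sketch of that flow under assumed, simplified types — the real helpers are `data_page_params_for`, `ScanByNameOrId::from_query`, and `ScanByNameOrId::results_page`:

/// Simplified stand-in for the name-or-ID cursor these endpoints paginate by.
#[derive(Clone, Debug)]
pub enum NameOrId {
    Name(String),
    Id(u64),
}

/// One page of results plus the cursor to resume from, loosely mirroring
/// the results page the handlers build.
pub struct ResultsPage<T> {
    pub items: Vec<T>,
    pub next_page: Option<NameOrId>,
}

/// Assemble a page: the next-page marker is derived from the last item via
/// a caller-supplied marker function, as in the diff's
/// `&marker_for_name_or_id` argument.
pub fn results_page<T>(
    items: Vec<T>,
    marker_for_name_or_id: impl Fn(&T) -> NameOrId,
) -> ResultsPage<T> {
    let next_page = items.last().map(|last| marker_for_name_or_id(last));
    ResultsPage { items, next_page }
}

A caller threads the returned marker back in as the next request's page token, which is why a unique-per-pool key makes a robust cursor, as the `ip_pool_silo_list` comment above notes.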
@@ -2202,14 +2436,14 @@ async fn disk_list( tags = ["disks"] }] async fn disk_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_disk: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let params = new_disk.into_inner(); let project_lookup = nexus.project_lookup(&opctx, query)?; @@ -2217,7 +2451,11 @@ async fn disk_create( nexus.project_create_disk(&opctx, &project_lookup, ¶ms).await?; Ok(HttpResponseCreated(disk.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch disk @@ -2227,14 +2465,14 @@ async fn disk_create( tags = ["disks"] }] async fn disk_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let disk_selector = @@ -2243,7 +2481,11 @@ async fn disk_view( nexus.disk_lookup(&opctx, disk_selector)?.fetch().await?; Ok(HttpResponseOk(disk.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete disk @@ -2253,14 +2495,14 @@ async fn disk_view( tags = ["disks"], }] async fn disk_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let disk_selector = @@ -2269,7 +2511,11 @@ async fn disk_delete( nexus.project_delete_disk(&opctx, &disk_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Display, Serialize, Deserialize, JsonSchema)] @@ -2297,7 +2543,7 @@ struct DiskMetricsPath { tags = ["disks"], }] async fn disk_metrics_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query< PaginationParams, @@ -2306,7 +2552,7 @@ async fn disk_metrics_list( ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); @@ -2331,7 +2577,11 @@ async fn disk_metrics_list( Ok(HttpResponseOk(result)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Start importing blocks into disk @@ -2343,14 +2593,14 @@ async fn disk_metrics_list( tags = ["disks"], }] async fn disk_bulk_write_import_start( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = 
rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); @@ -2362,7 +2612,11 @@ async fn disk_bulk_write_import_start( Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Import blocks into disk @@ -2372,7 +2626,7 @@ async fn disk_bulk_write_import_start( tags = ["disks"], }] async fn disk_bulk_write_import( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, import_params: TypedBody, @@ -2380,7 +2634,7 @@ async fn disk_bulk_write_import( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let params = import_params.into_inner(); @@ -2393,7 +2647,11 @@ async fn disk_bulk_write_import( Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Stop importing blocks into disk @@ -2405,14 +2663,14 @@ async fn disk_bulk_write_import( tags = ["disks"], }] async fn disk_bulk_write_import_stop( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); @@ -2424,7 +2682,11 @@ async fn disk_bulk_write_import_stop( Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Confirm disk block import completion @@ -2434,7 +2696,7 @@ async fn disk_bulk_write_import_stop( tags = ["disks"], }] async fn disk_finalize_import( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, finalize_params: TypedBody, @@ -2442,7 +2704,7 @@ async fn disk_finalize_import( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let params = finalize_params.into_inner(); @@ -2454,7 +2716,11 @@ async fn disk_finalize_import( Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Instances @@ -2466,12 +2732,12 @@ async fn disk_finalize_import( tags = ["instances"], }] async fn instance_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = 
data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -2491,7 +2757,11 @@ async fn instance_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create instance @@ -2501,12 +2771,12 @@ async fn instance_list( tags = ["instances"], }] async fn instance_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_instance: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let project_selector = query_params.into_inner(); let new_instance_params = &new_instance.into_inner(); let handler = async { @@ -2521,7 +2791,11 @@ async fn instance_create( .await?; Ok(HttpResponseCreated(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch instance @@ -2531,12 +2805,12 @@ async fn instance_create( tags = ["instances"], }] async fn instance_view( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let handler = async { @@ -2555,7 +2829,11 @@ async fn instance_view( .await?; Ok(HttpResponseOk(instance_and_vmm.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete instance @@ -2565,12 +2843,12 @@ async fn instance_view( tags = ["instances"], }] async fn instance_delete( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2584,7 +2862,11 @@ async fn instance_delete( nexus.project_destroy_instance(&opctx, &instance_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // TODO should this be in the public API? 
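The instance endpoints follow a selector-then-lookup shape before acting: the path's name or ID and the query's project scope are folded into a selector, resolved to an authorization-checked lookup, and only then operated on. A hypothetical, heavily simplified rendering of that order of operations; the names below are placeholders, not the real lookup machinery:

/// Placeholder selector combining path and query input, loosely modeled on
/// the `params::InstanceSelector` built in the handlers above.
pub struct InstanceSelector {
    pub project: Option<String>,
    pub instance: String,
}

/// Placeholder for an authorization-checked handle to one instance.
pub struct InstanceLookup {
    pub instance: String,
}

/// Resolve a selector into a lookup, failing early on malformed input; the
/// real lookup also consults the database and enforces authz.
pub fn instance_lookup(sel: InstanceSelector) -> Result<InstanceLookup, String> {
    if sel.instance.is_empty() {
        return Err("instance selector must name an instance".to_string());
    }
    // Project scope is carried along; it may be omitted when the instance
    // is identified by ID rather than name (assumption).
    let _scope = sel.project;
    Ok(InstanceLookup { instance: sel.instance })
}

Only after the lookup succeeds does the handler call the verb (`instance_reboot`, `instance_start`, `instance_stop`, `project_destroy_instance`), so authorization failures surface before any state change.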
@@ -2595,13 +2877,13 @@ async fn instance_delete( tags = ["instances"], }] async fn instance_migrate( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, migrate_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let migrate_instance_params = migrate_params.into_inner(); @@ -2622,7 +2904,11 @@ async fn instance_migrate( .await?; Ok(HttpResponseOk(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Reboot an instance @@ -2632,12 +2918,12 @@ async fn instance_migrate( tags = ["instances"], }] async fn instance_reboot( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2651,7 +2937,11 @@ async fn instance_reboot( let instance = nexus.instance_reboot(&opctx, &instance_lookup).await?; Ok(HttpResponseAccepted(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Boot instance @@ -2661,12 +2951,12 @@ async fn instance_reboot( tags = ["instances"], }] async fn instance_start( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2680,7 +2970,11 @@ async fn instance_start( let instance = nexus.instance_start(&opctx, &instance_lookup).await?; Ok(HttpResponseAccepted(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Stop instance @@ -2690,12 +2984,12 @@ async fn instance_start( tags = ["instances"], }] async fn instance_stop( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2709,7 +3003,11 @@ async fn instance_stop( let instance = nexus.instance_stop(&opctx, &instance_lookup).await?; Ok(HttpResponseAccepted(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch instance serial console @@ -2719,14 +3017,14 @@ async fn instance_stop( tags = ["instances"], }] async fn instance_serial_console( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let 
nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2740,7 +3038,11 @@ async fn instance_serial_console( .await?; Ok(HttpResponseOk(data)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Stream instance serial console @@ -2750,13 +3052,13 @@ async fn instance_serial_console( tags = ["instances"], }] async fn instance_serial_console_stream( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, conn: WebsocketConnection, ) -> WebsocketChannelResult { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -2806,13 +3108,13 @@ async fn instance_serial_console_stream( tags = ["instances"], }] async fn instance_ssh_public_key_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; @@ -2837,7 +3139,11 @@ async fn instance_ssh_public_key_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List disks for instance @@ -2847,13 +3153,13 @@ async fn instance_ssh_public_key_list( tags = ["instances"], }] async fn instance_disk_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, path_params: Path, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; @@ -2878,7 +3184,11 @@ async fn instance_disk_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Attach disk to instance @@ -2888,13 +3198,13 @@ async fn instance_disk_list( tags = ["instances"], }] async fn instance_disk_attach( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, disk_to_attach: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let disk = disk_to_attach.into_inner().disk; @@ -2910,7 +3220,11 @@ async fn instance_disk_attach( nexus.instance_attach_disk(&opctx, &instance_lookup, disk).await?; Ok(HttpResponseAccepted(disk.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Detach disk from instance @@ -2920,7 +3234,7 @@ async fn instance_disk_attach( tags = ["instances"], }] async fn instance_disk_detach( - rqctx: 
RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, disk_to_detach: TypedBody, @@ -2928,7 +3242,7 @@ async fn instance_disk_detach( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let disk = disk_to_detach.into_inner().disk; @@ -2942,7 +3256,11 @@ async fn instance_disk_detach( nexus.instance_detach_disk(&opctx, &instance_lookup, disk).await?; Ok(HttpResponseAccepted(disk.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Certificates @@ -2958,12 +3276,12 @@ async fn instance_disk_detach( tags = ["silos"], }] async fn certificate_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -2981,7 +3299,11 @@ async fn certificate_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create new system-wide x.509 certificate @@ -2994,18 +3316,22 @@ async fn certificate_list( tags = ["silos"] }] async fn certificate_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_cert: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let new_cert_params = new_cert.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let cert = nexus.certificate_create(&opctx, new_cert_params).await?; Ok(HttpResponseCreated(cert.try_into()?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Path parameters for Certificate requests @@ -3023,19 +3349,23 @@ struct CertificatePathParam { tags = ["silos"], }] async fn certificate_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let (.., cert) = nexus.certificate_lookup(&opctx, &path.certificate).fetch().await?; Ok(HttpResponseOk(cert.try_into()?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete certificate @@ -3047,12 +3377,12 @@ async fn certificate_view( tags = ["silos"], }] async fn certificate_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus @@ 
-3063,7 +3393,11 @@ async fn certificate_delete( .await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create address lot @@ -3073,12 +3407,12 @@ async fn certificate_delete( tags = ["system/networking"], }] async fn networking_address_lot_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_address_lot: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = new_address_lot.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.address_lot_create(&opctx, params).await?; @@ -3089,7 +3423,11 @@ async fn networking_address_lot_create( Ok(HttpResponseCreated(AddressLotCreateResponse { lot, blocks })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete address lot @@ -3099,20 +3437,24 @@ async fn networking_address_lot_create( tags = ["system/networking"], }] async fn networking_address_lot_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let address_lot_lookup = nexus.address_lot_lookup(&opctx, path.address_lot)?; nexus.address_lot_delete(&opctx, &address_lot_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List address lots @@ -3122,12 +3464,12 @@ async fn networking_address_lot_delete( tags = ["system/networking"], }] async fn networking_address_lot_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -3146,7 +3488,11 @@ async fn networking_address_lot_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List blocks in address lot @@ -3156,13 +3502,13 @@ async fn networking_address_lot_list( tags = ["system/networking"], }] async fn networking_address_lot_block_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let path = path_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; @@ -3182,7 +3528,11 @@ async fn networking_address_lot_block_list( &|_, x: &AddressLotBlock| x.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, 
handler) + .await } /// Create loopback address @@ -3192,12 +3542,12 @@ async fn networking_address_lot_block_list( tags = ["system/networking"], }] async fn networking_loopback_address_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_loopback_address: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = new_loopback_address.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.loopback_address_create(&opctx, params).await?; @@ -3206,7 +3556,11 @@ async fn networking_loopback_address_create( Ok(HttpResponseCreated(addr)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Serialize, Deserialize, JsonSchema)] @@ -3233,12 +3587,12 @@ pub struct LoopbackAddressPath { tags = ["system/networking"], }] async fn networking_loopback_address_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let addr = match IpNetwork::new(path.address, path.subnet_mask) { @@ -3258,7 +3612,11 @@ async fn networking_loopback_address_delete( .await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List loopback addresses @@ -3268,12 +3626,12 @@ async fn networking_loopback_address_delete( tags = ["system/networking"], }] async fn networking_loopback_address_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -3290,7 +3648,11 @@ async fn networking_loopback_address_list( &|_, x: &LoopbackAddress| x.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create switch port settings @@ -3300,12 +3662,12 @@ async fn networking_loopback_address_list( tags = ["system/networking"], }] async fn networking_switch_port_settings_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_settings: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = new_settings.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.switch_port_settings_post(&opctx, params).await?; @@ -3313,7 +3675,11 @@ async fn networking_switch_port_settings_create( let settings: SwitchPortSettingsView = result.into(); Ok(HttpResponseCreated(settings)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete switch port settings @@ -3323,18 +3689,22 @@ async fn 
networking_switch_port_settings_create( tags = ["system/networking"], }] async fn networking_switch_port_settings_delete( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let selector = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus.switch_port_settings_delete(&opctx, &selector).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List switch port settings @@ -3344,14 +3714,14 @@ async fn networking_switch_port_settings_delete( tags = ["system/networking"], }] async fn networking_switch_port_settings_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query< PaginatedByNameOrId, >, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -3370,7 +3740,11 @@ async fn networking_switch_port_settings_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Get information about switch port @@ -3380,18 +3754,22 @@ async fn networking_switch_port_settings_list( tags = ["system/networking"], }] async fn networking_switch_port_settings_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = path_params.into_inner().port; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let settings = nexus.switch_port_settings_get(&opctx, &query).await?; Ok(HttpResponseOk(settings.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List switch ports @@ -3401,12 +3779,12 @@ async fn networking_switch_port_settings_view( tags = ["system/hardware"], }] async fn networking_switch_port_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -3423,7 +3801,11 @@ async fn networking_switch_port_list( &|_, x: &SwitchPort| x.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Get switch port status @@ -3433,13 +3815,13 @@ async fn networking_switch_port_list( tags = ["system/hardware"], }] async fn networking_switch_port_status( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + 
let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -3449,7 +3831,11 @@ async fn networking_switch_port_status( .await?, )) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Apply switch port settings @@ -3459,14 +3845,14 @@ async fn networking_switch_port_status( tags = ["system/hardware"], }] async fn networking_switch_port_apply_settings( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, settings_body: TypedBody, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let port = path_params.into_inner().port; let query = query_params.into_inner(); let settings = settings_body.into_inner(); @@ -3476,7 +3862,11 @@ async fn networking_switch_port_apply_settings( .await?; Ok(HttpResponseUpdatedNoContent {}) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Clear switch port settings @@ -3486,20 +3876,24 @@ async fn networking_switch_port_apply_settings( tags = ["system/hardware"], }] async fn networking_switch_port_clear_settings( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let port = path_params.into_inner().port; let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus.switch_port_clear_settings(&opctx, &port, &query).await?; Ok(HttpResponseUpdatedNoContent {}) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create new BGP configuration @@ -3509,18 +3903,22 @@ async fn networking_switch_port_clear_settings( tags = ["system/networking"], }] async fn networking_bgp_config_create( - rqctx: RequestContext>, + rqctx: RequestContext, config: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let config = config.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.bgp_config_set(&opctx, &config).await?; Ok(HttpResponseCreated::(result.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List BGP configurations @@ -3530,12 +3928,12 @@ async fn networking_bgp_config_create( tags = ["system/networking"], }] async fn networking_bgp_config_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -3554,7 +3952,11 @@ async fn networking_bgp_config_list( &marker_for_name_or_id, )?)) }; - 
 //TODO pagination? the normal by-name/by-id stuff does not work here
@@ -3565,16 +3967,20 @@ async fn networking_bgp_config_list(
     tags = ["system/networking"],
 }]
 async fn networking_bgp_status(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let result = nexus.bgp_peer_status(&opctx).await?;
         Ok(HttpResponseOk(result))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Get BGP router message history
@@ -3584,18 +3990,22 @@ async fn networking_bgp_status(
     tags = ["system/networking"],
 }]
 async fn networking_bgp_message_history(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let sel = query_params.into_inner();
         let result = nexus.bgp_message_history(&opctx, &sel).await?;
         Ok(HttpResponseOk(AggregateBgpMessageHistory::new(result)))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 //TODO pagination? the normal by-name/by-id stuff does not work here
@@ -3606,18 +4016,22 @@ async fn networking_bgp_message_history(
     tags = ["system/networking"],
 }]
 async fn networking_bgp_imported_routes_ipv4(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let sel = query_params.into_inner();
        let result = nexus.bgp_imported_routes_ipv4(&opctx, &sel).await?;
         Ok(HttpResponseOk(result))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Delete BGP configuration
@@ -3627,18 +4041,22 @@ async fn networking_bgp_imported_routes_ipv4(
     tags = ["system/networking"],
 }]
 async fn networking_bgp_config_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     sel: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let sel = sel.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         nexus.bgp_config_delete(&opctx, &sel).await?;
         Ok(HttpResponseUpdatedNoContent {})
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Create new BGP announce set
@@ -3648,18 +4066,22 @@ async fn networking_bgp_config_delete(
     tags = ["system/networking"],
 }]
 async fn networking_bgp_announce_set_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     config: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let config = config.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let result = nexus.bgp_create_announce_set(&opctx, &config).await?;
         Ok(HttpResponseCreated::(result.0.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 //TODO pagination? the normal by-name/by-id stuff does not work here
@@ -3670,12 +4092,12 @@ async fn networking_bgp_announce_set_create(
     tags = ["system/networking"],
 }]
 async fn networking_bgp_announce_set_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let sel = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let result = nexus
@@ -3686,7 +4108,11 @@ async fn networking_bgp_announce_set_list(
             .collect();
         Ok(HttpResponseOk(result))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Delete BGP announce set
@@ -3696,18 +4122,22 @@ async fn networking_bgp_announce_set_list(
     tags = ["system/networking"],
 }]
 async fn networking_bgp_announce_set_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     selector: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let sel = selector.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         nexus.bgp_delete_announce_set(&opctx, &sel).await?;
         Ok(HttpResponseUpdatedNoContent {})
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Enable a BFD session
@@ -3717,18 +4147,22 @@ async fn networking_bgp_announce_set_delete(
     tags = ["system/networking"],
 }]
 async fn networking_bfd_enable(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     session: TypedBody,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
         nexus.bfd_enable(&opctx, session.into_inner()).await?;
         Ok(HttpResponseUpdatedNoContent {})
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Disable a BFD session
@@ -3738,18 +4172,22 @@ async fn networking_bfd_enable(
     tags = ["system/networking"],
 }]
 async fn networking_bfd_disable(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     session: TypedBody,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
         nexus.bfd_disable(&opctx, session.into_inner()).await?;
         Ok(HttpResponseUpdatedNoContent {})
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Get BFD status
@@ -3759,17 +4197,21 @@ async fn networking_bfd_disable(
     tags = ["system/networking"],
 }]
 async fn networking_bfd_status(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
         let status = nexus.bfd_status(&opctx).await?;
         Ok(HttpResponseOk(status))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Get user-facing services IP allowlist
@@ -3779,11 +4221,11 @@ async fn networking_bfd_status(
     tags = ["system/networking"],
 }]
 async fn networking_allow_list_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         nexus
             .allow_list_view(&opctx)
@@ -3791,7 +4233,11 @@ async fn networking_allow_list_view(
             .map(HttpResponseOk)
             .map_err(HttpError::from)
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Update user-facing services IP allowlist
@@ -3801,22 +4247,27 @@ async fn networking_allow_list_view(
     tags = ["system/networking"],
 }]
 async fn networking_allow_list_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
+        let server_kind = apictx.kind;
         let params = params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let remote_addr = rqctx.request.remote_addr().ip();
         nexus
-            .allow_list_upsert(&opctx, remote_addr, params)
+            .allow_list_upsert(&opctx, remote_addr, server_kind, params)
             .await
            .map(HttpResponseOk)
            .map_err(HttpError::from)
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // Images

@@ -3831,13 +4282,13 @@ async fn networking_allow_list_update(
     tags = ["images"],
 }]
 async fn image_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -3867,7 +4318,11 @@ async fn image_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
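Note: image_list above, like the other list endpoints in this file, follows one pagination recipe: `data_page_params_for` parses the page token, `ScanByNameOrId::from_query` picks the sort mode, and the response page's next-page token is derived from the last returned item via `marker_for_name_or_id`. A schematic stand-alone version of that flow, with simplified stand-in types and assuming a name-sorted input:

    // Schematic of the by-name-or-id pagination flow shared by image_list,
    // vpc_list, snapshot_list, etc. Types are simplified stand-ins.
    struct Item { id: u128, name: String }
    struct ResultsPage<T> { items: Vec<T>, next_page: Option<String> }

    fn list_page(all: &[Item], limit: usize, marker: Option<&str>) -> ResultsPage<Item> {
        // 1. Resume strictly after the marker (the real code sorts by name
        //    or by id depending on the requested scan mode).
        let start = match marker {
            Some(m) => all.iter().position(|i| i.name.as_str() > m).unwrap_or(all.len()),
            None => 0,
        };
        // 2. Take one page of at most `limit` items.
        let items: Vec<Item> = all[start..]
            .iter()
            .take(limit)
            .map(|i| Item { id: i.id, name: i.name.clone() })
            .collect();
        // 3. The next-page token is the marker for the last item returned,
        //    mirroring `ScanByNameOrId::results_page(..., &marker_for_name_or_id)`.
        let next_page = items.last().map(|i| i.name.clone());
        ResultsPage { items, next_page }
    }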
 /// Create image
@@ -3879,14 +4334,14 @@ async fn image_list(
     tags = ["images"]
 }]
 async fn image_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     new_image: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let params = &new_image.into_inner();
         let parent_lookup = match query.project.clone() {
@@ -3905,7 +4360,11 @@ async fn image_create(
         let image = nexus.image_create(&opctx, &parent_lookup, &params).await?;
         Ok(HttpResponseCreated(image.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Fetch image
@@ -3917,14 +4376,14 @@ async fn image_create(
     tags = ["images"],
 }]
 async fn image_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let image: nexus_db_model::Image = match nexus
@@ -3948,7 +4407,11 @@ async fn image_view(
         };
         Ok(HttpResponseOk(image.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Delete image
@@ -3962,14 +4425,14 @@ async fn image_view(
     tags = ["images"],
 }]
 async fn image_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let image_lookup = nexus
@@ -3984,7 +4447,11 @@ async fn image_delete(
         nexus.image_delete(&opctx, &image_lookup).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Promote project image
@@ -3996,14 +4463,14 @@ async fn image_delete(
     tags = ["images"]
 }]
 async fn image_promote(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let image_lookup = nexus
@@ -4018,7 +4485,11 @@ async fn image_promote(
         let image = nexus.image_promote(&opctx, &image_lookup).await?;
         Ok(HttpResponseAccepted(image.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Demote silo image
@@ -4030,14 +4501,14 @@ async fn image_promote(
     tags = ["images"]
 }]
 async fn image_demote(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let image_lookup = nexus
@@ -4053,7 +4524,11 @@ async fn image_demote(
         nexus.image_demote(&opctx, &image_lookup, &project_lookup).await?;
         Ok(HttpResponseAccepted(image.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// List network interfaces
@@ -4063,13 +4538,13 @@ async fn image_demote(
     tags = ["instances"],
 }]
 async fn instance_network_interface_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -4092,7 +4567,11 @@ async fn instance_network_interface_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Create network interface
@@ -4102,14 +4581,14 @@ async fn instance_network_interface_list(
     tags = ["instances"],
 }]
 async fn instance_network_interface_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     interface_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let instance_lookup = nexus.instance_lookup(&opctx, query)?;
         let iface = nexus
@@ -4121,7 +4600,11 @@ async fn instance_network_interface_create(
             .await?;
         Ok(HttpResponseCreated(iface.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Delete network interface
@@ -4136,14 +4619,14 @@ async fn instance_network_interface_create(
     tags = ["instances"],
 }]
 async fn instance_network_interface_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let interface_selector = params::InstanceNetworkInterfaceSelector {
@@ -4158,7 +4641,11 @@ async fn instance_network_interface_delete(
             .await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Fetch network interface
@@ -4168,14 +4655,14 @@ async fn instance_network_interface_delete(
     tags = ["instances"],
 }]
 async fn instance_network_interface_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let interface_selector = params::InstanceNetworkInterfaceSelector {
@@ -4189,7 +4676,11 @@ async fn instance_network_interface_view(
             .await?;
         Ok(HttpResponseOk(interface.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Update network interface
@@ -4199,7 +4690,7 @@ async fn instance_network_interface_view(
     tags = ["instances"],
 }]
 async fn instance_network_interface_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
     updated_iface: TypedBody,
@@ -4207,7 +4698,7 @@ async fn instance_network_interface_update(
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let updated_iface = updated_iface.into_inner();
@@ -4231,7 +4722,11 @@ async fn instance_network_interface_update(
             .await?;
         Ok(HttpResponseOk(InstanceNetworkInterface::from(interface)))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // External IP addresses for instances

@@ -4243,13 +4738,13 @@ async fn instance_network_interface_update(
     tags = ["instances"],
 }]
 async fn instance_external_ip_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     path_params: Path,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -4263,7 +4758,11 @@ async fn instance_external_ip_list(
             nexus.instance_list_external_ips(&opctx, &instance_lookup).await?;
         Ok(HttpResponseOk(ResultsPage { items: ips, next_page: None }))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Allocate and attach ephemeral IP to instance
@@ -4273,7 +4772,7 @@ async fn instance_external_ip_list(
     tags = ["instances"],
 }]
 async fn instance_ephemeral_ip_attach(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
     ip_to_create: TypedBody,
@@ -4281,7 +4780,7 @@ async fn instance_ephemeral_ip_attach(
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let instance_selector = params::InstanceSelector {
@@ -4299,7 +4798,11 @@ async fn instance_ephemeral_ip_attach(
             .await?;
         Ok(HttpResponseAccepted(ip))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
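Note: before touching nexus, every handler mints an `OpContext` via `crate::context::op_context_for_external_api(&rqctx)`; authn/authz then rides on that object (the BFD endpoints earlier show an explicit `opctx.authorize(authz::Action::ListChildren, &authz::FLEET)` gate). A toy version of that control flow, with stand-in types, just to show where a handler bails out:

    // Toy authorization gate mirroring the opctx.authorize(...) calls above.
    #[derive(PartialEq)]
    enum Action { ListChildren, Modify }

    struct OpContext { is_fleet_admin: bool }

    impl OpContext {
        // The real check evaluates policy; this stand-in consults a flag,
        // to show where a handler would return a 403 early.
        fn authorize(&self, _action: Action) -> Result<(), String> {
            if self.is_fleet_admin {
                Ok(())
            } else {
                Err("forbidden".to_string())
            }
        }
    }

    fn bfd_enable_like(opctx: &OpContext) -> Result<(), String> {
        // Mirrors: opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
        opctx.authorize(Action::ListChildren)?;
        // ... nexus.bfd_enable(...) would run here ...
        Ok(())
    }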
 /// Detach and deallocate ephemeral IP from instance
@@ -4309,14 +4812,14 @@ async fn instance_ephemeral_ip_attach(
     tags = ["instances"],
 }]
 async fn instance_ephemeral_ip_detach(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let instance_selector = params::InstanceSelector {
@@ -4334,7 +4837,11 @@ async fn instance_ephemeral_ip_detach(
             .await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // Snapshots

@@ -4346,13 +4853,13 @@ async fn instance_ephemeral_ip_detach(
     tags = ["snapshots"],
 }]
 async fn snapshot_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -4371,7 +4878,11 @@ async fn snapshot_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Create snapshot
@@ -4383,14 +4894,14 @@ async fn snapshot_list(
     tags = ["snapshots"],
 }]
 async fn snapshot_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     new_snapshot: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let new_snapshot_params = &new_snapshot.into_inner();
         let project_lookup = nexus.project_lookup(&opctx, query)?;
@@ -4399,7 +4910,11 @@ async fn snapshot_create(
             .await?;
         Ok(HttpResponseCreated(snapshot.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Fetch snapshot
@@ -4409,14 +4924,14 @@ async fn snapshot_create(
     tags = ["snapshots"],
 }]
 async fn snapshot_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let snapshot_selector = params::SnapshotSelector {
@@ -4427,7 +4942,11 @@ async fn snapshot_view(
             nexus.snapshot_lookup(&opctx, snapshot_selector)?.fetch().await?;
         Ok(HttpResponseOk(snapshot.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Delete snapshot
@@ -4437,14 +4956,14 @@ async fn snapshot_view(
     tags = ["snapshots"],
 }]
 async fn snapshot_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let snapshot_selector = params::SnapshotSelector {
@@ -4456,7 +4975,11 @@ async fn snapshot_delete(
         nexus.snapshot_delete(&opctx, &snapshot_lookup).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // VPCs

@@ -4468,12 +4991,12 @@ async fn snapshot_delete(
     tags = ["vpcs"],
 }]
 async fn vpc_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -4494,7 +5017,11 @@ async fn vpc_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Create VPC
@@ -4504,12 +5031,12 @@ async fn vpc_list(
     tags = ["vpcs"],
 }]
 async fn vpc_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     body: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let query = query_params.into_inner();
     let new_vpc_params = body.into_inner();
     let handler = async {
@@ -4520,7 +5047,11 @@ async fn vpc_create(
             .await?;
         Ok(HttpResponseCreated(vpc.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Fetch VPC
@@ -4530,13 +5061,13 @@ async fn vpc_create(
     tags = ["vpcs"],
 }]
 async fn vpc_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -4545,7 +5076,11 @@ async fn vpc_view(
         let (.., vpc) = nexus.vpc_lookup(&opctx, vpc_selector)?.fetch().await?;
         Ok(HttpResponseOk(vpc.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Update a VPC
@@ -4555,14 +5090,14 @@ async fn vpc_view(
     tags = ["vpcs"],
 }]
 async fn vpc_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
     updated_vpc: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let updated_vpc_params = &updated_vpc.into_inner();
@@ -4575,7 +5110,11 @@ async fn vpc_update(
             .await?;
         Ok(HttpResponseOk(vpc.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Delete VPC
@@ -4585,13 +5124,13 @@ async fn vpc_update(
     tags = ["vpcs"],
 }]
 async fn vpc_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -4601,7 +5140,11 @@ async fn vpc_delete(
         nexus.project_delete_vpc(&opctx, &vpc_lookup).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// List subnets
@@ -4611,12 +5154,12 @@ async fn vpc_delete(
     tags = ["vpcs"],
 }]
 async fn vpc_subnet_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -4636,7 +5179,11 @@ async fn vpc_subnet_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Create subnet
@@ -4646,13 +5193,13 @@ async fn vpc_subnet_list(
     tags = ["vpcs"],
 }]
 async fn vpc_subnet_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     create_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let create = create_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -4661,7 +5208,11 @@ async fn vpc_subnet_create(
             nexus.vpc_create_subnet(&opctx, &vpc_lookup, &create).await?;
         Ok(HttpResponseCreated(subnet.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Fetch subnet
@@ -4671,13 +5222,13 @@ async fn vpc_subnet_create(
     tags = ["vpcs"],
 }]
 async fn vpc_subnet_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -4690,7 +5241,11 @@ async fn vpc_subnet_view(
             nexus.vpc_subnet_lookup(&opctx, subnet_selector)?.fetch().await?;
         Ok(HttpResponseOk(subnet.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Delete subnet
@@ -4700,13 +5255,13 @@ async fn vpc_subnet_view(
     tags = ["vpcs"],
 }]
 async fn vpc_subnet_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -4719,7 +5274,11 @@ async fn vpc_subnet_delete(
         nexus.vpc_delete_subnet(&opctx, &subnet_lookup).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Update subnet
@@ -4729,14 +5288,14 @@ async fn vpc_subnet_delete(
     tags = ["vpcs"],
 }]
 async fn vpc_subnet_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
     subnet_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let subnet_params = subnet_params.into_inner();
@@ -4752,7 +5311,11 @@ async fn vpc_subnet_update(
             .await?;
         Ok(HttpResponseOk(subnet.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // This endpoint is likely temporary. We would rather list all IPs allocated in
@@ -4766,13 +5329,13 @@ async fn vpc_subnet_update(
     tags = ["vpcs"],
 }]
 async fn vpc_subnet_list_network_interfaces(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let path = path_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
@@ -4801,7 +5364,11 @@ async fn vpc_subnet_list_network_interfaces(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
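Note: the VPC and subnet handlers above share another idiom: fold path and query params into a typed selector, resolve it to a lookup object, then `fetch()` or mutate through it (`params::SnapshotSelector` / `snapshot_lookup`, `vpc_lookup`, `vpc_subnet_lookup`, and so on). Sketched generically with stand-in types, since the real lookups are generated:

    // Sketch of the selector -> lookup -> fetch idiom. Stand-in types.
    struct SubnetSelector {
        project: Option<String>,
        vpc: Option<String>,
        subnet: String,
    }

    struct SubnetLookup(SubnetSelector);

    fn vpc_subnet_lookup(sel: SubnetSelector) -> Result<SubnetLookup, String> {
        // The real lookup validates the selector is well-formed, e.g. a
        // project is only meaningful when the VPC is given by name.
        if sel.project.is_some() && sel.vpc.is_none() {
            return Err("project provided without vpc".into());
        }
        Ok(SubnetLookup(sel))
    }

    impl SubnetLookup {
        fn fetch(&self) -> Result<String, String> {
            // Real code walks project -> vpc -> subnet, checking authz at
            // each level, and returns the database row.
            Ok(format!("subnet {}", self.0.subnet))
        }
    }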
 // VPC Firewalls

@@ -4814,7 +5381,7 @@ async fn vpc_subnet_list_network_interfaces(
     tags = ["vpcs"],
 }]
 async fn vpc_firewall_rules_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result, HttpError> {
     // TODO: Check If-Match and fail if the ETag doesn't match anymore.
@@ -4823,7 +5390,7 @@ async fn vpc_firewall_rules_view(
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let vpc_lookup = nexus.vpc_lookup(&opctx, query)?;
         let rules = nexus.vpc_list_firewall_rules(&opctx, &vpc_lookup).await?;
@@ -4831,7 +5398,11 @@ async fn vpc_firewall_rules_view(
             rules: rules.into_iter().map(|rule| rule.into()).collect(),
         }))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Replace firewall rules
@@ -4841,7 +5412,7 @@ async fn vpc_firewall_rules_view(
     tags = ["vpcs"],
 }]
 async fn vpc_firewall_rules_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     router_params: TypedBody,
 ) -> Result, HttpError> {
@@ -4850,7 +5421,7 @@ async fn vpc_firewall_rules_update(
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let router_params = router_params.into_inner();
         let vpc_lookup = nexus.vpc_lookup(&opctx, query)?;
@@ -4861,7 +5432,11 @@ async fn vpc_firewall_rules_update(
             rules: rules.into_iter().map(|rule| rule.into()).collect(),
         }))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // VPC Routers

@@ -4873,13 +5448,13 @@ async fn vpc_firewall_rules_update(
     tags = ["vpcs"],
 }]
 async fn vpc_router_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -4898,7 +5473,11 @@ async fn vpc_router_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Fetch router
@@ -4908,13 +5487,13 @@ async fn vpc_router_list(
     tags = ["vpcs"],
 }]
 async fn vpc_router_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -4927,7 +5506,11 @@ async fn vpc_router_view(
             nexus.vpc_router_lookup(&opctx, router_selector)?.fetch().await?;
         Ok(HttpResponseOk(vpc_router.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Create VPC router
@@ -4937,13 +5520,13 @@ async fn vpc_router_view(
     tags = ["vpcs"],
 }]
 async fn vpc_router_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     create_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let create = create_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -4958,7 +5541,11 @@ async fn vpc_router_create(
             .await?;
         Ok(HttpResponseCreated(router.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Delete router
@@ -4968,13 +5555,13 @@ async fn vpc_router_create(
     tags = ["vpcs"],
 }]
 async fn vpc_router_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -4987,7 +5574,11 @@ async fn vpc_router_delete(
         nexus.vpc_delete_router(&opctx, &router_lookup).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Update router
@@ -4997,14 +5588,14 @@ async fn vpc_router_delete(
     tags = ["vpcs"],
 }]
 async fn vpc_router_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
     router_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let router_params = router_params.into_inner();
@@ -5020,7 +5611,11 @@ async fn vpc_router_update(
             .await?;
         Ok(HttpResponseOk(router.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// List routes
@@ -5032,13 +5627,13 @@ async fn vpc_router_update(
     tags = ["vpcs"],
 }]
 async fn vpc_router_route_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -5057,7 +5652,11 @@ async fn vpc_router_route_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // Vpc Router Routes

@@ -5069,14 +5668,14 @@ async fn vpc_router_route_list(
     tags = ["vpcs"],
 }]
 async fn vpc_router_route_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let route_selector = params::RouteSelector {
@@ -5091,7 +5690,11 @@ async fn vpc_router_route_view(
             .await?;
         Ok(HttpResponseOk(route.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Create route
@@ -5101,14 +5704,14 @@ async fn vpc_router_route_view(
     tags = ["vpcs"],
 }]
 async fn vpc_router_route_create(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
     create_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let create = create_params.into_inner();
         let router_lookup = nexus.vpc_router_lookup(&opctx, query)?;
@@ -5122,7 +5725,11 @@ async fn vpc_router_route_create(
             .await?;
         Ok(HttpResponseCreated(route.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Delete route
@@ -5132,14 +5739,14 @@ async fn vpc_router_route_create(
     tags = ["vpcs"],
 }]
 async fn vpc_router_route_delete(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let route_selector = params::RouteSelector {
@@ -5153,7 +5760,11 @@ async fn vpc_router_route_delete(
         nexus.router_delete_route(&opctx, &route_lookup).await?;
         Ok(HttpResponseDeleted())
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Update route
@@ -5163,14 +5774,14 @@ async fn vpc_router_route_delete(
     tags = ["vpcs"],
 }]
 async fn vpc_router_route_update(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
     router_params: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let router_params = router_params.into_inner();
@@ -5188,7 +5799,11 @@ async fn vpc_router_route_update(
             .await?;
         Ok(HttpResponseOk(route.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // Racks

@@ -5200,12 +5815,12 @@ async fn vpc_router_route_update(
     tags = ["system/hardware"],
 }]
 async fn rack_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let racks = nexus
@@ -5220,7 +5835,11 @@ async fn rack_list(
             &|_, rack: &Rack| rack.identity.id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 /// Path parameters for Rack requests
@@ -5237,18 +5856,22 @@ struct RackPathParam {
     tags = ["system/hardware"],
 }]
 async fn rack_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let rack_info = nexus.rack_lookup(&opctx, &path.rack_id).await?;
         Ok(HttpResponseOk(rack_info.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// List uninitialized sleds
@@ -5258,7 +5881,7 @@ async fn rack_view(
     tags = ["system/hardware"]
 }]
 async fn sled_list_uninitialized(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
@@ -5270,12 +5893,16 @@ async fn sled_list_uninitialized(
         );
     }
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let sleds = nexus.sled_list_uninitialized(&opctx).await?;
         Ok(HttpResponseOk(ResultsPage { items: sleds, next_page: None }))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// The unique ID of a sled.
@@ -5296,11 +5923,11 @@ pub struct SledId {
     tags = ["system/hardware"]
 }]
 async fn sled_add(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     sled: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let id = nexus
@@ -5309,7 +5936,11 @@ async fn sled_add(
             .into_untyped_uuid();
         Ok(HttpResponseCreated(SledId { id }))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // Sleds

@@ -5321,12 +5952,12 @@ async fn sled_add(
     tags = ["system/hardware"],
 }]
 async fn sled_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let sleds = nexus
@@ -5341,7 +5972,11 @@ async fn sled_list(
             &|_, sled: &Sled| sled.identity.id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Fetch sled
@@ -5351,19 +5986,23 @@ async fn sled_list(
     tags = ["system/hardware"],
 }]
 async fn sled_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let (.., sled) =
             nexus.sled_lookup(&opctx, &path.sled_id)?.fetch().await?;
         Ok(HttpResponseOk(sled.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
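Note: the next hunk, sled_set_provision_policy, reads the requested policy out of the body (`new_provision_state.into_inner().state`), applies it, and answers with a response the caller can use to detect races. A reduced model of that old/new exchange; the enum variants and the `old_state`/`new_state` naming are assumptions here, not shown in the diff:

    // Reduced model: apply the new provision policy, return the previous one.
    #[derive(Clone, Copy, PartialEq, Debug)]
    enum SledProvisionPolicy { Provisionable, NonProvisionable }

    struct Sled { policy: SledProvisionPolicy }

    impl Sled {
        fn set_provision_policy(
            &mut self,
            new_state: SledProvisionPolicy,
        ) -> (SledProvisionPolicy, SledProvisionPolicy) {
            let old_state = self.policy;
            self.policy = new_state;
            // Callers compare old_state and new_state to see whether the
            // call actually changed anything.
            (old_state, new_state)
        }
    }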
 /// Set sled provision policy
@@ -5373,13 +6012,13 @@ async fn sled_view(
     tags = ["system/hardware"],
 }]
 async fn sled_set_provision_policy(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     new_provision_state: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();

         let new_state = new_provision_state.into_inner().state;
@@ -5397,7 +6036,11 @@ async fn sled_set_provision_policy(
         Ok(HttpResponseOk(response))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// List instances running on given sled
@@ -5407,13 +6050,13 @@ async fn sled_set_provision_policy(
     tags = ["system/hardware"],
 }]
 async fn sled_instance_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -5434,7 +6077,11 @@ async fn sled_instance_list(
             &|_, sled_instance: &views::SledInstance| sled_instance.identity.id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // Physical disks

@@ -5446,12 +6093,12 @@ async fn sled_instance_list(
     tags = ["system/hardware"],
 }]
 async fn physical_disk_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let disks = nexus
@@ -5466,7 +6113,11 @@ async fn physical_disk_list(
             &|_, disk: &PhysicalDisk| disk.identity.id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Get a physical disk
@@ -5476,12 +6127,12 @@ async fn physical_disk_list(
     tags = ["system/hardware"],
 }]
 async fn physical_disk_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -5489,7 +6140,11 @@ async fn physical_disk_view(
             nexus.physical_disk_lookup(&opctx, &path).await?.fetch().await?;
         Ok(HttpResponseOk(physical_disk.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // Switches

@@ -5501,12 +6156,12 @@ async fn physical_disk_view(
     tags = ["system/hardware"],
 }]
 async fn switch_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let switches = nexus
@@ -5521,7 +6176,11 @@ async fn switch_list(
             &|_, switch: &views::Switch| switch.identity.id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Fetch switch
@@ -5531,12 +6190,12 @@ async fn switch_list(
     tags = ["system/hardware"],
 }]
 async fn switch_view(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let (.., switch) = nexus
@@ -5548,7 +6207,11 @@ async fn switch_view(
             .await?;
         Ok(HttpResponseOk(switch.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// List physical disks attached to sleds
@@ -5558,13 +6221,13 @@ async fn switch_view(
     tags = ["system/hardware"],
 }]
 async fn sled_physical_disk_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -5584,7 +6247,11 @@ async fn sled_physical_disk_list(
             &|_, disk: &PhysicalDisk| disk.identity.id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // Metrics

@@ -5625,7 +6292,7 @@ struct SystemMetricsPathParam {
     tags = ["system/metrics"],
 }]
 async fn system_metric(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     pag_params: Query<
         PaginationParams,
     >,
@@ -5634,7 +6301,7 @@ async fn system_metric(
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let metric_name = path_params.into_inner().metric_name;
         let pagination = pag_params.into_inner();
         let limit = rqctx.page_limit(&pagination)?;
@@ -5657,7 +6324,11 @@ async fn system_metric(
         Ok(HttpResponseOk(result))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// View metrics
@@ -5669,7 +6340,7 @@ async fn system_metric(
     tags = ["metrics"],
 }]
 async fn silo_metric(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
     pag_params: Query<
         PaginationParams,
     >,
@@ -5678,7 +6349,7 @@ async fn silo_metric(
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let metric_name = path_params.into_inner().metric_name;

         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
@@ -5706,7 +6377,11 @@ async fn silo_metric(
         Ok(HttpResponseOk(result))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// List timeseries schemas
@@ -5716,13 +6391,13 @@ async fn silo_metric(
     tags = ["metrics"],
 }]
 async fn timeseries_schema_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     pag_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let pagination = pag_params.into_inner();
         let limit = rqctx.page_limit(&pagination)?;
@@ -5732,7 +6407,11 @@ async fn timeseries_schema_list(
             .map(HttpResponseOk)
            .map_err(HttpError::from)
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // TODO: can we link to an OxQL reference? Do we have one? Can we even do links?
@@ -5746,12 +6425,12 @@ async fn timeseries_schema_list(
     tags = ["metrics"],
 }]
 async fn timeseries_query(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     body: TypedBody,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let query = body.into_inner().query;
         nexus
@@ -5760,7 +6439,11 @@ async fn timeseries_query(
             .map(HttpResponseOk)
            .map_err(HttpError::from)
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // Updates

@@ -5773,12 +6456,12 @@ async fn timeseries_query(
     unpublished = true,
 }]
 async fn system_update_put_repository(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query: Query,
     body: StreamingBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let query = query.into_inner();
@@ -5787,7 +6470,11 @@ async fn system_update_put_repository(
             nexus.updates_put_repository(&opctx, body, query.file_name).await?;
         Ok(HttpResponseOk(update))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 /// Fetch TUF repository description
@@ -5800,11 +6487,11 @@ async fn system_update_put_repository(
     unpublished = true,
 }]
 async fn system_update_get_repository(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let params = path_params.into_inner();
@@ -5814,7 +6501,11 @@ async fn system_update_get_repository(
             description: description.into_external(),
         }))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }

 // Silo users

@@ -5826,12 +6517,12 @@ async fn system_update_get_repository(
     tags = ["silos"],
 }]
 async fn user_list(
-    rqctx: RequestContext<Arc<ServerContext>>,
+    rqctx: RequestContext<ApiContext>,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
&apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -5855,7 +6546,11 @@ async fn user_list( &|_, user: &User| user.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Silo groups @@ -5867,11 +6562,11 @@ async fn user_list( tags = ["silos"], }] async fn group_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; let handler = async { @@ -5888,7 +6583,11 @@ async fn group_list( &|_, group: &Group| group.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch group @@ -5898,19 +6597,23 @@ async fn group_list( tags = ["silos"], }] async fn group_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let (.., group) = nexus.silo_group_lookup(&opctx, &path.group_id).fetch().await?; Ok(HttpResponseOk(group.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Built-in (system) users @@ -5922,11 +6625,11 @@ async fn group_view( tags = ["system/silos"], }] async fn user_builtin_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?.map_name(|n| Name::ref_cast(n)); @@ -5944,7 +6647,11 @@ async fn user_builtin_list( &marker_for_name, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch built-in user @@ -5954,19 +6661,23 @@ async fn user_builtin_list( tags = ["system/silos"], }] async fn user_builtin_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let user_selector = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let (.., user) = nexus.user_builtin_lookup(&opctx, &user_selector)?.fetch().await?; Ok(HttpResponseOk(user.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Built-in roles @@ -5992,11 +6703,11 @@ struct RolePathParam { tags = ["roles"], }] async fn role_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); - 
let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -6026,7 +6737,11 @@ async fn role_list( |role: &Role, _| RolePage { last_seen: role.name.to_string() }, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch built-in role @@ -6036,11 +6751,11 @@ async fn role_list( tags = ["roles"], }] async fn role_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let role_name = &path.role_name; let handler = async { @@ -6048,7 +6763,11 @@ async fn role_view( let role = nexus.role_builtin_fetch(&opctx, &role_name).await?; Ok(HttpResponseOk(role.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Current user @@ -6060,10 +6779,10 @@ async fn role_view( tags = ["session"], }] pub(crate) async fn current_user_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let user = nexus.silo_user_fetch_self(&opctx).await?; @@ -6073,7 +6792,11 @@ pub(crate) async fn current_user_view( silo_name: silo.name().clone(), })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch current user's groups @@ -6083,13 +6806,13 @@ pub(crate) async fn current_user_view( tags = ["session"], }] pub(crate) async fn current_user_groups( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let groups = nexus .silo_user_fetch_groups_for_self( @@ -6106,7 +6829,11 @@ pub(crate) async fn current_user_groups( &|_, group: &views::Group| group.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Per-user SSH public keys @@ -6120,13 +6847,13 @@ pub(crate) async fn current_user_groups( tags = ["session"], }] async fn current_user_ssh_key_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -6147,7 +6874,11 @@ async fn current_user_ssh_key_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + 
.external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create SSH public key @@ -6159,13 +6890,13 @@ async fn current_user_ssh_key_list( tags = ["session"], }] async fn current_user_ssh_key_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_key: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let &actor = opctx .authn .actor_required() @@ -6175,7 +6906,11 @@ async fn current_user_ssh_key_create( .await?; Ok(HttpResponseCreated(ssh_key.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch SSH public key @@ -6187,13 +6922,13 @@ async fn current_user_ssh_key_create( tags = ["session"], }] async fn current_user_ssh_key_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let &actor = opctx .authn @@ -6209,7 +6944,11 @@ async fn current_user_ssh_key_view( assert_eq!(silo_user.id(), actor.actor_id()); Ok(HttpResponseOk(ssh_key.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete SSH public key @@ -6221,13 +6960,13 @@ async fn current_user_ssh_key_view( tags = ["session"], }] async fn current_user_ssh_key_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let &actor = opctx .authn @@ -6241,7 +6980,11 @@ async fn current_user_ssh_key_delete( nexus.ssh_key_delete(&opctx, actor.actor_id(), &ssh_key_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List instrumentation probes @@ -6251,7 +6994,7 @@ async fn current_user_ssh_key_delete( tags = ["system/probes"], }] async fn probe_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); @@ -6259,7 +7002,7 @@ async fn probe_list( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -6279,7 +7022,11 @@ async fn probe_list( }, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// View instrumentation probe @@ -6289,7 +7036,7 @@ async fn probe_list( tags = ["system/probes"], }] async fn probe_view( - rqctx: RequestContext>, + 
rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { @@ -6298,7 +7045,7 @@ async fn probe_view( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let project_selector = query_params.into_inner(); let project_lookup = nexus.project_lookup(&opctx, project_selector)?; @@ -6306,7 +7053,11 @@ async fn probe_view( nexus.probe_get(&opctx, &project_lookup, &path.probe).await?; Ok(HttpResponseOk(probe)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create instrumentation probe @@ -6316,7 +7067,7 @@ async fn probe_view( tags = ["system/probes"], }] async fn probe_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_probe: TypedBody, ) -> Result, HttpError> { @@ -6325,7 +7076,7 @@ async fn probe_create( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let new_probe_params = &new_probe.into_inner(); let project_selector = query_params.into_inner(); let project_lookup = nexus.project_lookup(&opctx, project_selector)?; @@ -6334,7 +7085,11 @@ async fn probe_create( .await?; Ok(HttpResponseCreated(probe.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete instrumentation probe @@ -6344,7 +7099,7 @@ async fn probe_create( tags = ["system/probes"], }] async fn probe_delete( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result { @@ -6353,14 +7108,18 @@ async fn probe_delete( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let project_selector = query_params.into_inner(); let project_lookup = nexus.project_lookup(&opctx, project_selector)?; nexus.probe_delete(&opctx, &project_lookup, path.probe).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[cfg(test)] diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index 81b63d0d89a..ceafe7f103a 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -5,7 +5,7 @@ //! 
Handler functions (entrypoints) for HTTP APIs internal to the control plane use super::params::{OximeterInfo, RackInitializationRequest}; -use crate::ServerContext; +use crate::context::ApiContext; use dropshot::endpoint; use dropshot::ApiDescription; use dropshot::FreeformBody; @@ -60,10 +60,9 @@ use serde::Deserialize; use serde::Serialize; use std::collections::BTreeMap; use std::collections::BTreeSet; -use std::sync::Arc; use uuid::Uuid; -type NexusApiDescription = ApiDescription>; +type NexusApiDescription = ApiDescription; /// Returns a description of the internal nexus API pub(crate) fn internal_api() -> NexusApiDescription { @@ -134,10 +133,10 @@ struct SledAgentPathParam { path = "/sled-agents/{sled_id}", }] async fn sled_agent_get( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let path = path_params.into_inner(); @@ -155,11 +154,11 @@ async fn sled_agent_get( path = "/sled-agents/{sled_id}", }] async fn sled_agent_put( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, sled_info: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let path = path_params.into_inner(); @@ -182,10 +181,10 @@ async fn sled_agent_put( path = "/sled-agents/{sled_id}/firewall-rules-update", }] async fn sled_firewall_rules_request( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let path = path_params.into_inner(); @@ -211,11 +210,11 @@ struct RackPathParam { path = "/racks/{rack_id}/initialization-complete", }] async fn rack_initialization_complete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, info: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); let request = info.into_inner(); @@ -237,11 +236,11 @@ struct SwitchPathParam { path = "/switch/{switch_id}", }] async fn switch_put( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, body: TypedBody, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -264,11 +263,11 @@ struct InstancePathParam { path = "/instances/{instance_id}", }] async fn cpapi_instances_put( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_runtime_state: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); let new_state = new_runtime_state.into_inner(); @@ -294,11 +293,11 @@ struct DiskPathParam { path = "/disks/{disk_id}", }] async fn cpapi_disks_put( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_runtime_state: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); let new_state = new_runtime_state.into_inner(); @@ -329,10 +328,10 @@ struct 
VolumePathParam { path = "/volume/{volume_id}/remove-read-only-parent", }] async fn cpapi_volume_remove_read_only_parent( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -354,10 +353,10 @@ async fn cpapi_volume_remove_read_only_parent( path = "/disk/{disk_id}/remove-read-only-parent", }] async fn cpapi_disk_remove_read_only_parent( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -375,10 +374,10 @@ async fn cpapi_disk_remove_read_only_parent( path = "/metrics/producers", }] async fn cpapi_producers_post( - request_context: RequestContext>, + request_context: RequestContext, producer_info: TypedBody, ) -> Result, HttpError> { - let context = request_context.context(); + let context = &request_context.context().context; let handler = async { let nexus = &context.nexus; let producer_info = producer_info.into_inner(); @@ -413,11 +412,11 @@ pub struct CollectorIdPathParams { path = "/metrics/collectors/{collector_id}/producers", }] async fn cpapi_assigned_producers_list( - request_context: RequestContext>, + request_context: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { - let context = request_context.context(); + let context = &request_context.context().context; let handler = async { let nexus = &context.nexus; let collector_id = path_params.into_inner().collector_id; @@ -446,10 +445,10 @@ async fn cpapi_assigned_producers_list( path = "/metrics/collectors", }] async fn cpapi_collectors_post( - request_context: RequestContext>, + request_context: RequestContext, oximeter_info: TypedBody, ) -> Result { - let context = request_context.context(); + let context = &request_context.context().context; let handler = async { let nexus = &context.nexus; let oximeter_info = oximeter_info.into_inner(); @@ -470,10 +469,10 @@ async fn cpapi_collectors_post( path = "/artifacts/{kind}/{name}/{version}", }] async fn cpapi_artifact_download( - request_context: RequestContext>, + request_context: RequestContext, path_params: Path, ) -> Result, HttpError> { - let context = request_context.context(); + let context = &request_context.context().context; let nexus = &context.nexus; let opctx = crate::context::op_context_for_internal_api(&request_context).await; @@ -497,11 +496,11 @@ struct UpstairsPathParam { path = "/crucible/0/upstairs/{upstairs_id}/repair-start", }] async fn cpapi_upstairs_repair_start( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, repair_start_info: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -525,11 +524,11 @@ async fn cpapi_upstairs_repair_start( path = "/crucible/0/upstairs/{upstairs_id}/repair-finish", }] async fn cpapi_upstairs_repair_finish( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, repair_finish_info: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -560,11 +559,11 @@ struct UpstairsRepairPathParam { path = "/crucible/0/upstairs/{upstairs_id}/repair/{repair_id}/progress", }] async fn cpapi_upstairs_repair_progress( - rqctx: 
RequestContext>, + rqctx: RequestContext, path_params: Path, repair_progress: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -597,11 +596,11 @@ struct UpstairsDownstairsPathParam { path = "/crucible/0/upstairs/{upstairs_id}/downstairs/{downstairs_id}/stop-request", }] async fn cpapi_downstairs_client_stop_request( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, downstairs_client_stop_request: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -627,11 +626,11 @@ async fn cpapi_downstairs_client_stop_request( path = "/crucible/0/upstairs/{upstairs_id}/downstairs/{downstairs_id}/stopped", }] async fn cpapi_downstairs_client_stopped( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, downstairs_client_stopped: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -658,10 +657,10 @@ async fn cpapi_downstairs_client_stopped( path = "/sagas", }] async fn saga_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let query = query_params.into_inner(); @@ -690,10 +689,10 @@ struct SagaPathParam { path = "/sagas/{saga_id}", }] async fn saga_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -715,9 +714,9 @@ async fn saga_view( path = "/bgtasks", }] async fn bgtask_list( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; @@ -747,10 +746,10 @@ struct BackgroundTasksActivateRequest { path = "/bgtasks/view/{bgtask_name}", }] async fn bgtask_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -768,10 +767,10 @@ async fn bgtask_view( path = "/bgtasks/activate", }] async fn bgtask_activate( - rqctx: RequestContext>, + rqctx: RequestContext, body: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -810,11 +809,11 @@ struct RpwNatQueryParam { path = "/nat/ipv4/changeset/{from_gen}" }] async fn ipv4_nat_changeset( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -843,10 +842,10 @@ async fn ipv4_nat_changeset( path = 
"/deployment/blueprints/all", }] async fn blueprint_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let query = query_params.into_inner(); @@ -869,10 +868,10 @@ async fn blueprint_list( path = "/deployment/blueprints/all/{blueprint_id}", }] async fn blueprint_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -889,10 +888,10 @@ async fn blueprint_view( path = "/deployment/blueprints/all/{blueprint_id}", }] async fn blueprint_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -911,9 +910,9 @@ async fn blueprint_delete( path = "/deployment/blueprints/target", }] async fn blueprint_target_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -929,10 +928,10 @@ async fn blueprint_target_view( path = "/deployment/blueprints/target", }] async fn blueprint_target_set( - rqctx: RequestContext>, + rqctx: RequestContext, target: TypedBody, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -949,10 +948,10 @@ async fn blueprint_target_set( path = "/deployment/blueprints/target/enabled", }] async fn blueprint_target_set_enabled( - rqctx: RequestContext>, + rqctx: RequestContext, target: TypedBody, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -972,9 +971,9 @@ async fn blueprint_target_set_enabled( path = "/deployment/blueprints/regenerate", }] async fn blueprint_regenerate( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -992,10 +991,10 @@ async fn blueprint_regenerate( path = "/deployment/blueprints/import", }] async fn blueprint_import( - rqctx: RequestContext>, + rqctx: RequestContext, blueprint: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -1012,9 +1011,9 @@ async fn blueprint_import( path = "/sleds/uninitialized", }] async fn sled_list_uninitialized( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let opctx = 
crate::context::op_context_for_internal_api(&rqctx).await; @@ -1040,10 +1039,10 @@ pub struct SledId { path = "/sleds/add", }] async fn sled_add( - rqctx: RequestContext>, + rqctx: RequestContext, sled: TypedBody, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; @@ -1064,10 +1063,10 @@ async fn sled_add( path = "/sleds/expunge", }] async fn sled_expunge( - rqctx: RequestContext>, + rqctx: RequestContext, sled: TypedBody, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; @@ -1090,11 +1089,11 @@ struct ProbePathParam { path = "/probes/{sled}" }] async fn probes_get( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let query = query_params.into_inner(); let path = path_params.into_inner(); diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index e34b694e521..e1b327de913 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -20,6 +20,7 @@ mod saga_interface; pub use app::test_interfaces::TestInterfaces; pub use app::Nexus; +use context::ApiContext; use context::ServerContext; use dropshot::ConfigDropshot; use external_api::http_entrypoints::external_api; @@ -77,11 +78,10 @@ pub fn run_openapi_internal() -> Result<(), String> { /// A partially-initialized Nexus server, which exposes an internal interface, /// but is not ready to receive external requests. pub struct InternalServer { - /// shared state used by API request handlers - apictx: Arc, + /// Shared server state. + apictx: ApiContext, /// dropshot server for internal API - http_server_internal: dropshot::HttpServer>, - + http_server_internal: dropshot::HttpServer, config: NexusConfig, log: Logger, } @@ -97,31 +97,39 @@ impl InternalServer { let ctxlog = log.new(o!("component" => "ServerContext")); - let apictx = - ServerContext::new(config.deployment.rack_id, ctxlog, &config) - .await?; + let context = ApiContext::for_internal( + config.deployment.rack_id, + ctxlog, + &config, + ) + .await?; // Launch the internal server. let server_starter_internal = dropshot::HttpServerStarter::new( &config.deployment.dropshot_internal, internal_api(), - Arc::clone(&apictx), + context.clone(), &log.new(o!("component" => "dropshot_internal")), ) .map_err(|error| format!("initializing internal server: {}", error))?; let http_server_internal = server_starter_internal.start(); - Ok(Self { apictx, http_server_internal, config: config.clone(), log }) + Ok(Self { + apictx: context, + http_server_internal, + config: config.clone(), + log, + }) } } -type DropshotServer = dropshot::HttpServer>; +type DropshotServer = dropshot::HttpServer; /// Packages up a [`Nexus`], running both external and internal HTTP API servers /// wired up to Nexus pub struct Server { /// shared state used by API request handlers - apictx: Arc, + apictx: ApiContext, } impl Server { @@ -132,16 +140,17 @@ impl Server { let config = internal.config; // Wait until RSS handoff completes. 
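The refactor above threads a new `ApiContext` through dropshot in place of the old `Arc<ServerContext>`: handlers reach shared state through `apictx.context`, and the internal server builds its context with `ApiContext::for_internal`. The definition in nexus/src/context.rs is not part of this hunk, so the following is only a plausible sketch consistent with the calls shown here (`for_external`, `for_techport`, the public `context` field); the `ServerKind` tag and its variants are assumptions added for illustration, not code from this change.

    use std::sync::Arc;

    // Sketched as if it lived alongside `ServerContext` in
    // nexus/src/context.rs, so no import of that type is needed.

    /// Which server a request arrived on. A tag like this would let the
    /// external API tell techport-proxied traffic apart (hypothetical
    /// name and variants; not shown in this diff).
    #[derive(Clone, Copy, Debug)]
    pub enum ServerKind {
        Internal,
        External,
        Techport,
    }

    /// Cheaply cloneable request context handed to dropshot.
    #[derive(Clone)]
    pub struct ApiContext {
        /// Shared state used by all API request handlers.
        pub context: Arc<ServerContext>,
        /// Which server this clone of the context serves.
        pub kind: ServerKind,
    }

    impl ApiContext {
        /// Re-tag the same shared state for the external server.
        pub fn for_external(&self) -> Self {
            Self {
                context: Arc::clone(&self.context),
                kind: ServerKind::External,
            }
        }

        /// Re-tag the same shared state for the techport proxy server.
        pub fn for_techport(&self) -> Self {
            Self {
                context: Arc::clone(&self.context),
                kind: ServerKind::Techport,
            }
        }
    }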
- let opctx = apictx.nexus.opctx_for_service_balancer(); - apictx.nexus.await_rack_initialization(&opctx).await; + let opctx = apictx.context.nexus.opctx_for_service_balancer(); + apictx.context.nexus.await_rack_initialization(&opctx).await; // While we've started our internal server, we need to wait until we've // definitely implemented our source IP allowlist for making requests to // the external server we're about to start. - apictx.nexus.await_ip_allowlist_plumbing().await; + apictx.context.nexus.await_ip_allowlist_plumbing().await; // Launch the external server. let tls_config = apictx + .context .nexus .external_tls_config(config.deployment.dropshot_external.tls) .await; @@ -167,7 +176,7 @@ impl Server { dropshot::HttpServerStarter::new_with_tls( &config.deployment.dropshot_external.dropshot, external_api(), - Arc::clone(&apictx), + apictx.for_external(), &log.new(o!("component" => "dropshot_external")), tls_config.clone().map(dropshot::ConfigTls::Dynamic), ) @@ -181,7 +190,7 @@ impl Server { dropshot::HttpServerStarter::new_with_tls( &techport_server_config, external_api(), - Arc::clone(&apictx), + apictx.for_techport(), &log.new(o!("component" => "dropshot_external_techport")), tls_config.map(dropshot::ConfigTls::Dynamic), ) @@ -195,11 +204,12 @@ impl Server { // metric data. let producer_server = start_producer_server( &log, - &apictx.producer_registry, + &apictx.context.producer_registry, http_server_internal.local_addr(), )?; apictx + .context .nexus .set_servers( http_server_external, @@ -212,8 +222,8 @@ impl Server { Ok(server) } - pub fn apictx(&self) -> &Arc { - &self.apictx + pub fn server_context(&self) -> &Arc { + &self.apictx.context } /// Wait for the given server to shut down @@ -222,7 +232,7 @@ impl Server { /// immediately after calling `start()`, the program will block indefinitely /// or until something else initiates a graceful shutdown. pub(crate) async fn wait_for_finish(self) -> Result<(), String> { - self.apictx.nexus.wait_for_shutdown().await + self.server_context().nexus.wait_for_shutdown().await } } @@ -236,7 +246,7 @@ impl nexus_test_interface::NexusServer for Server { ) -> (InternalServer, SocketAddr) { let internal_server = InternalServer::start(config, &log).await.unwrap(); - internal_server.apictx.nexus.wait_for_populate().await.unwrap(); + internal_server.apictx.context.nexus.wait_for_populate().await.unwrap(); let addr = internal_server.http_server_internal.local_addr(); (internal_server, addr) } @@ -259,7 +269,8 @@ impl nexus_test_interface::NexusServer for Server { // Perform the "handoff from RSS". // // However, RSS isn't running, so we'll do the handoff ourselves. - let opctx = internal_server.apictx.nexus.opctx_for_internal_api(); + let opctx = + internal_server.apictx.context.nexus.opctx_for_internal_api(); // Allocation of the initial Nexus's external IP is a little funny. In // a real system, it'd be allocated by RSS and provided with the rack @@ -290,6 +301,7 @@ impl nexus_test_interface::NexusServer for Server { internal_server .apictx + .context .nexus .rack_initialize( &opctx, @@ -332,7 +344,7 @@ impl nexus_test_interface::NexusServer for Server { // Historically, tests have assumed that there's only one provisionable // sled, and that's convenient for a lot of purposes. Mark our second // sled non-provisionable. 
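With the external and techport servers both built from the same `external_api()` description but handed differently tagged contexts, a handler or shared pre-check can branch on how a request arrived. The allow-list test at the end of this diff relies on exactly that: a request that would lock the caller out is rejected on the external server but accepted via the techport proxy. A minimal sketch of such a check, assuming the hypothetical `ServerKind` tag sketched earlier:

    /// Whether the source-IP allowlist should be enforced for a request
    /// arriving on this context. Techport traffic bypasses it so that
    /// operators can always recover from a self-defeating allowlist.
    /// (Illustrative only; this function does not appear in the diff.)
    fn allowlist_applies(apictx: &ApiContext) -> bool {
        !matches!(apictx.kind, ServerKind::Techport)
    }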
- let nexus = &rv.apictx().nexus; + let nexus = &rv.server_context().nexus; nexus .sled_set_provision_policy( &opctx, @@ -349,11 +361,15 @@ impl nexus_test_interface::NexusServer for Server { } async fn get_http_server_external_address(&self) -> SocketAddr { - self.apictx.nexus.get_external_server_address().await.unwrap() + self.apictx.context.nexus.get_external_server_address().await.unwrap() + } + + async fn get_http_server_techport_address(&self) -> SocketAddr { + self.apictx.context.nexus.get_techport_server_address().await.unwrap() } async fn get_http_server_internal_address(&self) -> SocketAddr { - self.apictx.nexus.get_internal_server_address().await.unwrap() + self.apictx.context.nexus.get_internal_server_address().await.unwrap() } async fn upsert_crucible_dataset( @@ -363,8 +379,9 @@ impl nexus_test_interface::NexusServer for Server { dataset_id: Uuid, address: SocketAddrV6, ) { - let opctx = self.apictx.nexus.opctx_for_internal_api(); + let opctx = self.apictx.context.nexus.opctx_for_internal_api(); self.apictx + .context .nexus .upsert_physical_disk(&opctx, physical_disk) .await @@ -372,9 +389,10 @@ impl nexus_test_interface::NexusServer for Server { let zpool_id = zpool.id; - self.apictx.nexus.upsert_zpool(&opctx, zpool).await.unwrap(); + self.apictx.context.nexus.upsert_zpool(&opctx, zpool).await.unwrap(); self.apictx + .context .nexus .upsert_dataset( dataset_id, @@ -389,7 +407,7 @@ impl nexus_test_interface::NexusServer for Server { async fn inventory_collect_and_get_latest_collection( &self, ) -> Result, Error> { - let nexus = &self.apictx.nexus; + let nexus = &self.apictx.context.nexus; nexus.activate_inventory_collection(); @@ -399,6 +417,7 @@ impl nexus_test_interface::NexusServer for Server { async fn close(mut self) { self.apictx + .context .nexus .close_servers() .await diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs index 2c7f0989eab..06c5570b7bb 100644 --- a/nexus/test-interface/src/lib.rs +++ b/nexus/test-interface/src/lib.rs @@ -68,6 +68,7 @@ pub trait NexusServer: Send + Sync + 'static { ) -> Self; async fn get_http_server_external_address(&self) -> SocketAddr; + async fn get_http_server_techport_address(&self) -> SocketAddr; async fn get_http_server_internal_address(&self) -> SocketAddr; // Previously, as a dataset was created (within the sled agent), diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 23d84ee702a..8bbb6ef38c7 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -104,6 +104,7 @@ pub const TEST_SUITE_PASSWORD: &str = "oxide"; pub struct ControlPlaneTestContext { pub start_time: chrono::DateTime, pub external_client: ClientTestContext, + pub techport_client: ClientTestContext, pub internal_client: ClientTestContext, pub server: N, pub database: dev::db::CockroachInstance, @@ -257,6 +258,7 @@ pub struct ControlPlaneTestContextBuilder<'a, N: NexusServer> { pub logctx: LogContext, pub external_client: Option, + pub techport_client: Option, pub internal_client: Option, pub server: Option, @@ -307,6 +309,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { start_time, logctx, external_client: None, + techport_client: None, internal_client: None, server: None, database: None, @@ -832,6 +835,8 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { let external_server_addr = server.get_http_server_external_address().await; + let techport_external_server_addr = + server.get_http_server_techport_address().await; let internal_server_addr = 
server.get_http_server_internal_address().await; let testctx_external = ClientTestContext::new( @@ -840,6 +845,12 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .log .new(o!("component" => "external client test context")), ); + let testctx_techport = ClientTestContext::new( + techport_external_server_addr, + self.logctx.log.new( + o!("component" => "techport external client test context"), + ), + ); let testctx_internal = ClientTestContext::new( internal_server_addr, self.logctx @@ -849,6 +860,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { self.external_dns_zone_name = Some(external_dns_zone_name); self.external_client = Some(testctx_external); + self.techport_client = Some(testctx_techport); self.internal_client = Some(testctx_internal); self.silo_name = Some(silo_name); self.user_name = Some(user_name); @@ -1086,6 +1098,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { start_time: self.start_time, server: self.server.unwrap(), external_client: self.external_client.unwrap(), + techport_client: self.techport_client.unwrap(), internal_client: self.internal_client.unwrap(), database: self.database.unwrap(), clickhouse: self.clickhouse.unwrap(), diff --git a/nexus/tests/integration_tests/allow_list.rs b/nexus/tests/integration_tests/allow_list.rs index 319696b5f5c..dc206843f71 100644 --- a/nexus/tests/integration_tests/allow_list.rs +++ b/nexus/tests/integration_tests/allow_list.rs @@ -9,6 +9,7 @@ use nexus_test_utils::http_testing::{AuthnMode, NexusRequest}; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::{params, views}; use omicron_common::api::external::AllowedSourceIps; +use omicron_common::api::external::IpNet; use std::net::IpAddr; use std::net::Ipv4Addr; @@ -74,7 +75,7 @@ async fn test_allow_list(cptestctx: &ControlPlaneTestContext) { } // Set the list with exactly one IP, make sure it's the same. - let allowed_ips = AllowedSourceIps::try_from(vec![our_addr.into()]) + let allowed_ips = AllowedSourceIps::try_from(vec![IpNet::single(our_addr)]) .expect("Expected a valid IP list"); update_list_and_compare(client, allowed_ips).await; @@ -82,8 +83,10 @@ async fn test_allow_list(cptestctx: &ControlPlaneTestContext) { // // This is a regression for // https://github.com/oxidecomputer/omicron/issues/5727. - let addrs = - vec![our_addr.into(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)).into()]; + let addrs = vec![ + IpNet::single(our_addr), + IpNet::single(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))), + ]; let allowed_ips = AllowedSourceIps::try_from(addrs.clone()) .expect("Expected a valid IP list"); update_list_and_compare(client, allowed_ips).await; @@ -98,10 +101,10 @@ async fn test_allow_list(cptestctx: &ControlPlaneTestContext) { // Check that we cannot make the request with a list that doesn't include // us. 
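The allow-list hunks below replace bare `our_addr.into()` conversions with explicit `IpNet::single(...)`, which wraps one address as a host-width network (presumably /32 for IPv4 and /128 for IPv6) before building the allowlist. Condensed from the test, using only the imports and calls the diff itself introduces:

    use std::net::{IpAddr, Ipv4Addr};
    use omicron_common::api::external::{AllowedSourceIps, IpNet};

    // Wrap single addresses explicitly instead of relying on
    // `Into<IpNet>` conversions.
    let addr = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 1));
    let allowed = AllowedSourceIps::try_from(vec![IpNet::single(addr)])
        .expect("a non-empty list of networks should be a valid allowlist");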
- let addrs = vec![IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)).into()]; + let addrs = vec![IpNet::single(IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)))]; let allowed_ips = AllowedSourceIps::try_from(addrs.clone()) .expect("Expected a valid IP list"); - let new_list = params::AllowListUpdate { allowed_ips }; + let new_list = params::AllowListUpdate { allowed_ips: allowed_ips.clone() }; let err: dropshot::HttpErrorResponseBody = NexusRequest::expect_failure_with_body( client, @@ -119,4 +122,9 @@ async fn test_allow_list(cptestctx: &ControlPlaneTestContext) { assert!(err .message .contains("would prevent access from the current client")); + + // But we _should_ be able to make this self-defeating request through the + // techport proxy server. + let client = &cptestctx.techport_client; + update_list_and_compare(client, allowed_ips).await; } diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index c74cfb5c504..886504a83b3 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -191,7 +191,7 @@ async fn test_disk_create_attach_detach_delete( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; let project_id = create_project_and_pool(client).await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disks_url = get_disks_url(); // Create a disk. @@ -365,7 +365,7 @@ async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_names = ["a", "b", "c", "d"]; let mut disks = Vec::new(); @@ -391,7 +391,7 @@ async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) { get_disk_attach_url(&instance.identity.id.into()); async fn get_disk_slot(ctx: &ControlPlaneTestContext, disk_id: Uuid) -> u8 { - let apictx = &ctx.server.apictx(); + let apictx = &ctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); let opctx = @@ -469,7 +469,7 @@ async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(&client).await; let disks_url = get_disks_url(); @@ -1043,7 +1043,7 @@ async fn test_disk_virtual_provisioning_collection( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let _test = DiskTest::new(&cptestctx).await; @@ -1251,7 +1251,7 @@ async fn test_disk_virtual_provisioning_collection_failed_delete( ) { // Confirm that there's no panic deleting a project if a disk deletion fails let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let disk_test = DiskTest::new(&cptestctx).await; @@ -1391,7 +1391,7 @@ async fn test_phantom_disk_rename(cptestctx: &ControlPlaneTestContext) { // faulted let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = 
&cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let _disk_test = DiskTest::new(&cptestctx).await; @@ -1512,7 +1512,7 @@ async fn test_phantom_disk_rename(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_disk_size_accounting(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); // Create three 10 GiB zpools, each with one dataset. @@ -1973,7 +1973,7 @@ async fn test_project_delete_disk_no_auth_idempotent( // Call project_delete_disk_no_auth twice, ensuring that the disk is either // there before deleting and not afterwards. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2013,7 +2013,7 @@ async fn test_project_delete_disk_no_auth_idempotent( // Test allocating a single region #[nexus_test] async fn test_single_region_allocate(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2085,7 +2085,7 @@ async fn test_single_region_allocate(cptestctx: &ControlPlaneTestContext) { async fn test_region_allocation_strategy_random_is_idempotent( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2152,7 +2152,7 @@ async fn test_region_allocation_strategy_random_is_idempotent( async fn test_region_allocation_strategy_random_is_idempotent_arbitrary( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2208,7 +2208,7 @@ async fn test_region_allocation_strategy_random_is_idempotent_arbitrary( async fn test_single_region_allocate_for_replace( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2297,7 +2297,7 @@ async fn test_single_region_allocate_for_replace( async fn test_single_region_allocate_for_replace_not_enough_zpools( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2387,7 +2387,7 @@ async fn test_single_region_allocate_for_replace_not_enough_zpools( async fn test_region_allocation_after_delete( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); diff --git a/nexus/tests/integration_tests/external_ips.rs 
b/nexus/tests/integration_tests/external_ips.rs index 9d7ef34b35f..396edddc41e 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -628,7 +628,7 @@ async fn test_floating_ip_create_attachment( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; create_default_ip_pool(&client).await; @@ -725,7 +725,7 @@ async fn test_external_ip_live_attach_detach( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; create_default_ip_pool(&client).await; @@ -934,7 +934,7 @@ async fn test_floating_ip_attach_fail_between_projects( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let _nexus = &apictx.nexus; create_default_ip_pool(&client).await; @@ -1009,7 +1009,7 @@ async fn test_external_ip_attach_fail_if_in_use_by_other( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; create_default_ip_pool(&client).await; diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 0b5947ef7eb..7ad52b99191 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -280,7 +280,7 @@ async fn test_instances_create_reboot_halt( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; @@ -585,7 +585,7 @@ async fn test_instance_start_creates_networking_state( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "series-of-tubes"; @@ -688,7 +688,7 @@ async fn test_instance_start_creates_networking_state( #[nexus_test] async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "bird-ecology"; @@ -784,7 +784,7 @@ async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); let opctx = @@ -936,7 +936,7 @@ async fn test_instance_failed_after_sled_agent_error( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "losing-is-fun"; @@ -1063,7 +1063,7 @@ async fn assert_metrics( #[nexus_test] async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let apictx = 
&cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); @@ -1143,14 +1143,14 @@ async fn test_instance_metrics_with_migration( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "bird-ecology"; // Wait until Nexus registers as a producer with Oximeter. wait_for_producer( &cptestctx.oximeter, - cptestctx.server.apictx().nexus.id(), + cptestctx.server.server_context().nexus.id(), ) .await; @@ -1276,7 +1276,7 @@ async fn test_instances_create_stopped_start( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; @@ -1327,7 +1327,7 @@ async fn test_instances_delete_fails_when_running_succeeds_when_stopped( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; @@ -1849,7 +1849,7 @@ async fn test_instance_create_delete_network_interface( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let instance_name = "nic-attach-test-inst"; create_project_and_pool(&client).await; @@ -2090,7 +2090,7 @@ async fn test_instance_update_network_interfaces( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let instance_name = "nic-update-test-inst"; create_project_and_pool(&client).await; @@ -2710,7 +2710,7 @@ async fn test_instance_create_attach_disks_undo( let faulted_disk = create_disk(&client, PROJECT_NAME, "faulted-disk").await; // set `faulted_disk` to the faulted state - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; assert!(nexus .set_disk_as_faulted(&faulted_disk.identity.id) @@ -2971,7 +2971,7 @@ async fn test_cannot_attach_faulted_disks(cptestctx: &ControlPlaneTestContext) { assert_eq!(disks.len(), 8); // Set the 7th to FAULTED - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; assert!(nexus.set_disk_as_faulted(&disks[6].identity.id).await.unwrap()); @@ -3129,7 +3129,7 @@ async fn test_disks_detached_when_instance_destroyed( // sled. let instance_url = format!("/v1/instances/nfs?project={}", PROJECT_NAME); let instance = instance_get(&client, &instance_url).await; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let sa = nexus .instance_sled_by_id(&instance.identity.id) @@ -3656,7 +3656,7 @@ async fn test_cannot_provision_instance_beyond_cpu_capacity( // Make the started instance transition to Running, shut it down, and verify // that the other reasonably-sized instance can now start. 
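Every integration test in these files makes the same mechanical substitution, `cptestctx.server.apictx().nexus` becoming `cptestctx.server.server_context().nexus`. Where a test touches Nexus repeatedly, a small local helper keeps call sites short; `test_nexus` below is a hypothetical convenience written against the renamed accessor, not something this diff adds:

    use std::sync::Arc;
    use omicron_nexus::Nexus;

    /// Fetch the shared Nexus handle through the renamed accessor.
    /// (Hypothetical helper; assumes the concrete test-context type used
    /// by these tests.)
    fn test_nexus(
        cptestctx: &nexus_test_utils::ControlPlaneTestContext<
            omicron_nexus::Server,
        >,
    ) -> &Arc<Nexus> {
        &cptestctx.server.server_context().nexus
    }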
- let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instances[1].identity.id).await; instances[1] = instance_post(client, configs[1].0, InstanceOp::Stop).await; instance_simulate(nexus, &instances[1].identity.id).await; @@ -3762,7 +3762,7 @@ async fn test_cannot_provision_instance_beyond_ram_capacity( // Make the started instance transition to Running, shut it down, and verify // that the other reasonably-sized instance can now start. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instances[1].identity.id).await; instances[1] = instance_post(client, configs[1].0, InstanceOp::Stop).await; instance_simulate(nexus, &instances[1].identity.id).await; @@ -3772,7 +3772,7 @@ async fn test_cannot_provision_instance_beyond_ram_capacity( #[nexus_test] async fn test_instance_serial(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "kris-picks"; @@ -4052,7 +4052,7 @@ async fn stop_and_delete_instance( let client = &cptestctx.external_client; let instance = instance_post(&client, instance_name, InstanceOp::Stop).await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instance.identity.id).await; let url = format!("/v1/instances/{}?project={}", instance_name, PROJECT_NAME); @@ -4436,7 +4436,7 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { // Make sure the instance can actually start even though a collaborator // created it. - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let authn = AuthnMode::SiloUser(user_id); let instance_url = get_instance_url(instance_name); @@ -4533,7 +4533,7 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { // Validate that every sled (except the instance's sled) now has a V2P // mapping for this instance - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); let opctx = diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index cb5eade735f..38cfd258448 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -791,7 +791,7 @@ async fn test_ip_pool_utilization_total(cptestctx: &ControlPlaneTestContext) { // allowed. It's worth doing because we want this code to correctly handle // IPv6 ranges when they are allowed again. 
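The tests that reach below the HTTP API converge on the same preamble: resolve Nexus through the renamed accessor, borrow its datastore, and build a test `OpContext` over it. Distilled from the hunks in these files (a body fragment as it would appear inside a `#[nexus_test]` function, assuming the `OpContext` and `o!` imports these test files already carry):

    // Typical datastore-level test setup after this refactor.
    let nexus = &cptestctx.server.server_context().nexus;
    let datastore = nexus.datastore();
    let opctx =
        OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone());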
- let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let log = cptestctx.logctx.log.new(o!()); let opctx = OpContext::for_tests(log, datastore.clone()); @@ -1147,7 +1147,7 @@ async fn test_ip_range_delete_with_allocated_external_ip_fails( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let ip_pools_url = "/v1/system/ip-pools"; let pool_name = "mypool"; diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs index ec44c3747a3..abcc7f1c75d 100644 --- a/nexus/tests/integration_tests/metrics.rs +++ b/nexus/tests/integration_tests/metrics.rs @@ -179,7 +179,7 @@ async fn test_metrics( // Wait until Nexus registers as a producer with Oximeter. wait_for_producer( &cptestctx.oximeter, - cptestctx.server.apictx().nexus.id(), + cptestctx.server.server_context().nexus.id(), ) .await; @@ -259,7 +259,7 @@ async fn test_timeseries_schema_list( // Nexus registers itself as a metric producer on startup, with its own UUID // as the producer ID. Wait for this to show up in the registered lists of // producers. - let nexus_id = cptestctx.server.apictx().nexus.id(); + let nexus_id = cptestctx.server.server_context().nexus.id(); wait_for_producer(&cptestctx.oximeter, nexus_id).await; // We should be able to fetch the list of timeseries, and it should include @@ -328,7 +328,8 @@ async fn test_instance_watcher_metrics( let client = &cptestctx.external_client; let internal_client = &cptestctx.internal_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; + let oximeter = &cptestctx.oximeter; // TODO(eliza): consider factoring this out to a generic // `activate_background_task` function in `nexus-test-utils` eventually? @@ -399,6 +400,8 @@ async fn test_instance_watcher_metrics( ) .await .unwrap(); + // Make sure that the latest metrics have been collected. + oximeter.force_collect().await; }; #[track_caller] @@ -443,11 +446,8 @@ async fn test_instance_watcher_metrics( let project = create_project_and_pool(&client).await; let project_name = project.identity.name.as_str(); // Wait until Nexus registers as a producer with Oximeter. 
- wait_for_producer( - &cptestctx.oximeter, - cptestctx.server.apictx().nexus.id(), - ) - .await; + wait_for_producer(&oximeter, cptestctx.server.server_context().nexus.id()) + .await; eprintln!("--- creating instance 1 ---"); let instance1 = create_instance(&client, project_name, "i-1").await; diff --git a/nexus/tests/integration_tests/pantry.rs b/nexus/tests/integration_tests/pantry.rs index 1a3908affa1..c5d98709ac2 100644 --- a/nexus/tests/integration_tests/pantry.rs +++ b/nexus/tests/integration_tests/pantry.rs @@ -393,7 +393,7 @@ async fn test_cannot_mount_import_ready_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -424,7 +424,7 @@ async fn test_cannot_mount_import_from_bulk_writes_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -448,7 +448,7 @@ async fn test_import_blocks_with_bulk_write( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -489,7 +489,7 @@ async fn test_import_blocks_with_bulk_write_with_snapshot( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -732,7 +732,7 @@ async fn test_cannot_bulk_write_start_attached_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -762,7 +762,7 @@ async fn test_cannot_bulk_write_attached_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -792,7 +792,7 @@ async fn test_cannot_bulk_write_stop_attached_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -821,7 +821,7 @@ async fn test_cannot_finalize_attached_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; diff --git a/nexus/tests/integration_tests/rack.rs b/nexus/tests/integration_tests/rack.rs index a6c218cea84..c72c59b6f73 100644 --- a/nexus/tests/integration_tests/rack.rs +++ b/nexus/tests/integration_tests/rack.rs @@ -5,6 +5,10 @@ use dropshot::ResultsPage; use http::Method; use http::StatusCode; +use nexus_client::types::SledId; +use nexus_db_model::SledBaseboard; +use 
nexus_db_model::SledSystemHardware; +use nexus_db_model::SledUpdate; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; @@ -17,7 +21,7 @@ use nexus_types::internal_api::params::SledAgentInfo; use nexus_types::internal_api::params::SledRole; use omicron_common::api::external::ByteCount; use omicron_common::api::external::Generation; -use omicron_nexus::TestInterfaces; +use omicron_uuid_kinds::GenericUuid; use uuid::Uuid; type ControlPlaneTestContext = @@ -35,14 +39,17 @@ async fn test_list_own_rack(cptestctx: &ControlPlaneTestContext) { .all_items; assert_eq!(1, racks.len()); - assert_eq!(cptestctx.server.apictx().nexus.rack_id(), racks[0].identity.id); + assert_eq!( + cptestctx.server.server_context().nexus.rack_id(), + racks[0].identity.id + ); } #[nexus_test] async fn test_get_own_rack(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let expected_id = cptestctx.server.apictx().nexus.rack_id(); + let expected_id = cptestctx.server.server_context().nexus.rack_id(); let rack_url = format!("/v1/system/hardware/racks/{}", expected_id); let rack = NexusRequest::object_get(client, &rack_url) .authn_as(AuthnMode::PrivilegedUser) @@ -167,7 +174,7 @@ async fn test_sled_add(cptestctx: &ControlPlaneTestContext) { // Add one of these sleds. let add_url = "/v1/system/hardware/sleds/"; let baseboard = uninitialized_sleds.pop().unwrap().baseboard; - NexusRequest::objects_post( + let sled_id = NexusRequest::objects_post( external_client, add_url, &params::UninitializedSledId { @@ -176,11 +183,53 @@ }, ) .authn_as(AuthnMode::PrivilegedUser) - .execute() + .execute_and_parse_unwrap::<SledId>() .await - .expect("failed to add sled"); + .id; + + // Attempting to add the same sled again should succeed with the same sled + // ID: this operation should be idempotent up until the point at which the + // sled is inserted in the db. + let repeat_sled_id = NexusRequest::objects_post( + external_client, + add_url, + &params::UninitializedSledId { + serial: baseboard.serial.clone(), + part: baseboard.part.clone(), + }, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::<SledId>() + .await + .id; + assert_eq!(sled_id, repeat_sled_id); + + // Now upsert the sled. + let nexus = &cptestctx.server.server_context().nexus; + nexus + .datastore() + .sled_upsert(SledUpdate::new( + sled_id.into_untyped_uuid(), + "[::1]:0".parse().unwrap(), + SledBaseboard { + serial_number: baseboard.serial.clone(), + part_number: baseboard.part.clone(), + revision: 0, + }, + SledSystemHardware { + is_scrimlet: false, + usable_hardware_threads: 8, + usable_physical_ram: (1 << 30).try_into().unwrap(), + reservoir_size: (1 << 20).try_into().unwrap(), + }, + nexus.rack_id(), + Generation::new().into(), + )) + .await + .expect("inserted sled"); - // Attempting to add the same sled again should fail. + // The sled has been commissioned as part of the rack, so adding it should + // fail.
let error: dropshot::HttpErrorResponseBody = NexusRequest::expect_failure_with_body( external_client, diff --git a/nexus/tests/integration_tests/saml.rs b/nexus/tests/integration_tests/saml.rs index b1b0429c2e2..80816f2ea29 100644 --- a/nexus/tests/integration_tests/saml.rs +++ b/nexus/tests/integration_tests/saml.rs @@ -91,7 +91,7 @@ async fn test_create_a_saml_idp(cptestctx: &ControlPlaneTestContext) { .await; // Assert external authenticator opctx can read it - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let (.., _retrieved_silo_nexus) = nexus .silo_lookup( &nexus.opctx_external_authn(), @@ -1167,7 +1167,7 @@ async fn test_post_saml_response(cptestctx: &ControlPlaneTestContext) { ) .await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; nexus.set_samael_max_issue_delay( chrono::Utc::now() - "2022-05-04T15:36:12.631Z" @@ -1298,7 +1298,7 @@ async fn test_post_saml_response_with_relay_state( ) .await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; nexus.set_samael_max_issue_delay( chrono::Utc::now() - "2022-05-04T15:36:12.631Z" diff --git a/nexus/tests/integration_tests/silo_users.rs b/nexus/tests/integration_tests/silo_users.rs index 099a186a2c2..598d2a28a46 100644 --- a/nexus/tests/integration_tests/silo_users.rs +++ b/nexus/tests/integration_tests/silo_users.rs @@ -26,10 +26,10 @@ type ControlPlaneTestContext = #[nexus_test] async fn test_silo_group_users(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ); // we start out with the two default users diff --git a/nexus/tests/integration_tests/silos.rs b/nexus/tests/integration_tests/silos.rs index 6dfddb12e1c..e95b2870ca6 100644 --- a/nexus/tests/integration_tests/silos.rs +++ b/nexus/tests/integration_tests/silos.rs @@ -55,7 +55,7 @@ type ControlPlaneTestContext = #[nexus_test] async fn test_silos(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; // Verify that we cannot create a name with the same name as the recovery // Silo that was created during rack initialization. 
@@ -277,7 +277,7 @@ async fn test_silos(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_silo_admin_group(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let silo: Silo = object_create( client, @@ -523,7 +523,7 @@ async fn test_deleting_a_silo_deletes_the_idp( .expect("failed to make request"); // Expect that the silo is gone - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let response = IdentityProviderType::lookup( &nexus.datastore(), @@ -747,7 +747,7 @@ struct TestSiloUserProvisionTypes { #[nexus_test] async fn test_silo_user_provision_types(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let test_cases: Vec<TestSiloUserProvisionTypes> = vec![ @@ -844,7 +844,7 @@ async fn test_silo_user_fetch_by_external_id( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let silo = create_silo( &client, @@ -1026,7 +1026,7 @@ async fn test_silo_users_list(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_silo_groups_jit(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let silo = create_silo( @@ -1095,7 +1095,7 @@ async fn test_silo_groups_jit(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_silo_groups_fixed(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let silo = create_silo( &client, @@ -1156,7 +1156,7 @@ async fn test_silo_groups_remove_from_one_group( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let silo = create_silo( @@ -1269,7 +1269,7 @@ async fn test_silo_groups_remove_from_both_groups( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let silo = create_silo( @@ -1381,7 +1381,7 @@ async fn test_silo_groups_remove_from_both_groups( #[nexus_test] async fn test_silo_delete_clean_up_groups(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; // Create a silo let silo = create_silo( @@ -1463,7 +1463,7 @@ async fn test_silo_delete_clean_up_groups(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_ensure_same_silo_group(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; // Create a silo let silo = create_silo( @@ -1525,7 +1525,7 @@ #[nexus_test] async fn test_silo_user_views(cptestctx:
&ControlPlaneTestContext) { let client = &cptestctx.external_client; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); // Create the two Silos. let silo1 = @@ -1741,7 +1741,7 @@ async fn create_jit_user( #[nexus_test] async fn test_jit_silo_constraints(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let silo = create_silo(&client, "jit", true, shared::SiloIdentityMode::SamlJit) diff --git a/nexus/tests/integration_tests/sleds.rs b/nexus/tests/integration_tests/sleds.rs index bf1e2e4b99b..97dbb39bc67 100644 --- a/nexus/tests/integration_tests/sleds.rs +++ b/nexus/tests/integration_tests/sleds.rs @@ -106,7 +106,7 @@ async fn test_physical_disk_create_list_delete( let disks_initial = physical_disks_list(&external_client, &disks_url).await; // Inject a disk into the database, observe it in the external API - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let sled_id = Uuid::from_str(&SLED_AGENT_UUID).unwrap(); let physical_disk = DbPhysicalDisk::new( diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 058c59a5017..3fb6f8f6eca 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -136,7 +136,7 @@ async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) { .await; // cannot snapshot attached disk for instance in state starting - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instance.identity.id).await; // Issue snapshot request @@ -362,7 +362,7 @@ async fn test_snapshot_stopped_instance(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); DiskTest::new(&cptestctx).await; let project_id = create_project_and_pool(client).await; @@ -521,7 +521,7 @@ async fn test_reject_creating_disk_from_snapshot( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let project_id = create_project_and_pool(&client).await; @@ -674,7 +674,7 @@ async fn test_reject_creating_disk_from_illegal_snapshot( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let project_id = create_project_and_pool(&client).await; @@ -770,7 +770,7 @@ async fn test_reject_creating_disk_from_other_project_snapshot( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let project_id = create_project_and_pool(&client).await; @@ -1002,7 +1002,7 @@ async fn test_create_snapshot_record_idempotent( cptestctx: &ControlPlaneTestContext, ) { let client = 
&cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let project_id = create_project_and_pool(&client).await; @@ -1194,7 +1194,7 @@ async fn test_create_snapshot_record_idempotent( async fn test_region_snapshot_create_idempotent( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let region_snapshot = db::model::RegionSnapshot { @@ -1218,7 +1218,7 @@ async fn test_region_snapshot_create_idempotent( #[nexus_test] async fn test_multiple_deletes_not_sent(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); DiskTest::new(&cptestctx).await; let _project_id = create_project_and_pool(client).await; diff --git a/nexus/tests/integration_tests/subnet_allocation.rs b/nexus/tests/integration_tests/subnet_allocation.rs index d9d015bf267..0efc659890c 100644 --- a/nexus/tests/integration_tests/subnet_allocation.rs +++ b/nexus/tests/integration_tests/subnet_allocation.rs @@ -91,8 +91,12 @@ async fn test_subnet_allocation(cptestctx: &ControlPlaneTestContext) { // Create a new, small VPC Subnet, so we don't need to issue many requests // to test address exhaustion. - let subnet_size = - cptestctx.server.apictx().nexus.tunables().max_vpc_ipv4_subnet_prefix; + let subnet_size = cptestctx + .server + .server_context() + .nexus + .tunables() + .max_vpc_ipv4_subnet_prefix; let vpc_selector = format!("project={}&vpc=default", project_name); let subnets_url = format!("/v1/vpc-subnets?{}", vpc_selector); let subnet_name = "small"; diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index ecfa7cf0f1d..ae348e775d5 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -1351,7 +1351,7 @@ async fn test_volume_remove_read_only_parent_base( ) { // Test the removal of a volume with a read only parent. // The ROP should end up on the t_vid volume. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1465,7 +1465,7 @@ async fn test_volume_remove_read_only_parent_no_parent( ) { // Test the removal of a read only parent from a volume // without a read only parent. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1483,7 +1483,7 @@ async fn test_volume_remove_read_only_parent_volume_not_volume( ) { // test removal of a read only volume for a volume that is not // of a type to have a read only parent. 
- let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1512,7 +1512,7 @@ async fn test_volume_remove_read_only_parent_bad_volume( ) { // Test the removal of a read only parent from a volume // that does not exist - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1528,7 +1528,7 @@ async fn test_volume_remove_read_only_parent_volume_deleted( cptestctx: &ControlPlaneTestContext, ) { // Test the removal of a read_only_parent from a deleted volume. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1558,7 +1558,7 @@ async fn test_volume_remove_read_only_parent_volume_deleted( async fn test_volume_remove_rop_saga(cptestctx: &ControlPlaneTestContext) { // Test the saga for removal of a volume with a read only parent. // We create a volume with a read only parent, then call the saga on it. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1621,7 +1621,7 @@ async fn test_volume_remove_rop_saga_twice( // Test calling the saga for removal of a volume with a read only parent // two times, the first will remove the read_only_parent, the second will // do nothing. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1720,7 +1720,7 @@ async fn test_volume_remove_rop_saga_volume_not_volume( ) { // Test saga removal of a read only volume for a volume that is not // of a type to have a read only parent. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let volume_id = Uuid::new_v4(); let datastore = nexus.datastore(); @@ -1759,7 +1759,7 @@ async fn test_volume_remove_rop_saga_deleted_volume( ) { // Test that a saga removal of a read_only_parent from a deleted volume // takes no action on that deleted volume. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1823,7 +1823,7 @@ async fn test_volume_remove_rop_saga_deleted_volume( async fn test_volume_checkout(cptestctx: &ControlPlaneTestContext) { // Verify that a volume_checkout will update the generation number in the // database when the volume type is Volume with sub_volume Region. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1874,7 +1874,7 @@ async fn test_volume_checkout_updates_nothing( ) { // Verify that a volume_checkout will do nothing for a volume that does // not contain a sub_volume with a generation field. 
- let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1927,7 +1927,7 @@ async fn test_volume_checkout_updates_multiple_gen( // Verify that a volume_checkout will update the generation number in the // database when the volume type is Volume with multiple sub_volumes of // type Region. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1993,7 +1993,7 @@ async fn test_volume_checkout_updates_sparse_multiple_gen( // database when the volume type is Volume with multiple sub_volumes of // type Region and also verify that a non generation sub_volume won't be a // problem - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -2054,7 +2054,7 @@ async fn test_volume_checkout_updates_sparse_mid_multiple_gen( // database when the volume type is Volume with multiple sub_volumes of // type Region and also verify that a non generation sub_volume in the // middle of the sub_volumes won't be a problem - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -2113,7 +2113,7 @@ async fn test_volume_checkout_randomize_ids_only_read_only( ) { // Verify that a volume_checkout_randomize_ids will not work for // non-read-only Regions - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -2155,7 +2155,7 @@ async fn test_volume_checkout_randomize_ids_only_read_only( /// `[ipv6]:port` targets being reused. #[nexus_test] async fn test_keep_your_targets_straight(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); // Four zpools, one dataset each @@ -2646,7 +2646,7 @@ fn volume_match_gen( async fn test_volume_hard_delete_idempotent( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); diff --git a/nexus/tests/integration_tests/vpc_subnets.rs b/nexus/tests/integration_tests/vpc_subnets.rs index 76cff9ac791..0814512cf2d 100644 --- a/nexus/tests/integration_tests/vpc_subnets.rs +++ b/nexus/tests/integration_tests/vpc_subnets.rs @@ -31,7 +31,7 @@ async fn test_delete_vpc_subnet_with_interfaces_fails( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; // Create a project that we'll use for testing. 
diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index 372cee858ac..8f766334169 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -12,6 +12,7 @@ anyhow.workspace = true chrono.workspace = true clap.workspace = true base64.workspace = true +derive-where.workspace = true futures.workspace = true humantime.workspace = true ipnetwork.workspace = true @@ -38,3 +39,7 @@ omicron-common.workspace = true omicron-passwords.workspace = true omicron-workspace-hack.workspace = true sled-agent-client.workspace = true + +[dev-dependencies] +proptest.workspace = true +test-strategy.workspace = true diff --git a/nexus/types/proptest-regressions/deployment/tri_map.txt b/nexus/types/proptest-regressions/deployment/tri_map.txt new file mode 100644 index 00000000000..c3f4260f522 --- /dev/null +++ b/nexus/types/proptest-regressions/deployment/tri_map.txt @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc bafcbc817cff65814a6f3233f1ef3d6c36f75c37ad35175d17d1c8484a734034 # shrinks to input = _ProptestOpsArgs { initial: {(0, '$', ""): "", (0, ' ', ""): ""}, ops: [] } diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index b7b5bf6aac4..a577c4978c4 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -42,15 +42,22 @@ use strum::IntoEnumIterator; use thiserror::Error; use uuid::Uuid; +mod network_resources; mod planning_input; +mod tri_map; mod zone_type; +pub use network_resources::AddNetworkResourceError; +pub use network_resources::OmicronZoneExternalFloatingAddr; +pub use network_resources::OmicronZoneExternalFloatingIp; +pub use network_resources::OmicronZoneExternalIp; +pub use network_resources::OmicronZoneExternalIpEntry; +pub use network_resources::OmicronZoneExternalIpKey; +pub use network_resources::OmicronZoneExternalSnatIp; +pub use network_resources::OmicronZoneNetworkResources; +pub use network_resources::OmicronZoneNic; +pub use network_resources::OmicronZoneNicEntry; pub use planning_input::DiskFilter; -pub use planning_input::OmicronZoneExternalFloatingAddr; -pub use planning_input::OmicronZoneExternalFloatingIp; -pub use planning_input::OmicronZoneExternalIp; -pub use planning_input::OmicronZoneExternalSnatIp; -pub use planning_input::OmicronZoneNic; pub use planning_input::PlanningInput; pub use planning_input::PlanningInputBuildError; pub use planning_input::PlanningInputBuilder; diff --git a/nexus/types/src/deployment/network_resources.rs b/nexus/types/src/deployment/network_resources.rs new file mode 100644 index 00000000000..15f495d87a7 --- /dev/null +++ b/nexus/types/src/deployment/network_resources.rs @@ -0,0 +1,307 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
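+
+//! Tracking for per-zone network resources (external IPs and vNICs).
+//!
+//! A rough usage sketch (illustrative only; the IDs and the address below
+//! are made up, and `new_v4()` constructors are assumed for the typed
+//! UUIDs):
+//!
+//! ```ignore
+//! let mut resources = OmicronZoneNetworkResources::new();
+//! let zone_id = OmicronZoneUuid::new_v4();
+//! let ip = OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp {
+//!     id: ExternalIpUuid::new_v4(),
+//!     ip: "192.0.2.1".parse().unwrap(),
+//! });
+//! resources.add_external_ip(zone_id, ip).unwrap();
+//! // Re-associating the same zone (or the same IP) is rejected as a
+//! // duplicate.
+//! assert!(resources.add_external_ip(zone_id, ip).is_err());
+//! ```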
+ +use super::tri_map::TriMap; +use super::tri_map::TriMapEntry; +use anyhow::anyhow; +use omicron_common::api::external::MacAddr; +use omicron_common::api::internal::shared::SourceNatConfig; +use omicron_uuid_kinds::ExternalIpUuid; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::VnicUuid; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use std::net::IpAddr; +use std::net::SocketAddr; +use thiserror::Error; + +/// Tracker and validator for network resources allocated to Omicron-managed +/// zones. +/// +/// ## Implementation notes +/// +/// `OmicronZoneNetworkResources` consists of two 1:1:1 "trijective" maps: +/// +/// 1. Providing a unique map for Omicron zone IDs, external IP IDs, and +/// external IPs. +/// 2. Providing a unique map for Omicron zone IDs, vNIC IDs, and vNICs. +/// +/// One question that arises: should there instead be a single 1:1:1:1:1 map? +/// In other words, is there a 1:1 mapping between external IPs and vNICs as +/// well? The answer is "generally yes", but: +/// +/// - They're not stored in the database that way, and it's possible that +/// there's some divergence. +/// - We currently don't plan to get any utility out of asserting the 1:1:1:1:1 +/// map. The main planned use of this is for expunged zone garbage collection +/// -- while that benefits from trijective maps tremendously, there's no +/// additional value in asserting a unique mapping between external IPs and +/// vNICs. +/// +/// So we use two separate maps for now. But a single map is always a +/// possibility in the future, if required. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct OmicronZoneNetworkResources { + /// external IPs allocated to Omicron zones + omicron_zone_external_ips: TriMap<OmicronZoneExternalIpEntry>, + + /// vNICs allocated to Omicron zones + omicron_zone_nics: TriMap<OmicronZoneNicEntry>, +} + +impl OmicronZoneNetworkResources { + pub fn new() -> Self { + Self { + omicron_zone_external_ips: TriMap::new(), + omicron_zone_nics: TriMap::new(), + } + } + + pub fn add_external_ip( + &mut self, + zone_id: OmicronZoneUuid, + ip: OmicronZoneExternalIp, + ) -> Result<(), AddNetworkResourceError> { + let entry = OmicronZoneExternalIpEntry { zone_id, ip }; + self.omicron_zone_external_ips.insert_no_dups(entry).map_err(|err| { + AddNetworkResourceError::DuplicateOmicronZoneExternalIp { + zone_id, + ip, + err: anyhow!(err), + } + }) + } + + pub fn add_nic( + &mut self, + zone_id: OmicronZoneUuid, + nic: OmicronZoneNic, + ) -> Result<(), AddNetworkResourceError> { + let entry = OmicronZoneNicEntry { zone_id, nic: nic.clone() }; + self.omicron_zone_nics.insert_no_dups(entry).map_err(|err| { + AddNetworkResourceError::DuplicateOmicronZoneNic { + zone_id, + nic, + err: anyhow!(err), + } + }) + } + + pub fn get_external_ip_by_zone_id( + &self, + zone_id: OmicronZoneUuid, + ) -> Option<&OmicronZoneExternalIpEntry> { + self.omicron_zone_external_ips.get1(&zone_id) + } + + pub fn get_external_ip_by_external_ip_id( + &self, + ip: ExternalIpUuid, + ) -> Option<&OmicronZoneExternalIpEntry> { + self.omicron_zone_external_ips.get2(&ip) + } + + pub fn get_external_ip_by_ip( + &self, + ip: OmicronZoneExternalIpKey, + ) -> Option<&OmicronZoneExternalIpEntry> { + self.omicron_zone_external_ips.get3(&ip) + } + + pub fn get_nic_by_zone_id( + &self, + zone_id: OmicronZoneUuid, + ) -> Option<&OmicronZoneNicEntry> { + self.omicron_zone_nics.get1(&zone_id) + } + + pub fn get_nic_by_vnic_id( + &self, + vnic_id: VnicUuid, + ) -> Option<&OmicronZoneNicEntry> { + self.omicron_zone_nics.get2(&vnic_id) + }
+ + pub fn get_nic_by_mac(&self, mac: MacAddr) -> Option<&OmicronZoneNicEntry> { + self.omicron_zone_nics.get3(&mac) + } +} + +/// External IP variants possible for Omicron-managed zones. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum OmicronZoneExternalIp { + Floating(OmicronZoneExternalFloatingIp), + Snat(OmicronZoneExternalSnatIp), + // We may eventually want `Ephemeral(_)` too (arguably Nexus could be + // ephemeral?), but for now we only have Floating and Snat uses. +} + +impl OmicronZoneExternalIp { + pub fn id(&self) -> ExternalIpUuid { + match self { + OmicronZoneExternalIp::Floating(ext) => ext.id, + OmicronZoneExternalIp::Snat(ext) => ext.id, + } + } + + pub fn ip(&self) -> IpAddr { + match self { + OmicronZoneExternalIp::Floating(ext) => ext.ip, + OmicronZoneExternalIp::Snat(ext) => ext.snat_cfg.ip, + } + } + + pub fn ip_key(&self) -> OmicronZoneExternalIpKey { + match self { + OmicronZoneExternalIp::Floating(ip) => { + OmicronZoneExternalIpKey::Floating(ip.ip) + } + OmicronZoneExternalIp::Snat(snat) => { + OmicronZoneExternalIpKey::Snat(snat.snat_cfg) + } + } + } +} + +/// An IP-based key suitable for uniquely identifying an +/// [`OmicronZoneExternalIp`]. +/// +/// We can't use the IP itself to uniquely identify an external IP because SNAT +/// IPs can have overlapping addresses. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum OmicronZoneExternalIpKey { + Floating(IpAddr), + Snat(SourceNatConfig), +} + +/// Floating external IP allocated to an Omicron-managed zone. +/// +/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields +/// necessary for blueprint planning, and requires that the zone have a single +/// IP. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, +)] +pub struct OmicronZoneExternalFloatingIp { + pub id: ExternalIpUuid, + pub ip: IpAddr, +} + +/// Floating external address with port allocated to an Omicron-managed zone. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, +)] +pub struct OmicronZoneExternalFloatingAddr { + pub id: ExternalIpUuid, + pub addr: SocketAddr, +} + +impl OmicronZoneExternalFloatingAddr { + pub fn into_ip(self) -> OmicronZoneExternalFloatingIp { + OmicronZoneExternalFloatingIp { id: self.id, ip: self.addr.ip() } + } +} + +/// SNAT (outbound) external IP allocated to an Omicron-managed zone. +/// +/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields +/// necessary for blueprint planning, and requires that the zone have a single +/// IP. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, +)] +pub struct OmicronZoneExternalSnatIp { + pub id: ExternalIpUuid, + pub snat_cfg: SourceNatConfig, +} + +/// Network interface allocated to an Omicron-managed zone. +/// +/// This is a slimmer `nexus_db_model::ServiceNetworkInterface` that only stores +/// the fields necessary for blueprint planning. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct OmicronZoneNic { + pub id: VnicUuid, + pub mac: MacAddr, + pub ip: IpAddr, + pub slot: u8, + pub primary: bool, +} + +/// A pair of an Omicron zone ID and an external IP. +/// +/// Part of [`OmicronZoneNetworkResources`]. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct OmicronZoneExternalIpEntry { + pub zone_id: OmicronZoneUuid, + pub ip: OmicronZoneExternalIp, +} + +/// Specification for the tri-map of Omicron zone external IPs. 
+impl TriMapEntry for OmicronZoneExternalIpEntry { + type K1 = OmicronZoneUuid; + type K2 = ExternalIpUuid; + + // Note: cannot use IpAddr here, because SNAT IPs can overlap as long as + // their port blocks are disjoint. + type K3 = OmicronZoneExternalIpKey; + + fn key1(&self) -> Self::K1 { + self.zone_id + } + + fn key2(&self) -> Self::K2 { + self.ip.id() + } + + fn key3(&self) -> Self::K3 { + self.ip.ip_key() + } +} + +/// A pair of an Omicron zone ID and a network interface. +/// +/// Part of [`OmicronZoneNetworkResources`]. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct OmicronZoneNicEntry { + zone_id: OmicronZoneUuid, + nic: OmicronZoneNic, +} + +impl TriMapEntry for OmicronZoneNicEntry { + type K1 = OmicronZoneUuid; + type K2 = VnicUuid; + type K3 = MacAddr; + + fn key1(&self) -> Self::K1 { + self.zone_id + } + + fn key2(&self) -> Self::K2 { + self.nic.id + } + + fn key3(&self) -> Self::K3 { + self.nic.mac + } +} + +#[derive(Debug, Error)] +pub enum AddNetworkResourceError { + #[error("associating Omicron zone {zone_id} with {ip:?} failed due to duplicates")] + DuplicateOmicronZoneExternalIp { + zone_id: OmicronZoneUuid, + ip: OmicronZoneExternalIp, + #[source] + err: anyhow::Error, + }, + #[error("associating Omicron zone {zone_id} with {nic:?} failed due to duplicates")] + DuplicateOmicronZoneNic { + zone_id: OmicronZoneUuid, + nic: OmicronZoneNic, + #[source] + err: anyhow::Error, + }, +} diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index 1975cfaae0b..ccb15b858a6 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -5,34 +5,136 @@ //! Types describing inputs the Reconfigurator needs to plan and produce new //! blueprints. +use super::AddNetworkResourceError; +use super::OmicronZoneExternalIp; +use super::OmicronZoneNetworkResources; +use super::OmicronZoneNic; use crate::external_api::views::PhysicalDiskPolicy; use crate::external_api::views::PhysicalDiskState; use crate::external_api::views::SledPolicy; use crate::external_api::views::SledProvisionPolicy; use crate::external_api::views::SledState; use clap::ValueEnum; +use ipnetwork::IpNetwork; use omicron_common::address::IpRange; use omicron_common::address::Ipv6Subnet; use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::Generation; -use omicron_common::api::external::MacAddr; -use omicron_common::api::internal::shared::SourceNatConfig; use omicron_common::api::internal::shared::SourceNatConfigError; use omicron_common::disk::DiskIdentity; -use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; -use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use std::collections::btree_map::Entry; use std::collections::BTreeMap; -use std::net::IpAddr; -use std::net::SocketAddr; use strum::IntoEnumIterator; -use uuid::Uuid; + +/// Policy and database inputs to the Reconfigurator planner +/// +/// The primary inputs to the planner are the parent (either a parent blueprint +/// or an inventory collection) and this structure. This type holds the +/// fleet-wide policy as well as any additional information fetched from CRDB +/// that the planner needs to make decisions. +/// +/// The current policy is pretty limited. It's aimed primarily at supporting +/// the add/remove sled use case. 
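+///
+/// A rough sketch of how a `PlanningInput` is typically assembled
+/// (illustrative only; the constructor and `build` finisher are assumed to
+/// follow the shape implied by the builder below, and error handling is
+/// elided):
+///
+/// ```ignore
+/// let mut builder = PlanningInputBuilder::new(
+///     policy,
+///     internal_dns_version,
+///     external_dns_version,
+/// );
+/// builder.add_omicron_zone_external_ip(zone_id, ip)?;
+/// builder.add_omicron_zone_nic(zone_id, nic)?;
+/// let input = builder.build();
+/// ```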
+/// +/// The planning input has some internal invariants that code outside of this +/// module can rely on. They include: +/// +/// - Each Omicron zone has at most one external IP and at most one vNIC. +/// - A given external IP or vNIC is only associated with a single Omicron +/// zone. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PlanningInput { + /// fleet-wide policy + policy: Policy, + + /// current internal DNS version + internal_dns_version: Generation, + + /// current external DNS version + external_dns_version: Generation, + + /// per-sled policy and resources + sleds: BTreeMap<SledUuid, SledDetails>, + + /// per-zone network resources + network_resources: OmicronZoneNetworkResources, +} + +impl PlanningInput { + pub fn internal_dns_version(&self) -> Generation { + self.internal_dns_version + } + + pub fn external_dns_version(&self) -> Generation { + self.external_dns_version + } + + pub fn target_nexus_zone_count(&self) -> usize { + self.policy.target_nexus_zone_count + } + + pub fn service_ip_pool_ranges(&self) -> &[IpRange] { + &self.policy.service_ip_pool_ranges + } + + pub fn all_sleds( + &self, + filter: SledFilter, + ) -> impl Iterator<Item = (SledUuid, &SledDetails)> + '_ { + self.sleds.iter().filter_map(move |(&sled_id, details)| { + filter + .matches_policy_and_state(details.policy, details.state) + .then_some((sled_id, details)) + }) + } + + pub fn all_sled_ids( + &self, + filter: SledFilter, + ) -> impl Iterator<Item = SledUuid> + '_ { + self.all_sleds(filter).map(|(sled_id, _)| sled_id) + } + + pub fn all_sled_resources( + &self, + filter: SledFilter, + ) -> impl Iterator<Item = (SledUuid, &SledResources)> + '_ { + self.all_sleds(filter) + .map(|(sled_id, details)| (sled_id, &details.resources)) + } + + pub fn sled_policy(&self, sled_id: &SledUuid) -> Option<SledPolicy> { + self.sleds.get(sled_id).map(|details| details.policy) + } + + pub fn sled_resources(&self, sled_id: &SledUuid) -> Option<&SledResources> { + self.sleds.get(sled_id).map(|details| &details.resources) + } + + pub fn network_resources(&self) -> &OmicronZoneNetworkResources { + &self.network_resources + } + + /// Convert this `PlanningInput` back into a [`PlanningInputBuilder`] + /// + /// This is primarily useful for tests that want to mutate an existing + /// [`PlanningInput`]. + pub fn into_builder(self) -> PlanningInputBuilder { + PlanningInputBuilder { + policy: self.policy, + internal_dns_version: self.internal_dns_version, + external_dns_version: self.external_dns_version, + sleds: self.sleds, + network_resources: self.network_resources, + } + } +} /// Describes a single disk already managed by the sled. #[derive(Debug, Clone, Serialize, Deserialize)] @@ -152,85 +254,6 @@ impl SledResources { } } -/// External IP variants possible for Omicron-managed zones. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] -pub enum OmicronZoneExternalIp { - Floating(OmicronZoneExternalFloatingIp), - Snat(OmicronZoneExternalSnatIp), - // We may eventually want `Ephemeral(_)` too (arguably Nexus could be - // ephemeral?), but for now we only have Floating and Snat uses. -} - -impl OmicronZoneExternalIp { - pub fn id(&self) -> ExternalIpUuid { - match self { - OmicronZoneExternalIp::Floating(ext) => ext.id, - OmicronZoneExternalIp::Snat(ext) => ext.id, - } - } - - pub fn ip(&self) -> IpAddr { - match self { - OmicronZoneExternalIp::Floating(ext) => ext.ip, - OmicronZoneExternalIp::Snat(ext) => ext.snat_cfg.ip, - } - } -} - -/// Floating external IP allocated to an Omicron-managed zone.
-/// -/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields -/// necessary for blueprint planning, and requires that the zone have a single -/// IP. -#[derive( - Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, -)] -pub struct OmicronZoneExternalFloatingIp { - pub id: ExternalIpUuid, - pub ip: IpAddr, -} - -/// Floating external address with port allocated to an Omicron-managed zone. -#[derive( - Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, -)] -pub struct OmicronZoneExternalFloatingAddr { - pub id: ExternalIpUuid, - pub addr: SocketAddr, -} - -impl OmicronZoneExternalFloatingAddr { - pub fn into_ip(self) -> OmicronZoneExternalFloatingIp { - OmicronZoneExternalFloatingIp { id: self.id, ip: self.addr.ip() } - } -} - -/// SNAT (outbound) external IP allocated to an Omicron-managed zone. -/// -/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields -/// necessary for blueprint planning, and requires that the zone have a single -/// IP. -#[derive( - Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, -)] -pub struct OmicronZoneExternalSnatIp { - pub id: ExternalIpUuid, - pub snat_cfg: SourceNatConfig, -} - -/// Network interface allocated to an Omicron-managed zone. -/// -/// This is a slimmer `nexus_db_model::ServiceNetworkInterface` that only stores -/// the fields necessary for blueprint planning. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct OmicronZoneNic { - pub id: Uuid, - pub mac: MacAddr, - pub ip: IpAddr, - pub slot: u8, - pub primary: bool, -} - /// Filters that apply to sleds. /// /// This logic lives here rather than within the individual components making @@ -250,6 +273,14 @@ pub enum SledFilter { /// fetch "all sleds regardless of current policy or state". Commissioned, + /// All sleds that were previously part of the control plane cluster but + /// have been decommissioned. + /// + /// Any sleds matching this filter are expected to no longer be present. + /// This filter is only useful for historical or debugging purposes, such as + /// listing decommissioned sleds via `omdb`. + Decommissioned, + /// Sleds that are eligible for discretionary services. 
Discretionary, @@ -312,6 +343,7 @@ impl SledPolicy { provision_policy: SledProvisionPolicy::Provisionable, } => match filter { SledFilter::Commissioned => true, + SledFilter::Decommissioned => false, SledFilter::Discretionary => true, SledFilter::InService => true, SledFilter::QueryDuringInventory => true, @@ -322,6 +354,7 @@ impl SledPolicy { provision_policy: SledProvisionPolicy::NonProvisionable, } => match filter { SledFilter::Commissioned => true, + SledFilter::Decommissioned => false, SledFilter::Discretionary => false, SledFilter::InService => true, SledFilter::QueryDuringInventory => true, @@ -330,6 +363,7 @@ impl SledPolicy { }, SledPolicy::Expunged => match filter { SledFilter::Commissioned => true, + SledFilter::Decommissioned => true, SledFilter::Discretionary => false, SledFilter::InService => false, SledFilter::QueryDuringInventory => false, @@ -360,6 +394,7 @@ impl SledState { match self { SledState::Active => match filter { SledFilter::Commissioned => true, + SledFilter::Decommissioned => false, SledFilter::Discretionary => true, SledFilter::InService => true, SledFilter::QueryDuringInventory => true, @@ -368,6 +403,7 @@ impl SledState { }, SledState::Decommissioned => match filter { SledFilter::Commissioned => false, + SledFilter::Decommissioned => true, SledFilter::Discretionary => false, SledFilter::InService => false, SledFilter::QueryDuringInventory => false, @@ -413,37 +449,6 @@ pub struct Policy { pub target_nexus_zone_count: usize, } -/// Policy and database inputs to the Reconfigurator planner -/// -/// The primary inputs to the planner are the parent (either a parent blueprint -/// or an inventory collection) and this structure. This type holds the -/// fleet-wide policy as well as any additional information fetched from CRDB -/// that the planner needs to make decisions. -/// -/// -/// The current policy is pretty limited. It's aimed primarily at supporting -/// the add/remove sled use case. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PlanningInput { - /// fleet-wide policy - policy: Policy, - - /// current internal DNS version - internal_dns_version: Generation, - - /// current external DNS version - external_dns_version: Generation, - - /// per-sled policy and resources - sleds: BTreeMap<SledUuid, SledDetails>, - - /// external IPs allocated to Omicron zones - omicron_zone_external_ips: BTreeMap<OmicronZoneUuid, OmicronZoneExternalIp>, - - /// vNICs allocated to Omicron zones - omicron_zone_nics: BTreeMap<OmicronZoneUuid, OmicronZoneNic>, -} - #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SledDetails { /// current sled policy @@ -454,82 +459,14 @@ pub struct SledDetails { pub resources: SledResources, } -impl PlanningInput { - pub fn internal_dns_version(&self) -> Generation { - self.internal_dns_version - } - - pub fn external_dns_version(&self) -> Generation { - self.external_dns_version - } - - pub fn target_nexus_zone_count(&self) -> usize { - self.policy.target_nexus_zone_count - } - - pub fn service_ip_pool_ranges(&self) -> &[IpRange] { - &self.policy.service_ip_pool_ranges - } - - pub fn all_sleds( - &self, - filter: SledFilter, - ) -> impl Iterator<Item = (SledUuid, &SledDetails)> + '_ { - self.sleds.iter().filter_map(move |(&sled_id, details)| { - filter - .matches_policy_and_state(details.policy, details.state) - .then_some((sled_id, details)) - }) - } - - pub fn all_sled_ids( - &self, - filter: SledFilter, - ) -> impl Iterator<Item = SledUuid> + '_ { - self.all_sleds(filter).map(|(sled_id, _)| sled_id) - } - - pub fn all_sled_resources( - &self, - filter: SledFilter, - ) -> impl Iterator<Item = (SledUuid, &SledResources)> + '_ { - self.all_sleds(filter) - .map(|(sled_id, details)| (sled_id, &details.resources)) - } - - pub fn sled_policy(&self, sled_id: &SledUuid) -> Option<SledPolicy> { - self.sleds.get(sled_id).map(|details| details.policy) - } - - pub fn sled_resources(&self, sled_id: &SledUuid) -> Option<&SledResources> { - self.sleds.get(sled_id).map(|details| &details.resources) - } - - // Convert this `PlanningInput` back into a [`PlanningInputBuilder`] - // - // This is primarily useful for tests that want to mutate an existing - // `PlanningInput`. - pub fn into_builder(self) -> PlanningInputBuilder { - PlanningInputBuilder { - policy: self.policy, - internal_dns_version: self.internal_dns_version, - external_dns_version: self.external_dns_version, - sleds: self.sleds, - omicron_zone_external_ips: self.omicron_zone_external_ips, - omicron_zone_nics: self.omicron_zone_nics, - } - } -} - #[derive(Debug, thiserror::Error)] pub enum PlanningInputBuildError { #[error("duplicate sled ID: {0}")] DuplicateSledId(SledUuid), - #[error("Omicron zone {zone_id} already has an external IP ({ip:?})")] - DuplicateOmicronZoneExternalIp { - zone_id: OmicronZoneUuid, - ip: OmicronZoneExternalIp, - }, + #[error("Omicron zone {zone_id} has a range of IPs ({ip:?}), only a single IP is supported")] + NotSingleIp { zone_id: OmicronZoneUuid, ip: IpNetwork }, + #[error(transparent)] + AddNetworkResource(#[from] AddNetworkResourceError), #[error("Omicron zone {0} has an ephemeral IP (unsupported)")] EphemeralIpUnsupported(OmicronZoneUuid), #[error("Omicron zone {zone_id} has a bad SNAT config")] BadSnatConfig { zone_id: OmicronZoneUuid, #[source] err: SourceNatConfigError, }, - #[error("Omicron zone {zone_id} already has a NIC ({nic:?})")] - DuplicateOmicronZoneNic { zone_id: OmicronZoneUuid, nic: OmicronZoneNic }, } /// Constructor for [`PlanningInput`].
@@ -549,12 +484,12 @@ pub struct PlanningInputBuilder { internal_dns_version: Generation, external_dns_version: Generation, sleds: BTreeMap<SledUuid, SledDetails>, - omicron_zone_external_ips: BTreeMap<OmicronZoneUuid, OmicronZoneExternalIp>, - omicron_zone_nics: BTreeMap<OmicronZoneUuid, OmicronZoneNic>, + network_resources: OmicronZoneNetworkResources, } impl PlanningInputBuilder { - pub const fn empty_input() -> PlanningInput { + pub fn empty_input() -> PlanningInput { + // This empty input is known to be valid. PlanningInput { policy: Policy { service_ip_pool_ranges: Vec::new(), @@ -563,8 +498,7 @@ impl PlanningInputBuilder { internal_dns_version: Generation::new(), external_dns_version: Generation::new(), sleds: BTreeMap::new(), - omicron_zone_external_ips: BTreeMap::new(), - omicron_zone_nics: BTreeMap::new(), + network_resources: OmicronZoneNetworkResources::new(), } } @@ -578,8 +512,7 @@ impl PlanningInputBuilder { internal_dns_version, external_dns_version, sleds: BTreeMap::new(), - omicron_zone_external_ips: BTreeMap::new(), - omicron_zone_nics: BTreeMap::new(), + network_resources: OmicronZoneNetworkResources::new(), } } @@ -604,18 +537,7 @@ impl PlanningInputBuilder { zone_id: OmicronZoneUuid, ip: OmicronZoneExternalIp, ) -> Result<(), PlanningInputBuildError> { - match self.omicron_zone_external_ips.entry(zone_id) { - Entry::Vacant(slot) => { - slot.insert(ip); - Ok(()) - } - Entry::Occupied(prev) => { - Err(PlanningInputBuildError::DuplicateOmicronZoneExternalIp { - zone_id, - ip: *prev.get(), - }) - } - } + Ok(self.network_resources.add_external_ip(zone_id, ip)?) } pub fn add_omicron_zone_nic( @@ -623,18 +545,7 @@ impl PlanningInputBuilder { zone_id: OmicronZoneUuid, nic: OmicronZoneNic, ) -> Result<(), PlanningInputBuildError> { - match self.omicron_zone_nics.entry(zone_id) { - Entry::Vacant(slot) => { - slot.insert(nic); - Ok(()) - } - Entry::Occupied(prev) => { - Err(PlanningInputBuildError::DuplicateOmicronZoneNic { - zone_id, - nic: prev.get().clone(), - }) - } - } + Ok(self.network_resources.add_nic(zone_id, nic)?) } pub fn policy_mut(&mut self) -> &mut Policy { @@ -663,8 +574,7 @@ impl PlanningInputBuilder { internal_dns_version: self.internal_dns_version, external_dns_version: self.external_dns_version, sleds: self.sleds, - omicron_zone_external_ips: self.omicron_zone_external_ips, - omicron_zone_nics: self.omicron_zone_nics, + network_resources: self.network_resources, } } } diff --git a/nexus/types/src/deployment/tri_map.rs b/nexus/types/src/deployment/tri_map.rs new file mode 100644 index 00000000000..52b64aec43c --- /dev/null +++ b/nexus/types/src/deployment/tri_map.rs @@ -0,0 +1,511 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::{ + borrow::Borrow, + collections::{hash_map, BTreeSet, HashMap}, + fmt, + hash::Hash, +}; + +use derive_where::derive_where; +use serde::{Deserialize, Serialize, Serializer}; + +/// An append-only 1:1:1 (trijective) map for three keys and a value. +/// +/// The storage mechanism is a vector of entries, with indexes into that vector +/// stored in three hashmaps. This allows for efficient lookups by any of the +/// three keys, while preventing duplicates. +/// +/// Not totally generic yet, just meant for the deployment use case.
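+///
+/// # Example
+///
+/// An illustrative sketch (not a doctest; `PersonEntry` is a hypothetical
+/// entry type, not part of this module):
+///
+/// ```ignore
+/// #[derive(Clone, Debug)]
+/// struct PersonEntry { id: u32, initial: char, name: String }
+///
+/// impl TriMapEntry for PersonEntry {
+///     type K1 = u32;
+///     type K2 = char;
+///     type K3 = String;
+///     fn key1(&self) -> u32 { self.id }
+///     fn key2(&self) -> char { self.initial }
+///     fn key3(&self) -> String { self.name.clone() }
+/// }
+///
+/// let mut map = TriMap::<PersonEntry>::new();
+/// let alice = PersonEntry { id: 1, initial: 'a', name: "alice".into() };
+/// map.insert_no_dups(alice).unwrap();
+/// // The same entry is now reachable through any of the three keys.
+/// assert!(map.get1(&1).is_some());
+/// assert!(map.get2(&'a').is_some());
+/// assert!(map.get3("alice").is_some());
+/// // A collision on *any* of the three keys is rejected.
+/// let bob = PersonEntry { id: 1, initial: 'b', name: "bob".into() };
+/// assert!(map.insert_no_dups(bob).is_err());
+/// ```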
+#[derive_where(Clone, Debug, Default)] +pub(crate) struct TriMap<T: TriMapEntry> { + entries: Vec<T>, + // Invariant: the values (usize) in these maps are valid indexes into + // `entries`, and form a 1:1 mapping. + k1_to_entry: HashMap<T::K1, usize>, + k2_to_entry: HashMap<T::K2, usize>, + k3_to_entry: HashMap<T::K3, usize>, +} + +// Note: Eq and PartialEq are not implemented for TriMap. Implementing them +// would need to be done with care, because TriMap is not semantically like an +// IndexMap: two maps are equivalent even if their entries are in a different +// order. + +/// The `Serialize` impl for `TriMap` serializes just the list of entries. +impl<T: TriMapEntry> Serialize for TriMap<T> +where + T: Serialize, +{ + fn serialize<S: Serializer>( + &self, + serializer: S, + ) -> Result<S::Ok, S::Error> { + // Serialize just the entries -- don't serialize the indexes. We'll + // rebuild the indexes on deserialization. + self.entries.serialize(serializer) + } +} + +/// The `Deserialize` impl for `TriMap` deserializes the list of entries and +/// then rebuilds the indexes, producing an error if there are any duplicates. +impl<'de, T: TriMapEntry> Deserialize<'de> for TriMap<T> +where + T: Deserialize<'de>, +{ + fn deserialize<D: serde::Deserializer<'de>>( + deserializer: D, + ) -> Result<Self, D::Error> { + // First, deserialize the entries. + let entries = Vec::<T>::deserialize(deserializer)?; + + // Now build a map from scratch, inserting the entries sequentially. + // This will catch issues with duplicates. + let mut map = TriMap::new(); + for entry in entries { + map.insert_no_dups(entry).map_err(serde::de::Error::custom)?; + } + + Ok(map) + } +}
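+
+// An illustrative serde round-trip, for some entry type `E` implementing
+// `TriMapEntry`, `Serialize`, and `Deserialize` (a sketch, not code in this
+// module): serialization writes only `entries`, and deserialization rebuilds
+// all three indexes, failing on any duplicate key:
+//
+//     let json = serde_json::to_string(&map).unwrap();
+//     let restored: TriMap<E> = serde_json::from_str(&json).unwrap();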
+impl<T: TriMapEntry> TriMap<T> {
+    pub(crate) fn new() -> Self {
+        Self {
+            entries: Vec::new(),
+            k1_to_entry: HashMap::new(),
+            k2_to_entry: HashMap::new(),
+            k3_to_entry: HashMap::new(),
+        }
+    }
+
+    /// Checks general invariants of the map.
+    ///
+    /// The code below always upholds these invariants, but it's useful to have
+    /// an explicit check for tests.
+    #[cfg(test)]
+    fn validate(&self) -> anyhow::Result<()> {
+        use anyhow::{ensure, Context};
+
+        // Check that all the maps are of the right size.
+        ensure!(
+            self.entries.len() == self.k1_to_entry.len(),
+            "key1 index has {} entries, but there are {} entries",
+            self.k1_to_entry.len(),
+            self.entries.len()
+        );
+        ensure!(
+            self.entries.len() == self.k2_to_entry.len(),
+            "key2 index has {} entries, but there are {} entries",
+            self.k2_to_entry.len(),
+            self.entries.len()
+        );
+        ensure!(
+            self.entries.len() == self.k3_to_entry.len(),
+            "key3 index has {} entries, but there are {} entries",
+            self.k3_to_entry.len(),
+            self.entries.len()
+        );
+
+        // Check that the indexes are all correct.
+        for (ix, entry) in self.entries.iter().enumerate() {
+            let key1 = entry.key1();
+            let key2 = entry.key2();
+            let key3 = entry.key3();
+
+            let ix1 = self.k1_to_entry.get(&key1).context(format!(
+                "entry at index {ix} ({entry:?}) has no key1 index"
+            ))?;
+            let ix2 = self.k2_to_entry.get(&key2).context(format!(
+                "entry at index {ix} ({entry:?}) has no key2 index"
+            ))?;
+            let ix3 = self.k3_to_entry.get(&key3).context(format!(
+                "entry at index {ix} ({entry:?}) has no key3 index"
+            ))?;
+
+            if *ix1 != ix || *ix2 != ix || *ix3 != ix {
+                return Err(anyhow::anyhow!(
+                    "entry at index {} has mismatched indexes: key1: {}, key2: {}, key3: {}",
+                    ix,
+                    ix1,
+                    ix2,
+                    ix3
+                ));
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Inserts a value into the set, returning an error if any duplicates were
+    /// added.
+    pub(crate) fn insert_no_dups(
+        &mut self,
+        value: T,
+    ) -> Result<(), DuplicateEntry<T>> {
+        let mut dups = BTreeSet::new();
+
+        // Check for duplicates *before* inserting the new entry, because we
+        // don't want to partially insert the new entry and then have to roll
+        // back.
+        let e1 = detect_dup_or_insert(
+            self.k1_to_entry.entry(value.key1()),
+            &mut dups,
+        );
+        let e2 = detect_dup_or_insert(
+            self.k2_to_entry.entry(value.key2()),
+            &mut dups,
+        );
+        let e3 = detect_dup_or_insert(
+            self.k3_to_entry.entry(value.key3()),
+            &mut dups,
+        );
+
+        if !dups.is_empty() {
+            return Err(DuplicateEntry {
+                new: value,
+                dups: dups.iter().map(|ix| self.entries[*ix].clone()).collect(),
+            });
+        }
+
+        let next_index = self.entries.len();
+        self.entries.push(value);
+        // e1, e2 and e3 are all Some because if they were None, dups would be
+        // non-empty, and we'd have bailed out earlier.
+        e1.unwrap().insert(next_index);
+        e2.unwrap().insert(next_index);
+        e3.unwrap().insert(next_index);
+
+        Ok(())
+    }
+
+    pub(crate) fn get1<Q>(&self, key1: &Q) -> Option<&T>
+    where
+        T::K1: Borrow<Q>,
+        Q: Eq + Hash + ?Sized,
+    {
+        self.k1_to_entry.get(key1).map(|ix| &self.entries[*ix])
+    }
+
+    pub(crate) fn get2<Q>(&self, key2: &Q) -> Option<&T>
+    where
+        T::K2: Borrow<Q>,
+        Q: Eq + Hash + ?Sized,
+    {
+        self.k2_to_entry.get(key2).map(|ix| &self.entries[*ix])
+    }
+
+    pub(crate) fn get3<Q>(&self, key3: &Q) -> Option<&T>
+    where
+        T::K3: Borrow<Q>,
+        Q: Eq + Hash + ?Sized,
+    {
+        self.k3_to_entry.get(key3).map(|ix| &self.entries[*ix])
+    }
+}
+
+fn detect_dup_or_insert<'a, K>(
+    entry: hash_map::Entry<'a, K, usize>,
+    dups: &mut BTreeSet<usize>,
+) -> Option<hash_map::VacantEntry<'a, K, usize>> {
+    match entry {
+        hash_map::Entry::Vacant(slot) => Some(slot),
+        hash_map::Entry::Occupied(slot) => {
+            dups.insert(*slot.get());
+            None
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct DuplicateEntry<T: TriMapEntry> {
+    new: T,
+    dups: Vec<T>,
+}
+
+impl<T: TriMapEntry> fmt::Display for DuplicateEntry<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "duplicate entry: {:?} conflicts with existing: {:?}",
+            self.new, self.dups
+        )
+    }
+}
+
+impl<T: TriMapEntry> std::error::Error for DuplicateEntry<T> {}
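And a quick sketch of point lookups, reusing the hypothetical `DemoEntry` from the previous sketch. The `Borrow` bounds on `get2`/`get3` let callers pass `&str` even though `K2`/`K3` are `String`, and a rejected insert leaves the map untouched because the vacant index slots are only filled after the duplicate check passes:

#[test]
fn demo_lookups() {
    let mut map = TriMap::<DemoEntry>::new();
    let e = DemoEntry {
        id: 7,
        name: "gimlet".into(),
        addr: "fd00::1".into(),
    };
    map.insert_no_dups(e.clone()).unwrap();

    // Any one of the three keys reaches the same entry.
    assert_eq!(map.get1(&7), Some(&e));
    assert_eq!(map.get2("gimlet"), Some(&e)); // &str, via Borrow<str>
    assert_eq!(map.get3("fd00::1"), Some(&e));

    // A clash on any single key rejects the whole insert, and the map
    // is unchanged afterwards.
    let clash = DemoEntry {
        id: 7,
        name: "psc".into(),
        addr: "fd00::2".into(),
    };
    assert!(map.insert_no_dups(clash).is_err());
    assert_eq!(map.get2("psc"), None);
}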
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use proptest::prelude::*;
+    use test_strategy::{proptest, Arbitrary};
+
+    #[derive(
+        Clone, Debug, Eq, PartialEq, Arbitrary, Serialize, Deserialize,
+    )]
+    struct TestEntry {
+        key1: u8,
+        key2: char,
+        key3: String,
+        value: String,
+    }
+
+    impl TriMapEntry for TestEntry {
+        // These types are chosen to represent various kinds of keys in the
+        // proptest below.
+        //
+        // We use u8 since there can only be 256 values, increasing the
+        // likelihood of collisions in the proptest below.
+        type K1 = u8;
+        // char is chosen because the Arbitrary impl for it is biased towards
+        // ASCII, increasing the likelihood of collisions.
+        type K2 = char;
+        // String is a generally open-ended type that probably won't have many
+        // collisions.
+        type K3 = String;
+
+        fn key1(&self) -> Self::K1 {
+            self.key1
+        }
+
+        fn key2(&self) -> Self::K2 {
+            self.key2
+        }
+
+        fn key3(&self) -> Self::K3 {
+            self.key3.clone()
+        }
+    }
+
+    #[test]
+    fn test_insert_entry_no_dups() {
+        let mut map = TriMap::<TestEntry>::new();
+
+        // Add an element.
+        let v1 = TestEntry {
+            key1: 0,
+            key2: 'a',
+            key3: "x".to_string(),
+            value: "v".to_string(),
+        };
+        map.insert_no_dups(v1.clone()).unwrap();
+
+        // Add an exact duplicate, which should error out.
+        let error = map.insert_no_dups(v1.clone()).unwrap_err();
+        assert_eq!(&error.new, &v1);
+        assert_eq!(error.dups, vec![v1.clone()]);
+
+        // Add a duplicate against just key1, which should error out.
+        let v2 = TestEntry {
+            key1: 0,
+            key2: 'b',
+            key3: "y".to_string(),
+            value: "v".to_string(),
+        };
+        let error = map.insert_no_dups(v2.clone()).unwrap_err();
+        assert_eq!(&error.new, &v2);
+        assert_eq!(error.dups, vec![v1.clone()]);
+
+        // Add a duplicate against just key2, which should error out.
+        let v3 = TestEntry {
+            key1: 1,
+            key2: 'a',
+            key3: "y".to_string(),
+            value: "v".to_string(),
+        };
+        let error = map.insert_no_dups(v3.clone()).unwrap_err();
+        assert_eq!(&error.new, &v3);
+
+        // Add a duplicate against just key3, which should error out.
+        let v4 = TestEntry {
+            key1: 1,
+            key2: 'b',
+            key3: "x".to_string(),
+            value: "v".to_string(),
+        };
+        let error = map.insert_no_dups(v4.clone()).unwrap_err();
+        assert_eq!(&error.new, &v4);
+
+        // Add an entry that doesn't have any conflicts.
+        let v5 = TestEntry {
+            key1: 1,
+            key2: 'b',
+            key3: "y".to_string(),
+            value: "v".to_string(),
+        };
+        map.insert_no_dups(v5.clone()).unwrap();
+    }
+
+    /// Represents a naive version of `TriMap` that doesn't have any indexes
+    /// and does linear scans.
+    #[derive(Debug)]
+    struct NaiveTriMap {
+        entries: Vec<TestEntry>,
+    }
+
+    impl NaiveTriMap {
+        fn new() -> Self {
+            Self { entries: Vec::new() }
+        }
+
+        fn insert_entry_no_dups(
+            &mut self,
+            entry: TestEntry,
+        ) -> Result<(), DuplicateEntry<TestEntry>> {
+            let dups = self
+                .entries
+                .iter()
+                .filter(|e| {
+                    e.key1 == entry.key1
+                        || e.key2 == entry.key2
+                        || e.key3 == entry.key3
+                })
+                .cloned()
+                .collect::<Vec<_>>();
+
+            if !dups.is_empty() {
+                return Err(DuplicateEntry { new: entry, dups });
+            }
+
+            self.entries.push(entry);
+            Ok(())
+        }
+    }
+
+    #[derive(Debug, Arbitrary)]
+    enum Operation {
+        // Make inserts a bit more common to try and fill up the map.
+        #[weight(3)]
+        Insert(TestEntry),
+        Get1(u8),
+        Get2(char),
+        Get3(String),
+    }
+    #[proptest]
+    fn proptest_serialize_roundtrip(values: Vec<TestEntry>) {
+        let mut map = TriMap::<TestEntry>::new();
+        let mut first_error = None;
+        for value in values.clone() {
+            // Ignore errors from duplicates which are quite possible to occur
+            // here, since we're just testing serialization. But store the
+            // first error to ensure that deserialization returns errors.
+            if let Err(error) = map.insert_no_dups(value) {
+                if first_error.is_none() {
+                    first_error = Some(error);
+                }
+            }
+        }
+
+        let serialized = serde_json::to_string(&map).unwrap();
+        let deserialized: TriMap<TestEntry> =
+            serde_json::from_str(&serialized).unwrap();
+
+        assert_eq!(map.entries, deserialized.entries, "entries match");
+        // All of the indexes should be the same too.
+        assert_eq!(
+            map.k1_to_entry, deserialized.k1_to_entry,
+            "k1 indexes match"
+        );
+        assert_eq!(
+            map.k2_to_entry, deserialized.k2_to_entry,
+            "k2 indexes match"
+        );
+        assert_eq!(
+            map.k3_to_entry, deserialized.k3_to_entry,
+            "k3 indexes match"
+        );
+
+        // Try deserializing the full list of values directly, and see that the
+        // error reported is the same as first_error.
+        //
+        // Here we rely on the fact that a TriMap is serialized as just a
+        // vector.
+        let serialized = serde_json::to_string(&values).unwrap();
+        let res: Result<TriMap<TestEntry>, _> =
+            serde_json::from_str(&serialized);
+        match (first_error, res) {
+            (None, Ok(_)) => {} // No error, should be fine
+            (Some(first_error), Ok(_)) => {
+                panic!(
+                    "expected error ({first_error}), but deserialization succeeded"
+                )
+            }
+            (None, Err(error)) => {
+                panic!("unexpected error: {error}, deserialization should have succeeded")
+            }
+            (Some(first_error), Err(error)) => {
+                // first_error is the error from the map, and error is the
+                // deserialization error (which should always be a custom
+                // error, stored as a string).
+                let expected = first_error.to_string();
+                let actual = error.to_string();
+                assert_eq!(actual, expected, "error matches");
+            }
+        }
+    }
+
+    #[proptest(cases = 16)]
+    fn proptest_ops(
+        #[strategy(prop::collection::vec(any::<Operation>(), 0..1024))]
+        ops: Vec<Operation>,
+    ) {
+        let mut map = TriMap::<TestEntry>::new();
+        let mut naive_map = NaiveTriMap::new();
+
+        // Now perform the operations on both maps.
+        for op in ops {
+            match op {
+                Operation::Insert(entry) => {
+                    let map_res = map.insert_no_dups(entry.clone());
+                    let naive_res =
+                        naive_map.insert_entry_no_dups(entry.clone());
+
+                    assert_eq!(map_res.is_ok(), naive_res.is_ok());
+                    if let Err(map_err) = map_res {
+                        let naive_err = naive_res.unwrap_err();
+                        assert_eq!(map_err.new, naive_err.new);
+                        assert_eq!(map_err.dups, naive_err.dups);
+                    }
+
+                    map.validate().expect("map should be valid");
+                }
+                Operation::Get1(key1) => {
+                    let map_res = map.get1(&key1);
+                    let naive_res =
+                        naive_map.entries.iter().find(|e| e.key1 == key1);
+
+                    assert_eq!(map_res, naive_res);
+                }
+                Operation::Get2(key2) => {
+                    let map_res = map.get2(&key2);
+                    let naive_res =
+                        naive_map.entries.iter().find(|e| e.key2 == key2);
+
+                    assert_eq!(map_res, naive_res);
+                }
+                Operation::Get3(key3) => {
+                    let map_res = map.get3(&key3);
+                    let naive_res =
+                        naive_map.entries.iter().find(|e| e.key3 == key3);
+
+                    assert_eq!(map_res, naive_res);
+                }
+            }
+        }
+    }
+}
diff --git a/nexus/types/src/deployment/zone_type.rs b/nexus/types/src/deployment/zone_type.rs
index 035e0667bc0..9f663015cde 100644
--- a/nexus/types/src/deployment/zone_type.rs
+++ b/nexus/types/src/deployment/zone_type.rs
@@ -196,7 +196,7 @@ impl BlueprintZoneType {
 }
 
 pub mod blueprint_zone_type {
-    use crate::deployment::planning_input::OmicronZoneExternalFloatingAddr;
+    use crate::deployment::OmicronZoneExternalFloatingAddr;
     use crate::deployment::OmicronZoneExternalFloatingIp;
     use crate::deployment::OmicronZoneExternalSnatIp;
     use crate::inventory::OmicronZoneDataset;
diff --git a/package-manifest.toml b/package-manifest.toml
index e4f19ea29df..4862dcd91fc 100644
--- a/package-manifest.toml
+++ b/package-manifest.toml
@@ -592,7 +592,7 @@ only_for_targets.image = "standard"
 only_for_targets.switch = "asic"
 
 [package.pumpkind-gz]
-service_name = "pumpkind-gz"
+service_name = "pumpkind"
 source.type = "prebuilt"
 source.repo = "pumpkind"
 source.commit = "3fe9c306590fb2f28f54ace7fd18b3c126323683"
diff --git a/package/src/bin/omicron-package.rs b/package/src/bin/omicron-package.rs
index 3b8bd249186..09fa7ab178a 100644
--- a/package/src/bin/omicron-package.rs
+++ b/package/src/bin/omicron-package.rs
@@ -199,6 +199,25 @@ async fn do_dot(config: &Config) -> Result<()> {
     Ok(())
 }
 
+async fn do_list_outputs(
+    config: &Config,
+    output_directory: &Utf8Path,
+    intermediate: bool,
+) -> Result<()> {
+    for (name, package) in
+        config.package_config.packages_to_build(&config.target).0
+    {
+        if !intermediate
+            && package.output
+                == (PackageOutput::Zone {
intermediate_only: true }) + { + continue; + } + println!("{}", package.get_output_path(name, output_directory)); + } + Ok(()) +} + // The name reserved for the currently-in-use build target. const ACTIVE: &str = "active"; @@ -919,7 +938,7 @@ async fn main() -> Result<()> { tokio::fs::create_dir_all(&args.artifact_dir).await?; let logpath = args.artifact_dir.join("LOG"); let logfile = std::io::LineWriter::new(open_options.open(&logpath)?); - println!("Logging to: {}", std::fs::canonicalize(logpath)?.display()); + eprintln!("Logging to: {}", std::fs::canonicalize(logpath)?.display()); let drain = slog_bunyan::new(logfile).build().fuse(); let drain = slog_async::Async::new(drain).build().fuse(); @@ -981,6 +1000,10 @@ async fn main() -> Result<()> { SubCommand::Build(BuildCommand::Dot) => { do_dot(&get_config()?).await?; } + SubCommand::Build(BuildCommand::ListOutputs { intermediate }) => { + do_list_outputs(&get_config()?, &args.artifact_dir, *intermediate) + .await?; + } SubCommand::Build(BuildCommand::Package { disable_cache }) => { do_package(&get_config()?, &args.artifact_dir, *disable_cache) .await?; diff --git a/package/src/lib.rs b/package/src/lib.rs index bba1a3a0cdb..2b99cfbe07a 100644 --- a/package/src/lib.rs +++ b/package/src/lib.rs @@ -90,6 +90,11 @@ pub enum BuildCommand { }, /// Make a `dot` graph to visualize the package tree Dot, + /// List the output packages for the current target + ListOutputs { + #[clap(long)] + intermediate: bool, + }, /// Builds the packages specified in a manifest, and places them into an /// 'out' directory. Package { diff --git a/schema/crdb/allocate-subnet-decommissioned-sleds/up1.sql b/schema/crdb/allocate-subnet-decommissioned-sleds/up1.sql new file mode 100644 index 00000000000..adffd4a2cfc --- /dev/null +++ b/schema/crdb/allocate-subnet-decommissioned-sleds/up1.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.sled_underlay_subnet_allocation + ALTER PRIMARY KEY USING COLUMNS (hw_baseboard_id, sled_id); diff --git a/schema/crdb/allocate-subnet-decommissioned-sleds/up2.sql b/schema/crdb/allocate-subnet-decommissioned-sleds/up2.sql new file mode 100644 index 00000000000..ba67d093f49 --- /dev/null +++ b/schema/crdb/allocate-subnet-decommissioned-sleds/up2.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS sled_underlay_subnet_allocation_hw_baseboard_id_key CASCADE; diff --git a/schema/crdb/allocate-subnet-decommissioned-sleds/up3.sql b/schema/crdb/allocate-subnet-decommissioned-sleds/up3.sql new file mode 100644 index 00000000000..f96b3312c9d --- /dev/null +++ b/schema/crdb/allocate-subnet-decommissioned-sleds/up3.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX IF NOT EXISTS commissioned_sled_uniqueness + ON omicron.public.sled (serial_number, part_number) + WHERE sled_state != 'decommissioned'; diff --git a/schema/crdb/allocate-subnet-decommissioned-sleds/up4.sql b/schema/crdb/allocate-subnet-decommissioned-sleds/up4.sql new file mode 100644 index 00000000000..9489a61c2a0 --- /dev/null +++ b/schema/crdb/allocate-subnet-decommissioned-sleds/up4.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS serial_part_revision_unique CASCADE; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 47022bba3f8..afd5a75132e 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -148,15 +148,18 @@ CREATE TABLE IF NOT EXISTS omicron.public.sled ( sled_state omicron.public.sled_state NOT NULL, /* Generation number owned and incremented by the sled-agent */ - sled_agent_gen INT8 NOT NULL DEFAULT 1, - - -- This constraint should be upheld, even for deleted disks - -- in the 
fleet. - CONSTRAINT serial_part_revision_unique UNIQUE ( - serial_number, part_number, revision - ) + sled_agent_gen INT8 NOT NULL DEFAULT 1 ); +-- Add an index that ensures a given physical sled (identified by serial and +-- part number) can only be a commissioned member of the control plane once. +-- +-- TODO Should `sled` reference `hw_baseboard_id` instead of having its own +-- serial/part columns? +CREATE UNIQUE INDEX IF NOT EXISTS commissioned_sled_uniqueness + ON omicron.public.sled (serial_number, part_number) + WHERE sled_state != 'decommissioned'; + /* Add an index which lets us look up sleds on a rack */ CREATE UNIQUE INDEX IF NOT EXISTS lookup_sled_by_rack ON omicron.public.sled ( rack_id, @@ -222,7 +225,7 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_resource_by_sled ON omicron.public.sled CREATE TABLE IF NOT EXISTS omicron.public.sled_underlay_subnet_allocation ( -- The physical identity of the sled -- (foreign key into `hw_baseboard_id` table) - hw_baseboard_id UUID PRIMARY KEY, + hw_baseboard_id UUID, -- The rack to which a sled is being added -- (foreign key into `rack` table) @@ -240,7 +243,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.sled_underlay_subnet_allocation ( -- The octet that extends a /56 rack subnet to a /64 sled subnet -- -- Always between 33 and 255 inclusive - subnet_octet INT2 NOT NULL UNIQUE CHECK (subnet_octet BETWEEN 33 AND 255) + subnet_octet INT2 NOT NULL UNIQUE CHECK (subnet_octet BETWEEN 33 AND 255), + + PRIMARY KEY (hw_baseboard_id, sled_id) ); -- Add an index which allows pagination by {rack_id, sled_id} pairs. diff --git a/sled-agent/src/dump_setup.rs b/sled-agent/src/dump_setup.rs index 4717f8b49ed..02d3d41dd7b 100644 --- a/sled-agent/src/dump_setup.rs +++ b/sled-agent/src/dump_setup.rs @@ -82,6 +82,7 @@ //! rotated log files having the same modified time to the second), the //! number is incremented by 1 until no conflict remains. 
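The dump_setup.rs diff below replaces a `Mutex`-guarded worker polled from a dedicated `std::thread` with a tokio task that owns the worker outright and is driven over an `mpsc` channel, using `tokio::time::timeout` around `recv()` so the archival interval still fires when no new disk layout arrives. A minimal sketch of that pattern, assuming tokio's `rt`, `sync`, `time`, and `macros` features; `Cmd` and `Worker` are hypothetical stand-ins for `DumpSetupCmd` and `DumpSetupWorker`:

use std::time::Duration;
use tokio::sync::mpsc;

#[derive(Debug)]
enum Cmd {
    Update(u32),
}

struct Worker {
    state: u32,
    rx: mpsc::Receiver<Cmd>,
}

impl Worker {
    // The task owns its state, so no Mutex is needed. Each iteration
    // either applies a command or times out and runs periodic work.
    async fn run(mut self) {
        let tick = Duration::from_millis(10);
        loop {
            match tokio::time::timeout(tick, self.rx.recv()).await {
                Ok(Some(Cmd::Update(v))) => self.state = v,
                Ok(None) => break, // all senders dropped; shut down
                Err(_elapsed) => {} // tick expired; fall through
            }
            // periodic work would go here (reevaluate + archive)
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(16);
    let task = tokio::spawn(Worker { state: 0, rx }.run());
    tx.send(Cmd::Update(42)).await.unwrap();
    drop(tx); // closing the channel ends the loop
    task.await.unwrap();
}

Closing the channel (all senders dropped) becomes the shutdown signal, which is what the `Ok(None)` arm in the real `poll_file_archival` below handles.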
+use async_trait::async_trait; use camino::Utf8PathBuf; use derive_more::{AsRef, From}; use illumos_utils::coreadm::{CoreAdm, CoreFileOption}; @@ -97,8 +98,8 @@ use slog::Logger; use std::collections::HashSet; use std::ffi::OsString; use std::path::{Path, PathBuf}; -use std::sync::{Arc, Weak}; use std::time::{Duration, SystemTime, SystemTimeError, UNIX_EPOCH}; +use tokio::sync::mpsc::Receiver; use zone::{Zone, ZoneError}; const ZFS_PROP_USED: &str = "used"; @@ -117,14 +118,14 @@ struct DebugDataset(Utf8PathBuf); #[derive(AsRef, Clone, Debug, Eq, From, Hash, Ord, PartialEq, PartialOrd)] struct CoreDataset(Utf8PathBuf); -#[derive(AsRef, Clone, From)] -struct CoreZpool { +#[derive(AsRef, Clone, Debug, From)] +pub(super) struct CoreZpool { mount_config: MountConfig, name: ZpoolName, } -#[derive(AsRef, Clone, From)] -struct DebugZpool { +#[derive(AsRef, Clone, Debug, From)] +pub(super) struct DebugZpool { mount_config: MountConfig, name: ZpoolName, } @@ -167,6 +168,16 @@ trait GetMountpoint: AsRef { } } } + +#[derive(Debug)] +enum DumpSetupCmd { + UpdateDumpdevSetup { + dump_slices: Vec, + debug_datasets: Vec, + core_datasets: Vec, + }, +} + struct DumpSetupWorker { core_dataset_names: Vec, debug_dataset_names: Vec, @@ -182,33 +193,33 @@ struct DumpSetupWorker { savecored_slices: HashSet, log: Logger, + rx: Receiver, coredumpadm_invoker: Box, zfs_invoker: Box, zone_invoker: Box, } pub struct DumpSetup { - worker: Arc>, + tx: tokio::sync::mpsc::Sender, mount_config: MountConfig, - _poller: std::thread::JoinHandle<()>, + _poller: tokio::task::JoinHandle<()>, log: Logger, } impl DumpSetup { pub fn new(log: &Logger, mount_config: MountConfig) -> Self { - let worker = Arc::new(std::sync::Mutex::new(DumpSetupWorker::new( + let (tx, rx) = tokio::sync::mpsc::channel(16); + let worker = DumpSetupWorker::new( Box::new(RealCoreDumpAdm {}), Box::new(RealZfs {}), Box::new(RealZone {}), log.new(o!("component" => "DumpSetup-worker")), - ))); - let worker_weak = Arc::downgrade(&worker); - let log_poll = log.new(o!("component" => "DumpSetup-archival")); - let _poller = std::thread::spawn(move || { - Self::poll_file_archival(worker_weak, log_poll) - }); + rx, + ); + let _poller = + tokio::spawn(async move { worker.poll_file_archival().await }); let log = log.new(o!("component" => "DumpSetup")); - Self { worker, mount_config, _poller, log } + Self { tx, mount_config, _poller, log } } pub(crate) async fn update_dumpdev_setup( @@ -268,55 +279,16 @@ impl DumpSetup { } } - let savecore_lock = self.worker.clone(); - let log_tmp = log.new(o!("component" => "DumpSetup-mutex")); - tokio::task::spawn_blocking(move || match savecore_lock.lock() { - Ok(mut guard) => { - guard.update_disk_loadout( - m2_dump_slices, - u2_debug_datasets, - m2_core_datasets, - ); - } - Err(err) => { - error!(log_tmp, "DumpSetup mutex poisoned: {err:?}"); - } - }); - } - - fn poll_file_archival( - worker: Weak>, - log: Logger, - ) { - info!(log, "DumpSetup poll loop started."); - loop { - if let Some(mutex) = worker.upgrade() { - match mutex.lock() { - Ok(mut guard) => { - guard.reevaluate_choices(); - if let Err(err) = guard.archive_files() { - error!( - log, - "Failed to archive debug/dump files: {err:?}" - ); - } - } - Err(err) => { - error!( - log, - "DumpSetup mutex poisoned in poll thread: {err:?}" - ); - break; - } - } - } else { - info!( - log, - "DumpSetup weak pointer dropped, leaving poll loop." 
- ); - break; - } - std::thread::sleep(ARCHIVAL_INTERVAL); + if let Err(err) = self + .tx + .send(DumpSetupCmd::UpdateDumpdevSetup { + dump_slices: m2_dump_slices, + debug_datasets: u2_debug_datasets, + core_datasets: m2_core_datasets, + }) + .await + { + error!(log, "DumpSetup channel closed: {:?}", err.0); } } } @@ -331,9 +303,10 @@ enum ZfsGetError { Parse(#[from] std::num::ParseIntError), } +#[async_trait] trait CoreDumpAdmInvoker { fn coreadm(&self, core_dir: &Utf8PathBuf) -> Result<(), ExecutionError>; - fn dumpadm( + async fn dumpadm( &self, dump_slice: &Utf8PathBuf, savecore_dir: Option<&Utf8PathBuf>, @@ -378,14 +351,16 @@ trait ZfsInvoker { ) -> Utf8PathBuf; } +#[async_trait] trait ZoneInvoker { - fn get_zones(&self) -> Result, ArchiveLogsError>; + async fn get_zones(&self) -> Result, ArchiveLogsError>; } struct RealCoreDumpAdm {} struct RealZfs {} struct RealZone {} +#[async_trait] impl CoreDumpAdmInvoker for RealCoreDumpAdm { fn coreadm(&self, core_dir: &Utf8PathBuf) -> Result<(), ExecutionError> { let mut cmd = CoreAdm::new(); @@ -414,7 +389,7 @@ impl CoreDumpAdmInvoker for RealCoreDumpAdm { // function also invokes `savecore(8)` to save it into that directory. // On success, returns Ok(Some(stdout)) if `savecore(8)` was invoked, or // Ok(None) if it wasn't. - fn dumpadm( + async fn dumpadm( &self, dump_slice: &Utf8PathBuf, savecore_dir: Option<&Utf8PathBuf>, @@ -427,7 +402,7 @@ impl CoreDumpAdmInvoker for RealCoreDumpAdm { // which is in the ramdisk pool), because dumpadm refuses to do what // we ask otherwise. let tmp_crash = "/tmp/crash"; - std::fs::create_dir_all(tmp_crash).map_err(|err| { + tokio::fs::create_dir_all(tmp_crash).await.map_err(|err| { ExecutionError::ExecutionStart { command: format!("mkdir {tmp_crash:?}"), err, @@ -457,7 +432,7 @@ impl CoreDumpAdmInvoker for RealCoreDumpAdm { if savecore_dir.is_some() { // and does the dump slice have one to save off if let Ok(true) = - illumos_utils::dumpadm::dump_flag_is_valid(dump_slice) + illumos_utils::dumpadm::dump_flag_is_valid(dump_slice).await { return illumos_utils::dumpadm::SaveCore.execute(); } @@ -490,9 +465,11 @@ impl ZfsInvoker for RealZfs { } } +#[async_trait] impl ZoneInvoker for RealZone { - fn get_zones(&self) -> Result, ArchiveLogsError> { - Ok(zone::Adm::list_blocking()? + async fn get_zones(&self) -> Result, ArchiveLogsError> { + Ok(zone::Adm::list() + .await? .into_iter() .filter(|z| z.global() || z.name().starts_with(ZONE_PREFIX)) .collect::>()) @@ -505,6 +482,7 @@ impl DumpSetupWorker { zfs_invoker: Box, zone_invoker: Box, log: Logger, + rx: Receiver, ) -> Self { Self { core_dataset_names: vec![], @@ -517,24 +495,67 @@ impl DumpSetupWorker { known_core_dirs: vec![], savecored_slices: Default::default(), log, + rx, coredumpadm_invoker, zfs_invoker, zone_invoker, } } + async fn poll_file_archival(mut self) { + info!(self.log, "DumpSetup poll loop started."); + loop { + match tokio::time::timeout(ARCHIVAL_INTERVAL, self.rx.recv()).await + { + Ok(Some(DumpSetupCmd::UpdateDumpdevSetup { + dump_slices, + debug_datasets, + core_datasets, + })) => { + self.update_disk_loadout( + dump_slices, + debug_datasets, + core_datasets, + ); + } + Ok(None) => { + warn!( + self.log, + "Control channel closed, no more dump archival!" + ); + break; + } + Err(_elapsed) => { + // no new disks, just pump cores/logs with what we've got + } + } + // regardless of whether we updated disks, + // at least every ARCHIVAL_INTERVAL, + // figure out if we should change our target volumes... 
+ self.reevaluate_choices().await; + // and then do the actual archiving. + if let Err(err) = self.archive_files().await { + error!(self.log, "Failed to archive debug/dump files: {err:?}"); + } + } + } + fn update_disk_loadout( &mut self, dump_slices: Vec, debug_datasets: Vec, core_datasets: Vec, ) { + info!( + self.log, + "Updated view of disks"; + "core_datasets" => %core_datasets.len(), + "debug_datasets" => %debug_datasets.len(), + "dump_slices" => %dump_slices.len(), + ); self.core_dataset_names = core_datasets; self.debug_dataset_names = debug_datasets; - self.known_dump_slices = dump_slices; - - self.reevaluate_choices(); } // only allow mounted zfs datasets into 'known_*_dirs', @@ -554,7 +575,7 @@ impl DumpSetupWorker { .collect(); } - fn reevaluate_choices(&mut self) { + async fn reevaluate_choices(&mut self) { self.update_mounted_dirs(); self.known_dump_slices.sort(); @@ -609,7 +630,7 @@ impl DumpSetupWorker { self.chosen_debug_dir = None; } else { warn!(self.log, "All candidate debug/dump dirs are over usage threshold, removing older archived files"); - if let Err(err) = self.cleanup() { + if let Err(err) = self.cleanup().await { error!(self.log, "Couldn't clean up any debug/dump dirs, may hit dataset quota in {x:?}: {err:?}"); } else { self.chosen_debug_dir = None; @@ -665,7 +686,9 @@ impl DumpSetupWorker { // Let's try to see if it appears to have a kernel dump already match illumos_utils::dumpadm::dump_flag_is_valid( dump_slice.as_ref(), - ) { + ) + .await + { Ok(true) => { debug!(self.log, "Dump slice {dump_slice:?} appears to have a valid header; will attempt to savecore"); } @@ -676,7 +699,9 @@ impl DumpSetupWorker { debug!(self.log, "Dump slice {dump_slice:?} appears to be unused: {err:?}"); } } - if let Ok(saved) = self.dumpadm_and_savecore(&dump_slice) { + if let Ok(saved) = + self.dumpadm_and_savecore(&dump_slice).await + { if let Some(out) = saved { info!(self.log, "Previous dump on slice {dump_slice:?} saved, configured slice as target for new dumps. {out:?}"); } @@ -691,13 +716,16 @@ impl DumpSetupWorker { for dump_slice in &self.known_dump_slices { match illumos_utils::dumpadm::dump_flag_is_valid( dump_slice.as_ref(), - ) { + ) + .await + { Ok(false) => { // Have dumpadm write the config for crash dumps to be // on this slice, at least, until a U.2 comes along. match self .coredumpadm_invoker .dumpadm(dump_slice.as_ref(), None) + .await { Ok(_) => { info!(self.log, "Using dump device {dump_slice:?} with no savecore destination (no U.2 debug zvol yet)"); @@ -731,7 +759,7 @@ impl DumpSetupWorker { changed_slice = true; // temporarily changes the system's dump slice so savecore(8) // can update the header in the slice when it finishes... 
- match self.dumpadm_and_savecore(&dump_slice) { + match self.dumpadm_and_savecore(&dump_slice).await { Ok(saved) => { if let Some(stdout) = &saved { info!( @@ -759,6 +787,7 @@ impl DumpSetupWorker { if let Err(err) = self .coredumpadm_invoker .dumpadm(dump_slice.as_ref(), None) + .await { error!(self.log, "Could not restore dump slice to {dump_slice:?}: {err:?}"); } @@ -767,7 +796,7 @@ impl DumpSetupWorker { } } - fn archive_files(&self) -> std::io::Result<()> { + async fn archive_files(&self) -> tokio::io::Result<()> { if let Some(debug_dir) = &self.chosen_debug_dir { if self.known_core_dirs.is_empty() { info!(self.log, "No core dump locations yet known."); @@ -780,6 +809,7 @@ impl DumpSetupWorker { if let Err(err) = Self::copy_sync_and_remove(&entry.path(), &dest) + .await { error!( self.log, @@ -804,7 +834,7 @@ impl DumpSetupWorker { ); } - if let Err(err) = self.archive_logs() { + if let Err(err) = self.archive_logs().await { if !matches!(err, ArchiveLogsError::NoDebugDirYet) { error!( self.log, @@ -816,32 +846,32 @@ impl DumpSetupWorker { Ok(()) } - fn copy_sync_and_remove( + async fn copy_sync_and_remove( source: impl AsRef, dest: impl AsRef, - ) -> std::io::Result<()> { + ) -> tokio::io::Result<()> { let source = source.as_ref(); let dest = dest.as_ref(); - let mut dest_f = std::fs::File::create(&dest)?; - let mut src_f = std::fs::File::open(&source)?; + let mut dest_f = tokio::fs::File::create(&dest).await?; + let mut src_f = tokio::fs::File::open(&source).await?; - std::io::copy(&mut src_f, &mut dest_f)?; + tokio::io::copy(&mut src_f, &mut dest_f).await?; - dest_f.sync_all()?; + dest_f.sync_all().await?; drop(src_f); drop(dest_f); - std::fs::remove_file(source)?; + tokio::fs::remove_file(source).await?; Ok(()) } - fn archive_logs(&self) -> Result<(), ArchiveLogsError> { + async fn archive_logs(&self) -> Result<(), ArchiveLogsError> { let debug_dir = self .chosen_debug_dir .as_ref() .ok_or(ArchiveLogsError::NoDebugDirYet)?; - let oxz_zones = self.zone_invoker.get_zones()?; + let oxz_zones = self.zone_invoker.get_zones().await?; for zone in oxz_zones { let logdir = if zone.global() { PathBuf::from("/var/svc/log") @@ -849,12 +879,12 @@ impl DumpSetupWorker { zone.path().join("root/var/svc/log") }; let zone_name = zone.name(); - self.archive_logs_inner(debug_dir, logdir, zone_name)?; + self.archive_logs_inner(debug_dir, logdir, zone_name).await?; } Ok(()) } - fn archive_logs_inner( + async fn archive_logs_inner( &self, debug_dir: &DebugDataset, logdir: PathBuf, @@ -873,7 +903,7 @@ impl DumpSetupWorker { } let dest_dir = debug_dir.as_ref().join(zone_name).into_std_path_buf(); if !rotated_log_files.is_empty() { - std::fs::create_dir_all(&dest_dir)?; + tokio::fs::create_dir_all(&dest_dir).await?; let count = rotated_log_files.len(); info!( self.log, @@ -903,7 +933,7 @@ impl DumpSetupWorker { break; } } - if let Err(err) = Self::copy_sync_and_remove(&entry, dest) { + if let Err(err) = Self::copy_sync_and_remove(&entry, dest).await { warn!(self.log, "Failed to archive {entry:?}: {err:?}"); } } @@ -919,7 +949,7 @@ impl DumpSetupWorker { // for savecore to behave the way we want (i.e. clear the flag // after succeeding), we could hypothetically miss a dump if // the kernel crashes again while savecore is still running. 
- fn dumpadm_and_savecore( + async fn dumpadm_and_savecore( &mut self, dump_slice: &DumpSlicePath, ) -> Result, ExecutionError> { @@ -931,6 +961,7 @@ impl DumpSetupWorker { match self .coredumpadm_invoker .dumpadm(dump_slice.as_ref(), Some(&savecore_dir)) + .await { Ok(saved) => { self.savecored_slices.insert(dump_slice.clone()); @@ -940,10 +971,10 @@ impl DumpSetupWorker { } } - fn cleanup(&self) -> Result<(), CleanupError> { + async fn cleanup(&self) -> Result<(), CleanupError> { let mut dir_info = Vec::new(); for dir in &self.known_debug_dirs { - match self.scope_dir_for_cleanup(dir) { + match self.scope_dir_for_cleanup(dir).await { Ok(info) => { dir_info.push((info, dir)); } @@ -967,7 +998,7 @@ impl DumpSetupWorker { // the i/o error *may* be an issue with the underlying disk, so // we continue to the dataset with the next-oldest average age // of files-to-delete in the sorted list. - if let Err(err) = std::fs::remove_file(&path) { + if let Err(err) = tokio::fs::remove_file(&path).await { error!(self.log, "Couldn't delete {path:?} from debug dataset, skipping {dir:?}. {err:?}"); continue 'outer; } @@ -980,7 +1011,7 @@ impl DumpSetupWorker { Ok(()) } - fn scope_dir_for_cleanup( + async fn scope_dir_for_cleanup( &self, debug_dir: &DebugDataset, ) -> Result { @@ -999,7 +1030,7 @@ impl DumpSetupWorker { for path in glob::glob(debug_dir.as_ref().join("**/*").as_str())?.flatten() { - let meta = std::fs::metadata(&path)?; + let meta = tokio::fs::metadata(&path).await?; // we need this to be a Duration rather than SystemTime so we can // do math to it later. let time = meta.modified()?.duration_since(UNIX_EPOCH)?; @@ -1033,7 +1064,7 @@ impl DumpSetupWorker { #[derive(thiserror::Error, Debug)] pub enum ArchiveLogsError { #[error("I/O error: {0}")] - IoError(#[from] std::io::Error), + IoError(#[from] tokio::io::Error), #[error("Error calling zoneadm: {0}")] Zoneadm(#[from] ZoneError), #[error("Non-UTF8 zone path for zone {0}")] @@ -1053,7 +1084,7 @@ enum CleanupError { #[error("Failed to query ZFS properties: {0}")] ZfsError(#[from] ZfsGetError), #[error("I/O error: {0}")] - IoError(#[from] std::io::Error), + IoError(#[from] tokio::io::Error), #[error("Glob pattern invalid: {0}")] Glob(#[from] glob::PatternError), #[error("A file's observed modified time was before the Unix epoch: {0}")] @@ -1075,9 +1106,9 @@ mod tests { }; use sled_storage::dataset::{CRASH_DATASET, DUMP_DATASET}; use std::collections::HashMap; - use std::io::Write; use std::str::FromStr; use tempfile::TempDir; + use tokio::io::AsyncWriteExt; impl Clone for ZfsGetError { fn clone(&self) -> Self { @@ -1103,6 +1134,7 @@ mod tests { pub zones: Vec, } + #[async_trait] impl CoreDumpAdmInvoker for FakeCoreDumpAdm { fn coreadm( &self, @@ -1111,7 +1143,7 @@ mod tests { Ok(()) } - fn dumpadm( + async fn dumpadm( &self, _dump_slice: &Utf8PathBuf, _savecore_dir: Option<&Utf8PathBuf>, @@ -1168,14 +1200,15 @@ mod tests { .join(mountpoint) } } + #[async_trait] impl ZoneInvoker for FakeZone { - fn get_zones(&self) -> Result, ArchiveLogsError> { + async fn get_zones(&self) -> Result, ArchiveLogsError> { Ok(self.zones.clone()) } } - #[test] - fn test_does_not_configure_coreadm_when_no_crash_dataset_mounted() { + #[tokio::test] + async fn test_does_not_configure_coreadm_when_no_crash_dataset_mounted() { let logctx = omicron_test_utils::dev::test_setup_log( "test_does_not_configure_coreadm_when_no_crash_dataset_mounted", ); @@ -1193,10 +1226,12 @@ mod tests { }), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); // 
nothing when no disks worker.update_disk_loadout(vec![], vec![], vec![]); + worker.reevaluate_choices().await; assert_eq!(worker.chosen_core_dir, None); // nothing when only a disk that's not ready @@ -1205,12 +1240,13 @@ mod tests { name: ZpoolName::from_str(NOT_MOUNTED_INTERNAL).unwrap(), }; worker.update_disk_loadout(vec![], vec![], vec![non_mounted_zpool]); + worker.reevaluate_choices().await; assert_eq!(worker.chosen_core_dir, None); logctx.cleanup_successful(); } - #[test] - fn test_configures_coreadm_only_when_crash_dataset_mounted() { + #[tokio::test] + async fn test_configures_coreadm_only_when_crash_dataset_mounted() { let logctx = omicron_test_utils::dev::test_setup_log( "test_configures_coreadm_only_when_crash_dataset_mounted", ); @@ -1266,6 +1302,7 @@ mod tests { }), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); // something when there's one that's ready! @@ -1274,8 +1311,9 @@ mod tests { vec![], vec![non_mounted_zpool.clone(), mounted_zpool], ); + worker.reevaluate_choices().await; assert_eq!( - worker.chosen_core_dir.as_ref().unwrap().0, + worker.chosen_core_dir.as_ref().expect("core dir wasn't chosen").0, Utf8PathBuf::from(ZPOOL_MNT).join(CRASH_DATASET) ); @@ -1285,34 +1323,35 @@ mod tests { vec![], vec![non_mounted_zpool, err_zpool], ); + worker.reevaluate_choices().await; assert_eq!(worker.chosen_core_dir, None); logctx.cleanup_successful(); } // we make these so illumos_utils::dumpadm::dump_flag_is_valid returns what we want - fn populate_tempdir_with_fake_dumps( + async fn populate_tempdir_with_fake_dumps( tempdir: &TempDir, ) -> (DumpSlicePath, DumpSlicePath) { let occupied = DumpSlicePath( Utf8PathBuf::from_path_buf(tempdir.path().join("occupied.bin")) .unwrap(), ); - let mut f = std::fs::File::create(occupied.as_ref()).unwrap(); - f.write_all(&[0u8; DUMP_OFFSET as usize]).unwrap(); - f.write_all(&DUMP_MAGIC.to_le_bytes()).unwrap(); - f.write_all(&DUMP_VERSION.to_le_bytes()).unwrap(); - f.write_all(&DF_VALID.to_le_bytes()).unwrap(); + let mut f = tokio::fs::File::create(occupied.as_ref()).await.unwrap(); + f.write_all(&[0u8; DUMP_OFFSET as usize]).await.unwrap(); + f.write_all(&DUMP_MAGIC.to_le_bytes()).await.unwrap(); + f.write_all(&DUMP_VERSION.to_le_bytes()).await.unwrap(); + f.write_all(&DF_VALID.to_le_bytes()).await.unwrap(); drop(f); let vacant = DumpSlicePath( Utf8PathBuf::from_path_buf(tempdir.path().join("vacant.bin")) .unwrap(), ); - let mut f = std::fs::File::create(vacant.as_ref()).unwrap(); - f.write_all(&[0u8; DUMP_OFFSET as usize]).unwrap(); - f.write_all(&DUMP_MAGIC.to_le_bytes()).unwrap(); - f.write_all(&DUMP_VERSION.to_le_bytes()).unwrap(); - f.write_all(&0u32.to_le_bytes()).unwrap(); + let mut f = tokio::fs::File::create(vacant.as_ref()).await.unwrap(); + f.write_all(&[0u8; DUMP_OFFSET as usize]).await.unwrap(); + f.write_all(&DUMP_MAGIC.to_le_bytes()).await.unwrap(); + f.write_all(&DUMP_VERSION.to_le_bytes()).await.unwrap(); + f.write_all(&0u32.to_le_bytes()).await.unwrap(); drop(f); (occupied, vacant) @@ -1320,8 +1359,8 @@ mod tests { // if we only have two filled dump slices and nowhere to evacuate them, // don't configure a dump slice at all. 
- #[test] - fn test_savecore_and_dumpadm_not_called_when_occupied_and_no_dir() { + #[tokio::test] + async fn test_savecore_and_dumpadm_not_called_when_occupied_and_no_dir() { let logctx = omicron_test_utils::dev::test_setup_log( "test_savecore_and_dumpadm_not_called_when_occupied_and_no_dir", ); @@ -1330,15 +1369,17 @@ mod tests { Box::::default(), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); let tempdir = TempDir::new().unwrap(); - let (occupied, _) = populate_tempdir_with_fake_dumps(&tempdir); + let (occupied, _) = populate_tempdir_with_fake_dumps(&tempdir).await; worker.update_disk_loadout( vec![occupied.clone(), occupied], vec![], vec![], ); + worker.reevaluate_choices().await; assert!(worker.chosen_dump_slice.is_none()); logctx.cleanup_successful(); } @@ -1346,8 +1387,8 @@ mod tests { // if we have one dump slice that's free and one that's full, // and nowhere to savecore the full one, // we should always call dumpadm with the free one. - #[test] - fn test_dumpadm_called_when_vacant_slice_but_no_dir() { + #[tokio::test] + async fn test_dumpadm_called_when_vacant_slice_but_no_dir() { let logctx = omicron_test_utils::dev::test_setup_log( "test_dumpadm_called_when_vacant_slice_but_no_dir", ); @@ -1356,14 +1397,17 @@ mod tests { Box::::default(), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); let tempdir = TempDir::new().unwrap(); - let (occupied, vacant) = populate_tempdir_with_fake_dumps(&tempdir); + let (occupied, vacant) = + populate_tempdir_with_fake_dumps(&tempdir).await; worker.update_disk_loadout( vec![occupied, vacant.clone()], vec![], vec![], ); + worker.reevaluate_choices().await; assert_eq!(worker.chosen_dump_slice.as_ref(), Some(&vacant)); logctx.cleanup_successful(); } @@ -1371,8 +1415,8 @@ mod tests { // if we have two occupied dump slices, // but we also have somewhere to unload them, // call dumpadm and savecore. 
-    #[test]
-    fn test_savecore_and_dumpadm_invoked_when_slices_occupied_and_dir_is_available(
+    #[tokio::test]
+    async fn test_savecore_and_dumpadm_invoked_when_slices_occupied_and_dir_is_available(
     ) {
         let logctx = omicron_test_utils::dev::test_setup_log("test_savecore_and_dumpadm_invoked_when_slices_occupied_and_dir_is_available");
         const MOUNTED_EXTERNAL: &str =
@@ -1395,9 +1439,10 @@
             }),
             Box::<FakeZone>::default(),
             logctx.log.clone(),
+            tokio::sync::mpsc::channel(1).1,
         );
         let tempdir = TempDir::new().unwrap();
-        let (occupied, _) = populate_tempdir_with_fake_dumps(&tempdir);
+        let (occupied, _) = populate_tempdir_with_fake_dumps(&tempdir).await;
 
         let mounted_zpool = DebugZpool {
             mount_config: MountConfig::default(),
@@ -1408,6 +1453,8 @@
             vec![mounted_zpool],
             vec![],
         );
+        worker.reevaluate_choices().await;
+
         assert_eq!(worker.chosen_dump_slice.as_ref(), Some(&occupied));
         assert_eq!(
             worker.chosen_debug_dir.unwrap().0,
@@ -1416,8 +1463,8 @@
         logctx.cleanup_successful();
     }
 
-    #[test]
-    fn test_archives_rotated_logs_and_cores() {
+    #[tokio::test]
+    async fn test_archives_rotated_logs_and_cores() {
         let logctx = omicron_test_utils::dev::test_setup_log(
             "test_archives_rotated_logs_and_cores",
         );
@@ -1465,24 +1512,27 @@
             }),
             Box::new(FakeZone { zones: vec![zone.clone()] }),
             logctx.log.clone(),
+            tokio::sync::mpsc::channel(1).1,
         );
 
-        std::fs::create_dir_all(&core_dir).unwrap();
-        std::fs::create_dir_all(&debug_dir).unwrap();
-        std::fs::create_dir_all(&zone_logs).unwrap();
+        tokio::fs::create_dir_all(&core_dir).await.unwrap();
+        tokio::fs::create_dir_all(&debug_dir).await.unwrap();
+        tokio::fs::create_dir_all(&zone_logs).await.unwrap();
         const LOG_NAME: &'static str = "foo.log.0";
-        writeln!(
-            std::fs::File::create(zone_logs.join(LOG_NAME)).unwrap(),
-            "hello"
-        )
-        .unwrap();
+        tokio::fs::File::create(zone_logs.join(LOG_NAME))
+            .await
+            .expect("creating fake log")
+            .write_all(b"hello")
+            .await
+            .expect("writing fake log");
 
         const CORE_NAME: &str = "core.myzone.myexe.123.1690540950";
-        writeln!(
-            std::fs::File::create(core_dir.join(CORE_NAME)).unwrap(),
-            "crunch"
-        )
-        .unwrap();
+        tokio::fs::File::create(core_dir.join(CORE_NAME))
+            .await
+            .expect("creating fake core")
+            .write_all(b"crunch")
+            .await
+            .expect("writing fake core");
 
         let mounted_core_zpool = CoreZpool {
             mount_config: MountConfig::default(),
@@ -1498,7 +1548,8 @@
             vec![mounted_debug_zpool],
             vec![mounted_core_zpool],
         );
-        worker.archive_files().unwrap();
+        worker.reevaluate_choices().await;
+        worker.archive_files().await.unwrap();
 
         // it'll be renamed to use an epoch timestamp instead of .0
         let log_glob =
diff --git a/test-utils/src/dev/test_cmds.rs b/test-utils/src/dev/test_cmds.rs
index 51ade208f85..3c675ddfd99 100644
--- a/test-utils/src/dev/test_cmds.rs
+++ b/test-utils/src/dev/test_cmds.rs
@@ -160,10 +160,27 @@ pub fn redact_variable(input: &str) -> String {
         .replace_all(&s, "<REDACTED_UUID>")
         .to_string();
 
-    let s = regex::Regex::new(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z")
-        .unwrap()
-        .replace_all(&s, "<REDACTED_TIMESTAMP>")
-        .to_string();
+    let s = {
+        let mut new_s = String::with_capacity(s.len());
+        let mut last_match = 0;
+        for m in regex::Regex::new(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z")
+            .unwrap()
+            .find_iter(&s)
+        {
+            new_s.push_str(&s[last_match..m.start()]);
+            new_s.push_str("<REDACTED_");
+            // The match is always at least as wide as the replacement
+            // "<REDACTED_TIMESTAMP>", so this subtraction can't
+            // underflow. Insert spaces to match widths.
+            for _ in 0..(m.len() - "<REDACTED_TIMESTAMP>".len()) {
+                new_s.push(' ');
+            }
+            new_s.push_str("TIMESTAMP>");
+            last_match = m.end();
+        }
+        new_s.push_str(&s[last_match..]);
+        new_s
+    };
 
     // Replace formatted durations. These are pretty specific to the background
     // task output.
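The `redact_variable` change above switches the timestamp pattern to `\.\d+`, which matches fractional seconds of any width, and pads the replacement with spaces so the redacted line stays exactly as wide as the original; that keeps byte offsets and column alignment stable in golden-output comparisons. A standalone sketch of the same padding trick, assuming only the `regex` crate (`redact_timestamps` is a hypothetical helper, not the function in the diff):

// Width-preserving redaction: the replacement is padded to the exact
// width of the match it replaces.
fn redact_timestamps(s: &str) -> String {
    let re = regex::Regex::new(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z")
        .unwrap();
    let mut out = String::with_capacity(s.len());
    let mut last = 0;
    for m in re.find_iter(s) {
        out.push_str(&s[last..m.start()]);
        out.push_str("<REDACTED_");
        // saturating_sub keeps the sketch robust even though the match is
        // always at least as wide as "<REDACTED_TIMESTAMP>" here.
        for _ in 0..m.len().saturating_sub("<REDACTED_TIMESTAMP>".len()) {
            out.push(' ');
        }
        out.push_str("TIMESTAMP>");
        last = m.end();
    }
    out.push_str(&s[last..]);
    out
}

fn main() {
    let line = "task ran at 2024-05-08T01:02:03.456789Z (ok)";
    let redacted = redact_timestamps(line);
    assert_eq!(line.len(), redacted.len()); // widths match exactly
    println!("{redacted}");
}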
diff --git a/tools/build-host-image.sh b/tools/build-host-image.sh
deleted file mode 100755
index e90d800849e..00000000000
--- a/tools/build-host-image.sh
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o pipefail
-set -o xtrace
-
-function usage
-{
-    echo "usage: $0 [-fRB] HELIOS_PATH PACKAGES_TARBALL"
-    echo
-    echo "  -f   Force helios build despite git hash mismatch"
-    echo "  -R   Build recovery (trampoline) image"
-    echo "  -B   Build standard image"
-    exit 1
-}
-
-function main
-{
-    while getopts ":hfRBS:" opt; do
-        case $opt in
-            f)
-                FORCE=1
-                ;;
-            R)
-                BUILD_RECOVERY=1
-                HELIOS_BUILD_EXTRA_ARGS=-R
-                IMAGE_PREFIX=recovery
-                ;;
-            B)
-                BUILD_STANDARD=1
-                HELIOS_BUILD_EXTRA_ARGS=-B
-                IMAGE_PREFIX=ci
-                ;;
-            S)
-                SWITCH_ZONE=$OPTARG
-                ;;
-            h | \?)
-                usage
-                ;;
-        esac
-    done
-    shift $((OPTIND-1))
-
-    # Ensure we got either -R or -B but not both
-    case "x$BUILD_RECOVERY$BUILD_STANDARD" in
-        x11)
-            echo "specify at most one of -R, -B"
-            exit 1
-            ;;
-        x)
-            echo "must specify either -R or -B"
-            exit 1
-            ;;
-        *) ;;
-    esac
-
-    if [ "$#" != "2" ]; then
-        usage
-    fi
-    HELIOS_PATH=$1
-    GLOBAL_ZONE_TARBALL_PATH=$2
-
-    TOOLS_DIR="$(pwd)/$(dirname "$0")"
-
-    # Grab the opte version
-    OPTE_VER=$(cat "$TOOLS_DIR/opte_version")
-
-    # Assemble global zone files in a temporary directory.
-    if ! tmp_gz=$(mktemp -d); then
-        exit 1
-    fi
-    trap 'cd /; rm -rf "$tmp_gz"' EXIT
-
-    # Extract the global zone tarball into a tmp_gz directory
-    echo "Extracting gz packages into $tmp_gz"
-    ptime -m tar xvzf "$GLOBAL_ZONE_TARBALL_PATH" -C "$tmp_gz"
-
-    # If the user specified a switch zone (which is probably named
-    # `switch-SOME_VARIANT.tar.gz`), stage it in the right place and rename it
-    # to just `switch.tar.gz`.
- if [ "x$SWITCH_ZONE" != "x" ]; then - mkdir -p "$tmp_gz/root/opt/oxide" - cp "$SWITCH_ZONE" "$tmp_gz/root/opt/oxide/switch.tar.gz" - fi - - if [ "x$BUILD_STANDARD" != "x" ]; then - mkdir -p "$tmp_gz/root/root" - echo "# Add opteadm, ddmadm, oxlog to PATH" >> "$tmp_gz/root/root/.profile" - echo 'export PATH=$PATH:/opt/oxide/opte/bin:/opt/oxide/mg-ddm:/opt/oxide/oxlog' >> "$tmp_gz/root/root/.profile" - fi - - # Move to the helios checkout - cd "$HELIOS_PATH" - - HELIOS_REPO=https://pkg.oxide.computer/helios/2/dev/ - - # Build an image name that includes the omicron and host OS hashes - IMAGE_NAME="$IMAGE_PREFIX ${GITHUB_SHA:0:7}" - # The ${os_short_commit} token will be expanded by `helios-build` - IMAGE_NAME+='/${os_short_commit}' - IMAGE_NAME+=" $(date +'%Y-%m-%d %H:%M')" - - ./helios-build experiment-image \ - -p helios-dev="$HELIOS_REPO" \ - -F optever="$OPTE_VER" \ - -P "$tmp_gz/root" \ - -N "$IMAGE_NAME" \ - $HELIOS_BUILD_EXTRA_ARGS -} - -main "$@" diff --git a/tools/hubris_checksums b/tools/hubris_checksums deleted file mode 100644 index 913cc460c48..00000000000 --- a/tools/hubris_checksums +++ /dev/null @@ -1,8 +0,0 @@ -4d38415a186fb1058c991d0e5ed44711457526e32687ff48ab6d6feadd8b4aa4 build-gimlet-c-image-default-v1.0.13.zip -ead1988cfebb4f79c364a2207f0bda741b8dd0e4f02fb34b4d341c648ecaa733 build-gimlet-d-image-default-v1.0.13.zip -85f5fc9c206c5fc61b4c2380b94a337220e944d67c0cb6bb2cb2486f8d5bc193 build-gimlet-e-image-default-v1.0.13.zip -ac7d898369e94e33b3556a405352b24a1ee107ce877d416811d9e9fae1f1a1ec build-gimlet-f-image-default-v1.0.13.zip -8cf812dc4aacc013335eb932d2bfaf8a542dec7bc29ea671d9a4235c12d61564 build-psc-b-image-default-v1.0.13.zip -85622677eef52c6d210f44e82b2b6cdc5a8357e509744abe1693883b7635b38c build-psc-c-image-default-v1.0.13.zip -87d6cd4add1aabe53756ba8f66a461cd3aa08f1a0093f94ea81a35a6a175ed21 build-sidecar-b-image-default-v1.0.13.zip -d50d6f77da6fc736843b5418359532f18b7ffa090c2a3d68b5dc1d35281385f5 build-sidecar-c-image-default-v1.0.13.zip diff --git a/tools/hubris_version b/tools/hubris_version deleted file mode 100644 index 717d36cec2f..00000000000 --- a/tools/hubris_version +++ /dev/null @@ -1 +0,0 @@ -TAGS=(gimlet-v1.0.13 psc-v1.0.13 sidecar-v1.0.13) diff --git a/tools/permslip_commit b/tools/permslip_commit deleted file mode 100644 index 58140df7da9..00000000000 --- a/tools/permslip_commit +++ /dev/null @@ -1 +0,0 @@ -COMMIT=5d44e0065f90051a28881c75e3574142ada9b695 diff --git a/tools/permslip_staging b/tools/permslip_staging index 9ddec55a210..683cfc0ec90 100644 --- a/tools/permslip_staging +++ b/tools/permslip_staging @@ -1,4 +1,4 @@ -2138737caec2c771692389c7cc6f45110aefc7d86c49ea872d5090549d6e59c7 manifest-gimlet-v1.0.17.toml +a90127c6098a99100413f0f0d4eb97c96f825cff11a92c41b8b1c2927977b17a manifest-gimlet-v1.0.18.toml e34b2f363ed0e1399e175bfae9e5e50217255c7984154697180d8a2d4611f65d manifest-oxide-rot-1-v1.0.10.toml b56e35fae0f4ed9e84e4e4d40f6cc576ceb52e4fba400b83841eb47d35cbbf8b manifest-psc-v1.0.16.toml 9bd043382ad5c7cdb8f00a66e401a6c4b88e8d588915f304d2c261ea7df4d1b5 manifest-sidecar-v1.0.16.toml diff --git a/tufaceous-lib/src/assemble/manifest.rs b/tufaceous-lib/src/assemble/manifest.rs index 8825327c1dc..1c4a676f4c1 100644 --- a/tufaceous-lib/src/assemble/manifest.rs +++ b/tufaceous-lib/src/assemble/manifest.rs @@ -524,6 +524,8 @@ impl DeserializedFileArtifactSource { pub enum DeserializedControlPlaneZoneSource { File { path: Utf8PathBuf, + #[serde(skip_serializing_if = "Option::is_none")] + file_name: Option, }, Fake { name: String, @@ -542,12 +544,15 @@ 
impl DeserializedControlPlaneZoneSource { F: FnOnce(&str, CompositeEntry<'_>) -> Result, { let (name, data, mtime_source) = match self { - DeserializedControlPlaneZoneSource::File { path } => { + DeserializedControlPlaneZoneSource::File { path, file_name } => { let data = std::fs::read(path) .with_context(|| format!("failed to read {path}"))?; - let name = path.file_name().with_context(|| { - format!("zone path missing file name: {path}") - })?; + let name = file_name + .as_deref() + .or_else(|| path.file_name()) + .with_context(|| { + format!("zone path missing file name: {path}") + })?; // For now, always use the current time as the source. (Maybe // change this to use the mtime on disk in the future?) (name, data, MtimeSource::Now) diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs index 489e0da3659..2fc08972a69 100644 --- a/uuid-kinds/src/lib.rs +++ b/uuid-kinds/src/lib.rs @@ -62,5 +62,6 @@ impl_typed_uuid_kind! { Upstairs => "upstairs", UpstairsRepair => "upstairs_repair", UpstairsSession => "upstairs_session", + Vnic => "vnic", Zpool => "zpool", } diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 625832637a1..9bbbc28e3f1 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -46,6 +46,7 @@ either = { version = "1.11.0" } elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } ff = { version = "0.13.0", default-features = false, features = ["alloc"] } flate2 = { version = "1.0.30" } +fs-err = { version = "2.11.0", default-features = false, features = ["tokio"] } futures = { version = "0.3.30" } futures-channel = { version = "0.3.30", features = ["sink"] } futures-core = { version = "0.3.30" } @@ -79,13 +80,10 @@ num-traits = { version = "0.2.19", features = ["i128", "libm"] } openapiv3 = { version = "2.0.0", default-features = false, features = ["skip_serializing_defaults"] } peg-runtime = { version = "0.8.3", default-features = false, features = ["std"] } pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] } -petgraph = { version = "0.6.4", features = ["serde-1"] } +petgraph = { version = "0.6.5", features = ["serde-1"] } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } -ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } predicates = { version = "3.1.0" } -proc-macro2 = { version = "1.0.81" } -rand = { version = "0.8.5" } -rand_chacha = { version = "0.3.1", default-features = false, features = ["std"] } +proc-macro2 = { version = "1.0.82" } regex = { version = "1.10.4" } regex-automata = { version = "0.4.5", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } regex-syntax = { version = "0.8.2" } @@ -93,9 +91,9 @@ reqwest = { version = "0.11.27", features = ["blocking", "cookies", "json", "rus ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.17", features = ["bytes", "chrono", "uuid1"] } scopeguard = { version = "1.2.0" } -semver = { version = "1.0.22", features = ["serde"] } +semver = { version = "1.0.23", features = ["serde"] } serde = { version = "1.0.201", features = ["alloc", "derive", "rc"] } -serde_json = { version = "1.0.116", features = ["raw_value", "unbounded_depth"] } +serde_json = { version = "1.0.117", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.4.0", features = ["inline", "unicode"] } slog = { version 
= "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } @@ -121,7 +119,6 @@ uuid = { version = "1.8.0", features = ["serde", "v4"] } yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } zerocopy = { version = "0.7.32", features = ["derive", "simd"] } zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } -zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [build-dependencies] ahash = { version = "0.8.8" } @@ -153,6 +150,7 @@ either = { version = "1.11.0" } elliptic-curve = { version = "0.13.8", features = ["ecdh", "hazmat", "pem", "std"] } ff = { version = "0.13.0", default-features = false, features = ["alloc"] } flate2 = { version = "1.0.30" } +fs-err = { version = "2.11.0", default-features = false, features = ["tokio"] } futures = { version = "0.3.30" } futures-channel = { version = "0.3.30", features = ["sink"] } futures-core = { version = "0.3.30" } @@ -186,13 +184,10 @@ num-traits = { version = "0.2.19", features = ["i128", "libm"] } openapiv3 = { version = "2.0.0", default-features = false, features = ["skip_serializing_defaults"] } peg-runtime = { version = "0.8.3", default-features = false, features = ["std"] } pem-rfc7468 = { version = "0.7.0", default-features = false, features = ["std"] } -petgraph = { version = "0.6.4", features = ["serde-1"] } +petgraph = { version = "0.6.5", features = ["serde-1"] } postgres-types = { version = "0.2.6", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } -ppv-lite86 = { version = "0.2.17", default-features = false, features = ["simd", "std"] } predicates = { version = "3.1.0" } -proc-macro2 = { version = "1.0.81" } -rand = { version = "0.8.5" } -rand_chacha = { version = "0.3.1", default-features = false, features = ["std"] } +proc-macro2 = { version = "1.0.82" } regex = { version = "1.10.4" } regex-automata = { version = "0.4.5", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } regex-syntax = { version = "0.8.2" } @@ -200,9 +195,9 @@ reqwest = { version = "0.11.27", features = ["blocking", "cookies", "json", "rus ring = { version = "0.17.8", features = ["std"] } schemars = { version = "0.8.17", features = ["bytes", "chrono", "uuid1"] } scopeguard = { version = "1.2.0" } -semver = { version = "1.0.22", features = ["serde"] } +semver = { version = "1.0.23", features = ["serde"] } serde = { version = "1.0.201", features = ["alloc", "derive", "rc"] } -serde_json = { version = "1.0.116", features = ["raw_value", "unbounded_depth"] } +serde_json = { version = "1.0.117", features = ["raw_value", "unbounded_depth"] } sha2 = { version = "0.10.8", features = ["oid"] } similar = { version = "2.4.0", features = ["inline", "unicode"] } slog = { version = "2.7.0", features = ["dynamic-keys", "max_level_trace", "release_max_level_debug", "release_max_level_trace"] } @@ -229,7 +224,6 @@ uuid = { version = "1.8.0", features = ["serde", "v4"] } yasna = { version = "0.5.2", features = ["bit-vec", "num-bigint", "std", "time"] } zerocopy = { version = "0.7.32", features = ["derive", "simd"] } zeroize = { version = "1.7.0", features = ["std", "zeroize_derive"] } -zip = { version = "0.6.6", default-features = false, features = ["bzip2", "deflate"] } [target.x86_64-unknown-linux-gnu.dependencies] bitflags-f595c2ba2a3f28df = { package = "bitflags", version = "2.4.2", default-features = false, features = ["std"] }