diff --git a/.cargo/config b/.cargo/config.toml similarity index 100% rename from .cargo/config rename to .cargo/config.toml diff --git a/.config/nextest.toml b/.config/nextest.toml index 32fe543655..95d4c20102 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -3,7 +3,7 @@ # # The required version should be bumped up if we need new features, performance # improvements or bugfixes that are present in newer versions of nextest. -nextest-version = { required = "0.9.64", recommended = "0.9.67" } +nextest-version = { required = "0.9.64", recommended = "0.9.70" } experimental = ["setup-scripts"] diff --git a/.github/buildomat/build-and-test.sh b/.github/buildomat/build-and-test.sh index 7d0aadcb56..cc344522db 100755 --- a/.github/buildomat/build-and-test.sh +++ b/.github/buildomat/build-and-test.sh @@ -9,7 +9,7 @@ target_os=$1 # NOTE: This version should be in sync with the recommended version in # .config/nextest.toml. (Maybe build an automated way to pull the recommended # version in the future.) -NEXTEST_VERSION='0.9.67' +NEXTEST_VERSION='0.9.70' cargo --version rustc --version @@ -76,19 +76,6 @@ export RUSTC_BOOTSTRAP=1 # We report build progress to stderr, and the "--timings=json" output goes to stdout. ptime -m cargo build -Z unstable-options --timings=json --workspace --tests --locked --verbose 1> "$OUTPUT_DIR/crate-build-timings.json" -# If we are running on illumos we want to verify that we are not requiring -# system libraries outside of specific binaries. If we encounter this situation -# we bail. -# NB: `cargo xtask verify-libraries` runs `cargo build --bins` to ensure it can -# check the final executables. -if [[ $target_os == "illumos" ]]; then - banner verify-libraries - # This has a separate timeout from `cargo nextest` since `timeout` expects - # to run an external command and therefore we cannot run bash functions or - # subshells. - ptime -m timeout 10m cargo xtask verify-libraries -fi - # # We apply our own timeout to ensure that we get a normal failure on timeout # rather than a buildomat timeout. See oxidecomputer/buildomat#8. 
diff --git a/.github/buildomat/jobs/a4x2-deploy.sh b/.github/buildomat/jobs/a4x2-deploy.sh index dfc9191611..c8eb998b35 100755 --- a/.github/buildomat/jobs/a4x2-deploy.sh +++ b/.github/buildomat/jobs/a4x2-deploy.sh @@ -2,8 +2,7 @@ #: #: name = "a4x2-deploy" #: variety = "basic" -#: target = "lab-2.0-opte-0.27" -#: rust_toolchain = "stable" +#: target = "lab-2.0-opte-0.29" #: output_rules = [ #: "/out/falcon/*.log", #: "/out/falcon/*.err", @@ -13,7 +12,7 @@ #: "%/out/dhcp-server.log", #: ] #: skip_clone = true -#: enable = true +#: enable = false #: #: [dependencies.a4x2] #: job = "a4x2-prepare" diff --git a/.github/buildomat/jobs/a4x2-prepare.sh b/.github/buildomat/jobs/a4x2-prepare.sh index 79fa037139..1438ec06de 100755 --- a/.github/buildomat/jobs/a4x2-prepare.sh +++ b/.github/buildomat/jobs/a4x2-prepare.sh @@ -3,7 +3,7 @@ #: name = "a4x2-prepare" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "stable" +#: rust_toolchain = "1.78.0" #: output_rules = [ #: "=/out/cargo-bay-ce.tgz", #: "=/out/cargo-bay-cr1.tgz", @@ -20,7 +20,7 @@ #: access_repos = [ #: "oxidecomputer/testbed", #: ] -#: enable = true +#: enable = false source ./env.sh diff --git a/.github/buildomat/jobs/build-and-test-helios.sh b/.github/buildomat/jobs/build-and-test-helios.sh index cfcbb61475..b63d2e783f 100755 --- a/.github/buildomat/jobs/build-and-test-helios.sh +++ b/.github/buildomat/jobs/build-and-test-helios.sh @@ -3,7 +3,7 @@ #: name = "build-and-test (helios)" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.1" +#: rust_toolchain = "1.78.0" #: output_rules = [ #: "%/work/*", #: "%/var/tmp/omicron_tmp/*", diff --git a/.github/buildomat/jobs/build-and-test-linux.sh b/.github/buildomat/jobs/build-and-test-linux.sh index 22332ce65c..4a1f86c3e1 100755 --- a/.github/buildomat/jobs/build-and-test-linux.sh +++ b/.github/buildomat/jobs/build-and-test-linux.sh @@ -3,7 +3,7 @@ #: name = "build-and-test (ubuntu-22.04)" #: variety = "basic" #: target = "ubuntu-22.04" -#: rust_toolchain = "1.72.1" +#: rust_toolchain = "1.78.0" #: output_rules = [ #: "%/work/*", #: "%/var/tmp/omicron_tmp/*", diff --git a/.github/buildomat/jobs/ci-tools.sh b/.github/buildomat/jobs/ci-tools.sh deleted file mode 100755 index ce17d4fb30..0000000000 --- a/.github/buildomat/jobs/ci-tools.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -#: -#: name = "helios / CI tools" -#: variety = "basic" -#: target = "helios-2.0" -#: rust_toolchain = "1.72.1" -#: output_rules = [ -#: "=/work/end-to-end-tests/*.gz", -#: "=/work/caboose-util.gz", -#: "=/work/tufaceous.gz", -#: "=/work/commtest", -#: ] - -set -o errexit -set -o pipefail -set -o xtrace - -cargo --version -rustc --version - -ptime -m ./tools/install_builder_prerequisites.sh -yp - -########## end-to-end-tests ########## - -banner end-to-end-tests - -# -# Reduce debuginfo just to line tables. 
-# -export CARGO_PROFILE_DEV_DEBUG=1 -export CARGO_PROFILE_TEST_DEBUG=1 -export CARGO_INCREMENTAL=0 - -ptime -m cargo build --locked -p end-to-end-tests --tests --bin bootstrap \ - --message-format json-render-diagnostics >/tmp/output.end-to-end.json - -mkdir -p /work -ptime -m cargo build --locked -p end-to-end-tests --tests --bin commtest -cp target/debug/commtest /work/commtest - -mkdir -p /work/end-to-end-tests -for p in target/debug/bootstrap $(/opt/ooce/bin/jq -r 'select(.profile.test) | .executable' /tmp/output.end-to-end.json); do - # shellcheck disable=SC2094 - ptime -m gzip < "$p" > /work/end-to-end-tests/"$(basename "$p").gz" -done - -########## caboose-util ########## - -banner caboose-util - -ptime -m cargo build --locked -p caboose-util --release -ptime -m gzip < target/release/caboose-util > /work/caboose-util.gz - -########## tufaceous ########## - -banner tufaceous - -ptime -m cargo build --locked -p tufaceous --release -ptime -m gzip < target/release/tufaceous > /work/tufaceous.gz diff --git a/.github/buildomat/jobs/clippy.sh b/.github/buildomat/jobs/clippy.sh index abbcda2150..1f4c578e47 100755 --- a/.github/buildomat/jobs/clippy.sh +++ b/.github/buildomat/jobs/clippy.sh @@ -3,7 +3,7 @@ #: name = "clippy (helios)" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.1" +#: rust_toolchain = "1.78.0" #: output_rules = [] # Run clippy on illumos (not just other systems) because a bunch of our code diff --git a/.github/buildomat/jobs/deploy.sh b/.github/buildomat/jobs/deploy.sh index 6574ac839c..31733f0dc0 100755 --- a/.github/buildomat/jobs/deploy.sh +++ b/.github/buildomat/jobs/deploy.sh @@ -2,7 +2,7 @@ #: #: name = "helios / deploy" #: variety = "basic" -#: target = "lab-2.0-opte-0.28" +#: target = "lab-2.0-opte-0.29" #: output_rules = [ #: "%/var/svc/log/oxide-sled-agent:default.log*", #: "%/zone/oxz_*/root/var/svc/log/oxide-*.log*", @@ -20,8 +20,6 @@ #: [dependencies.package] #: job = "helios / package" #: -#: [dependencies.ci-tools] -#: job = "helios / CI tools" set -o errexit set -o pipefail @@ -144,13 +142,6 @@ pfexec chown build:build /opt/oxide/work cd /opt/oxide/work ptime -m tar xvzf /input/package/work/package.tar.gz -cp /input/package/work/zones/* out/ -mv out/nexus-single-sled.tar.gz out/nexus.tar.gz -mkdir tests -for p in /input/ci-tools/work/end-to-end-tests/*.gz; do - ptime -m gunzip < "$p" > "tests/$(basename "${p%.gz}")" - chmod a+x "tests/$(basename "${p%.gz}")" -done # Ask buildomat for the range of extra addresses that we're allowed to use, and # break them up into the ranges we need. @@ -201,11 +192,16 @@ routeadm -e ipv4-forwarding -u PXA_START="$EXTRA_IP_START" PXA_END="$EXTRA_IP_END" -# These variables are used by softnpu_init, so export them. -export GATEWAY_IP GATEWAY_MAC PXA_START PXA_END - pfexec zpool create -f scratch c1t1d0 c2t1d0 -ZPOOL_VDEV_DIR=/scratch ptime -m pfexec ./tools/create_virtual_hardware.sh + +ptime -m \ + pfexec ./target/release/xtask virtual-hardware \ + --vdev-dir /scratch \ + create \ + --gateway-ip "$GATEWAY_IP" \ + --gateway-mac "$GATEWAY_MAC" \ + --pxa-start "$PXA_START" \ + --pxa-end "$PXA_END" # # Generate a self-signed certificate to use as the initial TLS certificate for @@ -214,7 +210,12 @@ ZPOOL_VDEV_DIR=/scratch ptime -m pfexec ./tools/create_virtual_hardware.sh # real system, the certificate would come from the customer during initial rack # setup on the technician port. 
# -tar xf out/omicron-sled-agent.tar pkg/config-rss.toml +tar xf out/omicron-sled-agent.tar pkg/config-rss.toml pkg/config.toml + +# Update the vdevs to point to where we've created them +sed -E -i~ "s/(m2|u2)(.*\.vdev)/\/scratch\/\1\2/g" pkg/config.toml +diff -u pkg/config.toml{~,} || true + SILO_NAME="$(sed -n 's/silo_name = "\(.*\)"/\1/p' pkg/config-rss.toml)" EXTERNAL_DNS_DOMAIN="$(sed -n 's/external_dns_zone_name = "\(.*\)"/\1/p' pkg/config-rss.toml)" @@ -241,8 +242,8 @@ addresses = \\[\"$UPLINK_IP/24\"\\] " pkg/config-rss.toml diff -u pkg/config-rss.toml{~,} || true -tar rvf out/omicron-sled-agent.tar pkg/config-rss.toml -rm -f pkg/config-rss.toml* +tar rvf out/omicron-sled-agent.tar pkg/config-rss.toml pkg/config.toml +rm -f pkg/config-rss.toml* pkg/config.toml* # # By default, OpenSSL creates self-signed certificates with "CA:true". The TLS @@ -344,7 +345,7 @@ echo "Waited for nexus: ${retry}s" export RUST_BACKTRACE=1 export E2E_TLS_CERT IPPOOL_START IPPOOL_END -eval "$(./tests/bootstrap)" +eval "$(./target/debug/bootstrap)" export OXIDE_HOST OXIDE_TOKEN # @@ -377,7 +378,6 @@ done /usr/oxide/oxide --resolve "$OXIDE_RESOLVE" --cacert "$E2E_TLS_CERT" \ image promote --project images --image debian11 -rm ./tests/bootstrap for test_bin in tests/*; do ./"$test_bin" done diff --git a/.github/buildomat/jobs/host-image.sh b/.github/buildomat/jobs/host-image.sh deleted file mode 100755 index 2f4d146a48..0000000000 --- a/.github/buildomat/jobs/host-image.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/bash -#: -#: name = "helios / build OS images" -#: variety = "basic" -#: target = "helios-2.0" -#: rust_toolchain = "1.72.1" -#: output_rules = [ -#: "=/work/helios/upload/os-host.tar.gz", -#: "=/work/helios/upload/os-trampoline.tar.gz", -#: ] -#: access_repos = [ -#: "oxidecomputer/amd-apcb", -#: "oxidecomputer/amd-efs", -#: "oxidecomputer/amd-firmware", -#: "oxidecomputer/amd-flash", -#: "oxidecomputer/amd-host-image-builder", -#: "oxidecomputer/boot-image-tools", -#: "oxidecomputer/chelsio-t6-roms", -#: "oxidecomputer/compliance-pilot", -#: "oxidecomputer/facade", -#: "oxidecomputer/helios", -#: "oxidecomputer/helios-omicron-brand", -#: "oxidecomputer/helios-omnios-build", -#: "oxidecomputer/helios-omnios-extra", -#: "oxidecomputer/nanobl-rs", -#: ] -#: -#: [dependencies.package] -#: job = "helios / package" -#: -#: [[publish]] -#: series = "image" -#: name = "os.tar.gz" -#: from_output = "/work/helios/image/output/os.tar.gz" -#: - -set -o errexit -set -o pipefail -set -o xtrace - -cargo --version -rustc --version - -TOP=$PWD - -source "$TOP/tools/include/force-git-over-https.sh" - -# Check out helios into /work/helios -HELIOSDIR=/work/helios -git clone https://github.com/oxidecomputer/helios.git "$HELIOSDIR" -cd "$HELIOSDIR" -# Record the branch and commit in the output -git status --branch --porcelain=2 -# Setting BUILD_OS to no makes setup skip repositories we don't need for -# building the OS itself (we are just building an image from already built OS). -BUILD_OS=no gmake setup - -# Commands that "helios-build" would ask us to run (either explicitly or -# implicitly, to avoid an error). -rc=0 -pfexec pkg install -q /system/zones/brand/omicron1/tools || rc=$? -case $rc in - # `man pkg` notes that exit code 4 means no changes were made because - # there is nothing to do; that's fine. Any other exit code is an error. - 0 | 4) ;; - *) exit $rc ;; -esac - -pfexec zfs create -p "rpool/images/$USER" - - -# TODO: Consider importing zones here too? 
- -cd "$TOP" -OUTPUTDIR="$HELIOSDIR/upload" -mkdir "$OUTPUTDIR" - -banner OS -./tools/build-host-image.sh -B \ - -S /input/package/work/zones/switch-asic.tar.gz \ - "$HELIOSDIR" \ - /input/package/work/global-zone-packages.tar.gz - -mv "$HELIOSDIR/image/output/os.tar.gz" "$OUTPUTDIR/os-host.tar.gz" - -banner Trampoline - -./tools/build-host-image.sh -R \ - "$HELIOSDIR" \ - /input/package/work/trampoline-global-zone-packages.tar.gz - -mv "$HELIOSDIR/image/output/os.tar.gz" "$OUTPUTDIR/os-trampoline.tar.gz" - diff --git a/.github/buildomat/jobs/omicron-common.sh b/.github/buildomat/jobs/omicron-common.sh new file mode 100755 index 0000000000..345d99f405 --- /dev/null +++ b/.github/buildomat/jobs/omicron-common.sh @@ -0,0 +1,25 @@ +#!/bin/bash +#: +#: name = "omicron-common (helios)" +#: variety = "basic" +#: target = "helios-2.0" +#: rust_toolchain = "1.78.0" +#: output_rules = [] + +# Verify that omicron-common builds successfully when used as a dependency +# in an external project. It must not leak anything that requires an external +# dependency (apart from OpenSSL/pkg-config). + +set -o errexit +set -o pipefail +set -o xtrace + +cargo --version +rustc --version + +cd /tmp +cargo new --lib test-project +cd test-project +cargo add omicron-common --path /work/oxidecomputer/omicron/common +cargo check +cargo build --release diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index d290976d9f..81ed41a961 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -3,24 +3,11 @@ #: name = "helios / package" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.1" +#: rust_toolchain = "1.78.0" #: output_rules = [ -#: "=/work/version.txt", #: "=/work/package.tar.gz", -#: "=/work/global-zone-packages.tar.gz", -#: "=/work/trampoline-global-zone-packages.tar.gz", -#: "=/work/zones/*.tar.gz", #: ] #: -#: [[publish]] -#: series = "image" -#: name = "global-zone-packages" -#: from_output = "/work/global-zone-packages.tar.gz" -#: -#: [[publish]] -#: series = "image" -#: name = "trampoline-global-zone-packages" -#: from_output = "/work/trampoline-global-zone-packages.tar.gz" set -o errexit set -o pipefail @@ -29,16 +16,8 @@ set -o xtrace cargo --version rustc --version -# -# Generate the version for control plane artifacts here. We use `0.git` as the -# prerelease field because it comes before `alpha`. -# -# In this job, we stamp the version into packages installed in the host and -# trampoline global zone images. -# -COMMIT=$(git rev-parse HEAD) -VERSION="7.0.0-0.ci+git${COMMIT:0:11}" -echo "$VERSION" >/work/version.txt +WORK=/work +pfexec mkdir -p $WORK && pfexec chown $USER $WORK ptime -m ./tools/install_builder_prerequisites.sh -yp ptime -m ./tools/ci_download_softnpu_machinery @@ -49,95 +28,33 @@ ptime -m cargo run --locked --release --bin omicron-package -- \ -t test target create -i standard -m non-gimlet -s softnpu -r single-sled ptime -m cargo run --locked --release --bin omicron-package -- \ -t test package +mapfile -t packages \ + < <(cargo run --locked --release --bin omicron-package -- -t test list-outputs) + +# Build the xtask binary used by the deploy job +ptime -m cargo build --locked --release -p xtask + +# Build the end-to-end tests +# Reduce debuginfo just to line tables. 
+export CARGO_PROFILE_DEV_DEBUG=line-tables-only +export CARGO_PROFILE_TEST_DEBUG=line-tables-only +ptime -m cargo build --locked -p end-to-end-tests --tests --bin bootstrap \ + --message-format json-render-diagnostics >/tmp/output.end-to-end.json +mkdir tests +/opt/ooce/bin/jq -r 'select(.profile.test) | .executable' /tmp/output.end-to-end.json \ + | xargs -I {} -t cp {} tests/ -# Assemble some utilities into a tarball that can be used by deployment -# phases of buildomat. +# Assemble these outputs and some utilities into a tarball that can be used by +# deployment phases of buildomat. files=( - out/*.tar out/target/test out/npuzone/* package-manifest.toml smf/sled-agent/non-gimlet/config.toml target/release/omicron-package - tools/create_virtual_hardware.sh - tools/virtual_hardware.sh - tools/scrimlet/* + target/release/xtask + target/debug/bootstrap + tests/* ) - -pfexec mkdir -p /work && pfexec chown $USER /work -ptime -m tar cvzf /work/package.tar.gz "${files[@]}" - -tarball_src_dir="$(pwd)/out/versioned" -stamp_packages() { - for package in "$@"; do - # TODO: remove once https://github.com/oxidecomputer/omicron-package/pull/54 lands - if [[ $package == mg-ddm-gz ]]; then - echo "0.0.0" > VERSION - tar rvf "out/$package.tar" VERSION - rm VERSION - fi - - cargo run --locked --release --bin omicron-package -- stamp "$package" "$VERSION" - done -} - -# Keep the single-sled Nexus zone around for the deploy job. (The global zone -# build below overwrites the file.) -mv out/nexus.tar.gz out/nexus-single-sled.tar.gz - -# Build necessary for the global zone -ptime -m cargo run --locked --release --bin omicron-package -- \ - -t host target create -i standard -m gimlet -s asic -r multi-sled -ptime -m cargo run --locked --release --bin omicron-package -- \ - -t host package -stamp_packages omicron-sled-agent mg-ddm-gz propolis-server overlay oxlog - -# Create global zone package @ /work/global-zone-packages.tar.gz -ptime -m ./tools/build-global-zone-packages.sh "$tarball_src_dir" /work - -# Non-Global Zones - -# Assemble Zone Images into their respective output locations. -# -# Zones that are included into another are intentionally omitted from this list -# (e.g., the switch zone tarballs contain several other zone tarballs: dendrite, -# mg-ddm, etc.). -# -# Note that when building for a real gimlet, `propolis-server` and `switch-*` -# should be included in the OS ramdisk. 
-mkdir -p /work/zones -zones=( - out/clickhouse.tar.gz - out/clickhouse_keeper.tar.gz - out/cockroachdb.tar.gz - out/crucible-pantry-zone.tar.gz - out/crucible-zone.tar.gz - out/external-dns.tar.gz - out/internal-dns.tar.gz - out/nexus.tar.gz - out/nexus-single-sled.tar.gz - out/oximeter.tar.gz - out/propolis-server.tar.gz - out/switch-*.tar.gz - out/ntp.tar.gz - out/omicron-gateway-softnpu.tar.gz - out/omicron-gateway-asic.tar.gz - out/overlay.tar.gz - out/probe.tar.gz -) -cp "${zones[@]}" /work/zones/ - -# -# Global Zone files for Trampoline image -# - -# Build necessary for the trampoline image -ptime -m cargo run --locked --release --bin omicron-package -- \ - -t recovery target create -i trampoline -ptime -m cargo run --locked --release --bin omicron-package -- \ - -t recovery package -stamp_packages installinator mg-ddm-gz - -# Create trampoline global zone package @ /work/trampoline-global-zone-packages.tar.gz -ptime -m ./tools/build-trampoline-global-zone-packages.sh "$tarball_src_dir" /work +ptime -m tar cvzf $WORK/package.tar.gz "${files[@]}" "${packages[@]}" diff --git a/.github/buildomat/jobs/tuf-repo.sh b/.github/buildomat/jobs/tuf-repo.sh index 56cb41b51b..5b2d1bd405 100755 --- a/.github/buildomat/jobs/tuf-repo.sh +++ b/.github/buildomat/jobs/tuf-repo.sh @@ -3,210 +3,66 @@ #: name = "helios / build TUF repo" #: variety = "basic" #: target = "helios-2.0" +#: rust_toolchain = "1.78.0" #: output_rules = [ -#: "=/work/manifest*.toml", -#: "=/work/repo-*.zip.part*", -#: "=/work/repo-*.zip.sha256.txt", +#: "=/work/manifest.toml", +#: "=/work/repo.zip", +#: "=/work/repo.zip.sha256.txt", +#: "%/work/*.log", #: ] #: access_repos = [ -#: "oxidecomputer/dvt-dock", +#: "oxidecomputer/amd-apcb", +#: "oxidecomputer/amd-efs", +#: "oxidecomputer/amd-firmware", +#: "oxidecomputer/amd-flash", +#: "oxidecomputer/amd-host-image-builder", +#: "oxidecomputer/boot-image-tools", +#: "oxidecomputer/chelsio-t6-roms", +#: "oxidecomputer/compliance-pilot", +#: "oxidecomputer/facade", +#: "oxidecomputer/helios", +#: "oxidecomputer/helios-omicron-brand", +#: "oxidecomputer/helios-omnios-build", +#: "oxidecomputer/helios-omnios-extra", +#: "oxidecomputer/nanobl-rs", #: ] #: -#: [dependencies.ci-tools] -#: job = "helios / CI tools" -#: -#: [dependencies.package] -#: job = "helios / package" -#: -#: [dependencies.host] -#: job = "helios / build OS images" -#: #: [[publish]] #: series = "rot-all" -#: name = "repo.zip.parta" -#: from_output = "/work/repo-rot-all.zip.parta" +#: name = "manifest.toml" +#: from_output = "/work/manifest.toml" #: #: [[publish]] #: series = "rot-all" -#: name = "repo.zip.partb" -#: from_output = "/work/repo-rot-all.zip.partb" +#: name = "repo.zip" +#: from_output = "/work/repo.zip" #: #: [[publish]] #: series = "rot-all" #: name = "repo.zip.sha256.txt" -#: from_output = "/work/repo-rot-all.zip.sha256.txt" +#: from_output = "/work/repo.zip.sha256.txt" #: set -o errexit set -o pipefail set -o xtrace -ALL_BOARDS=(gimlet-{c..f} psc-{b..c} sidecar-{b..c}) - -TOP=$PWD -VERSION=$(< /input/package/work/version.txt) - -for bin in caboose-util tufaceous; do - ptime -m gunzip < /input/ci-tools/work/$bin.gz > /work/$bin - chmod a+x /work/$bin -done - -# -# We do two things here: -# 1. Run `omicron-package stamp` on all the zones. -# 2. Run `omicron-package unpack` to switch from "package-name.tar.gz" to "service_name.tar.gz". 
-# -mkdir /work/package -pushd /work/package -tar xf /input/package/work/package.tar.gz out package-manifest.toml target/release/omicron-package -target/release/omicron-package -t default target create -i standard -m gimlet -s asic -r multi-sled -ln -s /input/package/work/zones/* out/ -rm out/switch-softnpu.tar.gz # not used when target switch=asic -rm out/omicron-gateway-softnpu.tar.gz # not used when target switch=asic -rm out/nexus-single-sled.tar.gz # only used for deploy tests -for zone in out/*.tar.gz; do - target/release/omicron-package stamp "$(basename "${zone%.tar.gz}")" "$VERSION" -done -mv out/versioned/* out/ -OMICRON_NO_UNINSTALL=1 target/release/omicron-package unpack --out install -popd - -# Generate a throwaway repository key. -python3 -c 'import secrets; open("/work/key.txt", "w").write("ed25519:%s\n" % secrets.token_hex(32))' -read -r TUFACEOUS_KEY /work/manifest.toml <>/work/manifest.toml <>/work/manifest.toml <&2 echo "\`caboose-util $1\` mismatch:" - >&2 echo " $2: $output_a" - >&2 echo " $3: $output_b" - exit 1 - fi - echo "$output_a" -} - -# Add the SP images. -for board_rev in "${ALL_BOARDS[@]}"; do - board=${board_rev%-?} - tufaceous_board=${board//sidecar/switch} - sp_image="/work/hubris/${board_rev}.zip" - sp_caboose_version=$(/work/caboose-util read-version "$sp_image") - sp_caboose_board=$(/work/caboose-util read-board "$sp_image") - - cat >>/work/manifest.toml <>/work/manifest.toml < /work/repo-rot-all.zip.sha256.txt +pfexec zfs create -p "rpool/images/$USER/host" +pfexec zfs create -p "rpool/images/$USER/recovery" -# -# XXX: There are some issues downloading Buildomat artifacts > 1 GiB, see -# oxidecomputer/buildomat#36. -# -split -a 1 -b 1024m /work/repo-rot-all.zip /work/repo-rot-all.zip.part -rm /work/repo-rot-all.zip -# Ensure the build doesn't fail if the repo gets smaller than 1 GiB. 
-touch /work/repo-rot-all.zip.partb +cargo xtask releng --output-dir /work diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index 5194f2d28a..236b9b5023 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -17,14 +17,14 @@ jobs: env: RUSTFLAGS: -D warnings steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: ref: ${{ github.event.pull_request.head.sha }} # see omicron#4461 - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af # v1 with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@3d5321a5e3ceb69232a18ca12966908a643cbce3 # v2 + uses: taiki-e/install-action@7491b900536dd0dae2e47ce7c17f140e46328dc4 # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date diff --git a/Cargo.lock b/Cargo.lock index 3ab4a4f95f..3060a8fae7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,9 +29,9 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", "cipher", @@ -54,31 +54,31 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom 0.2.12", + "getrandom 0.2.14", "once_cell", "version_check", - "zerocopy 0.7.32", + "zerocopy 0.7.34", ] [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "android-tzdata" @@ -101,58 +101,50 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - [[package]] name = "anstream" -version = "0.6.11" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum 
= "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -160,9 +152,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "25bdb32cbbdce2b519a9cd7df3a678443100e265d5e25ca763b7572a5104f5f3" dependencies = [ "backtrace", ] @@ -174,7 +166,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -188,9 +180,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "argon2" @@ -247,7 +239,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed72493ac66d5804837f480ab3766c72bdfab91a65e565fc54fa9e42db0073a8" dependencies = [ "anstyle", - "bstr 1.9.0", + "bstr 1.9.1", "doc-comment", "predicates", "predicates-core", @@ -275,13 +267,13 @@ dependencies = [ [[package]] name = "async-recursion" -version = "1.0.5" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -303,34 +295,34 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "atomic-polyfill" -version = "0.1.11" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3ff7eb3f316534d83a8a2c3d1674ace8a5a71198eba31e2e2b597833f699b28" +checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" dependencies = [ "critical-section", ] [[package]] name = 
"atomic-waker" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "atomicwrites" @@ -359,7 +351,7 @@ name = "authz-macros" version = "0.1.0" dependencies = [ "expectorate", - "heck 0.4.1", + "heck 0.5.0", "nexus-macros-common", "omicron-workspace-hack", "prettyplease", @@ -367,14 +359,14 @@ dependencies = [ "quote", "serde", "serde_tokenstream 0.2.0", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "backoff" @@ -383,7 +375,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ "futures-core", - "getrandom 0.2.12", + "getrandom 0.2.14", "instant", "pin-project-lite", "rand 0.8.5", @@ -392,16 +384,16 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", "cfg-if", "libc", "miniz_oxide", - "object 0.32.1", + "object 0.32.2", "rustc-demangle", ] @@ -425,9 +417,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" @@ -435,15 +427,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "basic-toml" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2db21524cad41c5591204d22d75e1970a2d1f71060214ca931dc7d5afe2c14e5" -dependencies = [ - "serde", -] - [[package]] name = "bb8" version = "0.8.3" @@ -453,7 +436,7 @@ dependencies = [ "async-trait", "futures-channel", "futures-util", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "tokio", ] @@ -481,61 +464,61 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af#6dceb9ef69c217cb78a2018bbedafbc19f6ec1af" +source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" dependencies = [ - "bhyve_api_sys 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af)", + "bhyve_api_sys 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", "libc", - "strum 0.26.1", + "strum", ] [[package]] name = "bhyve_api" version = "0.0.0" -source = 
"git+https://github.com/oxidecomputer/propolis?rev=fdf0585c6a227a7cfbee4a61a36938c3d77e4712#fdf0585c6a227a7cfbee4a61a36938c3d77e4712" +source = "git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af#6dceb9ef69c217cb78a2018bbedafbc19f6ec1af" dependencies = [ - "bhyve_api_sys 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=fdf0585c6a227a7cfbee4a61a36938c3d77e4712)", + "bhyve_api_sys 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af)", "libc", - "strum 0.26.1", + "strum", ] [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af#6dceb9ef69c217cb78a2018bbedafbc19f6ec1af" +source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" dependencies = [ "libc", - "strum 0.26.1", + "strum", ] [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=fdf0585c6a227a7cfbee4a61a36938c3d77e4712#fdf0585c6a227a7cfbee4a61a36938c3d77e4712" +source = "git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af#6dceb9ef69c217cb78a2018bbedafbc19f6ec1af" dependencies = [ "libc", - "strum 0.26.1", + "strum", ] [[package]] name = "bindgen" -version = "0.69.2" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c69fae65a523209d34240b60abe0c42d33d1045d445c0839d8a4894a736e2d" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cexpr", "clang-sys", + "itertools 0.12.1", "lazy_static", "lazycell", "log", - "peeking_take_while", "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.52", + "syn 2.0.64", "which", ] @@ -568,9 +551,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" dependencies = [ "serde", ] @@ -618,26 +601,26 @@ dependencies = [ [[package]] name = "blake2b_simd" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" +checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" dependencies = [ "arrayref", "arrayvec", - "constant_time_eq 0.2.6", + "constant_time_eq", ] [[package]] name = "blake3" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" +checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52" dependencies = [ "arrayref", "arrayvec", "cc", "cfg-if", - "constant_time_eq 0.3.0", + "constant_time_eq", "memmap2", "rayon", ] @@ -700,7 +683,7 @@ dependencies = [ "slog-term", "thiserror", "tokio", - "uuid 1.7.0", + "uuid", "vsss-rs", "zeroize", ] @@ -709,17 +692,18 @@ dependencies = [ name = "bootstrap-agent-client" version = "0.1.0" dependencies = [ - "ipnetwork", "omicron-common", "omicron-workspace-hack", + "oxnet", "progenitor", "regress", "reqwest", "schemars", "serde", + 
"serde_json", "sled-hardware-types", "slog", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -735,12 +719,12 @@ dependencies = [ [[package]] name = "bstr" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" +checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", - "regex-automata 0.4.5", + "regex-automata 0.4.6", "serde", ] @@ -756,15 +740,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytecount" -version = "0.6.3" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" +checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" [[package]] name = "byteorder" @@ -774,9 +758,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" dependencies = [ "serde", ] @@ -802,20 +786,11 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "caboose-util" -version = "0.1.0" -dependencies = [ - "anyhow", - "hubtools", - "omicron-workspace-hack", -] - [[package]] name = "camino" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" dependencies = [ "serde", ] @@ -845,9 +820,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.3" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" +checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" dependencies = [ "serde", ] @@ -860,7 +835,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -868,12 +843,12 @@ dependencies = [ [[package]] name = "cargo_toml" -version = "0.19.0" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "922d6ea3081d68b9e3e09557204bff47f9b5406a4a304dc917e187f8cafd582b" +checksum = "c8cb1d556b8b8f36e5ca74938008be3ac102f5dcb5b68a0477e4249ae2291cd3" dependencies = [ "serde", - "toml 0.8.10", + "toml 0.8.13", ] [[package]] @@ -908,12 +883,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.83" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] +checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" [[package]] name = "cexpr" @@ -926,11 +898,11 @@ dependencies = [ [[package]] name = "cfg-expr" -version = "0.15.6" +version = "0.15.8" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6100bc57b6209840798d95cb2775684849d332f7bd788db2a8c8caf7ef82a41a" +checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" dependencies = [ - "smallvec 1.13.1", + "smallvec 1.13.2", "target-lexicon", ] @@ -940,6 +912,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chacha20" version = "0.9.1" @@ -966,9 +944,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.34" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", @@ -976,7 +954,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.0", + "windows-targets 0.52.5", ] [[package]] @@ -1019,9 +997,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -1030,24 +1008,9 @@ dependencies = [ [[package]] name = "clap" -version = "2.34.0" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "ansi_term", - "atty", - "bitflags 1.3.2", - "strsim 0.8.0", - "textwrap 0.11.0", - "unicode-width", - "vec_map", -] - -[[package]] -name = "clap" -version = "4.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c918d541ef2913577a0f9566e9ce27cb35b6df072075769e0b26cb5a554520da" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -1055,27 +1018,27 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.1" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3e7391dad68afb0c2ede1bf619f579a3dc9c2ec67f089baa397123a2f3d1eb" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.0", + "strsim", "terminal_size", ] [[package]] name = "clap_derive" -version = "4.5.0" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -1086,9 +1049,9 @@ checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "clipboard-win" -version = "5.0.0" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c57002a5d9be777c1ef967e33674dac9ebd310d8893e4e3437b14d5f0f6372cc" +checksum = 
"79f4473f5144e20d9aceaf2972478f06ddf687831eafeeb434fbaf0acc4144ad" dependencies = [ "error-code", ] @@ -1101,9 +1064,9 @@ checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "colored" @@ -1167,12 +1130,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "constant_time_eq" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" - [[package]] name = "constant_time_eq" version = "0.3.0" @@ -1198,9 +1155,9 @@ dependencies = [ [[package]] name = "cookie" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd91cf61412820176e137621345ee43b3f4423e589e7ae4e50d601d93e35ef8" +checksum = "4ddef33a339a91ea89fb53151bd0a4689cfce27055c291dfa69945475d22c747" dependencies = [ "time", "version_check", @@ -1225,9 +1182,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -1235,9 +1192,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "corncobs" @@ -1268,30 +1225,30 @@ dependencies = [ [[package]] name = "crc" -version = "3.0.1" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" dependencies = [ "crc-catalog", ] [[package]] name = "crc-any" -version = "2.4.3" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774646b687f63643eb0f4bf13dc263cb581c8c9e57973b6ddf78bda3994d88df" +checksum = "a62ec9ff5f7965e4d7280bd5482acd20aadb50d632cf6c1d74493856b011fa73" [[package]] name = "crc-catalog" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if", ] @@ -1317,7 +1274,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.1", + "clap", "criterion-plot", "futures", "is-terminal", @@ -1354,21 +1311,19 @@ checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216" [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.12" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] @@ -1394,12 +1349,12 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "crossterm_winapi", "futures-core", "libc", "mio", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "serde", "signal-hook", "signal-hook-mio", @@ -1418,7 +1373,7 @@ dependencies = [ [[package]] name = "crucible-agent-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=16f16478f4af1502b25ddcd79d307b3f116f13f6#16f16478f4af1502b25ddcd79d307b3f116f13f6" +source = "git+https://github.com/oxidecomputer/crucible?rev=8c6d485110ecfae5409575246b986a145c386dc4#8c6d485110ecfae5409575246b986a145c386dc4" dependencies = [ "anyhow", "chrono", @@ -1434,7 +1389,7 @@ dependencies = [ [[package]] name = "crucible-pantry-client" version = "0.0.1" -source = "git+https://github.com/oxidecomputer/crucible?rev=16f16478f4af1502b25ddcd79d307b3f116f13f6#16f16478f4af1502b25ddcd79d307b3f116f13f6" +source = "git+https://github.com/oxidecomputer/crucible?rev=8c6d485110ecfae5409575246b986a145c386dc4#8c6d485110ecfae5409575246b986a145c386dc4" dependencies = [ "anyhow", "chrono", @@ -1445,13 +1400,13 @@ dependencies = [ "schemars", "serde", "serde_json", - "uuid 1.7.0", + "uuid", ] [[package]] name = "crucible-smf" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/crucible?rev=16f16478f4af1502b25ddcd79d307b3f116f13f6#16f16478f4af1502b25ddcd79d307b3f116f13f6" +source = "git+https://github.com/oxidecomputer/crucible?rev=8c6d485110ecfae5409575246b986a145c386dc4#8c6d485110ecfae5409575246b986a145c386dc4" dependencies = [ "crucible-workspace-hack", "libc", @@ -1474,9 +1429,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.5.2" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -1537,9 +1492,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.1" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if", "cpufeatures", @@ -1555,100 +1510,65 @@ dependencies = [ [[package]] name = "curve25519-dalek-derive" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", -] - -[[package]] -name = "darling" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" -dependencies = [ - "darling_core 0.14.4", - "darling_macro 0.14.4", + "syn 2.0.64", ] [[package]] name = "darling" -version = "0.20.3" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" +checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" dependencies = [ - "darling_core 0.20.3", - "darling_macro 0.20.3", + "darling_core", + "darling_macro", ] [[package]] name = "darling_core" -version = "0.14.4" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_core" -version = "0.20.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 2.0.52", -] - -[[package]] -name = "darling_macro" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" -dependencies = [ - "darling_core 0.14.4", - "quote", - "syn 1.0.109", + "strsim", + "syn 2.0.64", ] [[package]] name = "darling_macro" -version = "0.20.3" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" +checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ - "darling_core 0.20.3", + "darling_core", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "datatest-stable" -version = "0.2.3" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a384d02609f0774f4dbf0c38fc57eb2769b24c30b9185911ff657ec14837da" +checksum = "a560b3fd20463b56397bd457aa71243ccfdcffe696050b66e3b1e0ec0457e7f1" dependencies = [ "camino", + "fancy-regex", "libtest-mimic", - "regex", "walkdir", ] @@ -1657,7 +1577,7 @@ name = "db-macros" version = "0.1.0" dependencies = [ "expectorate", - "heck 0.4.1", + "heck 0.5.0", "nexus-macros-common", "omicron-workspace-hack", "prettyplease", @@ -1665,13 +1585,13 @@ dependencies = [ "quote", "serde", "serde_tokenstream 0.2.0", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "ddm-admin-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/maghemite?rev=de065a84831e66c829603d9a098e237e8f5faaa1#de065a84831e66c829603d9a098e237e8f5faaa1" +source = "git+https://github.com/oxidecomputer/maghemite?rev=025389ff39d594bf2b815377e2c1dc4dd23b1f96#025389ff39d594bf2b815377e2c1dc4dd23b1f96" dependencies = [ "percent-encoding", "progenitor", @@ -1679,7 
+1599,7 @@ dependencies = [ "serde", "serde_json", "slog", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -1690,9 +1610,9 @@ checksum = "ffe7ed1d93f4553003e20b629abe9085e1e81b1429520f897f8f8860bc6dfc21" [[package]] name = "defmt" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a2d011b2fee29fb7d659b83c43fce9a2cb4df453e16d441a51448e448f3f98" +checksum = "a99dd22262668b887121d4672af5a64b238f026099f1a2a1b322066c9ecfe9e0" dependencies = [ "bitflags 1.3.2", "defmt-macros", @@ -1700,31 +1620,31 @@ dependencies = [ [[package]] name = "defmt-macros" -version = "0.3.6" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54f0216f6c5acb5ae1a47050a6645024e6edafc2ee32d421955eccfef12ef92e" +checksum = "e3a9f309eff1f79b3ebdf252954d90ae440599c26c2c553fe87a2d17195f2dcb" dependencies = [ "defmt-parser", "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "defmt-parser" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "269924c02afd7f94bc4cecbfa5c379f6ffcf9766b3408fe63d22c728654eccd0" +checksum = "ff4a5fefe330e8d7f31b16a318f9ce81000d8e35e69b93eae154d16d2278f70f" dependencies = [ "thiserror", ] [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "der_derive", @@ -1741,7 +1661,7 @@ checksum = "5fe87ce4529967e0ba1dcf8450bab64d97dfd5010a6256187ffe2e43e6f0e049" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -1762,38 +1682,38 @@ checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "derive_builder" -version = "0.12.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8" +checksum = "0350b5cb0331628a5916d6c5c0b72e97393b8b6b03b47a9284f4e7f5a405ffd7" dependencies = [ "derive_builder_macro", ] [[package]] name = "derive_builder_core" -version = "0.12.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" +checksum = "d48cda787f839151732d396ac69e3473923d54312c070ee21e9effcaa8ca0b1d" dependencies = [ - "darling 0.14.4", + "darling", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.64", ] [[package]] name = "derive_builder_macro" -version = "0.12.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e" +checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b" dependencies = [ "derive_builder_core", - "syn 1.0.109", + "syn 2.0.64", ] [[package]] @@ -1812,12 +1732,12 @@ dependencies = [ [[package]] name = "derror-macro" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" dependencies = [ - "darling 
0.20.3", + "darling", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -1843,11 +1763,11 @@ checksum = "a7993efb860416547839c115490d4951c6d0f8ec04a3594d9dd99d50ed7ec170" [[package]] name = "diesel" -version = "2.1.4" +version = "2.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62c6fcf842f17f8c78ecf7c81d75c5ce84436b41ee07e03f490fbb5f5a8731d8" +checksum = "ff236accb9a5069572099f0b350a92e9560e8e63a9b8d546162f4a5e03026bb2" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "byteorder", "chrono", "diesel_derives", @@ -1857,7 +1777,7 @@ dependencies = [ "pq-sys", "r2d2", "serde_json", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -1868,20 +1788,20 @@ dependencies = [ "diesel", "serde", "usdt 0.5.0", - "uuid 1.7.0", + "uuid", "version_check", ] [[package]] name = "diesel_derives" -version = "2.1.2" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" +checksum = "14701062d6bed917b5c7103bdffaee1e4609279e240488ad24e7bd979ca6866c" dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -1890,7 +1810,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -1971,7 +1891,7 @@ version = "0.0.0" source = "git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af#6dceb9ef69c217cb78a2018bbedafbc19f6ec1af" dependencies = [ "libc", - "strum 0.26.1", + "strum", ] [[package]] @@ -1993,8 +1913,9 @@ version = "0.1.0" dependencies = [ "anyhow", "camino", + "camino-tempfile", "chrono", - "clap 4.5.1", + "clap", "dns-service-client", "dropshot", "expectorate", @@ -2013,16 +1934,15 @@ dependencies = [ "slog-envlogger", "slog-term", "subprocess", - "tempdir", "tempfile", "thiserror", "tokio", - "toml 0.8.10", + "toml 0.8.13", "trust-dns-client", "trust-dns-proto", "trust-dns-resolver", "trust-dns-server", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -2031,6 +1951,7 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", + "expectorate", "http 0.2.12", "omicron-workspace-hack", "progenitor", @@ -2053,7 +1974,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e6b21a1211455e82b1245d6e1b024f30606afbb734c114515d40d0e0b34ce81" dependencies = [ "thiserror", - "zerocopy 0.3.0", + "zerocopy 0.3.2", ] [[package]] @@ -2067,7 +1988,7 @@ dependencies = [ "serde", "serde_json", "thiserror", - "zerocopy 0.7.32", + "zerocopy 0.7.34", ] [[package]] @@ -2098,18 +2019,18 @@ dependencies = [ "serde", "serde_json", "slog", - "toml 0.8.10", - "uuid 1.7.0", + "toml 0.8.13", + "uuid", ] [[package]] name = "dropshot" -version = "0.9.1-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#29ae98d1f909c6832661408a4c03f929e8afa6e9" +version = "0.10.2-dev" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#0cd0e828d096578392b6a5524334d44fd10ef6da" dependencies = [ "async-stream", "async-trait", - "base64 0.21.7", + "base64 0.22.1", "bytes", "camino", "chrono", @@ -2117,18 +2038,18 @@ dependencies = [ "dropshot_endpoint", "form_urlencoded", "futures", - "hostname", + "hostname 0.4.0", "http 0.2.12", "hyper 0.14.28", - "indexmap 2.2.5", + "indexmap 2.2.6", "multer", "openapiv3", "paste", "percent-encoding", - "proc-macro2", - "rustls 0.22.2", 
- "rustls-pemfile 2.1.1", + "rustls 0.22.4", + "rustls-pemfile 2.1.2", "schemars", + "scopeguard", "serde", "serde_json", "serde_path_to_error", @@ -2141,23 +2062,23 @@ dependencies = [ "slog-term", "tokio", "tokio-rustls 0.25.0", - "toml 0.8.10", - "usdt 0.3.5", - "uuid 1.7.0", + "toml 0.8.13", + "usdt 0.5.0", + "uuid", "version_check", "waitgroup", ] [[package]] name = "dropshot_endpoint" -version = "0.9.1-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#29ae98d1f909c6832661408a4c03f929e8afa6e9" +version = "0.10.2-dev" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#0cd0e828d096578392b6a5524334d44fd10ef6da" dependencies = [ "proc-macro2", "quote", "serde", "serde_tokenstream 0.2.0", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -2198,7 +2119,7 @@ dependencies = [ "digest", "elliptic-curve", "rfc6979", - "signature 2.1.0", + "signature 2.2.0", "spki", ] @@ -2213,33 +2134,34 @@ dependencies = [ [[package]] name = "ed25519" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8", - "signature 2.1.0", + "signature 2.2.0", ] [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", - "ed25519 2.2.2", + "ed25519 2.2.3", "rand_core 0.6.4", "serde", "sha2", + "subtle", "zeroize", ] [[package]] name = "either" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" [[package]] name = "elliptic-curve" @@ -2270,9 +2192,9 @@ checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" [[package]] name = "ena" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c533630cf40e9caa44bd91aadc88a75d75a4c3a12b4cfde353cbed41daa1e1f1" +checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" dependencies = [ "log", ] @@ -2285,9 +2207,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] @@ -2299,9 +2221,9 @@ dependencies = [ "anstyle", "anyhow", "async-trait", - "base64 0.22.0", + "base64 0.22.1", "chrono", - "clap 4.5.1", + "clap", "colored", "dhcproto", "http 0.2.12", @@ -2320,11 +2242,11 @@ dependencies = [ "russh-keys", "serde", "serde_json", - "socket2 0.5.5", + "socket2 0.5.7", "tokio", - "toml 0.8.10", + "toml 0.8.13", "trust-dns-resolver", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -2360,9 +2282,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" +checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" dependencies = [ "is-terminal", "log", @@ -2386,9 +2308,9 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", @@ -2396,9 +2318,18 @@ dependencies = [ [[package]] name = "error-code" -version = "3.0.0" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0474425d51df81997e2f90a21591180b38eccf27292d755f3e30750225c175b" + +[[package]] +name = "escape8259" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "281e452d3bad4005426416cdba5ccfd4f5c1280e10099e21db27f7c1c28347fc" +checksum = "ba4f4911e3666fcd7826997b4745c8224295a6f3072f1418c3067b97a67557ee" +dependencies = [ + "rustversion", +] [[package]] name = "expectorate" @@ -2417,11 +2348,22 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +[[package]] +name = "fancy-regex" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "531e46835a22af56d1e3b66f04844bed63158bc094a628bec1d321d9b4c44bf2" +dependencies = [ + "bit-set", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", +] + [[package]] name = "fastrand" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fatfs" @@ -2435,17 +2377,6 @@ dependencies = [ "log", ] -[[package]] -name = "fd-lock" -version = "3.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5" -dependencies = [ - "cfg-if", - "rustix", - "windows-sys 0.48.0", -] - [[package]] name = "fd-lock" version = "4.0.2" @@ -2469,9 +2400,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.1" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "filetime" @@ -2485,6 +2416,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "finl_unicode" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" + [[package]] name = "fixedbitset" version = "0.4.2" @@ -2493,15 +2430,15 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flagset" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda653ca797810c02f7ca4b804b40b8b95ae046eb989d356bce17919a8c25499" +checksum = "cdeb3aa5e95cf9aabc17f060cfa0ced7b83f042390760ca53bf09df9968acaa1" [[package]] name = "flate2" -version = "1.0.28" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" dependencies = [ "crc32fast", "miniz_oxide", @@ -2561,7 +2498,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -2598,6 +2535,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" dependencies = [ "autocfg", + "tokio", ] [[package]] @@ -2610,12 +2548,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - [[package]] name = "funty" version = "2.0.0" @@ -2678,7 +2610,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -2695,9 +2627,9 @@ checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -2731,7 +2663,7 @@ name = "gateway-cli" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.1", + "clap", "futures", "gateway-client", "gateway-messages", @@ -2746,14 +2678,14 @@ dependencies = [ "termios", "tokio", "tokio-tungstenite 0.20.1", - "uuid 1.7.0", + "uuid", ] [[package]] name = "gateway-client" version = "0.1.0" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "chrono", "gateway-messages", "omicron-workspace-hack", @@ -2764,7 +2696,7 @@ dependencies = [ "serde", "serde_json", "slog", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -2778,9 +2710,9 @@ dependencies = [ "serde_repr", "smoltcp 0.9.1", "static_assertions", - "strum_macros 0.25.2", - "uuid 1.7.0", - "zerocopy 0.6.4", + "strum_macros 0.25.3", + "uuid", + "zerocopy 0.6.6", ] [[package]] @@ -2803,13 +2735,13 @@ dependencies = [ "serde", "serde-big-array 0.5.1", "slog", - "socket2 0.5.5", + "socket2 0.5.7", "string_cache", "thiserror", "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc.git?branch=main)", "tokio", "usdt 0.3.5", - "uuid 1.7.0", + "uuid", "version_check", "zip", ] @@ -2827,7 +2759,7 @@ dependencies = [ "slog", "sp-sim", "tokio", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -2873,9 +2805,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "js-sys", @@ -2886,9 +2818,9 @@ dependencies = [ [[package]] name = "ghash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ "opaque-debug", "polyval", @@ -2896,9 +2828,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.0" +version = "0.28.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "glob" @@ -2908,22 +2840,22 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" +checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1" dependencies = [ "aho-corasick", - "bstr 1.9.0", - "fnv", + "bstr 1.9.1", "log", - "regex", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] name = "goblin" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb07a4ffed2093b118a525b1d8f5204ae274faed5604537caf7135d0f18d9887" +checksum = "1b363a30c165f666402fe6a3024d3bec7ebc898f96a4a23bd1c99f8dbf3f4f47" dependencies = [ "log", "plain", @@ -2954,16 +2886,16 @@ dependencies = [ "debug-ignore", "fixedbitset", "guppy-workspace-hack", - "indexmap 2.2.5", + "indexmap 2.2.6", "itertools 0.12.1", "nested", "once_cell", "pathdiff", "petgraph", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", - "smallvec 1.13.1", + "smallvec 1.13.2", "static_assertions", "target-spec", ] @@ -2976,9 +2908,9 @@ checksum = "92620684d99f750bae383ecb3be3748142d6095760afd5cbcf2261e9a279d780" [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -2986,7 +2918,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.2.5", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -2995,9 +2927,9 @@ dependencies = [ [[package]] name = "half" -version = "2.3.1" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ "cfg-if", "crunchy", @@ -3038,9 +2970,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", @@ -3072,9 +3004,9 @@ dependencies = [ [[package]] name = "heapless" -version = "0.7.16" +version = "0.7.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db04bc24a18b9ea980628ecf00e6c0264f3c1426dac36c00cb49b6fbad8b0743" +checksum = "cdc6457c0eb62c71aac4bc17216026d8410337c4126773b9c5daba343f17964f" dependencies = [ "atomic-polyfill", "hash32 0.2.1", @@ -3108,6 +3040,12 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = 
"hermit-abi" version = "0.1.19" @@ -3119,9 +3057,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -3164,11 +3102,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3182,6 +3120,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "hostname" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" +dependencies = [ + "cfg-if", + "libc", + "windows", +] + [[package]] name = "http" version = "0.2.12" @@ -3195,9 +3144,9 @@ dependencies = [ [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -3206,9 +3155,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http 0.2.12", @@ -3222,7 +3171,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", - "http 1.0.0", + "http 1.1.0", ] [[package]] @@ -3268,7 +3217,7 @@ dependencies = [ [[package]] name = "hubpack" version = "0.1.0" -source = "git+https://github.com/cbiffle/hubpack?rev=df08cc3a6e1f97381cd0472ae348e310f0119e25#df08cc3a6e1f97381cd0472ae348e310f0119e25" +source = "git+https://github.com/cbiffle/hubpack.git?rev=df08cc3a6e1f97381cd0472ae348e310f0119e25#df08cc3a6e1f97381cd0472ae348e310f0119e25" dependencies = [ "hubpack_derive 0.1.0", "serde", @@ -3287,7 +3236,7 @@ dependencies = [ [[package]] name = "hubpack_derive" version = "0.1.0" -source = "git+https://github.com/cbiffle/hubpack?rev=df08cc3a6e1f97381cd0472ae348e310f0119e25#df08cc3a6e1f97381cd0472ae348e310f0119e25" +source = "git+https://github.com/cbiffle/hubpack.git?rev=df08cc3a6e1f97381cd0472ae348e310f0119e25#df08cc3a6e1f97381cd0472ae348e310f0119e25" dependencies = [ "proc-macro2", "quote", @@ -3307,20 +3256,21 @@ dependencies = [ [[package]] name = "hubtools" -version = "0.4.1" -source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#73cd5a84689d59ecce9da66ad4389c540d315168" +version = "0.4.6" +source = "git+https://github.com/oxidecomputer/hubtools.git?branch=main#943c4bbe6b50d1ab635d085d6204895fb4154e79" dependencies = [ + "hex", "lpc55_areas", "lpc55_sign", "object 0.30.4", "path-slash", "rsa", "thiserror", - "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc.git)", + "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc)", "tlvc-text", "toml 0.7.8", "x509-cert", - 
"zerocopy 0.6.4", + "zerocopy 0.6.6", "zip", ] @@ -3342,12 +3292,12 @@ dependencies = [ "futures-util", "h2", "http 0.2.12", - "http-body 0.4.5", + "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -3356,18 +3306,19 @@ dependencies = [ [[package]] name = "hyper" -version = "1.1.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "httparse", "itoa", "pin-project-lite", + "smallvec 1.13.2", "tokio", "want", ] @@ -3381,7 +3332,7 @@ dependencies = [ "futures-util", "http 0.2.12", "hyper 0.14.28", - "rustls 0.21.9", + "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", ] @@ -3393,11 +3344,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ "futures-util", - "http 1.0.0", - "hyper 1.1.0", + "http 1.1.0", + "hyper 1.3.1", "hyper-util", "log", - "rustls 0.22.2", + "rustls 0.22.4", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -3439,18 +3390,18 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdea9aac0dbe5a9240d68cfd9501e2db94222c6dc06843e06640b9e07f0fdc67" +checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", - "hyper 1.1.0", + "hyper 1.3.1", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.5.7", "tokio", "tower", "tower-service", @@ -3459,16 +3410,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.57" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows", + "windows-core", ] [[package]] @@ -3530,7 +3481,7 @@ dependencies = [ [[package]] name = "illumos-sys-hdrs" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" [[package]] name = "illumos-utils" @@ -3538,7 +3489,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "bhyve_api 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=fdf0585c6a227a7cfbee4a61a36938c3d77e4712)", + "bhyve_api 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", "byteorder", "camino", "camino-tempfile", @@ -3550,10 +3501,12 @@ dependencies = [ "macaddr", "mockall", "omicron-common", + "omicron-uuid-kinds", "omicron-workspace-hack", "opte-ioctl", "oxide-vpc", "oxlog", + "oxnet", "regress", "schemars", "serde", @@ -3562,8 +3515,9 @@ dependencies = [ "smf", "thiserror", "tokio", - "toml 0.8.10", - "uuid 1.7.0", + "toml 0.8.13", + "uuid", + "whoami", "zone 0.3.0", 
] @@ -3591,12 +3545,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.5" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "serde", ] @@ -3622,9 +3576,9 @@ checksum = "bfa799dd5ed20a7e349f3b4639aa80d74549c81716d9ec4f994c9b5815598306" [[package]] name = "indoc" -version = "2.0.3" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c785eefb63ebd0e33416dfcb8d6da0bf27ce752843a45632a67bf10d4d4b5c4" +checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" [[package]] name = "inout" @@ -3646,7 +3600,7 @@ dependencies = [ "bytes", "camino", "cancel-safe-futures", - "clap 4.5.1", + "clap", "display-error-chain", "futures", "hex", @@ -3681,7 +3635,7 @@ dependencies = [ "tokio-stream", "tufaceous-lib", "update-engine", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -3698,7 +3652,7 @@ dependencies = [ "serde_json", "slog", "update-engine", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -3707,7 +3661,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.5.1", + "clap", "dropshot", "expectorate", "hyper 0.14.28", @@ -3722,7 +3676,7 @@ dependencies = [ "serde_json", "slog", "subprocess", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -3769,6 +3723,7 @@ dependencies = [ "hyper 0.14.28", "omicron-common", "omicron-test-utils", + "omicron-uuid-kinds", "omicron-workspace-hack", "progenitor", "reqwest", @@ -3780,7 +3735,7 @@ dependencies = [ "thiserror", "tokio", "trust-dns-resolver", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -3788,7 +3743,7 @@ name = "internal-dns-cli" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.1", + "clap", "dropshot", "internal-dns", "omicron-common", @@ -3817,7 +3772,7 @@ dependencies = [ "serde", "test-strategy", "thiserror", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -3826,7 +3781,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.5", + "socket2 0.5.7", "widestring", "windows-sys 0.48.0", "winreg", @@ -3850,13 +3805,13 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.9" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ - "hermit-abi 0.3.2", - "rustix", - "windows-sys 0.48.0", + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.52.0", ] [[package]] @@ -3865,6 +3820,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + [[package]] name = "ispf" version = "0.1.0" @@ -3893,24 +3854,24 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = 
"49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] [[package]] name = "keccak" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] @@ -3934,10 +3895,10 @@ dependencies = [ [[package]] name = "kstat-macro" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" dependencies = [ "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -4012,7 +3973,7 @@ dependencies = [ "libc", "libefi-sys", "thiserror", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -4029,7 +3990,7 @@ dependencies = [ "anstyle", "anyhow", "camino", - "clap 4.5.1", + "clap", "colored", "futures", "libc", @@ -4052,30 +4013,30 @@ dependencies = [ "tokio", "tokio-tungstenite 0.21.0", "toml 0.7.8", - "uuid 1.7.0", + "uuid", "zone 0.1.8", ] [[package]] name = "libloading" -version = "0.7.4" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "winapi", + "windows-targets 0.48.5", ] [[package]] name = "libm" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libnet" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/netadm-sys?branch=main#d44d9e084f39e844f8083d4d9b39a331061ebbcc" +source = "git+https://github.com/oxidecomputer/netadm-sys?branch=main#4ceaf96e02acb8258ea4aa403326c08932324835" dependencies = [ "anyhow", "cfg-if", @@ -4086,7 +4047,7 @@ dependencies = [ "nvpair", "nvpair-sys", "rusty-doors", - "socket2 0.4.9", + "socket2 0.4.10", "thiserror", "tracing", ] @@ -4094,7 +4055,7 @@ dependencies = [ [[package]] name = "libnet" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/netadm-sys#f114bd0d543d886cd453932e9f0967de57289bc2" +source = "git+https://github.com/oxidecomputer/netadm-sys#4ceaf96e02acb8258ea4aa403326c08932324835" dependencies = [ "anyhow", "cfg-if", @@ -4105,7 +4066,7 @@ dependencies = [ "nvpair", "nvpair-sys", "rusty-doors", - "socket2 0.4.9", + "socket2 0.4.10", "thiserror", "tracing", ] @@ -4124,6 +4085,16 @@ name = "libnvme-sys" version = "0.0.0" source = "git+https://github.com/oxidecomputer/libnvme?rev=6fffcc81d2c423ed2d2e6c5c2827485554c4ecbe#6fffcc81d2c423ed2d2e6c5c2827485554c4ecbe" +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.5.0", + "libc", +] + 
[[package]] name = "libsw" version = "3.3.1" @@ -4135,11 +4106,12 @@ dependencies = [ [[package]] name = "libtest-mimic" -version = "0.6.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d8de370f98a6cb8a4606618e53e802f93b094ddec0f96988eaec2c27e6e9ce7" +checksum = "cc0bda45ed5b3a2904262c1bb91e526127aa70e7ef3758aba2ef93cf896b9b58" dependencies = [ - "clap 4.5.1", + "clap", + "escape8259", "termcolor", "threadpool", ] @@ -4175,9 +4147,9 @@ checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -4191,25 +4163,25 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "lpc55_areas" -version = "0.2.4" -source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" +version = "0.2.5" +source = "git+https://github.com/oxidecomputer/lpc55_support#131520fc913ecce9b80557e854751953f743a7d2" dependencies = [ "bitfield", - "clap 4.5.1", + "clap", "packed_struct", "serde", ] [[package]] name = "lpc55_sign" -version = "0.3.3" -source = "git+https://github.com/oxidecomputer/lpc55_support#96f064eaae5e95930efaab6c29fd1b2e22225dac" +version = "0.3.4" +source = "git+https://github.com/oxidecomputer/lpc55_support#131520fc913ecce9b80557e854751953f743a7d2" dependencies = [ "byteorder", "const-oid", "crc-any", "der", - "env_logger 0.10.0", + "env_logger 0.10.2", "hex", "log", "lpc55_areas", @@ -4222,16 +4194,16 @@ dependencies = [ "sha2", "thiserror", "x509-cert", - "zerocopy 0.6.4", + "zerocopy 0.6.6", ] [[package]] name = "lru" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -4284,10 +4256,11 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "md-5" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ + "cfg-if", "digest", ] @@ -4299,9 +4272,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memmap" @@ -4315,9 +4288,9 @@ dependencies = [ [[package]] name = "memmap2" -version = "0.7.1" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f49388d20533534cd19360ad3d6a7dadc885944aa802ba3995040c5ec11288c6" +checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322" dependencies = [ "libc", ] @@ -4334,7 +4307,7 @@ dependencies = [ [[package]] name = "mg-admin-client" version = 
"0.1.0" -source = "git+https://github.com/oxidecomputer/maghemite?rev=de065a84831e66c829603d9a098e237e8f5faaa1#de065a84831e66c829603d9a098e237e8f5faaa1" +source = "git+https://github.com/oxidecomputer/maghemite?rev=025389ff39d594bf2b815377e2c1dc4dd23b1f96#025389ff39d594bf2b815377e2c1dc4dd23b1f96" dependencies = [ "anyhow", "chrono", @@ -4371,9 +4344,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] @@ -4392,9 +4365,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a978c8292954bcb9347a4e28772c0a0621166a1598fc1be28ac0076a4bb810e" +checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48" dependencies = [ "cfg-if", "downcast", @@ -4407,28 +4380,27 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad2765371d0978ba4ace4ebef047baa62fc068b431e468444b5610dd441c639b" +checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "multer" -version = "3.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15d522be0a9c3e46fd2632e272d178f56387bdb5c9fbb3a36c649062e9b5219" +checksum = "83e87776546dc87511aa5ee218730c92b666d7264ab6ed41f9d215af9cd5224b" dependencies = [ "bytes", "encoding_rs", "futures-util", - "http 1.0.0", + "http 1.1.0", "httparse", - "log", "memchr", "mime", "spin 0.9.8", @@ -4450,7 +4422,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.14", ] [[package]] @@ -4479,9 +4451,9 @@ checksum = "ca2b420f638f07fe83056b55ea190bb815f609ec5a35e7017884a10f78839c9e" [[package]] name = "new_debug_unreachable" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" [[package]] name = "newline-converter" @@ -4494,13 +4466,13 @@ dependencies = [ [[package]] name = "newtype-uuid" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a5ff2b31594942586c1520da8f1e5c705729ec67b3c2ad0fe459f0b576e4d9a" +checksum = "3526cb7c660872e401beaf3297f95f548ce3b4b4bdd8121b7c0713771d7c4a6e" dependencies = [ "schemars", "serde", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -4518,12 +4490,12 @@ version = "0.1.0" dependencies = [ "chrono", "futures", - "ipnetwork", "nexus-types", "omicron-common", "omicron-passwords", "omicron-uuid-kinds", "omicron-workspace-hack", + "oxnet", "progenitor", "regress", "reqwest", @@ -4531,7 +4503,7 @@ dependencies = [ "serde", "serde_json", "slog", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -4550,8 +4522,8 @@ dependencies = [ "serde_json", "serde_with", "tokio-postgres", - "toml 0.8.10", 
- "uuid 1.7.0", + "toml 0.8.13", + "uuid", ] [[package]] @@ -4580,20 +4552,23 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "once_cell", + "oxnet", "parse-display", "pq-sys", "rand 0.8.5", "ref-cast", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "sled-agent-client", + "slog", + "slog-error-chain", "steno", - "strum 0.26.1", + "strum", "thiserror", "tokio", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -4605,13 +4580,13 @@ dependencies = [ "async-bb8-diesel", "async-trait", "authz-macros", - "base64 0.22.0", + "base64 0.22.1", "bb8", "camino", "camino-tempfile", "chrono", "const_format", - "cookie 0.18.0", + "cookie 0.18.1", "db-macros", "diesel", "diesel-dtrace", @@ -4647,6 +4622,7 @@ dependencies = [ "openssl", "oso", "oximeter", + "oxnet", "paste", "pem", "petgraph", @@ -4657,10 +4633,10 @@ dependencies = [ "rcgen", "ref-cast", "regex", - "rustls 0.22.2", + "rustls 0.22.4", "samael", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "serde_urlencoded", @@ -4670,14 +4646,14 @@ dependencies = [ "slog-error-chain", "static_assertions", "steno", - "strum 0.26.1", + "strum", "subprocess", "swrite", "term", "thiserror", "tokio", "usdt 0.5.0", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -4688,6 +4664,7 @@ dependencies = [ "omicron-common", "omicron-workspace-hack", "once_cell", + "oxnet", "rand 0.8.5", "serde_json", ] @@ -4697,7 +4674,7 @@ name = "nexus-inventory" version = "0.1.0" dependencies = [ "anyhow", - "base64 0.22.0", + "base64 0.22.1", "chrono", "expectorate", "futures", @@ -4707,16 +4684,18 @@ dependencies = [ "nexus-types", "omicron-common", "omicron-sled-agent", + "omicron-uuid-kinds", "omicron-workspace-hack", "regex", "reqwest", "serde_json", "sled-agent-client", "slog", - "strum 0.26.1", + "strum", "thiserror", "tokio", - "uuid 1.7.0", + "typed-rng", + "uuid", ] [[package]] @@ -4726,7 +4705,34 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", +] + +[[package]] +name = "nexus-metrics-producer-gc" +version = "0.1.0" +dependencies = [ + "async-bb8-diesel", + "chrono", + "diesel", + "futures", + "httptest", + "ipnetwork", + "nexus-db-model", + "nexus-db-queries", + "nexus-test-utils", + "nexus-types", + "omicron-common", + "omicron-rpaths", + "omicron-test-utils", + "omicron-workspace-hack", + "oximeter-client", + "pq-sys", + "slog", + "slog-error-chain", + "thiserror", + "tokio", + "uuid", ] [[package]] @@ -4738,10 +4744,11 @@ dependencies = [ "nexus-db-queries", "omicron-common", "omicron-workspace-hack", + "oxnet", "reqwest", "sled-agent-client", "slog", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -4749,11 +4756,12 @@ name = "nexus-reconfigurator-execution" version = "0.1.0" dependencies = [ "anyhow", + "async-bb8-diesel", "chrono", + "diesel", "dns-service-client", "futures", "httptest", - "illumos-utils", "internal-dns", "ipnet", "nexus-config", @@ -4770,14 +4778,16 @@ dependencies = [ "omicron-nexus", "omicron-rpaths", "omicron-test-utils", + "omicron-uuid-kinds", "omicron-workspace-hack", + "oxnet", "pq-sys", "reqwest", "sled-agent-client", "slog", "slog-error-chain", "tokio", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -4786,35 +4796,45 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", + "debug-ignore", "expectorate", "gateway-client", - "indexmap 2.2.5", + "indexmap 2.2.6", "internal-dns", "ipnet", - "ipnetwork", + "maplit", "nexus-config", "nexus-inventory", "nexus-types", "omicron-common", "omicron-test-utils", + "omicron-uuid-kinds", 
"omicron-workspace-hack", + "oxnet", + "proptest", "rand 0.8.5", - "rand_seeder", "sled-agent-client", "slog", + "test-strategy", "thiserror", - "uuid 1.7.0", + "typed-rng", + "uuid", ] [[package]] name = "nexus-reconfigurator-preparation" version = "0.1.0" dependencies = [ - "illumos-utils", + "anyhow", + "futures", "nexus-db-model", + "nexus-db-queries", "nexus-types", "omicron-common", + "omicron-uuid-kinds", "omicron-workspace-hack", + "slog", + "slog-error-chain", ] [[package]] @@ -4827,7 +4847,7 @@ dependencies = [ "omicron-common", "omicron-workspace-hack", "slog", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -4859,6 +4879,7 @@ dependencies = [ "omicron-passwords", "omicron-sled-agent", "omicron-test-utils", + "omicron-uuid-kinds", "omicron-workspace-hack", "oximeter", "oximeter-collector", @@ -4871,7 +4892,7 @@ dependencies = [ "tokio", "tokio-util", "trust-dns-resolver", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -4880,7 +4901,7 @@ version = "0.1.0" dependencies = [ "omicron-workspace-hack", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -4889,27 +4910,37 @@ version = "0.1.0" dependencies = [ "anyhow", "api_identity", - "base64 0.22.0", + "base64 0.22.1", "chrono", + "clap", + "derive-where", + "derive_more", "dns-service-client", "futures", "gateway-client", "humantime", + "ipnetwork", + "newtype-uuid", "omicron-common", "omicron-passwords", "omicron-uuid-kinds", "omicron-workspace-hack", "openssl", + "oxnet", "parse-display", + "proptest", "schemars", "serde", "serde_json", "serde_with", "sled-agent-client", + "slog", + "slog-error-chain", "steno", - "strum 0.26.1", + "strum", + "test-strategy", "thiserror", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -4918,7 +4949,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" dependencies = [ - "smallvec 1.13.1", + "smallvec 1.13.2", ] [[package]] @@ -4936,12 +4967,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", + "cfg_aliases", "libc", ] @@ -4978,9 +5010,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ "num-complex", "num-integer", @@ -4991,11 +5023,10 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ - "autocfg", "num-integer", "num-traits", "rand 0.8.5", @@ -5015,15 +5046,15 @@ dependencies = [ "num-traits", "rand 0.8.5", "serde", - "smallvec 1.13.1", + "smallvec 1.13.2", "zeroize", ] [[package]] name = "num-complex" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" +checksum = 
"73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ "num-traits", ] @@ -5036,13 +5067,13 @@ checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" [[package]] name = "num-derive" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6a0fd4f737c707bd9086cc16c925f294943eb62eb71499e9fd4cf71f8b9f4e" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -5056,9 +5087,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -5067,20 +5098,19 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "autocfg", "num-integer", "num-traits", ] [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -5092,7 +5122,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.9", "libc", ] @@ -5119,9 +5149,9 @@ dependencies = [ [[package]] name = "num_threads" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" dependencies = [ "libc", ] @@ -5161,9 +5191,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -5217,6 +5247,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "once_cell", + "oxnet", "parse-display", "progenitor", "progenitor-client", @@ -5225,7 +5256,7 @@ dependencies = [ "regress", "reqwest", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_human_bytes", "serde_json", @@ -5233,11 +5264,12 @@ dependencies = [ "serde_with", "slog", "slog-error-chain", - "strum 0.26.1", + "strum", "test-strategy", "thiserror", "tokio", - "uuid 1.7.0", + "toml 0.8.13", + "uuid", ] [[package]] @@ -5261,8 +5293,8 @@ dependencies = [ "slog", "thiserror", "tokio", - "toml 0.8.10", - "uuid 1.7.0", + "toml 0.8.13", + "uuid", ] [[package]] @@ -5272,7 +5304,7 @@ dependencies = [ "anyhow", "camino", "camino-tempfile", - "clap 4.5.1", + "clap", "dropshot", "expectorate", "futures", @@ -5296,7 +5328,7 @@ dependencies = [ "subprocess", "tokio", "tokio-postgres", - 
"toml 0.8.10", + "toml 0.8.13", ] [[package]] @@ -5304,9 +5336,9 @@ name = "omicron-gateway" version = "0.1.0" dependencies = [ "anyhow", - "base64 0.22.0", + "base64 0.22.1", "camino", - "clap 4.5.1", + "clap", "dropshot", "expectorate", "futures", @@ -5338,8 +5370,8 @@ dependencies = [ "tokio", "tokio-stream", "tokio-tungstenite 0.20.1", - "toml 0.8.10", - "uuid 1.7.0", + "toml 0.8.13", + "uuid", ] [[package]] @@ -5350,18 +5382,19 @@ dependencies = [ "assert_matches", "async-bb8-diesel", "async-trait", - "base64 0.22.0", + "base64 0.22.1", "buf-list", "bytes", "camino", "camino-tempfile", "cancel-safe-futures", "chrono", - "clap 4.5.1", + "clap", "criterion", "crucible-agent-client", "crucible-pantry-client", "diesel", + "display-error-chain", "dns-server", "dns-service-client", "dpd-client", @@ -5385,12 +5418,13 @@ dependencies = [ "itertools 0.12.1", "macaddr", "mg-admin-client", - "mime_guess", + "nexus-client", "nexus-config", "nexus-db-model", "nexus-db-queries", "nexus-defaults", "nexus-inventory", + "nexus-metrics-producer-gc", "nexus-networking", "nexus-reconfigurator-execution", "nexus-reconfigurator-planning", @@ -5414,9 +5448,11 @@ dependencies = [ "oxide-client", "oximeter", "oximeter-client", + "oximeter-collector", "oximeter-db", "oximeter-instruments", "oximeter-producer", + "oxnet", "parse-display", "paste", "pem", @@ -5424,18 +5460,18 @@ dependencies = [ "pq-sys", "pretty_assertions", "progenitor-client", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=fdf0585c6a227a7cfbee4a61a36938c3d77e4712)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", "rand 0.8.5", "rcgen", "ref-cast", "regex", "reqwest", "ring 0.17.8", - "rustls 0.22.2", - "rustls-pemfile 2.1.1", + "rustls 0.22.4", + "rustls-pemfile 2.1.2", "samael", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_json", "serde_urlencoded", @@ -5449,19 +5485,20 @@ dependencies = [ "slog-term", "sp-sim", "steno", - "strum 0.26.1", + "strum", "subprocess", "tempfile", "term", "thiserror", "tokio", "tokio-postgres", + "tokio-util", "tough", "trust-dns-resolver", "tufaceous", "tufaceous-lib", "update-common", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -5473,7 +5510,7 @@ dependencies = [ "camino", "camino-tempfile", "chrono", - "clap 4.5.1", + "clap", "crossterm", "crucible-agent-client", "csv", @@ -5501,6 +5538,7 @@ dependencies = [ "omicron-nexus", "omicron-rpaths", "omicron-test-utils", + "omicron-uuid-kinds", "omicron-workspace-hack", "oximeter-client", "pq-sys", @@ -5511,13 +5549,13 @@ dependencies = [ "sled-agent-client", "slog", "slog-error-chain", - "strum 0.26.1", + "strum", "subprocess", "tabled", - "textwrap 0.16.1", + "textwrap", "tokio", "unicode-width", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -5526,7 +5564,7 @@ version = "0.1.0" dependencies = [ "anyhow", "camino", - "clap 4.5.1", + "clap", "expectorate", "futures", "hex", @@ -5538,7 +5576,7 @@ dependencies = [ "rayon", "reqwest", "ring 0.17.8", - "semver 1.0.22", + "semver 1.0.23", "serde", "sled-hardware", "slog", @@ -5546,12 +5584,12 @@ dependencies = [ "slog-bunyan", "slog-term", "smf", - "strum 0.26.1", + "strum", "swrite", "tar", "thiserror", "tokio", - "toml 0.8.10", + "toml 0.8.13", "walkdir", ] @@ -5570,6 +5608,37 @@ dependencies = [ "thiserror", ] +[[package]] +name = "omicron-releng" +version = "0.1.0" +dependencies = [ + "anyhow", + "camino", + "camino-tempfile", + "cargo_metadata", + "chrono", + "clap", + "fs-err", + "futures", + "hex", + 
"omicron-common", + "omicron-workspace-hack", + "omicron-zone-package", + "once_cell", + "reqwest", + "semver 1.0.23", + "serde", + "sha2", + "shell-words", + "slog", + "slog-async", + "slog-term", + "tar", + "tokio", + "toml 0.8.13", + "tufaceous-lib", +] + [[package]] name = "omicron-rpaths" version = "0.1.0" @@ -5584,7 +5653,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "base64 0.22.0", + "base64 0.22.1", "bootstore", "bootstrap-agent-client", "bytes", @@ -5593,7 +5662,7 @@ dependencies = [ "cancel-safe-futures", "cfg-if", "chrono", - "clap 4.5.1", + "clap", "crucible-agent-client", "derive_more", "display-error-chain", @@ -5627,6 +5696,7 @@ dependencies = [ "omicron-common", "omicron-ddm-admin-client", "omicron-test-utils", + "omicron-uuid-kinds", "omicron-workspace-hack", "once_cell", "openapi-lint", @@ -5635,14 +5705,15 @@ dependencies = [ "oximeter", "oximeter-instruments", "oximeter-producer", + "oxnet", "pretty_assertions", - "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=fdf0585c6a227a7cfbee4a61a36938c3d77e4712)", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", "propolis-mock-server", "rand 0.8.5", "rcgen", "reqwest", "schemars", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_human_bytes", "serde_json", @@ -5658,7 +5729,7 @@ dependencies = [ "slog-term", "smf", "static_assertions", - "strum 0.26.1", + "strum", "subprocess", "tar", "tempfile", @@ -5667,9 +5738,9 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "toml 0.8.10", + "toml 0.8.13", "usdt 0.5.0", - "uuid 1.7.0", + "uuid", "zeroize", "zone 0.3.0", ] @@ -5699,7 +5770,7 @@ dependencies = [ "regex", "reqwest", "ring 0.17.8", - "rustls 0.22.2", + "rustls 0.22.4", "slog", "subprocess", "tar", @@ -5708,7 +5779,7 @@ dependencies = [ "tokio", "tokio-postgres", "usdt 0.5.0", - "uuid 1.7.0", + "uuid", "walkdir", ] @@ -5717,6 +5788,7 @@ name = "omicron-uuid-kinds" version = "0.1.0" dependencies = [ "newtype-uuid", + "paste", "schemars", ] @@ -5731,14 +5803,13 @@ dependencies = [ "bit-set", "bit-vec", "bitflags 1.3.2", - "bitflags 2.4.2", - "bstr 0.2.17", - "bstr 1.9.0", + "bitflags 2.5.0", + "bstr 1.9.1", "byteorder", "bytes", "chrono", "cipher", - "clap 4.5.1", + "clap", "clap_builder", "console", "const-oid", @@ -5754,6 +5825,7 @@ dependencies = [ "elliptic-curve", "ff", "flate2", + "fs-err", "futures", "futures-channel", "futures-core", @@ -5763,19 +5835,21 @@ dependencies = [ "futures-util", "gateway-messages", "generic-array", - "getrandom 0.2.12", + "getrandom 0.2.14", "group", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "hex", "hmac", "hyper 0.14.28", - "indexmap 2.2.5", + "indexmap 2.2.6", "inout", "ipnetwork", "itertools 0.10.5", + "itertools 0.12.1", "lalrpop-util", "lazy_static", "libc", + "linux-raw-sys", "log", "managed", "memchr", @@ -5787,32 +5861,32 @@ dependencies = [ "num-traits", "once_cell", "openapiv3", + "peg-runtime", "pem-rfc7468", "petgraph", "postgres-types", - "ppv-lite86", "predicates", "proc-macro2", - "rand 0.8.5", - "rand_chacha 0.3.1", "regex", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", "reqwest", "ring 0.17.8", "rustix", "schemars", - "semver 1.0.22", + "scopeguard", + "semver 1.0.23", "serde", "serde_json", "sha2", "similar", "slog", + "smallvec 1.13.2", "spin 0.9.8", "string_cache", "subtle", "syn 1.0.109", - "syn 2.0.52", + "syn 2.0.64", "time", "time-macros", "tokio", @@ -5822,18 +5896,17 @@ dependencies 
= [ "toml 0.7.8", "toml_datetime", "toml_edit 0.19.15", - "toml_edit 0.22.6", + "toml_edit 0.22.13", "tracing", "trust-dns-proto", "unicode-bidi", "unicode-normalization", - "usdt 0.3.5", + "usdt 0.5.0", "usdt-impl 0.5.0", - "uuid 1.7.0", + "uuid", "yasna", - "zerocopy 0.7.32", + "zerocopy 0.7.34", "zeroize", - "zip", ] [[package]] @@ -5855,7 +5928,7 @@ dependencies = [ "hex", "reqwest", "ring 0.16.20", - "semver 1.0.22", + "semver 1.0.23", "serde", "serde_derive", "serde_json", @@ -5882,9 +5955,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openapi-lint" @@ -5892,7 +5965,7 @@ version = "0.4.0" source = "git+https://github.com/oxidecomputer/openapi-lint?branch=main#ef442ee4343e97b6d9c217d3e7533962fe7d7236" dependencies = [ "heck 0.4.1", - "indexmap 2.2.5", + "indexmap 2.2.6", "lazy_static", "openapiv3", "regex", @@ -5904,18 +5977,18 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc02deea53ffe807708244e5914f6b099ad7015a207ee24317c22112e17d9c5c" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "serde", "serde_json", ] [[package]] name = "openssl" -version = "0.10.60" +version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", "foreign-types 0.3.2", "libc", @@ -5932,7 +6005,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -5943,9 +6016,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.96" +version = "0.9.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", @@ -5956,7 +6029,7 @@ dependencies = [ [[package]] name = "opte" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" dependencies = [ "cfg-if", "derror-macro", @@ -5974,7 +6047,7 @@ dependencies = [ [[package]] name = "opte-api" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" dependencies = [ "illumos-sys-hdrs", "ipnetwork", @@ -5986,7 +6059,7 @@ dependencies = [ [[package]] name = "opte-ioctl" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = 
"git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" dependencies = [ "libc", "libnet 0.1.0 (git+https://github.com/oxidecomputer/netadm-sys)", @@ -6005,9 +6078,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "oso" -version = "0.27.0" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fceecc04a9e9dcb63a42d937a4249557da8d2695cf83eb5ee78015473ab12ae2" +checksum = "eeabb069616e6a494420f5ab27dbad46efa8dd4b45d30a0302857a7bcdea4293" dependencies = [ "impl-trait-for-tuples", "lazy_static", @@ -6020,9 +6093,9 @@ dependencies = [ [[package]] name = "oso-derive" -version = "0.27.0" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1766857f83748ce5596ab98e1a57d64ccfe3259e71b7b53289c8c32c2cfef9a8" +checksum = "a2f5236d7c60cce1bcd76146bcbc4b2a5fb1234894fb84b1ec751863e8399e9c" dependencies = [ "quote", "syn 1.0.109", @@ -6039,7 +6112,7 @@ name = "oxide-client" version = "0.1.0" dependencies = [ "anyhow", - "base64 0.22.0", + "base64 0.22.1", "chrono", "futures", "http 0.2.12", @@ -6054,13 +6127,13 @@ dependencies = [ "thiserror", "tokio", "trust-dns-resolver", - "uuid 1.7.0", + "uuid", ] [[package]] name = "oxide-vpc" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" +source = "git+https://github.com/oxidecomputer/opte?rev=194a8d1d6443f78d59702a25849607dba33db732#194a8d1d6443f78d59702a25849607dba33db732" dependencies = [ "cfg-if", "illumos-sys-hdrs", @@ -6069,7 +6142,7 @@ dependencies = [ "serde", "smoltcp 0.11.0", "tabwriter", - "zerocopy 0.7.32", + "zerocopy 0.7.34", ] [[package]] @@ -6088,10 +6161,10 @@ dependencies = [ "schemars", "serde", "serde_json", - "strum 0.26.1", + "strum", "thiserror", "trybuild", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -6106,7 +6179,7 @@ dependencies = [ "reqwest", "serde", "slog", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -6116,7 +6189,7 @@ dependencies = [ "anyhow", "camino", "chrono", - "clap 4.5.1", + "clap", "dropshot", "expectorate", "futures", @@ -6141,12 +6214,12 @@ dependencies = [ "slog-async", "slog-dtrace", "slog-term", - "strum 0.26.1", + "strum", "subprocess", "thiserror", "tokio", - "toml 0.8.10", - "uuid 1.7.0", + "toml 0.8.13", + "uuid", ] [[package]] @@ -6154,22 +6227,26 @@ name = "oximeter-db" version = "0.1.0" dependencies = [ "anyhow", + "async-recursion", "async-trait", "bcs", "bytes", "camino", "chrono", - "clap 4.5.1", + "clap", + "crossterm", "dropshot", "expectorate", "futures", "highway", - "indexmap 2.2.5", + "indexmap 2.2.6", "itertools 0.12.1", + "num", "omicron-common", "omicron-test-utils", "omicron-workspace-hack", "oximeter", + "peg", "reedline", "regex", "reqwest", @@ -6183,13 +6260,13 @@ dependencies = [ "slog-term", "sqlformat", "sqlparser", - "strum 0.26.1", + "strum", "tabled", "tempfile", "thiserror", "tokio", "usdt 0.5.0", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -6210,7 +6287,7 @@ dependencies = [ "slog-term", "thiserror", "tokio", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -6220,7 +6297,7 @@ dependencies = [ "omicron-workspace-hack", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -6229,19 +6306,23 @@ version = "0.1.0" dependencies = [ "anyhow", "chrono", - "clap 4.5.1", + "clap", "dropshot", + "internal-dns", "nexus-client", "omicron-common", + "omicron-test-utils", 
"omicron-workspace-hack", "oximeter", "schemars", "serde", + "serde_json", "slog", "slog-dtrace", + "slog-term", "thiserror", "tokio", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -6251,9 +6332,21 @@ dependencies = [ "anyhow", "camino", "chrono", - "clap 4.5.1", + "clap", "omicron-workspace-hack", - "uuid 1.7.0", + "sigpipe", + "uuid", +] + +[[package]] +name = "oxnet" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/oxnet?branch=main#42b4d3c77c7f5f2636cd6c4bbf37ac3eada047e0" +dependencies = [ + "ipnetwork", + "schemars", + "serde", + "serde_json", ] [[package]] @@ -6328,12 +6421,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", - "parking_lot_core 0.9.8", + "parking_lot_core 0.9.10", ] [[package]] @@ -6346,21 +6439,21 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.16", - "smallvec 1.13.1", + "smallvec 1.13.2", "winapi", ] [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.3.5", - "smallvec 1.13.1", - "windows-targets 0.48.5", + "redox_syscall 0.5.1", + "smallvec 1.13.2", + "windows-targets 0.52.5", ] [[package]] @@ -6371,7 +6464,7 @@ checksum = "06af5f9333eb47bd9ba8462d612e37a8328a5cb80b13f0af4de4c3b89f52dee5" dependencies = [ "parse-display-derive", "regex", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -6383,9 +6476,9 @@ dependencies = [ "proc-macro2", "quote", "regex", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", "structmeta 0.3.0", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -6430,9 +6523,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "path-slash" @@ -6471,18 +6564,39 @@ dependencies = [ ] [[package]] -name = "peeking_take_while" -version = "0.1.2" +name = "peg" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +checksum = "8a625d12ad770914cbf7eff6f9314c3ef803bfe364a1b20bc36ddf56673e71e5" +dependencies = [ + "peg-macros", + "peg-runtime", +] + +[[package]] +name = "peg-macros" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f241d42067ed3ab6a4fece1db720838e1418f36d868585a27931f95d6bc03582" +dependencies = [ + "peg-runtime", + "proc-macro2", + "quote", +] + +[[package]] +name = "peg-runtime" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3aeb8f54c078314c2065ee649a7241f46b9d8e418e1a9581ba0546657d7aa3a" [[package]] name = "pem" -version = "3.0.2" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923" +checksum = 
"8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "serde", ] @@ -6503,9 +6617,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.6" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ "memchr", "thiserror", @@ -6514,9 +6628,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.6" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde" +checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" dependencies = [ "pest", "pest_generator", @@ -6524,22 +6638,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.6" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275" +checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "pest_meta" -version = "2.7.6" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d" +checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" dependencies = [ "once_cell", "pest", @@ -6548,12 +6662,12 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.2.5", + "indexmap 2.2.6", "serde", "serde_derive", ] @@ -6587,29 +6701,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -6640,9 +6754,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = 
"d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "plain" @@ -6652,9 +6766,9 @@ checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" [[package]] name = "platforms" -version = "3.0.2" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" +checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" [[package]] name = "plotters" @@ -6686,9 +6800,9 @@ dependencies = [ [[package]] name = "polar-core" -version = "0.27.0" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1b77e852bec994296c8a1dddc231ab3f112bfa0a0399fc8a7fd8bddfb46b4e" +checksum = "b3aa6f61d235de56ccffbca8627377ebe6ff0052a419f67b098f319a5f32e06d" dependencies = [ "indoc 1.0.9", "js-sys", @@ -6713,9 +6827,9 @@ dependencies = [ [[package]] name = "polyval" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", @@ -6730,9 +6844,9 @@ source = "git+https://github.com/oxidecomputer/poptrie?branch=multipath#ca52bef3 [[package]] name = "portable-atomic" -version = "1.4.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" [[package]] name = "portpicker" @@ -6784,7 +6898,7 @@ dependencies = [ "postgres-protocol", "serde", "serde_json", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -6867,12 +6981,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.16" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" +checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -6920,17 +7034,17 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" dependencies = [ "unicode-ident", ] [[package]] name = "progenitor" -version = "0.6.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#90d3282f488a17f9c85e25c26845fef2d92af435" +version = "0.7.0" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#c59c6d64ed2a206bbbc9949abd3457bc0e3810e2" dependencies = [ "progenitor-client", "progenitor-impl", @@ -6940,8 +7054,8 @@ dependencies = [ [[package]] name = "progenitor-client" -version = "0.6.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#90d3282f488a17f9c85e25c26845fef2d92af435" +version = "0.7.0" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#c59c6d64ed2a206bbbc9949abd3457bc0e3810e2" dependencies = [ "bytes", "futures-core", @@ -6954,13 +7068,13 @@ dependencies = [ [[package]] name = "progenitor-impl" -version = "0.6.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#90d3282f488a17f9c85e25c26845fef2d92af435" 
+version = "0.7.0" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#c59c6d64ed2a206bbbc9949abd3457bc0e3810e2" dependencies = [ "getopts", - "heck 0.4.1", + "heck 0.5.0", "http 0.2.12", - "indexmap 2.2.5", + "indexmap 2.2.6", "openapiv3", "proc-macro2", "quote", @@ -6968,7 +7082,7 @@ dependencies = [ "schemars", "serde", "serde_json", - "syn 2.0.52", + "syn 2.0.64", "thiserror", "typify", "unicode-ident", @@ -6976,8 +7090,8 @@ dependencies = [ [[package]] name = "progenitor-macro" -version = "0.6.0" -source = "git+https://github.com/oxidecomputer/progenitor?branch=main#90d3282f488a17f9c85e25c26845fef2d92af435" +version = "0.7.0" +source = "git+https://github.com/oxidecomputer/progenitor?branch=main#c59c6d64ed2a206bbbc9949abd3457bc0e3810e2" dependencies = [ "openapiv3", "proc-macro2", @@ -6988,7 +7102,7 @@ dependencies = [ "serde_json", "serde_tokenstream 0.2.0", "serde_yaml", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -6998,7 +7112,7 @@ source = "git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2 dependencies = [ "anyhow", "bhyve_api 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af)", - "bitflags 2.4.2", + "bitflags 2.5.0", "bitstruct", "byteorder", "dladm", @@ -7013,18 +7127,18 @@ dependencies = [ "serde_arrays", "serde_json", "slog", - "strum 0.26.1", + "strum", "thiserror", "tokio", "usdt 0.5.0", - "uuid 1.7.0", + "uuid", "viona_api", ] [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af#6dceb9ef69c217cb78a2018bbedafbc19f6ec1af" +source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" dependencies = [ "async-trait", "base64 0.21.7", @@ -7039,13 +7153,13 @@ dependencies = [ "thiserror", "tokio", "tokio-tungstenite 0.20.1", - "uuid 1.7.0", + "uuid", ] [[package]] name = "propolis-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=fdf0585c6a227a7cfbee4a61a36938c3d77e4712#fdf0585c6a227a7cfbee4a61a36938c3d77e4712" +source = "git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af#6dceb9ef69c217cb78a2018bbedafbc19f6ec1af" dependencies = [ "async-trait", "base64 0.21.7", @@ -7060,23 +7174,23 @@ dependencies = [ "thiserror", "tokio", "tokio-tungstenite 0.20.1", - "uuid 1.7.0", + "uuid", ] [[package]] name = "propolis-mock-server" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=fdf0585c6a227a7cfbee4a61a36938c3d77e4712#fdf0585c6a227a7cfbee4a61a36938c3d77e4712" +source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" dependencies = [ "anyhow", "atty", "base64 0.21.7", - "clap 4.5.1", + "clap", "dropshot", "futures", "hyper 0.14.28", "progenitor", - "propolis_types 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=fdf0585c6a227a7cfbee4a61a36938c3d77e4712)", + "propolis_types 0.0.0 (git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76)", "rand 0.8.5", "reqwest", "schemars", @@ -7090,7 +7204,7 @@ dependencies = [ "thiserror", "tokio", "tokio-tungstenite 0.20.1", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -7108,7 +7222,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = 
"git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af#6dceb9ef69c217cb78a2018bbedafbc19f6ec1af" +source = "git+https://github.com/oxidecomputer/propolis?rev=6d7ed9a033babc054db9eff5b59dee978d2b0d76#6d7ed9a033babc054db9eff5b59dee978d2b0d76" dependencies = [ "schemars", "serde", @@ -7117,7 +7231,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=fdf0585c6a227a7cfbee4a61a36938c3d77e4712#fdf0585c6a227a7cfbee4a61a36938c3d77e4712" +source = "git+https://github.com/oxidecomputer/propolis?rev=6dceb9ef69c217cb78a2018bbedafbc19f6ec1af#6dceb9ef69c217cb78a2018bbedafbc19f6ec1af" dependencies = [ "schemars", "serde", @@ -7131,13 +7245,13 @@ checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.2", + "bitflags 2.5.0", "lazy_static", "num-traits", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", "rusty-fork", "tempfile", "unarray", @@ -7167,9 +7281,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-xml" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eff6510e86862b57b210fd8cbe8ed3f0d7d600b9c2863cd4549a2e033c66e956" +checksum = "1004a344b30a54e2ee58d66a71b32d2db2feb0a31f9a2d302bf0536f15de2a33" dependencies = [ "memchr", "serde", @@ -7177,9 +7291,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -7191,7 +7305,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "scheduled-thread-pool", ] @@ -7211,19 +7325,6 @@ dependencies = [ "nibble_vec", ] -[[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi", -] - [[package]] name = "rand" version = "0.7.3" @@ -7268,21 +7369,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - [[package]] name = "rand_core" version = "0.5.1" @@ -7298,7 +7384,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.14", ] [[package]] @@ -7330,29 +7416,29 @@ dependencies = [ [[package]] name = "ratatui" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bcb12f8fbf6c62614b0d56eb352af54f6a22410c3b079eb53ee93c7b97dd31d8" +checksum = "a564a852040e82671dc50a37d88f3aa83bbc690dfc6844cfe7a2591620206a80" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cassowary", "compact_str", "crossterm", - "indoc 2.0.3", + "indoc 2.0.5", "itertools 0.12.1", "lru", "paste", "stability", - "strum 0.26.1", + "strum", "unicode-segmentation", "unicode-width", ] [[package]] name = "rayon" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -7380,42 +7466,45 @@ dependencies = [ "yasna", ] -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "reconfigurator-cli" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "camino", "camino-tempfile", - "clap 4.5.1", + "clap", + "dns-service-client", "dropshot", "expectorate", "humantime", - "indexmap 2.2.5", + "indexmap 2.2.6", + "nexus-client", + "nexus-db-queries", + "nexus-reconfigurator-execution", "nexus-reconfigurator-planning", + "nexus-reconfigurator-preparation", + "nexus-test-utils", + "nexus-test-utils-macros", "nexus-types", "omicron-common", + "omicron-nexus", "omicron-rpaths", "omicron-test-utils", + "omicron-uuid-kinds", "omicron-workspace-hack", + "pq-sys", "reedline", - "regex", + "serde", "serde_json", "slog", "slog-error-chain", "subprocess", "swrite", "tabled", - "uuid 1.7.0", + "tokio", + "uuid", ] [[package]] @@ -7429,48 +7518,48 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.3.5" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", ] [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.12", - "redox_syscall 0.2.16", + "getrandom 0.2.14", + "libredox", "thiserror", ] [[package]] name = "reedline" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413a9fa6a5d8c937d3ae1e975bfb6a918bb0b6cdfae6a10416218c837a31b8fc" +checksum = "65ebc241ed0ccea0bbbd775a55a76f0dd9971ef084589dea938751a03ffedc14" dependencies = [ "chrono", "crossterm", - "fd-lock 3.0.13", + "fd-lock", "itertools 0.12.1", "nu-ansi-term", "serde", "strip-ansi-escapes", - "strum 0.25.0", - "strum_macros 0.25.2", + "strum", + "strum_macros 0.26.2", "thiserror", "unicode-segmentation", "unicode-width", @@ -7478,34 +7567,34 @@ dependencies = [ [[package]] 
name = "ref-cast" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acde58d073e9c79da00f2b5b84eed919c8326832648a5b109b3fce1bb1175280" +checksum = "ccf0a6f84d5f1d581da8b41b47ec8600871962f2a528115b542b362d4b744931" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" +checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -7516,13 +7605,13 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -7533,40 +7622,31 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "regress" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06f9a1f7cd8473611ba1a480cf35f9c5cffc2954336ba90a982fdb7e7d7f51e" +checksum = "0eae2a1ebfecc58aff952ef8ccd364329abe627762f5bf09ff42eb9d98522479" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", "memchr", ] [[package]] name = "relative-path" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c707298afce11da2efef2f600116fa93ffa7a032b5d7b628aa17711ec81383ca" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] +checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "reqwest" -version = "0.11.24" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ "base64 0.21.7", "bytes", @@ -7577,7 +7657,7 @@ dependencies = [ "futures-util", "h2", "http 0.2.12", - "http-body 0.4.5", + "http-body 0.4.6", "hyper 0.14.28", "hyper-rustls 0.24.2", "hyper-tls", @@ -7589,8 +7669,8 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.9", - 
"rustls-pemfile 1.0.3", + "rustls 0.21.12", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", @@ -7616,7 +7696,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ - "hostname", + "hostname 0.3.1", "quick-error", ] @@ -7668,7 +7748,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.12", + "getrandom 0.2.14", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -7693,7 +7773,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ "base64 0.21.7", - "bitflags 2.4.2", + "bitflags 2.5.0", "serde", "serde_derive", ] @@ -7711,23 +7791,21 @@ dependencies = [ [[package]] name = "rsa" -version = "0.9.2" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ab43bb47d23c1a631b4b680199a45255dce26fa9ab2fa902581f624ff13e6a8" +checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" dependencies = [ - "byteorder", "const-oid", "digest", "num-bigint-dig", "num-integer", - "num-iter", "num-traits", "pkcs1", "pkcs8", "rand_core 0.6.4", "serde", "sha2", - "signature 2.1.0", + "signature 2.2.0", "spki", "subtle", "zeroize", @@ -7735,9 +7813,9 @@ dependencies = [ [[package]] name = "rstest" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199" +checksum = "9d5316d2a1479eeef1ea21e7f9ddc67c191d497abc8fc3ba2467857abbb68330" dependencies = [ "futures", "futures-timer", @@ -7747,9 +7825,9 @@ dependencies = [ [[package]] name = "rstest_macros" -version = "0.18.2" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" +checksum = "04a9df72cc1f67020b0d63ad9bfe4a323e459ea7eb68e03bd9824db49f9a4c25" dependencies = [ "cfg-if", "glob", @@ -7758,30 +7836,30 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.52", + "syn 2.0.64", "unicode-ident", ] [[package]] name = "rtoolbox" -version = "0.0.1" +version = "0.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034e22c514f5c0cb8a10ff341b9b048b5ceb21591f31c8f44c43b960f9b3524a" +checksum = "c247d24e63230cdb56463ae328478bd5eac8b8faa8c69461a77e8e323afac90e" dependencies = [ "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] name = "russh" -version = "0.42.0" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "394cc2733c5b5ca9f342d9532b78599849633ccabdbf40f1af094cacf4d86b62" +checksum = "1c9534703dc13be1eefc5708618f4c346da8e4f04f260218613f351ed5e94259" dependencies = [ "aes", "aes-gcm", "async-trait", - "bitflags 2.4.2", + "bitflags 2.5.0", "byteorder", "chacha20", "ctr", @@ -7809,9 +7887,9 @@ dependencies = [ [[package]] name = "russh-cryptovec" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fdf036c2216b554053d19d4af45c1722d13b00ac494ea19825daf4beac034e" +checksum = "2b077b6dd8d8c085dac62f7fcc5a83df60c7f7a22d49bfba994f2f4dbf60bc74" dependencies = [ "libc", "winapi", @@ -7819,9 +7897,9 @@ dependencies = [ [[package]] name = "russh-keys" -version = "0.42.0" +version = "0.43.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e98aa03d476f8d2bf6e4525291c1eb8e22f4ae9653d7a5458fd53cb0191c741" +checksum = "aa4a5afa2fab6fd49d0c470a3b75c3c70a4f363c38db32df5ae3b44a3abf5ab9" dependencies = [ "aes", "async-trait", @@ -7858,21 +7936,20 @@ dependencies = [ [[package]] name = "rust-argon2" -version = "1.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5885493fdf0be6cdff808d1533ce878d21cfa49c7086fa00c66355cd9141bfc" +checksum = "9d9848531d60c9cbbcf9d166c885316c24bc0e2a9d3eba0956bb6cbbd79bc6e8" dependencies = [ "base64 0.21.7", "blake2b_simd", - "constant_time_eq 0.3.0", - "crossbeam-utils", + "constant_time_eq", ] [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -7895,29 +7972,29 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.22", + "semver 1.0.23", ] [[package]] name = "rustfmt-wrapper" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed729e3bee08ec2befd593c27e90ca9fdd25efdc83c94c3b82eaef16e4f7406e" +checksum = "f1adc9dfed5cc999077978cc7163b9282c5751c8d39827c4ea8c8c220ca5a440" dependencies = [ "serde", "tempfile", "thiserror", - "toml 0.5.11", + "toml 0.8.13", "toolchain_find", ] [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", @@ -7926,9 +8003,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.9" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring 0.17.8", @@ -7938,14 +8015,14 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.2" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.1", + "rustls-webpki 0.102.4", "subtle", "zeroize", ] @@ -7957,7 +8034,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", - "rustls-pemfile 2.1.1", + "rustls-pemfile 2.1.2", "rustls-pki-types", "schannel", "security-framework", @@ -7965,28 +8042,28 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +checksum = 
"1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ "base64 0.21.7", ] [[package]] name = "rustls-pemfile" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f48172685e6ff52a556baa527774f61fcaa884f59daf3375c62a3f1cd2549dab" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.3.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048a63e5b3ac996d78d402940b5fa47973d2d080c6c6fffa1d0f19c4445310b7" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] name = "rustls-webpki" @@ -8000,9 +8077,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.1" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef4ca26037c909dedb327b48c3327d0ba91d3dd3c4e05dad328f210ffb68e95b" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -8011,9 +8088,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "rusty-doors" @@ -8047,31 +8124,31 @@ dependencies = [ [[package]] name = "rustyline" -version = "13.0.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02a2d683a4ac90aeef5b1013933f6d977bd37d51ff3f4dad829d4931a7e6be86" +checksum = "7803e8936da37efd9b6d4478277f4b2b9bb5cdb37a113e8d63222e58da647e63" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", "clipboard-win", - "fd-lock 4.0.2", + "fd-lock", "home", "libc", "log", "memchr", - "nix 0.27.1", + "nix 0.28.0", "radix_trie", "unicode-segmentation", "unicode-width", "utf8parse", - "winapi", + "windows-sys 0.52.0", ] [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "salty" @@ -8086,10 +8163,11 @@ dependencies = [ [[package]] name = "samael" -version = "0.0.14" -source = "git+https://github.com/oxidecomputer/samael?branch=oxide/omicron#9e609a8f6fa0dd84e3bb8f579f46bd780c8be62b" +version = "0.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5da862a2115c0767681e28309a367dbd0a2366026948aae0272787e582d71eaf" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "bindgen", "chrono", "data-encoding", @@ -8107,7 +8185,7 @@ dependencies = [ "serde", "thiserror", "url", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -8121,11 +8199,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -8134,14 +8212,14 @@ version = "0.2.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ - "parking_lot 0.12.1", + "parking_lot 0.12.2", ] [[package]] name = "schemars" -version = "0.8.16" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a28f4c49489add4ce10783f7911893516f15afe45d015608d41faca6bc4d29" +checksum = "b0218ceea14babe24a4a5836f86ade86c1effbc198164e619194cb5069187e29" dependencies = [ "bytes", "chrono", @@ -8149,20 +8227,19 @@ dependencies = [ "schemars_derive", "serde", "serde_json", - "uuid 0.8.2", - "uuid 1.7.0", + "uuid", ] [[package]] name = "schemars_derive" -version = "0.8.16" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c767fd6fa65d9ccf9cf026122c1b555f2ef9a4f0cea69da4d7dbc3e258d30967" +checksum = "3ed5a1ccce8ff962e31a165d41f6e2a2dd1245099dc4d594f5574a86cd90f4d3" dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 1.0.109", + "syn 2.0.64", ] [[package]] @@ -8188,17 +8265,17 @@ checksum = "7f81c2fde025af7e69b1d1420531c8a8811ca898919db177141a85313b1cb932" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" -dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring 0.17.8", + "untrusted 0.9.0", ] [[package]] @@ -8226,11 +8303,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -8239,9 +8316,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -8255,36 +8332,18 @@ checksum = "d4f410fedcf71af0345d7607d246e7ad15faaadd49d240ee3b24e5dc21a820ac" [[package]] name = "semver" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] -[[package]] -name = "semver-parser" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - [[package]] name = "serde" -version = "1.0.197" +version = "1.0.202" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395" dependencies = [ "serde_derive", ] @@ -8329,24 +8388,24 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.202" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "serde_derive_internals" -version = "0.26.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.64", ] [[package]] @@ -8360,9 +8419,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" dependencies = [ "itoa", "ryu", @@ -8390,20 +8449,20 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.16" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -8428,7 +8487,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -8445,15 +8504,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.6.1" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d167997bd841ec232f5b2b8e0e26606df2e7caa4c31b95ea9ca52b200bd270" +checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.5", + "indexmap 2.2.6", "serde", "serde_derive", "serde_json", @@ -8463,23 +8522,23 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.6.1" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "865f9743393e638991566a8b7a479043c2c8da94a33e0a31f18214c9cae0a64d" +checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" dependencies = [ - "darling 0.20.3", + "darling", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "serde_yaml" -version = "0.9.25" +version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "itoa", "ryu", "serde", @@ 
-8553,9 +8612,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -8580,19 +8639,28 @@ checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" [[package]] name = "signature" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest", "rand_core 0.6.4", ] +[[package]] +name = "sigpipe" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5584bfb3e0d348139d8210285e39f6d2f8a1902ac06de343e06357d1d763d8e6" +dependencies = [ + "libc", +] + [[package]] name = "similar" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32fea41aca09ee824cc9724996433064c89f7777e60762749a4170a14abbfa21" +checksum = "fa42c91313f1d05da9b26f267f931cf178d4aba455b4c4622dd7355eb80c6640" dependencies = [ "bstr 0.2.17", "unicode-segmentation", @@ -8646,16 +8714,18 @@ dependencies = [ "anyhow", "async-trait", "chrono", - "ipnetwork", "omicron-common", + "omicron-uuid-kinds", "omicron-workspace-hack", + "oxnet", "progenitor", "regress", "reqwest", "schemars", "serde", + "serde_json", "slog", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -8675,6 +8745,7 @@ dependencies = [ "macaddr", "omicron-common", "omicron-test-utils", + "omicron-uuid-kinds", "omicron-workspace-hack", "rand 0.8.5", "schemars", @@ -8685,7 +8756,7 @@ dependencies = [ "thiserror", "tofino", "tokio", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -8704,16 +8775,21 @@ dependencies = [ name = "sled-storage" version = "0.1.0" dependencies = [ + "anyhow", "async-trait", "camino", "camino-tempfile", "cfg-if", + "debug-ignore", "derive_more", + "expectorate", + "futures", "glob", "illumos-utils", "key-manager", "omicron-common", "omicron-test-utils", + "omicron-uuid-kinds", "omicron-workspace-hack", "rand 0.8.5", "schemars", @@ -8723,7 +8799,7 @@ dependencies = [ "slog", "thiserror", "tokio", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -8750,7 +8826,7 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcaaf6e68789d3f0411f1e72bc443214ef252a1038b6e344836e50442541f190" dependencies = [ - "hostname", + "hostname 0.3.1", "slog", "slog-json", "time", @@ -8801,7 +8877,7 @@ source = "git+https://github.com/oxidecomputer/slog-error-chain?branch=main#15f6 dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -8840,11 +8916,11 @@ dependencies = [ [[package]] name = "slog-term" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87d29185c55b7b258b4f120eab00f48557d4d9bc814f41713f449d35b0f8977c" +checksum = "b6e022d0b998abfe5c3782c1f03551a596269450ccd677ea51c56f8b214610e8" dependencies = [ - "atty", + "is-terminal", "slog", "term", "thread_local", @@ -8862,21 +8938,21 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smawk" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f67ad224767faa3c7d8b6d91985b78e70a1324408abcb1cfcc2be4c06bc06043" +checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" [[package]] name = "smf" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6015a9bbf269b84c928dc68e11680bbdfa6f065f1c6d5383ec134f55bab188b" +checksum = "4a491bfc47dffa70a3c267bc379e9de9f4b0a7195e474a94498189b177f8d18c" dependencies = [ "thiserror", ] @@ -8890,7 +8966,7 @@ dependencies = [ "bitflags 1.3.2", "byteorder", "cfg-if", - "heapless 0.7.16", + "heapless 0.7.17", "managed", ] @@ -8910,11 +8986,10 @@ dependencies = [ [[package]] name = "snafu" -version = "0.7.5" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4de37ad025c587a29e8f3f5605c00f70b98715ef90b9061a815b9e59e9042d6" +checksum = "75976f4748ab44f6e5332102be424e7c2dc18daeaf7e725f2040c3ebb133512e" dependencies = [ - "doc-comment", "futures-core", "pin-project", "snafu-derive", @@ -8922,21 +8997,21 @@ dependencies = [ [[package]] name = "snafu-derive" -version = "0.7.5" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf" +checksum = "b4b19911debfb8c2fb1107bc6cb2d61868aaf53a988449213959bb1b5b1ed95f" dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.64", ] [[package]] name = "socket2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -8944,12 +9019,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -8958,7 +9033,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.5.1", + "clap", "dropshot", "futures", "gateway-messages", @@ -8972,7 +9047,7 @@ dependencies = [ "sprockets-rot", "thiserror", "tokio", - "toml 0.8.10", + "toml 0.8.13", ] [[package]] @@ -8992,9 +9067,9 @@ dependencies = [ [[package]] name = "spki" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der", @@ -9040,9 +9115,9 @@ dependencies = [ [[package]] name = "sqlparser" -version = "0.43.1" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f95c4bae5aba7cd30bd506f7140026ade63cff5afd778af8854026f9606bf5d4" +checksum = "f7bbffee862a796d67959a89859d6b1046bb5016d63e23835ad0da182777bbe0" dependencies = [ "log", "sqlparser_derive", @@ -9056,17 +9131,17 @@ checksum = 
"01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "stability" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd1b177894da2a2d9120208c3386066af06a488255caabc5de8ddca22dbc3ce" +checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ "quote", - "syn 1.0.109", + "syn 2.0.64", ] [[package]] @@ -9100,7 +9175,7 @@ dependencies = [ "slog", "thiserror", "tokio", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -9111,7 +9186,7 @@ checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" dependencies = [ "new_debug_unreachable", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "phf_shared 0.10.0", "precomputed-hash", "serde", @@ -9119,10 +9194,11 @@ dependencies = [ [[package]] name = "stringprep" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3737bde7edce97102e0e2b15365bf7a20bfdb5f60f4f9e8d7004258a51a8da" +checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" dependencies = [ + "finl_unicode", "unicode-bidi", "unicode-normalization", ] @@ -9138,21 +9214,9 @@ dependencies = [ [[package]] name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "strsim" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "structmeta" @@ -9163,7 +9227,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive 0.2.0", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -9175,7 +9239,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive 0.3.0", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -9186,7 +9250,7 @@ checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -9197,46 +9261,16 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", -] - -[[package]] -name = "structopt" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" -dependencies = [ - "clap 2.34.0", - "lazy_static", - "structopt-derive", -] - -[[package]] -name = "structopt-derive" -version = "0.4.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" -dependencies = [ - "heck 0.3.3", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", + "syn 2.0.64", ] [[package]] name = "strum" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" - -[[package]] -name = "strum" -version = "0.26.1" +version = "0.26.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "723b93e8addf9aa965ebe2d11da6d7540fa2283fcea14b3371ff055f7ba13f5f" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" dependencies = [ - "strum_macros 0.26.1", + "strum_macros 0.26.2", ] [[package]] @@ -9254,28 +9288,28 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.25.2" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8d03b598d3d0fff69bf533ee3ef19b8eeb342729596df84bcc7e1f96ec4059" +checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", "proc-macro2", "quote", "rustversion", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "strum_macros" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" dependencies = [ "heck 0.4.1", "proc-macro2", "quote", "rustversion", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -9322,9 +9356,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.52" +version = "2.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +checksum = "7ad3dee41f36859875573074334c200d1add8e4a87bb37113ebd31d926b7b11f" dependencies = [ "proc-macro2", "quote", @@ -9428,9 +9462,9 @@ dependencies = [ [[package]] name = "target-lexicon" -version = "0.12.13" +version = "0.12.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69758bda2e78f098e4ccb393021a0963bb3442eac05f135c30f61b7370bbafae" +checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" [[package]] name = "target-spec" @@ -9444,21 +9478,11 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "tempdir" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" -dependencies = [ - "rand 0.4.6", - "remove_dir_all", -] - [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", @@ -9479,9 +9503,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.2.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -9520,16 +9544,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta 0.2.0", - "syn 2.0.52", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", + "syn 2.0.64", ] [[package]] @@ -9545,22 +9560,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = 
"579e9083ca58dd9dcf91a9923bb9054071b9ebbd800b342194c9feb0ee89fc18" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "e2470041c06ec3ac1ab38d0356a6119054dedaea53e12fbefc0de730a1c08524" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -9585,20 +9600,19 @@ dependencies = [ [[package]] name = "thread-id" -version = "4.2.0" +version = "4.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79474f573561cdc4871a0de34a51c92f7f5a56039113fbb5b9c9f96bdb756669" +checksum = "f0ec81c46e9eb50deaa257be2f148adf052d1fb7701cfd55ccfab2525280b70b" dependencies = [ "libc", - "redox_syscall 0.2.16", "winapi", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -9615,9 +9629,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -9638,9 +9652,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -9680,6 +9694,27 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" +[[package]] +name = "tls_codec" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e78c9c330f8c85b2bae7c8368f2739157db9991235123aa1b15ef9502bfb6a" +dependencies = [ + "tls_codec_derive", + "zeroize", +] + +[[package]] +name = "tls_codec_derive" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9ef545650e79f30233c0003bcc2504d7efac6dad25fca40744de773fe2049c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.64", +] + [[package]] name = "tlvc" version = "0.3.1" @@ -9687,57 +9722,55 @@ source = "git+https://github.com/oxidecomputer/tlvc.git?branch=main#e644a21a7ca9 dependencies = [ "byteorder", "crc", - "zerocopy 0.6.4", + "zerocopy 0.6.6", ] [[package]] name = "tlvc" version = "0.3.1" -source = "git+https://github.com/oxidecomputer/tlvc.git#e644a21a7ca973ed31499106ea926bd63ebccc6f" +source = "git+https://github.com/oxidecomputer/tlvc#e644a21a7ca973ed31499106ea926bd63ebccc6f" dependencies = [ "byteorder", "crc", - "zerocopy 0.6.4", + "zerocopy 0.6.6", ] [[package]] name = "tlvc-text" version = "0.3.0" -source = "git+https://github.com/oxidecomputer/tlvc.git#e644a21a7ca973ed31499106ea926bd63ebccc6f" +source = "git+https://github.com/oxidecomputer/tlvc#e644a21a7ca973ed31499106ea926bd63ebccc6f" dependencies = [ "ron 0.8.1", 
"serde", - "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc.git)", - "zerocopy 0.6.4", + "tlvc 0.3.1 (git+https://github.com/oxidecomputer/tlvc)", + "zerocopy 0.6.6", ] [[package]] name = "tofino" version = "0.1.0" -source = "git+http://github.com/oxidecomputer/tofino?branch=main#8283f8021068f055484b653f0cc6b4d5c0979dc1" +source = "git+http://github.com/oxidecomputer/tofino?branch=main#1b66b89c3727d2191082df057b068ec52560e334" dependencies = [ "anyhow", "cc", - "chrono", "illumos-devinfo", - "structopt", ] [[package]] name = "tokio" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", "libc", "mio", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] @@ -9750,7 +9783,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -9776,14 +9809,14 @@ dependencies = [ "futures-channel", "futures-util", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "percent-encoding", "phf", "pin-project-lite", "postgres-protocol", "postgres-types", "rand 0.8.5", - "socket2 0.5.5", + "socket2 0.5.7", "tokio", "tokio-util", "whoami", @@ -9795,7 +9828,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.9", + "rustls 0.21.12", "tokio", ] @@ -9805,16 +9838,16 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - "rustls 0.22.2", + "rustls 0.22.4", "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", "pin-project-lite", @@ -9847,16 +9880,15 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] @@ -9882,21 +9914,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.10" +version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" +checksum = "a4e43f8cc456c9704c851ae29c67e17ef65d2c30017c17a9765b89c382dc8bba" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.6", + "toml_edit 0.22.13", ] [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = 
"4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] @@ -9907,36 +9939,36 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow 0.5.15", + "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.6" +version = "0.22.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1b5fd4128cc8d3e0cb74d4ed9a9cc7c7284becd4df68f5f940e1ad123606f6" +checksum = "c127785850e8c20836d49732ae6abfa47616e60bf9d9f57c43c250361a9db96c" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.1", + "winnow 0.6.8", ] [[package]] name = "toolchain_find" -version = "0.2.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e85654a10e7a07a47c6f19d93818f3f343e22927f2fa280c84f7c8042743413" +checksum = "ebc8c9a7f0a2966e1acdaf0461023d0b01471eeead645370cf4c3f5cff153f2a" dependencies = [ "home", - "lazy_static", + "once_cell", "regex", - "semver 0.11.0", + "semver 1.0.23", "walkdir", ] @@ -9948,9 +9980,9 @@ checksum = "ea68304e134ecd095ac6c3574494fc62b909f416c4fca77e440530221e549d3d" [[package]] name = "tough" -version = "0.16.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49455926f64001de53ef047c2053e2f17440e412b8b1e958d4ad8a6008db7128" +checksum = "b8d7a87d51ca5a113542e1b9f5ee2b14b6864bf7f34d103740086fa9c3d57d3b" dependencies = [ "async-recursion", "async-trait", @@ -10028,7 +10060,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -10077,7 +10109,7 @@ dependencies = [ "ipnet", "lazy_static", "rand 0.8.5", - "smallvec 1.13.1", + "smallvec 1.13.2", "thiserror", "tinyvec", "tokio", @@ -10096,9 +10128,9 @@ dependencies = [ "ipconfig", "lazy_static", "lru-cache", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "resolv-conf", - "smallvec 1.13.1", + "smallvec 1.13.2", "thiserror", "tokio", "tracing", @@ -10129,23 +10161,22 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.89" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a9d3ba662913483d6722303f619e75ea10b7855b0f8e0d72799cf8621bb488f" +checksum = "33a5f13f11071020bb12de7a16b925d2d58636175c20c11dc5f96cb64bb6c9b3" dependencies = [ - "basic-toml", "glob", - "once_cell", "serde", "serde_derive", "serde_json", "termcolor", + "toml 0.8.13", ] [[package]] @@ -10156,7 +10187,7 @@ dependencies = [ "assert_cmd", "camino", "chrono", - "clap 4.5.1", + "clap", "console", "datatest-stable", "fs-err", @@ -10205,7 +10236,7 @@ dependencies = [ "slog", "tar", "tokio", - "toml 0.8.10", + "toml 0.8.13", "tough", "url", "zip", @@ -10213,9 +10244,9 @@ dependencies = [ [[package]] name = "tui-tree-widget" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5c317bb061f42d943a2eb118b5de0ee98fc2443f0631e54b24a19de014a28810" +checksum = "fb0c6f924587e719c50b8f83485afbe4d4c16edca6b641d5d9a3204edeba5cf0" dependencies = [ "ratatui", "unicode-width", @@ -10249,7 +10280,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http 1.0.0", + "http 1.1.0", "httparse", "log", "rand 0.8.5", @@ -10261,20 +10292,32 @@ dependencies = [ [[package]] name = "typed-path" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a90726108dab678edab76459751e1cc7c597c3484a6384d6423191255fa641b" +checksum = "668404597c2c687647f6f8934f97c280fd500db28557f52b07c56b92d3dc500a" + +[[package]] +name = "typed-rng" +version = "0.1.0" +dependencies = [ + "newtype-uuid", + "omicron-workspace-hack", + "rand 0.8.5", + "rand_core 0.6.4", + "rand_seeder", + "uuid", +] [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "typify" -version = "0.0.16" -source = "git+https://github.com/oxidecomputer/typify#c5ebe0a2bf08ad8a743be5b593b1a8526a3fff4a" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/typify#ad1296f6ceb998ae8c247d999b7828703a232bdd" dependencies = [ "typify-impl", "typify-macro", @@ -10282,33 +10325,36 @@ dependencies = [ [[package]] name = "typify-impl" -version = "0.0.16" -source = "git+https://github.com/oxidecomputer/typify#c5ebe0a2bf08ad8a743be5b593b1a8526a3fff4a" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/typify#ad1296f6ceb998ae8c247d999b7828703a232bdd" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "log", "proc-macro2", "quote", "regress", "schemars", + "semver 1.0.23", + "serde", "serde_json", - "syn 2.0.52", + "syn 2.0.64", "thiserror", "unicode-ident", ] [[package]] name = "typify-macro" -version = "0.0.16" -source = "git+https://github.com/oxidecomputer/typify#c5ebe0a2bf08ad8a743be5b593b1a8526a3fff4a" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/typify#ad1296f6ceb998ae8c247d999b7828703a232bdd" dependencies = [ "proc-macro2", "quote", "schemars", + "semver 1.0.23", "serde", "serde_json", "serde_tokenstream 0.2.0", - "syn 2.0.52", + "syn 2.0.64", "typify-impl", ] @@ -10353,24 +10399,24 @@ checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6" [[package]] name = "unicode-xid" @@ -10396,9 +10442,9 @@ dependencies = [ [[package]] name = 
"unsafe-libyaml" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" [[package]] name = "untrusted" @@ -10421,7 +10467,7 @@ dependencies = [ "camino", "camino-tempfile", "chrono", - "clap 4.5.1", + "clap", "debug-ignore", "display-error-chain", "dropshot", @@ -10452,12 +10498,12 @@ dependencies = [ "camino", "camino-tempfile", "cancel-safe-futures", - "clap 4.5.1", + "clap", "debug-ignore", "derive-where", "either", "futures", - "indexmap 2.2.5", + "indexmap 2.2.6", "indicatif", "libsw", "linear-map", @@ -10475,7 +10521,7 @@ dependencies = [ "tokio", "tokio-stream", "unicode-width", - "uuid 1.7.0", + "uuid", ] [[package]] @@ -10542,7 +10588,7 @@ dependencies = [ "proc-macro2", "quote", "serde_tokenstream 0.2.0", - "syn 2.0.52", + "syn 2.0.64", "usdt-impl 0.5.0", ] @@ -10580,7 +10626,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.52", + "syn 2.0.64", "thiserror", "thread-id", "version_check", @@ -10610,7 +10656,7 @@ dependencies = [ "proc-macro2", "quote", "serde_tokenstream 0.2.0", - "syn 2.0.52", + "syn 2.0.64", "usdt-impl 0.5.0", ] @@ -10628,18 +10674,22 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "0.8.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +dependencies = [ + "getrandom 0.2.14", + "serde", +] [[package]] -name = "uuid" -version = "1.7.0" +name = "uzers" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "76d283dc7e8c901e79e32d077866eaf599156cbf427fffa8289aecc52c5c3f63" dependencies = [ - "getrandom 0.2.12", - "serde", + "libc", + "log", ] [[package]] @@ -10648,12 +10698,6 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - [[package]] name = "version_check" version = "0.9.4" @@ -10735,9 +10779,9 @@ dependencies = [ [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -10772,9 +10816,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -10782,24 +10826,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.92" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -10809,9 +10853,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10819,22 +10863,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-streams" @@ -10851,9 +10895,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -10861,26 +10905,27 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.2" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "which" -version = "4.4.0" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ "either", - "libc", + "home", "once_cell", + "rustix", ] [[package]] name = "whoami" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fec781d48b41f8163426ed18e8fc2864c12937df9ce54c88ede7bd47270893e" +checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" dependencies = [ "redox_syscall 0.4.1", "wasite", @@ -10896,13 +10941,15 @@ dependencies = [ "buf-list", "camino", "ciborium", - "clap 4.5.1", + "clap", "crossterm", + "expectorate", "futures", "humantime", - "indexmap 2.2.5", + "indexmap 2.2.6", "indicatif", "itertools 0.12.1", + 
"maplit", "omicron-common", "omicron-passwords", "omicron-workspace-hack", @@ -10915,17 +10962,18 @@ dependencies = [ "serde", "serde_json", "shell-words", + "sled-hardware-types", "slog", "slog-async", "slog-envlogger", "slog-term", "supports-color", "tempfile", - "textwrap 0.16.1", + "textwrap", "tokio", "tokio-util", - "toml 0.8.10", - "toml_edit 0.22.6", + "toml 0.8.13", + "toml_edit 0.22.13", "tui-tree-widget", "unicode-width", "update-engine", @@ -10940,12 +10988,18 @@ version = "0.1.0" dependencies = [ "anyhow", "gateway-client", + "maplit", "omicron-common", "omicron-workspace-hack", + "owo-colors", + "oxnet", "schemars", "serde", "serde_json", + "sha2", + "sled-hardware-types", "thiserror", + "toml 0.8.13", "update-engine", ] @@ -10957,7 +11011,7 @@ dependencies = [ "bytes", "camino", "ciborium", - "clap 4.5.1", + "clap", "crossterm", "omicron-workspace-hack", "reedline", @@ -10976,13 +11030,13 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "base64 0.22.0", + "base64 0.22.1", "bootstrap-agent-client", "buf-list", "bytes", "camino", "camino-tempfile", - "clap 4.5.1", + "clap", "debug-ignore", "display-error-chain", "dpd-client", @@ -11006,7 +11060,6 @@ dependencies = [ "installinator-artifactd", "installinator-common", "internal-dns", - "ipnetwork", "itertools 0.12.1", "maplit", "omicron-certificates", @@ -11015,8 +11068,10 @@ dependencies = [ "omicron-passwords", "omicron-test-utils", "omicron-workspace-hack", + "once_cell", "openapi-lint", "openapiv3", + "oxnet", "rand 0.8.5", "reqwest", "schemars", @@ -11032,14 +11087,14 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "toml 0.8.10", + "toml 0.8.13", "tough", "trust-dns-resolver", "tufaceous", "tufaceous-lib", "update-common", "update-engine", - "uuid 1.7.0", + "uuid", "wicket", "wicket-common", "wicketd-client", @@ -11051,7 +11106,7 @@ version = "0.1.0" dependencies = [ "chrono", "installinator-common", - "ipnetwork", + "omicron-common", "omicron-workspace-hack", "progenitor", "regress", @@ -11059,17 +11114,18 @@ dependencies = [ "schemars", "serde", "serde_json", + "sled-hardware-types", "slog", "update-engine", - "uuid 1.7.0", + "uuid", "wicket-common", ] [[package]] name = "widestring" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" @@ -11089,11 +11145,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -11104,11 +11160,21 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-targets 0.48.5", + "windows-core", + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-core" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.5", ] [[package]] @@ -11126,7 +11192,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.5", ] [[package]] @@ -11146,17 +11212,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -11167,9 +11234,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -11179,9 +11246,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -11191,9 +11258,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -11203,9 +11276,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -11215,9 +11288,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -11227,9 +11300,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -11239,24 +11312,24 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" -version = "0.5.15" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" dependencies = [ "memchr", ] [[package]] name = "winnow" -version = "0.6.1" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d90f4e0f530c4c69f62b80d839e9ef3855edc9cba471a160c4d692deed62b401" +checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" dependencies = [ "memchr", ] @@ -11282,22 +11355,25 @@ dependencies = [ [[package]] name = "x509-cert" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25eefca1d99701da3a57feb07e5079fc62abba059fc139e98c13bbb250f3ef29" +checksum = "1301e935010a701ae5f8655edc0ad17c44bad3ac5ce8c39185f75453b720ae94" dependencies = [ "const-oid", "der", "spki", + "tls_codec", ] [[package]] name = "xattr" -version = "1.0.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4686009f71ff3e5c4dbcf1a282d0a44db3f021ba69350cd42086b3e5f1c6985" +checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f" dependencies = [ "libc", + "linux-raw-sys", + "rustix", ] [[package]] @@ -11308,11 +11384,12 @@ dependencies = [ "camino", "cargo_metadata", "cargo_toml", - "clap 4.5.1", + "clap", "fs-err", + "macaddr", "serde", "swrite", - "toml 0.8.10", + "toml 0.8.13", ] [[package]] @@ -11334,9 +11411,9 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6580539ad917b7c026220c4b3f2c08d52ce54d6ce0dc491e66002e35388fab46" +checksum = "da091bab2bd35db397c46f5b81748b56f28f8fda837087fab9b6b07b6d66e3f1" dependencies = [ "byteorder", "zerocopy-derive 0.2.0", @@ -11344,22 +11421,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.6.4" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20707b61725734c595e840fb3704378a0cd2b9c74cc9e6e20724838fc6a1e2f9" +checksum = "854e949ac82d619ee9a14c66a1b674ac730422372ccb759ce0c39cabcf2bf8e6" dependencies = [ "byteorder", - "zerocopy-derive 0.6.4", + "zerocopy-derive 0.6.6", ] [[package]] name = "zerocopy" 
-version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "byteorder", - "zerocopy-derive 0.7.32", + "zerocopy-derive 0.7.34", ] [[package]] @@ -11375,24 +11452,24 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.6.4" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56097d5b91d711293a42be9289403896b68654625021732067eac7a4ca388a1f" +checksum = "125139de3f6b9d625c39e2efdd73d41bdac468ccd556556440e322be0e1bbd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -11412,7 +11489,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.64", ] [[package]] @@ -11451,17 +11528,18 @@ dependencies = [ ] [[package]] -name = "zone-network-setup" +name = "zone-setup" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.1", + "clap", "dropshot", "illumos-utils", "omicron-common", "omicron-workspace-hack", "slog", "tokio", + "uzers", "zone 0.3.0", ] diff --git a/Cargo.toml b/Cargo.toml index 425f3aeab2..a350f59f0a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,6 @@ members = [ "api_identity", "bootstore", - "caboose-util", "certificates", "clients/bootstrap-agent-client", "clients/ddm-admin-client", @@ -21,6 +20,7 @@ members = [ "dev-tools/omicron-dev", "dev-tools/oxlog", "dev-tools/reconfigurator-cli", + "dev-tools/releng", "dev-tools/xtask", "dns-server", "end-to-end-tests", @@ -44,6 +44,7 @@ members = [ "nexus/defaults", "nexus/inventory", "nexus/macros-common", + "nexus/metrics-producer-gc", "nexus/networking", "nexus/reconfigurator/execution", "nexus/reconfigurator/planning", @@ -69,6 +70,7 @@ members = [ "test-utils", "tufaceous-lib", "tufaceous", + "typed-rng", "update-common", "update-engine", "uuid-kinds", @@ -77,12 +79,11 @@ members = [ "wicket", "wicketd", "workspace-hack", - "zone-network-setup", + "zone-setup", ] default-members = [ "bootstore", - "caboose-util", "certificates", "clients/bootstrap-agent-client", "clients/ddm-admin-client", @@ -101,6 +102,7 @@ default-members = [ "dev-tools/omicron-dev", "dev-tools/oxlog", "dev-tools/reconfigurator-cli", + "dev-tools/releng", # Do not include xtask in the list of default members, because this causes # hakari to not work as well and build times to be longer. # See omicron#4392. @@ -120,6 +122,7 @@ default-members = [ "nexus-config", "nexus/authz-macros", "nexus/macros-common", + "nexus/metrics-producer-gc", "nexus/networking", "nexus/db-macros", "nexus/db-model", @@ -147,6 +150,7 @@ default-members = [ "test-utils", "tufaceous-lib", "tufaceous", + "typed-rng", "update-common", "update-engine", "uuid-kinds", @@ -154,23 +158,65 @@ default-members = [ "wicket-dbg", "wicket", "wicketd", - "zone-network-setup", + "zone-setup", ] resolver = "2" +# +# Tree-wide lint configuration. 
+# https://doc.rust-lang.org/stable/cargo/reference/manifest.html#the-lints-section +# +# For a list of Clippy lints, see +# https://rust-lang.github.io/rust-clippy/master. +# +[workspace.lints.clippy] +# Clippy's style nits are useful, but not worth keeping in CI. +style = { level = "allow", priority = -1 } +# But continue to warn on anything in the "disallowed_" namespace. +disallowed_macros = "warn" +disallowed_methods = "warn" +disallowed_names = "warn" +disallowed_script_idents = "warn" +disallowed_types = "warn" +# Warn on some more style lints that are relatively stable and make sense. +iter_cloned_collect = "warn" +iter_next_slice = "warn" +iter_nth = "warn" +iter_nth_zero = "warn" +iter_skip_next = "warn" +len_zero = "warn" +redundant_field_names = "warn" +# `declare_interior_mutable_const` is classified as a style lint, but it can +# identify real bugs (e.g., declaring a `const Atomic` and using it like +# a `static Atomic`). However, it is also subject to false positives (e.g., +# idiomatically declaring a static array of atomics uses `const Atomic`). We +# warn on this to catch the former, and expect any uses of the latter to allow +# this locally. +# +# Note: any const value with a type containing a `bytes::Bytes` hits this lint, +# and you should `#![allow]` it for now. This is most likely to be seen with +# `http::header::{HeaderName, HeaderValue}`. This is a Clippy bug which will be +# fixed in the Rust 1.80 toolchain (rust-lang/rust-clippy#12691). +declare_interior_mutable_const = "warn" +# Also warn on casts, preferring explicit conversions instead. +# +# We'd like to warn on lossy casts in the future, but lossless casts are the +# easiest ones to convert over. +cast_lossless = "warn" + [workspace.dependencies] anyhow = "1.0" -anstyle = "1.0.6" +anstyle = "1.0.7" api_identity = { path = "api_identity" } approx = "0.5.1" assert_matches = "1.5.0" assert_cmd = "2.0.14" async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", rev = "ed7ab5ef0513ba303d33efd41d3e9e381169d59b" } -async-trait = "0.1.77" +async-trait = "0.1.80" atomicwrites = "0.4.3" authz-macros = { path = "nexus/authz-macros" } backoff = { version = "0.4.0", features = [ "tokio" ] } -base64 = "0.22.0" +base64 = "0.22.1" bb8 = "0.8.3" bcs = "0.1.6" bincode = "1.3.3" @@ -178,10 +224,11 @@ bootstore = { path = "bootstore" } bootstrap-agent-client = { path = "clients/bootstrap-agent-client" } buf-list = { version = "1.0.3", features = ["tokio1"] } byteorder = "1.5.0" -bytes = "1.5.0" +bytes = "1.6.0" camino = { version = "1.1", features = ["serde1"] } camino-tempfile = "1.1.1" cancel-safe-futures = "0.1.5" +cargo_metadata = "0.18.1" chacha20poly1305 = "0.10.1" ciborium = "0.2.2" cfg-if = "1.0" @@ -193,30 +240,30 @@ cookie = "0.18" criterion = { version = "0.5.1", features = [ "async_tokio" ] } crossbeam = "0.8" crossterm = { version = "0.27.0", features = ["event-stream"] } -crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "16f16478f4af1502b25ddcd79d307b3f116f13f6" } -crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev = "16f16478f4af1502b25ddcd79d307b3f116f13f6" } -crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "16f16478f4af1502b25ddcd79d307b3f116f13f6" } +crucible-agent-client = { git = "https://github.com/oxidecomputer/crucible", rev = "8c6d485110ecfae5409575246b986a145c386dc4" } +crucible-pantry-client = { git = "https://github.com/oxidecomputer/crucible", rev =
"8c6d485110ecfae5409575246b986a145c386dc4" } +crucible-smf = { git = "https://github.com/oxidecomputer/crucible", rev = "8c6d485110ecfae5409575246b986a145c386dc4" } csv = "1.3.0" curve25519-dalek = "4" -datatest-stable = "0.2.3" +datatest-stable = "0.2.9" display-error-chain = "0.2.0" omicron-ddm-admin-client = { path = "clients/ddm-admin-client" } db-macros = { path = "nexus/db-macros" } debug-ignore = "1.0.5" derive_more = "0.99.17" derive-where = "1.2.7" -diesel = { version = "2.1.4", features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } +diesel = { version = "2.1.6", features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } diesel-dtrace = { git = "https://github.com/oxidecomputer/diesel-dtrace", branch = "main" } dns-server = { path = "dns-server" } dns-service-client = { path = "clients/dns-service-client" } dpd-client = { path = "clients/dpd-client" } dropshot = { git = "https://github.com/oxidecomputer/dropshot", branch = "main", features = [ "usdt-probes" ] } dyn-clone = "1.0.17" -either = "1.10.0" +either = "1.11.0" expectorate = "1.1.0" fatfs = "0.3.6" filetime = "0.2.23" -flate2 = "1.0.28" +flate2 = "1.0.30" flume = "0.11.0" foreign-types = "0.3.2" fs-err = "2.11.0" @@ -229,7 +276,7 @@ gethostname = "0.4.3" glob = "0.3.1" guppy = "0.17.5" headers = "0.3.9" -heck = "0.4" +heck = "0.5" hex = "0.4.3" hex-literal = "0.4.1" highway = "1.1.0" @@ -242,7 +289,7 @@ hyper = "0.14" hyper-rustls = "0.26.0" hyper-staticfile = "0.9.5" illumos-utils = { path = "illumos-utils" } -indexmap = "2.2.5" +indexmap = "2.2.6" indicatif = { version = "0.17.8", features = ["rayon"] } installinator = { path = "installinator" } installinator-artifactd = { path = "installinator-artifactd" } @@ -253,7 +300,6 @@ ipcc = { path = "ipcc" } ipnet = "2.9" itertools = "0.12.1" internet-checksum = "0.2" -ipcc-key-value = { path = "ipcc-key-value" } ipnetwork = { version = "0.20", features = ["schemars"] } ispf = { git = "https://github.com/oxidecomputer/ispf" } key-manager = { path = "key-manager" } @@ -264,11 +310,10 @@ libnvme = { git = "https://github.com/oxidecomputer/libnvme", rev = "6fffcc81d2c linear-map = "1.2.0" macaddr = { version = "1.0.1", features = ["serde_std"] } maplit = "1.0.2" -mime_guess = "2.0.4" mockall = "0.12" newtype_derive = "0.1.6" -mg-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "de065a84831e66c829603d9a098e237e8f5faaa1" } -ddm-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "de065a84831e66c829603d9a098e237e8f5faaa1" } +mg-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "025389ff39d594bf2b815377e2c1dc4dd23b1f96" } +ddm-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "025389ff39d594bf2b815377e2c1dc4dd23b1f96" } multimap = "0.10.0" nexus-client = { path = "clients/nexus-client" } nexus-config = { path = "nexus-config" } @@ -277,6 +322,7 @@ nexus-db-queries = { path = "nexus/db-queries" } nexus-defaults = { path = "nexus/defaults" } nexus-inventory = { path = "nexus/inventory" } nexus-macros-common = { path = "nexus/macros-common" } +nexus-metrics-producer-gc = { path = "nexus/metrics-producer-gc" } nexus-networking = { path = "nexus/networking" } nexus-reconfigurator-execution = { path = "nexus/reconfigurator/execution" } nexus-reconfigurator-planning = { path = "nexus/reconfigurator/planning" } @@ -285,30 +331,31 @@ omicron-certificates = { path = "certificates" } omicron-passwords = { path = "passwords" } 
omicron-workspace-hack = "0.1.0" oxlog = { path = "dev-tools/oxlog" } +oxnet = { git = "https://github.com/oxidecomputer/oxnet", branch = "main" } nexus-test-interface = { path = "nexus/test-interface" } nexus-test-utils-macros = { path = "nexus/test-utils-macros" } nexus-test-utils = { path = "nexus/test-utils" } nexus-types = { path = "nexus/types" } num-integer = "0.1.46" -num = { version = "0.4.1", default-features = false, features = [ "libm" ] } +num = { version = "0.4.3", default-features = false, features = [ "libm" ] } omicron-common = { path = "common" } omicron-gateway = { path = "gateway" } omicron-nexus = { path = "nexus" } +omicron-omdb = { path = "dev-tools/omdb" } omicron-package = { path = "package" } omicron-rpaths = { path = "rpaths" } omicron-sled-agent = { path = "sled-agent" } omicron-test-utils = { path = "test-utils" } omicron-zone-package = "0.11.0" oxide-client = { path = "clients/oxide-client" } -oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "7ee353a470ea59529ee1b34729681da887aa88ce", features = [ "api", "std" ] } +oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "194a8d1d6443f78d59702a25849607dba33db732", features = [ "api", "std" ] } once_cell = "1.19.0" openapi-lint = { git = "https://github.com/oxidecomputer/openapi-lint", branch = "main" } openapiv3 = "2.0.0" # must match samael's crate! openssl = "0.10" openssl-sys = "0.9" -openssl-probe = "0.1.5" -opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "7ee353a470ea59529ee1b34729681da887aa88ce" } +opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "194a8d1d6443f78d59702a25849607dba33db732" } oso = "0.27" owo-colors = "4.0.0" oximeter = { path = "oximeter/oximeter" } @@ -322,57 +369,59 @@ p256 = "0.13" parse-display = "0.9.0" partial-io = { version = "0.5.4", features = ["proptest1", "tokio1"] } parse-size = "1.0.0" -paste = "1.0.14" +paste = "1.0.15" percent-encoding = "2.3.1" +peg = "0.8.3" pem = "3.0" -petgraph = "0.6.4" +petgraph = "0.6.5" postgres-protocol = "0.6.6" predicates = "3.1.0" pretty_assertions = "1.4.0" pretty-hex = "0.4.1" -prettyplease = { version = "0.2.16", features = ["verbatim"] } +prettyplease = { version = "0.2.20", features = ["verbatim"] } proc-macro2 = "1.0" progenitor = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } progenitor-client = { git = "https://github.com/oxidecomputer/progenitor", branch = "main" } -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "fdf0585c6a227a7cfbee4a61a36938c3d77e4712" } -propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "fdf0585c6a227a7cfbee4a61a36938c3d77e4712" } -propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "fdf0585c6a227a7cfbee4a61a36938c3d77e4712" } +bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "6d7ed9a033babc054db9eff5b59dee978d2b0d76" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "6d7ed9a033babc054db9eff5b59dee978d2b0d76" } +propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "6d7ed9a033babc054db9eff5b59dee978d2b0d76" } proptest = "1.4.0" quote = "1.0" rand = "0.8.5" +rand_core = "0.6.4" rand_seeder = "0.2.3" -ratatui = "0.26.1" -rayon = "1.9" +ratatui = "0.26.2" +rayon = "1.10" rcgen = "0.12.1" -reedline = "0.30.0" +reedline = "0.31.0" ref-cast = "1.0" -regex = "1.10.3" -regress = "0.9.0" +regex = "1.10.4" +regress = "0.9.1" reqwest = { version = "0.11", default-features = 
false } ring = "0.17.8" rpassword = "7.3.1" -rstest = "0.18.2" +rstest = "0.19.0" rustfmt-wrapper = "0.2" rustls = "0.22.2" -rustls-pemfile = "2.1.1" -rustyline = "13.0.0" -samael = { version = "0.0.14", features = ["xmlsec"] } +rustls-pemfile = "2.1.2" +rustyline = "14.0.0" +samael = { version = "0.0.15", features = ["xmlsec"] } schemars = "0.8.16" secrecy = "0.8.0" -semver = { version = "1.0.22", features = ["std", "serde"] } +semver = { version = "1.0.23", features = ["std", "serde"] } serde = { version = "1.0", default-features = false, features = [ "derive", "rc" ] } -serde_derive = "1.0" serde_human_bytes = { git = "http://github.com/oxidecomputer/serde_human_bytes", branch = "main" } -serde_json = "1.0.114" +serde_json = "1.0.117" serde_path_to_error = "0.1.16" serde_tokenstream = "0.2" serde_urlencoded = "0.7.1" -serde_with = "3.6.1" +serde_with = "3.7.0" sha2 = "0.10.8" sha3 = "0.10.8" shell-words = "1.1.0" signal-hook = "0.3" signal-hook-tokio = { version = "0.3", features = [ "futures-v0_3" ] } +sigpipe = "0.1.3" similar-asserts = "1.5.0" sled = "0.34" sled-agent-client = { path = "clients/sled-agent-client" } @@ -385,15 +434,15 @@ slog-bunyan = "2.5" slog-dtrace = "0.3" slog-envlogger = "2.2" slog-error-chain = { git = "https://github.com/oxidecomputer/slog-error-chain", branch = "main", features = ["derive"] } -slog-term = "2.9" +slog-term = "2.9.1" smf = "0.2" -snafu = "0.7" socket2 = { version = "0.5", features = ["all"] } sp-sim = { path = "sp-sim" } sprockets-common = { git = "http://github.com/oxidecomputer/sprockets", rev = "77df31efa5619d0767ffc837ef7468101608aee9" } sprockets-host = { git = "http://github.com/oxidecomputer/sprockets", rev = "77df31efa5619d0767ffc837ef7468101608aee9" } sprockets-rot = { git = "http://github.com/oxidecomputer/sprockets", rev = "77df31efa5619d0767ffc837ef7468101608aee9" } -sqlparser = { version = "0.43.1", features = [ "visitor" ] } +sqlformat = "0.2.3" +sqlparser = { version = "0.45.0", features = [ "visitor" ] } static_assertions = "1.1.0" # Please do not change the Steno version to a Git dependency. 
It makes it # harder than expected to make breaking changes (even if you specify a specific @@ -407,7 +456,6 @@ libsw = { version = "3.3.1", features = ["tokio"] } syn = { version = "2.0" } tabled = "0.15.0" tar = "0.4" -tempdir = "0.3" tempfile = "3.10" term = "0.7" termios = "0.3" @@ -415,41 +463,44 @@ textwrap = "0.16.1" test-strategy = "0.3.1" thiserror = "1.0" tofino = { git = "http://github.com/oxidecomputer/tofino", branch = "main" } -tokio = "1.36.0" +tokio = "1.37.0" tokio-postgres = { version = "0.7", features = [ "with-chrono-0_4", "with-uuid-1" ] } -tokio-stream = "0.1.14" +tokio-stream = "0.1.15" tokio-tungstenite = "0.20" tokio-util = { version = "0.7.10", features = ["io", "io-util"] } -toml = "0.8.10" -toml_edit = "0.22.6" -tough = { version = "0.16.0", features = [ "http" ] } +toml = "0.8.12" +toml_edit = "0.22.12" +tough = { version = "0.17.1", features = [ "http" ] } trust-dns-client = "0.22" trust-dns-proto = "0.22" trust-dns-resolver = "0.22" trust-dns-server = "0.22" -trybuild = "1.0.89" +trybuild = "1.0.91" tufaceous = { path = "tufaceous" } tufaceous-lib = { path = "tufaceous-lib" } -tui-tree-widget = "0.17.0" +tui-tree-widget = "0.19.0" +typed-rng = { path = "typed-rng" } unicode-width = "0.1.11" update-common = { path = "update-common" } update-engine = { path = "update-engine" } usdt = "0.5.0" -uuid = { version = "1.7.0", features = ["serde", "v4"] } -walkdir = "2.4" +uuid = { version = "1.8.0", features = ["serde", "v4"] } +uzers = "0.11" +walkdir = "2.5" +whoami = "1.5" wicket = { path = "wicket" } wicket-common = { path = "wicket-common" } wicketd-client = { path = "clients/wicketd-client" } zeroize = { version = "1.7.0", features = ["zeroize_derive", "std"] } zip = { version = "0.6.6", default-features = false, features = ["deflate","bzip2"] } -zone = { version = "0.3", default-features = false, features = ["async", "sync"] } +zone = { version = "0.3", default-features = false, features = ["async"] } # newtype-uuid is set to default-features = false because we don't want to # depend on std in omicron-uuid-kinds (in case a no-std library wants to access # the kinds). However, uses of omicron-uuid-kinds _within omicron_ will have # std and the other features enabled because they'll refer to it via # omicron-uuid-kinds.workspace = true. -newtype-uuid = { version = "1.0.1", default-features = false } +newtype-uuid = { version = "1.1.0", default-features = false } omicron-uuid-kinds = { path = "uuid-kinds", features = ["serde", "schemars08", "uuid-v4"] } # NOTE: The test profile inherits from the dev profile, so settings under @@ -652,11 +703,6 @@ branch = "oxide/omicron" [patch.crates-io.omicron-workspace-hack] path = "workspace-hack" -# Pulls in https://github.com/njaremko/samael/pull/41 -[patch.crates-io.samael] -git = "https://github.com/oxidecomputer/samael" -branch = "oxide/omicron" - # Several crates such as crucible and propolis have a Git dependency on # this repo. Omicron itself depends on these crates, which can lead to two # copies of these crates in the dependency graph. (As a Git dependency, and as diff --git a/README.adoc b/README.adoc index 0e09fc39df..9db11f0337 100644 --- a/README.adoc +++ b/README.adoc @@ -47,7 +47,7 @@ To build and run the non-simulated version of Omicron, see: xref:docs/how-to-run The supported way to run tests is via https://nexte.st/[cargo-nextest]. -NOTE: `cargo test` may work, but that can't be guaranteed as `cargo test` isn't run in CI.
+NOTE: `cargo test` will not work for many of our tests, since they rely on nextest-specific features. If you don't already have nextest installed, get started by https://nexte.st/book/pre-built-binaries[downloading a pre-built binary] or installing nextest via your package manager. Nextest has pre-built binaries for Linux, macOS and illumos. diff --git a/api_identity/Cargo.toml b/api_identity/Cargo.toml index 547defa7c5..6d53ee12c9 100644 --- a/api_identity/Cargo.toml +++ b/api_identity/Cargo.toml @@ -10,6 +10,9 @@ license = "MPL-2.0" [lib] proc-macro = true +[lints] +workspace = true + [dependencies] proc-macro2.workspace = true quote.workspace = true diff --git a/bootstore/Cargo.toml b/bootstore/Cargo.toml index 5e9bcd1ef4..3dc6215917 100644 --- a/bootstore/Cargo.toml +++ b/bootstore/Cargo.toml @@ -8,6 +8,9 @@ license = "MPL-2.0" [build-dependencies] omicron-rpaths.workspace = true +[lints] +workspace = true + [dependencies] bytes.workspace = true camino.workspace = true @@ -27,7 +30,7 @@ slog.workspace = true thiserror.workspace = true tokio.workspace = true uuid.workspace = true -vsss-rs = { version = "3.3.4", features = ["std", "curve25519"] } +vsss-rs = { version = "=3.3.4", features = ["std", "curve25519"] } zeroize.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. diff --git a/bootstore/src/schemes/v0/peer.rs b/bootstore/src/schemes/v0/peer.rs index efb916a61f..1d676953ac 100644 --- a/bootstore/src/schemes/v0/peer.rs +++ b/bootstore/src/schemes/v0/peer.rs @@ -52,8 +52,8 @@ pub enum NodeRequestError { Send, #[error( - "Network config update failed because it is out of date. Attempted - update generation: {attempted_update_generation}, current generation: + "Network config update failed because it is out of date. Attempted \ + update generation: {attempted_update_generation}, current generation: \ {current_generation}" )] StaleNetworkConfig { diff --git a/bootstore/src/schemes/v0/peer_networking.rs b/bootstore/src/schemes/v0/peer_networking.rs index 13afd27fa2..d5e3e3fa71 100644 --- a/bootstore/src/schemes/v0/peer_networking.rs +++ b/bootstore/src/schemes/v0/peer_networking.rs @@ -599,9 +599,11 @@ fn read_frame_size(buf: [u8; FRAME_HEADER_SIZE]) -> usize { #[derive(Debug, From)] enum HandshakeError { - Serialization(ciborium::ser::Error), - Deserialization(ciborium::de::Error), - Io(tokio::io::Error), + // Rust 1.77 warns on tuple variants not being used, but in reality these are + // used for their Debug impl. + Serialization(#[allow(dead_code)] ciborium::ser::Error), + Deserialization(#[allow(dead_code)] ciborium::de::Error), + Io(#[allow(dead_code)] tokio::io::Error), UnsupportedScheme, UnsupportedVersion, Timeout, diff --git a/caboose-util/Cargo.toml b/caboose-util/Cargo.toml deleted file mode 100644 index 91bf00741e..0000000000 --- a/caboose-util/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "caboose-util" -version = "0.1.0" -edition = "2021" -license = "MPL-2.0" - -[dependencies] -anyhow.workspace = true -hubtools.workspace = true -omicron-workspace-hack.workspace = true diff --git a/caboose-util/src/main.rs b/caboose-util/src/main.rs deleted file mode 100644 index 36851cd36d..0000000000 --- a/caboose-util/src/main.rs +++ /dev/null @@ -1,32 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
- -// Copyright 2023 Oxide Computer Company - -use anyhow::{bail, Context, Result}; -use hubtools::{Caboose, RawHubrisArchive}; - -fn main() -> Result<()> { - let mut args = std::env::args().skip(1); - match args.next().context("subcommand required")?.as_str() { - "read-board" => { - let caboose = read_caboose(args.next())?; - println!("{}", std::str::from_utf8(caboose.board()?)?); - Ok(()) - } - "read-version" => { - let caboose = read_caboose(args.next())?; - println!("{}", std::str::from_utf8(caboose.version()?)?); - Ok(()) - } - unknown => bail!("unknown command {}", unknown), - } -} - -fn read_caboose(path: Option<String>) -> Result<Caboose> { - let archive = RawHubrisArchive::load( - &path.context("path to hubris archive required")?, - )?; - Ok(archive.read_caboose()?) -} diff --git a/certificates/Cargo.toml b/certificates/Cargo.toml index 87b12fd167..51e4a2e421 100644 --- a/certificates/Cargo.toml +++ b/certificates/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] display-error-chain.workspace = true foreign-types.workspace = true diff --git a/certificates/src/lib.rs b/certificates/src/lib.rs index 442a9cfdd5..ee4ab4a6bd 100644 --- a/certificates/src/lib.rs +++ b/certificates/src/lib.rs @@ -412,7 +412,7 @@ mod tests { // Valid certs: either no key usage values, or valid ones. for ext_key_usage in &valid_ext_key_usage { let mut params = CertificateParams::new(vec![HOST.to_string()]); - params.extended_key_usages = ext_key_usage.clone(); + params.extended_key_usages.clone_from(ext_key_usage); assert!( validate_cert_with_params(params, &[HOST]).is_ok(), @@ -431,7 +431,7 @@ for ext_key_usage in &invalid_ext_key_usage { let mut params = CertificateParams::new(vec![HOST.to_string()]); - params.extended_key_usages = ext_key_usage.clone(); + params.extended_key_usages.clone_from(ext_key_usage); assert!( matches!( diff --git a/clients/bootstrap-agent-client/Cargo.toml b/clients/bootstrap-agent-client/Cargo.toml index ea5d5358f9..0b1d2fab4b 100644 --- a/clients/bootstrap-agent-client/Cargo.toml +++ b/clients/bootstrap-agent-client/Cargo.toml @@ -4,15 +4,19 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] omicron-common.workspace = true progenitor.workspace = true -ipnetwork.workspace = true regress.workspace = true reqwest = { workspace = true, features = [ "json", "rustls-tls", "stream" ] } schemars.workspace = true serde.workspace = true +serde_json.workspace = true sled-hardware-types.workspace = true slog.workspace = true uuid.workspace = true omicron-workspace-hack.workspace = true +oxnet.workspace = true diff --git a/clients/bootstrap-agent-client/src/lib.rs b/clients/bootstrap-agent-client/src/lib.rs index 61ebd04e1b..b29f4e69f4 100644 --- a/clients/bootstrap-agent-client/src/lib.rs +++ b/clients/bootstrap-agent-client/src/lib.rs @@ -18,10 +18,12 @@ progenitor::generate_api!( slog::debug!(log, "client response"; "result" => ?result); }), derives = [schemars::JsonSchema], + crates = { + "oxnet" = "0.1.0", + }, replace = { - Ipv4Network = ipnetwork::Ipv4Network, - Ipv6Network = ipnetwork::Ipv6Network, - IpNetwork = ipnetwork::IpNetwork, + AllowedSourceIps = omicron_common::api::external::AllowedSourceIps, + ImportExportPolicy = omicron_common::api::external::ImportExportPolicy, } ); diff --git a/clients/ddm-admin-client/Cargo.toml b/clients/ddm-admin-client/Cargo.toml index 1b0fca2951..bd99492f30 100644 --- a/clients/ddm-admin-client/Cargo.toml +++
b/clients/ddm-admin-client/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] either.workspace = true progenitor-client.workspace = true diff --git a/clients/ddm-admin-client/src/lib.rs b/clients/ddm-admin-client/src/lib.rs index 5be2dd53bd..b926ee2971 100644 --- a/clients/ddm-admin-client/src/lib.rs +++ b/clients/ddm-admin-client/src/lib.rs @@ -82,7 +82,7 @@ impl Client { let me = self.clone(); tokio::spawn(async move { let prefix = - Ipv6Prefix { addr: address.net().network(), len: SLED_PREFIX }; + Ipv6Prefix { addr: address.net().prefix(), len: SLED_PREFIX }; retry_notify(retry_policy_internal_service_aggressive(), || async { info!( me.log, "Sending prefix to ddmd for advertisement"; diff --git a/clients/dns-service-client/Cargo.toml b/clients/dns-service-client/Cargo.toml index 27ffb66d88..cdaef701bd 100644 --- a/clients/dns-service-client/Cargo.toml +++ b/clients/dns-service-client/Cargo.toml @@ -4,9 +4,13 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true chrono.workspace = true +expectorate.workspace = true http.workspace = true progenitor.workspace = true reqwest = { workspace = true, features = ["json", "rustls-tls", "stream"] } diff --git a/clients/dns-service-client/src/diff.rs b/clients/dns-service-client/src/diff.rs index ce04319dff..2ae7036c86 100644 --- a/clients/dns-service-client/src/diff.rs +++ b/clients/dns-service-client/src/diff.rs @@ -2,173 +2,252 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use crate::types::DnsConfigParams; +use crate::types::DnsConfigZone; use crate::types::DnsRecord; +use crate::types::Srv; use crate::DnsRecords; use anyhow::ensure; -use anyhow::Context; +use std::collections::BTreeSet; + +#[derive(Debug)] +enum NameDiff<'a> { + Added(&'a str, &'a [DnsRecord]), + Removed(&'a str, &'a [DnsRecord]), + Changed(&'a str, &'a [DnsRecord], &'a [DnsRecord]), + Unchanged(&'a str, &'a [DnsRecord]), +} /// Compare the DNS records contained in two sets of DNS configuration #[derive(Debug)] pub struct DnsDiff<'a> { left: &'a DnsRecords, right: &'a DnsRecords, + zone_name: &'a str, + all_names: BTreeSet<&'a String>, } impl<'a> DnsDiff<'a> { - /// Compare the DNS records contained in two sets of DNS configuration + /// Compare the DNS records contained in two DNS zones' configs /// - /// Both configurations are expected to contain exactly one zone and they - /// should have the same name. + /// Both zones are expected to have the same name. pub fn new( - left: &'a DnsConfigParams, - right: &'a DnsConfigParams, + left_zone: &'a DnsConfigZone, + right_zone: &'a DnsConfigZone, ) -> Result, anyhow::Error> { - let left_zone = left.sole_zone().context("left side of diff")?; - let right_zone = right.sole_zone().context("right side of diff")?; - ensure!( left_zone.zone_name == right_zone.zone_name, "cannot compare DNS configuration from zones with different names: \ {:?} vs. 
{:?}", left_zone.zone_name, right_zone.zone_name, ); - Ok(DnsDiff { left: &left_zone.records, right: &right_zone.records }) + let all_names = + left_zone.records.keys().chain(right_zone.records.keys()).collect(); + + Ok(DnsDiff { + left: &left_zone.records, + right: &right_zone.records, + zone_name: &left_zone.zone_name, + all_names, + }) + } + + fn iter_names(&self) -> impl Iterator> { + self.all_names.iter().map(|k| { + let name = k.as_str(); + let v1 = self.left.get(*k); + let v2 = self.right.get(*k); + match (v1, v2) { + (None, Some(v2)) => NameDiff::Added(name, v2.as_ref()), + (Some(v1), None) => NameDiff::Removed(name, v1.as_ref()), + (Some(v1), Some(v2)) => { + let mut v1_sorted = v1.clone(); + let mut v2_sorted = v2.clone(); + v1_sorted.sort(); + v2_sorted.sort(); + if v1_sorted == v2_sorted { + NameDiff::Unchanged(name, v1.as_ref()) + } else { + NameDiff::Changed(name, v1.as_ref(), v2.as_ref()) + } + } + (None, None) => unreachable!(), + } + }) } /// Iterate over the names that are present in the `right` config but /// absent in the `left` one (i.e., added between `left` and `right`) pub fn names_added(&self) -> impl Iterator { - self.right - .iter() - .filter(|(k, _)| !self.left.contains_key(*k)) - .map(|(k, v)| (k.as_ref(), v.as_ref())) + self.iter_names().filter_map(|nd| { + if let NameDiff::Added(k, v) = nd { + Some((k, v)) + } else { + None + } + }) } /// Iterate over the names that are present in the `left` config but /// absent in the `right` one (i.e., removed between `left` and `right`) pub fn names_removed(&self) -> impl Iterator { - self.left - .iter() - .filter(|(k, _)| !self.right.contains_key(*k)) - .map(|(k, v)| (k.as_ref(), v.as_ref())) + self.iter_names().filter_map(|nd| { + if let NameDiff::Removed(k, v) = nd { + Some((k, v)) + } else { + None + } + }) } /// Iterate over the names whose records changed between `left` and `right`. 
pub fn names_changed( &self, ) -> impl Iterator { - self.left.iter().filter_map(|(k, v1)| match self.right.get(k) { - Some(v2) => { - let mut v1_sorted = v1.clone(); - let mut v2_sorted = v2.clone(); - v1_sorted.sort(); - v2_sorted.sort(); - (v1_sorted != v2_sorted) - .then(|| (k.as_ref(), v1.as_ref(), v2.as_ref())) + self.iter_names().filter_map(|nd| { + if let NameDiff::Changed(k, v1, v2) = nd { + Some((k, v1, v2)) + } else { + None + } + }) + } + + /// Iterate over the names whose records were unchanged between `left` and + /// `right` + pub fn names_unchanged( + &self, + ) -> impl Iterator { + self.iter_names().filter_map(|nd| { + if let NameDiff::Unchanged(k, v) = nd { + Some((k, v)) + } else { + None } - _ => None, }) } /// Returns true iff there are no differences in the DNS names and records /// described by the given configurations pub fn is_empty(&self) -> bool { - self.names_added().next().is_none() - && self.names_removed().next().is_none() - && self.names_changed().next().is_none() + self.iter_names().all(|nd| matches!(nd, NameDiff::Unchanged(_, _))) + } +} + +impl<'a> std::fmt::Display for DnsDiff<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let names_changed = !self.is_empty(); + let zone_name = &self.zone_name; + + if !names_changed { + writeln!(f, " DNS zone: {:?} (unchanged)", zone_name,)?; + } else { + writeln!(f, "* DNS zone: {:?}: ", zone_name)?; + } + + let print_records = |f: &mut std::fmt::Formatter<'_>, + prefix, + records: &[DnsRecord]| + -> std::fmt::Result { + for r in records.iter() { + writeln!( + f, + "{} {}", + prefix, + match r { + DnsRecord::A(addr) => format!("A {}", addr), + DnsRecord::Aaaa(addr) => format!("AAAA {}", addr), + DnsRecord::Srv(Srv { port, target, .. }) => { + format!("SRV port {:5} {}", port, target) + } + } + )?; + } + + Ok(()) + }; + + for name_diff in self.iter_names() { + match name_diff { + NameDiff::Added(name, records) => { + writeln!( + f, + "+ name: {:50} (records: {})", + name, + records.len() + )?; + print_records(f, "+", records)?; + } + NameDiff::Removed(name, records) => { + writeln!( + f, + "- name: {:50} (records: {})", + name, + records.len() + )?; + print_records(f, "-", records)?; + } + NameDiff::Unchanged(name, records) => { + writeln!( + f, + " name: {:50} (records: {})", + name, + records.len() + )?; + print_records(f, " ", records)?; + } + NameDiff::Changed(name, records1, records2) => { + writeln!( + f, + "* name: {:50} (records: {} -> {})", + name, + records1.len(), + records2.len(), + )?; + print_records(f, "-", records1)?; + print_records(f, "+", records2)?; + } + } + } + + Ok(()) } } #[cfg(test)] mod test { use super::DnsDiff; - use crate::types::DnsConfigParams; use crate::types::DnsConfigZone; use crate::types::DnsRecord; - use chrono::Utc; use std::collections::HashMap; use std::net::Ipv4Addr; const ZONE_NAME: &str = "dummy"; - fn example() -> DnsConfigParams { - DnsConfigParams { - generation: 4, - time_created: Utc::now(), - zones: vec![DnsConfigZone { - zone_name: ZONE_NAME.to_string(), - records: HashMap::from([ - ( - "ex1".to_string(), - vec![DnsRecord::A(Ipv4Addr::LOCALHOST)], - ), - ( - "ex2".to_string(), - vec![DnsRecord::A("192.168.1.3".parse().unwrap())], - ), - ]), - }], + fn example() -> DnsConfigZone { + DnsConfigZone { + zone_name: ZONE_NAME.to_string(), + records: HashMap::from([ + ("ex1".to_string(), vec![DnsRecord::A(Ipv4Addr::LOCALHOST)]), + ( + "ex2".to_string(), + vec![DnsRecord::A("192.168.1.3".parse().unwrap())], + ), + ]), } } #[test] fn diff_invalid() 
{ - let example_empty = DnsConfigParams { - generation: 3, - time_created: Utc::now(), - zones: vec![], - }; - - // Configs must have at least one zone. - let error = DnsDiff::new(&example_empty, &example_empty) - .expect_err("unexpectedly succeeded comparing two empty configs"); - assert!( - format!("{:#}", error).contains("expected exactly one DNS zone") - ); - - let example = example(); - let error = DnsDiff::new(&example_empty, &example) - .expect_err("unexpectedly succeeded comparing an empty config"); - assert!( - format!("{:#}", error).contains("expected exactly one DNS zone") - ); - - // Configs must not have more than one zone. - let example_multiple = DnsConfigParams { - generation: 3, - time_created: Utc::now(), - zones: vec![ - DnsConfigZone { - zone_name: ZONE_NAME.to_string(), - records: HashMap::new(), - }, - DnsConfigZone { - zone_name: "two".to_string(), - records: HashMap::new(), - }, - ], - }; - let error = DnsDiff::new(&example_multiple, &example).expect_err( - "unexpectedly succeeded comparing config with multiple zones", - ); - assert!( - format!("{:#}", error).contains("expected exactly one DNS zone") - ); - // Cannot compare different zone names - let example_different_zone = DnsConfigParams { - generation: 3, - time_created: Utc::now(), - zones: vec![DnsConfigZone { - zone_name: format!("{}-other", ZONE_NAME), - records: HashMap::new(), - }], + let example_different_zone = DnsConfigZone { + zone_name: format!("{}-other", ZONE_NAME), + records: HashMap::new(), }; - let error = DnsDiff::new(&example_different_zone, &example).expect_err( - "unexpectedly succeeded comparing configs with \ + let error = DnsDiff::new(&example_different_zone, &example()) + .expect_err( + "unexpectedly succeeded comparing configs with \ different zone names", - ); + ); assert_eq!( format!("{:#}", error), "cannot compare DNS configuration from zones with different \ @@ -184,27 +263,27 @@ mod test { assert_eq!(diff.names_removed().count(), 0); assert_eq!(diff.names_added().count(), 0); assert_eq!(diff.names_changed().count(), 0); + expectorate::assert_contents( + "tests/output/diff_example_empty.out", + &diff.to_string(), + ); } #[test] fn diff_different() { let example = example(); - let example2 = DnsConfigParams { - generation: 4, - time_created: Utc::now(), - zones: vec![DnsConfigZone { - zone_name: ZONE_NAME.to_string(), - records: HashMap::from([ - ( - "ex2".to_string(), - vec![DnsRecord::A("192.168.1.4".parse().unwrap())], - ), - ( - "ex3".to_string(), - vec![DnsRecord::A(std::net::Ipv4Addr::LOCALHOST)], - ), - ]), - }], + let example2 = DnsConfigZone { + zone_name: ZONE_NAME.to_string(), + records: HashMap::from([ + ( + "ex2".to_string(), + vec![DnsRecord::A("192.168.1.4".parse().unwrap())], + ), + ( + "ex3".to_string(), + vec![DnsRecord::A(std::net::Ipv4Addr::LOCALHOST)], + ), + ]), }; let diff = DnsDiff::new(&example, &example2).unwrap(); @@ -231,5 +310,19 @@ mod test { changed[0].2, vec![DnsRecord::A("192.168.1.4".parse().unwrap())] ); + + expectorate::assert_contents( + "tests/output/diff_example_different.out", + &diff.to_string(), + ); + + // Diff'ing the reverse direction exercises different cases (e.g., what + // was added now appears as removed). Also, the generation number + // should really be different. 
+ let diff = DnsDiff::new(&example2, &example).unwrap(); + expectorate::assert_contents( + "tests/output/diff_example_different_reversed.out", + &diff.to_string(), + ); } } diff --git a/clients/dns-service-client/tests/output/diff_example_different.out b/clients/dns-service-client/tests/output/diff_example_different.out new file mode 100644 index 0000000000..7f2f73fed6 --- /dev/null +++ b/clients/dns-service-client/tests/output/diff_example_different.out @@ -0,0 +1,8 @@ +* DNS zone: "dummy": +- name: ex1 (records: 1) +- A 127.0.0.1 +* name: ex2 (records: 1 -> 1) +- A 192.168.1.3 ++ A 192.168.1.4 ++ name: ex3 (records: 1) ++ A 127.0.0.1 diff --git a/clients/dns-service-client/tests/output/diff_example_different_reversed.out b/clients/dns-service-client/tests/output/diff_example_different_reversed.out new file mode 100644 index 0000000000..ba52d4720c --- /dev/null +++ b/clients/dns-service-client/tests/output/diff_example_different_reversed.out @@ -0,0 +1,8 @@ +* DNS zone: "dummy": ++ name: ex1 (records: 1) ++ A 127.0.0.1 +* name: ex2 (records: 1 -> 1) +- A 192.168.1.4 ++ A 192.168.1.3 +- name: ex3 (records: 1) +- A 127.0.0.1 diff --git a/clients/dns-service-client/tests/output/diff_example_empty.out b/clients/dns-service-client/tests/output/diff_example_empty.out new file mode 100644 index 0000000000..1e3ba76bc9 --- /dev/null +++ b/clients/dns-service-client/tests/output/diff_example_empty.out @@ -0,0 +1,5 @@ + DNS zone: "dummy" (unchanged) + name: ex1 (records: 1) + A 127.0.0.1 + name: ex2 (records: 1) + A 192.168.1.3 diff --git a/clients/dpd-client/Cargo.toml b/clients/dpd-client/Cargo.toml index 0239c6d9b0..477435d8bb 100644 --- a/clients/dpd-client/Cargo.toml +++ b/clients/dpd-client/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] futures.workspace = true progenitor-client.workspace = true diff --git a/clients/dpd-client/src/lib.rs b/clients/dpd-client/src/lib.rs index a898c31781..556a8493d7 100644 --- a/clients/dpd-client/src/lib.rs +++ b/clients/dpd-client/src/lib.rs @@ -479,7 +479,7 @@ impl From for Ipv4Cidr { impl From for u64 { fn from(x: Ipv4Cidr) -> Self { let prefix: u32 = x.prefix.into(); - ((prefix as u64) << 32) | (x.prefix_len as u64) + (u64::from(prefix) << 32) | u64::from(x.prefix_len) } } @@ -762,12 +762,12 @@ impl fmt::Debug for MacAddr { impl From for u64 { fn from(mac: MacAddr) -> u64 { - ((mac.a[0] as u64) << 40) - | ((mac.a[1] as u64) << 32) - | ((mac.a[2] as u64) << 24) - | ((mac.a[3] as u64) << 16) - | ((mac.a[4] as u64) << 8) - | (mac.a[5] as u64) + (u64::from(mac.a[0]) << 40) + | (u64::from(mac.a[1]) << 32) + | (u64::from(mac.a[2]) << 24) + | (u64::from(mac.a[3]) << 16) + | (u64::from(mac.a[4]) << 8) + | u64::from(mac.a[5]) } } diff --git a/clients/gateway-client/Cargo.toml b/clients/gateway-client/Cargo.toml index 9e1118cf59..96f6484122 100644 --- a/clients/gateway-client/Cargo.toml +++ b/clients/gateway-client/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] base64.workspace = true chrono.workspace = true diff --git a/clients/gateway-client/src/lib.rs b/clients/gateway-client/src/lib.rs index 7dbc50eea2..6e932577a7 100644 --- a/clients/gateway-client/src/lib.rs +++ b/clients/gateway-client/src/lib.rs @@ -50,15 +50,15 @@ progenitor::generate_api!( }), derives = [schemars::JsonSchema], patch = { - SpIdentifier = { derives = [Copy, PartialEq, Hash, Eq, Serialize, Deserialize] }, - SpIgnition = { derives = 
[PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, - SpIgnitionSystemType = { derives = [Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, - SpState = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, - RotState = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, - RotImageDetails = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, - RotSlot = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, - ImageVersion = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, - HostPhase2RecoveryImageId = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, + HostPhase2RecoveryImageId = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + ImageVersion = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + RotImageDetails = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + RotSlot = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + RotState = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + SpIdentifier = { derives = [Copy, PartialEq, Hash, Eq] }, + SpIgnition = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + SpIgnitionSystemType = { derives = [Copy, PartialEq, Eq, PartialOrd, Ord] }, + SpState = { derives = [PartialEq, Eq, PartialOrd, Ord] }, }, ); diff --git a/clients/installinator-artifact-client/Cargo.toml b/clients/installinator-artifact-client/Cargo.toml index c3ddc529d9..f1e896864f 100644 --- a/clients/installinator-artifact-client/Cargo.toml +++ b/clients/installinator-artifact-client/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] installinator-common.workspace = true progenitor.workspace = true diff --git a/clients/installinator-artifact-client/src/lib.rs b/clients/installinator-artifact-client/src/lib.rs index de3072a34a..96806c2cab 100644 --- a/clients/installinator-artifact-client/src/lib.rs +++ b/clients/installinator-artifact-client/src/lib.rs @@ -19,13 +19,13 @@ progenitor::generate_api!( }), derives = [schemars::JsonSchema], replace = { + Duration = std::time::Duration, EventReportForInstallinatorSpec = installinator_common::EventReport, - StepEventForInstallinatorSpec = installinator_common::StepEvent, + M2Slot = installinator_common::M2Slot, + ProgressEventForGenericSpec = installinator_common::ProgressEvent, ProgressEventForInstallinatorSpec = installinator_common::ProgressEvent, StepEventForGenericSpec = installinator_common::StepEvent, - ProgressEventForGenericSpec = installinator_common::ProgressEvent, - M2Slot = installinator_common::M2Slot, - Duration = std::time::Duration, + StepEventForInstallinatorSpec = installinator_common::StepEvent, } ); diff --git a/clients/nexus-client/Cargo.toml b/clients/nexus-client/Cargo.toml index fd6df6919f..1b64fa24d1 100644 --- a/clients/nexus-client/Cargo.toml +++ b/clients/nexus-client/Cargo.toml @@ -4,13 +4,16 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] chrono.workspace = true futures.workspace = true -ipnetwork.workspace = true nexus-types.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true +oxnet.workspace = true progenitor.workspace = true regress.workspace = true reqwest = { workspace = true, features = ["rustls-tls", "stream"] } diff --git a/clients/nexus-client/src/lib.rs b/clients/nexus-client/src/lib.rs index ad8269e675..6546af8673 100644 --- a/clients/nexus-client/src/lib.rs +++ b/clients/nexus-client/src/lib.rs @@ 
-21,21 +21,24 @@ progenitor::generate_api!( post_hook = (|log: &slog::Logger, result: &Result<_, _>| { slog::debug!(log, "client response"; "result" => ?result); }), + crates = { + "oxnet" = "0.1.0", + }, replace = { // It's kind of unfortunate to pull in such a complex and unstable type // as "blueprint" this way, but we have really useful functionality // (e.g., diff'ing) that's implemented on our local type. Blueprint = nexus_types::deployment::Blueprint, Generation = omicron_common::api::external::Generation, - Ipv4Network = ipnetwork::Ipv4Network, - Ipv6Network = ipnetwork::Ipv6Network, - IpNetwork = ipnetwork::IpNetwork, + ImportExportPolicy = omicron_common::api::external::ImportExportPolicy, MacAddr = omicron_common::api::external::MacAddr, Name = omicron_common::api::external::Name, - NewPasswordHash = omicron_passwords::NewPasswordHash, NetworkInterface = omicron_common::api::internal::shared::NetworkInterface, NetworkInterfaceKind = omicron_common::api::internal::shared::NetworkInterfaceKind, + NewPasswordHash = omicron_passwords::NewPasswordHash, + TypedUuidForCollectionKind = omicron_uuid_kinds::CollectionUuid, TypedUuidForDownstairsKind = omicron_uuid_kinds::TypedUuid, + TypedUuidForSledKind = omicron_uuid_kinds::TypedUuid, TypedUuidForUpstairsKind = omicron_uuid_kinds::TypedUuid, TypedUuidForUpstairsRepairKind = omicron_uuid_kinds::TypedUuid, TypedUuidForUpstairsSessionKind = omicron_uuid_kinds::TypedUuid, @@ -229,7 +232,6 @@ impl From<&omicron_common::api::internal::nexus::ProducerEndpoint> ) -> Self { Self { address: s.address.to_string(), - base_route: s.base_route.clone(), id: s.id, kind: s.kind.into(), interval: s.interval.into(), @@ -255,7 +257,9 @@ impl From for types::Duration { impl From for std::time::Duration { fn from(s: types::Duration) -> Self { - std::time::Duration::from_nanos(s.secs * 1000000000 + s.nanos as u64) + std::time::Duration::from_nanos( + s.secs * 1000000000 + u64::from(s.nanos), + ) } } @@ -287,7 +291,8 @@ impl From<&omicron_common::api::internal::shared::SourceNatConfig> fn from( r: &omicron_common::api::internal::shared::SourceNatConfig, ) -> Self { - Self { ip: r.ip, first_port: r.first_port, last_port: r.last_port } + let (first_port, last_port) = r.port_range_raw(); + Self { ip: r.ip, first_port, last_port } } } @@ -382,3 +387,48 @@ impl From } } } + +impl From + for omicron_common::api::internal::nexus::ProducerKind +{ + fn from(kind: types::ProducerKind) -> Self { + use omicron_common::api::internal::nexus::ProducerKind; + match kind { + types::ProducerKind::SledAgent => ProducerKind::SledAgent, + types::ProducerKind::Instance => ProducerKind::Instance, + types::ProducerKind::Service => ProducerKind::Service, + } + } +} + +impl TryFrom + for omicron_common::api::internal::nexus::ProducerEndpoint +{ + type Error = String; + + fn try_from(ep: types::ProducerEndpoint) -> Result { + let Ok(address) = ep.address.parse() else { + return Err(format!("Invalid IP address: {}", ep.address)); + }; + Ok(Self { + id: ep.id, + kind: ep.kind.into(), + address, + interval: ep.interval.into(), + }) + } +} + +impl From<&omicron_common::api::external::AllowedSourceIps> + for types::AllowedSourceIps +{ + fn from(ips: &omicron_common::api::external::AllowedSourceIps) -> Self { + use omicron_common::api::external::AllowedSourceIps; + match ips { + AllowedSourceIps::Any => types::AllowedSourceIps::Any, + AllowedSourceIps::List(list) => { + types::AllowedSourceIps::List(list.iter().cloned().collect()) + } + } + } +} diff --git a/clients/oxide-client/Cargo.toml 
b/clients/oxide-client/Cargo.toml index 3cb411729d..f2adcacb1b 100644 --- a/clients/oxide-client/Cargo.toml +++ b/clients/oxide-client/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true base64.workspace = true diff --git a/clients/oximeter-client/Cargo.toml b/clients/oximeter-client/Cargo.toml index e54b152415..dadf6d8c4d 100644 --- a/clients/oximeter-client/Cargo.toml +++ b/clients/oximeter-client/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] chrono.workspace = true futures.workspace = true diff --git a/clients/oximeter-client/src/lib.rs b/clients/oximeter-client/src/lib.rs index 11aa1452f8..74fc6968e8 100644 --- a/clients/oximeter-client/src/lib.rs +++ b/clients/oximeter-client/src/lib.rs @@ -41,7 +41,6 @@ impl From<&omicron_common::api::internal::nexus::ProducerEndpoint> ) -> Self { Self { address: s.address.to_string(), - base_route: s.base_route.clone(), id: s.id, kind: s.kind.into(), interval: s.interval.into(), diff --git a/clients/sled-agent-client/Cargo.toml b/clients/sled-agent-client/Cargo.toml index 71b94441ed..11cc5adfd7 100644 --- a/clients/sled-agent-client/Cargo.toml +++ b/clients/sled-agent-client/Cargo.toml @@ -4,17 +4,22 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true async-trait.workspace = true chrono.workspace = true omicron-common.workspace = true +omicron-uuid-kinds.workspace = true +omicron-workspace-hack.workspace = true +oxnet.workspace = true progenitor.workspace = true -ipnetwork.workspace = true regress.workspace = true reqwest = { workspace = true, features = [ "json", "rustls-tls", "stream" ] } schemars.workspace = true serde.workspace = true +serde_json.workspace = true slog.workspace = true uuid.workspace = true -omicron-workspace-hack.workspace = true diff --git a/clients/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs index 0426982d3e..300e3713ea 100644 --- a/clients/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -8,17 +8,15 @@ use anyhow::Context; use async_trait::async_trait; use omicron_common::api::internal::shared::NetworkInterface; use std::convert::TryFrom; +use std::fmt; use std::hash::Hash; use std::net::IpAddr; use std::net::SocketAddr; -use types::{ - BfdPeerConfig, BgpConfig, BgpPeerConfig, PortConfigV1, RouteConfig, -}; use uuid::Uuid; progenitor::generate_api!( spec = "../../openapi/sled-agent.json", - derives = [ schemars::JsonSchema, PartialEq ], + derives = [schemars::JsonSchema, PartialEq], inner_type = slog::Logger, pre_hook = (|log: &slog::Logger, request: &reqwest::Request| { slog::debug!(log, "client request"; @@ -30,51 +28,105 @@ progenitor::generate_api!( post_hook = (|log: &slog::Logger, result: &Result<_, _>| { slog::debug!(log, "client response"; "result" => ?result); }), - //TODO trade the manual transformations later in this file for the - // replace directives below? 
+ patch = { + BfdPeerConfig = { derives = [Eq, Hash] }, + BgpConfig = { derives = [Eq, Hash] }, + BgpPeerConfig = { derives = [Eq, Hash] }, + OmicronPhysicalDiskConfig = { derives = [Eq, Hash, PartialOrd, Ord] }, + PortConfigV1 = { derives = [Eq, Hash] }, + RouteConfig = { derives = [Eq, Hash] }, + VirtualNetworkInterfaceHost = { derives = [Eq, Hash] }, + }, + crates = { + "oxnet" = "0.1.0", + }, replace = { ByteCount = omicron_common::api::external::ByteCount, + DiskIdentity = omicron_common::disk::DiskIdentity, Generation = omicron_common::api::external::Generation, + ImportExportPolicy = omicron_common::api::external::ImportExportPolicy, MacAddr = omicron_common::api::external::MacAddr, Name = omicron_common::api::external::Name, - SwitchLocation = omicron_common::api::external::SwitchLocation, - Ipv6Network = ipnetwork::Ipv6Network, - IpNetwork = ipnetwork::IpNetwork, + NetworkInterface = omicron_common::api::internal::shared::NetworkInterface, PortFec = omicron_common::api::internal::shared::PortFec, PortSpeed = omicron_common::api::internal::shared::PortSpeed, SourceNatConfig = omicron_common::api::internal::shared::SourceNatConfig, + SwitchLocation = omicron_common::api::external::SwitchLocation, + TypedUuidForZpoolKind = omicron_uuid_kinds::ZpoolUuid, Vni = omicron_common::api::external::Vni, - NetworkInterface = omicron_common::api::internal::shared::NetworkInterface, + ZpoolKind = omicron_common::zpool_name::ZpoolKind, + ZpoolName = omicron_common::zpool_name::ZpoolName, } ); // We cannot easily configure progenitor to derive `Eq` on all the client- // generated types because some have floats and other types that can't impl // `Eq`. We impl it explicitly for a few types on which we need it. +impl Eq for types::OmicronPhysicalDisksConfig {} impl Eq for types::OmicronZonesConfig {} impl Eq for types::OmicronZoneConfig {} impl Eq for types::OmicronZoneType {} impl Eq for types::OmicronZoneDataset {} +/// Like [`types::OmicronZoneType`], but without any associated data. +/// +/// We have a few enums of this form floating around. This particular one is +/// meant to correspond exactly 1:1 with `OmicronZoneType`. +/// +/// The [`fmt::Display`] impl for this type is a human-readable label, meant +/// for testing and reporting. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub enum ZoneKind { + BoundaryNtp, + Clickhouse, + ClickhouseKeeper, + CockroachDb, + Crucible, + CruciblePantry, + ExternalDns, + InternalDns, + InternalNtp, + Nexus, + Oximeter, +} + +impl fmt::Display for ZoneKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ZoneKind::BoundaryNtp => write!(f, "boundary_ntp"), + ZoneKind::Clickhouse => write!(f, "clickhouse"), + ZoneKind::ClickhouseKeeper => write!(f, "clickhouse_keeper"), + ZoneKind::CockroachDb => write!(f, "cockroach_db"), + ZoneKind::Crucible => write!(f, "crucible"), + ZoneKind::CruciblePantry => write!(f, "crucible_pantry"), + ZoneKind::ExternalDns => write!(f, "external_dns"), + ZoneKind::InternalDns => write!(f, "internal_dns"), + ZoneKind::InternalNtp => write!(f, "internal_ntp"), + ZoneKind::Nexus => write!(f, "nexus"), + ZoneKind::Oximeter => write!(f, "oximeter"), + } + } +} + impl types::OmicronZoneType { - /// Human-readable label describing what kind of zone this is - /// - /// This is just use for testing and reporting. - pub fn label(&self) -> impl std::fmt::Display { + /// Returns the [`ZoneKind`] corresponding to this variant. 
+ pub fn kind(&self) -> ZoneKind { match self { - types::OmicronZoneType::BoundaryNtp { .. } => "boundary_ntp", - types::OmicronZoneType::Clickhouse { .. } => "clickhouse", + types::OmicronZoneType::BoundaryNtp { .. } => ZoneKind::BoundaryNtp, + types::OmicronZoneType::Clickhouse { .. } => ZoneKind::Clickhouse, types::OmicronZoneType::ClickhouseKeeper { .. } => { - "clickhouse_keeper" + ZoneKind::ClickhouseKeeper + } + types::OmicronZoneType::CockroachDb { .. } => ZoneKind::CockroachDb, + types::OmicronZoneType::Crucible { .. } => ZoneKind::Crucible, + types::OmicronZoneType::CruciblePantry { .. } => { + ZoneKind::CruciblePantry } - types::OmicronZoneType::CockroachDb { .. } => "cockroach_db", - types::OmicronZoneType::Crucible { .. } => "crucible", - types::OmicronZoneType::CruciblePantry { .. } => "crucible_pantry", - types::OmicronZoneType::ExternalDns { .. } => "external_dns", - types::OmicronZoneType::InternalDns { .. } => "internal_dns", - types::OmicronZoneType::InternalNtp { .. } => "internal_ntp", - types::OmicronZoneType::Nexus { .. } => "nexus", - types::OmicronZoneType::Oximeter { .. } => "oximeter", + types::OmicronZoneType::ExternalDns { .. } => ZoneKind::ExternalDns, + types::OmicronZoneType::InternalDns { .. } => ZoneKind::InternalDns, + types::OmicronZoneType::InternalNtp { .. } => ZoneKind::InternalNtp, + types::OmicronZoneType::Nexus { .. } => ZoneKind::Nexus, + types::OmicronZoneType::Oximeter { .. } => ZoneKind::Oximeter, } } @@ -189,16 +241,6 @@ impl omicron_common::api::external::ClientError for types::Error { } } -impl From for omicron_common::disk::DiskIdentity { - fn from(identity: types::DiskIdentity) -> Self { - Self { - vendor: identity.vendor, - serial: identity.serial, - model: identity.model, - } - } -} - impl From for types::InstanceRuntimeState { @@ -369,100 +411,6 @@ impl From for omicron_common::api::external::DiskState { } } -impl From for types::Ipv4Net { - fn from(n: omicron_common::api::external::Ipv4Net) -> Self { - Self::try_from(n.to_string()).unwrap_or_else(|e| panic!("{}: {}", n, e)) - } -} - -impl From for types::Ipv6Net { - fn from(n: omicron_common::api::external::Ipv6Net) -> Self { - Self::try_from(n.to_string()).unwrap_or_else(|e| panic!("{}: {}", n, e)) - } -} - -impl From for types::IpNet { - fn from(s: omicron_common::api::external::IpNet) -> Self { - use omicron_common::api::external::IpNet; - match s { - IpNet::V4(v4) => Self::V4(v4.into()), - IpNet::V6(v6) => Self::V6(v6.into()), - } - } -} - -impl From for types::Ipv4Net { - fn from(n: ipnetwork::Ipv4Network) -> Self { - Self::try_from(n.to_string()).unwrap_or_else(|e| panic!("{}: {}", n, e)) - } -} - -impl From for ipnetwork::Ipv4Network { - fn from(n: types::Ipv4Net) -> Self { - n.parse().unwrap() - } -} - -impl From for types::Ipv4Network { - fn from(n: ipnetwork::Ipv4Network) -> Self { - Self::try_from(n.to_string()).unwrap_or_else(|e| panic!("{}: {}", n, e)) - } -} - -impl From for types::Ipv6Net { - fn from(n: ipnetwork::Ipv6Network) -> Self { - Self::try_from(n.to_string()).unwrap_or_else(|e| panic!("{}: {}", n, e)) - } -} - -impl From for ipnetwork::Ipv6Network { - fn from(n: types::Ipv6Net) -> Self { - n.parse().unwrap() - } -} - -impl From for types::IpNet { - fn from(n: ipnetwork::IpNetwork) -> Self { - use ipnetwork::IpNetwork; - match n { - IpNetwork::V4(v4) => Self::V4(v4.into()), - IpNetwork::V6(v6) => Self::V6(v6.into()), - } - } -} - -impl From for ipnetwork::IpNetwork { - fn from(n: types::IpNet) -> Self { - match n { - types::IpNet::V4(v4) => 
ipnetwork::IpNetwork::V4(v4.into()), - types::IpNet::V6(v6) => ipnetwork::IpNetwork::V6(v6.into()), - } - } -} - -impl From for types::Ipv4Net { - fn from(n: std::net::Ipv4Addr) -> Self { - Self::try_from(format!("{n}/32")) - .unwrap_or_else(|e| panic!("{}: {}", n, e)) - } -} - -impl From for types::Ipv6Net { - fn from(n: std::net::Ipv6Addr) -> Self { - Self::try_from(format!("{n}/128")) - .unwrap_or_else(|e| panic!("{}: {}", n, e)) - } -} - -impl From for types::IpNet { - fn from(s: std::net::IpAddr) -> Self { - match s { - IpAddr::V4(v4) => Self::V4(v4.into()), - IpAddr::V6(v6) => Self::V6(v6.into()), - } - } -} - impl From for types::L4PortRange { fn from(s: omicron_common::api::external::L4PortRange) -> Self { Self::try_from(s.to_string()).unwrap_or_else(|e| panic!("{}: {}", s, e)) @@ -523,7 +471,7 @@ impl From fn from(s: omicron_common::api::internal::nexus::HostIdentifier) -> Self { use omicron_common::api::internal::nexus::HostIdentifier::*; match s { - Ip(net) => Self::Ip(net.into()), + Ip(net) => Self::Ip(net), Vpc(vni) => Self::Vpc(vni), } } @@ -627,60 +575,3 @@ impl TestInterfaces for Client { .expect("disk_finish_transition() failed unexpectedly"); } } - -impl Eq for BgpConfig {} - -impl Hash for BgpConfig { - fn hash(&self, state: &mut H) { - self.asn.hash(state); - self.originate.hash(state); - } -} - -impl Hash for BgpPeerConfig { - fn hash(&self, state: &mut H) { - self.addr.hash(state); - self.asn.hash(state); - self.port.hash(state); - self.hold_time.hash(state); - self.connect_retry.hash(state); - self.delay_open.hash(state); - self.idle_hold_time.hash(state); - self.keepalive.hash(state); - } -} - -impl Hash for RouteConfig { - fn hash(&self, state: &mut H) { - self.destination.hash(state); - self.nexthop.hash(state); - } -} - -impl Eq for PortConfigV1 {} - -impl Hash for PortConfigV1 { - fn hash(&self, state: &mut H) { - self.addresses.hash(state); - self.autoneg.hash(state); - self.bgp_peers.hash(state); - self.port.hash(state); - self.routes.hash(state); - self.switch.hash(state); - self.uplink_port_fec.hash(state); - self.uplink_port_speed.hash(state); - } -} - -impl Eq for BfdPeerConfig {} - -impl Hash for BfdPeerConfig { - fn hash(&self, state: &mut H) { - self.local.hash(state); - self.remote.hash(state); - self.detection_threshold.hash(state); - self.required_rx.hash(state); - self.mode.hash(state); - self.switch.hash(state); - } -} diff --git a/clients/wicketd-client/Cargo.toml b/clients/wicketd-client/Cargo.toml index 814309b975..8e50964e59 100644 --- a/clients/wicketd-client/Cargo.toml +++ b/clients/wicketd-client/Cargo.toml @@ -4,18 +4,22 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] chrono.workspace = true installinator-common.workspace = true -ipnetwork.workspace = true +omicron-common.workspace = true +omicron-workspace-hack.workspace = true progenitor.workspace = true regress.workspace = true reqwest = { workspace = true, features = ["rustls-tls", "stream"] } schemars.workspace = true serde.workspace = true serde_json.workspace = true +sled-hardware-types.workspace = true slog.workspace = true update-engine.workspace = true uuid.workspace = true wicket-common.workspace = true -omicron-workspace-hack.workspace = true diff --git a/clients/wicketd-client/src/lib.rs b/clients/wicketd-client/src/lib.rs index 09f9ca1418..8edb797b20 100644 --- a/clients/wicketd-client/src/lib.rs +++ b/clients/wicketd-client/src/lib.rs @@ -18,87 +18,70 @@ progenitor::generate_api!( slog::debug!(log, "client response"; 
"result" => ?result); }), derives = [schemars::JsonSchema], - patch = - { - SpComponentCaboose = { derives = [PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, - SpIdentifier = { derives = [Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, - SpState = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize] }, - SpComponentInfo= { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize]}, - SpIgnition= { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize]}, - SpIgnitionSystemType= { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize]}, - SpInventory = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize]}, - RackV1Inventory = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize]}, - RotState = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize]}, - RotImageDetails = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize]}, - RotInventory = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize]}, - RotSlot = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize]}, - ImageVersion = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize]}, - StartUpdateOptions = { derives = [ Serialize, Deserialize, Default ]}, - Ipv4Range = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - Ipv6Range = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - IpRange = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - Baseboard = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - BootstrapSledDescription = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - RackInitId = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - RackResetId = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - RackOperationStatus = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - RackNetworkConfigV1 = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - UplinkConfig = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - PortConfigV1 = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - BgpPeerConfig = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - BgpConfig = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - RouteConfig = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - CurrentRssUserConfigInsensitive = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - CurrentRssUserConfigSensitive = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - CurrentRssUserConfig = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - UserSpecifiedRackNetworkConfig = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, - GetLocationResponse = { derives = [ PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize ] }, + patch = { + CurrentRssUserConfig = { derives = [PartialEq] }, + CurrentRssUserConfigSensitive = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + GetLocationResponse = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + ImageVersion = { derives = [PartialEq, Eq, PartialOrd, Ord]}, + RackInitId = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + RackNetworkConfigV1 = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + 
RackOperationStatus = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + RackResetId = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + RackV1Inventory = { derives = [PartialEq, Eq, PartialOrd, Ord]}, + RotImageDetails = { derives = [PartialEq, Eq, PartialOrd, Ord]}, + RotInventory = { derives = [PartialEq, Eq, PartialOrd, Ord]}, + RotSlot = { derives = [PartialEq, Eq, PartialOrd, Ord]}, + RotState = { derives = [PartialEq, Eq, PartialOrd, Ord]}, + SpComponentCaboose = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + SpComponentInfo = { derives = [PartialEq, Eq, PartialOrd, Ord]}, + SpIgnition = { derives = [PartialEq, Eq, PartialOrd, Ord]}, + SpIgnitionSystemType= { derives = [PartialEq, Eq, PartialOrd, Ord]}, + SpInventory = { derives = [PartialEq, Eq, PartialOrd, Ord]}, + SpState = { derives = [PartialEq, Eq, PartialOrd, Ord] }, + StartUpdateOptions = { derives = [Default]}, + UplinkConfig = { derives = [PartialEq, Eq, PartialOrd, Ord] }, }, replace = { + AllowedSourceIps = omicron_common::api::internal::shared::AllowedSourceIps, + Baseboard = sled_hardware_types::Baseboard, + BgpAuthKey = wicket_common::rack_setup::BgpAuthKey, + BgpAuthKeyId = wicket_common::rack_setup::BgpAuthKeyId, + BgpAuthKeyInfo = wicket_common::rack_setup::BgpAuthKeyInfo, + BgpAuthKeyStatus = wicket_common::rack_setup::BgpAuthKeyStatus, + BgpConfig = omicron_common::api::internal::shared::BgpConfig, + BgpPeerAuthKind = wicket_common::rack_setup::BgpPeerAuthKind, + BgpPeerConfig = omicron_common::api::internal::shared::BgpPeerConfig, + BootstrapSledDescription = wicket_common::rack_setup::BootstrapSledDescription, + ClearUpdateStateResponse = wicket_common::rack_update::ClearUpdateStateResponse, + CurrentRssUserConfigInsensitive = wicket_common::rack_setup::CurrentRssUserConfigInsensitive, Duration = std::time::Duration, - Ipv4Network = ipnetwork::Ipv4Network, - Ipv6Network = ipnetwork::Ipv6Network, - IpNetwork = ipnetwork::IpNetwork, + EventReportForWicketdEngineSpec = wicket_common::update_events::EventReport, + GetBgpAuthKeyInfoResponse = wicket_common::rack_setup::GetBgpAuthKeyInfoResponse, + ImportExportPolicy = omicron_common::api::internal::shared::ImportExportPolicy, + IpRange = omicron_common::address::IpRange, + Ipv4Range = omicron_common::address::Ipv4Range, + Ipv6Range = omicron_common::address::Ipv6Range, + M2Slot = installinator_common::M2Slot, + PortConfigV1 = omicron_common::api::internal::shared::PortConfigV1, + PortFec = omicron_common::api::internal::shared::PortFec, + PortSpeed = omicron_common::api::internal::shared::PortSpeed, + ProgressEventForGenericSpec = update_engine::events::ProgressEvent, + ProgressEventForInstallinatorSpec = installinator_common::ProgressEvent, + ProgressEventForWicketdEngineSpec = wicket_common::update_events::ProgressEvent, PutRssUserConfigInsensitive = wicket_common::rack_setup::PutRssUserConfigInsensitive, - ClearUpdateStateResponse = wicket_common::rack_update::ClearUpdateStateResponse, + RouteConfig = omicron_common::api::internal::shared::RouteConfig, SpIdentifier = wicket_common::rack_update::SpIdentifier, SpType = wicket_common::rack_update::SpType, - EventReportForWicketdEngineSpec = wicket_common::update_events::EventReport, - StepEventForWicketdEngineSpec = wicket_common::update_events::StepEvent, - ProgressEventForWicketdEngineSpec = wicket_common::update_events::ProgressEvent, StepEventForGenericSpec = update_engine::events::StepEvent, - ProgressEventForGenericSpec = update_engine::events::ProgressEvent, StepEventForInstallinatorSpec = 
installinator_common::StepEvent, - ProgressEventForInstallinatorSpec = installinator_common::ProgressEvent, - M2Slot = installinator_common::M2Slot, + StepEventForWicketdEngineSpec = wicket_common::update_events::StepEvent, + SwitchLocation = omicron_common::api::internal::shared::SwitchLocation, + UserSpecifiedBgpPeerConfig = wicket_common::rack_setup::UserSpecifiedBgpPeerConfig, + UserSpecifiedImportExportPolicy = wicket_common::rack_setup::UserSpecifiedImportExportPolicy, + UserSpecifiedPortConfig = wicket_common::rack_setup::UserSpecifiedPortConfig, + UserSpecifiedRackNetworkConfig = wicket_common::rack_setup::UserSpecifiedRackNetworkConfig, } ); /// A type alias for errors returned by this crate. pub type ClientError = crate::Error; - -impl types::Baseboard { - pub fn identifier(&self) -> &str { - match &self { - Self::Gimlet { identifier, .. } => &identifier, - Self::Pc { identifier, .. } => &identifier, - Self::Unknown => "unknown", - } - } - - pub fn model(&self) -> &str { - match self { - Self::Gimlet { model, .. } => &model, - Self::Pc { model, .. } => &model, - Self::Unknown => "unknown", - } - } - - pub fn revision(&self) -> i64 { - match self { - Self::Gimlet { revision, .. } => *revision, - Self::Pc { .. } => 0, - Self::Unknown => 0, - } - } -} diff --git a/common/Cargo.toml b/common/Cargo.toml index 0485d3973b..b51e1bb070 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + # NOTE: # # This crate is depended on by several other workspaces! Be careful of adding @@ -25,6 +28,7 @@ ipnetwork.workspace = true macaddr.workspace = true mg-admin-client.workspace = true omicron-uuid-kinds.workspace = true +oxnet.workspace = true proptest = { workspace = true, optional = true } rand.workspace = true reqwest = { workspace = true, features = ["rustls-tls", "stream"] } @@ -55,6 +59,7 @@ libc.workspace = true regress.workspace = true serde_urlencoded.workspace = true tokio = { workspace = true, features = ["test-util"] } +toml.workspace = true [features] testing = ["proptest", "test-strategy"] diff --git a/common/src/address.rs b/common/src/address.rs index 8e12810343..eddfb996c4 100644 --- a/common/src/address.rs +++ b/common/src/address.rs @@ -7,9 +7,10 @@ //! This addressing functionality is shared by both initialization services //! and Nexus, who need to agree upon addressing schemes. -use crate::api::external::{self, Error, Ipv4Net, Ipv6Net}; -use ipnetwork::{Ipv4Network, Ipv6Network}; +use crate::api::external::{self, Error}; +use ipnetwork::Ipv6Network; use once_cell::sync::Lazy; +use oxnet::{Ipv4Net, Ipv6Net}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV6}; @@ -72,6 +73,12 @@ pub const WICKETD_NEXUS_PROXY_PORT: u16 = 12229; pub const NTP_PORT: u16 = 123; +/// The length for all VPC IPv6 prefixes +pub const VPC_IPV6_PREFIX_LENGTH: u8 = 48; + +/// The prefix length for all VPC subnets +pub const VPC_SUBNET_IPV6_PREFIX_LENGTH: u8 = 64; + // The number of ports available to an SNAT IP. // Note that for static NAT, this value isn't used, and all ports are available. // @@ -104,61 +111,50 @@ pub const NUM_SOURCE_NAT_PORTS: u16 = 1 << 14; // Furthermore, all the below *_OPTE_IPV6_SUBNET constants are // /64's within this prefix. 
pub static SERVICE_VPC_IPV6_PREFIX: Lazy = Lazy::new(|| { - Ipv6Net( - Ipv6Network::new( - Ipv6Addr::new(0xfd77, 0xe9d2, 0x9cd9, 0, 0, 0, 0, 0), - Ipv6Net::VPC_IPV6_PREFIX_LENGTH, - ) - .unwrap(), + Ipv6Net::new( + Ipv6Addr::new(0xfd77, 0xe9d2, 0x9cd9, 0, 0, 0, 0, 0), + VPC_IPV6_PREFIX_LENGTH, ) + .unwrap() }); /// The IPv4 subnet for External DNS OPTE ports. -pub static DNS_OPTE_IPV4_SUBNET: Lazy = Lazy::new(|| { - Ipv4Net(Ipv4Network::new(Ipv4Addr::new(172, 30, 1, 0), 24).unwrap()) -}); +pub static DNS_OPTE_IPV4_SUBNET: Lazy = + Lazy::new(|| Ipv4Net::new(Ipv4Addr::new(172, 30, 1, 0), 24).unwrap()); /// The IPv6 subnet for External DNS OPTE ports. pub static DNS_OPTE_IPV6_SUBNET: Lazy = Lazy::new(|| { - Ipv6Net( - Ipv6Network::new( - Ipv6Addr::new(0xfd77, 0xe9d2, 0x9cd9, 1, 0, 0, 0, 0), - Ipv6Net::VPC_SUBNET_IPV6_PREFIX_LENGTH, - ) - .unwrap(), + Ipv6Net::new( + Ipv6Addr::new(0xfd77, 0xe9d2, 0x9cd9, 1, 0, 0, 0, 0), + VPC_SUBNET_IPV6_PREFIX_LENGTH, ) + .unwrap() }); /// The IPv4 subnet for Nexus OPTE ports. -pub static NEXUS_OPTE_IPV4_SUBNET: Lazy = Lazy::new(|| { - Ipv4Net(Ipv4Network::new(Ipv4Addr::new(172, 30, 2, 0), 24).unwrap()) -}); +pub static NEXUS_OPTE_IPV4_SUBNET: Lazy = + Lazy::new(|| Ipv4Net::new(Ipv4Addr::new(172, 30, 2, 0), 24).unwrap()); /// The IPv6 subnet for Nexus OPTE ports. pub static NEXUS_OPTE_IPV6_SUBNET: Lazy = Lazy::new(|| { - Ipv6Net( - Ipv6Network::new( - Ipv6Addr::new(0xfd77, 0xe9d2, 0x9cd9, 2, 0, 0, 0, 0), - Ipv6Net::VPC_SUBNET_IPV6_PREFIX_LENGTH, - ) - .unwrap(), + Ipv6Net::new( + Ipv6Addr::new(0xfd77, 0xe9d2, 0x9cd9, 2, 0, 0, 0, 0), + VPC_SUBNET_IPV6_PREFIX_LENGTH, ) + .unwrap() }); /// The IPv4 subnet for Boundary NTP OPTE ports. -pub static NTP_OPTE_IPV4_SUBNET: Lazy = Lazy::new(|| { - Ipv4Net(Ipv4Network::new(Ipv4Addr::new(172, 30, 3, 0), 24).unwrap()) -}); +pub static NTP_OPTE_IPV4_SUBNET: Lazy = + Lazy::new(|| Ipv4Net::new(Ipv4Addr::new(172, 30, 3, 0), 24).unwrap()); /// The IPv6 subnet for Boundary NTP OPTE ports. pub static NTP_OPTE_IPV6_SUBNET: Lazy = Lazy::new(|| { - Ipv6Net( - Ipv6Network::new( - Ipv6Addr::new(0xfd77, 0xe9d2, 0x9cd9, 3, 0, 0, 0, 0), - Ipv6Net::VPC_SUBNET_IPV6_PREFIX_LENGTH, - ) - .unwrap(), + Ipv6Net::new( + Ipv6Addr::new(0xfd77, 0xe9d2, 0x9cd9, 3, 0, 0, 0, 0), + VPC_SUBNET_IPV6_PREFIX_LENGTH, ) + .unwrap() }); // Anycast is a mechanism in which a single IP address is shared by multiple @@ -188,7 +184,7 @@ pub const CP_SERVICES_RESERVED_ADDRESSES: u16 = 0xFFFF; // to assume that addresses in this subnet are available. pub const SLED_RESERVED_ADDRESSES: u16 = 32; -/// Wraps an [`Ipv6Network`] with a compile-time prefix length. +/// Wraps an [`Ipv6Net`] with a compile-time prefix length. #[derive(Debug, Clone, Copy, JsonSchema, Serialize, Hash, PartialEq, Eq)] #[schemars(rename = "Ipv6Subnet")] pub struct Ipv6Subnet { @@ -198,23 +194,23 @@ pub struct Ipv6Subnet { impl Ipv6Subnet { pub fn new(addr: Ipv6Addr) -> Self { // Create a network with the compile-time prefix length. - let net = Ipv6Network::new(addr, N).unwrap(); + let net = Ipv6Net::new(addr, N).unwrap(); // Ensure the address is set to within-prefix only components. - let net = Ipv6Network::new(net.network(), N).unwrap(); - Self { net: Ipv6Net(net) } + let net = Ipv6Net::new(net.prefix(), N).unwrap(); + Self { net } } /// Returns the underlying network. - pub fn net(&self) -> Ipv6Network { - self.net.0 + pub fn net(&self) -> Ipv6Net { + self.net } } impl From for Ipv6Subnet { fn from(net: Ipv6Network) -> Self { // Ensure the address is set to within-prefix only components. 
- let net = Ipv6Network::new(net.network(), N).unwrap(); - Self { net: Ipv6Net(net) } + let net = Ipv6Net::new(net.network(), N).unwrap(); + Self { net } } } @@ -230,13 +226,13 @@ impl<'de, const N: u8> Deserialize<'de> for Ipv6Subnet { } let Inner { net } = Inner::deserialize(deserializer)?; - if net.prefix() == N { + if net.width() == N { Ok(Self { net }) } else { Err(::custom(format!( "expected prefix {} but found {}", N, - net.prefix(), + net.width(), ))) } } @@ -252,24 +248,16 @@ impl DnsSubnet { /// Returns the DNS server address within the subnet. /// /// This is the first address within the subnet. - pub fn dns_address(&self) -> Ipv6Network { - Ipv6Network::new( - self.subnet.net().iter().nth(DNS_ADDRESS_INDEX).unwrap(), - SLED_PREFIX, - ) - .unwrap() + pub fn dns_address(&self) -> Ipv6Addr { + self.subnet.net().nth(DNS_ADDRESS_INDEX as u128).unwrap() } /// Returns the address which the Global Zone should create /// to be able to contact the DNS server. /// /// This is the second address within the subnet. - pub fn gz_address(&self) -> Ipv6Network { - Ipv6Network::new( - self.subnet.net().iter().nth(GZ_ADDRESS_INDEX).unwrap(), - SLED_PREFIX, - ) - .unwrap() + pub fn gz_address(&self) -> Ipv6Addr { + self.subnet.net().nth(GZ_ADDRESS_INDEX as u128).unwrap() } } @@ -281,7 +269,7 @@ pub struct ReservedRackSubnet(pub Ipv6Subnet); impl ReservedRackSubnet { /// Returns the subnet for the reserved rack subnet. pub fn new(subnet: Ipv6Subnet) -> Self { - ReservedRackSubnet(Ipv6Subnet::::new(subnet.net().ip())) + ReservedRackSubnet(Ipv6Subnet::::new(subnet.net().addr())) } /// Returns the DNS addresses from this reserved rack subnet. @@ -308,7 +296,7 @@ pub fn get_internal_dns_server_addresses(addr: Ipv6Addr) -> Vec { &reserved_rack_subnet.get_dns_subnets()[0..DNS_REDUNDANCY]; dns_subnets .iter() - .map(|dns_subnet| IpAddr::from(dns_subnet.dns_address().ip())) + .map(|dns_subnet| IpAddr::from(dns_subnet.dns_address())) .collect() } @@ -320,7 +308,7 @@ const SWITCH_ZONE_ADDRESS_INDEX: usize = 2; /// This address will come from the first address of the [`SLED_PREFIX`] subnet. pub fn get_sled_address(sled_subnet: Ipv6Subnet) -> SocketAddrV6 { let sled_agent_ip = - sled_subnet.net().iter().nth(SLED_AGENT_ADDRESS_INDEX).unwrap(); + sled_subnet.net().nth(SLED_AGENT_ADDRESS_INDEX as u128).unwrap(); SocketAddrV6::new(sled_agent_ip, SLED_AGENT_PORT, 0, 0) } @@ -330,7 +318,7 @@ pub fn get_sled_address(sled_subnet: Ipv6Subnet) -> SocketAddrV6 { pub fn get_switch_zone_address( sled_subnet: Ipv6Subnet, ) -> Ipv6Addr { - sled_subnet.net().iter().nth(SWITCH_ZONE_ADDRESS_INDEX).unwrap() + sled_subnet.net().nth(SWITCH_ZONE_ADDRESS_INDEX as u128).unwrap() } /// Returns a sled subnet within a rack subnet. @@ -340,7 +328,7 @@ pub fn get_64_subnet( rack_subnet: Ipv6Subnet, index: u8, ) -> Ipv6Subnet { - let mut rack_network = rack_subnet.net().network().octets(); + let mut rack_network = rack_subnet.net().addr().octets(); // To set bits distinguishing the /64 from the /56, we modify the 7th octet. rack_network[7] = index; @@ -352,7 +340,7 @@ pub fn get_64_subnet( /// /// The first address in the range is guaranteed to be no greater than the last /// address. -#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(untagged)] pub enum IpRange { V4(Ipv4Range), @@ -485,7 +473,9 @@ impl From for IpRange { /// A non-decreasing IPv4 address range, inclusive of both ends. 
/// /// The first address must be less than or equal to the last address. -#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, JsonSchema)] +#[derive( + Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema, +)] #[serde(try_from = "AnyIpv4Range")] pub struct Ipv4Range { pub first: Ipv4Addr, @@ -547,7 +537,9 @@ impl TryFrom for Ipv4Range { /// A non-decreasing IPv6 address range, inclusive of both ends. /// /// The first address must be less than or equal to the last address. -#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, JsonSchema)] +#[derive( + Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema, +)] #[serde(try_from = "AnyIpv6Range")] pub struct Ipv6Range { pub first: Ipv6Addr, @@ -676,7 +668,7 @@ mod test { assert_eq!( // Note that these bits (indicating the rack) are zero. // vv - "fd00:1122:3344:0000::/56".parse::().unwrap(), + "fd00:1122:3344:0000::/56".parse::().unwrap(), rack_subnet.0.net(), ); @@ -686,11 +678,11 @@ mod test { // The DNS address and GZ address should be only differing by one. assert_eq!( - "fd00:1122:3344:0001::1/64".parse::().unwrap(), + "fd00:1122:3344:0001::1".parse::().unwrap(), dns_subnets[0].dns_address(), ); assert_eq!( - "fd00:1122:3344:0001::2/64".parse::().unwrap(), + "fd00:1122:3344:0001::2".parse::().unwrap(), dns_subnets[0].gz_address(), ); } diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 324231f469..07a7776f1e 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -9,6 +9,7 @@ mod error; pub mod http_pagination; +pub use crate::api::internal::shared::AllowedSourceIps; pub use crate::api::internal::shared::SwitchLocation; use crate::update::ArtifactHash; use crate::update::ArtifactId; @@ -21,6 +22,7 @@ use dropshot::HttpError; pub use dropshot::PaginationOrder; pub use error::*; use futures::stream::BoxStream; +use oxnet::IpNet; use parse_display::Display; use parse_display::FromStr; use rand::thread_rng; @@ -38,7 +40,6 @@ use std::fmt::Formatter; use std::fmt::Result as FormatResult; use std::net::IpAddr; use std::net::Ipv4Addr; -use std::net::Ipv6Addr; use std::num::{NonZeroU16, NonZeroU32}; use std::str::FromStr; use uuid::Uuid; @@ -526,12 +527,21 @@ impl JsonSchema for RoleName { // in the database as an i64. Constraining it here ensures that we can't fail // to serialize the value. // -// TODO: custom JsonSchema and Deserialize impls to enforce i64::MAX limit -#[derive( - Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, -)] +// TODO: custom JsonSchema impl to describe i64::MAX limit; this is blocked by +// https://github.com/oxidecomputer/typify/issues/589 +#[derive(Copy, Clone, Debug, Serialize, JsonSchema, PartialEq, Eq)] pub struct ByteCount(u64); +impl<'de> Deserialize<'de> for ByteCount { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let bytes = u64::deserialize(deserializer)?; + ByteCount::try_from(bytes).map_err(serde::de::Error::custom) + } +} + #[allow(non_upper_case_globals)] const KiB: u64 = 1024; #[allow(non_upper_case_globals)] @@ -847,6 +857,7 @@ impl JsonSchema for Hostname { pub enum ResourceType { AddressLot, AddressLotBlock, + AllowList, BackgroundTask, BgpConfig, BgpAnnounceSet, @@ -881,6 +892,7 @@ pub enum ResourceType { ServiceNetworkInterface, Sled, SledInstance, + SledLedger, Switch, SagaDbg, Snapshot, @@ -1217,338 +1229,33 @@ impl DiskState { } } -/// An `Ipv4Net` represents a IPv4 subnetwork, including the address and network mask. 
-#[derive(Clone, Copy, Debug, Deserialize, Hash, PartialEq, Eq, Serialize)] -pub struct Ipv4Net(pub ipnetwork::Ipv4Network); - -impl Ipv4Net { - /// Return `true` if this IPv4 subnetwork is from an RFC 1918 private - /// address space. - pub fn is_private(&self) -> bool { - self.0.network().is_private() - } -} - -impl std::ops::Deref for Ipv4Net { - type Target = ipnetwork::Ipv4Network; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl std::fmt::Display for Ipv4Net { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -impl JsonSchema for Ipv4Net { - fn schema_name() -> String { - "Ipv4Net".to_string() - } - - fn json_schema( - _: &mut schemars::gen::SchemaGenerator, - ) -> schemars::schema::Schema { - schemars::schema::SchemaObject { - metadata: Some(Box::new(schemars::schema::Metadata { - title: Some("An IPv4 subnet".to_string()), - description: Some( - "An IPv4 subnet, including prefix and subnet mask" - .to_string(), - ), - examples: vec!["192.168.1.0/24".into()], - ..Default::default() - })), - instance_type: Some(schemars::schema::InstanceType::String.into()), - string: Some(Box::new(schemars::schema::StringValidation { - pattern: Some( - concat!( - r#"^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}"#, - r#"([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])"#, - r#"/([0-9]|1[0-9]|2[0-9]|3[0-2])$"#, - ) - .to_string(), - ), - ..Default::default() - })), - ..Default::default() - } - .into() - } -} - -/// An `Ipv6Net` represents a IPv6 subnetwork, including the address and network mask. -#[derive(Clone, Copy, Debug, Deserialize, Hash, PartialEq, Eq, Serialize)] -pub struct Ipv6Net(pub ipnetwork::Ipv6Network); - -impl Ipv6Net { +pub trait Ipv6NetExt { /// The length for all VPC IPv6 prefixes - pub const VPC_IPV6_PREFIX_LENGTH: u8 = 48; + const VPC_IPV6_PREFIX_LENGTH: u8 = 48; - /// The prefix length for all VPC Sunets - pub const VPC_SUBNET_IPV6_PREFIX_LENGTH: u8 = 64; - - /// Return `true` if this subnetwork is in the IPv6 Unique Local Address - /// range defined in RFC 4193, e.g., `fd00:/8` - pub fn is_unique_local(&self) -> bool { - // TODO: Delegate to `Ipv6Addr::is_unique_local()` when stabilized. - self.0.network().octets()[0] == 0xfd - } + /// The prefix length for all VPC Subnets + const VPC_SUBNET_IPV6_PREFIX_LENGTH: u8 = 64; /// Return `true` if this subnetwork is a valid VPC prefix. /// /// This checks that the subnet is a unique local address, and has the VPC /// prefix length required. - pub fn is_vpc_prefix(&self) -> bool { - self.is_unique_local() - && self.0.prefix() == Self::VPC_IPV6_PREFIX_LENGTH - } + fn is_vpc_prefix(&self) -> bool; /// Return `true` if this subnetwork is a valid VPC Subnet, given the VPC's /// prefix. 
- pub fn is_vpc_subnet(&self, vpc_prefix: &Ipv6Net) -> bool { - self.is_unique_local() - && self.is_subnet_of(vpc_prefix.0) - && self.prefix() == Self::VPC_SUBNET_IPV6_PREFIX_LENGTH - } -} - -impl std::ops::Deref for Ipv6Net { - type Target = ipnetwork::Ipv6Network; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl std::fmt::Display for Ipv6Net { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -impl From for Ipv6Net { - fn from(n: ipnetwork::Ipv6Network) -> Ipv6Net { - Self(n) - } + fn is_vpc_subnet(&self, vpc_prefix: &Self) -> bool; } -impl JsonSchema for Ipv6Net { - fn schema_name() -> String { - "Ipv6Net".to_string() - } - - fn json_schema( - _: &mut schemars::gen::SchemaGenerator, - ) -> schemars::schema::Schema { - schemars::schema::SchemaObject { - metadata: Some(Box::new(schemars::schema::Metadata { - title: Some("An IPv6 subnet".to_string()), - description: Some( - "An IPv6 subnet, including prefix and subnet mask" - .to_string(), - ), - examples: vec!["fd12:3456::/64".into()], - ..Default::default() - })), - instance_type: Some(schemars::schema::InstanceType::String.into()), - string: Some(Box::new(schemars::schema::StringValidation { - pattern: Some( - // Conforming to unique local addressing scheme, - // `fd00::/8`. - concat!( - r#"^([fF][dD])[0-9a-fA-F]{2}:("#, - r#"([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}"#, - r#"|([0-9a-fA-F]{1,4}:){1,6}:)"#, - r#"([0-9a-fA-F]{1,4})?"#, - r#"\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$"#, - ) - .to_string(), - ), - ..Default::default() - })), - ..Default::default() - } - .into() - } -} - -/// An `IpNet` represents an IP network, either IPv4 or IPv6. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -pub enum IpNet { - V4(Ipv4Net), - V6(Ipv6Net), -} - -impl IpNet { - /// Return the underlying address. - pub fn ip(&self) -> IpAddr { - match self { - IpNet::V4(inner) => inner.ip().into(), - IpNet::V6(inner) => inner.ip().into(), - } - } - - /// Return the underlying prefix length. - pub fn prefix(&self) -> u8 { - match self { - IpNet::V4(inner) => inner.prefix(), - IpNet::V6(inner) => inner.prefix(), - } +impl Ipv6NetExt for oxnet::Ipv6Net { + fn is_vpc_prefix(&self) -> bool { + self.is_unique_local() && self.width() == Self::VPC_IPV6_PREFIX_LENGTH } - /// Return the first address in this subnet - pub fn first_address(&self) -> IpAddr { - match self { - IpNet::V4(inner) => IpAddr::from(inner.iter().next().unwrap()), - IpNet::V6(inner) => IpAddr::from(inner.iter().next().unwrap()), - } - } - - /// Return the last address in this subnet. - /// - /// For a subnet of size 1, e.g., a /32, this is the same as the first - /// address. - // NOTE: This is a workaround for the fact that the `ipnetwork` crate's - // iterator provides only the `Iterator::next()` method. That means that - // finding the last address is linear in the size of the subnet, which is - // completely untenable and totally avoidable with some addition. In the - // long term, we should either put up a patch to the `ipnetwork` crate or - // move the `ipnet` crate, which does provide an efficient iterator - // implementation. 
- pub fn last_address(&self) -> IpAddr { - match self { - IpNet::V4(inner) => { - let base: u32 = inner.network().into(); - let size = inner.size() - 1; - std::net::IpAddr::V4(std::net::Ipv4Addr::from(base + size)) - } - IpNet::V6(inner) => { - let base: u128 = inner.network().into(); - let size = inner.size() - 1; - std::net::IpAddr::V6(std::net::Ipv6Addr::from(base + size)) - } - } - } -} - -impl From for IpNet { - fn from(n: ipnetwork::IpNetwork) -> Self { - match n { - ipnetwork::IpNetwork::V4(v4) => IpNet::V4(Ipv4Net(v4)), - ipnetwork::IpNetwork::V6(v6) => IpNet::V6(Ipv6Net(v6)), - } - } -} - -impl From for IpNet { - fn from(n: Ipv4Net) -> IpNet { - IpNet::V4(n) - } -} - -impl From for IpNet { - fn from(n: Ipv4Addr) -> IpNet { - IpNet::V4(Ipv4Net(ipnetwork::Ipv4Network::from(n))) - } -} - -impl From for IpNet { - fn from(n: Ipv6Net) -> IpNet { - IpNet::V6(n) - } -} - -impl From for IpNet { - fn from(n: Ipv6Addr) -> IpNet { - IpNet::V6(Ipv6Net(ipnetwork::Ipv6Network::from(n))) - } -} - -impl From for IpNet { - fn from(n: IpAddr) -> IpNet { - match n { - IpAddr::V4(v4) => IpNet::from(v4), - IpAddr::V6(v6) => IpNet::from(v6), - } - } -} - -impl std::fmt::Display for IpNet { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - IpNet::V4(inner) => write!(f, "{}", inner), - IpNet::V6(inner) => write!(f, "{}", inner), - } - } -} - -impl FromStr for IpNet { - type Err = String; - - fn from_str(s: &str) -> Result { - let net = - s.parse::().map_err(|e| e.to_string())?; - match net { - ipnetwork::IpNetwork::V4(net) => Ok(IpNet::from(Ipv4Net(net))), - ipnetwork::IpNetwork::V6(net) => Ok(IpNet::from(Ipv6Net(net))), - } - } -} - -impl From for ipnetwork::IpNetwork { - fn from(net: IpNet) -> ipnetwork::IpNetwork { - match net { - IpNet::V4(net) => ipnetwork::IpNetwork::from(net.0), - IpNet::V6(net) => ipnetwork::IpNetwork::from(net.0), - } - } -} - -impl Serialize for IpNet { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - match self { - IpNet::V4(v4) => v4.serialize(serializer), - IpNet::V6(v6) => v6.serialize(serializer), - } - } -} - -impl<'de> Deserialize<'de> for IpNet { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let net = ipnetwork::IpNetwork::deserialize(deserializer)?; - match net { - ipnetwork::IpNetwork::V4(net) => Ok(IpNet::from(Ipv4Net(net))), - ipnetwork::IpNetwork::V6(net) => Ok(IpNet::from(Ipv6Net(net))), - } - } -} - -impl JsonSchema for IpNet { - fn schema_name() -> String { - "IpNet".to_string() - } - - fn json_schema( - gen: &mut schemars::gen::SchemaGenerator, - ) -> schemars::schema::Schema { - schemars::schema::SchemaObject { - subschemas: Some(Box::new(schemars::schema::SubschemaValidation { - one_of: Some(vec![ - label_schema("v4", gen.subschema_for::()), - label_schema("v6", gen.subschema_for::()), - ]), - ..Default::default() - })), - ..Default::default() - } - .into() + fn is_vpc_subnet(&self, vpc_prefix: &Self) -> bool { + self.is_unique_local() + && self.is_subnet_of(vpc_prefix) + && self.width() == Self::VPC_SUBNET_IPV6_PREFIX_LENGTH } } @@ -1835,7 +1542,7 @@ pub enum VpcFirewallRuleTarget { /// The rule applies to a specific IP address Ip(IpAddr), /// The rule applies to a specific IP subnet - IpNet(IpNet), + IpNet(oxnet::IpNet), // Tags not yet implemented // Tag(Name), } @@ -1866,7 +1573,7 @@ pub enum VpcFirewallRuleHostFilter { /// The rule applies to traffic from/to a specific IP address Ip(IpAddr), /// The rule applies to traffic from/to a 
specific IP subnet - IpNet(IpNet), + IpNet(oxnet::IpNet), // TODO: Internet gateways not yet implemented // #[display("inetgw:{0}")] // InternetGateway(Name), @@ -2374,7 +2081,7 @@ pub struct LoopbackAddress { pub switch_location: String, /// The loopback IP address and prefix length. - pub address: IpNet, + pub address: oxnet::IpNet, } /// A switch port represents a physical external port on a rack switch. @@ -2439,7 +2146,7 @@ pub struct SwitchPortSettingsView { pub routes: Vec, /// BGP peer settings. - pub bgp_peers: Vec, + pub bgp_peers: Vec, /// Layer 3 IP address settings. pub addresses: Vec, @@ -2493,6 +2200,74 @@ pub struct SwitchPortConfig { pub geometry: SwitchPortGeometry, } +/// The speed of a link. +#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum LinkSpeed { + /// Zero gigabits per second. + Speed0G, + /// 1 gigabit per second. + Speed1G, + /// 10 gigabits per second. + Speed10G, + /// 25 gigabits per second. + Speed25G, + /// 40 gigabits per second. + Speed40G, + /// 50 gigabits per second. + Speed50G, + /// 100 gigabits per second. + Speed100G, + /// 200 gigabits per second. + Speed200G, + /// 400 gigabits per second. + Speed400G, +} + +/// The forward error correction mode of a link. +#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum LinkFec { + /// Firecode forward error correction. + Firecode, + /// No forward error correction. + None, + /// Reed-Solomon forward error correction. + Rs, +} + +impl From for LinkFec { + fn from(x: crate::api::internal::shared::PortFec) -> LinkFec { + match x { + crate::api::internal::shared::PortFec::Firecode => Self::Firecode, + crate::api::internal::shared::PortFec::None => Self::None, + crate::api::internal::shared::PortFec::Rs => Self::Rs, + } + } +} + +impl From for LinkSpeed { + fn from(x: crate::api::internal::shared::PortSpeed) -> Self { + match x { + crate::api::internal::shared::PortSpeed::Speed0G => Self::Speed0G, + crate::api::internal::shared::PortSpeed::Speed1G => Self::Speed1G, + crate::api::internal::shared::PortSpeed::Speed10G => Self::Speed10G, + crate::api::internal::shared::PortSpeed::Speed25G => Self::Speed25G, + crate::api::internal::shared::PortSpeed::Speed40G => Self::Speed40G, + crate::api::internal::shared::PortSpeed::Speed50G => Self::Speed50G, + crate::api::internal::shared::PortSpeed::Speed100G => { + Self::Speed100G + } + crate::api::internal::shared::PortSpeed::Speed200G => { + Self::Speed200G + } + crate::api::internal::shared::PortSpeed::Speed400G => { + Self::Speed400G + } + } + } +} + /// A link configuration for a port settings object. #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq)] pub struct SwitchPortLinkConfig { @@ -2508,6 +2283,15 @@ pub struct SwitchPortLinkConfig { /// The maximum transmission unit for this link. pub mtu: u16, + + /// The forward error correction mode of the link. + pub fec: LinkFec, + + /// The configured speed of the link. + pub speed: LinkSpeed, + + /// Whether or not the link has autonegotiation enabled. + pub autoneg: bool, } /// A link layer discovery protocol (LLDP) service configuration. @@ -2539,7 +2323,7 @@ pub struct LldpConfig { pub system_description: String, /// THE LLDP management IP TLV. - pub management_ip: IpNet, + pub management_ip: oxnet::IpNet, } /// Describes the kind of an switch interface. 
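As an aside on the new `LinkFec`/`LinkSpeed` enums above: they are external-API views of the internal `PortFec`/`PortSpeed` types, converted through the `From` impls added in this hunk. A minimal sketch of that conversion (assuming the `omicron_common` crate paths used in this diff; illustrative only, not part of the change):

use omicron_common::api::external::{LinkFec, LinkSpeed};
use omicron_common::api::internal::shared::{PortFec, PortSpeed};

fn main() {
    // Internal switch-port settings map onto the external API enums.
    let fec: LinkFec = PortFec::Rs.into();
    let speed: LinkSpeed = PortSpeed::Speed100G.into();
    assert_eq!(fec, LinkFec::Rs);
    assert_eq!(speed, LinkSpeed::Speed100G);
}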
@@ -2606,16 +2390,17 @@ pub struct SwitchPortRouteConfig { pub interface_name: String, /// The route's destination network. - pub dst: IpNet, + pub dst: oxnet::IpNet, /// The route's gateway address. - pub gw: IpNet, + pub gw: oxnet::IpNet, /// The VLAN identifier for the route. Use this if the gateway is reachable /// over an 802.1Q tagged L2 segment. pub vlan_id: Option<u16>, } +/* /// A BGP peer configuration for a port settings object. #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq)] pub struct SwitchPortBgpPeerConfig { @@ -2634,6 +2419,74 @@ /// The address of the peer. pub addr: IpAddr, } +*/ + +/// A BGP peer configuration for an interface. Includes the set of announcements +/// that will be advertised to the peer identified by `addr`. The `bgp_config` +/// parameter is a reference to global BGP parameters. The `interface_name` +/// indicates what interface the peer should be contacted on. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +pub struct BgpPeer { + /// The global BGP configuration used for establishing a session with this + /// peer. + pub bgp_config: NameOrId, + + /// The name of the interface to peer on. This is relative to the port + /// configuration this BGP peer configuration is a part of. For example this + /// value could be phy0 to refer to a primary physical interface. Or it + /// could be vlan47 to refer to a VLAN interface. + pub interface_name: String, + + /// The address of the host to peer with. + pub addr: IpAddr, + + /// How long to hold peer connections between keepalives (seconds). + pub hold_time: u32, + + /// How long to hold a peer in idle before attempting a new session + /// (seconds). + pub idle_hold_time: u32, + + /// How long to delay sending an open request after establishing a TCP + /// session (seconds). + pub delay_open: u32, + + /// How long to wait between TCP connection retries (seconds). + pub connect_retry: u32, + + /// How often to send keepalive requests (seconds). + pub keepalive: u32, + + /// Require that a peer has a specified ASN. + pub remote_asn: Option<u32>, + + /// Require messages from a peer have a minimum IP time to live field. + pub min_ttl: Option<u8>, + + /// Use the given key for TCP-MD5 authentication with the peer. + pub md5_auth_key: Option<String>, + + /// Apply the provided multi-exit discriminator (MED) to updates sent to the peer. + pub multi_exit_discriminator: Option<u32>, + + /// Include the provided communities in updates sent to the peer. + pub communities: Vec<u32>, + + /// Apply a local preference to routes received from this peer. + pub local_pref: Option<u32>, + + /// Enforce that the first AS in paths received from this peer is the peer's AS. + pub enforce_first_as: bool, + + /// Define import policy for a peer. + pub allowed_import: ImportExportPolicy, + + /// Define export policy for a peer. + pub allowed_export: ImportExportPolicy, + + /// Associate a VLAN ID with a peer. + pub vlan_id: Option<u16>, +} /// A base BGP configuration. #[derive( @@ -2669,7 +2522,7 @@ pub struct BgpAnnouncement { pub address_lot_block_id: Uuid, /// The IP network being announced. - pub network: IpNet, + pub network: oxnet::IpNet, } /// An IP address configuration for a port settings object. @@ -2682,7 +2535,7 @@ pub struct SwitchPortAddressConfig { pub address_lot_block_id: Uuid, /// The IP address and prefix. - pub address: IpNet, + pub address: oxnet::IpNet, /// The interface name this address belongs to.
// TODO: https://github.com/oxidecomputer/omicron/issues/3050 @@ -2694,7 +2547,7 @@ pub struct SwitchPortAddressConfig { #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq)] #[serde(rename_all = "snake_case")] pub enum BgpPeerState { - /// Initial state. Refuse all incomming BGP connections. No resources + /// Initial state. Refuse all incoming BGP connections. No resources /// allocated to peer. Idle, @@ -2713,7 +2566,7 @@ pub enum BgpPeerState { /// Synchronizing with peer. SessionSetup, - /// Session established. Able to exchange update, notification and keepliave + /// Session established. Able to exchange update, notification and keepalive /// messages with peers. Established, } @@ -2809,7 +2662,7 @@ impl AggregateBgpMessageHistory { #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize, PartialEq)] pub struct BgpImportedRouteIpv4 { /// The destination network prefix. - pub prefix: Ipv4Net, + pub prefix: oxnet::Ipv4Net, /// The nexthop the prefix is reachable through. pub nexthop: Ipv4Addr, @@ -2944,12 +2797,32 @@ pub struct Probe { pub sled: Uuid, } +/// Define policy relating to the import and export of prefixes from a BGP +/// peer. +#[derive( + Default, + Debug, + Serialize, + Deserialize, + Clone, + JsonSchema, + Eq, + PartialEq, + Hash, +)] +#[serde(rename_all = "snake_case", tag = "type", content = "value")] +pub enum ImportExportPolicy { + /// Do not perform any filtering. + #[default] + NoFiltering, + Allow(Vec), +} + #[cfg(test)] mod test { use serde::Deserialize; use serde::Serialize; - use super::IpNet; use super::RouteDestination; use super::RouteTarget; use super::SemverVersion; @@ -3405,31 +3278,29 @@ mod test { #[test] fn test_ipv6_net_operations() { - use super::Ipv6Net; - assert!(Ipv6Net("fd00::/8".parse().unwrap()).is_unique_local()); - assert!(!Ipv6Net("fe00::/8".parse().unwrap()).is_unique_local()); - - assert!(Ipv6Net("fd00::/48".parse().unwrap()).is_vpc_prefix()); - assert!(!Ipv6Net("fe00::/48".parse().unwrap()).is_vpc_prefix()); - assert!(!Ipv6Net("fd00::/40".parse().unwrap()).is_vpc_prefix()); - - let vpc_prefix = Ipv6Net("fd00::/48".parse().unwrap()); - assert!( - Ipv6Net("fd00::/64".parse().unwrap()).is_vpc_subnet(&vpc_prefix) - ); - assert!( - !Ipv6Net("fd10::/64".parse().unwrap()).is_vpc_subnet(&vpc_prefix) - ); - assert!( - !Ipv6Net("fd00::/63".parse().unwrap()).is_vpc_subnet(&vpc_prefix) - ); - } - - #[test] - fn test_ipv4_net_operations() { - use super::{IpNet, Ipv4Net}; - let x: IpNet = "0.0.0.0/0".parse().unwrap(); - assert_eq!(x, IpNet::V4(Ipv4Net("0.0.0.0/0".parse().unwrap()))) + use super::Ipv6NetExt; + use oxnet::Ipv6Net; + + assert!("fd00::/8".parse::().unwrap().is_unique_local()); + assert!(!"fe00::/8".parse::().unwrap().is_unique_local()); + + assert!("fd00::/48".parse::().unwrap().is_vpc_prefix()); + assert!(!"fe00::/48".parse::().unwrap().is_vpc_prefix()); + assert!(!"fd00::/40".parse::().unwrap().is_vpc_prefix()); + + let vpc_prefix = "fd00::/48".parse::().unwrap(); + assert!("fd00::/64" + .parse::() + .unwrap() + .is_vpc_subnet(&vpc_prefix)); + assert!(!"fd10::/64" + .parse::() + .unwrap() + .is_vpc_subnet(&vpc_prefix)); + assert!(!"fd00::/63" + .parse::() + .unwrap() + .is_vpc_subnet(&vpc_prefix)); } #[test] @@ -3560,92 +3431,6 @@ mod test { assert!("hash:super_random".parse::().is_err()); } - #[test] - fn test_ipnet_serde() { - //TODO: none of this actually exercises - // schemars::schema::StringValidation bits and the schemars - // documentation is not forthcoming on how this might be accomplished. 
- let net_str = "fd00:2::/32"; - let net = IpNet::from_str(net_str).unwrap(); - let ser = serde_json::to_string(&net).unwrap(); - - assert_eq!(format!(r#""{}""#, net_str), ser); - let net_des = serde_json::from_str::(&ser).unwrap(); - assert_eq!(net, net_des); - - let net_str = "fd00:47::1/64"; - let net = IpNet::from_str(net_str).unwrap(); - let ser = serde_json::to_string(&net).unwrap(); - - assert_eq!(format!(r#""{}""#, net_str), ser); - let net_des = serde_json::from_str::(&ser).unwrap(); - assert_eq!(net, net_des); - - let net_str = "192.168.1.1/16"; - let net = IpNet::from_str(net_str).unwrap(); - let ser = serde_json::to_string(&net).unwrap(); - - assert_eq!(format!(r#""{}""#, net_str), ser); - let net_des = serde_json::from_str::(&ser).unwrap(); - assert_eq!(net, net_des); - - let net_str = "0.0.0.0/0"; - let net = IpNet::from_str(net_str).unwrap(); - let ser = serde_json::to_string(&net).unwrap(); - - assert_eq!(format!(r#""{}""#, net_str), ser); - let net_des = serde_json::from_str::(&ser).unwrap(); - assert_eq!(net, net_des); - } - - #[test] - fn test_ipnet_first_last_address() { - use std::net::IpAddr; - use std::net::Ipv4Addr; - use std::net::Ipv6Addr; - let net: IpNet = "fd00::/128".parse().unwrap(); - assert_eq!( - net.first_address(), - IpAddr::from(Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 0)), - ); - assert_eq!( - net.last_address(), - IpAddr::from(Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 0)), - ); - - let net: IpNet = "fd00::/64".parse().unwrap(); - assert_eq!( - net.first_address(), - IpAddr::from(Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 0)), - ); - assert_eq!( - net.last_address(), - IpAddr::from(Ipv6Addr::new( - 0xfd00, 0, 0, 0, 0xffff, 0xffff, 0xffff, 0xffff - )), - ); - - let net: IpNet = "10.0.0.0/16".parse().unwrap(); - assert_eq!( - net.first_address(), - IpAddr::from(Ipv4Addr::new(10, 0, 0, 0)), - ); - assert_eq!( - net.last_address(), - IpAddr::from(Ipv4Addr::new(10, 0, 255, 255)), - ); - - let net: IpNet = "10.0.0.0/32".parse().unwrap(); - assert_eq!( - net.first_address(), - IpAddr::from(Ipv4Addr::new(10, 0, 0, 0)), - ); - assert_eq!( - net.last_address(), - IpAddr::from(Ipv4Addr::new(10, 0, 0, 0)), - ); - } - #[test] fn test_macaddr() { use super::MacAddr; diff --git a/common/src/api/internal/nexus.rs b/common/src/api/internal/nexus.rs index 24ef9a16aa..de611262bf 100644 --- a/common/src/api/internal/nexus.rs +++ b/common/src/api/internal/nexus.rs @@ -6,7 +6,7 @@ use crate::api::external::{ ByteCount, DiskState, Generation, Hostname, InstanceCpuCount, - InstanceState, IpNet, SemverVersion, Vni, + InstanceState, SemverVersion, Vni, }; use chrono::{DateTime, Utc}; use omicron_uuid_kinds::DownstairsRegionKind; @@ -110,19 +110,18 @@ pub struct ProducerEndpoint { /// The IP address and port at which `oximeter` can collect metrics from the /// producer. pub address: SocketAddr, - /// The API base route from which `oximeter` can collect metrics. - /// - /// The full route is `{base_route}/{id}`. - pub base_route: String, /// The interval on which `oximeter` should collect metrics. pub interval: Duration, } -impl ProducerEndpoint { - /// Return the route that can be used to request metric data. - pub fn collection_route(&self) -> String { - format!("{}/{}", &self.base_route, &self.id) - } +/// Response to a successful producer registration. +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +pub struct ProducerRegistrationResponse { + /// Period within which producers must renew their lease. 
+ /// + /// Producers are required to periodically re-register with Nexus, to ensure + /// that they are still collected from by `oximeter`. + pub lease_duration: Duration, } /// An identifier for a single update artifact. @@ -252,7 +251,7 @@ mod tests { #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] #[serde(tag = "type", content = "value", rename_all = "snake_case")] pub enum HostIdentifier { - Ip(IpNet), + Ip(oxnet::IpNet), Vpc(Vni), } diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index c123e1f9c8..9e3e1a71f5 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -4,13 +4,16 @@ //! Types shared between Nexus and Sled Agent. -use crate::api::external::{self, BfdMode, Name}; -use ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network}; +use crate::{ + address::NUM_SOURCE_NAT_PORTS, + api::external::{self, BfdMode, ImportExportPolicy, Name}, +}; +use oxnet::{IpNet, Ipv4Net, Ipv6Net}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, - fmt::Display, + fmt, net::{IpAddr, Ipv4Addr, Ipv6Addr}, str::FromStr, }; @@ -50,7 +53,7 @@ pub struct NetworkInterface { pub name: external::Name, pub ip: IpAddr, pub mac: external::MacAddr, - pub subnet: external::IpNet, + pub subnet: IpNet, pub vni: external::Vni, pub primary: bool, pub slot: u8, @@ -58,16 +61,95 @@ pub struct NetworkInterface { /// An IP address and port range used for source NAT, i.e., making /// outbound network connections from guests or services. -#[derive( - Debug, Clone, Copy, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, -)] +// Note that `Deserialize` is manually implemented; if you make any changes to +// the fields of this structure, you must make them to that implementation too. +#[derive(Debug, Clone, Copy, Serialize, JsonSchema, PartialEq, Eq, Hash)] pub struct SourceNatConfig { /// The external address provided to the instance or service. pub ip: IpAddr, /// The first port used for source NAT, inclusive. - pub first_port: u16, + first_port: u16, /// The last port used for source NAT, also inclusive. - pub last_port: u16, + last_port: u16, +} + +// We implement `Deserialize` manually to add validity checking on the port +// range. +impl<'de> Deserialize<'de> for SourceNatConfig { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + use serde::de::Error; + + // The fields of `SourceNatConfigShadow` should exactly match the fields + // of `SourceNatConfig`. We're not really using serde's remote derive, + // but by adding the attribute we get compile-time checking that all the + // field names and types match. (It doesn't check the _order_, but that + // should be fine as long as we're using JSON or similar formats.) + #[derive(Deserialize)] + #[serde(remote = "SourceNatConfig")] + struct SourceNatConfigShadow { + ip: IpAddr, + first_port: u16, + last_port: u16, + } + + let shadow = SourceNatConfigShadow::deserialize(deserializer)?; + SourceNatConfig::new(shadow.ip, shadow.first_port, shadow.last_port) + .map_err(D::Error::custom) + } +} + +impl SourceNatConfig { + /// Construct a `SourceNatConfig` with the given port range, both inclusive. + /// + /// # Errors + /// + /// Fails if `(first_port, last_port)` is not aligned to + /// [`NUM_SOURCE_NAT_PORTS`]. 
+ pub fn new( + ip: IpAddr, + first_port: u16, + last_port: u16, + ) -> Result { + if first_port % NUM_SOURCE_NAT_PORTS == 0 + && last_port + .checked_sub(first_port) + .and_then(|diff| diff.checked_add(1)) + == Some(NUM_SOURCE_NAT_PORTS) + { + Ok(Self { ip, first_port, last_port }) + } else { + Err(SourceNatConfigError::UnalignedPortPair { + first_port, + last_port, + }) + } + } + + /// Get the port range. + /// + /// Guaranteed to be aligned to [`NUM_SOURCE_NAT_PORTS`]. + pub fn port_range(&self) -> std::ops::RangeInclusive { + self.first_port..=self.last_port + } + + /// Get the port range as a raw tuple; both values are inclusive. + /// + /// Guaranteed to be aligned to [`NUM_SOURCE_NAT_PORTS`]. + pub fn port_range_raw(&self) -> (u16, u16) { + self.port_range().into_inner() + } +} + +#[derive(Debug, thiserror::Error)] +pub enum SourceNatConfigError { + #[error( + "snat port range is not aligned to {NUM_SOURCE_NAT_PORTS}: \ + ({first_port}, {last_port})" + )] + UnalignedPortPair { first_port: u16, last_port: u16 }, } // We alias [`RackNetworkConfig`] to the current version of the protocol, so @@ -77,7 +159,7 @@ pub type RackNetworkConfig = RackNetworkConfigV1; /// Initial network configuration #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] pub struct RackNetworkConfigV1 { - pub rack_subnet: Ipv6Network, + pub rack_subnet: Ipv6Net, // TODO: #3591 Consider making infra-ip ranges implicit for uplinks /// First ip address to be used for configuring network infrastructure pub infra_ip_first: Ipv4Addr, @@ -92,17 +174,25 @@ pub struct RackNetworkConfigV1 { pub bfd: Vec, } -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, JsonSchema)] pub struct BgpConfig { /// The autonomous system number for the BGP configuration. pub asn: u32, /// The set of prefixes for the BGP router to originate. - pub originate: Vec, + pub originate: Vec, + + /// Shaper to apply to outgoing messages. + #[serde(default)] + pub shaper: Option, + + /// Checker to apply to incoming messages. + #[serde(default)] + pub checker: Option, } -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, JsonSchema)] pub struct BgpPeerConfig { - /// The autonomous sysetm number of the router the peer belongs to. + /// The autonomous system number of the router the peer belongs to. pub asn: u32, /// Switch port the peer is reachable on. pub port: String, @@ -119,9 +209,76 @@ pub struct BgpPeerConfig { pub connect_retry: Option, /// The interval to send keepalive messages at. pub keepalive: Option, + /// Require that a peer has a specified ASN. + #[serde(default)] + pub remote_asn: Option, + /// Require messages from a peer have a minimum IP time to live field. + #[serde(default)] + pub min_ttl: Option, + /// Use the given key for TCP-MD5 authentication with the peer. + #[serde(default)] + pub md5_auth_key: Option, + /// Apply the provided multi-exit discriminator (MED) updates sent to the peer. + #[serde(default)] + pub multi_exit_discriminator: Option, + /// Include the provided communities in updates sent to the peer. + #[serde(default)] + pub communities: Vec, + /// Apply a local preference to routes received from this peer. + #[serde(default)] + pub local_pref: Option, + /// Enforce that the first AS in paths received from this peer is the peer's AS. + #[serde(default)] + pub enforce_first_as: bool, + /// Define import policy for a peer. 
+ #[serde(default)] + pub allowed_import: ImportExportPolicy, + /// Define export policy for a peer. + #[serde(default)] + pub allowed_export: ImportExportPolicy, + /// Associate a VLAN ID with a BGP peer session. + #[serde(default)] + pub vlan_id: Option, } -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] +impl BgpPeerConfig { + /// The default hold time for a BGP peer in seconds. + pub const DEFAULT_HOLD_TIME: u64 = 6; + + /// The default idle hold time for a BGP peer in seconds. + pub const DEFAULT_IDLE_HOLD_TIME: u64 = 3; + + /// The default delay open time for a BGP peer in seconds. + pub const DEFAULT_DELAY_OPEN: u64 = 0; + + /// The default connect retry time for a BGP peer in seconds. + pub const DEFAULT_CONNECT_RETRY: u64 = 3; + + /// The default keepalive time for a BGP peer in seconds. + pub const DEFAULT_KEEPALIVE: u64 = 2; + + pub fn hold_time(&self) -> u64 { + self.hold_time.unwrap_or(Self::DEFAULT_HOLD_TIME) + } + + pub fn idle_hold_time(&self) -> u64 { + self.idle_hold_time.unwrap_or(Self::DEFAULT_IDLE_HOLD_TIME) + } + + pub fn delay_open(&self) -> u64 { + self.delay_open.unwrap_or(Self::DEFAULT_DELAY_OPEN) + } + + pub fn connect_retry(&self) -> u64 { + self.connect_retry.unwrap_or(Self::DEFAULT_CONNECT_RETRY) + } + + pub fn keepalive(&self) -> u64 { + self.keepalive.unwrap_or(Self::DEFAULT_KEEPALIVE) + } +} + +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, JsonSchema)] pub struct BfdPeerConfig { pub local: Option, pub remote: IpAddr, @@ -131,20 +288,23 @@ pub struct BfdPeerConfig { pub switch: SwitchLocation, } -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, JsonSchema)] pub struct RouteConfig { /// The destination of the route. - pub destination: IpNetwork, + pub destination: IpNet, /// The nexthop/gateway address. pub nexthop: IpAddr, + /// The VLAN id associated with this route. + #[serde(default)] + pub vlan_id: Option, } -#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] +#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, JsonSchema)] pub struct PortConfigV1 { /// The set of routes associated with this port. pub routes: Vec, /// This port's addresses. - pub addresses: Vec, + pub addresses: Vec, /// Switch the port belongs to. pub switch: SwitchLocation, /// Nmae of the port this config applies to. 
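The repeated `#[serde(default)]` attributes above are what keep previously-serialized configs loadable after these fields are added. A minimal sketch using `RouteConfig` (assuming `serde_json` and the CIDR-string form of `oxnet::IpNet`, as exercised elsewhere in this change; illustrative only):

use omicron_common::api::internal::shared::RouteConfig;

fn main() {
    // A route captured before `vlan_id` existed still deserializes;
    // the new field simply defaults to `None`.
    let old = r#"{"destination":"0.0.0.0/0","nexthop":"192.168.0.1"}"#;
    let route: RouteConfig =
        serde_json::from_str(old).expect("old payloads still parse");
    assert!(route.vlan_id.is_none());
}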
@@ -166,6 +326,7 @@ impl From for PortConfigV1 { routes: vec![RouteConfig { destination: "0.0.0.0/0".parse().unwrap(), nexthop: value.gateway_ip.into(), + vlan_id: None, }], addresses: vec![value.uplink_cidr.into()], switch: value.switch, @@ -194,7 +355,7 @@ pub struct UplinkConfig { pub uplink_port_fec: PortFec, /// IP Address and prefix (e.g., `192.168.0.1/16`) to apply to switchport /// (must be in infra_ip pool) - pub uplink_cidr: Ipv4Network, + pub uplink_cidr: Ipv4Net, /// VLAN id to use for uplink pub uplink_vid: Option, } @@ -212,7 +373,7 @@ pub struct HostPortConfig { /// IP Address and prefix (e.g., `192.168.0.1/16`) to apply to switchport /// (must be in infra_ip pool) - pub addrs: Vec, + pub addrs: Vec, } impl From for HostPortConfig { @@ -243,8 +404,8 @@ pub enum SwitchLocation { Switch1, } -impl Display for SwitchLocation { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for SwitchLocation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { SwitchLocation::Switch0 => write!(f, "switch0"), SwitchLocation::Switch1 => write!(f, "switch1"), @@ -255,6 +416,12 @@ impl Display for SwitchLocation { #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct ParseSwitchLocationError(String); +impl std::fmt::Display for ParseSwitchLocationError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "parse switch location error: {}", self.0) + } +} + impl FromStr for SwitchLocation { type Err = ParseSwitchLocationError; @@ -280,7 +447,7 @@ pub enum ExternalPortDiscovery { /// Switchport Speed options #[derive( - Copy, Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema, Hash, + Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Eq, JsonSchema, Hash, )] #[serde(rename_all = "snake_case")] pub enum PortSpeed { @@ -304,9 +471,25 @@ pub enum PortSpeed { Speed400G, } +impl fmt::Display for PortSpeed { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PortSpeed::Speed0G => write!(f, "0G"), + PortSpeed::Speed1G => write!(f, "1G"), + PortSpeed::Speed10G => write!(f, "10G"), + PortSpeed::Speed25G => write!(f, "25G"), + PortSpeed::Speed40G => write!(f, "40G"), + PortSpeed::Speed50G => write!(f, "50G"), + PortSpeed::Speed100G => write!(f, "100G"), + PortSpeed::Speed200G => write!(f, "200G"), + PortSpeed::Speed400G => write!(f, "400G"), + } + } +} + /// Switchport FEC options #[derive( - Copy, Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema, Hash, + Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Eq, JsonSchema, Hash, )] #[serde(rename_all = "snake_case")] pub enum PortFec { @@ -314,3 +497,144 @@ pub enum PortFec { None, Rs, } + +impl fmt::Display for PortFec { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PortFec::Firecode => write!(f, "Firecode R-FEC"), + PortFec::None => write!(f, "None"), + PortFec::Rs => write!(f, "RS-FEC"), + } + } +} + +/// Description of source IPs allowed to reach rack services. +#[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] +#[serde(rename_all = "snake_case", tag = "allow", content = "ips")] +pub enum AllowedSourceIps { + /// Allow traffic from any external IP address. + Any, + /// Restrict access to a specific set of source IP addresses or subnets. + /// + /// All others are prevented from reaching rack services. 
+ List(IpAllowList), +} + +impl TryFrom> for AllowedSourceIps { + type Error = &'static str; + fn try_from(list: Vec) -> Result { + IpAllowList::try_from(list).map(Self::List) + } +} + +impl TryFrom<&[ipnetwork::IpNetwork]> for AllowedSourceIps { + type Error = &'static str; + fn try_from(list: &[ipnetwork::IpNetwork]) -> Result { + IpAllowList::try_from(list).map(Self::List) + } +} + +/// A non-empty allowlist of IP subnets. +#[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] +#[serde(try_from = "Vec", into = "Vec")] +#[schemars(transparent)] +pub struct IpAllowList(Vec); + +impl IpAllowList { + /// Return the entries of the list as a slice. + pub fn as_slice(&self) -> &[IpNet] { + &self.0 + } + + /// Return an iterator over the entries of the list. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Consume the list into an iterator. + pub fn into_iter(self) -> impl Iterator { + self.0.into_iter() + } + + /// Return the number of entries in the allowlist. + /// + /// Note that this is always >= 1, though we return a usize for simplicity. + pub fn len(&self) -> usize { + self.0.len() + } +} + +impl From for Vec { + fn from(list: IpAllowList) -> Self { + list.0 + } +} + +impl TryFrom> for IpAllowList { + type Error = &'static str; + fn try_from(list: Vec) -> Result { + if list.is_empty() { + return Err("IP allowlist must not be empty"); + } + Ok(Self(list)) + } +} + +impl TryFrom<&[ipnetwork::IpNetwork]> for IpAllowList { + type Error = &'static str; + + fn try_from(list: &[ipnetwork::IpNetwork]) -> Result { + if list.is_empty() { + return Err("IP allowlist must not be empty"); + } + Ok(Self(list.into_iter().map(|net| (*net).into()).collect())) + } +} + +#[cfg(test)] +mod tests { + use crate::api::internal::shared::AllowedSourceIps; + use oxnet::{IpNet, Ipv4Net, Ipv6Net}; + use std::net::{Ipv4Addr, Ipv6Addr}; + + #[test] + fn test_deserialize_allowed_source_ips() { + let parsed: AllowedSourceIps = serde_json::from_str( + r#"{"allow":"list","ips":["127.0.0.1/32","10.0.0.0/24","fd00::1/64"]}"#, + ) + .unwrap(); + assert_eq!( + parsed, + AllowedSourceIps::try_from(vec![ + Ipv4Net::host_net(Ipv4Addr::LOCALHOST).into(), + IpNet::V4( + Ipv4Net::new(Ipv4Addr::new(10, 0, 0, 0), 24).unwrap() + ), + IpNet::V6( + Ipv6Net::new( + Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 1), + 64 + ) + .unwrap() + ), + ]) + .unwrap() + ); + } + + #[test] + fn test_deserialize_unknown_string() { + serde_json::from_str::(r#"{"allow":"wat"}"#) + .expect_err( + "Should not be able to deserialize from unknown variant name", + ); + } + + #[test] + fn test_deserialize_any_into_allowed_external_ips() { + assert_eq!( + AllowedSourceIps::Any, + serde_json::from_str(r#"{"allow":"any"}"#).unwrap(), + ); + } +} diff --git a/common/src/disk.rs b/common/src/disk.rs index 0cf9b6e073..c6d60c5140 100644 --- a/common/src/disk.rs +++ b/common/src/disk.rs @@ -22,6 +22,6 @@ use serde::{Deserialize, Serialize}; )] pub struct DiskIdentity { pub vendor: String, - pub serial: String, pub model: String, + pub serial: String, } diff --git a/common/src/ledger.rs b/common/src/ledger.rs index 71d03fa8ee..a52c2441ca 100644 --- a/common/src/ledger.rs +++ b/common/src/ledger.rs @@ -7,7 +7,7 @@ use async_trait::async_trait; use camino::{Utf8Path, Utf8PathBuf}; use serde::{de::DeserializeOwned, Serialize}; -use slog::{debug, info, warn, Logger}; +use slog::{debug, error, info, warn, Logger}; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -127,7 +127,7 @@ impl Ledger { let mut one_successful_write = false; 
for path in self.paths.iter() { if let Err(e) = self.atomic_write(&path).await { - warn!(self.log, "Failed to write to {}: {e}", path); + warn!(self.log, "Failed to write ledger"; "path" => ?path, "err" => ?e); failed_paths.push((path.to_path_buf(), e)); } else { one_successful_write = true; @@ -135,6 +135,7 @@ impl Ledger { } if !one_successful_write { + error!(self.log, "No successful writes to ledger"); return Err(Error::FailedToWrite { failed_paths }); } Ok(()) @@ -310,7 +311,7 @@ mod test { let log = &logctx.log; // Create the ledger, initialize contents. - let config_dirs = vec![ + let config_dirs = [ camino_tempfile::Utf8TempDir::new().unwrap(), camino_tempfile::Utf8TempDir::new().unwrap(), ]; diff --git a/common/src/lib.rs b/common/src/lib.rs index 24fa4dfba0..a92237adfa 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -28,6 +28,7 @@ pub mod disk; pub mod ledger; pub mod update; pub mod vlan; +pub mod zpool_name; pub use update::hex_schema; diff --git a/common/src/zpool_name.rs b/common/src/zpool_name.rs new file mode 100644 index 0000000000..df5ca8ea31 --- /dev/null +++ b/common/src/zpool_name.rs @@ -0,0 +1,279 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Zpool labels and kinds shared between Nexus and Sled Agents + +use camino::{Utf8Path, Utf8PathBuf}; +use omicron_uuid_kinds::ZpoolUuid; +use schemars::JsonSchema; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use std::fmt; +use std::str::FromStr; +pub const ZPOOL_EXTERNAL_PREFIX: &str = "oxp_"; +pub const ZPOOL_INTERNAL_PREFIX: &str = "oxi_"; + +/// Describes the different classes of Zpools. +#[derive( + Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, JsonSchema, +)] +#[serde(rename_all = "snake_case")] +pub enum ZpoolKind { + // This zpool is used for external storage (u.2) + External, + // This zpool is used for internal storage (m.2) + Internal, +} + +/// A wrapper around a zpool name. +/// +/// This expects that the format will be: `ox{i,p}_` - we parse the prefix +/// when reading the structure, and validate that the UUID can be utilized. +#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct ZpoolName { + id: ZpoolUuid, + kind: ZpoolKind, +} + +const ZPOOL_NAME_REGEX: &str = r"^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"; + +/// Custom JsonSchema implementation to encode the constraints on Name. +impl JsonSchema for ZpoolName { + fn schema_name() -> String { + "ZpoolName".to_string() + } + fn json_schema( + _: &mut schemars::gen::SchemaGenerator, + ) -> schemars::schema::Schema { + schemars::schema::SchemaObject { + metadata: Some(Box::new(schemars::schema::Metadata { + title: Some( + "The name of a Zpool".to_string(), + ), + description: Some( + "Zpool names are of the format ox{i,p}_. 
They are either \ + Internal or External, and should be unique" + .to_string(), + ), + ..Default::default() + })), + instance_type: Some(schemars::schema::InstanceType::String.into()), + string: Some(Box::new(schemars::schema::StringValidation { + pattern: Some(ZPOOL_NAME_REGEX.to_owned()), + ..Default::default() + })), + ..Default::default() + } + .into() + } +} + +impl ZpoolName { + pub fn new_internal(id: ZpoolUuid) -> Self { + Self { id, kind: ZpoolKind::Internal } + } + + pub fn new_external(id: ZpoolUuid) -> Self { + Self { id, kind: ZpoolKind::External } + } + + pub fn id(&self) -> ZpoolUuid { + self.id + } + + pub fn kind(&self) -> ZpoolKind { + self.kind + } + + /// Returns a path to a dataset's mountpoint within the zpool. + /// + /// For example: oxp_(UUID) -> /pool/ext/(UUID)/(dataset) + pub fn dataset_mountpoint( + &self, + root: &Utf8Path, + dataset: &str, + ) -> Utf8PathBuf { + let mut path = Utf8PathBuf::new(); + path.push(root); + path.push("pool"); + match self.kind { + ZpoolKind::External => path.push("ext"), + ZpoolKind::Internal => path.push("int"), + }; + path.push(self.id().to_string()); + path.push(dataset); + path + } +} + +impl<'de> Deserialize<'de> for ZpoolName { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + ZpoolName::from_str(&s).map_err(serde::de::Error::custom) + } +} + +impl Serialize for ZpoolName { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + +impl FromStr for ZpoolName { + type Err = String; + + fn from_str(s: &str) -> Result { + if let Some(s) = s.strip_prefix(ZPOOL_EXTERNAL_PREFIX) { + let id = ZpoolUuid::from_str(s).map_err(|e| e.to_string())?; + Ok(ZpoolName::new_external(id)) + } else if let Some(s) = s.strip_prefix(ZPOOL_INTERNAL_PREFIX) { + let id = ZpoolUuid::from_str(s).map_err(|e| e.to_string())?; + Ok(ZpoolName::new_internal(id)) + } else { + Err(format!( + "Bad zpool name {s}; must start with '{ZPOOL_EXTERNAL_PREFIX}' or '{ZPOOL_INTERNAL_PREFIX}'", + )) + } + } +} + +impl fmt::Display for ZpoolName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let prefix = match self.kind { + ZpoolKind::External => ZPOOL_EXTERNAL_PREFIX, + ZpoolKind::Internal => ZPOOL_INTERNAL_PREFIX, + }; + write!(f, "{prefix}{}", self.id) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_zpool_name_regex() { + let valid = [ + "oxi_d462a7f7-b628-40fe-80ff-4e4189e2d62b", + "oxp_d462a7f7-b628-40fe-80ff-4e4189e2d62b", + ]; + + let invalid = [ + "", + // Whitespace + " oxp_d462a7f7-b628-40fe-80ff-4e4189e2d62b", + "oxp_d462a7f7-b628-40fe-80ff-4e4189e2d62b ", + // Case sensitivity + "oxp_D462A7F7-b628-40fe-80ff-4e4189e2d62b", + // Bad prefix + "ox_d462a7f7-b628-40fe-80ff-4e4189e2d62b", + "oxa_d462a7f7-b628-40fe-80ff-4e4189e2d62b", + "oxi-d462a7f7-b628-40fe-80ff-4e4189e2d62b", + "oxp-d462a7f7-b628-40fe-80ff-4e4189e2d62b", + // Missing Prefix + "d462a7f7-b628-40fe-80ff-4e4189e2d62b", + // Bad UUIDs (Not following UUIDv4 format) + "oxi_d462a7f7-b628-30fe-80ff-4e4189e2d62b", + "oxi_d462a7f7-b628-40fe-c0ff-4e4189e2d62b", + ]; + + let r = regress::Regex::new(ZPOOL_NAME_REGEX) + .expect("validation regex is valid"); + for input in valid { + let m = r + .find(input) + .unwrap_or_else(|| panic!("input {input} did not match regex")); + assert_eq!(m.start(), 0, "input {input} did not match start"); + assert_eq!(m.end(), input.len(), "input {input} did not match end"); + } + + for input 
in invalid { + assert!( + r.find(input).is_none(), + "invalid input {input} should not match validation regex" + ); + } + } + + #[test] + fn test_parse_zpool_name_json() { + #[derive(Serialize, Deserialize, JsonSchema)] + struct TestDataset { + pool_name: ZpoolName, + } + + // Confirm that we can convert from a JSON string to a a ZpoolName + let json_string = + r#"{"pool_name":"oxi_d462a7f7-b628-40fe-80ff-4e4189e2d62b"}"#; + let dataset: TestDataset = serde_json::from_str(json_string) + .expect("Could not parse ZpoolName from Json Object"); + assert!(matches!(dataset.pool_name.kind, ZpoolKind::Internal)); + + // Confirm we can go the other way (ZpoolName to JSON string) too. + let j = serde_json::to_string(&dataset) + .expect("Cannot convert back to JSON string"); + assert_eq!(j, json_string); + } + + fn toml_string(s: &str) -> String { + format!("zpool_name = \"{}\"", s) + } + + fn parse_name(s: &str) -> Result { + toml_string(s) + .parse::() + .expect("Cannot parse as TOML value") + .get("zpool_name") + .expect("Missing key") + .clone() + .try_into::() + } + + #[test] + fn test_parse_external_zpool_name() { + let uuid: ZpoolUuid = + "d462a7f7-b628-40fe-80ff-4e4189e2d62b".parse().unwrap(); + let good_name = format!("{}{}", ZPOOL_EXTERNAL_PREFIX, uuid); + + let name = parse_name(&good_name).expect("Cannot parse as ZpoolName"); + assert_eq!(uuid, name.id()); + assert_eq!(ZpoolKind::External, name.kind()); + } + + #[test] + fn test_parse_internal_zpool_name() { + let uuid: ZpoolUuid = + "d462a7f7-b628-40fe-80ff-4e4189e2d62b".parse().unwrap(); + let good_name = format!("{}{}", ZPOOL_INTERNAL_PREFIX, uuid); + + let name = parse_name(&good_name).expect("Cannot parse as ZpoolName"); + assert_eq!(uuid, name.id()); + assert_eq!(ZpoolKind::Internal, name.kind()); + } + + #[test] + fn test_parse_bad_zpool_names() { + let bad_names = vec![ + // Nonsense string + "this string is GARBAGE", + // Missing prefix + "d462a7f7-b628-40fe-80ff-4e4189e2d62b", + // Underscores + "oxp_d462a7f7_b628_40fe_80ff_4e4189e2d62b", + ]; + + for bad_name in &bad_names { + assert!( + parse_name(&bad_name).is_err(), + "Parsing {} should fail", + bad_name + ); + } + } +} diff --git a/dev-tools/crdb-seed/Cargo.toml b/dev-tools/crdb-seed/Cargo.toml index aff26995dc..778a65b4b5 100644 --- a/dev-tools/crdb-seed/Cargo.toml +++ b/dev-tools/crdb-seed/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" license = "MPL-2.0" readme = "README.md" +[lints] +workspace = true + [dependencies] anyhow.workspace = true dropshot.workspace = true diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index 813a4b9552..3c466b1683 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [build-dependencies] omicron-rpaths.workspace = true @@ -32,6 +35,7 @@ nexus-db-queries.workspace = true nexus-reconfigurator-preparation.workspace = true nexus-types.workspace = true omicron-common.workspace = true +omicron-uuid-kinds.workspace = true oximeter-client.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. pq-sys = "*" diff --git a/dev-tools/omdb/src/bin/omdb/crucible_agent.rs b/dev-tools/omdb/src/bin/omdb/crucible_agent.rs new file mode 100644 index 0000000000..b5c36e0b3d --- /dev/null +++ b/dev-tools/omdb/src/bin/omdb/crucible_agent.rs @@ -0,0 +1,184 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! omdb commands that query a crucible-agent + +use anyhow::bail; +use anyhow::Context; +use clap::Args; +use clap::Subcommand; +use crucible_agent_client::types::RegionId; +use crucible_agent_client::Client; +use tabled::Tabled; + +use crate::helpers::CONNECTION_OPTIONS_HEADING; +use crate::Omdb; + +/// Arguments to the "omdb crucible-agent" subcommand +#[derive(Debug, Args)] +pub struct CrucibleAgentArgs { + /// URL of the crucible agent internal API + #[clap( + long, + env = "OMDB_CRUCIBLE_AGENT_URL", + global = true, + help_heading = CONNECTION_OPTIONS_HEADING, + )] + crucible_agent_url: Option, + + #[command(subcommand)] + command: CrucibleAgentCommands, +} + +/// Subcommands for the "omdb crucible-agent" subcommand +#[derive(Debug, Subcommand)] +enum CrucibleAgentCommands { + /// print information about regions + #[clap(subcommand)] + Regions(RegionCommands), + /// print information about snapshots + #[clap(subcommand)] + Snapshots(SnapshotCommands), +} + +#[derive(Debug, Subcommand)] +enum RegionCommands { + /// Print list of all running control plane regions + List, +} + +#[derive(Debug, Subcommand)] +enum SnapshotCommands { + /// Print list of all running control plane snapshots + List, +} + +impl CrucibleAgentArgs { + /// Run a `omdb crucible-agent` subcommand. + pub(crate) async fn run_cmd( + &self, + _omdb: &Omdb, + ) -> Result<(), anyhow::Error> { + // The crucible agent URL is required, but can come + // from the environment, in which case it won't be on the command line. + let Some(crucible_agent_url) = &self.crucible_agent_url else { + bail!( + "crucible agent URL must be specified with \ + --crucible-agent-url or by setting the environment variable \ + OMDB_CRUCIBLE_AGENT_URL" + ); + }; + let client = Client::new(crucible_agent_url); + + match &self.command { + CrucibleAgentCommands::Regions(RegionCommands::List) => { + cmd_region_list(&client).await + } + CrucibleAgentCommands::Snapshots(SnapshotCommands::List) => { + cmd_snapshot_list(&client).await + } + } + } +} + +#[derive(Tabled)] +#[tabled(rename_all = "SCREAMING_SNAKE_CASE")] +struct Region { + region_id: String, + state: String, + block_size: String, + extent_size: String, + extent_count: String, + port: String, +} + +/// Runs `omdb crucible-agent regions list` +async fn cmd_region_list( + client: &crucible_agent_client::Client, +) -> Result<(), anyhow::Error> { + let regions = client.region_list().await.context("listing regions")?; + + let mut rows = Vec::new(); + for region in regions.iter() { + rows.push(Region { + region_id: region.id.clone().to_string(), + state: region.state.to_string(), + block_size: region.block_size.to_string(), + extent_size: region.extent_size.to_string(), + extent_count: region.extent_count.to_string(), + port: region.port_number.to_string(), + }); + } + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{}", table); + Ok(()) +} + +#[derive(Tabled)] +#[tabled(rename_all = "SCREAMING_SNAKE_CASE")] +struct Snapshot { + region_id: String, + snapshot_id: String, + state: String, + port: String, +} +/// Runs `omdb crucible-agent snapshot list` +async fn cmd_snapshot_list( + client: &crucible_agent_client::Client, +) -> Result<(), anyhow::Error> { + let regions = client.region_list().await.context("listing regions")?; + + let mut rows = Vec::new(); + for region in 
regions.iter() { + let snapshots = match client + .region_get_snapshots(&RegionId(region.id.to_string())) + .await + { + Ok(snapshots) => snapshots, + Err(e) => { + println!( + "Error {} looking at region {} for snapshots", + e, + region.id.to_string() + ); + continue; + } + }; + if snapshots.snapshots.is_empty() { + continue; + } + for snap in snapshots.snapshots.iter() { + match snapshots.running_snapshots.get(&snap.name) { + Some(rs) => { + rows.push(Snapshot { + region_id: region.id.clone().to_string(), + snapshot_id: snap.name.to_string(), + state: rs.state.to_string(), + port: rs.port_number.to_string(), + }); + } + None => { + rows.push(Snapshot { + region_id: region.id.clone().to_string(), + snapshot_id: snap.name.to_string(), + state: "---".to_string(), + port: "---".to_string(), + }); + } + } + } + } + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{}", table); + + Ok(()) +} diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index a4243fff31..549f289ad0 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -15,6 +15,8 @@ // NOTE: emanates from Tabled macros #![allow(clippy::useless_vec)] +use crate::helpers::CONNECTION_OPTIONS_HEADING; +use crate::helpers::DATABASE_OPTIONS_HEADING; use crate::Omdb; use anyhow::anyhow; use anyhow::bail; @@ -24,6 +26,7 @@ use async_bb8_diesel::AsyncRunQueryDsl; use async_bb8_diesel::AsyncSimpleConnection; use camino::Utf8PathBuf; use chrono::SecondsFormat; +use clap::ArgAction; use clap::Args; use clap::Subcommand; use clap::ValueEnum; @@ -35,8 +38,6 @@ use diesel::JoinOnDsl; use diesel::NullableExpressionMethods; use diesel::OptionalExtension; use diesel::TextExpressionMethods; -use dropshot::PaginationOrder; -use futures::StreamExt; use gateway_client::types::SpType; use ipnetwork::IpNetwork; use nexus_config::PostgresConfigWithUrl; @@ -50,6 +51,7 @@ use nexus_db_model::ExternalIp; use nexus_db_model::HwBaseboardId; use nexus_db_model::Instance; use nexus_db_model::InvCollection; +use nexus_db_model::InvPhysicalDisk; use nexus_db_model::IpAttachState; use nexus_db_model::IpKind; use nexus_db_model::NetworkInterface; @@ -72,28 +74,32 @@ use nexus_db_queries::db; use nexus_db_queries::db::datastore::read_only_resources_associated_with_volume; use nexus_db_queries::db::datastore::CrucibleTargets; use nexus_db_queries::db::datastore::DataStoreConnection; -use nexus_db_queries::db::datastore::DataStoreInventoryTest; use nexus_db_queries::db::datastore::InstanceAndActiveVmm; use nexus_db_queries::db::identity::Asset; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::model::ServiceKind; use nexus_db_queries::db::queries::ALLOW_FULL_TABLE_SCAN_SQL; use nexus_db_queries::db::DataStore; -use nexus_reconfigurator_preparation::policy_from_db; use nexus_types::deployment::Blueprint; -use nexus_types::deployment::OmicronZoneType; -use nexus_types::deployment::UnstableReconfiguratorState; +use nexus_types::deployment::BlueprintZoneDisposition; +use nexus_types::deployment::BlueprintZoneFilter; +use nexus_types::deployment::BlueprintZoneType; +use nexus_types::deployment::SledFilter; +use nexus_types::external_api::views::SledPolicy; +use nexus_types::external_api::views::SledState; use nexus_types::identity::Resource; use nexus_types::internal_api::params::DnsRecord; use nexus_types::internal_api::params::Srv; use nexus_types::inventory::CabooseWhich; use 
nexus_types::inventory::Collection; use nexus_types::inventory::RotPageWhich; -use omicron_common::address::NEXUS_REDUNDANCY; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Generation; -use omicron_common::api::external::LookupType; +use omicron_common::api::external::InstanceState; use omicron_common::api::external::MacAddr; +use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use sled_agent_client::types::VolumeConstructionRequest; use std::borrow::Cow; use std::cmp::Ordering; @@ -161,7 +167,12 @@ pub struct DbArgs { #[derive(Debug, Args)] pub struct DbUrlOptions { /// URL of the database SQL interface - #[clap(long, env("OMDB_DB_URL"))] + #[clap( + long, + env = "OMDB_DB_URL", + global = true, + help_heading = CONNECTION_OPTIONS_HEADING, + )] db_url: Option<PostgresConfigWithUrl>, } @@ -229,13 +240,20 @@ pub struct DbFetchOptions { #[clap( long = "fetch-limit", default_value_t = NonZeroU32::new(500).unwrap(), - env("OMDB_FETCH_LIMIT"), + env = "OMDB_FETCH_LIMIT", + global = true, + help_heading = DATABASE_OPTIONS_HEADING, )] fetch_limit: NonZeroU32, /// whether to include soft-deleted records when enumerating objects that /// can be soft-deleted - #[clap(long, default_value_t = false)] + #[clap( + long, + default_value_t = false, + global = true, + help_heading = DATABASE_OPTIONS_HEADING, + )] include_deleted: bool, } @@ -252,12 +270,10 @@ enum DbCommands { Inventory(InventoryArgs), /// Save the current Reconfigurator inputs to a file ReconfiguratorSave(ReconfiguratorSaveArgs), - /// Print information about control plane services - Services(ServicesArgs), /// Print information about sleds - Sleds, + Sleds(SledsArgs), /// Print information about customer instances - Instances, + Instances(InstancesOptions), /// Print information about the network Network(NetworkArgs), /// Print information about snapshots @@ -346,6 +362,13 @@ impl CliDnsGroup { } } +#[derive(Debug, Args)] +struct InstancesOptions { + /// Only show the running instances + #[arg(short, long, action=ArgAction::SetTrue)] + running: bool, +} + #[derive(Debug, Args)] struct InventoryArgs { #[command(subcommand)] @@ -360,6 +383,8 @@ enum InventoryCommands { Cabooses, /// list and show details from particular collections Collections(CollectionsArgs), + /// show all physical disks ever found + PhysicalDisks(PhysicalDisksArgs), /// list all root of trust pages ever found RotPages, } @@ -381,12 +406,21 @@ enum CollectionsCommands { #[derive(Debug, Args)] struct CollectionsShowArgs { /// id of the collection - id: Uuid, + id: CollectionUuid, /// show long strings in their entirety #[clap(long)] show_long_strings: bool, } +#[derive(Debug, Args, Clone, Copy)] +struct PhysicalDisksArgs { + #[clap(long)] + collection_id: Option<CollectionUuid>, + + #[clap(long, requires("collection_id"))] + sled_id: Option<SledUuid>, +} + #[derive(Debug, Args)] struct ReconfiguratorSaveArgs { /// where to save the output @@ -394,17 +428,10 @@ struct ReconfiguratorSaveArgs { } #[derive(Debug, Args)] -struct ServicesArgs { - #[command(subcommand)] - command: ServicesCommands, -} - -#[derive(Debug, Subcommand)] -enum ServicesCommands { - /// List service instances - ListInstances, - /// List service instances, grouped by sled - ListBySled, +struct SledsArgs { + /// Show sleds that match the given filter + #[clap(short = 'F', long, value_enum)] + filter: Option<SledFilter>, } #[derive(Debug, Args)] struct NetworkArgs { #[command(subcommand)] command: NetworkCommands, /// Print out raw data structures from the data store.
- #[clap(long)] + #[clap(long, global = true)] verbose: bool, } @@ -511,37 +538,22 @@ impl DbArgs { cmd_db_reconfigurator_save( &opctx, &datastore, - &self.fetch_opts, reconfig_save_args, ) .await } - DbCommands::Services(ServicesArgs { - command: ServicesCommands::ListInstances, - }) => { - cmd_db_services_list_instances( - &opctx, - &datastore, - &self.fetch_opts, - ) - .await + DbCommands::Sleds(args) => { + cmd_db_sleds(&opctx, &datastore, &self.fetch_opts, args).await } - DbCommands::Services(ServicesArgs { - command: ServicesCommands::ListBySled, - }) => { - cmd_db_services_list_by_sled( + DbCommands::Instances(instances_options) => { + cmd_db_instances( &opctx, &datastore, &self.fetch_opts, + instances_options.running, ) .await } - DbCommands::Sleds => { - cmd_db_sleds(&opctx, &datastore, &self.fetch_opts).await - } - DbCommands::Instances => { - cmd_db_instances(&opctx, &datastore, &self.fetch_opts).await - } DbCommands::Network(NetworkArgs { command: NetworkCommands::ListEips, verbose, @@ -679,42 +691,23 @@ async fn lookup_instance( .with_context(|| format!("loading instance {instance_id}")) } -/// Helper function to look up the kind of the service with the given ID. +#[derive(Clone, Debug)] +struct ServiceInfo { + service_kind: ServiceKind, + disposition: BlueprintZoneDisposition, +} + +/// Helper function to look up the service with the given ID. /// -/// Requires the caller to first have fetched the current target blueprint, so -/// we can find services that have been added by Reconfigurator. -async fn lookup_service_kind( - datastore: &DataStore, +/// Requires the caller to first have fetched the current target blueprint. +async fn lookup_service_info( service_id: Uuid, - current_target_blueprint: Option<&Blueprint>, -) -> anyhow::Result> { - let conn = datastore.pool_connection_for_tests().await?; - - // We need to check the `service` table (populated during rack setup)... - { - use db::schema::service::dsl; - if let Some(kind) = dsl::service - .filter(dsl::id.eq(service_id)) - .limit(1) - .select(dsl::kind) - .get_result_async(&*conn) - .await - .optional() - .with_context(|| format!("loading service {service_id}"))? - { - return Ok(Some(kind)); - } - } - - // ...and if we don't find the service, check the latest blueprint, because - // the service might have been added by Reconfigurator after RSS ran. - let Some(blueprint) = current_target_blueprint else { - return Ok(None); - }; - - let Some(zone_config) = - blueprint.all_omicron_zones().find_map(|(_sled_id, zone_config)| { - if zone_config.id == service_id { + blueprint: &Blueprint, +) -> anyhow::Result> { + let Some(zone_config) = blueprint + .all_omicron_zones(BlueprintZoneFilter::All) + .find_map(|(_sled_id, zone_config)| { + if zone_config.id.into_untyped_uuid() == service_id { Some(zone_config) } else { None @@ -725,22 +718,20 @@ async fn lookup_service_kind( }; let service_kind = match &zone_config.zone_type { - OmicronZoneType::BoundaryNtp { .. } - | OmicronZoneType::InternalNtp { .. } => ServiceKind::Ntp, - OmicronZoneType::Clickhouse { .. } => ServiceKind::Clickhouse, - OmicronZoneType::ClickhouseKeeper { .. } => { - ServiceKind::ClickhouseKeeper - } - OmicronZoneType::CockroachDb { .. } => ServiceKind::Cockroach, - OmicronZoneType::Crucible { .. } => ServiceKind::Crucible, - OmicronZoneType::CruciblePantry { .. } => ServiceKind::CruciblePantry, - OmicronZoneType::ExternalDns { .. } => ServiceKind::ExternalDns, - OmicronZoneType::InternalDns { .. } => ServiceKind::InternalDns, - OmicronZoneType::Nexus { .. 
} => ServiceKind::Nexus, - OmicronZoneType::Oximeter { .. } => ServiceKind::Oximeter, + BlueprintZoneType::BoundaryNtp(_) + | BlueprintZoneType::InternalNtp(_) => ServiceKind::Ntp, + BlueprintZoneType::Clickhouse(_) => ServiceKind::Clickhouse, + BlueprintZoneType::ClickhouseKeeper(_) => ServiceKind::ClickhouseKeeper, + BlueprintZoneType::CockroachDb(_) => ServiceKind::Cockroach, + BlueprintZoneType::Crucible(_) => ServiceKind::Crucible, + BlueprintZoneType::CruciblePantry(_) => ServiceKind::CruciblePantry, + BlueprintZoneType::ExternalDns(_) => ServiceKind::ExternalDns, + BlueprintZoneType::InternalDns(_) => ServiceKind::InternalDns, + BlueprintZoneType::Nexus(_) => ServiceKind::Nexus, + BlueprintZoneType::Oximeter(_) => ServiceKind::Oximeter, }; - Ok(Some(service_kind)) + Ok(Some(ServiceInfo { service_kind, disposition: zone_config.disposition })) } /// Helper function to looks up a probe with the given ID. @@ -1282,16 +1273,6 @@ async fn cmd_db_disk_physical( // SERVICES -#[derive(Tabled)] -#[tabled(rename_all = "SCREAMING_SNAKE_CASE")] -struct ServiceInstanceRow { - #[tabled(rename = "SERVICE")] - kind: String, - instance_id: Uuid, - addr: String, - sled_serial: String, -} - // Snapshots fn format_snapshot(state: &SnapshotState) -> impl Display { match state { @@ -1445,135 +1426,16 @@ async fn cmd_db_snapshot_info( Ok(()) } -/// Run `omdb db services list-instances`. -async fn cmd_db_services_list_instances( - opctx: &OpContext, - datastore: &DataStore, - fetch_opts: &DbFetchOptions, -) -> Result<(), anyhow::Error> { - let limit = fetch_opts.fetch_limit; - let sled_list = datastore - .sled_list(&opctx, &first_page(limit)) - .await - .context("listing sleds")?; - check_limit(&sled_list, limit, || String::from("listing sleds")); - - let sleds: BTreeMap = - sled_list.into_iter().map(|s| (s.id(), s)).collect(); - - let mut rows = vec![]; - - for service_kind in ServiceKind::iter() { - let context = - || format!("listing instances of kind {:?}", service_kind); - let instances = datastore - .services_list_kind(&opctx, service_kind, &first_page(limit)) - .await - .with_context(&context)?; - check_limit(&instances, limit, &context); - - rows.extend(instances.into_iter().map(|instance| { - let addr = - std::net::SocketAddrV6::new(*instance.ip, *instance.port, 0, 0) - .to_string(); - - ServiceInstanceRow { - kind: format!("{:?}", service_kind), - instance_id: instance.id(), - addr, - sled_serial: sleds - .get(&instance.sled_id) - .map(|s| s.serial_number()) - .unwrap_or("unknown") - .to_string(), - } - })); - } - - let table = tabled::Table::new(rows) - .with(tabled::settings::Style::empty()) - .with(tabled::settings::Padding::new(0, 1, 0, 0)) - .to_string(); - - println!("{}", table); - - Ok(()) -} - // SLEDS -#[derive(Tabled)] -#[tabled(rename_all = "SCREAMING_SNAKE_CASE")] -struct ServiceInstanceSledRow { - #[tabled(rename = "SERVICE")] - kind: String, - instance_id: Uuid, - addr: String, -} - -/// Run `omdb db services list-by-sled`. 
-async fn cmd_db_services_list_by_sled( - opctx: &OpContext, - datastore: &DataStore, - fetch_opts: &DbFetchOptions, -) -> Result<(), anyhow::Error> { - let limit = fetch_opts.fetch_limit; - let sled_list = datastore - .sled_list(&opctx, &first_page(limit)) - .await - .context("listing sleds")?; - check_limit(&sled_list, limit, || String::from("listing sleds")); - - let sleds: BTreeMap = - sled_list.into_iter().map(|s| (s.id(), s)).collect(); - let mut services_by_sled: BTreeMap> = - BTreeMap::new(); - - for service_kind in ServiceKind::iter() { - let context = - || format!("listing instances of kind {:?}", service_kind); - let instances = datastore - .services_list_kind(&opctx, service_kind, &first_page(limit)) - .await - .with_context(&context)?; - check_limit(&instances, limit, &context); - - for i in instances { - let addr = - std::net::SocketAddrV6::new(*i.ip, *i.port, 0, 0).to_string(); - let sled_instances = - services_by_sled.entry(i.sled_id).or_insert_with(Vec::new); - sled_instances.push(ServiceInstanceSledRow { - kind: format!("{:?}", service_kind), - instance_id: i.id(), - addr, - }) - } - } - - for (sled_id, instances) in services_by_sled { - println!( - "sled: {} (id {})\n", - sleds.get(&sled_id).map(|s| s.serial_number()).unwrap_or("unknown"), - sled_id, - ); - let table = tabled::Table::new(instances) - .with(tabled::settings::Style::empty()) - .with(tabled::settings::Padding::new(0, 1, 0, 0)) - .to_string(); - println!("{}", textwrap::indent(&table.to_string(), " ")); - println!(""); - } - - Ok(()) -} - #[derive(Tabled)] #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] struct SledRow { serial: String, ip: String, role: &'static str, + policy: SledPolicy, + state: SledState, id: Uuid, } @@ -1584,6 +1446,8 @@ impl From for SledRow { serial: s.serial_number().to_string(), ip: s.address().to_string(), role: if s.is_scrimlet() { "scrimlet" } else { "-" }, + policy: s.policy(), + state: s.state().into(), } } } @@ -1593,10 +1457,22 @@ async fn cmd_db_sleds( opctx: &OpContext, datastore: &DataStore, fetch_opts: &DbFetchOptions, + args: &SledsArgs, ) -> Result<(), anyhow::Error> { let limit = fetch_opts.fetch_limit; + let filter = match args.filter { + Some(filter) => filter, + None => { + eprintln!( + "note: listing all commissioned sleds \ + (use -F to filter, e.g. 
-F in-service)" ); + SledFilter::Commissioned + } + }; + let sleds = datastore - .sled_list(&opctx, &first_page(limit)) + .sled_list(&opctx, &first_page(limit), filter) .await .context("listing sleds")?; check_limit(&sleds, limit, || String::from("listing sleds")); @@ -1604,7 +1480,7 @@ async fn cmd_db_sleds( let rows = sleds.into_iter().map(|s| SledRow::from(s)); let table = tabled::Table::new(rows) .with(tabled::settings::Style::empty()) - .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .with(tabled::settings::Padding::new(1, 1, 0, 0)) .to_string(); println!("{}", table); @@ -1628,6 +1504,7 @@ async fn cmd_db_instances( opctx: &OpContext, datastore: &DataStore, fetch_opts: &DbFetchOptions, + running: bool, ) -> Result<(), anyhow::Error> { use db::schema::instance::dsl; use db::schema::vmm::dsl as vmm_dsl; @@ -1681,6 +1558,10 @@ async fn cmd_db_instances( "-".to_string() }; + if running && i.effective_state() != InstanceState::Running { + continue; + } + let cir = CustomerInstanceRow { id: i.instance().id().to_string(), name: i.instance().name().to_string(), @@ -1941,9 +1822,20 @@ async fn cmd_db_eips( } enum Owner { - Instance { id: Uuid, project: String, name: String }, - Service { id: Uuid, kind: String }, - Project { id: Uuid, name: String }, + Instance { + id: Uuid, + project: String, + name: String, + }, + Service { + id: Uuid, + kind: String, + disposition: Option, + }, + Project { + id: Uuid, + name: String, + }, None, } @@ -1976,6 +1868,13 @@ async fn cmd_db_eips( Self::None => "none".to_string(), } } + + fn disposition(&self) -> Option { + match self { + Self::Service { disposition, .. } => *disposition, + _ => None, + } + } } #[derive(Tabled)] @@ -1988,6 +1887,13 @@ async fn cmd_db_eips( owner_kind: &'static str, owner_id: String, owner_name: String, + #[tabled(display_with = "display_option_blank")] + owner_disposition: Option, + } + + // Display an empty cell for an Option if it's None. + fn display_option_blank(opt: &Option) -> String { + opt.as_ref().map(|x| x.to_string()).unwrap_or_else(|| "".to_string()) } if verbose { @@ -2001,26 +1907,29 @@ async fn cmd_db_eips( let mut rows = Vec::new(); - let current_target_blueprint = datastore + let (_, current_target_blueprint) = datastore .blueprint_target_get_current_full(opctx) .await - .context("loading current target blueprint")? - .map(|(_, blueprint)| blueprint); + .context("loading current target blueprint")?; for ip in &ips { let owner = if let Some(owner_id) = ip.parent_id { if ip.is_service { - let kind = match lookup_service_kind( - datastore, + let (kind, disposition) = match lookup_service_info( owner_id, - current_target_blueprint.as_ref(), + &current_target_blueprint, ) .await? { - Some(kind) => format!("{kind:?}"), - None => "UNKNOWN (service ID not found)".to_string(), + Some(info) => ( + format!("{:?}", info.service_kind), + Some(info.disposition), + ), + None => { + ("UNKNOWN (service ID not found)".to_string(), None) + } }; - Owner::Service { id: owner_id, kind } + Owner::Service { id: owner_id, kind, disposition } } else { let instance = match lookup_instance(datastore, owner_id).await?
{ @@ -2084,6 +1993,7 @@ async fn cmd_db_eips( owner_kind: owner.kind(), owner_id: owner.id(), owner_name: owner.name(), + owner_disposition: owner.disposition(), }; rows.push(row); } @@ -2108,7 +2018,7 @@ async fn cmd_db_network_list_vnics( struct NicRow { ip: IpNetwork, mac: MacAddr, - slot: i16, + slot: u8, primary: bool, kind: &'static str, subnet: String, @@ -2229,7 +2139,7 @@ async fn cmd_db_network_list_vnics( let row = NicRow { ip: nic.ip, mac: *nic.mac, - slot: nic.slot, + slot: *nic.slot, primary: nic.primary, kind, subnet, @@ -2736,6 +2646,9 @@ async fn cmd_db_inventory( ) .await } + InventoryCommands::PhysicalDisks(args) => { + cmd_db_inventory_physical_disks(&conn, limit, args).await + } InventoryCommands::RotPages => { cmd_db_inventory_rot_pages(&conn, limit).await } @@ -2820,6 +2733,63 @@ async fn cmd_db_inventory_cabooses( Ok(()) } +async fn cmd_db_inventory_physical_disks( + conn: &DataStoreConnection<'_>, + limit: NonZeroU32, + args: PhysicalDisksArgs, +) -> Result<(), anyhow::Error> { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct DiskRow { + inv_collection_id: Uuid, + sled_id: Uuid, + slot: i64, + vendor: String, + model: String, + serial: String, + variant: String, + } + + use db::schema::inv_physical_disk::dsl; + let mut query = dsl::inv_physical_disk.into_boxed(); + query = query.limit(i64::from(u32::from(limit))); + + if let Some(collection_id) = args.collection_id { + query = query.filter( + dsl::inv_collection_id.eq(collection_id.into_untyped_uuid()), + ); + } + + if let Some(sled_id) = args.sled_id { + query = query.filter(dsl::sled_id.eq(sled_id.into_untyped_uuid())); + } + + let disks = query + .select(InvPhysicalDisk::as_select()) + .load_async(&**conn) + .await + .context("loading physical disks")?; + + let rows = disks.into_iter().map(|disk| DiskRow { + inv_collection_id: disk.inv_collection_id.into_untyped_uuid(), + sled_id: disk.sled_id.into_untyped_uuid(), + slot: disk.slot, + vendor: disk.vendor, + model: disk.model.clone(), + serial: disk.model.clone(), + variant: format!("{:?}", disk.variant), + }); + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{}", table); + + Ok(()) +} + async fn cmd_db_inventory_rot_pages( conn: &DataStoreConnection<'_>, limit: NonZeroU32, @@ -2862,7 +2832,7 @@ async fn cmd_db_inventory_collections_list( #[derive(Tabled)] #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] struct CollectionRow { - id: Uuid, + id: CollectionUuid, started: String, took: String, nsps: i64, @@ -2911,7 +2881,7 @@ async fn cmd_db_inventory_collections_list( .num_milliseconds() ); rows.push(CollectionRow { - id: collection.id, + id: collection.id.into(), started: humantime::format_rfc3339_seconds( collection.time_started.into(), ) @@ -2935,7 +2905,7 @@ async fn cmd_db_inventory_collections_list( async fn cmd_db_inventory_collections_show( opctx: &OpContext, datastore: &DataStore, - id: Uuid, + id: CollectionUuid, long_string_formatter: LongStringFormatter, ) -> Result<(), anyhow::Error> { let collection = datastore @@ -3222,7 +3192,7 @@ fn inv_collection_print_sleds(collection: &Collection) { println!(" ZONES FOUND"); for z in &zones.zones.zones { - println!(" zone {} (type {})", z.id, z.zone_type.label()); + println!(" zone {} (type {})", z.id, z.zone_type.kind()); } } else { println!(" warning: no zone information found"); @@ -3275,96 +3245,15 @@ impl LongStringFormatter { async fn cmd_db_reconfigurator_save( 
opctx: &OpContext, datastore: &DataStore, - fetch_opts: &DbFetchOptions, reconfig_save_args: &ReconfiguratorSaveArgs, ) -> Result<(), anyhow::Error> { // See Nexus::blueprint_planning_context(). - eprint!("assembling policy ... "); - let sled_rows = datastore - .sled_list_all_batched(opctx) - .await - .context("listing sleds")?; - let zpool_rows = datastore - .zpool_list_all_external_batched(opctx) - .await - .context("listing zpools")?; - let ip_pool_range_rows = { - let (authz_service_ip_pool, _) = datastore - .ip_pools_service_lookup(opctx) - .await - .context("fetching IP services pool")?; - datastore - .ip_pool_list_ranges_batched(opctx, &authz_service_ip_pool) - .await - .context("listing services IP pool ranges")? - }; - - let policy = policy_from_db( - &sled_rows, - &zpool_rows, - &ip_pool_range_rows, - NEXUS_REDUNDANCY, + eprint!("assembling reconfigurator state ... "); + let state = nexus_reconfigurator_preparation::reconfigurator_state_load( + opctx, datastore, ) - .context("assembling policy")?; - eprintln!("done."); - - eprint!("loading inventory collections ... "); - let collection_ids = datastore - .inventory_collections() - .await - .context("listing collections")?; - let collections = futures::stream::iter(collection_ids) - .filter_map(|id| async move { - let read = datastore - .inventory_collection_read(opctx, id) - .await - .with_context(|| format!("reading collection {}", id)); - if let Err(error) = &read { - eprintln!("warning: {}", error); - } - read.ok() - }) - .collect::>() - .await; - eprintln!("done."); - - eprint!("loading blueprints ... "); - let limit = fetch_opts.fetch_limit; - let pagparams = DataPageParams { - marker: None, - direction: PaginationOrder::Ascending, - limit, - }; - let blueprint_ids = datastore - .blueprints_list(opctx, &pagparams) - .await - .context("listing blueprints")?; - check_limit(&blueprint_ids, limit, || "listing blueprint ids"); - let blueprints = futures::stream::iter(blueprint_ids) - .filter_map(|bpm| async move { - let blueprint_id = bpm.id; - let read = datastore - .blueprint_read( - opctx, - &nexus_db_queries::authz::Blueprint::new( - nexus_db_queries::authz::FLEET, - blueprint_id, - LookupType::ById(blueprint_id), - ), - ) - .await - .with_context(|| format!("reading blueprint {}", blueprint_id)); - if let Err(error) = &read { - eprintln!("warning: {}", error); - } - read.ok() - }) - .collect::>() - .await; - eprintln!("done."); - - let state = - UnstableReconfiguratorState { policy: policy, collections, blueprints }; + .await?; + eprintln!("done"); let output_path = &reconfig_save_args.output_file; let file = std::fs::OpenOptions::new() diff --git a/dev-tools/omdb/src/bin/omdb/helpers.rs b/dev-tools/omdb/src/bin/omdb/helpers.rs new file mode 100644 index 0000000000..f436340a38 --- /dev/null +++ b/dev-tools/omdb/src/bin/omdb/helpers.rs @@ -0,0 +1,9 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Utility helpers for the omdb CLI. 
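The heading constants defined in this new module are referenced from `#[clap(...)]` attributes across the omdb subcommands. For readers unfamiliar with clap's option grouping, here is a minimal, self-contained sketch (assuming clap 4 with the derive feature; the struct and option names are illustrative, not the omdb definitions) of how `global = true` and `help_heading` interact:

use clap::{Parser, Subcommand};

const CONNECTION_OPTIONS_HEADING: &str = "Connection Options";

#[derive(Debug, Parser)]
struct Cli {
    /// URL of the service to talk to
    // Global arguments may appear before or after the subcommand and must be
    // optional; `help_heading` groups them under their own section in --help.
    #[arg(long, global = true, help_heading = CONNECTION_OPTIONS_HEADING)]
    url: Option<String>,

    #[command(subcommand)]
    command: Command,
}

#[derive(Debug, Subcommand)]
enum Command {
    /// Print the effective URL, if any
    Show,
}

fn main() {
    // `--url` is accepted even though it comes after the subcommand.
    let cli =
        Cli::try_parse_from(["demo", "show", "--url", "http://[::1]:12221"])
            .unwrap();
    match cli.command {
        Command::Show => println!("url = {:?}", cli.url),
    }
}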
+ +pub(crate) const CONNECTION_OPTIONS_HEADING: &str = "Connection Options"; +pub(crate) const DATABASE_OPTIONS_HEADING: &str = "Database Options"; +pub(crate) const SAFETY_OPTIONS_HEADING: &str = "Safety Options"; diff --git a/dev-tools/omdb/src/bin/omdb/main.rs b/dev-tools/omdb/src/bin/omdb/main.rs index 17de22c2fa..7469e2ba54 100644 --- a/dev-tools/omdb/src/bin/omdb/main.rs +++ b/dev-tools/omdb/src/bin/omdb/main.rs @@ -33,14 +33,20 @@ //! find strange things when debugging but we need our tools to tell us as //! much as they can!) +use anyhow::anyhow; +use anyhow::ensure; use anyhow::Context; use clap::Parser; use clap::Subcommand; +use futures::StreamExt; use omicron_common::address::Ipv6Subnet; use std::net::SocketAddr; use std::net::SocketAddrV6; +use tokio::net::TcpSocket; +mod crucible_agent; mod db; +mod helpers; mod mgs; mod nexus; mod oximeter; @@ -62,6 +68,7 @@ async fn main() -> Result<(), anyhow::Error> { OmdbCommands::Nexus(nexus) => nexus.run_cmd(&args, &log).await, OmdbCommands::Oximeter(oximeter) => oximeter.run_cmd(&log).await, OmdbCommands::SledAgent(sled) => sled.run_cmd(&args, &log).await, + OmdbCommands::CrucibleAgent(crucible) => crucible.run_cmd(&args).await, } } @@ -78,14 +85,25 @@ struct Omdb { long, value_parser = parse_dropshot_log_level, default_value = "warn", + global = true, )] log_level: dropshot::ConfigLoggingLevel, - #[arg(env = "OMDB_DNS_SERVER", long)] + #[arg( + long, + env = "OMDB_DNS_SERVER", + global = true, + help_heading = helpers::CONNECTION_OPTIONS_HEADING, + )] dns_server: Option, - /// allow potentially-destructive subcommands - #[arg(short = 'w', long = "destructive")] + /// Allow potentially-destructive subcommands. + #[arg( + short = 'w', + long = "destructive", + global = true, + help_heading = helpers::SAFETY_OPTIONS_HEADING, + )] allow_destructive: bool, #[command(subcommand)] @@ -115,6 +133,7 @@ mod check_allow_destructive { } impl Omdb { + /// Return the socket addresses of all instances of a service in DNS async fn dns_lookup_all( &self, log: slog::Logger, @@ -127,6 +146,65 @@ impl Omdb { .with_context(|| format!("looking up {:?} in DNS", service_name)) } + /// Return the socket address of one instance of a service that we can at + /// least successfully connect to + async fn dns_lookup_one( + &self, + log: slog::Logger, + service_name: internal_dns::ServiceName, + ) -> Result { + let addrs = self.dns_lookup_all(log, service_name).await?; + ensure!( + !addrs.is_empty(), + "expected at least one address from successful DNS lookup for {:?}", + service_name + ); + + // The caller is going to pick one of these addresses to connect to. + // Let's try to pick one that's at least not obviously broken by + // attempting to connect to whatever we found and returning any that we + // successfully connected to. It'd be nice if we could return the + // socket directly, but our callers are creating reqwest clients that + // cannot easily consume a socket directly. + // + // This approach scales poorly and there are many failure modes that + // this does not cover. But in the absence of better connection + // management, and with the risks in `omdb` being pretty low, and the + // value of it working pretty high, here we are. This approach should + // not be replicated elsewhere. + async fn try_connect( + sockaddr_v6: SocketAddrV6, + ) -> Result<(), anyhow::Error> { + let _ = TcpSocket::new_v6() + .context("creating socket")? 
+ .connect(SocketAddr::from(sockaddr_v6)) + .await + .with_context(|| format!("connect \"{}\"", sockaddr_v6))?; + Ok(()) + } + + let mut socket_stream = futures::stream::iter(addrs) + .map(|sockaddr_v6| async move { + (sockaddr_v6, try_connect(sockaddr_v6).await) + }) + .buffer_unordered(3); + + while let Some((sockaddr, connect_result)) = socket_stream.next().await + { + match connect_result { + Ok(()) => return Ok(sockaddr), + Err(error) => { + eprintln!( + "warning: failed to connect to {:?} at {}: {:#}", + service_name, sockaddr, error + ); + } + } + } + + Err(anyhow!("failed to connect to any instances of {:?}", service_name)) + } + async fn dns_resolver( &self, log: slog::Logger, @@ -181,6 +259,8 @@ impl Omdb { #[derive(Debug, Subcommand)] #[allow(clippy::large_enum_variant)] enum OmdbCommands { + /// Debug a specific crucible-agent + CrucibleAgent(crucible_agent::CrucibleAgentArgs), /// Query the control plane database (CockroachDB) Db(db::DbArgs), /// Debug a specific Management Gateway Service instance diff --git a/dev-tools/omdb/src/bin/omdb/mgs.rs b/dev-tools/omdb/src/bin/omdb/mgs.rs index ece4c4f109..7f33d5de15 100644 --- a/dev-tools/omdb/src/bin/omdb/mgs.rs +++ b/dev-tools/omdb/src/bin/omdb/mgs.rs @@ -4,6 +4,7 @@ //! Prototype code for collecting information from systems in the rack +use crate::helpers::CONNECTION_OPTIONS_HEADING; use crate::Omdb; use anyhow::Context; use clap::Args; @@ -32,7 +33,12 @@ use sensors::SensorsArgs; #[derive(Debug, Args)] pub struct MgsArgs { /// URL of an MGS instance to query - #[clap(long, env("OMDB_MGS_URL"))] + #[clap( + long, + env = "OMDB_MGS_URL", + global = true, + help_heading = CONNECTION_OPTIONS_HEADING, + )] mgs_url: Option, #[command(subcommand)] @@ -66,16 +72,12 @@ impl MgsArgs { eprintln!( "note: MGS URL not specified. Will pick one from DNS." 
); - let addrs = omdb - .dns_lookup_all( + let addr = omdb + .dns_lookup_one( log.clone(), internal_dns::ServiceName::ManagementGatewayService, ) .await?; - let addr = addrs.into_iter().next().expect( - "expected at least one MGS address from \ - successful DNS lookup", - ); format!("http://{}", addr) } }; diff --git a/dev-tools/omdb/src/bin/omdb/mgs/dashboard.rs b/dev-tools/omdb/src/bin/omdb/mgs/dashboard.rs index 153618b7c0..cd7628a840 100644 --- a/dev-tools/omdb/src/bin/omdb/mgs/dashboard.rs +++ b/dev-tools/omdb/src/bin/omdb/mgs/dashboard.rs @@ -101,16 +101,6 @@ trait Attributes: DynClone { fn y_axis_label(&self) -> String; fn axis_value(&self, val: f64) -> String; fn legend_value(&self, val: f64) -> String; - - fn increase(&mut self, _ndx: usize) -> Option { - None - } - - fn decrease(&mut self, _ndx: usize) -> Option { - None - } - - fn clear(&mut self) {} } dyn_clone::clone_trait_object!(Attributes); @@ -357,7 +347,7 @@ impl Graph { for (_ndx, s) in &mut self.series.iter_mut().enumerate() { if let Some(datum) = s.raw[offs] { - let point = (i as f64, datum as f64); + let point = (i as f64, f64::from(datum)); if self.interpolate != 0 { if let Some(last) = s.data.last() { @@ -374,7 +364,7 @@ impl Graph { } } - s.data.push((i as f64, datum as f64)); + s.data.push((i as f64, f64::from(datum))); } } } diff --git a/dev-tools/omdb/src/bin/omdb/mgs/sensors.rs b/dev-tools/omdb/src/bin/omdb/mgs/sensors.rs index d00bebd96c..f36e8633f9 100644 --- a/dev-tools/omdb/src/bin/omdb/mgs/sensors.rs +++ b/dev-tools/omdb/src/bin/omdb/mgs/sensors.rs @@ -480,12 +480,10 @@ fn sp_info_csv( } if let Some(sensor) = Sensor::from_string(&record[1], &record[2]) { - if sensors.get(&sensor).is_some() { + if !sensors.insert(sensor.clone()) { break; } - sensors.insert(sensor.clone()); - for (ndx, sp) in sps.iter().enumerate() { if let Some(sp) = sp { let value = match record[ndx + len].parse::() { diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index 26f2e07a41..09ae82b5d9 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -6,30 +6,39 @@ use crate::check_allow_destructive::DestructiveOperationToken; use crate::db::DbUrlOptions; +use crate::helpers::CONNECTION_OPTIONS_HEADING; use crate::Omdb; use anyhow::bail; use anyhow::Context; +use camino::Utf8PathBuf; use chrono::DateTime; use chrono::SecondsFormat; use chrono::Utc; use clap::Args; use clap::Subcommand; use clap::ValueEnum; +use futures::future::try_join; use futures::TryStreamExt; use nexus_client::types::ActivationReason; use nexus_client::types::BackgroundTask; +use nexus_client::types::BackgroundTasksActivateRequest; use nexus_client::types::CurrentStatus; use nexus_client::types::LastResult; use nexus_client::types::SledSelector; use nexus_client::types::UninitializedSledId; use nexus_db_queries::db::lookup::LookupPath; +use nexus_types::deployment::Blueprint; use nexus_types::inventory::BaseboardId; +use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use reedline::DefaultPrompt; use reedline::DefaultPromptSegment; use reedline::Reedline; use serde::Deserialize; use slog_error_chain::InlineErrorChain; use std::collections::BTreeMap; +use std::str::FromStr; use tabled::Tabled; use uuid::Uuid; @@ -37,7 +46,12 @@ use uuid::Uuid; #[derive(Debug, Args)] pub struct NexusArgs { /// URL of the Nexus internal API - #[clap(long, env("OMDB_NEXUS_URL"))] + #[clap( + long, + env = "OMDB_NEXUS_URL", + global = true, + 
help_heading = CONNECTION_OPTIONS_HEADING, + )] nexus_internal_url: Option, #[command(subcommand)] @@ -70,6 +84,15 @@ enum BackgroundTasksCommands { List, /// Print human-readable summary of the status of each background task Show, + /// Activate one or more background tasks + Activate(BackgroundTasksActivateArgs), +} + +#[derive(Debug, Args)] +struct BackgroundTasksActivateArgs { + /// Name of the background tasks to activate + #[clap(value_name = "TASK_NAME", required = true)] + tasks: Vec, } #[derive(Debug, Args)] @@ -84,36 +107,90 @@ enum BlueprintsCommands { List, /// Show a blueprint Show(BlueprintIdArgs), - /// Diff two blueprint + /// Diff two blueprints Diff(BlueprintIdsArgs), /// Delete a blueprint Delete(BlueprintIdArgs), /// Interact with the current target blueprint Target(BlueprintsTargetArgs), - /// Generate an initial blueprint from a specific inventory collection - GenerateFromCollection(CollectionIdArgs), /// Generate a new blueprint Regenerate, + /// Import a blueprint + Import(BlueprintImportArgs), } -#[derive(Debug, Args)] +#[derive(Debug, Clone, Copy)] +enum BlueprintIdOrCurrentTarget { + CurrentTarget, + BlueprintId(Uuid), +} + +impl FromStr for BlueprintIdOrCurrentTarget { + type Err = uuid::Error; + + fn from_str(s: &str) -> Result { + if matches!(s, "current-target" | "current" | "target") { + Ok(Self::CurrentTarget) + } else { + let id = s.parse()?; + Ok(Self::BlueprintId(id)) + } + } +} + +impl BlueprintIdOrCurrentTarget { + async fn resolve_to_id( + &self, + client: &nexus_client::Client, + ) -> anyhow::Result { + match self { + Self::CurrentTarget => { + let target = client + .blueprint_target_view() + .await + .context("getting current blueprint target")?; + Ok(target.target_id) + } + Self::BlueprintId(id) => Ok(*id), + } + } + + async fn resolve_to_blueprint( + &self, + client: &nexus_client::Client, + ) -> anyhow::Result { + let id = self.resolve_to_id(client).await?; + let response = client.blueprint_view(&id).await.with_context(|| { + let suffix = match self { + BlueprintIdOrCurrentTarget::CurrentTarget => { + " (current target)" + } + BlueprintIdOrCurrentTarget::BlueprintId(_) => "", + }; + format!("fetching blueprint {id}{suffix}") + })?; + Ok(response.into_inner()) + } +} + +#[derive(Debug, Clone, Copy, Args)] struct BlueprintIdArgs { - /// id of a blueprint - blueprint_id: Uuid, + /// id of blueprint (or `target` for the current target) + blueprint_id: BlueprintIdOrCurrentTarget, } #[derive(Debug, Args)] struct BlueprintIdsArgs { - /// id of first blueprint - blueprint1_id: Uuid, - /// id of second blueprint - blueprint2_id: Uuid, + /// id of first blueprint (or `target` for the current target) + blueprint1_id: BlueprintIdOrCurrentTarget, + /// id of second blueprint (or `target` for the current target) + blueprint2_id: BlueprintIdOrCurrentTarget, } #[derive(Debug, Args)] struct CollectionIdArgs { /// id of an inventory collection - collection_id: Uuid, + collection_id: CollectionUuid, } #[derive(Debug, Args)] @@ -156,6 +233,12 @@ enum BlueprintTargetSetEnabled { Inherit, } +#[derive(Debug, Args)] +struct BlueprintImportArgs { + /// path to a file containing a JSON-serialized blueprint + input: Utf8PathBuf, +} + #[derive(Debug, Args)] struct SledsArgs { #[command(subcommand)] @@ -189,7 +272,7 @@ struct SledExpungeArgs { db_url_opts: DbUrlOptions, /// sled ID - sled_id: Uuid, + sled_id: SledUuid, } impl NexusArgs { @@ -205,16 +288,12 @@ impl NexusArgs { eprintln!( "note: Nexus URL not specified. Will pick one from DNS." 
); - let addrs = omdb - .dns_lookup_all( + let addr = omdb + .dns_lookup_one( log.clone(), internal_dns::ServiceName::Nexus, ) .await?; - let addr = addrs.into_iter().next().expect( - "expected at least one Nexus address from \ - successful DNS lookup", - ); format!("http://{}", addr) } }; @@ -231,6 +310,12 @@ impl NexusArgs { NexusCommands::BackgroundTasks(BackgroundTasksArgs { command: BackgroundTasksCommands::Show, }) => cmd_nexus_background_tasks_show(&client).await, + NexusCommands::BackgroundTasks(BackgroundTasksArgs { + command: BackgroundTasksCommands::Activate(args), + }) => { + let token = omdb.check_allow_destructive()?; + cmd_nexus_background_tasks_activate(&client, args, token).await + } NexusCommands::Blueprints(BlueprintsArgs { command: BlueprintsCommands::List, @@ -293,13 +378,10 @@ impl NexusArgs { cmd_nexus_blueprints_regenerate(&client, token).await } NexusCommands::Blueprints(BlueprintsArgs { - command: BlueprintsCommands::GenerateFromCollection(args), + command: BlueprintsCommands::Import(args), }) => { let token = omdb.check_allow_destructive()?; - cmd_nexus_blueprints_generate_from_collection( - &client, args, token, - ) - .await + cmd_nexus_blueprints_import(&client, token, args).await } NexusCommands::Sleds(SledsArgs { @@ -401,6 +483,26 @@ async fn cmd_nexus_background_tasks_show( Ok(()) } +/// Runs `omdb nexus background-tasks activate` +async fn cmd_nexus_background_tasks_activate( + client: &nexus_client::Client, + args: &BackgroundTasksActivateArgs, + // This isn't quite "destructive" in the sense that of it being potentially + // dangerous, but it does modify the system rather than being a read-only + // view on it. + _destruction_token: DestructiveOperationToken, +) -> Result<(), anyhow::Error> { + let body = + BackgroundTasksActivateRequest { bgtask_names: args.tasks.clone() }; + client + .bgtask_activate(&body) + .await + .context("error activating background tasks")?; + + eprintln!("activated background tasks: {}", args.tasks.join(", ")); + Ok(()) +} + fn print_task(bgtask: &BackgroundTask) { println!("task: {:?}", bgtask.name); println!( @@ -573,7 +675,7 @@ fn print_task_details(bgtask: &BackgroundTask, details: &serde_json::Value) { ); let server_results = &details.server_results; - if server_results.len() != 0 { + if !server_results.is_empty() { let rows = server_results.iter().map(|(addr, result)| { DnsPropRow { dns_server_addr: addr, @@ -697,7 +799,7 @@ fn print_task_details(bgtask: &BackgroundTask, details: &serde_json::Value) { println!(""); println!(" TLS certificates: {}", tls_cert_rows.len()); - if tls_cert_rows.len() > 0 { + if !tls_cert_rows.is_empty() { let table = tabled::Table::new(tls_cert_rows) .with(tabled::settings::Style::empty()) .with(tabled::settings::Padding::new(0, 1, 0, 0)) @@ -793,6 +895,160 @@ fn print_task_details(bgtask: &BackgroundTask, details: &serde_json::Value) { ); } }; + } else if name == "instance_watcher" { + #[derive(Deserialize)] + struct TaskSuccess { + /// total number of instances checked + total_instances: usize, + + /// number of stale instance metrics that were deleted + pruned_instances: usize, + + /// instance states from completed checks. + /// + /// this is a mapping of stringified instance states to the number + /// of instances in that state. these stringified states correspond + /// to the `state` field recorded by the instance watcher's + /// `virtual_machine:check` timeseries with the `healthy` field set + /// to `true`. 
any changes to the instance state type which cause it + /// to print differently will be counted as a distinct state. + instance_states: BTreeMap, + + /// instance check failures. + /// + /// this is a mapping of stringified instance check failure reasons + /// to the number of instances with checks that failed for that + /// reason. these stringified failure reasons correspond to the + /// `state` field recorded by the instance watcher's + /// `virtual_machine:check` timeseries with the `healthy` field set + /// to `false`. any changes to the instance state type which cause + /// it to print differently will be counted as a distinct failure + /// reason. + failed_checks: BTreeMap, + + /// instance checks that could not be completed successfully. + /// + /// this is a mapping of stringified instance check errors + /// to the number of instance checks that were not completed due to + /// that error. these stringified errors correspond to the `reason ` + /// field recorded by the instance watcher's + /// `virtual_machine:incomplete_check` timeseries. any changes to + /// the check error type which cause it to print + /// differently will be counted as a distinct check error. + incomplete_checks: BTreeMap, + } + + match serde_json::from_value::(details.clone()) { + Err(error) => eprintln!( + "warning: failed to interpret task details: {:?}: {:?}", + error, details + ), + Ok(TaskSuccess { + total_instances, + pruned_instances, + instance_states, + failed_checks, + incomplete_checks, + }) => { + let total_successes: usize = instance_states.values().sum(); + let total_failures: usize = failed_checks.values().sum(); + let total_incomplete: usize = incomplete_checks.values().sum(); + println!(" total instances checked: {total_instances}",); + println!( + " checks completed: {}", + total_successes + total_failures + ); + println!(" successful checks: {total_successes}",); + for (state, count) in &instance_states { + println!(" -> {count} instances {state}") + } + + println!(" failed checks: {total_failures}"); + for (failure, count) in &failed_checks { + println!(" -> {count} {failure}") + } + println!( + " checks that could not be completed: {total_incomplete}", + ); + for (error, count) in &incomplete_checks { + println!(" -> {count} {error} errors") + } + println!( + " stale instance metrics pruned: {pruned_instances}" + ); + } + }; + } else if name == "service_firewall_rule_propagation" { + match serde_json::from_value::(details.clone()) { + Err(error) => eprintln!( + "warning: failed to interpret task details: {:?}: {:?}", + error, details + ), + Ok(serde_json::Value::Object(map)) => { + if !map.is_empty() { + eprintln!( + " unexpected return value from task: {:?}", + map + ) + } + } + Ok(val) => { + eprintln!(" unexpected return value from task: {:?}", val) + } + }; + } else if name == "abandoned_vmm_reaper" { + #[derive(Deserialize)] + struct TaskSuccess { + /// total number of abandoned VMMs found + found: usize, + + /// number of abandoned VMM records that were deleted + vmms_deleted: usize, + + /// number of abandoned VMM records that were already deleted when + /// we tried to delete them. + vmms_already_deleted: usize, + + /// sled resource reservations that were released + sled_reservations_deleted: usize, + + /// number of errors that occurred during the activation + error_count: usize, + + /// the last error that occurred during execution. 
+ error: Option, + } + match serde_json::from_value::(details.clone()) { + Err(error) => eprintln!( + "warning: failed to interpret task details: {:?}: {:?}", + error, details + ), + Ok(TaskSuccess { + found, + vmms_deleted, + vmms_already_deleted, + sled_reservations_deleted, + error_count, + error, + }) => { + if let Some(error) = error { + println!(" task did not complete successfully!"); + println!(" total errors: {error_count}"); + println!(" most recent error: {error}"); + } + + println!(" total abandoned VMMs found: {found}"); + println!(" VMM records deleted: {vmms_deleted}"); + println!( + " VMM records already deleted by another Nexus: {}", + vmms_already_deleted, + ); + println!( + " sled resource reservations deleted: {}", + sled_reservations_deleted, + ); + } + }; } else { println!( "warning: unknown background task: {:?} \ @@ -954,11 +1210,8 @@ async fn cmd_nexus_blueprints_show( client: &nexus_client::Client, args: &BlueprintIdArgs, ) -> Result<(), anyhow::Error> { - let blueprint = client - .blueprint_view(&args.blueprint_id) - .await - .with_context(|| format!("fetching blueprint {}", args.blueprint_id))?; - println!("{:?}", blueprint); + let blueprint = args.blueprint_id.resolve_to_blueprint(client).await?; + println!("{}", blueprint.display()); Ok(()) } @@ -966,13 +1219,13 @@ async fn cmd_nexus_blueprints_diff( client: &nexus_client::Client, args: &BlueprintIdsArgs, ) -> Result<(), anyhow::Error> { - let b1 = client.blueprint_view(&args.blueprint1_id).await.with_context( - || format!("fetching blueprint {}", args.blueprint1_id), - )?; - let b2 = client.blueprint_view(&args.blueprint2_id).await.with_context( - || format!("fetching blueprint {}", args.blueprint2_id), - )?; - println!("{}", b1.diff_sleds(&b2).display()); + let (b1, b2) = try_join( + args.blueprint1_id.resolve_to_blueprint(client), + args.blueprint2_id.resolve_to_blueprint(client), + ) + .await?; + let diff = b2.diff_since_blueprint(&b1); + println!("{}", diff.display()); Ok(()) } @@ -981,11 +1234,12 @@ async fn cmd_nexus_blueprints_delete( args: &BlueprintIdArgs, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { + let blueprint_id = args.blueprint_id.resolve_to_id(client).await?; let _ = client - .blueprint_delete(&args.blueprint_id) + .blueprint_delete(&blueprint_id) .await - .with_context(|| format!("deleting blueprint {}", args.blueprint_id))?; - println!("blueprint {} deleted", args.blueprint_id); + .with_context(|| format!("deleting blueprint {blueprint_id}"))?; + println!("blueprint {blueprint_id} deleted"); Ok(()) } @@ -1044,49 +1298,48 @@ async fn cmd_nexus_blueprints_target_set_enabled( enabled: bool, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { + let blueprint_id = args.blueprint_id.resolve_to_id(client).await?; let description = if enabled { "enabled" } else { "disabled" }; client .blueprint_target_set_enabled( &nexus_client::types::BlueprintTargetSet { - target_id: args.blueprint_id, + target_id: blueprint_id, enabled, }, ) .await .with_context(|| { - format!("setting blueprint {} to {description}", args.blueprint_id) + format!("setting blueprint {blueprint_id} to {description}") })?; - eprintln!("set target blueprint {} to {description}", args.blueprint_id); + eprintln!("set target blueprint {blueprint_id} to {description}"); Ok(()) } -async fn cmd_nexus_blueprints_generate_from_collection( +async fn cmd_nexus_blueprints_regenerate( client: &nexus_client::Client, - args: &CollectionIdArgs, _destruction_token: 
DestructiveOperationToken, ) -> Result<(), anyhow::Error> { - let blueprint = client - .blueprint_generate_from_collection( - &nexus_client::types::CollectionId { - collection_id: args.collection_id, - }, - ) - .await - .context("creating blueprint from collection id")?; - eprintln!( - "created blueprint {} from collection id {}", - blueprint.id, args.collection_id - ); + let blueprint = + client.blueprint_regenerate().await.context("generating blueprint")?; + eprintln!("generated new blueprint {}", blueprint.id); Ok(()) } -async fn cmd_nexus_blueprints_regenerate( +async fn cmd_nexus_blueprints_import( client: &nexus_client::Client, _destruction_token: DestructiveOperationToken, + args: &BlueprintImportArgs, ) -> Result<(), anyhow::Error> { - let blueprint = - client.blueprint_regenerate().await.context("generating blueprint")?; - eprintln!("generated new blueprint {}", blueprint.id); + let input_path = &args.input; + let contents = std::fs::read_to_string(input_path) + .with_context(|| format!("open {:?}", input_path))?; + let blueprint: Blueprint = serde_json::from_str(&contents) + .with_context(|| format!("read {:?}", input_path))?; + client + .blueprint_import(&blueprint) + .await + .with_context(|| format!("upload {:?}", input_path))?; + eprintln!("uploaded new blueprint {}", blueprint.id); Ok(()) } @@ -1138,14 +1391,16 @@ async fn cmd_nexus_sled_add( args: &SledAddArgs, _destruction_token: DestructiveOperationToken, ) -> Result<(), anyhow::Error> { - client + let sled_id = client .sled_add(&UninitializedSledId { part: args.part.clone(), serial: args.serial.clone(), }) .await - .context("adding sled")?; - eprintln!("added sled {} ({})", args.serial, args.part); + .context("adding sled")? + .into_inner() + .id; + eprintln!("added sled {} ({}): {sled_id}", args.serial, args.part); Ok(()) } @@ -1172,7 +1427,7 @@ async fn cmd_nexus_sled_expunge( // First, we need to look up the sled so we know its serial number. let (_authz_sled, sled) = LookupPath::new(opctx, &datastore) - .sled_id(args.sled_id) + .sled_id(args.sled_id.into_untyped_uuid()) .fetch() .await .with_context(|| format!("failed to find sled {}", args.sled_id))?; @@ -1250,7 +1505,7 @@ async fn cmd_nexus_sled_expunge( } let old_policy = client - .sled_expunge(&SledSelector { sled: args.sled_id }) + .sled_expunge(&SledSelector { sled: args.sled_id.into_untyped_uuid() }) .await .context("expunging sled")? .into_inner(); diff --git a/dev-tools/omdb/src/bin/omdb/oximeter.rs b/dev-tools/omdb/src/bin/omdb/oximeter.rs index e0f20556a2..a6dc2ce011 100644 --- a/dev-tools/omdb/src/bin/omdb/oximeter.rs +++ b/dev-tools/omdb/src/bin/omdb/oximeter.rs @@ -4,6 +4,7 @@ //! omdb commands that query oximeter +use crate::helpers::CONNECTION_OPTIONS_HEADING; use anyhow::Context; use clap::Args; use clap::Subcommand; @@ -20,7 +21,14 @@ use uuid::Uuid; #[derive(Debug, Args)] pub struct OximeterArgs { /// URL of the oximeter collector to query - #[arg(long, env("OMDB_OXIMETER_URL"))] + #[arg( + long, + env = "OMDB_OXIMETER_URL", + // This can't be global = true (i.e. passed in later in the + // command-line) because global options can't be required. If this + // changes to being optional, we should set global = true. 
+ help_heading = CONNECTION_OPTIONS_HEADING, + )] oximeter_url: String, #[command(subcommand)] @@ -67,6 +75,11 @@ impl OximeterArgs { .with(tabled::settings::Padding::new(0, 1, 0, 0)) .to_string(); println!("Collector ID: {}\n", info.id); + let last_refresh = info + .last_refresh + .map(|r| r.to_string()) + .unwrap_or(String::from("Never")); + println!("Last refresh: {}\n", last_refresh); println!("{table}"); Ok(()) } @@ -77,7 +90,6 @@ impl OximeterArgs { struct Producer { id: Uuid, address: SocketAddr, - base_route: String, interval: String, } @@ -87,7 +99,6 @@ impl From for Producer { Self { id: p.id, address: p.address.parse().unwrap(), - base_route: p.base_route, interval: humantime::format_duration(interval).to_string(), } } diff --git a/dev-tools/omdb/src/bin/omdb/sled_agent.rs b/dev-tools/omdb/src/bin/omdb/sled_agent.rs index 2d9e19d253..9a9a17eff4 100644 --- a/dev-tools/omdb/src/bin/omdb/sled_agent.rs +++ b/dev-tools/omdb/src/bin/omdb/sled_agent.rs @@ -4,6 +4,7 @@ //! omdb commands that query or update specific Sleds +use crate::helpers::CONNECTION_OPTIONS_HEADING; use crate::Omdb; use anyhow::bail; use anyhow::Context; @@ -14,7 +15,12 @@ use clap::Subcommand; #[derive(Debug, Args)] pub struct SledAgentArgs { /// URL of the Sled internal API - #[clap(long, env("OMDB_SLED_AGENT_URL"))] + #[clap( + long, + env = "OMDB_SLED_AGENT_URL", + global = true, + help_heading = CONNECTION_OPTIONS_HEADING, + )] sled_agent_url: Option, #[command(subcommand)] diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index 0f0aff1789..ccb824cda4 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -2,13 +2,14 @@ EXECUTING COMMAND: omdb ["db", "--db-url", "postgresql://root@[::1]:REDACTED_POR termination: Exited(0) --------------------------------------------- stdout: -SERIAL IP ROLE ID -sim-039be560 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED -sim-b6d65341 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED + SERIAL IP ROLE POLICY STATE ID + sim-039be560 [::1]:REDACTED_PORT scrimlet not provisionable active ..................... + sim-b6d65341 [::1]:REDACTED_PORT scrimlet in service active ..................... --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable note: database schema version matches expected () +note: listing all commissioned sleds (use -F to filter, e.g. 
-F in-service) ============================================= EXECUTING COMMAND: omdb ["db", "--db-url", "junk", "sleds"] termination: Exited(2) @@ -24,6 +25,11 @@ EXECUTING COMMAND: omdb ["nexus", "--nexus-internal-url", "http://127.0.0.1:REDA termination: Exited(0) --------------------------------------------- stdout: +task: "abandoned_vmm_reaper" + deletes sled reservations for VMMs that have been abandoned by their + instances + + task: "bfd_manager" Manages bidirectional fowarding detection (BFD) configuration on rack switches @@ -71,10 +77,18 @@ task: "external_endpoints" on each one +task: "instance_watcher" + periodically checks instance states + + task: "inventory_collection" collects hardware and software inventory data from the whole system +task: "metrics_producer_gc" + unregisters Oximeter metrics producers that have not renewed their lease + + task: "nat_v4_garbage_collector" prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table based on a predetermined retention policy @@ -84,10 +98,19 @@ task: "phantom_disks" detects and un-deletes phantom disks +task: "physical_disk_adoption" + ensure new physical disks are automatically marked in-service + + task: "region_replacement" detects if a region requires replacing and begins the process +task: "service_firewall_rule_propagation" + propagates VPC firewall rules for Omicron services with external network + connectivity + + task: "service_zone_nat_tracker" ensures service zone nat records are recorded in NAT RPW table @@ -96,6 +119,10 @@ task: "switch_port_config_manager" manages switch port settings for rack switches +task: "v2p_manager" + manages opte v2p mappings for vpc networking + + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT @@ -118,6 +145,11 @@ EXECUTING COMMAND: omdb ["nexus", "background-tasks", "doc"] termination: Exited(0) --------------------------------------------- stdout: +task: "abandoned_vmm_reaper" + deletes sled reservations for VMMs that have been abandoned by their + instances + + task: "bfd_manager" Manages bidirectional fowarding detection (BFD) configuration on rack switches @@ -165,10 +197,18 @@ task: "external_endpoints" on each one +task: "instance_watcher" + periodically checks instance states + + task: "inventory_collection" collects hardware and software inventory data from the whole system +task: "metrics_producer_gc" + unregisters Oximeter metrics producers that have not renewed their lease + + task: "nat_v4_garbage_collector" prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table based on a predetermined retention policy @@ -178,10 +218,19 @@ task: "phantom_disks" detects and un-deletes phantom disks +task: "physical_disk_adoption" + ensure new physical disks are automatically marked in-service + + task: "region_replacement" detects if a region requires replacing and begins the process +task: "service_firewall_rule_propagation" + propagates VPC firewall rules for Omicron services with external network + connectivity + + task: "service_zone_nat_tracker" ensures service zone nat records are recorded in NAT RPW table @@ -190,6 +239,10 @@ task: "switch_port_config_manager" manages switch port settings for rack switches +task: "v2p_manager" + manages opte v2p mappings for vpc networking + + --------------------------------------------- stderr: note: Nexus URL not specified. Will pick one from DNS. 
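The `instance_watcher` entries listed above are summarized by the new branch in nexus.rs, which deserializes the task's JSON details into a small struct of counters before printing. A rough, self-contained sketch of that pattern (struct and function shape invented for the example, field names follow the PR; assumes the serde and serde_json crates):

use std::collections::BTreeMap;

use serde::Deserialize;
use serde_json::json;

#[derive(Deserialize)]
struct InstanceWatcherSummary {
    total_instances: usize,
    pruned_instances: usize,
    instance_states: BTreeMap<String, usize>,
    failed_checks: BTreeMap<String, usize>,
    incomplete_checks: BTreeMap<String, usize>,
}

fn main() {
    // A stand-in for the `details` value a background task reports.
    let details = json!({
        "total_instances": 4,
        "pruned_instances": 1,
        "instance_states": { "running": 3 },
        "failed_checks": { "unreachable": 1 },
        "incomplete_checks": {},
    });

    match serde_json::from_value::<InstanceWatcherSummary>(details) {
        Err(error) => {
            eprintln!("warning: failed to interpret task details: {error}")
        }
        Ok(s) => {
            let ok: usize = s.instance_states.values().sum();
            let failed: usize = s.failed_checks.values().sum();
            let incomplete: usize = s.incomplete_checks.values().sum();
            println!("total instances checked: {}", s.total_instances);
            println!("checks completed: {}", ok + failed);
            println!("checks that could not be completed: {incomplete}");
            println!("stale instance metrics pruned: {}", s.pruned_instances);
        }
    }
}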
@@ -199,6 +252,11 @@ EXECUTING COMMAND: omdb ["--dns-server", "[::1]:REDACTED_PORT", "nexus", "backgr termination: Exited(0) --------------------------------------------- stdout: +task: "abandoned_vmm_reaper" + deletes sled reservations for VMMs that have been abandoned by their + instances + + task: "bfd_manager" Manages bidirectional fowarding detection (BFD) configuration on rack switches @@ -246,10 +304,18 @@ task: "external_endpoints" on each one +task: "instance_watcher" + periodically checks instance states + + task: "inventory_collection" collects hardware and software inventory data from the whole system +task: "metrics_producer_gc" + unregisters Oximeter metrics producers that have not renewed their lease + + task: "nat_v4_garbage_collector" prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table based on a predetermined retention policy @@ -259,10 +325,19 @@ task: "phantom_disks" detects and un-deletes phantom disks +task: "physical_disk_adoption" + ensure new physical disks are automatically marked in-service + + task: "region_replacement" detects if a region requires replacing and begins the process +task: "service_firewall_rule_propagation" + propagates VPC firewall rules for Omicron services with external network + connectivity + + task: "service_zone_nat_tracker" ensures service zone nat records are recorded in NAT RPW table @@ -271,6 +346,10 @@ task: "switch_port_config_manager" manages switch port settings for rack switches +task: "v2p_manager" + manages opte v2p mappings for vpc networking + + --------------------------------------------- stderr: note: Nexus URL not specified. Will pick one from DNS. @@ -280,27 +359,29 @@ EXECUTING COMMAND: omdb ["db", "sleds"] termination: Exited(0) --------------------------------------------- stdout: -SERIAL IP ROLE ID -sim-039be560 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED -sim-b6d65341 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED + SERIAL IP ROLE POLICY STATE ID + sim-039be560 [::1]:REDACTED_PORT scrimlet not provisionable active ..................... + sim-b6d65341 [::1]:REDACTED_PORT scrimlet in service active ..................... --------------------------------------------- stderr: note: database URL not specified. Will search DNS. note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable note: database schema version matches expected () +note: listing all commissioned sleds (use -F to filter, e.g. -F in-service) ============================================= EXECUTING COMMAND: omdb ["--dns-server", "[::1]:REDACTED_PORT", "db", "sleds"] termination: Exited(0) --------------------------------------------- stdout: -SERIAL IP ROLE ID -sim-039be560 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED -sim-b6d65341 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED + SERIAL IP ROLE POLICY STATE ID + sim-039be560 [::1]:REDACTED_PORT scrimlet not provisionable active ..................... + sim-b6d65341 [::1]:REDACTED_PORT scrimlet in service active ..................... --------------------------------------------- stderr: note: database URL not specified. Will search DNS. note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable note: database schema version matches expected () +note: listing all commissioned sleds (use -F to filter, e.g. 
-F in-service) ============================================= diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index dcdd3b3e26..07ebeb10bf 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -26,7 +26,7 @@ termination: Exited(0) stdout: DNS zone: oxide-dev.test (External) requested version: 2 (created at ) -version created by Nexus: REDACTED_UUID_REDACTED_UUID_REDACTED +version created by Nexus: ..................... version created because: create silo: "test-suite-silo" changes: names added: 1, names removed: 0 @@ -58,7 +58,7 @@ stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable note: database schema version matches expected () ============================================= -EXECUTING COMMAND: omdb ["db", "reconfigurator-save", ""] +EXECUTING COMMAND: omdb ["db", "reconfigurator-save", ""] termination: Exited(0) --------------------------------------------- stdout: @@ -66,57 +66,28 @@ stdout: stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable note: database schema version matches expected () -assembling policy ... done. -loading inventory collections ... done. -loading blueprints ... done. -wrote +assembling reconfigurator state ... done +wrote ============================================= -EXECUTING COMMAND: omdb ["db", "services", "list-instances"] -termination: Exited(0) ---------------------------------------------- -stdout: -SERVICE INSTANCE_ID ADDR SLED_SERIAL -CruciblePantry REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 -ExternalDns REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 -InternalDns REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 -Nexus REDACTED_UUID_REDACTED_UUID_REDACTED [::ffff:127.0.0.1]:REDACTED_PORT sim-b6d65341 -Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-039be560 -Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT sim-b6d65341 ---------------------------------------------- -stderr: -note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected () -============================================= -EXECUTING COMMAND: omdb ["db", "services", "list-by-sled"] +EXECUTING COMMAND: omdb ["db", "sleds"] termination: Exited(0) --------------------------------------------- stdout: -sled: sim-039be560 (id REDACTED_UUID_REDACTED_UUID_REDACTED) - - SERVICE INSTANCE_ID ADDR - Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT - -sled: sim-b6d65341 (id REDACTED_UUID_REDACTED_UUID_REDACTED) - - SERVICE INSTANCE_ID ADDR - CruciblePantry REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT - ExternalDns REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT - InternalDns REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT - Nexus REDACTED_UUID_REDACTED_UUID_REDACTED [::ffff:127.0.0.1]:REDACTED_PORT - Mgd REDACTED_UUID_REDACTED_UUID_REDACTED [::1]:REDACTED_PORT - + SERIAL IP ROLE POLICY STATE ID + sim-039be560 [::1]:REDACTED_PORT scrimlet not provisionable active ..................... + sim-b6d65341 [::1]:REDACTED_PORT scrimlet in service active ..................... --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable note: database schema version matches expected () +note: listing all commissioned sleds (use -F to filter, e.g. 
-F in-service) ============================================= -EXECUTING COMMAND: omdb ["db", "sleds"] +EXECUTING COMMAND: omdb ["db", "sleds", "-F", "discretionary"] termination: Exited(0) --------------------------------------------- stdout: -SERIAL IP ROLE ID -sim-039be560 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED -sim-b6d65341 [::1]:REDACTED_PORT scrimlet REDACTED_UUID_REDACTED_UUID_REDACTED + SERIAL IP ROLE POLICY STATE ID + sim-b6d65341 [::1]:REDACTED_PORT scrimlet in service active ..................... --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable @@ -231,6 +202,11 @@ EXECUTING COMMAND: omdb ["nexus", "background-tasks", "doc"] termination: Exited(0) --------------------------------------------- stdout: +task: "abandoned_vmm_reaper" + deletes sled reservations for VMMs that have been abandoned by their + instances + + task: "bfd_manager" Manages bidirectional fowarding detection (BFD) configuration on rack switches @@ -278,10 +254,18 @@ task: "external_endpoints" on each one +task: "instance_watcher" + periodically checks instance states + + task: "inventory_collection" collects hardware and software inventory data from the whole system +task: "metrics_producer_gc" + unregisters Oximeter metrics producers that have not renewed their lease + + task: "nat_v4_garbage_collector" prunes soft-deleted IPV4 NAT entries from ipv4_nat_entry table based on a predetermined retention policy @@ -291,10 +275,19 @@ task: "phantom_disks" detects and un-deletes phantom disks +task: "physical_disk_adoption" + ensure new physical disks are automatically marked in-service + + task: "region_replacement" detects if a region requires replacing and begins the process +task: "service_firewall_rule_propagation" + propagates VPC firewall rules for Omicron services with external network + connectivity + + task: "service_zone_nat_tracker" ensures service zone nat records are recorded in NAT RPW table @@ -303,6 +296,10 @@ task: "switch_port_config_manager" manages switch port settings for rack switches +task: "v2p_manager" + manages opte v2p mappings for vpc networking + + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ @@ -314,14 +311,14 @@ stdout: task: "dns_config_internal" configured period: every 1m currently executing: no - last completed activation: iter 3, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms last generation found: 1 task: "dns_servers_internal" configured period: every 1m currently executing: no - last completed activation: iter 3, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms servers found: 1 @@ -331,7 +328,7 @@ task: "dns_servers_internal" task: "dns_propagation_internal" configured period: every 1m currently executing: no - last completed activation: iter 4, triggered by a dependent task completing + last completed activation: , triggered by a dependent task completing started at (s ago) and ran for ms attempt to propagate generation: 1 @@ -342,14 +339,14 @@ task: "dns_propagation_internal" task: "dns_config_external" configured period: every 1m currently executing: no - last completed activation: iter 3, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms last generation found: 
2 task: "dns_servers_external" configured period: every 1m currently executing: no - last completed activation: iter 3, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms servers found: 1 @@ -359,7 +356,7 @@ task: "dns_servers_external" task: "dns_propagation_external" configured period: every 1m currently executing: no - last completed activation: iter 4, triggered by a dependent task completing + last completed activation: , triggered by a dependent task completing started at (s ago) and ran for ms attempt to propagate generation: 2 @@ -370,87 +367,269 @@ task: "dns_propagation_external" task: "nat_v4_garbage_collector" configured period: every 30s currently executing: no - last completed activation: iter 2, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms last completion reported error: failed to resolve addresses for Dendrite services: no record found for Query { name: Name("_dendrite._tcp.control-plane.oxide.internal."), query_type: SRV, query_class: IN } task: "blueprint_loader" configured period: every 1m 40s currently executing: no - last completed activation: iter 2, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms -warning: unknown background task: "blueprint_loader" (don't know how to interpret details: Object {"status": String("no target blueprint")}) + last completion reported error: failed to read target blueprint: Internal Error: no target blueprint set task: "blueprint_executor" configured period: every 10m currently executing: no - last completed activation: iter 2, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms last completion reported error: no blueprint +task: "abandoned_vmm_reaper" + configured period: every 1m + currently executing: no + last completed activation: , triggered by an explicit signal + started at (s ago) and ran for ms + total abandoned VMMs found: 0 + VMM records deleted: 0 + VMM records already deleted by another Nexus: 0 + sled resource reservations deleted: 0 + task: "bfd_manager" configured period: every 30s currently executing: no - last completed activation: iter 2, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms last completion reported error: failed to resolve addresses for Dendrite services: no record found for Query { name: Name("_dendrite._tcp.control-plane.oxide.internal."), query_type: SRV, query_class: IN } task: "external_endpoints" configured period: every 1m currently executing: no - last completed activation: iter 3, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms external API endpoints: 2 ('*' below marks default) SILO_ID DNS_NAME - REDACTED_UUID_REDACTED_UUID_REDACTED default-silo.sys.oxide-dev.test - * REDACTED_UUID_REDACTED_UUID_REDACTED test-suite-silo.sys.oxide-dev.test + ..................... default-silo.sys.oxide-dev.test + * ..................... 
test-suite-silo.sys.oxide-dev.test warnings: 2 - warning: silo REDACTED_UUID_REDACTED_UUID_REDACTED with DNS name "default-silo.sys.oxide-dev.test" has no usable certificates - warning: silo REDACTED_UUID_REDACTED_UUID_REDACTED with DNS name "test-suite-silo.sys.oxide-dev.test" has no usable certificates + warning: silo ..................... with DNS name "default-silo.sys.oxide-dev.test" has no usable certificates + warning: silo ..................... with DNS name "test-suite-silo.sys.oxide-dev.test" has no usable certificates TLS certificates: 0 +task: "instance_watcher" + configured period: every 30s + currently executing: no + last completed activation: , triggered by an explicit signal + started at (s ago) and ran for ms + total instances checked: 0 + checks completed: 0 + successful checks: 0 + failed checks: 0 + checks that could not be completed: 0 + stale instance metrics pruned: 0 + task: "inventory_collection" configured period: every 10m currently executing: no - last completed activation: iter 3, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms - last collection id: REDACTED_UUID_REDACTED_UUID_REDACTED + last collection id: ..................... last collection started: last collection done: +task: "metrics_producer_gc" + configured period: every 1m + currently executing: no + last completed activation: , triggered by an explicit signal + started at (s ago) and ran for ms +warning: unknown background task: "metrics_producer_gc" (don't know how to interpret details: Object {"expiration": String(""), "pruned": Array []}) + task: "phantom_disks" configured period: every 30s currently executing: no - last completed activation: iter 2, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms number of phantom disks deleted: 0 number of phantom disk delete errors: 0 +task: "physical_disk_adoption" + configured period: every 30s + currently executing: no + last completed activation: , triggered by a dependent task completing + started at (s ago) and ran for ms + last completion reported error: task disabled + task: "region_replacement" configured period: every 30s currently executing: no - last completed activation: iter 2, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms number of region replacements started ok: 0 number of region replacement start errors: 0 +task: "service_firewall_rule_propagation" + configured period: every 5m + currently executing: no + last completed activation: , triggered by an explicit signal + started at (s ago) and ran for ms + task: "service_zone_nat_tracker" configured period: every 30s currently executing: no - last completed activation: iter 2, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms last completion reported error: inventory collection is None task: "switch_port_config_manager" configured period: every 30s currently executing: no - last completed activation: iter 2, triggered by an explicit signal + last completed activation: , triggered by an explicit signal started at (s ago) and ran for ms warning: unknown background task: "switch_port_config_manager" (don't know how to interpret details: Object {}) +task: "v2p_manager" + configured period: every 30s + currently executing: no + last completed activation: , triggered by an explicit 
signal + started at (s ago) and ran for ms +warning: unknown background task: "v2p_manager" (don't know how to interpret details: Object {}) + +--------------------------------------------- +stderr: +note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ +============================================= +EXECUTING COMMAND: omdb ["--destructive", "nexus", "background-tasks", "activate", "inventory_collection"] +termination: Exited(0) +--------------------------------------------- +stdout: +--------------------------------------------- +stderr: +note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ +activated background tasks: inventory_collection +============================================= +EXECUTING COMMAND: omdb ["nexus", "blueprints", "list"] +termination: Exited(0) +--------------------------------------------- +stdout: +T ENA ID PARENT TIME_CREATED +* no ............. +--------------------------------------------- +stderr: +note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ +============================================= +EXECUTING COMMAND: omdb ["nexus", "blueprints", "show", "............."] +termination: Exited(0) +--------------------------------------------- +stdout: +blueprint ............. +parent: + +!..................... +WARNING: Zones exist without physical disks! + omicron zones at generation 2: + --------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + --------------------------------------------------------------------------------------- + clickhouse ..................... in service ::1 + cockroach_db ..................... in service ::1 + crucible_pantry ..................... in service ::1 + external_dns ..................... in service ::1 + internal_dns ..................... in service ::1 + nexus ..................... in service ::ffff:127.0.0.1 + + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: d4d87aa2ad877a4cc2fddd0573952362739110de + cluster.preserve_downgrade_option: "22.1" + + METADATA: + created by::::::::::: nexus-test-utils + created at::::::::::: + comment:::::::::::::: initial test blueprint + internal DNS version: 1 + external DNS version: 2 + + +--------------------------------------------- +stderr: +note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ +============================================= +EXECUTING COMMAND: omdb ["nexus", "blueprints", "show", "current-target"] +termination: Exited(0) +--------------------------------------------- +stdout: +blueprint ............. +parent: + +!..................... +WARNING: Zones exist without physical disks! + omicron zones at generation 2: + --------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + --------------------------------------------------------------------------------------- + clickhouse ..................... in service ::1 + cockroach_db ..................... in service ::1 + crucible_pantry ..................... in service ::1 + external_dns ..................... in service ::1 + internal_dns ..................... in service ::1 + nexus ..................... 
in service ::ffff:127.0.0.1 + + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: d4d87aa2ad877a4cc2fddd0573952362739110de + cluster.preserve_downgrade_option: "22.1" + + METADATA: + created by::::::::::: nexus-test-utils + created at::::::::::: + comment:::::::::::::: initial test blueprint + internal DNS version: 1 + external DNS version: 2 + + +--------------------------------------------- +stderr: +note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ +============================================= +EXECUTING COMMAND: omdb ["nexus", "blueprints", "diff", ".............", "current-target"] +termination: Exited(0) +--------------------------------------------- +stdout: +from: blueprint ............. +to: blueprint ............. + + UNCHANGED SLEDS: + + sled .....................: + + sled .....................: + + omicron zones at generation 2: + --------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + --------------------------------------------------------------------------------------- + clickhouse ..................... in service ::1 + cockroach_db ..................... in service ::1 + crucible_pantry ..................... in service ::1 + external_dns ..................... in service ::1 + internal_dns ..................... in service ::1 + nexus ..................... in service ::ffff:127.0.0.1 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: d4d87aa2ad877a4cc2fddd0573952362739110de (unchanged) + cluster.preserve_downgrade_option: "22.1" (unchanged) + + METADATA: + internal DNS version: 1 (unchanged) + external DNS version: 2 (unchanged) + + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ diff --git a/dev-tools/omdb/tests/test_all_output.rs b/dev-tools/omdb/tests/test_all_output.rs index 4a9802eee6..19be33631d 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -9,10 +9,12 @@ use expectorate::assert_contents; use nexus_test_utils_macros::nexus_test; +use nexus_types::deployment::SledFilter; use nexus_types::deployment::UnstableReconfiguratorState; use omicron_test_utils::dev::test_cmds::path_to_executable; -use omicron_test_utils::dev::test_cmds::redact_variable; +use omicron_test_utils::dev::test_cmds::redact_extra; use omicron_test_utils::dev::test_cmds::run_command; +use omicron_test_utils::dev::test_cmds::ExtraRedactions; use slog_error_chain::InlineErrorChain; use std::fmt::Write; use std::path::Path; @@ -44,19 +46,21 @@ async fn test_omdb_usage_errors() { &["db", "dns"], &["db", "dns", "diff"], &["db", "dns", "names"], - &["db", "services"], + &["db", "sleds", "--help"], &["db", "snapshots"], &["db", "network"], &["mgs"], &["nexus"], &["nexus", "background-tasks"], + &["nexus", "blueprints"], + &["nexus", "sleds"], &["sled-agent"], &["sled-agent", "zones"], &["sled-agent", "zpools"], ]; for args in invocations { - do_run(&mut output, |exec| exec, &cmd_path, args, &[]).await; + do_run(&mut output, |exec| exec, &cmd_path, args).await; } assert_contents("tests/usage_errors.out", &output); @@ -77,7 +81,10 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { let tmpdir = camino_tempfile::tempdir() .expect("failed to create temporary directory"); let tmppath = tmpdir.path().join("reconfigurator-save.out"); + let initial_blueprint_id = cptestctx.initial_blueprint_id.to_string(); + let mut output = String::new(); + let invocations: &[&[&str]] = &[ 
&["db", "disks", "list"], &["db", "dns", "show"], @@ -85,12 +92,28 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { &["db", "dns", "names", "external", "2"], &["db", "instances"], &["db", "reconfigurator-save", tmppath.as_str()], - &["db", "services", "list-instances"], - &["db", "services", "list-by-sled"], &["db", "sleds"], + &["db", "sleds", "-F", "discretionary"], &["mgs", "inventory"], &["nexus", "background-tasks", "doc"], &["nexus", "background-tasks", "show"], + &[ + "--destructive", + "nexus", + "background-tasks", + "activate", + "inventory_collection", + ], + &["nexus", "blueprints", "list"], + &["nexus", "blueprints", "show", &initial_blueprint_id], + &["nexus", "blueprints", "show", "current-target"], + &[ + "nexus", + "blueprints", + "diff", + &initial_blueprint_id, + "current-target", + ], // We can't easily test the sled agent output because that's only // provided by a real sled agent, which is not available in the // ControlPlaneTestContext. @@ -101,7 +124,7 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { let p = postgres_url.to_string(); let u = nexus_internal_url.clone(); let g = mgs_url.clone(); - do_run( + do_run_extra( &mut output, move |exec| { exec.env("OMDB_DB_URL", &p) @@ -110,7 +133,9 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { }, &cmd_path, args, - &[tmppath.as_str()], + ExtraRedactions::new() + .variable_length("tmp_path", tmppath.as_str()) + .fixed_length("blueprint_id", &initial_blueprint_id), ) .await; } @@ -136,8 +161,14 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { InlineErrorChain::new(&error), ) }); - assert!(parsed.policy.sleds.len() > 0); - assert!(parsed.collections.len() > 0); + // Did we find at least one sled in the planning input, and at least one + // collection? 
+ assert!(parsed + .planning_input + .all_sled_ids(SledFilter::Commissioned) + .next() + .is_some()); + assert!(!parsed.collections.is_empty()); gwtestctx.teardown().await; } @@ -163,7 +194,7 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { // Database URL // Case 1: specified on the command line let args = &["db", "--db-url", &postgres_url, "sleds"]; - do_run(&mut output, |exec| exec, &cmd_path, args, &[]).await; + do_run(&mut output, |exec| exec, &cmd_path, args).await; // Case 2: specified in multiple places (command-line argument wins) let args = &["db", "--db-url", "junk", "sleds"]; @@ -173,7 +204,6 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { move |exec| exec.env("OMDB_DB_URL", &p), &cmd_path, args, - &[], ) .await; @@ -186,7 +216,7 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { "background-tasks", "doc", ]; - do_run(&mut output, |exec| exec, &cmd_path.clone(), args, &[]).await; + do_run(&mut output, |exec| exec, &cmd_path.clone(), args).await; // Case 2: specified in multiple places (command-line argument wins) let args = @@ -197,7 +227,6 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { move |exec| exec.env("OMDB_NEXUS_URL", &n), &cmd_path, args, - &[], ) .await; @@ -210,7 +239,6 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { move |exec| exec.env("OMDB_DNS_SERVER", dns_sockaddr.to_string()), &cmd_path, args, - &[], ) .await; @@ -221,7 +249,7 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { "background-tasks", "doc", ]; - do_run(&mut output, move |exec| exec, &cmd_path, args, &[]).await; + do_run(&mut output, move |exec| exec, &cmd_path, args).await; let args = &["db", "sleds"]; do_run( @@ -229,12 +257,11 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { move |exec| exec.env("OMDB_DNS_SERVER", dns_sockaddr.to_string()), &cmd_path, args, - &[], ) .await; let args = &["--dns-server", &dns_sockaddr.to_string(), "db", "sleds"]; - do_run(&mut output, move |exec| exec, &cmd_path, args, &[]).await; + do_run(&mut output, move |exec| exec, &cmd_path, args).await; assert_contents("tests/env.out", &output); } @@ -244,7 +271,19 @@ async fn do_run( modexec: F, cmd_path: &Path, args: &[&str], - extra_redactions: &[&str], +) where + F: FnOnce(Exec) -> Exec + Send + 'static, +{ + do_run_extra(output, modexec, cmd_path, args, &ExtraRedactions::new()) + .await; +} + +async fn do_run_extra( + output: &mut String, + modexec: F, + cmd_path: &Path, + args: &[&str], + extra_redactions: &ExtraRedactions<'_>, ) where F: FnOnce(Exec) -> Exec + Send + 'static, { @@ -254,7 +293,7 @@ async fn do_run( "EXECUTING COMMAND: {} {:?}\n", cmd_path.file_name().expect("missing command").to_string_lossy(), args.iter() - .map(|r| redact_variable(r, extra_redactions)) + .map(|r| redact_extra(r, extra_redactions)) .collect::>(), ) .unwrap(); @@ -287,9 +326,9 @@ async fn do_run( write!(output, "termination: {:?}\n", exit_status).unwrap(); write!(output, "---------------------------------------------\n").unwrap(); write!(output, "stdout:\n").unwrap(); - output.push_str(&redact_variable(&stdout_text, extra_redactions)); + output.push_str(&redact_extra(&stdout_text, extra_redactions)); write!(output, "---------------------------------------------\n").unwrap(); write!(output, "stderr:\n").unwrap(); - output.push_str(&redact_variable(&stderr_text, extra_redactions)); + output.push_str(&redact_extra(&stderr_text, extra_redactions)); write!(output, 
"=============================================\n").unwrap(); } diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index b704982266..15fc9d322e 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -9,18 +9,23 @@ Omicron debugger (unstable) Usage: omdb [OPTIONS] Commands: - db Query the control plane database (CockroachDB) - mgs Debug a specific Management Gateway Service instance - nexus Debug a specific Nexus instance - oximeter Query oximeter collector state - sled-agent Debug a specific Sled - help Print this message or the help of the given subcommand(s) + crucible-agent Debug a specific crucible-agent + db Query the control plane database (CockroachDB) + mgs Debug a specific Management Gateway Service instance + nexus Debug a specific Nexus instance + oximeter Query oximeter collector state + sled-agent Debug a specific Sled + help Print this message or the help of the given subcommand(s) Options: - --log-level log level filter [env: LOG_LEVEL=] [default: warn] + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help (see more with '--help') + +Connection Options: --dns-server [env: OMDB_DNS_SERVER=] - -w, --destructive allow potentially-destructive subcommands - -h, --help Print help (see more with '--help') + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands ============================================= EXECUTING COMMAND: omdb ["--help"] termination: Exited(0) @@ -34,12 +39,13 @@ using internal APIs. This is a prototype. The commands and output are unstable Usage: omdb [OPTIONS] Commands: - db Query the control plane database (CockroachDB) - mgs Debug a specific Management Gateway Service instance - nexus Debug a specific Nexus instance - oximeter Query oximeter collector state - sled-agent Debug a specific Sled - help Print this message or the help of the given subcommand(s) + crucible-agent Debug a specific crucible-agent + db Query the control plane database (CockroachDB) + mgs Debug a specific Management Gateway Service instance + nexus Debug a specific Nexus instance + oximeter Query oximeter collector state + sled-agent Debug a specific Sled + help Print this message or the help of the given subcommand(s) Options: --log-level @@ -48,14 +54,16 @@ Options: [env: LOG_LEVEL=] [default: warn] + -h, --help + Print help (see a summary with '-h') + +Connection Options: --dns-server [env: OMDB_DNS_SERVER=] +Safety Options: -w, --destructive - allow potentially-destructive subcommands - - -h, --help - Print help (see a summary with '-h') + Allow potentially-destructive subcommands --------------------------------------------- stderr: ============================================= @@ -99,7 +107,6 @@ Commands: dns Print information about internal and external DNS inventory Print information about collected hardware/software inventory reconfigurator-save Save the current Reconfigurator inputs to a file - services Print information about control plane services sleds Print information about sleds instances Print information about customer instances network Print information about the network @@ -108,12 +115,21 @@ Commands: help Print this message or the help of the given subcommand(s) Options: - --db-url URL of the database SQL interface [env: OMDB_DB_URL=] + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: + --db-url URL of the database SQL interface [env: OMDB_DB_URL=] + --dns-server 
[env: OMDB_DNS_SERVER=] + +Database Options: --fetch-limit limit to apply to queries that fetch rows [env: OMDB_FETCH_LIMIT=] [default: 500] --include-deleted whether to include soft-deleted records when enumerating objects that can be soft-deleted - -h, --help Print help + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands ============================================= EXECUTING COMMAND: omdb ["db", "--help"] termination: Exited(0) @@ -129,7 +145,6 @@ Commands: dns Print information about internal and external DNS inventory Print information about collected hardware/software inventory reconfigurator-save Save the current Reconfigurator inputs to a file - services Print information about control plane services sleds Print information about sleds instances Print information about customer instances network Print information about the network @@ -138,12 +153,21 @@ Commands: help Print this message or the help of the given subcommand(s) Options: - --db-url URL of the database SQL interface [env: OMDB_DB_URL=] + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: + --db-url URL of the database SQL interface [env: OMDB_DB_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Database Options: --fetch-limit limit to apply to queries that fetch rows [env: OMDB_FETCH_LIMIT=] [default: 500] --include-deleted whether to include soft-deleted records when enumerating objects that can be soft-deleted - -h, --help Print help + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands --------------------------------------------- stderr: ============================================= @@ -155,7 +179,7 @@ stdout: stderr: Print information about disks -Usage: omdb db disks +Usage: omdb db disks [OPTIONS] Commands: info Get info for a specific disk @@ -164,7 +188,21 @@ Commands: help Print this message or the help of the given subcommand(s) Options: - -h, --help Print help + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: + --db-url URL of the database SQL interface [env: OMDB_DB_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Database Options: + --fetch-limit limit to apply to queries that fetch rows [env: + OMDB_FETCH_LIMIT=] [default: 500] + --include-deleted whether to include soft-deleted records when enumerating objects + that can be soft-deleted + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands ============================================= EXECUTING COMMAND: omdb ["db", "dns"] termination: Exited(2) @@ -174,7 +212,7 @@ stdout: stderr: Print information about internal and external DNS -Usage: omdb db dns +Usage: omdb db dns [OPTIONS] Commands: show Summarize current version of all DNS zones @@ -183,7 +221,21 @@ Commands: help Print this message or the help of the given subcommand(s) Options: - -h, --help Print help + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: + --db-url URL of the database SQL interface [env: OMDB_DB_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Database Options: + --fetch-limit limit to apply to queries that fetch rows [env: + OMDB_FETCH_LIMIT=] [default: 500] + --include-deleted whether to include soft-deleted records when enumerating objects + that can be soft-deleted + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands ============================================= EXECUTING COMMAND: omdb ["db", 
"dns", "diff"] termination: Exited(2) @@ -213,23 +265,62 @@ Usage: omdb db dns names For more information, try '--help'. ============================================= -EXECUTING COMMAND: omdb ["db", "services"] -termination: Exited(2) +EXECUTING COMMAND: omdb ["db", "sleds", "--help"] +termination: Exited(0) --------------------------------------------- stdout: ---------------------------------------------- -stderr: -Print information about control plane services - -Usage: omdb db services +Print information about sleds -Commands: - list-instances List service instances - list-by-sled List service instances, grouped by sled - help Print this message or the help of the given subcommand(s) +Usage: omdb db sleds [OPTIONS] Options: - -h, --help Print help + -F, --filter + Show sleds that match the given filter + + Possible values: + - commissioned: All sleds that are currently part of the control plane cluster + - decommissioned: All sleds that were previously part of the control plane cluster + but have been decommissioned + - discretionary: Sleds that are eligible for discretionary services + - in-service: Sleds that are in service (even if they might not be eligible + for discretionary services) + - query-during-inventory: Sleds whose sled agents should be queried for inventory + - reservation-create: Sleds on which reservations can be created + - vpc-firewall: Sleds which should be sent VPC firewall rules + + --log-level + log level filter + + [env: LOG_LEVEL=] + [default: warn] + + -h, --help + Print help (see a summary with '-h') + +Connection Options: + --db-url + URL of the database SQL interface + + [env: OMDB_DB_URL=] + + --dns-server + [env: OMDB_DNS_SERVER=] + +Database Options: + --fetch-limit + limit to apply to queries that fetch rows + + [env: OMDB_FETCH_LIMIT=] + [default: 500] + + --include-deleted + whether to include soft-deleted records when enumerating objects that can be soft-deleted + +Safety Options: + -w, --destructive + Allow potentially-destructive subcommands +--------------------------------------------- +stderr: ============================================= EXECUTING COMMAND: omdb ["db", "snapshots"] termination: Exited(2) @@ -239,7 +330,7 @@ stdout: stderr: Print information about snapshots -Usage: omdb db snapshots +Usage: omdb db snapshots [OPTIONS] Commands: info Get info for a specific snapshot @@ -247,7 +338,21 @@ Commands: help Print this message or the help of the given subcommand(s) Options: - -h, --help Print help + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: + --db-url URL of the database SQL interface [env: OMDB_DB_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Database Options: + --fetch-limit limit to apply to queries that fetch rows [env: + OMDB_FETCH_LIMIT=] [default: 500] + --include-deleted whether to include soft-deleted records when enumerating objects + that can be soft-deleted + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands ============================================= EXECUTING COMMAND: omdb ["db", "network"] termination: Exited(2) @@ -265,8 +370,22 @@ Commands: help Print this message or the help of the given subcommand(s) Options: - --verbose Print out raw data structures from the data store - -h, --help Print help + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + --verbose Print out raw data structures from the data store + -h, --help Print help + +Connection Options: + --db-url URL of the database SQL interface [env: 
OMDB_DB_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Database Options: + --fetch-limit limit to apply to queries that fetch rows [env: + OMDB_FETCH_LIMIT=] [default: 500] + --include-deleted whether to include soft-deleted records when enumerating objects + that can be soft-deleted + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands ============================================= EXECUTING COMMAND: omdb ["mgs"] termination: Exited(2) @@ -285,8 +404,15 @@ Commands: help Print this message or the help of the given subcommand(s) Options: - --mgs-url URL of an MGS instance to query [env: OMDB_MGS_URL=] - -h, --help Print help + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: + --mgs-url URL of an MGS instance to query [env: OMDB_MGS_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands ============================================= EXECUTING COMMAND: omdb ["nexus"] termination: Exited(2) @@ -305,9 +431,16 @@ Commands: help Print this message or the help of the given subcommand(s) Options: + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: --nexus-internal-url URL of the Nexus internal API [env: OMDB_NEXUS_URL=] - -h, --help Print help + --dns-server [env: OMDB_DNS_SERVER=] + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands ============================================= EXECUTING COMMAND: omdb ["nexus", "background-tasks"] termination: Exited(2) @@ -317,16 +450,86 @@ stdout: stderr: print information about background tasks -Usage: omdb nexus background-tasks +Usage: omdb nexus background-tasks [OPTIONS] Commands: - doc Show documentation about background tasks - list Print a summary of the status of all background tasks - show Print human-readable summary of the status of each background task - help Print this message or the help of the given subcommand(s) + doc Show documentation about background tasks + list Print a summary of the status of all background tasks + show Print human-readable summary of the status of each background task + activate Activate one or more background tasks + help Print this message or the help of the given subcommand(s) Options: - -h, --help Print help + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: + --nexus-internal-url URL of the Nexus internal API [env: + OMDB_NEXUS_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands +============================================= +EXECUTING COMMAND: omdb ["nexus", "blueprints"] +termination: Exited(2) +--------------------------------------------- +stdout: +--------------------------------------------- +stderr: +interact with blueprints + +Usage: omdb nexus blueprints [OPTIONS] + +Commands: + list List all blueprints + show Show a blueprint + diff Diff two blueprints + delete Delete a blueprint + target Interact with the current target blueprint + regenerate Generate a new blueprint + import Import a blueprint + help Print this message or the help of the given subcommand(s) + +Options: + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: + --nexus-internal-url URL of the Nexus internal API [env: + OMDB_NEXUS_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Safety Options: + -w, 
--destructive Allow potentially-destructive subcommands +============================================= +EXECUTING COMMAND: omdb ["nexus", "sleds"] +termination: Exited(2) +--------------------------------------------- +stdout: +--------------------------------------------- +stderr: +interact with sleds + +Usage: omdb nexus sleds [OPTIONS] + +Commands: + list-uninitialized List all uninitialized sleds + add Add an uninitialized sled + expunge Expunge a sled (DANGEROUS) + help Print this message or the help of the given subcommand(s) + +Options: + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: + --nexus-internal-url URL of the Nexus internal API [env: + OMDB_NEXUS_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands ============================================= EXECUTING COMMAND: omdb ["sled-agent"] termination: Exited(2) @@ -345,8 +548,15 @@ Commands: help Print this message or the help of the given subcommand(s) Options: + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: --sled-agent-url URL of the Sled internal API [env: OMDB_SLED_AGENT_URL=] - -h, --help Print help + --dns-server [env: OMDB_DNS_SERVER=] + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands ============================================= EXECUTING COMMAND: omdb ["sled-agent", "zones"] termination: Exited(2) @@ -356,14 +566,22 @@ stdout: stderr: print information about zones -Usage: omdb sled-agent zones +Usage: omdb sled-agent zones [OPTIONS] Commands: list Print list of all running control plane zones help Print this message or the help of the given subcommand(s) Options: - -h, --help Print help + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: + --sled-agent-url URL of the Sled internal API [env: OMDB_SLED_AGENT_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands ============================================= EXECUTING COMMAND: omdb ["sled-agent", "zpools"] termination: Exited(2) @@ -373,12 +591,20 @@ stdout: stderr: print information about zpools -Usage: omdb sled-agent zpools +Usage: omdb sled-agent zpools [OPTIONS] Commands: list Print list of all zpools managed by the sled agent help Print this message or the help of the given subcommand(s) Options: - -h, --help Print help + --log-level log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: + --sled-agent-url URL of the Sled internal API [env: OMDB_SLED_AGENT_URL=] + --dns-server [env: OMDB_DNS_SERVER=] + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands ============================================= diff --git a/dev-tools/omicron-dev/Cargo.toml b/dev-tools/omicron-dev/Cargo.toml index 6aa480b2c6..1dcc4eada7 100644 --- a/dev-tools/omicron-dev/Cargo.toml +++ b/dev-tools/omicron-dev/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [build-dependencies] omicron-rpaths.workspace = true diff --git a/dev-tools/oxlog/Cargo.toml b/dev-tools/oxlog/Cargo.toml index 5d7cfaf5c1..9b59647691 100644 --- a/dev-tools/oxlog/Cargo.toml +++ b/dev-tools/oxlog/Cargo.toml @@ -4,11 +4,15 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = 
true camino.workspace = true chrono.workspace = true clap.workspace = true +sigpipe.workspace = true uuid.workspace = true omicron-workspace-hack.workspace = true diff --git a/dev-tools/oxlog/src/bin/oxlog.rs b/dev-tools/oxlog/src/bin/oxlog.rs index 88e067c382..ed1c1a1fc8 100644 --- a/dev-tools/oxlog/src/bin/oxlog.rs +++ b/dev-tools/oxlog/src/bin/oxlog.rs @@ -47,7 +47,7 @@ struct FilterArgs { #[arg(short, long)] archived: bool, - // Print only the extra log files + /// Print only the extra log files #[arg(short, long)] extra: bool, @@ -57,6 +57,8 @@ struct FilterArgs { } fn main() -> Result<(), anyhow::Error> { + sigpipe::reset(); + let cli = Cli::parse(); match cli.command { diff --git a/dev-tools/oxlog/src/lib.rs b/dev-tools/oxlog/src/lib.rs index 625d360368..0e72b4b13b 100644 --- a/dev-tools/oxlog/src/lib.rs +++ b/dev-tools/oxlog/src/lib.rs @@ -98,6 +98,10 @@ impl LogFile { } } } + + pub fn file_name_cmp(&self, other: &Self) -> std::cmp::Ordering { + self.path.file_name().cmp(&other.path.file_name()) + } } impl PartialEq for LogFile { @@ -142,6 +146,22 @@ pub struct SvcLogs { pub extra: Vec, } +impl SvcLogs { + /// Sort the archived and extra log files by filename. + /// + /// readdir traverses over directories in indeterminate order, so sort by + /// filename (which is enough to sort by service name and timestamp in most + /// cases). + /// + /// Generally we don't want to sort by full path, because log files may be + /// scattered across several different directories -- and we care more + /// about filename than which directory they are in. + pub fn sort_by_file_name(&mut self) { + self.archived.sort_unstable_by(LogFile::file_name_cmp); + self.extra.sort_unstable_by(LogFile::file_name_cmp); + } +} + // These probably don't warrant newtypes. They are just to make the // keys in maps a bit easier to read. type ZoneName = String; @@ -284,10 +304,19 @@ impl Zones { load_extra_logs(dir, svc_name, &mut output, filter.show_empty); } } + + sort_logs(&mut output); + output } } +fn sort_logs(output: &mut BTreeMap) { + for svc_logs in output.values_mut() { + svc_logs.sort_by_file_name(); + } +} + const OX_SMF_PREFIXES: [&str; 2] = ["oxide-", "system-illumos-"]; /// Return true if the provided file name appears to be a valid log file for an @@ -464,4 +493,60 @@ mod tests { ) .is_none()); } + + #[test] + fn test_sort_logs() { + use super::{LogFile, SvcLogs}; + use std::collections::BTreeMap; + + let mut logs = BTreeMap::new(); + logs.insert( + "blah".to_string(), + SvcLogs { + current: None, + archived: vec![ + // "foo" comes after "bar", but the sorted order should + // have 1600000000 before 1700000000. + LogFile { + path: "/bar/blah:default.log.1700000000".into(), + size: None, + modified: None, + }, + LogFile { + path: "/foo/blah:default.log.1600000000".into(), + size: None, + modified: None, + }, + ], + extra: vec![ + // "foo" comes after "bar", but the sorted order should + // have log1 before log2. 
+ LogFile { + path: "/foo/blah/sub.default.log1".into(), + size: None, + modified: None, + }, + LogFile { + path: "/bar/blah/sub.default.log2".into(), + size: None, + modified: None, + }, + ], + }, + ); + + super::sort_logs(&mut logs); + + let svc_logs = logs.get("blah").unwrap(); + assert_eq!( + svc_logs.archived[0].path, + "/foo/blah:default.log.1600000000" + ); + assert_eq!( + svc_logs.archived[1].path, + "/bar/blah:default.log.1700000000" + ); + assert_eq!(svc_logs.extra[0].path, "/foo/blah/sub.default.log1"); + assert_eq!(svc_logs.extra[1].path, "/bar/blah/sub.default.log2"); + } } diff --git a/dev-tools/reconfigurator-cli/Cargo.toml b/dev-tools/reconfigurator-cli/Cargo.toml index 8a8ea85544..5edf9d0ef8 100644 --- a/dev-tools/reconfigurator-cli/Cargo.toml +++ b/dev-tools/reconfigurator-cli/Cargo.toml @@ -4,19 +4,28 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [build-dependencies] omicron-rpaths.workspace = true [dependencies] anyhow.workspace = true +assert_matches.workspace = true camino.workspace = true clap.workspace = true +dns-service-client.workspace = true dropshot.workspace = true humantime.workspace = true indexmap.workspace = true nexus-reconfigurator-planning.workspace = true +nexus-reconfigurator-execution.workspace = true nexus-types.workspace = true omicron-common.workspace = true +omicron-uuid-kinds.workspace = true +# See omicron-rpaths for more about the "pq-sys" dependency. +pq-sys = "*" reedline.workspace = true serde_json.workspace = true slog-error-chain.workspace = true @@ -29,9 +38,16 @@ omicron-workspace-hack.workspace = true [dev-dependencies] camino-tempfile.workspace = true expectorate.workspace = true +nexus-client.workspace = true +nexus-db-queries.workspace = true +nexus-reconfigurator-preparation.workspace = true +nexus-test-utils.workspace = true +nexus-test-utils-macros.workspace = true +omicron-nexus.workspace = true omicron-test-utils.workspace = true -regex.workspace = true +serde.workspace = true subprocess.workspace = true +tokio.workspace = true # Disable doc builds by default for our binaries to work around issue # rust-lang/cargo#8373. These docs would not be very useful anyway. diff --git a/dev-tools/reconfigurator-cli/build.rs b/dev-tools/reconfigurator-cli/build.rs new file mode 100644 index 0000000000..1ba9acd41c --- /dev/null +++ b/dev-tools/reconfigurator-cli/build.rs @@ -0,0 +1,10 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// See omicron-rpaths for documentation. +// NOTE: This file MUST be kept in sync with the other build.rs files in this +// repository. 
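One detail behind the sort_by_file_name comment in the oxlog changes above: archived log names end in a numeric rotation timestamp, and as long as those suffixes have the same number of digits, plain lexicographic file-name comparison gives chronological order. A tiny self-contained check of that property (illustrative only, not part of the patch):

    fn main() {
        // Same service, two rotation timestamps of equal digit count: string
        // order and chronological order agree, which is why sorting archived
        // logs by file name is sufficient in the common case.
        let older = "blah:default.log.1600000000";
        let newer = "blah:default.log.1700000000";
        assert!(older < newer);
    }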
+fn main() { + omicron_rpaths::configure_default_omicron_rpaths(); +} diff --git a/dev-tools/reconfigurator-cli/src/main.rs b/dev-tools/reconfigurator-cli/src/main.rs index b59fc96703..1c9d9866a8 100644 --- a/dev-tools/reconfigurator-cli/src/main.rs +++ b/dev-tools/reconfigurator-cli/src/main.rs @@ -8,18 +8,35 @@ use anyhow::{anyhow, bail, Context}; use camino::Utf8PathBuf; use clap::CommandFactory; use clap::FromArgMatches; +use clap::ValueEnum; use clap::{Args, Parser, Subcommand}; +use dns_service_client::DnsDiff; use indexmap::IndexMap; +use nexus_reconfigurator_execution::blueprint_external_dns_config; +use nexus_reconfigurator_execution::blueprint_internal_dns_config; use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; +use nexus_reconfigurator_planning::blueprint_builder::EnsureMultiple; use nexus_reconfigurator_planning::planner::Planner; use nexus_reconfigurator_planning::system::{ SledBuilder, SledHwInventory, SystemDescription, }; +use nexus_types::deployment::BlueprintZoneFilter; +use nexus_types::deployment::OmicronZoneNic; +use nexus_types::deployment::PlanningInput; +use nexus_types::deployment::SledFilter; use nexus_types::deployment::{Blueprint, UnstableReconfiguratorState}; +use nexus_types::internal_api::params::DnsConfigParams; use nexus_types::inventory::Collection; use nexus_types::inventory::OmicronZonesConfig; +use nexus_types::inventory::SledRole; use omicron_common::api::external::Generation; +use omicron_common::api::external::Name; +use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::VnicUuid; use reedline::{Reedline, Signal}; +use std::collections::BTreeMap; use std::io::BufRead; use swrite::{swriteln, SWrite}; use tabled::Tabled; @@ -36,14 +53,117 @@ struct ReconfiguratorSim { system: SystemDescription, /// inventory collections created by the user - collections: IndexMap, + collections: IndexMap, /// blueprints created by the user blueprints: IndexMap, + /// internal DNS configurations + internal_dns: BTreeMap, + /// external DNS configurations + external_dns: BTreeMap, + + /// Set of silo names configured + /// + /// These are used to determine the contents of external DNS. + silo_names: Vec, + + /// External DNS zone name configured + external_dns_zone_name: String, + + /// Policy overrides + num_nexus: Option, + log: slog::Logger, } +impl ReconfiguratorSim { + fn blueprint_lookup(&self, id: Uuid) -> Result<&Blueprint, anyhow::Error> { + self.blueprints + .get(&id) + .ok_or_else(|| anyhow!("no such blueprint: {}", id)) + } + + fn blueprint_insert_new(&mut self, blueprint: Blueprint) { + let previous = self.blueprints.insert(blueprint.id, blueprint); + assert!(previous.is_none()); + } + + fn blueprint_insert_loaded( + &mut self, + blueprint: Blueprint, + ) -> Result<(), anyhow::Error> { + let entry = self.blueprints.entry(blueprint.id); + if let indexmap::map::Entry::Occupied(_) = &entry { + return Err(anyhow!("blueprint already exists: {}", blueprint.id)); + } + let _ = entry.or_insert(blueprint); + Ok(()) + } + + fn planning_input( + &self, + parent_blueprint: &Blueprint, + ) -> anyhow::Result { + let mut builder = self + .system + .to_planning_input_builder() + .context("generating planning input builder")?; + + // The internal and external DNS numbers that go here are supposed to be + // the _current_ internal and external DNS generations at the point + // when planning happened. 
This is racy (these generations can change + // immediately after they're fetched from the database) but correctness + // only requires that the values here be *no newer* than the real + // values so it's okay if the real values get changed. + // + // The problem is we have no real system here to fetch these values + // from. What should the value be? + // + // - If we assume that the parent blueprint here was successfully + // executed immediately before generating this plan, then the values + // here should come from the generation number produced by executing + // the parent blueprint. + // + // - If the parent blueprint was never executed, or execution is still + // in progress, or if other blueprints have been executed in the + // meantime that changed DNS, then the values here could be different + // (older if the blueprint was never executed or is currently + // executing and newer if other blueprints have changed DNS in the + // meantime). + // + // But in this CLI, there's no execution at all. As a result, there's + // no way to really choose between these -- and it doesn't really + // matter, either. We'll just pick the parent blueprint's. + builder.set_internal_dns_version(parent_blueprint.internal_dns_version); + builder.set_external_dns_version(parent_blueprint.external_dns_version); + + for (_, zone) in + parent_blueprint.all_omicron_zones(BlueprintZoneFilter::All) + { + if let Some((external_ip, nic)) = + zone.zone_type.external_networking() + { + builder + .add_omicron_zone_external_ip(zone.id, external_ip) + .context("adding omicron zone external IP")?; + let nic = OmicronZoneNic { + // TODO-cleanup use `TypedUuid` everywhere + id: VnicUuid::from_untyped_uuid(nic.id), + mac: nic.mac, + ip: nic.ip, + slot: nic.slot, + primary: nic.primary, + }; + builder + .add_omicron_zone_nic(zone.id, nic) + .context("adding omicron zone NIC")?; + } + } + Ok(builder.build()) + } +} + /// interactive REPL for exploring the planner #[derive(Parser, Debug)] struct CmdReconfiguratorSim { @@ -65,7 +185,12 @@ fn main() -> anyhow::Result<()> { system: SystemDescription::new(), collections: IndexMap::new(), blueprints: IndexMap::new(), + internal_dns: BTreeMap::new(), + external_dns: BTreeMap::new(), log, + silo_names: vec!["example-silo".parse().unwrap()], + external_dns_zone_name: String::from("oxide.example"), + num_nexus: None, }; if let Some(input_file) = cmd.input_file { @@ -162,18 +287,23 @@ fn process_entry(sim: &mut ReconfiguratorSim, entry: String) -> LoopResult { Commands::SledList => cmd_sled_list(sim), Commands::SledAdd(args) => cmd_sled_add(sim, args), Commands::SledShow(args) => cmd_sled_show(sim, args), + Commands::SiloList => cmd_silo_list(sim), + Commands::SiloAdd(args) => cmd_silo_add(sim, args), + Commands::SiloRemove(args) => cmd_silo_remove(sim, args), Commands::InventoryList => cmd_inventory_list(sim), Commands::InventoryGenerate => cmd_inventory_generate(sim), Commands::BlueprintList => cmd_blueprint_list(sim), - Commands::BlueprintFromInventory(args) => { - cmd_blueprint_from_inventory(sim, args) - } + Commands::BlueprintEdit(args) => cmd_blueprint_edit(sim, args), Commands::BlueprintPlan(args) => cmd_blueprint_plan(sim, args), Commands::BlueprintShow(args) => cmd_blueprint_show(sim, args), Commands::BlueprintDiff(args) => cmd_blueprint_diff(sim, args), + Commands::BlueprintDiffDns(args) => cmd_blueprint_diff_dns(sim, args), Commands::BlueprintDiffInventory(args) => { cmd_blueprint_diff_inventory(sim, args) } + Commands::BlueprintSave(args) => cmd_blueprint_save(sim, 
args), + Commands::Show => cmd_show(sim), + Commands::Set(args) => cmd_set(sim, args), Commands::Load(args) => cmd_load(sim, args), Commands::FileContents(args) => cmd_file_contents(args), Commands::Save(args) => cmd_save(sim, args), @@ -206,6 +336,13 @@ enum Commands { /// show details about one sled SledShow(SledArgs), + /// list silos + SiloList, + /// add a silo + SiloAdd(SiloAddRemoveArgs), + /// remove a silo + SiloRemove(SiloAddRemoveArgs), + /// list all inventory collections InventoryList, /// generates an inventory collection from the configured sleds @@ -213,16 +350,26 @@ enum Commands { /// list all blueprints BlueprintList, - /// generate a blueprint that represents the contents of an inventory - BlueprintFromInventory(InventoryArgs), /// run planner to generate a new blueprint BlueprintPlan(BlueprintPlanArgs), + /// edit contents of a blueprint directly + BlueprintEdit(BlueprintEditArgs), /// show details about a blueprint BlueprintShow(BlueprintArgs), /// show differences between two blueprints BlueprintDiff(BlueprintDiffArgs), + /// show differences between a blueprint and a particular DNS version + BlueprintDiffDns(BlueprintDiffDnsArgs), /// show differences between a blueprint and an inventory collection BlueprintDiffInventory(BlueprintDiffInventoryArgs), + /// write one blueprint to a file + BlueprintSave(BlueprintSaveArgs), + + /// show system properties + Show, + /// set system properties + #[command(subcommand)] + Set(SetArgs), /// save state to a file Save(SaveArgs), @@ -235,19 +382,25 @@ enum Commands { #[derive(Debug, Args)] struct SledAddArgs { /// id of the new sled - sled_id: Option, + sled_id: Option, } #[derive(Debug, Args)] struct SledArgs { /// id of the sled - sled_id: Uuid, + sled_id: SledUuid, +} + +#[derive(Debug, Args)] +struct SiloAddRemoveArgs { + /// name of the silo + silo_name: Name, } #[derive(Debug, Args)] struct InventoryArgs { /// id of the inventory collection to use in planning - collection_id: Uuid, + collection_id: CollectionUuid, } #[derive(Debug, Args)] @@ -255,7 +408,30 @@ struct BlueprintPlanArgs { /// id of the blueprint on which this one will be based parent_blueprint_id: Uuid, /// id of the inventory collection to use in planning - collection_id: Uuid, + collection_id: CollectionUuid, +} + +#[derive(Debug, Args)] +struct BlueprintEditArgs { + /// id of the blueprint to edit + blueprint_id: Uuid, + /// "creator" field for the new blueprint + #[arg(long)] + creator: Option, + /// "comment" field for the new blueprint + #[arg(long)] + comment: Option, + #[command(subcommand)] + edit_command: BlueprintEditCommands, +} + +#[derive(Debug, Subcommand)] +enum BlueprintEditCommands { + /// add a Nexus instance to a particular sled + AddNexus { + /// sled on which to deploy the new instance + sled_id: SledUuid, + }, } #[derive(Debug, Args)] @@ -264,12 +440,36 @@ struct BlueprintArgs { blueprint_id: Uuid, } +#[derive(Debug, Args)] +struct BlueprintDiffDnsArgs { + /// DNS group (internal or external) + dns_group: CliDnsGroup, + /// DNS version to diff against + dns_version: u32, + /// id of the blueprint + blueprint_id: Uuid, +} + +#[derive(Clone, Copy, Debug, ValueEnum)] +enum CliDnsGroup { + Internal, + External, +} + #[derive(Debug, Args)] struct BlueprintDiffInventoryArgs { /// id of the inventory collection - collection_id: Uuid, + collection_id: CollectionUuid, + /// id of the blueprint + blueprint_id: Uuid, +} + +#[derive(Debug, Args)] +struct BlueprintSaveArgs { /// id of the blueprint blueprint_id: Uuid, + /// output file + filename: 
Utf8PathBuf, } #[derive(Debug, Args)] @@ -280,6 +480,14 @@ struct BlueprintDiffArgs { blueprint2_id: Uuid, } +#[derive(Debug, Subcommand)] +enum SetArgs { + /// target number of Nexus instances (for planning) + NumNexus { num_nexus: u16 }, + /// system's external DNS zone name (suffix) + ExternalDnsZoneName { zone_name: String }, +} + #[derive(Debug, Args)] struct LoadArgs { /// input file @@ -287,7 +495,7 @@ struct LoadArgs { /// id of inventory collection to use for sled details /// (may be omitted only if the file contains only one collection) - collection_id: Option, + collection_id: Option, } #[derive(Debug, Args)] @@ -304,23 +512,63 @@ struct SaveArgs { // Command handlers +fn cmd_silo_list( + sim: &mut ReconfiguratorSim, +) -> anyhow::Result> { + let mut s = String::new(); + for silo_name in &sim.silo_names { + swriteln!(s, "{}", silo_name); + } + Ok(Some(s)) +} + +fn cmd_silo_add( + sim: &mut ReconfiguratorSim, + args: SiloAddRemoveArgs, +) -> anyhow::Result> { + if sim.silo_names.contains(&args.silo_name) { + bail!("silo already exists: {:?}", &args.silo_name); + } + + sim.silo_names.push(args.silo_name); + Ok(None) +} + +fn cmd_silo_remove( + sim: &mut ReconfiguratorSim, + args: SiloAddRemoveArgs, +) -> anyhow::Result> { + let size_before = sim.silo_names.len(); + sim.silo_names.retain(|n| *n != args.silo_name); + if sim.silo_names.len() == size_before { + bail!("no such silo: {:?}", &args.silo_name); + } + Ok(None) +} + fn cmd_sled_list( sim: &mut ReconfiguratorSim, ) -> anyhow::Result> { #[derive(Tabled)] #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] struct Sled { - id: Uuid, + id: SledUuid, nzpools: usize, subnet: String, } - let policy = sim.system.to_policy().context("failed to generate policy")?; - let rows = policy.sleds.iter().map(|(sled_id, sled_resources)| Sled { - id: *sled_id, - subnet: sled_resources.subnet.net().to_string(), - nzpools: sled_resources.zpools.len(), - }); + let planning_input = sim + .system + .to_planning_input_builder() + .context("failed to generate planning input")? + .build(); + let rows = planning_input.all_sled_resources(SledFilter::Commissioned).map( + |(sled_id, sled_resources)| Sled { + id: sled_id, + subnet: sled_resources.subnet.net().to_string(), + nzpools: sled_resources.zpools.len(), + }, + ); let table = tabled::Table::new(rows) .with(tabled::settings::Style::empty()) .with(tabled::settings::Padding::new(0, 1, 0, 0)) @@ -345,18 +593,22 @@ fn cmd_sled_show( sim: &mut ReconfiguratorSim, args: SledArgs, ) -> anyhow::Result> { - let policy = sim.system.to_policy().context("failed to generate policy")?; + let planning_input = sim + .system + .to_planning_input_builder() + .context("failed to generate planning_input builder")? 
+ .build(); let sled_id = args.sled_id; - let sled_resources = policy - .sleds - .get(&sled_id) - .ok_or_else(|| anyhow!("no sled with id {:?}", sled_id))?; + let sled_resources = planning_input + .sled_resources(&sled_id) + .ok_or_else(|| anyhow!("no sled with id {sled_id}"))?; let mut s = String::new(); swriteln!(s, "sled {}", sled_id); swriteln!(s, "subnet {}", sled_resources.subnet.net()); swriteln!(s, "zpools ({}):", sled_resources.zpools.len()); - for z in &sled_resources.zpools { - swriteln!(s, " {:?}", z); + for (zpool, disk) in &sled_resources.zpools { + swriteln!(s, " {:?}", zpool); + swriteln!(s, " ↳ {:?}", disk); } Ok(Some(s)) } @@ -367,7 +619,7 @@ fn cmd_inventory_list( #[derive(Tabled)] #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] struct InventoryRow { - id: Uuid, + id: CollectionUuid, nerrors: usize, time_done: String, } @@ -397,8 +649,9 @@ fn cmd_inventory_generate( sim.system.to_collection_builder().context("generating inventory")?; // For an inventory we just generated from thin air, pretend like each sled // has no zones on it. - let sled_ids = sim.system.to_policy().unwrap().sleds.into_keys(); - for sled_id in sled_ids { + let planning_input = + sim.system.to_planning_input_builder().unwrap().build(); + for sled_id in planning_input.all_sled_ids(SledFilter::Commissioned) { builder .found_sled_omicron_zones( "fake sled agent", @@ -439,57 +692,23 @@ fn cmd_blueprint_list( Ok(Some(table)) } -fn cmd_blueprint_from_inventory( - sim: &mut ReconfiguratorSim, - args: InventoryArgs, -) -> anyhow::Result> { - let collection_id = args.collection_id; - let collection = sim - .collections - .get(&collection_id) - .ok_or_else(|| anyhow!("no such collection: {}", collection_id))?; - let dns_version = Generation::new(); - let policy = sim.system.to_policy().context("generating policy")?; - let creator = "reconfigurator-sim"; - let blueprint = BlueprintBuilder::build_initial_from_collection( - collection, - dns_version, - dns_version, - &policy, - creator, - ) - .context("building collection")?; - let rv = format!( - "generated blueprint {} from inventory collection {}", - blueprint.id, collection_id - ); - sim.blueprints.insert(blueprint.id, blueprint); - Ok(Some(rv)) -} - fn cmd_blueprint_plan( sim: &mut ReconfiguratorSim, args: BlueprintPlanArgs, ) -> anyhow::Result> { let parent_blueprint_id = args.parent_blueprint_id; let collection_id = args.collection_id; - let parent_blueprint = sim - .blueprints - .get(&parent_blueprint_id) - .ok_or_else(|| anyhow!("no such blueprint: {}", parent_blueprint_id))?; + let parent_blueprint = sim.blueprint_lookup(parent_blueprint_id)?; let collection = sim .collections .get(&collection_id) .ok_or_else(|| anyhow!("no such collection: {}", collection_id))?; - let dns_version = Generation::new(); - let policy = sim.system.to_policy().context("generating policy")?; let creator = "reconfigurator-sim"; + let planning_input = sim.planning_input(parent_blueprint)?; let planner = Planner::new_based_on( sim.log.clone(), parent_blueprint, - dns_version, - dns_version, - &policy, + &planning_input, creator, collection, ) @@ -499,7 +718,47 @@ fn cmd_blueprint_plan( "generated blueprint {} based on parent blueprint {}", blueprint.id, parent_blueprint_id, ); - sim.blueprints.insert(blueprint.id, blueprint); + sim.blueprint_insert_new(blueprint); + Ok(Some(rv)) +} + +fn cmd_blueprint_edit( + sim: &mut ReconfiguratorSim, + args: BlueprintEditArgs, +) -> anyhow::Result> { + let blueprint_id = args.blueprint_id; + let blueprint = sim.blueprint_lookup(blueprint_id)?; 
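+    // Note that editing is non-destructive: the code below seeds a
+    // BlueprintBuilder from the looked-up parent blueprint, applies the
+    // requested edit, and stores the result as a brand-new blueprint whose
+    // parent_blueprint_id points back at this one. For example (IDs are
+    // hypothetical):
+    //
+    //     blueprint-edit <parent-blueprint-id> add-nexus <sled-id>
+    //
+    // produces a new blueprint with one more Nexus zone on that sled.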
+ let creator = args.creator.as_deref().unwrap_or("reconfigurator-cli"); + let planning_input = sim.planning_input(blueprint)?; + let mut builder = BlueprintBuilder::new_based_on( + &sim.log, + blueprint, + &planning_input, + creator, + ) + .context("creating blueprint builder")?; + + if let Some(comment) = args.comment { + builder.comment(comment); + } + + let label = match args.edit_command { + BlueprintEditCommands::AddNexus { sled_id } => { + let current = builder.sled_num_nexus_zones(sled_id); + let added = builder + .sled_ensure_zone_multiple_nexus(sled_id, current + 1) + .context("failed to add Nexus zone")?; + assert_matches::assert_matches!(added, EnsureMultiple::Added(1)); + format!("added Nexus zone to sled {}", sled_id) + } + }; + + let new_blueprint = builder.build(); + let rv = format!( + "blueprint {} created from blueprint {}: {}", + new_blueprint.id, blueprint_id, label + ); + sim.blueprint_insert_new(new_blueprint); Ok(Some(rv)) } @@ -507,10 +766,7 @@ fn cmd_blueprint_show( sim: &mut ReconfiguratorSim, args: BlueprintArgs, ) -> anyhow::Result> { - let blueprint = sim - .blueprints - .get(&args.blueprint_id) - .ok_or_else(|| anyhow!("no such blueprint: {}", args.blueprint_id))?; + let blueprint = sim.blueprint_lookup(args.blueprint_id)?; Ok(Some(format!("{}", blueprint.display()))) } @@ -518,19 +774,115 @@ fn cmd_blueprint_diff( sim: &mut ReconfiguratorSim, args: BlueprintDiffArgs, ) -> anyhow::Result> { + let mut rv = String::new(); let blueprint1_id = args.blueprint1_id; let blueprint2_id = args.blueprint2_id; - let blueprint1 = sim - .blueprints - .get(&blueprint1_id) - .ok_or_else(|| anyhow!("no such blueprint: {}", blueprint1_id))?; - let blueprint2 = sim - .blueprints - .get(&blueprint2_id) - .ok_or_else(|| anyhow!("no such blueprint: {}", blueprint2_id))?; + let blueprint1 = sim.blueprint_lookup(blueprint1_id)?; + let blueprint2 = sim.blueprint_lookup(blueprint2_id)?; + + let sled_diff = blueprint2.diff_since_blueprint(&blueprint1); + swriteln!(rv, "{}", sled_diff.display()); + + // Diff'ing DNS is a little trickier. First, compute what DNS should be for + // each blueprint. To do that we need to construct a list of sleds suitable + // for the executor. + let sleds_by_id = make_sleds_by_id(&sim)?; + let internal_dns_config1 = blueprint_internal_dns_config( + &blueprint1, + &sleds_by_id, + &Default::default(), + ); + let internal_dns_config2 = blueprint_internal_dns_config( + &blueprint2, + &sleds_by_id, + &Default::default(), + ); + let dns_diff = DnsDiff::new(&internal_dns_config1, &internal_dns_config2) + .context("failed to assemble DNS diff")?; + swriteln!(rv, "internal DNS:\n{}", dns_diff); + + let external_dns_config1 = blueprint_external_dns_config( + &blueprint1, + &sim.silo_names, + sim.external_dns_zone_name.clone(), + ); + let external_dns_config2 = blueprint_external_dns_config( + &blueprint2, + &sim.silo_names, + sim.external_dns_zone_name.clone(), + ); + let dns_diff = DnsDiff::new(&external_dns_config1, &external_dns_config2) + .context("failed to assemble external DNS diff")?; + swriteln!(rv, "external DNS:\n{}", dns_diff); - let diff = blueprint1.diff_sleds(&blueprint2); - Ok(Some(diff.display().to_string())) + Ok(Some(rv)) +} + +fn make_sleds_by_id( + sim: &ReconfiguratorSim, +) -> Result< + BTreeMap, + anyhow::Error, +> { + let collection = sim + .system + .to_collection_builder() + .context( + "unexpectedly failed to create collection for current set of sleds", + )? 
+ .build(); + let sleds_by_id: BTreeMap<_, _> = collection + .sled_agents + .iter() + .map(|(sled_id, sled_agent_info)| { + let sled = nexus_reconfigurator_execution::Sled::new( + *sled_id, + sled_agent_info.sled_agent_address, + sled_agent_info.sled_role == SledRole::Scrimlet, + ); + (*sled_id, sled) + }) + .collect(); + Ok(sleds_by_id) +} + +fn cmd_blueprint_diff_dns( + sim: &mut ReconfiguratorSim, + args: BlueprintDiffDnsArgs, +) -> anyhow::Result> { + let dns_group = args.dns_group; + let dns_version = Generation::from(args.dns_version); + let blueprint_id = args.blueprint_id; + let blueprint = sim.blueprint_lookup(blueprint_id)?; + + let existing_dns_config = match dns_group { + CliDnsGroup::Internal => sim.internal_dns.get(&dns_version), + CliDnsGroup::External => sim.external_dns.get(&dns_version), + } + .ok_or_else(|| { + anyhow!("no such {:?} DNS version: {}", dns_group, dns_version) + })?; + + let blueprint_dns_zone = match dns_group { + CliDnsGroup::Internal => { + let sleds_by_id = make_sleds_by_id(sim)?; + blueprint_internal_dns_config( + blueprint, + &sleds_by_id, + &Default::default(), + ) + } + CliDnsGroup::External => blueprint_external_dns_config( + blueprint, + &sim.silo_names, + sim.external_dns_zone_name.clone(), + ), + }; + + let existing_dns_zone = existing_dns_config.sole_zone()?; + let dns_diff = DnsDiff::new(&existing_dns_zone, &blueprint_dns_zone) + .context("failed to assemble DNS diff")?; + Ok(Some(dns_diff.to_string())) } fn cmd_blueprint_diff_inventory( @@ -542,47 +894,132 @@ fn cmd_blueprint_diff_inventory( let collection = sim.collections.get(&collection_id).ok_or_else(|| { anyhow!("no such inventory collection: {}", collection_id) })?; - let blueprint = sim - .blueprints - .get(&blueprint_id) - .ok_or_else(|| anyhow!("no such blueprint: {}", blueprint_id))?; - - let diff = blueprint.diff_sleds_from_collection(&collection); + let blueprint = sim.blueprint_lookup(blueprint_id)?; + let diff = blueprint.diff_since_collection(&collection); Ok(Some(diff.display().to_string())) } +fn cmd_blueprint_save( + sim: &mut ReconfiguratorSim, + args: BlueprintSaveArgs, +) -> anyhow::Result> { + let blueprint_id = args.blueprint_id; + let blueprint = sim.blueprint_lookup(blueprint_id)?; + + let output_path = &args.filename; + let output_str = serde_json::to_string_pretty(&blueprint) + .context("serializing blueprint")?; + std::fs::write(&output_path, &output_str) + .with_context(|| format!("write {:?}", output_path))?; + Ok(Some(format!("saved blueprint {} to {:?}", blueprint_id, output_path))) +} + fn cmd_save( sim: &mut ReconfiguratorSim, args: SaveArgs, ) -> anyhow::Result> { - let policy = sim.system.to_policy().context("creating policy")?; + let planning_input = sim + .system + .to_planning_input_builder() + .context("creating planning input builder")? 
+ .build(); let saved = UnstableReconfiguratorState { - policy, + planning_input, collections: sim.collections.values().cloned().collect(), blueprints: sim.blueprints.values().cloned().collect(), + internal_dns: sim.internal_dns.clone(), + external_dns: sim.external_dns.clone(), + silo_names: sim.silo_names.clone(), + external_dns_zone_names: vec![sim.external_dns_zone_name.clone()], }; let output_path = &args.filename; - let outfile = std::fs::OpenOptions::new() - .create_new(true) - .write(true) - .open(output_path) - .with_context(|| format!("open {:?}", output_path))?; - serde_json::to_writer_pretty(&outfile, &saved) - .with_context(|| format!("writing to {:?}", output_path)) - .unwrap_or_else(|e| panic!("{:#}", e)); + let output_str = + serde_json::to_string_pretty(&saved).context("serializing state")?; + std::fs::write(&output_path, &output_str) + .with_context(|| format!("write {:?}", output_path))?; Ok(Some(format!( - "saved policy, collections, and blueprints to {:?}", + "saved planning input, collections, and blueprints to {:?}", output_path ))) } +fn cmd_show(sim: &mut ReconfiguratorSim) -> anyhow::Result> { + let mut s = String::new(); + do_print_properties(&mut s, sim); + swriteln!( + s, + "target number of Nexus instances: {}", + match sim.num_nexus { + Some(n) => n.to_string(), + None => String::from("default"), + } + ); + Ok(Some(s)) +} + +fn do_print_properties(s: &mut String, sim: &ReconfiguratorSim) { + swriteln!( + s, + "configured external DNS zone name: {}", + sim.external_dns_zone_name, + ); + swriteln!( + s, + "configured silo names: {}", + sim.silo_names + .iter() + .map(|s| s.as_str()) + .collect::>() + .join(", ") + ); + swriteln!( + s, + "internal DNS generations: {}", + sim.internal_dns + .keys() + .map(|s| s.to_string()) + .collect::>() + .join(", "), + ); + swriteln!( + s, + "external DNS generations: {}", + sim.external_dns + .keys() + .map(|s| s.to_string()) + .collect::>() + .join(", "), + ); +} + +fn cmd_set( + sim: &mut ReconfiguratorSim, + args: SetArgs, +) -> anyhow::Result> { + Ok(Some(match args { + SetArgs::NumNexus { num_nexus } => { + let rv = format!("{:?} -> {}", sim.num_nexus, num_nexus); + sim.num_nexus = Some(num_nexus); + sim.system.target_nexus_zone_count(usize::from(num_nexus)); + rv + } + SetArgs::ExternalDnsZoneName { zone_name } => { + let rv = + format!("{:?} -> {:?}", sim.external_dns_zone_name, zone_name); + sim.external_dns_zone_name = zone_name; + rv + } + })) +} + fn read_file( input_path: &camino::Utf8Path, ) -> anyhow::Result { let file = std::fs::File::open(input_path) .with_context(|| format!("open {:?}", input_path))?; - serde_json::from_reader(file) + let bufread = std::io::BufReader::new(file); + serde_json::from_reader(bufread) .with_context(|| format!("read {:?}", input_path)) } @@ -633,9 +1070,15 @@ fn cmd_load( }, )?; - let current_policy = sim.system.to_policy().context("generating policy")?; - for (sled_id, sled_resources) in loaded.policy.sleds { - if current_policy.sleds.contains_key(&sled_id) { + let current_planning_input = sim + .system + .to_planning_input_builder() + .context("generating planning input")? 
+ .build(); + for (sled_id, sled_details) in + loaded.planning_input.all_sleds(SledFilter::Commissioned) + { + if current_planning_input.sled_resources(&sled_id).is_some() { swriteln!( s, "sled {}: skipped (one with \ @@ -658,34 +1101,26 @@ fn cmd_load( continue; }; - let inventory_sp = match &inventory_sled_agent.baseboard_id { - Some(baseboard_id) => { - let inv_sp = primary_collection - .sps - .get(baseboard_id) - .ok_or_else(|| { - anyhow!( - "error: load sled {}: missing SP inventory", - sled_id - ) - })?; - let inv_rot = primary_collection - .rots - .get(baseboard_id) - .ok_or_else(|| { - anyhow!( - "error: load sled {}: missing RoT inventory", - sled_id - ) - })?; - Some(SledHwInventory { baseboard_id, sp: inv_sp, rot: inv_rot }) - } - None => None, - }; + let inventory_sp = inventory_sled_agent.baseboard_id.as_ref().and_then( + |baseboard_id| { + let inv_sp = primary_collection.sps.get(baseboard_id); + let inv_rot = primary_collection.rots.get(baseboard_id); + if let (Some(inv_sp), Some(inv_rot)) = (inv_sp, inv_rot) { + Some(SledHwInventory { + baseboard_id: &baseboard_id, + sp: inv_sp, + rot: inv_rot, + }) + } else { + None + } + }, + ); let result = sim.system.sled_full( sled_id, - sled_resources, + sled_details.policy, + sled_details.resources.clone(), inventory_sp, inventory_sled_agent, ); @@ -713,18 +1148,48 @@ fn cmd_load( } for blueprint in loaded.blueprints { - if sim.blueprints.contains_key(&blueprint.id) { + let blueprint_id = blueprint.id; + match sim.blueprint_insert_loaded(blueprint) { + Ok(_) => { + swriteln!(s, "blueprint {} loaded", blueprint_id); + } + Err(error) => { + swriteln!( + s, + "blueprint {}: skipped ({:#})", + blueprint_id, + error + ); + } + } + } + + sim.system.service_ip_pool_ranges( + loaded.planning_input.service_ip_pool_ranges().to_vec(), + ); + swriteln!( + s, + "loaded service IP pool ranges: {:?}", + loaded.planning_input.service_ip_pool_ranges() + ); + + sim.internal_dns = loaded.internal_dns; + sim.external_dns = loaded.external_dns; + sim.silo_names = loaded.silo_names; + + let nnames = loaded.external_dns_zone_names.len(); + if nnames > 0 { + if nnames > 1 { swriteln!( s, - "blueprint {}: skipped (one with the \ - same id is already loaded)", - blueprint.id + "warn: found {} external DNS names; using only the first one", + nnames ); - } else { - swriteln!(s, "blueprint {} loaded", blueprint.id); - sim.blueprints.insert(blueprint.id, blueprint); } + sim.external_dns_zone_name = + loaded.external_dns_zone_names.into_iter().next().unwrap(); } + do_print_properties(&mut s, sim); swriteln!(s, "loaded data from {:?}", input_path); Ok(Some(s)) @@ -735,7 +1200,9 @@ fn cmd_file_contents(args: FileContentsArgs) -> anyhow::Result> { let mut s = String::new(); - for (sled_id, sled_resources) in loaded.policy.sleds { + for (sled_id, sled_resources) in + loaded.planning_input.all_sled_resources(SledFilter::Commissioned) + { swriteln!( s, "sled: {} (subnet: {}, zpools: {})", @@ -765,5 +1232,14 @@ fn cmd_file_contents(args: FileContentsArgs) -> anyhow::Result> { ); } + swriteln!(s, "internal DNS generations: {:?}", loaded.internal_dns.keys(),); + swriteln!(s, "external DNS generations: {:?}", loaded.external_dns.keys(),); + swriteln!(s, "silo names: {:?}", loaded.silo_names); + swriteln!( + s, + "external DNS zone names: {}", + loaded.external_dns_zone_names.join(", ") + ); + Ok(Some(s)) } diff --git a/dev-tools/reconfigurator-cli/tests/config.test.toml b/dev-tools/reconfigurator-cli/tests/config.test.toml new file mode 120000 index 0000000000..6050ca47dd 
--- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/config.test.toml @@ -0,0 +1 @@ +../../../nexus/tests/config.test.toml \ No newline at end of file diff --git a/dev-tools/reconfigurator-cli/tests/output/cmd-stdout b/dev-tools/reconfigurator-cli/tests/output/cmd-stdout index 10b158f218..a2d6d3d17b 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmd-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmd-stdout @@ -9,50 +9,60 @@ ID > -> sled-show REDACTED_UUID_REDACTED_UUID_REDACTED -error: no sled with id REDACTED_UUID_REDACTED_UUID_REDACTED +> sled-show ..................... +error: no sled with id ..................... -> sled-add REDACTED_UUID_REDACTED_UUID_REDACTED +> sled-add ..................... added sled > sled-list ID NZPOOLS SUBNET -REDACTED_UUID_REDACTED_UUID_REDACTED 10 fd00:1122:3344:101::/64 +..................... 10 fd00:1122:3344:101::/64 -> sled-show REDACTED_UUID_REDACTED_UUID_REDACTED -sled REDACTED_UUID_REDACTED_UUID_REDACTED +> sled-show ..................... +sled ..................... subnet fd00:1122:3344:101::/64 zpools (10): - ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") - ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") - ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") - ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") - ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") - ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") - ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") - ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") - ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") - ZpoolName("oxp_REDACTED_UUID_REDACTED_UUID_REDACTED") - - -> sled-add REDACTED_UUID_REDACTED_UUID_REDACTED + ..................... (zpool) + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ..................... (zpool) + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ..................... (zpool) + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ..................... (zpool) + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ..................... (zpool) + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ..................... (zpool) + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ..................... (zpool) + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ..................... 
(zpool) + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ..................... (zpool) + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + ..................... (zpool) + ↳ SledDisk { disk_identity: DiskIdentity { vendor: "fake-vendor", model: "fake-model", serial: "serial-....................." }, disk_id: ..................... (physical_disk), policy: InService, state: Active } + + +> sled-add ..................... added sled -> sled-add REDACTED_UUID_REDACTED_UUID_REDACTED +> sled-add ..................... added sled > sled-list ID NZPOOLS SUBNET -REDACTED_UUID_REDACTED_UUID_REDACTED 10 fd00:1122:3344:103::/64 -REDACTED_UUID_REDACTED_UUID_REDACTED 10 fd00:1122:3344:102::/64 -REDACTED_UUID_REDACTED_UUID_REDACTED 10 fd00:1122:3344:101::/64 +..................... 10 fd00:1122:3344:103::/64 +..................... 10 fd00:1122:3344:102::/64 +..................... 10 fd00:1122:3344:101::/64 > > inventory-generate -generated inventory collection REDACTED_UUID_REDACTED_UUID_REDACTED from configured sleds +generated inventory collection ..................... from configured sleds > inventory-list ID NERRORS TIME_DONE -REDACTED_UUID_REDACTED_UUID_REDACTED 0 +..................... 0 diff --git a/dev-tools/reconfigurator-cli/tests/test_basic.rs b/dev-tools/reconfigurator-cli/tests/test_basic.rs index 6048aece1b..1ae78487a3 100644 --- a/dev-tools/reconfigurator-cli/tests/test_basic.rs +++ b/dev-tools/reconfigurator-cli/tests/test_basic.rs @@ -2,14 +2,34 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
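+//! Tests for the reconfigurator-cli binary: a smoke test that runs a canned
+//! command script against expected output, plus an end-to-end test that edits
+//! a blueprint from a running control plane and imports the result back into
+//! Nexus.
+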
+use anyhow::Context;
+use camino::Utf8Path;
 use expectorate::assert_contents;
+use nexus_db_queries::authn;
+use nexus_db_queries::authz;
+use nexus_db_queries::context::OpContext;
+use nexus_test_utils::SLED_AGENT_UUID;
+use nexus_test_utils_macros::nexus_test;
+use nexus_types::deployment::Blueprint;
+use nexus_types::deployment::UnstableReconfiguratorState;
+use omicron_common::api::external::Error;
+use omicron_test_utils::dev::poll::wait_for_condition;
+use omicron_test_utils::dev::poll::CondCheckError;
 use omicron_test_utils::dev::test_cmds::assert_exit_code;
 use omicron_test_utils::dev::test_cmds::path_to_executable;
 use omicron_test_utils::dev::test_cmds::redact_variable;
 use omicron_test_utils::dev::test_cmds::run_command;
 use omicron_test_utils::dev::test_cmds::EXIT_SUCCESS;
+use omicron_uuid_kinds::SledUuid;
+use slog::debug;
+use std::io::BufReader;
+use std::io::BufWriter;
 use std::path::PathBuf;
+use std::sync::Arc;
+use std::time::Duration;
 use subprocess::Exec;
+use swrite::swriteln;
+use swrite::SWrite;

 fn path_to_cli() -> PathBuf {
     path_to_executable(env!("CARGO_BIN_EXE_reconfigurator-cli"))
@@ -21,7 +41,215 @@ fn test_basic() {
     let exec = Exec::cmd(path_to_cli()).arg("tests/input/cmds.txt");
     let (exit_status, stdout_text, stderr_text) = run_command(exec);
     assert_exit_code(exit_status, EXIT_SUCCESS, &stderr_text);
-    let stdout_text = redact_variable(&stdout_text, &[]);
+    let stdout_text = redact_variable(&stdout_text);
     assert_contents("tests/output/cmd-stdout", &stdout_text);
     assert_contents("tests/output/cmd-stderr", &stderr_text);
 }
+
+type ControlPlaneTestContext =
+    nexus_test_utils::ControlPlaneTestContext;
+
+// Tests a round trip of blueprint editing: start with the blueprint that's
+// present in a running system, fetch it with the rest of the reconfigurator
+// state, load it into reconfigurator-cli, edit it, save that to a file, then
+// import it back.
+#[nexus_test]
+async fn test_blueprint_edit(cptestctx: &ControlPlaneTestContext) {
+    // Setup
+    let nexus = &cptestctx.server.server_context().nexus;
+    let datastore = nexus.datastore();
+    let log = &cptestctx.logctx.log;
+    let opctx = OpContext::for_background(
+        log.clone(),
+        Arc::new(authz::Authz::new(log)),
+        authn::Context::internal_api(),
+        datastore.clone(),
+    );
+    let tmpdir = camino_tempfile::tempdir().expect("failed to create tmpdir");
+    // Save the path and prevent the temporary directory from being cleaned up
+    // automatically. We want to preserve the contents if this test fails.
+    let tmpdir_path = tmpdir.into_path();
+    let saved_state1_path = tmpdir_path.join("reconfigurator-state1.json");
+    let saved_state2_path = tmpdir_path.join("reconfigurator-state2.json");
+    let script1_path = tmpdir_path.join("cmds1");
+    let script2_path = tmpdir_path.join("cmds2");
+    let new_blueprint_path = tmpdir_path.join("new_blueprint.json");
+
+    println!("temporary directory: {}", tmpdir_path);
+
+    // Wait until Nexus has successfully completed an inventory collection.
+    // We don't need it directly but we want it to be present in the saved
+    // reconfigurator state.
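+    // (The check below polls the datastore every 50 ms for up to 30 seconds,
+    // treating "no collection yet" and transient ServiceUnavailable errors as
+    // "not yet" rather than as failures.)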
+ let collection = wait_for_condition( + || async { + let result = + datastore.inventory_get_latest_collection(&opctx).await; + let log_result = match &result { + Ok(Some(_)) => Ok("found"), + Ok(None) => Ok("not found"), + Err(error) => Err(error), + }; + debug!( + log, + "attempt to fetch latest inventory collection"; + "result" => ?log_result, + ); + + match result { + Ok(None) => Err(CondCheckError::NotYet), + Ok(Some(c)) => Ok(c), + Err(Error::ServiceUnavailable { .. }) => { + Err(CondCheckError::NotYet) + } + Err(error) => Err(CondCheckError::Failed(error)), + } + }, + &Duration::from_millis(50), + &Duration::from_secs(30), + ) + .await + .expect("took too long to find first inventory collection"); + + // Assemble state that we can load into reconfigurator-cli. + let state1 = nexus_reconfigurator_preparation::reconfigurator_state_load( + &opctx, datastore, + ) + .await + .expect("failed to assemble reconfigurator state"); + + // Smoke check the initial state. + let sled_id: SledUuid = SLED_AGENT_UUID.parse().unwrap(); + assert!(state1.planning_input.sled_resources(&sled_id).is_some()); + assert!(!state1.planning_input.service_ip_pool_ranges().is_empty()); + assert!(!state1.silo_names.is_empty()); + assert!(!state1.external_dns_zone_names.is_empty()); + // We waited for the first inventory collection already. + assert!(state1.collections.iter().any(|c| c.id == collection.id)); + assert!(!state1.collections.is_empty()); + // Test suite setup establishes the initial blueprint. + assert!(!state1.blueprints.is_empty()); + // Setup requires that internal and external DNS be configured so we should + // have at least the current DNS generations here. + assert!(!state1.internal_dns.is_empty()); + assert!(!state1.external_dns.is_empty()); + + // unwrap: we checked above that this list was non-empty. + let blueprint = state1.blueprints.first().unwrap(); + + // Write a reconfigurator-cli script to load the file, edit the + // blueprint, and save the entire state to a new file. + let mut s = String::new(); + swriteln!(s, "load {} {}", saved_state1_path, collection.id); + swriteln!(s, "blueprint-edit {} add-nexus {}", blueprint.id, sled_id); + swriteln!(s, "save {}", saved_state2_path); + std::fs::write(&script1_path, &s) + .with_context(|| format!("write {}", &script1_path)) + .unwrap(); + + // Run this reconfigurator-cli invocation. + write_json(&saved_state1_path, &state1).unwrap(); + let exec = Exec::cmd(path_to_cli()).arg(&script1_path); + let (exit_status, _, stderr_text) = run_command(exec); + assert_exit_code(exit_status, EXIT_SUCCESS, &stderr_text); + + // Load the new file and find the new blueprint name. + let state2: UnstableReconfiguratorState = + read_json(&saved_state2_path).unwrap(); + assert_eq!(state2.blueprints.len(), state1.blueprints.len() + 1); + let new_blueprint = state2.blueprints.into_iter().rev().next().unwrap(); + assert_ne!(new_blueprint.id, blueprint.id); + + // While we're at it, smoke check the new blueprint. + assert_eq!(new_blueprint.parent_blueprint_id, Some(blueprint.id)); + assert_eq!(new_blueprint.creator, "reconfigurator-cli"); + + // Now run reconfigurator-cli again just to save the new blueprint. This is + // a little unfortunate but it's hard to avoid if we want to test that + // blueprint-save works. 
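+    // The script is just a sequence of reconfigurator-cli commands; it ends
+    // up looking roughly like this (paths and IDs here are illustrative):
+    //
+    //     load /tmp/.../reconfigurator-state2.json <collection-id>
+    //     blueprint-save <blueprint-id> /tmp/.../new_blueprint.json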
+ let mut s = String::new(); + swriteln!(s, "load {} {}", saved_state2_path, collection.id); + swriteln!(s, "blueprint-save {} {}", new_blueprint.id, new_blueprint_path); + std::fs::write(&script2_path, &s) + .with_context(|| format!("write {}", &script2_path)) + .unwrap(); + let exec = Exec::cmd(path_to_cli()).arg(&script2_path); + let (exit_status, _, stderr_text) = run_command(exec); + assert_exit_code(exit_status, EXIT_SUCCESS, &stderr_text); + + // Load the blueprint we just wrote. + let new_blueprint2: Blueprint = read_json(&new_blueprint_path).unwrap(); + assert_eq!(new_blueprint, new_blueprint2); + + // Import the new blueprint. + let nexus_internal_url = + format!("http://{}/", cptestctx.internal_client.bind_address); + let nexus_client = + nexus_client::Client::new(&nexus_internal_url, log.clone()); + nexus_client + .blueprint_import(&new_blueprint) + .await + .expect("failed to import new blueprint"); + + let found_blueprint = nexus_client + .blueprint_view(&new_blueprint.id) + .await + .expect("failed to find imported blueprint in Nexus") + .into_inner(); + assert_eq!(found_blueprint, new_blueprint2); + + // Set the blueprint as the (disabled) target. + nexus_client + .blueprint_target_set(&nexus_client::types::BlueprintTargetSet { + target_id: new_blueprint.id, + enabled: false, + }) + .await + .context("setting target blueprint") + .unwrap(); + + // Read that back. + let target = nexus_client + .blueprint_target_view() + .await + .context("fetching target blueprint") + .unwrap(); + assert_eq!(target.target_id, new_blueprint.id); + + // Now clean up the temporary directory. + for path in [ + saved_state1_path, + saved_state2_path, + script1_path, + script2_path, + new_blueprint_path, + ] { + std::fs::remove_file(&path) + .with_context(|| format!("remove {}", path)) + .unwrap(); + } + + std::fs::remove_dir(&tmpdir_path) + .with_context(|| format!("remove {}", tmpdir_path)) + .unwrap(); +} + +fn read_json serde::Deserialize<'a>>( + path: &Utf8Path, +) -> Result { + let file = std::fs::File::open(path) + .with_context(|| format!("open {:?}", path))?; + let bufread = BufReader::new(file); + serde_json::from_reader(bufread).with_context(|| format!("read {:?}", path)) +} + +fn write_json( + path: &Utf8Path, + obj: &T, +) -> Result<(), anyhow::Error> { + let file = std::fs::File::create(path) + .with_context(|| format!("create {:?}", path))?; + let bufwrite = BufWriter::new(file); + serde_json::to_writer_pretty(bufwrite, obj) + .with_context(|| format!("write {:?}", path))?; + Ok(()) +} diff --git a/dev-tools/releng/Cargo.toml b/dev-tools/releng/Cargo.toml new file mode 100644 index 0000000000..19ede6c24d --- /dev/null +++ b/dev-tools/releng/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "omicron-releng" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[dependencies] +anyhow.workspace = true +camino.workspace = true +camino-tempfile.workspace = true +cargo_metadata.workspace = true +chrono.workspace = true +clap.workspace = true +fs-err = { workspace = true, features = ["tokio"] } +futures.workspace = true +hex.workspace = true +omicron-common.workspace = true +omicron-workspace-hack.workspace = true +omicron-zone-package.workspace = true +once_cell.workspace = true +reqwest.workspace = true +semver.workspace = true +serde.workspace = true +sha2.workspace = true +shell-words.workspace = true +slog.workspace = true +slog-async.workspace = true +slog-term.workspace = true +tar.workspace = true +tokio = { workspace = true, features = ["full"] } +toml.workspace = true 
+tufaceous-lib.workspace = true + +[lints] +workspace = true diff --git a/dev-tools/releng/src/cmd.rs b/dev-tools/releng/src/cmd.rs new file mode 100644 index 0000000000..198eabf99f --- /dev/null +++ b/dev-tools/releng/src/cmd.rs @@ -0,0 +1,167 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::ffi::OsStr; +use std::path::Path; +use std::process::ExitStatus; +use std::process::Output; +use std::process::Stdio; +use std::time::Instant; + +use anyhow::ensure; +use anyhow::Context; +use anyhow::Result; +use slog::debug; +use slog::Logger; + +/// Wrapper for `tokio::process::Command` where the builder methods take/return +/// `self`, plus a number of convenience methods. +pub(crate) struct Command { + inner: tokio::process::Command, +} + +impl Command { + pub(crate) fn new(program: impl AsRef) -> Command { + Command { inner: tokio::process::Command::new(program) } + } + + pub(crate) fn arg(mut self, arg: impl AsRef) -> Command { + self.inner.arg(arg); + self + } + + pub(crate) fn args( + mut self, + args: impl IntoIterator>, + ) -> Command { + self.inner.args(args); + self + } + + pub(crate) fn current_dir(mut self, dir: impl AsRef) -> Command { + self.inner.current_dir(dir); + self + } + + pub(crate) fn env( + mut self, + key: impl AsRef, + value: impl AsRef, + ) -> Command { + self.inner.env(key, value); + self + } + + pub(crate) fn env_remove(mut self, key: impl AsRef) -> Command { + self.inner.env_remove(key); + self + } + + pub(crate) async fn is_success(mut self, logger: &Logger) -> Result { + self.inner + .stdin(Stdio::null()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()); + Ok(xtrace(&mut self, logger).await?.status.success()) + } + + pub(crate) async fn ensure_success( + mut self, + logger: &Logger, + ) -> Result<()> { + self.inner + .stdin(Stdio::null()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()); + let status = xtrace(&mut self, logger).await?.status; + check_status(self, status) + } + + pub(crate) async fn ensure_stdout( + mut self, + logger: &Logger, + ) -> Result { + self.inner + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::inherit()); + let output = xtrace(&mut self, logger).await?; + check_status(self, output.status)?; + String::from_utf8(output.stdout).context("command stdout was not UTF-8") + } + + pub(crate) fn into_parts(self) -> (Description, tokio::process::Command) { + (Description { str: self.to_string() }, self.inner) + } +} + +impl std::fmt::Display for Command { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let command = self.inner.as_std(); + for (name, value) in command.get_envs() { + if let Some(value) = value { + write!( + f, + "{}={} ", + shell_words::quote(&name.to_string_lossy()), + shell_words::quote(&value.to_string_lossy()) + )?; + } + } + write!( + f, + "{}", + shell_words::quote(&command.get_program().to_string_lossy()) + )?; + for arg in command.get_args() { + write!(f, " {}", shell_words::quote(&arg.to_string_lossy()))?; + } + Ok(()) + } +} + +/// Returned from [`Command::into_parts`] for use in the `job` module. 
+pub(crate) struct Description { + str: String, +} + +impl Description { + pub(crate) fn check_status(&self, status: ExitStatus) -> Result<()> { + check_status(self, status) + } +} + +impl std::fmt::Display for Description { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.str) + } +} + +fn check_status( + command: impl std::fmt::Display, + status: ExitStatus, +) -> Result<()> { + ensure!(status.success(), "command `{}` exited with {}", command, status); + Ok(()) +} + +async fn xtrace(command: &mut Command, logger: &Logger) -> Result { + command.inner.stdin(Stdio::null()).kill_on_drop(true); + debug!(logger, "running: {}", command); + let start = Instant::now(); + let output = command + .inner + .spawn() + .with_context(|| format!("failed to exec `{}`", command))? + .wait_with_output() + .await + .with_context(|| format!("failed to wait on `{}`", command))?; + debug!( + logger, + "process exited with {} ({:?})", + output.status, + Instant::now().saturating_duration_since(start) + ); + Ok(output) +} diff --git a/dev-tools/releng/src/hubris.rs b/dev-tools/releng/src/hubris.rs new file mode 100644 index 0000000000..685a729a9f --- /dev/null +++ b/dev-tools/releng/src/hubris.rs @@ -0,0 +1,148 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::collections::BTreeMap; +use std::collections::HashMap; + +use anyhow::Context; +use anyhow::Result; +use camino::Utf8PathBuf; +use fs_err::tokio as fs; +use futures::future::TryFutureExt; +use omicron_common::api::external::SemverVersion; +use omicron_common::api::internal::nexus::KnownArtifactKind; +use semver::Version; +use serde::Deserialize; +use tufaceous_lib::assemble::DeserializedArtifactData; +use tufaceous_lib::assemble::DeserializedArtifactSource; +use tufaceous_lib::assemble::DeserializedFileArtifactSource; +use tufaceous_lib::assemble::DeserializedManifest; + +pub(crate) async fn fetch_hubris_artifacts( + base_url: &'static str, + client: reqwest::Client, + manifest_list: Utf8PathBuf, + output_dir: Utf8PathBuf, +) -> Result<()> { + macro_rules! zip { + ($expr:expr) => { + output_dir.join(format!("{}.zip", $expr)) + }; + } + + fs::create_dir_all(&output_dir).await?; + + // This could be parallelized with FuturesUnordered but in practice this + // takes less time than OS builds. 
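+    // For reference, a parallelized variant would collect the downloads into
+    // a futures::stream::FuturesUnordered and drain it, along the lines of
+    // (sketch only, not what this function does):
+    //
+    //     let mut fetches: FuturesUnordered<_> = hashes
+    //         .iter()
+    //         .map(|hash| fetch_hash(base_url, &client, hash))
+    //         .collect();
+    //     while let Some(data) = fetches.try_next().await? { /* write zip */ }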
+ + let mut manifest = DeserializedManifest { + system_version: SemverVersion(Version::new(0, 0, 0)), + artifacts: BTreeMap::new(), + }; + + for line in fs::read_to_string(manifest_list).await?.lines() { + if let Some(hash) = line.split_whitespace().next() { + let data = fetch_hash(base_url, &client, hash).await?; + let str = String::from_utf8(data).with_context(|| { + format!("hubris artifact manifest {} was not UTF-8", hash) + })?; + let hash_manifest: Manifest = + toml::from_str(&str).with_context(|| { + format!( + "failed to deserialize hubris artifact manifest {}", + hash + ) + })?; + for (kind, artifacts) in hash_manifest.artifacts { + for artifact in artifacts { + let (source, hashes) = match artifact.source { + Source::File(file) => ( + DeserializedArtifactSource::File { + path: zip!(file.hash), + }, + vec![file.hash], + ), + Source::CompositeRot { archive_a, archive_b } => ( + DeserializedArtifactSource::CompositeRot { + archive_a: + DeserializedFileArtifactSource::File { + path: zip!(archive_a.hash), + }, + archive_b: + DeserializedFileArtifactSource::File { + path: zip!(archive_b.hash), + }, + }, + vec![archive_a.hash, archive_b.hash], + ), + }; + manifest.artifacts.entry(kind).or_default().push( + DeserializedArtifactData { + name: artifact.name, + version: artifact.version, + source, + }, + ); + for hash in hashes { + let data = fetch_hash(base_url, &client, &hash).await?; + fs::write(output_dir.join(zip!(hash)), data).await?; + } + } + } + } + } + + fs::write( + output_dir.join("manifest.toml"), + toml::to_string_pretty(&manifest)?.into_bytes(), + ) + .await?; + Ok(()) +} + +async fn fetch_hash( + base_url: &'static str, + client: &reqwest::Client, + hash: &str, +) -> Result> { + client + .get(format!("{}/artifact/{}", base_url, hash)) + .send() + .and_then(|response| response.json()) + .await + .with_context(|| { + format!( + "failed to fetch hubris artifact {} from {}", + hash, base_url + ) + }) +} + +// These structs are similar to `DeserializeManifest` and friends from +// tufaceous-lib, except that the source is a hash instead of a file path. This +// hash is used to download the artifact from Permission Slip. +#[derive(Deserialize)] +struct Manifest { + #[serde(rename = "artifact")] + artifacts: HashMap>, +} + +#[derive(Deserialize)] +struct Artifact { + name: String, + version: SemverVersion, + source: Source, +} + +#[derive(Deserialize)] +#[serde(tag = "kind", rename_all = "kebab-case")] +enum Source { + File(FileSource), + CompositeRot { archive_a: FileSource, archive_b: FileSource }, +} + +#[derive(Deserialize)] +struct FileSource { + hash: String, +} diff --git a/dev-tools/releng/src/job.rs b/dev-tools/releng/src/job.rs new file mode 100644 index 0000000000..dcb58a0b92 --- /dev/null +++ b/dev-tools/releng/src/job.rs @@ -0,0 +1,305 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! A quick-and-dirty job runner. +//! +//! Jobs are async functions given a name. All jobs must be described before the +//! jobs can be run (`Jobs::run_all` consumes the job runner). Jobs can depend +//! on other jobs, which is implemented via `tokio::sync::oneshot` channels; a +//! completed job sends a message to all registered receivers, which are waiting +//! on the messages in order to run. This essentially creates a DAG, except +//! instead of us having to keep track of it, we make it Tokio's problem. +//! +//! 
A `tokio::sync::Semaphore` is used to restrict the number of jobs to +//! `std::thread::available_parallelism`, except for a hardcoded list of +//! prioritized job names that are allowed to ignore this. + +use std::collections::HashMap; +use std::future::Future; +use std::process::Stdio; +use std::sync::Arc; +use std::time::Instant; + +use anyhow::anyhow; +use anyhow::Context; +use anyhow::Result; +use camino::Utf8Path; +use camino::Utf8PathBuf; +use fs_err::tokio::File; +use futures::future::BoxFuture; +use futures::future::FutureExt; +use futures::stream::FuturesUnordered; +use futures::stream::TryStreamExt; +use slog::info; +use slog::Logger; +use tokio::io::AsyncBufReadExt; +use tokio::io::AsyncRead; +use tokio::io::AsyncWrite; +use tokio::io::AsyncWriteExt; +use tokio::io::BufReader; +use tokio::sync::oneshot; +use tokio::sync::oneshot::error::RecvError; +use tokio::sync::Semaphore; + +use crate::cmd::Command; + +// We want these two jobs to run without delay because they take the longest +// amount of time, so we allow them to run without taking a permit first. +const PERMIT_NOT_REQUIRED: [&str; 2] = ["host-package", "host-image"]; + +pub(crate) struct Jobs { + logger: Logger, + permits: Arc, + log_dir: Utf8PathBuf, + map: HashMap, +} + +struct Job { + future: BoxFuture<'static, Result<()>>, + wait_for: Vec>, + notify: Vec>, +} + +pub(crate) struct Selector<'a> { + jobs: &'a mut Jobs, + name: String, +} + +impl Jobs { + pub(crate) fn new( + logger: &Logger, + permits: Arc, + log_dir: &Utf8Path, + ) -> Jobs { + Jobs { + logger: logger.clone(), + permits, + log_dir: log_dir.to_owned(), + map: HashMap::new(), + } + } + + pub(crate) fn push( + &mut self, + name: impl AsRef, + future: impl Future> + Send + 'static, + ) -> Selector<'_> { + let name = name.as_ref().to_owned(); + assert!(!self.map.contains_key(&name), "duplicate job name {}", name); + self.map.insert( + name.clone(), + Job { + future: run_job( + self.logger.clone(), + self.permits.clone(), + name.clone(), + future, + ) + .boxed(), + wait_for: Vec::new(), + notify: Vec::new(), + }, + ); + Selector { jobs: self, name } + } + + pub(crate) fn push_command( + &mut self, + name: impl AsRef, + command: Command, + ) -> Selector<'_> { + let name = name.as_ref().to_owned(); + assert!(!self.map.contains_key(&name), "duplicate job name {}", name); + self.map.insert( + name.clone(), + Job { + future: spawn_with_output( + command, + self.logger.clone(), + self.permits.clone(), + name.clone(), + self.log_dir.join(&name).with_extension("log"), + ) + .boxed(), + wait_for: Vec::new(), + notify: Vec::new(), + }, + ); + Selector { jobs: self, name } + } + + pub(crate) fn select(&mut self, name: impl AsRef) -> Selector<'_> { + Selector { jobs: self, name: name.as_ref().to_owned() } + } + + pub(crate) async fn run_all(self) -> Result<()> { + self.map + .into_values() + .map(Job::run) + .collect::>() + .try_collect::<()>() + .await + } +} + +impl Job { + async fn run(self) -> Result<()> { + let result: Result<(), RecvError> = self + .wait_for + .into_iter() + .collect::>() + .try_collect::<()>() + .await; + result.map_err(|_| anyhow!("dependency failed"))?; + + self.future.await?; + for sender in self.notify { + // Ignore the error here -- the only reason we should fail to send + // our message is if a task has failed or the user hit Ctrl-C, at + // which point a bunch of error logging is not particularly useful. 
+ sender.send(()).ok(); + } + Ok(()) + } +} + +impl<'a> Selector<'a> { + #[track_caller] + pub(crate) fn after(self, other: impl AsRef) -> Self { + let (sender, receiver) = oneshot::channel(); + self.jobs + .map + .get_mut(&self.name) + .expect("invalid job name") + .wait_for + .push(receiver); + self.jobs + .map + .get_mut(other.as_ref()) + .expect("invalid job name") + .notify + .push(sender); + self + } +} + +macro_rules! info_or_error { + ($logger:expr, $result:expr, $($tt:tt)*) => { + if $result.is_ok() { + ::slog::info!($logger, $($tt)*); + } else { + ::slog::error!($logger, $($tt)*); + } + }; +} + +async fn run_job( + logger: Logger, + permits: Arc, + name: String, + future: impl Future> + Send + 'static, +) -> Result<()> { + if !PERMIT_NOT_REQUIRED.contains(&name.as_str()) { + let _ = permits.acquire_owned().await?; + } + + info!(logger, "[{}] running task", name); + let start = Instant::now(); + let result = tokio::spawn(future).await?; + let duration = Instant::now().saturating_duration_since(start); + info_or_error!( + logger, + result, + "[{}] task {} ({:?})", + name, + if result.is_ok() { "succeeded" } else { "failed" }, + duration + ); + result +} + +async fn spawn_with_output( + command: Command, + logger: Logger, + permits: Arc, + name: String, + log_path: Utf8PathBuf, +) -> Result<()> { + if !PERMIT_NOT_REQUIRED.contains(&name.as_str()) { + let _ = permits.acquire_owned().await?; + } + + let (command_desc, mut command) = command.into_parts(); + + let log_file_1 = File::create(log_path).await?; + let log_file_2 = log_file_1.try_clone().await?; + + info!(logger, "[{}] running: {}", name, command_desc); + let start = Instant::now(); + let mut child = command + .kill_on_drop(true) + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .with_context(|| format!("failed to exec `{}`", command_desc))?; + + let stdout = spawn_reader( + format!("[{:>16}] ", name), + child.stdout.take().unwrap(), + tokio::io::stdout(), + log_file_1, + ); + let stderr = spawn_reader( + format!("[{:>16}] ", name), + child.stderr.take().unwrap(), + tokio::io::stderr(), + log_file_2, + ); + + let status = child.wait().await.with_context(|| { + format!("I/O error while waiting for job {:?} to complete", name) + })?; + let result = command_desc.check_status(status); + info_or_error!( + logger, + result, + "[{}] process exited with {} ({:?})", + name, + status, + Instant::now().saturating_duration_since(start) + ); + + // bubble up any errors from `spawn_reader` + stdout.await??; + stderr.await??; + + result +} + +fn spawn_reader( + prefix: String, + reader: impl AsyncRead + Send + Unpin + 'static, + mut terminal_writer: impl AsyncWrite + Send + Unpin + 'static, + logfile_writer: File, +) -> tokio::task::JoinHandle> { + let mut reader = BufReader::new(reader); + let mut logfile_writer = tokio::fs::File::from(logfile_writer); + let mut buf = prefix.into_bytes(); + let prefix_len = buf.len(); + tokio::spawn(async move { + loop { + buf.truncate(prefix_len); + // We have no particular control over the output from the child + // processes we run, so we read until a newline character without + // relying on valid UTF-8 output. 
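+            // read_until() appends to `buf` (which still holds the prefix
+            // bytes) and returns 0 only at EOF, which is what terminates this
+            // loop.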
+ let size = reader.read_until(b'\n', &mut buf).await?; + if size == 0 { + return Ok(()); + } + terminal_writer.write_all(&buf).await?; + logfile_writer.write_all(&buf[prefix_len..]).await?; + } + }) +} diff --git a/dev-tools/releng/src/main.rs b/dev-tools/releng/src/main.rs new file mode 100644 index 0000000000..445090115d --- /dev/null +++ b/dev-tools/releng/src/main.rs @@ -0,0 +1,746 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +mod cmd; +mod hubris; +mod job; +mod tuf; + +use std::sync::Arc; +use std::time::Duration; +use std::time::Instant; + +use anyhow::bail; +use anyhow::Context; +use anyhow::Result; +use camino::Utf8PathBuf; +use chrono::Utc; +use clap::Parser; +use fs_err::tokio as fs; +use omicron_zone_package::config::Config; +use once_cell::sync::Lazy; +use semver::Version; +use slog::debug; +use slog::error; +use slog::info; +use slog::Drain; +use slog::Logger; +use slog_term::FullFormat; +use slog_term::TermDecorator; +use tokio::sync::Semaphore; + +use crate::cmd::Command; +use crate::job::Jobs; + +/// The base version we're currently building. Build information is appended to +/// this later on. +/// +/// Under current policy, each new release is a major version bump, and +/// generally referred to only by the major version (e.g. 8.0.0 is referred +/// to as "v8", "version 8", or "release 8" to customers). The use of semantic +/// versioning is mostly to hedge for perhaps wanting something more granular in +/// the future. +const BASE_VERSION: Version = Version::new(8, 0, 0); + +#[derive(Debug, Clone, Copy)] +enum InstallMethod { + /// Unpack the tarball to `/opt/oxide/`, and install + /// `pkg/manifest.xml` (if it exists) to + /// `/lib/svc/manifest/site/.xml`. + Install, + /// Copy the tarball to `/opt/oxide/.tar.gz`. + Bundle, +} + +/// Packages to install or bundle in the host OS image. +const HOST_IMAGE_PACKAGES: [(&str, InstallMethod); 7] = [ + ("mg-ddm-gz", InstallMethod::Install), + ("omicron-sled-agent", InstallMethod::Install), + ("overlay", InstallMethod::Bundle), + ("oxlog", InstallMethod::Install), + ("propolis-server", InstallMethod::Bundle), + ("pumpkind-gz", InstallMethod::Install), + ("switch-asic", InstallMethod::Bundle), +]; +/// Packages to install or bundle in the recovery (trampoline) OS image. +const RECOVERY_IMAGE_PACKAGES: [(&str, InstallMethod); 2] = [ + ("installinator", InstallMethod::Install), + ("mg-ddm-gz", InstallMethod::Install), +]; +/// Packages to ship with the TUF repo. +const TUF_PACKAGES: [&str; 11] = [ + "clickhouse_keeper", + "clickhouse", + "cockroachdb", + "crucible-pantry-zone", + "crucible-zone", + "external-dns", + "internal-dns", + "nexus", + "ntp", + "oximeter", + "probe", +]; + +const HELIOS_REPO: &str = "https://pkg.oxide.computer/helios/2/dev/"; + +static WORKSPACE_DIR: Lazy = Lazy::new(|| { + // $CARGO_MANIFEST_DIR is at `.../omicron/dev-tools/releng` + let mut dir = + Utf8PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").expect( + "$CARGO_MANIFEST_DIR is not set; run this via `cargo xtask releng`", + )); + dir.pop(); + dir.pop(); + dir +}); + +/// Run the Oxide release engineering process and produce a TUF repo that can be +/// used to update a rack. +/// +/// For more information, see `docs/releng.adoc` in the Omicron repository. 
+/// +/// Note that `--host-dataset` and `--recovery-dataset` must be set to different +/// values to build the two OS images in parallel. This is strongly recommended. +#[derive(Parser)] +#[command(name = "cargo xtask releng", bin_name = "cargo xtask releng")] +struct Args { + /// ZFS dataset to use for `helios-build` when building the host image + #[clap(long, default_value_t = Self::default_dataset("host"))] + host_dataset: String, + + /// ZFS dataset to use for `helios-build` when building the recovery + /// (trampoline) image + #[clap(long, default_value_t = Self::default_dataset("recovery"))] + recovery_dataset: String, + + /// Path to a Helios repository checkout (default: "helios" in the same + /// directory as "omicron") + #[clap(long, default_value_t = Self::default_helios_dir())] + helios_dir: Utf8PathBuf, + + /// Ignore the current HEAD of the Helios repository checkout + #[clap(long)] + ignore_helios_origin: bool, + + /// Output dir for TUF repo and log files + #[clap(long, default_value_t = Self::default_output_dir())] + output_dir: Utf8PathBuf, + + /// Path to the directory containing the rustup proxy `bin/cargo` (usually + /// set by Cargo) + #[clap(long, env = "CARGO_HOME")] + cargo_home: Option, + + /// Path to the git binary + #[clap(long, env = "GIT", default_value = "git")] + git_bin: Utf8PathBuf, + + /// Path to a pre-built omicron-package binary (skips building if set) + #[clap(long, env = "OMICRON_PACKAGE")] + omicron_package_bin: Option, +} + +impl Args { + fn default_dataset(name: &str) -> String { + format!( + "rpool/images/{}/{}", + std::env::var("LOGNAME").expect("$LOGNAME is not set"), + name + ) + } + + fn default_helios_dir() -> Utf8PathBuf { + WORKSPACE_DIR + .parent() + .expect("omicron is presumably not cloned at /") + .join("helios") + } + + fn default_output_dir() -> Utf8PathBuf { + WORKSPACE_DIR.join("out/releng") + } +} + +#[tokio::main] +async fn main() -> Result<()> { + let args = Args::parse(); + + let decorator = TermDecorator::new().build(); + let drain = FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).build().fuse(); + let logger = Logger::root(drain, slog::o!()); + + // Change the working directory to the workspace root. + debug!(logger, "changing working directory to {}", *WORKSPACE_DIR); + std::env::set_current_dir(&*WORKSPACE_DIR) + .context("failed to change working directory to workspace root")?; + + // Determine the target directory. + let target_dir = cargo_metadata::MetadataCommand::new() + .no_deps() + .exec() + .context("failed to get cargo metadata")? + .target_directory; + + // We build everything in Omicron with $CARGO, but we need to use the rustup + // proxy for Cargo when outside Omicron. + let rustup_cargo = match &args.cargo_home { + Some(path) => path.join("bin/cargo"), + None => Utf8PathBuf::from("cargo"), + }; + // `var_os` here is deliberate: if CARGO is set to a non-UTF-8 path we + // shouldn't do something confusing as a fallback. + let cargo = match std::env::var_os("CARGO") { + Some(path) => Utf8PathBuf::try_from(std::path::PathBuf::from(path)) + .context("$CARGO is not valid UTF-8")?, + None => rustup_cargo.clone(), + }; + + let permits = Arc::new(Semaphore::new( + std::thread::available_parallelism() + .context("couldn't get available parallelism")? + .into(), + )); + + let commit = Command::new(&args.git_bin) + .args(["rev-parse", "HEAD"]) + .ensure_stdout(&logger) + .await? 
+ .trim() + .to_owned(); + + let mut version = BASE_VERSION.clone(); + // Differentiate between CI and local builds. We use `0.word` as the + // prerelease field because it comes before `alpha`. + version.pre = + if std::env::var_os("CI").is_some() { "0.ci" } else { "0.local" } + .parse()?; + // Set the build metadata to the current commit hash. + let mut build = String::with_capacity(14); + build.push_str("git"); + build.extend(commit.chars().take(11)); + version.build = build.parse()?; + let version_str = version.to_string(); + info!(logger, "version: {}", version_str); + + let manifest = Arc::new(omicron_zone_package::config::parse_manifest( + &fs::read_to_string(WORKSPACE_DIR.join("package-manifest.toml")) + .await?, + )?); + let opte_version = + fs::read_to_string(WORKSPACE_DIR.join("tools/opte_version")).await?; + + let client = reqwest::ClientBuilder::new() + .connect_timeout(Duration::from_secs(15)) + .timeout(Duration::from_secs(15)) + .build() + .context("failed to build reqwest client")?; + + // PREFLIGHT ============================================================== + let mut preflight_ok = true; + + for package in HOST_IMAGE_PACKAGES + .into_iter() + .chain(RECOVERY_IMAGE_PACKAGES) + .map(|(package, _)| package) + .chain(TUF_PACKAGES) + { + if !manifest.packages.contains_key(package) { + error!( + logger, + "package {} to be installed in the OS image \ + is not listed in the package manifest", + package + ); + preflight_ok = false; + } + } + + // Ensure the Helios checkout exists + if args.helios_dir.exists() { + if !args.ignore_helios_origin { + // check that our helios clone is up to date + Command::new(&args.git_bin) + .arg("-C") + .arg(&args.helios_dir) + .args(["fetch", "--no-write-fetch-head", "origin", "master"]) + .ensure_success(&logger) + .await?; + let stdout = Command::new(&args.git_bin) + .arg("-C") + .arg(&args.helios_dir) + .args(["rev-parse", "HEAD", "origin/master"]) + .ensure_stdout(&logger) + .await?; + let mut lines = stdout.lines(); + let first = + lines.next().context("git-rev-parse output was empty")?; + if !lines.all(|line| line == first) { + error!( + logger, + "helios checkout at {0} is out-of-date; run \ + `git pull -C {0}`, or run omicron-releng with \ + --ignore-helios-origin or --helios-path", + shell_words::quote(args.helios_dir.as_str()) + ); + preflight_ok = false; + } + } + } else { + info!(logger, "cloning helios to {}", args.helios_dir); + Command::new(&args.git_bin) + .args(["clone", "https://github.com/oxidecomputer/helios.git"]) + .arg(&args.helios_dir) + .ensure_success(&logger) + .await?; + } + // Record the branch and commit in the output + Command::new(&args.git_bin) + .arg("-C") + .arg(&args.helios_dir) + .args(["status", "--branch", "--porcelain=2"]) + .ensure_success(&logger) + .await?; + + // Check that the omicron1 brand is installed + if !Command::new("pkg") + .args(["verify", "-q", "/system/zones/brand/omicron1/tools"]) + .is_success(&logger) + .await? + { + error!( + logger, + "the omicron1 brand is not installed; install it with \ + `pfexec pkg install /system/zones/brand/omicron1/tools`" + ); + preflight_ok = false; + } + + // Check that the datasets for helios-image to use exist + for (dataset, option) in [ + (&args.host_dataset, "--host-dataset"), + (&args.recovery_dataset, "--recovery-dataset"), + ] { + if !Command::new("zfs") + .arg("list") + .arg(dataset) + .is_success(&logger) + .await? 
+ { + error!( + logger, + "the dataset {0} does not exist; run `pfexec zfs create \ + -p {0}`, or specify a different one with {1}", + shell_words::quote(dataset), + option + ); + preflight_ok = false; + } + } + + if !preflight_ok { + bail!("some preflight checks failed"); + } + + fs::create_dir_all(&args.output_dir).await?; + + // DEFINE JOBS ============================================================ + let tempdir = camino_tempfile::tempdir() + .context("failed to create temporary directory")?; + let mut jobs = Jobs::new(&logger, permits.clone(), &args.output_dir); + + jobs.push_command( + "helios-setup", + Command::new("ptime") + .args(["-m", "gmake", "setup"]) + .current_dir(&args.helios_dir) + // ?!?! + // somehow, the Makefile does not see a new `$(PWD)` without this. + .env("PWD", &args.helios_dir) + // Setting `BUILD_OS` to no makes setup skip repositories we don't + // need for building the OS itself (we are just building an image + // from an already-built OS). + .env("BUILD_OS", "no") + .env_remove("CARGO") + .env_remove("RUSTUP_TOOLCHAIN"), + ); + + // Download the toolchain for phbl before we get to the image build steps. + // (This is possibly a micro-optimization.) + jobs.push_command( + "phbl-toolchain", + Command::new(&rustup_cargo) + .arg("--version") + .current_dir(args.helios_dir.join("projects/phbl")) + .env_remove("CARGO") + .env_remove("RUSTUP_TOOLCHAIN"), + ) + .after("helios-setup"); + + let omicron_package = if let Some(path) = &args.omicron_package_bin { + // omicron-package is provided, so don't build it. + jobs.push("omicron-package", std::future::ready(Ok(()))); + path.clone() + } else { + jobs.push_command( + "omicron-package", + Command::new("ptime").args([ + "-m", + cargo.as_str(), + "build", + "--locked", + "--release", + "--bin", + "omicron-package", + ]), + ); + target_dir.join("release/omicron-package") + }; + + // Generate `omicron-package stamp` jobs for a list of packages as a nested + // `Jobs`. Returns the selector for the outer job. + // + // (This could be a function but the resulting function would have too many + // confusable arguments.) + macro_rules! 
stamp_packages { + ($name:expr, $target:expr, $packages:expr) => {{ + let mut stamp_jobs = + Jobs::new(&logger, permits.clone(), &args.output_dir); + for package in $packages { + stamp_jobs.push_command( + format!("stamp-{}", package), + Command::new(&omicron_package) + .args([ + "--target", + $target.as_str(), + "--artifacts", + $target.artifacts_path(&args).as_str(), + "stamp", + package, + &version_str, + ]) + .env_remove("CARGO_MANIFEST_DIR"), + ); + } + jobs.push($name, stamp_jobs.run_all()) + }}; + } + + for target in [Target::Host, Target::Recovery] { + let artifacts_path = target.artifacts_path(&args); + + // omicron-package target create + jobs.push_command( + format!("{}-target", target), + Command::new(&omicron_package) + .args([ + "--target", + target.as_str(), + "--artifacts", + artifacts_path.as_str(), + "target", + "create", + ]) + .args(target.target_args()) + .env_remove("CARGO_MANIFEST_DIR"), + ) + .after("omicron-package"); + + // omicron-package package + jobs.push_command( + format!("{}-package", target), + Command::new(&omicron_package) + .args([ + "--target", + target.as_str(), + "--artifacts", + artifacts_path.as_str(), + "package", + ]) + .env_remove("CARGO_MANIFEST_DIR"), + ) + .after(format!("{}-target", target)); + + // omicron-package stamp + stamp_packages!( + format!("{}-stamp", target), + target, + target.proto_package_names() + ) + .after(format!("{}-package", target)); + + // [build proto dir, to be overlaid into disk image] + let proto_dir = tempdir.path().join("proto").join(target.as_str()); + jobs.push( + format!("{}-proto", target), + build_proto_area( + artifacts_path, + proto_dir.clone(), + target.proto_packages(), + manifest.clone(), + ), + ) + .after(format!("{}-stamp", target)); + + // The ${os_short_commit} token will be expanded by `helios-build` + let image_name = format!( + "{} {}/${{os_short_commit}} {}", + target.image_prefix(), + commit.chars().take(7).collect::(), + Utc::now().format("%Y-%m-%d %H:%M") + ); + + // helios-build experiment-image + jobs.push_command( + format!("{}-image", target), + Command::new("ptime") + .arg("-m") + .arg(args.helios_dir.join("helios-build")) + .arg("experiment-image") + .arg("-o") // output directory for image + .arg(args.output_dir.join(format!("os-{}", target))) + .arg("-p") // use an external package repository + .arg(format!("helios-dev={}", HELIOS_REPO)) + .arg("-F") // pass extra image builder features + .arg(format!("optever={}", opte_version.trim())) + .arg("-P") // include all files from extra proto area + .arg(proto_dir.join("root")) + .arg("-N") // image name + .arg(image_name) + .arg("-s") // tempdir name suffix + .arg(target.as_str()) + .args(target.image_build_args()) + .current_dir(&args.helios_dir) + .env( + "IMAGE_DATASET", + match target { + Target::Host => &args.host_dataset, + Target::Recovery => &args.recovery_dataset, + }, + ) + .env_remove("CARGO") + .env_remove("RUSTUP_TOOLCHAIN"), + ) + .after("helios-setup") + .after(format!("{}-proto", target)); + } + // Build the recovery target after we build the host target. Only one + // of these will build at a time since Cargo locks its target directory; + // since host-package and host-image both take longer than their recovery + // counterparts, this should be the fastest option to go first. + jobs.select("recovery-package").after("host-package"); + if args.host_dataset == args.recovery_dataset { + // If the datasets are the same, we can't parallelize these. 
+ jobs.select("recovery-image").after("host-image"); + } + + // Set up /root/.profile in the host OS image. + jobs.push( + "host-profile", + host_add_root_profile(tempdir.path().join("proto/host/root/root")), + ) + .after("host-proto"); + jobs.select("host-image").after("host-profile"); + + stamp_packages!("tuf-stamp", Target::Host, TUF_PACKAGES) + .after("host-stamp") + .after("recovery-stamp"); + + // Run `cargo xtask verify-libraries --release`. (This was formerly run in + // the build-and-test Buildomat job, but this fits better here where we've + // already built most of the binaries.) + jobs.push_command( + "verify-libraries", + Command::new(&cargo).args(["xtask", "verify-libraries", "--release"]), + ) + .after("host-package") + .after("recovery-package"); + + for (name, base_url) in [ + ("staging", "https://permslip-staging.corp.oxide.computer"), + ("production", "https://signer-us-west.corp.oxide.computer"), + ] { + jobs.push( + format!("hubris-{}", name), + hubris::fetch_hubris_artifacts( + base_url, + client.clone(), + WORKSPACE_DIR.join(format!("tools/permslip_{}", name)), + args.output_dir.join(format!("hubris-{}", name)), + ), + ); + } + + jobs.push( + "tuf-repo", + tuf::build_tuf_repo( + logger.clone(), + args.output_dir.clone(), + version, + manifest, + ), + ) + .after("tuf-stamp") + .after("host-image") + .after("recovery-image") + .after("hubris-staging") + .after("hubris-production"); + + // RUN JOBS =============================================================== + let start = Instant::now(); + jobs.run_all().await?; + info!( + logger, + "all jobs completed in {:?}", + Instant::now().saturating_duration_since(start) + ); + Ok(()) +} + +#[derive(Clone, Copy)] +enum Target { + Host, + Recovery, +} + +impl Target { + fn as_str(self) -> &'static str { + match self { + Target::Host => "host", + Target::Recovery => "recovery", + } + } + + fn artifacts_path(self, args: &Args) -> Utf8PathBuf { + match self { + Target::Host => WORKSPACE_DIR.join("out"), + Target::Recovery => { + args.output_dir.join(format!("artifacts-{}", self)) + } + } + } + + fn target_args(self) -> &'static [&'static str] { + match self { + Target::Host => &[ + "--image", + "standard", + "--machine", + "gimlet", + "--switch", + "asic", + "--rack-topology", + "multi-sled", + ], + Target::Recovery => &["--image", "trampoline"], + } + } + + fn proto_packages(self) -> &'static [(&'static str, InstallMethod)] { + match self { + Target::Host => &HOST_IMAGE_PACKAGES, + Target::Recovery => &RECOVERY_IMAGE_PACKAGES, + } + } + + fn proto_package_names(self) -> impl Iterator { + self.proto_packages().iter().map(|(name, _)| *name) + } + + fn image_prefix(self) -> &'static str { + match self { + Target::Host => "ci", + Target::Recovery => "recovery", + } + } + + fn image_build_args(self) -> &'static [&'static str] { + match self { + Target::Host => &[ + "-B", // include omicron1 brand + ], + Target::Recovery => &[ + "-R", // recovery image + ], + } + } +} + +impl std::fmt::Display for Target { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +async fn build_proto_area( + mut package_dir: Utf8PathBuf, + proto_dir: Utf8PathBuf, + packages: &'static [(&'static str, InstallMethod)], + manifest: Arc, +) -> Result<()> { + let opt_oxide = proto_dir.join("root/opt/oxide"); + let manifest_site = proto_dir.join("root/lib/svc/manifest/site"); + fs::create_dir_all(&opt_oxide).await?; + + // use the stamped packages + package_dir.push("versioned"); + + for &(package_name, 
method) in packages { + let package = + manifest.packages.get(package_name).expect("checked in preflight"); + match method { + InstallMethod::Install => { + let path = opt_oxide.join(&package.service_name); + fs::create_dir(&path).await?; + + let cloned_path = path.clone(); + let cloned_package_dir = package_dir.to_owned(); + tokio::task::spawn_blocking(move || -> Result<()> { + let mut archive = tar::Archive::new(std::fs::File::open( + cloned_package_dir + .join(package_name) + .with_extension("tar"), + )?); + archive.unpack(cloned_path).with_context(|| { + format!("failed to extract {}.tar.gz", package_name) + })?; + Ok(()) + }) + .await??; + + let smf_manifest = path.join("pkg").join("manifest.xml"); + if smf_manifest.exists() { + fs::create_dir_all(&manifest_site).await?; + fs::rename( + smf_manifest, + manifest_site + .join(&package.service_name) + .with_extension("xml"), + ) + .await?; + } + } + InstallMethod::Bundle => { + fs::copy( + package_dir.join(format!("{}.tar.gz", package_name)), + opt_oxide.join(format!("{}.tar.gz", package.service_name)), + ) + .await?; + } + } + } + + Ok(()) +} + +async fn host_add_root_profile(host_proto_root: Utf8PathBuf) -> Result<()> { + fs::create_dir_all(&host_proto_root).await?; + fs::write( + host_proto_root.join(".profile"), + "# Add opteadm, ddadm, oxlog to PATH\n\ + export PATH=$PATH:/opt/oxide/opte/bin:/opt/oxide/mg-ddm:/opt/oxide/oxlog\n", + ).await?; + Ok(()) +} diff --git a/dev-tools/releng/src/tuf.rs b/dev-tools/releng/src/tuf.rs new file mode 100644 index 0000000000..2a880210eb --- /dev/null +++ b/dev-tools/releng/src/tuf.rs @@ -0,0 +1,149 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::sync::Arc; + +use anyhow::Context; +use anyhow::Result; +use camino::Utf8PathBuf; +use chrono::Duration; +use chrono::Timelike; +use chrono::Utc; +use fs_err::tokio as fs; +use fs_err::tokio::File; +use omicron_common::api::external::SemverVersion; +use omicron_common::api::internal::nexus::KnownArtifactKind; +use omicron_zone_package::config::Config; +use semver::Version; +use sha2::Digest; +use sha2::Sha256; +use slog::Logger; +use tokio::io::AsyncReadExt; +use tufaceous_lib::assemble::ArtifactManifest; +use tufaceous_lib::assemble::DeserializedArtifactData; +use tufaceous_lib::assemble::DeserializedArtifactSource; +use tufaceous_lib::assemble::DeserializedControlPlaneZoneSource; +use tufaceous_lib::assemble::DeserializedManifest; +use tufaceous_lib::assemble::OmicronRepoAssembler; +use tufaceous_lib::Key; + +pub(crate) async fn build_tuf_repo( + logger: Logger, + output_dir: Utf8PathBuf, + version: Version, + package_manifest: Arc, +) -> Result<()> { + // We currently go about this somewhat strangely; the old release + // engineering process produced a Tufaceous manifest, and (the now very many + // copies of) the TUF repo download-and-unpack script we use expects to be + // able to download a manifest. So we build up a `DeserializedManifest`, + // write it to disk, and then turn it into an `ArtifactManifest` to actually + // build the repo. + + // Start a new manifest by loading the Hubris staging manifest. + let mut manifest = DeserializedManifest::from_path( + &output_dir.join("hubris-staging/manifest.toml"), + ) + .context("failed to open intermediate hubris staging manifest")?; + // Set the version. 
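+    // (As computed in main.rs, this is `BASE_VERSION` plus a `0.ci` or
+    // `0.local` prerelease field and a `git<first 11 chars of the commit>`
+    // build-metadata field, so a CI build looks like `X.Y.Z-0.ci+git0123456789a`.)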
+ manifest.system_version = SemverVersion(version); + + // Load the Hubris production manifest and merge it in. + let hubris_production = DeserializedManifest::from_path( + &output_dir.join("hubris-production/manifest.toml"), + ) + .context("failed to open intermediate hubris production manifest")?; + for (kind, artifacts) in hubris_production.artifacts { + manifest.artifacts.entry(kind).or_default().extend(artifacts); + } + + // Add the OS images. + manifest.artifacts.insert( + KnownArtifactKind::Host, + vec![DeserializedArtifactData { + name: "host".to_string(), + version: manifest.system_version.clone(), + source: DeserializedArtifactSource::File { + path: output_dir.join("os-host/os.tar.gz"), + }, + }], + ); + manifest.artifacts.insert( + KnownArtifactKind::Trampoline, + vec![DeserializedArtifactData { + name: "trampoline".to_string(), + version: manifest.system_version.clone(), + source: DeserializedArtifactSource::File { + path: output_dir.join("os-recovery/os.tar.gz"), + }, + }], + ); + + // Add the control plane zones. + let mut zones = Vec::new(); + for package in crate::TUF_PACKAGES { + zones.push(DeserializedControlPlaneZoneSource::File { + file_name: Some(format!( + "{}.tar.gz", + package_manifest + .packages + .get(package) + .expect("checked in preflight") + .service_name + )), + path: crate::WORKSPACE_DIR + .join("out/versioned") + .join(format!("{}.tar.gz", package)), + }); + } + manifest.artifacts.insert( + KnownArtifactKind::ControlPlane, + vec![DeserializedArtifactData { + name: "control-plane".to_string(), + version: manifest.system_version.clone(), + source: DeserializedArtifactSource::CompositeControlPlane { zones }, + }], + ); + + // Serialize the manifest out. + fs::write( + output_dir.join("manifest.toml"), + toml::to_string_pretty(&manifest)?.into_bytes(), + ) + .await?; + + // Convert the manifest. + let manifest = ArtifactManifest::from_deserialized(&output_dir, manifest)?; + manifest.verify_all_present()?; + // Assemble the repo. + let keys = vec![Key::generate_ed25519()]; + let expiry = Utc::now().with_nanosecond(0).unwrap() + Duration::weeks(1); + OmicronRepoAssembler::new( + &logger, + manifest, + keys, + expiry, + output_dir.join("repo.zip"), + ) + .build() + .await?; + // Generate the checksum file. + let mut hasher = Sha256::new(); + let mut buf = [0; 8192]; + let mut file = File::open(output_dir.join("repo.zip")).await?; + loop { + let n = file.read(&mut buf).await?; + if n == 0 { + break; + } + hasher.update(&buf[..n]); + } + fs::write( + output_dir.join("repo.zip.sha256.txt"), + format!("{}\n", hex::encode(&hasher.finalize())), + ) + .await?; + + Ok(()) +} diff --git a/dev-tools/xtask/Cargo.toml b/dev-tools/xtask/Cargo.toml index 73bfe0b37a..2aecde57e5 100644 --- a/dev-tools/xtask/Cargo.toml +++ b/dev-tools/xtask/Cargo.toml @@ -4,12 +4,16 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true camino.workspace = true -cargo_toml = "0.19" -cargo_metadata = "0.18" +cargo_toml = "0.20" +cargo_metadata.workspace = true clap.workspace = true +macaddr.workspace = true serde.workspace = true toml.workspace = true fs-err.workspace = true diff --git a/dev-tools/xtask/src/check_workspace_deps.rs b/dev-tools/xtask/src/check_workspace_deps.rs new file mode 100644 index 0000000000..76e405ce1a --- /dev/null +++ b/dev-tools/xtask/src/check_workspace_deps.rs @@ -0,0 +1,134 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Subcommand: cargo xtask check-workspace-deps + +use anyhow::{bail, Context, Result}; +use camino::Utf8Path; +use cargo_toml::{Dependency, Manifest}; +use fs_err as fs; +use std::collections::BTreeMap; + +const WORKSPACE_HACK_PACKAGE_NAME: &str = "omicron-workspace-hack"; + +pub fn run_cmd() -> Result<()> { + // Ignore issues with "pq-sys". See the omicron-rpaths package for details. + const EXCLUDED: &[&'static str] = &["pq-sys"]; + + // Collect a list of all packages used in any workspace package as a + // workspace dependency. + let mut workspace_dependencies = BTreeMap::new(); + + // Collect a list of all packages used in any workspace package as a + // NON-workspace dependency. + let mut non_workspace_dependencies = BTreeMap::new(); + + // Load information about the Cargo workspace. + let workspace = crate::load_workspace()?; + let mut nwarnings = 0; + let mut nerrors = 0; + + // Iterate the workspace packages and fill out the maps above. + for pkg_info in workspace.workspace_packages() { + let manifest_path = &pkg_info.manifest_path; + let manifest = read_cargo_toml(manifest_path)?; + + // Check that `[lints] workspace = true` is set. + if !manifest.lints.map(|lints| lints.workspace).unwrap_or(false) { + eprintln!( + "error: package {:?} does not have `[lints] workspace = true` set", + pkg_info.name + ); + nerrors += 1; + } + + if pkg_info.name == WORKSPACE_HACK_PACKAGE_NAME { + // Skip over workspace-hack because hakari doesn't yet support + // workspace deps: https://github.com/guppy-rs/guppy/issues/7 + continue; + } + + for tree in [ + &manifest.dependencies, + &manifest.dev_dependencies, + &manifest.build_dependencies, + ] { + for (name, dep) in tree { + if let Dependency::Inherited(inherited) = dep { + if inherited.workspace { + workspace_dependencies + .entry(name.to_owned()) + .or_insert_with(Vec::new) + .push(pkg_info.name.clone()); + + if !inherited.features.is_empty() { + eprintln!( + "warning: package is used as a workspace dep \ + with extra features: {:?} (in {:?})", + name, pkg_info.name, + ); + nwarnings += 1; + } + + continue; + } + } + + non_workspace_dependencies + .entry(name.to_owned()) + .or_insert_with(Vec::new) + .push(pkg_info.name.clone()); + } + } + } + + // Look for any packages that are used as both a workspace dependency and a + // non-workspace dependency. Generally, the non-workspace dependency should + // be replaced with a workspace dependency. + for (pkgname, ws_examples) in &workspace_dependencies { + if let Some(non_ws_examples) = non_workspace_dependencies.get(pkgname) { + eprintln!( + "error: package is used as both a workspace dep and a \ + non-workspace dep: {:?}", + pkgname + ); + eprintln!(" workspace dep: {}", ws_examples.join(", ")); + eprintln!(" non-workspace dep: {}", non_ws_examples.join(", ")); + nerrors += 1; + } + } + + // Look for any packages used as non-workspace dependencies by more than one + // workspace package. These should generally be moved to a workspace + // dependency. 
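+    // (For example, if two workspace crates each declared `serde = "1"`
+    // directly instead of `serde.workspace = true`, `serde` would be reported
+    // here with both crates listed under "used in".)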
+ for (pkgname, examples) in + non_workspace_dependencies.iter().filter(|(pkgname, examples)| { + examples.len() > 1 && !EXCLUDED.contains(&pkgname.as_str()) + }) + { + eprintln!( + "error: package is used by multiple workspace packages without \ + a workspace dependency: {:?}", + pkgname + ); + eprintln!(" used in: {}", examples.join(", ")); + nerrors += 1; + } + + eprintln!( + "check-workspace-deps: errors: {}, warnings: {}", + nerrors, nwarnings + ); + + if nerrors != 0 { + bail!("errors with workspace dependencies"); + } + + Ok(()) +} + +fn read_cargo_toml(path: &Utf8Path) -> Result { + let bytes = fs::read(path)?; + Manifest::from_slice(&bytes).with_context(|| format!("parse {:?}", path)) +} diff --git a/dev-tools/xtask/src/clippy.rs b/dev-tools/xtask/src/clippy.rs new file mode 100644 index 0000000000..8c454fdebf --- /dev/null +++ b/dev-tools/xtask/src/clippy.rs @@ -0,0 +1,74 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Subcommand: cargo xtask clippy + +use anyhow::{bail, Context, Result}; +use clap::Parser; +use std::process::Command; + +#[derive(Parser)] +pub struct ClippyArgs { + /// Automatically apply lint suggestions. + #[clap(long)] + fix: bool, + /// Error format passed to `cargo clippy`. + #[clap(long, value_name = "FMT")] + message_format: Option, +} + +pub fn run_cmd(args: ClippyArgs) -> Result<()> { + let cargo = + std::env::var("CARGO").unwrap_or_else(|_| String::from("cargo")); + let mut command = Command::new(&cargo); + command.arg("clippy"); + + if args.fix { + command.arg("--fix"); + } + + // Pass along the `--message-format` flag if it was provided. + // + // We don't really care about validating that it's a valid argument to + // `cargo check --message-format`, because `cargo check` will error out if + // it's unrecognized, and repeating the validation here just presents an + // opportunity to get out of sync with what Cargo actually accepts should a + // new message format be added. + if let Some(fmt) = args.message_format { + command.args(["--message-format", &fmt]); + } + + command + // Make sure we check everything. + .arg("--all-targets") + .arg("--") + // For a list of lints, see + // https://rust-lang.github.io/rust-clippy/master. + // + // We disallow warnings by default. + .arg("--deny") + .arg("warnings"); + + eprintln!( + "running: {:?} {}", + &cargo, + command + .get_args() + .map(|arg| format!("{:?}", arg.to_str().unwrap())) + .collect::>() + .join(" ") + ); + + let exit_status = command + .spawn() + .context("failed to spawn child process")? + .wait() + .context("failed to wait for child process")?; + + if !exit_status.success() { + bail!("clippy failed: {}", exit_status); + } + + Ok(()) +} diff --git a/dev-tools/xtask/src/external.rs b/dev-tools/xtask/src/external.rs new file mode 100644 index 0000000000..9c0bc69b55 --- /dev/null +++ b/dev-tools/xtask/src/external.rs @@ -0,0 +1,72 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! External xtasks. (extasks?) + +use std::ffi::{OsStr, OsString}; +use std::os::unix::process::CommandExt; +use std::process::Command; + +use anyhow::{Context, Result}; +use clap::Parser; + +/// Argument parser for external xtasks. 
+///
+/// In general we want all developer tasks to be discoverable simply by running
+/// `cargo xtask`, but some development tools end up with a particularly
+/// large dependency tree. It's not ideal to have to pay the cost of building
+/// our release engineering tooling if all the user wants to do is check for
+/// workspace dependency issues.
+///
+/// `External` provides a pattern for creating xtasks that live in other crates.
+/// An external xtask is defined on `crate::Cmds` as a tuple variant containing
+/// `External`, which captures all arguments and options (even `--help`) as
+/// a `Vec<OsString>`. The main function then calls `External::exec` with the
+/// appropriate bin target name and any additional Cargo arguments.
+#[derive(Parser)]
+#[clap(
+    disable_help_flag(true),
+    disable_help_subcommand(true),
+    disable_version_flag(true)
+)]
+pub struct External {
+    #[clap(trailing_var_arg(true), allow_hyphen_values(true))]
+    args: Vec<OsString>,
+
+    // This stores an in-progress Command builder. `cargo_args` appends args
+    // to it, and `exec` consumes it. Clap does not treat this as a command
+    // (`skip`), but fills in this field by calling `new_command`.
+    #[clap(skip = new_command())]
+    command: Command,
+}
+
+impl External {
+    /// Add additional arguments to `cargo run` (for instance, to run the
+    /// external xtask in release mode).
+    pub fn cargo_args(
+        mut self,
+        args: impl IntoIterator<Item = impl AsRef<OsStr>>,
+    ) -> External {
+        self.command.args(args);
+        self
+    }
+
+    pub fn exec(mut self, bin_target: impl AsRef<OsStr>) -> Result<()> {
+        let error = self
+            .command
+            .arg("--bin")
+            .arg(bin_target)
+            .arg("--")
+            .args(self.args)
+            .exec();
+        Err(error).context("failed to exec `cargo run`")
+    }
+}
+
+fn new_command() -> Command {
+    let cargo = std::env::var_os("CARGO").unwrap_or_else(|| "cargo".into());
+    let mut command = Command::new(cargo);
+    command.arg("run");
+    command
+}
diff --git a/dev-tools/xtask/src/main.rs b/dev-tools/xtask/src/main.rs
index c682fc247e..9f1131e758 100644
--- a/dev-tools/xtask/src/main.rs
+++ b/dev-tools/xtask/src/main.rs
@@ -6,21 +6,26 @@
 //!
 //! See .
-use anyhow::{bail, Context, Result}; -use camino::Utf8Path; +use anyhow::{Context, Result}; use cargo_metadata::Metadata; -use cargo_toml::{Dependency, Manifest}; use clap::{Parser, Subcommand}; -use fs_err as fs; -use std::{collections::BTreeMap, process::Command}; + +mod check_workspace_deps; +mod clippy; +#[cfg_attr(not(target_os = "illumos"), allow(dead_code))] +mod external; #[cfg(target_os = "illumos")] -mod illumos; +mod verify_libraries; #[cfg(target_os = "illumos")] -use illumos::cmd_verify_libraries; +mod virtual_hardware; #[derive(Parser)] -#[command(name = "cargo xtask", about = "Workspace-related developer tools")] +#[command( + name = "cargo xtask", + bin_name = "cargo xtask", + about = "Workspace-related developer tools" +)] struct Args { #[command(subcommand)] cmd: Cmds, @@ -32,208 +37,53 @@ enum Cmds { /// workspace CheckWorkspaceDeps, /// Run configured clippy checks - Clippy(ClippyArgs), + Clippy(clippy::ClippyArgs), + + #[cfg(target_os = "illumos")] + /// Build a TUF repo + Releng(external::External), /// Verify we are not leaking library bindings outside of intended /// crates + #[cfg(target_os = "illumos")] + VerifyLibraries(verify_libraries::Args), + /// Manage virtual hardware + #[cfg(target_os = "illumos")] + VirtualHardware(virtual_hardware::Args), + + /// (this command is only available on illumos) + #[cfg(not(target_os = "illumos"))] + Releng, + /// (this command is only available on illumos) + #[cfg(not(target_os = "illumos"))] VerifyLibraries, -} - -#[derive(Parser)] -struct ClippyArgs { - /// Automatically apply lint suggestions. - #[clap(long)] - fix: bool, + /// (this command is only available on illumos) + #[cfg(not(target_os = "illumos"))] + VirtualHardware, } fn main() -> Result<()> { let args = Args::parse(); match args.cmd { - Cmds::Clippy(args) => cmd_clippy(args), - Cmds::CheckWorkspaceDeps => cmd_check_workspace_deps(), - Cmds::VerifyLibraries => cmd_verify_libraries(), - } -} - -fn cmd_clippy(args: ClippyArgs) -> Result<()> { - let cargo = - std::env::var("CARGO").unwrap_or_else(|_| String::from("cargo")); - let mut command = Command::new(&cargo); - command.arg("clippy"); - - if args.fix { - command.arg("--fix"); - } - - command - // Make sure we check everything. - .arg("--all-targets") - .arg("--") - // For a list of lints, see - // https://rust-lang.github.io/rust-clippy/master. - // - // We disallow warnings by default. - .arg("--deny") - .arg("warnings") - // Clippy's style nits are useful, but not worth keeping in CI. This - // override belongs in src/lib.rs, and it is there, but that doesn't - // reliably work due to rust-lang/rust-clippy#6610. - .arg("--allow") - .arg("clippy::style") - // But continue to warn on anything in the "disallowed_" namespace. - // (These will be turned into errors by `--deny warnings` above.) - .arg("--warn") - .arg("clippy::disallowed_macros") - .arg("--warn") - .arg("clippy::disallowed_methods") - .arg("--warn") - .arg("clippy::disallowed_names") - .arg("--warn") - .arg("clippy::disallowed_script_idents") - .arg("--warn") - .arg("clippy::disallowed_types"); - - eprintln!( - "running: {:?} {}", - &cargo, - command - .get_args() - .map(|arg| format!("{:?}", arg.to_str().unwrap())) - .collect::>() - .join(" ") - ); - - let exit_status = command - .spawn() - .context("failed to spawn child process")? 
- .wait() - .context("failed to wait for child process")?; - - if !exit_status.success() { - bail!("clippy failed: {}", exit_status); - } - - Ok(()) -} - -const WORKSPACE_HACK_PACKAGE_NAME: &str = "omicron-workspace-hack"; - -fn cmd_check_workspace_deps() -> Result<()> { - // Ignore issues with "pq-sys". See the omicron-rpaths package for details. - const EXCLUDED: &[&'static str] = &["pq-sys"]; - - // Collect a list of all packages used in any workspace package as a - // workspace dependency. - let mut workspace_dependencies = BTreeMap::new(); + Cmds::Clippy(args) => clippy::run_cmd(args), + Cmds::CheckWorkspaceDeps => check_workspace_deps::run_cmd(), - // Collect a list of all packages used in any workspace package as a - // NON-workspace dependency. - let mut non_workspace_dependencies = BTreeMap::new(); - - // Load information about the Cargo workspace. - let workspace = load_workspace()?; - let mut nwarnings = 0; - let mut nerrors = 0; - - // Iterate the workspace packages and fill out the maps above. - for pkg_info in workspace.workspace_packages() { - if pkg_info.name == WORKSPACE_HACK_PACKAGE_NAME { - // Skip over workspace-hack because hakari doesn't yet support - // workspace deps: https://github.com/guppy-rs/guppy/issues/7 - continue; - } - - let manifest_path = &pkg_info.manifest_path; - let manifest = read_cargo_toml(manifest_path)?; - for tree in [ - &manifest.dependencies, - &manifest.dev_dependencies, - &manifest.build_dependencies, - ] { - for (name, dep) in tree { - if let Dependency::Inherited(inherited) = dep { - if inherited.workspace { - workspace_dependencies - .entry(name.to_owned()) - .or_insert_with(Vec::new) - .push(pkg_info.name.clone()); - - if !inherited.features.is_empty() { - eprintln!( - "warning: package is used as a workspace dep \ - with extra features: {:?} (in {:?})", - name, pkg_info.name, - ); - nwarnings += 1; - } - - continue; - } - } - - non_workspace_dependencies - .entry(name.to_owned()) - .or_insert_with(Vec::new) - .push(pkg_info.name.clone()); - } + #[cfg(target_os = "illumos")] + Cmds::Releng(external) => { + external.cargo_args(["--release"]).exec("omicron-releng") } - } - - // Look for any packages that are used as both a workspace dependency and a - // non-workspace dependency. Generally, the non-workspace dependency should - // be replaced with a workspace dependency. - for (pkgname, ws_examples) in &workspace_dependencies { - if let Some(non_ws_examples) = non_workspace_dependencies.get(pkgname) { - eprintln!( - "error: package is used as both a workspace dep and a \ - non-workspace dep: {:?}", - pkgname - ); - eprintln!(" workspace dep: {}", ws_examples.join(", ")); - eprintln!(" non-workspace dep: {}", non_ws_examples.join(", ")); - nerrors += 1; + #[cfg(target_os = "illumos")] + Cmds::VerifyLibraries(args) => verify_libraries::run_cmd(args), + #[cfg(target_os = "illumos")] + Cmds::VirtualHardware(args) => virtual_hardware::run_cmd(args), + + #[cfg(not(target_os = "illumos"))] + Cmds::Releng | Cmds::VerifyLibraries | Cmds::VirtualHardware => { + anyhow::bail!("this command is only available on illumos"); } } - - // Look for any packages used as non-workspace dependencies by more than one - // workspace package. These should generally be moved to a workspace - // dependency. 
- for (pkgname, examples) in - non_workspace_dependencies.iter().filter(|(pkgname, examples)| { - examples.len() > 1 && !EXCLUDED.contains(&pkgname.as_str()) - }) - { - eprintln!( - "error: package is used by multiple workspace packages without \ - a workspace dependency: {:?}", - pkgname - ); - eprintln!(" used in: {}", examples.join(", ")); - nerrors += 1; - } - - eprintln!( - "check-workspace-deps: errors: {}, warnings: {}", - nerrors, nwarnings - ); - - if nerrors != 0 { - bail!("errors with workspace dependencies"); - } - - Ok(()) -} - -#[cfg(not(target_os = "illumos"))] -fn cmd_verify_libraries() -> Result<()> { - unimplemented!("Library verification is only available on illumos!") -} - -fn read_cargo_toml(path: &Utf8Path) -> Result { - let bytes = fs::read(path)?; - Manifest::from_slice(&bytes).with_context(|| format!("parse {:?}", path)) } -fn load_workspace() -> Result { +pub fn load_workspace() -> Result { cargo_metadata::MetadataCommand::new() .exec() .context("loading cargo metadata") diff --git a/dev-tools/xtask/src/illumos.rs b/dev-tools/xtask/src/verify_libraries.rs similarity index 89% rename from dev-tools/xtask/src/illumos.rs rename to dev-tools/xtask/src/verify_libraries.rs index a2daab2c9e..af2b87daf1 100644 --- a/dev-tools/xtask/src/illumos.rs +++ b/dev-tools/xtask/src/verify_libraries.rs @@ -5,6 +5,7 @@ use anyhow::{bail, Context, Result}; use camino::Utf8Path; use cargo_metadata::Message; +use clap::Parser; use fs_err as fs; use serde::Deserialize; use std::{ @@ -16,6 +17,13 @@ use swrite::{swriteln, SWrite}; use crate::load_workspace; +#[derive(Parser)] +pub struct Args { + /// Build in release mode + #[clap(long)] + release: bool, +} + #[derive(Deserialize, Debug)] struct LibraryConfig { binary_allow_list: Option>, @@ -83,20 +91,29 @@ fn verify_executable( Ok(()) } -pub fn cmd_verify_libraries() -> Result<()> { + +pub fn run_cmd(args: Args) -> Result<()> { let metadata = load_workspace()?; let mut config_path = metadata.workspace_root; config_path.push(".cargo/xtask.toml"); let config = read_xtask_toml(&config_path)?; let cargo = std::env::var("CARGO").unwrap_or_else(|_| "cargo".to_string()); - let mut command = Command::new(cargo) - .args(["build", "--bins", "--message-format=json-render-diagnostics"]) + let mut command = Command::new(cargo); + command.args([ + "build", + "--bins", + "--message-format=json-render-diagnostics", + ]); + if args.release { + command.arg("--release"); + } + let mut child = command .stdout(Stdio::piped()) .spawn() .context("failed to spawn cargo build")?; - let reader = BufReader::new(command.stdout.take().context("take stdout")?); + let reader = BufReader::new(child.stdout.take().context("take stdout")?); let mut errors = Default::default(); for message in cargo_metadata::Message::parse_stream(reader) { @@ -108,7 +125,7 @@ pub fn cmd_verify_libraries() -> Result<()> { } } - let status = command.wait()?; + let status = child.wait()?; if !status.success() { bail!("Failed to execute cargo build successfully {}", status); } diff --git a/dev-tools/xtask/src/virtual_hardware.rs b/dev-tools/xtask/src/virtual_hardware.rs new file mode 100644 index 0000000000..d013ff6505 --- /dev/null +++ b/dev-tools/xtask/src/virtual_hardware.rs @@ -0,0 +1,890 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Subcommand: cargo xtask virtual-hardware + +use anyhow::{anyhow, bail, Context, Result}; +use camino::{Utf8Path, Utf8PathBuf}; +use clap::{Parser, Subcommand}; +use macaddr::MacAddr; +use serde::Deserialize; +use std::process::{Command, Output}; +use std::str::FromStr; + +#[derive(Subcommand)] +enum Commands { + /// Create virtual hardware to simulate a Gimlet + Create { + /// The physical link over which Chelsio links are simulated + /// + /// Will be inferred by `dladm show-phys` if unsupplied. + #[clap(long, env)] + physical_link: Option, + + /// Sets `promisc-filtered` off for the sc0_1 vnic. + /// + /// Won't do anything if unsupplied. + #[clap(long)] + promiscuous_filter_off: bool, + + /// The gateway IP address of your local network + /// + /// Will be inferred via `netstat` if unsupplied. + #[clap(long)] + gateway_ip: Option, + + /// The configured mode for softnpu + #[clap(long, env, default_value = "zone")] + softnpu_mode: String, + + /// The MAC address of your gateway IP + /// + /// Will be inferred via `arp` if unsupplied. + #[clap(long)] + gateway_mac: Option, + + #[command(flatten)] + pxa: Pxa, + + #[clap(long, default_value = PXA_MAC_DEFAULT)] + pxa_mac: String, + }, + /// Destroy virtual hardware which was initialized with "Create" + Destroy, +} + +/// Describes which objects should be manipulated by these commands. +#[derive(clap::ValueEnum, Clone, Copy, Debug)] +pub enum Scope { + /// Everything (this is the default). + All, + /// Only storage (e.g. vdevs). + Disks, + /// Only networking (e.g. SoftNPU). + Network, +} + +#[derive(clap::Args)] +#[group(multiple = true)] +pub struct Pxa { + /// The first IP address your Oxide cluster can use. + /// + /// Requires `pxa-end`. + #[clap(long = "pxa-start", requires = "end", env = "PXA_START")] + start: Option, + + /// The last IP address your Oxide cluster can use + /// + /// Requires `pxa-start`. + #[clap(long = "pxa-end", requires = "start", env = "PXA_END")] + end: Option, +} + +#[derive(Parser)] +pub struct Args { + #[clap(long, value_enum, default_value_t = Scope::All)] + scope: Scope, + + /// The directory in which virtual devices are stored + #[clap(long, default_value = "/var/tmp")] + vdev_dir: Utf8PathBuf, + + #[command(subcommand)] + command: Commands, +} + +static NO_INSTALL_MARKER: &'static str = "/etc/opt/oxide/NO_INSTALL"; +const GB: u64 = 1 << 30; +const VDEV_SIZE: u64 = 20 * GB; + +const ARP: &'static str = "/usr/sbin/arp"; +const DLADM: &'static str = "/usr/sbin/dladm"; +const IPADM: &'static str = "/usr/sbin/ipadm"; +const MODINFO: &'static str = "/usr/sbin/modinfo"; +const MODUNLOAD: &'static str = "/usr/sbin/modunload"; +const NETSTAT: &'static str = "/usr/bin/netstat"; +const PFEXEC: &'static str = "/usr/bin/pfexec"; +const PING: &'static str = "/usr/sbin/ping"; +const SWAP: &'static str = "/usr/sbin/swap"; +const ZFS: &'static str = "/usr/sbin/zfs"; +const ZLOGIN: &'static str = "/usr/sbin/zlogin"; +const ZPOOL: &'static str = "/usr/sbin/zpool"; +const ZONEADM: &'static str = "/usr/sbin/zoneadm"; + +const SIDECAR_LITE_COMMIT: &'static str = + "960f11afe859e0316088e04578aedb700fba6159"; +const SOFTNPU_COMMIT: &'static str = "3203c51cf4473d30991b522062ac0df2e045c2f2"; +const PXA_MAC_DEFAULT: &'static str = "a8:e1:de:01:70:1d"; + +const PXA_WARNING: &'static str = r#" You have not set up the proxy-ARP environment variables + PXA_START and PXA_END. These variables are necessary to allow + SoftNPU to respond to ARP requests for the portion of the + network you've dedicated to Omicron. 
+ You must either destroy / recreate the Omicron environment + with these variables or run `scadm standalon add-proxy-arp` + in the SoftNPU zone later"#; + +pub fn run_cmd(args: Args) -> Result<()> { + if Utf8Path::new(NO_INSTALL_MARKER).exists() { + bail!("This system has the marker file {NO_INSTALL_MARKER}, aborting"); + } + + let workspace_root = match crate::load_workspace() { + Ok(metadata) => metadata.workspace_root, + Err(_err) => { + let pwd = Utf8PathBuf::try_from(std::env::current_dir()?)?; + eprintln!( + "Couldn't find Cargo.toml, using {pwd} as workspace root" + ); + pwd + } + }; + + let smf_path = "smf/sled-agent/non-gimlet/config.toml"; + let sled_agent_config = workspace_root.join(smf_path); + if !sled_agent_config.exists() { + bail!("Could not find {smf_path}. We need it to configure vdevs"); + } + + let npuzone_path = "out/npuzone/npuzone"; + let npu_zone = workspace_root.join(npuzone_path); + if !npu_zone.exists() { + bail!("Could not find {npuzone_path}. We need it to configure SoftNPU"); + } + + match args.command { + Commands::Create { + physical_link, + promiscuous_filter_off, + softnpu_mode, + gateway_ip, + gateway_mac, + pxa, + pxa_mac, + } => { + let physical_link = if let Some(l) = physical_link { + l + } else { + default_physical_link()? + }; + + println!("creating virtual hardware"); + if matches!(args.scope, Scope::All | Scope::Disks) { + ensure_vdevs(&sled_agent_config, &args.vdev_dir)?; + } + if matches!(args.scope, Scope::All | Scope::Network) + && softnpu_mode == "zone" + { + ensure_simulated_links(&physical_link, promiscuous_filter_off)?; + ensure_softnpu_zone(&npu_zone)?; + initialize_softnpu_zone(gateway_ip, gateway_mac, pxa, pxa_mac)?; + } + println!("created virtual hardware"); + } + Commands::Destroy => { + println!("destroying virtual hardware"); + verify_omicron_uninstalled()?; + demount_backingfs()?; + if matches!(args.scope, Scope::All | Scope::Network) { + unload_xde_driver()?; + remove_softnpu_zone(&npu_zone)?; + remove_vnics()?; + } + if matches!(args.scope, Scope::All | Scope::Disks) { + destroy_vdevs(&sled_agent_config, &args.vdev_dir)?; + } + println!("destroyed virtual hardware"); + } + } + + Ok(()) +} + +fn verify_omicron_uninstalled() -> Result<()> { + let mut cmd = Command::new("svcs"); + cmd.arg("svc:/oxide/sled-agent:default"); + if let Ok(_) = execute(cmd) { + bail!("Omicron is still installed, please run `omicron-package uninstall` first"); + } + Ok(()) +} + +// Some services have their working data overlaid by backing mounts from the +// internal boot disk. Before we can destroy the ZFS pools, we need to unmount +// these. +fn demount_backingfs() -> Result<()> { + const BACKED_SERVICES: &str = "svc:/system/fmd:default"; + println!("Disabling {BACKED_SERVICES}"); + svcadm_temporary_toggle(BACKED_SERVICES, false)?; + for dataset in zfs_list_internal("noauto", "yes")? { + println!("unmounting: {dataset}"); + zfs_umount(&dataset)?; + } + println!("Re-enabling {BACKED_SERVICES}"); + svcadm_temporary_toggle(BACKED_SERVICES, true)?; + Ok(()) +} + +fn unload_xde_driver() -> Result<()> { + let cmd = Command::new(MODINFO); + let output = execute(cmd)?; + + let id = String::from_utf8(output.stdout) + .context("Invalid modinfo output")? 
+ .lines() + .find_map(|line| { + let mut cols = line.trim().splitn(2, ' '); + let id = cols.next()?; + let desc = cols.next()?; + if !desc.contains("xde") { + return None; + } + return Some(id.to_string()); + }); + + let Some(id) = id else { + println!("xde driver already unloaded"); + return Ok(()); + }; + println!("unloading xde driver"); + + let mut cmd = Command::new(PFEXEC); + cmd.arg(MODUNLOAD); + cmd.arg("-i"); + cmd.arg(id); + execute(cmd)?; + Ok(()) +} + +fn remove_softnpu_zone(npu_zone: &Utf8Path) -> Result<()> { + println!("ensuring softnpu zone destroyed"); + let mut cmd = Command::new(PFEXEC); + cmd.arg(npu_zone); + cmd.args([ + "destroy", + "sidecar", + "--omicron-zone", + "--ports", + "sc0_0,tfportrear0_0", + "--ports", + "sc0_1,tfportqsfp0_0", + ]); + if let Err(output) = execute(cmd) { + // Don't throw an error if the zone was already removed + if output.to_string().contains("No such zone configured") { + println!("zone {npu_zone} already destroyed"); + return Ok(()); + } else { + return Err(output); + } + } + Ok(()) +} + +fn remove_vnics() -> Result<()> { + delete_address("lo0/underlay")?; + delete_interface("sc0_1")?; + delete_vnic("sc0_1")?; + + for i in 0..=1 { + let net = format!("net{i}"); + let sc = format!("sc{i}_0"); + + delete_interface(&net)?; + delete_simnet(&net)?; + delete_simnet(&sc)?; + } + + Ok(()) +} + +fn ensure_simulated_links( + physical_link: &str, + promiscuous_filter_off: bool, +) -> Result<()> { + for i in 0..=1 { + let net = format!("net{i}"); + let sc = format!("sc{i}_0"); + if !simnet_exists(&net) { + create_simnet(&net)?; + create_simnet(&sc)?; + modify_simnet(&sc, &net)?; + set_linkprop(&sc, "mtu", "9000")?; + } + println!("Simnet {net}/{sc} exists"); + } + + let sc = "sc0_1".to_string(); + if !vnic_exists(&sc) { + create_vnic(&sc, physical_link, PXA_MAC_DEFAULT)?; + if promiscuous_filter_off { + set_linkprop(&sc, "promisc-filtered", "off")?; + } + } + println!("Vnic {sc} exists"); + Ok(()) +} + +fn ensure_softnpu_zone(npu_zone: &Utf8Path) -> Result<()> { + let zones = zoneadm_list()?; + if !zones.iter().any(|z| z == "sidecar_softnpu") { + if !npu_zone.exists() { + bail!("npu binary is not installed. 
Please re-run ./tools/install_prerequisites.sh"); + } + + let mut cmd = Command::new(PFEXEC); + cmd.arg(npu_zone); + cmd.args([ + "create", + "sidecar", + "--omicron-zone", + "--ports", + "sc0_0,tfportrear0_0", + "--ports", + "sc0_1,tfportqsfp0_0", + "--sidecar-lite-commit", + SIDECAR_LITE_COMMIT, + "--softnpu-commit", + SOFTNPU_COMMIT, + ]); + execute(cmd)?; + } + + Ok(()) +} + +fn initialize_softnpu_zone( + gateway_ip: Option, + gateway_mac: Option, + pxa: Pxa, + pxa_mac: String, +) -> Result<()> { + let gateway_ip = match gateway_ip { + Some(ip) => ip, + None => default_gateway_ip()?, + }; + println!("Using {gateway_ip} as gateway ip"); + + let gateway_mac = get_gateway_mac(gateway_mac, &gateway_ip)?.to_string(); + println!("using {gateway_mac} as gateway mac"); + + // Configure upstream network gateway ARP entry + println!("configuring SoftNPU ARP entry"); + run_scadm_command(vec!["add-arp-entry", &gateway_ip, &gateway_mac])?; + + match (pxa.start, pxa.end) { + (Some(start), Some(end)) => { + println!("configuring SoftNPU proxy ARP"); + run_scadm_command(vec!["add-proxy-arp", &start, &end, &pxa_mac])?; + } + _ => { + eprintln!("{PXA_WARNING}"); + } + } + + let output = run_scadm_command(vec!["dump-state"])?; + let stdout = String::from_utf8(output.stdout) + .context("Invalid dump-state output")?; + println!("SoftNPU state:"); + for line in stdout.lines() { + println!(" {line}"); + } + + Ok(()) +} + +fn run_scadm_command(args: Vec<&str>) -> Result { + let mut cmd = Command::new(PFEXEC); + cmd.args([ + ZLOGIN, + "sidecar_softnpu", + "/softnpu/scadm", + "--server", + "/softnpu/server", + "--client", + "/softnpu/client", + "standalone", + ]); + for arg in &args { + cmd.arg(arg); + } + Ok(execute(cmd)?) +} + +fn default_gateway_ip() -> Result { + let mut cmd = Command::new(NETSTAT); + cmd.args(["-rn", "-f", "inet"]); + let output = execute(cmd)?; + + String::from_utf8(output.stdout) + .context("Invalid netstat output")? + .lines() + .find_map(|line| { + let mut columns = line.trim().split_whitespace(); + let dst = columns.next()?; + let gateway = columns.next()?; + + if dst == "default" { + return Some(gateway.to_owned()); + } + None + }) + .ok_or_else(|| anyhow!("No default gateway found")) +} + +fn get_gateway_mac( + gateway_mac: Option, + gateway_ip: &str, +) -> Result { + match gateway_mac { + Some(mac) => Ok(MacAddr::from_str(&mac)?), + None => { + let attempts = 3; + for i in 0..=attempts { + println!( + "Pinging {gateway_ip} and sleeping ({i} / {attempts})" + ); + let mut cmd = Command::new(PING); + cmd.arg(&gateway_ip); + execute(cmd)?; + std::thread::sleep(std::time::Duration::from_secs(1)); + } + + let mut cmd = Command::new(ARP); + cmd.arg("-an"); + let output = execute(cmd)?; + + let mac = String::from_utf8(output.stdout) + .context("Invalid arp output")? + .lines() + .find_map(|line| { + let mut columns = line.trim().split_whitespace().skip(1); + let ip = columns.next()?; + let mac = columns.last()?; + if ip == gateway_ip { + return Some(mac.to_string()); + } + None + }) + .ok_or_else(|| anyhow!("No gateway MAC found"))?; + Ok(MacAddr::from_str(&mac)?) + } + } +} + +/// This is a subset of omicron-sled-agent's "config/Config" structure. +/// +/// We don't depend on it directly to avoid rebuilding whenever the +/// Sled Agent changes, though it's important for us to stay in sync +/// to parse these fields correctly. +#[derive(Clone, Debug, Deserialize)] +struct SledAgentConfig { + /// Optional list of virtual devices to be used as "discovered disks". 
+ pub vdevs: Option>, +} + +impl SledAgentConfig { + fn read(path: &Utf8Path) -> Result { + let config = std::fs::read_to_string(path)?; + Ok(toml::from_str(&config) + .context("Could not parse sled agent config as toml")?) + } +} + +fn ensure_vdevs( + sled_agent_config: &Utf8Path, + vdev_dir: &Utf8Path, +) -> Result<()> { + let config = SledAgentConfig::read(sled_agent_config)?; + + let Some(vdevs) = &config.vdevs else { + bail!("No vdevs found in this configuration"); + }; + + for vdev in vdevs { + let vdev_path = if vdev.is_absolute() { + vdev.to_owned() + } else { + vdev_dir.join(vdev) + }; + + if vdev_path.exists() { + println!("{vdev_path} already exists"); + } else { + println!("creating {vdev_path}"); + let file = std::fs::File::create(&vdev_path)?; + file.set_len(VDEV_SIZE)?; + } + } + Ok(()) +} + +const ZVOL_ROOT: &str = "/dev/zvol/dsk"; + +fn destroy_vdevs( + sled_agent_config: &Utf8Path, + vdev_dir: &Utf8Path, +) -> Result<()> { + let swap_devices = swap_list()?; + let zpools = omicron_zpool_list()?; + + for zpool in &zpools { + println!("destroying: {zpool}"); + // Remove any swap devices that appear used by this zpool + for swap_device in &swap_devices { + if swap_device + .starts_with(Utf8PathBuf::from(ZVOL_ROOT).join(&zpool)) + { + println!("Removing {swap_device} from {zpool}"); + swap_delete(&swap_device)?; + } + } + + // Then remove the zpool itself + zpool_destroy(zpool)?; + println!("destroyed: {zpool}"); + } + + // Remove the vdev files themselves, if they are regular files + let config = SledAgentConfig::read(sled_agent_config)?; + if let Some(vdevs) = &config.vdevs { + for vdev in vdevs { + let vdev_path = if vdev.is_absolute() { + vdev.to_owned() + } else { + vdev_dir.join(vdev) + }; + + if !vdev_path.exists() { + continue; + } + + let metadata = std::fs::metadata(&vdev_path)?; + + if metadata.file_type().is_file() { + std::fs::remove_file(&vdev_path)?; + println!("deleted {vdev_path}"); + } + } + } + + Ok(()) +} + +fn execute(mut cmd: Command) -> Result { + let output = cmd + .output() + .context(format!("Could not start command: {:?}", cmd.get_program()))?; + if !output.status.success() { + let stderr = + String::from_utf8(output.stderr).unwrap_or_else(|_| String::new()); + + bail!( + "{:?} failed: {} (stderr: {stderr})", + cmd.get_program(), + output.status + ) + } + + Ok(output) +} + +// Lists all files used for swap +fn swap_list() -> Result> { + let mut cmd = Command::new(SWAP); + cmd.arg("-l"); + + let output = cmd.output().context(format!("Could not start swap"))?; + if !output.status.success() { + if let Ok(stderr) = String::from_utf8(output.stderr) { + // This is an exceptional case - if there are no swap devices, + // we treat this error case as an "empty result". + if stderr.trim() == "No swap devices configured" { + return Ok(vec![]); + } + eprint!("{}", stderr); + } + bail!("swap failed: {}", output.status); + } + + Ok(String::from_utf8(output.stdout) + .context("Invalid swap output")? + .lines() + .skip(1) + .filter_map(|line| { + line.split_whitespace().next().map(|s| Utf8PathBuf::from(s)) + }) + .collect()) +} + +// Deletes a specific swap file +fn swap_delete(file: &Utf8Path) -> Result<()> { + let mut cmd = Command::new(PFEXEC); + cmd.arg(SWAP); + cmd.arg("-d"); + cmd.arg(file); + execute(cmd)?; + Ok(()) +} + +static ZPOOL_PREFIXES: [&'static str; 2] = ["oxp_", "oxi_"]; + +// Lists all zpools managed by omicron. 
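+//
+// In practice that means pools whose names begin with one of the
+// ZPOOL_PREFIXES above (`oxp_` or `oxi_`); pools such as the host's `rpool`
+// do not match and are left alone.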
+fn omicron_zpool_list() -> Result> { + let mut cmd = Command::new(ZPOOL); + cmd.args(["list", "-Hpo", "name"]); + let output = execute(cmd)?; + + Ok(String::from_utf8(output.stdout) + .context("Invalid zpool list output")? + .lines() + .filter_map(|line| { + let pool = line.trim().to_string(); + if ZPOOL_PREFIXES.iter().any(|pfx| pool.starts_with(pfx)) { + Some(pool) + } else { + None + } + }) + .collect()) +} + +fn svcadm_temporary_toggle(svc: &str, enable: bool) -> Result<()> { + let mut cmd = Command::new(PFEXEC); + cmd.arg("svcadm"); + if enable { + cmd.arg("enable"); + } else { + cmd.arg("disable"); + } + cmd.arg("-st"); + cmd.arg(svc); + execute(cmd)?; + Ok(()) +} + +fn zfs_list_internal(canmount: &str, mounted: &str) -> Result> { + let mut cmd = Command::new(ZFS); + cmd.args(["list", "-rHpo", "name,canmount,mounted"]); + let output = execute(cmd)?; + + Ok(String::from_utf8(output.stdout) + .context("Invalid zfs list output")? + .lines() + .filter_map(|line| { + let mut cols = line.trim().split_whitespace(); + let dataset = cols.next()?; + if !dataset.starts_with("oxi_") { + return None; + } + if canmount != cols.next()? { + return None; + } + if mounted != cols.next()? { + return None; + } + return Some(dataset.to_string()); + }) + .collect()) +} + +fn zfs_umount(dataset: &str) -> Result<()> { + let mut cmd = Command::new(PFEXEC); + cmd.args([ZFS, "umount"]); + cmd.arg(dataset); + execute(cmd)?; + Ok(()) +} + +fn zpool_destroy(pool: &str) -> Result<()> { + let mut cmd = Command::new(PFEXEC); + cmd.args([ZFS, "destroy", "-r"]); + cmd.arg(pool); + execute(cmd)?; + + // This can fail with an "already unmounted" error, which we opt to ignore. + // + // If it was important, then the zpool destroy command should fail below + // anyway. + let mut cmd = Command::new(PFEXEC); + cmd.args([ZFS, "unmount"]); + cmd.arg(pool); + if let Err(err) = execute(cmd) { + eprintln!( + "Failed to unmount {pool}: {err}, attempting to destroy anyway" + ); + } + + let mut cmd = Command::new(PFEXEC); + cmd.args([ZPOOL, "destroy"]); + cmd.arg(pool); + execute(cmd)?; + + Ok(()) +} + +fn delete_address(addr: &str) -> Result<()> { + let mut cmd = Command::new(PFEXEC); + cmd.arg(IPADM); + cmd.arg("delete-addr"); + cmd.arg(addr); + + let output = cmd.output().context("Failed to start ipadm")?; + if !output.status.success() { + let stderr = String::from_utf8(output.stderr)?; + if stderr.contains("Object not found") { + return Ok(()); + } + bail!("ipadm delete-addr failed: {} (stderr: {stderr})", output.status); + } + + Ok(()) +} + +fn delete_interface(iface: &str) -> Result<()> { + let mut cmd = Command::new(PFEXEC); + cmd.arg(IPADM); + cmd.arg("delete-if"); + cmd.arg(iface); + + let output = cmd.output().context("Failed to start ipadm")?; + if !output.status.success() { + let stderr = String::from_utf8(output.stderr)?; + if stderr.contains("Interface does not exist") { + return Ok(()); + } + bail!("ipadm delete-if failed: {} (stderr: {stderr})", output.status); + } + + Ok(()) +} + +fn delete_vnic(vnic: &str) -> Result<()> { + let mut cmd = Command::new(PFEXEC); + cmd.arg(DLADM); + cmd.arg("delete-vnic"); + cmd.arg(vnic); + + let output = cmd.output().context("Failed to start dladm")?; + if !output.status.success() { + let stderr = String::from_utf8(output.stderr)?; + if stderr.contains("invalid link name") { + return Ok(()); + } + bail!("dladm delete-vnic failed: {} (stderr: {stderr})", output.status); + } + + Ok(()) +} + +fn delete_simnet(simnet: &str) -> Result<()> { + let mut cmd = Command::new(PFEXEC); + 
cmd.arg(DLADM); + cmd.arg("delete-simnet"); + cmd.arg("-t"); + cmd.arg(simnet); + + let output = cmd.output().context("Failed to start dladm")?; + if !output.status.success() { + let stderr = String::from_utf8(output.stderr)?; + if stderr.contains("not found") { + return Ok(()); + } + bail!( + "dleadm delete-simnet failed: {} (stderr: {stderr})", + output.status + ); + } + + Ok(()) +} + +fn default_physical_link() -> Result { + let mut cmd = Command::new(DLADM); + cmd.args(["show-phys", "-p", "-o", "LINK"]); + let output = execute(cmd)?; + + Ok(String::from_utf8(output.stdout) + .context("Invalid dladm output")? + .lines() + .next() + .ok_or_else(|| anyhow!("Empty dladm output"))? + .to_string()) +} + +// Returns "true" if the VNIC exists. +// +// Returns false if it does not exist, or if we cannot tell. +fn vnic_exists(vnic: &str) -> bool { + let mut cmd = Command::new(DLADM); + cmd.args(["show-vnic", "-p", "-o", "LINK"]); + cmd.arg(vnic); + match execute(cmd) { + Ok(_) => true, + Err(_) => false, + } +} + +fn create_vnic(vnic: &str, physical_link: &str, mac: &str) -> Result<()> { + let mut cmd = Command::new(PFEXEC); + cmd.args([DLADM, "create-vnic", "-t"]); + cmd.arg(vnic); + cmd.arg("-l"); + cmd.arg(physical_link); + cmd.arg("-m"); + cmd.arg(mac); + execute(cmd)?; + Ok(()) +} + +fn create_simnet(simnet: &str) -> Result<()> { + let mut cmd = Command::new(PFEXEC); + cmd.args([DLADM, "create-simnet", "-t"]); + cmd.arg(simnet); + execute(cmd)?; + Ok(()) +} + +fn modify_simnet(simnet: &str, peer: &str) -> Result<()> { + let mut cmd = Command::new(PFEXEC); + cmd.args([DLADM, "modify-simnet", "-t", "-p"]); + cmd.arg(peer); + cmd.arg(simnet); + execute(cmd)?; + Ok(()) +} + +fn set_linkprop(link: &str, key: &str, value: &str) -> Result<()> { + let mut cmd = Command::new(PFEXEC); + cmd.args([DLADM, "set-linkprop", "-p"]); + cmd.arg(format!("{key}={value}")); + cmd.arg(link); + execute(cmd)?; + Ok(()) +} + +// Returns "true" if the simnet exists. +// +// Returns false if it does not exist, or if we cannot tell. +fn simnet_exists(simnet: &str) -> bool { + let mut cmd = Command::new(DLADM); + cmd.args(["show-simnet", "-p", "-o", "LINK"]); + cmd.arg(simnet); + match execute(cmd) { + Ok(_) => true, + Err(_) => false, + } +} + +fn zoneadm_list() -> Result> { + let mut cmd = Command::new(ZONEADM); + cmd.arg("list"); + let output = execute(cmd)?; + + Ok(String::from_utf8(output.stdout) + .context("Invalid zoneadm output")? 
+ .lines() + .map(|line| line.trim().to_owned()) + .collect()) +} diff --git a/dns-server/Cargo.toml b/dns-server/Cargo.toml index f91cbfafdb..237d2a2fbb 100644 --- a/dns-server/Cargo.toml +++ b/dns-server/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true camino.workspace = true @@ -33,11 +36,11 @@ uuid.workspace = true omicron-workspace-hack.workspace = true [dev-dependencies] +camino-tempfile.workspace = true expectorate.workspace = true omicron-test-utils.workspace = true openapiv3.workspace = true openapi-lint.workspace = true serde_json.workspace = true subprocess.workspace = true -tempdir.workspace = true trust-dns-resolver.workspace = true diff --git a/dns-server/src/storage.rs b/dns-server/src/storage.rs index 270cc500d1..21fb9ebdc6 100644 --- a/dns-server/src/storage.rs +++ b/dns-server/src/storage.rs @@ -783,6 +783,7 @@ mod test { use crate::storage::QueryError; use anyhow::Context; use camino::Utf8PathBuf; + use camino_tempfile::Utf8TempDir; use omicron_test_utils::dev::test_setup_log; use std::collections::BTreeSet; use std::collections::HashMap; @@ -796,7 +797,7 @@ mod test { /// our tests and helps make sure they get cleaned up properly. struct TestContext { logctx: dropshot::test_util::LogContext, - tmpdir: tempdir::TempDir, + tmpdir: Utf8TempDir, store: Store, db: Arc, } @@ -804,12 +805,9 @@ mod test { impl TestContext { fn new(test_name: &str) -> TestContext { let logctx = test_setup_log(test_name); - let tmpdir = tempdir::TempDir::new("dns-server-storage-test") + let tmpdir = Utf8TempDir::with_prefix("dns-server-storage-test") .expect("failed to create tmp directory for test"); - let storage_path = - Utf8PathBuf::from_path_buf(tmpdir.path().to_path_buf()).expect( - "failed to create Utf8PathBuf for test temporary directory", - ); + let storage_path = tmpdir.path().to_path_buf(); let db = Arc::new( sled::open(&storage_path).context("creating db").unwrap(), diff --git a/dns-server/tests/basic_test.rs b/dns-server/tests/basic_test.rs index 98cd1487ab..19666e82c1 100644 --- a/dns-server/tests/basic_test.rs +++ b/dns-server/tests/basic_test.rs @@ -3,6 +3,7 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. use anyhow::{Context, Result}; +use camino_tempfile::Utf8TempDir; use dns_service_client::{ types::{DnsConfigParams, DnsConfigZone, DnsRecord, Srv}, Client, @@ -332,7 +333,7 @@ struct TestContext { resolver: TokioAsyncResolver, dns_server: dns_server::dns_server::ServerHandle, dropshot_server: dropshot::HttpServer, - tmp: tempdir::TempDir, + tmp: Utf8TempDir, logctx: LogContext, } @@ -401,7 +402,7 @@ fn test_config( test_name: &str, ) -> Result< ( - tempdir::TempDir, + Utf8TempDir, dns_server::storage::Config, dropshot::ConfigDropshot, LogContext, @@ -409,10 +410,9 @@ fn test_config( anyhow::Error, > { let logctx = test_setup_log(test_name); - let tmp_dir = tempdir::TempDir::new("dns-server-test")?; + let tmp_dir = Utf8TempDir::with_prefix("dns-server-test")?; let mut storage_path = tmp_dir.path().to_path_buf(); storage_path.push("test"); - let storage_path = storage_path.to_str().unwrap().into(); let config_storage = dns_server::storage::Config { storage_path, keep_old_generations: 3 }; let config_dropshot = dropshot::ConfigDropshot { diff --git a/dns-server/tests/commands_test.rs b/dns-server/tests/commands_test.rs index 85a6f0e07e..8c812cb2aa 100644 --- a/dns-server/tests/commands_test.rs +++ b/dns-server/tests/commands_test.rs @@ -2,7 +2,7 @@ // License, v. 
2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use camino::Utf8PathBuf; +use camino_tempfile::Utf8TempDir; use dns_server::storage::Store; use omicron_test_utils::dev::test_cmds::assert_exit_code; use omicron_test_utils::dev::test_cmds::path_to_executable; @@ -17,10 +17,9 @@ const CMD_DNSADM: &str = env!("CARGO_BIN_EXE_dnsadm"); async fn test_dnsadm() { // Start a DNS server with some sample data. let logctx = test_setup_log("test_dnsadm"); - let tmpdir = tempdir::TempDir::new("test_dnsadm") + let tmpdir = Utf8TempDir::with_prefix("test_dnsadm") .expect("failed to create tmp directory for test"); - let storage_path = Utf8PathBuf::from_path_buf(tmpdir.path().to_path_buf()) - .expect("failed to create Utf8PathBuf for test temporary directory"); + let storage_path = tmpdir.path().to_path_buf(); let store = Store::new( logctx.log.clone(), diff --git a/docs/boundary-services-a-to-z.adoc b/docs/boundary-services-a-to-z.adoc index e4c47ac7f9..3f3c0237dd 100644 --- a/docs/boundary-services-a-to-z.adoc +++ b/docs/boundary-services-a-to-z.adoc @@ -7,12 +7,10 @@ The virtual hardware making up SoftNPU is depicted in the diagram below. image::plumbing.png[] -The `softnpu` zone will be configured and launched during the -`create_virtual_hardware.sh` script. +The `softnpu` zone will be configured and launched during +`cargo xtask virtual-hardware create`. Once the control plane is running, `softnpu` can be configured via `dendrite` using the `swadm` binary located in the `oxz_switch` zone. This is not necessary under normal operation, as the switch state will be managed automatically by the -control plane and networking daemons. An example script is provided in -`tools/scrimlet/softnpu-init.sh`. This script should work without modification -for basic development setups, but feel free to tweak it as needed. +control plane and networking daemons. diff --git a/docs/crdb-upgrades.adoc b/docs/crdb-upgrades.adoc new file mode 100644 index 0000000000..eecfa9194e --- /dev/null +++ b/docs/crdb-upgrades.adoc @@ -0,0 +1,115 @@ +:showtitle: +:numbered: +:toc: left + += So You Want To Upgrade CockroachDB + +CockroachDB has a number of overlapping things called "versions": + +1. The `cockroachdb` executable is built from a particular version, such + as v22.2.19. We'll call this the *executable version*. +2. The executable version is made up of three components: a number + representing the release year, a number representing which release + it was within that year, and a patch release number. The first two + components constitute the *major version* (such as v22.2). +3. There is also a version for the on-disk data format that CockroachDB + writes and manages. This is called the *cluster version*. When + you create a new cluster while running major version v22.2, it + is initialized at cluster version `22.2`. Each major version of + CockroachDB can operate on both its own associated cluster version, + and the previous major version's cluster version, to facilitate + rolling upgrades. + +By default the cluster version is upgraded and _finalized_ once +all nodes in the cluster have upgraded to a new major version +(the CockroachDB docs refer to this as "auto-finalization"). +<> However, it is not possible to downgrade the +cluster version. To mitigate the risk of one-way upgrades, we use a +CockroachDB cluster setting named `cluster.preserve_downgrade_option` +to prevent auto-finalization and... 
preserve our option to downgrade in +a future release, as the option name would suggest. We then perform an +upgrade to the next major version across at least two releases, which we +refer to as a tick-tock cycle: + +- In a *tick release*, we upgrade the executable versions across the + cluster. +- In a *tock release*, we release our downgrade option and allow + CockroachDB to perform the cluster upgrade automatically. When the + upgrade is complete, we configure the "preserve downgrade option" + setting again to prepare for the next tick release. + +(This is not strictly speaking a "tick-tock" cycle, because any number +of releases may occur between a tick and a tock, and between a tock and +a tick, but they must occur in that order.) + +== Process for a tick release + +. Determine whether to move to the next major release of CockroachDB. + We have generally avoided being early adopters of new major releases + and prefer to see the rate of https://www.cockroachlabs.com/docs/advisories/[technical + advisories] that solely affect the new major version drop off. (This + generally won't stop you from working on building and testing the + next major version, however, as the build process sometimes changes + significantly from release to release.) +. Build a new version of CockroachDB for illumos. You will want to + update the https://github.com/oxidecomputer/garbage-compactor/tree/master/cockroach[build + scripts in garbage-compactor]. +. In your local Omicron checkout on a Helios machine, unpack the + resulting tarball to `out/cockroachdb`, and update `tools/cockroachdb_version` + to the version you've built. +. Add an enum variant for the new version to `CockroachDbClusterVersion` + in `nexus/types/src/deployment/planning_input.rs`, and change the + associated constant `NEWLY_INITIALIZED` to that value. +. Run the test suite, which should catch any unexpected SQL + compatibility issues between releases and help validate that your + build works. + * You will need to run the `test_omdb_success_cases` test from + omicron-omdb with `EXPECTORATE=overwrite`; this file contains the + expected output of various omdb commands, including a fingerprint of + CockroachDB's cluster state. +. Submit a PR for your changes to garbage-compactor; when merged, + publish the final build to the `oxide-cockroachdb-build` S3 bucket. +. Update `tools/cockroachdb_checksums`. For non-illumos checksums, use + the https://www.cockroachlabs.com/docs/releases/[official releases] + matching the version you built. +. Submit a PR with your changes (including `tools/cockroachdb_version` + and `tools/cockroachdb_checksums`) to Omicron. + +== Process for a tock release + +. Change the associated constant `CockroachDbClusterVersion::POLICY` in + `nexus/types/src/deployment/planning_input.rs` from the previous major + version to the current major version. + +== What Nexus does + +The Reconfigurator collects the current cluster version, and compares +this to the desired cluster version set by policy (which we update in +tock releases). + +If they do not match, Nexus ensures the +`cluster.preserve_downgrade_option` setting is the default value (an +empty string), which allows CockroachDB to perform the upgrade to the +desired version. The end result of this upgrade is that the current and +desired cluster versions will match.
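The behaviour described in this section reduces to a small comparison between the cluster version CockroachDB reports and the version desired by policy. The following is a minimal sketch of that decision only, not the actual Nexus code: the `ClusterState` struct, the `preserve_downgrade_setting` function, and the literal version strings are hypothetical stand-ins.

[source,rust]
----
/// Hypothetical snapshot of the values Nexus reads from CockroachDB
/// (`SHOW CLUSTER SETTING version` and `cluster.preserve_downgrade_option`).
struct ClusterState {
    current_version: String,
    preserve_downgrade: String,
}

/// Returns the value to write back to `cluster.preserve_downgrade_option`,
/// or `None` if no change is needed.
fn preserve_downgrade_setting(
    state: &ClusterState,
    desired_version: &str,
) -> Option<String> {
    if state.current_version != desired_version {
        // Versions differ: reset the option to its default (empty string) so
        // CockroachDB may finalize the upgrade to the desired cluster version.
        (!state.preserve_downgrade.is_empty()).then(String::new)
    } else if state.preserve_downgrade != state.current_version {
        // Versions match: pin the option to the current version so the next
        // executable upgrade does not auto-finalize the cluster version.
        Some(state.current_version.clone())
    } else {
        None
    }
}

fn main() {
    let state = ClusterState {
        current_version: "22.2".to_string(),
        preserve_downgrade: String::new(),
    };
    // Policy also wants 22.2, so the option is pinned to "22.2".
    assert_eq!(
        preserve_downgrade_setting(&state, "22.2"),
        Some("22.2".to_string())
    );
}
----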
+ +When they match, Nexus ensures that the +`cluster.preserve_downgrade_option` setting is set to the current +cluster version, to prevent automatic cluster upgrades when CockroachDB +is next upgraded to a new major version. + +Because blueprints are serialized and continue to run even if the +underlying state has changed, Nexus needs to ensure its view of the +world is not out-of-date. Nexus saves a fingerprint of the current +cluster state in the blueprint (intended to be opaque, but ultimately +a hash of the cluster version and executable version of the node we're +currently connected to). When setting CockroachDB options, it verifies +this fingerprint; if it no longer matches, the operation fails with an +error instead of setting the option. + +[bibliography] +== External References + +- [[[crdb-tn-upgrades]]] Cockroach Labs. Cluster versions and upgrades. + November 2023. + https://github.com/cockroachdb/cockroach/blob/53262957399e6e0fccd63c91add57a510b86dc9a/docs/tech-notes/version_upgrades.md diff --git a/docs/how-to-run.adoc b/docs/how-to-run.adoc index e286fe3730..c904dca757 100644 --- a/docs/how-to-run.adoc +++ b/docs/how-to-run.adoc @@ -199,46 +199,40 @@ The rest of these instructions assume that you're building and running Omicron o The Sled Agent supports operation on both: * a Gimlet (i.e., real Oxide hardware), and -* an ordinary PC that's been set up to look like a Gimlet using the `./tools/create_virtual_hardware.sh` script (described next). +* an ordinary PC running illumos that's been set up to look like a Gimlet using `cargo xtask virtual-hardware create` (described next). This script also sets up a "softnpu" zone to implement Boundary Services. SoftNPU simulates the Tofino device that's used in real systems. Just like Tofino, it can implement sled-to-sled networking, but that's beyond the scope of this doc. -If you're running on a PC and using either of the networking configurations mentioned above, you can usually just run this script with a few environment vaiables set. These environment variables tell SoftNPU about your local network. You will need the gateway for your network as well as the whole range of IPs that you've carved out for the Oxide system (see <<_external_networking>> above): +If you're running on a PC and using either of the networking configurations mentioned above, you can usually just run this command with a few arguments set. These arguments tell SoftNPU about your local network.
You will need the gateway for your network as well as the whole range of IPs that you've carved out for the Oxide system (see <<_external_networking>> above): [source,bash] ---- -export GATEWAY_IP=192.168.1.199 # The gateway IP address for your local network (see above) -export PXA_START=192.168.1.20 # The first IP address your Oxide cluster can use (see above) -export PXA_END=192.168.1.40 # The last IP address your Oxide cluster can use (see above) +cargo xtask virtual-hardware create + --gateway-ip 192.168.1.199 # The gateway IP address for your local network (see above) + --pxa-start 192.168.1.20 # The first IP address your Oxide cluster can use (see above) + --pxa-end 192.168.1.40 # The last IP address your Oxide cluster can use (see above) ---- -If you're using the fake sled-local external network mentioned above, then you'll need to set PHYSICAL_LINK: +If you're using the fake sled-local external network mentioned above, then you'll need to set `--physical-link`: [source,bash] ---- -export PHYSICAL_LINK=fake_external_stub0 # The etherstub for the fake external network + --physical-link fake_external_stub0 # The etherstub for the fake external network ---- If you're using an existing external network, you likely don't need to specify anything here because the script will choose one. You can specify a particular one if you want, though: [source,bash] ---- -export PHYSICAL_LINK=igb0 # The physical link for your external network. + --physical-link igb0 # The physical link for your external network. ---- -Having set those variables, you're ready to run: +If you're running on a bench Gimlet, you may not need (or want) most of what `cargo xtask virtual-hardware create` does, but you do still need SoftNPU. You can tweak what resources are created with the `--scope` flag. -[source,bash] ----- -$ pfexec ./tools/create_virtual_hardware.sh ----- - -If you're running on a Gimlet, you don't need (or want) most of what `create_virtual_hardware.sh` does, but you do still need SoftNPU. You'll have to look at the script and run that part by hand. - -Later, you can clean up the resources created by `create_virtual_hardware.sh` with: +Later, you can clean up the resources created by `cargo xtask virtual-hardware create` with: ---- -$ pfexec ./tools/destroy_virtual_hardware.sh +$ cargo xtask virtual-hardware destroy ---- If you've done all this before and Omicron is still running, these resources will be in use and this script will fail. Uninstall Omicron (see below) before running this script. @@ -702,7 +696,7 @@ Once all the Omicron services are uninstalled, you can also remove the previousl [source,console] ---- -$ pfexec ./tools/destroy_virtual_hardware.sh +$ cargo xtask virtual-hardware destroy ---- == More information diff --git a/docs/releng.adoc b/docs/releng.adoc new file mode 100644 index 0000000000..31252c9a89 --- /dev/null +++ b/docs/releng.adoc @@ -0,0 +1,81 @@ +:showtitle: +:numbered: +:toc: left + += Oxide Release Engineering + +Omicron is the Oxide control plane, and thus brings together all of the +various components outside of this repo that make up the software on the +product. 
This includes (but definitely isn't limited to): + +- https://github.com/oxidecomputer/propolis[Propolis], our hypervisor +- https://github.com/oxidecomputer/helios[Helios], our host operating + system +- https://github.com/oxidecomputer/crucible[Crucible], our block storage + service +- https://github.com/oxidecomputer/maghemite[Maghemite], our switch + control software and routing protocol +- https://github.com/oxidecomputer/hubris[Hubris], our embedded + microcontroller operating system used on the root of trust and service + processors +- https://github.com/oxidecomputer/console[The web console] + +Each of these has their own build processes that produce some sort of +usable artifact, whether that is an illumos zone or a tarball of static +assets. + +The release engineering process builds the control plane and combines +it with the many external artifacts into a final artifact -- a Zip +archive of a TUF repository -- that contains everything necessary for +the product to operate. This process is run on each commit to ensure it +is always functional. You can also run the process locally with +`cargo xtask releng`. + +== Process overview + +`cargo xtask releng` performs all of these steps in parallel (with +the temporary exception of artifact downloads handled by +`tools/install_builder_prerequisites.sh`): + +. `tools/install_builder_prerequisites.sh` downloads several artifacts + (via the `tools/ci_*` scripts) that are necessary to build Omicron; + many of these are ultimately packaged by `omicron-package`. These + scripts are generally controlled by the `tools/*_version` and + `tools/*_checksums` files. +. `cargo xtask releng` downloads the current root of trust and + service processor images built by the Hubris release engineering + process, which are signed in https://github.com/oxidecomputer/permission-slip[Permission Slip]. + This is controlled by the `tools/permslip_production` and + `tools/permslip_staging` files. +. `omicron-package` is the heart of the release engineering process; it + reads the manifest from `package-manifest.toml`, runs an appropriate + `cargo build` command, downloads any additional artifacts, and + packages them into a series of illumos zones and tarballs. (It can + also manage installation and uninstallation of these zones; see + how-to-run.adoc.) +. Some of the illumos zones are distributed with the OS images (because + they are reliant on OS-specific APIs), and some are distributed + separately. `cargo xtask releng` unpacks the zones for the OS image + into a temporary directory that is overlaid onto the OS image in the + next step. +. `helios-build` from the https://github.com/oxidecomputer/helios[Helios] + repository then builds two images: the *host* image, which is used + during normal operation, and the *trampoline* (or *recovery*) image, + which is used to update the host image. +. Finally, `cargo xtask releng` generates a Zip archive of a + https://theupdateframework.io/[TUF] repository, which contains the + host and trampoline OS images, the ROT and SP images, and all the + illumos zones that are not installed into the OS images. This archive + can be uploaded to Wicket to perform an upgrade of the rack while the + control plane is not running. + +== Beyond `cargo xtask releng` + +Currently we use TUF repos generated in CI (by `cargo xtask releng`) +directly. These repositories use a generated throwaway key to sign +the TUF metadata. 
In the limit, we will have a process to sign release +builds of these TUF repositories, which will be available as a Zip +archive for an operator to upload to Nexus or Wicket, as well as an +HTTPS repository for racks connected to the internet or with access to +a proxy to perform automatic updates. The exact nature of the PKI and +trust policies for each of these update flows is under discussion. diff --git a/end-to-end-tests/Cargo.toml b/end-to-end-tests/Cargo.toml index 0fb9efd5cc..1102094b61 100644 --- a/end-to-end-tests/Cargo.toml +++ b/end-to-end-tests/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow = { workspace = true, features = ["backtrace"] } async-trait.workspace = true @@ -16,8 +19,8 @@ omicron-test-utils.workspace = true oxide-client.workspace = true rand.workspace = true reqwest = { workspace = true, features = ["cookies"] } -russh = "0.42.0" -russh-keys = "0.42.0" +russh = "0.43.0" +russh-keys = "0.43.0" serde.workspace = true serde_json.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } diff --git a/end-to-end-tests/src/instance_launch.rs b/end-to-end-tests/src/instance_launch.rs index 377fef4c0b..be30f89492 100644 --- a/end-to-end-tests/src/instance_launch.rs +++ b/end-to-end-tests/src/instance_launch.rs @@ -303,10 +303,9 @@ impl russh::client::Handler for SshClient { type Error = anyhow::Error; async fn check_server_key( - self, + &mut self, server_public_key: &PublicKey, - ) -> Result<(Self, bool), Self::Error> { - let b = &self.host_key == server_public_key; - Ok((self, b)) + ) -> Result { + Ok(&self.host_key == server_public_key) } } diff --git a/flake.lock b/flake.lock index f2dfc1b532..7c6acc0815 100644 --- a/flake.lock +++ b/flake.lock @@ -20,11 +20,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1706371002, - "narHash": "sha256-dwuorKimqSYgyu8Cw6ncKhyQjUDOyuXoxDTVmAXq88s=", + "lastModified": 1712791164, + "narHash": "sha256-3sbWO1mbpWsLepZGbWaMovSO7ndZeFqDSdX0hZ9nVyw=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "c002c6aa977ad22c60398daaa9be52f2203d0006", + "rev": "1042fd8b148a9105f3c0aca3a6177fd1d9360ba5", "type": "github" }, "original": { @@ -48,11 +48,11 @@ ] }, "locked": { - "lastModified": 1706634984, - "narHash": "sha256-xn7lGPE8gRGBe3Lt8ESoN/uUHm7IrbiV7siupwjHX1o=", + "lastModified": 1712888034, + "narHash": "sha256-SmBeT3oxdwOzheSfxZmk+3xmv98Z3zlzjlnl9nBdOIE=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "883b84c426107a8ec020e7124f263d7c35a5bb9f", + "rev": "96fbdc73dec8eaa5a9d4a9b307b75c9a856e5dec", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 8897d9428d..6828577403 100644 --- a/flake.nix +++ b/flake.nix @@ -224,6 +224,7 @@ buildInputs = [ glibc gcc-unwrapped + openssl.dev ]; installPhase = @@ -259,7 +260,7 @@ # can't give Nix those hashes and must instead determine it ourselves. # this means that we will have to update this SHA if the clickhouse # version changes. 
- sha256 = "1lgxwh67apgl386ilpg0iy5xkyz12q4lgnz08zswjbxv88ra0qxj"; + sha256 = "0wx8w9sdms5hsc9f835ivsissf15wjzdb9cvxr65xdi384i9pkzx"; src = builtins.fetchurl { inherit sha256; @@ -428,10 +429,3 @@ }; }; } - - - - - - - diff --git a/gateway-cli/Cargo.toml b/gateway-cli/Cargo.toml index 2412bf950f..22aa09fe92 100644 --- a/gateway-cli/Cargo.toml +++ b/gateway-cli/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true clap.workspace = true diff --git a/gateway-test-utils/Cargo.toml b/gateway-test-utils/Cargo.toml index 81b7686eb2..08e22228fe 100644 --- a/gateway-test-utils/Cargo.toml +++ b/gateway-test-utils/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] camino.workspace = true dropshot.workspace = true diff --git a/gateway/Cargo.toml b/gateway/Cargo.toml index 450c4b445e..2ddd9421b7 100644 --- a/gateway/Cargo.toml +++ b/gateway/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true base64.workspace = true diff --git a/gateway/src/http_entrypoints.rs b/gateway/src/http_entrypoints.rs index b5a765a8a8..727ba0950d 100644 --- a/gateway/src/http_entrypoints.rs +++ b/gateway/src/http_entrypoints.rs @@ -298,6 +298,49 @@ struct UpdatePreparationProgress { total: u32, } +/// Result of reading an SP sensor. +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + JsonSchema, +)] +pub struct SpSensorReading { + /// SP-centric timestamp of when `result` was recorded from this sensor. + /// + /// Currently this value represents "milliseconds since the last SP boot" + /// and is primarily useful as a delta between sensors on this SP (assuming + /// no reboot in between). The meaning could change with future SP releases. + pub timestamp: u64, + /// Value (or error) from the sensor. + pub result: SpSensorReadingResult, +} + +/// Single reading (or error) from an SP sensor. +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Deserialize, + Serialize, + JsonSchema, +)] +#[serde(tag = "kind", rename_all = "snake_case")] +pub enum SpSensorReadingResult { + Success { value: f32 }, + DeviceOff, + DeviceError, + DeviceNotPresent, + DeviceUnavailable, + DeviceTimeout, +} + /// List of components from a single SP. #[derive(Debug, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] pub struct SpComponentList { @@ -535,6 +578,16 @@ struct PathSp { sp: SpIdentifier, } +#[derive(Deserialize, JsonSchema)] +struct PathSpSensorId { + /// ID for the SP that the gateway service translates into the appropriate + /// port for communicating with the given SP. + #[serde(flatten)] + sp: SpIdentifier, + /// ID for the sensor on the SP. + sensor_id: u32, +} + #[derive(Serialize, Deserialize, JsonSchema)] struct PathSpComponent { /// ID for the SP that the gateway service translates into the appropriate @@ -625,6 +678,28 @@ async fn sp_startup_options_set( Ok(HttpResponseUpdatedNoContent {}) } +/// Read the current value of a sensor by ID +/// +/// Sensor IDs come from the host topo tree. 
+#[endpoint { + method = GET, + path = "/sp/{type}/{slot}/sensor/{sensor_id}/value", +}] +async fn sp_sensor_read_value( + rqctx: RequestContext>, + path: Path, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let PathSpSensorId { sp, sensor_id } = path.into_inner(); + let sp_id = sp.into(); + let sp = apictx.mgmt_switch.sp(sp_id)?; + let value = sp.read_sensor_value(sensor_id).await.map_err(|err| { + SpCommsError::SpCommunicationFailed { sp: sp_id, err } + })?; + + Ok(HttpResponseOk(value.into())) +} + /// List components of an SP /// /// A component is a distinct entity under an SP's direct control. This lists @@ -1511,6 +1586,7 @@ pub fn api() -> GatewayApiDescription { api.register(sp_power_state_set)?; api.register(sp_installinator_image_id_set)?; api.register(sp_installinator_image_id_delete)?; + api.register(sp_sensor_read_value)?; api.register(sp_component_list)?; api.register(sp_component_get)?; api.register(sp_component_caboose_get)?; diff --git a/gateway/src/http_entrypoints/conversions.rs b/gateway/src/http_entrypoints/conversions.rs index a4aef7425e..df3d1c5436 100644 --- a/gateway/src/http_entrypoints/conversions.rs +++ b/gateway/src/http_entrypoints/conversions.rs @@ -20,6 +20,8 @@ use super::SpComponentPresence; use super::SpIdentifier; use super::SpIgnition; use super::SpIgnitionSystemType; +use super::SpSensorReading; +use super::SpSensorReadingResult; use super::SpState; use super::SpType; use super::SpUpdateStatus; @@ -40,6 +42,31 @@ pub(super) fn component_from_str(s: &str) -> Result { }) } +impl From for SpSensorReading { + fn from(value: gateway_messages::SensorReading) -> Self { + Self { + timestamp: value.timestamp, + result: match value.value { + Ok(value) => SpSensorReadingResult::Success { value }, + Err(err) => err.into(), + }, + } + } +} + +impl From for SpSensorReadingResult { + fn from(value: gateway_messages::SensorDataMissing) -> Self { + use gateway_messages::SensorDataMissing; + match value { + SensorDataMissing::DeviceOff => Self::DeviceOff, + SensorDataMissing::DeviceError => Self::DeviceError, + SensorDataMissing::DeviceNotPresent => Self::DeviceNotPresent, + SensorDataMissing::DeviceUnavailable => Self::DeviceUnavailable, + SensorDataMissing::DeviceTimeout => Self::DeviceTimeout, + } + } +} + impl From for SpUpdateStatus { fn from(status: UpdateStatus) -> Self { match status { diff --git a/illumos-utils/Cargo.toml b/illumos-utils/Cargo.toml index e4a99095fd..3d17745b7e 100644 --- a/illumos-utils/Cargo.toml +++ b/illumos-utils/Cargo.toml @@ -5,6 +5,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true async-trait.workspace = true @@ -19,8 +22,10 @@ ipnetwork.workspace = true libc.workspace = true macaddr.workspace = true omicron-common.workspace = true +omicron-uuid-kinds.workspace = true oxide-vpc.workspace = true oxlog.workspace = true +oxnet.workspace = true schemars.workspace = true serde.workspace = true slog.workspace = true @@ -28,6 +33,7 @@ smf.workspace = true thiserror.workspace = true tokio.workspace = true uuid.workspace = true +whoami.workspace = true zone.workspace = true # only enabled via the `testing` feature @@ -46,6 +52,3 @@ toml.workspace = true [features] # Enable to generate MockZones testing = ["mockall"] -# Useful for tests that want real functionality and ability to run without -# pfexec -tmp_keypath = [] diff --git a/illumos-utils/src/dumpadm.rs b/illumos-utils/src/dumpadm.rs index e37874f795..5a8247041b 100644 --- 
a/illumos-utils/src/dumpadm.rs +++ b/illumos-utils/src/dumpadm.rs @@ -1,11 +1,10 @@ use crate::{execute, ExecutionError}; -use byteorder::{LittleEndian, ReadBytesExt}; use camino::Utf8PathBuf; use std::ffi::OsString; -use std::fs::File; -use std::io::{Seek, SeekFrom}; use std::os::unix::ffi::OsStringExt; use std::process::Command; +use tokio::fs::File; +use tokio::io::{AsyncReadExt, AsyncSeekExt, SeekFrom}; pub const DUMPADM: &str = "/usr/sbin/dumpadm"; pub const SAVECORE: &str = "/usr/bin/savecore"; @@ -48,11 +47,11 @@ pub enum DumpHdrError { /// been a core written there at all, Err(DumpHdrError::InvalidVersion) if the /// dumphdr isn't the one we know how to handle (10), or other variants of /// DumpHdrError if there are I/O failures while reading the block device. -pub fn dump_flag_is_valid( +pub async fn dump_flag_is_valid( dump_slice: &Utf8PathBuf, ) -> Result { - let mut f = File::open(dump_slice).map_err(DumpHdrError::OpenRaw)?; - f.seek(SeekFrom::Start(DUMP_OFFSET)).map_err(DumpHdrError::Seek)?; + let mut f = File::open(dump_slice).await.map_err(DumpHdrError::OpenRaw)?; + f.seek(SeekFrom::Start(DUMP_OFFSET)).await.map_err(DumpHdrError::Seek)?; // read the first few fields of dumphdr. // typedef struct dumphdr { @@ -62,21 +61,18 @@ pub fn dump_flag_is_valid( // /* [...] */ // } - let magic = - f.read_u32::().map_err(DumpHdrError::ReadMagic)?; - if magic != DUMP_MAGIC { + let magic = f.read_u32().await.map_err(DumpHdrError::ReadMagic)?; + if magic != DUMP_MAGIC.to_be() { return Err(DumpHdrError::InvalidMagic(magic)); } - let version = - f.read_u32::().map_err(DumpHdrError::ReadVersion)?; - if version != DUMP_VERSION { + let version = f.read_u32().await.map_err(DumpHdrError::ReadVersion)?; + if version != DUMP_VERSION.to_be() { return Err(DumpHdrError::InvalidVersion(version)); } - let flags = - f.read_u32::().map_err(DumpHdrError::ReadFlags)?; - Ok((flags & DF_VALID) != 0) + let flags = f.read_u32().await.map_err(DumpHdrError::ReadFlags)?; + Ok((flags & DF_VALID.to_be()) != 0) } pub enum DumpContentType { diff --git a/illumos-utils/src/lib.rs b/illumos-utils/src/lib.rs index 550170b0f2..d041c866b0 100644 --- a/illumos-utils/src/lib.rs +++ b/illumos-utils/src/lib.rs @@ -24,6 +24,7 @@ pub mod route; pub mod running_zone; pub mod scf; pub mod svc; +pub mod svcadm; pub mod vmm_reservoir; pub mod zfs; pub mod zone; diff --git a/illumos-utils/src/opte/firewall_rules.rs b/illumos-utils/src/opte/firewall_rules.rs index 78d2ec0b73..1df0e7421a 100644 --- a/illumos-utils/src/opte/firewall_rules.rs +++ b/illumos-utils/src/opte/firewall_rules.rs @@ -7,7 +7,6 @@ use crate::opte::params::VpcFirewallRule; use crate::opte::Vni; use macaddr::MacAddr6; -use omicron_common::api::external::IpNet; use omicron_common::api::external::VpcFirewallRuleAction; use omicron_common::api::external::VpcFirewallRuleDirection; use omicron_common::api::external::VpcFirewallRuleProtocol; @@ -27,6 +26,7 @@ use oxide_vpc::api::Ipv6PrefixLen; use oxide_vpc::api::Ports; use oxide_vpc::api::ProtoFilter; use oxide_vpc::api::Protocol; +use oxnet::IpNet; trait FromVpcFirewallRule { fn action(&self) -> FirewallAction; @@ -62,29 +62,25 @@ impl FromVpcFirewallRule for VpcFirewallRule { fn hosts(&self) -> Vec
{ match self.filter_hosts { - Some(ref hosts) if hosts.len() > 0 => hosts + Some(ref hosts) if !hosts.is_empty() => hosts .iter() .map(|host| match host { - HostIdentifier::Ip(IpNet::V4(net)) - if net.prefix() == 32 => - { - Address::Ip(IpAddr::Ip4(net.ip().into())) + HostIdentifier::Ip(IpNet::V4(net)) if net.is_host_net() => { + Address::Ip(IpAddr::Ip4(net.addr().into())) } HostIdentifier::Ip(IpNet::V4(net)) => { Address::Subnet(IpCidr::Ip4(Ipv4Cidr::new( - net.ip().into(), - Ipv4PrefixLen::new(net.prefix()).unwrap(), + net.addr().into(), + Ipv4PrefixLen::new(net.width()).unwrap(), ))) } - HostIdentifier::Ip(IpNet::V6(net)) - if net.prefix() == 128 => - { - Address::Ip(IpAddr::Ip6(net.ip().into())) + HostIdentifier::Ip(IpNet::V6(net)) if net.is_host_net() => { + Address::Ip(IpAddr::Ip6(net.addr().into())) } HostIdentifier::Ip(IpNet::V6(net)) => { Address::Subnet(IpCidr::Ip6(Ipv6Cidr::new( - net.ip().into(), - Ipv6PrefixLen::new(net.prefix()).unwrap(), + net.addr().into(), + Ipv6PrefixLen::new(net.width()).unwrap(), ))) } HostIdentifier::Vpc(vni) => { @@ -98,7 +94,7 @@ impl FromVpcFirewallRule for VpcFirewallRule { fn ports(&self) -> Ports { match self.filter_ports { - Some(ref ports) if ports.len() > 0 => Ports::PortList( + Some(ref ports) if !ports.is_empty() => Ports::PortList( ports .iter() .flat_map(|range| { @@ -117,7 +113,7 @@ impl FromVpcFirewallRule for VpcFirewallRule { fn protos(&self) -> Vec { match self.filter_protocols { - Some(ref protos) if protos.len() > 0 => protos + Some(ref protos) if !protos.is_empty() => protos .iter() .map(|proto| { ProtoFilter::Proto(match proto { diff --git a/illumos-utils/src/opte/illumos.rs b/illumos-utils/src/opte/illumos.rs index 527172b976..90bf0bb16a 100644 --- a/illumos-utils/src/opte/illumos.rs +++ b/illumos-utils/src/opte/illumos.rs @@ -92,7 +92,7 @@ pub fn initialize_xde_driver( const MESSAGE: &str = concat!( "There must be at least two underlay NICs for the xde ", "driver to operate. These are currently created by ", - "`./tools/create_virtual_hardware.sh`. Please ensure that ", + "`cargo xtask virtual-hardware create`. Please ensure that ", "script has been run, and that two VNICs named `net{0,1}` ", "exist on the system." ); diff --git a/illumos-utils/src/opte/params.rs b/illumos-utils/src/opte/params.rs index df1f33cb92..17c61d680f 100644 --- a/illumos-utils/src/opte/params.rs +++ b/illumos-utils/src/opte/params.rs @@ -31,26 +31,16 @@ pub struct VpcFirewallRule { } /// A mapping from a virtual NIC to a physical host -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] -pub struct SetVirtualNetworkInterfaceHost { +#[derive( + Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq, Hash, +)] +pub struct VirtualNetworkInterfaceHost { pub virtual_ip: IpAddr, pub virtual_mac: external::MacAddr, pub physical_host_ip: Ipv6Addr, pub vni: external::Vni, } -/// The data needed to identify a virtual IP for which a sled maintains an OPTE -/// virtual-to-physical mapping such that that mapping can be deleted. -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] -pub struct DeleteVirtualNetworkInterfaceHost { - /// The virtual IP whose mapping should be deleted. - pub virtual_ip: IpAddr, - - /// The VNI for the network containing the virtual IP whose mapping should - /// be deleted. 
- pub vni: external::Vni, -} - /// DHCP configuration for a port /// /// Not present here: Hostname (DHCPv4 option 12; used in DHCPv6 option 39); we diff --git a/illumos-utils/src/opte/port_manager.rs b/illumos-utils/src/opte/port_manager.rs index 2b2f622070..726aa01a2a 100644 --- a/illumos-utils/src/opte/port_manager.rs +++ b/illumos-utils/src/opte/port_manager.rs @@ -5,8 +5,7 @@ //! Manager for all OPTE ports on a Helios system use crate::opte::opte_firewall_rules; -use crate::opte::params::DeleteVirtualNetworkInterfaceHost; -use crate::opte::params::SetVirtualNetworkInterfaceHost; +use crate::opte::params::VirtualNetworkInterfaceHost; use crate::opte::params::VpcFirewallRule; use crate::opte::Error; use crate::opte::Gateway; @@ -141,7 +140,7 @@ impl PortManager { ); return Err(Error::InvalidPortIpConfig); }; - let ports = snat.first_port..=snat.last_port; + let ports = snat.port_range(); Some($snat_t { external_ip: snat_ip.into(), ports }) } None => None, @@ -428,7 +427,7 @@ impl PortManager { ); return Err(Error::InvalidPortIpConfig); }; - let ports = snat.first_port..=snat.last_port; + let ports = snat.port_range(); Some($snat_t { external_ip: snat_ip.into(), ports }) } None => None, @@ -570,10 +569,62 @@ impl PortManager { Ok(()) } + #[cfg(target_os = "illumos")] + pub fn list_virtual_nics( + &self, + ) -> Result, Error> { + use macaddr::MacAddr6; + use opte_ioctl::OpteHdl; + + let hdl = OpteHdl::open(OpteHdl::XDE_CTL)?; + let v2p = + hdl.dump_v2p(&oxide_vpc::api::DumpVirt2PhysReq { unused: 99 })?; + let mut mappings: Vec<_> = vec![]; + + for mapping in v2p.mappings { + let vni = mapping + .vni + .as_u32() + .try_into() + .expect("opte VNI should be 24 bits"); + + for entry in mapping.ip4 { + mappings.push(VirtualNetworkInterfaceHost { + virtual_ip: IpAddr::V4(entry.0.into()), + virtual_mac: MacAddr6::from(entry.1.ether.bytes()).into(), + physical_host_ip: entry.1.ip.into(), + vni, + }); + } + + for entry in mapping.ip6 { + mappings.push(VirtualNetworkInterfaceHost { + virtual_ip: IpAddr::V6(entry.0.into()), + virtual_mac: MacAddr6::from(entry.1.ether.bytes()).into(), + physical_host_ip: entry.1.ip.into(), + vni, + }); + } + } + + Ok(mappings) + } + + #[cfg(not(target_os = "illumos"))] + pub fn list_virtual_nics( + &self, + ) -> Result, Error> { + info!( + self.inner.log, + "Listing virtual nics (ignored)"; + ); + Ok(vec![]) + } + #[cfg(target_os = "illumos")] pub fn set_virtual_nic_host( &self, - mapping: &SetVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { use opte_ioctl::OpteHdl; @@ -600,7 +651,7 @@ impl PortManager { #[cfg(not(target_os = "illumos"))] pub fn set_virtual_nic_host( &self, - mapping: &SetVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { info!( self.inner.log, @@ -613,20 +664,41 @@ impl PortManager { #[cfg(target_os = "illumos")] pub fn unset_virtual_nic_host( &self, - _mapping: &DeleteVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { - // TODO requires https://github.com/oxidecomputer/opte/issues/332 + use opte_ioctl::OpteHdl; + + info!( + self.inner.log, + "Clearing mapping of virtual NIC to physical host"; + "mapping" => ?&mapping, + ); + + let hdl = OpteHdl::open(OpteHdl::XDE_CTL)?; + hdl.clear_v2p(&oxide_vpc::api::ClearVirt2PhysReq { + vip: mapping.virtual_ip.into(), + phys: oxide_vpc::api::PhysNet { + ether: oxide_vpc::api::MacAddr::from( + (*mapping.virtual_mac).into_array(), + ), + ip: mapping.physical_host_ip.into(), + vni: 
Vni::new(mapping.vni).unwrap(), + }, + })?; - slog::warn!(self.inner.log, "unset_virtual_nic_host unimplmented"); Ok(()) } #[cfg(not(target_os = "illumos"))] pub fn unset_virtual_nic_host( &self, - _mapping: &DeleteVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { - info!(self.inner.log, "Ignoring unset of virtual NIC mapping"); + info!( + self.inner.log, + "Ignoring unset of virtual NIC mapping"; + "mapping" => ?&mapping, + ); Ok(()) } } diff --git a/illumos-utils/src/svcadm.rs b/illumos-utils/src/svcadm.rs new file mode 100644 index 0000000000..0d472187df --- /dev/null +++ b/illumos-utils/src/svcadm.rs @@ -0,0 +1,21 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Utilities for manipulating SMF services. + +use crate::zone::SVCADM; +use crate::{execute, ExecutionError, PFEXEC}; + +/// Wraps commands for interacting with svcadm. +pub struct Svcadm {} + +#[cfg_attr(any(test, feature = "testing"), mockall::automock)] +impl Svcadm { + pub fn refresh_logadm_upgrade() -> Result<(), ExecutionError> { + let mut cmd = std::process::Command::new(PFEXEC); + let cmd = cmd.args(&[SVCADM, "refresh", "logadm-upgrade"]); + execute(cmd)?; + Ok(()) + } +} diff --git a/illumos-utils/src/zfs.rs b/illumos-utils/src/zfs.rs index c111955761..139e6fe607 100644 --- a/illumos-utils/src/zfs.rs +++ b/illumos-utils/src/zfs.rs @@ -5,7 +5,7 @@ //! Utilities for poking at ZFS. use crate::{execute, PFEXEC}; -use camino::Utf8PathBuf; +use camino::{Utf8Path, Utf8PathBuf}; use omicron_common::disk::DiskIdentity; use std::fmt; @@ -28,8 +28,6 @@ pub const ZFS: &str = "/usr/sbin/zfs"; /// the keys and recreate the files on demand when creating and mounting /// encrypted filesystems. We then zero them and unlink them. pub const KEYPATH_ROOT: &str = "/var/run/oxide/"; -// Use /tmp so we don't have to worry about running tests with pfexec -pub const TEST_KEYPATH_ROOT: &str = "/tmp"; /// Error returned by [`Zfs::list_datasets`]. #[derive(thiserror::Error, Debug)] @@ -168,27 +166,34 @@ impl fmt::Display for Keypath { } } -#[cfg(not(feature = "tmp_keypath"))] -impl From<&DiskIdentity> for Keypath { - fn from(id: &DiskIdentity) -> Self { - build_keypath(id, KEYPATH_ROOT) - } -} - -#[cfg(feature = "tmp_keypath")] -impl From<&DiskIdentity> for Keypath { - fn from(id: &DiskIdentity) -> Self { - build_keypath(id, TEST_KEYPATH_ROOT) +impl Keypath { + /// Constructs a Keypath for the specified disk within the supplied root + /// directory. + /// + /// By supplying "root", tests can override the location where these paths + /// are stored to non-global locations. 
+ pub fn new>(id: &DiskIdentity, root: &P) -> Keypath { + let keypath_root = Utf8PathBuf::from(KEYPATH_ROOT); + let mut keypath = keypath_root.as_path(); + let keypath_directory = loop { + match keypath.strip_prefix("/") { + Ok(stripped) => keypath = stripped, + Err(_) => break root.as_ref().join(keypath), + } + }; + std::fs::create_dir_all(&keypath_directory) + .expect("Cannot ensure directory for keys"); + + let filename = format!( + "{}-{}-{}-zfs-aes-256-gcm.key", + id.vendor, id.serial, id.model + ); + let path: Utf8PathBuf = + [keypath_directory.as_str(), &filename].iter().collect(); + Keypath(path) } } -fn build_keypath(id: &DiskIdentity, root: &str) -> Keypath { - let filename = - format!("{}-{}-{}-zfs-aes-256-gcm.key", id.vendor, id.serial, id.model); - let path: Utf8PathBuf = [root, &filename].iter().collect(); - Keypath(path) -} - #[derive(Debug)] pub struct EncryptionDetails { pub keypath: Keypath, @@ -332,6 +337,20 @@ impl Zfs { err: err.into(), })?; + // We ensure that the currently running process has the ability to + // act on the underlying mountpoint. + if !zoned { + let mut command = std::process::Command::new(PFEXEC); + let user = whoami::username(); + let mount = format!("{mountpoint}"); + let cmd = command.args(["chown", "-R", &user, &mount]); + execute(cmd).map_err(|err| EnsureFilesystemError { + name: name.to_string(), + mountpoint: mountpoint.clone(), + err: err.into(), + })?; + } + if let Some(SizeDetails { quota, compression }) = size_details { // Apply any quota and compression mode. Self::apply_properties(name, &mountpoint, quota, compression)?; @@ -603,7 +622,8 @@ pub fn get_all_omicron_datasets_for_delete() -> anyhow::Result> { // This includes cockroachdb, clickhouse, and crucible datasets. let zpools = crate::zpool::Zpool::list()?; for pool in &zpools { - let internal = pool.kind() == crate::zpool::ZpoolKind::Internal; + let internal = + pool.kind() == omicron_common::zpool_name::ZpoolKind::Internal; let pool = pool.to_string(); for dataset in &Zfs::list_datasets(&pool)? { // Avoid erasing crashdump, backing data and swap datasets on diff --git a/illumos-utils/src/zpool.rs b/illumos-utils/src/zpool.rs index f2c395e22b..fa93760f99 100644 --- a/illumos-utils/src/zpool.rs +++ b/illumos-utils/src/zpool.rs @@ -5,17 +5,15 @@ //! Utilities for managing Zpools. 
use crate::{execute, ExecutionError, PFEXEC}; -use camino::{Utf8Path, Utf8PathBuf}; -use schemars::JsonSchema; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::fmt; +use camino::Utf8Path; use std::str::FromStr; -use uuid::Uuid; -const ZPOOL_EXTERNAL_PREFIX: &str = "oxp_"; -const ZPOOL_INTERNAL_PREFIX: &str = "oxi_"; +pub use omicron_common::zpool_name::ZpoolName; + const ZPOOL: &str = "/usr/sbin/zpool"; +pub const ZPOOL_MOUNTPOINT_ROOT: &str = "/"; + #[derive(thiserror::Error, Debug, PartialEq, Eq)] #[error("Failed to parse output: {0}")] pub struct ParseError(String); @@ -192,7 +190,7 @@ impl Zpool { let mut cmd = std::process::Command::new(PFEXEC); cmd.env_clear(); cmd.env("LC_ALL", "C.UTF-8"); - cmd.arg(ZPOOL).arg("create"); + cmd.arg(ZPOOL).args(["create", "-o", "ashift=12"]); cmd.arg(&name.to_string()); cmd.arg(vdev); execute(&mut cmd).map_err(Error::from)?; @@ -302,262 +300,10 @@ impl Zpool { } } -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, JsonSchema)] -#[serde(rename_all = "snake_case")] -pub enum ZpoolKind { - // This zpool is used for external storage (u.2) - External, - // This zpool is used for internal storage (m.2) - Internal, -} - -/// A wrapper around a zpool name. -/// -/// This expects that the format will be: `ox{i,p}_` - we parse the prefix -/// when reading the structure, and validate that the UUID can be utilized. -#[derive(Clone, Debug, Hash, PartialEq, Eq)] -pub struct ZpoolName { - id: Uuid, - kind: ZpoolKind, -} - -const ZPOOL_NAME_REGEX: &str = r"^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"; - -/// Custom JsonSchema implementation to encode the constraints on Name. -impl JsonSchema for ZpoolName { - fn schema_name() -> String { - "ZpoolName".to_string() - } - fn json_schema( - _: &mut schemars::gen::SchemaGenerator, - ) -> schemars::schema::Schema { - schemars::schema::SchemaObject { - metadata: Some(Box::new(schemars::schema::Metadata { - title: Some( - "The name of a Zpool".to_string(), - ), - description: Some( - "Zpool names are of the format ox{i,p}_. They are either \ - Internal or External, and should be unique" - .to_string(), - ), - ..Default::default() - })), - instance_type: Some(schemars::schema::InstanceType::String.into()), - string: Some(Box::new(schemars::schema::StringValidation { - pattern: Some(ZPOOL_NAME_REGEX.to_owned()), - ..Default::default() - })), - ..Default::default() - } - .into() - } -} - -impl ZpoolName { - pub fn new_internal(id: Uuid) -> Self { - Self { id, kind: ZpoolKind::Internal } - } - - pub fn new_external(id: Uuid) -> Self { - Self { id, kind: ZpoolKind::External } - } - - pub fn id(&self) -> Uuid { - self.id - } - - pub fn kind(&self) -> ZpoolKind { - self.kind - } - - /// Returns a path to a dataset's mountpoint within the zpool. 
- /// - /// For example: oxp_(UUID) -> /pool/ext/(UUID)/(dataset) - pub fn dataset_mountpoint(&self, dataset: &str) -> Utf8PathBuf { - let mut path = Utf8PathBuf::new(); - path.push("/pool"); - match self.kind { - ZpoolKind::External => path.push("ext"), - ZpoolKind::Internal => path.push("int"), - }; - path.push(self.id().to_string()); - path.push(dataset); - path - } -} - -impl<'de> Deserialize<'de> for ZpoolName { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - ZpoolName::from_str(&s).map_err(serde::de::Error::custom) - } -} - -impl Serialize for ZpoolName { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_string()) - } -} - -impl FromStr for ZpoolName { - type Err = String; - - fn from_str(s: &str) -> Result { - if let Some(s) = s.strip_prefix(ZPOOL_EXTERNAL_PREFIX) { - let id = Uuid::from_str(s).map_err(|e| e.to_string())?; - Ok(ZpoolName::new_external(id)) - } else if let Some(s) = s.strip_prefix(ZPOOL_INTERNAL_PREFIX) { - let id = Uuid::from_str(s).map_err(|e| e.to_string())?; - Ok(ZpoolName::new_internal(id)) - } else { - Err(format!( - "Bad zpool name {s}; must start with '{ZPOOL_EXTERNAL_PREFIX}' or '{ZPOOL_INTERNAL_PREFIX}'", - )) - } - } -} - -impl fmt::Display for ZpoolName { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let prefix = match self.kind { - ZpoolKind::External => ZPOOL_EXTERNAL_PREFIX, - ZpoolKind::Internal => ZPOOL_INTERNAL_PREFIX, - }; - write!(f, "{prefix}{}", self.id) - } -} - #[cfg(test)] mod test { use super::*; - #[test] - fn test_zpool_name_regex() { - let valid = [ - "oxi_d462a7f7-b628-40fe-80ff-4e4189e2d62b", - "oxp_d462a7f7-b628-40fe-80ff-4e4189e2d62b", - ]; - - let invalid = [ - "", - // Whitespace - " oxp_d462a7f7-b628-40fe-80ff-4e4189e2d62b", - "oxp_d462a7f7-b628-40fe-80ff-4e4189e2d62b ", - // Case sensitivity - "oxp_D462A7F7-b628-40fe-80ff-4e4189e2d62b", - // Bad prefix - "ox_d462a7f7-b628-40fe-80ff-4e4189e2d62b", - "oxa_d462a7f7-b628-40fe-80ff-4e4189e2d62b", - "oxi-d462a7f7-b628-40fe-80ff-4e4189e2d62b", - "oxp-d462a7f7-b628-40fe-80ff-4e4189e2d62b", - // Missing Prefix - "d462a7f7-b628-40fe-80ff-4e4189e2d62b", - // Bad UUIDs (Not following UUIDv4 format) - "oxi_d462a7f7-b628-30fe-80ff-4e4189e2d62b", - "oxi_d462a7f7-b628-40fe-c0ff-4e4189e2d62b", - ]; - - let r = regress::Regex::new(ZPOOL_NAME_REGEX) - .expect("validation regex is valid"); - for input in valid { - let m = r - .find(input) - .unwrap_or_else(|| panic!("input {input} did not match regex")); - assert_eq!(m.start(), 0, "input {input} did not match start"); - assert_eq!(m.end(), input.len(), "input {input} did not match end"); - } - - for input in invalid { - assert!( - r.find(input).is_none(), - "invalid input {input} should not match validation regex" - ); - } - } - - #[test] - fn test_parse_zpool_name_json() { - #[derive(Serialize, Deserialize, JsonSchema)] - struct TestDataset { - pool_name: ZpoolName, - } - - // Confirm that we can convert from a JSON string to a a ZpoolName - let json_string = - r#"{"pool_name":"oxi_d462a7f7-b628-40fe-80ff-4e4189e2d62b"}"#; - let dataset: TestDataset = serde_json::from_str(json_string) - .expect("Could not parse ZpoolName from Json Object"); - assert!(matches!(dataset.pool_name.kind, ZpoolKind::Internal)); - - // Confirm we can go the other way (ZpoolName to JSON string) too. 
- let j = serde_json::to_string(&dataset) - .expect("Cannot convert back to JSON string"); - assert_eq!(j, json_string); - } - - fn toml_string(s: &str) -> String { - format!("zpool_name = \"{}\"", s) - } - - fn parse_name(s: &str) -> Result { - toml_string(s) - .parse::() - .expect("Cannot parse as TOML value") - .get("zpool_name") - .expect("Missing key") - .clone() - .try_into::() - } - - #[test] - fn test_parse_external_zpool_name() { - let uuid: Uuid = - "d462a7f7-b628-40fe-80ff-4e4189e2d62b".parse().unwrap(); - let good_name = format!("{}{}", ZPOOL_EXTERNAL_PREFIX, uuid); - - let name = parse_name(&good_name).expect("Cannot parse as ZpoolName"); - assert_eq!(uuid, name.id()); - assert_eq!(ZpoolKind::External, name.kind()); - } - - #[test] - fn test_parse_internal_zpool_name() { - let uuid: Uuid = - "d462a7f7-b628-40fe-80ff-4e4189e2d62b".parse().unwrap(); - let good_name = format!("{}{}", ZPOOL_INTERNAL_PREFIX, uuid); - - let name = parse_name(&good_name).expect("Cannot parse as ZpoolName"); - assert_eq!(uuid, name.id()); - assert_eq!(ZpoolKind::Internal, name.kind()); - } - - #[test] - fn test_parse_bad_zpool_names() { - let bad_names = vec![ - // Nonsense string - "this string is GARBAGE", - // Missing prefix - "d462a7f7-b628-40fe-80ff-4e4189e2d62b", - // Underscores - "oxp_d462a7f7_b628_40fe_80ff_4e4189e2d62b", - ]; - - for bad_name in &bad_names { - assert!( - parse_name(&bad_name).is_err(), - "Parsing {} should fail", - bad_name - ); - } - } - #[test] fn test_parse_zpool() { let name = "rpool"; diff --git a/installinator-artifactd/Cargo.toml b/installinator-artifactd/Cargo.toml index e9ddc222cd..236ea7a51c 100644 --- a/installinator-artifactd/Cargo.toml +++ b/installinator-artifactd/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true async-trait.workspace = true diff --git a/installinator-common/Cargo.toml b/installinator-common/Cargo.toml index dd8540c6f8..4c5560148f 100644 --- a/installinator-common/Cargo.toml +++ b/installinator-common/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true camino.workspace = true diff --git a/installinator/Cargo.toml b/installinator/Cargo.toml index abfcc5e892..ebdb6269b7 100644 --- a/installinator/Cargo.toml +++ b/installinator/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true async-trait.workspace = true diff --git a/installinator/src/hardware.rs b/installinator/src/hardware.rs index b037384cbe..a48d816dc8 100644 --- a/installinator/src/hardware.rs +++ b/installinator/src/hardware.rs @@ -9,6 +9,7 @@ use anyhow::Result; use sled_hardware::DiskVariant; use sled_hardware::HardwareManager; use sled_hardware::SledMode; +use sled_storage::config::MountConfig; use sled_storage::disk::Disk; use sled_storage::disk::RawDisk; use slog::info; @@ -24,8 +25,8 @@ impl Hardware { .context("failed to detect whether host is a gimlet")?; ensure!(is_gimlet, "hardware scan only supported on gimlets"); - let hardware = - HardwareManager::new(log, SledMode::Auto).map_err(|err| { + let hardware = HardwareManager::new(log, SledMode::Auto, vec![]) + .map_err(|err| { anyhow!("failed to create HardwareManager: {err}") })?; @@ -49,9 +50,15 @@ impl Hardware { ); } DiskVariant::M2 => { - let disk = Disk::new(log, disk, None) - .await - .context("failed to instantiate Disk handle for 
M.2")?; + let disk = Disk::new( + log, + &MountConfig::default(), + disk, + None, + None, + ) + .await + .context("failed to instantiate Disk handle for M.2")?; m2_disks.push(disk); } } diff --git a/installinator/src/write.rs b/installinator/src/write.rs index 380595b4cd..c7710baff7 100644 --- a/installinator/src/write.rs +++ b/installinator/src/write.rs @@ -116,6 +116,7 @@ impl WriteDestination { let zpool_name = disk.zpool_name().clone(); let control_plane_dir = zpool_name.dataset_mountpoint( + illumos_utils::zpool::ZPOOL_MOUNTPOINT_ROOT.into(), sled_storage::dataset::INSTALL_DATASET, ); diff --git a/internal-dns-cli/Cargo.toml b/internal-dns-cli/Cargo.toml index dab92c6d7c..dae0af0280 100644 --- a/internal-dns-cli/Cargo.toml +++ b/internal-dns-cli/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true clap.workspace = true diff --git a/internal-dns/Cargo.toml b/internal-dns/Cargo.toml index 96993ce6a2..c08cc012c1 100644 --- a/internal-dns/Cargo.toml +++ b/internal-dns/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true chrono.workspace = true @@ -11,6 +14,7 @@ dns-service-client.workspace = true futures.workspace = true hyper.workspace = true omicron-common.workspace = true +omicron-uuid-kinds.workspace = true reqwest = { workspace = true, features = ["rustls-tls", "stream"] } slog.workspace = true thiserror.workspace = true diff --git a/internal-dns/src/config.rs b/internal-dns/src/config.rs index 5eee34bd51..43d6c96d2d 100644 --- a/internal-dns/src/config.rs +++ b/internal-dns/src/config.rs @@ -62,38 +62,26 @@ use crate::names::{ServiceName, DNS_ZONE}; use anyhow::{anyhow, ensure}; +use core::fmt; use dns_service_client::types::{DnsConfigParams, DnsConfigZone, DnsRecord}; use omicron_common::api::external::Generation; +use omicron_uuid_kinds::{OmicronZoneUuid, SledUuid}; use std::collections::BTreeMap; use std::net::Ipv6Addr; -use uuid::Uuid; - -/// Zones that can be referenced within the internal DNS system. -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub enum ZoneVariant { - /// This non-global zone runs an instance of Dendrite. - /// - /// This implies that the Sled is a scrimlet. - // When this variant is used, the UUID in the record should match the sled - // itself. - Dendrite, - /// All other non-global zones. - Other, -} /// Used to construct the DNS name for a control plane host #[derive(Clone, Debug, PartialEq, PartialOrd)] pub enum Host { /// Used to construct an AAAA record for a sled. - Sled(Uuid), + Sled(SledUuid), /// Used to construct an AAAA record for a zone on a sled. 
- Zone { id: Uuid, variant: ZoneVariant }, + Zone(Zone), } impl Host { - pub fn for_zone(id: Uuid, variant: ZoneVariant) -> Host { - Host::Zone { id, variant } + pub fn for_zone(zone: Zone) -> Host { + Host::Zone(zone) } /// Returns the DNS name for this host, ignoring the zone part of the DNS @@ -101,10 +89,10 @@ impl Host { pub(crate) fn dns_name(&self) -> String { match &self { Host::Sled(id) => format!("{}.sled", id), - Host::Zone { id, variant: ZoneVariant::Dendrite } => { + Host::Zone(Zone::Dendrite(id)) => { format!("dendrite-{}.host", id) } - Host::Zone { id, variant: ZoneVariant::Other } => { + Host::Zone(Zone::Other(id)) => { format!("{}.host", id) } } @@ -161,26 +149,29 @@ pub struct DnsConfigBuilder { /// similar to service_instances_zones, but for services that run on sleds service_instances_sleds: BTreeMap>, - - /// generation number for this config - generation: Generation, } /// Describes a host of type "sled" in the control plane DNS zone #[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct Sled(Uuid); +pub struct Sled(SledUuid); /// Describes a host of type "zone" (an illumos zone) in the control plane DNS /// zone #[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct Zone { - id: Uuid, - variant: ZoneVariant, +pub enum Zone { + /// This non-global zone runs an instance of Dendrite. + /// + /// This implies that the Sled is a scrimlet. + // When this variant is used, the UUID in the record should match the sled + // itself. + Dendrite(SledUuid), + /// All other non-global zones. + Other(OmicronZoneUuid), } impl Zone { pub(crate) fn to_host(&self) -> Host { - Host::Zone { id: self.id, variant: self.variant } + Host::Zone(self.clone()) } pub(crate) fn dns_name(&self) -> String { @@ -188,6 +179,17 @@ impl Zone { } } +impl fmt::Display for Zone { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Zone::Dendrite(sled_uuid) => { + write!(f, "{} (dendrite)", sled_uuid) + } + Zone::Other(zone_uuid) => write!(f, "{} (other)", zone_uuid), + } + } +} + impl DnsConfigBuilder { pub fn new() -> Self { DnsConfigBuilder { @@ -195,7 +197,6 @@ impl DnsConfigBuilder { zones: BTreeMap::new(), service_instances_zones: BTreeMap::new(), service_instances_sleds: BTreeMap::new(), - generation: Generation::new(), } } @@ -211,7 +212,7 @@ impl DnsConfigBuilder { /// configuration. pub fn host_sled( &mut self, - sled_id: Uuid, + sled_id: SledUuid, addr: Ipv6Addr, ) -> anyhow::Result { match self.sleds.insert(Sled(sled_id), addr) { @@ -237,10 +238,10 @@ impl DnsConfigBuilder { /// configuration. pub fn host_dendrite( &mut self, - sled_id: Uuid, + sled_id: SledUuid, addr: Ipv6Addr, ) -> anyhow::Result { - self.host_zone_internal(sled_id, ZoneVariant::Dendrite, addr) + self.host_zone_internal(Zone::Dendrite(sled_id), addr) } /// Add a new host of type "zone" to the configuration @@ -255,24 +256,22 @@ impl DnsConfigBuilder { /// configuration. 
pub fn host_zone( &mut self, - zone_id: Uuid, + zone_id: OmicronZoneUuid, addr: Ipv6Addr, ) -> anyhow::Result { - self.host_zone_internal(zone_id, ZoneVariant::Other, addr) + self.host_zone_internal(Zone::Other(zone_id), addr) } fn host_zone_internal( &mut self, - id: Uuid, - variant: ZoneVariant, + zone: Zone, addr: Ipv6Addr, ) -> anyhow::Result { - let zone = Zone { id, variant }; match self.zones.insert(zone.clone(), addr) { None => Ok(zone), Some(existing) => Err(anyhow!( "multiple definitions for zone {} (previously {}, now {})", - id, + zone, existing, addr )), @@ -297,8 +296,7 @@ impl DnsConfigBuilder { // DnsBuilder. ensure!( self.zones.contains_key(&zone), - "zone {} has not been defined", - zone.id + "zone {zone} has not been defined", ); let set = self @@ -311,7 +309,7 @@ impl DnsConfigBuilder { "service {}: zone {}: registered twice \ (previously port {}, now {})", service.dns_name(), - zone.id, + zone, existing, port )), @@ -336,7 +334,7 @@ impl DnsConfigBuilder { // DnsBuilder. ensure!( self.sleds.contains_key(&sled), - "sled {:?} has not been defined", + "sled {} has not been defined", sled.0 ); @@ -366,7 +364,7 @@ impl DnsConfigBuilder { /// configuration. pub fn host_zone_with_one_backend( &mut self, - zone_id: Uuid, + zone_id: OmicronZoneUuid, addr: Ipv6Addr, service: ServiceName, port: u16, @@ -384,7 +382,7 @@ impl DnsConfigBuilder { /// configuration. pub fn host_zone_switch( &mut self, - sled_id: Uuid, + sled_id: SledUuid, switch_zone_ip: Ipv6Addr, dendrite_port: u16, mgs_port: u16, @@ -400,14 +398,9 @@ impl DnsConfigBuilder { self.service_backend_zone(ServiceName::Mgd, &zone, mgd_port) } - pub fn generation(&mut self, generation: Generation) { - self.generation = generation; - } - - /// Construct a complete [`DnsConfigParams`] (suitable for propagating to - /// our DNS servers) for the control plane DNS zone described up to this - /// point - pub fn build(self) -> DnsConfigParams { + /// Construct a `DnsConfigZone` describing the control plane zone described + /// up to this point + pub fn build_zone(self) -> DnsConfigZone { // Assemble the set of "AAAA" records for sleds. 
let sled_records = self.sleds.into_iter().map(|(sled, sled_ip)| { let name = Host::Sled(sled.0).dns_name(); @@ -465,23 +458,28 @@ impl DnsConfigBuilder { .chain(srv_records_zones) .collect(); + DnsConfigZone { zone_name: DNS_ZONE.to_owned(), records: all_records } + } + + /// Construct a complete [`DnsConfigParams`] (suitable for propagating to + /// our DNS servers) for the control plane DNS zone described up to this + /// point + pub fn build_full_config_for_initial_generation(self) -> DnsConfigParams { + let zone = self.build_zone(); DnsConfigParams { - generation: u64::from(self.generation), + generation: u64::from(Generation::new()), time_created: chrono::Utc::now(), - zones: vec![DnsConfigZone { - zone_name: DNS_ZONE.to_owned(), - records: all_records, - }], + zones: vec![zone], } } } #[cfg(test)] mod test { - use super::{DnsConfigBuilder, Host, ServiceName, ZoneVariant}; - use crate::DNS_ZONE; + use super::{DnsConfigBuilder, Host, ServiceName}; + use crate::{config::Zone, DNS_ZONE}; + use omicron_uuid_kinds::{OmicronZoneUuid, SledUuid}; use std::{collections::BTreeMap, io::Write, net::Ipv6Addr}; - use uuid::Uuid; #[test] fn display_srv_service() { @@ -499,30 +497,33 @@ mod test { ServiceName::CruciblePantry.dns_name(), "_crucible-pantry._tcp", ); - let uuid = Uuid::nil(); + + let sled_uuid = SledUuid::nil(); + let zone_uuid = OmicronZoneUuid::nil(); assert_eq!( - ServiceName::Crucible(uuid).dns_name(), - "_crucible._tcp.00000000-0000-0000-0000-000000000000", + ServiceName::SledAgent(sled_uuid).dns_name(), + "_sledagent._tcp.00000000-0000-0000-0000-000000000000", ); assert_eq!( - ServiceName::SledAgent(uuid).dns_name(), - "_sledagent._tcp.00000000-0000-0000-0000-000000000000", + ServiceName::Crucible(zone_uuid).dns_name(), + "_crucible._tcp.00000000-0000-0000-0000-000000000000", ); } #[test] fn display_hosts() { - let uuid = Uuid::nil(); + let sled_uuid = SledUuid::nil(); + let zone_uuid = OmicronZoneUuid::nil(); assert_eq!( - Host::Sled(uuid).dns_name(), + Host::Sled(sled_uuid).dns_name(), "00000000-0000-0000-0000-000000000000.sled", ); assert_eq!( - Host::Zone { id: uuid, variant: ZoneVariant::Other }.dns_name(), + Host::Zone(Zone::Other(zone_uuid)).dns_name(), "00000000-0000-0000-0000-000000000000.host", ); assert_eq!( - Host::Zone { id: uuid, variant: ZoneVariant::Dendrite }.dns_name(), + Host::Zone(Zone::Dendrite(sled_uuid)).dns_name(), "dendrite-00000000-0000-0000-0000-000000000000.host", ); } @@ -546,12 +547,12 @@ mod test { fn test_builder_output() { let mut output = std::io::Cursor::new(Vec::new()); - let sled1_uuid: Uuid = SLED1_UUID.parse().unwrap(); - let sled2_uuid: Uuid = SLED2_UUID.parse().unwrap(); - let zone1_uuid: Uuid = ZONE1_UUID.parse().unwrap(); - let zone2_uuid: Uuid = ZONE2_UUID.parse().unwrap(); - let zone3_uuid: Uuid = ZONE3_UUID.parse().unwrap(); - let zone4_uuid: Uuid = ZONE4_UUID.parse().unwrap(); + let sled1_uuid: SledUuid = SLED1_UUID.parse().unwrap(); + let sled2_uuid: SledUuid = SLED2_UUID.parse().unwrap(); + let zone1_uuid: OmicronZoneUuid = ZONE1_UUID.parse().unwrap(); + let zone2_uuid: OmicronZoneUuid = ZONE2_UUID.parse().unwrap(); + let zone3_uuid: OmicronZoneUuid = ZONE3_UUID.parse().unwrap(); + let zone4_uuid: OmicronZoneUuid = ZONE4_UUID.parse().unwrap(); let builder_empty = DnsConfigBuilder::new(); @@ -609,7 +610,7 @@ mod test { ("zones_only", builder_zones_only), ("non_trivial", builder_non_trivial), ] { - let config = builder.build(); + let config = builder.build_full_config_for_initial_generation(); assert_eq!(config.generation, 1); 
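(Aside: `build()` is split into `build_zone()`, which produces just the control-plane zone records, and `build_full_config_for_initial_generation()`, which always stamps the result with the initial generation; the test below pins that value at 1, and later callers bump `generation` by hand before re-publishing. A hedged sketch of that shape, with illustrative struct fields rather than the full `DnsConfigParams`:

```rust
struct DnsConfigZone {
    zone_name: String,
    records: Vec<String>, // stand-in for the real record map
}

struct DnsConfigParams {
    generation: u64,
    zones: Vec<DnsConfigZone>,
}

fn build_zone() -> DnsConfigZone {
    DnsConfigZone {
        zone_name: "control-plane.oxide.internal".to_string(),
        records: Vec::new(),
    }
}

// Always the *initial* generation; callers that publish an update bump it.
fn build_full_config_for_initial_generation() -> DnsConfigParams {
    DnsConfigParams { generation: 1, zones: vec![build_zone()] }
}

fn main() {
    let mut cfg = build_full_config_for_initial_generation();
    assert_eq!(cfg.generation, 1);
    assert_eq!(cfg.zones.len(), 1);
    assert_eq!(cfg.zones[0].zone_name, "control-plane.oxide.internal");
    assert!(cfg.zones[0].records.is_empty());
    cfg.generation += 1; // as the resolver tests do before a second update
    assert_eq!(cfg.generation, 2);
}
```
)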
assert_eq!(config.zones.len(), 1); assert_eq!(config.zones[0].zone_name, DNS_ZONE); @@ -629,8 +630,8 @@ mod test { #[test] fn test_builder_errors() { - let sled1_uuid: Uuid = SLED1_UUID.parse().unwrap(); - let zone1_uuid: Uuid = ZONE1_UUID.parse().unwrap(); + let sled1_uuid: SledUuid = SLED1_UUID.parse().unwrap(); + let zone1_uuid: OmicronZoneUuid = ZONE1_UUID.parse().unwrap(); // Duplicate sled, with both the same IP and a different one let mut builder = DnsConfigBuilder::new(); @@ -656,15 +657,15 @@ mod test { assert_eq!( error.to_string(), "multiple definitions for zone \ - 001de000-c04e-4000-8000-000000000001 (previously ::1:1, \ - now ::1:1)" + 001de000-c04e-4000-8000-000000000001 (other) \ + (previously ::1:1, now ::1:1)" ); let error = builder.host_zone(zone1_uuid, ZONE2_IP).unwrap_err(); assert_eq!( error.to_string(), "multiple definitions for zone \ - 001de000-c04e-4000-8000-000000000001 (previously ::1:1, \ - now ::1:2)" + 001de000-c04e-4000-8000-000000000001 (other) \ + (previously ::1:1, now ::1:2)" ); // Specify an undefined zone or sled. (This requires a second builder.) @@ -677,7 +678,8 @@ mod test { .unwrap_err(); assert_eq!( error.to_string(), - "zone 001de000-c04e-4000-8000-000000000001 has not been defined" + "zone 001de000-c04e-4000-8000-000000000001 (other) \ + has not been defined" ); let error = builder2 .service_backend_sled(ServiceName::Oximeter, &sled, 123) @@ -700,7 +702,7 @@ mod test { assert_eq!( error.to_string(), "service _oximeter._tcp: zone \ - 001de000-c04e-4000-8000-000000000001: registered twice \ + 001de000-c04e-4000-8000-000000000001 (other): registered twice \ (previously port 123, now 123)" ); let error = builder @@ -709,7 +711,7 @@ mod test { assert_eq!( error.to_string(), "service _oximeter._tcp: zone \ - 001de000-c04e-4000-8000-000000000001: registered twice \ + 001de000-c04e-4000-8000-000000000001 (other): registered twice \ (previously port 123, now 456)" ); } diff --git a/internal-dns/src/names.rs b/internal-dns/src/names.rs index 8cafe4ac97..3017d3b3fc 100644 --- a/internal-dns/src/names.rs +++ b/internal-dns/src/names.rs @@ -4,7 +4,7 @@ //! Well-known DNS names and related types for internal DNS (see RFD 248) -use uuid::Uuid; +use omicron_uuid_kinds::{OmicronZoneUuid, SledUuid}; /// Name for the control plane DNS zone pub const DNS_ZONE: &str = "control-plane.oxide.internal"; @@ -28,8 +28,8 @@ pub enum ServiceName { Dendrite, Tfport, CruciblePantry, - SledAgent(Uuid), - Crucible(Uuid), + SledAgent(SledUuid), + Crucible(OmicronZoneUuid), BoundaryNtp, InternalNtp, Maghemite, //TODO change to Dpd - maghemite has several services. 
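(Aside: `ServiceName::SledAgent` now takes a `SledUuid` while `ServiceName::Crucible` takes an `OmicronZoneUuid`, so handing a zone ID to a sled-scoped service stops compiling. A toy sketch of why the split helps, with hand-rolled newtypes standing in for `omicron-uuid-kinds`:

```rust
// Hand-rolled stand-ins for the typed-UUID wrappers.
#[derive(Clone, Copy, Debug, PartialEq)]
struct SledUuid(u128);
#[derive(Clone, Copy, Debug, PartialEq)]
struct OmicronZoneUuid(u128);

enum ServiceName {
    SledAgent(SledUuid),
    Crucible(OmicronZoneUuid),
}

fn sled_agent_target(name: &ServiceName) -> Option<SledUuid> {
    match name {
        ServiceName::SledAgent(id) => Some(*id),
        _ => None,
    }
}

fn main() {
    let sled = SledUuid(1);
    let zone = OmicronZoneUuid(2);
    let svc = ServiceName::SledAgent(sled);
    assert_eq!(sled_agent_target(&svc), Some(sled));
    // ServiceName::SledAgent(zone) would be a type error, which is the point.
    let _ = ServiceName::Crucible(zone);
}
```
)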
diff --git a/internal-dns/src/resolver.rs b/internal-dns/src/resolver.rs index 114333cb61..cf5def01c5 100644 --- a/internal-dns/src/resolver.rs +++ b/internal-dns/src/resolver.rs @@ -118,7 +118,7 @@ impl Resolver { .get_dns_subnets() .into_iter() .map(|dns_subnet| { - let ip_addr = IpAddr::V6(dns_subnet.dns_address().ip()); + let ip_addr = IpAddr::V6(dns_subnet.dns_address()); SocketAddr::new(ip_addr, DNS_PORT) }) .collect() @@ -382,6 +382,7 @@ mod test { RequestContext, }; use omicron_test_utils::dev::test_setup_log; + use omicron_uuid_kinds::OmicronZoneUuid; use slog::{o, Logger}; use std::collections::HashMap; use std::net::Ipv6Addr; @@ -389,7 +390,6 @@ mod test { use std::net::SocketAddrV6; use std::str::FromStr; use tempfile::TempDir; - use uuid::Uuid; struct DnsServer { // We hang onto the storage_path even though it's never used because @@ -526,11 +526,11 @@ mod test { let mut dns_config = DnsConfigBuilder::new(); let ip = Ipv6Addr::from_str("ff::01").unwrap(); - let zone = dns_config.host_zone(Uuid::new_v4(), ip).unwrap(); + let zone = dns_config.host_zone(OmicronZoneUuid::new_v4(), ip).unwrap(); dns_config .service_backend_zone(ServiceName::Cockroach, &zone, 12345) .unwrap(); - let dns_config = dns_config.build(); + let dns_config = dns_config.build_full_config_for_initial_generation(); dns_server.update(&dns_config).await.unwrap(); let resolver = dns_server.resolver().unwrap(); @@ -584,31 +584,34 @@ mod test { let srv_crdb = ServiceName::Cockroach; let srv_clickhouse = ServiceName::Clickhouse; - let srv_backend = ServiceName::Crucible(Uuid::new_v4()); + let srv_backend = ServiceName::Crucible(OmicronZoneUuid::new_v4()); let mut dns_builder = DnsConfigBuilder::new(); for db_ip in &cockroach_addrs { - let zone = - dns_builder.host_zone(Uuid::new_v4(), *db_ip.ip()).unwrap(); + let zone = dns_builder + .host_zone(OmicronZoneUuid::new_v4(), *db_ip.ip()) + .unwrap(); dns_builder .service_backend_zone(srv_crdb, &zone, db_ip.port()) .unwrap(); } let zone = dns_builder - .host_zone(Uuid::new_v4(), *clickhouse_addr.ip()) + .host_zone(OmicronZoneUuid::new_v4(), *clickhouse_addr.ip()) .unwrap(); dns_builder .service_backend_zone(srv_clickhouse, &zone, clickhouse_addr.port()) .unwrap(); - let zone = - dns_builder.host_zone(Uuid::new_v4(), *crucible_addr.ip()).unwrap(); + let zone = dns_builder + .host_zone(OmicronZoneUuid::new_v4(), *crucible_addr.ip()) + .unwrap(); dns_builder .service_backend_zone(srv_backend, &zone, crucible_addr.port()) .unwrap(); - let mut dns_config = dns_builder.build(); + let mut dns_config = + dns_builder.build_full_config_for_initial_generation(); dns_server.update(&dns_config).await.unwrap(); // Look up Cockroach @@ -684,10 +687,11 @@ mod test { // Insert a record, observe that it exists. let mut dns_builder = DnsConfigBuilder::new(); let ip1 = Ipv6Addr::from_str("ff::01").unwrap(); - let zone = dns_builder.host_zone(Uuid::new_v4(), ip1).unwrap(); + let zone = + dns_builder.host_zone(OmicronZoneUuid::new_v4(), ip1).unwrap(); let srv_crdb = ServiceName::Cockroach; dns_builder.service_backend_zone(srv_crdb, &zone, 12345).unwrap(); - let dns_config = dns_builder.build(); + let dns_config = dns_builder.build_full_config_for_initial_generation(); dns_server.update(&dns_config).await.unwrap(); let found_ip = resolver .lookup_ipv6(ServiceName::Cockroach) @@ -699,10 +703,12 @@ mod test { // updated. 
let mut dns_builder = DnsConfigBuilder::new(); let ip2 = Ipv6Addr::from_str("ee::02").unwrap(); - let zone = dns_builder.host_zone(Uuid::new_v4(), ip2).unwrap(); + let zone = + dns_builder.host_zone(OmicronZoneUuid::new_v4(), ip2).unwrap(); let srv_crdb = ServiceName::Cockroach; dns_builder.service_backend_zone(srv_crdb, &zone, 54321).unwrap(); - let mut dns_config = dns_builder.build(); + let mut dns_config = + dns_builder.build_full_config_for_initial_generation(); dns_config.generation += 1; dns_server.update(&dns_config).await.unwrap(); let found_ip = resolver @@ -832,11 +838,11 @@ mod test { // Add a record for the new service. let mut dns_config = DnsConfigBuilder::new(); - let zone = dns_config.host_zone(Uuid::new_v4(), ip).unwrap(); + let zone = dns_config.host_zone(OmicronZoneUuid::new_v4(), ip).unwrap(); dns_config .service_backend_zone(ServiceName::Nexus, &zone, port) .unwrap(); - let dns_config = dns_config.build(); + let dns_config = dns_config.build_full_config_for_initial_generation(); dns_server.update(&dns_config).await.unwrap(); // Confirm that we can access this record manually. @@ -914,11 +920,11 @@ mod test { // Since both servers are authoritative, we also shut down the first // server. let mut dns_config = DnsConfigBuilder::new(); - let zone = dns_config.host_zone(Uuid::new_v4(), ip).unwrap(); + let zone = dns_config.host_zone(OmicronZoneUuid::new_v4(), ip).unwrap(); dns_config .service_backend_zone(ServiceName::Nexus, &zone, port) .unwrap(); - let dns_config = dns_config.build(); + let dns_config = dns_config.build_full_config_for_initial_generation(); dns_server1.cleanup_successful(); dns_server2.update(&dns_config).await.unwrap(); @@ -950,7 +956,7 @@ mod test { // Create DNS config with a single service and multiple backends. 
let mut dns_config = DnsConfigBuilder::new(); - let id1 = Uuid::new_v4(); + let id1 = OmicronZoneUuid::new_v4(); let ip1 = Ipv6Addr::new(0xfd, 0, 0, 0, 0, 0, 0, 0x1); let addr1 = SocketAddrV6::new(ip1, 15001, 0, 0); let zone1 = dns_config.host_zone(id1, ip1).unwrap(); @@ -958,7 +964,7 @@ mod test { .service_backend_zone(ServiceName::Cockroach, &zone1, addr1.port()) .unwrap(); - let id2 = Uuid::new_v4(); + let id2 = OmicronZoneUuid::new_v4(); let ip2 = Ipv6Addr::new(0xfd, 0, 0, 0, 0, 0, 0, 0x2); let addr2 = SocketAddrV6::new(ip2, 15002, 0, 0); let zone2 = dns_config.host_zone(id2, ip2).unwrap(); @@ -967,7 +973,8 @@ mod test { .unwrap(); // Plumb records onto DNS server - let mut dns_config = dns_config.build(); + let mut dns_config = + dns_config.build_full_config_for_initial_generation(); dns_server.update(&dns_config).await.unwrap(); // Using the resolver we should get back both addresses diff --git a/internal-dns/tests/output/test-server.json b/internal-dns/tests/output/test-server.json index 5720dec19f..5ed5d37161 100644 --- a/internal-dns/tests/output/test-server.json +++ b/internal-dns/tests/output/test-server.json @@ -67,4 +67,4 @@ } } } -} \ No newline at end of file +} diff --git a/ipcc/Cargo.toml b/ipcc/Cargo.toml index 98a781ab86..a9278349e1 100644 --- a/ipcc/Cargo.toml +++ b/ipcc/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] ciborium.workspace = true libc.workspace = true diff --git a/key-manager/Cargo.toml b/key-manager/Cargo.toml index c44ec61ea4..3e00758c9e 100644 --- a/key-manager/Cargo.toml +++ b/key-manager/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] async-trait.workspace = true hkdf.workspace = true diff --git a/key-manager/src/lib.rs b/key-manager/src/lib.rs index 7ca3cfa3bb..13dd9543a8 100644 --- a/key-manager/src/lib.rs +++ b/key-manager/src/lib.rs @@ -102,7 +102,7 @@ enum StorageKeyRequest { /// the sled-agent starts. The `HardwareMonitor` gets the StorageKeyRequester /// from the bootstrap agent. If this changes, we should remove the `Clone` to /// limit who has access to the storage keys. 
-#[derive(Clone)] +#[derive(Debug, Clone)] pub struct StorageKeyRequester { tx: mpsc::Sender, } diff --git a/nexus-config/Cargo.toml b/nexus-config/Cargo.toml index af7eb70f62..9d23be302f 100644 --- a/nexus-config/Cargo.toml +++ b/nexus-config/Cargo.toml @@ -3,6 +3,9 @@ name = "nexus-config" version = "0.1.0" edition = "2021" +[lints] +workspace = true + [dependencies] anyhow.workspace = true camino.workspace = true diff --git a/nexus-config/src/nexus_config.rs b/nexus-config/src/nexus_config.rs index 8b94d0154b..321064df49 100644 --- a/nexus-config/src/nexus_config.rs +++ b/nexus-config/src/nexus_config.rs @@ -353,12 +353,16 @@ pub struct BackgroundTaskConfig { pub dns_internal: DnsTasksConfig, /// configuration for external DNS background tasks pub dns_external: DnsTasksConfig, + /// configuration for metrics producer garbage collection background task + pub metrics_producer_gc: MetricsProducerGcConfig, /// configuration for external endpoint list watcher pub external_endpoints: ExternalEndpointsConfig, /// configuration for nat table garbage collector pub nat_cleanup: NatCleanupConfig, /// configuration for inventory tasks pub inventory: InventoryConfig, + /// configuration for physical disk adoption tasks + pub physical_disk_adoption: PhysicalDiskAdoptionConfig, /// configuration for phantom disks task pub phantom_disks: PhantomDiskConfig, /// configuration for blueprint related tasks @@ -371,6 +375,14 @@ pub struct BackgroundTaskConfig { pub switch_port_settings_manager: SwitchPortSettingsManagerConfig, /// configuration for region replacement task pub region_replacement: RegionReplacementConfig, + /// configuration for instance watcher task + pub instance_watcher: InstanceWatcherConfig, + /// configuration for service VPC firewall propagation task + pub service_firewall_propagation: ServiceFirewallPropagationConfig, + /// configuration for v2p mapping propagation task + pub v2p_mapping_propagation: V2PMappingPropagationConfig, + /// configuration for abandoned VMM reaper task + pub abandoned_vmm_reaper: AbandonedVmmReaperConfig, } #[serde_as] @@ -395,6 +407,15 @@ pub struct DnsTasksConfig { pub max_concurrent_server_updates: usize, } +#[serde_as] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct MetricsProducerGcConfig { + /// period (in seconds) for periodic activations of the background task that + /// garbage collects metrics producers whose leases have expired + #[serde_as(as = "DurationSeconds")] + pub period_secs: Duration, +} + #[serde_as] #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct ExternalEndpointsConfig { @@ -405,6 +426,20 @@ pub struct ExternalEndpointsConfig { // allow/disallow wildcard certs, don't serve expired certs, etc.) } +#[serde_as] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct PhysicalDiskAdoptionConfig { + /// period (in seconds) for periodic activations of this background task + #[serde_as(as = "DurationSeconds")] + pub period_secs: Duration, + + /// A toggle to disable automated disk adoption. 
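(Aside: the new background-task configs all follow the same pattern: a `period_secs` integer in the TOML that `serde_with`'s `DurationSeconds` turns into a `std::time::Duration`, and, for `physical_disk_adoption`, a `disable` flag that defaults to off via `#[serde(default)]`. A small sketch of that deserialization, assuming the `serde`, `serde_with`, and `toml` crates; the `TaskConfig` name is illustrative:

```rust
use serde::Deserialize;
use serde_with::{serde_as, DurationSeconds};
use std::time::Duration;

#[serde_as]
#[derive(Debug, Deserialize)]
struct TaskConfig {
    /// "period_secs = 30" in the TOML becomes a Duration of 30 seconds here.
    #[serde_as(as = "DurationSeconds<u64>")]
    period_secs: Duration,
    /// Absent from the TOML means false, thanks to #[serde(default)].
    #[serde(default)]
    disable: bool,
}

fn main() {
    let cfg: TaskConfig = toml::from_str("period_secs = 30").unwrap();
    assert_eq!(cfg.period_secs, Duration::from_secs(30));
    assert!(!cfg.disable);
}
```
)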
+ /// + /// Default: Off + #[serde(default)] + pub disable: bool, +} + #[serde_as] #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct NatCleanupConfig { @@ -492,6 +527,38 @@ pub struct RegionReplacementConfig { pub period_secs: Duration, } +#[serde_as] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct InstanceWatcherConfig { + /// period (in seconds) for periodic activations of this background task + #[serde_as(as = "DurationSeconds")] + pub period_secs: Duration, +} + +#[serde_as] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct ServiceFirewallPropagationConfig { + /// period (in seconds) for periodic activations of this background task + #[serde_as(as = "DurationSeconds")] + pub period_secs: Duration, +} + +#[serde_as] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct V2PMappingPropagationConfig { + /// period (in seconds) for periodic activations of this background task + #[serde_as(as = "DurationSeconds")] + pub period_secs: Duration, +} + +#[serde_as] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct AbandonedVmmReaperConfig { + /// period (in seconds) for periodic activations of this background task + #[serde_as(as = "DurationSeconds")] + pub period_secs: Duration, +} + /// Configuration for a nexus server #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct PackageConfig { @@ -714,18 +781,24 @@ mod test { dns_external.period_secs_servers = 6 dns_external.period_secs_propagation = 7 dns_external.max_concurrent_server_updates = 8 + metrics_producer_gc.period_secs = 60 external_endpoints.period_secs = 9 nat_cleanup.period_secs = 30 bfd_manager.period_secs = 30 inventory.period_secs = 10 inventory.nkeep = 11 inventory.disable = false + physical_disk_adoption.period_secs = 30 phantom_disks.period_secs = 30 blueprints.period_secs_load = 10 blueprints.period_secs_execute = 60 sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 + instance_watcher.period_secs = 30 + service_firewall_propagation.period_secs = 300 + v2p_mapping_propagation.period_secs = 30 + abandoned_vmm_reaper.period_secs = 60 [default_region_allocation_strategy] type = "random" seed = 0 @@ -816,6 +889,9 @@ mod test { period_secs_propagation: Duration::from_secs(7), max_concurrent_server_updates: 8, }, + metrics_producer_gc: MetricsProducerGcConfig { + period_secs: Duration::from_secs(60) + }, external_endpoints: ExternalEndpointsConfig { period_secs: Duration::from_secs(9), }, @@ -830,6 +906,10 @@ mod test { nkeep: 11, disable: false, }, + physical_disk_adoption: PhysicalDiskAdoptionConfig { + period_secs: Duration::from_secs(30), + disable: false, + }, phantom_disks: PhantomDiskConfig { period_secs: Duration::from_secs(30), }, @@ -847,6 +927,19 @@ mod test { region_replacement: RegionReplacementConfig { period_secs: Duration::from_secs(30), }, + instance_watcher: InstanceWatcherConfig { + period_secs: Duration::from_secs(30), + }, + service_firewall_propagation: + ServiceFirewallPropagationConfig { + period_secs: Duration::from_secs(300), + }, + v2p_mapping_propagation: V2PMappingPropagationConfig { + period_secs: Duration::from_secs(30) + }, + abandoned_vmm_reaper: AbandonedVmmReaperConfig { + period_secs: Duration::from_secs(60), + } }, default_region_allocation_strategy: crate::nexus_config::RegionAllocationStrategy::Random { @@ -899,18 +992,24 @@ mod test { dns_external.period_secs_servers = 6 
dns_external.period_secs_propagation = 7 dns_external.max_concurrent_server_updates = 8 + metrics_producer_gc.period_secs = 60 external_endpoints.period_secs = 9 nat_cleanup.period_secs = 30 bfd_manager.period_secs = 30 inventory.period_secs = 10 inventory.nkeep = 3 inventory.disable = false + physical_disk_adoption.period_secs = 30 phantom_disks.period_secs = 30 blueprints.period_secs_load = 10 blueprints.period_secs_execute = 60 sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 + instance_watcher.period_secs = 30 + service_firewall_propagation.period_secs = 300 + v2p_mapping_propagation.period_secs = 30 + abandoned_vmm_reaper.period_secs = 60 [default_region_allocation_strategy] type = "random" "##, diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 57d929d44d..0b0bd097bc 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [build-dependencies] omicron-rpaths.workspace = true @@ -37,9 +40,9 @@ internal-dns.workspace = true ipnetwork.workspace = true itertools.workspace = true macaddr.workspace = true -mime_guess.workspace = true # Not under "dev-dependencies"; these also need to be implemented for # integration tests. +nexus-client.workspace = true nexus-config.workspace = true nexus-networking.workspace = true nexus-test-interface.workspace = true @@ -48,6 +51,7 @@ once_cell.workspace = true openssl.workspace = true oximeter-client.workspace = true oximeter-db.workspace = true +oxnet.workspace = true parse-display.workspace = true paste.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. @@ -70,12 +74,14 @@ slog.workspace = true slog-async.workspace = true slog-dtrace.workspace = true slog-error-chain.workspace = true +display-error-chain.workspace = true slog-term.workspace = true steno.workspace = true tempfile.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["full"] } tokio-postgres = { workspace = true, features = ["with-serde_json-1"] } +tokio-util = { workspace = true, features = ["codec"] } tough.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true @@ -84,6 +90,7 @@ nexus-defaults.workspace = true nexus-db-model.workspace = true nexus-db-queries.workspace = true nexus-inventory.workspace = true +nexus-metrics-producer-gc.workspace = true nexus-reconfigurator-execution.workspace = true nexus-reconfigurator-planning.workspace = true nexus-reconfigurator-preparation.workspace = true @@ -117,6 +124,7 @@ omicron-test-utils.workspace = true openapi-lint.workspace = true openapiv3.workspace = true oxide-client.workspace = true +oximeter-collector.workspace = true pem.workspace = true petgraph.workspace = true pretty_assertions.workspace = true diff --git a/nexus/authz-macros/Cargo.toml b/nexus/authz-macros/Cargo.toml index 4d2640abee..beba0a3cf6 100644 --- a/nexus/authz-macros/Cargo.toml +++ b/nexus/authz-macros/Cargo.toml @@ -7,6 +7,9 @@ license = "MPL-2.0" [lib] proc-macro = true +[lints] +workspace = true + [dependencies] heck.workspace = true nexus-macros-common.workspace = true diff --git a/nexus/db-macros/Cargo.toml b/nexus/db-macros/Cargo.toml index 8032ba814d..fa477c1f00 100644 --- a/nexus/db-macros/Cargo.toml +++ b/nexus/db-macros/Cargo.toml @@ -8,6 +8,9 @@ license = "MPL-2.0" [lib] proc-macro = true +[lints] +workspace = true + [dependencies] heck.workspace = true nexus-macros-common.workspace = true diff --git 
a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml index 45a086a5b3..a7b6cd9de1 100644 --- a/nexus/db-model/Cargo.toml +++ b/nexus/db-model/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [build-dependencies] omicron-rpaths.workspace = true @@ -19,6 +22,7 @@ macaddr.workspace = true newtype_derive.workspace = true omicron-uuid-kinds.workspace = true once_cell.workspace = true +oxnet.workspace = true parse-display.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. pq-sys = "*" @@ -28,6 +32,8 @@ schemars = { workspace = true, features = ["chrono", "uuid1"] } semver.workspace = true serde.workspace = true serde_json.workspace = true +slog.workspace = true +slog-error-chain.workspace = true steno.workspace = true strum.workspace = true thiserror.workspace = true diff --git a/nexus/db-model/src/allow_list.rs b/nexus/db-model/src/allow_list.rs new file mode 100644 index 0000000000..5b6ca67ddf --- /dev/null +++ b/nexus/db-model/src/allow_list.rs @@ -0,0 +1,93 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/5.0/. + +// Copyright 2024 Oxide Computer Company + +//! Database representation of allowed source IP address, for implementing basic +//! IP allowlisting. + +use crate::schema::allow_list; +use chrono::DateTime; +use chrono::Utc; +use ipnetwork::IpNetwork; +use nexus_types::external_api::params; +use nexus_types::external_api::views; +use omicron_common::api::external; +use omicron_common::api::external::Error; +use serde::Deserialize; +use serde::Serialize; +use uuid::Uuid; + +/// Database model for an allowlist of source IP addresses. +#[derive( + Queryable, Insertable, Selectable, Clone, Debug, Serialize, Deserialize, +)] +#[diesel(table_name = allow_list)] +pub struct AllowList { + pub id: Uuid, + pub time_created: DateTime, + pub time_modified: DateTime, + pub allowed_ips: Option>, +} + +impl AllowList { + /// Construct a new allowlist record. + pub fn new(id: Uuid, allowed_ips: external::AllowedSourceIps) -> Self { + let now = Utc::now(); + let allowed_ips = match allowed_ips { + external::AllowedSourceIps::Any => None, + external::AllowedSourceIps::List(list) => { + Some(list.into_iter().map(Into::into).collect()) + } + }; + Self { id, time_created: now, time_modified: now, allowed_ips } + } + + /// Create an `AllowedSourceIps` type from the contained address. + pub fn allowed_source_ips( + &self, + ) -> Result { + match &self.allowed_ips { + Some(list) => external::AllowedSourceIps::try_from(list.as_slice()) + .map_err(|_| { + Error::internal_error( + "Allowlist from database is empty, but NULL \ + should be used to allow any source IP", + ) + }), + None => Ok(external::AllowedSourceIps::Any), + } + } +} + +#[derive(AsChangeset)] +#[diesel(table_name = allow_list, treat_none_as_null = true)] +pub struct AllowListUpdate { + /// The new list of allowed IPs. 
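(Aside: the new `allow_list` model encodes "allow any source IP" as SQL `NULL` (`allowed_ips` is `None`) and an explicit allowlist as a non-empty array; an empty array coming back from the database is treated as an internal error rather than "allow nothing". A standalone sketch of that convention, using plain `IpAddr`s instead of `IpNetwork` and a `String` error in place of `external::Error`:

```rust
use std::net::IpAddr;

/// None => allow any source IP (stored as NULL in the database).
/// Some => allow only the listed addresses; must be non-empty.
struct AllowList {
    allowed_ips: Option<Vec<IpAddr>>,
}

enum AllowedSourceIps {
    Any,
    List(Vec<IpAddr>),
}

impl AllowList {
    fn allowed_source_ips(&self) -> Result<AllowedSourceIps, String> {
        match &self.allowed_ips {
            None => Ok(AllowedSourceIps::Any),
            Some(list) if list.is_empty() => Err(
                "allowlist from database is empty, but NULL should be used to allow any source IP".to_string(),
            ),
            Some(list) => Ok(AllowedSourceIps::List(list.clone())),
        }
    }
}

fn main() {
    let any = AllowList { allowed_ips: None };
    assert!(matches!(any.allowed_source_ips(), Ok(AllowedSourceIps::Any)));

    let empty = AllowList { allowed_ips: Some(vec![]) };
    assert!(empty.allowed_source_ips().is_err());
}
```
)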
+ pub allowed_ips: Option>, +} + +impl From for AllowListUpdate { + fn from(params: params::AllowListUpdate) -> Self { + let allowed_ips = match params.allowed_ips { + external::AllowedSourceIps::Any => None, + external::AllowedSourceIps::List(list) => { + Some(list.into_iter().map(Into::into).collect()) + } + }; + Self { allowed_ips } + } +} + +impl TryFrom for views::AllowList { + type Error = Error; + + fn try_from(db: AllowList) -> Result { + db.allowed_source_ips().map(|allowed_ips| Self { + time_created: db.time_created, + time_modified: db.time_modified, + allowed_ips, + }) + } +} diff --git a/nexus/db-model/src/bgp.rs b/nexus/db-model/src/bgp.rs index 02201f60e4..8aaa08ebb7 100644 --- a/nexus/db-model/src/bgp.rs +++ b/nexus/db-model/src/bgp.rs @@ -31,6 +31,8 @@ pub struct BgpConfig { pub asn: SqlU32, pub bgp_announce_set_id: Uuid, pub vrf: Option, + pub shaper: Option, + pub checker: Option, } impl Into for BgpConfig { @@ -59,6 +61,8 @@ impl BgpConfig { asn: c.asn.into(), bgp_announce_set_id, vrf: c.vrf.as_ref().map(|x| x.to_string()), + shaper: c.shaper.as_ref().map(|x| x.to_string()), + checker: c.checker.as_ref().map(|x| x.to_string()), } } } @@ -131,4 +135,11 @@ pub struct BgpPeerView { pub hold_time: SqlU32, pub idle_hold_time: SqlU32, pub keepalive: SqlU32, + pub remote_asn: Option, + pub min_ttl: Option, + pub md5_auth_key: Option, + pub multi_exit_discriminator: Option, + pub local_pref: Option, + pub enforce_first_as: bool, + pub vlan_id: Option, } diff --git a/nexus/db-model/src/bytecount.rs b/nexus/db-model/src/bytecount.rs index 92a01db43f..53e00eb78d 100644 --- a/nexus/db-model/src/bytecount.rs +++ b/nexus/db-model/src/bytecount.rs @@ -93,7 +93,7 @@ impl TryFrom for ByteCount { let mut multiplier = 1; for digit in digits.iter().rev() { - result += *digit as i64 * multiplier; + result += i64::from(*digit) * multiplier; multiplier *= 10000; } diff --git a/nexus/db-model/src/deployment.rs b/nexus/db-model/src/deployment.rs index e56c8bff54..e6a66543c7 100644 --- a/nexus/db-model/src/deployment.rs +++ b/nexus/db-model/src/deployment.rs @@ -8,17 +8,30 @@ use crate::inventory::ZoneType; use crate::omicron_zone_config::{OmicronZone, OmicronZoneNic}; use crate::schema::{ - blueprint, bp_omicron_zone, bp_omicron_zone_nic, - bp_omicron_zones_not_in_service, bp_sled_omicron_zones, bp_target, + blueprint, bp_omicron_physical_disk, bp_omicron_zone, bp_omicron_zone_nic, + bp_sled_omicron_physical_disks, bp_sled_omicron_zones, bp_sled_state, + bp_target, +}; +use crate::typed_uuid::DbTypedUuid; +use crate::{ + impl_enum_type, ipv6, Generation, MacAddr, Name, SledState, SqlU16, SqlU32, + SqlU8, }; -use crate::{ipv6, Generation, MacAddr, Name, SqlU16, SqlU32, SqlU8}; use chrono::{DateTime, Utc}; use ipnetwork::IpNetwork; +use nexus_types::deployment::BlueprintPhysicalDiskConfig; +use nexus_types::deployment::BlueprintPhysicalDisksConfig; use nexus_types::deployment::BlueprintTarget; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZonesConfig; +use nexus_types::deployment::CockroachDbPreserveDowngrade; use omicron_common::api::internal::shared::NetworkInterface; +use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::ZpoolUuid; +use omicron_uuid_kinds::{ExternalIpKind, SledKind}; use uuid::Uuid; /// See [`nexus_types::deployment::Blueprint`]. 
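(Aside: the `bytecount` tweak above swaps `*digit as i64` for `i64::from(*digit)`, a conversion that is guaranteed lossless and, plausibly, one that the workspace lints enabled elsewhere in this change prefer over `as` casts. For context, the surrounding loop accumulates base-10000 digits into an `i64`; a standalone sketch, assuming `i16` digits and an illustrative function name:

```rust
/// Accumulate base-10000 digits (most significant first) into an i64,
/// e.g. [1, 2345] => 1 * 10000 + 2345 = 12345.
fn digits_to_i64(digits: &[i16]) -> i64 {
    let mut result = 0i64;
    let mut multiplier = 1i64;
    for digit in digits.iter().rev() {
        // i64::from is lossless; an `as` cast would compile even where
        // truncation is possible.
        result += i64::from(*digit) * multiplier;
        multiplier *= 10000;
    }
    result
}

fn main() {
    assert_eq!(digits_to_i64(&[1, 2345]), 12345);
}
```
)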
@@ -29,6 +42,8 @@ pub struct Blueprint { pub parent_blueprint_id: Option, pub internal_dns_version: Generation, pub external_dns_version: Generation, + pub cockroachdb_fingerprint: String, + pub cockroachdb_setting_preserve_downgrade: Option, pub time_created: DateTime, pub creator: String, pub comment: String, @@ -41,6 +56,10 @@ impl From<&'_ nexus_types::deployment::Blueprint> for Blueprint { parent_blueprint_id: bp.parent_blueprint_id, internal_dns_version: Generation(bp.internal_dns_version), external_dns_version: Generation(bp.external_dns_version), + cockroachdb_fingerprint: bp.cockroachdb_fingerprint.clone(), + cockroachdb_setting_preserve_downgrade: bp + .cockroachdb_setting_preserve_downgrade + .to_optional_string(), time_created: bp.time_created, creator: bp.creator.clone(), comment: bp.comment.clone(), @@ -55,6 +74,12 @@ impl From for nexus_types::deployment::BlueprintMetadata { parent_blueprint_id: value.parent_blueprint_id, internal_dns_version: *value.internal_dns_version, external_dns_version: *value.external_dns_version, + cockroachdb_fingerprint: value.cockroachdb_fingerprint, + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::from_optional_string( + &value.cockroachdb_setting_preserve_downgrade, + ) + .ok(), time_created: value.time_created, creator: value.creator, comment: value.comment, @@ -93,35 +118,113 @@ impl From for nexus_types::deployment::BlueprintTarget { } } +/// See [`nexus_types::deployment::Blueprint::sled_state`]. +#[derive(Queryable, Clone, Debug, Selectable, Insertable)] +#[diesel(table_name = bp_sled_state)] +pub struct BpSledState { + pub blueprint_id: Uuid, + pub sled_id: DbTypedUuid, + pub sled_state: SledState, +} + +/// See [`nexus_types::deployment::BlueprintPhysicalDisksConfig`]. +#[derive(Queryable, Clone, Debug, Selectable, Insertable)] +#[diesel(table_name = bp_sled_omicron_physical_disks)] +pub struct BpSledOmicronPhysicalDisks { + pub blueprint_id: Uuid, + pub sled_id: Uuid, + pub generation: Generation, +} + +impl BpSledOmicronPhysicalDisks { + pub fn new( + blueprint_id: Uuid, + sled_id: Uuid, + disks_config: &BlueprintPhysicalDisksConfig, + ) -> Self { + Self { + blueprint_id, + sled_id, + generation: Generation(disks_config.generation), + } + } +} + +/// See [`nexus_types::deployment::BlueprintPhysicalDiskConfig`]. +#[derive(Queryable, Clone, Debug, Selectable, Insertable)] +#[diesel(table_name = bp_omicron_physical_disk)] +pub struct BpOmicronPhysicalDisk { + pub blueprint_id: Uuid, + pub sled_id: Uuid, + + pub vendor: String, + pub serial: String, + pub model: String, + + pub id: Uuid, + pub pool_id: Uuid, +} + +impl BpOmicronPhysicalDisk { + pub fn new( + blueprint_id: Uuid, + sled_id: Uuid, + disk_config: &BlueprintPhysicalDiskConfig, + ) -> Self { + Self { + blueprint_id, + sled_id, + vendor: disk_config.identity.vendor.clone(), + serial: disk_config.identity.serial.clone(), + model: disk_config.identity.model.clone(), + id: disk_config.id, + pool_id: disk_config.pool_id.into_untyped_uuid(), + } + } +} + +impl From for BlueprintPhysicalDiskConfig { + fn from(disk: BpOmicronPhysicalDisk) -> Self { + Self { + identity: DiskIdentity { + vendor: disk.vendor, + serial: disk.serial, + model: disk.model, + }, + id: disk.id, + pool_id: ZpoolUuid::from_untyped_uuid(disk.pool_id), + } + } +} + /// See [`nexus_types::deployment::OmicronZonesConfig`]. 
#[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = bp_sled_omicron_zones)] pub struct BpSledOmicronZones { pub blueprint_id: Uuid, - pub sled_id: Uuid, + pub sled_id: DbTypedUuid, pub generation: Generation, } impl BpSledOmicronZones { pub fn new( blueprint_id: Uuid, - sled_id: Uuid, + sled_id: SledUuid, zones_config: &BlueprintZonesConfig, ) -> Self { Self { blueprint_id, - sled_id, + sled_id: sled_id.into(), generation: Generation(zones_config.generation), } } } - /// See [`nexus_types::deployment::OmicronZoneConfig`]. #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = bp_omicron_zone)] pub struct BpOmicronZone { pub blueprint_id: Uuid, - pub sled_id: Uuid, + pub sled_id: DbTypedUuid, pub id: Uuid, pub underlay_address: ipv6::Ipv6Addr, pub zone_type: ZoneType, @@ -141,18 +244,32 @@ pub struct BpOmicronZone { pub snat_ip: Option, pub snat_first_port: Option, pub snat_last_port: Option, + + disposition: DbBpZoneDisposition, + + pub external_ip_id: Option>, } impl BpOmicronZone { pub fn new( blueprint_id: Uuid, - sled_id: Uuid, - zone: &BlueprintZoneConfig, + sled_id: SledUuid, + blueprint_zone: &BlueprintZoneConfig, ) -> Result { - let zone = OmicronZone::new(sled_id, &zone.config)?; + let external_ip_id = blueprint_zone + .zone_type + .external_networking() + .map(|(ip, _)| ip.id()); + let zone = OmicronZone::new( + sled_id, + blueprint_zone.id.into_untyped_uuid(), + blueprint_zone.underlay_address, + &blueprint_zone.zone_type.clone().into(), + external_ip_id, + )?; Ok(Self { blueprint_id, - sled_id: zone.sled_id, + sled_id: zone.sled_id.into(), id: zone.id, underlay_address: zone.underlay_address, zone_type: zone.zone_type, @@ -172,16 +289,17 @@ impl BpOmicronZone { snat_ip: zone.snat_ip, snat_first_port: zone.snat_first_port, snat_last_port: zone.snat_last_port, + disposition: to_db_bp_zone_disposition(blueprint_zone.disposition), + external_ip_id: zone.external_ip_id.map(From::from), }) } pub fn into_blueprint_zone_config( self, nic_row: Option, - disposition: BlueprintZoneDisposition, ) -> Result { let zone = OmicronZone { - sled_id: self.sled_id, + sled_id: self.sled_id.into(), id: self.id, underlay_address: self.underlay_address, zone_type: self.zone_type, @@ -201,10 +319,57 @@ impl BpOmicronZone { snat_ip: self.snat_ip, snat_first_port: self.snat_first_port, snat_last_port: self.snat_last_port, + external_ip_id: self.external_ip_id.map(From::from), }; - let config = - zone.into_omicron_zone_config(nic_row.map(OmicronZoneNic::from))?; - Ok(BlueprintZoneConfig { config, disposition }) + zone.into_blueprint_zone_config( + self.disposition.into(), + nic_row.map(OmicronZoneNic::from), + ) + } +} + +impl_enum_type!( + #[derive(Clone, SqlType, Debug, QueryId)] + #[diesel(postgres_type(name = "bp_zone_disposition", schema = "public"))] + pub struct DbBpZoneDispositionEnum; + + /// This type is not actually public, because [`BlueprintZoneDisposition`] + /// interacts with external logic. + /// + /// However, it must be marked `pub` to avoid errors like `crate-private + /// type `BpZoneDispositionEnum` in public interface`. Marking this type `pub`, + /// without actually making it public, tricks rustc in a desirable way. 
+ #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq)] + #[diesel(sql_type = DbBpZoneDispositionEnum)] + pub enum DbBpZoneDisposition; + + // Enum values + InService => b"in_service" + Quiesced => b"quiesced" + Expunged => b"expunged" +); + +/// Converts a [`BlueprintZoneDisposition`] to a version that can be inserted +/// into a database. +pub fn to_db_bp_zone_disposition( + disposition: BlueprintZoneDisposition, +) -> DbBpZoneDisposition { + match disposition { + BlueprintZoneDisposition::InService => DbBpZoneDisposition::InService, + BlueprintZoneDisposition::Quiesced => DbBpZoneDisposition::Quiesced, + BlueprintZoneDisposition::Expunged => DbBpZoneDisposition::Expunged, + } +} + +impl From for BlueprintZoneDisposition { + fn from(disposition: DbBpZoneDisposition) -> Self { + match disposition { + DbBpZoneDisposition::InService => { + BlueprintZoneDisposition::InService + } + DbBpZoneDisposition::Quiesced => BlueprintZoneDisposition::Quiesced, + DbBpZoneDisposition::Expunged => BlueprintZoneDisposition::Expunged, + } } } @@ -242,8 +407,11 @@ impl BpOmicronZoneNic { blueprint_id: Uuid, zone: &BlueprintZoneConfig, ) -> Result, anyhow::Error> { - let zone_nic = OmicronZoneNic::new(&zone.config)?; - Ok(zone_nic.map(|nic| Self { + let Some((_, nic)) = zone.zone_type.external_networking() else { + return Ok(None); + }; + let nic = OmicronZoneNic::new(zone.id.into_untyped_uuid(), nic)?; + Ok(Some(Self { blueprint_id, id: nic.id, name: nic.name, @@ -265,12 +433,63 @@ impl BpOmicronZoneNic { } } -/// Nexus wants to think in terms of "zones in service", but since most zones of -/// most blueprints are in service, we store the zones NOT in service in the -/// database. We handle that inversion internally in the db-queries layer. -#[derive(Queryable, Clone, Debug, Selectable, Insertable)] -#[diesel(table_name = bp_omicron_zones_not_in_service)] -pub struct BpOmicronZoneNotInService { - pub blueprint_id: Uuid, - pub bp_omicron_zone_id: Uuid, +mod diesel_util { + use crate::{ + schema::bp_omicron_zone::disposition, to_db_bp_zone_disposition, + DbBpZoneDisposition, + }; + use diesel::{ + helper_types::EqAny, prelude::*, query_dsl::methods::FilterDsl, + }; + use nexus_types::deployment::{ + BlueprintZoneDisposition, BlueprintZoneFilter, + }; + + /// An extension trait to apply a [`BlueprintZoneFilter`] to a Diesel + /// expression. + /// + /// This is applicable to any Diesel expression which includes the + /// `bp_omicron_zone` table. + /// + /// This needs to live here, rather than in `nexus-db-queries`, because it + /// names the `DbBpZoneDisposition` type which is private to this crate. + pub trait ApplyBlueprintZoneFilterExt { + type Output; + + /// Applies a [`BlueprintZoneFilter`] to a Diesel expression. + fn blueprint_zone_filter( + self, + filter: BlueprintZoneFilter, + ) -> Self::Output; + } + + impl ApplyBlueprintZoneFilterExt for E + where + E: FilterDsl, + { + type Output = E::Output; + + fn blueprint_zone_filter( + self, + filter: BlueprintZoneFilter, + ) -> Self::Output { + // This is only boxed for ease of reference above. 
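(Aside: the disposition is now stored directly in the database, replacing the old `bp_omicron_zones_not_in_service` inversion, and the `ApplyBlueprintZoneFilterExt` helper turns a `BlueprintZoneFilter` into an `eq_any` over every disposition that the filter admits. A rough in-memory sketch of that "filter is a set of admissible dispositions" idea, without Diesel; the filter variants here are illustrative, not the real `BlueprintZoneFilter`:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Disposition {
    InService,
    Quiesced,
    Expunged,
}

// Illustrative filters; the real BlueprintZoneFilter has its own variants.
#[derive(Clone, Copy)]
enum ZoneFilter {
    All,
    ShouldBeRunning,
}

fn all_matching(filter: ZoneFilter) -> Vec<Disposition> {
    match filter {
        ZoneFilter::All => vec![
            Disposition::InService,
            Disposition::Quiesced,
            Disposition::Expunged,
        ],
        ZoneFilter::ShouldBeRunning => vec![Disposition::InService],
    }
}

fn main() {
    let zones = vec![
        ("nexus", Disposition::InService),
        ("old-crucible", Disposition::Expunged),
    ];
    assert_eq!(all_matching(ZoneFilter::All).len(), 3);

    // The SQL version does the same thing with `disposition.eq_any(...)`.
    let admissible = all_matching(ZoneFilter::ShouldBeRunning);
    let running: Vec<_> = zones
        .iter()
        .filter(|(_, d)| admissible.contains(d))
        .map(|(name, _)| *name)
        .collect();
    assert_eq!(running, vec!["nexus"]);
}
```
)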
+ let all_matching_dispositions: BoxedIterator = + Box::new( + BlueprintZoneDisposition::all_matching(filter) + .map(to_db_bp_zone_disposition), + ); + + FilterDsl::filter( + self, + disposition.eq_any(all_matching_dispositions), + ) + } + } + + type BoxedIterator = Box>; + type BlueprintZoneFilterQuery = + EqAny>; } + +pub use diesel_util::ApplyBlueprintZoneFilterExt; diff --git a/nexus/db-model/src/external_ip.rs b/nexus/db-model/src/external_ip.rs index 337e7ef2a7..2a68b4d7d0 100644 --- a/nexus/db-model/src/external_ip.rs +++ b/nexus/db-model/src/external_ip.rs @@ -9,6 +9,7 @@ use crate::impl_enum_type; use crate::schema::external_ip; use crate::schema::floating_ip; use crate::Name; +use crate::ServiceNetworkInterface; use crate::SqlU16; use chrono::DateTime; use chrono::Utc; @@ -16,16 +17,25 @@ use db_macros::Resource; use diesel::Queryable; use diesel::Selectable; use ipnetwork::IpNetwork; +use nexus_types::deployment::OmicronZoneExternalFloatingIp; +use nexus_types::deployment::OmicronZoneExternalIp; +use nexus_types::deployment::OmicronZoneExternalSnatIp; use nexus_types::external_api::params; use nexus_types::external_api::shared; use nexus_types::external_api::views; -use omicron_common::address::NUM_SOURCE_NAT_PORTS; +use nexus_types::inventory::SourceNatConfig; use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadata; +use omicron_common::api::internal::shared::SourceNatConfigError; +use omicron_uuid_kinds::ExternalIpUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::OmicronZoneUuid; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use sled_agent_client::types::InstanceExternalIpBody; +use sled_agent_client::ZoneKind; +use slog_error_chain::SlogInlineError; use std::convert::TryFrom; use std::net::IpAddr; use uuid::Uuid; @@ -130,6 +140,57 @@ pub struct ExternalIp { pub is_probe: bool, } +#[derive(Debug, thiserror::Error, SlogInlineError)] +pub enum OmicronZoneExternalIpError { + #[error("database IP is for an instance")] + IpIsForInstance, + #[error("invalid SNAT configuration")] + InvalidSnatConfig(#[from] SourceNatConfigError), + #[error( + "Omicron zone has a range of IPs ({0}); only a single IP is supported" + )] + NotSingleIp(IpNetwork), + #[error( + "database IP is ephemeral; currently unsupported for Omicron zones" + )] + EphemeralIp, +} + +impl TryFrom<&'_ ExternalIp> for OmicronZoneExternalIp { + type Error = OmicronZoneExternalIpError; + + fn try_from(row: &ExternalIp) -> Result { + if !row.is_service { + return Err(OmicronZoneExternalIpError::IpIsForInstance); + } + let size = match row.ip.size() { + ipnetwork::NetworkSize::V4(n) => u128::from(n), + ipnetwork::NetworkSize::V6(n) => n, + }; + if size != 1 { + return Err(OmicronZoneExternalIpError::NotSingleIp(row.ip)); + } + + match row.kind { + IpKind::SNat => Ok(Self::Snat(OmicronZoneExternalSnatIp { + id: ExternalIpUuid::from_untyped_uuid(row.id), + snat_cfg: SourceNatConfig::new( + row.ip.ip(), + row.first_port.0, + row.last_port.0, + )?, + })), + IpKind::Floating => { + Ok(Self::Floating(OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::from_untyped_uuid(row.id), + ip: row.ip.ip(), + })) + } + IpKind::Ephemeral => Err(OmicronZoneExternalIpError::EphemeralIp), + } + } +} + /// A view type constructed from `ExternalIp` used to represent Floating IP /// objects in user-facing APIs. 
/// @@ -152,15 +213,13 @@ pub struct FloatingIp { pub project_id: Uuid, } -impl From +impl TryFrom for omicron_common::api::internal::shared::SourceNatConfig { - fn from(eip: ExternalIp) -> Self { - Self { - ip: eip.ip.ip(), - first_port: eip.first_port.0, - last_port: eip.last_port.0, - } + type Error = SourceNatConfigError; + + fn try_from(eip: ExternalIp) -> Result { + Self::new(eip.ip.ip(), eip.first_port.0, eip.last_port.0) } } @@ -302,104 +361,64 @@ impl IncompleteExternalIp { } } - pub fn for_service_explicit( - id: Uuid, - name: &Name, - description: &str, - service_id: Uuid, - pool_id: Uuid, - address: IpAddr, - ) -> Self { - Self { - id, - name: Some(name.clone()), - description: Some(description.to_string()), - time_created: Utc::now(), - kind: IpKind::Floating, - is_service: true, - is_probe: false, - parent_id: Some(service_id), - pool_id, - project_id: None, - explicit_ip: Some(IpNetwork::from(address)), - explicit_port_range: None, - state: IpAttachState::Attached, - } - } - - pub fn for_service_explicit_snat( - id: Uuid, - service_id: Uuid, - pool_id: Uuid, - address: IpAddr, - (first_port, last_port): (u16, u16), - ) -> Self { - assert!( - (first_port % NUM_SOURCE_NAT_PORTS == 0) - && (last_port - first_port + 1) == NUM_SOURCE_NAT_PORTS, - "explicit port range must be aligned to {}", - NUM_SOURCE_NAT_PORTS, - ); - let explicit_port_range = Some((first_port.into(), last_port.into())); - let kind = IpKind::SNat; - Self { - id, - name: None, - description: None, - time_created: Utc::now(), - kind, - is_service: true, - is_probe: false, - parent_id: Some(service_id), - pool_id, - project_id: None, - explicit_ip: Some(IpNetwork::from(address)), - explicit_port_range, - state: kind.initial_state(), - } - } - - pub fn for_service( - id: Uuid, - name: &Name, - description: &str, - service_id: Uuid, + pub fn for_omicron_zone( pool_id: Uuid, + external_ip: OmicronZoneExternalIp, + zone_id: OmicronZoneUuid, + zone_kind: ZoneKind, ) -> Self { - let kind = IpKind::Floating; - Self { - id, - name: Some(name.clone()), - description: Some(description.to_string()), - time_created: Utc::now(), - kind, - is_service: true, - is_probe: false, - parent_id: Some(service_id), - pool_id, - project_id: None, - explicit_ip: None, - explicit_port_range: None, - state: IpAttachState::Attached, - } - } + let (kind, port_range, name, description, state) = match external_ip { + OmicronZoneExternalIp::Floating(_) => { + // We'll name this external IP the same as we'll name the NIC + // associated with this zone. + let name = ServiceNetworkInterface::name(zone_id, zone_kind); + + // Using `IpAttachState::Attached` preserves existing behavior, + // `IpKind::Floating.initial_state()` is `::Detached`. If/when + // we do more to unify IPs between services and instances, this + // probably needs to be addressed. + let state = IpAttachState::Attached; + + ( + IpKind::Floating, + None, + Some(name), + Some(zone_kind.to_string()), + state, + ) + } + OmicronZoneExternalIp::Snat(OmicronZoneExternalSnatIp { + snat_cfg, + .. + }) => { + let (first_port, last_port) = snat_cfg.port_range_raw(); + let kind = IpKind::SNat; + ( + kind, + Some((first_port.into(), last_port.into())), + // Only floating IPs are allowed to have names and + // descriptions. 
+ None, + None, + kind.initial_state(), + ) + } + }; - pub fn for_service_snat(id: Uuid, service_id: Uuid, pool_id: Uuid) -> Self { - let kind = IpKind::SNat; Self { - id, - name: None, - description: None, + id: external_ip.id().into_untyped_uuid(), + name, + description, time_created: Utc::now(), kind, is_service: true, is_probe: false, - parent_id: Some(service_id), + parent_id: Some(zone_id.into_untyped_uuid()), pool_id, project_id: None, - explicit_ip: None, - explicit_port_range: None, - state: kind.initial_state(), + explicit_ip: Some(IpNetwork::from(external_ip.ip())), + explicit_port_range: port_range, + state, } } @@ -531,7 +550,7 @@ impl TryFrom for FloatingIp { ))?; let identity = FloatingIpIdentity { - id: ip.id, + id: ip.id.into_untyped_uuid(), name, description, time_created: ip.time_created, @@ -571,6 +590,7 @@ impl From for views::FloatingIp { views::FloatingIp { ip: ip.ip.ip(), + ip_pool_id: ip.ip_pool_id, identity, project_id: ip.project_id, instance_id: ip.parent_id, diff --git a/nexus/db-model/src/instance.rs b/nexus/db-model/src/instance.rs index f7731ff903..286c68ac7c 100644 --- a/nexus/db-model/src/instance.rs +++ b/nexus/db-model/src/instance.rs @@ -107,6 +107,8 @@ impl DatastoreAttachTargetConfig for Instance { } impl DatastoreAttachTargetConfig for Instance { + // TODO-cleanup ideally this would be an ExternalIpUuid, haven't quite + // figured out how to make that work type Id = Uuid; type CollectionIdColumn = instance::dsl::id; diff --git a/nexus/db-model/src/inventory.rs b/nexus/db-model/src/inventory.rs index cde067f3e8..456987f0ce 100644 --- a/nexus/db-model/src/inventory.rs +++ b/nexus/db-model/src/inventory.rs @@ -12,6 +12,7 @@ use crate::schema::{ inv_sled_agent, inv_sled_omicron_zones, inv_zpool, sw_caboose, sw_root_of_trust_page, }; +use crate::typed_uuid::DbTypedUuid; use crate::PhysicalDiskKind; use crate::{ impl_enum_type, ipv6, ByteCount, Generation, MacAddr, Name, ServiceKind, @@ -31,6 +32,12 @@ use nexus_types::inventory::{ BaseboardId, Caboose, Collection, PowerState, RotPage, RotSlot, }; use omicron_common::api::internal::shared::NetworkInterface; +use omicron_uuid_kinds::CollectionKind; +use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledKind; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::ZpoolUuid; use uuid::Uuid; // See [`nexus_types::inventory::PowerState`]. @@ -246,16 +253,33 @@ impl From for nexus_types::inventory::SpType { #[derive(Queryable, Insertable, Clone, Debug, Selectable)] #[diesel(table_name = inv_collection)] pub struct InvCollection { - pub id: Uuid, + pub id: DbTypedUuid, pub time_started: DateTime, pub time_done: DateTime, pub collector: String, } +impl InvCollection { + /// Creates a new `InvCollection`. + pub fn new( + id: CollectionUuid, + time_started: DateTime, + time_done: DateTime, + collector: String, + ) -> Self { + InvCollection { id: id.into(), time_started, time_done, collector } + } + + /// Returns the ID. 
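(Aside: the inventory tables now store `DbTypedUuid<CollectionKind>` and `DbTypedUuid<SledKind>` wrappers instead of bare `Uuid`s, with `.into()` at the boundaries, so a collection ID and a sled ID can no longer be swapped silently. A hand-rolled sketch of that marker-type pattern; the real wrappers come from `omicron-uuid-kinds` and `DbTypedUuid`, and everything here, including the `u128` standing in for a `Uuid`, is simplified:

```rust
use std::marker::PhantomData;

// Kind markers: zero-sized types that exist only to tag the ID.
#[derive(Debug, Clone, Copy, PartialEq)]
struct CollectionKind;
#[derive(Debug, Clone, Copy, PartialEq)]
struct SledKind;

#[derive(Debug, Clone, Copy, PartialEq)]
struct TypedUuid<K> {
    raw: u128, // stand-in for a real Uuid
    _kind: PhantomData<K>,
}

impl<K> TypedUuid<K> {
    fn new(raw: u128) -> Self {
        Self { raw, _kind: PhantomData }
    }
}

type CollectionUuid = TypedUuid<CollectionKind>;
type SledUuid = TypedUuid<SledKind>;

// Only accepts collection IDs; passing a SledUuid is a compile error.
fn lookup_collection(id: CollectionUuid) -> String {
    format!("collection {}", id.raw)
}

fn main() {
    let collection: CollectionUuid = TypedUuid::new(7);
    let _sled: SledUuid = TypedUuid::new(9);
    assert_eq!(lookup_collection(collection), "collection 7");
    // lookup_collection(_sled) would not compile -- that's the safety win.
}
```
)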
+ pub fn id(&self) -> CollectionUuid { + self.id.into() + } +} + impl<'a> From<&'a Collection> for InvCollection { fn from(c: &'a Collection) -> Self { InvCollection { - id: c.id, + id: c.id.into(), time_started: c.time_started, time_done: c.time_done, collector: c.collector.clone(), @@ -369,26 +393,34 @@ impl From for RotPage { #[derive(Queryable, Insertable, Clone, Debug, Selectable)] #[diesel(table_name = inv_collection_error)] pub struct InvCollectionError { - pub inv_collection_id: Uuid, + pub inv_collection_id: DbTypedUuid, pub idx: SqlU16, pub message: String, } impl InvCollectionError { - pub fn new(inv_collection_id: Uuid, idx: u16, message: String) -> Self { + pub fn new( + inv_collection_id: CollectionUuid, + idx: u16, + message: String, + ) -> Self { InvCollectionError { - inv_collection_id, + inv_collection_id: inv_collection_id.into(), idx: SqlU16::from(idx), message, } } + + pub fn inv_collection_id(&self) -> CollectionUuid { + self.inv_collection_id.into() + } } /// See [`nexus_types::inventory::ServiceProcessor`]. #[derive(Queryable, Clone, Debug, Selectable)] #[diesel(table_name = inv_service_processor)] pub struct InvServiceProcessor { - pub inv_collection_id: Uuid, + pub inv_collection_id: DbTypedUuid, pub hw_baseboard_id: Uuid, pub time_collected: DateTime, pub source: String, @@ -596,10 +628,10 @@ impl From for nexus_types::inventory::SledRole { #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = inv_sled_agent)] pub struct InvSledAgent { - pub inv_collection_id: Uuid, + pub inv_collection_id: DbTypedUuid, pub time_collected: DateTime, pub source: String, - pub sled_id: Uuid, + pub sled_id: DbTypedUuid, pub hw_baseboard_id: Option, pub sled_agent_ip: ipv6::Ipv6Addr, pub sled_agent_port: SqlU16, @@ -611,7 +643,7 @@ pub struct InvSledAgent { impl InvSledAgent { pub fn new_without_baseboard( - collection_id: Uuid, + collection_id: CollectionUuid, sled_agent: &nexus_types::inventory::SledAgent, ) -> Result { // It's irritating to have to check this case at runtime. 
The challenge @@ -633,10 +665,10 @@ impl InvSledAgent { )) } else { Ok(InvSledAgent { - inv_collection_id: collection_id, + inv_collection_id: collection_id.into(), time_collected: sled_agent.time_collected, source: sled_agent.source.clone(), - sled_id: sled_agent.sled_id, + sled_id: sled_agent.sled_id.into(), hw_baseboard_id: None, sled_agent_ip: ipv6::Ipv6Addr::from( *sled_agent.sled_agent_address.ip(), @@ -659,8 +691,8 @@ impl InvSledAgent { #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = inv_physical_disk)] pub struct InvPhysicalDisk { - pub inv_collection_id: Uuid, - pub sled_id: Uuid, + pub inv_collection_id: DbTypedUuid, + pub sled_id: DbTypedUuid, pub slot: i64, pub vendor: String, pub model: String, @@ -670,13 +702,13 @@ pub struct InvPhysicalDisk { impl InvPhysicalDisk { pub fn new( - inv_collection_id: Uuid, - sled_id: Uuid, + inv_collection_id: CollectionUuid, + sled_id: SledUuid, disk: nexus_types::inventory::PhysicalDisk, ) -> Self { Self { - inv_collection_id, - sled_id, + inv_collection_id: inv_collection_id.into(), + sled_id: sled_id.into(), slot: disk.slot, vendor: disk.identity.vendor, model: disk.identity.model, @@ -704,24 +736,24 @@ impl From for nexus_types::inventory::PhysicalDisk { #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = inv_zpool)] pub struct InvZpool { - pub inv_collection_id: Uuid, + pub inv_collection_id: DbTypedUuid, pub time_collected: DateTime, pub id: Uuid, - pub sled_id: Uuid, + pub sled_id: DbTypedUuid, pub total_size: ByteCount, } impl InvZpool { pub fn new( - inv_collection_id: Uuid, - sled_id: Uuid, + inv_collection_id: CollectionUuid, + sled_id: SledUuid, zpool: &nexus_types::inventory::Zpool, ) -> Self { Self { - inv_collection_id, + inv_collection_id: inv_collection_id.into(), time_collected: zpool.time_collected, - id: zpool.id, - sled_id, + id: zpool.id.into_untyped_uuid(), + sled_id: sled_id.into(), total_size: zpool.total_size.into(), } } @@ -731,7 +763,7 @@ impl From for nexus_types::inventory::Zpool { fn from(pool: InvZpool) -> Self { Self { time_collected: pool.time_collected, - id: pool.id, + id: ZpoolUuid::from_untyped_uuid(pool.id), total_size: *pool.total_size, } } @@ -741,23 +773,23 @@ impl From for nexus_types::inventory::Zpool { #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = inv_sled_omicron_zones)] pub struct InvSledOmicronZones { - pub inv_collection_id: Uuid, + pub inv_collection_id: DbTypedUuid, pub time_collected: DateTime, pub source: String, - pub sled_id: Uuid, + pub sled_id: DbTypedUuid, pub generation: Generation, } impl InvSledOmicronZones { pub fn new( - inv_collection_id: Uuid, + inv_collection_id: CollectionUuid, zones_found: &nexus_types::inventory::OmicronZonesFound, ) -> InvSledOmicronZones { InvSledOmicronZones { - inv_collection_id, + inv_collection_id: inv_collection_id.into(), time_collected: zones_found.time_collected, source: zones_found.source.clone(), - sled_id: zones_found.sled_id, + sled_id: zones_found.sled_id.into(), generation: Generation(zones_found.zones.generation), } } @@ -768,7 +800,7 @@ impl InvSledOmicronZones { nexus_types::inventory::OmicronZonesFound { time_collected: self.time_collected, source: self.source, - sled_id: self.sled_id, + sled_id: self.sled_id.into(), zones: nexus_types::inventory::OmicronZonesConfig { generation: *self.generation, zones: Vec::new(), @@ -821,8 +853,8 @@ impl From for ServiceKind { #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = 
inv_omicron_zone)] pub struct InvOmicronZone { - pub inv_collection_id: Uuid, - pub sled_id: Uuid, + pub inv_collection_id: DbTypedUuid, + pub sled_id: DbTypedUuid, pub id: Uuid, pub underlay_address: ipv6::Ipv6Addr, pub zone_type: ZoneType, @@ -846,14 +878,22 @@ pub struct InvOmicronZone { impl InvOmicronZone { pub fn new( - inv_collection_id: Uuid, - sled_id: Uuid, + inv_collection_id: CollectionUuid, + sled_id: SledUuid, zone: &nexus_types::inventory::OmicronZoneConfig, ) -> Result { - let zone = OmicronZone::new(sled_id, zone)?; + // Inventory zones do not know the external IP ID. + let external_ip_id = None; + let zone = OmicronZone::new( + sled_id, + zone.id, + zone.underlay_address, + &zone.zone_type, + external_ip_id, + )?; Ok(Self { - inv_collection_id, - sled_id: zone.sled_id, + inv_collection_id: inv_collection_id.into(), + sled_id: zone.sled_id.into(), id: zone.id, underlay_address: zone.underlay_address, zone_type: zone.zone_type, @@ -881,7 +921,7 @@ impl InvOmicronZone { nic_row: Option, ) -> Result { let zone = OmicronZone { - sled_id: self.sled_id, + sled_id: self.sled_id.into(), id: self.id, underlay_address: self.underlay_address, zone_type: self.zone_type, @@ -901,6 +941,9 @@ impl InvOmicronZone { snat_ip: self.snat_ip, snat_first_port: self.snat_first_port, snat_last_port: self.snat_last_port, + // Inventory zones don't know an external IP ID, and Omicron zone + // configs don't need it. + external_ip_id: None, }; zone.into_omicron_zone_config(nic_row.map(OmicronZoneNic::from)) } @@ -909,7 +952,7 @@ impl InvOmicronZone { #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = inv_omicron_zone_nic)] pub struct InvOmicronZoneNic { - inv_collection_id: Uuid, + inv_collection_id: DbTypedUuid, pub id: Uuid, name: Name, ip: IpNetwork, @@ -937,12 +980,15 @@ impl From for OmicronZoneNic { impl InvOmicronZoneNic { pub fn new( - inv_collection_id: Uuid, + inv_collection_id: CollectionUuid, zone: &nexus_types::inventory::OmicronZoneConfig, ) -> Result, anyhow::Error> { - let zone_nic = OmicronZoneNic::new(zone)?; - Ok(zone_nic.map(|nic| Self { - inv_collection_id, + let Some(nic) = zone.zone_type.service_vnic() else { + return Ok(None); + }; + let nic = OmicronZoneNic::new(zone.id, nic)?; + Ok(Some(Self { + inv_collection_id: inv_collection_id.into(), id: nic.id, name: nic.name, ip: nic.ip, diff --git a/nexus/db-model/src/ipv4_nat_entry.rs b/nexus/db-model/src/ipv4_nat_entry.rs index c3763346c6..4ff1ee9171 100644 --- a/nexus/db-model/src/ipv4_nat_entry.rs +++ b/nexus/db-model/src/ipv4_nat_entry.rs @@ -81,10 +81,10 @@ pub struct Ipv4NatEntryView { impl From for Ipv4NatEntryView { fn from(value: Ipv4NatChange) -> Self { Self { - external_address: value.external_address.ip(), + external_address: value.external_address.addr(), first_port: value.first_port.into(), last_port: value.last_port.into(), - sled_address: value.sled_address.ip(), + sled_address: value.sled_address.addr(), vni: value.vni.0, mac: *value.mac, gen: value.version, diff --git a/nexus/db-model/src/ipv4net.rs b/nexus/db-model/src/ipv4net.rs index eaf8a6eed8..b2cf6ffefa 100644 --- a/nexus/db-model/src/ipv4net.rs +++ b/nexus/db-model/src/ipv4net.rs @@ -10,7 +10,6 @@ use diesel::serialize::{self, ToSql}; use diesel::sql_types; use ipnetwork::IpNetwork; use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; -use omicron_common::api::external; use serde::Deserialize; use serde::Serialize; use std::net::Ipv4Addr; @@ -27,10 +26,10 @@ use std::net::Ipv4Addr; Deserialize, )] #[diesel(sql_type = 
sql_types::Inet)] -pub struct Ipv4Net(pub external::Ipv4Net); +pub struct Ipv4Net(pub oxnet::Ipv4Net); -NewtypeFrom! { () pub struct Ipv4Net(external::Ipv4Net); } -NewtypeDeref! { () pub struct Ipv4Net(external::Ipv4Net); } +NewtypeFrom! { () pub struct Ipv4Net(oxnet::Ipv4Net); } +NewtypeDeref! { () pub struct Ipv4Net(oxnet::Ipv4Net); } impl Ipv4Net { /// Check if an address is a valid user-requestable address for this subnet @@ -41,19 +40,19 @@ impl Ipv4Net { if !self.contains(addr) { return Err(RequestAddressError::OutsideSubnet( addr.into(), - self.0 .0.into(), + oxnet::IpNet::from(self.0).into(), )); } // Only the first N addresses are reserved if self - .iter() + .addr_iter() .take(NUM_INITIAL_RESERVED_IP_ADDRESSES) .any(|this| this == addr) { return Err(RequestAddressError::Reserved); } // Last address in the subnet is reserved - if addr == self.broadcast() { + if addr == self.broadcast().expect("narrower subnet than expected") { return Err(RequestAddressError::Broadcast); } @@ -67,7 +66,7 @@ impl ToSql for Ipv4Net { out: &mut serialize::Output<'a, '_, Pg>, ) -> serialize::Result { >::to_sql( - &IpNetwork::V4(*self.0), + &IpNetwork::V4(self.0.into()), &mut out.reborrow(), ) } @@ -81,7 +80,7 @@ where fn from_sql(bytes: DB::RawValue<'_>) -> deserialize::Result { let inet = IpNetwork::from_sql(bytes)?; match inet { - IpNetwork::V4(net) => Ok(Ipv4Net(external::Ipv4Net(net))), + IpNetwork::V4(net) => Ok(Ipv4Net(net.into())), _ => Err("Expected IPV4".into()), } } diff --git a/nexus/db-model/src/ipv6net.rs b/nexus/db-model/src/ipv6net.rs index d516b67ed9..adcf732f42 100644 --- a/nexus/db-model/src/ipv6net.rs +++ b/nexus/db-model/src/ipv6net.rs @@ -9,7 +9,6 @@ use diesel::serialize::{self, ToSql}; use diesel::sql_types; use ipnetwork::IpNetwork; use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; -use omicron_common::api::external; use rand::{rngs::StdRng, SeedableRng}; use serde::Deserialize; use serde::Serialize; @@ -29,10 +28,10 @@ use crate::RequestAddressError; Deserialize, )] #[diesel(sql_type = sql_types::Inet)] -pub struct Ipv6Net(pub external::Ipv6Net); +pub struct Ipv6Net(pub oxnet::Ipv6Net); -NewtypeFrom! { () pub struct Ipv6Net(external::Ipv6Net); } -NewtypeDeref! { () pub struct Ipv6Net(external::Ipv6Net); } +NewtypeFrom! { () pub struct Ipv6Net(oxnet::Ipv6Net); } +NewtypeDeref! { () pub struct Ipv6Net(oxnet::Ipv6Net); } impl Ipv6Net { /// Generate a random subnetwork from this one, of the given prefix length. @@ -48,10 +47,10 @@ impl Ipv6Net { use rand::RngCore; const MAX_IPV6_SUBNET_PREFIX: u8 = 128; - if prefix < self.prefix() || prefix > MAX_IPV6_SUBNET_PREFIX { + if prefix < self.width() || prefix > MAX_IPV6_SUBNET_PREFIX { return None; } - if prefix == self.prefix() { + if prefix == self.width() { return Some(*self); } @@ -72,17 +71,17 @@ impl Ipv6Net { let full_mask = !(u128::MAX >> prefix); // Get the existing network address and mask. - let network = u128::from_be_bytes(self.network().octets()); - let network_mask = u128::from_be_bytes(self.mask().octets()); + let network = u128::from(self.prefix()); + let network_mask = u128::from(self.mask_addr()); // Take random bits _only_ where the new mask is set. 
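// Editorial sketch, not part of the patch: a worked example of the mask
// arithmetic used in the next few lines, assuming the parent network is a /48
// and the caller asked for `prefix = 64`.
//
//   full_mask    = 0xffff_ffff_ffff_ffff_0000_0000_0000_0000  (top 64 bits set)
//   network_mask = 0xffff_ffff_ffff_0000_0000_0000_0000_0000  (top 48 bits set)
//   random_mask  = full_mask ^ network_mask
//                = 0x0000_0000_0000_ffff_0000_0000_0000_0000  (the 16 bits just
//                                                              past the /48)
//
// Only those 16 "new" prefix bits are taken from `random`; the parent's /48
// network bits are kept as-is and the host bits stay zero.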
let random_mask = full_mask ^ network_mask; let out = (network & network_mask) | (random & random_mask); - let addr = std::net::Ipv6Addr::from(out.to_be_bytes()); - let net = ipnetwork::Ipv6Network::new(addr, prefix) + let addr = std::net::Ipv6Addr::from(out); + let net = oxnet::Ipv6Net::new(addr, prefix) .expect("Failed to create random subnet"); - Some(Self(external::Ipv6Net(net))) + Some(Self(net)) } /// Check if an address is a valid user-requestable address for this subnet @@ -93,7 +92,7 @@ impl Ipv6Net { if !self.contains(addr) { return Err(RequestAddressError::OutsideSubnet( addr.into(), - self.0 .0.into(), + oxnet::IpNet::from(self.0).into(), )); } // Only the first N addresses are reserved @@ -114,7 +113,7 @@ impl ToSql for Ipv6Net { out: &mut serialize::Output<'a, '_, Pg>, ) -> serialize::Result { >::to_sql( - &IpNetwork::V6(self.0 .0), + &IpNetwork::V6(self.0.into()), &mut out.reborrow(), ) } @@ -128,7 +127,7 @@ where fn from_sql(bytes: DB::RawValue<'_>) -> deserialize::Result { let inet = IpNetwork::from_sql(bytes)?; match inet { - IpNetwork::V6(net) => Ok(Ipv6Net(external::Ipv6Net(net))), + IpNetwork::V6(net) => Ok(Ipv6Net(net.into())), _ => Err("Expected IPV6".into()), } } diff --git a/nexus/db-model/src/network_interface.rs b/nexus/db-model/src/network_interface.rs index fdcfcbf588..95bb8bb7f2 100644 --- a/nexus/db-model/src/network_interface.rs +++ b/nexus/db-model/src/network_interface.rs @@ -8,13 +8,19 @@ use crate::schema::instance_network_interface; use crate::schema::network_interface; use crate::schema::service_network_interface; use crate::Name; +use crate::SqlU8; use chrono::DateTime; use chrono::Utc; use db_macros::Resource; use diesel::AsChangeset; +use ipnetwork::NetworkSize; use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::api::{external, internal}; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::VnicUuid; +use sled_agent_client::ZoneKind; use uuid::Uuid; /// The max number of interfaces that may be associated with a resource, @@ -59,7 +65,7 @@ pub struct NetworkInterface { // If neither is specified, auto-assign one of each? pub ip: ipnetwork::IpNetwork, - pub slot: i16, + pub slot: SqlU8, #[diesel(column_name = is_primary)] pub primary: bool, } @@ -67,7 +73,7 @@ pub struct NetworkInterface { impl NetworkInterface { pub fn into_internal( self, - subnet: external::IpNet, + subnet: oxnet::IpNet, ) -> internal::shared::NetworkInterface { internal::shared::NetworkInterface { id: self.id(), @@ -91,10 +97,10 @@ impl NetworkInterface { name: self.name().clone(), ip: self.ip.ip(), mac: self.mac.into(), - subnet: subnet, + subnet, vni: external::Vni::try_from(0).unwrap(), primary: self.primary, - slot: self.slot.try_into().unwrap(), + slot: *self.slot, } } } @@ -117,7 +123,7 @@ pub struct InstanceNetworkInterface { pub mac: MacAddr, pub ip: ipnetwork::IpNetwork, - pub slot: i16, + pub slot: SqlU8, #[diesel(column_name = is_primary)] pub primary: bool, } @@ -140,11 +146,78 @@ pub struct ServiceNetworkInterface { pub mac: MacAddr, pub ip: ipnetwork::IpNetwork, - pub slot: i16, + pub slot: SqlU8, #[diesel(column_name = is_primary)] pub primary: bool, } +impl ServiceNetworkInterface { + /// Generate a suitable [`Name`] for the given Omicron zone ID and kind. + pub fn name(zone_id: OmicronZoneUuid, zone_kind: ZoneKind) -> Name { + // Ideally we'd use `zone_kind.to_string()` here, but that uses + // underscores as separators which aren't allowed in `Name`s. 
We also + // preserve some existing naming behavior where NTP external networking + // is just called "ntp", not "boundary-ntp". + // + // Most of these zone kinds do not get external networking and therefore + // we don't need to be able to generate names for them, but it's simpler + // to give them valid descriptions than worry about error handling here. + let prefix = match zone_kind { + ZoneKind::BoundaryNtp | ZoneKind::InternalNtp => "ntp", + ZoneKind::Clickhouse => "clickhouse", + ZoneKind::ClickhouseKeeper => "clickhouse-keeper", + ZoneKind::CockroachDb => "cockroach", + ZoneKind::Crucible => "crucible", + ZoneKind::CruciblePantry => "crucible-pantry", + ZoneKind::ExternalDns => "external-dns", + ZoneKind::InternalDns => "internal-dns", + ZoneKind::Nexus => "nexus", + ZoneKind::Oximeter => "oximeter", + }; + + // Now that we have a valid prefix, we know this format string + // always produces a valid `Name`, so we'll unwrap here. + let name = format!("{prefix}-{zone_id}") + .parse() + .expect("valid name failed to parse"); + + Name(name) + } +} + +#[derive(Debug, thiserror::Error)] +#[error("Service NIC {nic_id} has a range of IPs ({ip}); only a single IP is supported")] +pub struct ServiceNicNotSingleIpError { + pub nic_id: Uuid, + pub ip: ipnetwork::IpNetwork, +} + +impl TryFrom<&'_ ServiceNetworkInterface> + for nexus_types::deployment::OmicronZoneNic +{ + type Error = ServiceNicNotSingleIpError; + + fn try_from(nic: &ServiceNetworkInterface) -> Result { + let size = match nic.ip.size() { + NetworkSize::V4(n) => u128::from(n), + NetworkSize::V6(n) => n, + }; + if size != 1 { + return Err(ServiceNicNotSingleIpError { + nic_id: nic.id(), + ip: nic.ip, + }); + } + Ok(Self { + id: VnicUuid::from_untyped_uuid(nic.id()), + mac: *nic.mac, + ip: nic.ip.ip(), + slot: *nic.slot, + primary: nic.primary, + }) + } +} + impl NetworkInterface { /// Treat this `NetworkInterface` as an `InstanceNetworkInterface`. /// diff --git a/nexus/db-model/src/omicron_zone_config.rs b/nexus/db-model/src/omicron_zone_config.rs index ce3127a9b3..c2258dba6c 100644 --- a/nexus/db-model/src/omicron_zone_config.rs +++ b/nexus/db-model/src/omicron_zone_config.rs @@ -11,21 +11,27 @@ //! collecting extra metadata like uptime). This module provides conversion //! helpers for the parts of those tables that are common between the two. 
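// Editorial sketch, not part of the patch: expected behavior of the
// `ServiceNetworkInterface::name` helper added in network_interface.rs above.
// `OmicronZoneUuid::nil()` and `Name::as_str()` are assumed to be available in
// this context.
fn example_service_nic_names() {
    let zone_id = omicron_uuid_kinds::OmicronZoneUuid::nil();

    // Both NTP kinds intentionally collapse to the plain "ntp" prefix.
    let ntp = ServiceNetworkInterface::name(zone_id, ZoneKind::BoundaryNtp);
    assert!(ntp.as_str().starts_with("ntp-"));

    // Every other kind uses a dash-separated form of its name, e.g.
    // "crucible-pantry-<zone-id>".
    let pantry =
        ServiceNetworkInterface::name(zone_id, ZoneKind::CruciblePantry);
    assert!(pantry.as_str().starts_with("crucible-pantry-"));
}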
-use std::net::SocketAddrV6; - use crate::inventory::ZoneType; use crate::{ipv6, MacAddr, Name, SqlU16, SqlU32, SqlU8}; use anyhow::{anyhow, bail, ensure, Context}; use ipnetwork::IpNetwork; -use nexus_types::inventory::OmicronZoneType; -use omicron_common::api::internal::shared::{ - NetworkInterface, NetworkInterfaceKind, +use nexus_types::deployment::BlueprintZoneDisposition; +use nexus_types::deployment::BlueprintZoneType; +use nexus_types::deployment::{ + blueprint_zone_type, OmicronZoneExternalFloatingAddr, + OmicronZoneExternalFloatingIp, OmicronZoneExternalSnatIp, +}; +use nexus_types::inventory::{NetworkInterface, OmicronZoneType}; +use omicron_common::api::internal::shared::NetworkInterfaceKind; +use omicron_uuid_kinds::{ + ExternalIpUuid, GenericUuid, OmicronZoneUuid, SledUuid, }; +use std::net::{IpAddr, Ipv6Addr, SocketAddr, SocketAddrV6}; use uuid::Uuid; #[derive(Debug)] pub(crate) struct OmicronZone { - pub(crate) sled_id: Uuid, + pub(crate) sled_id: SledUuid, pub(crate) id: Uuid, pub(crate) underlay_address: ipv6::Ipv6Addr, pub(crate) zone_type: ZoneType, @@ -45,15 +51,20 @@ pub(crate) struct OmicronZone { pub(crate) snat_ip: Option, pub(crate) snat_first_port: Option, pub(crate) snat_last_port: Option, + // Only present for BlueprintZoneConfig; always `None` for OmicronZoneConfig + pub(crate) external_ip_id: Option, } impl OmicronZone { pub(crate) fn new( - sled_id: Uuid, - zone: &nexus_types::inventory::OmicronZoneConfig, + sled_id: SledUuid, + zone_id: Uuid, + zone_underlay_address: Ipv6Addr, + zone_type: &nexus_types::inventory::OmicronZoneType, + external_ip_id: Option, ) -> anyhow::Result { - let id = zone.id; - let underlay_address = ipv6::Ipv6Addr::from(zone.underlay_address); + let id = zone_id; + let underlay_address = ipv6::Ipv6Addr::from(zone_underlay_address); let mut nic_id = None; let mut dns_gz_address = None; let mut dns_gz_address_index = None; @@ -68,8 +79,7 @@ impl OmicronZone { let mut second_service_ip = None; let mut second_service_port = None; - let (zone_type, primary_service_sockaddr_str, dataset) = match &zone - .zone_type + let (zone_type, primary_service_sockaddr_str, dataset) = match zone_type { OmicronZoneType::BoundaryNtp { address, @@ -79,12 +89,13 @@ impl OmicronZone { nic, snat_cfg, } => { + let (first_port, last_port) = snat_cfg.port_range_raw(); ntp_ntp_servers = Some(ntp_servers.clone()); ntp_dns_servers = Some(dns_servers.clone()); - ntp_ntp_domain = domain.clone(); + ntp_ntp_domain.clone_from(domain); snat_ip = Some(IpNetwork::from(snat_cfg.ip)); - snat_first_port = Some(SqlU16::from(snat_cfg.first_port)); - snat_last_port = Some(SqlU16::from(snat_cfg.last_port)); + snat_first_port = Some(SqlU16::from(first_port)); + snat_last_port = Some(SqlU16::from(last_port)); nic_id = Some(nic.id); (ZoneType::BoundaryNtp, address, None) } @@ -151,7 +162,7 @@ impl OmicronZone { } => { ntp_ntp_servers = Some(ntp_servers.clone()); ntp_dns_servers = Some(dns_servers.clone()); - ntp_ntp_domain = domain.clone(); + ntp_ntp_domain.clone_from(domain); (ZoneType::InternalNtp, address, None) } OmicronZoneType::Nexus { @@ -172,8 +183,7 @@ impl OmicronZone { } }; - let dataset_zpool_name = - dataset.map(|d| d.pool_name.as_str().to_string()); + let dataset_zpool_name = dataset.map(|d| d.pool_name.to_string()); let primary_service_sockaddr = primary_service_sockaddr_str .parse::() .with_context(|| { @@ -210,6 +220,152 @@ impl OmicronZone { snat_ip, snat_first_port, snat_last_port, + external_ip_id, + }) + } + + pub(crate) fn into_blueprint_zone_config( + self, + 
disposition: BlueprintZoneDisposition, + nic_row: Option, + ) -> anyhow::Result { + let common = self.into_zone_config_common(nic_row)?; + let address = common.primary_service_address; + let zone_type = match common.zone_type { + ZoneType::BoundaryNtp => { + let snat_cfg = match ( + common.snat_ip, + common.snat_first_port, + common.snat_last_port, + ) { + (Some(ip), Some(first_port), Some(last_port)) => { + nexus_types::inventory::SourceNatConfig::new( + ip.ip(), + *first_port, + *last_port, + ) + .context("bad SNAT config for boundary NTP")? + } + _ => bail!( + "expected non-NULL snat properties, \ + found at least one NULL" + ), + }; + BlueprintZoneType::BoundaryNtp( + blueprint_zone_type::BoundaryNtp { + address, + dns_servers: common.ntp_dns_servers?, + domain: common.ntp_domain, + nic: common.nic?, + ntp_servers: common.ntp_ntp_servers?, + external_ip: OmicronZoneExternalSnatIp { + id: common.external_ip_id?, + snat_cfg, + }, + }, + ) + } + ZoneType::Clickhouse => { + BlueprintZoneType::Clickhouse(blueprint_zone_type::Clickhouse { + address, + dataset: common.dataset?, + }) + } + ZoneType::ClickhouseKeeper => BlueprintZoneType::ClickhouseKeeper( + blueprint_zone_type::ClickhouseKeeper { + address, + dataset: common.dataset?, + }, + ), + ZoneType::CockroachDb => BlueprintZoneType::CockroachDb( + blueprint_zone_type::CockroachDb { + address, + dataset: common.dataset?, + }, + ), + ZoneType::Crucible => { + BlueprintZoneType::Crucible(blueprint_zone_type::Crucible { + address, + dataset: common.dataset?, + }) + } + ZoneType::CruciblePantry => BlueprintZoneType::CruciblePantry( + blueprint_zone_type::CruciblePantry { address }, + ), + ZoneType::ExternalDns => BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { + dataset: common.dataset?, + dns_address: OmicronZoneExternalFloatingAddr { + id: common.external_ip_id?, + addr: common.dns_address?, + }, + http_address: address, + nic: common.nic?, + }, + ), + ZoneType::InternalDns => BlueprintZoneType::InternalDns( + blueprint_zone_type::InternalDns { + dataset: common.dataset?, + dns_address: match common.dns_address? { + SocketAddr::V4(addr) => { + bail!("expected V6 address; got {addr}") + } + SocketAddr::V6(addr) => addr, + }, + http_address: address, + gz_address: *common.dns_gz_address.ok_or_else(|| { + anyhow!("expected dns_gz_address, found none") + })?, + gz_address_index: *common.dns_gz_address_index.ok_or_else( + || anyhow!("expected dns_gz_address_index, found none"), + )?, + }, + ), + ZoneType::InternalNtp => BlueprintZoneType::InternalNtp( + blueprint_zone_type::InternalNtp { + address, + dns_servers: common.ntp_dns_servers?, + domain: common.ntp_domain, + ntp_servers: common.ntp_ntp_servers?, + }, + ), + ZoneType::Nexus => { + BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + internal_address: address, + nic: common.nic?, + external_tls: common + .nexus_external_tls + .ok_or_else(|| anyhow!("expected 'external_tls'"))?, + external_ip: OmicronZoneExternalFloatingIp { + id: common.external_ip_id?, + ip: common + .second_service_ip + .ok_or_else(|| { + anyhow!("expected second service IP") + })? + .ip(), + }, + external_dns_servers: common + .nexus_external_dns_servers + .ok_or_else(|| { + anyhow!("expected 'external_dns_servers'") + })? 
+ .into_iter() + .map(|i| i.ip()) + .collect(), + }) + } + ZoneType::Oximeter => { + BlueprintZoneType::Oximeter(blueprint_zone_type::Oximeter { + address, + }) + } + }; + Ok(nexus_types::deployment::BlueprintZoneConfig { + disposition, + id: OmicronZoneUuid::from_untyped_uuid(common.id), + underlay_address: std::net::Ipv6Addr::from(common.underlay_address), + zone_type, }) } @@ -217,13 +373,115 @@ impl OmicronZone { self, nic_row: Option, ) -> anyhow::Result { - let address = SocketAddrV6::new( + let common = self.into_zone_config_common(nic_row)?; + let address = common.primary_service_address.to_string(); + + let zone_type = match common.zone_type { + ZoneType::BoundaryNtp => { + let snat_cfg = match ( + common.snat_ip, + common.snat_first_port, + common.snat_last_port, + ) { + (Some(ip), Some(first_port), Some(last_port)) => { + nexus_types::inventory::SourceNatConfig::new( + ip.ip(), + *first_port, + *last_port, + ) + .context("bad SNAT config for boundary NTP")? + } + _ => bail!( + "expected non-NULL snat properties, \ + found at least one NULL" + ), + }; + OmicronZoneType::BoundaryNtp { + address, + dns_servers: common.ntp_dns_servers?, + domain: common.ntp_domain, + nic: common.nic?, + ntp_servers: common.ntp_ntp_servers?, + snat_cfg, + } + } + ZoneType::Clickhouse => OmicronZoneType::Clickhouse { + address, + dataset: common.dataset?, + }, + ZoneType::ClickhouseKeeper => OmicronZoneType::ClickhouseKeeper { + address, + dataset: common.dataset?, + }, + ZoneType::CockroachDb => OmicronZoneType::CockroachDb { + address, + dataset: common.dataset?, + }, + ZoneType::Crucible => { + OmicronZoneType::Crucible { address, dataset: common.dataset? } + } + ZoneType::CruciblePantry => { + OmicronZoneType::CruciblePantry { address } + } + ZoneType::ExternalDns => OmicronZoneType::ExternalDns { + dataset: common.dataset?, + dns_address: common.dns_address?.to_string(), + http_address: address, + nic: common.nic?, + }, + ZoneType::InternalDns => OmicronZoneType::InternalDns { + dataset: common.dataset?, + dns_address: common.dns_address?.to_string(), + http_address: address, + gz_address: *common.dns_gz_address.ok_or_else(|| { + anyhow!("expected dns_gz_address, found none") + })?, + gz_address_index: *common.dns_gz_address_index.ok_or_else( + || anyhow!("expected dns_gz_address_index, found none"), + )?, + }, + ZoneType::InternalNtp => OmicronZoneType::InternalNtp { + address, + dns_servers: common.ntp_dns_servers?, + domain: common.ntp_domain, + ntp_servers: common.ntp_ntp_servers?, + }, + ZoneType::Nexus => OmicronZoneType::Nexus { + internal_address: address, + nic: common.nic?, + external_tls: common + .nexus_external_tls + .ok_or_else(|| anyhow!("expected 'external_tls'"))?, + external_ip: common + .second_service_ip + .ok_or_else(|| anyhow!("expected second service IP"))? + .ip(), + external_dns_servers: common + .nexus_external_dns_servers + .ok_or_else(|| anyhow!("expected 'external_dns_servers'"))? 
+ .into_iter() + .map(|i| i.ip()) + .collect(), + }, + ZoneType::Oximeter => OmicronZoneType::Oximeter { address }, + }; + Ok(nexus_types::inventory::OmicronZoneConfig { + id: common.id, + underlay_address: std::net::Ipv6Addr::from(common.underlay_address), + zone_type, + }) + } + + fn into_zone_config_common( + self, + nic_row: Option, + ) -> anyhow::Result { + let primary_service_address = SocketAddrV6::new( std::net::Ipv6Addr::from(self.primary_service_ip), *self.primary_service_port, 0, 0, - ) - .to_string(); + ); // Assemble a value that we can use to extract the NIC _if necessary_ // and report an error if it was needed but not found. @@ -275,8 +533,7 @@ impl OmicronZone { let dns_address = match (self.second_service_ip, self.second_service_port) { (Some(dns_ip), Some(dns_port)) => { - Ok(std::net::SocketAddr::new(dns_ip.ip(), *dns_port) - .to_string()) + Ok(std::net::SocketAddr::new(dns_ip.ip(), *dns_port)) } _ => Err(anyhow!( "expected second service IP and port, \ @@ -294,99 +551,58 @@ impl OmicronZone { let ntp_ntp_servers = self.ntp_ntp_servers.ok_or_else(|| anyhow!("expected ntp_servers")); - let zone_type = match self.zone_type { - ZoneType::BoundaryNtp => { - let snat_cfg = match ( - self.snat_ip, - self.snat_first_port, - self.snat_last_port, - ) { - (Some(ip), Some(first_port), Some(last_port)) => { - nexus_types::inventory::SourceNatConfig { - ip: ip.ip(), - first_port: *first_port, - last_port: *last_port, - } - } - _ => bail!( - "expected non-NULL snat properties, \ - found at least one NULL" - ), - }; - OmicronZoneType::BoundaryNtp { - address, - dns_servers: ntp_dns_servers?, - domain: self.ntp_domain, - nic: nic?, - ntp_servers: ntp_ntp_servers?, - snat_cfg, - } - } - ZoneType::Clickhouse => { - OmicronZoneType::Clickhouse { address, dataset: dataset? } - } - ZoneType::ClickhouseKeeper => { - OmicronZoneType::ClickhouseKeeper { address, dataset: dataset? } - } - ZoneType::CockroachDb => { - OmicronZoneType::CockroachDb { address, dataset: dataset? } - } - ZoneType::Crucible => { - OmicronZoneType::Crucible { address, dataset: dataset? } - } - ZoneType::CruciblePantry => { - OmicronZoneType::CruciblePantry { address } - } - ZoneType::ExternalDns => OmicronZoneType::ExternalDns { - dataset: dataset?, - dns_address: dns_address?, - http_address: address, - nic: nic?, - }, - ZoneType::InternalDns => OmicronZoneType::InternalDns { - dataset: dataset?, - dns_address: dns_address?, - http_address: address, - gz_address: *self.dns_gz_address.ok_or_else(|| { - anyhow!("expected dns_gz_address, found none") - })?, - gz_address_index: *self.dns_gz_address_index.ok_or_else( - || anyhow!("expected dns_gz_address_index, found none"), - )?, - }, - ZoneType::InternalNtp => OmicronZoneType::InternalNtp { - address, - dns_servers: ntp_dns_servers?, - domain: self.ntp_domain, - ntp_servers: ntp_ntp_servers?, - }, - ZoneType::Nexus => OmicronZoneType::Nexus { - internal_address: address, - nic: nic?, - external_tls: self - .nexus_external_tls - .ok_or_else(|| anyhow!("expected 'external_tls'"))?, - external_ip: self - .second_service_ip - .ok_or_else(|| anyhow!("expected second service IP"))? - .ip(), - external_dns_servers: self - .nexus_external_dns_servers - .ok_or_else(|| anyhow!("expected 'external_dns_servers'"))? - .into_iter() - .map(|i| i.ip()) - .collect(), - }, - ZoneType::Oximeter => OmicronZoneType::Oximeter { address }, - }; - Ok(nexus_types::inventory::OmicronZoneConfig { + // Do the same for the external IP ID. 
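// Editorial note, not part of the patch: like `nic`, `dataset`, `dns_address`,
// and the NTP fields above, the external IP ID is stashed in
// `ZoneConfigCommon` as an `anyhow::Result` rather than unwrapped here, so the
// "missing value" error only fires (via `?`) for zone types that actually need
// the field -- e.g. Nexus, ExternalDns, and BoundaryNtp read
// `external_ip_id`, while Crucible never looks at it.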
+ let external_ip_id = + self.external_ip_id.context("expected an external IP ID"); + + Ok(ZoneConfigCommon { id: self.id, - underlay_address: std::net::Ipv6Addr::from(self.underlay_address), - zone_type, + underlay_address: self.underlay_address, + zone_type: self.zone_type, + primary_service_address, + snat_ip: self.snat_ip, + snat_first_port: self.snat_first_port, + snat_last_port: self.snat_last_port, + ntp_domain: self.ntp_domain, + dns_gz_address: self.dns_gz_address, + dns_gz_address_index: self.dns_gz_address_index, + nexus_external_tls: self.nexus_external_tls, + nexus_external_dns_servers: self.nexus_external_dns_servers, + second_service_ip: self.second_service_ip, + nic, + dataset, + dns_address, + ntp_dns_servers, + ntp_ntp_servers, + external_ip_id, }) } } +struct ZoneConfigCommon { + id: Uuid, + underlay_address: ipv6::Ipv6Addr, + zone_type: ZoneType, + primary_service_address: SocketAddrV6, + snat_ip: Option, + snat_first_port: Option, + snat_last_port: Option, + ntp_domain: Option, + dns_gz_address: Option, + dns_gz_address_index: Option, + nexus_external_tls: Option, + nexus_external_dns_servers: Option>, + second_service_ip: Option, + // These properties may or may not be needed, depending on the zone type. We + // store results here that can be unpacked once we determine our zone type. + nic: anyhow::Result, + dataset: anyhow::Result, + dns_address: anyhow::Result, + ntp_dns_servers: anyhow::Result>, + ntp_ntp_servers: anyhow::Result>, + external_ip_id: anyhow::Result, +} + #[derive(Debug)] pub(crate) struct OmicronZoneNic { pub(crate) id: Uuid, @@ -401,38 +617,31 @@ pub(crate) struct OmicronZoneNic { impl OmicronZoneNic { pub(crate) fn new( - zone: &nexus_types::inventory::OmicronZoneConfig, - ) -> anyhow::Result> { - match &zone.zone_type { - OmicronZoneType::ExternalDns { nic, .. } - | OmicronZoneType::BoundaryNtp { nic, .. } - | OmicronZoneType::Nexus { nic, .. } => { - // We do not bother storing the NIC's kind and associated id - // because it should be inferrable from the other information - // that we have. Verify that here. - ensure!( - matches!( - nic.kind, - NetworkInterfaceKind::Service{ id } if id == zone.id - ), - "expected zone's NIC kind to be \"service\" and the \ - id to match the zone's id ({})", - zone.id - ); + zone_id: Uuid, + nic: &nexus_types::inventory::NetworkInterface, + ) -> anyhow::Result { + // We do not bother storing the NIC's kind and associated id + // because it should be inferrable from the other information + // that we have. Verify that here. 
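// Editorial note, not part of the patch: the check below rejects a NIC whose
// recorded owner does not match the zone it is being stored for -- e.g. an
// `Instance { .. }` kind, or a `Service { id }` whose id differs from
// `zone_id` -- before a database row is constructed from it.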
+ ensure!( + matches!( + nic.kind, + NetworkInterfaceKind::Service{ id } if id == zone_id + ), + "expected zone's NIC kind to be \"service\" and the \ + id to match the zone's id ({zone_id})", + ); - Ok(Some(Self { - id: nic.id, - name: Name::from(nic.name.clone()), - ip: IpNetwork::from(nic.ip), - mac: MacAddr::from(nic.mac), - subnet: IpNetwork::from(nic.subnet), - vni: SqlU32::from(u32::from(nic.vni)), - is_primary: nic.primary, - slot: SqlU8::from(nic.slot), - })) - } - _ => Ok(None), - } + Ok(Self { + id: nic.id, + name: Name::from(nic.name.clone()), + ip: IpNetwork::from(nic.ip), + mac: MacAddr::from(nic.mac), + subnet: IpNetwork::from(nic.subnet), + vni: SqlU32::from(u32::from(nic.vni)), + is_primary: nic.primary, + slot: SqlU8::from(nic.slot), + }) } pub(crate) fn into_network_interface_for_zone( diff --git a/nexus/db-model/src/physical_disk.rs b/nexus/db-model/src/physical_disk.rs index 3628f7077f..c6ef97ee1f 100644 --- a/nexus/db-model/src/physical_disk.rs +++ b/nexus/db-model/src/physical_disk.rs @@ -2,7 +2,9 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use super::{Generation, PhysicalDiskKind}; +use super::{ + Generation, PhysicalDiskKind, PhysicalDiskPolicy, PhysicalDiskState, +}; use crate::collection::DatastoreCollectionConfig; use crate::schema::{physical_disk, zpool}; use chrono::{DateTime, Utc}; @@ -25,10 +27,14 @@ pub struct PhysicalDisk { pub variant: PhysicalDiskKind, pub sled_id: Uuid, + pub disk_policy: PhysicalDiskPolicy, + pub disk_state: PhysicalDiskState, } impl PhysicalDisk { + /// Creates a new in-service, active disk pub fn new( + id: Uuid, vendor: String, serial: String, model: String, @@ -36,7 +42,7 @@ impl PhysicalDisk { sled_id: Uuid, ) -> Self { Self { - identity: PhysicalDiskIdentity::new(Uuid::new_v4()), + identity: PhysicalDiskIdentity::new(id), time_deleted: None, rcgen: Generation::new(), vendor, @@ -44,23 +50,15 @@ impl PhysicalDisk { model, variant, sled_id, + disk_policy: PhysicalDiskPolicy::InService, + disk_state: PhysicalDiskState::Active, } } - pub fn uuid(&self) -> Uuid { + pub fn id(&self) -> Uuid { self.identity.id } - // This is slightly gross, but: - // the `authz_resource` macro really expects that the "primary_key" - // for an object can be acquired by "id()". - // - // The PhysicalDisk object does actually have a separate convenience - // UUID, but may be looked by up vendor/serial/model too. - pub fn id(&self) -> (String, String, String) { - (self.vendor.clone(), self.serial.clone(), self.model.clone()) - } - pub fn time_deleted(&self) -> Option> { self.time_deleted } @@ -70,6 +68,8 @@ impl From for views::PhysicalDisk { fn from(disk: PhysicalDisk) -> Self { Self { identity: disk.identity(), + policy: disk.disk_policy.into(), + state: disk.disk_state.into(), sled_id: Some(disk.sled_id), vendor: disk.vendor, serial: disk.serial, diff --git a/nexus/db-model/src/physical_disk_policy.rs b/nexus/db-model/src/physical_disk_policy.rs new file mode 100644 index 0000000000..85b6feccf2 --- /dev/null +++ b/nexus/db-model/src/physical_disk_policy.rs @@ -0,0 +1,54 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Database representation of a disks's operator-defined policy. +//! +//! This is related to, but different from `PhysicalDiskState`: a disk's **policy** is +//! 
its disposition as specified by the operator, while its **state** refers to +//! what's currently on it, as determined by Nexus. +//! +//! For example, a disk might be in the `Active` state, but have a policy of +//! `Expunged` -- this would mean that Nexus knows about resources currently +//! provisioned on the disk, but the operator has said that it should be marked +//! as gone. + +use super::impl_enum_type; +use nexus_types::external_api::views; +use serde::{Deserialize, Serialize}; + +impl_enum_type!( + #[derive(Clone, SqlType, Debug, QueryId)] + #[diesel(postgres_type(name = "physical_disk_policy", schema = "public"))] + pub struct PhysicalDiskPolicyEnum; + + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)] + #[diesel(sql_type = PhysicalDiskPolicyEnum)] + pub enum PhysicalDiskPolicy; + + // Enum values + InService => b"in_service" + Expunged => b"expunged" +); + +impl From for views::PhysicalDiskPolicy { + fn from(policy: PhysicalDiskPolicy) -> Self { + match policy { + PhysicalDiskPolicy::InService => { + views::PhysicalDiskPolicy::InService + } + PhysicalDiskPolicy::Expunged => views::PhysicalDiskPolicy::Expunged, + } + } +} + +impl From for PhysicalDiskPolicy { + fn from(policy: views::PhysicalDiskPolicy) -> Self { + match policy { + views::PhysicalDiskPolicy::InService => { + PhysicalDiskPolicy::InService + } + views::PhysicalDiskPolicy::Expunged => PhysicalDiskPolicy::Expunged, + } + } +} diff --git a/nexus/db-model/src/physical_disk_state.rs b/nexus/db-model/src/physical_disk_state.rs new file mode 100644 index 0000000000..0dcc8f139a --- /dev/null +++ b/nexus/db-model/src/physical_disk_state.rs @@ -0,0 +1,54 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Database representation of a physical disk's state as understood by Nexus. + +use super::impl_enum_type; +use nexus_types::external_api::views; +use serde::{Deserialize, Serialize}; +use std::fmt; +use strum::EnumIter; + +impl_enum_type!( + #[derive(Clone, SqlType, Debug, QueryId)] + #[diesel(postgres_type(name = "physical_disk_state", schema = "public"))] + pub struct PhysicalDiskStateEnum; + + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq, Eq, EnumIter)] + #[diesel(sql_type = PhysicalDiskStateEnum)] + pub enum PhysicalDiskState; + + // Enum values + Active => b"active" + Decommissioned => b"decommissioned" +); + +impl fmt::Display for PhysicalDiskState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Forward to the canonical implementation in nexus-types. 
+ views::PhysicalDiskState::from(*self).fmt(f) + } +} + +impl From for views::PhysicalDiskState { + fn from(state: PhysicalDiskState) -> Self { + match state { + PhysicalDiskState::Active => views::PhysicalDiskState::Active, + PhysicalDiskState::Decommissioned => { + views::PhysicalDiskState::Decommissioned + } + } + } +} + +impl From for PhysicalDiskState { + fn from(state: views::PhysicalDiskState) -> Self { + match state { + views::PhysicalDiskState::Active => PhysicalDiskState::Active, + views::PhysicalDiskState::Decommissioned => { + PhysicalDiskState::Decommissioned + } + } + } +} diff --git a/nexus/db-model/src/producer_endpoint.rs b/nexus/db-model/src/producer_endpoint.rs index 55533690f1..74a7356adb 100644 --- a/nexus/db-model/src/producer_endpoint.rs +++ b/nexus/db-model/src/producer_endpoint.rs @@ -2,6 +2,9 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +use std::net::SocketAddr; +use std::time::Duration; + use super::SqlU16; use crate::impl_enum_type; use crate::schema::metric_producer; @@ -44,9 +47,20 @@ impl From for internal::nexus::ProducerKind { } } +impl From for internal::nexus::ProducerEndpoint { + fn from(ep: ProducerEndpoint) -> Self { + internal::nexus::ProducerEndpoint { + id: ep.id(), + kind: ep.kind.into(), + address: SocketAddr::new(ep.ip.ip(), *ep.port), + interval: Duration::from_secs_f64(ep.interval), + } + } +} + /// Information announced by a metric server, used so that clients can contact it and collect /// available metric data from it. -#[derive(Queryable, Insertable, Debug, Clone, Selectable, Asset)] +#[derive(Queryable, Insertable, Debug, Clone, Selectable, Asset, PartialEq)] #[diesel(table_name = metric_producer)] pub struct ProducerEndpoint { #[diesel(embed)] @@ -56,7 +70,6 @@ pub struct ProducerEndpoint { pub ip: ipnetwork::IpNetwork, pub port: SqlU16, pub interval: f64, - pub base_route: String, pub oximeter_id: Uuid, } @@ -72,14 +85,8 @@ impl ProducerEndpoint { kind: endpoint.kind.into(), ip: endpoint.address.ip().into(), port: endpoint.address.port().into(), - base_route: endpoint.base_route.clone(), interval: endpoint.interval.as_secs_f64(), oximeter_id, } } - - /// Return the route that can be used to request metric data. - pub fn collection_route(&self) -> String { - format!("{}/{}", &self.base_route, self.id()) - } } diff --git a/nexus/db-model/src/region.rs b/nexus/db-model/src/region.rs index fefc4f4fce..441f928405 100644 --- a/nexus/db-model/src/region.rs +++ b/nexus/db-model/src/region.rs @@ -58,6 +58,9 @@ impl Region { } } + pub fn id(&self) -> Uuid { + self.identity.id + } pub fn volume_id(&self) -> Uuid { self.volume_id } diff --git a/nexus/db-model/src/region_replacement.rs b/nexus/db-model/src/region_replacement.rs new file mode 100644 index 0000000000..a04710f53d --- /dev/null +++ b/nexus/db-model/src/region_replacement.rs @@ -0,0 +1,165 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
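// Editorial sketch, not part of the patch: the policy/state pair introduced in
// physical_disk_policy.rs and physical_disk_state.rs above is deliberately
// decoupled. An operator can expunge a disk while Nexus still considers it
// active; decommissioning is a later, Nexus-driven transition.
fn example_disk_policy_vs_state() {
    let policy = PhysicalDiskPolicy::Expunged; // operator intent
    let state = PhysicalDiskState::Active; // Nexus's current view of the disk
    assert_eq!(policy, PhysicalDiskPolicy::Expunged);
    assert_ne!(state, PhysicalDiskState::Decommissioned);
}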
+ +use super::impl_enum_type; +use crate::schema::region_replacement; +use crate::Region; +use chrono::DateTime; +use chrono::Utc; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +impl_enum_type!( + #[derive(SqlType, Debug, QueryId)] + #[diesel(postgres_type(name = "region_replacement_state", schema = "public"))] + pub struct RegionReplacementStateEnum; + + #[derive(Copy, Clone, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)] + #[diesel(sql_type = RegionReplacementStateEnum)] + pub enum RegionReplacementState; + + // Enum values + Requested => b"requested" + Allocating => b"allocating" + Running => b"running" + Driving => b"driving" + ReplacementDone => b"replacement_done" + Completing => b"completing" + Complete => b"complete" +); + +impl std::str::FromStr for RegionReplacementState { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "requested" => Ok(RegionReplacementState::Requested), + "allocating" => Ok(RegionReplacementState::Allocating), + "running" => Ok(RegionReplacementState::Running), + "driving" => Ok(RegionReplacementState::Driving), + "replacement_done" => Ok(RegionReplacementState::ReplacementDone), + "complete" => Ok(RegionReplacementState::Complete), + "completing" => Ok(RegionReplacementState::Completing), + _ => Err(format!("unrecognized value {} for enum", s)), + } + } +} + +/// Database representation of a Region replacement request. +/// +/// This record stores the data related to the operations required for Nexus to +/// orchestrate replacing a region in a volume. It transitions through the +/// following states: +/// +/// ```text +/// Requested <-- --- +/// | | +/// | | | +/// v | | responsibility of region +/// | | replacement start saga +/// Allocating -- | +/// | +/// | | +/// v --- +/// --- +/// Running <-- | +/// | | +/// | | | +/// v | | responsibility of region +/// | | replacement drive saga +/// Driving -- | +/// | +/// | | +/// v --- +/// --- +/// ReplacementDone <-- | +/// | | +/// | | | +/// v | | +/// | | responsibility of region +/// Completing -- | replacement finish saga +/// | +/// | | +/// v | +/// | +/// Completed --- +/// ``` +/// +/// which are captured in the RegionReplacementState enum. Annotated on the +/// right are which sagas are responsible for which state transitions. The state +/// transitions themselves are performed by these sagas and all involve a query +/// that: +/// +/// - checks that the starting state (and other values as required) make sense +/// - updates the state while setting a unique operating_saga_id id (and any +/// other fields as appropriate) +/// +/// As multiple background tasks will be waking up, checking to see what sagas +/// need to be triggered, and requesting that these region replacement sagas +/// run, this is meant to block multiple sagas from running at the same time in +/// an effort to cut down on interference - most will unwind at the first step +/// of performing this state transition instead of somewhere in the middle. +/// +/// The correctness of a region replacement relies on certain operations +/// happening only when the record is in a certain state. 
For example: Nexus +/// should not undo a volume modification _after_ an upstairs has been sent a +/// replacement request, so volume modification happens at the Allocating state +/// (in the start saga), and replacement requests are only sent in the Driving +/// state (in the drive saga) - this ensures that replacement requests are only +/// sent if the start saga completed successfully, meaning the volume +/// modification was committed to the database and will not change or be +/// unwound. +/// +/// See also: RegionReplacementStep records +#[derive( + Queryable, + Insertable, + Debug, + Clone, + Selectable, + Serialize, + Deserialize, + PartialEq, +)] +#[diesel(table_name = region_replacement)] +pub struct RegionReplacement { + pub id: Uuid, + + pub request_time: DateTime, + + /// The region being replaced + pub old_region_id: Uuid, + + /// The volume whose region is being replaced + pub volume_id: Uuid, + + /// A synthetic volume that only is used to later delete the old region + pub old_region_volume_id: Option, + + /// The new region that will be used to replace the old one + pub new_region_id: Option, + + pub replacement_state: RegionReplacementState, + + pub operating_saga_id: Option, +} + +impl RegionReplacement { + pub fn for_region(region: &Region) -> Self { + Self::new(region.id(), region.volume_id()) + } + + pub fn new(old_region_id: Uuid, volume_id: Uuid) -> Self { + Self { + id: Uuid::new_v4(), + request_time: Utc::now(), + old_region_id, + volume_id, + old_region_volume_id: None, + new_region_id: None, + replacement_state: RegionReplacementState::Requested, + operating_saga_id: None, + } + } +} diff --git a/nexus/db-model/src/region_replacement_step.rs b/nexus/db-model/src/region_replacement_step.rs new file mode 100644 index 0000000000..c0a61b958c --- /dev/null +++ b/nexus/db-model/src/region_replacement_step.rs @@ -0,0 +1,85 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use super::impl_enum_type; +use crate::ipv6; +use crate::schema::region_replacement_step; +use crate::SqlU16; +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use std::net::SocketAddrV6; +use uuid::Uuid; + +impl_enum_type!( + #[derive(SqlType, Debug, QueryId)] + #[diesel(postgres_type(name = "region_replacement_step_type", schema = "public"))] + pub struct RegionReplacementStepTypeEnum; + + #[derive(Copy, Clone, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)] + #[diesel(sql_type = RegionReplacementStepTypeEnum)] + pub enum RegionReplacementStepType; + + // What is driving the repair forward? + Propolis => b"propolis" + Pantry => b"pantry" +); + +/// Database representation of a Region replacement repair step +/// +/// As region replacement takes place, Nexus will be making calls to services in +/// order to trigger the necessary Crucible operations meant to actually perform +/// the replacement. These steps are recorded in the database so that they can +/// be consulted by subsequent steps, and additionally act as breadcrumbs if +/// there is an issue. 
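// Editorial sketch, not part of the patch: how a replacement request record
// (defined in region_replacement.rs above) starts out. `region` is an assumed
// in-scope `Region`.
fn example_new_replacement_request(region: &Region) -> RegionReplacement {
    let request = RegionReplacement::for_region(region);
    // New requests always begin in `Requested` with no saga operating on them;
    // the start, drive, and finish sagas advance the state from there.
    assert_eq!(request.replacement_state, RegionReplacementState::Requested);
    assert!(request.operating_saga_id.is_none());
    request
}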
+/// +/// See also: RegionReplacement records +#[derive( + Queryable, + Insertable, + Debug, + Clone, + Selectable, + Serialize, + Deserialize, + PartialEq, +)] +#[diesel(table_name = region_replacement_step)] +pub struct RegionReplacementStep { + pub replacement_id: Uuid, + + pub step_time: DateTime, + + pub step_type: RegionReplacementStepType, + + pub step_associated_instance_id: Option, + pub step_associated_vmm_id: Option, + + pub step_associated_pantry_ip: Option, + pub step_associated_pantry_port: Option, + pub step_associated_pantry_job_id: Option, +} + +impl RegionReplacementStep { + pub fn instance_and_vmm_ids(&self) -> Option<(Uuid, Uuid)> { + if self.step_type != RegionReplacementStepType::Propolis { + return None; + } + + let instance_id = self.step_associated_instance_id?; + let vmm_id = self.step_associated_vmm_id?; + + Some((instance_id, vmm_id)) + } + + pub fn pantry_address(&self) -> Option { + if self.step_type != RegionReplacementStepType::Pantry { + return None; + } + + let ip = self.step_associated_pantry_ip?; + let port = self.step_associated_pantry_port?; + + Some(SocketAddrV6::new(*ip, *port, 0, 0)) + } +} diff --git a/nexus/db-model/src/saga_types.rs b/nexus/db-model/src/saga_types.rs index bb21e803bc..3ad3e2603c 100644 --- a/nexus/db-model/src/saga_types.rs +++ b/nexus/db-model/src/saga_types.rs @@ -123,7 +123,7 @@ impl ToSql for SagaNodeId { out: &mut serialize::Output<'a, '_, Pg>, ) -> serialize::Result { // Diesel newtype -> steno type -> u32 -> i64 -> SQL - let id = u32::from(self.0) as i64; + let id = i64::from(u32::from(self.0)); >::to_sql(&id, &mut out.reborrow()) } } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index c533b426bd..94e699443c 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -209,6 +209,42 @@ table! { delay_open -> Int8, connect_retry -> Int8, keepalive -> Int8, + remote_asn -> Nullable, + min_ttl -> Nullable, + md5_auth_key -> Nullable, + multi_exit_discriminator -> Nullable, + local_pref -> Nullable, + enforce_first_as -> Bool, + allow_import_list_active -> Bool, + allow_export_list_active -> Bool, + vlan_id -> Nullable + } +} + +table! { + switch_port_settings_bgp_peer_config_communities (port_settings_id, interface_name, addr, community) { + port_settings_id -> Uuid, + interface_name -> Text, + addr -> Inet, + community -> Int8, + } +} + +table! { + switch_port_settings_bgp_peer_config_allow_export (port_settings_id, interface_name, addr, prefix) { + port_settings_id -> Uuid, + interface_name -> Text, + addr -> Inet, + prefix -> Inet, + } +} + +table! { + switch_port_settings_bgp_peer_config_allow_import (port_settings_id, interface_name, addr, prefix) { + port_settings_id -> Uuid, + interface_name -> Text, + addr -> Inet, + prefix -> Inet, } } @@ -223,6 +259,8 @@ table! { asn -> Int8, bgp_announce_set_id -> Uuid, vrf -> Nullable, + shaper -> Nullable, + checker -> Nullable, } } @@ -237,6 +275,24 @@ table! { hold_time -> Int8, idle_hold_time -> Int8, keepalive -> Int8, + remote_asn -> Nullable, + min_ttl -> Nullable, + md5_auth_key -> Nullable, + multi_exit_discriminator -> Nullable, + local_pref -> Nullable, + enforce_first_as -> Bool, + vlan_id -> Nullable, + } +} + +table! { + v2p_mapping_view (nic_id) { + nic_id -> Uuid, + sled_id -> Uuid, + sled_ip -> Inet, + vni -> Int4, + mac -> Int8, + ip -> Inet, } } @@ -391,6 +447,7 @@ table! { state_generation -> Int8, } } +joinable!(vmm -> sled (sled_id)); table! { sled_instance (id) { @@ -417,7 +474,6 @@ table! 
{ ip -> Inet, port -> Int4, interval -> Float8, - base_route -> Text, oximeter_id -> Uuid, } } @@ -483,6 +539,7 @@ table! { is_primary -> Bool, } } +joinable!(instance_network_interface -> instance (instance_id)); table! { service_network_interface (id) { @@ -873,20 +930,6 @@ table! { } } -table! { - service (id) { - id -> Uuid, - time_created -> Timestamptz, - time_modified -> Timestamptz, - - sled_id -> Uuid, - zone_id -> Nullable, - ip -> Inet, - port -> Int4, - kind -> crate::ServiceKindEnum, - } -} - table! { physical_disk (id) { id -> Uuid, @@ -900,6 +943,8 @@ table! { model -> Text, variant -> crate::PhysicalDiskKindEnum, + disk_policy -> crate::PhysicalDiskPolicyEnum, + disk_state -> crate::PhysicalDiskStateEnum, sled_id -> Uuid, } } @@ -991,6 +1036,8 @@ table! { } } +allow_tables_to_appear_in_same_query!(zpool, dataset); + table! { region (id) { id -> Uuid, @@ -1006,6 +1053,8 @@ table! { } } +allow_tables_to_appear_in_same_query!(zpool, region); + table! { region_snapshot (dataset_id, region_id, snapshot_id) { dataset_id -> Uuid, @@ -1455,6 +1504,9 @@ table! { internal_dns_version -> Int8, external_dns_version -> Int8, + cockroachdb_fingerprint -> Text, + + cockroachdb_setting_preserve_downgrade -> Nullable, } } @@ -1469,6 +1521,38 @@ table! { } } +table! { + bp_sled_state (blueprint_id, sled_id) { + blueprint_id -> Uuid, + sled_id -> Uuid, + + sled_state -> crate::SledStateEnum, + } +} + +table! { + bp_sled_omicron_physical_disks (blueprint_id, sled_id) { + blueprint_id -> Uuid, + sled_id -> Uuid, + + generation -> Int8, + } +} + +table! { + bp_omicron_physical_disk (blueprint_id, id) { + blueprint_id -> Uuid, + sled_id -> Uuid, + + vendor -> Text, + serial -> Text, + model -> Text, + + id -> Uuid, + pool_id -> Uuid, + } +} + table! { bp_sled_omicron_zones (blueprint_id, sled_id) { blueprint_id -> Uuid, @@ -1503,6 +1587,8 @@ table! { snat_ip -> Nullable, snat_first_port -> Nullable, snat_last_port -> Nullable, + disposition -> crate::DbBpZoneDispositionEnum, + external_ip_id -> Nullable, } } @@ -1520,13 +1606,6 @@ table! { } } -table! { - bp_omicron_zones_not_in_service (blueprint_id, bp_omicron_zone_id) { - blueprint_id -> Uuid, - bp_omicron_zone_id -> Uuid, - } -} - table! { bootstore_keys (key, generation) { key -> Text, @@ -1616,6 +1695,50 @@ table! { } } +table! { + allow_list (id) { + id -> Uuid, + time_created -> Timestamptz, + time_modified -> Timestamptz, + allowed_ips -> Nullable>, + } +} + +table! { + region_replacement (id) { + id -> Uuid, + request_time -> Timestamptz, + old_region_id -> Uuid, + volume_id -> Uuid, + old_region_volume_id -> Nullable, + new_region_id -> Nullable, + replacement_state -> crate::RegionReplacementStateEnum, + operating_saga_id -> Nullable, + } +} + +table! { + volume_repair (volume_id) { + volume_id -> Uuid, + repair_id -> Uuid, + } +} + +table! { + region_replacement_step (replacement_id, step_time, step_type) { + replacement_id -> Uuid, + step_time -> Timestamptz, + step_type -> crate::RegionReplacementStepTypeEnum, + + step_associated_instance_id -> Nullable, + step_associated_vmm_id -> Nullable, + + step_associated_pantry_ip -> Nullable, + step_associated_pantry_port -> Nullable, + step_associated_pantry_job_id -> Nullable, + } +} + table! 
{ db_metadata (singleton) { singleton -> Bool, @@ -1657,8 +1780,10 @@ allow_tables_to_appear_in_same_query!( metric_producer, network_interface, instance_network_interface, + inv_physical_disk, service_network_interface, oximeter, + physical_disk, project, rack, region, @@ -1668,7 +1793,6 @@ allow_tables_to_appear_in_same_query!( silo, identity_provider, console_session, - service, sled, sled_resource, router_route, @@ -1684,7 +1808,6 @@ allow_tables_to_appear_in_same_query!( ); allow_tables_to_appear_in_same_query!(dns_zone, dns_version, dns_name); -allow_tables_to_appear_in_same_query!(external_ip, service); // used for query to check whether an IP pool association has any allocated IPs before deleting allow_tables_to_appear_in_same_query!(external_ip, instance); @@ -1708,3 +1831,5 @@ allow_tables_to_appear_in_same_query!(volume, virtual_provisioning_resource); allow_tables_to_appear_in_same_query!(ssh_key, instance_ssh_key, instance); joinable!(instance_ssh_key -> ssh_key (ssh_key_id)); joinable!(instance_ssh_key -> instance (instance_id)); + +allow_tables_to_appear_in_same_query!(sled, sled_instance); diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index e35cc3c38a..75e1d7e440 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -17,7 +17,7 @@ use std::collections::BTreeMap; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(47, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(66, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -29,6 +29,25 @@ static KNOWN_VERSIONS: Lazy> = Lazy::new(|| { // | leaving the first copy as an example for the next person. // v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(66, "blueprint-crdb-preserve-downgrade"), + KnownVersion::new(65, "region-replacement"), + KnownVersion::new(64, "add-view-for-v2p-mappings"), + KnownVersion::new(63, "remove-producer-base-route-column"), + KnownVersion::new(62, "allocate-subnet-decommissioned-sleds"), + KnownVersion::new(61, "blueprint-add-sled-state"), + KnownVersion::new(60, "add-lookup-vmm-by-sled-id-index"), + KnownVersion::new(59, "enforce-first-as-default"), + KnownVersion::new(58, "insert-default-allowlist"), + KnownVersion::new(57, "add-allowed-source-ips"), + KnownVersion::new(56, "bgp-oxpop-features"), + KnownVersion::new(55, "add-lookup-sled-by-policy-and-state-index"), + KnownVersion::new(54, "blueprint-add-external-ip-id"), + KnownVersion::new(53, "drop-service-table"), + KnownVersion::new(52, "blueprint-physical-disk"), + KnownVersion::new(51, "blueprint-disposition-column"), + KnownVersion::new(50, "add-lookup-disk-by-volume-id-index"), + KnownVersion::new(49, "physical-disk-state-and-policy"), + KnownVersion::new(48, "add-metrics-producers-time-modified-index"), KnownVersion::new(47, "add-view-for-bgp-peer-configs"), KnownVersion::new(46, "first-named-migration"), // The first many schema versions only vary by major or patch number and diff --git a/nexus/db-model/src/service.rs b/nexus/db-model/src/service.rs deleted file mode 100644 index 45d3ca5a16..0000000000 --- a/nexus/db-model/src/service.rs +++ /dev/null @@ -1,44 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. 
If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -use super::ServiceKind; -use crate::ipv6; -use crate::schema::service; -use crate::SqlU16; -use db_macros::Asset; -use std::net::SocketAddrV6; -use uuid::Uuid; - -/// Representation of services which may run on Sleds. -#[derive(Queryable, Insertable, Debug, Clone, Selectable, Asset)] -#[diesel(table_name = service)] -pub struct Service { - #[diesel(embed)] - identity: ServiceIdentity, - - pub sled_id: Uuid, - pub zone_id: Option, - pub ip: ipv6::Ipv6Addr, - pub port: SqlU16, - pub kind: ServiceKind, -} - -impl Service { - pub fn new( - id: Uuid, - sled_id: Uuid, - zone_id: Option, - addr: SocketAddrV6, - kind: ServiceKind, - ) -> Self { - Self { - identity: ServiceIdentity::new(id), - sled_id, - zone_id, - ip: addr.ip().into(), - port: addr.port().into(), - kind, - } - } -} diff --git a/nexus/db-model/src/sled.rs b/nexus/db-model/src/sled.rs index a603f28d57..5019366733 100644 --- a/nexus/db-model/src/sled.rs +++ b/nexus/db-model/src/sled.rs @@ -5,7 +5,7 @@ use super::{ByteCount, Generation, SledState, SqlU16, SqlU32}; use crate::collection::DatastoreCollectionConfig; use crate::ipv6; -use crate::schema::{physical_disk, service, sled, zpool}; +use crate::schema::{physical_disk, sled, zpool}; use crate::sled::shared::Baseboard; use crate::sled_policy::DbSledPolicy; use chrono::{DateTime, Utc}; @@ -114,6 +114,10 @@ impl Sled { pub fn state(&self) -> SledState { self.state } + + pub fn time_modified(&self) -> DateTime { + self.identity.time_modified + } } impl From for views::Sled { @@ -177,13 +181,6 @@ impl DatastoreCollectionConfig for Sled { type CollectionIdColumn = zpool::dsl::sled_id; } -impl DatastoreCollectionConfig for Sled { - type CollectionId = Uuid; - type GenerationNumberColumn = sled::dsl::rcgen; - type CollectionTimeDeletedColumn = sled::dsl::time_deleted; - type CollectionIdColumn = service::dsl::sled_id; -} - /// Form of `Sled` used for updates from sled-agent. This is missing some /// columns that are present in `Sled` because sled-agent doesn't control them. #[derive(Debug, Clone)] @@ -347,3 +344,67 @@ impl SledReservationConstraintBuilder { self.constraints } } + +mod diesel_util { + use crate::{ + schema::sled::{sled_policy, sled_state}, + sled_policy::DbSledPolicy, + to_db_sled_policy, + }; + use diesel::{ + helper_types::{And, EqAny}, + prelude::*, + query_dsl::methods::FilterDsl, + }; + use nexus_types::{ + deployment::SledFilter, + external_api::views::{SledPolicy, SledState}, + }; + + /// An extension trait to apply a [`SledFilter`] to a Diesel expression. + /// + /// This is applicable to any Diesel expression which includes the `sled` + /// table. + /// + /// This needs to live here, rather than in `nexus-db-queries`, because it + /// names the `DbSledPolicy` type which is private to this crate. + pub trait ApplySledFilterExt { + type Output; + + /// Applies a [`SledFilter`] to a Diesel expression. + fn sled_filter(self, filter: SledFilter) -> Self::Output; + } + + impl ApplySledFilterExt for E + where + E: FilterDsl, + { + type Output = E::Output; + + fn sled_filter(self, filter: SledFilter) -> Self::Output { + use crate::schema::sled::dsl as sled_dsl; + + // These are only boxed for ease of reference above. 
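// Editorial note, not part of the patch: a caller-side sketch of the
// `ApplySledFilterExt` trait defined above. A sled query in nexus-db-queries
// could then be narrowed to, say, in-service sleds with a single call
// (`SledFilter::InService` is assumed to be a valid filter variant here):
//
//     use nexus_db_model::ApplySledFilterExt;
//     let query = sled_dsl::sled
//         .filter(sled_dsl::time_deleted.is_null())
//         .sled_filter(SledFilter::InService);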
+ let all_matching_policies: BoxedIterator = Box::new( + SledPolicy::all_matching(filter).map(to_db_sled_policy), + ); + let all_matching_states: BoxedIterator = + Box::new(SledState::all_matching(filter).map(Into::into)); + + FilterDsl::filter( + self, + sled_dsl::sled_policy + .eq_any(all_matching_policies) + .and(sled_dsl::sled_state.eq_any(all_matching_states)), + ) + } + } + + type BoxedIterator = Box>; + type SledFilterQuery = And< + EqAny>, + EqAny>, + >; +} + +pub use diesel_util::ApplySledFilterExt; diff --git a/nexus/db-model/src/sled_instance.rs b/nexus/db-model/src/sled_instance.rs index e3a901264d..bbc92ddf18 100644 --- a/nexus/db-model/src/sled_instance.rs +++ b/nexus/db-model/src/sled_instance.rs @@ -41,3 +41,9 @@ impl From for views::SledInstance { } } } + +impl SledInstance { + pub fn instance_id(&self) -> Uuid { + self.identity.id + } +} diff --git a/nexus/db-model/src/sled_resource_kind.rs b/nexus/db-model/src/sled_resource_kind.rs index c17eb2e106..b9a59bdc30 100644 --- a/nexus/db-model/src/sled_resource_kind.rs +++ b/nexus/db-model/src/sled_resource_kind.rs @@ -15,8 +15,5 @@ impl_enum_type!( pub enum SledResourceKind; // Enum values - Dataset => b"dataset" - Service => b"service" Instance => b"instance" - Reserved => b"reserved" ); diff --git a/nexus/db-model/src/sled_underlay_subnet_allocation.rs b/nexus/db-model/src/sled_underlay_subnet_allocation.rs index 4da0bea669..3cb9579f1b 100644 --- a/nexus/db-model/src/sled_underlay_subnet_allocation.rs +++ b/nexus/db-model/src/sled_underlay_subnet_allocation.rs @@ -3,14 +3,16 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. use crate::schema::sled_underlay_subnet_allocation; +use crate::typed_uuid::DbTypedUuid; +use omicron_uuid_kinds::SledKind; use uuid::Uuid; /// Underlay allocation for a sled added to an initialized rack -#[derive(Queryable, Insertable, Debug, Clone, Selectable)] +#[derive(Queryable, Insertable, Debug, Clone, PartialEq, Eq, Selectable)] #[diesel(table_name = sled_underlay_subnet_allocation)] pub struct SledUnderlaySubnetAllocation { pub rack_id: Uuid, - pub sled_id: Uuid, + pub sled_id: DbTypedUuid, pub subnet_octet: i16, pub hw_baseboard_id: Uuid, } diff --git a/nexus/db-model/src/switch_port.rs b/nexus/db-model/src/switch_port.rs index 6ed918dae5..b10f6ba679 100644 --- a/nexus/db-model/src/switch_port.rs +++ b/nexus/db-model/src/switch_port.rs @@ -5,18 +5,22 @@ use crate::schema::{ lldp_config, lldp_service_config, switch_port, switch_port_settings, switch_port_settings_address_config, switch_port_settings_bgp_peer_config, + switch_port_settings_bgp_peer_config_allow_export, + switch_port_settings_bgp_peer_config_allow_import, + switch_port_settings_bgp_peer_config_communities, switch_port_settings_group, switch_port_settings_groups, switch_port_settings_interface_config, switch_port_settings_link_config, switch_port_settings_port_config, switch_port_settings_route_config, }; -use crate::SqlU16; use crate::{impl_enum_type, SqlU32}; +use crate::{SqlU16, SqlU8}; use db_macros::Resource; use diesel::AsChangeset; use ipnetwork::IpNetwork; use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::api::external; +use omicron_common::api::external::{BgpPeer, ImportExportPolicy}; use omicron_common::api::internal::shared::{PortFec, PortSpeed}; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -106,12 +110,22 @@ impl From for PortFec { } } -impl From for SwitchLinkFec { - fn from(value: params::LinkFec) -> Self { +impl From for SwitchLinkFec { + fn 
from(value: external::LinkFec) -> Self { match value { - params::LinkFec::Firecode => SwitchLinkFec::Firecode, - params::LinkFec::None => SwitchLinkFec::None, - params::LinkFec::Rs => SwitchLinkFec::Rs, + external::LinkFec::Firecode => SwitchLinkFec::Firecode, + external::LinkFec::None => SwitchLinkFec::None, + external::LinkFec::Rs => SwitchLinkFec::Rs, + } + } +} + +impl From for external::LinkFec { + fn from(value: SwitchLinkFec) -> Self { + match value { + SwitchLinkFec::Firecode => external::LinkFec::Firecode, + SwitchLinkFec::None => external::LinkFec::None, + SwitchLinkFec::Rs => external::LinkFec::Rs, } } } @@ -132,18 +146,34 @@ impl From for PortSpeed { } } -impl From for SwitchLinkSpeed { - fn from(value: params::LinkSpeed) -> Self { +impl From for SwitchLinkSpeed { + fn from(value: external::LinkSpeed) -> Self { + match value { + external::LinkSpeed::Speed0G => SwitchLinkSpeed::Speed0G, + external::LinkSpeed::Speed1G => SwitchLinkSpeed::Speed1G, + external::LinkSpeed::Speed10G => SwitchLinkSpeed::Speed10G, + external::LinkSpeed::Speed25G => SwitchLinkSpeed::Speed25G, + external::LinkSpeed::Speed40G => SwitchLinkSpeed::Speed40G, + external::LinkSpeed::Speed50G => SwitchLinkSpeed::Speed50G, + external::LinkSpeed::Speed100G => SwitchLinkSpeed::Speed100G, + external::LinkSpeed::Speed200G => SwitchLinkSpeed::Speed200G, + external::LinkSpeed::Speed400G => SwitchLinkSpeed::Speed400G, + } + } +} + +impl From for external::LinkSpeed { + fn from(value: SwitchLinkSpeed) -> Self { match value { - params::LinkSpeed::Speed0G => SwitchLinkSpeed::Speed0G, - params::LinkSpeed::Speed1G => SwitchLinkSpeed::Speed1G, - params::LinkSpeed::Speed10G => SwitchLinkSpeed::Speed10G, - params::LinkSpeed::Speed25G => SwitchLinkSpeed::Speed25G, - params::LinkSpeed::Speed40G => SwitchLinkSpeed::Speed40G, - params::LinkSpeed::Speed50G => SwitchLinkSpeed::Speed50G, - params::LinkSpeed::Speed100G => SwitchLinkSpeed::Speed100G, - params::LinkSpeed::Speed200G => SwitchLinkSpeed::Speed200G, - params::LinkSpeed::Speed400G => SwitchLinkSpeed::Speed400G, + SwitchLinkSpeed::Speed0G => external::LinkSpeed::Speed0G, + SwitchLinkSpeed::Speed1G => external::LinkSpeed::Speed1G, + SwitchLinkSpeed::Speed10G => external::LinkSpeed::Speed10G, + SwitchLinkSpeed::Speed25G => external::LinkSpeed::Speed25G, + SwitchLinkSpeed::Speed40G => external::LinkSpeed::Speed40G, + SwitchLinkSpeed::Speed50G => external::LinkSpeed::Speed50G, + SwitchLinkSpeed::Speed100G => external::LinkSpeed::Speed100G, + SwitchLinkSpeed::Speed200G => external::LinkSpeed::Speed200G, + SwitchLinkSpeed::Speed400G => external::LinkSpeed::Speed400G, } } } @@ -387,6 +417,9 @@ impl Into for SwitchPortLinkConfig { lldp_service_config_id: self.lldp_service_config_id, link_name: self.link_name.clone(), mtu: self.mtu.into(), + fec: self.fec.into(), + speed: self.speed.into(), + autoneg: self.autoneg, } } } @@ -568,6 +601,69 @@ pub struct SwitchPortBgpPeerConfig { pub delay_open: SqlU32, pub connect_retry: SqlU32, pub keepalive: SqlU32, + pub remote_asn: Option, + pub min_ttl: Option, + pub md5_auth_key: Option, + pub multi_exit_discriminator: Option, + pub local_pref: Option, + pub enforce_first_as: bool, + pub allow_import_list_active: bool, + pub allow_export_list_active: bool, + pub vlan_id: Option, +} + +#[derive( + Queryable, + Insertable, + Selectable, + Clone, + Debug, + Serialize, + Deserialize, + AsChangeset, +)] +#[diesel(table_name = switch_port_settings_bgp_peer_config_communities)] +pub struct SwitchPortBgpPeerConfigCommunity { + pub port_settings_id: Uuid, 
+ pub interface_name: String, + pub addr: IpNetwork, + pub community: SqlU32, +} + +#[derive( + Queryable, + Insertable, + Selectable, + Clone, + Debug, + Serialize, + Deserialize, + AsChangeset, +)] +#[diesel(table_name = switch_port_settings_bgp_peer_config_allow_export)] +pub struct SwitchPortBgpPeerConfigAllowExport { + pub port_settings_id: Uuid, + pub interface_name: String, + pub addr: IpNetwork, + pub prefix: IpNetwork, +} + +#[derive( + Queryable, + Insertable, + Selectable, + Clone, + Debug, + Serialize, + Deserialize, + AsChangeset, +)] +#[diesel(table_name = switch_port_settings_bgp_peer_config_allow_import)] +pub struct SwitchPortBgpPeerConfigAllowImport { + pub port_settings_id: Uuid, + pub interface_name: String, + pub addr: IpNetwork, + pub prefix: IpNetwork, } impl SwitchPortBgpPeerConfig { @@ -576,34 +672,35 @@ impl SwitchPortBgpPeerConfig { port_settings_id: Uuid, bgp_config_id: Uuid, interface_name: String, - addr: IpNetwork, - hold_time: SqlU32, - idle_hold_time: SqlU32, - delay_open: SqlU32, - connect_retry: SqlU32, - keepalive: SqlU32, + p: &BgpPeer, ) -> Self { Self { port_settings_id, bgp_config_id, interface_name, - addr, - hold_time, - idle_hold_time, - delay_open, - connect_retry, - keepalive, - } - } -} - -impl Into for SwitchPortBgpPeerConfig { - fn into(self) -> external::SwitchPortBgpPeerConfig { - external::SwitchPortBgpPeerConfig { - port_settings_id: self.port_settings_id, - bgp_config_id: self.bgp_config_id, - interface_name: self.interface_name.clone(), - addr: self.addr.ip(), + addr: p.addr.into(), + hold_time: p.hold_time.into(), + idle_hold_time: p.delay_open.into(), + delay_open: p.connect_retry.into(), + connect_retry: p.connect_retry.into(), + keepalive: p.keepalive.into(), + remote_asn: p.remote_asn.map(|x| x.into()), + min_ttl: p.min_ttl.map(|x| x.into()), + md5_auth_key: p.md5_auth_key.clone(), + multi_exit_discriminator: p + .multi_exit_discriminator + .map(|x| x.into()), + local_pref: p.local_pref.map(|x| x.into()), + enforce_first_as: p.enforce_first_as, + allow_import_list_active: match &p.allowed_import { + ImportExportPolicy::NoFiltering => false, + _ => true, + }, + allow_export_list_active: match &p.allowed_export { + ImportExportPolicy::NoFiltering => false, + _ => true, + }, + vlan_id: p.vlan_id.map(|x| x.into()), } } } diff --git a/nexus/db-model/src/typed_uuid.rs b/nexus/db-model/src/typed_uuid.rs index 7785b8c7dc..1e54e242f3 100644 --- a/nexus/db-model/src/typed_uuid.rs +++ b/nexus/db-model/src/typed_uuid.rs @@ -10,6 +10,7 @@ use diesel::deserialize::{self, FromSql}; use diesel::serialize::{self, ToSql}; use diesel::sql_types; use omicron_uuid_kinds::{GenericUuid, TypedUuid, TypedUuidKind}; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::fmt; use std::str::FromStr; @@ -31,7 +32,7 @@ pub fn to_db_typed_uuid(id: TypedUuid) -> DbTypedUuid { /// `db-model` crate (this type is not exported at the top level). External /// users must use omicron-common's `TypedUuid`. 
#[derive_where(Clone, Copy, Eq, Ord, PartialEq, PartialOrd, Hash)] -#[derive(AsExpression, FromSqlRow, Serialize, Deserialize)] +#[derive(AsExpression, FromSqlRow, Serialize, Deserialize, JsonSchema)] #[diesel(sql_type = sql_types::Uuid)] #[serde(transparent, bound = "")] pub struct DbTypedUuid(pub(crate) TypedUuid); diff --git a/nexus/db-model/src/upstairs_repair.rs b/nexus/db-model/src/upstairs_repair.rs index 311592f8e4..ed281b6c64 100644 --- a/nexus/db-model/src/upstairs_repair.rs +++ b/nexus/db-model/src/upstairs_repair.rs @@ -106,6 +106,7 @@ pub struct UpstairsRepairNotification { pub upstairs_id: DbTypedUuid, pub session_id: DbTypedUuid, + // The Downstairs being repaired pub region_id: DbTypedUuid, pub target_ip: ipv6::Ipv6Addr, pub target_port: SqlU16, diff --git a/nexus/db-model/src/v2p_mapping.rs b/nexus/db-model/src/v2p_mapping.rs new file mode 100644 index 0000000000..43831f7503 --- /dev/null +++ b/nexus/db-model/src/v2p_mapping.rs @@ -0,0 +1,16 @@ +use crate::schema::v2p_mapping_view; +use crate::{MacAddr, Vni}; +use ipnetwork::IpNetwork; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +#[derive(Queryable, Selectable, Clone, Debug, Serialize, Deserialize)] +#[diesel(table_name = v2p_mapping_view)] +pub struct V2PMappingView { + pub nic_id: Uuid, + pub sled_id: Uuid, + pub sled_ip: IpNetwork, + pub vni: Vni, + pub mac: MacAddr, + pub ip: IpNetwork, +} diff --git a/nexus/db-model/src/volume_repair.rs b/nexus/db-model/src/volume_repair.rs new file mode 100644 index 0000000000..a92fcd3425 --- /dev/null +++ b/nexus/db-model/src/volume_repair.rs @@ -0,0 +1,20 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::schema::volume_repair; +use uuid::Uuid; + +/// When modifying a Volume by replacing its parts, Nexus should take care to +/// only replace one region or snapshot for a volume at a time. Technically, the +/// Upstairs can support two at a time, but codifying "only one at a time" is +/// safer, and does not allow the possibility for a Nexus bug to replace all +/// three regions of a region set at a time (aka total data loss!). This "one at +/// a time" constraint is enforced by each repair also creating a VolumeRepair +/// record, a table for which there is a UNIQUE CONSTRAINT on the volume ID.
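[Editor's sketch, not part of the patch: one way the UNIQUE constraint described above serializes repairs. The error-classification helper and its wording are assumptions for illustration; only the "unique violation on volume_id means another repair holds the volume" behavior is taken from the doc comment, and the Diesel error variants used are standard ones.]

use diesel::result::{DatabaseErrorKind, Error as DieselError};

/// Sketch: classify the outcome of inserting a row into `volume_repair`.
/// A unique violation on `volume_id` means another repair already "owns"
/// this volume, so callers would treat it as a retryable conflict rather
/// than a hard failure.
fn volume_repair_insert_outcome(result: Result<usize, DieselError>) -> &'static str {
    match result {
        // The insert succeeded: this repair now holds the per-volume lock.
        Ok(_) => "repair record created; this repair owns the volume",
        // The UNIQUE constraint on volume_id rejected a second concurrent repair.
        Err(DieselError::DatabaseError(DatabaseErrorKind::UniqueViolation, _)) => {
            "another repair is in progress for this volume (conflict)"
        }
        // Anything else is an unrelated database problem.
        Err(_) => "unrelated database error",
    }
}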
+#[derive(Queryable, Insertable, Debug, Selectable, Clone)] +#[diesel(table_name = volume_repair)] +pub struct VolumeRepair { + pub volume_id: Uuid, + pub repair_id: Uuid, +} diff --git a/nexus/db-model/src/vpc.rs b/nexus/db-model/src/vpc.rs index 8a4dc0e349..88879a0436 100644 --- a/nexus/db-model/src/vpc.rs +++ b/nexus/db-model/src/vpc.rs @@ -14,6 +14,7 @@ use nexus_types::external_api::params; use nexus_types::external_api::views; use nexus_types::identity::Resource; use omicron_common::api::external; +use omicron_common::api::external::Ipv6NetExt; use serde::Deserialize; use serde::Serialize; use uuid::Uuid; @@ -83,22 +84,20 @@ impl IncompleteVpc { params: params::VpcCreate, ) -> Result { let identity = VpcIdentity::new(vpc_id, params.identity); - let ipv6_prefix = IpNetwork::from( - match params.ipv6_prefix { - None => defaults::random_vpc_ipv6_prefix(), - Some(prefix) => { - if prefix.is_vpc_prefix() { - Ok(prefix) - } else { - Err(external::Error::invalid_request( - "VPC IPv6 address prefixes must be in the \ + let ipv6_prefix = oxnet::IpNet::from(match params.ipv6_prefix { + None => defaults::random_vpc_ipv6_prefix(), + Some(prefix) => { + if prefix.is_vpc_prefix() { + Ok(prefix) + } else { + Err(external::Error::invalid_request( + "VPC IPv6 address prefixes must be in the \ Unique Local Address range `fd00::/48` (RFD 4193)", - )) - } + )) } - }? - .0, - ); + } + }?) + .into(); Ok(Self { identity, project_id, diff --git a/nexus/db-model/src/vpc_subnet.rs b/nexus/db-model/src/vpc_subnet.rs index 407c933ef2..f3c90a908e 100644 --- a/nexus/db-model/src/vpc_subnet.rs +++ b/nexus/db-model/src/vpc_subnet.rs @@ -50,8 +50,8 @@ impl VpcSubnet { subnet_id: Uuid, vpc_id: Uuid, identity: external::IdentityMetadataCreateParams, - ipv4_block: external::Ipv4Net, - ipv6_block: external::Ipv6Net, + ipv4_block: oxnet::Ipv4Net, + ipv6_block: oxnet::Ipv6Net, ) -> Self { let identity = VpcSubnetIdentity::new(subnet_id, identity); Self { diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index 5f99b904fc..135f2fcdf7 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [build-dependencies] omicron-rpaths.workspace = true @@ -30,6 +33,7 @@ newtype_derive.workspace = true once_cell.workspace = true openssl.workspace = true oso.workspace = true +oxnet.workspace = true paste.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. 
pq-sys = "*" @@ -65,6 +69,13 @@ omicron-uuid-kinds.workspace = true oximeter.workspace = true omicron-workspace-hack.workspace = true +# only enabled during tests or via the `testing` feature +omicron-test-utils = { workspace = true, optional = true } + +[features] +# Enable to export `datastore_test` +testing = ["omicron-test-utils"] + [dev-dependencies] assert_matches.workspace = true camino-tempfile.workspace = true diff --git a/nexus/db-queries/src/authn/external/mod.rs b/nexus/db-queries/src/authn/external/mod.rs index 051db35ebf..623544d38c 100644 --- a/nexus/db-queries/src/authn/external/mod.rs +++ b/nexus/db-queries/src/authn/external/mod.rs @@ -94,7 +94,7 @@ where }), Err(source) => Err(authn::Error { reason: Reason::LoadSiloAuthnPolicy { source }, - schemes_tried: schemes_tried, + schemes_tried, }), }; } diff --git a/nexus/db-queries/src/authn/silos.rs b/nexus/db-queries/src/authn/silos.rs index ff1ae71133..fc5068fc3c 100644 --- a/nexus/db-queries/src/authn/silos.rs +++ b/nexus/db-queries/src/authn/silos.rs @@ -413,7 +413,7 @@ impl SamlIdentityProvider { group.trim().to_string(); // Skip empty groups - if group.len() == 0 { + if group.is_empty() { continue; } diff --git a/nexus/db-queries/src/authz/api_resources.rs b/nexus/db-queries/src/authz/api_resources.rs index 70bc9ab2eb..69b883a8cf 100644 --- a/nexus/db-queries/src/authz/api_resources.rs +++ b/nexus/db-queries/src/authz/api_resources.rs @@ -1060,7 +1060,7 @@ authz_resource! { authz_resource! { name = "PhysicalDisk", parent = "Fleet", - primary_key = (String, String, String), + primary_key = Uuid, roles_allowed = false, polar_snippet = FleetChild, } diff --git a/nexus/db-queries/src/authz/policy_test/resources.rs b/nexus/db-queries/src/authz/policy_test/resources.rs index 96cefb3db4..bc30e77fac 100644 --- a/nexus/db-queries/src/authz/policy_test/resources.rs +++ b/nexus/db-queries/src/authz/policy_test/resources.rs @@ -102,10 +102,12 @@ pub async fn make_resources( make_services(&mut builder).await; + let physical_disk_id = + "c9f923f6-caf3-4c83-96f9-8ffe8c627dd2".parse().unwrap(); builder.new_resource(authz::PhysicalDisk::new( authz::FLEET, - ("vendor".to_string(), "serial".to_string(), "model".to_string()), - LookupType::ByCompositeId("vendor-serial-model".to_string()), + physical_disk_id, + LookupType::ById(physical_disk_id), )); let device_user_code = String::from("a-device-user-code"); diff --git a/nexus/db-queries/src/db/collection_insert.rs b/nexus/db-queries/src/db/collection_insert.rs index ef2a4a4d48..69906e6498 100644 --- a/nexus/db-queries/src/db/collection_insert.rs +++ b/nexus/db-queries/src/db/collection_insert.rs @@ -409,7 +409,7 @@ mod test { use async_bb8_diesel::{ AsyncRunQueryDsl, AsyncSimpleConnection, ConnectionManager, }; - use chrono::{NaiveDateTime, TimeZone, Utc}; + use chrono::{DateTime, Utc}; use db_macros::Resource; use diesel::expression_methods::ExpressionMethods; use diesel::pg::Pg; @@ -498,12 +498,8 @@ mod test { let resource_id = uuid::Uuid::parse_str("223cb7f7-0d3a-4a4e-a5e1-ad38ecb785d8") .unwrap(); - let create_time = Utc.from_utc_datetime( - &NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), - ); - let modify_time = Utc.from_utc_datetime( - &NaiveDateTime::from_timestamp_opt(1, 0).unwrap(), - ); + let create_time = DateTime::from_timestamp(0, 0).unwrap(); + let modify_time = DateTime::from_timestamp(1, 0).unwrap(); let insert = Collection::insert_resource( collection_id, diesel::insert_into(resource::table).values(vec![( @@ -615,12 +611,8 @@ mod test { .await .unwrap(); - let 
create_time = Utc.from_utc_datetime( - &NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), - ); - let modify_time = Utc.from_utc_datetime( - &NaiveDateTime::from_timestamp_opt(1, 0).unwrap(), - ); + let create_time = DateTime::from_timestamp(0, 0).unwrap(); + let modify_time = DateTime::from_timestamp(1, 0).unwrap(); let resource = Collection::insert_resource( collection_id, diesel::insert_into(resource::table).values(vec![( diff --git a/nexus/db-queries/src/db/datastore/address_lot.rs b/nexus/db-queries/src/db/datastore/address_lot.rs index 9c75c6fd1b..459c2a4c36 100644 --- a/nexus/db-queries/src/db/datastore/address_lot.rs +++ b/nexus/db-queries/src/db/datastore/address_lot.rs @@ -384,7 +384,7 @@ pub(crate) async fn try_reserve_block( address_lot_id: lot_id, first_address: inet, last_address: inet, - anycast: anycast, + anycast, }; diesel::insert_into(rsvd_block_dsl::address_lot_rsvd_block) diff --git a/nexus/db-queries/src/db/datastore/allow_list.rs b/nexus/db-queries/src/db/datastore/allow_list.rs new file mode 100644 index 0000000000..111ccad08f --- /dev/null +++ b/nexus/db-queries/src/db/datastore/allow_list.rs @@ -0,0 +1,209 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Datastore methods for operating on source IP allowlists. + +use crate::authz; +use crate::context::OpContext; +use crate::db::error::public_error_from_diesel; +use crate::db::error::ErrorHandler; +use crate::db::fixed_data::allow_list::USER_FACING_SERVICES_ALLOW_LIST_ID; +use crate::db::DbConnection; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::ExpressionMethods; +use diesel::QueryDsl; +use diesel::SelectableHelper; +use nexus_db_model::schema::allow_list; +use nexus_db_model::AllowList; +use omicron_common::api::external::AllowedSourceIps; +use omicron_common::api::external::Error; +use omicron_common::api::external::LookupType; +use omicron_common::api::external::ResourceType; + +impl super::DataStore { + /// Fetch the list of allowed source IPs. + /// + /// This is currently effectively a singleton, since it is populated by RSS + /// and we only provide APIs in Nexus to update it, not create / delete. + pub async fn allow_list_view( + &self, + opctx: &OpContext, + ) -> Result { + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + let conn = self.pool_connection_authorized(opctx).await?; + allow_list::dsl::allow_list + .find(USER_FACING_SERVICES_ALLOW_LIST_ID) + .first_async::(&*conn) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByLookup( + ResourceType::AllowList, + LookupType::ById(USER_FACING_SERVICES_ALLOW_LIST_ID), + ), + ) + }) + } + + /// Upsert and return a list of allowed source IPs. 
+ pub async fn allow_list_upsert( + &self, + opctx: &OpContext, + allowed_ips: AllowedSourceIps, + ) -> Result { + let conn = self.pool_connection_authorized(opctx).await?; + Self::allow_list_upsert_on_connection(opctx, &conn, allowed_ips).await + } + + pub(crate) async fn allow_list_upsert_on_connection( + opctx: &OpContext, + conn: &async_bb8_diesel::Connection, + allowed_ips: AllowedSourceIps, + ) -> Result { + use allow_list::dsl; + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + let record = + AllowList::new(USER_FACING_SERVICES_ALLOW_LIST_ID, allowed_ips); + diesel::insert_into(dsl::allow_list) + .values(record.clone()) + .returning(AllowList::as_returning()) + .on_conflict(dsl::id) + .do_update() + .set(( + dsl::allowed_ips.eq(record.allowed_ips), + dsl::time_modified.eq(record.time_modified), + )) + .get_result_async(conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } +} + +#[cfg(test)] +mod tests { + use crate::db::{ + datastore::test_utils::datastore_test, + fixed_data::allow_list::USER_FACING_SERVICES_ALLOW_LIST_ID, + }; + use nexus_test_utils::db::test_setup_database; + use omicron_common::api::external; + use omicron_test_utils::dev; + + #[tokio::test] + async fn test_allowed_source_ip_database_ops() { + let logctx = dev::test_setup_log("test_allowed_source_ip_database_ops"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // Should have the default to start with. + let record = datastore + .allow_list_view(&opctx) + .await + .expect("Expected default data populated in dbinit.sql"); + assert!( + record.allowed_ips.is_none(), + "Expected default ANY allowlist, represented as NULL in the DB" + ); + + // Upsert an allowlist, with some specific IPs. + let ips = + vec!["10.0.0.0/8".parse().unwrap(), "::1/64".parse().unwrap()]; + let allowed_ips = + external::AllowedSourceIps::try_from(ips.as_slice()).unwrap(); + let record = datastore + .allow_list_upsert(&opctx, allowed_ips) + .await + .expect("Expected this insert to succeed"); + assert_eq!( + record.id, USER_FACING_SERVICES_ALLOW_LIST_ID, + "Record should have hard-coded allowlist ID" + ); + assert_eq!( + ips, + record.allowed_ips.unwrap(), + "Inserted and re-read incorrect allowed source ips" + ); + + // Upsert a new list, verify it's changed. + let new_ips = + vec!["10.0.0.0/4".parse().unwrap(), "fd00::10/32".parse().unwrap()]; + let allowed_ips = + external::AllowedSourceIps::try_from(new_ips.as_slice()).unwrap(); + let new_record = datastore + .allow_list_upsert(&opctx, allowed_ips) + .await + .expect("Expected this insert to succeed"); + assert_eq!( + new_record.id, USER_FACING_SERVICES_ALLOW_LIST_ID, + "Record should have hard-coded allowlist ID" + ); + assert_eq!( + record.time_created, new_record.time_created, + "Time created should not have changed" + ); + assert!( + record.time_modified < new_record.time_modified, + "Time modified should have changed" + ); + assert_eq!( + &new_ips, + new_record.allowed_ips.as_ref().unwrap(), + "Updated allowed IPs are incorrect" + ); + + // Insert an allowlist letting anything in, and check it. 
+ let record = new_record; + let allowed_ips = external::AllowedSourceIps::Any; + let new_record = datastore + .allow_list_upsert(&opctx, allowed_ips) + .await + .expect("Expected this insert to succeed"); + assert_eq!( + new_record.id, USER_FACING_SERVICES_ALLOW_LIST_ID, + "Record should have hard-coded allowlist ID" + ); + assert_eq!( + record.time_created, new_record.time_created, + "Time created should not have changed" + ); + assert!( + record.time_modified < new_record.time_modified, + "Time modified should have changed" + ); + assert!( + new_record.allowed_ips.is_none(), + "NULL should be used in the DB to represent any allowed source IPs", + ); + + // Lastly change it back to a real list again. + let record = new_record; + let allowed_ips = + external::AllowedSourceIps::try_from(new_ips.as_slice()).unwrap(); + let new_record = datastore + .allow_list_upsert(&opctx, allowed_ips) + .await + .expect("Expected this insert to succeed"); + assert_eq!( + new_record.id, USER_FACING_SERVICES_ALLOW_LIST_ID, + "Record should have hard-coded allowlist ID" + ); + assert_eq!( + record.time_created, new_record.time_created, + "Time created should not have changed" + ); + assert!( + record.time_modified < new_record.time_modified, + "Time modified should have changed" + ); + assert_eq!( + &new_ips, + new_record.allowed_ips.as_ref().unwrap(), + "Updated allowed IPs are incorrect" + ); + + db.cleanup().await.expect("failed to cleanup database"); + logctx.cleanup_successful(); + } +} diff --git a/nexus/db-queries/src/db/datastore/bgp.rs b/nexus/db-queries/src/db/datastore/bgp.rs index bc103ecff9..feb41443b2 100644 --- a/nexus/db-queries/src/db/datastore/bgp.rs +++ b/nexus/db-queries/src/db/datastore/bgp.rs @@ -13,7 +13,10 @@ use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use ipnetwork::IpNetwork; -use nexus_db_model::BgpPeerView; +use nexus_db_model::{ + BgpPeerView, SwitchPortBgpPeerConfigAllowExport, + SwitchPortBgpPeerConfigAllowImport, SwitchPortBgpPeerConfigCommunity, +}; use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::api::external::http_pagination::PaginatedBy; @@ -492,4 +495,114 @@ impl DataStore { Ok(results) } + + pub async fn communities_for_peer( + &self, + opctx: &OpContext, + port_settings_id: Uuid, + interface_name: &String, + addr: IpNetwork, + ) -> ListResultVec { + use db::schema::switch_port_settings_bgp_peer_config_communities::dsl; + + let results = dsl::switch_port_settings_bgp_peer_config_communities + .filter(dsl::port_settings_id.eq(port_settings_id)) + .filter(dsl::interface_name.eq(interface_name.clone())) + .filter(dsl::addr.eq(addr)) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
+ .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(results) + } + + pub async fn allow_export_for_peer( + &self, + opctx: &OpContext, + port_settings_id: Uuid, + interface_name: &String, + addr: IpNetwork, + ) -> LookupResult>> { + use db::schema::switch_port_settings_bgp_peer_config as db_peer; + use db::schema::switch_port_settings_bgp_peer_config::dsl as peer_dsl; + use db::schema::switch_port_settings_bgp_peer_config_allow_export as db_allow; + use db::schema::switch_port_settings_bgp_peer_config_allow_export::dsl; + + let conn = self.pool_connection_authorized(opctx).await?; + let result = self + .transaction_retry_wrapper("bgp_allow_export_for_peer") + .transaction(&conn, |conn| async move { + let active = peer_dsl::switch_port_settings_bgp_peer_config + .filter(db_peer::port_settings_id.eq(port_settings_id)) + .select(db_peer::allow_export_list_active) + .limit(1) + .first_async::(&conn) + .await?; + + if !active { + return Ok(None); + } + + let list = + dsl::switch_port_settings_bgp_peer_config_allow_export + .filter(db_allow::port_settings_id.eq(port_settings_id)) + .filter( + db_allow::interface_name.eq(interface_name.clone()), + ) + .filter(db_allow::addr.eq(addr)) + .load_async(&conn) + .await?; + + Ok(Some(list)) + }) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(result) + } + + pub async fn allow_import_for_peer( + &self, + opctx: &OpContext, + port_settings_id: Uuid, + interface_name: &String, + addr: IpNetwork, + ) -> LookupResult>> { + use db::schema::switch_port_settings_bgp_peer_config as db_peer; + use db::schema::switch_port_settings_bgp_peer_config::dsl as peer_dsl; + use db::schema::switch_port_settings_bgp_peer_config_allow_import as db_allow; + use db::schema::switch_port_settings_bgp_peer_config_allow_import::dsl; + + let conn = self.pool_connection_authorized(opctx).await?; + let result = self + .transaction_retry_wrapper("bgp_allow_export_for_peer") + .transaction(&conn, |conn| async move { + let active = peer_dsl::switch_port_settings_bgp_peer_config + .filter(db_peer::port_settings_id.eq(port_settings_id)) + .select(db_peer::allow_import_list_active) + .limit(1) + .first_async::(&conn) + .await?; + + if !active { + return Ok(None); + } + + let list = + dsl::switch_port_settings_bgp_peer_config_allow_import + .filter(db_allow::port_settings_id.eq(port_settings_id)) + .filter( + db_allow::interface_name.eq(interface_name.clone()), + ) + .filter(db_allow::addr.eq(addr)) + .load_async(&conn) + .await?; + + Ok(Some(list)) + }) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(result) + } } diff --git a/nexus/db-queries/src/db/datastore/cockroachdb_settings.rs b/nexus/db-queries/src/db/datastore/cockroachdb_settings.rs new file mode 100644 index 0000000000..177cf673e7 --- /dev/null +++ b/nexus/db-queries/src/db/datastore/cockroachdb_settings.rs @@ -0,0 +1,229 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Datastore methods involving CockroachDB settings, which are managed by the +//! Reconfigurator. 
+ +use super::DataStore; +use crate::context::OpContext; +use crate::db::error::public_error_from_diesel; +use crate::db::error::ErrorHandler; +use crate::db::raw_query_builder::QueryBuilder; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::deserialize::Queryable; +use diesel::sql_types; +use nexus_types::deployment::CockroachDbSettings; +use omicron_common::api::external::Error; +use omicron_common::api::external::LookupResult; + +/// This bit of SQL calculates a "state fingerprint" for the CockroachDB +/// cluster. `DataStore::cockroachdb_settings` calculates the fingerprint and +/// returns it to the caller. `DataStore::cockroach_setting_set_*` requires the +/// caller send the fingerprint, and it verifies it against the current state of +/// the cluster. +/// +/// This is done to help prevent TOCTOU-class bugs that arise from blueprint +/// planning taking place before blueprint execution. Here are the ones we're +/// aware of, which guide the contents of this fingerprint: +/// +/// - If the cluster version has changed, we are probably in the middle of +/// an upgrade. We should not be setting any settings and should re-plan. +/// (`crdb_internal.active_version()`) +/// - If the major version of CockroachDB has changed, we should not trust +/// the blueprint's value for the `cluster.preserve_downgrade_option` +/// setting; if set to an empty string and we've just upgraded the software +/// to the next major version, this will result in unwanted finalization. +/// (`crdb_internal.node_executable_version()`) +/// +/// Because these are run as part of a gadget that allows CockroachDB to verify +/// the fingerprint during a `SET CLUSTER SETTING` statement, which cannot +/// be run as part of a multi-transaction statement or CTE, we are limited to +/// values that can be returned from built-in functions and operators. +/// +/// This fingerprint should return a STRING value. It is safe to modify how this +/// fingerprint is calculated between Nexus releases; the stale fingerprint in +/// the previous blueprint will be rejected. +const STATE_FINGERPRINT_SQL: &str = r#" + encode(digest( + crdb_internal.active_version() + || crdb_internal.node_executable_version() + , 'sha1'), 'hex') +"#; + +impl DataStore { + /// Get the current CockroachDB settings. + pub async fn cockroachdb_settings( + &self, + opctx: &OpContext, + ) -> LookupResult { + #[derive(Debug, Queryable)] + struct QueryOutput { + state_fingerprint: String, + version: String, + preserve_downgrade: String, + } + type QueryRow = (sql_types::Text, sql_types::Text, sql_types::Text); + + let conn = self.pool_connection_authorized(opctx).await?; + let output: QueryOutput = QueryBuilder::new() + .sql("SELECT ") + .sql(STATE_FINGERPRINT_SQL) + .sql(", * FROM ") + .sql("[SHOW CLUSTER SETTING version], ") + .sql("[SHOW CLUSTER SETTING cluster.preserve_downgrade_option]") + .query::() + .get_result_async(&*conn) + .await + .map_err(|err| { + public_error_from_diesel(err, ErrorHandler::Server) + })?; + Ok(CockroachDbSettings { + state_fingerprint: output.state_fingerprint, + version: output.version, + preserve_downgrade: output.preserve_downgrade, + }) + } + + /// Set a CockroachDB setting with a `String` value. + /// + /// This cannot be run in a multi-statement transaction. 
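[Editor's sketch, not part of the patch: the approximate statement shape assembled by the setter that follows, shown here as a Rust constant purely for illustration. The `$1`/`$2` placeholders (fingerprint and new value) and the formatting are assumptions; the fingerprint expression is copied from `STATE_FINGERPRINT_SQL` above.]

/// Illustrative only: roughly what
/// `cockroachdb_setting_set_string(.., "cluster.preserve_downgrade_option", ..)`
/// sends to CockroachDB. If the caller's fingerprint ($1) does not match the
/// cluster's current fingerprint, the CASE falls through to NULL, which no
/// cluster setting accepts, so the statement fails instead of applying a
/// stale value ($2).
const GUARDED_SET_SKETCH: &str = r#"
SET CLUSTER SETTING cluster.preserve_downgrade_option = CASE
    encode(digest(
        crdb_internal.active_version()
        || crdb_internal.node_executable_version()
    , 'sha1'), 'hex') = $1
    WHEN TRUE THEN $2
    ELSE NULL
END
"#;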
+ pub async fn cockroachdb_setting_set_string( + &self, + opctx: &OpContext, + state_fingerprint: String, + setting: &'static str, + value: String, + ) -> Result<(), Error> { + let conn = self.pool_connection_authorized(opctx).await?; + QueryBuilder::new() + .sql("SET CLUSTER SETTING ") + .sql(setting) + // `CASE` is the one conditional statement we get out of the + // CockroachDB grammar for `SET CLUSTER SETTING`. + .sql(" = CASE ") + .sql(STATE_FINGERPRINT_SQL) + .sql(" = ") + .param() + .sql(" WHEN TRUE THEN ") + .param() + // This is the gadget that allows us to reject changing a setting + // if the fingerprint doesn't match. CockroachDB settings are typed, + // but none of them are nullable, and NULL cannot be coerced into + // any of them, so this branch returns an error if it's hit (tested + // below in `test_state_fingerprint`). + .sql(" ELSE NULL END") + .bind::(state_fingerprint) + .bind::(value.clone()) + .query::<()>() + .execute_async(&*conn) + .await + .map_err(|err| { + public_error_from_diesel(err, ErrorHandler::Server) + })?; + info!( + opctx.log, + "set cockroachdb setting"; + "setting" => setting, + "value" => &value, + ); + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::{CockroachDbSettings, OpContext}; + use nexus_test_utils::db::test_setup_database; + use nexus_types::deployment::CockroachDbClusterVersion; + use omicron_common::api::external::Error; + use omicron_test_utils::dev; + use std::sync::Arc; + + #[tokio::test] + async fn test_preserve_downgrade() { + let logctx = dev::test_setup_log("test_preserve_downgrade"); + let mut db = test_setup_database(&logctx.log).await; + let (_, datastore) = + crate::db::datastore::test_utils::datastore_test(&logctx, &db) + .await; + let opctx = + OpContext::for_tests(logctx.log.new(o!()), Arc::clone(&datastore)); + + let settings = datastore.cockroachdb_settings(&opctx).await.unwrap(); + // With a fresh cluster, this is the expected state + let version = CockroachDbClusterVersion::NEWLY_INITIALIZED.to_string(); + assert_eq!(settings.version, version); + assert_eq!(settings.preserve_downgrade, ""); + + // Verify that if a fingerprint is wrong, we get the expected SQL error + // back. + let Err(Error::InternalError { internal_message }) = datastore + .cockroachdb_setting_set_string( + &opctx, + String::new(), + "cluster.preserve_downgrade_option", + version.clone(), + ) + .await + else { + panic!("should have returned an internal error"); + }; + assert_eq!( + internal_message, + "unexpected database error: \ + cannot use unknown tree.dNull value for string setting" + ); + // And ensure that the state didn't change. 
+ assert_eq!( + settings, + datastore.cockroachdb_settings(&opctx).await.unwrap() + ); + + // Test setting it (twice, to verify doing it again doesn't trigger + // an error) + for _ in 0..2 { + datastore + .cockroachdb_setting_set_string( + &opctx, + settings.state_fingerprint.clone(), + "cluster.preserve_downgrade_option", + version.clone(), + ) + .await + .unwrap(); + assert_eq!( + datastore.cockroachdb_settings(&opctx).await.unwrap(), + CockroachDbSettings { + state_fingerprint: settings.state_fingerprint.clone(), + version: version.clone(), + preserve_downgrade: version.clone(), + } + ); + } + + // Test resetting it (twice, same reason) + for _ in 0..2 { + datastore + .cockroachdb_setting_set_string( + &opctx, + settings.state_fingerprint.clone(), + "cluster.preserve_downgrade_option", + String::new(), + ) + .await + .unwrap(); + assert_eq!( + datastore.cockroachdb_settings(&opctx).await.unwrap(), + CockroachDbSettings { + state_fingerprint: settings.state_fingerprint.clone(), + version: version.clone(), + preserve_downgrade: String::new(), + } + ); + } + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } +} diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 292f13354f..3617f6d7fc 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -230,7 +230,10 @@ mod test { // Create a fake zpool that backs our fake datasets. let zpool_id = Uuid::new_v4(); let zpool = Zpool::new(zpool_id, sled_id, Uuid::new_v4()); - datastore.zpool_upsert(zpool).await.expect("failed to upsert zpool"); + datastore + .zpool_insert(opctx, zpool) + .await + .expect("failed to upsert zpool"); // Inserting a new dataset should succeed. let dataset1 = datastore diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 02645ca4f6..790dc0d72c 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -35,25 +35,29 @@ use diesel::OptionalExtension; use diesel::QueryDsl; use diesel::RunQueryDsl; use nexus_db_model::Blueprint as DbBlueprint; +use nexus_db_model::BpOmicronPhysicalDisk; use nexus_db_model::BpOmicronZone; use nexus_db_model::BpOmicronZoneNic; -use nexus_db_model::BpOmicronZoneNotInService; +use nexus_db_model::BpSledOmicronPhysicalDisks; use nexus_db_model::BpSledOmicronZones; +use nexus_db_model::BpSledState; use nexus_db_model::BpTarget; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintMetadata; +use nexus_types::deployment::BlueprintPhysicalDisksConfig; use nexus_types::deployment::BlueprintTarget; -use nexus_types::deployment::BlueprintZoneDisposition; -use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::BlueprintZonesConfig; +use nexus_types::deployment::CockroachDbPreserveDowngrade; +use nexus_types::external_api::views::SledState; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::bail_unless; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use std::collections::BTreeMap; -use std::collections::BTreeSet; use uuid::Uuid; impl DataStore { @@ -108,36 +112,40 @@ impl DataStore { let row_blueprint = DbBlueprint::from(blueprint); let blueprint_id = row_blueprint.id; - // 
`Blueprint` stores the policy for each zone next to the zone itself. - // This would ideally be represented as a simple column in - // bp_omicron_zone. - // - // But historically, `Blueprint` used to store the set of zones in - // service in a BTreeSet. Since most zones are expected to be in - // service, we store the set of zones NOT in service (which we expect - // to be much smaller, often empty). Build that inverted set here. - // - // This will soon be replaced with an extra column in the - // `bp_omicron_zone` table, coupled with other data migrations. - let omicron_zones_not_in_service = blueprint - .all_blueprint_zones(BlueprintZoneFilter::All) - .filter_map(|(_, zone)| { - // This is going to go away soon when we change the database - // representation to store the zone disposition enum next to - // each zone. For now, do an exhaustive match so that this - // fails if we add a new variant. - match zone.disposition { - BlueprintZoneDisposition::InService => None, - BlueprintZoneDisposition::Quiesced => { - Some(BpOmicronZoneNotInService { - blueprint_id, - bp_omicron_zone_id: zone.config.id, - }) - } - } + let sled_states = blueprint + .sled_state + .iter() + .map(|(&sled_id, &state)| BpSledState { + blueprint_id, + sled_id: sled_id.into(), + sled_state: state.into(), }) .collect::>(); + let sled_omicron_physical_disks = blueprint + .blueprint_disks + .iter() + .map(|(sled_id, disks_config)| { + BpSledOmicronPhysicalDisks::new( + blueprint_id, + sled_id.into_untyped_uuid(), + disks_config, + ) + }) + .collect::>(); + let omicron_physical_disks = blueprint + .blueprint_disks + .iter() + .flat_map(|(sled_id, disks_config)| { + disks_config.disks.iter().map(move |disk| { + BpOmicronPhysicalDisk::new( + blueprint_id, + sled_id.into_untyped_uuid(), + disk, + ) + }) + }) + .collect::>(); let sled_omicron_zones = blueprint .blueprint_zones .iter() @@ -161,7 +169,7 @@ impl DataStore { .flat_map(|zones_config| { zones_config.zones.iter().filter_map(|zone| { BpOmicronZoneNic::new(blueprint_id, zone) - .with_context(|| format!("zone {:?}", zone.config.id)) + .with_context(|| format!("zone {}", zone.id)) .map_err(|e| Error::internal_error(&format!("{:#}", e))) .transpose() }) @@ -192,6 +200,34 @@ impl DataStore { .await?; } + // Insert all the sled states for this blueprint. + { + use db::schema::bp_sled_state::dsl as sled_state; + + let _ = diesel::insert_into(sled_state::bp_sled_state) + .values(sled_states) + .execute_async(&conn) + .await?; + } + + // Insert all physical disks for this blueprint. + + { + use db::schema::bp_sled_omicron_physical_disks::dsl as sled_disks; + let _ = diesel::insert_into(sled_disks::bp_sled_omicron_physical_disks) + .values(sled_omicron_physical_disks) + .execute_async(&conn) + .await?; + } + + { + use db::schema::bp_omicron_physical_disk::dsl as omicron_disk; + let _ = diesel::insert_into(omicron_disk::bp_omicron_physical_disk) + .values(omicron_physical_disks) + .execute_async(&conn) + .await?; + } + // Insert all the Omicron zones for this blueprint. 
{ use db::schema::bp_sled_omicron_zones::dsl as sled_zones; @@ -218,15 +254,6 @@ impl DataStore { .await?; } - { - use db::schema::bp_omicron_zones_not_in_service::dsl; - let _ = - diesel::insert_into(dsl::bp_omicron_zones_not_in_service) - .values(omicron_zones_not_in_service) - .execute_async(&conn) - .await?; - } - Ok(()) }) .await @@ -257,6 +284,8 @@ impl DataStore { parent_blueprint_id, internal_dns_version, external_dns_version, + cockroachdb_fingerprint, + cockroachdb_setting_preserve_downgrade, time_created, creator, comment, @@ -280,17 +309,64 @@ impl DataStore { blueprint.parent_blueprint_id, *blueprint.internal_dns_version, *blueprint.external_dns_version, + blueprint.cockroachdb_fingerprint, + blueprint.cockroachdb_setting_preserve_downgrade, blueprint.time_created, blueprint.creator, blueprint.comment, ) }; + let cockroachdb_setting_preserve_downgrade = + CockroachDbPreserveDowngrade::from_optional_string( + &cockroachdb_setting_preserve_downgrade, + ) + .map_err(|_| { + Error::internal_error(&format!( + "unrecognized cluster version {:?}", + cockroachdb_setting_preserve_downgrade + )) + })?; + + // Load the sled states for this blueprint. + let sled_state: BTreeMap = { + use db::schema::bp_sled_state::dsl; + + let mut sled_state = BTreeMap::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = paginated( + dsl::bp_sled_state, + dsl::sled_id, + &p.current_pagparams(), + ) + .filter(dsl::blueprint_id.eq(blueprint_id)) + .select(BpSledState::as_select()) + .load_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?; + + paginator = p.found_batch(&batch, &|s| s.sled_id); + + for s in batch { + let old = sled_state + .insert(s.sled_id.into(), s.sled_state.into()); + bail_unless!( + old.is_none(), + "found duplicate sled ID in bp_sled_state: {}", + s.sled_id + ); + } + } + sled_state + }; // Read this blueprint's `bp_sled_omicron_zones` rows, which describes // the `OmicronZonesConfig` generation number for each sled that is a // part of this blueprint. Construct the BTreeMap we ultimately need, // but all the `zones` vecs will be empty until our next query below. - let mut blueprint_zones: BTreeMap = { + let mut blueprint_zones: BTreeMap = { use db::schema::bp_sled_omicron_zones::dsl; let mut blueprint_zones = BTreeMap::new(); @@ -313,7 +389,7 @@ impl DataStore { for s in batch { let old = blueprint_zones.insert( - s.sled_id, + s.sled_id.into(), BlueprintZonesConfig { generation: *s.generation, zones: Vec::new(), @@ -330,82 +406,87 @@ impl DataStore { blueprint_zones }; - // Assemble a mutable map of all the NICs found, by NIC id. As we - // match these up with the corresponding zone below, we'll remove items - // from this set. That way we can tell if the same NIC was used twice - // or not used at all. - let mut omicron_zone_nics = { - use db::schema::bp_omicron_zone_nic::dsl; + // Do the same thing we just did for zones, but for physical disks too. 
+ let mut blueprint_disks: BTreeMap< + SledUuid, + BlueprintPhysicalDisksConfig, + > = { + use db::schema::bp_sled_omicron_physical_disks::dsl; - let mut omicron_zone_nics = BTreeMap::new(); + let mut blueprint_physical_disks = BTreeMap::new(); let mut paginator = Paginator::new(SQL_BATCH_SIZE); while let Some(p) = paginator.next() { let batch = paginated( - dsl::bp_omicron_zone_nic, - dsl::id, + dsl::bp_sled_omicron_physical_disks, + dsl::sled_id, &p.current_pagparams(), ) .filter(dsl::blueprint_id.eq(blueprint_id)) - .select(BpOmicronZoneNic::as_select()) + .select(BpSledOmicronPhysicalDisks::as_select()) .load_async(&*conn) .await .map_err(|e| { public_error_from_diesel(e, ErrorHandler::Server) })?; - paginator = p.found_batch(&batch, &|n| n.id); + paginator = p.found_batch(&batch, &|s| s.sled_id); - for n in batch { - let nic_id = n.id; - let old = omicron_zone_nics.insert(nic_id, n); + for s in batch { + let old = blueprint_physical_disks.insert( + SledUuid::from_untyped_uuid(s.sled_id), + BlueprintPhysicalDisksConfig { + generation: *s.generation, + disks: Vec::new(), + }, + ); bail_unless!( old.is_none(), - "found duplicate NIC ID in bp_omicron_zone_nic: {}", - nic_id, + "found duplicate sled ID in bp_sled_omicron_physical_disks: {}", + s.sled_id ); } } - omicron_zone_nics + blueprint_physical_disks }; - // Load the list of not-in-service zones. Similar to NICs, we'll use a - // mutable set of zone IDs so we can tell if a zone we expected to be - // inactive wasn't present in the blueprint at all. - let mut omicron_zones_not_in_service = { - use db::schema::bp_omicron_zones_not_in_service::dsl; + // Assemble a mutable map of all the NICs found, by NIC id. As we + // match these up with the corresponding zone below, we'll remove items + // from this set. That way we can tell if the same NIC was used twice + // or not used at all. + let mut omicron_zone_nics = { + use db::schema::bp_omicron_zone_nic::dsl; - let mut omicron_zones_not_in_service = BTreeSet::new(); + let mut omicron_zone_nics = BTreeMap::new(); let mut paginator = Paginator::new(SQL_BATCH_SIZE); while let Some(p) = paginator.next() { let batch = paginated( - dsl::bp_omicron_zones_not_in_service, - dsl::bp_omicron_zone_id, + dsl::bp_omicron_zone_nic, + dsl::id, &p.current_pagparams(), ) .filter(dsl::blueprint_id.eq(blueprint_id)) - .select(BpOmicronZoneNotInService::as_select()) + .select(BpOmicronZoneNic::as_select()) .load_async(&*conn) .await .map_err(|e| { public_error_from_diesel(e, ErrorHandler::Server) })?; - paginator = p.found_batch(&batch, &|z| z.bp_omicron_zone_id); + paginator = p.found_batch(&batch, &|n| n.id); - for z in batch { - let inserted = omicron_zones_not_in_service - .insert(z.bp_omicron_zone_id); + for n in batch { + let nic_id = n.id; + let old = omicron_zone_nics.insert(nic_id, n); bail_unless!( - inserted, - "found duplicate zone ID in \ - bp_omicron_zones_not_in_service: {}", - z.bp_omicron_zone_id, + old.is_none(), + "found duplicate NIC ID in bp_omicron_zone_nic: {}", + nic_id, ); } } - omicron_zones_not_in_service + omicron_zone_nics }; // Load all the zones for each sled. @@ -450,30 +531,23 @@ impl DataStore { }) }) .transpose()?; - let sled_zones = blueprint_zones - .get_mut(&z.sled_id) - .ok_or_else(|| { + let sled_id = SledUuid::from(z.sled_id); + let zone_id = z.id; + let sled_zones = + blueprint_zones.get_mut(&sled_id).ok_or_else(|| { // This error means that we found a row in // bp_omicron_zone with no associated record in // bp_sled_omicron_zones. 
This should be // impossible and reflects either a bug or database // corruption. Error::internal_error(&format!( - "zone {:?}: unknown sled: {:?}", - z.id, z.sled_id + "zone {zone_id}: unknown sled: {sled_id}", )) })?; - let zone_id = z.id; - let disposition = - if omicron_zones_not_in_service.remove(&zone_id) { - BlueprintZoneDisposition::Quiesced - } else { - BlueprintZoneDisposition::InService - }; let zone = z - .into_blueprint_zone_config(nic_row, disposition) + .into_blueprint_zone_config(nic_row) .with_context(|| { - format!("zone {:?}: parse from database", zone_id) + format!("zone {zone_id}: parse from database") }) .map_err(|e| { Error::internal_error(&format!( @@ -486,23 +560,74 @@ impl DataStore { } } + // Sort all zones to match what blueprint builders do. + for (_, zones_config) in blueprint_zones.iter_mut() { + zones_config.sort(); + } + bail_unless!( omicron_zone_nics.is_empty(), "found extra Omicron zone NICs: {:?}", omicron_zone_nics.keys() ); - bail_unless!( - omicron_zones_not_in_service.is_empty(), - "found extra Omicron zones not in service: {:?}", - omicron_zones_not_in_service, - ); + + // Load all the physical disks for each sled. + { + use db::schema::bp_omicron_physical_disk::dsl; + + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + // `paginated` implicitly orders by our `id`, which is also + // handy for testing: the physical disks are always consistently ordered + let batch = paginated( + dsl::bp_omicron_physical_disk, + dsl::id, + &p.current_pagparams(), + ) + .filter(dsl::blueprint_id.eq(blueprint_id)) + .select(BpOmicronPhysicalDisk::as_select()) + .load_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?; + + paginator = p.found_batch(&batch, &|d| d.id); + + for d in batch { + let sled_disks = blueprint_disks + .get_mut(&SledUuid::from_untyped_uuid(d.sled_id)) + .ok_or_else(|| { + // This error means that we found a row in + // bp_omicron_physical_disk with no associated record in + // bp_sled_omicron_physical_disks. This should be + // impossible and reflects either a bug or database + // corruption. + Error::internal_error(&format!( + "disk {}: unknown sled: {}", + d.id, d.sled_id + )) + })?; + sled_disks.disks.push(d.into()); + } + } + } + + // Sort all disks to match what blueprint builders do. + for (_, disks_config) in blueprint_disks.iter_mut() { + disks_config.disks.sort_unstable_by_key(|d| d.id); + } Ok(Blueprint { id: blueprint_id, blueprint_zones, + blueprint_disks, + sled_state, parent_blueprint_id, internal_dns_version, external_dns_version, + cockroachdb_fingerprint, + cockroachdb_setting_preserve_downgrade, time_created, creator, comment, @@ -528,25 +653,25 @@ impl DataStore { let ( nblueprints, + nsled_states, + nsled_physical_disks, + nphysical_disks, nsled_agent_zones, nzones, nnics, - nzones_not_in_service, ) = conn .transaction_async(|conn| async move { // Ensure that blueprint we're about to delete is not the // current target. 
let current_target = self.blueprint_current_target_only(&conn).await?; - if let Some(current_target) = current_target { - if current_target.target_id == blueprint_id { - return Err(TransactionError::CustomError( - Error::conflict(format!( - "blueprint {blueprint_id} is the \ - current target and cannot be deleted", - )), - )); - } + if current_target.target_id == blueprint_id { + return Err(TransactionError::CustomError( + Error::conflict(format!( + "blueprint {blueprint_id} is the \ + current target and cannot be deleted", + )), + )); } // Remove the record describing the blueprint itself. @@ -568,6 +693,37 @@ impl DataStore { )); } + // Remove rows associated with sled states. + let nsled_states = { + use db::schema::bp_sled_state::dsl; + diesel::delete( + dsl::bp_sled_state + .filter(dsl::blueprint_id.eq(blueprint_id)), + ) + .execute_async(&conn) + .await? + }; + + // Remove rows associated with Omicron physical disks + let nsled_physical_disks = { + use db::schema::bp_sled_omicron_physical_disks::dsl; + diesel::delete( + dsl::bp_sled_omicron_physical_disks + .filter(dsl::blueprint_id.eq(blueprint_id)), + ) + .execute_async(&conn) + .await? + }; + let nphysical_disks = { + use db::schema::bp_omicron_physical_disk::dsl; + diesel::delete( + dsl::bp_omicron_physical_disk + .filter(dsl::blueprint_id.eq(blueprint_id)), + ) + .execute_async(&conn) + .await? + }; + // Remove rows associated with Omicron zones let nsled_agent_zones = { use db::schema::bp_sled_omicron_zones::dsl; @@ -599,22 +755,14 @@ impl DataStore { .await? }; - let nzones_not_in_service = { - use db::schema::bp_omicron_zones_not_in_service::dsl; - diesel::delete( - dsl::bp_omicron_zones_not_in_service - .filter(dsl::blueprint_id.eq(blueprint_id)), - ) - .execute_async(&conn) - .await? - }; - Ok(( nblueprints, + nsled_states, + nsled_physical_disks, + nphysical_disks, nsled_agent_zones, nzones, nnics, - nzones_not_in_service, )) }) .await @@ -628,10 +776,12 @@ impl DataStore { info!(&opctx.log, "removed blueprint"; "blueprint_id" => blueprint_id.to_string(), "nblueprints" => nblueprints, + "nsled_states" => nsled_states, + "nsled_physical_disks" => nsled_physical_disks, + "nphysical_disks" => nphysical_disks, "nsled_agent_zones" => nsled_agent_zones, "nzones" => nzones, "nnics" => nnics, - "nzones_not_in_service" => nzones_not_in_service, ); Ok(()) @@ -776,14 +926,11 @@ impl DataStore { pub async fn blueprint_target_get_current_full( &self, opctx: &OpContext, - ) -> Result, Error> { + ) -> Result<(BlueprintTarget, Blueprint), Error> { opctx.authorize(authz::Action::Read, &authz::BLUEPRINT_CONFIG).await?; let conn = self.pool_connection_authorized(opctx).await?; - let Some(target) = self.blueprint_current_target_only(&conn).await? 
- else { - return Ok(None); - }; + let target = self.blueprint_current_target_only(&conn).await?; // The blueprint for the current target cannot be deleted while it is // the current target, but it's possible someone else (a) made a new @@ -794,14 +941,14 @@ impl DataStore { let authz_blueprint = authz_blueprint_from_id(target.target_id); let blueprint = self.blueprint_read(opctx, &authz_blueprint).await?; - Ok(Some((target, blueprint))) + Ok((target, blueprint)) } /// Get the current target blueprint, if one exists pub async fn blueprint_target_get_current( &self, opctx: &OpContext, - ) -> Result, Error> { + ) -> Result { opctx.authorize(authz::Action::Read, &authz::BLUEPRINT_CONFIG).await?; let conn = self.pool_connection_authorized(opctx).await?; self.blueprint_current_target_only(&conn).await @@ -814,7 +961,7 @@ impl DataStore { async fn blueprint_current_target_only( &self, conn: &async_bb8_diesel::Connection, - ) -> Result, Error> { + ) -> Result { use db::schema::bp_target::dsl; let current_target = dsl::bp_target @@ -824,7 +971,16 @@ impl DataStore { .optional() .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; - Ok(current_target.map(BlueprintTarget::from)) + // We expect a target blueprint to be set on all systems. RSS sets an + // initial blueprint, but we shipped systems before it did so. We added + // target blueprints to those systems via support operations, but let's + // be careful here and return a specific error for this case. + let current_target = + current_target.ok_or_else(|| Error::InternalError { + internal_message: "no target blueprint set".to_string(), + })?; + + Ok(current_target.into()) } } @@ -940,7 +1096,17 @@ impl From for Error { /// AND "parent_blueprint_id" IS NULL /// AND NOT EXISTS (SELECT version FROM current_target) /// ) = 1, -/// , +/// -- Sometime between v22.1.9 and v22.2.19, Cockroach's type checker +/// -- became too smart for our `CAST(... as UUID)` error checking +/// -- gadget: it can infer that `` must be a UUID, so +/// -- then tries to parse 'parent-not-target' and 'no-such-blueprint' +/// -- as UUIDs _during typechecking_, which causes the query to always +/// -- fail. We can defeat this by casting the UUID to text here, which +/// -- will allow the 'parent-not-target' and 'no-such-blueprint' +/// -- sentinels to survive type checking, making it to query execution +/// -- where they will only be cast to UUIDs at runtime in the failure +/// -- cases they're supposed to catch. 
+/// CAST( AS text), /// 'parent-not-target' /// ) /// ) AS UUID) @@ -1104,8 +1270,9 @@ impl QueryFragment for InsertTargetQuery { SELECT version FROM current_target) \ ) = 1, ", ); + out.push_sql(" CAST("); out.push_bind_param::(&self.target_id)?; - out.push_sql(", "); + out.push_sql(" AS text), "); out.push_bind_param::( &PARENT_NOT_TARGET_SENTINEL, )?; @@ -1176,25 +1343,36 @@ mod tests { use nexus_inventory::now_db_precision; use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::blueprint_builder::Ensure; + use nexus_reconfigurator_planning::example::example; use nexus_test_utils::db::test_setup_database; - use nexus_types::deployment::Policy; + use nexus_types::deployment::BlueprintZoneDisposition; + use nexus_types::deployment::BlueprintZoneFilter; + use nexus_types::deployment::PlanningInput; + use nexus_types::deployment::PlanningInputBuilder; + use nexus_types::deployment::SledDetails; + use nexus_types::deployment::SledDisk; + use nexus_types::deployment::SledFilter; use nexus_types::deployment::SledResources; + use nexus_types::external_api::views::PhysicalDiskPolicy; + use nexus_types::external_api::views::PhysicalDiskState; use nexus_types::external_api::views::SledPolicy; - use nexus_types::external_api::views::SledState; use nexus_types::inventory::Collection; use omicron_common::address::Ipv6Subnet; - use omicron_common::api::external::Generation; + use omicron_common::disk::DiskIdentity; use omicron_test_utils::dev; + use omicron_uuid_kinds::PhysicalDiskUuid; + use omicron_uuid_kinds::SledUuid; + use omicron_uuid_kinds::ZpoolUuid; + use once_cell::sync::Lazy; + use pretty_assertions::assert_eq; use rand::thread_rng; use rand::Rng; + use slog::Logger; use std::mem; use std::net::Ipv6Addr; - static EMPTY_POLICY: Policy = Policy { - sleds: BTreeMap::new(), - service_ip_pool_ranges: Vec::new(), - target_nexus_zone_count: 0, - }; + static EMPTY_PLANNING_INPUT: Lazy = + Lazy::new(|| PlanningInputBuilder::empty_input()); // This is a not-super-future-maintainer-friendly helper to check that all // the subtables related to blueprints have been pruned of a specific @@ -1222,7 +1400,6 @@ mod tests { query_count!(blueprint, id), query_count!(bp_omicron_zone, blueprint_id), query_count!(bp_omicron_zone_nic, blueprint_id), - query_count!(bp_omicron_zones_not_in_service, blueprint_id), ] { let count: i64 = result.unwrap(); assert_eq!( @@ -1233,84 +1410,63 @@ mod tests { } } - // Create a fake set of `SledResources`, either with a subnet matching + // Create a fake set of `SledDetails`, either with a subnet matching // `ip` or with an arbitrary one. 
- fn fake_sled_resources(ip: Option) -> SledResources { - use illumos_utils::zpool::ZpoolName; + fn fake_sled_details(ip: Option) -> SledDetails { let zpools = (0..4) - .map(|_| { - let name = ZpoolName::new_external(Uuid::new_v4()).to_string(); - name.parse().unwrap() + .map(|i| { + ( + ZpoolUuid::new_v4(), + SledDisk { + disk_identity: DiskIdentity { + vendor: String::from("v"), + serial: format!("s-{i}"), + model: String::from("m"), + }, + disk_id: PhysicalDiskUuid::new_v4(), + policy: PhysicalDiskPolicy::InService, + state: PhysicalDiskState::Active, + }, + ) }) .collect(); let ip = ip.unwrap_or_else(|| thread_rng().gen::().into()); - SledResources { + let resources = SledResources { zpools, subnet: Ipv6Subnet::new(ip) }; + SledDetails { policy: SledPolicy::provisionable(), state: SledState::Active, - zpools, - subnet: Ipv6Subnet::new(ip), + resources, } } - // Create a `Policy` that contains all the sleds found in `collection` - fn policy_from_collection(collection: &Collection) -> Policy { - Policy { - sleds: collection - .sled_agents - .iter() - .map(|(sled_id, agent)| { - // `Collection` doesn't currently hold zpool names, so - // we'll construct fake resources for each sled. - ( - *sled_id, - fake_sled_resources(Some( - *agent.sled_agent_address.ip(), - )), - ) - }) - .collect(), - service_ip_pool_ranges: Vec::new(), - target_nexus_zone_count: collection - .all_omicron_zones() - .filter(|z| z.zone_type.is_nexus()) - .count(), - } - } + fn representative( + log: &Logger, + test_name: &str, + ) -> (Collection, PlanningInput, Blueprint) { + // We'll start with an example system. + let (mut base_collection, planning_input, mut blueprint) = + example(log, test_name, 3); - fn representative() -> (Collection, Policy, Blueprint) { - // We'll start with a representative collection... + // Take a more thorough collection representative (includes SPs, + // etc.)... let mut collection = nexus_inventory::examples::representative().builder.build(); - // ...and then mutate it such that the omicron zones it reports match - // the sled agent IDs it reports. Steal the sled agent info and drop the - // fake sled-agent IDs: - let mut empty_map = BTreeMap::new(); - mem::swap(&mut empty_map, &mut collection.sled_agents); - let mut sled_agents = empty_map.into_values().collect::>(); - - // Now reinsert them with IDs pulled from the omicron zones. This - // assumes we have more fake sled agents than omicron zones, which is - // currently true for the representative collection. - for &sled_id in collection.omicron_zones.keys() { - let some_sled_agent = sled_agents.pop().expect( - "fewer representative sled agents than \ - representative omicron zones sleds", - ); - collection.sled_agents.insert(sled_id, some_sled_agent); - } + // ... and replace its sled agent and Omicron zones with those from our + // example system. + mem::swap( + &mut collection.sled_agents, + &mut base_collection.sled_agents, + ); + mem::swap( + &mut collection.omicron_zones, + &mut base_collection.omicron_zones, + ); - let policy = policy_from_collection(&collection); - let blueprint = BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - Generation::new(), - &policy, - "test", - ) - .unwrap(); + // Treat this blueprint as the initial blueprint for the system. 
+ blueprint.parent_blueprint_id = None; - (collection, policy, blueprint) + (collection, planning_input, blueprint) } async fn blueprint_list_all_ids( @@ -1333,17 +1489,11 @@ mod tests { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - // Create an empty collection and a blueprint from it - let collection = - nexus_inventory::CollectionBuilder::new("test").build(); - let blueprint1 = BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - Generation::new(), - &EMPTY_POLICY, + // Create an empty blueprint from it + let blueprint1 = BlueprintBuilder::build_empty_with_sleds( + std::iter::empty(), "test", - ) - .unwrap(); + ); let authz_blueprint = authz_blueprint_from_id(blueprint1.id); // Trying to read it from the database should fail with the relevant @@ -1362,7 +1512,7 @@ mod tests { let blueprint_read = datastore .blueprint_read(&opctx, &authz_blueprint) .await - .expect("failed to read collection back"); + .expect("failed to read blueprint back"); assert_eq!(blueprint1, blueprint_read); assert_eq!( blueprint_list_all_ids(&opctx, &datastore).await, @@ -1378,10 +1528,12 @@ mod tests { datastore.blueprint_insert(&opctx, &blueprint1).await.unwrap_err(); assert!(err.to_string().contains("duplicate key")); - // Delete the blueprint and ensure it's really gone. - datastore.blueprint_delete(&opctx, &authz_blueprint).await.unwrap(); - ensure_blueprint_fully_deleted(&datastore, blueprint1.id).await; - assert_eq!(blueprint_list_all_ids(&opctx, &datastore).await, []); + // We could try to test deleting this blueprint, but deletion checks + // that the blueprint being deleted isn't the current target, and we + // haven't set a current target at all as part of this test. Instead of + // going through the motions of creating another blueprint and making it + // the target just to test deletion, we'll end this test here, and rely + // on other tests to check blueprint deletion. // Clean up. db.cleanup().await.unwrap(); @@ -1390,13 +1542,15 @@ mod tests { #[tokio::test] async fn test_representative_blueprint() { + const TEST_NAME: &str = "test_representative_blueprint"; // Setup - let logctx = dev::test_setup_log("test_representative_blueprint"); + let logctx = dev::test_setup_log(TEST_NAME); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; // Create a cohesive representative collection/policy/blueprint - let (collection, mut policy, blueprint1) = representative(); + let (collection, planning_input, blueprint1) = + representative(&logctx.log, TEST_NAME); let authz_blueprint1 = authz_blueprint_from_id(blueprint1.id); // Write it to the database and read it back. @@ -1415,13 +1569,16 @@ mod tests { ); // Check the number of blueprint elements against our collection. - assert_eq!(blueprint1.blueprint_zones.len(), policy.sleds.len()); + assert_eq!( + blueprint1.blueprint_zones.len(), + planning_input.all_sled_ids(SledFilter::Commissioned).count(), + ); assert_eq!( blueprint1.blueprint_zones.len(), collection.omicron_zones.len() ); assert_eq!( - blueprint1.all_omicron_zones().count(), + blueprint1.all_omicron_zones(BlueprintZoneFilter::All).count(), collection.all_omicron_zones().count() ); // All zones should be in service. 
@@ -1441,7 +1598,7 @@ mod tests { .unwrap(); assert_eq!( datastore.blueprint_target_get_current_full(&opctx).await.unwrap(), - Some((bp1_target, blueprint1.clone())) + (bp1_target, blueprint1.clone()) ); let err = datastore .blueprint_delete(&opctx, &authz_blueprint1) @@ -1455,51 +1612,95 @@ mod tests { "unexpected error: {err}" ); - // Add a new sled to `policy`. - let new_sled_id = Uuid::new_v4(); - policy.sleds.insert(new_sled_id, fake_sled_resources(None)); - let new_sled_zpools = &policy.sleds.get(&new_sled_id).unwrap().zpools; + // Add a new sled. + let new_sled_id = SledUuid::new_v4(); - // Create a builder for a child blueprint. While we're at it, use a - // different DNS version to test that that works. + // While we're at it, use a different DNS version to test that that + // works. let new_internal_dns_version = blueprint1.internal_dns_version.next(); let new_external_dns_version = new_internal_dns_version.next(); + let planning_input = { + let mut builder = planning_input.into_builder(); + builder + .add_sled(new_sled_id, fake_sled_details(None)) + .expect("failed to add sled"); + builder.set_internal_dns_version(new_internal_dns_version); + builder.set_external_dns_version(new_external_dns_version); + builder.build() + }; + let new_sled_zpools = + &planning_input.sled_resources(&new_sled_id).unwrap().zpools; + + // Create a builder for a child blueprint. let mut builder = BlueprintBuilder::new_based_on( &logctx.log, &blueprint1, - new_internal_dns_version, - new_external_dns_version, - &policy, + &planning_input, "test", ) .expect("failed to create builder"); + // Ensure disks on our sled + assert_eq!( + builder + .sled_ensure_disks( + new_sled_id, + &planning_input + .sled_resources(&new_sled_id) + .unwrap() + .clone(), + ) + .unwrap(), + Ensure::Added + ); + // Add zones to our new sled. assert_eq!( builder.sled_ensure_zone_ntp(new_sled_id).unwrap(), Ensure::Added ); - for zpool_name in new_sled_zpools { + for zpool_id in new_sled_zpools.keys() { assert_eq!( builder - .sled_ensure_zone_crucible(new_sled_id, zpool_name.clone()) + .sled_ensure_zone_crucible(new_sled_id, *zpool_id) .unwrap(), Ensure::Added ); } - let num_new_sled_zones = 1 + new_sled_zpools.len(); + + let num_new_ntp_zones = 1; + let num_new_crucible_zones = new_sled_zpools.len(); + let num_new_sled_zones = num_new_ntp_zones + num_new_crucible_zones; let blueprint2 = builder.build(); let authz_blueprint2 = authz_blueprint_from_id(blueprint2.id); - // Check that we added the new sled and its zones. + let diff = blueprint2.diff_since_blueprint(&blueprint1); + println!("b1 -> b2: {}", diff.display()); + println!("b1 disks: {:?}", blueprint1.blueprint_disks); + println!("b2 disks: {:?}", blueprint2.blueprint_disks); + // Check that we added the new sled, as well as its disks and zones. + assert_eq!( + blueprint1 + .blueprint_disks + .values() + .map(|c| c.disks.len()) + .sum::() + + new_sled_zpools.len(), + blueprint2 + .blueprint_disks + .values() + .map(|c| c.disks.len()) + .sum::() + ); assert_eq!( blueprint1.blueprint_zones.len() + 1, blueprint2.blueprint_zones.len() ); assert_eq!( - blueprint1.all_omicron_zones().count() + num_new_sled_zones, - blueprint2.all_omicron_zones().count() + blueprint1.all_omicron_zones(BlueprintZoneFilter::All).count() + + num_new_sled_zones, + blueprint2.all_omicron_zones(BlueprintZoneFilter::All).count() ); // All zones should be in service. 
@@ -1515,7 +1716,8 @@ mod tests { .blueprint_read(&opctx, &authz_blueprint2) .await .expect("failed to read collection back"); - println!("diff: {}", blueprint2.diff_sleds(&blueprint_read).display()); + let diff = blueprint_read.diff_since_blueprint(&blueprint2); + println!("diff: {}", diff.display()); assert_eq!(blueprint2, blueprint_read); assert_eq!(blueprint2.internal_dns_version, new_internal_dns_version); assert_eq!(blueprint2.external_dns_version, new_external_dns_version); @@ -1541,7 +1743,7 @@ mod tests { .unwrap(); assert_eq!( datastore.blueprint_target_get_current_full(&opctx).await.unwrap(), - Some((bp2_target, blueprint2.clone())) + (bp2_target, blueprint2.clone()) ); let err = datastore .blueprint_delete(&opctx, &authz_blueprint2) @@ -1597,31 +1799,26 @@ mod tests { )) ); - // There should be no current target still. - assert_eq!( - datastore.blueprint_target_get_current_full(&opctx).await.unwrap(), - None - ); + // There should be no current target; this is never expected in a real + // system, since RSS sets an initial target blueprint, so we should get + // an error. + let err = datastore + .blueprint_target_get_current_full(&opctx) + .await + .unwrap_err(); + assert!(err.to_string().contains("no target blueprint set")); // Create three blueprints: // * `blueprint1` has no parent // * `blueprint2` and `blueprint3` both have `blueprint1` as parent - let collection = - nexus_inventory::CollectionBuilder::new("test").build(); - let blueprint1 = BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - Generation::new(), - &EMPTY_POLICY, + let blueprint1 = BlueprintBuilder::build_empty_with_sleds( + std::iter::empty(), "test1", - ) - .unwrap(); + ); let blueprint2 = BlueprintBuilder::new_based_on( &logctx.log, &blueprint1, - Generation::new(), - Generation::new(), - &EMPTY_POLICY, + &EMPTY_PLANNING_INPUT, "test2", ) .expect("failed to create builder") @@ -1629,9 +1826,7 @@ mod tests { let blueprint3 = BlueprintBuilder::new_based_on( &logctx.log, &blueprint1, - Generation::new(), - Generation::new(), - &EMPTY_POLICY, + &EMPTY_PLANNING_INPUT, "test3", ) .expect("failed to create builder") @@ -1674,11 +1869,14 @@ mod tests { Error::from(InsertTargetError::ParentNotTarget(blueprint2.id)) ); - // There should be no current target still. - assert_eq!( - datastore.blueprint_target_get_current_full(&opctx).await.unwrap(), - None - ); + // There should be no current target; this is never expected in a real + // system, since RSS sets an initial target blueprint, so we should get + // an error. + let err = datastore + .blueprint_target_get_current_full(&opctx) + .await + .unwrap_err(); + assert!(err.to_string().contains("no target blueprint set")); // We should be able to insert blueprint1, which has no parent (matching // the currently-empty `bp_target` table's lack of a target). 
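For orientation, a minimal sketch of the toggle flow the surrounding test exercises. The setter's exact name is not visible in these hunks, so `blueprint_target_set_current_enabled` is an assumption, as is having `datastore` and `opctx` in scope; only the current target accepts the flipped bit, which is why the stale `bp1_target` above can no longer be toggled.

```rust
// Hedged sketch, not from this patch: re-submit the *current* target with the
// `enabled` bit flipped. Submitting a stale (non-current) target is rejected.
let mut target = datastore.blueprint_target_get_current(&opctx).await?;
target.enabled = !target.enabled;
datastore
    .blueprint_target_set_current_enabled(&opctx, target)
    .await?;
```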
@@ -1688,7 +1886,7 @@ mod tests { .unwrap(); assert_eq!( datastore.blueprint_target_get_current_full(&opctx).await.unwrap(), - Some((bp1_target, blueprint1.clone())) + (bp1_target, blueprint1.clone()) ); // Now that blueprint1 is the current target, we should be able to @@ -1699,7 +1897,7 @@ mod tests { .unwrap(); assert_eq!( datastore.blueprint_target_get_current_full(&opctx).await.unwrap(), - Some((bp3_target, blueprint3.clone())) + (bp3_target, blueprint3.clone()) ); // Now that blueprint3 is the target, trying to insert blueprint1 or @@ -1727,9 +1925,7 @@ mod tests { let blueprint4 = BlueprintBuilder::new_based_on( &logctx.log, &blueprint3, - Generation::new(), - Generation::new(), - &EMPTY_POLICY, + &EMPTY_PLANNING_INPUT, "test3", ) .expect("failed to create builder") @@ -1747,7 +1943,7 @@ mod tests { .unwrap(); assert_eq!( datastore.blueprint_target_get_current_full(&opctx).await.unwrap(), - Some((bp4_target, blueprint4)) + (bp4_target, blueprint4) ); // Clean up. @@ -1763,22 +1959,14 @@ mod tests { let (opctx, datastore) = datastore_test(&logctx, &db).await; // Create an initial blueprint and a child. - let collection = - nexus_inventory::CollectionBuilder::new("test").build(); - let blueprint1 = BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - Generation::new(), - &EMPTY_POLICY, + let blueprint1 = BlueprintBuilder::build_empty_with_sleds( + std::iter::empty(), "test1", - ) - .unwrap(); + ); let blueprint2 = BlueprintBuilder::new_based_on( &logctx.log, &blueprint1, - Generation::new(), - Generation::new(), - &EMPTY_POLICY, + &EMPTY_PLANNING_INPUT, "test2", ) .expect("failed to create builder") @@ -1808,7 +1996,7 @@ mod tests { .unwrap(); assert_eq!( datastore.blueprint_target_get_current(&opctx).await.unwrap(), - Some(bp1_target), + bp1_target, ); // We should be able to toggle its enabled status an arbitrary number of @@ -1821,7 +2009,7 @@ mod tests { .unwrap(); assert_eq!( datastore.blueprint_target_get_current(&opctx).await.unwrap(), - Some(bp1_target), + bp1_target, ); } @@ -1842,7 +2030,7 @@ mod tests { .unwrap(); assert_eq!( datastore.blueprint_target_get_current(&opctx).await.unwrap(), - Some(bp2_target), + bp2_target, ); // We can no longer toggle the enabled bit of bp1_target. 
@@ -1863,7 +2051,7 @@ mod tests { .unwrap(); assert_eq!( datastore.blueprint_target_get_current(&opctx).await.unwrap(), - Some(bp2_target), + bp2_target, ); } @@ -1874,7 +2062,7 @@ mod tests { fn assert_all_zones_in_service(blueprint: &Blueprint) { let not_in_service = blueprint - .all_blueprint_zones(BlueprintZoneFilter::All) + .all_omicron_zones(BlueprintZoneFilter::All) .filter(|(_, z)| { z.disposition != BlueprintZoneDisposition::InService }) diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index 2916573322..e1d504761c 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -170,7 +170,7 @@ impl DataStore { opctx.authorize(authz::Action::Modify, authz_instance).await?; opctx.authorize(authz::Action::Modify, authz_disk).await?; - let ok_to_attach_disk_states = vec![ + let ok_to_attach_disk_states = [ api::external::DiskState::Creating, api::external::DiskState::Detached, ]; @@ -311,7 +311,7 @@ impl DataStore { opctx.authorize(authz::Action::Modify, authz_disk).await?; let ok_to_detach_disk_states = - vec![api::external::DiskState::Attached(authz_instance.id())]; + [api::external::DiskState::Attached(authz_instance.id())]; let ok_to_detach_disk_state_labels: Vec<_> = ok_to_detach_disk_states.iter().map(|s| s.label()).collect(); @@ -811,6 +811,22 @@ impl DataStore { .map(|(disk, _, _)| disk) .collect()) } + + pub async fn disk_for_volume_id( + &self, + volume_id: Uuid, + ) -> LookupResult> { + let conn = self.pool_connection_unauthorized().await?; + + use db::schema::disk::dsl; + dsl::disk + .filter(dsl::volume_id.eq(volume_id)) + .select(Disk::as_select()) + .first_async(&*conn) + .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } } #[cfg(test)] diff --git a/nexus/db-queries/src/db/datastore/dns.rs b/nexus/db-queries/src/db/datastore/dns.rs index b12df1875f..6fe524686d 100644 --- a/nexus/db-queries/src/db/datastore/dns.rs +++ b/nexus/db-queries/src/db/datastore/dns.rs @@ -6,6 +6,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; +use crate::db::datastore::SQL_BATCH_SIZE; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::DnsGroup; @@ -21,6 +22,8 @@ use crate::db::TransactionError; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::prelude::*; +use futures::future::BoxFuture; +use futures::FutureExt; use nexus_types::internal_api::params::DnsConfigParams; use nexus_types::internal_api::params::DnsConfigZone; use nexus_types::internal_api::params::DnsRecord; @@ -686,6 +689,52 @@ impl DnsVersionUpdateBuilder { } } +/// Extra interfaces that are not intended for use in Nexus, but useful for +/// testing and `omdb` +pub trait DataStoreDnsTest: Send + Sync { + /// Fetch the DNS configuration for a specific group and version + fn dns_config_read_version<'a>( + &'a self, + opctx: &'a OpContext, + dns_group: DnsGroup, + version: omicron_common::api::external::Generation, + ) -> BoxFuture<'_, Result>; +} + +impl DataStoreDnsTest for DataStore { + fn dns_config_read_version<'a>( + &'a self, + opctx: &'a OpContext, + dns_group: DnsGroup, + version: omicron_common::api::external::Generation, + ) -> BoxFuture<'_, Result> { + async move { + use db::schema::dns_version::dsl; + let dns_version = dsl::dns_version + .filter(dsl::dns_group.eq(dns_group)) + .filter(dsl::version.eq(Generation::from(version))) + 
.select(DnsVersion::as_select()) + .first_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| { + // Technically, we could produce a `NotFound` error here. + // But since this is only for testing, it's okay to produce + // an InternalError. + public_error_from_diesel(e, ErrorHandler::Server) + })?; + + self.dns_config_read_version( + opctx, + &opctx.log, + SQL_BATCH_SIZE, + &dns_version, + ) + .await + } + .boxed() + } +} + #[cfg(test)] mod test { use crate::db::datastore::test_utils::datastore_test; diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 017d2f22d2..c3cd45669f 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -5,6 +5,7 @@ //! [`DataStore`] methods on [`ExternalIp`]s. use super::DataStore; +use super::SQL_BATCH_SIZE; use crate::authz; use crate::authz::ApiResource; use crate::context::OpContext; @@ -24,6 +25,7 @@ use crate::db::model::IncompleteExternalIp; use crate::db::model::IpKind; use crate::db::model::Name; use crate::db::pagination::paginated; +use crate::db::pagination::Paginator; use crate::db::pool::DbConnection; use crate::db::queries::external_ip::NextExternalIp; use crate::db::queries::external_ip::MAX_EXTERNAL_IPS_PER_INSTANCE; @@ -38,18 +40,21 @@ use diesel::prelude::*; use nexus_db_model::FloatingIpUpdate; use nexus_db_model::Instance; use nexus_db_model::IpAttachState; +use nexus_types::deployment::OmicronZoneExternalIp; use nexus_types::identity::Resource; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; -use omicron_common::api::external::NameOrId; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; +use omicron_uuid_kinds::OmicronZoneUuid; use ref_cast::RefCast; +use sled_agent_client::ZoneKind; use std::net::IpAddr; use uuid::Uuid; @@ -78,33 +83,14 @@ impl DataStore { opctx: &OpContext, ip_id: Uuid, probe_id: Uuid, - pool_name: Option, + pool: Option, ) -> CreateResult { - let pool = match pool_name { - Some(NameOrId::Name(name)) => { - let (.., pool) = LookupPath::new(opctx, &self) - .ip_pool_name(&name.into()) - .fetch_for(authz::Action::CreateChild) - .await?; - pool - } - Some(NameOrId::Id(id)) => { - let (.., pool) = LookupPath::new(opctx, &self) - .ip_pool_id(id) - .fetch_for(authz::Action::CreateChild) - .await?; - pool - } - // If no name given, use the default pool - None => { - let (.., pool) = self.ip_pools_fetch_default(&opctx).await?; - pool - } - }; - - let pool_id = pool.identity.id; - let data = - IncompleteExternalIp::for_ephemeral_probe(ip_id, probe_id, pool_id); + let authz_pool = self.resolve_pool_for_allocation(&opctx, pool).await?; + let data = IncompleteExternalIp::for_ephemeral_probe( + ip_id, + probe_id, + authz_pool.id(), + ); self.allocate_external_ip(opctx, data).await } @@ -134,33 +120,9 @@ impl DataStore { // - At most MAX external IPs per instance // Naturally, we now *need* to destroy the ephemeral IP if the newly alloc'd // IP was not attached, including on idempotent success. 
- let pool = match pool { - Some(authz_pool) => { - let (.., pool) = LookupPath::new(opctx, &self) - .ip_pool_id(authz_pool.id()) - // any authenticated user can CreateChild on an IP pool. this is - // meant to represent allocating an IP - .fetch_for(authz::Action::CreateChild) - .await?; - - // If this pool is not linked to the current silo, 404 - // As name resolution happens one layer up, we need to use the *original* - // authz Pool. - if self.ip_pool_fetch_link(opctx, pool.id()).await.is_err() { - return Err(authz_pool.not_found()); - } - pool - } - // If no name given, use the default logic - None => { - let (.., pool) = self.ip_pools_fetch_default(&opctx).await?; - pool - } - }; - - let pool_id = pool.identity.id; - let data = IncompleteExternalIp::for_ephemeral(ip_id, pool_id); + let authz_pool = self.resolve_pool_for_allocation(&opctx, pool).await?; + let data = IncompleteExternalIp::for_ephemeral(ip_id, authz_pool.id()); // We might not be able to acquire a new IP, but in the event of an // idempotent or double attach this failure is allowed. @@ -206,7 +168,7 @@ impl DataStore { } /// Fetch all external IP addresses of any kind for the provided service. - pub async fn service_lookup_external_ips( + pub async fn external_ip_list_service( &self, opctx: &OpContext, service_id: Uuid, @@ -222,42 +184,31 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } - /// Allocates an IP address for internal service usage. - pub async fn allocate_service_ip( - &self, - opctx: &OpContext, - ip_id: Uuid, - name: &Name, - description: &str, - service_id: Uuid, - ) -> CreateResult { - let (.., pool) = self.ip_pools_service_lookup(opctx).await?; - - let data = IncompleteExternalIp::for_service( - ip_id, - name, - description, - service_id, - pool.id(), - ); - self.allocate_external_ip(opctx, data).await - } - - /// Allocates an SNAT IP address for internal service usage. - pub async fn allocate_service_snat_ip( + /// If a pool is specified, make sure it's linked to this silo. If a pool is + /// not specified, fetch the default pool for this silo. Once the pool is + /// resolved (by either method) do an auth check. Then return the pool. + async fn resolve_pool_for_allocation( &self, opctx: &OpContext, - ip_id: Uuid, - service_id: Uuid, - ) -> CreateResult { - let (.., pool) = self.ip_pools_service_lookup(opctx).await?; + pool: Option, + ) -> LookupResult { + let authz_pool = match pool { + Some(authz_pool) => { + self.ip_pool_fetch_link(opctx, authz_pool.id()) + .await + .map_err(|_| authz_pool.not_found())?; - let data = IncompleteExternalIp::for_service_snat( - ip_id, - service_id, - pool.id(), - ); - self.allocate_external_ip(opctx, data).await + authz_pool + } + // If no pool specified, use the default logic + None => { + let (authz_pool, ..) = + self.ip_pools_fetch_default(&opctx).await?; + authz_pool + } + }; + opctx.authorize(authz::Action::CreateChild, &authz_pool).await?; + Ok(authz_pool) } /// Allocates a floating IP address for instance usage. @@ -271,29 +222,7 @@ impl DataStore { ) -> CreateResult { let ip_id = Uuid::new_v4(); - // This implements the same pattern as in `allocate_instance_ephemeral_ip` to - // check that a chosen pool is valid from within the current silo. 
- let pool = match pool { - Some(authz_pool) => { - let (.., pool) = LookupPath::new(opctx, &self) - .ip_pool_id(authz_pool.id()) - .fetch_for(authz::Action::CreateChild) - .await?; - - if self.ip_pool_fetch_link(opctx, pool.id()).await.is_err() { - return Err(authz_pool.not_found()); - } - - pool - } - // If no name given, use the default logic - None => { - let (.., pool) = self.ip_pools_fetch_default(&opctx).await?; - pool - } - }; - - let pool_id = pool.id(); + let authz_pool = self.resolve_pool_for_allocation(&opctx, pool).await?; let data = if let Some(ip) = ip { IncompleteExternalIp::for_floating_explicit( @@ -302,7 +231,7 @@ impl DataStore { &identity.description, project_id, ip, - pool_id, + authz_pool.id(), ) } else { IncompleteExternalIp::for_floating( @@ -310,7 +239,7 @@ impl DataStore { &Name(identity.name), &identity.description, project_id, - pool_id, + authz_pool.id(), ) }; @@ -380,54 +309,67 @@ impl DataStore { }) } - /// Allocates an explicit Floating IP address for an internal service. - /// - /// Unlike the other IP allocation requests, this does not search for an - /// available IP address, it asks for one explicitly. - pub async fn allocate_explicit_service_ip( + /// Allocates an explicit IP address for an Omicron zone. + pub async fn external_ip_allocate_omicron_zone( &self, opctx: &OpContext, - ip_id: Uuid, - name: &Name, - description: &str, - service_id: Uuid, - ip: IpAddr, + zone_id: OmicronZoneUuid, + zone_kind: ZoneKind, + external_ip: OmicronZoneExternalIp, ) -> CreateResult { let (authz_pool, pool) = self.ip_pools_service_lookup(opctx).await?; opctx.authorize(authz::Action::CreateChild, &authz_pool).await?; - let data = IncompleteExternalIp::for_service_explicit( - ip_id, - name, - description, - service_id, + let data = IncompleteExternalIp::for_omicron_zone( pool.id(), - ip, + external_ip, + zone_id, + zone_kind, ); self.allocate_external_ip(opctx, data).await } - /// Allocates an explicit SNAT IP address for an internal service. + /// List one page of all external IPs allocated to internal services + pub async fn external_ip_list_service_all( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec { + use db::schema::external_ip::dsl; + + let (authz_pool, _pool) = self.ip_pools_service_lookup(opctx).await?; + opctx.authorize(authz::Action::ListChildren, &authz_pool).await?; + + paginated(dsl::external_ip, dsl::id, pagparams) + .filter(dsl::is_service) + .filter(dsl::time_deleted.is_null()) + .select(ExternalIp::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// List all external IPs allocated to internal services, making as many + /// queries as needed to get them all /// - /// Unlike the other IP allocation requests, this does not search for an - /// available IP address, it asks for one explicitly. - pub async fn allocate_explicit_service_snat_ip( + /// This should generally not be used in API handlers or other + /// latency-sensitive contexts, but it can make sense in saga actions or + /// background tasks. 
+ pub async fn external_ip_list_service_all_batched( &self, opctx: &OpContext, - ip_id: Uuid, - service_id: Uuid, - ip: IpAddr, - port_range: (u16, u16), - ) -> CreateResult { - let (authz_pool, pool) = self.ip_pools_service_lookup(opctx).await?; - opctx.authorize(authz::Action::CreateChild, &authz_pool).await?; - let data = IncompleteExternalIp::for_service_explicit_snat( - ip_id, - service_id, - pool.id(), - ip, - port_range, - ); - self.allocate_external_ip(opctx, data).await + ) -> ListResultVec { + opctx.check_complex_operations_allowed()?; + + let mut all_ips = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = self + .external_ip_list_service_all(opctx, &p.current_pagparams()) + .await?; + paginator = p.found_batch(&batch, &|ip: &ExternalIp| ip.id); + all_ips.extend(batch); + } + Ok(all_ips) } /// Attempt to move a target external IP from detached to attaching, @@ -543,7 +485,7 @@ impl DataStore { attach will be safe to retry once start/stop completes" )), state if SAFE_TO_ATTACH_INSTANCE_STATES.contains(&state) => { - if attached_count >= MAX_EXTERNAL_IPS_PLUS_SNAT as i64 { + if attached_count >= i64::from(MAX_EXTERNAL_IPS_PLUS_SNAT) { Error::invalid_request(&format!( "an instance may not have more than \ {MAX_EXTERNAL_IPS_PER_INSTANCE} external IP addresses", @@ -1163,3 +1105,125 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::db::datastore::test_utils::datastore_test; + use nexus_test_utils::db::test_setup_database; + use nexus_types::deployment::OmicronZoneExternalFloatingIp; + use nexus_types::deployment::OmicronZoneExternalSnatIp; + use nexus_types::external_api::shared::IpRange; + use nexus_types::inventory::SourceNatConfig; + use omicron_common::address::NUM_SOURCE_NAT_PORTS; + use omicron_test_utils::dev; + use omicron_uuid_kinds::ExternalIpUuid; + use std::collections::BTreeSet; + use std::net::Ipv4Addr; + + async fn read_all_service_ips( + datastore: &DataStore, + opctx: &OpContext, + ) -> Vec { + let all_batched = datastore + .external_ip_list_service_all_batched(opctx) + .await + .expect("failed to fetch all service IPs batched"); + let all_paginated = datastore + .external_ip_list_service_all(opctx, &DataPageParams::max_page()) + .await + .expect("failed to fetch all service IPs paginated"); + assert_eq!(all_batched, all_paginated); + all_batched + } + + #[tokio::test] + async fn test_service_ip_list() { + usdt::register_probes().unwrap(); + let logctx = dev::test_setup_log("test_service_ip_list"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // No IPs, to start + let ips = read_all_service_ips(&datastore, &opctx).await; + assert_eq!(ips, vec![]); + + // Set up service IP pool range + let ip_range = IpRange::try_from(( + Ipv4Addr::new(10, 0, 0, 1), + Ipv4Addr::new(10, 0, 0, 10), + )) + .unwrap(); + let (service_ip_pool, _) = datastore + .ip_pools_service_lookup(&opctx) + .await + .expect("lookup service ip pool"); + datastore + .ip_pool_add_range(&opctx, &service_ip_pool, &ip_range) + .await + .expect("add range to service ip pool"); + + // Allocate a bunch of fake service IPs. 
+ let mut external_ips = Vec::new(); + let mut allocate_snat = false; // flip-flop between regular and snat + for ip in ip_range.iter() { + let external_ip = if allocate_snat { + OmicronZoneExternalIp::Snat(OmicronZoneExternalSnatIp { + id: ExternalIpUuid::new_v4(), + snat_cfg: SourceNatConfig::new( + ip, + 0, + NUM_SOURCE_NAT_PORTS - 1, + ) + .unwrap(), + }) + } else { + OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip, + }) + }; + let external_ip = datastore + .external_ip_allocate_omicron_zone( + &opctx, + OmicronZoneUuid::new_v4(), + ZoneKind::Nexus, + external_ip, + ) + .await + .expect("failed to allocate service IP"); + external_ips.push(external_ip); + allocate_snat = !allocate_snat; + } + external_ips.sort_by_key(|ip| ip.id); + + // Ensure we see them all. + let ips = read_all_service_ips(&datastore, &opctx).await; + assert_eq!(ips, external_ips); + + // Deallocate a few, and ensure we don't see them anymore. + let mut removed_ip_ids = BTreeSet::new(); + for (i, external_ip) in external_ips.iter().enumerate() { + if i % 3 == 0 { + let id = external_ip.id; + datastore + .deallocate_external_ip(&opctx, id) + .await + .expect("failed to deallocate IP"); + removed_ip_ids.insert(id); + } + } + + // Check that we removed at least one, then prune them from our list of + // expected IPs. + assert!(!removed_ip_ids.is_empty()); + external_ips.retain(|ip| !removed_ip_ids.contains(&ip.id)); + + // Ensure we see them all remaining IPs. + let ips = read_all_service_ips(&datastore, &opctx).await; + assert_eq!(ips, external_ips); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } +} diff --git a/nexus/db-queries/src/db/datastore/instance.rs b/nexus/db-queries/src/db/datastore/instance.rs index acea7bb4e3..ce40e20501 100644 --- a/nexus/db-queries/src/db/datastore/instance.rs +++ b/nexus/db-queries/src/db/datastore/instance.rs @@ -22,6 +22,7 @@ use crate::db::model::Instance; use crate::db::model::InstanceRuntimeState; use crate::db::model::Name; use crate::db::model::Project; +use crate::db::model::Sled; use crate::db::model::Vmm; use crate::db::pagination::paginated; use crate::db::update_and_check::UpdateAndCheck; @@ -29,11 +30,14 @@ use crate::db::update_and_check::UpdateStatus; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; +use nexus_db_model::ApplySledFilterExt; use nexus_db_model::Disk; use nexus_db_model::VmmRuntimeState; +use nexus_types::deployment::SledFilter; use omicron_common::api; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; @@ -385,6 +389,58 @@ impl DataStore { Ok((instance_updated, vmm_updated)) } + /// Lists all instances on in-service sleds with active Propolis VMM + /// processes, returning the instance along with the VMM on which it's + /// running, the sled on which the VMM is running, and the project that owns + /// the instance. + /// + /// The query performed by this function is paginated by the sled's UUID. 
+ pub async fn instance_and_vmm_list_by_sled_agent( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec<(Sled, Instance, Vmm, Project)> { + use crate::db::schema::{ + instance::dsl as instance_dsl, project::dsl as project_dsl, + sled::dsl as sled_dsl, vmm::dsl as vmm_dsl, + }; + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + let conn = self.pool_connection_authorized(opctx).await?; + + let result = paginated(sled_dsl::sled, sled_dsl::id, pagparams) + .filter(sled_dsl::time_deleted.is_null()) + .sled_filter(SledFilter::InService) + .inner_join( + vmm_dsl::vmm + .on(vmm_dsl::sled_id + .eq(sled_dsl::id) + .and(vmm_dsl::time_deleted.is_null())) + .inner_join( + instance_dsl::instance + .on(instance_dsl::id + .eq(vmm_dsl::instance_id) + .and(instance_dsl::time_deleted.is_null())) + .inner_join( + project_dsl::project.on(project_dsl::id + .eq(instance_dsl::project_id) + .and(project_dsl::time_deleted.is_null())), + ), + ), + ) + .sled_filter(SledFilter::InService) + .select(( + Sled::as_select(), + Instance::as_select(), + Vmm::as_select(), + Project::as_select(), + )) + .load_async::<(Sled, Instance, Vmm, Project)>(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(result) + } + pub async fn project_delete_instance( &self, opctx: &OpContext, @@ -407,7 +463,7 @@ impl DataStore { let detached_label = api::external::DiskState::Detached.label(); let ok_to_detach_disk_states = - vec![api::external::DiskState::Attached(authz_instance.id())]; + [api::external::DiskState::Attached(authz_instance.id())]; let ok_to_detach_disk_state_labels: Vec<_> = ok_to_detach_disk_states.iter().map(|s| s.label()).collect(); diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index 1a6d59337c..6faa8ea251 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -28,6 +28,7 @@ use diesel::QueryDsl; use diesel::Table; use futures::future::BoxFuture; use futures::FutureExt; +use nexus_db_model::to_db_typed_uuid; use nexus_db_model::CabooseWhichEnum; use nexus_db_model::HwBaseboardId; use nexus_db_model::HwPowerState; @@ -62,6 +63,9 @@ use omicron_common::api::external::InternalContext; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::bail_unless; +use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::num::NonZeroU32; @@ -92,7 +96,8 @@ impl DataStore { // It's helpful to assemble some values before entering the transaction // so that we can produce the `Error` type that we want here. 
let row_collection = InvCollection::from(collection); - let collection_id = row_collection.id; + let collection_id = row_collection.id(); + let db_collection_id = to_db_typed_uuid(collection_id); let baseboards = collection .baseboards .iter() @@ -289,7 +294,8 @@ impl DataStore { for (baseboard_id, sp) in &collection.sps { let selection = db::schema::hw_baseboard_id::table .select(( - collection_id.into_sql::(), + db_collection_id + .into_sql::(), baseboard_dsl::id, sp.time_collected .into_sql::(), @@ -372,7 +378,8 @@ impl DataStore { for (baseboard_id, rot) in &collection.rots { let selection = db::schema::hw_baseboard_id::table .select(( - collection_id.into_sql::(), + db_collection_id + .into_sql::(), baseboard_dsl::id, rot.time_collected .into_sql::(), @@ -546,7 +553,8 @@ impl DataStore { .select(( dsl_baseboard_id::id, dsl_sw_caboose::id, - collection_id.into_sql::(), + db_collection_id + .into_sql::(), found_caboose .time_collected .into_sql::(), @@ -623,7 +631,8 @@ impl DataStore { .select(( dsl_baseboard_id::id, dsl_sw_rot_page::id, - collection_id.into_sql::(), + db_collection_id + .into_sql::(), found_rot_page .time_collected .into_sql::(), @@ -718,7 +727,8 @@ impl DataStore { ); let selection = db::schema::hw_baseboard_id::table .select(( - collection_id.into_sql::(), + db_collection_id + .into_sql::(), sled_agent .time_collected .into_sql::(), @@ -726,8 +736,7 @@ impl DataStore { .source .clone() .into_sql::(), - sled_agent - .sled_id + (sled_agent.sled_id.into_untyped_uuid()) .into_sql::(), baseboard_dsl::id.nullable(), nexus_db_model::ipv6::Ipv6Addr::from( @@ -954,7 +963,7 @@ impl DataStore { &self, opctx: &OpContext, nkeep: u32, - ) -> Result, Error> { + ) -> Result, Error> { let conn = self.pool_connection_authorized(opctx).await?; // Diesel requires us to use aliases in order to refer to the // `inv_collection` table twice in the same query. @@ -1049,8 +1058,8 @@ impl DataStore { .rev() .find(|(_i, (_collection_id, nerrors))| *nerrors == 0); let candidate = match last_completed_idx { - Some((0, _)) => candidates.iter().skip(1).next(), - _ => candidates.iter().next(), + Some((0, _)) => candidates.get(1), + _ => candidates.first(), } .map(|(collection_id, _nerrors)| *collection_id); if let Some(c) = candidate { @@ -1067,7 +1076,7 @@ impl DataStore { "candidates" => ?candidates, ); } - Ok(candidate) + Ok(candidate.map(CollectionUuid::from_untyped_uuid)) } /// Removes an inventory collection from the database @@ -1076,7 +1085,7 @@ impl DataStore { async fn inventory_delete_collection( &self, opctx: &OpContext, - collection_id: Uuid, + collection_id: CollectionUuid, ) -> Result<(), Error> { // As with inserting a whole collection, we remove it in one big // transaction for simplicity. Similar considerations apply. We could @@ -1085,6 +1094,7 @@ impl DataStore { // start removing it and we'd also need to make sure we didn't leak a // collection if we crash while deleting it. let conn = self.pool_connection_authorized(opctx).await?; + let db_collection_id = to_db_typed_uuid(collection_id); let ( ncollections, nsps, @@ -1104,129 +1114,130 @@ impl DataStore { let ncollections = { use db::schema::inv_collection::dsl; diesel::delete( - dsl::inv_collection.filter(dsl::id.eq(collection_id)), + dsl::inv_collection + .filter(dsl::id.eq(db_collection_id)), ) .execute_async(&conn) .await? }; // Remove rows for service processors. 
- let nsps = { - use db::schema::inv_service_processor::dsl; - diesel::delete( - dsl::inv_service_processor - .filter(dsl::inv_collection_id.eq(collection_id)), - ) - .execute_async(&conn) - .await? - }; + let nsps = + { + use db::schema::inv_service_processor::dsl; + diesel::delete(dsl::inv_service_processor.filter( + dsl::inv_collection_id.eq(db_collection_id), + )) + .execute_async(&conn) + .await? + }; // Remove rows for roots of trust. - let nrots = { - use db::schema::inv_root_of_trust::dsl; - diesel::delete( - dsl::inv_root_of_trust - .filter(dsl::inv_collection_id.eq(collection_id)), - ) - .execute_async(&conn) - .await? - }; + let nrots = + { + use db::schema::inv_root_of_trust::dsl; + diesel::delete(dsl::inv_root_of_trust.filter( + dsl::inv_collection_id.eq(db_collection_id), + )) + .execute_async(&conn) + .await? + }; // Remove rows for cabooses found. - let ncabooses = { - use db::schema::inv_caboose::dsl; - diesel::delete( - dsl::inv_caboose - .filter(dsl::inv_collection_id.eq(collection_id)), - ) - .execute_async(&conn) - .await? - }; + let ncabooses = + { + use db::schema::inv_caboose::dsl; + diesel::delete(dsl::inv_caboose.filter( + dsl::inv_collection_id.eq(db_collection_id), + )) + .execute_async(&conn) + .await? + }; // Remove rows for root of trust pages found. - let nrot_pages = { - use db::schema::inv_root_of_trust_page::dsl; - diesel::delete( - dsl::inv_root_of_trust_page - .filter(dsl::inv_collection_id.eq(collection_id)), - ) - .execute_async(&conn) - .await? - }; + let nrot_pages = + { + use db::schema::inv_root_of_trust_page::dsl; + diesel::delete(dsl::inv_root_of_trust_page.filter( + dsl::inv_collection_id.eq(db_collection_id), + )) + .execute_async(&conn) + .await? + }; // Remove rows for sled agents found. - let nsled_agents = { - use db::schema::inv_sled_agent::dsl; - diesel::delete( - dsl::inv_sled_agent - .filter(dsl::inv_collection_id.eq(collection_id)), - ) - .execute_async(&conn) - .await? - }; + let nsled_agents = + { + use db::schema::inv_sled_agent::dsl; + diesel::delete(dsl::inv_sled_agent.filter( + dsl::inv_collection_id.eq(db_collection_id), + )) + .execute_async(&conn) + .await? + }; // Remove rows for physical disks found. - let nphysical_disks = { - use db::schema::inv_physical_disk::dsl; - diesel::delete( - dsl::inv_physical_disk - .filter(dsl::inv_collection_id.eq(collection_id)), - ) - .execute_async(&conn) - .await? - }; + let nphysical_disks = + { + use db::schema::inv_physical_disk::dsl; + diesel::delete(dsl::inv_physical_disk.filter( + dsl::inv_collection_id.eq(db_collection_id), + )) + .execute_async(&conn) + .await? + }; // Remove rows associated with Omicron zones - let nsled_agent_zones = { - use db::schema::inv_sled_omicron_zones::dsl; - diesel::delete( - dsl::inv_sled_omicron_zones - .filter(dsl::inv_collection_id.eq(collection_id)), - ) - .execute_async(&conn) - .await? - }; + let nsled_agent_zones = + { + use db::schema::inv_sled_omicron_zones::dsl; + diesel::delete(dsl::inv_sled_omicron_zones.filter( + dsl::inv_collection_id.eq(db_collection_id), + )) + .execute_async(&conn) + .await? + }; - let nzones = { - use db::schema::inv_omicron_zone::dsl; - diesel::delete( - dsl::inv_omicron_zone - .filter(dsl::inv_collection_id.eq(collection_id)), - ) - .execute_async(&conn) - .await? - }; + let nzones = + { + use db::schema::inv_omicron_zone::dsl; + diesel::delete(dsl::inv_omicron_zone.filter( + dsl::inv_collection_id.eq(db_collection_id), + )) + .execute_async(&conn) + .await? 
+ }; - let nnics = { - use db::schema::inv_omicron_zone_nic::dsl; - diesel::delete( - dsl::inv_omicron_zone_nic - .filter(dsl::inv_collection_id.eq(collection_id)), - ) - .execute_async(&conn) - .await? - }; + let nnics = + { + use db::schema::inv_omicron_zone_nic::dsl; + diesel::delete(dsl::inv_omicron_zone_nic.filter( + dsl::inv_collection_id.eq(db_collection_id), + )) + .execute_async(&conn) + .await? + }; - let nzpools = { - use db::schema::inv_zpool::dsl; - diesel::delete( - dsl::inv_zpool - .filter(dsl::inv_collection_id.eq(collection_id)), - ) - .execute_async(&conn) - .await? - }; + let nzpools = + { + use db::schema::inv_zpool::dsl; + diesel::delete(dsl::inv_zpool.filter( + dsl::inv_collection_id.eq(db_collection_id), + )) + .execute_async(&conn) + .await? + }; // Remove rows for errors encountered. - let nerrors = { - use db::schema::inv_collection_error::dsl; - diesel::delete( - dsl::inv_collection_error - .filter(dsl::inv_collection_id.eq(collection_id)), - ) - .execute_async(&conn) - .await? - }; + let nerrors = + { + use db::schema::inv_collection_error::dsl; + diesel::delete(dsl::inv_collection_error.filter( + dsl::inv_collection_id.eq(db_collection_id), + )) + .execute_async(&conn) + .await? + }; Ok(( ncollections, @@ -1316,14 +1327,20 @@ impl DataStore { return Ok(None); }; - Ok(Some(self.inventory_collection_read(opctx, collection_id).await?)) + Ok(Some( + self.inventory_collection_read( + opctx, + CollectionUuid::from_untyped_uuid(collection_id), + ) + .await?, + )) } /// Attempt to read the current collection pub async fn inventory_collection_read( &self, opctx: &OpContext, - id: Uuid, + id: CollectionUuid, ) -> Result { self.inventory_collection_read_batched(opctx, id, SQL_BATCH_SIZE).await } @@ -1343,15 +1360,16 @@ impl DataStore { async fn inventory_collection_read_batched( &self, opctx: &OpContext, - id: Uuid, + id: CollectionUuid, batch_size: NonZeroU32, ) -> Result { let conn = self.pool_connection_authorized(opctx).await?; + let db_id = to_db_typed_uuid(id); let (time_started, time_done, collector) = { use db::schema::inv_collection::dsl; let collections = dsl::inv_collection - .filter(dsl::id.eq(id)) + .filter(dsl::id.eq(db_id)) .limit(2) .select(InvCollection::as_select()) .load_async(&*conn) @@ -1378,7 +1396,7 @@ impl DataStore { dsl::idx, &p.current_pagparams(), ) - .filter(dsl::inv_collection_id.eq(id)) + .filter(dsl::inv_collection_id.eq(db_id)) .order_by(dsl::idx) .select(InvCollectionError::as_select()) .load_async(&*conn) @@ -1405,7 +1423,7 @@ impl DataStore { dsl::hw_baseboard_id, &p.current_pagparams(), ) - .filter(dsl::inv_collection_id.eq(id)) + .filter(dsl::inv_collection_id.eq(db_id)) .select(InvServiceProcessor::as_select()) .load_async(&*conn) .await @@ -1436,7 +1454,7 @@ impl DataStore { dsl::hw_baseboard_id, &p.current_pagparams(), ) - .filter(dsl::inv_collection_id.eq(id)) + .filter(dsl::inv_collection_id.eq(db_id)) .select(InvRootOfTrust::as_select()) .load_async(&*conn) .await @@ -1467,7 +1485,7 @@ impl DataStore { dsl::sled_id, &p.current_pagparams(), ) - .filter(dsl::inv_collection_id.eq(id)) + .filter(dsl::inv_collection_id.eq(db_id)) .select(InvSledAgent::as_select()) .load_async(&*conn) .await @@ -1499,7 +1517,7 @@ impl DataStore { (dsl::sled_id, dsl::slot), &p.current_pagparams(), ) - .filter(dsl::inv_collection_id.eq(id)) + .filter(dsl::inv_collection_id.eq(db_id)) .select(InvPhysicalDisk::as_select()) .load_async(&*conn) .await @@ -1509,7 +1527,10 @@ impl DataStore { paginator = p.found_batch(&batch, &|row| (row.sled_id, row.slot)); 
for disk in batch { - disks.entry(disk.sled_id).or_default().push(disk.into()); + disks + .entry(disk.sled_id.into_untyped_uuid()) + .or_default() + .push(disk.into()); } } disks @@ -1528,7 +1549,7 @@ impl DataStore { (dsl::sled_id, dsl::id), &p.current_pagparams(), ) - .filter(dsl::inv_collection_id.eq(id)) + .filter(dsl::inv_collection_id.eq(db_id)) .select(InvZpool::as_select()) .load_async(&*conn) .await @@ -1537,7 +1558,10 @@ impl DataStore { })?; paginator = p.found_batch(&batch, &|row| (row.sled_id, row.id)); for zpool in batch { - zpools.entry(zpool.sled_id).or_default().push(zpool.into()); + zpools + .entry(zpool.sled_id.into_untyped_uuid()) + .or_default() + .push(zpool.into()); } } zpools @@ -1610,57 +1634,54 @@ impl DataStore { }) }) .collect::, _>>()?; - let sled_agents: BTreeMap<_, _> = - sled_agent_rows - .into_iter() - .map(|s: InvSledAgent| { - let sled_id = s.sled_id; - let baseboard_id = s - .hw_baseboard_id - .map(|id| { - baseboards_by_id.get(&id).cloned().ok_or_else( - || { - Error::internal_error( + let sled_agents: BTreeMap<_, _> = sled_agent_rows + .into_iter() + .map(|s: InvSledAgent| { + let sled_id = SledUuid::from(s.sled_id); + let baseboard_id = s + .hw_baseboard_id + .map(|id| { + baseboards_by_id.get(&id).cloned().ok_or_else(|| { + Error::internal_error( "missing baseboard that we should have fetched", ) - }, - ) }) - .transpose()?; - let sled_agent = nexus_types::inventory::SledAgent { - time_collected: s.time_collected, - source: s.source, - sled_id, - baseboard_id, - sled_agent_address: std::net::SocketAddrV6::new( - std::net::Ipv6Addr::from(s.sled_agent_ip), - u16::from(s.sled_agent_port), - 0, - 0, - ), - sled_role: nexus_types::inventory::SledRole::from( - s.sled_role, - ), - usable_hardware_threads: u32::from( - s.usable_hardware_threads, - ), - usable_physical_ram: s.usable_physical_ram.into(), - reservoir_size: s.reservoir_size.into(), - disks: physical_disks - .get(&sled_id) - .map(|disks| disks.to_vec()) - .unwrap_or_default(), - zpools: zpools - .get(&sled_id) - .map(|zpools| zpools.to_vec()) - .unwrap_or_default(), - }; - Ok((sled_id, sled_agent)) - }) - .collect::, - Error, - >>()?; + }) + .transpose()?; + let sled_agent = nexus_types::inventory::SledAgent { + time_collected: s.time_collected, + source: s.source, + sled_id, + baseboard_id, + sled_agent_address: std::net::SocketAddrV6::new( + std::net::Ipv6Addr::from(s.sled_agent_ip), + u16::from(s.sled_agent_port), + 0, + 0, + ), + sled_role: nexus_types::inventory::SledRole::from( + s.sled_role, + ), + usable_hardware_threads: u32::from( + s.usable_hardware_threads, + ), + usable_physical_ram: s.usable_physical_ram.into(), + reservoir_size: s.reservoir_size.into(), + disks: physical_disks + .get(sled_id.as_untyped_uuid()) + .map(|disks| disks.to_vec()) + .unwrap_or_default(), + zpools: zpools + .get(sled_id.as_untyped_uuid()) + .map(|zpools| zpools.to_vec()) + .unwrap_or_default(), + }; + Ok((sled_id, sled_agent)) + }) + .collect::, + Error, + >>()?; // Fetch records of cabooses found. 
let inv_caboose_rows = { @@ -1675,7 +1696,7 @@ impl DataStore { (dsl::hw_baseboard_id, dsl::which), &p.current_pagparams(), ) - .filter(dsl::inv_collection_id.eq(id)) + .filter(dsl::inv_collection_id.eq(db_id)) .select(InvCaboose::as_select()) .load_async(&*conn) .await @@ -1777,7 +1798,7 @@ impl DataStore { (dsl::hw_baseboard_id, dsl::which), &p.current_pagparams(), ) - .filter(dsl::inv_collection_id.eq(id)) + .filter(dsl::inv_collection_id.eq(db_id)) .select(InvRotPage::as_select()) .load_async(&*conn) .await @@ -1879,7 +1900,7 @@ impl DataStore { // number. We'll assemble these directly into the data structure we're // trying to build, which maps sled ids to objects describing the zones // found on each sled. - let mut omicron_zones: BTreeMap<_, _> = { + let mut omicron_zones: BTreeMap = { use db::schema::inv_sled_omicron_zones::dsl; let mut zones = BTreeMap::new(); @@ -1891,7 +1912,7 @@ impl DataStore { dsl::sled_id, &p.current_pagparams(), ) - .filter(dsl::inv_collection_id.eq(id)) + .filter(dsl::inv_collection_id.eq(db_id)) .select(InvSledOmicronZones::as_select()) .load_async(&*conn) .await @@ -1901,7 +1922,7 @@ impl DataStore { paginator = p.found_batch(&batch, &|row| row.sled_id); zones.extend(batch.into_iter().map(|sled_zones_config| { ( - sled_zones_config.sled_id, + sled_zones_config.sled_id.into(), sled_zones_config.into_uninit_zones_found(), ) })) @@ -1927,7 +1948,7 @@ impl DataStore { dsl::id, &p.current_pagparams(), ) - .filter(dsl::inv_collection_id.eq(id)) + .filter(dsl::inv_collection_id.eq(db_id)) .select(InvOmicronZoneNic::as_select()) .load_async(&*conn) .await @@ -1956,7 +1977,7 @@ impl DataStore { dsl::id, &p.current_pagparams(), ) - .filter(dsl::inv_collection_id.eq(id)) + .filter(dsl::inv_collection_id.eq(db_id)) // It's not strictly necessary to order these by id. Doing so // ensures a consistent representation for `Collection`, which // makes testing easier. It's already indexed to do this, too. @@ -1990,16 +2011,17 @@ impl DataStore { }) }) .transpose()?; - let map = omicron_zones.get_mut(&z.sled_id).ok_or_else(|| { - // This error means that we found a row in inv_omicron_zone with - // no associated record in inv_sled_omicron_zones. This should - // be impossible and reflects either a bug or database - // corruption. - Error::internal_error(&format!( - "zone {:?}: unknown sled: {:?}", - z.id, z.sled_id - )) - })?; + let map = + omicron_zones.get_mut(&z.sled_id.into()).ok_or_else(|| { + // This error means that we found a row in inv_omicron_zone + // with no associated record in inv_sled_omicron_zones. + // This should be impossible and reflects either a bug or + // database corruption. + Error::internal_error(&format!( + "zone {:?}: unknown sled: {:?}", + z.id, z.sled_id + )) + })?; let zone_id = z.id; let zone = z .into_omicron_zone_config(nic_row) @@ -2043,11 +2065,15 @@ pub trait DataStoreInventoryTest: Send + Sync { /// List all collections /// /// This does not paginate. 
- fn inventory_collections(&self) -> BoxFuture>>; + fn inventory_collections( + &self, + ) -> BoxFuture>>; } impl DataStoreInventoryTest for DataStore { - fn inventory_collections(&self) -> BoxFuture>> { + fn inventory_collections( + &self, + ) -> BoxFuture>> { async { let conn = self .pool_connection_for_tests() @@ -2059,12 +2085,14 @@ impl DataStoreInventoryTest for DataStore { .context("failed to allow table scan")?; use db::schema::inv_collection::dsl; - dsl::inv_collection - .select(dsl::id) + let collections = dsl::inv_collection + .select(InvCollection::as_select()) .order_by(dsl::time_started) .load_async(&conn) .await - .context("failed to list collections") + .context("failed to list collections")?; + + Ok(collections) }) .await } @@ -2093,6 +2121,7 @@ mod test { use nexus_types::inventory::RotPageWhich; use omicron_common::api::external::Error; use omicron_test_utils::dev; + use omicron_uuid_kinds::CollectionUuid; use pretty_assertions::assert_eq; use std::num::NonZeroU32; @@ -2355,7 +2384,13 @@ mod test { // `collection1`, which _is_ the only one with no errors. So we should // get back `collection2`. assert_eq!( - datastore.inventory_collections().await.unwrap(), + &datastore + .inventory_collections() + .await + .unwrap() + .iter() + .map(|c| c.id.into()) + .collect::>(), &[ collection1.id, collection2.id, @@ -2379,7 +2414,13 @@ mod test { .await .expect("failed to prune collections"); assert_eq!( - datastore.inventory_collections().await.unwrap(), + datastore + .inventory_collections() + .await + .unwrap() + .iter() + .map(|c| c.id.into()) + .collect::>(), &[collection1.id, collection3.id, collection4.id, collection5.id,] ); // Again, we should skip over collection1 and delete the next oldest: @@ -2389,7 +2430,13 @@ mod test { .await .expect("failed to prune collections"); assert_eq!( - datastore.inventory_collections().await.unwrap(), + datastore + .inventory_collections() + .await + .unwrap() + .iter() + .map(|c| c.id.into()) + .collect::>(), &[collection1.id, collection4.id, collection5.id,] ); // At this point, if we're keeping 3, we don't need to prune anything. @@ -2398,7 +2445,13 @@ mod test { .await .expect("failed to prune collections"); assert_eq!( - datastore.inventory_collections().await.unwrap(), + datastore + .inventory_collections() + .await + .unwrap() + .iter() + .map(|c| c.id.into()) + .collect::>(), &[collection1.id, collection4.id, collection5.id,] ); @@ -2415,7 +2468,13 @@ mod test { .await .expect("failed to insert collection"); assert_eq!( - datastore.inventory_collections().await.unwrap(), + datastore + .inventory_collections() + .await + .unwrap() + .iter() + .map(|c| c.id.into()) + .collect::>(), &[collection1.id, collection4.id, collection5.id, collection6.id,] ); datastore @@ -2423,7 +2482,13 @@ mod test { .await .expect("failed to prune collections"); assert_eq!( - datastore.inventory_collections().await.unwrap(), + datastore + .inventory_collections() + .await + .unwrap() + .iter() + .map(|c| c.id.into()) + .collect::>(), &[collection4.id, collection5.id, collection6.id,] ); // Again, at this point, we should not prune anything. 
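The inventory hunks above lean heavily on the typed-UUID conversions from `omicron_uuid_kinds` (alongside the `to_db_typed_uuid` helper) to move between typed ids and the plain `Uuid`s still stored in some tables. A minimal, self-contained sketch of those conversions, using only calls that appear in this diff:

```rust
use omicron_uuid_kinds::{CollectionUuid, GenericUuid, SledUuid};
use uuid::Uuid;

// Typed ids catch "wrong kind of UUID" bugs at compile time; at the database
// boundary they are converted to and from plain `Uuid`s.
let sled_id: SledUuid = SledUuid::new_v4();
let raw: Uuid = sled_id.into_untyped_uuid(); // typed -> plain, by value
let borrowed: &Uuid = sled_id.as_untyped_uuid(); // typed -> plain, borrowed
let back: SledUuid = SledUuid::from_untyped_uuid(raw); // plain -> typed
assert_eq!(sled_id, back);
assert_eq!(raw, *borrowed);

// Collection ids get the same treatment when reading rows that still store
// untyped ids.
let _collection_id = CollectionUuid::from_untyped_uuid(Uuid::new_v4());
```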
@@ -2432,7 +2497,13 @@ mod test { .await .expect("failed to prune collections"); assert_eq!( - datastore.inventory_collections().await.unwrap(), + datastore + .inventory_collections() + .await + .unwrap() + .iter() + .map(|c| c.id.into()) + .collect::>(), &[collection4.id, collection5.id, collection6.id,] ); @@ -2453,7 +2524,13 @@ mod test { .await .expect("failed to prune collections"); assert_eq!( - datastore.inventory_collections().await.unwrap(), + datastore + .inventory_collections() + .await + .unwrap() + .iter() + .map(|c| c.id.into()) + .collect::>(), &[collection5.id, collection6.id, collection7.id,] ); @@ -2487,7 +2564,13 @@ mod test { .await .expect("failed to prune collections"); assert_eq!( - datastore.inventory_collections().await.unwrap(), + datastore + .inventory_collections() + .await + .unwrap() + .iter() + .map(|c| c.id.into()) + .collect::>(), &[collection6.id,] ); @@ -2497,7 +2580,7 @@ mod test { .inventory_delete_collection(&opctx, collection6.id) .await .expect("failed to delete collection"); - assert_eq!(datastore.inventory_collections().await.unwrap(), &[]); + assert!(datastore.inventory_collections().await.unwrap().is_empty()); conn.transaction_async(|conn| async move { conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL).await.unwrap(); diff --git a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs b/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs index 670ca08960..5b370f27a9 100644 --- a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs +++ b/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs @@ -303,7 +303,7 @@ impl DataStore { .gt(version) .or(dsl::version_removed.gt(version)), ) - .limit(limit as i64) + .limit(i64::from(limit)) .select(Ipv4NatEntry::as_select()) .load_async(&*self.pool_connection_authorized(opctx).await?) .await @@ -322,7 +322,7 @@ impl DataStore { let nat_changes = dsl::ipv4_nat_changes .filter(dsl::version.gt(version)) - .limit(limit as i64) + .limit(i64::from(limit)) .order_by(dsl::version) .select(Ipv4NatChange::as_select()) .load_async(&*self.pool_connection_authorized(opctx).await?) @@ -406,13 +406,11 @@ mod test { // Each change (creation / deletion) to the NAT table should increment the // version number of the row in the NAT table - let external_address = external::Ipv4Net( - ipnetwork::Ipv4Network::try_from("10.0.0.100").unwrap(), - ); + let external_address = + oxnet::Ipv4Net::host_net("10.0.0.100".parse().unwrap()); - let sled_address = external::Ipv6Net( - ipnetwork::Ipv6Network::try_from("fd00:1122:3344:104::1").unwrap(), - ); + let sled_address = + oxnet::Ipv6Net::host_net("fd00:1122:3344:104::1".parse().unwrap()); // Add a nat entry. let nat1 = Ipv4NatValues { @@ -565,13 +563,11 @@ mod test { // Each change (creation / deletion) to the NAT table should increment the // version number of the row in the NAT table - let external_address = external::Ipv4Net( - ipnetwork::Ipv4Network::try_from("10.0.0.100").unwrap(), - ); + let external_address = + oxnet::Ipv4Net::host_net("10.0.0.100".parse().unwrap()); - let sled_address = external::Ipv6Net( - ipnetwork::Ipv6Network::try_from("fd00:1122:3344:104::1").unwrap(), - ); + let sled_address = + oxnet::Ipv6Net::host_net("fd00:1122:3344:104::1".parse().unwrap()); // Add a nat entry. let nat1 = Ipv4NatValues { @@ -711,13 +707,11 @@ mod test { // 1. an entry should be deleted during the next sync // 2. 
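Two small API shifts recur through this file: the lossy `as` cast on the page limit becomes a lossless i64::from, and the ipnetwork-backed wrappers become oxnet networks built with host_net. A standalone sketch of the new construction, reusing the addresses from the tests above:

    use std::net::{Ipv4Addr, Ipv6Addr};

    // /32 and /128 "host" networks, as used for the NAT entries above.
    let external_address =
        oxnet::Ipv4Net::host_net("10.0.0.100".parse::<Ipv4Addr>().unwrap());
    let sled_address =
        oxnet::Ipv6Net::host_net("fd00:1122:3344:104::1".parse::<Ipv6Addr>().unwrap());

    // The accessor is now `addr()` rather than `ip()`.
    assert_eq!(external_address.addr(), Ipv4Addr::new(10, 0, 0, 100));

    // Widening the page limit (assumed u32 here) without a silent truncation path.
    let limit: u32 = 100;
    let sql_limit = i64::from(limit);
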
an entry that should be kept during the next sync - let external_address = external::Ipv4Net( - ipnetwork::Ipv4Network::try_from("10.0.0.100").unwrap(), - ); + let external_address = + oxnet::Ipv4Net::host_net("10.0.0.100".parse().unwrap()); - let sled_address = external::Ipv6Net( - ipnetwork::Ipv6Network::try_from("fd00:1122:3344:104::1").unwrap(), - ); + let sled_address = + oxnet::Ipv6Net::host_net("fd00:1122:3344:104::1".parse().unwrap()); // Add a nat entry. let nat1 = Ipv4NatValues { @@ -833,13 +827,12 @@ mod test { let addresses = (0..=255).map(|i| { let addr = Ipv4Addr::new(10, 0, 0, i); - let net = ipnetwork::Ipv4Network::new(addr, 32).unwrap(); - external::Ipv4Net(net) + let net = oxnet::Ipv4Net::new(addr, 32).unwrap(); + net }); - let sled_address = external::Ipv6Net( - ipnetwork::Ipv6Network::try_from("fd00:1122:3344:104::1").unwrap(), - ); + let sled_address = + oxnet::Ipv6Net::host_net("fd00:1122:3344:104::1".parse().unwrap()); let nat_entries = addresses.map(|external_address| { // build a bunch of nat entries @@ -908,7 +901,7 @@ mod test { .expect("did not find a deleted nat entry with a matching version number"); assert_eq!( - deleted_nat.external_address.ip(), + deleted_nat.external_address.addr(), change.external_address ); assert_eq!( @@ -917,7 +910,7 @@ mod test { ); assert_eq!(deleted_nat.last_port, change.last_port.into()); assert_eq!( - deleted_nat.sled_address.ip(), + deleted_nat.sled_address.addr(), change.sled_address ); assert_eq!(*deleted_nat.mac, change.mac); @@ -933,13 +926,13 @@ mod test { assert!(added_nat.version_removed.is_none()); assert_eq!( - added_nat.external_address.ip(), + added_nat.external_address.addr(), change.external_address ); assert_eq!(added_nat.first_port, change.first_port.into()); assert_eq!(added_nat.last_port, change.last_port.into()); assert_eq!( - added_nat.sled_address.ip(), + added_nat.sled_address.addr(), change.sled_address ); assert_eq!(*added_nat.mac, change.mac); diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 0020cf99b3..b5cb749162 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -48,10 +48,12 @@ use std::sync::Arc; use uuid::Uuid; mod address_lot; +mod allow_list; mod bfd; mod bgp; mod bootstore; mod certificate; +mod cockroachdb_settings; mod console_session; mod dataset; mod db_metadata; @@ -71,13 +73,15 @@ mod oximeter; mod physical_disk; mod probe; mod project; +#[cfg(any(test, feature = "testing"))] +pub mod pub_test_utils; mod quota; mod rack; mod region; +mod region_replacement; mod region_snapshot; mod role; mod saga; -mod service; mod silo; mod silo_group; mod silo_user; @@ -92,6 +96,7 @@ mod switch_port; pub(crate) mod test_utils; mod update; mod utilization; +mod v2p_mapping; mod virtual_provisioning_collection; mod vmm; mod volume; @@ -99,22 +104,28 @@ mod vpc; mod zpool; pub use address_lot::AddressLotCreateResult; +pub use dns::DataStoreDnsTest; pub use dns::DnsVersionUpdateBuilder; pub use instance::InstanceAndActiveVmm; pub use inventory::DataStoreInventoryTest; use nexus_db_model::AllSchemaVersions; pub use probe::ProbeInfo; pub use rack::RackInit; +pub use rack::SledUnderlayAllocationResult; pub use silo::Discoverability; +pub use sled::SledTransition; +pub use sled::TransitionError; pub use switch_port::SwitchPortSettingsCombinedResult; pub use virtual_provisioning_collection::StorageType; pub use volume::read_only_resources_associated_with_volume; pub use volume::CrucibleResources; pub use 
volume::CrucibleTargets; +pub use volume::VolumeCheckoutReason; +pub use volume::VolumeReplacementParams; // Number of unique datasets required to back a region. // TODO: This should likely turn into a configuration option. -pub(crate) const REGION_REDUNDANCY_THRESHOLD: usize = 3; +pub const REGION_REDUNDANCY_THRESHOLD: usize = 3; /// The name of the built-in IP pool for Oxide services. pub const SERVICE_IP_POOL_NAME: &str = "oxide-service-pool"; @@ -380,28 +391,29 @@ mod test { use crate::db::lookup::LookupPath; use crate::db::model::{ BlockSize, ConsoleSession, Dataset, DatasetKind, ExternalIp, - PhysicalDisk, PhysicalDiskKind, Project, Rack, Region, Service, - ServiceKind, SiloUser, SledBaseboard, SledSystemHardware, SledUpdate, - SshKey, VpcSubnet, Zpool, + PhysicalDisk, PhysicalDiskKind, PhysicalDiskPolicy, PhysicalDiskState, + Project, Rack, Region, SiloUser, SledBaseboard, SledSystemHardware, + SledUpdate, SshKey, VpcSubnet, Zpool, }; use crate::db::queries::vpc_subnet::FilterConflictingVpcSubnetRangesQuery; use chrono::{Duration, Utc}; use futures::stream; use futures::StreamExt; use nexus_config::RegionAllocationStrategy; - use nexus_db_model::Generation; use nexus_db_model::IpAttachState; + use nexus_db_model::{to_db_typed_uuid, Generation}; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; - use omicron_common::api::external::DataPageParams; use omicron_common::api::external::{ ByteCount, Error, IdentityMetadataCreateParams, LookupType, Name, }; use omicron_test_utils::dev; + use omicron_uuid_kinds::CollectionUuid; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::SledUuid; use std::collections::HashMap; use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV6}; - use std::num::NonZeroU32; use std::sync::Arc; use strum::EnumCount; use uuid::Uuid; @@ -600,7 +612,7 @@ mod test { } // Creates a test sled, returns its UUID. - async fn create_test_sled(datastore: &DataStore) -> Uuid { + async fn create_test_sled(datastore: &DataStore) -> SledUuid { let bogus_addr = SocketAddrV6::new( Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 1), 8080, @@ -608,10 +620,10 @@ mod test { 0, ); let rack_id = Uuid::new_v4(); - let sled_id = Uuid::new_v4(); + let sled_id = SledUuid::new_v4(); let sled_update = SledUpdate::new( - sled_id, + sled_id.into_untyped_uuid(), bogus_addr, sled_baseboard_for_test(), sled_system_hardware_for_test(), @@ -627,37 +639,44 @@ mod test { } const TEST_VENDOR: &str = "test-vendor"; - const TEST_SERIAL: &str = "test-serial"; const TEST_MODEL: &str = "test-model"; + /// Creates a disk on a sled of a particular kind. + /// + /// The "serial" value of the disk is supplied by the + /// caller, and is arbitrary, but should be unique. async fn create_test_physical_disk( datastore: &DataStore, opctx: &OpContext, - sled_id: Uuid, + sled_id: SledUuid, kind: PhysicalDiskKind, + serial: String, ) -> Uuid { let physical_disk = PhysicalDisk::new( + Uuid::new_v4(), TEST_VENDOR.into(), - TEST_SERIAL.into(), + serial, TEST_MODEL.into(), kind, - sled_id, + sled_id.into_untyped_uuid(), ); datastore - .physical_disk_upsert(opctx, physical_disk.clone()) + .physical_disk_insert(opctx, physical_disk.clone()) .await .expect("Failed to upsert physical disk"); - physical_disk.uuid() + physical_disk.id() } // Creates a test zpool, returns its UUID. 
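The test helpers above now hand out SledUuid values instead of bare Uuids, converting only at the boundaries that still want an untyped ID. A two-line sketch of that boundary conversion; both calls appear verbatim in the hunk above:

    use omicron_uuid_kinds::{GenericUuid, SledUuid};

    let sled_id = SledUuid::new_v4();
    // Interfaces that still take a plain `Uuid` get an explicit, visible conversion.
    let untyped: uuid::Uuid = sled_id.into_untyped_uuid();
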
async fn create_test_zpool( datastore: &DataStore, - sled_id: Uuid, + opctx: &OpContext, + sled_id: SledUuid, physical_disk_id: Uuid, ) -> Uuid { let zpool_id = create_test_zpool_not_in_inventory( datastore, + opctx, sled_id, physical_disk_id, ) @@ -673,12 +692,14 @@ mod test { // However, this helper doesn't add the zpool to the inventory just yet. async fn create_test_zpool_not_in_inventory( datastore: &DataStore, - sled_id: Uuid, + opctx: &OpContext, + sled_id: SledUuid, physical_disk_id: Uuid, ) -> Uuid { let zpool_id = Uuid::new_v4(); - let zpool = Zpool::new(zpool_id, sled_id, physical_disk_id); - datastore.zpool_upsert(zpool).await.unwrap(); + let zpool = + Zpool::new(zpool_id, sled_id.into_untyped_uuid(), physical_disk_id); + datastore.zpool_insert(opctx, zpool).await.unwrap(); zpool_id } @@ -687,17 +708,17 @@ mod test { async fn add_test_zpool_to_inventory( datastore: &DataStore, zpool_id: Uuid, - sled_id: Uuid, + sled_id: SledUuid, ) { use db::schema::inv_zpool::dsl; - let inv_collection_id = Uuid::new_v4(); + let inv_collection_id = CollectionUuid::new_v4(); let time_collected = Utc::now(); let inv_pool = nexus_db_model::InvZpool { - inv_collection_id, + inv_collection_id: inv_collection_id.into(), time_collected, id: zpool_id, - sled_id, + sled_id: to_db_typed_uuid(sled_id), total_size: test_zpool_size().into(), }; diesel::insert_into(dsl::inv_zpool) @@ -735,12 +756,12 @@ mod test { ineligible: SledToDatasetMap, // A map from eligible dataset IDs to their corresponding sled IDs. - eligible_dataset_ids: HashMap, + eligible_dataset_ids: HashMap, ineligible_dataset_ids: HashMap, } // Map of sled IDs to dataset IDs. - type SledToDatasetMap = HashMap>; + type SledToDatasetMap = HashMap>; impl TestDatasets { async fn create( @@ -810,33 +831,36 @@ mod test { number_of_sleds: usize, ) -> SledToDatasetMap { // Create sleds... - let sled_ids: Vec = stream::iter(0..number_of_sleds) + let sled_ids: Vec = stream::iter(0..number_of_sleds) .then(|_| create_test_sled(&datastore)) .collect() .await; struct PhysicalDisk { - sled_id: Uuid, + sled_id: SledUuid, disk_id: Uuid, } // create 9 disks on each sled let physical_disks: Vec = stream::iter(sled_ids) .map(|sled_id| { - let sled_id_iter: Vec = + let sled_id_iter: Vec = (0..9).map(|_| sled_id).collect(); - stream::iter(sled_id_iter).then(|sled_id| { - let disk_id_future = create_test_physical_disk( - &datastore, - opctx, - sled_id, - PhysicalDiskKind::U2, - ); - async move { - let disk_id = disk_id_future.await; - PhysicalDisk { sled_id, disk_id } - } - }) + stream::iter(sled_id_iter).enumerate().then( + |(i, sled_id)| { + let disk_id_future = create_test_physical_disk( + &datastore, + opctx, + sled_id, + PhysicalDiskKind::U2, + format!("{sled_id}, disk index {i}"), + ); + async move { + let disk_id = disk_id_future.await; + PhysicalDisk { sled_id, disk_id } + } + }, + ) }) .flatten() .collect() @@ -844,7 +868,7 @@ mod test { #[derive(Copy, Clone)] struct Zpool { - sled_id: Uuid, + sled_id: SledUuid, pool_id: Uuid, } @@ -853,6 +877,7 @@ mod test { .then(|disk| { let pool_id_future = create_test_zpool( &datastore, + &opctx, disk.sled_id, disk.disk_id, ); @@ -933,7 +958,7 @@ mod test { let expected_region_count = REGION_REDUNDANCY_THRESHOLD; let dataset_and_regions = datastore - .region_allocate( + .disk_region_allocate( &opctx, volume_id, ¶ms.disk_source, @@ -962,8 +987,8 @@ mod test { // This is a little goofy, but it catches a bug that has // happened before. 
The returned columns share names (like // "id"), so we need to process them in-order. - assert!(regions.get(&dataset.id()).is_none()); - assert!(disk_datasets.get(®ion.id()).is_none()); + assert!(!regions.contains(&dataset.id())); + assert!(!disk_datasets.contains(®ion.id())); // Dataset must not be eligible for provisioning. if let Some(kind) = @@ -1026,7 +1051,7 @@ mod test { let expected_region_count = REGION_REDUNDANCY_THRESHOLD; let dataset_and_regions = datastore - .region_allocate( + .disk_region_allocate( &opctx, volume_id, ¶ms.disk_source, @@ -1113,7 +1138,7 @@ mod test { let volume_id = Uuid::new_v4(); let err = datastore - .region_allocate( + .disk_region_allocate( &opctx, volume_id, ¶ms.disk_source, @@ -1158,7 +1183,7 @@ mod test { ); let volume_id = Uuid::new_v4(); let mut dataset_and_regions1 = datastore - .region_allocate( + .disk_region_allocate( &opctx, volume_id, ¶ms.disk_source, @@ -1171,7 +1196,7 @@ mod test { // Use a different allocation ordering to ensure we're idempotent even // if the shuffle changes. let mut dataset_and_regions2 = datastore - .region_allocate( + .disk_region_allocate( &opctx, volume_id, ¶ms.disk_source, @@ -1221,6 +1246,7 @@ mod test { &opctx, sled_id, PhysicalDiskKind::U2, + "fake serial".to_string(), ) .await; @@ -1229,6 +1255,7 @@ mod test { .then(|_| { create_test_zpool_not_in_inventory( &datastore, + &opctx, sled_id, physical_disk_id, ) @@ -1264,7 +1291,7 @@ mod test { ); let volume1_id = Uuid::new_v4(); let err = datastore - .region_allocate( + .disk_region_allocate( &opctx, volume1_id, ¶ms.disk_source, @@ -1287,7 +1314,7 @@ mod test { add_test_zpool_to_inventory(&datastore, zpool_id, sled_id).await; } datastore - .region_allocate( + .disk_region_allocate( &opctx, volume1_id, ¶ms.disk_source, @@ -1317,6 +1344,7 @@ mod test { &opctx, sled_id, PhysicalDiskKind::U2, + "fake serial".to_string(), ) .await; @@ -1324,7 +1352,12 @@ mod test { let zpool_ids: Vec = stream::iter(0..REGION_REDUNDANCY_THRESHOLD - 1) .then(|_| { - create_test_zpool(&datastore, sled_id, physical_disk_id) + create_test_zpool( + &datastore, + &opctx, + sled_id, + physical_disk_id, + ) }) .collect() .await; @@ -1357,7 +1390,7 @@ mod test { ); let volume1_id = Uuid::new_v4(); let err = datastore - .region_allocate( + .disk_region_allocate( &opctx, volume1_id, ¶ms.disk_source, @@ -1379,6 +1412,123 @@ mod test { logctx.cleanup_successful(); } + #[tokio::test] + async fn test_region_allocation_only_considers_disks_in_service() { + let logctx = dev::test_setup_log( + "test_region_allocation_only_considers_disks_in_service", + ); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // Create a sled... + let sled_id = create_test_sled(&datastore).await; + + // ... and create several disks on that sled, each with a zpool/dataset. 
+ let mut physical_disk_ids = vec![]; + for i in 0..REGION_REDUNDANCY_THRESHOLD { + let physical_disk_id = create_test_physical_disk( + &datastore, + &opctx, + sled_id, + PhysicalDiskKind::U2, + format!("fake serial #{i}"), + ) + .await; + let zpool_id = create_test_zpool( + &datastore, + &opctx, + sled_id, + physical_disk_id, + ) + .await; + let bogus_addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 8080, 0, 0); + let dataset = Dataset::new( + Uuid::new_v4(), + zpool_id, + bogus_addr, + DatasetKind::Crucible, + ); + datastore.dataset_upsert(dataset).await.unwrap(); + physical_disk_ids.push(physical_disk_id); + } + + // Check the following combinations of physical disk policy/state + // on region allocation. Since we only created + // REGION_REDUNDANCY_THRESHOLD disks/zpools/datasets, updating the + // state of a single disk should be sufficient to prevent the + // allocations from occurring. + use PhysicalDiskPolicy as Policy; + use PhysicalDiskState as State; + + // Just a bool with a fancier name -- determines whether or not + // we expect the policy/state combinations to pass or not. + enum AllocationShould { + Fail, + Succeed, + } + + let policy_state_combos = [ + (Policy::Expunged, State::Active, AllocationShould::Fail), + (Policy::Expunged, State::Decommissioned, AllocationShould::Fail), + (Policy::InService, State::Decommissioned, AllocationShould::Fail), + // Save this one for last, since it actually leaves an allocation + // lying around. + (Policy::InService, State::Active, AllocationShould::Succeed), + ]; + + let volume_id = Uuid::new_v4(); + let params = create_test_disk_create_params( + "disk", + ByteCount::from_mebibytes_u32(500), + ); + + for (policy, state, expected) in policy_state_combos { + // Update policy/state only on a single physical disk. + // + // The rest are assumed "in service" + "active". + datastore + .physical_disk_update_policy( + &opctx, + physical_disk_ids[0], + policy, + ) + .await + .unwrap(); + datastore + .physical_disk_update_state(&opctx, physical_disk_ids[0], state) + .await + .unwrap(); + + let result = datastore + .disk_region_allocate( + &opctx, + volume_id, + ¶ms.disk_source, + params.size, + &RegionAllocationStrategy::Random { seed: Some(0) }, + ) + .await; + + match expected { + AllocationShould::Fail => { + let err = result.unwrap_err(); + let expected = "Not enough zpool space to allocate disks"; + assert!( + err.to_string().contains(expected), + "Saw error: \'{err}\', but expected \'{expected}\'" + ); + assert!(matches!(err, Error::InsufficientCapacity { .. 
})); + } + AllocationShould::Succeed => { + let _ = result.expect("Allocation should have succeeded"); + } + } + } + + let _ = db.cleanup().await; + logctx.cleanup_successful(); + } + #[tokio::test] async fn test_region_allocation_out_of_space_fails() { let logctx = @@ -1399,7 +1549,7 @@ mod test { let volume1_id = Uuid::new_v4(); assert!(datastore - .region_allocate( + .disk_region_allocate( &opctx, volume1_id, ¶ms.disk_source, @@ -1443,8 +1593,8 @@ mod test { name: external::Name::try_from(String::from("name")).unwrap(), description: String::from("description"), }, - external::Ipv4Net("172.30.0.0/22".parse().unwrap()), - external::Ipv6Net("fd00::/64".parse().unwrap()), + "172.30.0.0/22".parse().unwrap(), + "fd00::/64".parse().unwrap(), ); let values = FilterConflictingVpcSubnetRangesQuery::new(subnet); let query = @@ -1601,130 +1751,6 @@ mod test { logctx.cleanup_successful(); } - #[tokio::test] - async fn test_service_upsert_and_list() { - let logctx = dev::test_setup_log("test_service_upsert_and_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - - // Create a sled on which the service should exist. - let sled_id = create_test_sled(&datastore).await; - - // Create a few new service to exist on this sled. - let service1_id = - "ab7bd7fd-7c37-48ab-a84a-9c09a90c4c7f".parse().unwrap(); - let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 123, 0, 0); - let kind = ServiceKind::Nexus; - - let service1 = - Service::new(service1_id, sled_id, Some(service1_id), addr, kind); - let result = - datastore.service_upsert(&opctx, service1.clone()).await.unwrap(); - assert_eq!(service1.id(), result.id()); - assert_eq!(service1.ip, result.ip); - assert_eq!(service1.kind, result.kind); - - let service2_id = - "fe5b6e3d-dfee-47b4-8719-c54f78912c0b".parse().unwrap(); - let service2 = Service::new(service2_id, sled_id, None, addr, kind); - let result = - datastore.service_upsert(&opctx, service2.clone()).await.unwrap(); - assert_eq!(service2.id(), result.id()); - assert_eq!(service2.ip, result.ip); - assert_eq!(service2.kind, result.kind); - - let service3_id = Uuid::new_v4(); - let kind = ServiceKind::Oximeter; - let service3 = Service::new( - service3_id, - sled_id, - Some(Uuid::new_v4()), - addr, - kind, - ); - let result = - datastore.service_upsert(&opctx, service3.clone()).await.unwrap(); - assert_eq!(service3.id(), result.id()); - assert_eq!(service3.ip, result.ip); - assert_eq!(service3.kind, result.kind); - - // Try listing services of one kind. - let services = datastore - .services_list_kind( - &opctx, - ServiceKind::Nexus, - &DataPageParams { - marker: None, - direction: dropshot::PaginationOrder::Ascending, - limit: NonZeroU32::new(3).unwrap(), - }, - ) - .await - .unwrap(); - assert_eq!(services[0].id(), service1.id()); - assert_eq!(services[0].sled_id, service1.sled_id); - assert_eq!(services[0].zone_id, service1.zone_id); - assert_eq!(services[0].kind, service1.kind); - assert_eq!(services[1].id(), service2.id()); - assert_eq!(services[1].sled_id, service2.sled_id); - assert_eq!(services[1].zone_id, service2.zone_id); - assert_eq!(services[1].kind, service2.kind); - assert_eq!(services.len(), 2); - - // Try listing services of a different kind. 
- let services = datastore - .services_list_kind( - &opctx, - ServiceKind::Oximeter, - &DataPageParams { - marker: None, - direction: dropshot::PaginationOrder::Ascending, - limit: NonZeroU32::new(3).unwrap(), - }, - ) - .await - .unwrap(); - assert_eq!(services[0].id(), service3.id()); - assert_eq!(services[0].sled_id, service3.sled_id); - assert_eq!(services[0].zone_id, service3.zone_id); - assert_eq!(services[0].kind, service3.kind); - assert_eq!(services.len(), 1); - - // Try listing services of a kind for which there are no services. - let services = datastore - .services_list_kind( - &opctx, - ServiceKind::Dendrite, - &DataPageParams { - marker: None, - direction: dropshot::PaginationOrder::Ascending, - limit: NonZeroU32::new(3).unwrap(), - }, - ) - .await - .unwrap(); - assert!(services.is_empty()); - - // As a quick check, try supplying a marker. - let services = datastore - .services_list_kind( - &opctx, - ServiceKind::Nexus, - &DataPageParams { - marker: Some(&service1_id), - direction: dropshot::PaginationOrder::Ascending, - limit: NonZeroU32::new(3).unwrap(), - }, - ) - .await - .unwrap(); - assert_eq!(services.len(), 1); - assert_eq!(services[0].id(), service2.id()); - - db.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } - #[tokio::test] async fn test_rack_initialize_is_idempotent() { let logctx = dev::test_setup_log("test_rack_initialize_is_idempotent"); diff --git a/nexus/db-queries/src/db/datastore/network_interface.rs b/nexus/db-queries/src/db/datastore/network_interface.rs index 1bccca4e97..af3f832e35 100644 --- a/nexus/db-queries/src/db/datastore/network_interface.rs +++ b/nexus/db-queries/src/db/datastore/network_interface.rs @@ -5,6 +5,7 @@ //! [`DataStore`] methods on [`NetworkInterface`]s. use super::DataStore; +use super::SQL_BATCH_SIZE; use crate::authz; use crate::context::OpContext; use crate::db; @@ -22,6 +23,7 @@ use crate::db::model::NetworkInterfaceKind; use crate::db::model::NetworkInterfaceUpdate; use crate::db::model::VpcSubnet; use crate::db::pagination::paginated; +use crate::db::pagination::Paginator; use crate::db::pool::DbConnection; use crate::db::queries::network_interface; use crate::transaction_retry::OptionalError; @@ -30,8 +32,10 @@ use chrono::Utc; use diesel::prelude::*; use diesel::result::Error as DieselError; use nexus_db_model::ServiceNetworkInterface; +use nexus_types::identity::Resource; use omicron_common::api::external; use omicron_common::api::external::http_pagination::PaginatedBy; +use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; @@ -64,9 +68,9 @@ impl From for omicron_common::api::internal::shared::NetworkInterface { nic: NicInfo, ) -> omicron_common::api::internal::shared::NetworkInterface { let ip_subnet = if nic.ip.is_ipv4() { - external::IpNet::V4(nic.ipv4_block.0) + oxnet::IpNet::V4(nic.ipv4_block.0) } else { - external::IpNet::V6(nic.ipv6_block.0) + oxnet::IpNet::V6(nic.ipv6_block.0) }; let kind = match nic.kind { NetworkInterfaceKind::Instance => { @@ -169,6 +173,58 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + /// List one page of all network interfaces associated with internal services + pub async fn service_network_interfaces_all_list( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec { + use db::schema::service_network_interface::dsl; + + // See the comment in 
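A hypothetical caller of the single-page listing above, alongside the batched variant that follows it; opctx and datastore are assumed to be in scope, and the authz check inside uses the service IP pool as the logical parent, as the comment explains:

    // Sketch of a background-task style caller.
    let one_page = datastore
        .service_network_interfaces_all_list(&opctx, &DataPageParams::max_page())
        .await?;
    let all = datastore
        .service_network_interfaces_all_list_batched(&opctx)
        .await?;
    // The batched form pages internally, so for small tables the two agree.
    assert_eq!(one_page, all);
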
`service_create_network_interface`. There's no + // obvious parent for a service network interface (as opposed to + // instance network interfaces, which require ListChildren on the + // instance to list). As a logical proxy, we check for listing children + // of the service IP pool. + let (authz_pool, _pool) = self.ip_pools_service_lookup(opctx).await?; + opctx.authorize(authz::Action::ListChildren, &authz_pool).await?; + + paginated(dsl::service_network_interface, dsl::id, pagparams) + .filter(dsl::time_deleted.is_null()) + .select(ServiceNetworkInterface::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// List all network interfaces associated with internal services, making as + /// many queries as needed to get them all + /// + /// This should generally not be used in API handlers or other + /// latency-sensitive contexts, but it can make sense in saga actions or + /// background tasks. + pub async fn service_network_interfaces_all_list_batched( + &self, + opctx: &OpContext, + ) -> ListResultVec { + opctx.check_complex_operations_allowed()?; + + let mut all_ips = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = self + .service_network_interfaces_all_list( + opctx, + &p.current_pagparams(), + ) + .await?; + paginator = p + .found_batch(&batch, &|nic: &ServiceNetworkInterface| nic.id()); + all_ips.extend(batch); + } + Ok(all_ips) + } + /// Create a network interface attached to the provided service zone. pub async fn service_create_network_interface( &self, @@ -315,12 +371,19 @@ impl DataStore { /// /// Note that the primary interface for an instance cannot be deleted if /// there are any secondary interfaces. + /// + /// To support idempotency, such as in saga operations, this method returns + /// an extra boolean. The meaning of return values are: + /// - `Ok(true)`: The record was deleted during this call + /// - `Ok(false)`: The record was already deleted, such as by a previous + /// call + /// - `Err(_)`: Any other condition, including a non-existent record. pub async fn instance_delete_network_interface( &self, opctx: &OpContext, authz_instance: &authz::Instance, authz_interface: &authz::InstanceNetworkInterface, - ) -> Result<(), network_interface::DeleteError> { + ) -> Result { opctx .authorize(authz::Action::Delete, authz_interface) .await @@ -332,17 +395,59 @@ impl DataStore { ); query .clone() - .execute_async( + .execute_and_check( &*self .pool_connection_authorized(opctx) .await .map_err(network_interface::DeleteError::External)?, ) .await - .map_err(|e| { - network_interface::DeleteError::from_diesel(e, &query) - })?; - Ok(()) + .map_err(|e| network_interface::DeleteError::from_diesel(e, &query)) + } + + /// Delete a `ServiceNetworkInterface` attached to a provided service. + /// + /// To support idempotency, such as in saga operations, this method returns + /// an extra boolean. The meaning of return values are: + /// - `Ok(true)`: The record was deleted during this call + /// - `Ok(false)`: The record was already deleted, such as by a previous + /// call + /// - `Err(_)`: Any other condition, including a non-existent record. + pub async fn service_delete_network_interface( + &self, + opctx: &OpContext, + service_id: Uuid, + network_interface_id: Uuid, + ) -> Result { + // See the comment in `service_create_network_interface`. 
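A hypothetical saga-action caller of instance_delete_network_interface, showing how the boolean return documented above keeps a replayed delete idempotent; opctx, authz_instance, and authz_interface are assumed to be in scope as in the signature:

    let deleted_now = datastore
        .instance_delete_network_interface(&opctx, &authz_instance, &authz_interface)
        .await?;
    if !deleted_now {
        // Ok(false): a previous attempt (for example a replayed saga node)
        // already removed the record, so there is nothing left to undo.
    }
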
There's no + // obvious parent for a service network interface (as opposed to + // instance network interfaces, which require permissions on the + // instance). As a logical proxy, we check for listing children of the + // service IP pool. + let (authz_service_ip_pool, _) = self + .ip_pools_service_lookup(opctx) + .await + .map_err(network_interface::DeleteError::External)?; + opctx + .authorize(authz::Action::Delete, &authz_service_ip_pool) + .await + .map_err(network_interface::DeleteError::External)?; + + let query = network_interface::DeleteQuery::new( + NetworkInterfaceKind::Service, + service_id, + network_interface_id, + ); + query + .clone() + .execute_and_check( + &*self + .pool_connection_authorized(opctx) + .await + .map_err(network_interface::DeleteError::External)?, + ) + .await + .map_err(|e| network_interface::DeleteError::from_diesel(e, &query)) } /// Return information about network interfaces required for the sled @@ -687,4 +792,167 @@ impl DataStore { public_error_from_diesel(e, ErrorHandler::Server) }) } + + /// List all network interfaces associated with all instances, making as + /// many queries as needed to get them all + /// + /// This should generally not be used in API handlers or other + /// latency-sensitive contexts, but it can make sense in saga actions or + /// background tasks. + /// + /// This particular method was added for propagating v2p mappings via RPWs + pub async fn instance_network_interfaces_all_list_batched( + &self, + opctx: &OpContext, + ) -> ListResultVec { + opctx.check_complex_operations_allowed()?; + + let mut all_interfaces = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = self + .instance_network_interfaces_all_list( + opctx, + &p.current_pagparams(), + ) + .await?; + paginator = p + .found_batch(&batch, &|nic: &InstanceNetworkInterface| { + nic.id() + }); + all_interfaces.extend(batch); + } + Ok(all_interfaces) + } + + /// List one page of all network interfaces associated with instances + pub async fn instance_network_interfaces_all_list( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec { + use db::schema::instance_network_interface::dsl; + + // See the comment in `service_create_network_interface`. There's no + // obvious parent for a service network interface (as opposed to + // instance network interfaces, which require ListChildren on the + // instance to list). As a logical proxy, we check for listing children + // of the service IP pool. + let (authz_pool, _pool) = self.ip_pools_service_lookup(opctx).await?; + opctx.authorize(authz::Action::ListChildren, &authz_pool).await?; + + paginated(dsl::instance_network_interface, dsl::id, pagparams) + .filter(dsl::time_deleted.is_null()) + .select(InstanceNetworkInterface::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
+ .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::db::datastore::test_utils::datastore_test; + use crate::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; + use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; + use nexus_test_utils::db::test_setup_database; + use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; + use omicron_test_utils::dev; + use std::collections::BTreeSet; + + async fn read_all_service_nics( + datastore: &DataStore, + opctx: &OpContext, + ) -> Vec { + let all_batched = datastore + .service_network_interfaces_all_list_batched(opctx) + .await + .expect("failed to fetch all service NICs batched"); + let all_paginated = datastore + .service_network_interfaces_all_list( + opctx, + &DataPageParams::max_page(), + ) + .await + .expect("failed to fetch all service NICs paginated"); + assert_eq!(all_batched, all_paginated); + all_batched + } + + #[tokio::test] + async fn test_service_network_interfaces_list() { + usdt::register_probes().unwrap(); + let logctx = + dev::test_setup_log("test_service_network_interfaces_list"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // No IPs, to start + let nics = read_all_service_nics(&datastore, &opctx).await; + assert_eq!(nics, vec![]); + + // Insert 10 Nexus NICs + let ip_range = NEXUS_OPTE_IPV4_SUBNET + .addr_iter() + .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES) + .take(10); + let mut macs = external::MacAddr::iter_system(); + let mut service_nics = Vec::new(); + for (i, ip) in ip_range.enumerate() { + let name = format!("service-nic-{i}"); + let interface = IncompleteNetworkInterface::new_service( + Uuid::new_v4(), + Uuid::new_v4(), + NEXUS_VPC_SUBNET.clone(), + external::IdentityMetadataCreateParams { + name: name.parse().unwrap(), + description: name, + }, + ip.into(), + macs.next().unwrap(), + 0, + ) + .unwrap(); + let nic = datastore + .service_create_network_interface(&opctx, interface) + .await + .expect("failed to insert service nic"); + service_nics.push(nic); + } + service_nics.sort_by_key(|nic| nic.id()); + + // Ensure we see them all. + let nics = read_all_service_nics(&datastore, &opctx).await; + assert_eq!(nics, service_nics); + + // Delete a few, and ensure we don't see them anymore. + let mut removed_nic_ids = BTreeSet::new(); + for (i, nic) in service_nics.iter().enumerate() { + if i % 3 == 0 { + let id = nic.id(); + datastore + .service_delete_network_interface( + &opctx, + nic.service_id, + id, + ) + .await + .expect("failed to delete NIC"); + removed_nic_ids.insert(id); + } + } + + // Check that we removed at least one, then prune them from our list of + // expected IPs. + assert!(!removed_nic_ids.is_empty()); + service_nics.retain(|nic| !removed_nic_ids.contains(&nic.id())); + + // Ensure we see them all remaining IPs. + let nics = read_all_service_nics(&datastore, &opctx).await; + assert_eq!(nics, service_nics); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } } diff --git a/nexus/db-queries/src/db/datastore/oximeter.rs b/nexus/db-queries/src/db/datastore/oximeter.rs index 116e8586b0..1aa3435cb6 100644 --- a/nexus/db-queries/src/db/datastore/oximeter.rs +++ b/nexus/db-queries/src/db/datastore/oximeter.rs @@ -5,13 +5,18 @@ //! [`DataStore`] methods related to Oximeter. 
use super::DataStore; +use super::SQL_BATCH_SIZE; +use crate::context::OpContext; use crate::db; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; +use crate::db::identity::Asset; use crate::db::model::OximeterInfo; use crate::db::model::ProducerEndpoint; use crate::db::pagination::paginated; +use crate::db::pagination::Paginator; use async_bb8_diesel::AsyncRunQueryDsl; +use chrono::DateTime; use chrono::Utc; use diesel::prelude::*; use omicron_common::api::external::DataPageParams; @@ -24,12 +29,13 @@ impl DataStore { /// Lookup an oximeter instance by its ID. pub async fn oximeter_lookup( &self, + opctx: &OpContext, id: &Uuid, ) -> Result { use db::schema::oximeter::dsl; dsl::oximeter .find(*id) - .first_async(&*self.pool_connection_unauthorized().await?) + .first_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } @@ -37,6 +43,7 @@ impl DataStore { /// Create a record for a new Oximeter instance pub async fn oximeter_create( &self, + opctx: &OpContext, info: &OximeterInfo, ) -> Result<(), Error> { use db::schema::oximeter::dsl; @@ -54,7 +61,7 @@ impl DataStore { dsl::ip.eq(info.ip), dsl::port.eq(info.port), )) - .execute_async(&*self.pool_connection_unauthorized().await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { public_error_from_diesel( @@ -71,12 +78,13 @@ impl DataStore { /// List the oximeter collector instances pub async fn oximeter_list( &self, + opctx: &OpContext, page_params: &DataPageParams<'_, Uuid>, ) -> ListResultVec { use db::schema::oximeter::dsl; paginated(dsl::oximeter, dsl::id, page_params) .load_async::( - &*self.pool_connection_unauthorized().await?, + &*self.pool_connection_authorized(opctx).await?, ) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) @@ -85,6 +93,7 @@ impl DataStore { /// Create a record for a new producer endpoint pub async fn producer_endpoint_create( &self, + opctx: &OpContext, producer: &ProducerEndpoint, ) -> Result<(), Error> { use db::schema::metric_producer::dsl; @@ -100,9 +109,8 @@ impl DataStore { dsl::ip.eq(producer.ip), dsl::port.eq(producer.port), dsl::interval.eq(producer.interval), - dsl::base_route.eq(producer.base_route.clone()), )) - .execute_async(&*self.pool_connection_unauthorized().await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { public_error_from_diesel( @@ -123,13 +131,14 @@ impl DataStore { /// returned. If there was no record, `None` is returned. pub async fn producer_endpoint_delete( &self, + opctx: &OpContext, id: &Uuid, ) -> Result, Error> { use db::schema::metric_producer::dsl; diesel::delete(dsl::metric_producer.find(*id)) .returning(dsl::oximeter_id) .get_result_async::( - &*self.pool_connection_unauthorized().await?, + &*self.pool_connection_authorized(opctx).await?, ) .await .optional() @@ -139,6 +148,7 @@ impl DataStore { /// List the producer endpoint records by the oximeter instance to which they're assigned. pub async fn producers_list_by_oximeter_id( &self, + opctx: &OpContext, oximeter_id: Uuid, pagparams: &DataPageParams<'_, Uuid>, ) -> ListResultVec { @@ -147,7 +157,7 @@ impl DataStore { .filter(dsl::oximeter_id.eq(oximeter_id)) .order_by((dsl::oximeter_id, dsl::id)) .select(ProducerEndpoint::as_select()) - .load_async(&*self.pool_connection_unauthorized().await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
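Every method in this file now threads an OpContext and uses an authorized connection instead of the unauthorized pool. A hypothetical call site reflecting the new signatures; DataPageParams::max_page() mirrors the tests later in this change:

    let collectors = datastore
        .oximeter_list(&opctx, &DataPageParams::max_page())
        .await?;
    for collector in &collectors {
        let producers = datastore
            .producers_list_by_oximeter_id(
                &opctx,
                collector.id,
                &DataPageParams::max_page(),
            )
            .await?;
        // ... hand `producers` to whatever needs the assignments ...
    }
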
.await .map_err(|e| { public_error_from_diesel( @@ -159,4 +169,186 @@ impl DataStore { ) }) } + + /// Fetches a page of the list of producer endpoint records with a + /// `time_modified` date older than `expiration` + pub async fn producers_list_expired( + &self, + opctx: &OpContext, + expiration: DateTime, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec { + use db::schema::metric_producer::dsl; + + paginated(dsl::metric_producer, dsl::id, pagparams) + .filter(dsl::time_modified.lt(expiration)) + .order_by((dsl::oximeter_id, dsl::id)) + .select(ProducerEndpoint::as_select()) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// List all producer endpoint records with a `time_modified` date older + /// than `expiration`, making as many queries as needed to get them all + /// + /// This should generally not be used in API handlers or other + /// latency-sensitive contexts, but it can make sense in saga actions or + /// background tasks. + pub async fn producers_list_expired_batched( + &self, + opctx: &OpContext, + expiration: DateTime, + ) -> ListResultVec { + opctx.check_complex_operations_allowed()?; + + let mut producers = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = self + .producers_list_expired( + opctx, + expiration, + &p.current_pagparams(), + ) + .await?; + paginator = p.found_batch(&batch, &|p: &ProducerEndpoint| p.id()); + producers.extend(batch); + } + + Ok(producers) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use db::datastore::pub_test_utils::datastore_test; + use nexus_test_utils::db::test_setup_database; + use nexus_types::internal_api::params; + use omicron_common::api::internal::nexus; + use omicron_test_utils::dev; + use std::time::Duration; + + async fn read_time_modified( + datastore: &DataStore, + producer_id: Uuid, + ) -> DateTime { + use db::schema::metric_producer::dsl; + + let conn = datastore.pool_connection_for_tests().await.unwrap(); + match dsl::metric_producer + .filter(dsl::id.eq(producer_id)) + .select(dsl::time_modified) + .first_async(&*conn) + .await + { + Ok(time_modified) => time_modified, + Err(err) => panic!( + "failed to read time_modified for producer {producer_id}: \ + {err}" + ), + } + } + + async fn read_expired_producers( + opctx: &OpContext, + datastore: &DataStore, + expiration: DateTime, + ) -> Vec { + let expired_one_page = datastore + .producers_list_expired( + opctx, + expiration, + &DataPageParams::max_page(), + ) + .await + .expect("failed to read max_page of expired producers"); + let expired_batched = datastore + .producers_list_expired_batched(opctx, expiration) + .await + .expect("failed to read batched expired producers"); + assert_eq!(expired_one_page, expired_batched); + expired_batched + } + + #[tokio::test] + async fn test_producers_list_expired() { + // Setup + let logctx = dev::test_setup_log("test_producers_list_expired"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = + datastore_test(&logctx, &db, Uuid::new_v4()).await; + + // Insert an Oximeter collector + let collector_info = OximeterInfo::new(¶ms::OximeterInfo { + collector_id: Uuid::new_v4(), + address: "[::1]:0".parse().unwrap(), // unused + }); + datastore + .oximeter_create(&opctx, &collector_info) + .await + .expect("failed to insert collector"); + + // Insert a producer + let producer = ProducerEndpoint::new( + &nexus::ProducerEndpoint { + 
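producers_list_expired and its batched variant above take an absolute cutoff rather than a lease length. A sketch of how a hypothetical reaper task might derive that cutoff; the 600-second lease is an assumed value, not something defined in this change:

    use chrono::Utc;
    use std::time::Duration;

    // Anything not renewed within the assumed lease window counts as expired.
    let expiration = Utc::now() - Duration::from_secs(600);
    let expired = datastore
        .producers_list_expired_batched(&opctx, expiration)
        .await?;
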
id: Uuid::new_v4(), + kind: nexus::ProducerKind::Service, + address: "[::1]:0".parse().unwrap(), // unused + interval: Duration::from_secs(0), // unused + }, + collector_info.id, + ); + datastore + .producer_endpoint_create(&opctx, &producer) + .await + .expect("failed to insert producer"); + + // Our producer should show up when we list by its collector + let mut all_producers = datastore + .producers_list_by_oximeter_id( + &opctx, + collector_info.id, + &DataPageParams::max_page(), + ) + .await + .expect("failed to list all producers"); + assert_eq!(all_producers.len(), 1); + assert_eq!(all_producers[0].id(), producer.id()); + + // Steal this producer so we have a database-precision timestamp and can + // use full equality checks moving forward. + let producer = all_producers.pop().unwrap(); + + let producer_time_modified = + read_time_modified(&datastore, producer.id()).await; + + // Whether it's expired depends on the expiration date we specify; it + // should show up if the expiration time is newer than the producer's + // time_modified... + let expired_producers = read_expired_producers( + &opctx, + &datastore, + producer_time_modified + Duration::from_secs(1), + ) + .await; + assert_eq!( + expired_producers.as_slice(), + std::slice::from_ref(&producer) + ); + + // ... but not if the the producer has been modified since the + // expiration. + let expired_producers = read_expired_producers( + &opctx, + &datastore, + producer_time_modified - Duration::from_secs(1), + ) + .await; + assert_eq!(expired_producers.as_slice(), &[]); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } } diff --git a/nexus/db-queries/src/db/datastore/physical_disk.rs b/nexus/db-queries/src/db/datastore/physical_disk.rs index 81fc14d1d7..e51d59075e 100644 --- a/nexus/db-queries/src/db/datastore/physical_disk.rs +++ b/nexus/db-queries/src/db/datastore/physical_disk.rs @@ -12,13 +12,21 @@ use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; +use crate::db::model::ApplySledFilterExt; +use crate::db::model::InvPhysicalDisk; use crate::db::model::PhysicalDisk; +use crate::db::model::PhysicalDiskKind; +use crate::db::model::PhysicalDiskPolicy; +use crate::db::model::PhysicalDiskState; use crate::db::model::Sled; +use crate::db::model::Zpool; use crate::db::pagination::paginated; +use crate::db::TransactionError; +use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; -use chrono::{DateTime, Utc}; +use chrono::Utc; use diesel::prelude::*; -use diesel::upsert::{excluded, on_constraint}; +use nexus_types::deployment::SledFilter; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; @@ -26,57 +34,215 @@ use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; +use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::GenericUuid; use uuid::Uuid; impl DataStore { + /// Inserts a physical disk and zpool together in a transaction + pub async fn physical_disk_and_zpool_insert( + &self, + opctx: &OpContext, + disk: PhysicalDisk, + zpool: Zpool, + ) -> Result<(), Error> { + let conn = &*self.pool_connection_authorized(&opctx).await?; + let err = OptionalError::new(); + + 
self.transaction_retry_wrapper("physical_disk_adoption") + .transaction(&conn, |conn| { + let err = err.clone(); + let disk = disk.clone(); + let zpool = zpool.clone(); + async move { + // Verify that the sled into which we are inserting the disk + // and zpool pair is still in-service. + // + // Although the "physical_disk_insert" and "zpool_insert" + // functions below check that the Sled hasn't been deleted, + // they do not currently check that the Sled has not been + // expunged. + Self::check_sled_in_service_on_connection( + &conn, + disk.sled_id, + ) + .await + .map_err(|txn_error| txn_error.into_diesel(&err))?; + + Self::physical_disk_insert_on_connection( + &conn, opctx, disk, + ) + .await + .map_err(|txn_error| txn_error.into_diesel(&err))?; + + Self::zpool_insert_on_connection(&conn, opctx, zpool) + .await + .map_err(|txn_error| txn_error.into_diesel(&err))?; + Ok(()) + } + }) + .await + .map_err(|e| { + match err.take() { + // A called function performed its own error propagation. + Some(txn_error) => txn_error.into(), + // The transaction setup/teardown itself encountered a diesel error. + None => public_error_from_diesel(e, ErrorHandler::Server), + } + })?; + Ok(()) + } + /// Stores a new physical disk in the database. /// /// - If the Vendor, Serial, and Model fields are the same as an existing - /// row in the table, the following fields may be updated: - /// - Sled ID - /// - Time Deleted - /// - Time Modified + /// row in the table, an error is thrown. /// - If the primary key (ID) is the same as an existing row in the table, /// an error is thrown. - pub async fn physical_disk_upsert( + pub async fn physical_disk_insert( &self, opctx: &OpContext, disk: PhysicalDisk, ) -> CreateResult { + let conn = &*self.pool_connection_authorized(&opctx).await?; + let disk = Self::physical_disk_insert_on_connection(&conn, opctx, disk) + .await?; + Ok(disk) + } + + pub async fn physical_disk_insert_on_connection( + conn: &async_bb8_diesel::Connection, + opctx: &OpContext, + disk: PhysicalDisk, + ) -> Result> { opctx.authorize(authz::Action::Read, &authz::FLEET).await?; use db::schema::physical_disk::dsl; - let now = Utc::now(); let sled_id = disk.sled_id; let disk_in_db = Sled::insert_resource( sled_id, - diesel::insert_into(dsl::physical_disk) - .values(disk.clone()) - .on_conflict(on_constraint("vendor_serial_model_unique")) - .do_update() - .set(( - dsl::sled_id.eq(excluded(dsl::sled_id)), - dsl::time_deleted.eq(Option::>::None), - dsl::time_modified.eq(now), - )), - ) - .insert_and_get_result_async( - &*self.pool_connection_authorized(&opctx).await?, + diesel::insert_into(dsl::physical_disk).values(disk.clone()), ) + .insert_and_get_result_async(conn) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { type_name: ResourceType::Sled, lookup_type: LookupType::ById(sled_id), }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel(e, ErrorHandler::Server) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::PhysicalDisk, + &disk.id().to_string(), + ), + ), })?; Ok(disk_in_db) } + pub async fn physical_disk_update_policy( + &self, + opctx: &OpContext, + id: Uuid, + policy: PhysicalDiskPolicy, + ) -> Result<(), Error> { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + use db::schema::physical_disk::dsl; + + diesel::update(dsl::physical_disk.filter(dsl::id.eq(id))) + .filter(dsl::time_deleted.is_null()) + .set(dsl::disk_policy.eq(policy)) + 
.execute_async(&*self.pool_connection_authorized(&opctx).await?) + .await + .map_err(|err| { + public_error_from_diesel(err, ErrorHandler::Server) + })?; + Ok(()) + } + + pub async fn physical_disk_update_state( + &self, + opctx: &OpContext, + id: Uuid, + state: PhysicalDiskState, + ) -> Result<(), Error> { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + use db::schema::physical_disk::dsl; + + diesel::update(dsl::physical_disk.filter(dsl::id.eq(id))) + .filter(dsl::time_deleted.is_null()) + .set(dsl::disk_state.eq(state)) + .execute_async(&*self.pool_connection_authorized(&opctx).await?) + .await + .map_err(|err| { + public_error_from_diesel(err, ErrorHandler::Server) + })?; + Ok(()) + } + + /// Returns all physical disks which: + /// + /// - Appear on in-service sleds + /// - Appear in inventory + /// - Do not have any records of expungement + /// + /// If "inventory_collection_id" is not associated with a collection, this + /// function returns an empty list, rather than failing. + pub async fn physical_disk_uninitialized_list( + &self, + opctx: &OpContext, + inventory_collection_id: CollectionUuid, + ) -> ListResultVec { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + + use db::schema::inv_physical_disk::dsl as inv_physical_disk_dsl; + use db::schema::physical_disk::dsl as physical_disk_dsl; + use db::schema::sled::dsl as sled_dsl; + + sled_dsl::sled + // If the sled is not in-service, drop the list immediately. + .filter(sled_dsl::time_deleted.is_null()) + .sled_filter(SledFilter::InService) + // Look up all inventory physical disks that could match this sled + .inner_join( + inv_physical_disk_dsl::inv_physical_disk.on( + inv_physical_disk_dsl::inv_collection_id + .eq(inventory_collection_id.into_untyped_uuid()) + .and(inv_physical_disk_dsl::sled_id.eq(sled_dsl::id)) + .and( + inv_physical_disk_dsl::variant + .eq(PhysicalDiskKind::U2), + ), + ), + ) + // Filter out any disks in the inventory for which we have ever had + // a control plane disk. + .filter(diesel::dsl::not(diesel::dsl::exists( + physical_disk_dsl::physical_disk + .select(0.into_sql::()) + .filter(physical_disk_dsl::sled_id.eq(sled_dsl::id)) + .filter(physical_disk_dsl::variant.eq(PhysicalDiskKind::U2)) + .filter( + physical_disk_dsl::vendor + .eq(inv_physical_disk_dsl::vendor), + ) + .filter( + physical_disk_dsl::model + .eq(inv_physical_disk_dsl::model), + ) + .filter( + physical_disk_dsl::serial + .eq(inv_physical_disk_dsl::serial), + ), + ))) + .select(InvPhysicalDisk::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + pub async fn physical_disk_list( &self, opctx: &OpContext, @@ -126,6 +292,7 @@ impl DataStore { .filter(dsl::serial.eq(serial)) .filter(dsl::model.eq(model)) .filter(dsl::sled_id.eq(sled_id)) + .filter(dsl::time_deleted.is_null()) .set(dsl::time_deleted.eq(now)) .execute_async(&*self.pool_connection_authorized(opctx).await?) 
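physical_disk_uninitialized_list above is essentially an anti-join: inventory disks on in-service sleds minus every vendor/serial/model triple the control plane has ever recorded. A logic-only sketch in plain Rust (deliberately not diesel) of the "never recorded" half of that rule, with the in-service sled filter left out:

    use std::collections::HashSet;

    /// Inventory-reported identity triples with no control plane record are
    /// the ones eligible for adoption.
    fn uninitialized<'a>(
        inventory: &'a [(String, String, String)],          // (vendor, serial, model)
        control_plane: &HashSet<(String, String, String)>,  // triples ever recorded
    ) -> Vec<&'a (String, String, String)> {
        inventory.iter().filter(|id| !control_plane.contains(*id)).collect()
    }
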
.await @@ -141,12 +308,17 @@ mod test { sled_baseboard_for_test, sled_system_hardware_for_test, }; use crate::db::datastore::test_utils::datastore_test; + use crate::db::lookup::LookupPath; use crate::db::model::{PhysicalDiskKind, Sled, SledUpdate}; use dropshot::PaginationOrder; use nexus_db_model::Generation; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Asset; + use omicron_common::api::external::ByteCount; + use omicron_common::disk::DiskIdentity; use omicron_test_utils::dev; + use sled_agent_client::types::DiskVariant; + use sled_agent_client::types::InventoryDisk; use std::net::{Ipv6Addr, SocketAddrV6}; use std::num::NonZeroU32; @@ -162,9 +334,11 @@ mod test { rack_id, Generation::new(), ); - db.sled_upsert(sled_update) + let (sled, _) = db + .sled_upsert(sled_update) .await - .expect("Could not upsert sled during test prep") + .expect("Could not upsert sled during test prep"); + sled } fn list_disk_params() -> DataPageParams<'static, Uuid> { @@ -175,99 +349,10 @@ mod test { } } - // Only checking some fields: - // - The UUID of the disk may actually not be the same as the upserted one; - // the "vendor/serial/model" value is the more critical unique identifier. - // NOTE: Could we derive a UUID from the VSM values? - // - The 'time' field precision can be modified slightly when inserted into - // the DB. - fn assert_disks_equal_ignore_uuid(lhs: &PhysicalDisk, rhs: &PhysicalDisk) { - assert_eq!(lhs.time_deleted().is_some(), rhs.time_deleted().is_some()); - assert_eq!(lhs.vendor, rhs.vendor); - assert_eq!(lhs.serial, rhs.serial); - assert_eq!(lhs.model, rhs.model); - assert_eq!(lhs.variant, rhs.variant); - assert_eq!(lhs.sled_id, rhs.sled_id); - } - #[tokio::test] - async fn physical_disk_upsert_different_uuid_idempotent() { - let logctx = dev::test_setup_log( - "physical_disk_upsert_different_uuid_idempotent", - ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - - let sled = create_test_sled(&datastore).await; - let sled_id = sled.id(); - - // Insert a disk - let disk = PhysicalDisk::new( - String::from("Oxide"), - String::from("123"), - String::from("FakeDisk"), - PhysicalDiskKind::U2, - sled_id, - ); - let first_observed_disk = datastore - .physical_disk_upsert(&opctx, disk.clone()) - .await - .expect("Failed first attempt at upserting disk"); - assert_eq!(disk.uuid(), first_observed_disk.uuid()); - assert_disks_equal_ignore_uuid(&disk, &first_observed_disk); - - // Observe the inserted disk - let pagparams = list_disk_params(); - let disks = datastore - .sled_list_physical_disks(&opctx, sled_id, &pagparams) - .await - .expect("Failed to list physical disks"); - assert_eq!(disks.len(), 1); - assert_eq!(disk.uuid(), disks[0].uuid()); - assert_disks_equal_ignore_uuid(&disk, &disks[0]); - - // Insert the same disk, with a different UUID primary key - let disk_again = PhysicalDisk::new( - String::from("Oxide"), - String::from("123"), - String::from("FakeDisk"), - PhysicalDiskKind::U2, - sled_id, - ); - let second_observed_disk = datastore - .physical_disk_upsert(&opctx, disk_again.clone()) - .await - .expect("Failed second upsert of physical disk"); - // This check is pretty important - note that we return the original - // UUID, not the new one. 
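The time_deleted.is_null() filter added to physical_disk_delete above makes the soft delete idempotent: a repeated call matches no rows instead of re-stamping time_deleted. A hypothetical double call, using the same argument shape as the tests below:

    datastore
        .physical_disk_delete(
            &opctx,
            disk.vendor.clone(),
            disk.serial.clone(),
            disk.model.clone(),
            disk.sled_id,
        )
        .await
        .expect("first delete");
    // Calling again is a no-op rather than an error or a second timestamp.
    datastore
        .physical_disk_delete(
            &opctx,
            disk.vendor.clone(),
            disk.serial.clone(),
            disk.model.clone(),
            disk.sled_id,
        )
        .await
        .expect("repeat delete is a no-op");
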
- assert_ne!(disk_again.uuid(), second_observed_disk.uuid()); - assert_eq!(disk_again.id(), second_observed_disk.id()); - assert_disks_equal_ignore_uuid(&disk_again, &second_observed_disk); - assert!( - first_observed_disk.time_modified() - <= second_observed_disk.time_modified() - ); - - let disks = datastore - .sled_list_physical_disks(&opctx, sled_id, &pagparams) - .await - .expect("Failed to re-list physical disks"); - - // We'll use the old primary key - assert_eq!(disks.len(), 1); - assert_eq!(disk.uuid(), disks[0].uuid()); - assert_ne!(disk_again.uuid(), disks[0].uuid()); - assert_disks_equal_ignore_uuid(&disk, &disks[0]); - assert_disks_equal_ignore_uuid(&disk_again, &disks[0]); - - db.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } - - #[tokio::test] - async fn physical_disk_upsert_same_uuid_idempotent() { + async fn physical_disk_insert_same_uuid_collides() { let logctx = - dev::test_setup_log("physical_disk_upsert_same_uuid_idempotent"); + dev::test_setup_log("physical_disk_insert_same_uuid_collides"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; @@ -276,6 +361,7 @@ mod test { // Insert a disk let disk = PhysicalDisk::new( + Uuid::new_v4(), String::from("Oxide"), String::from("123"), String::from("FakeDisk"), @@ -283,41 +369,31 @@ mod test { sled_id, ); let first_observed_disk = datastore - .physical_disk_upsert(&opctx, disk.clone()) + .physical_disk_insert(&opctx, disk.clone()) .await .expect("Failed first attempt at upserting disk"); - assert_eq!(disk.uuid(), first_observed_disk.uuid()); + assert_eq!(disk.id(), first_observed_disk.id()); // Insert a disk with an identical UUID - let second_observed_disk = datastore - .physical_disk_upsert(&opctx, disk.clone()) + let err = datastore + .physical_disk_insert(&opctx, disk.clone()) .await - .expect("Should have succeeded upserting disk"); - assert_eq!(disk.uuid(), second_observed_disk.uuid()); + .expect_err("Should have failed upserting disk"); + assert!( - first_observed_disk.time_modified() - <= second_observed_disk.time_modified() - ); - assert_disks_equal_ignore_uuid( - &first_observed_disk, - &second_observed_disk, + err.to_string() + .contains("Object (of type PhysicalDisk) already exists"), + "{err}" ); - let pagparams = list_disk_params(); - let disks = datastore - .sled_list_physical_disks(&opctx, sled_id, &pagparams) - .await - .expect("Failed to list physical disks"); - assert_eq!(disks.len(), 1); - db.cleanup().await.unwrap(); logctx.cleanup_successful(); } #[tokio::test] - async fn physical_disk_upsert_different_disks() { + async fn physical_disk_insert_different_disks() { let logctx = - dev::test_setup_log("physical_disk_upsert_different_disks"); + dev::test_setup_log("physical_disk_insert_different_disks"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; @@ -326,6 +402,7 @@ mod test { // Insert a disk let disk = PhysicalDisk::new( + Uuid::new_v4(), String::from("Oxide"), String::from("123"), String::from("FakeDisk"), @@ -333,12 +410,13 @@ mod test { sled_id, ); datastore - .physical_disk_upsert(&opctx, disk.clone()) + .physical_disk_insert(&opctx, disk.clone()) .await .expect("Failed first attempt at upserting disk"); // Insert a second disk let disk = PhysicalDisk::new( + Uuid::new_v4(), String::from("Noxide"), String::from("456"), String::from("UnrealDisk"), @@ -346,7 +424,7 @@ mod test { sled_id, ); datastore - .physical_disk_upsert(&opctx, disk.clone()) + 
.physical_disk_insert(&opctx, disk.clone()) .await .expect("Failed first attempt at upserting disk"); @@ -371,6 +449,7 @@ mod test { // Insert a disk let disk = PhysicalDisk::new( + Uuid::new_v4(), String::from("Oxide"), String::from("123"), String::from("FakeDisk"), @@ -378,7 +457,7 @@ mod test { sled.id(), ); datastore - .physical_disk_upsert(&opctx, disk.clone()) + .physical_disk_insert(&opctx, disk.clone()) .await .expect("Failed first attempt at upserting disk"); let pagparams = list_disk_params(); @@ -427,9 +506,9 @@ mod test { // - Disk is detached from Sled A (and the detach is reported to Nexus) // - Disk is attached into Sled B #[tokio::test] - async fn physical_disk_upsert_delete_reupsert_new_sled() { + async fn physical_disk_insert_delete_reupsert_new_sled() { let logctx = dev::test_setup_log( - "physical_disk_upsert_delete_reupsert_new_sled", + "physical_disk_insert_delete_reupsert_new_sled", ); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; @@ -439,6 +518,7 @@ mod test { // Insert a disk let disk = PhysicalDisk::new( + Uuid::new_v4(), String::from("Oxide"), String::from("123"), String::from("FakeDisk"), @@ -446,7 +526,7 @@ mod test { sled_a.id(), ); datastore - .physical_disk_upsert(&opctx, disk.clone()) + .physical_disk_insert(&opctx, disk.clone()) .await .expect("Failed first attempt at upserting disk"); let pagparams = list_disk_params(); @@ -483,8 +563,9 @@ mod test { .expect("Failed to list physical disks"); assert!(disks.is_empty()); - // "Report the disk" from the second sled + // Attach the disk to the second sled let disk = PhysicalDisk::new( + Uuid::new_v4(), String::from("Oxide"), String::from("123"), String::from("FakeDisk"), @@ -492,7 +573,7 @@ mod test { sled_b.id(), ); datastore - .physical_disk_upsert(&opctx, disk.clone()) + .physical_disk_insert(&opctx, disk.clone()) .await .expect("Failed second attempt at upserting disk"); @@ -519,9 +600,9 @@ mod test { // notification to Nexus). 
// - Disk is attached into Sled B #[tokio::test] - async fn physical_disk_upsert_reupsert_new_sled() { + async fn physical_disk_insert_reupsert_new_sled() { let logctx = - dev::test_setup_log("physical_disk_upsert_reupsert_new_sled"); + dev::test_setup_log("physical_disk_insert_reupsert_new_sled"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; @@ -530,6 +611,7 @@ mod test { // Insert a disk let disk = PhysicalDisk::new( + Uuid::new_v4(), String::from("Oxide"), String::from("123"), String::from("FakeDisk"), @@ -537,7 +619,7 @@ mod test { sled_a.id(), ); datastore - .physical_disk_upsert(&opctx, disk.clone()) + .physical_disk_insert(&opctx, disk.clone()) .await .expect("Failed first attempt at upserting disk"); let pagparams = list_disk_params(); @@ -552,8 +634,21 @@ mod test { .expect("Failed to list physical disks"); assert!(disks.is_empty()); + // Remove the disk from the first sled + datastore + .physical_disk_delete( + &opctx, + disk.vendor.clone(), + disk.serial.clone(), + disk.model.clone(), + disk.sled_id, + ) + .await + .expect("Failed to delete disk"); + // "Report the disk" from the second sled let disk = PhysicalDisk::new( + Uuid::new_v4(), String::from("Oxide"), String::from("123"), String::from("FakeDisk"), @@ -561,7 +656,7 @@ mod test { sled_b.id(), ); datastore - .physical_disk_upsert(&opctx, disk.clone()) + .physical_disk_insert(&opctx, disk.clone()) .await .expect("Failed second attempt at upserting disk"); @@ -579,4 +674,288 @@ mod test { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + // Most of this data doesn't matter, but adds a sled + // to an inventory with a supplied set of disks. + fn add_sled_to_inventory( + builder: &mut nexus_inventory::CollectionBuilder, + sled: &Sled, + disks: Vec, + ) { + builder + .found_sled_inventory( + "fake sled agent", + sled_agent_client::types::Inventory { + baseboard: sled_agent_client::types::Baseboard::Gimlet { + identifier: sled.serial_number().to_string(), + model: sled.part_number().to_string(), + revision: 0, + }, + reservoir_size: ByteCount::from(1024), + sled_role: sled_agent_client::types::SledRole::Gimlet, + sled_agent_address: "[::1]:56792".parse().unwrap(), + sled_id: sled.id(), + usable_hardware_threads: 10, + usable_physical_ram: ByteCount::from(1024 * 1024), + disks, + zpools: vec![], + }, + ) + .unwrap(); + } + + fn create_inv_disk(serial: String, slot: i64) -> InventoryDisk { + InventoryDisk { + identity: DiskIdentity { + serial, + vendor: "vendor".to_string(), + model: "model".to_string(), + }, + variant: DiskVariant::U2, + slot, + } + } + + fn create_disk_zpool_combo( + sled_id: Uuid, + inv_disk: &InventoryDisk, + ) -> (PhysicalDisk, Zpool) { + let disk = PhysicalDisk::new( + Uuid::new_v4(), + inv_disk.identity.vendor.clone(), + inv_disk.identity.serial.clone(), + inv_disk.identity.model.clone(), + PhysicalDiskKind::U2, + sled_id, + ); + + let zpool = Zpool::new(Uuid::new_v4(), sled_id, disk.id()); + (disk, zpool) + } + + #[tokio::test] + async fn physical_disk_cannot_insert_to_expunged_sled() { + let logctx = + dev::test_setup_log("physical_disk_cannot_insert_to_expunged_sled"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let sled = create_test_sled(&datastore).await; + + // We can insert a disk into a sled that is not yet expunged + let inv_disk = create_inv_disk("serial-001".to_string(), 1); + let (disk, zpool) = create_disk_zpool_combo(sled.id(), 
&inv_disk); + datastore + .physical_disk_and_zpool_insert(&opctx, disk, zpool) + .await + .unwrap(); + + // Mark the sled as expunged + let sled_lookup = + LookupPath::new(&opctx, &datastore).sled_id(sled.id()); + let (authz_sled,) = + sled_lookup.lookup_for(authz::Action::Modify).await.unwrap(); + datastore + .sled_set_policy_to_expunged(&opctx, &authz_sled) + .await + .unwrap(); + + // Now that the sled is expunged, inserting the disk should fail + let inv_disk = create_inv_disk("serial-002".to_string(), 2); + let (disk, zpool) = create_disk_zpool_combo(sled.id(), &inv_disk); + let err = datastore + .physical_disk_and_zpool_insert(&opctx, disk, zpool) + .await + .unwrap_err(); + + let expected = format!("Sled {} is not in service", sled.id()); + let actual = err.to_string(); + assert!( + actual.contains(&expected), + "Expected string: {expected} within actual error: {actual}", + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn physical_disk_uninitialized_list() { + let logctx = dev::test_setup_log("physical_disk_uninitialized_list"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let sled_a = create_test_sled(&datastore).await; + let sled_b = create_test_sled(&datastore).await; + + // No inventory -> No uninitialized disks + let uninitialized_disks = datastore + .physical_disk_uninitialized_list( + &opctx, + CollectionUuid::new_v4(), // Collection that does not exist + ) + .await + .expect("Failed to look up uninitialized disks"); + assert!(uninitialized_disks.is_empty()); + + // Create inventory disks for both sleds + let mut builder = nexus_inventory::CollectionBuilder::new("test"); + let disks_a = vec![ + create_inv_disk("serial-001".to_string(), 1), + create_inv_disk("serial-002".to_string(), 2), + create_inv_disk("serial-003".to_string(), 3), + ]; + let disks_b = vec![ + create_inv_disk("serial-101".to_string(), 1), + create_inv_disk("serial-102".to_string(), 2), + create_inv_disk("serial-103".to_string(), 3), + ]; + add_sled_to_inventory(&mut builder, &sled_a, disks_a.clone()); + add_sled_to_inventory(&mut builder, &sled_b, disks_b.clone()); + let collection = builder.build(); + let collection_id = collection.id; + datastore + .inventory_insert_collection(&opctx, &collection) + .await + .expect("failed to insert collection"); + + // Now when we list the uninitialized disks, we should see everything in + // the inventory. + let uninitialized_disks = datastore + .physical_disk_uninitialized_list(&opctx, collection_id) + .await + .expect("Failed to list uninitialized disks"); + assert_eq!(uninitialized_disks.len(), 6); + + // Normalize the data a bit -- convert to nexus types, and sort vecs for + // stability in the comparison. + let mut uninitialized_disks: Vec = + uninitialized_disks.into_iter().map(|d| d.into()).collect(); + uninitialized_disks + .sort_by(|a, b| a.identity.partial_cmp(&b.identity).unwrap()); + let mut expected_disks: Vec = + disks_a + .iter() + .map(|d| d.clone().into()) + .chain(disks_b.iter().map(|d| d.clone().into())) + .collect(); + expected_disks + .sort_by(|a, b| a.identity.partial_cmp(&b.identity).unwrap()); + assert_eq!(uninitialized_disks, expected_disks); + + // Let's create control plane objects for some of these disks. + // + // They should no longer show up when we list uninitialized devices. + // + // This creates disks for: 001, 002, and 101. 
+ // It leaves the following uninitialized: 003, 102, 103 + let (disk_001, zpool) = + create_disk_zpool_combo(sled_a.id(), &disks_a[0]); + datastore + .physical_disk_and_zpool_insert(&opctx, disk_001, zpool) + .await + .unwrap(); + let (disk_002, zpool) = + create_disk_zpool_combo(sled_a.id(), &disks_a[1]); + datastore + .physical_disk_and_zpool_insert(&opctx, disk_002, zpool) + .await + .unwrap(); + let (disk_101, zpool) = + create_disk_zpool_combo(sled_b.id(), &disks_b[0]); + datastore + .physical_disk_and_zpool_insert(&opctx, disk_101, zpool) + .await + .unwrap(); + + let uninitialized_disks = datastore + .physical_disk_uninitialized_list(&opctx, collection_id) + .await + .expect("Failed to list uninitialized disks"); + assert_eq!(uninitialized_disks.len(), 3); + + // Pay careful attention to our indexing below. + // + // We're grabbing the last disk of "disks_a" (which still is + // uninitailized) and the last two disks of "disks_b" (of which both are + // still uninitialized). + let mut uninitialized_disks: Vec = + uninitialized_disks.into_iter().map(|d| d.into()).collect(); + uninitialized_disks + .sort_by(|a, b| a.identity.partial_cmp(&b.identity).unwrap()); + let mut expected_disks: Vec = + disks_a[2..3] + .iter() + .map(|d| d.clone().into()) + .chain(disks_b[1..3].iter().map(|d| d.clone().into())) + .collect(); + expected_disks + .sort_by(|a, b| a.identity.partial_cmp(&b.identity).unwrap()); + assert_eq!(uninitialized_disks, expected_disks); + + // Create physical disks for all remaining devices. + // + // Observe no remaining uninitialized disks. + let (disk_003, zpool) = + create_disk_zpool_combo(sled_a.id(), &disks_a[2]); + datastore + .physical_disk_and_zpool_insert(&opctx, disk_003.clone(), zpool) + .await + .unwrap(); + let (disk_102, zpool) = + create_disk_zpool_combo(sled_b.id(), &disks_b[1]); + datastore + .physical_disk_and_zpool_insert(&opctx, disk_102.clone(), zpool) + .await + .unwrap(); + let (disk_103, zpool) = + create_disk_zpool_combo(sled_b.id(), &disks_b[2]); + datastore + .physical_disk_and_zpool_insert(&opctx, disk_103.clone(), zpool) + .await + .unwrap(); + + let uninitialized_disks = datastore + .physical_disk_uninitialized_list(&opctx, collection_id) + .await + .expect("Failed to list uninitialized disks"); + assert_eq!(uninitialized_disks.len(), 0); + + // Expunge some disks, observe that they do not re-appear as + // initialized. + use db::schema::physical_disk::dsl; + + // Set a disk to "deleted". 
+ let now = Utc::now(); + diesel::update(dsl::physical_disk) + .filter(dsl::id.eq(disk_003.id())) + .filter(dsl::time_deleted.is_null()) + .set(dsl::time_deleted.eq(now)) + .execute_async( + &*datastore.pool_connection_authorized(&opctx).await.unwrap(), + ) + .await + .unwrap(); + + // Set another disk to "expunged" + diesel::update(dsl::physical_disk) + .filter(dsl::id.eq(disk_102.id())) + .filter(dsl::time_deleted.is_null()) + .set(dsl::disk_policy.eq(PhysicalDiskPolicy::Expunged)) + .execute_async( + &*datastore.pool_connection_authorized(&opctx).await.unwrap(), + ) + .await + .unwrap(); + + // The set of uninitialized disks should remain at zero + let uninitialized_disks = datastore + .physical_disk_uninitialized_list(&opctx, collection_id) + .await + .expect("Failed to list uninitialized disks"); + assert_eq!(uninitialized_disks.len(), 0); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } } diff --git a/nexus/db-queries/src/db/datastore/probe.rs b/nexus/db-queries/src/db/datastore/probe.rs index f1e737e353..a96f857163 100644 --- a/nexus/db-queries/src/db/datastore/probe.rs +++ b/nexus/db-queries/src/db/datastore/probe.rs @@ -15,7 +15,6 @@ use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use nexus_db_model::IncompleteNetworkInterface; use nexus_db_model::Probe; use nexus_db_model::VpcSubnet; -use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; @@ -278,20 +277,19 @@ impl super::DataStore { &self, opctx: &OpContext, authz_project: &authz::Project, - new_probe: ¶ms::ProbeCreate, + probe: &Probe, + ip_pool: Option, ) -> CreateResult { //TODO in transaction use db::schema::probe::dsl; let pool = self.pool_connection_authorized(opctx).await?; - let probe = Probe::from_create(new_probe, authz_project.id()); - let _eip = self .allocate_probe_ephemeral_ip( opctx, Uuid::new_v4(), probe.id(), - new_probe.ip_pool.clone().map(Into::into), + ip_pool, ) .await?; diff --git a/nexus/db-queries/src/db/datastore/pub_test_utils.rs b/nexus/db-queries/src/db/datastore/pub_test_utils.rs new file mode 100644 index 0000000000..5259a03656 --- /dev/null +++ b/nexus/db-queries/src/db/datastore/pub_test_utils.rs @@ -0,0 +1,66 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Test support code that can be enabled by dependencies via this crate's +//! `testing` feature. +//! +//! This feature should only be enabled under `dev-dependencies` to avoid this +//! test support code leaking into release binaries. 
+ +use crate::authz; +use crate::context::OpContext; +use crate::db; +use crate::db::DataStore; +use dropshot::test_util::LogContext; +use omicron_test_utils::dev::db::CockroachInstance; +use std::sync::Arc; +use uuid::Uuid; + +/// Constructs a DataStore for use in test suites that has preloaded the +/// built-in users, roles, and role assignments that are needed for basic +/// operation +#[cfg(any(test, feature = "testing"))] +pub async fn datastore_test( + logctx: &LogContext, + db: &CockroachInstance, + rack_id: Uuid, +) -> (OpContext, Arc) { + use crate::authn; + + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = Arc::new(db::Pool::new(&logctx.log, &cfg)); + let datastore = + Arc::new(DataStore::new(&logctx.log, pool, None).await.unwrap()); + + // Create an OpContext with the credentials of "db-init" just for the + // purpose of loading the built-in users, roles, and assignments. + let opctx = OpContext::for_background( + logctx.log.new(o!()), + Arc::new(authz::Authz::new(&logctx.log)), + authn::Context::internal_db_init(), + Arc::clone(&datastore), + ); + + // TODO: Can we just call "Populate" instead of doing this? + datastore.load_builtin_users(&opctx).await.unwrap(); + datastore.load_builtin_roles(&opctx).await.unwrap(); + datastore.load_builtin_role_asgns(&opctx).await.unwrap(); + datastore.load_builtin_silos(&opctx).await.unwrap(); + datastore.load_builtin_projects(&opctx).await.unwrap(); + datastore.load_builtin_vpcs(&opctx).await.unwrap(); + datastore.load_silo_users(&opctx).await.unwrap(); + datastore.load_silo_user_role_assignments(&opctx).await.unwrap(); + datastore + .load_builtin_fleet_virtual_provisioning_collection(&opctx) + .await + .unwrap(); + datastore.load_builtin_rack_data(&opctx, rack_id).await.unwrap(); + + // Create an OpContext with the credentials of "test-privileged" for general + // testing. 
+ let opctx = + OpContext::for_tests(logctx.log.new(o!()), Arc::clone(&datastore)); + + (opctx, datastore) +} diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 94e033ec3c..d836185d87 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -21,12 +21,15 @@ use crate::db::fixed_data::vpc_subnet::DNS_VPC_SUBNET; use crate::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; use crate::db::fixed_data::vpc_subnet::NTP_VPC_SUBNET; use crate::db::identity::Asset; +use crate::db::lookup::LookupPath; use crate::db::model::Dataset; use crate::db::model::IncompleteExternalIp; +use crate::db::model::PhysicalDisk; use crate::db::model::Rack; use crate::db::model::Zpool; use crate::db::pagination::paginated; use crate::db::pool::DbConnection; +use crate::db::TransactionError; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; @@ -34,36 +37,38 @@ use diesel::prelude::*; use diesel::result::Error as DieselError; use diesel::upsert::excluded; use ipnetwork::IpNetwork; -use nexus_db_model::DnsGroup; -use nexus_db_model::DnsZone; -use nexus_db_model::ExternalIp; use nexus_db_model::IncompleteNetworkInterface; use nexus_db_model::InitialDnsGroup; use nexus_db_model::PasswordHashString; use nexus_db_model::SiloUser; use nexus_db_model::SiloUserPasswordHash; +use nexus_db_model::SledState; use nexus_db_model::SledUnderlaySubnetAllocation; +use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintTarget; -use nexus_types::deployment::OmicronZoneType; +use nexus_types::deployment::BlueprintZoneConfig; +use nexus_types::deployment::BlueprintZoneFilter; +use nexus_types::deployment::BlueprintZoneType; +use nexus_types::deployment::OmicronZoneExternalIp; use nexus_types::external_api::params as external_params; use nexus_types::external_api::shared; use nexus_types::external_api::shared::IdentityType; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::SiloRole; use nexus_types::identity::Resource; -use nexus_types::internal_api::params as internal_params; +use omicron_common::api::external::AllowedSourceIps; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; -use omicron_common::api::external::InternalContext; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use omicron_common::bail_unless; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use slog_error_chain::InlineErrorChain; -use std::net::IpAddr; use std::sync::{Arc, OnceLock}; use uuid::Uuid; @@ -73,7 +78,8 @@ pub struct RackInit { pub rack_id: Uuid, pub rack_subnet: IpNetwork, pub blueprint: Blueprint, - pub services: Vec, + pub physical_disks: Vec, + pub zpools: Vec, pub datasets: Vec, pub service_ip_pool_ranges: Vec, pub internal_dns: InitialDnsGroup, @@ -83,6 +89,7 @@ pub struct RackInit { pub recovery_user_id: external_params::UserId, pub recovery_user_password_hash: omicron_passwords::PasswordHashString, pub dns_update: DnsVersionUpdateBuilder, + pub allowed_source_ips: AllowedSourceIps, } /// Possible errors while trying to initialize rack @@ -92,8 +99,9 @@ enum RackInitError { AddingNic(Error), BlueprintInsert(Error), 
BlueprintTargetSet(Error), - ServiceInsert(Error), DatasetInsert { err: AsyncInsertError, zpool_id: Uuid }, + PhysicalDiskInsert(Error), + ZpoolInsert(Error), RackUpdate { err: DieselError, rack_id: Uuid }, DnsSerialization(Error), Silo(Error), @@ -102,6 +110,8 @@ enum RackInitError { Retryable(DieselError), // Other non-retryable database error Database(DieselError), + // Error adding initial allowed source IP list + AllowedSourceIpError(Error), } // Catch-all for Diesel error conversion into RackInitError, which @@ -130,9 +140,8 @@ impl From for Error { public_error_from_diesel(e, ErrorHandler::Server) } }, - RackInitError::ServiceInsert(err) => Error::internal_error( - &format!("failed to insert Service record: {:#}", err), - ), + RackInitError::PhysicalDiskInsert(err) => err, + RackInitError::ZpoolInsert(err) => err, RackInitError::BlueprintInsert(err) => Error::internal_error( &format!("failed to insert Blueprint: {:#}", err), ), @@ -166,10 +175,20 @@ impl From for Error { "failed operation due to database error: {:#}", err )), + RackInitError::AllowedSourceIpError(err) => err, } } } +/// Possible results of attempting a new sled underlay allocation +#[derive(Debug, Clone)] +pub enum SledUnderlayAllocationResult { + /// A new allocation was created + New(SledUnderlaySubnetAllocation), + /// A prior allocation associated with a commissioned sled was found + CommissionedSled(SledUnderlaySubnetAllocation), +} + impl DataStore { pub async fn rack_list( &self, @@ -293,7 +312,7 @@ impl DataStore { opctx: &OpContext, rack_id: Uuid, hw_baseboard_id: Uuid, - ) -> Result { + ) -> Result { // Fetch all the existing allocations via self.rack_id let allocations = self.rack_subnet_allocations(opctx, rack_id).await?; @@ -304,17 +323,50 @@ impl DataStore { const MIN_SUBNET_OCTET: i16 = 33; let mut new_allocation = SledUnderlaySubnetAllocation { rack_id, - sled_id: Uuid::new_v4(), + sled_id: SledUuid::new_v4().into(), subnet_octet: MIN_SUBNET_OCTET, hw_baseboard_id, }; - let mut allocation_already_exists = false; for allocation in allocations { if allocation.hw_baseboard_id == new_allocation.hw_baseboard_id { - // We already have an allocation for this sled. - new_allocation = allocation; - allocation_already_exists = true; - break; + // We already have an allocation for this sled, but we need to + // check whether this allocation matches a sled that has been + // decommissioned. (The same physical sled, tracked by + // `hw_baseboard_id`, can be logically removed from the control + // plane via decommissioning, then added back again later, which + // requires allocating a new subnet.) + match LookupPath::new(opctx, self) + .sled_id(allocation.sled_id.into_untyped_uuid()) + .optional_fetch_for(authz::Action::Read) + .await? + .map(|(_, sled)| sled.state()) + { + Some(SledState::Active) => { + // This allocation is for an active sled; return the + // existing allocation. + return Ok( + SledUnderlayAllocationResult::CommissionedSled( + allocation, + ), + ); + } + Some(SledState::Decommissioned) => { + // This allocation was for a now-decommissioned sled; + // ignore it and keep searching. + } + None => { + // This allocation is still "new" in the sense that it + // is assigned to a sled that has not yet upserted + // itself to join the control plane. We must return + // `::New(_)` here to ensure idempotence of allocation + // (e.g., if we allocate a sled, but its sled-agent + // crashes before it can upsert itself, we need to be + // able to get the same allocation back again). 
+ return Ok(SledUnderlayAllocationResult::New( + allocation, + )); + } + } } if allocation.subnet_octet == new_allocation.subnet_octet { bail_unless!( @@ -330,11 +382,8 @@ impl DataStore { // allocations when sleds are being added. We will need another // mechanism ala generation numbers when we must interleave additions // and removals of sleds. - if !allocation_already_exists { - self.sled_subnet_allocation_insert(opctx, &new_allocation).await?; - } - - Ok(new_allocation) + self.sled_subnet_allocation_insert(opctx, &new_allocation).await?; + Ok(SledUnderlayAllocationResult::New(new_allocation)) } /// Return all current underlay allocations for the rack. @@ -461,123 +510,147 @@ impl DataStore { Ok(()) } - async fn rack_populate_service_records( + async fn rack_populate_service_networking_records( &self, conn: &async_bb8_diesel::Connection, log: &slog::Logger, service_pool: &db::model::IpPool, - service: internal_params::ServicePutRequest, + zone_config: &BlueprintZoneConfig, ) -> Result<(), RackInitError> { - use internal_params::ServiceKind; - - let service_db = db::model::Service::new( - service.service_id, - service.sled_id, - service.zone_id, - service.address, - service.kind.clone().into(), - ); - self.service_upsert_conn(conn, service_db).await.map_err( - |e| match e.retryable() { - Retryable(e) => RackInitError::Retryable(e), - NotRetryable(e) => RackInitError::ServiceInsert(e.into()), - }, - )?; - // For services with external connectivity, we record their // explicit IP allocation and create a service NIC as well. - let service_ip_nic = match service.kind { - ServiceKind::ExternalDns { external_address, ref nic } - | ServiceKind::Nexus { external_address, ref nic } => { - let db_ip = IncompleteExternalIp::for_service_explicit( - Uuid::new_v4(), - &db::model::Name(nic.name.clone()), - &format!("{}", service.kind), - service.service_id, - service_pool.id(), - external_address, - ); - let vpc_subnet = match service.kind { - ServiceKind::ExternalDns { .. } => DNS_VPC_SUBNET.clone(), - ServiceKind::Nexus { .. } => NEXUS_VPC_SUBNET.clone(), - _ => unreachable!(), - }; + let zone_type = &zone_config.zone_type; + let service_ip_nic = match zone_type { + BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { nic, dns_address, .. }, + ) => { + let external_ip = + OmicronZoneExternalIp::Floating(dns_address.into_ip()); let db_nic = IncompleteNetworkInterface::new_service( nic.id, - service.service_id, - vpc_subnet, + zone_config.id.into_untyped_uuid(), + DNS_VPC_SUBNET.clone(), IdentityMetadataCreateParams { name: nic.name.clone(), - description: format!("{} service vNIC", service.kind), + description: format!( + "{} service vNIC", + zone_type.kind() + ), }, nic.ip, nic.mac, nic.slot, ) .map_err(|e| RackInitError::AddingNic(e))?; - Some((db_ip, db_nic)) + Some((external_ip, db_nic)) } - ServiceKind::BoundaryNtp { snat, ref nic } => { - let db_ip = IncompleteExternalIp::for_service_explicit_snat( - Uuid::new_v4(), - service.service_id, - service_pool.id(), - snat.ip, - (snat.first_port, snat.last_port), - ); + BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + nic, + external_ip, + .. 
+ }) => { + let external_ip = OmicronZoneExternalIp::Floating(*external_ip); + let db_nic = IncompleteNetworkInterface::new_service( + nic.id, + zone_config.id.into_untyped_uuid(), + NEXUS_VPC_SUBNET.clone(), + IdentityMetadataCreateParams { + name: nic.name.clone(), + description: format!( + "{} service vNIC", + zone_type.kind() + ), + }, + nic.ip, + nic.mac, + nic.slot, + ) + .map_err(|e| RackInitError::AddingNic(e))?; + Some((external_ip, db_nic)) + } + BlueprintZoneType::BoundaryNtp( + blueprint_zone_type::BoundaryNtp { external_ip, nic, .. }, + ) => { + let external_ip = OmicronZoneExternalIp::Snat(*external_ip); let db_nic = IncompleteNetworkInterface::new_service( nic.id, - service.service_id, + zone_config.id.into_untyped_uuid(), NTP_VPC_SUBNET.clone(), IdentityMetadataCreateParams { name: nic.name.clone(), - description: format!("{} service vNIC", service.kind), + description: format!( + "{} service vNIC", + zone_type.kind() + ), }, nic.ip, nic.mac, nic.slot, ) .map_err(|e| RackInitError::AddingNic(e))?; - Some((db_ip, db_nic)) + Some((external_ip, db_nic)) } - _ => None, + BlueprintZoneType::InternalNtp(_) + | BlueprintZoneType::Clickhouse(_) + | BlueprintZoneType::ClickhouseKeeper(_) + | BlueprintZoneType::CockroachDb(_) + | BlueprintZoneType::Crucible(_) + | BlueprintZoneType::CruciblePantry(_) + | BlueprintZoneType::InternalDns(_) + | BlueprintZoneType::Oximeter(_) => None, }; - if let Some((db_ip, db_nic)) = service_ip_nic { - Self::allocate_external_ip_on_connection(conn, db_ip) - .await - .map_err(|err| { - error!( - log, - "Initializing Rack: Failed to allocate \ - IP address for {}", - service.kind; - "err" => %err, - ); - match err.retryable() { - Retryable(e) => RackInitError::Retryable(e), - NotRetryable(e) => RackInitError::AddingIp(e.into()), - } - })?; + let Some((external_ip, db_nic)) = service_ip_nic else { + info!( + log, + "No networking records needed for {} service", + zone_type.kind(), + ); + return Ok(()); + }; + let db_ip = IncompleteExternalIp::for_omicron_zone( + service_pool.id(), + external_ip, + zone_config.id, + zone_config.zone_type.kind(), + ); + Self::allocate_external_ip_on_connection(conn, db_ip).await.map_err( + |err| { + error!( + log, + "Initializing Rack: Failed to allocate \ + IP address for {}", + zone_type.kind(); + "err" => %err, + ); + match err.retryable() { + Retryable(e) => RackInitError::Retryable(e), + NotRetryable(e) => RackInitError::AddingIp(e.into()), + } + }, + )?; - self.create_network_interface_raw_conn(conn, db_nic) - .await - .map(|_| ()) - .or_else(|e| { - use db::queries::network_interface::InsertError; - match e { - InsertError::InterfaceAlreadyExists( - _, - db::model::NetworkInterfaceKind::Service, - ) => Ok(()), - InsertError::Retryable(err) => { - Err(RackInitError::Retryable(err)) - } - _ => Err(RackInitError::AddingNic(e.into_external())), + self.create_network_interface_raw_conn(conn, db_nic) + .await + .map(|_| ()) + .or_else(|e| { + use db::queries::network_interface::InsertError; + match e { + InsertError::InterfaceAlreadyExists( + _, + db::model::NetworkInterfaceKind::Service, + ) => Ok(()), + InsertError::Retryable(err) => { + Err(RackInitError::Retryable(err)) } - })?; - } + _ => Err(RackInitError::AddingNic(e.into_external())), + } + })?; + info!( + log, + "Inserted networking records for {} service", + zone_type.kind(), + ); - info!(log, "Inserted records for {} service", service.kind); Ok(()) } @@ -613,7 +686,8 @@ impl DataStore { async move { let rack_id = rack_init.rack_id; let blueprint = 
rack_init.blueprint; - let services = rack_init.services; + let physical_disks = rack_init.physical_disks; + let zpools = rack_init.zpools; let datasets = rack_init.datasets; let service_ip_pool_ranges = rack_init.service_ip_pool_ranges; @@ -644,7 +718,14 @@ impl DataStore { return Ok::<_, DieselError>(rack); } - // Otherwise, insert blueprint and datasets. + // Otherwise, insert: + // - Services + // - PhysicalDisks + // - Zpools + // - Datasets + // - A blueprint + // + // Which RSS has already allocated during bootstrapping. // Set up the IP pool for internal services. for range in service_ip_pool_ranges { @@ -707,21 +788,49 @@ impl DataStore { DieselError::RollbackTransaction })?; - // Allocate records for all services. - for service in services { - self.rack_populate_service_records( + // Allocate networking records for all services. + for (_, zone_config) in blueprint.all_omicron_zones(BlueprintZoneFilter::ShouldBeRunning) { + self.rack_populate_service_networking_records( &conn, &log, &service_pool, - service, + zone_config, ) .await .map_err(|e| { + error!(log, "Failed to upsert physical disk"; "err" => ?e); err.set(e).unwrap(); DieselError::RollbackTransaction })?; } - info!(log, "Inserted services"); + info!(log, "Inserted service networking records"); + + for physical_disk in physical_disks { + info!(log, "physical disk upsert in handoff: {physical_disk:#?}"); + if let Err(e) = Self::physical_disk_insert_on_connection(&conn, &opctx, physical_disk) + .await { + if !matches!(e, TransactionError::CustomError(Error::ObjectAlreadyExists { .. })) { + error!(log, "Failed to upsert physical disk"; "err" => #%e); + err.set(RackInitError::PhysicalDiskInsert(e.into())) + .unwrap(); + return Err(DieselError::RollbackTransaction); + } + } + } + + info!(log, "Inserted physical disks"); + + for zpool in zpools { + if let Err(e) = Self::zpool_insert_on_connection(&conn, &opctx, zpool).await { + if !matches!(e, TransactionError::CustomError(Error::ObjectAlreadyExists { .. })) { + error!(log, "Failed to upsert zpool"; "err" => #%e); + err.set(RackInitError::ZpoolInsert(e.into())).unwrap(); + return Err(DieselError::RollbackTransaction); + } + } + } + + info!(log, "Inserted zpools"); for dataset in datasets { use db::schema::dataset::dsl; @@ -793,6 +902,17 @@ impl DataStore { } })?; + // Insert the initial source IP allowlist for requests to + // user-facing services. + Self::allow_list_upsert_on_connection( + opctx, + &conn, + rack_init.allowed_source_ips, + ).await.map_err(|e| { + err.set(RackInitError::AllowedSourceIpError(e)).unwrap(); + DieselError::RollbackTransaction + })?; + let rack = diesel::update(rack_dsl::rack) .filter(rack_dsl::id.eq(rack_id)) .set(( @@ -870,54 +990,6 @@ impl DataStore { Ok(()) } - - pub async fn nexus_external_addresses( - &self, - opctx: &OpContext, - blueprint: Option<&Blueprint>, - ) -> Result<(Vec, Vec), Error> { - opctx.authorize(authz::Action::Read, &authz::DNS_CONFIG).await?; - - let dns_zones = self - .dns_zones_list_all(opctx, DnsGroup::External) - .await - .internal_context("listing DNS zones to list external addresses")?; - - let nexus_external_ips = if let Some(blueprint) = blueprint { - blueprint - .all_omicron_zones() - .filter_map(|(_, z)| match z.zone_type { - OmicronZoneType::Nexus { external_ip, .. 
} => { - Some(external_ip) - } - _ => None, - }) - .collect() - } else { - use crate::db::schema::external_ip::dsl as extip_dsl; - use crate::db::schema::service::dsl as service_dsl; - - let conn = self.pool_connection_authorized(opctx).await?; - - extip_dsl::external_ip - .inner_join(service_dsl::service.on( - service_dsl::id.eq(extip_dsl::parent_id.assume_not_null()), - )) - .filter(extip_dsl::parent_id.is_not_null()) - .filter(extip_dsl::time_deleted.is_null()) - .filter(extip_dsl::is_service) - .filter(service_dsl::kind.eq(db::model::ServiceKind::Nexus)) - .select(ExternalIp::as_select()) - .get_results_async(&*conn) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? - .into_iter() - .map(|external_ip| external_ip.ip.ip()) - .collect() - }; - - Ok((nexus_external_ips, dns_zones)) - } } #[cfg(test)] @@ -928,32 +1000,49 @@ mod test { }; use crate::db::datastore::test_utils::datastore_test; use crate::db::datastore::Discoverability; - use crate::db::lookup::LookupPath; use crate::db::model::ExternalIp; use crate::db::model::IpKind; use crate::db::model::IpPoolRange; - use crate::db::model::Service; - use crate::db::model::ServiceKind; use crate::db::model::Sled; use async_bb8_diesel::AsyncSimpleConnection; - use internal_params::DnsRecord; use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_db_model::{DnsGroup, Generation, InitialDnsGroup, SledUpdate}; + use nexus_inventory::now_db_precision; + use nexus_reconfigurator_planning::system::{ + SledBuilder, SystemDescription, + }; use nexus_test_utils::db::test_setup_database; + use nexus_types::deployment::BlueprintZonesConfig; + use nexus_types::deployment::CockroachDbPreserveDowngrade; + use nexus_types::deployment::{ + BlueprintZoneConfig, OmicronZoneExternalFloatingAddr, + OmicronZoneExternalFloatingIp, + }; + use nexus_types::deployment::{ + BlueprintZoneDisposition, OmicronZoneExternalSnatIp, + }; use nexus_types::external_api::shared::SiloIdentityMode; + use nexus_types::external_api::views::SledState; use nexus_types::identity::Asset; - use nexus_types::internal_api::params::ServiceNic; + use nexus_types::internal_api::params::DnsRecord; + use nexus_types::inventory::NetworkInterface; + use nexus_types::inventory::NetworkInterfaceKind; use omicron_common::address::{ DNS_OPTE_IPV4_SUBNET, NEXUS_OPTE_IPV4_SUBNET, NTP_OPTE_IPV4_SUBNET, }; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::{ - IdentityMetadataCreateParams, MacAddr, + IdentityMetadataCreateParams, MacAddr, Vni, }; use omicron_common::api::internal::shared::SourceNatConfig; use omicron_test_utils::dev; + use omicron_uuid_kinds::{ExternalIpUuid, OmicronZoneUuid}; + use omicron_uuid_kinds::{GenericUuid, ZpoolUuid}; + use omicron_uuid_kinds::{SledUuid, TypedUuid}; + use oxnet::IpNet; + use sled_agent_client::types::OmicronZoneDataset; use std::collections::{BTreeMap, HashMap}; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV6}; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV6}; use std::num::NonZeroU32; // Default impl is for tests only, and really just so that tests can more @@ -966,14 +1055,20 @@ mod test { blueprint: Blueprint { id: Uuid::new_v4(), blueprint_zones: BTreeMap::new(), + blueprint_disks: BTreeMap::new(), + sled_state: BTreeMap::new(), + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, parent_blueprint_id: None, internal_dns_version: *Generation::new(), external_dns_version: *Generation::new(), + 
cockroachdb_fingerprint: String::new(), time_created: Utc::now(), creator: "test suite".to_string(), comment: "test suite".to_string(), }, - services: vec![], + physical_disks: vec![], + zpools: vec![], datasets: vec![], service_ip_pool_ranges: vec![], internal_dns: InitialDnsGroup::new( @@ -1019,6 +1114,7 @@ mod test { "test suite".to_string(), "test suite".to_string(), ), + allowed_source_ips: AllowedSourceIps::Any, } } } @@ -1136,8 +1232,7 @@ mod test { logctx.cleanup_successful(); } - async fn create_test_sled(db: &DataStore) -> Sled { - let sled_id = Uuid::new_v4(); + async fn create_test_sled(db: &DataStore, sled_id: Uuid) -> Sled { let addr = SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0); let sled_update = SledUpdate::new( sled_id, @@ -1147,9 +1242,11 @@ mod test { rack_id(), Generation::new(), ); - db.sled_upsert(sled_update) + let (sled, _) = db + .sled_upsert(sled_update) .await - .expect("Could not upsert sled during test prep") + .expect("Could not upsert sled during test prep"); + sled } // Hacky macro helper to: @@ -1186,20 +1283,37 @@ mod test { }; } - fn_to_get_all!(service, Service); fn_to_get_all!(external_ip, ExternalIp); fn_to_get_all!(ip_pool_range, IpPoolRange); fn_to_get_all!(dataset, Dataset); + fn random_dataset() -> OmicronZoneDataset { + OmicronZoneDataset { + pool_name: illumos_utils::zpool::ZpoolName::new_external( + ZpoolUuid::new_v4(), + ) + .to_string() + .parse() + .unwrap(), + } + } + + fn sled_states_active( + sled_ids: impl Iterator, + ) -> BTreeMap { + sled_ids.map(|sled_id| (sled_id, SledState::Active)).collect() + } + #[tokio::test] async fn rack_set_initialized_with_services() { - let logctx = dev::test_setup_log("rack_set_initialized_with_services"); + let test_name = "rack_set_initialized_with_services"; + let logctx = dev::test_setup_log(test_name); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled1 = create_test_sled(&datastore).await; - let sled2 = create_test_sled(&datastore).await; - let sled3 = create_test_sled(&datastore).await; + let sled1 = create_test_sled(&datastore, Uuid::new_v4()).await; + let sled2 = create_test_sled(&datastore, Uuid::new_v4()).await; + let sled3 = create_test_sled(&datastore, Uuid::new_v4()).await; let service_ip_pool_ranges = vec![IpRange::try_from(( Ipv4Addr::new(1, 2, 3, 4), @@ -1207,115 +1321,230 @@ mod test { )) .unwrap()]; + let mut system = SystemDescription::new(); + system + .service_ip_pool_ranges(service_ip_pool_ranges.clone()) + .sled( + SledBuilder::new().id(TypedUuid::from_untyped_uuid(sled1.id())), + ) + .expect("failed to add sled1") + .sled( + SledBuilder::new().id(TypedUuid::from_untyped_uuid(sled2.id())), + ) + .expect("failed to add sled2") + .sled( + SledBuilder::new().id(TypedUuid::from_untyped_uuid(sled3.id())), + ) + .expect("failed to add sled3"); + let external_dns_ip = IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)); let external_dns_pip = DNS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) .unwrap(); - let external_dns_id = Uuid::new_v4(); + let external_dns_id = OmicronZoneUuid::new_v4(); let nexus_ip = IpAddr::V4(Ipv4Addr::new(1, 2, 3, 6)); let nexus_pip = NEXUS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) .unwrap(); - let nexus_id = Uuid::new_v4(); + let nexus_id = OmicronZoneUuid::new_v4(); let ntp1_ip = IpAddr::V4(Ipv4Addr::new(1, 2, 3, 5)); let ntp1_pip = NTP_OPTE_IPV4_SUBNET - 
.nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) .unwrap(); - let ntp1_id = Uuid::new_v4(); + let ntp1_id = OmicronZoneUuid::new_v4(); let ntp2_ip = IpAddr::V4(Ipv4Addr::new(1, 2, 3, 5)); let ntp2_pip = NTP_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 2) + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 2) .unwrap(); - let ntp2_id = Uuid::new_v4(); - let ntp3_id = Uuid::new_v4(); + let ntp2_id = OmicronZoneUuid::new_v4(); + let ntp3_id = OmicronZoneUuid::new_v4(); let mut macs = MacAddr::iter_system(); - let services = vec![ - internal_params::ServicePutRequest { - service_id: external_dns_id, - sled_id: sled1.id(), - zone_id: Some(external_dns_id), - address: SocketAddrV6::new(Ipv6Addr::LOCALHOST, 123, 0, 0), - kind: internal_params::ServiceKind::ExternalDns { - external_address: external_dns_ip, - nic: ServiceNic { - id: Uuid::new_v4(), - name: "external-dns".parse().unwrap(), - ip: external_dns_pip.into(), - mac: macs.next().unwrap(), - slot: 0, - }, - }, - }, - internal_params::ServicePutRequest { - service_id: ntp1_id, - sled_id: sled1.id(), - zone_id: Some(ntp1_id), - address: SocketAddrV6::new(Ipv6Addr::LOCALHOST, 9090, 0, 0), - kind: internal_params::ServiceKind::BoundaryNtp { - snat: SourceNatConfig { - ip: ntp1_ip, - first_port: 16384, - last_port: 32767, - }, - nic: ServiceNic { - id: Uuid::new_v4(), - name: "ntp1".parse().unwrap(), - ip: ntp1_pip.into(), - mac: macs.next().unwrap(), - slot: 0, + + let mut blueprint_zones = BTreeMap::new(); + blueprint_zones.insert( + SledUuid::from_untyped_uuid(sled1.id()), + BlueprintZonesConfig { + generation: Generation::new().next(), + zones: vec![ + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: external_dns_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { + dataset: random_dataset(), + http_address: "[::1]:80".parse().unwrap(), + dns_address: OmicronZoneExternalFloatingAddr { + id: ExternalIpUuid::new_v4(), + addr: SocketAddr::new(external_dns_ip, 53), + }, + nic: NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: external_dns_id.into_untyped_uuid(), + }, + name: "external-dns".parse().unwrap(), + ip: external_dns_pip.into(), + mac: macs.next().unwrap(), + subnet: IpNet::from(*DNS_OPTE_IPV4_SUBNET), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }, + }, + ), }, - }, - }, - internal_params::ServicePutRequest { - service_id: nexus_id, - sled_id: sled2.id(), - zone_id: Some(nexus_id), - address: SocketAddrV6::new(Ipv6Addr::LOCALHOST, 456, 0, 0), - kind: internal_params::ServiceKind::Nexus { - external_address: nexus_ip, - nic: ServiceNic { - id: Uuid::new_v4(), - name: "nexus".parse().unwrap(), - ip: nexus_pip.into(), - mac: macs.next().unwrap(), - slot: 0, + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: ntp1_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::BoundaryNtp( + blueprint_zone_type::BoundaryNtp { + address: "[::1]:80".parse().unwrap(), + ntp_servers: vec![], + dns_servers: vec![], + domain: None, + nic: NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: ntp1_id.into_untyped_uuid(), + }, + name: "ntp1".parse().unwrap(), + ip: ntp1_pip.into(), + mac: macs.next().unwrap(), + subnet: IpNet::from(*NTP_OPTE_IPV4_SUBNET), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }, + external_ip: OmicronZoneExternalSnatIp { + id: 
ExternalIpUuid::new_v4(), + snat_cfg: SourceNatConfig::new( + ntp1_ip, 16384, 32767, + ) + .unwrap(), + }, + }, + ), }, - }, + ], }, - internal_params::ServicePutRequest { - service_id: ntp2_id, - sled_id: sled2.id(), - zone_id: Some(ntp2_id), - address: SocketAddrV6::new(Ipv6Addr::LOCALHOST, 9090, 0, 0), - kind: internal_params::ServiceKind::BoundaryNtp { - snat: SourceNatConfig { - ip: ntp2_ip, - first_port: 0, - last_port: 16383, + ); + blueprint_zones.insert( + SledUuid::from_untyped_uuid(sled2.id()), + BlueprintZonesConfig { + generation: Generation::new().next(), + zones: vec![ + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: nexus_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address: "[::1]:80".parse().unwrap(), + external_ip: OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip: nexus_ip, + }, + external_tls: false, + external_dns_servers: vec![], + nic: NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: nexus_id.into_untyped_uuid(), + }, + name: "nexus".parse().unwrap(), + ip: nexus_pip.into(), + mac: macs.next().unwrap(), + subnet: IpNet::from( + *NEXUS_OPTE_IPV4_SUBNET, + ), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }, + }, + ), }, - nic: ServiceNic { - id: Uuid::new_v4(), - name: "ntp2".parse().unwrap(), - ip: ntp2_pip.into(), - mac: macs.next().unwrap(), - slot: 0, + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: ntp2_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::BoundaryNtp( + blueprint_zone_type::BoundaryNtp { + address: "[::1]:80".parse().unwrap(), + ntp_servers: vec![], + dns_servers: vec![], + domain: None, + nic: NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: ntp2_id.into_untyped_uuid(), + }, + name: "ntp2".parse().unwrap(), + ip: ntp2_pip.into(), + mac: macs.next().unwrap(), + subnet: IpNet::from(*NTP_OPTE_IPV4_SUBNET), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }, + external_ip: OmicronZoneExternalSnatIp { + id: ExternalIpUuid::new_v4(), + snat_cfg: SourceNatConfig::new( + ntp2_ip, 0, 16383, + ) + .unwrap(), + }, + }, + ), }, - }, + ], }, - internal_params::ServicePutRequest { - service_id: ntp3_id, - sled_id: sled3.id(), - zone_id: Some(ntp3_id), - address: SocketAddrV6::new(Ipv6Addr::LOCALHOST, 9090, 0, 0), - kind: internal_params::ServiceKind::InternalNtp, + ); + blueprint_zones.insert( + SledUuid::from_untyped_uuid(sled3.id()), + BlueprintZonesConfig { + generation: Generation::new().next(), + zones: vec![BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: ntp3_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::InternalNtp( + blueprint_zone_type::InternalNtp { + address: "[::1]:80".parse().unwrap(), + ntp_servers: vec![], + dns_servers: vec![], + domain: None, + }, + ), + }], }, - ]; + ); + for zone_config in blueprint_zones.values_mut() { + zone_config.sort(); + } + let blueprint = Blueprint { + id: Uuid::new_v4(), + sled_state: sled_states_active(blueprint_zones.keys().copied()), + blueprint_zones, + blueprint_disks: BTreeMap::new(), + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, + parent_blueprint_id: None, + internal_dns_version: *Generation::new(), + external_dns_version: *Generation::new(), + cockroachdb_fingerprint: String::new(), + time_created: now_db_precision(), + 
creator: "test suite".to_string(), + comment: "test blueprint".to_string(), + }; let rack = datastore .rack_set_initialized( &opctx, RackInit { - services: services.clone(), + blueprint: blueprint.clone(), service_ip_pool_ranges, ..Default::default() }, @@ -1326,48 +1555,12 @@ mod test { assert_eq!(rack.id(), rack_id()); assert!(rack.initialized); - let observed_services = get_all_services(&datastore).await; - let observed_datasets = get_all_datasets(&datastore).await; - - // We should see all the services we initialized - assert_eq!(observed_services.len(), 5); - let dns_service = observed_services - .iter() - .find(|s| s.id() == external_dns_id) - .unwrap(); - let nexus_service = - observed_services.iter().find(|s| s.id() == nexus_id).unwrap(); - let ntp1_service = - observed_services.iter().find(|s| s.id() == ntp1_id).unwrap(); - let ntp2_service = - observed_services.iter().find(|s| s.id() == ntp2_id).unwrap(); - let ntp3_service = - observed_services.iter().find(|s| s.id() == ntp3_id).unwrap(); - - assert_eq!(dns_service.sled_id, sled1.id()); - assert_eq!(dns_service.kind, ServiceKind::ExternalDns); - assert_eq!(*dns_service.ip, Ipv6Addr::LOCALHOST); - assert_eq!(*dns_service.port, 123); - - assert_eq!(nexus_service.sled_id, sled2.id()); - assert_eq!(nexus_service.kind, ServiceKind::Nexus); - assert_eq!(*nexus_service.ip, Ipv6Addr::LOCALHOST); - assert_eq!(*nexus_service.port, 456); - - assert_eq!(ntp1_service.sled_id, sled1.id()); - assert_eq!(ntp1_service.kind, ServiceKind::Ntp); - assert_eq!(*ntp1_service.ip, Ipv6Addr::LOCALHOST); - assert_eq!(*ntp1_service.port, 9090); - - assert_eq!(ntp2_service.sled_id, sled2.id()); - assert_eq!(ntp2_service.kind, ServiceKind::Ntp); - assert_eq!(*ntp2_service.ip, Ipv6Addr::LOCALHOST); - assert_eq!(*ntp2_service.port, 9090); - - assert_eq!(ntp3_service.sled_id, sled3.id()); - assert_eq!(ntp3_service.kind, ServiceKind::Ntp); - assert_eq!(*ntp3_service.ip, Ipv6Addr::LOCALHOST); - assert_eq!(*ntp3_service.port, 9090); + // We should see the blueprint we passed in. + let (_blueprint_target, observed_blueprint) = datastore + .blueprint_target_get_current_full(&opctx) + .await + .expect("failed to read blueprint"); + assert_eq!(observed_blueprint, blueprint); // We should also see the single external IP allocated for each service // save for the non-boundary NTP service. 
@@ -1375,39 +1568,35 @@ mod test { assert_eq!(observed_external_ips.len(), 4); let dns_external_ip = observed_external_ips .iter() - .find(|e| e.parent_id == Some(external_dns_id)) + .find(|e| e.parent_id == Some(external_dns_id.into_untyped_uuid())) .unwrap(); let nexus_external_ip = observed_external_ips .iter() - .find(|e| e.parent_id == Some(nexus_id)) + .find(|e| e.parent_id == Some(nexus_id.into_untyped_uuid())) .unwrap(); let ntp1_external_ip = observed_external_ips .iter() - .find(|e| e.parent_id == Some(ntp1_id)) + .find(|e| e.parent_id == Some(ntp1_id.into_untyped_uuid())) .unwrap(); let ntp2_external_ip = observed_external_ips .iter() - .find(|e| e.parent_id == Some(ntp2_id)) + .find(|e| e.parent_id == Some(ntp2_id.into_untyped_uuid())) .unwrap(); assert!(!observed_external_ips .iter() - .any(|e| e.parent_id == Some(ntp3_id))); + .any(|e| e.parent_id == Some(ntp3_id.into_untyped_uuid()))); - assert_eq!(dns_external_ip.parent_id, Some(dns_service.id())); assert!(dns_external_ip.is_service); assert_eq!(dns_external_ip.kind, IpKind::Floating); - assert_eq!(nexus_external_ip.parent_id, Some(nexus_service.id())); assert!(nexus_external_ip.is_service); assert_eq!(nexus_external_ip.kind, IpKind::Floating); - assert_eq!(ntp1_external_ip.parent_id, Some(ntp1_service.id())); assert!(ntp1_external_ip.is_service); assert_eq!(ntp1_external_ip.kind, IpKind::SNat); assert_eq!(ntp1_external_ip.first_port.0, 16384); assert_eq!(ntp1_external_ip.last_port.0, 32767); - assert_eq!(ntp2_external_ip.parent_id, Some(ntp2_service.id())); assert!(ntp2_external_ip.is_service); assert_eq!(ntp2_external_ip.kind, IpKind::SNat); assert_eq!(ntp2_external_ip.first_port.0, 0); @@ -1452,6 +1641,7 @@ mod test { ); assert_eq!(ntp2_external_ip.ip.ip(), ntp2_ip); + let observed_datasets = get_all_datasets(&datastore).await; assert!(observed_datasets.is_empty()); db.cleanup().await.unwrap(); @@ -1460,67 +1650,111 @@ mod test { #[tokio::test] async fn rack_set_initialized_with_many_nexus_services() { - let logctx = dev::test_setup_log( - "rack_set_initialized_with_many_nexus_services", - ); + let test_name = "rack_set_initialized_with_many_nexus_services"; + let logctx = dev::test_setup_log(test_name); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled = create_test_sled(&datastore).await; + let sled = create_test_sled(&datastore, Uuid::new_v4()).await; // Ask for two Nexus services, with different external IPs. 
let nexus_ip_start = Ipv4Addr::new(1, 2, 3, 4); let nexus_ip_end = Ipv4Addr::new(1, 2, 3, 5); - let nexus_id1 = Uuid::new_v4(); - let nexus_id2 = Uuid::new_v4(); + let service_ip_pool_ranges = + vec![IpRange::try_from((nexus_ip_start, nexus_ip_end)) + .expect("Cannot create IP Range")]; + + let mut system = SystemDescription::new(); + system + .service_ip_pool_ranges(service_ip_pool_ranges.clone()) + .sled( + SledBuilder::new().id(TypedUuid::from_untyped_uuid(sled.id())), + ) + .expect("failed to add sled"); + + let nexus_id1 = OmicronZoneUuid::new_v4(); + let nexus_id2 = OmicronZoneUuid::new_v4(); let nexus_pip1 = NEXUS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) .unwrap(); let nexus_pip2 = NEXUS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 2) + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 2) .unwrap(); let mut macs = MacAddr::iter_system(); - let mut services = vec![ - internal_params::ServicePutRequest { - service_id: nexus_id1, - sled_id: sled.id(), - zone_id: Some(nexus_id1), - address: SocketAddrV6::new(Ipv6Addr::LOCALHOST, 123, 0, 0), - kind: internal_params::ServiceKind::Nexus { - external_address: IpAddr::V4(nexus_ip_start), - nic: ServiceNic { - id: Uuid::new_v4(), - name: "nexus1".parse().unwrap(), - ip: nexus_pip1.into(), - mac: macs.next().unwrap(), - slot: 0, + + let mut blueprint_zones = BTreeMap::new(); + blueprint_zones.insert( + SledUuid::from_untyped_uuid(sled.id()), + BlueprintZonesConfig { + generation: Generation::new().next(), + zones: vec![ + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: nexus_id1, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address: "[::1]:80".parse().unwrap(), + external_ip: OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip: nexus_ip_start.into(), + }, + external_tls: false, + external_dns_servers: vec![], + nic: NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: nexus_id1.into_untyped_uuid(), + }, + name: "nexus1".parse().unwrap(), + ip: nexus_pip1.into(), + mac: macs.next().unwrap(), + subnet: IpNet::from( + *NEXUS_OPTE_IPV4_SUBNET, + ), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }, + }, + ), }, - }, - }, - internal_params::ServicePutRequest { - service_id: nexus_id2, - sled_id: sled.id(), - zone_id: Some(nexus_id2), - address: SocketAddrV6::new(Ipv6Addr::LOCALHOST, 456, 0, 0), - kind: internal_params::ServiceKind::Nexus { - external_address: IpAddr::V4(nexus_ip_end), - nic: ServiceNic { - id: Uuid::new_v4(), - name: "nexus2".parse().unwrap(), - ip: nexus_pip2.into(), - mac: macs.next().unwrap(), - slot: 0, + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: nexus_id2, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address: "[::1]:80".parse().unwrap(), + external_ip: OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip: nexus_ip_end.into(), + }, + external_tls: false, + external_dns_servers: vec![], + nic: NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: nexus_id2.into_untyped_uuid(), + }, + name: "nexus2".parse().unwrap(), + ip: nexus_pip2.into(), + mac: macs.next().unwrap(), + subnet: oxnet::IpNet::from( + *NEXUS_OPTE_IPV4_SUBNET, + ), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }, + }, + ), }, - }, + ], }, - 
]; - services - .sort_by(|a, b| a.service_id.partial_cmp(&b.service_id).unwrap()); + ); let datasets = vec![]; - let service_ip_pool_ranges = - vec![IpRange::try_from((nexus_ip_start, nexus_ip_end)) - .expect("Cannot create IP Range")]; let internal_records = vec![ DnsRecord::Aaaa("fe80::1:2:3:4".parse().unwrap()), @@ -1544,11 +1778,30 @@ mod test { HashMap::from([("api.sys".to_string(), external_records.clone())]), ); + for zone_config in blueprint_zones.values_mut() { + zone_config.sort(); + } + let blueprint = Blueprint { + id: Uuid::new_v4(), + sled_state: sled_states_active(blueprint_zones.keys().copied()), + blueprint_zones, + blueprint_disks: BTreeMap::new(), + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, + parent_blueprint_id: None, + internal_dns_version: *Generation::new(), + external_dns_version: *Generation::new(), + cockroachdb_fingerprint: String::new(), + time_created: now_db_precision(), + creator: "test suite".to_string(), + comment: "test blueprint".to_string(), + }; + let rack = datastore .rack_set_initialized( &opctx, RackInit { - services: services.clone(), + blueprint: blueprint.clone(), datasets: datasets.clone(), service_ip_pool_ranges, internal_dns, @@ -1562,21 +1815,20 @@ mod test { assert_eq!(rack.id(), rack_id()); assert!(rack.initialized); - let mut observed_services = get_all_services(&datastore).await; - let observed_datasets = get_all_datasets(&datastore).await; + // We should see the blueprint we passed in. + let (_blueprint_target, observed_blueprint) = datastore + .blueprint_target_get_current_full(&opctx) + .await + .expect("failed to read blueprint"); + assert_eq!(observed_blueprint, blueprint); // We should see both of the Nexus services we provisioned. - assert_eq!(observed_services.len(), 2); - observed_services.sort_by(|a, b| a.id().partial_cmp(&b.id()).unwrap()); - - assert_eq!(observed_services[0].sled_id, sled.id()); - assert_eq!(observed_services[1].sled_id, sled.id()); - assert_eq!(observed_services[0].kind, ServiceKind::Nexus); - assert_eq!(observed_services[1].kind, ServiceKind::Nexus); - assert_eq!(*observed_services[0].ip, Ipv6Addr::LOCALHOST); - assert_eq!(*observed_services[1].ip, Ipv6Addr::LOCALHOST); - assert_eq!(*observed_services[0].port, services[0].address.port()); - assert_eq!(*observed_services[1].port, services[1].address.port()); + let mut observed_zones: Vec<_> = observed_blueprint + .all_omicron_zones(BlueprintZoneFilter::All) + .map(|(_, z)| z) + .collect(); + observed_zones.sort_by_key(|z| z.id); + assert_eq!(observed_zones.len(), 2); // We should see both IPs allocated for these services. let observed_external_ips = get_all_external_ips(&datastore).await; @@ -1593,25 +1845,39 @@ mod test { // The address allocated for the service should match the input. assert_eq!( - observed_external_ips[&observed_services[0].id()].ip.ip(), - if let internal_params::ServiceKind::Nexus { - external_address, + observed_external_ips[observed_zones[0].id.as_untyped_uuid()] + .ip + .ip(), + if let BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + external_ip, .. 
- } = services[0].kind + }) = &blueprint + .all_omicron_zones(BlueprintZoneFilter::All) + .next() + .unwrap() + .1 + .zone_type { - external_address + external_ip.ip } else { - panic!("Unexpected service kind") + panic!("Unexpected zone type") } ); assert_eq!( - observed_external_ips[&observed_services[1].id()].ip.ip(), - if let internal_params::ServiceKind::Nexus { - external_address, + observed_external_ips[observed_zones[1].id.as_untyped_uuid()] + .ip + .ip(), + if let BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + external_ip, .. - } = services[1].kind + }) = &blueprint + .all_omicron_zones(BlueprintZoneFilter::All) + .nth(1) + .unwrap() + .1 + .zone_type { - external_address + external_ip.ip } else { panic!("Unexpected service kind") } @@ -1627,6 +1893,7 @@ mod test { assert_eq!(observed_ip_pool_ranges.len(), 1); assert_eq!(observed_ip_pool_ranges[0].ip_pool_id, svc_pool.id()); + let observed_datasets = get_all_datasets(&datastore).await; assert!(observed_datasets.is_empty()); // Verify the internal and external DNS configurations. @@ -1666,41 +1933,86 @@ mod test { #[tokio::test] async fn rack_set_initialized_missing_service_pool_ip_throws_error() { - let logctx = dev::test_setup_log( - "rack_set_initialized_missing_service_pool_ip_throws_error", - ); + let test_name = + "rack_set_initialized_missing_service_pool_ip_throws_error"; + let logctx = dev::test_setup_log(test_name); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled = create_test_sled(&datastore).await; + let sled = create_test_sled(&datastore, Uuid::new_v4()).await; + + let mut system = SystemDescription::new(); + system + .sled( + SledBuilder::new().id(TypedUuid::from_untyped_uuid(sled.id())), + ) + .expect("failed to add sled"); let nexus_ip = IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)); let nexus_pip = NEXUS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) .unwrap(); - let nexus_id = Uuid::new_v4(); + let nexus_id = OmicronZoneUuid::new_v4(); let mut macs = MacAddr::iter_system(); - let services = vec![internal_params::ServicePutRequest { - service_id: nexus_id, - sled_id: sled.id(), - zone_id: Some(nexus_id), - address: SocketAddrV6::new(Ipv6Addr::LOCALHOST, 123, 0, 0), - kind: internal_params::ServiceKind::Nexus { - external_address: nexus_ip, - nic: ServiceNic { - id: Uuid::new_v4(), - name: "nexus".parse().unwrap(), - ip: nexus_pip.into(), - mac: macs.next().unwrap(), - slot: 0, - }, + let mut blueprint_zones = BTreeMap::new(); + blueprint_zones.insert( + SledUuid::from_untyped_uuid(sled.id()), + BlueprintZonesConfig { + generation: Generation::new().next(), + zones: vec![BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: nexus_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address: "[::1]:80".parse().unwrap(), + external_ip: OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip: nexus_ip, + }, + external_tls: false, + external_dns_servers: vec![], + nic: NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: nexus_id.into_untyped_uuid(), + }, + name: "nexus".parse().unwrap(), + ip: nexus_pip.into(), + mac: macs.next().unwrap(), + subnet: IpNet::from(*NEXUS_OPTE_IPV4_SUBNET), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }, + }, + ), + }], }, - }]; + ); + for zone_config in blueprint_zones.values_mut() { + 
zone_config.sort(); + } + let blueprint = Blueprint { + id: Uuid::new_v4(), + sled_state: sled_states_active(blueprint_zones.keys().copied()), + blueprint_zones, + blueprint_disks: BTreeMap::new(), + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, + parent_blueprint_id: None, + internal_dns_version: *Generation::new(), + external_dns_version: *Generation::new(), + cockroachdb_fingerprint: String::new(), + time_created: now_db_precision(), + creator: "test suite".to_string(), + comment: "test blueprint".to_string(), + }; let result = datastore .rack_set_initialized( &opctx, - RackInit { services: services.clone(), ..Default::default() }, + RackInit { blueprint: blueprint.clone(), ..Default::default() }, ) .await; assert!(result.is_err()); @@ -1709,7 +2021,6 @@ mod test { "Invalid Request: Requested external IP address not available" ); - assert!(get_all_services(&datastore).await.is_empty()); assert!(get_all_datasets(&datastore).await.is_empty()); assert!(get_all_external_ips(&datastore).await.is_empty()); @@ -1719,68 +2030,129 @@ mod test { #[tokio::test] async fn rack_set_initialized_overlapping_ips_throws_error() { - let logctx = dev::test_setup_log( - "rack_set_initialized_overlapping_ips_throws_error", - ); + let test_name = "rack_set_initialized_overlapping_ips_throws_error"; + let logctx = dev::test_setup_log(test_name); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - let sled = create_test_sled(&datastore).await; + let sled = create_test_sled(&datastore, Uuid::new_v4()).await; - // Request two services which happen to be using the same IP address. let ip = IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)); - let external_dns_id = Uuid::new_v4(); + let service_ip_pool_ranges = vec![IpRange::from(ip)]; + + let mut system = SystemDescription::new(); + system + .service_ip_pool_ranges(service_ip_pool_ranges.clone()) + .sled( + SledBuilder::new().id(TypedUuid::from_untyped_uuid(sled.id())), + ) + .expect("failed to add sled"); + + // Request two services which happen to be using the same IP address. 
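The two zone configs constructed below both claim the external address 1.2.3.4, so the second allocation must fail. As a rough standalone model of the invariant this test exercises (not the datastore's actual allocation query; first_conflicting_ip is a hypothetical helper):

use std::collections::HashSet;
use std::net::IpAddr;

// Return the first address that appears more than once in a set of
// requested external IPs; the rack-init path must reject such a request.
fn first_conflicting_ip(requested: &[IpAddr]) -> Option<IpAddr> {
    let mut seen = HashSet::new();
    requested.iter().copied().find(|ip| !seen.insert(*ip))
}

fn main() {
    let ip: IpAddr = "1.2.3.4".parse().unwrap();
    // External DNS and Nexus both ask for `ip`, so the second request conflicts.
    assert_eq!(first_conflicting_ip(&[ip, ip]), Some(ip));
    assert_eq!(first_conflicting_ip(&[ip]), None);
}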
+ let external_dns_id = OmicronZoneUuid::new_v4(); let external_dns_pip = DNS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) .unwrap(); - let nexus_id = Uuid::new_v4(); + let nexus_id = OmicronZoneUuid::new_v4(); let nexus_pip = NEXUS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) .unwrap(); let mut macs = MacAddr::iter_system(); - let services = vec![ - internal_params::ServicePutRequest { - service_id: external_dns_id, - sled_id: sled.id(), - zone_id: Some(external_dns_id), - address: SocketAddrV6::new(Ipv6Addr::LOCALHOST, 123, 0, 0), - kind: internal_params::ServiceKind::ExternalDns { - external_address: ip, - nic: ServiceNic { - id: Uuid::new_v4(), - name: "external-dns".parse().unwrap(), - ip: external_dns_pip.into(), - mac: macs.next().unwrap(), - slot: 0, + let mut blueprint_zones = BTreeMap::new(); + blueprint_zones.insert( + SledUuid::from_untyped_uuid(sled.id()), + BlueprintZonesConfig { + generation: Generation::new().next(), + zones: vec![ + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: external_dns_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { + dataset: random_dataset(), + http_address: "[::1]:80".parse().unwrap(), + dns_address: OmicronZoneExternalFloatingAddr { + id: ExternalIpUuid::new_v4(), + addr: SocketAddr::new(ip, 53), + }, + nic: NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: external_dns_id.into_untyped_uuid(), + }, + name: "external-dns".parse().unwrap(), + ip: external_dns_pip.into(), + mac: macs.next().unwrap(), + subnet: IpNet::from(*DNS_OPTE_IPV4_SUBNET), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }, + }, + ), }, - }, - }, - internal_params::ServicePutRequest { - service_id: nexus_id, - sled_id: sled.id(), - zone_id: Some(nexus_id), - address: SocketAddrV6::new(Ipv6Addr::LOCALHOST, 123, 0, 0), - kind: internal_params::ServiceKind::Nexus { - external_address: ip, - nic: ServiceNic { - id: Uuid::new_v4(), - name: "nexus".parse().unwrap(), - ip: nexus_pip.into(), - mac: macs.next().unwrap(), - slot: 0, + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: nexus_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address: "[::1]:80".parse().unwrap(), + external_ip: OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip, + }, + external_tls: false, + external_dns_servers: vec![], + nic: NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: nexus_id.into_untyped_uuid(), + }, + name: "nexus".parse().unwrap(), + ip: nexus_pip.into(), + mac: macs.next().unwrap(), + subnet: IpNet::from( + *NEXUS_OPTE_IPV4_SUBNET, + ), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }, + }, + ), }, - }, + ], }, - ]; - let service_ip_pool_ranges = vec![IpRange::from(ip)]; + ); + + for zone_config in blueprint_zones.values_mut() { + zone_config.sort(); + } + let blueprint = Blueprint { + id: Uuid::new_v4(), + sled_state: sled_states_active(blueprint_zones.keys().copied()), + blueprint_zones, + blueprint_disks: BTreeMap::new(), + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, + parent_blueprint_id: None, + internal_dns_version: *Generation::new(), + external_dns_version: *Generation::new(), + 
cockroachdb_fingerprint: String::new(), + time_created: now_db_precision(), + creator: "test suite".to_string(), + comment: "test blueprint".to_string(), + }; let result = datastore .rack_set_initialized( &opctx, RackInit { rack_id: rack_id(), - services: services.clone(), + blueprint: blueprint.clone(), service_ip_pool_ranges, ..Default::default() }, @@ -1792,7 +2164,6 @@ mod test { "Invalid Request: Requested external IP address not available", ); - assert!(get_all_services(&datastore).await.is_empty()); assert!(get_all_datasets(&datastore).await.is_empty()); assert!(get_all_external_ips(&datastore).await.is_empty()); @@ -1817,7 +2188,7 @@ mod test { for i in 0..5i16 { let allocation = SledUnderlaySubnetAllocation { rack_id, - sled_id: Uuid::new_v4(), + sled_id: SledUuid::new_v4().into(), subnet_octet: 33 + i, hw_baseboard_id: Uuid::new_v4(), }; @@ -1837,7 +2208,7 @@ mod test { // sled_id. Ensure we get an error due to a unique constraint. let mut should_fail_allocation = SledUnderlaySubnetAllocation { rack_id, - sled_id: Uuid::new_v4(), + sled_id: SledUuid::new_v4().into(), subnet_octet: 37, hw_baseboard_id: Uuid::new_v4(), }; @@ -1865,7 +2236,7 @@ mod test { // Allocations outside our expected range fail let mut should_fail_allocation = SledUnderlaySubnetAllocation { rack_id, - sled_id: Uuid::new_v4(), + sled_id: SledUuid::new_v4().into(), subnet_octet: 32, hw_baseboard_id: Uuid::new_v4(), }; @@ -1901,18 +2272,30 @@ mod test { let rack_id = Uuid::new_v4(); + let mut hw_baseboard_ids = vec![]; let mut allocated_octets = vec![]; for _ in 0..5 { + let hw_baseboard_id = Uuid::new_v4(); + hw_baseboard_ids.push(hw_baseboard_id); allocated_octets.push( - datastore + match datastore .allocate_sled_underlay_subnet_octets( &opctx, rack_id, - Uuid::new_v4(), + hw_baseboard_id, ) .await .unwrap() - .subnet_octet, + { + SledUnderlayAllocationResult::New(allocation) => { + allocation.subnet_octet + } + SledUnderlayAllocationResult::CommissionedSled( + allocation, + ) => { + panic!("unexpected allocation {allocation:?}"); + } + }, ); } @@ -1928,6 +2311,149 @@ mod test { allocations.iter().map(|a| a.subnet_octet).collect::>() ); + // If we attempt to insert the same baseboards again, we should get the + // same new allocations back. + for (&hw_baseboard_id, prev_allocation) in + hw_baseboard_ids.iter().zip(&allocations) + { + match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + hw_baseboard_id, + ) + .await + .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => { + assert_eq!(allocation, *prev_allocation); + } + SledUnderlayAllocationResult::CommissionedSled(allocation) => { + panic!("unexpected allocation {allocation:?}"); + } + } + } + + // Pick one of the hw_baseboard_ids and insert a sled record. We should + // get back the `CommissionedSled` allocation result if we retry + // allocation of that baseboard. + create_test_sled( + &datastore, + allocations[0].sled_id.into_untyped_uuid(), + ) + .await; + match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + hw_baseboard_ids[0], + ) + .await + .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => { + panic!("unexpected allocation {allocation:?}"); + } + SledUnderlayAllocationResult::CommissionedSled(allocation) => { + assert_eq!(allocation, allocations[0]); + } + } + + // If we attempt to insert the same baseboard again and that baseboard + // is only assigned to decommissioned sleds, we should get a new + // allocation. 
We'll pick one hw baseboard ID, create a `Sled` for it, + // decommission that sled, and confirm we get a new octet, five times in + // a loop (to emulate the same sled being added and decommissioned + // multiple times). + let mut next_expected_octet = *expected.last().unwrap() + 1; + let mut prior_allocation = allocations.last().unwrap().clone(); + let target_hw_baseboard_id = *hw_baseboard_ids.last().unwrap(); + for _ in 0..5 { + // Commission the sled. + let sled = create_test_sled( + &datastore, + prior_allocation.sled_id.into_untyped_uuid(), + ) + .await; + + // If we attempt this same baseboard again, we get the existing + // allocation back. + match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + target_hw_baseboard_id, + ) + .await + .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => { + panic!("unexpected allocation {allocation:?}"); + } + SledUnderlayAllocationResult::CommissionedSled(existing) => { + assert_eq!(existing, prior_allocation); + } + } + + // Decommission the sled. + let (authz_sled,) = LookupPath::new(&opctx, &datastore) + .sled_id(sled.id()) + .lookup_for(authz::Action::Modify) + .await + .expect("found target sled ID"); + datastore + .sled_set_policy_to_expunged(&opctx, &authz_sled) + .await + .expect("expunged sled"); + datastore + .sled_set_state_to_decommissioned(&opctx, &authz_sled) + .await + .expect("decommissioned sled"); + + // Attempt a new allocation for the same hw_baseboard_id. + let allocation = match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + target_hw_baseboard_id, + ) + .await + .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => allocation, + SledUnderlayAllocationResult::CommissionedSled(allocation) => { + panic!("unexpected existing allocation {allocation:?}"); + } + }; + + // We should get the next octet with a new sled ID. + assert_eq!(allocation.subnet_octet, next_expected_octet); + assert_ne!(allocation.sled_id.into_untyped_uuid(), sled.id()); + prior_allocation = allocation; + + // Ensure if we attempt this same baseboard again, we get the + // same allocation back (the sled hasn't been commissioned yet). + match datastore + .allocate_sled_underlay_subnet_octets( + &opctx, + rack_id, + target_hw_baseboard_id, + ) + .await + .unwrap() + { + SledUnderlayAllocationResult::New(allocation) => { + assert_eq!(prior_allocation, allocation); + } + SledUnderlayAllocationResult::CommissionedSled(existing) => { + panic!("unexpected allocation {existing:?}"); + } + } + + // Bump our expectations for the next iteration. 
+ next_expected_octet += 1; + } + db.cleanup().await.unwrap(); logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/db/datastore/region.rs b/nexus/db-queries/src/db/datastore/region.rs index ad89a9ca93..d7da24cce3 100644 --- a/nexus/db-queries/src/db/datastore/region.rs +++ b/nexus/db-queries/src/db/datastore/region.rs @@ -8,10 +8,12 @@ use super::DataStore; use super::RunnableQuery; use crate::context::OpContext; use crate::db; +use crate::db::datastore::REGION_REDUNDANCY_THRESHOLD; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::lookup::LookupPath; use crate::db::model::Dataset; +use crate::db::model::PhysicalDiskPolicy; use crate::db::model::Region; use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; @@ -21,6 +23,7 @@ use nexus_types::external_api::params; use omicron_common::api::external; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; +use omicron_common::api::external::LookupResult; use slog::Logger; use uuid::Uuid; @@ -56,6 +59,34 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + pub async fn get_region(&self, region_id: Uuid) -> Result { + use db::schema::region::dsl; + dsl::region + .filter(dsl::id.eq(region_id)) + .select(Region::as_select()) + .get_result_async::( + &*self.pool_connection_unauthorized().await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn get_region_optional( + &self, + region_id: Uuid, + ) -> Result, Error> { + use db::schema::region::dsl; + dsl::region + .filter(dsl::id.eq(region_id)) + .select(Region::as_select()) + .get_result_async::( + &*self.pool_connection_unauthorized().await?, + ) + .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + async fn get_block_size_from_disk_source( &self, opctx: &OpContext, @@ -99,7 +130,7 @@ impl DataStore { size: external::ByteCount, ) -> (u64, u64) { let blocks_per_extent = - Self::EXTENT_SIZE / block_size.to_bytes() as u64; + Self::EXTENT_SIZE / u64::from(block_size.to_bytes()); let size = size.to_bytes(); @@ -115,27 +146,85 @@ impl DataStore { /// /// Returns the allocated regions, as well as the datasets to which they /// belong. - pub async fn region_allocate( + pub async fn disk_region_allocate( + &self, + opctx: &OpContext, + volume_id: Uuid, + disk_source: ¶ms::DiskSource, + size: external::ByteCount, + allocation_strategy: &RegionAllocationStrategy, + ) -> Result, Error> { + self.arbitrary_region_allocate( + opctx, + volume_id, + disk_source, + size, + allocation_strategy, + REGION_REDUNDANCY_THRESHOLD, + ) + .await + } + + /// Idempotently allocates an arbitrary number of regions for a volume. + /// + /// For regular disk creation, this will be REGION_REDUNDANCY_THRESHOLD. + /// + /// For region replacement, it's important to allocate the *new* region for + /// a volume while respecting the current region allocation strategy. This + /// requires setting `num_regions_required` to one more than the current + /// level for a volume. If a single region is allocated in isolation this + /// could land on the same dataset as one of the existing volume's regions. + /// + /// Returns the allocated regions, as well as the datasets to which they + /// belong. 
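A rough standalone sketch of the sizing and redundancy arithmetic described above, under stated assumptions: the constants and the crucible_allocation helper are illustrative stand-ins, not the crate's actual values or API.

// Assumed extent size and redundancy threshold, for illustration only.
const EXTENT_SIZE: u64 = 64 * 1024 * 1024;
const REGION_REDUNDANCY_THRESHOLD: usize = 3;

// Mirror the shape of get_crucible_allocation: how many blocks fit in one
// extent, and how many whole extents are needed to cover `size` bytes
// (rounding up to the nearest extent).
fn crucible_allocation(block_size: u64, size: u64) -> (u64, u64) {
    let blocks_per_extent = EXTENT_SIZE / block_size;
    let extent_count = size.div_ceil(EXTENT_SIZE);
    (blocks_per_extent, extent_count)
}

fn main() {
    let (blocks_per_extent, extent_count) =
        crucible_allocation(4096, 10 * 1024 * 1024 * 1024);
    // A fresh disk asks for the redundancy threshold; a replacement asks for
    // one more region than the volume currently has, per the comment above.
    let regions_for_new_disk = REGION_REDUNDANCY_THRESHOLD;
    let regions_for_replacement = REGION_REDUNDANCY_THRESHOLD + 1;
    println!("{blocks_per_extent} blocks/extent, {extent_count} extents needed");
    println!("{regions_for_new_disk} regions for a new disk, {regions_for_replacement} for a replacement");
}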
+ pub async fn arbitrary_region_allocate( &self, opctx: &OpContext, volume_id: Uuid, disk_source: ¶ms::DiskSource, size: external::ByteCount, allocation_strategy: &RegionAllocationStrategy, + num_regions_required: usize, ) -> Result, Error> { let block_size = self.get_block_size_from_disk_source(opctx, &disk_source).await?; let (blocks_per_extent, extent_count) = Self::get_crucible_allocation(&block_size, size); + self.arbitrary_region_allocate_direct( + opctx, + volume_id, + u64::from(block_size.to_bytes()), + blocks_per_extent, + extent_count, + allocation_strategy, + num_regions_required, + ) + .await + } + + #[allow(clippy::too_many_arguments)] + pub async fn arbitrary_region_allocate_direct( + &self, + opctx: &OpContext, + volume_id: Uuid, + block_size: u64, + blocks_per_extent: u64, + extent_count: u64, + allocation_strategy: &RegionAllocationStrategy, + num_regions_required: usize, + ) -> Result, Error> { let query = crate::db::queries::region_allocation::allocation_query( volume_id, - block_size.to_bytes() as u64, + block_size, blocks_per_extent, extent_count, allocation_strategy, + num_regions_required, ); + let conn = self.pool_connection_authorized(&opctx).await?; + let dataset_and_regions: Vec<(Dataset, Region)> = query.get_results_async(&*conn).await.map_err(|e| { crate::db::queries::region_allocation::from_diesel(e) @@ -147,6 +236,7 @@ impl DataStore { "volume_id" => %volume_id, "datasets_and_regions" => ?dataset_and_regions, ); + Ok(dataset_and_regions) } @@ -275,6 +365,40 @@ impl DataStore { Ok(0) } } + + /// Find regions on expunged disks + pub async fn find_regions_on_expunged_physical_disks( + &self, + opctx: &OpContext, + ) -> LookupResult> { + let conn = self.pool_connection_authorized(opctx).await?; + + use db::schema::dataset::dsl as dataset_dsl; + use db::schema::physical_disk::dsl as physical_disk_dsl; + use db::schema::region::dsl as region_dsl; + use db::schema::zpool::dsl as zpool_dsl; + + region_dsl::region + .filter(region_dsl::dataset_id.eq_any( + dataset_dsl::dataset + .filter(dataset_dsl::time_deleted.is_null()) + .filter(dataset_dsl::pool_id.eq_any( + zpool_dsl::zpool + .filter(zpool_dsl::time_deleted.is_null()) + .filter(zpool_dsl::physical_disk_id.eq_any( + physical_disk_dsl::physical_disk + .filter(physical_disk_dsl::disk_policy.eq(PhysicalDiskPolicy::Expunged)) + .select(physical_disk_dsl::id) + )) + .select(zpool_dsl::id) + )) + .select(dataset_dsl::id) + )) + .select(Region::as_select()) + .load_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } } #[cfg(test)] @@ -329,7 +453,7 @@ mod test { // Note that i64::MAX bytes is an invalid disk size as it's not // divisible by 4096. Create the maximum sized disk here. let max_disk_size = i64::MAX - - (i64::MAX % (BlockSize::AdvancedFormat.to_bytes() as i64)); + - (i64::MAX % i64::from(BlockSize::AdvancedFormat.to_bytes())); let (blocks_per_extent, extent_count) = DataStore::get_crucible_allocation( &BlockSize::AdvancedFormat, @@ -338,16 +462,16 @@ mod test { // We should still be rounding up to the nearest extent size. 
assert_eq!( - extent_count as u128 * DataStore::EXTENT_SIZE as u128, + u128::from(extent_count) * u128::from(DataStore::EXTENT_SIZE), i64::MAX as u128 + 1, ); // Assert that the regions allocated will fit this disk assert!( max_disk_size as u128 - <= extent_count as u128 - * blocks_per_extent as u128 - * DataStore::EXTENT_SIZE as u128 + <= u128::from(extent_count) + * u128::from(blocks_per_extent) + * u128::from(DataStore::EXTENT_SIZE) ); } } diff --git a/nexus/db-queries/src/db/datastore/region_replacement.rs b/nexus/db-queries/src/db/datastore/region_replacement.rs new file mode 100644 index 0000000000..d12d123e7e --- /dev/null +++ b/nexus/db-queries/src/db/datastore/region_replacement.rs @@ -0,0 +1,907 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! [`DataStore`] methods on [`RegionReplacement`]s. + +use super::DataStore; +use crate::context::OpContext; +use crate::db; +use crate::db::datastore::SQL_BATCH_SIZE; +use crate::db::error::public_error_from_diesel; +use crate::db::error::ErrorHandler; +use crate::db::model::Region; +use crate::db::model::RegionReplacement; +use crate::db::model::RegionReplacementState; +use crate::db::model::RegionReplacementStep; +use crate::db::model::UpstairsRepairNotification; +use crate::db::model::UpstairsRepairNotificationType; +use crate::db::model::VolumeRepair; +use crate::db::pagination::paginated; +use crate::db::pagination::Paginator; +use crate::db::update_and_check::UpdateAndCheck; +use crate::db::update_and_check::UpdateStatus; +use crate::db::TransactionError; +use async_bb8_diesel::AsyncConnection; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::prelude::*; +use omicron_common::api::external::Error; +use omicron_uuid_kinds::DownstairsRegionKind; +use omicron_uuid_kinds::TypedUuid; +use uuid::Uuid; + +impl DataStore { + /// Create and insert a region replacement request for a Region, returning the ID of the + /// request. + pub async fn create_region_replacement_request_for_region( + &self, + opctx: &OpContext, + region: &Region, + ) -> Result { + let request = RegionReplacement::for_region(region); + let request_id = request.id; + + self.insert_region_replacement_request(opctx, request).await?; + + Ok(request_id) + } + + /// Insert a region replacement request into the DB, also creating the + /// VolumeRepair record. + pub async fn insert_region_replacement_request( + &self, + opctx: &OpContext, + request: RegionReplacement, + ) -> Result<(), Error> { + self.pool_connection_authorized(opctx) + .await? 
+ .transaction_async(|conn| async move { + use db::schema::region_replacement::dsl; + use db::schema::volume_repair::dsl as volume_repair_dsl; + + diesel::insert_into(volume_repair_dsl::volume_repair) + .values(VolumeRepair { + volume_id: request.volume_id, + repair_id: request.id, + }) + .execute_async(&conn) + .await?; + + diesel::insert_into(dsl::region_replacement) + .values(request) + .execute_async(&conn) + .await?; + + Ok(()) + }) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn get_region_replacement_request_by_id( + &self, + opctx: &OpContext, + id: Uuid, + ) -> Result { + use db::schema::region_replacement::dsl; + + dsl::region_replacement + .filter(dsl::id.eq(id)) + .get_result_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn get_requested_region_replacements( + &self, + opctx: &OpContext, + ) -> Result, Error> { + opctx.check_complex_operations_allowed()?; + + let mut replacements = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + let conn = self.pool_connection_authorized(opctx).await?; + + while let Some(p) = paginator.next() { + use db::schema::region_replacement::dsl; + + let batch = paginated( + dsl::region_replacement, + dsl::id, + &p.current_pagparams(), + ) + .filter( + dsl::replacement_state.eq(RegionReplacementState::Requested), + ) + .get_results_async::(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + paginator = p.found_batch(&batch, &|r| r.id); + replacements.extend(batch); + } + + Ok(replacements) + } + + /// Return region replacement requests that are in state `Running` with no + /// currently operating saga. These need to be checked on or driven forward. + pub async fn get_running_region_replacements( + &self, + opctx: &OpContext, + ) -> Result, Error> { + use db::schema::region_replacement::dsl; + + dsl::region_replacement + .filter(dsl::replacement_state.eq(RegionReplacementState::Running)) + .filter(dsl::operating_saga_id.is_null()) + .get_results_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// Return region replacement requests that are in state `ReplacementDone` + /// with no currently operating saga. These need to be completed. + pub async fn get_done_region_replacements( + &self, + opctx: &OpContext, + ) -> Result, Error> { + use db::schema::region_replacement::dsl; + + dsl::region_replacement + .filter( + dsl::replacement_state + .eq(RegionReplacementState::ReplacementDone), + ) + .filter(dsl::operating_saga_id.is_null()) + .get_results_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// Transition a RegionReplacement record from Requested to Allocating, + /// setting a unique id at the same time. 
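The transition below is a guarded, compare-and-swap style update: it fires only while the record is still Requested with no operating saga, and a retry by the saga that already claimed it counts as success. A minimal in-memory sketch of that pattern (hypothetical types; the real version is the single conditional UPDATE issued through check_if_exists/execute_and_check):

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum State {
    Requested,
    Allocating,
}

#[derive(Debug)]
struct Request {
    state: State,
    operating_saga_id: Option<u64>,
}

// Claim the record for `saga` and move it to Allocating. Re-running for the
// same saga is a no-op success; any other combination is a conflict.
fn set_allocating(r: &mut Request, saga: u64) -> Result<(), String> {
    match (r.state, r.operating_saga_id) {
        (State::Requested, None) => {
            r.state = State::Allocating;
            r.operating_saga_id = Some(saga);
            Ok(())
        }
        (State::Allocating, Some(id)) if id == saga => Ok(()),
        _ => Err(format!("conflict: {r:?}")),
    }
}

fn main() {
    let mut r = Request { state: State::Requested, operating_saga_id: None };
    assert!(set_allocating(&mut r, 1).is_ok());
    assert!(set_allocating(&mut r, 1).is_ok()); // idempotent for the owner
    assert!(set_allocating(&mut r, 2).is_err()); // another saga conflicts
}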
+ pub async fn set_region_replacement_allocating( + &self, + opctx: &OpContext, + region_replacement_id: Uuid, + operating_saga_id: Uuid, + ) -> Result<(), Error> { + use db::schema::region_replacement::dsl; + let updated = diesel::update(dsl::region_replacement) + .filter(dsl::id.eq(region_replacement_id)) + .filter( + dsl::replacement_state.eq(RegionReplacementState::Requested), + ) + .filter(dsl::operating_saga_id.is_null()) + .set(( + dsl::replacement_state.eq(RegionReplacementState::Allocating), + dsl::operating_saga_id.eq(operating_saga_id), + )) + .check_if_exists::(region_replacement_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + if record.operating_saga_id == Some(operating_saga_id) + && record.replacement_state + == RegionReplacementState::Allocating + { + Ok(()) + } else { + Err(Error::conflict(format!( + "region replacement {} set to {:?} (operating saga id {:?})", + region_replacement_id, + record.replacement_state, + record.operating_saga_id, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + /// Transition a RegionReplacement record from Allocating to Requested, + /// clearing the operating saga id. + pub async fn undo_set_region_replacement_allocating( + &self, + opctx: &OpContext, + region_replacement_id: Uuid, + operating_saga_id: Uuid, + ) -> Result<(), Error> { + use db::schema::region_replacement::dsl; + let updated = diesel::update(dsl::region_replacement) + .filter(dsl::id.eq(region_replacement_id)) + .filter( + dsl::replacement_state.eq(RegionReplacementState::Allocating), + ) + .filter(dsl::operating_saga_id.eq(operating_saga_id)) + .set(( + dsl::replacement_state.eq(RegionReplacementState::Requested), + dsl::operating_saga_id.eq(Option::::None), + )) + .check_if_exists::(region_replacement_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + if record.operating_saga_id == None + && record.replacement_state + == RegionReplacementState::Requested + { + Ok(()) + } else { + Err(Error::conflict(format!( + "region replacement {} set to {:?} (operating saga id {:?})", + region_replacement_id, + record.replacement_state, + record.operating_saga_id, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + /// Transition from Allocating to Running, and clear the operating saga id. 
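Taken together, the methods in this module walk a request through a linear lifecycle; each undo_* method steps one arrow backwards, and mark_region_replacement_as_done may force a record to ReplacementDone no matter who holds it. A standalone sketch of that lifecycle (the state names mirror RegionReplacementState; the ordering is inferred from the transitions defined in this file):

#[derive(Clone, Copy, Debug)]
enum ReplacementState {
    Requested,
    Allocating,
    Running,
    Driving,
    ReplacementDone,
    Completing,
    Complete,
}

// The forward direction driven by the start, drive, and finish sagas.
fn next(state: ReplacementState) -> Option<ReplacementState> {
    use ReplacementState::*;
    match state {
        Requested => Some(Allocating),
        Allocating => Some(Running),
        Running => Some(Driving),
        Driving => Some(ReplacementDone),
        ReplacementDone => Some(Completing),
        Completing => Some(Complete),
        Complete => None,
    }
}

fn main() {
    let mut state = ReplacementState::Requested;
    while let Some(n) = next(state) {
        println!("{state:?} -> {n:?}");
        state = n;
    }
}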
+ pub async fn set_region_replacement_running( + &self, + opctx: &OpContext, + region_replacement_id: Uuid, + operating_saga_id: Uuid, + new_region_id: Uuid, + old_region_volume_id: Uuid, + ) -> Result<(), Error> { + use db::schema::region_replacement::dsl; + let updated = diesel::update(dsl::region_replacement) + .filter(dsl::id.eq(region_replacement_id)) + .filter(dsl::operating_saga_id.eq(operating_saga_id)) + .filter( + dsl::replacement_state.eq(RegionReplacementState::Allocating), + ) + .set(( + dsl::replacement_state.eq(RegionReplacementState::Running), + dsl::old_region_volume_id.eq(Some(old_region_volume_id)), + dsl::new_region_id.eq(Some(new_region_id)), + dsl::operating_saga_id.eq(Option::::None), + )) + .check_if_exists::(region_replacement_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + if record.operating_saga_id == None + && record.replacement_state + == RegionReplacementState::Running + && record.new_region_id == Some(new_region_id) + && record.old_region_volume_id + == Some(old_region_volume_id) + { + Ok(()) + } else { + Err(Error::conflict(format!( + "region replacement {} set to {:?} (operating saga id {:?})", + region_replacement_id, + record.replacement_state, + record.operating_saga_id, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + /// Find an in-progress region replacement request by new region id + pub async fn lookup_in_progress_region_replacement_request_by_new_region_id( + &self, + opctx: &OpContext, + new_region_id: TypedUuid, + ) -> Result, Error> { + use db::schema::region_replacement::dsl; + + dsl::region_replacement + .filter( + dsl::new_region_id + .eq(nexus_db_model::to_db_typed_uuid(new_region_id)), + ) + .filter(dsl::replacement_state.ne(RegionReplacementState::Complete)) + .get_result_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// Find a region replacement request by old region id + pub async fn lookup_region_replacement_request_by_old_region_id( + &self, + opctx: &OpContext, + old_region_id: TypedUuid, + ) -> Result, Error> { + use db::schema::region_replacement::dsl; + + dsl::region_replacement + .filter( + dsl::old_region_id + .eq(nexus_db_model::to_db_typed_uuid(old_region_id)), + ) + .get_result_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// Transition a RegionReplacement record from Running to Driving, + /// setting a unique id at the same time. + pub async fn set_region_replacement_driving( + &self, + opctx: &OpContext, + region_replacement_id: Uuid, + operating_saga_id: Uuid, + ) -> Result<(), Error> { + use db::schema::region_replacement::dsl; + let updated = diesel::update(dsl::region_replacement) + .filter(dsl::id.eq(region_replacement_id)) + .filter(dsl::replacement_state.eq(RegionReplacementState::Running)) + .filter(dsl::operating_saga_id.is_null()) + .set(( + dsl::replacement_state.eq(RegionReplacementState::Driving), + dsl::operating_saga_id.eq(operating_saga_id), + )) + .check_if_exists::(region_replacement_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) 
+ .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + if record.operating_saga_id == Some(operating_saga_id) + && record.replacement_state + == RegionReplacementState::Driving + { + Ok(()) + } else { + Err(Error::conflict(format!( + "region replacement {} set to {:?} (operating saga id {:?})", + region_replacement_id, + record.replacement_state, + record.operating_saga_id, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + /// Transition a RegionReplacement record from Driving to Running, + /// clearing the operating saga id. + pub async fn undo_set_region_replacement_driving( + &self, + opctx: &OpContext, + region_replacement_id: Uuid, + operating_saga_id: Uuid, + ) -> Result<(), Error> { + use db::schema::region_replacement::dsl; + let updated = diesel::update(dsl::region_replacement) + .filter(dsl::id.eq(region_replacement_id)) + .filter(dsl::replacement_state.eq(RegionReplacementState::Driving)) + .filter(dsl::operating_saga_id.eq(operating_saga_id)) + .set(( + dsl::replacement_state.eq(RegionReplacementState::Running), + dsl::operating_saga_id.eq(Option::::None), + )) + .check_if_exists::(region_replacement_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + if record.operating_saga_id == None + && record.replacement_state + == RegionReplacementState::Running + { + Ok(()) + } else { + Err(Error::conflict(format!( + "region replacement {} set to {:?} (operating saga id {:?})", + region_replacement_id, + record.replacement_state, + record.operating_saga_id, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + /// Transition a RegionReplacement record from Driving to ReplacementDone, + /// clearing the operating saga id. + pub async fn set_region_replacement_from_driving_to_done( + &self, + opctx: &OpContext, + region_replacement_id: Uuid, + operating_saga_id: Uuid, + ) -> Result<(), Error> { + use db::schema::region_replacement::dsl; + let updated = diesel::update(dsl::region_replacement) + .filter(dsl::id.eq(region_replacement_id)) + .filter(dsl::replacement_state.eq(RegionReplacementState::Driving)) + .filter(dsl::operating_saga_id.eq(operating_saga_id)) + .set(( + dsl::replacement_state + .eq(RegionReplacementState::ReplacementDone), + dsl::operating_saga_id.eq(Option::::None), + )) + .check_if_exists::(region_replacement_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) 
+ .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + if record.operating_saga_id == None + && record.replacement_state + == RegionReplacementState::ReplacementDone + { + Ok(()) + } else { + Err(Error::conflict(format!( + "region replacement {} set to {:?} (operating saga id {:?})", + region_replacement_id, + record.replacement_state, + record.operating_saga_id, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + /// Return the most current step for a region replacement request + pub async fn current_region_replacement_request_step( + &self, + opctx: &OpContext, + id: Uuid, + ) -> Result, Error> { + use db::schema::region_replacement_step::dsl; + + dsl::region_replacement_step + .filter(dsl::replacement_id.eq(id)) + .order_by(dsl::step_time.desc()) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// Record a step taken to drive a region replacement forward + pub async fn add_region_replacement_request_step( + &self, + opctx: &OpContext, + step: RegionReplacementStep, + ) -> Result<(), Error> { + use db::schema::region_replacement_step::dsl; + + let conn = self.pool_connection_authorized(opctx).await?; + + diesel::insert_into(dsl::region_replacement_step) + .values(step) + .on_conflict((dsl::replacement_id, dsl::step_time, dsl::step_type)) + .do_nothing() + .execute_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(()) + } + + /// Transition a RegionReplacement record from ReplacementDone to Completing, + /// setting a unique id at the same time. + pub async fn set_region_replacement_completing( + &self, + opctx: &OpContext, + region_replacement_id: Uuid, + operating_saga_id: Uuid, + ) -> Result<(), Error> { + use db::schema::region_replacement::dsl; + let updated = diesel::update(dsl::region_replacement) + .filter(dsl::id.eq(region_replacement_id)) + .filter( + dsl::replacement_state + .eq(RegionReplacementState::ReplacementDone), + ) + .filter(dsl::operating_saga_id.is_null()) + .set(( + dsl::replacement_state.eq(RegionReplacementState::Completing), + dsl::operating_saga_id.eq(operating_saga_id), + )) + .check_if_exists::(region_replacement_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + if record.operating_saga_id == Some(operating_saga_id) + && record.replacement_state + == RegionReplacementState::Completing + { + Ok(()) + } else { + Err(Error::conflict(format!( + "region replacement {} set to {:?} (operating saga id {:?})", + region_replacement_id, + record.replacement_state, + record.operating_saga_id, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + /// Transition a RegionReplacement record from Completing to ReplacementDone, + /// clearing the operating saga id. 
+ pub async fn undo_set_region_replacement_completing( + &self, + opctx: &OpContext, + region_replacement_id: Uuid, + operating_saga_id: Uuid, + ) -> Result<(), Error> { + use db::schema::region_replacement::dsl; + let updated = diesel::update(dsl::region_replacement) + .filter(dsl::id.eq(region_replacement_id)) + .filter( + dsl::replacement_state.eq(RegionReplacementState::Completing), + ) + .filter(dsl::operating_saga_id.eq(operating_saga_id)) + .set(( + dsl::replacement_state + .eq(RegionReplacementState::ReplacementDone), + dsl::operating_saga_id.eq(Option::::None), + )) + .check_if_exists::(region_replacement_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + if record.operating_saga_id == None + && record.replacement_state + == RegionReplacementState::ReplacementDone + { + Ok(()) + } else { + Err(Error::conflict(format!( + "region replacement {} set to {:?} (operating saga id {:?})", + region_replacement_id, + record.replacement_state, + record.operating_saga_id, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + /// Transition a RegionReplacement record from Completing to Complete, + /// clearing the operating saga id. Also removes the `volume_repair` record + /// that is taking a "lock" on the Volume. + pub async fn set_region_replacement_complete( + &self, + opctx: &OpContext, + region_replacement_id: Uuid, + operating_saga_id: Uuid, + ) -> Result<(), Error> { + type TxnError = TransactionError; + + self.pool_connection_authorized(opctx) + .await? + .transaction_async(|conn| async move { + use db::schema::volume_repair::dsl as volume_repair_dsl; + + diesel::delete( + volume_repair_dsl::volume_repair + .filter(volume_repair_dsl::repair_id.eq(region_replacement_id)) + ) + .execute_async(&conn) + .await?; + + use db::schema::region_replacement::dsl; + + let result = diesel::update(dsl::region_replacement) + .filter(dsl::id.eq(region_replacement_id)) + .filter( + dsl::replacement_state.eq(RegionReplacementState::Completing), + ) + .filter(dsl::operating_saga_id.eq(operating_saga_id)) + .set(( + dsl::replacement_state.eq(RegionReplacementState::Complete), + dsl::operating_saga_id.eq(Option::::None), + )) + .check_if_exists::(region_replacement_id) + .execute_and_check(&conn) + .await?; + + match result.status { + UpdateStatus::Updated => Ok(()), + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + if record.operating_saga_id == None + && record.replacement_state + == RegionReplacementState::Complete + { + Ok(()) + } else { + Err(TxnError::CustomError(Error::conflict(format!( + "region replacement {} set to {:?} (operating saga id {:?})", + region_replacement_id, + record.replacement_state, + record.operating_saga_id, + )))) + } + } + } + }) + .await + .map_err(|e| match e { + TxnError::CustomError(error) => error, + + TxnError::Database(error) => { + public_error_from_diesel(error, ErrorHandler::Server) + } + }) + } + + /// Nexus has been notified by an Upstairs (or has otherwised determined) + /// that a region replacement is done, so update the record. This may arrive + /// in the middle of a drive saga invocation, so do not filter on state or + /// operating saga id! 
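Stepping back: the volume_repair row inserted when a request is created, and deleted by set_region_replacement_complete above, acts as a per-volume lock, so at most one replacement can be open against a volume at a time (test_one_replacement_per_volume below exercises exactly this). A simplified in-memory sketch of that invariant; VolumeRepairLocks is a hypothetical stand-in for the table and its uniqueness constraint:

use std::collections::HashMap;

// Maps volume_id -> repair_id for repairs that are currently in flight.
struct VolumeRepairLocks {
    by_volume: HashMap<u64, u64>,
}

impl VolumeRepairLocks {
    // Take the per-volume lock; a second, different repair on the same
    // volume is rejected, while re-taking it for the same repair is fine.
    fn lock(&mut self, volume_id: u64, repair_id: u64) -> Result<(), String> {
        match self.by_volume.get(&volume_id).copied() {
            None => {
                self.by_volume.insert(volume_id, repair_id);
                Ok(())
            }
            Some(existing) if existing == repair_id => Ok(()),
            Some(existing) => Err(format!(
                "volume {volume_id} already locked by repair {existing}"
            )),
        }
    }

    // Releasing the lock is the analogue of deleting the volume_repair row
    // when a replacement reaches Complete.
    fn unlock(&mut self, volume_id: u64) {
        self.by_volume.remove(&volume_id);
    }
}

fn main() {
    let mut locks = VolumeRepairLocks { by_volume: HashMap::new() };
    assert!(locks.lock(1, 100).is_ok());
    assert!(locks.lock(1, 200).is_err()); // second repair on the same volume
    locks.unlock(1);
    assert!(locks.lock(1, 200).is_ok());
}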
+ pub async fn mark_region_replacement_as_done( + &self, + opctx: &OpContext, + region_replacement_id: Uuid, + ) -> Result<(), Error> { + use db::schema::region_replacement::dsl; + let updated = diesel::update(dsl::region_replacement) + .filter(dsl::id.eq(region_replacement_id)) + .set(( + dsl::replacement_state + .eq(RegionReplacementState::ReplacementDone), + dsl::operating_saga_id.eq(Option::::None), + )) + .check_if_exists::(region_replacement_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + .await; + + match updated { + Ok(result) => match result.status { + UpdateStatus::Updated => Ok(()), + + UpdateStatus::NotUpdatedButExists => { + let record = result.found; + + if record.operating_saga_id == None + && record.replacement_state + == RegionReplacementState::ReplacementDone + { + Ok(()) + } else { + Err(Error::conflict(format!( + "region replacement {} set to {:?} (operating saga id {:?})", + region_replacement_id, + record.replacement_state, + record.operating_saga_id, + ))) + } + } + }, + + Err(e) => Err(public_error_from_diesel(e, ErrorHandler::Server)), + } + } + + /// Check if a region replacement request has at least one matching + /// successful "repair finished" notification. + // + // For the purposes of changing the state of a region replacement request to + // `ReplacementDone`, check if Nexus has seen at least related one + // successful "repair finished" notification. + // + // Note: after a region replacement request has transitioned to `Complete`, + // there may be many future "repair finished" notifications for the "new" + // region that are unrelated to the replacement request. + pub async fn request_has_matching_successful_finish_notification( + &self, + opctx: &OpContext, + region_replacement: &RegionReplacement, + ) -> Result { + let Some(new_region_id) = region_replacement.new_region_id else { + return Err(Error::invalid_request(format!( + "region replacement {} has no new region id!", + region_replacement.id, + ))); + }; + + use db::schema::upstairs_repair_notification::dsl; + + let maybe_notification = dsl::upstairs_repair_notification + .filter(dsl::region_id.eq(new_region_id)) + .filter( + dsl::notification_type + .eq(UpstairsRepairNotificationType::Succeeded), + ) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(maybe_notification.is_some()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + use crate::db::datastore::test_utils::datastore_test; + use nexus_test_utils::db::test_setup_database; + use omicron_test_utils::dev; + + #[tokio::test] + async fn test_one_replacement_per_volume() { + let logctx = dev::test_setup_log("test_one_replacement_per_volume"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let region_1_id = Uuid::new_v4(); + let region_2_id = Uuid::new_v4(); + let volume_id = Uuid::new_v4(); + + let request_1 = RegionReplacement::new(region_1_id, volume_id); + let request_2 = RegionReplacement::new(region_2_id, volume_id); + + datastore + .insert_region_replacement_request(&opctx, request_1) + .await + .unwrap(); + datastore + .insert_region_replacement_request(&opctx, request_2) + .await + .unwrap_err(); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_replacement_done_in_middle_of_drive_saga() { + // If Nexus receives a notification that a repair has finished in the + 
// middle of a drive saga, then make sure the replacement request state + // ends up as `ReplacementDone`. + + let logctx = dev::test_setup_log( + "test_replacement_done_in_middle_of_drive_saga", + ); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + let region_id = Uuid::new_v4(); + let volume_id = Uuid::new_v4(); + + let request = { + let mut request = RegionReplacement::new(region_id, volume_id); + request.replacement_state = RegionReplacementState::Running; + request + }; + + datastore + .insert_region_replacement_request(&opctx, request.clone()) + .await + .unwrap(); + + // Transition to Driving + + let saga_id = Uuid::new_v4(); + + datastore + .set_region_replacement_driving(&opctx, request.id, saga_id) + .await + .unwrap(); + + // Now, Nexus receives a notification that the repair has finished + // successfully + + datastore + .mark_region_replacement_as_done(&opctx, request.id) + .await + .unwrap(); + + // Ensure that the state is ReplacementDone, and the operating saga id + // is cleared. + + let actual_request = datastore + .get_region_replacement_request_by_id(&opctx, request.id) + .await + .unwrap(); + + assert_eq!( + actual_request.replacement_state, + RegionReplacementState::ReplacementDone + ); + assert_eq!(actual_request.operating_saga_id, None); + + // The Drive saga will unwind when it tries to set the state back to + // Running. + + datastore + .undo_set_region_replacement_driving(&opctx, request.id, saga_id) + .await + .unwrap_err(); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } +} diff --git a/nexus/db-queries/src/db/datastore/service.rs b/nexus/db-queries/src/db/datastore/service.rs deleted file mode 100644 index df7ed27a6d..0000000000 --- a/nexus/db-queries/src/db/datastore/service.rs +++ /dev/null @@ -1,115 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! [`DataStore`] methods on [`Service`]s. - -use super::DataStore; -use crate::authz; -use crate::context::OpContext; -use crate::db; -use crate::db::collection_insert::AsyncInsertError; -use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel; -use crate::db::error::retryable; -use crate::db::error::ErrorHandler; -use crate::db::error::TransactionError; -use crate::db::identity::Asset; -use crate::db::model::Service; -use crate::db::model::Sled; -use crate::db::pagination::paginated; -use crate::db::pool::DbConnection; -use async_bb8_diesel::AsyncRunQueryDsl; -use chrono::Utc; -use diesel::prelude::*; -use diesel::upsert::excluded; -use nexus_db_model::ServiceKind; -use omicron_common::api::external::CreateResult; -use omicron_common::api::external::DataPageParams; -use omicron_common::api::external::Error; -use omicron_common::api::external::ListResultVec; -use omicron_common::api::external::LookupType; -use omicron_common::api::external::ResourceType; -use uuid::Uuid; - -impl DataStore { - /// Stores a new service in the database. 
- pub async fn service_upsert( - &self, - opctx: &OpContext, - service: Service, - ) -> CreateResult { - let conn = self.pool_connection_authorized(opctx).await?; - self.service_upsert_conn(&conn, service).await.map_err(|e| match e { - TransactionError::CustomError(err) => err, - TransactionError::Database(err) => { - public_error_from_diesel(err, ErrorHandler::Server) - } - }) - } - - /// Stores a new service in the database (using an existing db connection). - pub(crate) async fn service_upsert_conn( - &self, - conn: &async_bb8_diesel::Connection, - service: Service, - ) -> Result> { - use db::schema::service::dsl; - - let service_id = service.id(); - let sled_id = service.sled_id; - Sled::insert_resource( - sled_id, - diesel::insert_into(dsl::service) - .values(service) - .on_conflict(dsl::id) - .do_update() - .set(( - dsl::time_modified.eq(Utc::now()), - dsl::sled_id.eq(excluded(dsl::sled_id)), - dsl::ip.eq(excluded(dsl::ip)), - dsl::port.eq(excluded(dsl::port)), - dsl::kind.eq(excluded(dsl::kind)), - )), - ) - .insert_and_get_result_async(conn) - .await - .map_err(|e| match e { - AsyncInsertError::CollectionNotFound => { - TransactionError::CustomError(Error::ObjectNotFound { - type_name: ResourceType::Sled, - lookup_type: LookupType::ById(sled_id), - }) - } - AsyncInsertError::DatabaseError(e) => { - if retryable(&e) { - return TransactionError::Database(e); - } - TransactionError::CustomError(public_error_from_diesel( - e, - ErrorHandler::Conflict( - ResourceType::Service, - &service_id.to_string(), - ), - )) - } - }) - } - - /// List services of a given kind - pub async fn services_list_kind( - &self, - opctx: &OpContext, - kind: ServiceKind, - pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { - opctx.authorize(authz::Action::Read, &authz::FLEET).await?; - use db::schema::service::dsl; - paginated(dsl::service, dsl::id, pagparams) - .filter(dsl::kind.eq(kind)) - .select(Service::as_select()) - .load_async(&*self.pool_connection_authorized(opctx).await?) - .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) - } -} diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs index 93d3d0e6a2..bf43b9182d 100644 --- a/nexus/db-queries/src/db/datastore/sled.rs +++ b/nexus/db-queries/src/db/datastore/sled.rs @@ -19,11 +19,15 @@ use crate::db::model::SledState; use crate::db::model::SledUpdate; use crate::db::pagination::paginated; use crate::db::pagination::Paginator; +use crate::db::pool::DbConnection; use crate::db::update_and_check::{UpdateAndCheck, UpdateStatus}; +use crate::db::TransactionError; use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; +use nexus_db_model::ApplySledFilterExt; +use nexus_types::deployment::SledFilter; use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledProvisionPolicy; use nexus_types::identity::Asset; @@ -31,8 +35,10 @@ use omicron_common::api::external; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; +use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::ResourceType; +use omicron_common::bail_unless; use std::fmt; use strum::IntoEnumIterator; use thiserror::Error; @@ -41,22 +47,27 @@ use uuid::Uuid; impl DataStore { /// Stores a new sled in the database. 
/// + /// Returns the sled, and whether or not it was updated on success. + /// /// Returns an error if `sled_agent_gen` is stale, or the sled is /// decommissioned. pub async fn sled_upsert( &self, sled_update: SledUpdate, - ) -> CreateResult { + ) -> CreateResult<(Sled, bool)> { use db::schema::sled::dsl; // required for conditional upsert use diesel::query_dsl::methods::FilterDsl; - diesel::insert_into(dsl::sled) - .values(sled_update.clone().into_insertable()) + let insertable_sled = sled_update.clone().into_insertable(); + let now = insertable_sled.time_modified(); + + let sled = diesel::insert_into(dsl::sled) + .values(insertable_sled) .on_conflict(dsl::id) .do_update() .set(( - dsl::time_modified.eq(Utc::now()), + dsl::time_modified.eq(now), dsl::ip.eq(sled_update.ip), dsl::port.eq(sled_update.port), dsl::rack_id.eq(sled_update.rack_id), @@ -80,18 +91,51 @@ impl DataStore { &sled_update.id().to_string(), ), ) - }) + })?; + + // We compare only seconds since the epoch, because writing to and + // reading from the database causes us to lose precision. + let was_modified = now.timestamp() == sled.time_modified().timestamp(); + Ok((sled, was_modified)) + } + + /// Confirms that a sled exists and is in-service. + /// + /// This function may be called from a transaction context. + pub async fn check_sled_in_service_on_connection( + conn: &async_bb8_diesel::Connection, + sled_id: Uuid, + ) -> Result<(), TransactionError> { + use db::schema::sled::dsl; + let sled_exists_and_in_service = diesel::select(diesel::dsl::exists( + dsl::sled + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(sled_id)) + .sled_filter(SledFilter::InService), + )) + .get_result_async::(conn) + .await?; + + bail_unless!( + sled_exists_and_in_service, + "Sled {} is not in service", + sled_id, + ); + + Ok(()) } pub async fn sled_list( &self, opctx: &OpContext, pagparams: &DataPageParams<'_, Uuid>, + sled_filter: SledFilter, ) -> ListResultVec { opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; use db::schema::sled::dsl; paginated(dsl::sled, dsl::id, pagparams) .select(Sled::as_select()) + .sled_filter(sled_filter) .load_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) @@ -105,6 +149,7 @@ impl DataStore { pub async fn sled_list_all_batched( &self, opctx: &OpContext, + sled_filter: SledFilter, ) -> ListResultVec { opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; opctx.check_complex_operations_allowed()?; @@ -112,7 +157,9 @@ impl DataStore { let mut all_sleds = Vec::new(); let mut paginator = Paginator::new(SQL_BATCH_SIZE); while let Some(p) = paginator.next() { - let batch = self.sled_list(opctx, &p.current_pagparams()).await?; + let batch = self + .sled_list(opctx, &p.current_pagparams(), sled_filter) + .await?; paginator = p.found_batch(&batch, &|s: &nexus_db_model::Sled| s.id()); all_sleds.extend(batch); @@ -198,26 +245,22 @@ impl DataStore { // Generate a query describing all of the sleds that have space // for this reservation. - let mut sled_targets = - sled_dsl::sled - .left_join( - resource_dsl::sled_resource - .on(resource_dsl::sled_id.eq(sled_dsl::id)), - ) - .group_by(sled_dsl::id) - .having( - sled_has_space_for_threads - .and(sled_has_space_for_rss) - .and(sled_has_space_in_reservoir), - ) - .filter(sled_dsl::time_deleted.is_null()) - // Ensure that the sled is in-service and active. 
- .filter(sled_dsl::sled_policy.eq( - to_db_sled_policy(SledPolicy::provisionable()), - )) - .filter(sled_dsl::sled_state.eq(SledState::Active)) - .select(sled_dsl::id) - .into_boxed(); + let mut sled_targets = sled_dsl::sled + .left_join( + resource_dsl::sled_resource + .on(resource_dsl::sled_id.eq(sled_dsl::id)), + ) + .group_by(sled_dsl::id) + .having( + sled_has_space_for_threads + .and(sled_has_space_for_rss) + .and(sled_has_space_in_reservoir), + ) + .filter(sled_dsl::time_deleted.is_null()) + // Ensure that reservations can be created on the sled. + .sled_filter(SledFilter::ReservationCreate) + .select(sled_dsl::id) + .into_boxed(); // Further constrain the sled IDs according to any caller- // supplied constraints. @@ -329,6 +372,9 @@ impl DataStore { /// sufficient warning to the operator. /// /// This is idempotent, and it returns the old policy of the sled. + /// + /// Calling this function also implicitly marks the disks attached to a sled + /// as "expunged". pub async fn sled_set_policy_to_expunged( &self, opctx: &OpContext, @@ -348,73 +394,128 @@ impl DataStore { &self, opctx: &OpContext, authz_sled: &authz::Sled, - new_policy: SledPolicy, + new_sled_policy: SledPolicy, check: ValidateTransition, ) -> Result { - use db::schema::sled::dsl; - opctx.authorize(authz::Action::Modify, authz_sled).await?; let sled_id = authz_sled.id(); - let query = diesel::update(dsl::sled) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(sled_id)); - - let t = SledTransition::Policy(new_policy); - let valid_old_policies = t.valid_old_policies(); - let valid_old_states = t.valid_old_states(); - - let query = match check { - ValidateTransition::Yes => query - .filter(dsl::sled_policy.eq_any( - valid_old_policies.into_iter().map(to_db_sled_policy), - )) - .filter( - dsl::sled_state.eq_any(valid_old_states.iter().copied()), - ) - .into_boxed(), - #[cfg(test)] - ValidateTransition::No => query.into_boxed(), - }; + let err = OptionalError::new(); + let conn = self.pool_connection_authorized(opctx).await?; + let policy = self + .transaction_retry_wrapper("sled_set_policy") + .transaction(&conn, |conn| { + let err = err.clone(); - let query = query - .set(( - dsl::sled_policy.eq(to_db_sled_policy(new_policy)), - dsl::time_modified.eq(Utc::now()), - )) - .check_if_exists::(sled_id); + async move { + let t = SledTransition::Policy(new_sled_policy); + let valid_old_policies = t.valid_old_policies(); + let valid_old_states = t.valid_old_states(); + + use db::schema::sled::dsl; + let query = diesel::update(dsl::sled) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(sled_id)); + + let query = match check { + ValidateTransition::Yes => query + .filter( + dsl::sled_policy.eq_any( + valid_old_policies + .into_iter() + .map(to_db_sled_policy), + ), + ) + .filter( + dsl::sled_state + .eq_any(valid_old_states.iter().copied()), + ) + .into_boxed(), + #[cfg(test)] + ValidateTransition::No => query.into_boxed(), + }; + + let query = query + .set(( + dsl::sled_policy + .eq(to_db_sled_policy(new_sled_policy)), + dsl::time_modified.eq(Utc::now()), + )) + .check_if_exists::(sled_id); + + let result = query.execute_and_check(&conn).await?; + + let old_policy = match (check, result.status) { + (ValidateTransition::Yes, UpdateStatus::Updated) => { + result.found.policy() + } + ( + ValidateTransition::Yes, + UpdateStatus::NotUpdatedButExists, + ) => { + // Two reasons this can happen: + // 1. An idempotent update: this is treated as a + // success. + // 2. Invalid state transition: a failure. 
+ // + // To differentiate between the two, check that the + // new policy is the same as the old policy, and + // that the old state is valid. + if result.found.policy() == new_sled_policy + && valid_old_states + .contains(&result.found.state()) + { + result.found.policy() + } else { + return Err(err.bail( + TransitionError::InvalidTransition { + current: result.found, + transition: SledTransition::Policy( + new_sled_policy, + ), + }, + )); + } + } + #[cfg(test)] + (ValidateTransition::No, _) => result.found.policy(), + }; + + // When a sled is expunged, the associated disks with that + // sled should also be implicitly set to expunged. + let new_disk_policy = match new_sled_policy { + SledPolicy::InService { .. } => None, + SledPolicy::Expunged => { + Some(nexus_db_model::PhysicalDiskPolicy::Expunged) + } + }; + if let Some(new_disk_policy) = new_disk_policy { + use db::schema::physical_disk::dsl as physical_disk_dsl; + diesel::update(physical_disk_dsl::physical_disk) + .filter(physical_disk_dsl::time_deleted.is_null()) + .filter(physical_disk_dsl::sled_id.eq(sled_id)) + .set( + physical_disk_dsl::disk_policy + .eq(new_disk_policy), + ) + .execute_async(&conn) + .await?; + } - let result = query - .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + Ok(old_policy) + } + }) .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; - - match (check, result.status) { - (ValidateTransition::Yes, UpdateStatus::Updated) => { - Ok(result.found.policy()) - } - (ValidateTransition::Yes, UpdateStatus::NotUpdatedButExists) => { - // Two reasons this can happen: - // 1. An idempotent update: this is treated as a success. - // 2. Invalid state transition: a failure. - // - // To differentiate between the two, check that the new policy - // is the same as the old policy, and that the old state is - // valid. - if result.found.policy() == new_policy - && valid_old_states.contains(&result.found.state()) - { - Ok(result.found.policy()) - } else { - Err(TransitionError::InvalidTransition { - current: result.found, - transition: SledTransition::Policy(new_policy), - }) + .map_err(|e| { + if let Some(err) = err.take() { + return err; } - } - #[cfg(test)] - (ValidateTransition::No, _) => Ok(result.found.policy()), - } + TransitionError::from(public_error_from_diesel( + e, + ErrorHandler::Server, + )) + })?; + Ok(policy) } /// Marks the state of the sled as decommissioned, as believed by Nexus. @@ -427,13 +528,13 @@ impl DataStore { /// # Errors /// /// This method returns an error if the sled policy is not a state that is - /// valid to decommission from (i.e. if, for the current sled policy, - /// [`SledPolicy::is_decommissionable`] returns `false`). + /// valid to decommission from (i.e. if [`SledPolicy::is_decommissionable`] + /// returns `false`). 
pub async fn sled_set_state_to_decommissioned( &self, opctx: &OpContext, authz_sled: &authz::Sled, - ) -> Result { + ) -> Result { self.sled_set_state_impl( opctx, authz_sled, @@ -441,14 +542,13 @@ impl DataStore { ValidateTransition::Yes, ) .await - .map_err(|error| error.into_external_error()) } pub(super) async fn sled_set_state_impl( &self, opctx: &OpContext, authz_sled: &authz::Sled, - new_state: SledState, + new_sled_state: SledState, check: ValidateTransition, ) -> Result { use db::schema::sled::dsl; @@ -456,62 +556,124 @@ impl DataStore { opctx.authorize(authz::Action::Modify, authz_sled).await?; let sled_id = authz_sled.id(); - let query = diesel::update(dsl::sled) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(sled_id)); - - let t = SledTransition::State(new_state); - let valid_old_policies = t.valid_old_policies(); - let valid_old_states = t.valid_old_states(); - - let query = match check { - ValidateTransition::Yes => query - .filter(dsl::sled_policy.eq_any( - valid_old_policies.iter().copied().map(to_db_sled_policy), - )) - .filter(dsl::sled_state.eq_any(valid_old_states)) - .into_boxed(), - #[cfg(test)] - ValidateTransition::No => query.into_boxed(), - }; + let err = OptionalError::new(); + let conn = self.pool_connection_authorized(opctx).await?; + let old_state = self + .transaction_retry_wrapper("sled_set_state") + .transaction(&conn, |conn| { + let err = err.clone(); - let query = query - .set(( - dsl::sled_state.eq(new_state), - dsl::time_modified.eq(Utc::now()), - )) - .check_if_exists::(sled_id); + async move { + let query = diesel::update(dsl::sled) + .filter(dsl::time_deleted.is_null()) + .filter(dsl::id.eq(sled_id)); + + let t = SledTransition::State(new_sled_state); + let valid_old_policies = t.valid_old_policies(); + let valid_old_states = t.valid_old_states(); + + let query = match check { + ValidateTransition::Yes => query + .filter( + dsl::sled_policy.eq_any( + valid_old_policies + .iter() + .copied() + .map(to_db_sled_policy), + ), + ) + .filter(dsl::sled_state.eq_any(valid_old_states)) + .into_boxed(), + #[cfg(test)] + ValidateTransition::No => query.into_boxed(), + }; + + let query = query + .set(( + dsl::sled_state.eq(new_sled_state), + dsl::time_modified.eq(Utc::now()), + )) + .check_if_exists::(sled_id); + + let result = query.execute_and_check(&conn).await?; + + let old_state = match (check, result.status) { + (ValidateTransition::Yes, UpdateStatus::Updated) => { + result.found.state() + } + ( + ValidateTransition::Yes, + UpdateStatus::NotUpdatedButExists, + ) => { + // Two reasons this can happen: + // 1. An idempotent update: this is treated as a + // success. + // 2. Invalid state transition: a failure. + // + // To differentiate between the two, check that the + // new state is the same as the old state, and the + // found policy is valid. + if result.found.state() == new_sled_state + && valid_old_policies + .contains(&result.found.policy()) + { + result.found.state() + } else { + return Err(err.bail( + TransitionError::InvalidTransition { + current: result.found, + transition: SledTransition::State( + new_sled_state, + ), + }, + )); + } + } + #[cfg(test)] + (ValidateTransition::No, _) => result.found.state(), + }; + + // When a sled is decommissioned, the associated disks with + // that sled should also be implicitly set to + // decommissioned. + // + // We use an explicit `match` to force ourselves to consider + // disk state if we add any addition sled states in the + // future. 
+ let new_disk_state = match new_sled_state { + SledState::Active => None, + SledState::Decommissioned => Some( + nexus_db_model::PhysicalDiskState::Decommissioned, + ), + }; + if let Some(new_disk_state) = new_disk_state { + use db::schema::physical_disk::dsl as physical_disk_dsl; + diesel::update(physical_disk_dsl::physical_disk) + .filter(physical_disk_dsl::time_deleted.is_null()) + .filter(physical_disk_dsl::sled_id.eq(sled_id)) + .set( + physical_disk_dsl::disk_state + .eq(new_disk_state), + ) + .execute_async(&conn) + .await?; + } - let result = query - .execute_and_check(&*self.pool_connection_authorized(opctx).await?) + Ok(old_state) + } + }) .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; - - match (check, result.status) { - (ValidateTransition::Yes, UpdateStatus::Updated) => { - Ok(result.found.state()) - } - (ValidateTransition::Yes, UpdateStatus::NotUpdatedButExists) => { - // Two reasons this can happen: - // 1. An idempotent update: this is treated as a success. - // 2. Invalid state transition: a failure. - // - // To differentiate between the two, check that the new state - // is the same as the old state, and the found policy is valid. - if result.found.state() == new_state - && valid_old_policies.contains(&result.found.policy()) - { - Ok(result.found.state()) - } else { - Err(TransitionError::InvalidTransition { - current: result.found, - transition: SledTransition::State(new_state), - }) + .map_err(|e| { + if let Some(err) = err.take() { + return err; } - } - #[cfg(test)] - (ValidateTransition::No, _) => Ok(result.found.state()), - } + TransitionError::from(public_error_from_diesel( + e, + ErrorHandler::Server, + )) + })?; + + Ok(old_state) } } @@ -523,7 +685,7 @@ impl DataStore { // valid for a new policy or state, except idempotent transitions. #[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub(super) enum SledTransition { +pub enum SledTransition { Policy(SledPolicy), State(SledState), } @@ -614,7 +776,7 @@ impl IntoEnumIterator for SledTransition { /// An error that occurred while setting a policy or state. #[derive(Debug, Error)] #[must_use] -pub(super) enum TransitionError { +pub enum TransitionError { /// The state transition check failed. /// /// The sled is returned. @@ -675,10 +837,16 @@ mod test { use anyhow::{Context, Result}; use itertools::Itertools; use nexus_db_model::Generation; + use nexus_db_model::PhysicalDisk; + use nexus_db_model::PhysicalDiskKind; + use nexus_db_model::PhysicalDiskPolicy; + use nexus_db_model::PhysicalDiskState; use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Asset; use omicron_common::api::external; use omicron_test_utils::dev; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::SledUuid; use predicates::{prelude::*, BoxPredicate}; use std::net::{Ipv6Addr, SocketAddrV6}; @@ -693,7 +861,7 @@ mod test { let (_opctx, datastore) = datastore_test(&logctx, &db).await; let mut sled_update = test_new_sled_update(); - let observed_sled = + let (observed_sled, _) = datastore.sled_upsert(sled_update.clone()).await.unwrap(); assert_eq!( observed_sled.usable_hardware_threads, @@ -726,7 +894,7 @@ mod test { sled_update.sled_agent_gen.0 = sled_update.sled_agent_gen.0.next(); // Test that upserting the sled propagates those changes to the DB. 
- let observed_sled = datastore + let (observed_sled, _) = datastore .sled_upsert(sled_update.clone()) .await .expect("Could not upsert sled during test prep"); @@ -753,7 +921,7 @@ mod test { let (_opctx, datastore) = datastore_test(&logctx, &db).await; let mut sled_update = test_new_sled_update(); - let observed_sled = + let (observed_sled, _) = datastore.sled_upsert(sled_update.clone()).await.unwrap(); assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size); @@ -775,7 +943,7 @@ mod test { sled_update.sled_agent_gen.0 = sled_update.sled_agent_gen.0.next(); // Test that upserting the sled propagates those changes to the DB. - let observed_sled = datastore + let (observed_sled, _) = datastore .sled_upsert(sled_update.clone()) .await .expect("Could not upsert sled during test prep"); @@ -797,7 +965,7 @@ mod test { ); sled_update.sled_agent_gen.0 = current_gen.0.next(); // Test that upserting the sled propagates those changes to the DB. - let observed_sled = datastore + let (observed_sled, _) = datastore .sled_upsert(sled_update.clone()) .await .expect("Could not upsert sled during test prep"); @@ -816,7 +984,7 @@ mod test { let (opctx, datastore) = datastore_test(&logctx, &db).await; let mut sled_update = test_new_sled_update(); - let observed_sled = + let (observed_sled, _) = datastore.sled_upsert(sled_update.clone()).await.unwrap(); assert_eq!( observed_sled.usable_hardware_threads, @@ -833,7 +1001,7 @@ mod test { sled_set_state( &opctx, &datastore, - observed_sled.id(), + SledUuid::from_untyped_uuid(observed_sled.id()), SledState::Decommissioned, ValidateTransition::No, Expected::Ok(SledState::Active), @@ -895,20 +1063,26 @@ mod test { let (opctx, datastore) = datastore_test(&logctx, &db).await; // Define some sleds that resources cannot be provisioned on. - let non_provisionable_sled = + let (non_provisionable_sled, _) = datastore.sled_upsert(test_new_sled_update()).await.unwrap(); - let expunged_sled = + let (expunged_sled, _) = datastore.sled_upsert(test_new_sled_update()).await.unwrap(); - let decommissioned_sled = + let (decommissioned_sled, _) = datastore.sled_upsert(test_new_sled_update()).await.unwrap(); - let illegal_decommissioned_sled = + let (illegal_decommissioned_sled, _) = datastore.sled_upsert(test_new_sled_update()).await.unwrap(); let ineligible_sleds = IneligibleSleds { - non_provisionable: non_provisionable_sled.id(), - expunged: expunged_sled.id(), - decommissioned: decommissioned_sled.id(), - illegal_decommissioned: illegal_decommissioned_sled.id(), + non_provisionable: SledUuid::from_untyped_uuid( + non_provisionable_sled.id(), + ), + expunged: SledUuid::from_untyped_uuid(expunged_sled.id()), + decommissioned: SledUuid::from_untyped_uuid( + decommissioned_sled.id(), + ), + illegal_decommissioned: SledUuid::from_untyped_uuid( + illegal_decommissioned_sled.id(), + ), }; ineligible_sleds.setup(&opctx, &datastore).await.unwrap(); @@ -934,7 +1108,7 @@ mod test { // Now add a provisionable sled and try again. 
let sled_update = test_new_sled_update(); - let provisionable_sled = + let (provisionable_sled, _) = datastore.sled_upsert(sled_update.clone()).await.unwrap(); // Try a few times to ensure that resources never get allocated to the @@ -967,6 +1141,131 @@ mod test { logctx.cleanup_successful(); } + async fn lookup_physical_disk( + datastore: &DataStore, + id: Uuid, + ) -> PhysicalDisk { + use db::schema::physical_disk::dsl; + dsl::physical_disk + .filter(dsl::id.eq(id)) + .filter(dsl::time_deleted.is_null()) + .select(PhysicalDisk::as_select()) + .get_result_async( + &*datastore + .pool_connection_for_tests() + .await + .expect("No connection"), + ) + .await + .expect("Failed to lookup physical disk") + } + + #[tokio::test] + async fn test_sled_expungement_also_expunges_disks() { + let logctx = + dev::test_setup_log("test_sled_expungement_also_expunges_disks"); + let mut db = test_setup_database(&logctx.log).await; + + let (opctx, datastore) = datastore_test(&logctx, &db).await; + + // Set up a sled to test against. + let (sled, _) = + datastore.sled_upsert(test_new_sled_update()).await.unwrap(); + let sled_id = SledUuid::from_untyped_uuid(sled.id()); + + // Add a couple disks to this sled. + // + // (Note: This isn't really enough DB fakery to actually provision e.g. + // Crucible regions, but it creates enough of a control plane object to + // be associated with the Sled by UUID) + let disk1 = PhysicalDisk::new( + Uuid::new_v4(), + "vendor1".to_string(), + "serial1".to_string(), + "model1".to_string(), + PhysicalDiskKind::U2, + sled_id.into_untyped_uuid(), + ); + let disk2 = PhysicalDisk::new( + Uuid::new_v4(), + "vendor2".to_string(), + "serial2".to_string(), + "model2".to_string(), + PhysicalDiskKind::U2, + sled_id.into_untyped_uuid(), + ); + + datastore + .physical_disk_insert(&opctx, disk1.clone()) + .await + .expect("Failed to upsert physical disk"); + datastore + .physical_disk_insert(&opctx, disk2.clone()) + .await + .expect("Failed to upsert physical disk"); + + // Confirm the disks are "in-service". + // + // We verify this state because it should be changing below. + assert_eq!( + PhysicalDiskPolicy::InService, + lookup_physical_disk(&datastore, disk1.id()).await.disk_policy + ); + assert_eq!( + PhysicalDiskPolicy::InService, + lookup_physical_disk(&datastore, disk2.id()).await.disk_policy + ); + + // Expunge the sled. As a part of this process, the query should UPDATE + // the physical_disk table. + sled_set_policy( + &opctx, + &datastore, + sled_id, + SledPolicy::Expunged, + ValidateTransition::Yes, + Expected::Ok(SledPolicy::provisionable()), + ) + .await + .expect("Could not expunge sled"); + + // Observe that the disk policy is now expunged + assert_eq!( + PhysicalDiskPolicy::Expunged, + lookup_physical_disk(&datastore, disk1.id()).await.disk_policy + ); + assert_eq!( + PhysicalDiskPolicy::Expunged, + lookup_physical_disk(&datastore, disk2.id()).await.disk_policy + ); + + // We can now decommission the sled, which should also decommission the + // disks. 
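A minimal sketch of the two cascade rules this test exercises, using illustrative stand-in enums rather than the actual nexus_db_model types: expunging a sled implies expunging its disks, and decommissioning a sled implies decommissioning them.

#[derive(Clone, Copy, PartialEq, Debug)]
enum SledPolicySketch { InService, Expunged }

#[derive(Clone, Copy, PartialEq, Debug)]
enum SledStateSketch { Active, Decommissioned }

#[derive(Clone, Copy, PartialEq, Debug)]
enum DiskPolicySketch { InService, Expunged }

#[derive(Clone, Copy, PartialEq, Debug)]
enum DiskStateSketch { Active, Decommissioned }

// New disk policy implied by a sled policy change; `None` means "leave the
// disks alone".
fn disk_policy_for(sled_policy: SledPolicySketch) -> Option<DiskPolicySketch> {
    match sled_policy {
        SledPolicySketch::InService => None,
        SledPolicySketch::Expunged => Some(DiskPolicySketch::Expunged),
    }
}

// New disk state implied by a sled state change, with the same convention.
fn disk_state_for(sled_state: SledStateSketch) -> Option<DiskStateSketch> {
    match sled_state {
        SledStateSketch::Active => None,
        SledStateSketch::Decommissioned => Some(DiskStateSketch::Decommissioned),
    }
}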
+ sled_set_state( + &opctx, + &datastore, + sled_id, + SledState::Decommissioned, + ValidateTransition::Yes, + Expected::Ok(SledState::Active), + ) + .await + .expect("decommissioned sled"); + + // Observe that the disk state is now decommissioned + assert_eq!( + PhysicalDiskState::Decommissioned, + lookup_physical_disk(&datastore, disk1.id()).await.disk_state + ); + assert_eq!( + PhysicalDiskState::Decommissioned, + lookup_physical_disk(&datastore, disk2.id()).await.disk_state + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + #[tokio::test] async fn test_sled_transitions() { // Test valid and invalid state and policy transitions. @@ -986,7 +1285,9 @@ mod test { ( // In-service and active sleds can be marked as expunged. Before::new( - predicate::in_iter(SledPolicy::all_in_service()), + predicate::in_iter(SledPolicy::all_matching( + SledFilter::InService, + )), predicate::eq(SledState::Active), ), SledTransition::Policy(SledPolicy::Expunged), @@ -995,7 +1296,9 @@ mod test { // The provision policy of in-service sleds can be changed, or // kept the same (1 of 2). Before::new( - predicate::in_iter(SledPolicy::all_in_service()), + predicate::in_iter(SledPolicy::all_matching( + SledFilter::InService, + )), predicate::eq(SledState::Active), ), SledTransition::Policy(SledPolicy::InService { @@ -1005,7 +1308,9 @@ mod test { ( // (2 of 2) Before::new( - predicate::in_iter(SledPolicy::all_in_service()), + predicate::in_iter(SledPolicy::all_matching( + SledFilter::InService, + )), predicate::eq(SledState::Active), ), SledTransition::Policy(SledPolicy::InService { @@ -1057,14 +1362,15 @@ mod test { .enumerate(); // Set up a sled to test against. - let sled = datastore.sled_upsert(test_new_sled_update()).await.unwrap(); + let (sled, _) = + datastore.sled_upsert(test_new_sled_update()).await.unwrap(); let sled_id = sled.id(); for (i, ((policy, state), after)) in all_transitions { test_sled_state_transitions_once( &opctx, &datastore, - sled_id, + SledUuid::from_untyped_uuid(sled_id), policy, state, after, @@ -1087,7 +1393,7 @@ mod test { async fn test_sled_state_transitions_once( opctx: &OpContext, datastore: &DataStore, - sled_id: Uuid, + sled_id: SledUuid, before_policy: SledPolicy, before_state: SledState, after: SledTransition, @@ -1199,8 +1505,7 @@ mod test { /// Tests listing large numbers of sleds via the batched interface #[tokio::test] async fn sled_list_batch() { - let logctx = - dev::test_setup_log("sled_reservation_create_non_provisionable"); + let logctx = dev::test_setup_log("sled_list_batch"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; @@ -1233,9 +1538,10 @@ mod test { assert_eq!(ninserted, size); let sleds = datastore - .sled_list_all_batched(&opctx) + .sled_list_all_batched(&opctx, SledFilter::Commissioned) .await .expect("failed to list all sleds"); + // We don't need to sort these ids because the sleds are enumerated in // id order. 
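The batched listing exercised above follows the usual keyset-pagination loop. A self-contained sketch of that loop, where `fetch_after` is a hypothetical closure standing in for Paginator plus the paginated database query (it returns up to `batch` ids greater than `last`):

fn list_all(
    mut fetch_after: impl FnMut(Option<u32>, usize) -> Vec<u32>,
    batch: usize,
) -> Vec<u32> {
    let mut all = Vec::new();
    let mut last = None;
    loop {
        let page = fetch_after(last, batch);
        if page.is_empty() {
            break;
        }
        // Remember the last id seen so the next page starts after it.
        last = page.last().copied();
        let done = page.len() < batch;
        all.extend(page);
        if done {
            break;
        }
    }
    all
}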
let found_ids: Vec<_> = sleds.into_iter().map(|s| s.id()).collect(); diff --git a/nexus/db-queries/src/db/datastore/snapshot.rs b/nexus/db-queries/src/db/datastore/snapshot.rs index 7a3f84bbb2..9d4900e2a4 100644 --- a/nexus/db-queries/src/db/datastore/snapshot.rs +++ b/nexus/db-queries/src/db/datastore/snapshot.rs @@ -31,6 +31,7 @@ use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; @@ -304,4 +305,21 @@ impl DataStore { } } } + + pub async fn find_snapshot_by_destination_volume_id( + &self, + opctx: &OpContext, + volume_id: Uuid, + ) -> LookupResult> { + let conn = self.pool_connection_authorized(opctx).await?; + + use db::schema::snapshot::dsl; + dsl::snapshot + .filter(dsl::destination_volume_id.eq(volume_id)) + .select(Snapshot::as_select()) + .first_async(&*conn) + .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } } diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs index 842cd4bf11..edb16e95ac 100644 --- a/nexus/db-queries/src/db/datastore/switch_port.rs +++ b/nexus/db-queries/src/db/datastore/switch_port.rs @@ -2,6 +2,9 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +use std::collections::BTreeMap; +use std::net::IpAddr; + use super::DataStore; use crate::context::OpContext; use crate::db; @@ -25,16 +28,72 @@ use diesel::{ CombineDsl, ExpressionMethods, JoinOnDsl, NullableExpressionMethods, QueryDsl, SelectableHelper, }; +use ipnetwork::IpNetwork; +use nexus_db_model::{ + SqlU16, SqlU32, SqlU8, SwitchPortBgpPeerConfigAllowExport, + SwitchPortBgpPeerConfigAllowImport, SwitchPortBgpPeerConfigCommunity, +}; use nexus_types::external_api::params; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::{ - self, CreateResult, DataPageParams, DeleteResult, Error, ListResultVec, - LookupResult, NameOrId, ResourceType, UpdateResult, + self, CreateResult, DataPageParams, DeleteResult, Error, + ImportExportPolicy, ListResultVec, LookupResult, NameOrId, ResourceType, + UpdateResult, }; use ref_cast::RefCast; use serde::{Deserialize, Serialize}; use uuid::Uuid; +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct BgpPeerConfig { + pub port_settings_id: Uuid, + pub bgp_config_id: Uuid, + pub interface_name: String, + pub addr: IpNetwork, + pub hold_time: SqlU32, + pub idle_hold_time: SqlU32, + pub delay_open: SqlU32, + pub connect_retry: SqlU32, + pub keepalive: SqlU32, + pub remote_asn: Option, + pub min_ttl: Option, + pub md5_auth_key: Option, + pub multi_exit_discriminator: Option, + pub local_pref: Option, + pub enforce_first_as: bool, + pub allowed_import: ImportExportPolicy, + pub allowed_export: ImportExportPolicy, + pub communities: Vec, + pub vlan_id: Option, +} + +impl Into for BgpPeerConfig { + fn into(self) -> external::BgpPeer { + external::BgpPeer { + bgp_config: self.bgp_config_id.into(), + interface_name: self.interface_name.clone(), + addr: self.addr.ip(), + hold_time: self.hold_time.into(), + idle_hold_time: self.idle_hold_time.into(), + delay_open: self.delay_open.into(), + connect_retry: 
self.connect_retry.into(), + keepalive: self.keepalive.into(), + remote_asn: self.remote_asn.map(Into::into), + min_ttl: self.min_ttl.map(Into::into), + md5_auth_key: self.md5_auth_key.clone(), + multi_exit_discriminator: self + .multi_exit_discriminator + .map(Into::into), + communities: self.communities, + local_pref: self.local_pref.map(Into::into), + enforce_first_as: self.enforce_first_as, + allowed_import: self.allowed_import, + allowed_export: self.allowed_export, + vlan_id: self.vlan_id.map(Into::into), + } + } +} + #[derive(Clone, Debug, Deserialize, Serialize)] pub struct SwitchPortSettingsCombinedResult { pub settings: SwitchPortSettings, @@ -45,7 +104,7 @@ pub struct SwitchPortSettingsCombinedResult { pub interfaces: Vec, pub vlan_interfaces: Vec, pub routes: Vec, - pub bgp_peers: Vec, + pub bgp_peers: Vec, pub addresses: Vec, } @@ -142,12 +201,14 @@ impl DataStore { ) -> CreateResult { use db::schema::{ address_lot::dsl as address_lot_dsl, - //XXX ANNOUNCE bgp_announce_set::dsl as bgp_announce_set_dsl, bgp_config::dsl as bgp_config_dsl, lldp_service_config::dsl as lldp_config_dsl, switch_port_settings::dsl as port_settings_dsl, switch_port_settings_address_config::dsl as address_config_dsl, switch_port_settings_bgp_peer_config::dsl as bgp_peer_dsl, + switch_port_settings_bgp_peer_config_allow_export::dsl as allow_export_dsl, + switch_port_settings_bgp_peer_config_allow_import::dsl as allow_import_dsl, + switch_port_settings_bgp_peer_config_communities::dsl as bgp_communities_dsl, switch_port_settings_interface_config::dsl as interface_config_dsl, switch_port_settings_link_config::dsl as link_config_dsl, switch_port_settings_port_config::dsl as port_config_dsl, @@ -158,7 +219,6 @@ impl DataStore { #[derive(Debug)] enum SwitchPortSettingsCreateError { AddressLotNotFound, - //XXX ANNOUNCE BgpAnnounceSetNotFound, BgpConfigNotFound, ReserveBlock(ReserveBlockError), } @@ -181,7 +241,7 @@ impl DataStore { //let port_settings = SwitchPortSettings::new(¶ms.identity); let db_port_settings: SwitchPortSettings = diesel::insert_into(port_settings_dsl::switch_port_settings) - .values(port_settings) + .values(port_settings.clone()) .returning(SwitchPortSettings::as_returning()) .get_result_async(&conn) .await?; @@ -245,6 +305,7 @@ impl DataStore { .returning(LldpServiceConfig::as_returning()) .get_results_async(&conn) .await?; + result.links = diesel::insert_into( link_config_dsl::switch_port_settings_link_config) @@ -305,9 +366,12 @@ impl DataStore { .get_results_async(&conn) .await?; + let mut peer_by_addr: BTreeMap = BTreeMap::new(); + let mut bgp_peer_config = Vec::new(); for (interface_name, peer_config) in ¶ms.bgp_peers { for p in &peer_config.peers { + peer_by_addr.insert(p.addr, &p); use db::schema::bgp_config; let bgp_config_id = match &p.bgp_config { NameOrId::Id(id) => *id, @@ -329,21 +393,73 @@ impl DataStore { } }; + if let ImportExportPolicy::Allow(list) = &p.allowed_import { + let id = port_settings.identity.id; + let to_insert: Vec = list.clone().into_iter().map(|x| { + SwitchPortBgpPeerConfigAllowImport { + port_settings_id: id, + interface_name: interface_name.clone(), + addr: p.addr.into(), + prefix: x.into(), + } + }).collect(); + + diesel::insert_into(allow_import_dsl::switch_port_settings_bgp_peer_config_allow_import) + .values(to_insert) + .execute_async(&conn) + .await?; + } + + if let ImportExportPolicy::Allow(list) = &p.allowed_export { + let id = port_settings.identity.id; + let to_insert: Vec = list + .clone() + .into_iter() + .map(|x| { + 
SwitchPortBgpPeerConfigAllowExport { + port_settings_id: id, + interface_name: interface_name.clone(), + addr: p.addr.into(), + prefix: x.into(), + } + }).collect(); + + diesel::insert_into(allow_export_dsl::switch_port_settings_bgp_peer_config_allow_export) + .values(to_insert) + .execute_async(&conn) + .await?; + } + + if !p.communities.is_empty() { + let id = port_settings.identity.id; + let to_insert: Vec = p.communities + .clone() + .into_iter() + .map(|x| { + SwitchPortBgpPeerConfigCommunity{ + port_settings_id: id, + interface_name: interface_name.clone(), + addr: p.addr.into(), + community: x.into(), + } + }).collect(); + + diesel::insert_into(bgp_communities_dsl::switch_port_settings_bgp_peer_config_communities) + .values(to_insert) + .execute_async(&conn) + .await?; + } + bgp_peer_config.push(SwitchPortBgpPeerConfig::new( psid, bgp_config_id, interface_name.clone(), - p.addr.into(), - p.hold_time.into(), - p.idle_hold_time.into(), - p.delay_open.into(), - p.connect_retry.into(), - p.keepalive.into(), + p, )); } } - result.bgp_peers = + let db_bgp_peers: Vec = diesel::insert_into( bgp_peer_dsl::switch_port_settings_bgp_peer_config) .values(bgp_peer_config) @@ -351,6 +467,40 @@ impl DataStore { .get_results_async(&conn) .await?; + for p in db_bgp_peers.into_iter() { + let view = BgpPeerConfig { + port_settings_id: p.port_settings_id, + bgp_config_id: p.bgp_config_id, + interface_name: p.interface_name, + addr: p.addr, + hold_time: p.hold_time, + idle_hold_time: p.idle_hold_time, + delay_open: p.delay_open, + connect_retry: p.connect_retry, + keepalive: p.keepalive, + remote_asn: p.remote_asn, + min_ttl: p.min_ttl, + md5_auth_key: p.md5_auth_key, + multi_exit_discriminator: p.multi_exit_discriminator, + local_pref: p.local_pref, + enforce_first_as: p.enforce_first_as, + vlan_id: p.vlan_id, + allowed_import: peer_by_addr.get(&p.addr.ip()) + .map(|x| x.allowed_import.clone()) + .unwrap_or(ImportExportPolicy::NoFiltering) + .clone(), + allowed_export: peer_by_addr.get(&p.addr.ip()) + .map(|x| x.allowed_export.clone()) + .unwrap_or(ImportExportPolicy::NoFiltering) + .clone(), + communities: peer_by_addr.get(&p.addr.ip()) + .map(|x| x.communities.clone()) + .unwrap_or(Vec::new()) + .clone(), + }; + result.bgp_peers.push(view); + } + let mut address_config = Vec::new(); use db::schema::address_lot; for (interface_name, a) in ¶ms.addresses { @@ -379,7 +529,7 @@ impl DataStore { let (block, rsvd_block) = crate::db::datastore::address_lot::try_reserve_block( address_lot_id, - address.address.ip().into(), + address.address.addr().into(), // TODO: Should we allow anycast addresses for switch_ports? 
// anycast false, @@ -685,6 +835,11 @@ impl DataStore { use db::schema::switch_port_settings::{ self, dsl as port_settings_dsl, }; + use db::schema::{ + switch_port_settings_bgp_peer_config_allow_import::dsl as allow_import_dsl, + switch_port_settings_bgp_peer_config_allow_export::dsl as allow_export_dsl, + switch_port_settings_bgp_peer_config_communities::dsl as bgp_communities_dsl, + }; let id = match name_or_id { NameOrId::Id(id) => *id, @@ -800,13 +955,86 @@ impl DataStore { self as bgp_peer, dsl as bgp_peer_dsl, }; - result.bgp_peers = + let peers: Vec = bgp_peer_dsl::switch_port_settings_bgp_peer_config .filter(bgp_peer::port_settings_id.eq(id)) .select(SwitchPortBgpPeerConfig::as_select()) .load_async::(&conn) .await?; + for p in peers.iter() { + let allowed_import: ImportExportPolicy = if p.allow_import_list_active { + let db_list: Vec = + allow_import_dsl::switch_port_settings_bgp_peer_config_allow_import + .filter(allow_import_dsl::port_settings_id.eq(id)) + .filter(allow_import_dsl::interface_name.eq(p.interface_name.clone())) + .filter(allow_import_dsl::addr.eq(p.addr)) + .select(SwitchPortBgpPeerConfigAllowImport::as_select()) + .load_async::(&conn) + .await?; + + ImportExportPolicy::Allow(db_list + .into_iter() + .map(|x| x.prefix.into()) + .collect() + ) + } else { + ImportExportPolicy::NoFiltering + }; + + let allowed_export: ImportExportPolicy = if p.allow_export_list_active { + let db_list: Vec = + allow_export_dsl::switch_port_settings_bgp_peer_config_allow_export + .filter(allow_export_dsl::port_settings_id.eq(id)) + .filter(allow_export_dsl::interface_name.eq(p.interface_name.clone())) + .filter(allow_export_dsl::addr.eq(p.addr)) + .select(SwitchPortBgpPeerConfigAllowExport::as_select()) + .load_async::(&conn) + .await?; + + ImportExportPolicy::Allow(db_list + .into_iter() + .map(|x| x.prefix.into()) + .collect() + ) + } else { + ImportExportPolicy::NoFiltering + }; + + let communities: Vec = + bgp_communities_dsl::switch_port_settings_bgp_peer_config_communities + .filter(bgp_communities_dsl::port_settings_id.eq(id)) + .filter(bgp_communities_dsl::interface_name.eq(p.interface_name.clone())) + .filter(bgp_communities_dsl::addr.eq(p.addr)) + .select(SwitchPortBgpPeerConfigCommunity::as_select()) + .load_async::(&conn) + .await?; + + let view = BgpPeerConfig { + port_settings_id: p.port_settings_id, + bgp_config_id: p.bgp_config_id, + interface_name: p.interface_name.clone(), + addr: p.addr, + hold_time: p.hold_time, + idle_hold_time: p.idle_hold_time, + delay_open: p.delay_open, + connect_retry: p.connect_retry, + keepalive: p.keepalive, + remote_asn: p.remote_asn, + min_ttl: p.min_ttl, + md5_auth_key: p.md5_auth_key.clone(), + multi_exit_discriminator: p.multi_exit_discriminator, + local_pref: p.local_pref, + enforce_first_as: p.enforce_first_as, + vlan_id: p.vlan_id, + communities: communities.into_iter().map(|c| c.community.0).collect(), + allowed_import, + allowed_export, + }; + + result.bgp_peers.push(view); + } + // get the address configs use db::schema::switch_port_settings_address_config::{ self as address_config, dsl as address_config_dsl, @@ -1196,11 +1424,12 @@ mod test { use crate::db::datastore::UpdatePrecondition; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params::{ - BgpAnnounceSetCreate, BgpConfigCreate, BgpPeer, BgpPeerConfig, + BgpAnnounceSetCreate, BgpConfigCreate, BgpPeerConfig, SwitchPortConfigCreate, SwitchPortGeometry, SwitchPortSettingsCreate, }; use omicron_common::api::external::{ - 
IdentityMetadataCreateParams, Name, NameOrId, + BgpPeer, IdentityMetadataCreateParams, ImportExportPolicy, Name, + NameOrId, }; use omicron_test_utils::dev; use std::collections::HashMap; @@ -1242,6 +1471,8 @@ mod test { "test-announce-set".parse().unwrap(), ), vrf: None, + checker: None, + shaper: None, }; datastore.bgp_config_set(&opctx, &bgp_config).await.unwrap(); @@ -1262,9 +1493,6 @@ mod test { "phy0".into(), BgpPeerConfig { peers: vec![BgpPeer { - bgp_announce_set: NameOrId::Name( - "test-announce-set".parse().unwrap(), - ), bgp_config: NameOrId::Name( "test-bgp-config".parse().unwrap(), ), @@ -1275,6 +1503,16 @@ mod test { delay_open: 0, connect_retry: 0, keepalive: 0, + remote_asn: None, + min_ttl: None, + md5_auth_key: None, + multi_exit_discriminator: None, + communities: Vec::new(), + local_pref: None, + enforce_first_as: false, + allowed_export: ImportExportPolicy::NoFiltering, + allowed_import: ImportExportPolicy::NoFiltering, + vlan_id: None, }], }, )]), diff --git a/nexus/db-queries/src/db/datastore/test_utils.rs b/nexus/db-queries/src/db/datastore/test_utils.rs index 6d26ad044b..4678e07f47 100644 --- a/nexus/db-queries/src/db/datastore/test_utils.rs +++ b/nexus/db-queries/src/db/datastore/test_utils.rs @@ -6,7 +6,6 @@ use crate::authz; use crate::context::OpContext; -use crate::db; use crate::db::datastore::ValidateTransition; use crate::db::lookup::LookupPath; use crate::db::DataStore; @@ -15,60 +14,23 @@ use anyhow::ensure; use anyhow::Context; use anyhow::Result; use dropshot::test_util::LogContext; +use futures::future::try_join_all; use nexus_db_model::SledState; use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledProvisionPolicy; use omicron_test_utils::dev::db::CockroachInstance; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use std::sync::Arc; use strum::EnumCount; use uuid::Uuid; -/// Constructs a DataStore for use in test suites that has preloaded the -/// built-in users, roles, and role assignments that are needed for basic -/// operation -#[cfg(test)] -pub async fn datastore_test( +pub(crate) async fn datastore_test( logctx: &LogContext, db: &CockroachInstance, ) -> (OpContext, Arc) { - use crate::authn; - - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = Arc::new(db::Pool::new(&logctx.log, &cfg)); - let datastore = - Arc::new(DataStore::new(&logctx.log, pool, None).await.unwrap()); - - // Create an OpContext with the credentials of "db-init" just for the - // purpose of loading the built-in users, roles, and assignments. - let opctx = OpContext::for_background( - logctx.log.new(o!()), - Arc::new(authz::Authz::new(&logctx.log)), - authn::Context::internal_db_init(), - Arc::clone(&datastore), - ); - - // TODO: Can we just call "Populate" instead of doing this? 
let rack_id = Uuid::parse_str(nexus_test_utils::RACK_UUID).unwrap(); - datastore.load_builtin_users(&opctx).await.unwrap(); - datastore.load_builtin_roles(&opctx).await.unwrap(); - datastore.load_builtin_role_asgns(&opctx).await.unwrap(); - datastore.load_builtin_silos(&opctx).await.unwrap(); - datastore.load_builtin_projects(&opctx).await.unwrap(); - datastore.load_builtin_vpcs(&opctx).await.unwrap(); - datastore.load_silo_users(&opctx).await.unwrap(); - datastore.load_silo_user_role_assignments(&opctx).await.unwrap(); - datastore - .load_builtin_fleet_virtual_provisioning_collection(&opctx) - .await - .unwrap(); - datastore.load_builtin_rack_data(&opctx, rack_id).await.unwrap(); - - // Create an OpContext with the credentials of "test-privileged" for general - // testing. - let opctx = - OpContext::for_tests(logctx.log.new(o!()), Arc::clone(&datastore)); - - (opctx, datastore) + super::pub_test_utils::datastore_test(logctx, db, rack_id).await } /// Denotes a specific way in which a sled is ineligible. @@ -89,16 +51,16 @@ pub(super) enum IneligibleSledKind { /// This is less error-prone than several places duplicating this logic. #[derive(Debug)] pub(super) struct IneligibleSleds { - pub(super) non_provisionable: Uuid, - pub(super) expunged: Uuid, - pub(super) decommissioned: Uuid, - pub(super) illegal_decommissioned: Uuid, + pub(super) non_provisionable: SledUuid, + pub(super) expunged: SledUuid, + pub(super) decommissioned: SledUuid, + pub(super) illegal_decommissioned: SledUuid, } impl IneligibleSleds { pub(super) fn iter( &self, - ) -> impl Iterator { + ) -> impl Iterator { [ (IneligibleSledKind::NonProvisionable, self.non_provisionable), (IneligibleSledKind::Expunged, self.expunged), @@ -231,18 +193,79 @@ impl IneligibleSleds { Ok(()) } + + /// Brings all of the sleds back to being in-service and provisionable. + /// + /// This is never going to happen in production, but it's easier to do this + /// in many tests than to set up a new set of sleds. + /// + /// Note: there's no memory of the previous state stored here -- this just + /// resets the sleds to the default state. 
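The undo helper below fans out one reset per sled and fails if any reset fails. A minimal sketch of that pattern with futures::future::try_join_all, using a hypothetical `undo_one` step in place of the real policy/state resets:

use futures::future::try_join_all;

async fn undo_one(sled_id: u32) -> anyhow::Result<()> {
    // Reset a single (hypothetical) sled back to the default policy/state.
    println!("resetting sled {sled_id}");
    Ok(())
}

async fn undo_all(sled_ids: Vec<u32>) -> anyhow::Result<()> {
    // Run every reset concurrently; the first error aborts the whole batch.
    try_join_all(sled_ids.into_iter().map(undo_one)).await?;
    Ok(())
}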
+ pub async fn undo( + &self, + opctx: &OpContext, + datastore: &DataStore, + ) -> Result<()> { + async fn undo_single( + opctx: &OpContext, + datastore: &DataStore, + sled_id: SledUuid, + kind: IneligibleSledKind, + ) -> Result<()> { + sled_set_policy( + &opctx, + &datastore, + sled_id, + SledPolicy::provisionable(), + ValidateTransition::No, + Expected::Ignore, + ) + .await + .with_context(|| { + format!( + "failed to set provisionable policy for sled {} ({:?})", + sled_id, kind, + ) + })?; + + sled_set_state( + &opctx, + &datastore, + sled_id, + SledState::Active, + ValidateTransition::No, + Expected::Ignore, + ) + .await + .with_context(|| { + format!( + "failed to set active state for sled {} ({:?})", + sled_id, kind, + ) + })?; + + Ok(()) + } + + _ = try_join_all(self.iter().map(|(kind, sled_id)| { + undo_single(opctx, datastore, sled_id, kind) + })) + .await?; + + Ok(()) + } } pub(super) async fn sled_set_policy( opctx: &OpContext, datastore: &DataStore, - sled_id: Uuid, + sled_id: SledUuid, new_policy: SledPolicy, check: ValidateTransition, expected_old_policy: Expected, ) -> Result<()> { let (authz_sled, _) = LookupPath::new(&opctx, &datastore) - .sled_id(sled_id) + .sled_id(sled_id.into_untyped_uuid()) .fetch_for(authz::Action::Modify) .await .unwrap(); @@ -284,13 +307,13 @@ pub(super) async fn sled_set_policy( pub(super) async fn sled_set_state( opctx: &OpContext, datastore: &DataStore, - sled_id: Uuid, + sled_id: SledUuid, new_state: SledState, check: ValidateTransition, expected_old_state: Expected, ) -> Result<()> { let (authz_sled, _) = LookupPath::new(&opctx, &datastore) - .sled_id(sled_id) + .sled_id(sled_id.into_untyped_uuid()) .fetch_for(authz::Action::Modify) .await .unwrap(); diff --git a/nexus/db-queries/src/db/datastore/v2p_mapping.rs b/nexus/db-queries/src/db/datastore/v2p_mapping.rs new file mode 100644 index 0000000000..6c00957e7d --- /dev/null +++ b/nexus/db-queries/src/db/datastore/v2p_mapping.rs @@ -0,0 +1,45 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use super::DataStore; +use crate::context::OpContext; +use crate::db; +use crate::db::datastore::SQL_BATCH_SIZE; +use crate::db::error::{public_error_from_diesel, ErrorHandler}; +use crate::db::model::V2PMappingView; +use crate::db::pagination::paginated; +use crate::db::pagination::Paginator; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::{QueryDsl, SelectableHelper}; +use omicron_common::api::external::ListResultVec; + +impl DataStore { + pub async fn v2p_mappings( + &self, + opctx: &OpContext, + ) -> ListResultVec { + use db::schema::v2p_mapping_view::dsl; + + opctx.check_complex_operations_allowed()?; + + let mut mappings = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = paginated( + dsl::v2p_mapping_view, + dsl::nic_id, + &p.current_pagparams(), + ) + .select(V2PMappingView::as_select()) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
+ .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + paginator = p.found_batch(&batch, &|mapping| mapping.nic_id); + mappings.extend(batch); + } + + Ok(mappings) + } +} diff --git a/nexus/db-queries/src/db/datastore/vmm.rs b/nexus/db-queries/src/db/datastore/vmm.rs index b9bfd7697e..b8fb47de26 100644 --- a/nexus/db-queries/src/db/datastore/vmm.rs +++ b/nexus/db-queries/src/db/datastore/vmm.rs @@ -9,8 +9,10 @@ use crate::authz; use crate::context::OpContext; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; +use crate::db::model::InstanceState as DbInstanceState; use crate::db::model::Vmm; use crate::db::model::VmmRuntimeState; +use crate::db::pagination::paginated; use crate::db::schema::vmm::dsl; use crate::db::update_and_check::UpdateAndCheck; use crate::db::update_and_check::UpdateStatus; @@ -18,7 +20,10 @@ use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; +use omicron_common::api::external::InstanceState as ApiInstanceState; +use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; @@ -50,9 +55,6 @@ impl DataStore { opctx: &OpContext, vmm_id: &Uuid, ) -> UpdateResult { - use crate::db::model::InstanceState as DbInstanceState; - use omicron_common::api::external::InstanceState as ApiInstanceState; - let valid_states = vec![ DbInstanceState::new(ApiInstanceState::Destroyed), DbInstanceState::new(ApiInstanceState::Failed), @@ -61,9 +63,15 @@ impl DataStore { let updated = diesel::update(dsl::vmm) .filter(dsl::id.eq(*vmm_id)) .filter(dsl::state.eq_any(valid_states)) + .filter(dsl::time_deleted.is_null()) .set(dsl::time_deleted.eq(Utc::now())) - .execute_async(&*self.pool_connection_authorized(opctx).await?) + .check_if_exists::(*vmm_id) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) .await + .map(|r| match r.status { + UpdateStatus::Updated => true, + UpdateStatus::NotUpdatedButExists => false, + }) .map_err(|e| { public_error_from_diesel( e, @@ -74,7 +82,7 @@ impl DataStore { ) })?; - Ok(updated != 0) + Ok(updated) } pub async fn vmm_fetch( @@ -155,7 +163,7 @@ impl DataStore { .filter(dsl::id.eq(*vmm_id)) .set(( dsl::propolis_ip.eq(new_ip), - dsl::propolis_port.eq(new_port as i32), + dsl::propolis_port.eq(i32::from(new_port)), )) .returning(Vmm::as_returning()) .get_result_async(&*self.pool_connection_authorized(opctx).await?) @@ -164,4 +172,63 @@ impl DataStore { Ok(vmm) } + + /// Lists VMMs which have been abandoned by their instances after a + /// migration and are in need of cleanup. + /// + /// A VMM is considered "abandoned" if (and only if): + /// + /// - It is in the `Destroyed` state. + /// - It is not currently running an instance, and it is also not the + /// migration target of any instance (i.e. it is not pointed to by + /// any instance record's `active_propolis_id` and `target_propolis_id` + /// fields). + /// - It has not been deleted yet. 
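Restated as a small, dependency-free predicate (the structs here are illustrative stand-ins, not the actual Nexus models), the conditions above amount to:

struct VmmRow {
    id: u64,
    destroyed: bool,
    deleted: bool,
}

struct InstanceRow {
    active_propolis_id: Option<u64>,
    target_propolis_id: Option<u64>,
}

fn is_abandoned(vmm: &VmmRow, instance: Option<&InstanceRow>) -> bool {
    vmm.destroyed
        && !vmm.deleted
        // No instance points at this VMM as either its active Propolis or
        // its migration target.
        && instance.map_or(true, |i| {
            i.active_propolis_id != Some(vmm.id)
                && i.target_propolis_id != Some(vmm.id)
        })
}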
+ pub async fn vmm_list_abandoned( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec { + use crate::db::schema::instance::dsl as instance_dsl; + let destroyed = DbInstanceState::new(ApiInstanceState::Destroyed); + paginated(dsl::vmm, dsl::id, pagparams) + // In order to be considered "abandoned", a VMM must be: + // - in the `Destroyed` state + .filter(dsl::state.eq(destroyed)) + // - not deleted yet + .filter(dsl::time_deleted.is_null()) + // - not pointed to by any instance's `active_propolis_id` or + // `target_propolis_id`. + // + .left_join( + // Left join with the `instance` table on the VMM's instance ID, so + // that we can check if the instance pointed to by this VMM (if + // any exists) has this VMM pointed to by its + // `active_propolis_id` or `target_propolis_id` fields. + instance_dsl::instance + .on(instance_dsl::id.eq(dsl::instance_id)), + ) + .filter( + dsl::id + .nullable() + .ne(instance_dsl::active_propolis_id) + // In SQL, *all* comparisons with NULL are `false`, even `!= + // NULL`, so we have to explicitly check for nulls here, or + // else VMMs whose instances have no `active_propolis_id` + // will not be considered abandoned (incorrectly). + .or(instance_dsl::active_propolis_id.is_null()), + ) + .filter( + dsl::id + .nullable() + .ne(instance_dsl::target_propolis_id) + // As above, we must add this clause because SQL nulls have + // the most irritating behavior possible. + .or(instance_dsl::target_propolis_id.is_null()), + ) + .select(Vmm::as_select()) + .load_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } } diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index a9646b9ef6..a7b9273aa8 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -11,8 +11,10 @@ use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Dataset; +use crate::db::model::Disk; use crate::db::model::DownstairsClientStopRequestNotification; use crate::db::model::DownstairsClientStoppedNotification; +use crate::db::model::Instance; use crate::db::model::Region; use crate::db::model::RegionSnapshot; use crate::db::model::UpstairsRepairNotification; @@ -25,6 +27,7 @@ use anyhow::bail; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::prelude::*; use diesel::OptionalExtension; +use nexus_types::identity::Resource; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; @@ -42,8 +45,43 @@ use serde::Deserialize; use serde::Deserializer; use serde::Serialize; use sled_agent_client::types::VolumeConstructionRequest; +use std::net::SocketAddrV6; use uuid::Uuid; +#[derive(Debug, Clone, Copy)] +pub enum VolumeCheckoutReason { + /// Check out a read-only Volume. + ReadOnlyCopy, + + /// Check out a Volume to modify and store back to the database. + CopyAndModify, + + /// Check out a Volume to send to Propolis to start an instance. + InstanceStart { vmm_id: Uuid }, + + /// Check out a Volume to send to a migration destination Propolis. + InstanceMigrate { vmm_id: Uuid, target_vmm_id: Uuid }, + + /// Check out a Volume to send to a Pantry (for background maintenance + /// operations). 
+ Pantry, +} + +#[derive(Debug, thiserror::Error)] +enum VolumeGetError { + #[error("Serde error during volume_checkout: {0}")] + SerdeError(#[from] serde_json::Error), + + #[error("Updated {0} database rows, expected {1}")] + UnexpectedDatabaseUpdate(usize, usize), + + #[error("Checkout condition failed: {0}")] + CheckoutConditionFailed(String), + + #[error("Invalid Volume: {0}")] + InvalidVolume(String), +} + impl DataStore { pub async fn volume_create(&self, volume: Volume) -> CreateResult { use db::schema::volume::dsl; @@ -194,6 +232,244 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } + async fn volume_checkout_allowed( + reason: &VolumeCheckoutReason, + vcr: &VolumeConstructionRequest, + maybe_disk: Option, + maybe_instance: Option, + ) -> Result<(), VolumeGetError> { + match reason { + VolumeCheckoutReason::ReadOnlyCopy => { + // When checking out to make a copy (usually for use as a + // read-only parent), the volume must be read only. Even if a + // call-site that uses Copy sends this copied Volume to a + // Propolis or Pantry, the Upstairs that will be created will be + // read-only, and will not take over from other read-only + // Upstairs. + + match volume_is_read_only(&vcr) { + Ok(read_only) => { + if !read_only { + return Err(VolumeGetError::CheckoutConditionFailed( + String::from("Non-read-only Volume Checkout for use Copy!") + )); + } + + Ok(()) + } + + Err(e) => Err(VolumeGetError::InvalidVolume(e.to_string())), + } + } + + VolumeCheckoutReason::CopyAndModify => { + // `CopyAndModify` is used when taking a read/write Volume, + // modifying it (for example, when taking a snapshot, to point + // to read-only resources), and committing it back to the DB. + // This is a checkout of a read/write Volume, so creating an + // Upstairs from it *may* take over from something else. The + // call-site must ensure this doesn't happen, but we can't do + // that here. + + Ok(()) + } + + VolumeCheckoutReason::InstanceStart { vmm_id } => { + // Check out this volume to send to Propolis to start an + // Instance. The VMM id in the enum must match the instance's + // propolis_id. + + let Some(instance) = &maybe_instance else { + return Err(VolumeGetError::CheckoutConditionFailed( + format!( + "InstanceStart {}: instance does not exist", + vmm_id + ), + )); + }; + + let runtime = instance.runtime(); + match (runtime.propolis_id, runtime.dst_propolis_id) { + (Some(_), Some(_)) => { + Err(VolumeGetError::CheckoutConditionFailed( + format!( + "InstanceStart {}: instance {} is undergoing migration", + vmm_id, + instance.id(), + ) + )) + } + + (None, None) => { + Err(VolumeGetError::CheckoutConditionFailed( + format!( + "InstanceStart {}: instance {} has no propolis ids", + vmm_id, + instance.id(), + ) + )) + } + + (Some(propolis_id), None) => { + if propolis_id != *vmm_id { + return Err(VolumeGetError::CheckoutConditionFailed( + format!( + "InstanceStart {}: instance {} propolis id {} mismatch", + vmm_id, + instance.id(), + propolis_id, + ) + )); + } + + Ok(()) + } + + (None, Some(dst_propolis_id)) => { + Err(VolumeGetError::CheckoutConditionFailed( + format!( + "InstanceStart {}: instance {} has no propolis id but dst propolis id {}", + vmm_id, + instance.id(), + dst_propolis_id, + ) + )) + } + } + } + + VolumeCheckoutReason::InstanceMigrate { vmm_id, target_vmm_id } => { + // Check out this volume to send to destination Propolis to + // migrate an Instance. Only take over from the specified source + // VMM. 
+ + let Some(instance) = &maybe_instance else { + return Err(VolumeGetError::CheckoutConditionFailed( + format!( + "InstanceMigrate {} {}: instance does not exist", + vmm_id, target_vmm_id + ), + )); + }; + + let runtime = instance.runtime(); + match (runtime.propolis_id, runtime.dst_propolis_id) { + (Some(propolis_id), Some(dst_propolis_id)) => { + if propolis_id != *vmm_id || dst_propolis_id != *target_vmm_id { + return Err(VolumeGetError::CheckoutConditionFailed( + format!( + "InstanceMigrate {} {}: instance {} propolis id mismatches {} {}", + vmm_id, + target_vmm_id, + instance.id(), + propolis_id, + dst_propolis_id, + ) + )); + } + + Ok(()) + } + + (None, None) => { + Err(VolumeGetError::CheckoutConditionFailed( + format!( + "InstanceMigrate {} {}: instance {} has no propolis ids", + vmm_id, + target_vmm_id, + instance.id(), + ) + )) + } + + (Some(propolis_id), None) => { + // XXX is this right? + if propolis_id != *vmm_id { + return Err(VolumeGetError::CheckoutConditionFailed( + format!( + "InstanceMigrate {} {}: instance {} propolis id {} mismatch", + vmm_id, + target_vmm_id, + instance.id(), + propolis_id, + ) + )); + } + + Ok(()) + } + + (None, Some(dst_propolis_id)) => { + Err(VolumeGetError::CheckoutConditionFailed( + format!( + "InstanceMigrate {} {}: instance {} has no propolis id but dst propolis id {}", + vmm_id, + target_vmm_id, + instance.id(), + dst_propolis_id, + ) + )) + } + } + } + + VolumeCheckoutReason::Pantry => { + // Check out this Volume to send to a Pantry, which will create + // a read/write Upstairs, for background maintenance operations. + // There must not be any Propolis, otherwise this will take over + // from that and cause errors for guest OSes. + + let Some(disk) = maybe_disk else { + // This volume isn't backing a disk, it won't take over from + // a Propolis' Upstairs. + return Ok(()); + }; + + let Some(attach_instance_id) = + disk.runtime().attach_instance_id + else { + // The volume is backing a disk that is not attached to an + // instance. At this moment it won't take over from a + // Propolis' Upstairs, so send it to a Pantry to create an + // Upstairs there. A future checkout that happens after + // this transaction that is sent to a Propolis _will_ take + // over from this checkout (sent to a Pantry), which is ok. + return Ok(()); + }; + + let Some(instance) = maybe_instance else { + // The instance, which the disk that this volume backs is + // attached to, doesn't exist? + // + // XXX this is a Nexus bug! + return Err(VolumeGetError::CheckoutConditionFailed( + format!( + "Pantry: instance {} backing disk {} does not exist?", + attach_instance_id, + disk.id(), + ) + )); + }; + + if let Some(propolis_id) = instance.runtime().propolis_id { + // The instance, which the disk that this volume backs is + // attached to, exists and has an active propolis ID. A + // propolis _may_ exist, so bail here - an activation from + // the Pantry is not allowed to take over from a Propolis. + Err(VolumeGetError::CheckoutConditionFailed(format!( + "Pantry: possible Propolis {}", + propolis_id + ))) + } else { + // The instance, which the disk that this volume backs is + // attached to, exists, but there is no active propolis ID. + // This is ok. + Ok(()) + } + } + } + } + /// Checkout a copy of the Volume from the database. 
/// This action (getting a copy) will increase the generation number /// of Volumes of the VolumeConstructionRequest::Volume type that have @@ -203,18 +479,10 @@ impl DataStore { pub async fn volume_checkout( &self, volume_id: Uuid, + reason: VolumeCheckoutReason, ) -> LookupResult { use db::schema::volume::dsl; - #[derive(Debug, thiserror::Error)] - enum VolumeGetError { - #[error("Serde error during volume_checkout: {0}")] - SerdeError(#[from] serde_json::Error), - - #[error("Updated {0} database rows, expected {1}")] - UnexpectedDatabaseUpdate(usize, usize), - } - // We perform a transaction here, to be sure that on completion // of this, the database contains an updated version of the // volume with the generation number incremented (for the volume @@ -241,6 +509,56 @@ impl DataStore { err.bail(VolumeGetError::SerdeError(e)) })?; + // The VolumeConstructionRequest resulting from this checkout will have its + // generation numbers bumped, and as result will (if it has non-read-only + // sub-volumes) take over from previous read/write activations when sent to a + // place that will `construct` a new Volume. Depending on the checkout reason, + // prevent creating multiple read/write Upstairs acting on the same Volume, + // except where the take over is intended. + + let (maybe_disk, maybe_instance) = { + use db::schema::instance::dsl as instance_dsl; + use db::schema::disk::dsl as disk_dsl; + + let maybe_disk: Option = disk_dsl::disk + .filter(disk_dsl::time_deleted.is_null()) + .filter(disk_dsl::volume_id.eq(volume_id)) + .select(Disk::as_select()) + .get_result_async(&conn) + .await + .optional()?; + + let maybe_instance: Option = if let Some(disk) = &maybe_disk { + if let Some(attach_instance_id) = disk.runtime().attach_instance_id { + instance_dsl::instance + .filter(instance_dsl::time_deleted.is_null()) + .filter(instance_dsl::id.eq(attach_instance_id)) + .select(Instance::as_select()) + .get_result_async(&conn) + .await + .optional()? + } else { + // Disk not attached to an instance + None + } + } else { + // Volume not associated with disk + None + }; + + (maybe_disk, maybe_instance) + }; + + if let Err(e) = Self::volume_checkout_allowed( + &reason, + &vcr, + maybe_disk, + maybe_instance, + ) + .await { + return Err(err.bail(e)); + } + // Look to see if the VCR is a Volume type, and if so, look at // its sub_volumes. If they are of type Region, then we need // to update their generation numbers and record that update @@ -353,8 +671,17 @@ impl DataStore { .await .map_err(|e| { if let Some(err) = err.take() { - return Error::internal_error(&format!("Transaction error: {}", err)); + match err { + VolumeGetError::CheckoutConditionFailed(message) => { + return Error::conflict(message); + } + + _ => { + return Error::internal_error(&format!("Transaction error: {}", err)); + } + } } + public_error_from_diesel(e, ErrorHandler::Server) }) } @@ -447,8 +774,9 @@ impl DataStore { pub async fn volume_checkout_randomize_ids( &self, volume_id: Uuid, + reason: VolumeCheckoutReason, ) -> CreateResult { - let volume = self.volume_checkout(volume_id).await?; + let volume = self.volume_checkout(volume_id, reason).await?; let vcr: sled_agent_client::types::VolumeConstructionRequest = serde_json::from_str(volume.data())?; @@ -823,6 +1151,48 @@ impl DataStore { }) } + /// Return all the read-write regions in a volume whose target address + /// matches the argument dataset's. 
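In outline, the lookup below walks the volume construction request and keeps only the read/write targets whose address matches the dataset's IP. A toy version over a flat target list, rather than the real nested VolumeConstructionRequest:

use std::net::{Ipv6Addr, SocketAddrV6};

// Each entry is (target address, read_only flag).
fn matching_rw_targets(
    targets: &[(SocketAddrV6, bool)],
    dataset_ip: Ipv6Addr,
) -> Vec<SocketAddrV6> {
    targets
        .iter()
        .filter(|(addr, read_only)| !*read_only && *addr.ip() == dataset_ip)
        .map(|(addr, _)| *addr)
        .collect()
}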
+ pub async fn get_dataset_rw_regions_in_volume( + &self, + opctx: &OpContext, + dataset_id: Uuid, + volume_id: Uuid, + ) -> LookupResult> { + let conn = self.pool_connection_authorized(opctx).await?; + + let dataset = { + use db::schema::dataset::dsl; + + dsl::dataset + .filter(dsl::id.eq(dataset_id)) + .select(Dataset::as_select()) + .first_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })? + }; + + let Some(volume) = self.volume_get(volume_id).await? else { + return Err(Error::internal_error("volume is gone!?")); + }; + + let vcr: VolumeConstructionRequest = + serde_json::from_str(&volume.data())?; + + let mut targets: Vec = vec![]; + + find_matching_rw_regions_in_volume( + &vcr, + dataset.address().ip(), + &mut targets, + ) + .map_err(|e| Error::internal_error(&e.to_string()))?; + + Ok(targets) + } + // An Upstairs is created as part of a Volume hierarchy if the Volume // Construction Request includes a "Region" variant. This may be at any // layer of the Volume, and some notifications will come from an Upstairs @@ -1256,6 +1626,274 @@ impl DataStore { } } +pub struct VolumeReplacementParams { + pub volume_id: Uuid, + pub region_id: Uuid, + pub region_addr: SocketAddrV6, +} + +impl DataStore { + /// Replace a read-write region in a Volume with a new region. + pub async fn volume_replace_region( + &self, + existing: VolumeReplacementParams, + replacement: VolumeReplacementParams, + ) -> Result<(), Error> { + // In a single transaction: + // + // - set the existing region's volume id to the replacement's volume id + // - set the replacement region's volume id to the existing's volume id + // - update the existing volume's construction request to replace the + // existing region's SocketAddrV6 with the replacement region's + // + // This function's effects can be undone by calling it with swapped + // parameters. + // + // # Example # + // + // Imagine `volume_replace_region` is called with the following, + // pretending that UUIDs are just eight uppercase letters: + // + // let existing = VolumeReplacementParams { + // volume_id: TARGET_VOL, + // region_id: TARGET_REG, + // region_addr: "[fd00:1122:3344:145::10]:40001", + // } + // + // let replace = VolumeReplacementParams { + // volume_id: NEW_VOL, + // region_id: NEW_REG, + // region_addr: "[fd00:1122:3344:322::4]:3956", + // } + // + // In the database, the relevant records (and columns) of the region + // table look like this prior to the transaction: + // + // id | volume_id + // -------------| --------- + // TARGET_REG | TARGET_VOL + // NEW_REG | NEW_VOL + // + // TARGET_VOL has a volume construction request where one of the targets + // list will contain TARGET_REG's address: + // + // { + // "type": "volume", + // "block_size": 512, + // "id": "TARGET_VOL", + // "read_only_parent": { + // ... + // }, + // "sub_volumes": [ + // { + // ... + // "opts": { + // ... + // "target": [ + // "[fd00:1122:3344:103::3]:19004", + // "[fd00:1122:3344:79::12]:27015", + // "[fd00:1122:3344:145::10]:40001" <----- + // ] + // } + // } + // ] + // } + // + // Note it is not required for the replacement volume to exist as a + // database record for this transaction. 
+ // + // The first part of the transaction will swap the volume IDs of the + // existing and replacement region records: + // + // id | volume_id + // ------------| --------- + // TARGET_REG | NEW_VOL + // NEW_REG | TARGET_VOL + // + // The second part of the transaction will update the volume + // construction request of TARGET_VOL by finding and replacing + // TARGET_REG's address (in the appropriate targets array) with + // NEW_REG's address: + // + // { + // ... + // "target": [ + // "[fd00:1122:3344:103::3]:19004", + // "[fd00:1122:3344:79::12]:27015", + // "[fd00:1122:3344:322::4]:3956" <----- + // ] + // ... + // } + // + // After the transaction, the caller should ensure that TARGET_REG is + // referenced (via its socket address) in NEW_VOL. For an example, this + // is done as part of the region replacement start saga. + + #[derive(Debug, thiserror::Error)] + enum VolumeReplaceRegionError { + #[error("Error from Volume region replacement: {0}")] + Public(Error), + + #[error("Serde error during Volume region replacement: {0}")] + SerdeError(#[from] serde_json::Error), + + #[error("Target Volume deleted")] + TargetVolumeDeleted, + + #[error("Region replacement error: {0}")] + RegionReplacementError(#[from] anyhow::Error), + } + let err = OptionalError::new(); + + let conn = self.pool_connection_unauthorized().await?; + self.transaction_retry_wrapper("volume_replace_region") + .transaction(&conn, |conn| { + let err = err.clone(); + async move { + use db::schema::region::dsl as region_dsl; + use db::schema::volume::dsl as volume_dsl; + + // Set the existing region's volume id to the replacement's + // volume id + diesel::update(region_dsl::region) + .filter(region_dsl::id.eq(existing.region_id)) + .set(region_dsl::volume_id.eq(replacement.volume_id)) + .execute_async(&conn) + .await + .map_err(|e| { + err.bail_retryable_or_else(e, |e| { + VolumeReplaceRegionError::Public( + public_error_from_diesel( + e, + ErrorHandler::Server, + ) + ) + }) + })?; + + // Set the replacement region's volume id to the existing's + // volume id + diesel::update(region_dsl::region) + .filter(region_dsl::id.eq(replacement.region_id)) + .set(region_dsl::volume_id.eq(existing.volume_id)) + .execute_async(&conn) + .await + .map_err(|e| { + err.bail_retryable_or_else(e, |e| { + VolumeReplaceRegionError::Public( + public_error_from_diesel( + e, + ErrorHandler::Server, + ) + ) + }) + })?; + + // Update the existing volume's construction request to + // replace the existing region's SocketAddrV6 with the + // replacement region's + let maybe_old_volume = { + volume_dsl::volume + .filter(volume_dsl::id.eq(existing.volume_id)) + .select(Volume::as_select()) + .first_async::(&conn) + .await + .optional() + .map_err(|e| { + err.bail_retryable_or_else(e, |e| { + VolumeReplaceRegionError::Public( + public_error_from_diesel( + e, + ErrorHandler::Server, + ) + ) + }) + })? + }; + + let old_volume = if let Some(old_volume) = maybe_old_volume { + old_volume + } else { + // existing volume was deleted, so return an error, we + // can't perform the region replacement now! + return Err(err.bail(VolumeReplaceRegionError::TargetVolumeDeleted)); + }; + + let old_vcr: VolumeConstructionRequest = + match serde_json::from_str(&old_volume.data()) { + Ok(vcr) => vcr, + Err(e) => { + return Err(err.bail(VolumeReplaceRegionError::SerdeError(e))); + }, + }; + + // Copy the old volume's VCR, changing out the old region + // for the new. 
+ let new_vcr = match replace_region_in_vcr( + &old_vcr, + existing.region_addr, + replacement.region_addr, + ) { + Ok(new_vcr) => new_vcr, + Err(e) => { + return Err(err.bail( + VolumeReplaceRegionError::RegionReplacementError(e) + )); + } + }; + + let new_volume_data = serde_json::to_string( + &new_vcr, + ) + .map_err(|e| { + err.bail(VolumeReplaceRegionError::SerdeError(e)) + })?; + + // Update the existing volume's data + diesel::update(volume_dsl::volume) + .filter(volume_dsl::id.eq(existing.volume_id)) + .set(volume_dsl::data.eq(new_volume_data)) + .execute_async(&conn) + .await + .map_err(|e| { + err.bail_retryable_or_else(e, |e| { + VolumeReplaceRegionError::Public( + public_error_from_diesel( + e, + ErrorHandler::Server, + ) + ) + }) + })?; + + Ok(()) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + match err { + VolumeReplaceRegionError::Public(e) => e, + + VolumeReplaceRegionError::SerdeError(_) => { + Error::internal_error(&err.to_string()) + } + + VolumeReplaceRegionError::TargetVolumeDeleted => { + Error::internal_error(&err.to_string()) + } + + VolumeReplaceRegionError::RegionReplacementError(_) => { + Error::internal_error(&err.to_string()) + } + } + } else { + public_error_from_diesel(e, ErrorHandler::Server) + } + }) + } +} + /// Return the targets from a VolumeConstructionRequest. /// /// The targets of a volume construction request map to resources. @@ -1309,6 +1947,164 @@ pub fn read_only_resources_associated_with_volume( } } +/// Returns true if the sub-volumes of a Volume are all read-only +pub fn volume_is_read_only( + vcr: &VolumeConstructionRequest, +) -> anyhow::Result { + match vcr { + VolumeConstructionRequest::Volume { sub_volumes, .. } => { + for sv in sub_volumes { + match sv { + VolumeConstructionRequest::Region { opts, .. } => { + if !opts.read_only { + return Ok(false); + } + } + + _ => { + bail!("Saw non-Region in sub-volume {:?}", sv); + } + } + } + + Ok(true) + } + + VolumeConstructionRequest::Region { .. } => { + // We don't support a pure Region VCR at the volume + // level in the database, so this choice should + // never be encountered, but I want to know if it is. + panic!("Region not supported as a top level volume"); + } + + VolumeConstructionRequest::File { .. } => { + // Effectively, this is read-only, as this BlockIO implementation + // does not have a `write` implementation. This will be hit if + // trying to make a snapshot or image out of a + // `YouCanBootAnythingAsLongAsItsAlpine` image source. + Ok(true) + } + + VolumeConstructionRequest::Url { .. } => { + // ImageSource::Url was deprecated + bail!("Saw VolumeConstructionRequest::Url"); + } + } +} + +/// Replace a Region in a VolumeConstructionRequest +/// +/// Note that UUIDs are not randomized by this step: Crucible will reject a +/// `target_replace` call if the replacement VolumeConstructionRequest does not +/// exactly match the original, except for a single Region difference. +/// +/// Note that the generation number _is_ bumped in this step, otherwise +/// `compare_vcr_for_update` will reject the update. 
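To make the doc comment above concrete, a small illustrative use of replace_region_in_vcr (defined next), reusing the addresses from the volume_replace_region worked example:

    let old: SocketAddrV6 = "[fd00:1122:3344:145::10]:40001".parse().unwrap();
    let new: SocketAddrV6 = "[fd00:1122:3344:322::4]:3956".parse().unwrap();
    let new_vcr = replace_region_in_vcr(&old_vcr, old, new)?;
    // new_vcr is identical to old_vcr except that the matching target string is
    // rewritten and each Region sub-volume's `gen` is bumped by one; IDs stay
    // the same and the read-only parent is left untouched, which is what
    // Crucible's target_replace comparison expects.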
+fn replace_region_in_vcr( + vcr: &VolumeConstructionRequest, + old_region: SocketAddrV6, + new_region: SocketAddrV6, +) -> anyhow::Result { + match vcr { + VolumeConstructionRequest::Volume { + id, + block_size, + sub_volumes, + read_only_parent, + } => Ok(VolumeConstructionRequest::Volume { + id: *id, + block_size: *block_size, + sub_volumes: sub_volumes + .iter() + .map(|subvol| -> anyhow::Result { + replace_region_in_vcr(&subvol, old_region, new_region) + }) + .collect::>>()?, + + // Only replacing R/W regions + read_only_parent: read_only_parent.clone(), + }), + + VolumeConstructionRequest::Url { id, block_size, url } => { + Ok(VolumeConstructionRequest::Url { + id: *id, + block_size: *block_size, + url: url.clone(), + }) + } + + VolumeConstructionRequest::Region { + block_size, + blocks_per_extent, + extent_count, + opts, + gen, + } => { + let mut opts = opts.clone(); + + for target in &mut opts.target { + let parsed_target: SocketAddrV6 = target.parse()?; + if parsed_target == old_region { + *target = new_region.to_string(); + } + } + + Ok(VolumeConstructionRequest::Region { + block_size: *block_size, + blocks_per_extent: *blocks_per_extent, + extent_count: *extent_count, + opts, + gen: *gen + 1, + }) + } + + VolumeConstructionRequest::File { id, block_size, path } => { + Ok(VolumeConstructionRequest::File { + id: *id, + block_size: *block_size, + path: path.clone(), + }) + } + } +} + +/// Find Regions in a Volume's subvolumes list whose target match the argument +/// IP, and add them to the supplied Vec. +fn find_matching_rw_regions_in_volume( + vcr: &VolumeConstructionRequest, + ip: &std::net::Ipv6Addr, + matched_targets: &mut Vec, +) -> anyhow::Result<()> { + match vcr { + VolumeConstructionRequest::Volume { sub_volumes, .. } => { + for sub_volume in sub_volumes { + find_matching_rw_regions_in_volume( + sub_volume, + ip, + matched_targets, + )?; + } + } + + VolumeConstructionRequest::Url { .. } => {} + + VolumeConstructionRequest::Region { opts, .. } => { + if !opts.read_only { + for target in &opts.target { + let parsed_target: SocketAddrV6 = target.parse()?; + if parsed_target.ip() == ip { + matched_targets.push(parsed_target); + } + } + } + } + + VolumeConstructionRequest::File { .. } => {} + } + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; @@ -1316,6 +2112,7 @@ mod tests { use crate::db::datastore::test_utils::datastore_test; use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; + use sled_agent_client::types::CrucibleOpts; // Assert that Nexus will not fail to deserialize an old version of // CrucibleResources that was serialized before schema update 6.0.0. 
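As an aside before the tests, a sketch of how find_matching_rw_regions_in_volume above is driven; this mirrors get_dataset_rw_regions_in_volume, with `dataset` assumed to be a Dataset row:

    let mut targets: Vec<SocketAddrV6> = vec![];
    find_matching_rw_regions_in_volume(
        &vcr,
        dataset.address().ip(),
        &mut targets,
    )?;
    // Only read/write Region targets whose IPv6 address matches the dataset's
    // are collected; read-only regions, File and Url sub-volumes, and the
    // read-only parent are all skipped.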
@@ -1422,4 +2219,211 @@ mod tests { db.cleanup().await.unwrap(); logctx.cleanup_successful(); } + + #[tokio::test] + async fn test_volume_replace_region() { + let logctx = dev::test_setup_log("test_volume_replace_region"); + let log = logctx.log.new(o!()); + let mut db = test_setup_database(&log).await; + let (_opctx, db_datastore) = datastore_test(&logctx, &db).await; + + // Insert four Region records (three, plus one additionally allocated) + + let volume_id = Uuid::new_v4(); + let new_volume_id = Uuid::new_v4(); + + let mut region_and_volume_ids = [ + (Uuid::new_v4(), volume_id), + (Uuid::new_v4(), volume_id), + (Uuid::new_v4(), volume_id), + (Uuid::new_v4(), new_volume_id), + ]; + + { + let conn = db_datastore.pool_connection_for_tests().await.unwrap(); + + for i in 0..4 { + let (_, volume_id) = region_and_volume_ids[i]; + + let region = Region::new( + Uuid::new_v4(), // dataset id + volume_id, + 512_i64.try_into().unwrap(), + 10, + 10, + ); + + region_and_volume_ids[i].0 = region.id(); + + use nexus_db_model::schema::region::dsl; + diesel::insert_into(dsl::region) + .values(region.clone()) + .execute_async(&*conn) + .await + .unwrap(); + } + } + + let _volume = db_datastore + .volume_create(nexus_db_model::Volume::new( + volume_id, + serde_json::to_string(&VolumeConstructionRequest::Volume { + id: volume_id, + block_size: 512, + sub_volumes: vec![VolumeConstructionRequest::Region { + block_size: 512, + blocks_per_extent: 10, + extent_count: 10, + gen: 1, + opts: CrucibleOpts { + id: volume_id, + target: vec![ + String::from("[fd00:1122:3344:101::1]:11111"), // target to replace + String::from("[fd00:1122:3344:102::1]:22222"), + String::from("[fd00:1122:3344:103::1]:33333"), + ], + lossy: false, + flush_timeout: None, + key: None, + cert_pem: None, + key_pem: None, + root_cert_pem: None, + control: None, + read_only: false, + }, + }], + read_only_parent: None, + }) + .unwrap(), + )) + .await + .unwrap(); + + // Replace one + + let target = region_and_volume_ids[0]; + let replacement = region_and_volume_ids[3]; + + db_datastore + .volume_replace_region( + /* target */ + db::datastore::VolumeReplacementParams { + volume_id: target.1, + region_id: target.0, + region_addr: "[fd00:1122:3344:101::1]:11111" + .parse() + .unwrap(), + }, + /* replacement */ + db::datastore::VolumeReplacementParams { + volume_id: replacement.1, + region_id: replacement.0, + region_addr: "[fd55:1122:3344:101::1]:11111" + .parse() + .unwrap(), + }, + ) + .await + .unwrap(); + + let vcr: VolumeConstructionRequest = serde_json::from_str( + db_datastore.volume_get(volume_id).await.unwrap().unwrap().data(), + ) + .unwrap(); + + // Ensure the shape of the resulting VCR + assert_eq!( + &vcr, + &VolumeConstructionRequest::Volume { + id: volume_id, + block_size: 512, + sub_volumes: vec![VolumeConstructionRequest::Region { + block_size: 512, + blocks_per_extent: 10, + extent_count: 10, + gen: 2, // generation number bumped + opts: CrucibleOpts { + id: volume_id, + target: vec![ + String::from("[fd55:1122:3344:101::1]:11111"), // replaced + String::from("[fd00:1122:3344:102::1]:22222"), + String::from("[fd00:1122:3344:103::1]:33333"), + ], + lossy: false, + flush_timeout: None, + key: None, + cert_pem: None, + key_pem: None, + root_cert_pem: None, + control: None, + read_only: false, + }, + }], + read_only_parent: None, + }, + ); + + // Now undo the replacement. Note volume ID is not swapped. 
+ db_datastore + .volume_replace_region( + /* target */ + db::datastore::VolumeReplacementParams { + volume_id: target.1, + region_id: replacement.0, + region_addr: "[fd55:1122:3344:101::1]:11111" + .parse() + .unwrap(), + }, + /* replacement */ + db::datastore::VolumeReplacementParams { + volume_id: replacement.1, + region_id: target.0, + region_addr: "[fd00:1122:3344:101::1]:11111" + .parse() + .unwrap(), + }, + ) + .await + .unwrap(); + + let vcr: VolumeConstructionRequest = serde_json::from_str( + db_datastore.volume_get(volume_id).await.unwrap().unwrap().data(), + ) + .unwrap(); + + // Ensure the shape of the resulting VCR + assert_eq!( + &vcr, + &VolumeConstructionRequest::Volume { + id: volume_id, + block_size: 512, + sub_volumes: vec![VolumeConstructionRequest::Region { + block_size: 512, + blocks_per_extent: 10, + extent_count: 10, + gen: 3, // generation number bumped + opts: CrucibleOpts { + id: volume_id, + target: vec![ + String::from("[fd00:1122:3344:101::1]:11111"), // back to what it was + String::from("[fd00:1122:3344:102::1]:22222"), + String::from("[fd00:1122:3344:103::1]:33333"), + ], + lossy: false, + flush_timeout: None, + key: None, + cert_pem: None, + key_pem: None, + root_cert_pem: None, + control: None, + read_only: false, + }, + }], + read_only_parent: None, + }, + ); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } } diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index dd05498038..98af47f0e2 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++ b/nexus/db-queries/src/db/datastore/vpc.rs @@ -14,6 +14,8 @@ use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::fixed_data::vpc::SERVICES_VPC_ID; use crate::db::identity::Resource; +use crate::db::model::ApplyBlueprintZoneFilterExt; +use crate::db::model::ApplySledFilterExt; use crate::db::model::IncompleteVpc; use crate::db::model::InstanceNetworkInterface; use crate::db::model::Name; @@ -43,8 +45,8 @@ use diesel::prelude::*; use diesel::result::DatabaseErrorKind; use diesel::result::Error as DieselError; use ipnetwork::IpNetwork; -use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneFilter; +use nexus_types::deployment::SledFilter; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; @@ -653,7 +655,7 @@ impl DataStore { // Sleds to notify when firewall rules change. use db::schema::{ bp_omicron_zone, bp_target, instance, instance_network_interface, - service, service_network_interface, sled, vmm, + service_network_interface, sled, vmm, }; // Diesel requires us to use aliases in order to refer to the // `bp_target` table twice in the same query. @@ -663,11 +665,7 @@ impl DataStore { ); let instance_query = instance_network_interface::table - .inner_join( - instance::table - .on(instance::id - .eq(instance_network_interface::instance_id)), - ) + .inner_join(instance::table) .inner_join( vmm::table .on(vmm::id.nullable().eq(instance::active_propolis_id)), @@ -679,62 +677,7 @@ impl DataStore { .filter(vmm::time_deleted.is_null()) .select(Sled::as_select()); - // When Nexus accepts the rack initialization handoff from RSS, it - // populates the `service` table. We eventually want to retire it - // (https://github.com/oxidecomputer/omicron/issues/4947), and the - // Reconfigurator does not add new entries to it. 
We still need to query - // it for systems that are not yet under Reconfigurator control... - let rss_service_query = service_network_interface::table - .inner_join( - service::table - .on(service::id.eq(service_network_interface::service_id)), - ) - .inner_join(sled::table.on(sled::id.eq(service::sled_id))) - .filter(service_network_interface::vpc_id.eq(vpc_id)) - .filter(service_network_interface::time_deleted.is_null()) - .select(Sled::as_select()); - - // ... and we also need to query for the current target blueprint to - // support systems that _are_ under Reconfigurator control. - - { - // Ideally this would do something like: - // - // .filter(bp_omicron_zone::disposition.eq_any( - // BlueprintZoneDisposition::all_matching( - // BlueprintZoneFilter::VpcFirewall, - // ), - // ) - // - // But that doesn't quite work today because we currently don't - // store the disposition enum next to each zone. Instead, this code - // makes its decision to select which sleds to return by just - // ignoring the zones_in_service table today. - // - // The purpose of this otherwise pointless block is to ensure that - // it is correct to ensure that the expressed logic by - // `BlueprintZoneFilter::VpcFirewall` matches the actual - // implementation. It will hopefully soon be replaced with storing - // the disposition in the bp_omicron_zone table and using the - // filter directly. - - let mut matching = BlueprintZoneDisposition::all_matching( - BlueprintZoneFilter::VpcFirewall, - ) - .collect::>(); - matching.sort(); - let mut all = BlueprintZoneDisposition::all_matching( - BlueprintZoneFilter::All, - ) - .collect::>(); - all.sort(); - debug_assert_eq!( - matching, all, - "vpc firewall dispositions should match all dispositions" - ); - } - - let reconfig_service_query = service_network_interface::table + let service_query = service_network_interface::table .inner_join(bp_omicron_zone::table.on( bp_omicron_zone::id.eq(service_network_interface::service_id), )) @@ -759,6 +702,11 @@ impl DataStore { .limit(1), ), ) + // Filter out services that are expunged and shouldn't be resolved + // here. 
+ .blueprint_zone_filter( + BlueprintZoneFilter::ShouldDeployVpcFirewallRules, + ) .filter(service_network_interface::vpc_id.eq(vpc_id)) .filter(service_network_interface::time_deleted.is_null()) .select(Sled::as_select()); @@ -766,6 +714,7 @@ impl DataStore { let mut sleds = sled::table .select(Sled::as_select()) .filter(sled::time_deleted.is_null()) + .sled_filter(SledFilter::VpcFirewall) .into_boxed(); if !sleds_filter.is_empty() { sleds = sleds.filter(sled::id.eq_any(sleds_filter.to_vec())); @@ -773,11 +722,7 @@ impl DataStore { let conn = self.pool_connection_unauthorized().await?; sleds - .intersect( - instance_query - .union(rss_service_query) - .union(reconfig_service_query), - ) + .intersect(instance_query.union(service_query)) .get_results_async(&*conn) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) @@ -1223,8 +1168,8 @@ impl DataStore { let mut result = BTreeMap::new(); for subnet in subnets { let entry = result.entry(subnet.name).or_insert_with(Vec::new); - entry.push(IpNetwork::V4(subnet.ipv4_block.0 .0)); - entry.push(IpNetwork::V6(subnet.ipv6_block.0 .0)); + entry.push(IpNetwork::V4(subnet.ipv4_block.0.into())); + entry.push(IpNetwork::V6(subnet.ipv6_block.0.into())); } Ok(result) } @@ -1284,33 +1229,28 @@ mod tests { use crate::db::datastore::test::sled_baseboard_for_test; use crate::db::datastore::test::sled_system_hardware_for_test; use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::test_utils::IneligibleSleds; use crate::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; use crate::db::model::Project; use crate::db::queries::vpc::MAX_VNI_SEARCH_RANGE_SIZE; - use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; + use nexus_db_model::IncompleteNetworkInterface; use nexus_db_model::SledUpdate; + use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; + use nexus_reconfigurator_planning::system::SledBuilder; + use nexus_reconfigurator_planning::system::SystemDescription; use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintTarget; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; - use nexus_types::deployment::BlueprintZonesConfig; - use nexus_types::deployment::OmicronZoneConfig; - use nexus_types::deployment::OmicronZoneType; use nexus_types::external_api::params; use nexus_types::identity::Asset; - use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; use omicron_common::api::external; use omicron_common::api::external::Generation; - use omicron_common::api::external::IpNet; - use omicron_common::api::external::MacAddr; - use omicron_common::api::external::Vni; - use omicron_common::api::internal::shared::NetworkInterface; - use omicron_common::api::internal::shared::NetworkInterfaceKind; use omicron_test_utils::dev; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::SledUuid; use slog::info; - use std::collections::BTreeMap; - use std::net::IpAddr; // Test that we detect the right error condition and return None when we // fail to insert a VPC due to VNI exhaustion. 
@@ -1530,123 +1470,41 @@ mod tests { logctx.cleanup_successful(); } - #[derive(Debug)] - struct Harness { - rack_id: Uuid, - sled_ids: Vec, - nexuses: Vec, - } - - #[derive(Debug)] - struct HarnessNexus { - id: Uuid, - ip: IpAddr, - mac: MacAddr, - nic_id: Uuid, + async fn assert_service_sled_ids( + datastore: &DataStore, + expected_sled_ids: &[SledUuid], + ) { + let mut service_sled_ids = datastore + .vpc_resolve_to_sleds(*SERVICES_VPC_ID, &[]) + .await + .expect("failed to resolve to sleds") + .into_iter() + .map(|sled| SledUuid::from_untyped_uuid(sled.id())) + .collect::>(); + service_sled_ids.sort(); + assert_eq!(expected_sled_ids, service_sled_ids); } - impl Harness { - fn new(num_sleds: usize) -> Self { - let mut sled_ids = - (0..num_sleds).map(|_| Uuid::new_v4()).collect::>(); - sled_ids.sort(); - - let mut nexus_ips = NEXUS_OPTE_IPV4_SUBNET - .iter() - .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES) - .map(IpAddr::from); - let mut nexus_macs = MacAddr::iter_system(); - let nexuses = (0..num_sleds) - .map(|_| HarnessNexus { - id: Uuid::new_v4(), - ip: nexus_ips.next().unwrap(), - mac: nexus_macs.next().unwrap(), - nic_id: Uuid::new_v4(), - }) - .collect::>(); - Self { rack_id: Uuid::new_v4(), sled_ids, nexuses } - } - - fn db_sleds(&self) -> impl Iterator + '_ { - self.sled_ids.iter().copied().map(|sled_id| { - SledUpdate::new( - sled_id, - "[::1]:0".parse().unwrap(), - sled_baseboard_for_test(), - sled_system_hardware_for_test(), - self.rack_id, - Generation::new().into(), - ) - }) - } - - fn db_services( - &self, - ) -> impl Iterator< - Item = (db::model::Service, db::model::IncompleteNetworkInterface), - > + '_ { - self.sled_ids.iter().zip(&self.nexuses).map(|(sled_id, nexus)| { - let service = db::model::Service::new( - nexus.id, - *sled_id, - Some(nexus.id), - "[::1]:0".parse().unwrap(), - db::model::ServiceKind::Nexus, - ); - let name = format!("test-nexus-{}", nexus.id); - let nic = db::model::IncompleteNetworkInterface::new_service( - nexus.nic_id, - nexus.id, - NEXUS_VPC_SUBNET.clone(), - IdentityMetadataCreateParams { - name: name.parse().unwrap(), - description: name, - }, - nexus.ip, - nexus.mac, - 0, - ) - .expect("failed to create incomplete Nexus NIC"); - (service, nic) - }) - } - - fn blueprint_zone_configs( - &self, - ) -> impl Iterator + '_ { - self.db_services().map(|(service, nic)| { - let config = OmicronZoneConfig { - id: service.id(), - underlay_address: "::1".parse().unwrap(), - zone_type: OmicronZoneType::Nexus { - internal_address: "[::1]:0".to_string(), - external_ip: "::1".parse().unwrap(), - nic: NetworkInterface { - id: nic.identity.id, - kind: NetworkInterfaceKind::Service { - id: service.id(), - }, - name: format!("test-nic-{}", nic.identity.id) - .parse() - .unwrap(), - ip: nic.ip.unwrap(), - mac: nic.mac.unwrap(), - subnet: IpNet::from(*NEXUS_OPTE_IPV4_SUBNET), - vni: Vni::SERVICES_VNI, - primary: true, - slot: nic.slot.unwrap(), - }, - external_tls: false, - external_dns_servers: Vec::new(), - }, - }; - let zone_config = BlueprintZoneConfig { - config, - disposition: BlueprintZoneDisposition::InService, - }; - (service.sled_id, zone_config) - }) - } + async fn bp_insert_and_make_target( + opctx: &OpContext, + datastore: &DataStore, + bp: &Blueprint, + ) { + datastore + .blueprint_insert(opctx, bp) + .await + .expect("inserted blueprint"); + datastore + .blueprint_target_set_current( + opctx, + BlueprintTarget { + target_id: bp.id, + enabled: true, + time_made_target: Utc::now(), + }, + ) + .await + .expect("made blueprint the target"); } #[tokio::test] @@ 
-1659,250 +1517,233 @@ mod tests { let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; - // Helper function to fetch and sort the IDs of sleds we've resolved the - // SERVICES_VPC_ID to. - let fetch_service_sled_ids = || async { - let mut service_sled_ids = datastore - .vpc_resolve_to_sleds(*SERVICES_VPC_ID, &[]) - .await - .expect("failed to resolve to sleds") - .into_iter() - .map(|sled| sled.id()) - .collect::>(); - service_sled_ids.sort(); - service_sled_ids - }; - - // Create four sleds. - let harness = Harness::new(4); - for sled in harness.db_sleds() { - datastore.sled_upsert(sled).await.expect("failed to upsert sled"); - } - - // Insert two Nexus records into `service`, emulating RSS. - for (service, nic) in harness.db_services().take(2) { - datastore - .service_upsert(&opctx, service) - .await - .expect("failed to insert RSS-like service"); + // Set up our fake system with 5 sleds. + let rack_id = Uuid::new_v4(); + let mut system = SystemDescription::new(); + let mut sled_ids = Vec::new(); + for _ in 0..5 { + let sled_id = SledUuid::new_v4(); + sled_ids.push(sled_id); + system.sled(SledBuilder::new().id(sled_id)).expect("adding sled"); datastore - .service_create_network_interface_raw(&opctx, nic) + .sled_upsert(SledUpdate::new( + sled_id.into_untyped_uuid(), + "[::1]:0".parse().unwrap(), + sled_baseboard_for_test(), + sled_system_hardware_for_test(), + rack_id, + Generation::new().into(), + )) .await - .expect("failed to insert Nexus NIC"); + .expect("upserting sled"); } - - // Ensure we find the two sleds we expect after adding Nexus records. - assert_eq!(&harness.sled_ids[..2], fetch_service_sled_ids().await); - - // Create a blueprint that has a Nexus on our third sled. (This - // blueprint is completely invalid in many ways, but all we care about - // here is inserting relevant records in `bp_omicron_zone`.) - let bp1_zones = { - let (sled_id, zone_config) = harness - .blueprint_zone_configs() - .nth(2) - .expect("fewer than 3 services in test harness"); - let mut zones = BTreeMap::new(); - zones.insert( - sled_id, - BlueprintZonesConfig { - generation: Generation::new(), - zones: vec![zone_config], + sled_ids.sort_unstable(); + let planning_input = system + .to_planning_input_builder() + .expect("creating planning builder") + .build(); + + // Helper to convert a zone's nic into an insertable nic. + let db_nic_from_zone = |zone_config: &BlueprintZoneConfig| { + let (_, nic) = zone_config + .zone_type + .external_networking() + .expect("external networking for zone type"); + IncompleteNetworkInterface::new_service( + nic.id, + zone_config.id.into_untyped_uuid(), + NEXUS_VPC_SUBNET.clone(), + IdentityMetadataCreateParams { + name: nic.name.clone(), + description: nic.name.to_string(), }, - ); - zones - }; - let bp1_id = Uuid::new_v4(); - let bp1 = Blueprint { - id: bp1_id, - blueprint_zones: bp1_zones, - parent_blueprint_id: None, - internal_dns_version: Generation::new(), - external_dns_version: Generation::new(), - time_created: Utc::now(), - creator: "test".to_string(), - comment: "test".to_string(), + nic.ip, + nic.mac, + nic.slot, + ) + .expect("creating service nic") }; - datastore - .blueprint_insert(&opctx, &bp1) - .await - .expect("failed to insert blueprint"); - - // We haven't set a blueprint target yet, so we should still only see - // the two RSS-inserted service-running sleds. - assert_eq!(&harness.sled_ids[..2], fetch_service_sled_ids().await); - // Make bp1 the current target. 
- datastore - .blueprint_target_set_current( - &opctx, - BlueprintTarget { - target_id: bp1_id, - enabled: true, - time_made_target: Utc::now(), - }, + // Create an initial, empty blueprint, and make it the target. + let bp0 = BlueprintBuilder::build_empty_with_sleds( + sled_ids.iter().copied(), + "test", + ); + bp_insert_and_make_target(&opctx, &datastore, &bp0).await; + + // Our blueprint doesn't describe any services, so we shouldn't find any + // sled IDs running services. + assert_service_sled_ids(&datastore, &[]).await; + + // Create a blueprint that has a Nexus on our third sled. + let bp1 = { + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &bp0, + &planning_input, + "test", ) - .await - .expect("failed to set blueprint target"); + .expect("created blueprint builder"); + builder + .sled_ensure_zone_multiple_nexus_with_config( + sled_ids[2], + 1, + false, + Vec::new(), + ) + .expect("added nexus to third sled"); + builder.build() + }; + bp_insert_and_make_target(&opctx, &datastore, &bp1).await; // bp1 is the target, but we haven't yet inserted a vNIC record, so - // we'll still only see the original 2 sleds. - assert_eq!(&harness.sled_ids[..2], fetch_service_sled_ids().await); + // we still won't see any services on sleds. + assert_service_sled_ids(&datastore, &[]).await; // Insert the relevant service NIC record (normally performed by the // reconfigurator's executor). - datastore + let bp1_nic = datastore .service_create_network_interface_raw( &opctx, - harness.db_services().nth(2).unwrap().1, + db_nic_from_zone(&bp1.blueprint_zones[&sled_ids[2]].zones[0]), ) .await .expect("failed to insert service VNIC"); - - // We should now see _three_ sleds running services. - assert_eq!(&harness.sled_ids[..3], fetch_service_sled_ids().await); - - // Create another blueprint with no services and make it the target. - let bp2_id = Uuid::new_v4(); - let bp2 = Blueprint { - id: bp2_id, - blueprint_zones: BTreeMap::new(), - parent_blueprint_id: Some(bp1_id), - internal_dns_version: Generation::new(), - external_dns_version: Generation::new(), - time_created: Utc::now(), - creator: "test".to_string(), - comment: "test".to_string(), + // We should now see our third sled running a service. + assert_service_sled_ids(&datastore, &[sled_ids[2]]).await; + + // Create another blueprint, remove the one nexus we added, and make it + // the target. + let bp2 = { + let mut bp2 = bp1.clone(); + bp2.id = Uuid::new_v4(); + bp2.parent_blueprint_id = Some(bp1.id); + let sled2_zones = bp2 + .blueprint_zones + .get_mut(&sled_ids[2]) + .expect("zones for third sled"); + sled2_zones.zones.clear(); + sled2_zones.generation = sled2_zones.generation.next(); + bp2 }; - datastore - .blueprint_insert(&opctx, &bp2) - .await - .expect("failed to insert blueprint"); - datastore - .blueprint_target_set_current( - &opctx, - BlueprintTarget { - target_id: bp2_id, - enabled: true, - time_made_target: Utc::now(), - }, - ) - .await - .expect("failed to set blueprint target"); + bp_insert_and_make_target(&opctx, &datastore, &bp2).await; // We haven't removed the service NIC record, but we should no longer - // see the third sled here, because we should be back to just the - // original two services in the `service` table. - assert_eq!(&harness.sled_ids[..2], fetch_service_sled_ids().await); + // see the third sled here. We should be back to no sleds with services. + assert_service_sled_ids(&datastore, &[]).await; - // Insert a service NIC record for our fourth sled's Nexus. 
This - // shouldn't change our VPC resolution. + // Delete the service NIC record so we can reuse this IP later. datastore - .service_create_network_interface_raw( + .service_delete_network_interface( &opctx, - harness.db_services().nth(3).unwrap().1, + bp1.blueprint_zones[&sled_ids[2]].zones[0] + .id + .into_untyped_uuid(), + bp1_nic.id(), ) .await - .expect("failed to insert service VNIC"); - assert_eq!(&harness.sled_ids[..2], fetch_service_sled_ids().await); - - // Create a blueprint that has a Nexus on our fourth sled. This - // shouldn't change our VPC resolution. - let bp3_zones = { - let (sled_id, zone_config) = harness - .blueprint_zone_configs() - .nth(3) - .expect("fewer than 3 services in test harness"); - let mut zones = BTreeMap::new(); - zones.insert( - sled_id, - BlueprintZonesConfig { - generation: Generation::new(), - zones: vec![zone_config], - }, - ); - zones - }; - let bp3_id = Uuid::new_v4(); - let bp3 = Blueprint { - id: bp3_id, - blueprint_zones: bp3_zones, - parent_blueprint_id: Some(bp2_id), - internal_dns_version: Generation::new(), - external_dns_version: Generation::new(), - time_created: Utc::now(), - creator: "test".to_string(), - comment: "test".to_string(), - }; - datastore - .blueprint_insert(&opctx, &bp3) - .await - .expect("failed to insert blueprint"); - assert_eq!(&harness.sled_ids[..2], fetch_service_sled_ids().await); - - // Make this blueprint the target. We've already created the service - // VNIC, so we should immediately see our fourth sled in VPC resolution. - datastore - .blueprint_target_set_current( - &opctx, - BlueprintTarget { - target_id: bp3_id, - enabled: true, - time_made_target: Utc::now(), - }, + .expect("deleted bp1 nic"); + + // Create a blueprint with Nexus on all our sleds. + let bp3 = { + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &bp2, + &planning_input, + "test", ) - .await - .expect("failed to set blueprint target"); - assert_eq!( - &[harness.sled_ids[0], harness.sled_ids[1], harness.sled_ids[3]] - as &[Uuid], - fetch_service_sled_ids().await - ); - - // Finally, create a blueprint that includes our third and fourth sleds, - // make it the target, and ensure we resolve to all four sleds. - let bp4_zones = { - let mut zones = BTreeMap::new(); - for (sled_id, zone_config) in - harness.blueprint_zone_configs().skip(2) - { - zones.insert( - sled_id, - BlueprintZonesConfig { - generation: Generation::new(), - zones: vec![zone_config], - }, - ); + .expect("created blueprint builder"); + for &sled_id in &sled_ids { + builder + .sled_ensure_zone_multiple_nexus_with_config( + sled_id, + 1, + false, + Vec::new(), + ) + .expect("added nexus to third sled"); } - zones + builder.build() }; - let bp4_id = Uuid::new_v4(); - let bp4 = Blueprint { - id: bp4_id, - blueprint_zones: bp4_zones, - parent_blueprint_id: Some(bp3_id), - internal_dns_version: Generation::new(), - external_dns_version: Generation::new(), - time_created: Utc::now(), - creator: "test".to_string(), - comment: "test".to_string(), + + // Insert the service NIC records for all the Nexuses. + for &sled_id in &sled_ids { + datastore + .service_create_network_interface_raw( + &opctx, + db_nic_from_zone(&bp3.blueprint_zones[&sled_id].zones[0]), + ) + .await + .expect("failed to insert service VNIC"); + } + + // We haven't made bp3 the target yet, so our resolution is still based + // on bp2; more service vNICs shouldn't matter. 
+ assert_service_sled_ids(&datastore, &[]).await; + + // Make bp3 the target; we should immediately resolve that there are + // services on the sleds we set up in bp3. + bp_insert_and_make_target(&opctx, &datastore, &bp3).await; + assert_service_sled_ids(&datastore, &sled_ids).await; + + // --- + + // Mark some sleds as ineligible. Only the non-provisionable and + // in-service sleds should be returned. + let ineligible = IneligibleSleds { + expunged: sled_ids[0], + decommissioned: sled_ids[1], + illegal_decommissioned: sled_ids[2], + non_provisionable: sled_ids[3], }; - datastore - .blueprint_insert(&opctx, &bp4) + ineligible + .setup(&opctx, &datastore) .await - .expect("failed to insert blueprint"); - datastore - .blueprint_target_set_current( - &opctx, - BlueprintTarget { - target_id: bp4_id, - enabled: true, - time_made_target: Utc::now(), - }, - ) + .expect("failed to set up ineligible sleds"); + assert_service_sled_ids(&datastore, &sled_ids[3..=4]).await; + + // --- + + // Bring the sleds marked above back to life. + ineligible + .undo(&opctx, &datastore) .await - .expect("failed to set blueprint target"); - assert_eq!(harness.sled_ids, fetch_service_sled_ids().await); + .expect("failed to undo ineligible sleds"); + assert_service_sled_ids(&datastore, &sled_ids).await; + + // Make a new blueprint marking one of the zones as quiesced and one as + // expunged. Ensure that the sled with *quiesced* zone is returned by + // vpc_resolve_to_sleds, but the sled with the *expunged* zone is not. + // (But other services are still running.) + let bp4 = { + let mut bp4 = bp3.clone(); + bp4.id = Uuid::new_v4(); + bp4.parent_blueprint_id = Some(bp3.id); + + // Sled index 2's Nexus is quiesced (should be included). + let sled2 = bp4 + .blueprint_zones + .get_mut(&sled_ids[2]) + .expect("zones for sled"); + sled2.zones[0].disposition = BlueprintZoneDisposition::Quiesced; + sled2.generation = sled2.generation.next(); + + // Sled index 3's zone is expunged (should be excluded). + let sled3 = bp4 + .blueprint_zones + .get_mut(&sled_ids[3]) + .expect("zones for sled"); + sled3.zones[0].disposition = BlueprintZoneDisposition::Expunged; + sled3.generation = sled3.generation.next(); + + bp4 + }; + bp_insert_and_make_target(&opctx, &datastore, &bp4).await; + assert_service_sled_ids( + &datastore, + &[sled_ids[0], sled_ids[1], sled_ids[2], sled_ids[4]], + ) + .await; db.cleanup().await.unwrap(); logctx.cleanup_successful(); diff --git a/nexus/db-queries/src/db/datastore/zpool.rs b/nexus/db-queries/src/db/datastore/zpool.rs index b894d5c509..a771202387 100644 --- a/nexus/db-queries/src/db/datastore/zpool.rs +++ b/nexus/db-queries/src/db/datastore/zpool.rs @@ -14,10 +14,12 @@ use crate::db::datastore::OpContext; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; +use crate::db::model::PhysicalDisk; use crate::db::model::Sled; use crate::db::model::Zpool; use crate::db::pagination::paginated; use crate::db::pagination::Paginator; +use crate::db::TransactionError; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; @@ -32,12 +34,29 @@ use omicron_common::api::external::ResourceType; use uuid::Uuid; impl DataStore { + pub async fn zpool_insert( + &self, + opctx: &OpContext, + zpool: Zpool, + ) -> CreateResult { + let conn = &*self.pool_connection_authorized(&opctx).await?; + let zpool = + Self::zpool_insert_on_connection(&conn, opctx, zpool).await?; + Ok(zpool) + } + /// Stores a new zpool in the database. 
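For orientation, a sketch (not in the diff) of how the two zpool insertion entry points are meant to be used: the pool-based zpool_insert above and the connection-taking zpool_insert_on_connection that follows. The surrounding context is assumed:

    // Ordinary callers go through the pool-based wrapper, which grabs an
    // authorized connection, delegates, and returns an external Error.
    let pool = datastore.zpool_insert(opctx, zpool.clone()).await?;

    // Code that already holds a connection (for example, inside a larger
    // retryable transaction) can call the associated function directly; the
    // Fleet-modify authz check still runs there, and its error type is
    // TransactionError<Error>, so it can be bailed out of a transaction or
    // converted back into an external Error with `?`.
    let conn = datastore.pool_connection_authorized(opctx).await?;
    let pool =
        DataStore::zpool_insert_on_connection(&conn, opctx, zpool).await?;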
- pub async fn zpool_upsert(&self, zpool: Zpool) -> CreateResult { + pub async fn zpool_insert_on_connection( + conn: &async_bb8_diesel::Connection, + opctx: &OpContext, + zpool: Zpool, + ) -> Result> { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + use db::schema::zpool::dsl; let sled_id = zpool.sled_id; - Sled::insert_resource( + let pool = Sled::insert_resource( sled_id, diesel::insert_into(dsl::zpool) .values(zpool.clone()) @@ -48,9 +67,7 @@ impl DataStore { dsl::sled_id.eq(excluded(dsl::sled_id)), )), ) - .insert_and_get_result_async( - &*self.pool_connection_unauthorized().await?, - ) + .insert_and_get_result_async(conn) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { @@ -64,7 +81,9 @@ impl DataStore { &zpool.id().to_string(), ), ), - }) + })?; + + Ok(pool) } /// Fetches a page of the list of all zpools on U.2 disks in all sleds @@ -72,7 +91,7 @@ impl DataStore { &self, opctx: &OpContext, pagparams: &DataPageParams<'_, Uuid>, - ) -> ListResultVec { + ) -> ListResultVec<(Zpool, PhysicalDisk)> { opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; use db::schema::physical_disk::dsl as dsl_physical_disk; @@ -86,7 +105,7 @@ impl DataStore { ), ), ) - .select(Zpool::as_select()) + .select((Zpool::as_select(), PhysicalDisk::as_select())) .load_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) @@ -101,7 +120,7 @@ impl DataStore { pub async fn zpool_list_all_external_batched( &self, opctx: &OpContext, - ) -> ListResultVec { + ) -> ListResultVec<(Zpool, PhysicalDisk)> { opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; opctx.check_complex_operations_allowed()?; let mut zpools = Vec::new(); @@ -110,8 +129,7 @@ impl DataStore { let batch = self .zpool_list_all_external(opctx, &p.current_pagparams()) .await?; - paginator = - p.found_batch(&batch, &|z: &nexus_db_model::Zpool| z.id()); + paginator = p.found_batch(&batch, &|(z, _)| z.id()); zpools.extend(batch); } diff --git a/nexus/db-queries/src/db/error.rs b/nexus/db-queries/src/db/error.rs index fc7f30da93..29607588a4 100644 --- a/nexus/db-queries/src/db/error.rs +++ b/nexus/db-queries/src/db/error.rs @@ -4,6 +4,7 @@ //! Error handling and conversions. +use crate::transaction_retry::OptionalError; use diesel::result::DatabaseErrorInformation; use diesel::result::DatabaseErrorKind as DieselErrorKind; use diesel::result::Error as DieselError; @@ -70,6 +71,30 @@ impl TransactionError { } } +impl TransactionError { + /// Converts a TransactionError into a diesel error. + /// + /// The following pattern is used frequently in retryable transactions: + /// + /// - Create an `OptionalError>` + /// - Execute a series of database operations, which return the + /// `TransactionError` type + /// - If the underlying operations return a retryable error from diesel, + /// propagate that out. + /// - Otherwise, set the OptionalError to a value of T, and rollback the transaction. + /// + /// This function assists with that conversion. 
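A sketch of the calling pattern this doc comment describes, placed here ahead of the function itself; `MyStepError` and `do_step` are placeholders, while the helpers mirror ones that appear elsewhere in this diff:

    let err: OptionalError<TransactionError<MyStepError>> = OptionalError::new();
    let conn = self.pool_connection_authorized(opctx).await?;
    self.transaction_retry_wrapper("my_operation")
        .transaction(&conn, |conn| {
            let err = err.clone();
            async move {
                // do_step returns Result<_, TransactionError<MyStepError>>.
                do_step(&conn).await.map_err(|e| e.into_diesel(&err))?;
                Ok(())
            }
        })
        .await
        .map_err(|e| match err.take() {
            // A non-retryable error was stashed before rolling back.
            Some(txn_error) => Error::internal_error(&format!("{txn_error:?}")),
            // Otherwise this was a plain diesel error from the wrapper itself.
            None => public_error_from_diesel(e, ErrorHandler::Server),
        })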
+ pub fn into_diesel( + self, + err: &OptionalError>, + ) -> DieselError { + match self.retryable() { + MaybeRetryable::NotRetryable(txn_error) => err.bail(txn_error), + MaybeRetryable::Retryable(diesel_error) => diesel_error, + } + } +} + impl From for TransactionError { fn from(err: PublicError) -> Self { TransactionError::CustomError(err) diff --git a/nexus/db-queries/src/db/explain.rs b/nexus/db-queries/src/db/explain.rs index 3de5b4f280..24fd993040 100644 --- a/nexus/db-queries/src/db/explain.rs +++ b/nexus/db-queries/src/db/explain.rs @@ -4,6 +4,11 @@ //! Utility allowing Diesel to EXPLAIN queries. +// These utilities can be useful during development, so we don't want to +// `#[cfg(test)]` the module, but it's likely they won't be used outside of +// tests. +#![cfg_attr(not(test), allow(dead_code))] + use super::pool::DbConnection; use async_bb8_diesel::AsyncRunQueryDsl; use async_trait::async_trait; @@ -17,33 +22,6 @@ use diesel::result::Error as DieselError; /// Q: The Query we're explaining. /// /// EXPLAIN: -pub trait Explainable { - /// Syncronously issues an explain statement. - fn explain( - self, - conn: &mut DbConnection, - ) -> Result; -} - -impl Explainable for Q -where - Q: QueryFragment - + QueryId - + RunQueryDsl - + Sized - + 'static, -{ - fn explain( - self, - conn: &mut DbConnection, - ) -> Result { - Ok(ExplainStatement { query: self } - .get_results::(conn)? - .join("\n")) - } -} - -/// An async variant of [`Explainable`]. #[async_trait] pub trait ExplainableAsync { /// Asynchronously issues an explain statement. @@ -185,7 +163,8 @@ mod test { logctx.cleanup_successful(); } - // Tests that ".explain()" can tell us when we're doing full table scans. + // Tests that ".explain_async()" can tell us when we're doing full table + // scans. #[tokio::test] async fn test_explain_full_table_scan() { let logctx = dev::test_setup_log("test_explain_full_table_scan"); diff --git a/nexus/db-queries/src/db/fixed_data/allow_list.rs b/nexus/db-queries/src/db/fixed_data/allow_list.rs new file mode 100644 index 0000000000..33178f5530 --- /dev/null +++ b/nexus/db-queries/src/db/fixed_data/allow_list.rs @@ -0,0 +1,11 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Copyright 2024 Oxide Computer Company + +//! Fixed data for source IP allowlist implementation. + +/// UUID of singleton source IP allowlist. 
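One note on the explain.rs hunk above, before the allow-list constant continues: only the async flavor of the explain helper remains, which is what the tests exercise. A rough sketch of that test-only usage; the query and the exact connection plumbing are assumptions:

    use crate::db::explain::ExplainableAsync;

    let conn = datastore.pool_connection_for_tests().await.unwrap();
    let plan = my_query.explain_async(&conn).await.unwrap();
    // The EXPLAIN output comes back as a single string that a test can inspect,
    // for example to check that a query does not require a full table scan.
    println!("{plan}");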
+pub static USER_FACING_SERVICES_ALLOW_LIST_ID: uuid::Uuid = + uuid::uuid!("001de000-a110-4000-8000-000000000000"); diff --git a/nexus/db-queries/src/db/fixed_data/mod.rs b/nexus/db-queries/src/db/fixed_data/mod.rs index 4f896eb5d1..13444141cb 100644 --- a/nexus/db-queries/src/db/fixed_data/mod.rs +++ b/nexus/db-queries/src/db/fixed_data/mod.rs @@ -30,9 +30,11 @@ // 001de000-4401 built-in services project // 001de000-074c built-in services vpc // 001de000-c470 built-in services vpc subnets +// 001de000-all0 singleton ID for source IP allowlist ("all0" is like "allow") use once_cell::sync::Lazy; +pub mod allow_list; pub mod project; pub mod role_assignment; pub mod role_builtin; @@ -65,6 +67,7 @@ fn assert_valid_uuid(id: &uuid::Uuid) { #[cfg(test)] mod test { + use super::allow_list::USER_FACING_SERVICES_ALLOW_LIST_ID; use super::assert_valid_uuid; use super::FLEET_ID; @@ -72,4 +75,9 @@ mod test { fn test_builtin_fleet_id_is_valid() { assert_valid_uuid(&FLEET_ID); } + + #[test] + fn test_allowlist_id_is_valid() { + assert_valid_uuid(&USER_FACING_SERVICES_ALLOW_LIST_ID); + } } diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs index 380c9db140..487a68b517 100644 --- a/nexus/db-queries/src/db/lookup.rs +++ b/nexus/db-queries/src/db/lookup.rs @@ -364,18 +364,8 @@ impl<'a> LookupPath<'a> { } /// Select a resource of type PhysicalDisk, identified by its id - pub fn physical_disk( - self, - vendor: &str, - serial: &str, - model: &str, - ) -> PhysicalDisk<'a> { - PhysicalDisk::PrimaryKey( - Root { lookup_root: self }, - vendor.to_string(), - serial.to_string(), - model.to_string(), - ) + pub fn physical_disk(self, id: Uuid) -> PhysicalDisk<'a> { + PhysicalDisk::PrimaryKey(Root { lookup_root: self }, id) } pub fn silo_image_id(self, id: Uuid) -> SiloImage<'a> { @@ -836,11 +826,7 @@ lookup_resource! { children = [], lookup_by_name = false, soft_deletes = true, - primary_key_columns = [ - { column_name = "vendor", rust_type = String }, - { column_name = "serial", rust_type = String }, - { column_name = "model", rust_type = String } - ] + primary_key_columns = [ { column_name = "id", rust_type = Uuid } ] } lookup_resource! { diff --git a/nexus/db-queries/src/db/pool_connection.rs b/nexus/db-queries/src/db/pool_connection.rs index 0331a3a103..dae6a0ee51 100644 --- a/nexus/db-queries/src/db/pool_connection.rs +++ b/nexus/db-queries/src/db/pool_connection.rs @@ -4,204 +4,26 @@ //! Customization that happens on each connection as they're acquired. -use async_bb8_diesel::AsyncConnection; -use async_bb8_diesel::AsyncRunQueryDsl; use async_bb8_diesel::AsyncSimpleConnection; use async_bb8_diesel::Connection; use async_bb8_diesel::ConnectionError; use async_trait::async_trait; use bb8::CustomizeConnection; -use diesel::pg::GetPgMetadataCache; -use diesel::pg::PgMetadataCacheKey; -use diesel::prelude::*; use diesel::PgConnection; use diesel_dtrace::DTraceConnection; -use std::collections::HashMap; -use tokio::sync::Mutex; pub type DbConnection = DTraceConnection; -// This is a list of all user-defined types (ENUMS) in the current DB schema. -// -// Diesel looks up user-defined types as they are encountered, and loads -// them into a metadata cache. Although this cost is amortized over the lifetime -// of a connection, this can be slower than desired: -// - Diesel issues a round-trip database call on each user-defined type -// - The cache of OIDs for user-defined types is "per-connection", so when -// using a connection pool, we redo all these calls for new connections. 
-// -// To mitigate: We look up a list of user-defined types here on first access -// to the connection, and pre-populate the cache. Furthermore, we save this -// information and use it to populate other connections too, without incurring -// another database lookup. -// -// See https://github.com/oxidecomputer/omicron/issues/4733 for more context. -static CUSTOM_TYPE_KEYS: &'static [&'static str] = &[ - "address_lot_kind", - "authentication_mode", - "bfd_mode", - "block_size", - "caboose_which", - "dataset_kind", - "dns_group", - "downstairs_client_stop_request_reason_type", - "downstairs_client_stopped_reason_type", - "hw_power_state", - "hw_rot_slot", - "identity_type", - "instance_state", - "ip_attach_state", - "ip_kind", - "ip_pool_resource_type", - "network_interface_kind", - "physical_disk_kind", - "producer_kind", - "provider_type", - "root_of_trust_page_which", - "router_route_kind", - "saga_state", - "service_kind", - "sled_policy", - "sled_resource_kind", - "sled_role", - "sled_state", - "snapshot_state", - "sp_type", - "switch_interface_kind", - "switch_link_fec", - "switch_link_speed", - "switch_port_geometry", - "upstairs_repair_notification_type", - "upstairs_repair_type", - "user_provision_type", - "vpc_firewall_rule_action", - "vpc_firewall_rule_direction", - "vpc_firewall_rule_protocol", - "vpc_firewall_rule_status", - "vpc_router_kind", - "zone_type", -]; -const CUSTOM_TYPE_SCHEMA: &'static str = "public"; - pub const DISALLOW_FULL_TABLE_SCAN_SQL: &str = "set disallow_full_table_scans = on; set large_full_scan_rows = 0;"; -#[derive(Debug)] -struct OIDCache(HashMap, (u32, u32)>); - -impl OIDCache { - // Populate a new OID cache by pre-filling values - async fn new( - conn: &mut Connection, - ) -> Result { - // Lookup all the OIDs for custom types. - // - // As a reminder, this is an optimization: - // - If we supply a value in CUSTOM_TYPE_KEYS that does not - // exist in the schema, the corresponding row won't be - // found, so the value will be ignored. - // - If we don't supply a value in CUSTOM_TYPE_KEYS, even - // though it DOES exist in the schema, it'll likewise not - // get pre-populated into the cache. Diesel would observe - // the cache miss, and perform the lookup later. - let results: Vec = pg_type::table - .select((pg_type::typname, pg_type::oid, pg_type::typarray)) - .inner_join( - pg_namespace::table - .on(pg_type::typnamespace.eq(pg_namespace::oid)), - ) - .filter(pg_type::typname.eq_any(CUSTOM_TYPE_KEYS)) - .filter(pg_namespace::nspname.eq(CUSTOM_TYPE_SCHEMA)) - .load_async(&*conn) - .await?; - - // Convert the OIDs into a ("Cache Key", "OID Tuple") pair, - // and store the result in a HashMap. - // - // We'll iterate over this HashMap to pre-populate the connection-local cache for all - // future connections, including this one. - Ok::<_, ConnectionError>(Self(HashMap::from_iter( - results.into_iter().map( - |PgTypeMetadata { typname, oid, array_oid }| { - ( - PgMetadataCacheKey::new( - Some(CUSTOM_TYPE_SCHEMA.into()), - std::borrow::Cow::Owned(typname), - ), - (oid, array_oid), - ) - }, - ), - ))) - } -} - -// String-based representation of the CockroachDB version. -// -// We currently do minimal parsing of this value, but it should -// be distinct between different revisions of CockroachDB. -// This version includes the semver version of the DB, but also -// build and target information. 
-#[derive(Debug, Eq, PartialEq, Hash)] -struct CockroachVersion(String); - -impl CockroachVersion { - async fn new( - conn: &Connection, - ) -> Result { - diesel::sql_function!(fn version() -> Text); - - let version = - diesel::select(version()).get_result_async::(conn).await?; - Ok(Self(version)) - } -} - /// A customizer for all new connections made to CockroachDB, from Diesel. #[derive(Debug)] -pub(crate) struct ConnectionCustomizer { - oid_caches: Mutex>, -} +pub(crate) struct ConnectionCustomizer {} impl ConnectionCustomizer { pub(crate) fn new() -> Self { - Self { oid_caches: Mutex::new(HashMap::new()) } - } - - async fn populate_metadata_cache( - &self, - conn: &mut Connection, - ) -> Result<(), ConnectionError> { - // Look up the CockroachDB version for new connections, to ensure - // that OID caches are distinct between different CRDB versions. - // - // This step is performed out of an abundance of caution: OIDs are not - // necessarily stable across major releases of CRDB, and this ensures - // that the OID lookups on custom types do not cross this version - // boundary. - let version = CockroachVersion::new(conn).await?; - - // Lookup the OID cache, or populate it if we haven't previously - // established a connection to this database version. - let mut oid_caches = self.oid_caches.lock().await; - let entry = oid_caches.entry(version); - use std::collections::hash_map::Entry::*; - let oid_cache = match entry { - Occupied(ref entry) => entry.get(), - Vacant(entry) => entry.insert(OIDCache::new(conn).await?), - }; - - // Copy the OID cache into this specific connection. - // - // NOTE: I don't love that this is blocking (due to "as_sync_conn"), but the - // "get_metadata_cache" method does not seem implemented for types that could have a - // non-Postgres backend. - let mut sync_conn = conn.as_sync_conn(); - let cache = sync_conn.get_metadata_cache(); - for (k, v) in &oid_cache.0 { - cache.store_type(k.clone(), *v); - } - Ok(()) + Self {} } async fn disallow_full_table_scans( @@ -221,90 +43,7 @@ impl CustomizeConnection, ConnectionError> &self, conn: &mut Connection, ) -> Result<(), ConnectionError> { - self.populate_metadata_cache(conn).await?; self.disallow_full_table_scans(conn).await?; Ok(()) } } - -#[derive(Debug, Clone, Hash, PartialEq, Eq, Queryable)] -pub struct PgTypeMetadata { - typname: String, - oid: u32, - array_oid: u32, -} - -table! { - pg_type (oid) { - oid -> Oid, - typname -> Text, - typarray -> Oid, - typnamespace -> Oid, - } -} - -table! { - pg_namespace (oid) { - oid -> Oid, - nspname -> Text, - } -} - -allow_tables_to_appear_in_same_query!(pg_type, pg_namespace); - -#[cfg(test)] -mod test { - use super::*; - use nexus_test_utils::db::test_setup_database; - use omicron_test_utils::dev; - - // Ensure that the "CUSTOM_TYPE_KEYS" values match the enums - // we find within the database. - // - // If the two are out-of-sync, identify the values causing problems. 
- #[tokio::test] - async fn all_enums_in_prepopulate_list() { - let logctx = dev::test_setup_log("test_project_creation"); - let mut crdb = test_setup_database(&logctx.log).await; - let client = crdb.connect().await.expect("Failed to connect to CRDB"); - - // https://www.cockroachlabs.com/docs/stable/show-enums - let rows = client - .query("SHOW ENUMS FROM omicron.public;", &[]) - .await - .unwrap_or_else(|_| panic!("failed to list enums")); - client.cleanup().await.expect("cleaning up after listing enums"); - - let mut observed_public_enums = rows - .into_iter() - .map(|row| -> String { - for i in 0..row.len() { - if row.columns()[i].name() == "name" { - return row.get(i); - } - } - panic!("Missing 'name' in row: {row:?}"); - }) - .collect::>(); - observed_public_enums.sort(); - - let mut expected_enums: Vec = - CUSTOM_TYPE_KEYS.into_iter().map(|s| s.to_string()).collect(); - expected_enums.sort(); - - pretty_assertions::assert_eq!( - observed_public_enums, - expected_enums, - "Enums did not match.\n\ - If the type is present on the left, but not the right:\n\ - \tThe enum is in the DB, but not in CUSTOM_TYPE_KEYS.\n\ - \tConsider adding it, so we can pre-populate the OID cache.\n\ - If the type is present on the right, but not the left:\n\ - \tThe enum is not the DB, but it is in CUSTOM_TYPE_KEYS.\n\ - \tConsider removing it, because the type no longer exists" - ); - - crdb.cleanup().await.unwrap(); - logctx.cleanup_successful(); - } -} diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 0502450121..7d5e254aac 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -879,7 +879,6 @@ mod tests { use crate::db::model::IpKind; use crate::db::model::IpPool; use crate::db::model::IpPoolRange; - use crate::db::model::Name; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use dropshot::test_util::LogContext; @@ -889,13 +888,21 @@ mod tests { use nexus_db_model::IpPoolResource; use nexus_db_model::IpPoolResourceType; use nexus_test_utils::db::test_setup_database; + use nexus_types::deployment::OmicronZoneExternalFloatingIp; + use nexus_types::deployment::OmicronZoneExternalIp; + use nexus_types::deployment::OmicronZoneExternalSnatIp; use nexus_types::external_api::params::InstanceCreate; use nexus_types::external_api::shared::IpRange; + use nexus_types::inventory::SourceNatConfig; use omicron_common::address::NUM_SOURCE_NAT_PORTS; use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_test_utils::dev; use omicron_test_utils::dev::db::CockroachInstance; + use omicron_uuid_kinds::ExternalIpUuid; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::OmicronZoneUuid; + use sled_agent_client::ZoneKind; use std::net::IpAddr; use std::net::Ipv4Addr; use std::sync::Arc; @@ -1325,163 +1332,9 @@ mod tests { } #[tokio::test] - async fn test_next_external_ip_for_service() { - let context = - TestContext::new("test_next_external_ip_for_service").await; - - let ip_range = IpRange::try_from(( - Ipv4Addr::new(10, 0, 0, 1), - Ipv4Addr::new(10, 0, 0, 3), - )) - .unwrap(); - context.initialize_ip_pool(SERVICE_IP_POOL_NAME, ip_range).await; - - // Allocate an IP address as we would for an external, rack-associated - // service. 
- let service1_id = Uuid::new_v4(); - - // Check that `service_lookup_external_ips` returns an empty vector for - // a service with no external IPs. - assert_eq!( - context - .db_datastore - .service_lookup_external_ips(&context.opctx, service1_id) - .await - .expect("Failed to look up service external IPs"), - Vec::new(), - ); - - let id1 = Uuid::new_v4(); - let ip1 = context - .db_datastore - .allocate_service_ip( - &context.opctx, - id1, - &Name("service1-ip".parse().unwrap()), - "service1-ip", - service1_id, - ) - .await - .expect("Failed to allocate service IP address"); - assert!(ip1.is_service); - assert_eq!(ip1.kind, IpKind::Floating); - assert_eq!(ip1.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))); - assert_eq!(ip1.first_port.0, 0); - assert_eq!(ip1.last_port.0, u16::MAX); - assert_eq!(ip1.parent_id, Some(service1_id)); - assert_eq!( - context - .db_datastore - .service_lookup_external_ips(&context.opctx, service1_id) - .await - .expect("Failed to look up service external IPs"), - vec![ip1], - ); - - // Allocate an SNat IP - let service2_id = Uuid::new_v4(); - let id2 = Uuid::new_v4(); - let ip2 = context - .db_datastore - .allocate_service_snat_ip(&context.opctx, id2, service2_id) - .await - .expect("Failed to allocate service IP address"); - assert!(ip2.is_service); - assert_eq!(ip2.kind, IpKind::SNat); - assert_eq!(ip2.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2))); - assert_eq!(ip2.first_port.0, 0); - assert_eq!(ip2.last_port.0, 16383); - assert_eq!(ip2.parent_id, Some(service2_id)); - assert_eq!( - context - .db_datastore - .service_lookup_external_ips(&context.opctx, service2_id) - .await - .expect("Failed to look up service external IPs"), - vec![ip2], - ); - - // Allocate the next IP address - let service3_id = Uuid::new_v4(); - let id3 = Uuid::new_v4(); - let ip3 = context - .db_datastore - .allocate_service_ip( - &context.opctx, - id3, - &Name("service3-ip".parse().unwrap()), - "service3-ip", - service3_id, - ) - .await - .expect("Failed to allocate service IP address"); - assert!(ip3.is_service); - assert_eq!(ip3.kind, IpKind::Floating); - assert_eq!(ip3.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 3))); - assert_eq!(ip3.first_port.0, 0); - assert_eq!(ip3.last_port.0, u16::MAX); - assert_eq!(ip3.parent_id, Some(service3_id)); - assert_eq!( - context - .db_datastore - .service_lookup_external_ips(&context.opctx, service3_id) - .await - .expect("Failed to look up service external IPs"), - vec![ip3], - ); - - // Once we're out of IP addresses, test that we see the right error. 
- let service3_id = Uuid::new_v4(); - let id3 = Uuid::new_v4(); - let err = context - .db_datastore - .allocate_service_ip( - &context.opctx, - id3, - &Name("service3-ip".parse().unwrap()), - "service3-ip", - service3_id, - ) - .await - .expect_err("Should have failed to allocate after pool exhausted"); - assert_eq!( - err, - Error::insufficient_capacity( - "No external IP addresses available", - "NextExternalIp::new returned NotFound", - ), - ); - - // But we should be able to allocate another SNat IP - let service4_id = Uuid::new_v4(); - let id4 = Uuid::new_v4(); - let ip4 = context - .db_datastore - .allocate_service_snat_ip(&context.opctx, id4, service4_id) - .await - .expect("Failed to allocate service IP address"); - assert!(ip4.is_service); - assert_eq!(ip4.kind, IpKind::SNat); - assert_eq!(ip4.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2))); - assert_eq!(ip4.first_port.0, 16384); - assert_eq!(ip4.last_port.0, 32767); - assert_eq!(ip4.parent_id, Some(service4_id)); - assert_eq!( - context - .db_datastore - .service_lookup_external_ips(&context.opctx, service4_id) - .await - .expect("Failed to look up service external IPs"), - vec![ip4], - ); - - context.success().await; - } - - #[tokio::test] - async fn test_explicit_external_ip_for_service_is_idempotent() { + async fn test_external_ip_allocate_omicron_zone_is_idempotent() { let context = TestContext::new( - "test_explicit_external_ip_for_service_is_idempotent", + "test_external_ip_allocate_omicron_zone_is_idempotent", ) .await; @@ -1492,19 +1345,27 @@ mod tests { .unwrap(); context.initialize_ip_pool(SERVICE_IP_POOL_NAME, ip_range).await; + let ip_10_0_0_2 = + OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip: "10.0.0.2".parse().unwrap(), + }); + let ip_10_0_0_3 = + OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip: "10.0.0.3".parse().unwrap(), + }); + // Allocate an IP address as we would for an external, rack-associated // service. - let service_id = Uuid::new_v4(); - let id = Uuid::new_v4(); + let service_id = OmicronZoneUuid::new_v4(); let ip = context .db_datastore - .allocate_explicit_service_ip( + .external_ip_allocate_omicron_zone( &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 3)), + ZoneKind::Nexus, + ip_10_0_0_3, ) .await .expect("Failed to allocate service IP address"); @@ -1512,18 +1373,16 @@ mod tests { assert_eq!(ip.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 3))); assert_eq!(ip.first_port.0, 0); assert_eq!(ip.last_port.0, u16::MAX); - assert_eq!(ip.parent_id, Some(service_id)); + assert_eq!(ip.parent_id, Some(service_id.into_untyped_uuid())); // Try allocating the same service IP again. let ip_again = context .db_datastore - .allocate_explicit_service_ip( + .external_ip_allocate_omicron_zone( &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 3)), + ZoneKind::Nexus, + ip_10_0_0_3, ) .await .expect("Failed to allocate service IP address"); @@ -1535,13 +1394,14 @@ mod tests { // different UUID. 
let err = context .db_datastore - .allocate_explicit_service_ip( + .external_ip_allocate_omicron_zone( &context.opctx, - Uuid::new_v4(), - &Name("service-ip".parse().unwrap()), - "service-ip", service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 3)), + ZoneKind::Nexus, + OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip: ip_10_0_0_3.ip(), + }), ) .await .expect_err("Should have failed to re-allocate same IP address (different UUID)"); @@ -1554,13 +1414,14 @@ mod tests { // different input address. let err = context .db_datastore - .allocate_explicit_service_ip( + .external_ip_allocate_omicron_zone( &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2)), + ZoneKind::Nexus, + OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { + id: ip_10_0_0_3.id(), + ip: ip_10_0_0_2.ip(), + }), ) .await .expect_err("Should have failed to re-allocate different IP address (same UUID)"); @@ -1571,14 +1432,19 @@ mod tests { // Try allocating the same service IP once more, but do it with a // different port range. + let ip_10_0_0_3_snat_0 = + OmicronZoneExternalIp::Snat(OmicronZoneExternalSnatIp { + id: ip_10_0_0_3.id(), + snat_cfg: SourceNatConfig::new(ip_10_0_0_3.ip(), 0, 16383) + .unwrap(), + }); let err = context .db_datastore - .allocate_explicit_service_snat_ip( + .external_ip_allocate_omicron_zone( &context.opctx, - id, service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 3)), - (0, 16383), + ZoneKind::BoundaryNtp, + ip_10_0_0_3_snat_0, ) .await .expect_err("Should have failed to re-allocate different IP address (different port range)"); @@ -1588,16 +1454,24 @@ mod tests { ); // This time start with an explicit SNat - let snat_service_id = Uuid::new_v4(); - let snat_id = Uuid::new_v4(); + let ip_10_0_0_1_snat_32768 = + OmicronZoneExternalIp::Snat(OmicronZoneExternalSnatIp { + id: ExternalIpUuid::new_v4(), + snat_cfg: SourceNatConfig::new( + "10.0.0.1".parse().unwrap(), + 32768, + 49151, + ) + .unwrap(), + }); + let snat_service_id = OmicronZoneUuid::new_v4(); let snat_ip = context .db_datastore - .allocate_explicit_service_snat_ip( + .external_ip_allocate_omicron_zone( &context.opctx, - snat_id, snat_service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), - (32768, 49151), + ZoneKind::BoundaryNtp, + ip_10_0_0_1_snat_32768, ) .await .expect("Failed to allocate service IP address"); @@ -1606,17 +1480,19 @@ mod tests { assert_eq!(snat_ip.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))); assert_eq!(snat_ip.first_port.0, 32768); assert_eq!(snat_ip.last_port.0, 49151); - assert_eq!(snat_ip.parent_id, Some(snat_service_id)); + assert_eq!( + snat_ip.parent_id, + Some(snat_service_id.into_untyped_uuid()) + ); // Try allocating the same service IP again. let snat_ip_again = context .db_datastore - .allocate_explicit_service_snat_ip( + .external_ip_allocate_omicron_zone( &context.opctx, - snat_id, snat_service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), - (32768, 49151), + ZoneKind::BoundaryNtp, + ip_10_0_0_1_snat_32768, ) .await .expect("Failed to allocate service IP address"); @@ -1628,14 +1504,23 @@ mod tests { // Try allocating the same service IP once more, but do it with a // different port range. 
+ let ip_10_0_0_1_snat_49152 = + OmicronZoneExternalIp::Snat(OmicronZoneExternalSnatIp { + id: ip_10_0_0_1_snat_32768.id(), + snat_cfg: SourceNatConfig::new( + ip_10_0_0_1_snat_32768.ip(), + 49152, + 65535, + ) + .unwrap(), + }); let err = context .db_datastore - .allocate_explicit_service_snat_ip( + .external_ip_allocate_omicron_zone( &context.opctx, - snat_id, snat_service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), - (49152, 65535), + ZoneKind::BoundaryNtp, + ip_10_0_0_1_snat_49152, ) .await .expect_err("Should have failed to re-allocate different IP address (different port range)"); @@ -1648,9 +1533,9 @@ mod tests { } #[tokio::test] - async fn test_explicit_external_ip_for_service_out_of_range() { + async fn test_external_ip_allocate_omicron_zone_out_of_range() { let context = TestContext::new( - "test_explicit_external_ip_for_service_out_of_range", + "test_external_ip_allocate_omicron_zone_out_of_range", ) .await; @@ -1661,17 +1546,20 @@ mod tests { .unwrap(); context.initialize_ip_pool(SERVICE_IP_POOL_NAME, ip_range).await; - let service_id = Uuid::new_v4(); - let id = Uuid::new_v4(); + let ip_10_0_0_5 = + OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip: "10.0.0.5".parse().unwrap(), + }); + + let service_id = OmicronZoneUuid::new_v4(); let err = context .db_datastore - .allocate_explicit_service_ip( + .external_ip_allocate_omicron_zone( &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", service_id, - IpAddr::V4(Ipv4Addr::new(10, 0, 0, 5)), + ZoneKind::Nexus, + ip_10_0_0_5, ) .await .expect_err("Should have failed to allocate out-of-bounds IP"); @@ -1683,116 +1571,6 @@ mod tests { context.success().await; } - #[tokio::test] - async fn test_insert_external_ip_for_service_is_idempotent() { - let context = TestContext::new( - "test_insert_external_ip_for_service_is_idempotent", - ) - .await; - - let ip_range = IpRange::try_from(( - Ipv4Addr::new(10, 0, 0, 1), - Ipv4Addr::new(10, 0, 0, 2), - )) - .unwrap(); - context.initialize_ip_pool(SERVICE_IP_POOL_NAME, ip_range).await; - - // Allocate an IP address as we would for an external, rack-associated - // service. - let service_id = Uuid::new_v4(); - let id = Uuid::new_v4(); - let ip = context - .db_datastore - .allocate_service_ip( - &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", - service_id, - ) - .await - .expect("Failed to allocate service IP address"); - assert!(ip.is_service); - assert_eq!(ip.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))); - assert_eq!(ip.first_port.0, 0); - assert_eq!(ip.last_port.0, u16::MAX); - assert_eq!(ip.parent_id, Some(service_id)); - - let ip_again = context - .db_datastore - .allocate_service_ip( - &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", - service_id, - ) - .await - .expect("Failed to allocate service IP address"); - - assert_eq!(ip.id, ip_again.id); - assert_eq!(ip.ip.ip(), ip_again.ip.ip()); - - context.success().await; - } - - // This test is identical to "test_insert_external_ip_is_idempotent", - // but tries to make an idempotent allocation after all addresses in the - // pool have been allocated. 
- #[tokio::test] - async fn test_insert_external_ip_for_service_is_idempotent_even_when_full() - { - let context = TestContext::new( - "test_insert_external_ip_is_idempotent_even_when_full", - ) - .await; - - let ip_range = IpRange::try_from(( - Ipv4Addr::new(10, 0, 0, 1), - Ipv4Addr::new(10, 0, 0, 1), - )) - .unwrap(); - context.initialize_ip_pool(SERVICE_IP_POOL_NAME, ip_range).await; - - // Allocate an IP address as we would for an external, rack-associated - // service. - let service_id = Uuid::new_v4(); - let id = Uuid::new_v4(); - let ip = context - .db_datastore - .allocate_service_ip( - &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", - service_id, - ) - .await - .expect("Failed to allocate service IP address"); - assert!(ip.is_service); - assert_eq!(ip.ip.ip(), IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))); - assert_eq!(ip.first_port.0, 0); - assert_eq!(ip.last_port.0, u16::MAX); - assert_eq!(ip.parent_id, Some(service_id)); - - let ip_again = context - .db_datastore - .allocate_service_ip( - &context.opctx, - id, - &Name("service-ip".parse().unwrap()), - "service-ip", - service_id, - ) - .await - .expect("Failed to allocate service IP address"); - - assert_eq!(ip.id, ip_again.id); - assert_eq!(ip.ip.ip(), ip_again.ip.ip()); - - context.success().await; - } - #[tokio::test] async fn test_insert_external_ip_is_idempotent() { let context = diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs index c0fc18aca1..69c1827b6d 100644 --- a/nexus/db-queries/src/db/queries/network_interface.rs +++ b/nexus/db-queries/src/db/queries/network_interface.rs @@ -11,15 +11,16 @@ use crate::db::pool::DbConnection; use crate::db::queries::next_item::DefaultShiftGenerator; use crate::db::queries::next_item::NextItem; use crate::db::schema::network_interface::dsl; +use async_bb8_diesel::AsyncRunQueryDsl; use chrono::DateTime; use chrono::Utc; use diesel::pg::Pg; use diesel::prelude::Column; -use diesel::query_builder::AstPass; use diesel::query_builder::QueryFragment; use diesel::query_builder::QueryId; +use diesel::query_builder::{AstPass, Query}; use diesel::result::Error as DieselError; -use diesel::sql_types; +use diesel::sql_types::{self, Nullable}; use diesel::Insertable; use diesel::QueryResult; use diesel::RunQueryDsl; @@ -31,6 +32,7 @@ use nexus_db_model::{NetworkInterfaceKindEnum, SqlU8}; use omicron_common::api::external; use omicron_common::api::external::MacAddr; use once_cell::sync::Lazy; +use slog_error_chain::SlogInlineError; use std::net::IpAddr; use uuid::Uuid; @@ -1059,7 +1061,7 @@ impl InsertQuery { let next_mac_subquery = NextMacAddress::new(interface.subnet.vpc_id, interface.kind); let next_ipv4_address_subquery = NextIpv4Address::new( - interface.subnet.ipv4_block.0 .0, + interface.subnet.ipv4_block.0.into(), interface.subnet.identity.id, ); let next_slot_subquery = NextNicSlot::new(interface.parent_id); @@ -1502,18 +1504,39 @@ fn push_instance_state_verification_subquery<'a>( /// parent_id = AND /// kind = AND /// time_deleted IS NULL -/// ) = 1, +/// ) <= 1, /// '', /// 'secondaries' /// ) AS UUID) +/// ), +/// found_interface AS ( +/// SELECT +/// id +/// FROM +/// network_interface +/// WHERE +/// id = +/// ), +/// updated AS ( +/// UPDATE +/// network_interface +/// SET +/// time_deleted = NOW() +/// WHERE +/// id = AND +/// time_deleted IS NULL +/// RETURNING +/// id /// ) -/// UPDATE -/// network_interface -/// SET -/// time_deleted = NOW() -/// WHERE -/// id = AND -/// time_deleted 
IS NULL +/// SELECT +/// found_interface.id, +/// updated.id +/// FROM +/// found_interface +/// LEFT JOIN +/// updated +/// ON +/// found_interface.id = updated.id /// ``` /// /// Notes @@ -1545,6 +1568,37 @@ impl DeleteQuery { parent_id_str: parent_id.to_string(), } } + + /// Issue the delete and parses the result. + /// + /// The three outcomes are: + /// - Ok(Row exists and was deleted) + /// - Ok(Row exists, but was not deleted) + /// - Error (row doesn't exist, or other diesel error) + pub async fn execute_and_check( + self, + conn: &async_bb8_diesel::Connection, + ) -> Result { + let (found_id, deleted_id) = + self.get_result_async::<(Option, Option)>(conn).await?; + match (found_id, deleted_id) { + (Some(found), Some(deleted)) => { + assert_eq!( + found, deleted, + "internal query error: mismatched interface IDs" + ); + Ok(true) + } + (Some(_), None) => Ok(false), + (None, Some(deleted)) => { + panic!( + "internal query error: \ + deleted nonexisted interface {deleted}" + ) + } + (None, None) => Err(DieselError::NotFound), + } + } } impl QueryId for DeleteQuery { @@ -1592,13 +1646,21 @@ impl QueryFragment for DeleteQuery { )?; out.push_sql(" AND "); out.push_identifier(dsl::time_deleted::NAME)?; - out.push_sql(" IS NULL) = 1, "); + out.push_sql(" IS NULL) <= 1, "); out.push_bind_param::(&self.parent_id_str)?; out.push_sql(", "); out.push_bind_param::( &DeleteError::HAS_SECONDARIES_SENTINEL, )?; - out.push_sql(") AS UUID)) UPDATE "); + out.push_sql(") AS UUID)), found_interface AS (SELECT "); + out.push_identifier(dsl::id::NAME)?; + out.push_sql(" FROM "); + NETWORK_INTERFACE_FROM_CLAUSE.walk_ast(out.reborrow())?; + out.push_sql(" WHERE "); + out.push_identifier(dsl::id::NAME)?; + out.push_sql(" = "); + out.push_bind_param::(&self.interface_id)?; + out.push_sql("), updated AS (UPDATE "); NETWORK_INTERFACE_FROM_CLAUSE.walk_ast(out.reborrow())?; out.push_sql(" SET "); out.push_identifier(dsl::time_deleted::NAME)?; @@ -1608,25 +1670,43 @@ impl QueryFragment for DeleteQuery { out.push_bind_param::(&self.interface_id)?; out.push_sql(" AND "); out.push_identifier(dsl::time_deleted::NAME)?; - out.push_sql(" IS NULL"); + out.push_sql(" IS NULL RETURNING "); + out.push_identifier(dsl::id::NAME)?; + out.push_sql(") SELECT found_interface."); + out.push_identifier(dsl::id::NAME)?; + out.push_sql(", updated."); + out.push_identifier(dsl::id::NAME)?; + out.push_sql(" FROM found_interface LEFT JOIN updated"); + out.push_sql(" ON found_interface."); + out.push_identifier(dsl::id::NAME)?; + out.push_sql(" = updated."); + out.push_identifier(dsl::id::NAME)?; Ok(()) } } +impl Query for DeleteQuery { + type SqlType = (Nullable, Nullable); +} + impl RunQueryDsl for DeleteQuery {} /// Errors related to deleting a network interface -#[derive(Debug)] +#[derive(Debug, thiserror::Error, SlogInlineError, PartialEq)] pub enum DeleteError { /// Attempting to delete the primary interface, while there still exist /// secondary interfaces. + #[error("cannot delete primary interface while secondaries exist")] SecondariesExist(Uuid), /// Instance must be stopped or failed prior to deleting interfaces from it + #[error("cannot delete interface in current instance state")] InstanceBadState(Uuid), /// The instance does not exist at all, or is in the destroyed state. 
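
The three outcomes documented for `execute_and_check` above map directly onto how a datastore caller can tell an idempotent repeat delete apart from a missing row. A minimal caller-side sketch (the `query`, `conn`, and `log` bindings are assumed for illustration):

match query.execute_and_check(&conn).await {
    // Row existed and this call soft-deleted it.
    Ok(true) => info!(log, "deleted network interface"),
    // Row exists but was already soft-deleted: treat as idempotent success.
    Ok(false) => info!(log, "network interface was already deleted"),
    // No such interface at all; the NotFound plumbing added below maps this
    // to an ObjectNotFound error for the caller.
    Err(diesel::result::Error::NotFound) => warn!(log, "no such network interface"),
    // Anything else is a genuine database error.
    Err(err) => return Err(err),
}
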
+ #[error("instance not found ({0})")] InstanceNotFound(Uuid), /// Any other error - External(external::Error), + #[error("cannot delete interface")] + External(#[source] external::Error), } impl DeleteError { @@ -1649,6 +1729,24 @@ impl DeleteError { query.parent_id, ) } + // Faithfully plumb through `NotFound` + DieselError::NotFound => { + let type_name = match query.kind { + NetworkInterfaceKind::Instance => { + external::ResourceType::InstanceNetworkInterface + } + NetworkInterfaceKind::Service => { + external::ResourceType::ServiceNetworkInterface + } + NetworkInterfaceKind::Probe => { + external::ResourceType::ProbeNetworkInterface + } + }; + DeleteError::External(external::Error::ObjectNotFound { + type_name, + lookup_type: external::LookupType::ById(query.interface_id), + }) + } // Any other error at all is a bug _ => DeleteError::External(error::public_error_from_diesel( e, @@ -1743,6 +1841,7 @@ fn decode_delete_network_interface_database_error( mod tests { use super::first_available_address; use super::last_address_offset; + use super::DeleteError; use super::InsertError; use super::MAX_NICS_PER_INSTANCE; use super::NUM_INITIAL_RESERVED_IP_ADDRESSES; @@ -1760,8 +1859,6 @@ mod tests { use crate::db::queries::network_interface::NextMacShifts; use async_bb8_diesel::AsyncRunQueryDsl; use dropshot::test_util::LogContext; - use ipnetwork::Ipv4Network; - use ipnetwork::Ipv6Network; use model::NetworkInterfaceKind; use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; @@ -1772,11 +1869,11 @@ mod tests { use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::InstanceCpuCount; - use omicron_common::api::external::Ipv4Net; - use omicron_common::api::external::Ipv6Net; use omicron_common::api::external::MacAddr; use omicron_test_utils::dev; use omicron_test_utils::dev::db::CockroachInstance; + use oxnet::Ipv4Net; + use oxnet::Ipv6Net; use std::collections::HashSet; use std::convert::TryInto; use std::net::IpAddr; @@ -1896,25 +1993,13 @@ mod tests { let vpc_id = Uuid::new_v4(); let mut subnets = Vec::with_capacity(n_subnets as _); for i in 0..n_subnets { - let ipv4net = Ipv4Net( - Ipv4Network::new(Ipv4Addr::new(172, 30, 0, i), 28).unwrap(), - ); - let ipv6net = Ipv6Net( - Ipv6Network::new( - Ipv6Addr::new( - 0xfd12, - 0x3456, - 0x7890, - i.into(), - 0, - 0, - 0, - 0, - ), - 64, - ) - .unwrap(), - ); + let ipv4net = + Ipv4Net::new(Ipv4Addr::new(172, 30, 0, i), 28).unwrap(); + let ipv6net = Ipv6Net::new( + Ipv6Addr::new(0xfd12, 0x3456, 0x7890, i.into(), 0, 0, 0, 0), + 64, + ) + .unwrap(); let subnet = VpcSubnet::new( Uuid::new_v4(), vpc_id, @@ -1934,9 +2019,11 @@ mod tests { self.subnets .iter() .map(|subnet| { - subnet.ipv4_block.size() as usize - - NUM_INITIAL_RESERVED_IP_ADDRESSES - - 1 + let size_minus_1 = match subnet.ipv4_block.size() { + Some(n) => n - 1, + None => u32::MAX, + } as usize; + size_minus_1 - NUM_INITIAL_RESERVED_IP_ADDRESSES }) .collect() } @@ -2042,6 +2129,81 @@ mod tests { } } + #[tokio::test] + async fn test_delete_service_is_idempotent() { + let context = + TestContext::new("test_delete_service_is_idempotent", 2).await; + let service_id = Uuid::new_v4(); + let ip = context.net1.subnets[0] + .ipv4_block + .addr_iter() + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) + .unwrap(); + let interface = IncompleteNetworkInterface::new_service( + Uuid::new_v4(), + service_id, + context.net1.subnets[0].clone(), + IdentityMetadataCreateParams { + name: 
"service-nic".parse().unwrap(), + description: String::from("service nic"), + }, + ip.into(), + MacAddr::random_system(), + 0, + ) + .unwrap(); + let inserted_interface = context + .db_datastore + .service_create_network_interface_raw(&context.opctx, interface) + .await + .expect("Failed to insert interface"); + + // We should be able to delete twice, and be told that the first delete + // modified the row and the second did not. + let first_deleted = context + .db_datastore + .service_delete_network_interface( + &context.opctx, + service_id, + inserted_interface.id(), + ) + .await + .expect("failed first delete"); + assert!(first_deleted, "first delete removed interface"); + + let second_deleted = context + .db_datastore + .service_delete_network_interface( + &context.opctx, + service_id, + inserted_interface.id(), + ) + .await + .expect("failed second delete"); + assert!(!second_deleted, "second delete did nothing"); + + // Attempting to delete a nonexistent interface should fail. + let bogus_id = Uuid::new_v4(); + let err = context + .db_datastore + .service_delete_network_interface( + &context.opctx, + service_id, + bogus_id, + ) + .await + .expect_err( + "unexpectedly succeeded deleting nonexistent interface", + ); + let expected_err = + DeleteError::External(external::Error::ObjectNotFound { + type_name: external::ResourceType::ServiceNetworkInterface, + lookup_type: external::LookupType::ById(bogus_id), + }); + assert_eq!(err, expected_err); + context.success().await; + } + #[tokio::test] async fn test_insert_running_instance_fails() { let context = @@ -2142,7 +2304,7 @@ mod tests { TestContext::new("test_insert_sequential_ip_allocation", 2).await; let addresses = context.net1.subnets[0] .ipv4_block - .iter() + .addr_iter() .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES); for (i, expected_address) in addresses.take(2).enumerate() { @@ -2238,7 +2400,7 @@ mod tests { let service_id = Uuid::new_v4(); let ip = context.net1.subnets[0] .ipv4_block - .iter() + .addr_iter() .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) .unwrap(); let mac = MacAddr::random_system(); @@ -2273,7 +2435,7 @@ mod tests { let mut used_macs = HashSet::new(); let mut ips = context.net1.subnets[0] .ipv4_block - .iter() + .addr_iter() .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES); for slot in 0..u8::try_from(MAX_NICS_PER_INSTANCE).unwrap() { let service_id = Uuid::new_v4(); @@ -2300,7 +2462,7 @@ mod tests { .service_create_network_interface_raw(&context.opctx, interface) .await .expect("Failed to insert interface"); - assert_eq!(inserted_interface.slot, i16::from(slot)); + assert_eq!(*inserted_interface.slot, slot); } context.success().await; @@ -2313,7 +2475,7 @@ mod tests { let mut ips = context.net1.subnets[0] .ipv4_block - .iter() + .addr_iter() .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES); // Insert a service NIC @@ -2373,12 +2535,12 @@ mod tests { let ip0 = context.net1.subnets[0] .ipv4_block - .iter() + .addr_iter() .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) .unwrap(); let ip1 = context.net1.subnets[1] .ipv4_block - .iter() + .addr_iter() .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) .unwrap(); @@ -2413,7 +2575,7 @@ mod tests { .service_create_network_interface_raw(&context.opctx, interface) .await .expect("Failed to insert interface"); - assert_eq!(inserted_interface.slot, 0); + assert_eq!(*inserted_interface.slot, 0); // Inserting an interface with the same slot on the same service should let new_interface = IncompleteNetworkInterface::new_service( @@ -2776,8 +2938,7 @@ mod tests { ) .await .expect("Should be able to insert up to 8 
interfaces"); - let actual_slot = usize::try_from(inserted_interface.slot) - .expect("Bad slot index"); + let actual_slot = usize::from(*inserted_interface.slot); assert_eq!( slot, actual_slot, "Failed to allocate next available interface slot" diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index f31b32bf41..83cc7483c9 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -5,7 +5,6 @@ //! Implementation of queries for provisioning regions. use crate::db::column_walker::AllColumnsOf; -use crate::db::datastore::REGION_REDUNDANCY_THRESHOLD; use crate::db::model::{Dataset, Region}; use crate::db::raw_query_builder::{QueryBuilder, TypedSqlQuery}; use crate::db::schema; @@ -74,6 +73,7 @@ pub fn allocation_query( blocks_per_extent: u64, extent_count: u64, allocation_strategy: &RegionAllocationStrategy, + redundancy: usize, ) -> TypedSqlQuery<(SelectableSql, SelectableSql)> { let (seed, distinct_sleds) = { let (input_seed, distinct_sleds) = match allocation_strategy { @@ -90,7 +90,7 @@ pub fn allocation_query( .unwrap() .as_nanos() }, - |seed| seed as u128, + |seed| u128::from(seed), ), distinct_sleds, ) @@ -99,7 +99,7 @@ pub fn allocation_query( let seed = seed.to_le_bytes().to_vec(); let size_delta = block_size * blocks_per_extent * extent_count; - let redundancy: i64 = i64::try_from(REGION_REDUNDANCY_THRESHOLD).unwrap(); + let redundancy: i64 = i64::try_from(redundancy).unwrap(); let builder = QueryBuilder::new().sql( // Find all old regions associated with a particular volume @@ -117,13 +117,23 @@ pub fn allocation_query( dataset.pool_id, sum(dataset.size_used) AS size_used FROM dataset WHERE ((dataset.size_used IS NOT NULL) AND (dataset.time_deleted IS NULL)) GROUP BY dataset.pool_id),") + + // Any zpool already have this volume's existing regions? .sql(" - candidate_zpools AS ("); + existing_zpools AS ( + SELECT + dataset.pool_id + FROM + dataset INNER JOIN old_regions ON (old_regions.dataset_id = dataset.id) + ),") - // Identifies zpools with enough space for region allocation. + // Identifies zpools with enough space for region allocation, that are not + // currently used by this Volume's existing regions. // // NOTE: 'distinct_sleds' changes the format of the underlying SQL query, as it uses // distinct bind parameters depending on the conditional branch. 
+ .sql(" + candidate_zpools AS ("); let builder = if distinct_sleds { builder.sql("SELECT DISTINCT ON (zpool.sled_id) ") } else { @@ -131,21 +141,22 @@ pub fn allocation_query( }; let builder = builder.sql(" old_zpool_usage.pool_id - FROM ( + FROM old_zpool_usage - INNER JOIN + INNER JOIN (zpool INNER JOIN sled ON (zpool.sled_id = sled.id)) ON (zpool.id = old_zpool_usage.pool_id) - ) + INNER JOIN + physical_disk ON (zpool.physical_disk_id = physical_disk.id) WHERE ( - ((old_zpool_usage.size_used + ").param().sql(" ) <= + (old_zpool_usage.size_used + ").param().sql(" ) <= (SELECT total_size FROM omicron.public.inv_zpool WHERE inv_zpool.id = old_zpool_usage.pool_id ORDER BY inv_zpool.time_collected DESC LIMIT 1) - ) - AND - (sled.sled_policy = 'in_service') - AND - (sled.sled_state = 'active') + AND sled.sled_policy = 'in_service' + AND sled.sled_state = 'active' + AND physical_disk.disk_policy = 'in_service' + AND physical_disk.disk_state = 'active' + AND NOT(zpool.id = ANY(SELECT existing_zpools.pool_id FROM existing_zpools)) )" ).bind::(size_delta as i64); @@ -182,6 +193,7 @@ pub fn allocation_query( ORDER BY dataset.pool_id, md5((CAST(dataset.id as BYTEA) || ").param().sql(")) ),") .bind::(seed.clone()) + // We order by md5 to shuffle the ordering of the datasets. // md5 has a uniform output distribution so it does the job. .sql(" @@ -194,6 +206,7 @@ pub fn allocation_query( ),") .bind::(seed) .bind::(redundancy) + // Create the regions-to-be-inserted for the volume. .sql(" candidate_regions AS ( @@ -206,12 +219,20 @@ pub fn allocation_query( ").param().sql(" AS block_size, ").param().sql(" AS blocks_per_extent, ").param().sql(" AS extent_count - FROM shuffled_candidate_datasets + FROM shuffled_candidate_datasets") + // Only select the *additional* number of candidate regions for the required + // redundancy level + .sql(" + LIMIT (").param().sql(" - ( + SELECT COUNT(*) FROM old_regions + )) ),") .bind::(volume_id) .bind::(block_size as i64) .bind::(blocks_per_extent as i64) .bind::(extent_count as i64) + .bind::(redundancy) + // A subquery which summarizes the changes we intend to make, showing: // // 1. Which datasets will have size adjustments @@ -225,6 +246,7 @@ pub fn allocation_query( ((candidate_regions.block_size * candidate_regions.blocks_per_extent) * candidate_regions.extent_count) AS size_used_delta FROM (candidate_regions INNER JOIN dataset ON (dataset.id = candidate_regions.dataset_id)) ),") + // Confirms whether or not the insertion and updates should // occur. // @@ -251,17 +273,60 @@ pub fn allocation_query( // an error instead. 
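
To make the redundancy accounting above concrete: with a threshold of three regions (the current `REGION_REDUNDANCY_THRESHOLD`) and one region already allocated for the volume, `candidate_regions` is capped at the two additional regions still needed, and `do_insert` only passes when candidate plus existing zpools, candidate plus old regions, and distinct zpools across old and new regions each reach the threshold. A sketch of the arithmetic and of how callers now supply the threshold explicitly (values and bindings assumed for illustration):

// Redundancy accounting, assuming REGION_REDUNDANCY_THRESHOLD = 3 and one
// region already allocated for this volume.
let redundancy: usize = REGION_REDUNDANCY_THRESHOLD;
let old_regions: usize = 1;
// LIMIT applied to candidate_regions: only the regions still missing.
let additional_regions = redundancy.saturating_sub(old_regions);
assert_eq!(additional_regions, 2);

// The caller passes the threshold, as the updated tests below do:
let query = allocation_query(
    volume_id,
    block_size,
    blocks_per_extent,
    extent_count,
    &RegionAllocationStrategy::Random { seed: None },
    REGION_REDUNDANCY_THRESHOLD,
);
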
.sql(" do_insert AS ( - SELECT ((( - ((SELECT COUNT(*) FROM old_regions LIMIT 1) < ").param().sql(") AND - CAST(IF(((SELECT COUNT(*) FROM candidate_zpools LIMIT 1) >= ").param().sql(concatcp!("), 'TRUE', '", NOT_ENOUGH_ZPOOL_SPACE_SENTINEL, "') AS BOOL)) AND - CAST(IF(((SELECT COUNT(*) FROM candidate_regions LIMIT 1) >= ")).param().sql(concatcp!("), 'TRUE', '", NOT_ENOUGH_DATASETS_SENTINEL, "') AS BOOL)) AND - CAST(IF(((SELECT COUNT(DISTINCT dataset.pool_id) FROM (candidate_regions INNER JOIN dataset ON (candidate_regions.dataset_id = dataset.id)) LIMIT 1) >= ")).param().sql(concatcp!("), 'TRUE', '", NOT_ENOUGH_UNIQUE_ZPOOLS_SENTINEL, "') AS BOOL) - ) AS insert - ),")) + SELECT (((") + // There's regions not allocated yet + .sql(" + ((SELECT COUNT(*) FROM old_regions LIMIT 1) < ").param().sql(") AND") + // Enough filtered candidate zpools + existing zpools to meet redundancy + .sql(" + CAST(IF((( + ( + (SELECT COUNT(*) FROM candidate_zpools LIMIT 1) + + (SELECT COUNT(*) FROM existing_zpools LIMIT 1) + ) + ) >= ").param().sql(concatcp!("), 'TRUE', '", NOT_ENOUGH_ZPOOL_SPACE_SENTINEL, "') AS BOOL)) AND")) + // Enough candidate regions + existing regions to meet redundancy + .sql(" + CAST(IF((( + ( + (SELECT COUNT(*) FROM candidate_regions LIMIT 1) + + (SELECT COUNT(*) FROM old_regions LIMIT 1) + ) + ) >= ").param().sql(concatcp!("), 'TRUE', '", NOT_ENOUGH_DATASETS_SENTINEL, "') AS BOOL)) AND")) + // Enough unique zpools (looking at both existing and new) to meet redundancy + .sql(" + CAST(IF((( + ( + SELECT + COUNT(DISTINCT pool_id) + FROM + ( + ( + SELECT + dataset.pool_id + FROM + candidate_regions + INNER JOIN dataset ON (candidate_regions.dataset_id = dataset.id) + ) + UNION + ( + SELECT + dataset.pool_id + FROM + old_regions + INNER JOIN dataset ON (old_regions.dataset_id = dataset.id) + ) + ) + LIMIT 1 + ) + ) >= ").param().sql(concatcp!("), 'TRUE', '", NOT_ENOUGH_UNIQUE_ZPOOLS_SENTINEL, "') AS BOOL) + ) AS insert + ),")) .bind::(redundancy) .bind::(redundancy) .bind::(redundancy) .bind::(redundancy) + .sql(" inserted_regions AS ( INSERT INTO region @@ -302,6 +367,7 @@ UNION #[cfg(test)] mod test { use super::*; + use crate::db::datastore::REGION_REDUNDANCY_THRESHOLD; use crate::db::explain::ExplainableAsync; use crate::db::raw_query_builder::expectorate_query_contents; use nexus_test_utils::db::test_setup_database; @@ -328,6 +394,7 @@ mod test { &RegionAllocationStrategy::RandomWithDistinctSleds { seed: Some(1), }, + REGION_REDUNDANCY_THRESHOLD, ); expectorate_query_contents( ®ion_allocate, @@ -343,6 +410,7 @@ mod test { blocks_per_extent, extent_count, &RegionAllocationStrategy::Random { seed: Some(1) }, + REGION_REDUNDANCY_THRESHOLD, ); expectorate_query_contents( ®ion_allocate, @@ -375,6 +443,7 @@ mod test { blocks_per_extent, extent_count, &RegionAllocationStrategy::RandomWithDistinctSleds { seed: None }, + REGION_REDUNDANCY_THRESHOLD, ); let _ = region_allocate .explain_async(&conn) @@ -389,6 +458,7 @@ mod test { blocks_per_extent, extent_count, &RegionAllocationStrategy::Random { seed: None }, + REGION_REDUNDANCY_THRESHOLD, ); let _ = region_allocate .explain_async(&conn) diff --git a/nexus/db-queries/src/db/queries/vpc_subnet.rs b/nexus/db-queries/src/db/queries/vpc_subnet.rs index 9ddec32080..72f2771a1e 100644 --- a/nexus/db-queries/src/db/queries/vpc_subnet.rs +++ b/nexus/db-queries/src/db/queries/vpc_subnet.rs @@ -43,7 +43,9 @@ impl SubnetError { DatabaseErrorKind::NotNullViolation, ref info, ) if info.message() == IPV4_OVERLAP_ERROR_MESSAGE => { - 
SubnetError::OverlappingIpRange(subnet.ipv4_block.0 .0.into()) + SubnetError::OverlappingIpRange(ipnetwork::IpNetwork::V4( + subnet.ipv4_block.0.into(), + )) } // Attempt to insert overlapping IPv6 subnet @@ -51,7 +53,9 @@ impl SubnetError { DatabaseErrorKind::NotNullViolation, ref info, ) if info.message() == IPV6_OVERLAP_ERROR_MESSAGE => { - SubnetError::OverlappingIpRange(subnet.ipv6_block.0 .0.into()) + SubnetError::OverlappingIpRange(ipnetwork::IpNetwork::V6( + subnet.ipv6_block.0.into(), + )) } // Conflicting name for the subnet within a VPC @@ -233,8 +237,10 @@ pub struct FilterConflictingVpcSubnetRangesQuery { impl FilterConflictingVpcSubnetRangesQuery { pub fn new(subnet: VpcSubnet) -> Self { - let ipv4_block = ipnetwork::IpNetwork::from(subnet.ipv4_block.0 .0); - let ipv6_block = ipnetwork::IpNetwork::from(subnet.ipv6_block.0 .0); + let ipv4_block = + ipnetwork::Ipv4Network::from(subnet.ipv4_block.0).into(); + let ipv6_block = + ipnetwork::Ipv6Network::from(subnet.ipv6_block.0).into(); Self { subnet, ipv4_block, ipv6_block } } } @@ -394,8 +400,6 @@ mod test { use ipnetwork::IpNetwork; use nexus_test_utils::db::test_setup_database; use omicron_common::api::external::IdentityMetadataCreateParams; - use omicron_common::api::external::Ipv4Net; - use omicron_common::api::external::Ipv6Net; use omicron_common::api::external::Name; use omicron_test_utils::dev; use std::convert::TryInto; @@ -409,10 +413,10 @@ mod test { name: name.clone(), description: description.to_string(), }; - let ipv4_block = Ipv4Net("172.30.0.0/22".parse().unwrap()); - let other_ipv4_block = Ipv4Net("172.31.0.0/22".parse().unwrap()); - let ipv6_block = Ipv6Net("fd12:3456:7890::/64".parse().unwrap()); - let other_ipv6_block = Ipv6Net("fd00::/64".parse().unwrap()); + let ipv4_block = "172.30.0.0/22".parse().unwrap(); + let other_ipv4_block = "172.31.0.0/22".parse().unwrap(); + let ipv6_block = "fd12:3456:7890::/64".parse().unwrap(); + let other_ipv6_block = "fd00::/64".parse().unwrap(); let name = "a-name".to_string().try_into().unwrap(); let other_name = "b-name".to_string().try_into().unwrap(); let description = "some description".to_string(); @@ -491,7 +495,7 @@ mod test { .expect_err("Should not be able to insert VPC Subnet with overlapping IPv6 range"); assert_eq!( err, - SubnetError::OverlappingIpRange(IpNetwork::from(ipv6_block.0)), + SubnetError::OverlappingIpRange(ipnetwork::IpNetwork::from(oxnet::IpNet::from(ipv6_block))), "SubnetError variant should include the exact IP range that overlaps" ); let new_row = VpcSubnet::new( @@ -507,7 +511,7 @@ mod test { .expect_err("Should not be able to insert VPC Subnet with overlapping IPv4 range"); assert_eq!( err, - SubnetError::OverlappingIpRange(IpNetwork::from(ipv4_block.0)), + SubnetError::OverlappingIpRange(ipnetwork::IpNetwork::from(oxnet::IpNet::from(ipv4_block))), "SubnetError variant should include the exact IP range that overlaps" ); diff --git a/nexus/db-queries/src/db/raw_query_builder.rs b/nexus/db-queries/src/db/raw_query_builder.rs index 1dc4f4c054..d108062833 100644 --- a/nexus/db-queries/src/db/raw_query_builder.rs +++ b/nexus/db-queries/src/db/raw_query_builder.rs @@ -69,18 +69,6 @@ enum TrustedStrVariants { ValidatedExplicitly(String), } -trait SqlQueryBinds { - fn add_bind(self, bind_counter: &BindParamCounter) -> Self; -} - -impl<'a, Query> SqlQueryBinds - for diesel::query_builder::BoxedSqlQuery<'a, Pg, Query> -{ - fn add_bind(self, bind_counter: &BindParamCounter) -> Self { - self.sql("$").sql(bind_counter.next().to_string()) - } -} - type 
BoxedQuery = diesel::query_builder::BoxedSqlQuery< 'static, Pg, diff --git a/nexus/db-queries/src/db/sec_store.rs b/nexus/db-queries/src/db/sec_store.rs index 1c63a48463..f8fd4ab86d 100644 --- a/nexus/db-queries/src/db/sec_store.rs +++ b/nexus/db-queries/src/db/sec_store.rs @@ -7,9 +7,13 @@ use crate::db::{self, model::Generation}; use anyhow::Context; use async_trait::async_trait; +use dropshot::HttpError; +use futures::TryFutureExt; +use omicron_common::backoff; use slog::Logger; use std::fmt; use std::sync::Arc; +use std::time::Duration; use steno::SagaId; /// Implementation of [`steno::SecStore`] backed by the Omicron CockroachDB @@ -53,16 +57,74 @@ impl steno::SecStore for CockroachDbSecStore { } async fn record_event(&self, event: steno::SagaNodeEvent) { - debug!(&self.log, "recording saga event"; + let log = self.log.new(o!( "saga_id" => event.saga_id.to_string(), - "node_id" => ?event.node_id, - "event_type" => ?event.event_type, - ); + "node_id" => event.node_id.to_string(), + "event_type" => format!("{:?}", event.event_type), + )); + + debug!(&log, "recording saga event"); let our_event = db::saga_types::SagaNodeEvent::new(event, self.sec_id); - // TODO-robustness This should be wrapped with a retry loop rather than - // unwrapping the result. See omicron#2416. - self.datastore.saga_create_event(&our_event).await.unwrap(); + backoff::retry_notify_ext( + // This is an internal service query to CockroachDB. + backoff::retry_policy_internal_service(), + || { + // An interesting question is how to handle errors. + // + // In general, there are some kinds of database errors that are + // temporary/server errors (e.g. network failures), and some + // that are permanent/client errors (e.g. conflict during + // insertion). The permanent ones would require operator + // intervention to fix. + // + // However, there is no way to bubble up errors here, and for + // good reason: it is inherent to the nature of sagas that + // progress is durably recorded. So within *this* code there is + // no option but to retry forever. (Below, however, we do mark + // errors that likely require operator intervention.) + // + // At a higher level, callers should plan for the fact that + // record_event could potentially loop forever. See + // https://github.com/oxidecomputer/omicron/issues/5406 and the + // note in `nexus/src/app/saga.rs`'s `execute_saga` for more + // details. 
+ self.datastore + .saga_create_event(&our_event) + .map_err(backoff::BackoffError::transient) + }, + move |error, call_count, total_duration| { + let http_error = HttpError::from(error.clone()); + if http_error.status_code.is_client_error() { + error!( + &log, + "client error while recording saga event (likely \ + requires operator intervention), retrying anyway"; + "error" => &error, + "call_count" => call_count, + "total_duration" => ?total_duration, + ); + } else if total_duration > Duration::from_secs(20) { + warn!( + &log, + "server error while recording saga event, retrying"; + "error" => &error, + "call_count" => call_count, + "total_duration" => ?total_duration, + ); + } else { + info!( + &log, + "server error while recording saga event, retrying"; + "error" => &error, + "call_count" => call_count, + "total_duration" => ?total_duration, + ); + } + }, + ) + .await + .expect("the above backoff retries forever") } async fn saga_update(&self, id: SagaId, update: steno::SagaCachedState) { diff --git a/nexus/db-queries/src/transaction_retry.rs b/nexus/db-queries/src/transaction_retry.rs index 74a94a0b8f..558bb574c9 100644 --- a/nexus/db-queries/src/transaction_retry.rs +++ b/nexus/db-queries/src/transaction_retry.rs @@ -353,7 +353,9 @@ mod test { assert_eq!( target_fields["name"].value, FieldValue::String( - "test_transaction_retry_produces_samples".to_string() + "test_transaction_retry_produces_samples" + .to_string() + .into() ) ); diff --git a/nexus/db-queries/tests/output/authz-roles.out b/nexus/db-queries/tests/output/authz-roles.out index ee55d775f0..0482cdfd2a 100644 --- a/nexus/db-queries/tests/output/authz-roles.out +++ b/nexus/db-queries/tests/output/authz-roles.out @@ -894,7 +894,7 @@ resource: Service id "7f7bb301-5dc9-41f1-ab29-d369f4835079" silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! 
-resource: PhysicalDisk id "vendor-serial-model" +resource: PhysicalDisk id "c9f923f6-caf3-4c83-96f9-8ffe8c627dd2" USER Q R LC RP M MP CC D fleet-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ diff --git a/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql b/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql index 7aa85458a6..b797e0bef7 100644 --- a/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_distinct_sleds.sql @@ -26,6 +26,13 @@ WITH GROUP BY dataset.pool_id ), + existing_zpools + AS ( + SELECT + dataset.pool_id + FROM + dataset INNER JOIN old_regions ON old_regions.dataset_id = dataset.id + ), candidate_zpools AS ( SELECT @@ -34,6 +41,7 @@ WITH old_zpool_usage INNER JOIN (zpool INNER JOIN sled ON zpool.sled_id = sled.id) ON zpool.id = old_zpool_usage.pool_id + INNER JOIN physical_disk ON zpool.physical_disk_id = physical_disk.id WHERE (old_zpool_usage.size_used + $2) <= ( @@ -50,6 +58,9 @@ WITH ) AND sled.sled_policy = 'in_service' AND sled.sled_state = 'active' + AND physical_disk.disk_policy = 'in_service' + AND physical_disk.disk_state = 'active' + AND NOT (zpool.id = ANY (SELECT existing_zpools.pool_id FROM existing_zpools)) ORDER BY zpool.sled_id, md5(CAST(zpool.id AS BYTES) || $3) ), @@ -89,6 +100,8 @@ WITH $10 AS extent_count FROM shuffled_candidate_datasets + LIMIT + $11 - (SELECT count(*) FROM old_regions) ), proposed_dataset_changes AS ( @@ -107,10 +120,18 @@ WITH SELECT ( ( - (SELECT count(*) FROM old_regions LIMIT 1) < $11 + (SELECT count(*) FROM old_regions LIMIT 1) < $12 AND CAST( IF( - ((SELECT count(*) FROM candidate_zpools LIMIT 1) >= $12), + ( + ( + ( + (SELECT count(*) FROM candidate_zpools LIMIT 1) + + (SELECT count(*) FROM existing_zpools LIMIT 1) + ) + ) + >= $13 + ), 'TRUE', 'Not enough space' ) @@ -119,7 +140,15 @@ WITH ) AND CAST( IF( - ((SELECT count(*) FROM candidate_regions LIMIT 1) >= $13), + ( + ( + ( + (SELECT count(*) FROM candidate_regions LIMIT 1) + + (SELECT count(*) FROM old_regions LIMIT 1) + ) + ) + >= $14 + ), 'TRUE', 'Not enough datasets' ) @@ -130,15 +159,31 @@ WITH IF( ( ( - SELECT - count(DISTINCT dataset.pool_id) - FROM - candidate_regions - INNER JOIN dataset ON candidate_regions.dataset_id = dataset.id - LIMIT - 1 + ( + SELECT + count(DISTINCT pool_id) + FROM + ( + ( + SELECT + dataset.pool_id + FROM + candidate_regions + INNER JOIN dataset ON candidate_regions.dataset_id = dataset.id + ) + UNION + ( + SELECT + dataset.pool_id + FROM + old_regions INNER JOIN dataset ON old_regions.dataset_id = dataset.id + ) + ) + LIMIT + 1 + ) ) - >= $14 + >= $15 ), 'TRUE', 'Not enough unique zpools selected' diff --git a/nexus/db-queries/tests/output/region_allocate_random_sleds.sql b/nexus/db-queries/tests/output/region_allocate_random_sleds.sql index 0918c8f2d1..4f60ddf5fe 100644 --- a/nexus/db-queries/tests/output/region_allocate_random_sleds.sql +++ b/nexus/db-queries/tests/output/region_allocate_random_sleds.sql @@ -26,6 +26,13 @@ WITH GROUP BY dataset.pool_id ), + existing_zpools + AS ( + SELECT + dataset.pool_id + FROM + dataset INNER JOIN old_regions ON old_regions.dataset_id = dataset.id + ), candidate_zpools AS ( SELECT @@ -34,6 +41,7 @@ WITH old_zpool_usage INNER JOIN (zpool INNER JOIN sled ON zpool.sled_id = sled.id) ON zpool.id = old_zpool_usage.pool_id + INNER JOIN physical_disk ON zpool.physical_disk_id = physical_disk.id WHERE (old_zpool_usage.size_used + $2) <= ( @@ -50,6 +58,9 @@ WITH ) AND sled.sled_policy = 'in_service' AND sled.sled_state = 'active' + AND 
physical_disk.disk_policy = 'in_service' + AND physical_disk.disk_state = 'active' + AND NOT (zpool.id = ANY (SELECT existing_zpools.pool_id FROM existing_zpools)) ), candidate_datasets AS ( @@ -87,6 +98,8 @@ WITH $9 AS extent_count FROM shuffled_candidate_datasets + LIMIT + $10 - (SELECT count(*) FROM old_regions) ), proposed_dataset_changes AS ( @@ -105,10 +118,18 @@ WITH SELECT ( ( - (SELECT count(*) FROM old_regions LIMIT 1) < $10 + (SELECT count(*) FROM old_regions LIMIT 1) < $11 AND CAST( IF( - ((SELECT count(*) FROM candidate_zpools LIMIT 1) >= $11), + ( + ( + ( + (SELECT count(*) FROM candidate_zpools LIMIT 1) + + (SELECT count(*) FROM existing_zpools LIMIT 1) + ) + ) + >= $12 + ), 'TRUE', 'Not enough space' ) @@ -117,7 +138,15 @@ WITH ) AND CAST( IF( - ((SELECT count(*) FROM candidate_regions LIMIT 1) >= $12), + ( + ( + ( + (SELECT count(*) FROM candidate_regions LIMIT 1) + + (SELECT count(*) FROM old_regions LIMIT 1) + ) + ) + >= $13 + ), 'TRUE', 'Not enough datasets' ) @@ -128,15 +157,31 @@ WITH IF( ( ( - SELECT - count(DISTINCT dataset.pool_id) - FROM - candidate_regions - INNER JOIN dataset ON candidate_regions.dataset_id = dataset.id - LIMIT - 1 + ( + SELECT + count(DISTINCT pool_id) + FROM + ( + ( + SELECT + dataset.pool_id + FROM + candidate_regions + INNER JOIN dataset ON candidate_regions.dataset_id = dataset.id + ) + UNION + ( + SELECT + dataset.pool_id + FROM + old_regions INNER JOIN dataset ON old_regions.dataset_id = dataset.id + ) + ) + LIMIT + 1 + ) ) - >= $13 + >= $14 ), 'TRUE', 'Not enough unique zpools selected' diff --git a/nexus/defaults/Cargo.toml b/nexus/defaults/Cargo.toml index 535b78054b..1d941deb8e 100644 --- a/nexus/defaults/Cargo.toml +++ b/nexus/defaults/Cargo.toml @@ -4,9 +4,13 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] ipnetwork.workspace = true once_cell.workspace = true +oxnet.workspace = true rand.workspace = true serde_json.workspace = true diff --git a/nexus/defaults/src/lib.rs b/nexus/defaults/src/lib.rs index dd08b4e4ab..32def47b9e 100644 --- a/nexus/defaults/src/lib.rs +++ b/nexus/defaults/src/lib.rs @@ -4,12 +4,10 @@ //! Default values for data in the Nexus API, when not provided explicitly in a request. -use ipnetwork::Ipv4Network; -use ipnetwork::Ipv6Network; use omicron_common::api::external; -use omicron_common::api::external::Ipv4Net; -use omicron_common::api::external::Ipv6Net; use once_cell::sync::Lazy; +use oxnet::Ipv4Net; +use oxnet::Ipv6Net; use std::net::Ipv4Addr; use std::net::Ipv6Addr; @@ -20,10 +18,8 @@ pub const DEFAULT_PRIMARY_NIC_NAME: &str = "net0"; /// The default IPv4 subnet range assigned to the default VPC Subnet, when /// the VPC is created, if one is not provided in the request. See /// for details. 
-pub static DEFAULT_VPC_SUBNET_IPV4_BLOCK: Lazy = - Lazy::new(|| { - Ipv4Net(Ipv4Network::new(Ipv4Addr::new(172, 30, 0, 0), 22).unwrap()) - }); +pub static DEFAULT_VPC_SUBNET_IPV4_BLOCK: Lazy = + Lazy::new(|| Ipv4Net::new(Ipv4Addr::new(172, 30, 0, 0), 22).unwrap()); pub static DEFAULT_FIREWALL_RULES: Lazy = Lazy::new(|| { @@ -73,24 +69,24 @@ pub fn random_vpc_ipv6_prefix() -> Result { "Unable to allocate random IPv6 address range", ) })?; - Ok(Ipv6Net( - Ipv6Network::new( - Ipv6Addr::from(bytes), - Ipv6Net::VPC_IPV6_PREFIX_LENGTH, - ) - .unwrap(), - )) + Ok(Ipv6Net::new( + Ipv6Addr::from(bytes), + omicron_common::address::VPC_IPV6_PREFIX_LENGTH, + ) + .unwrap()) } #[cfg(test)] mod tests { + use omicron_common::api::external::Ipv6NetExt; + use super::*; #[test] fn test_random_vpc_ipv6_prefix() { let network = random_vpc_ipv6_prefix().unwrap(); assert!(network.is_vpc_prefix()); - let octets = network.network().octets(); + let octets = network.prefix().octets(); assert!(octets[6..].iter().all(|x| *x == 0)); } } diff --git a/nexus/examples/config.toml b/nexus/examples/config.toml index 2e946a9c38..d90c240e8e 100644 --- a/nexus/examples/config.toml +++ b/nexus/examples/config.toml @@ -91,6 +91,7 @@ dns_external.period_secs_config = 60 dns_external.period_secs_servers = 60 dns_external.period_secs_propagation = 60 dns_external.max_concurrent_server_updates = 5 +metrics_producer_gc.period_secs = 60 # How frequently we check the list of stored TLS certificates. This is # approximately an upper bound on how soon after updating the list of # certificates it will take _other_ Nexus instances to notice and stop serving @@ -106,11 +107,17 @@ inventory.nkeep = 5 # Disable inventory collection altogether (for emergencies) inventory.disable = false phantom_disks.period_secs = 30 +physical_disk_adoption.period_secs = 30 blueprints.period_secs_load = 10 blueprints.period_secs_execute = 60 sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 +# How frequently to query the status of active instances. +instance_watcher.period_secs = 30 +service_firewall_propagation.period_secs = 300 +v2p_mapping_propagation.period_secs = 30 +abandoned_vmm_reaper.period_secs = 60 [default_region_allocation_strategy] # allocate region on 3 random distinct zpools, on 3 random distinct sleds. 
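
The `ipnetwork`-to-`oxnet` migration running through these hunks replaces the old tuple-struct wrappers with direct constructors, and a couple of methods change shape along the way. A small before/after sketch, mirroring the changes in `nexus/defaults` and the network-interface tests above:

use std::net::Ipv4Addr;

// Before: omicron_common's Ipv4Net newtype wrapped an ipnetwork::Ipv4Network:
//     Ipv4Net(Ipv4Network::new(Ipv4Addr::new(172, 30, 0, 0), 22).unwrap())
// After: oxnet::Ipv4Net is built directly from an address and prefix length,
// or parsed from a string.
let block = oxnet::Ipv4Net::new(Ipv4Addr::new(172, 30, 0, 0), 22).unwrap();
let parsed: oxnet::Ipv4Net = "172.30.0.0/22".parse().unwrap();
assert_eq!(block, parsed);

// Address iteration is now `addr_iter()` rather than `iter()`, and `size()`
// returns an Option (None when the address count does not fit in a u32),
// which the subnet-capacity calculation above handles explicitly.
let first_addr = block.addr_iter().next();
assert!(first_addr.is_some());
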
diff --git a/nexus/inventory/Cargo.toml b/nexus/inventory/Cargo.toml index 1c20e8f8b6..e185808caa 100644 --- a/nexus/inventory/Cargo.toml +++ b/nexus/inventory/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true base64.workspace = true @@ -13,12 +16,14 @@ gateway-client.workspace = true gateway-messages.workspace = true nexus-types.workspace = true omicron-common.workspace = true +omicron-uuid-kinds.workspace = true reqwest.workspace = true serde_json.workspace = true sled-agent-client.workspace = true slog.workspace = true strum.workspace = true thiserror.workspace = true +typed-rng.workspace = true uuid.workspace = true omicron-workspace-hack.workspace = true diff --git a/nexus/inventory/src/builder.rs b/nexus/inventory/src/builder.rs index 2e482fcebf..bfa330669f 100644 --- a/nexus/inventory/src/builder.rs +++ b/nexus/inventory/src/builder.rs @@ -27,11 +27,15 @@ use nexus_types::inventory::RotState; use nexus_types::inventory::ServiceProcessor; use nexus_types::inventory::SledAgent; use nexus_types::inventory::Zpool; +use omicron_uuid_kinds::CollectionKind; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use std::collections::BTreeMap; use std::collections::BTreeSet; +use std::hash::Hash; use std::sync::Arc; use thiserror::Error; -use uuid::Uuid; +use typed_rng::TypedUuidRng; /// Describes an operational error encountered during the collection process /// @@ -84,8 +88,10 @@ pub struct CollectionBuilder { BTreeMap, CabooseFound>>, rot_pages_found: BTreeMap, RotPageFound>>, - sleds: BTreeMap, - omicron_zones: BTreeMap, + sleds: BTreeMap, + omicron_zones: BTreeMap, + // We just generate one UUID for each collection. + id_rng: TypedUuidRng, } impl CollectionBuilder { @@ -111,6 +117,7 @@ impl CollectionBuilder { rot_pages_found: BTreeMap::new(), sleds: BTreeMap::new(), omicron_zones: BTreeMap::new(), + id_rng: TypedUuidRng::from_entropy(), } } @@ -123,7 +130,7 @@ impl CollectionBuilder { } Collection { - id: Uuid::new_v4(), + id: self.id_rng.next(), errors: self.errors.into_iter().map(|e| e.to_string()).collect(), time_started: self.time_started, time_done: now_db_precision(), @@ -140,6 +147,18 @@ impl CollectionBuilder { } } + /// Within tests, set a seeded RNG for deterministic results. + /// + /// This will ensure that tests that use this builder will produce the same + /// results each time they are run. + pub fn set_rng_seed(&mut self, seed: H) -> &mut Self { + // Important to add some more bytes here, so that builders with the + // same seed but different purposes don't end up with the same UUIDs. + const SEED_EXTRA: &str = "collection-builder"; + self.id_rng.set_seed(seed, SEED_EXTRA); + self + } + /// Record service processor state `sp_state` reported by MGS /// /// `sp_type` and `slot` identify which SP this was. @@ -412,7 +431,7 @@ impl CollectionBuilder { source: &str, inventory: sled_agent_client::types::Inventory, ) -> Result<(), anyhow::Error> { - let sled_id = inventory.sled_id; + let sled_id = SledUuid::from_untyped_uuid(inventory.sled_id); // Normalize the baseboard id, if any. 
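
The seeded-RNG hook added to `CollectionBuilder` above exists so that inventory tests produce stable collection IDs. A sketch of how a test might use it; the constructor argument and the name of the finishing method are assumed from the existing builder API:

// Seed the builder so the collection ID (and any other RNG-derived values)
// come out identical on every run of this test.
let mut builder = CollectionBuilder::new("example-collector");
builder.set_rng_seed("my-test-name");
// ... record SPs, sleds, and Omicron zones as usual ...
let collection = builder.build();
// With the same seed and inputs, `collection.id` is deterministic across runs.
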
use sled_agent_client::types::Baseboard; @@ -429,8 +448,7 @@ impl CollectionBuilder { } Baseboard::Unknown => { self.found_error(InventoryError::from(anyhow!( - "sled {:?}: reported unknown baseboard", - sled_id + "sled {sled_id}: reported unknown baseboard", ))); None } @@ -444,8 +462,7 @@ impl CollectionBuilder { Ok(addr) => addr, Err(error) => { self.found_error(InventoryError::from(anyhow!( - "sled {:?}: bad sled agent address: {:?}: {:#}", - sled_id, + "sled {sled_id}: bad sled agent address: {:?}: {:#}", inventory.sled_agent_address, error, ))); @@ -473,11 +490,8 @@ impl CollectionBuilder { if let Some(previous) = self.sleds.get(&sled_id) { Err(anyhow!( - "sled {:?}: reported sled multiple times \ - (previously {:?}, now {:?})", - sled_id, - previous, - sled, + "sled {sled_id}: reported sled multiple times \ + (previously {previous:?}, now {sled:?})", )) } else { self.sleds.insert(sled_id, sled); @@ -489,13 +503,12 @@ impl CollectionBuilder { pub fn found_sled_omicron_zones( &mut self, source: &str, - sled_id: Uuid, + sled_id: SledUuid, zones: sled_agent_client::types::OmicronZonesConfig, ) -> Result<(), anyhow::Error> { if let Some(previous) = self.omicron_zones.get(&sled_id) { Err(anyhow!( - "sled {:?} omicron zones: reported previously: {:?}", - sled_id, + "sled {sled_id} omicron zones: reported previously: {:?}", previous )) } else { diff --git a/nexus/inventory/src/collector.rs b/nexus/inventory/src/collector.rs index ad5ae7d024..48761479b0 100644 --- a/nexus/inventory/src/collector.rs +++ b/nexus/inventory/src/collector.rs @@ -15,6 +15,8 @@ use nexus_types::inventory::CabooseWhich; use nexus_types::inventory::Collection; use nexus_types::inventory::RotPage; use nexus_types::inventory::RotPageWhich; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use slog::o; use slog::{debug, error}; use std::sync::Arc; @@ -347,7 +349,7 @@ impl<'a> Collector<'a> { } }; - let sled_id = inventory.sled_id; + let sled_id = SledUuid::from_untyped_uuid(inventory.sled_id); self.in_progress.found_sled_inventory(&sled_agent_url, inventory)?; let maybe_config = @@ -490,7 +492,7 @@ mod test { &mut s, " zone {} type {}\n", zone.id, - zone.zone_type.label(), + zone.zone_type.kind(), ) .unwrap(); } diff --git a/nexus/inventory/src/examples.rs b/nexus/inventory/src/examples.rs index 5cc6b687d4..1a0c70f456 100644 --- a/nexus/inventory/src/examples.rs +++ b/nexus/inventory/src/examples.rs @@ -17,9 +17,10 @@ use nexus_types::inventory::OmicronZonesConfig; use nexus_types::inventory::RotPage; use nexus_types::inventory::RotPageWhich; use omicron_common::api::external::ByteCount; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use std::sync::Arc; use strum::IntoEnumIterator; -use uuid::Uuid; /// Returns an example Collection used for testing /// @@ -276,7 +277,7 @@ pub fn representative() -> Representative { let disks = vec![ // Let's say we have one manufacturer for our M.2... sled_agent_client::types::InventoryDisk { - identity: sled_agent_client::types::DiskIdentity { + identity: omicron_common::disk::DiskIdentity { vendor: "macrohard".to_string(), model: "box".to_string(), serial: "XXIV".to_string(), @@ -286,7 +287,7 @@ pub fn representative() -> Representative { }, // ... 
and a couple different vendors for our U.2s sled_agent_client::types::InventoryDisk { - identity: sled_agent_client::types::DiskIdentity { + identity: omicron_common::disk::DiskIdentity { vendor: "memetendo".to_string(), model: "swatch".to_string(), serial: "0001".to_string(), @@ -295,7 +296,7 @@ pub fn representative() -> Representative { slot: 1, }, sled_agent_client::types::InventoryDisk { - identity: sled_agent_client::types::DiskIdentity { + identity: omicron_common::disk::DiskIdentity { vendor: "memetendo".to_string(), model: "swatch".to_string(), serial: "0002".to_string(), @@ -304,7 +305,7 @@ pub fn representative() -> Representative { slot: 2, }, sled_agent_client::types::InventoryDisk { - identity: sled_agent_client::types::DiskIdentity { + identity: omicron_common::disk::DiskIdentity { vendor: "tony".to_string(), model: "craystation".to_string(), serial: "5".to_string(), @@ -447,7 +448,19 @@ pub struct Representative { pub sleds: [Arc; 4], pub switch: Arc, pub psc: Arc, - pub sled_agents: [Uuid; 4], + pub sled_agents: [SledUuid; 4], +} + +impl Representative { + pub fn new( + builder: CollectionBuilder, + sleds: [Arc; 4], + switch: Arc, + psc: Arc, + sled_agents: [SledUuid; 4], + ) -> Self { + Self { builder, sleds, switch, psc, sled_agents } + } } /// Returns an SP state that can be used to populate a collection for testing @@ -487,7 +500,7 @@ pub fn rot_page(unique: &str) -> RotPage { } pub fn sled_agent( - sled_id: Uuid, + sled_id: SledUuid, baseboard: sled_agent_client::types::Baseboard, sled_role: sled_agent_client::types::SledRole, disks: Vec, @@ -498,7 +511,7 @@ pub fn sled_agent( reservoir_size: ByteCount::from(1024), sled_role, sled_agent_address: "[::1]:56792".parse().unwrap(), - sled_id, + sled_id: sled_id.into_untyped_uuid(), usable_hardware_threads: 10, usable_physical_ram: ByteCount::from(1024 * 1024), disks, diff --git a/nexus/macros-common/Cargo.toml b/nexus/macros-common/Cargo.toml index 9d4390a6d2..6f7266a5a6 100644 --- a/nexus/macros-common/Cargo.toml +++ b/nexus/macros-common/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] proc-macro2.workspace = true syn = { workspace = true, features = ["extra-traits"] } diff --git a/nexus/metrics-producer-gc/Cargo.toml b/nexus/metrics-producer-gc/Cargo.toml new file mode 100644 index 0000000000..74d94e8d95 --- /dev/null +++ b/nexus/metrics-producer-gc/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "nexus-metrics-producer-gc" +version = "0.1.0" +edition = "2021" + +[lints] +workspace = true + +[build-dependencies] +omicron-rpaths.workspace = true + +[dependencies] +chrono.workspace = true +futures.workspace = true +nexus-db-queries.workspace = true +omicron-common.workspace = true +oximeter-client.workspace = true +slog.workspace = true +slog-error-chain.workspace = true +thiserror.workspace = true +uuid.workspace = true + +# See omicron-rpaths for more about the "pq-sys" dependency. This is needed +# because we use the database in the test suite, though it doesn't appear to +# work to put the pq-sys dependency only in dev-dependencies. 
+pq-sys = "*" + +omicron-workspace-hack.workspace = true + +[dev-dependencies] +async-bb8-diesel.workspace = true +diesel.workspace = true +httptest.workspace = true +ipnetwork.workspace = true +nexus-db-model.workspace = true +nexus-db-queries = { workspace = true, features = ["testing"] } +nexus-test-utils.workspace = true +nexus-types.workspace = true +omicron-test-utils.workspace = true +tokio.workspace = true diff --git a/nexus/metrics-producer-gc/build.rs b/nexus/metrics-producer-gc/build.rs new file mode 100644 index 0000000000..1ba9acd41c --- /dev/null +++ b/nexus/metrics-producer-gc/build.rs @@ -0,0 +1,10 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// See omicron-rpaths for documentation. +// NOTE: This file MUST be kept in sync with the other build.rs files in this +// repository. +fn main() { + omicron_rpaths::configure_default_omicron_rpaths(); +} diff --git a/nexus/metrics-producer-gc/src/lib.rs b/nexus/metrics-producer-gc/src/lib.rs new file mode 100644 index 0000000000..4ed8f1bbb5 --- /dev/null +++ b/nexus/metrics-producer-gc/src/lib.rs @@ -0,0 +1,369 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Garbage collection of expired metrics producers +//! +//! A metrics producer is expected to reregister itself periodically. This crate +//! provides a mechanism to clean up any producers that have stopped +//! reregistering, both removing their registration records from the database +//! and notifying their assigned collector. It is expected to be invoked from a +//! Nexus background task. + +use chrono::DateTime; +use chrono::Utc; +use futures::stream::FuturesUnordered; +use futures::StreamExt; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::identity::Asset; +use nexus_db_queries::db::model::ProducerEndpoint; +use nexus_db_queries::db::DataStore; +use omicron_common::api::external::Error as DbError; +use oximeter_client::Client as OximeterClient; +use slog::info; +use slog::o; +use slog::warn; +use slog::Logger; +use slog_error_chain::InlineErrorChain; +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; +use std::collections::BTreeSet; +use std::net::SocketAddr; +use uuid::Uuid; + +#[derive(Debug, Clone)] +pub struct PrunedProducers { + pub successes: BTreeSet, + pub failures: BTreeMap, +} + +#[derive(Debug, thiserror::Error, slog_error_chain::SlogInlineError)] +pub enum Error { + #[error("failed to list expired producers")] + ListExpiredProducers(#[source] DbError), + #[error("failed to get Oximeter info for {id}")] + GetOximterInfo { + id: Uuid, + #[source] + err: DbError, + }, +} + +/// Make one garbage collection pass over the metrics producers. +pub async fn prune_expired_producers( + opctx: &OpContext, + datastore: &DataStore, + expiration: DateTime, +) -> Result { + // Get the list of expired producers we need to prune. + let expired_producers = + ExpiredProducers::new(opctx, datastore, expiration).await?; + + // Build a FuturesUnordered to prune each expired producer. 
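// Sketch of the call pattern described in the crate-level docs above: a
// Nexus background task computes a cutoff from an assumed lease length and
// makes one GC pass. The lease constant and the function name here are
// illustrative, not part of this crate.
async fn gc_pass_sketch(
    opctx: &nexus_db_queries::context::OpContext,
    datastore: &nexus_db_queries::db::DataStore,
) -> Result<(), Error> {
    // Assumed lease: producers are expected to re-register at least this often.
    let lease = chrono::Duration::minutes(10);
    let expiration = chrono::Utc::now() - lease;
    let pruned = prune_expired_producers(opctx, datastore, expiration).await?;
    slog::info!(
        opctx.log,
        "pruned expired metric producers";
        "succeeded" => pruned.successes.len(),
        "failed" => pruned.failures.len(),
    );
    Ok(())
}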
+ let mut all_prunes = expired_producers + .producer_client_pairs() + .map(|(producer, client)| async { + let result = unregister_producer( + opctx, datastore, producer, client, &opctx.log, + ) + .await; + (producer.id(), result) + }) + .collect::>(); + + // Collect all the results. + let mut successes = BTreeSet::new(); + let mut failures = BTreeMap::new(); + while let Some((id, result)) = all_prunes.next().await { + match result { + Ok(()) => { + successes.insert(id); + } + Err(err) => { + failures.insert(id, err); + } + } + } + Ok(PrunedProducers { successes, failures }) +} + +async fn unregister_producer( + opctx: &OpContext, + datastore: &DataStore, + producer: &ProducerEndpoint, + client: &OximeterClient, + log: &Logger, +) -> Result<(), DbError> { + // Attempt to notify this producer's collector that the producer's lease has + // expired. This is an optimistic notification: if it fails, we will still + // prune the producer from the database, so that the next time this + // collector asks Nexus for its list of producers, this expired producer is + // gone. + match client.producer_delete(&producer.id()).await { + Ok(_) => { + info!( + log, "successfully notified Oximeter of expired producer"; + "collector-id" => %producer.oximeter_id, + "producer-id" => %producer.id(), + ); + } + Err(err) => { + warn!( + log, "failed to notify Oximeter of expired producer"; + "collector-id" => %producer.oximeter_id, + "producer-id" => %producer.id(), + InlineErrorChain::new(&err), + ); + } + } + + datastore.producer_endpoint_delete(opctx, &producer.id()).await.map(|_| ()) +} + +// Internal combination of all expired producers and a set of OximeterClients +// for each producer. +struct ExpiredProducers { + producers: Vec, + clients: BTreeMap, +} + +impl ExpiredProducers { + async fn new( + opctx: &OpContext, + datastore: &DataStore, + expiration: DateTime, + ) -> Result { + let producers = datastore + .producers_list_expired_batched(opctx, expiration) + .await + .map_err(Error::ListExpiredProducers)?; + + let mut clients = BTreeMap::new(); + for producer in &producers { + let entry = match clients.entry(producer.oximeter_id) { + Entry::Vacant(entry) => entry, + Entry::Occupied(_) => continue, + }; + let info = datastore + .oximeter_lookup(opctx, &producer.oximeter_id) + .await + .map_err(|err| Error::GetOximterInfo { + id: producer.oximeter_id, + err, + })?; + let client_log = + opctx.log.new(o!("oximeter-collector" => info.id.to_string())); + let address = SocketAddr::new(info.ip.ip(), *info.port); + let client = + OximeterClient::new(&format!("http://{address}"), client_log); + entry.insert(client); + } + + Ok(Self { producers, clients }) + } + + fn producer_client_pairs( + &self, + ) -> impl Iterator { + self.producers.iter().map(|producer| { + // In `new()` we add a client for every producer.oximeter_id, so we + // can unwrap this lookup. 
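// The unwrap below relies on an invariant established in `new()`: every
// producer's oximeter_id got exactly one client via the entry API. That
// caching pattern in isolation, with `make_client` standing in for
// `OximeterClient::new`:
fn build_client_cache<T>(
    collector_ids: &[uuid::Uuid],
    mut make_client: impl FnMut(uuid::Uuid) -> T,
) -> std::collections::BTreeMap<uuid::Uuid, T> {
    use std::collections::btree_map::Entry;
    let mut clients = std::collections::BTreeMap::new();
    for &id in collector_ids {
        // Construct a client only the first time a collector ID is seen.
        if let Entry::Vacant(entry) = clients.entry(id) {
            entry.insert(make_client(id));
        }
    }
    clients
}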
+ let client = self.clients.get(&producer.oximeter_id).unwrap(); + (producer, client) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use async_bb8_diesel::AsyncRunQueryDsl; + use diesel::ExpressionMethods; + use diesel::QueryDsl; + use httptest::matchers::request; + use httptest::responders::status_code; + use httptest::Expectation; + use nexus_db_model::OximeterInfo; + use nexus_db_queries::db::datastore::pub_test_utils::datastore_test; + use nexus_test_utils::db::test_setup_database; + use nexus_types::internal_api::params; + use omicron_common::api::internal::nexus; + use omicron_test_utils::dev; + use std::time::Duration; + + async fn read_time_modified( + datastore: &DataStore, + producer_id: Uuid, + ) -> DateTime { + use nexus_db_queries::db::schema::metric_producer::dsl; + + let conn = datastore.pool_connection_for_tests().await.unwrap(); + match dsl::metric_producer + .filter(dsl::id.eq(producer_id)) + .select(dsl::time_modified) + .first_async(&*conn) + .await + { + Ok(time_modified) => time_modified, + Err(err) => panic!( + "failed to read time_modified for producer {producer_id}: \ + {err}" + ), + } + } + + #[tokio::test] + async fn test_prune_expired_producers() { + // Setup + let logctx = dev::test_setup_log("test_prune_expired_producers"); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = + datastore_test(&logctx, &db, Uuid::new_v4()).await; + + // Insert an Oximeter collector + let collector_info = OximeterInfo::new(¶ms::OximeterInfo { + collector_id: Uuid::new_v4(), + address: "[::1]:0".parse().unwrap(), + }); + datastore + .oximeter_create(&opctx, &collector_info) + .await + .expect("failed to insert collector"); + + // GC'ing expired producers should succeed if there are no producers at + // all. + let pruned = prune_expired_producers(&opctx, &datastore, Utc::now()) + .await + .expect("failed to prune expired producers"); + assert!(pruned.successes.is_empty()); + assert!(pruned.failures.is_empty()); + + // Insert a producer. + let producer = ProducerEndpoint::new( + &nexus::ProducerEndpoint { + id: Uuid::new_v4(), + kind: nexus::ProducerKind::Service, + address: "[::1]:0".parse().unwrap(), // unused + interval: Duration::from_secs(0), // unused + }, + collector_info.id, + ); + datastore + .producer_endpoint_create(&opctx, &producer) + .await + .expect("failed to insert producer"); + + let producer_time_modified = + read_time_modified(&datastore, producer.id()).await; + + // GC'ing expired producers with an expiration time older than our + // producer's `time_modified` should not prune anything. + let pruned = prune_expired_producers( + &opctx, + &datastore, + producer_time_modified - Duration::from_secs(1), + ) + .await + .expect("failed to prune expired producers"); + assert!(pruned.successes.is_empty()); + assert!(pruned.failures.is_empty()); + + // GC'ing expired producers with an expiration time _newer_ than our + // producer's `time_modified` should prune our one producer. + let pruned = prune_expired_producers( + &opctx, + &datastore, + producer_time_modified + Duration::from_secs(1), + ) + .await + .expect("failed to prune expired producers"); + let expected_success = + [producer.id()].into_iter().collect::>(); + assert_eq!(pruned.successes, expected_success); + assert!(pruned.failures.is_empty()); + + // GC'ing again with the same expiration should do nothing, because we + // already pruned the producer. 
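// The predicate these assertions exercise, stated in isolation: a producer
// counts as expired when its last registration (`time_modified`) is older
// than the cutoff. The strict comparison is an assumption about
// `producers_list_expired_batched`, whose query is not shown here.
fn is_expired(
    time_modified: chrono::DateTime<chrono::Utc>,
    cutoff: chrono::DateTime<chrono::Utc>,
) -> bool {
    time_modified < cutoff
}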
+ let pruned = prune_expired_producers( + &opctx, + &datastore, + producer_time_modified + Duration::from_secs(1), + ) + .await + .expect("failed to prune expired producers"); + assert!(pruned.successes.is_empty()); + assert!(pruned.failures.is_empty()); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_prune_expired_producers_notifies_collector() { + // Setup + let logctx = dev::test_setup_log( + "test_prune_expired_producers_notifies_collector", + ); + let mut db = test_setup_database(&logctx.log).await; + let (opctx, datastore) = + datastore_test(&logctx, &db, Uuid::new_v4()).await; + + let mut collector = httptest::Server::run(); + + // Insert an Oximeter collector + let collector_info = OximeterInfo::new(¶ms::OximeterInfo { + collector_id: Uuid::new_v4(), + address: collector.addr(), + }); + datastore + .oximeter_create(&opctx, &collector_info) + .await + .expect("failed to insert collector"); + + // Insert a producer. + let producer = ProducerEndpoint::new( + &nexus::ProducerEndpoint { + id: Uuid::new_v4(), + kind: nexus::ProducerKind::Service, + address: "[::1]:0".parse().unwrap(), // unused + interval: Duration::from_secs(0), // unused + }, + collector_info.id, + ); + datastore + .producer_endpoint_create(&opctx, &producer) + .await + .expect("failed to insert producer"); + + let producer_time_modified = + read_time_modified(&datastore, producer.id()).await; + + // GC'ing expired producers with an expiration time _newer_ than our + // producer's `time_modified` should prune our one producer and notify + // the collector that it's doing so. + collector.expect( + Expectation::matching(request::method_path( + "DELETE", + format!("/producers/{}", producer.id()), + )) + .respond_with(status_code(204)), + ); + + let pruned = prune_expired_producers( + &opctx, + &datastore, + producer_time_modified + Duration::from_secs(1), + ) + .await + .expect("failed to prune expired producers"); + let expected_success = + [producer.id()].into_iter().collect::>(); + assert_eq!(pruned.successes, expected_success); + assert!(pruned.failures.is_empty()); + + collector.verify_and_clear(); + + db.cleanup().await.unwrap(); + logctx.cleanup_successful(); + } +} diff --git a/nexus/networking/Cargo.toml b/nexus/networking/Cargo.toml index 11f6d83993..510fd6ca27 100644 --- a/nexus/networking/Cargo.toml +++ b/nexus/networking/Cargo.toml @@ -4,11 +4,15 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] futures.workspace = true ipnetwork.workspace = true nexus-db-queries.workspace = true omicron-common.workspace = true +oxnet.workspace = true reqwest.workspace = true sled-agent-client.workspace = true slog.workspace = true diff --git a/nexus/networking/src/firewall_rules.rs b/nexus/networking/src/firewall_rules.rs index 85c457d522..a656c673ca 100644 --- a/nexus/networking/src/firewall_rules.rs +++ b/nexus/networking/src/firewall_rules.rs @@ -9,6 +9,7 @@ use ipnetwork::IpNetwork; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; +use nexus_db_queries::db::fixed_data::vpc::SERVICES_VPC_ID; use nexus_db_queries::db::identity::Asset; use nexus_db_queries::db::identity::Resource; use nexus_db_queries::db::lookup; @@ -16,12 +17,14 @@ use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::model::Name; use nexus_db_queries::db::DataStore; use omicron_common::api::external; +use omicron_common::api::external::AllowedSourceIps; use 
omicron_common::api::external::Error; -use omicron_common::api::external::IpNet; use omicron_common::api::external::ListResultVec; use omicron_common::api::internal::nexus::HostIdentifier; use omicron_common::api::internal::shared::NetworkInterface; +use oxnet::IpNet; use slog::debug; +use slog::error; use slog::info; use slog::warn; use slog::Logger; @@ -186,6 +189,44 @@ pub async fn resolve_firewall_rules_for_sled_agent( "subnet_networks" => ?subnet_networks, ); + // Lookup an rules implied by the user-facing services IP allowlist. + // + // These rules are implicit, and not stored in the firewall rule table, + // since they're logically a different thing. However, we implement the + // allowlist by _modifying_ any existing rules targeting the internal Oxide + // services VPC. The point here is to restrict the hosts allowed to make + // connections, but otherwise leave the rules unmodified. For example, we + // want to make sure that our external DNS server only receives UDP traffic + // on port 53. Adding a _new_ firewall rule for the allowlist with higher + // priority would remove this port / protocol requirement. Instead, we + // modify the rules in-place. + let allowed_ips = if allowlist_applies_to_vpc(vpc) { + let allowed_ips = + lookup_allowed_source_ips(datastore, opctx, log).await?; + match &allowed_ips { + AllowedSourceIps::Any => { + debug!( + log, + "Allowlist for user-facing services is set to \ + allow any inbound traffic. Existing VPC firewall \ + rules will not be modified." + ); + } + AllowedSourceIps::List(list) => { + debug!( + log, + "Found allowlist for user-facing services \ + with explicit IP list. Existing VPC firewall \ + rules will be modified to match."; + "allow_list" => ?list, + ); + } + } + Some(allowed_ips) + } else { + None + }; + // Compile resolved rules for the sled agents. let mut sled_agent_rules = Vec::with_capacity(rules.len()); for rule in rules { @@ -266,9 +307,43 @@ pub async fn resolve_firewall_rules_for_sled_agent( continue; } - let filter_hosts = match &rule.filter_hosts { - None => None, - Some(hosts) => { + // Construct the set of filter hosts from the DB rule. + let filter_hosts = match (&rule.filter_hosts, &allowed_ips) { + // No host filters, but we need to insert the allowlist entries. + // + // This is the expected case when applying rules for the built-in + // Oxide-services VPCs, which do not contain host filters. (See + // `nexus_db_queries::fixed_data::vpc_firewall_rule` for those + // rules.) If those rules change to include any filter hosts, this + // logic needs to change as well. + (None, Some(allowed_ips)) => match allowed_ips { + AllowedSourceIps::Any => None, + AllowedSourceIps::List(list) => Some( + list.iter() + .copied() + .map(|ip| HostIdentifier::Ip(ip).into()) + .collect(), + ), + }, + + // No rules exist, and we don't need to add anything for the + // allowlist. + (None, None) => None, + + (Some(_), Some(_)) => { + return Err(Error::internal_error( + "While trying to apply the user-facing services allowlist, \ + we found unexpected host filters already in the rules. These \ + are expected to have no built-in rules which filter on \ + the hosts, so that we can modify the rules to apply the \ + allowlist without worrying about destroying those built-in \ + host filters." + )); + } + + // There are host filters, but we don't need to apply the allowlist + // to this VPC either, so insert the rules as-is. 
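// The decision implemented by the surrounding match, condensed into a pure
// function so the four cases are easier to scan. `Allowlist` and
// `resolve_filters` are illustrative stand-ins for `AllowedSourceIps` and
// the inline match, not crate items.
enum Allowlist<T> {
    Any,
    List(Vec<T>),
}

fn resolve_filters<T>(
    rule_filters: Option<Vec<T>>,
    allowlist: Option<Allowlist<T>>,
) -> Result<Option<Vec<T>>, &'static str> {
    match (rule_filters, allowlist) {
        // Built-in services rule: synthesize host filters from the allowlist.
        (None, Some(Allowlist::List(list))) => Ok(Some(list)),
        // Allowlist permits any source: leave the rule unfiltered.
        (None, Some(Allowlist::Any)) => Ok(None),
        // Not the services VPC: keep whatever filters the rule already has.
        (filters, None) => Ok(filters),
        // Services rules are expected to carry no host filters of their own.
        (Some(_), Some(_)) => {
            Err("unexpected host filters on a services VPC rule")
        }
    }
}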
+ (Some(hosts), None) => { let mut host_addrs = Vec::with_capacity(hosts.len()); for host in hosts { match &host.0 { @@ -278,7 +353,7 @@ pub async fn resolve_firewall_rules_for_sled_agent( .unwrap_or(&no_interfaces) { host_addrs.push( - HostIdentifier::Ip(IpNet::from( + HostIdentifier::Ip(IpNet::host_net( interface.ip, )) .into(), @@ -287,7 +362,7 @@ pub async fn resolve_firewall_rules_for_sled_agent( } external::VpcFirewallRuleHostFilter::Subnet(name) => { for subnet in subnet_networks - .get(&name) + .get(name) .unwrap_or(&no_networks) { host_addrs.push( @@ -298,7 +373,8 @@ pub async fn resolve_firewall_rules_for_sled_agent( } external::VpcFirewallRuleHostFilter::Ip(addr) => { host_addrs.push( - HostIdentifier::Ip(IpNet::from(*addr)).into(), + HostIdentifier::Ip(IpNet::host_net(*addr)) + .into(), ) } external::VpcFirewallRuleHostFilter::IpNet(net) => { @@ -306,7 +382,7 @@ pub async fn resolve_firewall_rules_for_sled_agent( } external::VpcFirewallRuleHostFilter::Vpc(name) => { for interface in vpc_interfaces - .get(&name) + .get(name) .unwrap_or(&no_interfaces) { host_addrs.push( @@ -439,3 +515,26 @@ pub async fn plumb_service_firewall_rules( .await?; Ok(()) } + +/// Return true if the user-facing services allowlist applies to a VPC. +fn allowlist_applies_to_vpc(vpc: &db::model::Vpc) -> bool { + vpc.id() == *SERVICES_VPC_ID +} + +/// Return the list of allowed IPs from the database. +async fn lookup_allowed_source_ips( + datastore: &DataStore, + opctx: &OpContext, + log: &Logger, +) -> Result { + match datastore.allow_list_view(opctx).await { + Ok(allowed) => { + slog::trace!(log, "fetched allowlist from DB"; "allowed" => ?allowed); + allowed.allowed_source_ips() + } + Err(e) => { + error!(log, "failed to fetch allowlist from DB"; "err" => ?e); + Err(e) + } + } +} diff --git a/nexus/reconfigurator/execution/Cargo.toml b/nexus/reconfigurator/execution/Cargo.toml index b479ae67ee..34056b45a1 100644 --- a/nexus/reconfigurator/execution/Cargo.toml +++ b/nexus/reconfigurator/execution/Cargo.toml @@ -3,6 +3,9 @@ name = "nexus-reconfigurator-execution" version = "0.1.0" edition = "2021" +[lints] +workspace = true + [build-dependencies] omicron-rpaths.workspace = true @@ -11,7 +14,6 @@ anyhow.workspace = true dns-service-client.workspace = true chrono.workspace = true futures.workspace = true -illumos-utils.workspace = true internal-dns.workspace = true nexus-config.workspace = true nexus-db-model.workspace = true @@ -19,6 +21,8 @@ nexus-db-queries.workspace = true nexus-networking.workspace = true nexus-types.workspace = true omicron-common.workspace = true +omicron-uuid-kinds.workspace = true +oxnet.workspace = true reqwest.workspace = true sled-agent-client.workspace = true slog.workspace = true @@ -33,6 +37,8 @@ pq-sys = "*" omicron-workspace-hack.workspace = true [dev-dependencies] +async-bb8-diesel.workspace = true +diesel.workspace = true httptest.workspace = true ipnet.workspace = true nexus-reconfigurator-planning.workspace = true diff --git a/nexus/reconfigurator/execution/src/cockroachdb.rs b/nexus/reconfigurator/execution/src/cockroachdb.rs new file mode 100644 index 0000000000..101a7372c5 --- /dev/null +++ b/nexus/reconfigurator/execution/src/cockroachdb.rs @@ -0,0 +1,113 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Ensures CockroachDB settings are set + +use anyhow::Context; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; +use nexus_types::deployment::Blueprint; + +pub(crate) async fn ensure_settings( + opctx: &OpContext, + datastore: &DataStore, + blueprint: &Blueprint, +) -> anyhow::Result<()> { + if let Some(value) = + blueprint.cockroachdb_setting_preserve_downgrade.to_optional_string() + { + datastore + .cockroachdb_setting_set_string( + opctx, + blueprint.cockroachdb_fingerprint.clone(), + "cluster.preserve_downgrade_option", + value, + ) + .await + .context("failed to set cluster.preserve_downgrade_option")?; + } + Ok(()) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::overridables::Overridables; + use nexus_db_queries::authn; + use nexus_db_queries::authz; + use nexus_test_utils_macros::nexus_test; + use nexus_types::deployment::CockroachDbClusterVersion; + use std::sync::Arc; + + type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + + #[nexus_test] + async fn test_ensure_preserve_downgrade_option( + cptestctx: &ControlPlaneTestContext, + ) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let log = &cptestctx.logctx.log; + let opctx = OpContext::for_background( + log.clone(), + Arc::new(authz::Authz::new(log)), + authn::Context::internal_api(), + datastore.clone(), + ); + + // Fetch the initial CockroachDB settings. + let settings = datastore + .cockroachdb_settings(&opctx) + .await + .expect("failed to get cockroachdb settings"); + // Fetch the initial blueprint installed during rack initialization. + let (_blueprint_target, blueprint) = datastore + .blueprint_target_get_current_full(&opctx) + .await + .expect("failed to get blueprint from datastore"); + eprintln!("blueprint: {}", blueprint.display()); + // The initial blueprint should already have these filled in. + assert_eq!( + blueprint.cockroachdb_fingerprint, + settings.state_fingerprint + ); + assert_eq!( + blueprint.cockroachdb_setting_preserve_downgrade, + CockroachDbClusterVersion::NEWLY_INITIALIZED.into() + ); + // The cluster version, preserve downgrade setting, and + // `NEWLY_INITIALIZED` should all match. + assert_eq!( + settings.version, + CockroachDbClusterVersion::NEWLY_INITIALIZED.to_string() + ); + assert_eq!( + settings.preserve_downgrade, + CockroachDbClusterVersion::NEWLY_INITIALIZED.to_string() + ); + // Execute the initial blueprint. + let overrides = Overridables::for_test(cptestctx); + crate::realize_blueprint_with_overrides( + &opctx, + datastore, + &blueprint, + "test-suite", + &overrides, + ) + .await + .expect("failed to execute initial blueprint"); + // The CockroachDB settings should not have changed. + assert_eq!( + settings, + datastore + .cockroachdb_settings(&opctx) + .await + .expect("failed to get cockroachdb settings") + ); + + // TODO(iliana): when we upgrade to v22.2, test an actual cluster + // upgrade when crdb-seed is run with the old CockroachDB + } +} diff --git a/nexus/reconfigurator/execution/src/datasets.rs b/nexus/reconfigurator/execution/src/datasets.rs index 1d08f3b294..c4f5cbae82 100644 --- a/nexus/reconfigurator/execution/src/datasets.rs +++ b/nexus/reconfigurator/execution/src/datasets.rs @@ -5,19 +5,19 @@ //! 
Ensures dataset records required by a given blueprint use anyhow::Context; -use illumos_utils::zpool::ZpoolName; use nexus_db_model::Dataset; use nexus_db_model::DatasetKind; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; -use nexus_types::deployment::OmicronZoneConfig; -use nexus_types::deployment::OmicronZoneType; +use nexus_types::deployment::blueprint_zone_type; +use nexus_types::deployment::BlueprintZoneConfig; +use nexus_types::deployment::BlueprintZoneType; use nexus_types::identity::Asset; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::OmicronZoneUuid; use slog::info; use slog::warn; -use slog_error_chain::InlineErrorChain; use std::collections::BTreeSet; -use std::net::SocketAddrV6; /// For each crucible zone in `all_omicron_zones`, ensure that a corresponding /// dataset record exists in `datastore` @@ -27,7 +27,7 @@ use std::net::SocketAddrV6; pub(crate) async fn ensure_crucible_dataset_records_exist( opctx: &OpContext, datastore: &DataStore, - all_omicron_zones: impl Iterator, + all_omicron_zones: impl Iterator, ) -> anyhow::Result { // Before attempting to insert any datasets, first query for any existing // dataset records so we can filter them out. This looks like a typical @@ -44,14 +44,17 @@ pub(crate) async fn ensure_crucible_dataset_records_exist( .await .context("failed to list all datasets")? .into_iter() - .map(|dataset| dataset.id()) + .map(|dataset| OmicronZoneUuid::from_untyped_uuid(dataset.id())) .collect::>(); let mut num_inserted = 0; let mut num_already_exist = 0; for zone in all_omicron_zones { - let OmicronZoneType::Crucible { address, dataset } = &zone.zone_type + let BlueprintZoneType::Crucible(blueprint_zone_type::Crucible { + address, + dataset, + }) = &zone.zone_type else { continue; }; @@ -64,33 +67,13 @@ pub(crate) async fn ensure_crucible_dataset_records_exist( continue; } - // Map progenitor client strings into the types we need. We never - // expect these to fail. 
- let addr: SocketAddrV6 = match address.parse() { - Ok(addr) => addr, - Err(err) => { - warn!( - opctx.log, "failed to parse crucible zone address"; - "address" => address, - "err" => InlineErrorChain::new(&err), - ); - continue; - } - }; - let zpool_name: ZpoolName = match dataset.pool_name.parse() { - Ok(name) => name, - Err(err) => { - warn!( - opctx.log, "failed to parse crucible zone pool name"; - "pool_name" => &*dataset.pool_name, - "err" => err, - ); - continue; - } - }; - - let pool_id = zpool_name.id(); - let dataset = Dataset::new(id, pool_id, addr, DatasetKind::Crucible); + let pool_id = dataset.pool_name.id(); + let dataset = Dataset::new( + id.into_untyped_uuid(), + pool_id.into_untyped_uuid(), + *address, + DatasetKind::Crucible, + ); let maybe_inserted = datastore .dataset_insert_if_not_exists(dataset) .await @@ -143,8 +126,15 @@ mod tests { use nexus_db_model::SledSystemHardware; use nexus_db_model::SledUpdate; use nexus_db_model::Zpool; + use nexus_reconfigurator_planning::example::example; use nexus_test_utils_macros::nexus_test; + use nexus_types::deployment::BlueprintZoneDisposition; + use nexus_types::deployment::BlueprintZoneFilter; + use omicron_common::zpool_name::ZpoolName; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::ZpoolUuid; use sled_agent_client::types::OmicronZoneDataset; + use sled_agent_client::types::OmicronZoneType; use uuid::Uuid; type ControlPlaneTestContext = @@ -154,8 +144,10 @@ mod tests { async fn test_ensure_crucible_dataset_records_exist( cptestctx: &ControlPlaneTestContext, ) { + const TEST_NAME: &str = "test_ensure_crucible_dataset_records_exist"; + // Set up. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), @@ -163,15 +155,14 @@ mod tests { ); let opctx = &opctx; - // Use the standard representative inventory collection. - let representative = nexus_inventory::examples::representative(); - let collection = representative.builder.build(); + // Use the standard example system. + let (collection, _, blueprint) = example(&opctx.log, TEST_NAME, 5); // Record the sleds and zpools contained in this collection. let rack_id = Uuid::new_v4(); for (&sled_id, config) in &collection.omicron_zones { let sled = SledUpdate::new( - sled_id, + sled_id.into_untyped_uuid(), "[::1]:0".parse().unwrap(), SledBaseboard { serial_number: format!("test-{sled_id}"), @@ -194,15 +185,13 @@ mod tests { else { continue; }; - let zpool_name: ZpoolName = - dataset.pool_name.parse().expect("invalid zpool name"); let zpool = Zpool::new( - zpool_name.id(), - sled_id, + dataset.pool_name.id().into_untyped_uuid(), + sled_id.into_untyped_uuid(), Uuid::new_v4(), // physical_disk_id ); datastore - .zpool_upsert(zpool) + .zpool_insert(opctx, zpool) .await .expect("failed to upsert zpool"); } @@ -223,10 +212,17 @@ mod tests { .len(), 0 ); + + // Collect all the blueprint zones. 
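// The zpool handling above leans on `ZpoolName` carrying a typed
// `ZpoolUuid`, so the pool ID comes straight off the name instead of being
// parsed out of a string. A small sketch of that round trip, assuming
// `ZpoolName::new_external` and `id()` behave as they are used in this
// file:
fn zpool_id_round_trip() {
    use omicron_common::zpool_name::ZpoolName;
    use omicron_uuid_kinds::ZpoolUuid;

    let id = ZpoolUuid::new_v4();
    let name = ZpoolName::new_external(id);
    assert_eq!(name.id(), id);
}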
+ let all_omicron_zones = blueprint + .all_omicron_zones(BlueprintZoneFilter::All) + .map(|(_, zone)| zone) + .collect::>(); + let ndatasets_inserted = ensure_crucible_dataset_records_exist( opctx, datastore, - collection.all_omicron_zones(), + all_omicron_zones.iter().copied(), ) .await .expect("failed to ensure crucible datasets"); @@ -247,7 +243,7 @@ mod tests { let ndatasets_inserted = ensure_crucible_dataset_records_exist( opctx, datastore, - collection.all_omicron_zones(), + all_omicron_zones.iter().copied(), ) .await .expect("failed to ensure crucible datasets"); @@ -263,38 +259,38 @@ mod tests { // Create another zpool on one of the sleds, so we can add a new // crucible zone that uses it. - let new_zpool_id = Uuid::new_v4(); + let new_zpool_id = ZpoolUuid::new_v4(); for &sled_id in collection.omicron_zones.keys().take(1) { let zpool = Zpool::new( - new_zpool_id, - sled_id, + new_zpool_id.into_untyped_uuid(), + sled_id.into_untyped_uuid(), Uuid::new_v4(), // physical_disk_id ); datastore - .zpool_upsert(zpool) + .zpool_insert(opctx, zpool) .await .expect("failed to upsert zpool"); } // Call `ensure_crucible_dataset_records_exist` again, adding a new // crucible zone. It should insert only this new zone. - let new_zone = OmicronZoneConfig { - id: Uuid::new_v4(), + let new_zone = BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: OmicronZoneUuid::new_v4(), underlay_address: "::1".parse().unwrap(), - zone_type: OmicronZoneType::Crucible { - address: "[::1]:0".to_string(), - dataset: OmicronZoneDataset { - pool_name: ZpoolName::new_external(new_zpool_id) - .to_string() - .parse() - .unwrap(), + zone_type: BlueprintZoneType::Crucible( + blueprint_zone_type::Crucible { + address: "[::1]:0".parse().unwrap(), + dataset: OmicronZoneDataset { + pool_name: ZpoolName::new_external(new_zpool_id), + }, }, - }, + ), }; let ndatasets_inserted = ensure_crucible_dataset_records_exist( opctx, datastore, - collection.all_omicron_zones().chain(std::iter::once(&new_zone)), + all_omicron_zones.iter().copied().chain(std::iter::once(&new_zone)), ) .await .expect("failed to ensure crucible datasets"); diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index 7d7e24b6cf..ec48a35cbe 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -6,20 +6,19 @@ use crate::overridables::Overridables; use crate::Sled; -use anyhow::Context; use dns_service_client::DnsDiff; use internal_dns::DnsConfigBuilder; use internal_dns::ServiceName; use nexus_db_model::DnsGroup; -use nexus_db_model::Silo; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::datastore::Discoverability; use nexus_db_queries::db::datastore::DnsVersionUpdateBuilder; -use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO_ID; +use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_db_queries::db::DataStore; +use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintZoneFilter; -use nexus_types::deployment::OmicronZoneType; +use nexus_types::deployment::BlueprintZoneType; use nexus_types::identity::Resource; use nexus_types::internal_api::params::DnsConfigParams; use nexus_types::internal_api::params::DnsConfigZone; @@ -27,19 +26,20 @@ use nexus_types::internal_api::params::DnsRecord; use omicron_common::api::external::Error; use omicron_common::api::external::Generation; use omicron_common::api::external::InternalContext; +use 
omicron_common::api::external::Name; +use omicron_common::bail_unless; +use omicron_uuid_kinds::SledUuid; use slog::{debug, info, o}; use std::collections::BTreeMap; use std::collections::HashMap; use std::net::IpAddr; -use std::net::SocketAddrV6; -use uuid::Uuid; pub(crate) async fn deploy_dns( opctx: &OpContext, datastore: &DataStore, creator: String, blueprint: &Blueprint, - sleds_by_id: &BTreeMap, + sleds_by_id: &BTreeMap, overrides: &Overridables, ) -> Result<(), Error> { // First, fetch the current DNS configs. @@ -59,34 +59,37 @@ pub(crate) async fn deploy_dns( // we know it's being hit when we exercise this condition. // Next, construct the DNS config represented by the blueprint. - let internal_dns_config_blueprint = - blueprint_internal_dns_config(blueprint, sleds_by_id, overrides) - .map_err(|e| { - Error::internal_error(&format!( - "error constructing internal DNS config: {:#}", - e - )) - })?; + let internal_dns_zone_blueprint = + blueprint_internal_dns_config(blueprint, sleds_by_id, overrides); let silos = datastore .silo_list_all_batched(opctx, Discoverability::All) .await .internal_context("listing Silos (for configuring external DNS)")? .into_iter() - // We do not generate a DNS name for the "default" Silo. - .filter(|silo| silo.id() != *DEFAULT_SILO_ID) + .map(|silo| silo.name().clone()) .collect::>(); - let (nexus_external_ips, nexus_external_dns_zones) = - datastore.nexus_external_addresses(opctx, Some(blueprint)).await?; - let nexus_external_dns_zone_names = nexus_external_dns_zones + let nexus_external_dns_zone_names = datastore + .dns_zones_list_all(opctx, DnsGroup::External) + .await + .internal_context("listing DNS zones")? .into_iter() .map(|z| z.zone_name) .collect::>(); - let external_dns_config_blueprint = blueprint_external_dns_config( + // Other parts of the system support multiple external DNS zone names. We + // do not here. If we decide to support this in the future, this mechanism + // will need to be updated. + bail_unless!( + nexus_external_dns_zone_names.len() == 1, + "expected exactly one external DNS zone" + ); + // unwrap: we just checked the length. + let external_dns_zone_name = + nexus_external_dns_zone_names.into_iter().next().unwrap(); + let external_dns_zone_blueprint = blueprint_external_dns_config( blueprint, - &nexus_external_ips, &silos, - &nexus_external_dns_zone_names, + external_dns_zone_name, ); // Deploy the changes. @@ -96,7 +99,7 @@ pub(crate) async fn deploy_dns( creator.clone(), blueprint, &internal_dns_config_current, - &internal_dns_config_blueprint, + internal_dns_zone_blueprint, DnsGroup::Internal, ) .await?; @@ -106,7 +109,7 @@ pub(crate) async fn deploy_dns( creator, blueprint, &external_dns_config_current, - &external_dns_config_blueprint, + external_dns_zone_blueprint, DnsGroup::External, ) .await?; @@ -119,13 +122,19 @@ pub(crate) async fn deploy_dns_one( creator: String, blueprint: &Blueprint, dns_config_current: &DnsConfigParams, - dns_config_blueprint: &DnsConfigParams, + dns_zone_blueprint: DnsConfigZone, dns_group: DnsGroup, ) -> Result<(), Error> { let log = opctx .log .new(o!("blueprint_execution" => format!("dns {:?}", dns_group))); + // Other parts of the system support multiple external DNS zones. We do not + // do so here. + let dns_zone_current = dns_config_current + .sole_zone() + .map_err(|e| Error::internal_error(&format!("{:#}", e)))?; + // Looking at the current contents of DNS, prepare an update that will make // it match what it should be. 
let comment = format!("blueprint {} ({})", blueprint.id, blueprint.comment); @@ -134,8 +143,8 @@ pub(crate) async fn deploy_dns_one( dns_group, comment, creator, - dns_config_current, - dns_config_blueprint, + dns_zone_current, + &dns_zone_blueprint, )?; let Some(update) = maybe_update else { // Nothing to do. @@ -208,6 +217,16 @@ pub(crate) async fn deploy_dns_one( // In both cases, the system will (1) converge to having successfully // executed the target blueprint, and (2) never have rolled any changes back // -- DNS only ever moves forward, closer to the latest desired state. + let blueprint_generation = match dns_group { + DnsGroup::Internal => blueprint.internal_dns_version, + DnsGroup::External => blueprint.external_dns_version, + }; + let dns_config_blueprint = DnsConfigParams { + zones: vec![dns_zone_blueprint], + time_created: chrono::Utc::now(), + generation: u64::from(blueprint_generation.next()), + }; + info!( log, "attempting to update from generation {} to generation {}", @@ -229,9 +248,9 @@ pub(crate) async fn deploy_dns_one( /// Returns the expected contents of internal DNS based on the given blueprint pub fn blueprint_internal_dns_config( blueprint: &Blueprint, - sleds_by_id: &BTreeMap, + sleds_by_id: &BTreeMap, overrides: &Overridables, -) -> Result { +) -> DnsConfigZone { // The DNS names configured here should match what RSS configures for the // same zones. It's tricky to have RSS share the same code because it uses // Sled Agent's _internal_ `OmicronZoneConfig` (and friends), whereas we're @@ -240,80 +259,53 @@ pub fn blueprint_internal_dns_config( // the details. let mut dns_builder = DnsConfigBuilder::new(); - // It's annoying that we have to parse this because it really should be - // valid already. See oxidecomputer/omicron#4988. - fn parse_port(address: &str) -> Result { - address - .parse::() - .with_context(|| format!("parsing socket address {:?}", address)) - .map(|addr| addr.port()) - } - for (_, zone) in - blueprint.all_blueprint_zones(BlueprintZoneFilter::InternalDns) + blueprint.all_omicron_zones(BlueprintZoneFilter::ShouldBeInInternalDns) { - let context = || { - format!( - "parsing {} zone with id {}", - zone.config.zone_type.label(), - zone.config.id - ) - }; - - let (service_name, port) = match &zone.config.zone_type { - OmicronZoneType::BoundaryNtp { address, .. } => { - let port = parse_port(&address).with_context(context)?; - (ServiceName::BoundaryNtp, port) - } - OmicronZoneType::InternalNtp { address, .. } => { - let port = parse_port(&address).with_context(context)?; - (ServiceName::InternalNtp, port) - } - OmicronZoneType::Clickhouse { address, .. } => { - let port = parse_port(&address).with_context(context)?; - (ServiceName::Clickhouse, port) - } - OmicronZoneType::ClickhouseKeeper { address, .. } => { - let port = parse_port(&address).with_context(context)?; - (ServiceName::ClickhouseKeeper, port) - } - OmicronZoneType::CockroachDb { address, .. } => { - let port = parse_port(&address).with_context(context)?; - (ServiceName::Cockroach, port) - } - OmicronZoneType::Nexus { internal_address, .. } => { - let port = - parse_port(internal_address).with_context(context)?; - (ServiceName::Nexus, port) - } - OmicronZoneType::Crucible { address, .. 
} => { - let port = parse_port(address).with_context(context)?; - (ServiceName::Crucible(zone.config.id), port) - } - OmicronZoneType::CruciblePantry { address } => { - let port = parse_port(address).with_context(context)?; - (ServiceName::CruciblePantry, port) - } - OmicronZoneType::Oximeter { address } => { - let port = parse_port(address).with_context(context)?; - (ServiceName::Oximeter, port) - } - OmicronZoneType::ExternalDns { http_address, .. } => { - let port = parse_port(http_address).with_context(context)?; - (ServiceName::ExternalDns, port) - } - OmicronZoneType::InternalDns { http_address, .. } => { - let port = parse_port(http_address).with_context(context)?; - (ServiceName::InternalDns, port) - } + let (service_name, port) = match &zone.zone_type { + BlueprintZoneType::BoundaryNtp( + blueprint_zone_type::BoundaryNtp { address, .. }, + ) => (ServiceName::BoundaryNtp, address.port()), + BlueprintZoneType::InternalNtp( + blueprint_zone_type::InternalNtp { address, .. }, + ) => (ServiceName::InternalNtp, address.port()), + BlueprintZoneType::Clickhouse( + blueprint_zone_type::Clickhouse { address, .. }, + ) => (ServiceName::Clickhouse, address.port()), + BlueprintZoneType::ClickhouseKeeper( + blueprint_zone_type::ClickhouseKeeper { address, .. }, + ) => (ServiceName::ClickhouseKeeper, address.port()), + BlueprintZoneType::CockroachDb( + blueprint_zone_type::CockroachDb { address, .. }, + ) => (ServiceName::Cockroach, address.port()), + BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + internal_address, + .. + }) => (ServiceName::Nexus, internal_address.port()), + BlueprintZoneType::Crucible(blueprint_zone_type::Crucible { + address, + .. + }) => (ServiceName::Crucible(zone.id), address.port()), + BlueprintZoneType::CruciblePantry( + blueprint_zone_type::CruciblePantry { address }, + ) => (ServiceName::CruciblePantry, address.port()), + BlueprintZoneType::Oximeter(blueprint_zone_type::Oximeter { + address, + }) => (ServiceName::Oximeter, address.port()), + BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { http_address, .. }, + ) => (ServiceName::ExternalDns, http_address.port()), + BlueprintZoneType::InternalDns( + blueprint_zone_type::InternalDns { http_address, .. }, + ) => (ServiceName::InternalDns, http_address.port()), }; // This unwrap is safe because this function only fails if we provide // the same zone id twice, which should not be possible here. dns_builder .host_zone_with_one_backend( - zone.config.id, - zone.config.underlay_address, + zone.id, + zone.underlay_address, service_name, port, ) @@ -336,44 +328,43 @@ pub fn blueprint_internal_dns_config( .unwrap(); } - // We set the generation number for the internal DNS to be newer than - // whatever it was when this blueprint was generated. This will only be - // used if the generated DNS contents are different from what's current. 
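// Much of the simplification in this function comes from blueprint zone
// types carrying real socket addresses, so the old fallible `parse_port`
// string handling disappears. The replacement in isolation is just a field
// access on an already-typed address (values here are made up):
fn port_of(addr: &std::net::SocketAddrV6) -> u16 {
    // No string parsing and no error path: the port is already typed.
    addr.port()
}

fn port_of_example() {
    let addr: std::net::SocketAddrV6 = "[::1]:32221".parse().unwrap();
    assert_eq!(port_of(&addr), 32221);
}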
- dns_builder.generation(blueprint.internal_dns_version.next()); - Ok(dns_builder.build()) + dns_builder.build_zone() } pub fn blueprint_external_dns_config( blueprint: &Blueprint, - nexus_external_ips: &[IpAddr], - silos: &[Silo], - external_dns_zone_names: &[String], -) -> DnsConfigParams { + silos: &[Name], + external_dns_zone_name: String, +) -> DnsConfigZone { + let nexus_external_ips = blueprint_nexus_external_ips(blueprint); + let dns_records: Vec = nexus_external_ips .into_iter() .map(|addr| match addr { - IpAddr::V4(addr) => DnsRecord::A(*addr), - IpAddr::V6(addr) => DnsRecord::Aaaa(*addr), + IpAddr::V4(addr) => DnsRecord::A(addr), + IpAddr::V6(addr) => DnsRecord::Aaaa(addr), }) .collect(); let records = silos .into_iter() - .map(|silo| (silo_dns_name(&silo.name()), dns_records.clone())) - .collect::>>(); - - let zones = external_dns_zone_names - .into_iter() - .map(|zone_name| DnsConfigZone { - zone_name: zone_name.to_owned(), - records: records.clone(), + // We do not generate a DNS name for the "default" Silo. + // + // We use the name here rather than the id. It shouldn't really matter + // since every system will have this silo and so no other Silo could + // have this name. But callers (particularly the test suite and + // reconfigurator-cli) specify silos by name, not id, so if we used the + // id here then they'd have to apply this filter themselves (and this + // abstraction, such as it is, would be leakier). + .filter_map(|silo_name| { + (silo_name != DEFAULT_SILO.name()) + .then(|| (silo_dns_name(&silo_name), dns_records.clone())) }) - .collect(); + .collect::>>(); - DnsConfigParams { - generation: u64::from(blueprint.external_dns_version.next()), - time_created: chrono::Utc::now(), - zones, + DnsConfigZone { + zone_name: external_dns_zone_name, + records: records.clone(), } } @@ -382,12 +373,12 @@ fn dns_compute_update( dns_group: DnsGroup, comment: String, creator: String, - current_config: &DnsConfigParams, - new_config: &DnsConfigParams, + current_zone: &DnsConfigZone, + new_zone: &DnsConfigZone, ) -> Result, Error> { let mut update = DnsVersionUpdateBuilder::new(dns_group, comment, creator); - let diff = DnsDiff::new(¤t_config, &new_config) + let diff = DnsDiff::new(¤t_zone, &new_zone) .map_err(|e| Error::internal_error(&format!("{:#}", e)))?; if diff.is_empty() { info!(log, "no changes"); @@ -446,6 +437,20 @@ pub fn silo_dns_name(name: &omicron_common::api::external::Name) -> String { format!("{}.sys", name) } +/// Return the Nexus external addresses according to the given blueprint +pub fn blueprint_nexus_external_ips(blueprint: &Blueprint) -> Vec { + blueprint + .all_omicron_zones(BlueprintZoneFilter::ShouldBeExternallyReachable) + .filter_map(|(_, z)| match z.zone_type { + BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + external_ip, + .. 
+ }) => Some(external_ip.ip), + _ => None, + }) + .collect() +} + #[cfg(test)] mod test { use super::*; @@ -460,25 +465,24 @@ mod test { use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; - use nexus_inventory::CollectionBuilder; + use nexus_inventory::now_db_precision; use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::blueprint_builder::EnsureMultiple; use nexus_reconfigurator_planning::example::example; - use nexus_reconfigurator_preparation::policy_from_db; + use nexus_reconfigurator_preparation::PlanningInputFromDb; use nexus_test_utils::resource_helpers::create_silo; use nexus_test_utils_macros::nexus_test; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintTarget; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; - use nexus_types::deployment::OmicronZoneConfig; - use nexus_types::deployment::OmicronZoneType; - use nexus_types::deployment::Policy; - use nexus_types::deployment::SledResources; - use nexus_types::deployment::ZpoolName; + use nexus_types::deployment::BlueprintZonesConfig; + use nexus_types::deployment::CockroachDbClusterVersion; + use nexus_types::deployment::CockroachDbPreserveDowngrade; + use nexus_types::deployment::CockroachDbSettings; + use nexus_types::deployment::SledFilter; use nexus_types::external_api::params; use nexus_types::external_api::shared; - use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledState; use nexus_types::identity::Resource; use nexus_types::internal_api::params::DnsConfigParams; @@ -495,6 +499,8 @@ mod test { use omicron_common::api::external::Generation; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_test_utils::dev::test_setup_log; + use omicron_uuid_kinds::ExternalIpUuid; + use omicron_uuid_kinds::OmicronZoneUuid; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::collections::HashMap; @@ -502,31 +508,12 @@ mod test { use std::net::Ipv4Addr; use std::net::Ipv6Addr; use std::net::SocketAddrV6; - use std::str::FromStr; use std::sync::Arc; use uuid::Uuid; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; - fn blueprint_empty() -> Blueprint { - let builder = CollectionBuilder::new("test-suite"); - let collection = builder.build(); - let policy = Policy { - sleds: BTreeMap::new(), - service_ip_pool_ranges: vec![], - target_nexus_zone_count: 3, - }; - BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - Generation::new(), - &policy, - "test-suite", - ) - .expect("failed to generate empty blueprint") - } - fn dns_config_empty() -> DnsConfigParams { DnsConfigParams { generation: 1, @@ -541,14 +528,16 @@ mod test { /// test blueprint_internal_dns_config(): trivial case of an empty blueprint #[test] fn test_blueprint_internal_dns_empty() { - let blueprint = blueprint_empty(); + let blueprint = BlueprintBuilder::build_empty_with_sleds( + std::iter::empty(), + "test-suite", + ); let blueprint_dns = blueprint_internal_dns_config( &blueprint, &BTreeMap::new(), &Default::default(), - ) - .unwrap(); - assert!(blueprint_dns.sole_zone().unwrap().records.is_empty()); + ); + assert!(blueprint_dns.records.is_empty()); } /// test blueprint_dns_config(): exercise various different conditions @@ -567,78 +556,95 @@ mod test { let rack_subnet = ipnet::Ipv6Net::new(rack_subnet_base, RACK_PREFIX).unwrap(); let 
possible_sled_subnets = rack_subnet.subnets(SLED_PREFIX).unwrap(); - // Ignore sleds with no associated zones in the inventory. - // This is included in the "representative" collection, but it's - // not allowed by BlueprintBuilder::build_initial_from_collection(). - let policy_sleds = collection - .omicron_zones - .keys() - .zip(possible_sled_subnets) - .map(|(sled_id, subnet)| { - let sled_resources = SledResources { - policy: SledPolicy::provisionable(), - state: SledState::Active, - zpools: BTreeSet::from([ZpoolName::from_str(&format!( - "oxp_{}", - Uuid::new_v4() - )) - .unwrap()]), - subnet: Ipv6Subnet::new(subnet.network()), - }; - (*sled_id, sled_resources) - }) - .collect(); - let policy = Policy { - sleds: policy_sleds, - service_ip_pool_ranges: vec![], - target_nexus_zone_count: 3, - }; + // Convert the inventory `OmicronZonesConfig`s into + // `BlueprintZonesConfig`. This is going to get more painful over time + // as we add to blueprints, but for now we can make this work. + let mut blueprint_zones = BTreeMap::new(); + + // Also assume any sled in the collection is active. + let mut sled_state = BTreeMap::new(); + + for (sled_id, zones_config) in collection.omicron_zones { + blueprint_zones.insert( + sled_id, + BlueprintZonesConfig { + generation: zones_config.zones.generation, + zones: zones_config + .zones + .zones + .into_iter() + .map(|config| -> BlueprintZoneConfig { + BlueprintZoneConfig::from_omicron_zone_config( + config, + BlueprintZoneDisposition::InService, + // We don't get external IP IDs in inventory + // collections. We'll just make one up for every + // zone that needs one here. This is gross. + Some(ExternalIpUuid::new_v4()), + ) + .expect("failed to convert zone config") + }) + .collect(), + }, + ); + sled_state.insert(sled_id, SledState::Active); + } + let dns_empty = dns_config_empty(); let initial_dns_generation = Generation::from(u32::try_from(dns_empty.generation).unwrap()); - let mut blueprint = BlueprintBuilder::build_initial_from_collection( - &collection, - initial_dns_generation, - Generation::new(), - &policy, - "test-suite", - ) - .expect("failed to build initial blueprint"); + let mut blueprint = Blueprint { + id: Uuid::new_v4(), + blueprint_zones, + blueprint_disks: BTreeMap::new(), + sled_state, + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, + parent_blueprint_id: None, + internal_dns_version: initial_dns_generation, + external_dns_version: Generation::new(), + cockroachdb_fingerprint: String::new(), + time_created: now_db_precision(), + creator: "test-suite".to_string(), + comment: "test blueprint".to_string(), + }; // To make things slightly more interesting, let's add a zone that's // not currently in service. 
- let out_of_service_id = Uuid::new_v4(); + let out_of_service_id = OmicronZoneUuid::new_v4(); let out_of_service_addr = Ipv6Addr::LOCALHOST; blueprint.blueprint_zones.values_mut().next().unwrap().zones.push( BlueprintZoneConfig { - config: OmicronZoneConfig { - id: out_of_service_id, - underlay_address: out_of_service_addr, - zone_type: OmicronZoneType::Oximeter { + disposition: BlueprintZoneDisposition::Quiesced, + id: out_of_service_id, + underlay_address: out_of_service_addr, + zone_type: BlueprintZoneType::Oximeter( + blueprint_zone_type::Oximeter { address: SocketAddrV6::new( out_of_service_addr, 12345, 0, 0, - ) - .to_string(), + ), }, - }, - disposition: BlueprintZoneDisposition::Quiesced, + ), }, ); // To generate the blueprint's DNS config, we need to make up a // different set of information about the Quiesced fake system. - let sleds_by_id = policy - .sleds - .iter() + let sleds_by_id = blueprint + .blueprint_zones + .keys() + .zip(possible_sled_subnets) .enumerate() - .map(|(i, (sled_id, sled_resources))| { + .map(|(i, (sled_id, subnet))| { let sled_info = Sled { id: *sled_id, - sled_agent_address: get_sled_address(sled_resources.subnet), + sled_agent_address: get_sled_address(Ipv6Subnet::new( + subnet.network(), + )), // The first two of these (arbitrarily) will be marked // Scrimlets. is_scrimlet: i < 2, @@ -647,17 +653,11 @@ mod test { }) .collect(); - let dns_config_blueprint = blueprint_internal_dns_config( + let blueprint_dns_zone = blueprint_internal_dns_config( &blueprint, &sleds_by_id, &Default::default(), - ) - .unwrap(); - assert_eq!( - dns_config_blueprint.generation, - u64::from(initial_dns_generation.next()) ); - let blueprint_dns_zone = dns_config_blueprint.sole_zone().unwrap(); assert_eq!(blueprint_dns_zone.zone_name, DNS_ZONE); // Now, verify a few different properties about the generated DNS @@ -685,19 +685,24 @@ mod test { // To start, we need a mapping from underlay IP to the corresponding // Omicron zone. let mut omicron_zones_by_ip: BTreeMap<_, _> = blueprint - .all_omicron_zones() - .filter(|(_, zone)| zone.id != out_of_service_id) + .all_omicron_zones(BlueprintZoneFilter::ShouldBeInInternalDns) .map(|(_, zone)| (zone.underlay_address, zone.id)) .collect(); println!("omicron zones by IP: {:#?}", omicron_zones_by_ip); + // Check to see that the quiesced zone was actually excluded + assert!(omicron_zones_by_ip + .values() + .all(|id| *id != out_of_service_id)); + // We also want a mapping from underlay IP to the corresponding switch // zone. In this case, the value is the Scrimlet's sled id. 
let mut switch_sleds_by_ip: BTreeMap<_, _> = sleds_by_id .iter() .filter_map(|(sled_id, sled)| { if sled.is_scrimlet { - let sled_subnet = policy.sleds.get(sled_id).unwrap().subnet; + let sled_subnet = + sleds_by_id.get(sled_id).unwrap().subnet(); let switch_zone_ip = get_switch_zone_address(sled_subnet); Some((switch_zone_ip, *sled_id)) } else { @@ -833,16 +838,9 @@ mod test { async fn test_blueprint_external_dns_basic() { static TEST_NAME: &str = "test_blueprint_external_dns_basic"; let logctx = test_setup_log(TEST_NAME); - let (collection, policy) = example(&logctx.log, TEST_NAME, 5); - let initial_external_dns_generation = Generation::new(); - let blueprint = BlueprintBuilder::build_initial_from_collection( - &collection, - Generation::new(), - initial_external_dns_generation, - &policy, - "test suite", - ) - .expect("failed to generate initial blueprint"); + let (_, _, mut blueprint) = example(&logctx.log, TEST_NAME, 5); + blueprint.internal_dns_version = Generation::new(); + blueprint.external_dns_version = Generation::new(); let my_silo = Silo::new(params::SiloCreate { identity: IdentityMetadataCreateParams { @@ -858,101 +856,47 @@ mod test { }) .unwrap(); - let nexus_external_ips: Vec<_> = blueprint - .all_omicron_zones() - .filter_map(|(_, z)| match &z.zone_type { - OmicronZoneType::Nexus { external_ip, .. } => { - Some(*external_ip) - } - _ => None, - }) - .collect(); - // It shouldn't ever be possible to have no Silos at all, but at least // make sure we don't panic. - let external_dns_config = blueprint_external_dns_config( + let external_dns_zone = blueprint_external_dns_config( &blueprint, - &nexus_external_ips, &[], - &[String::from("oxide.test")], - ); - assert_eq!( - external_dns_config.generation, - u64::from(initial_external_dns_generation.next()) + String::from("oxide.test"), ); - assert_eq!(external_dns_config.zones.len(), 1); - assert_eq!(external_dns_config.zones[0].zone_name, "oxide.test"); - assert!(external_dns_config.zones[0].records.is_empty()); + assert_eq!(external_dns_zone.zone_name, "oxide.test"); + assert!(external_dns_zone.records.is_empty()); - // Same with external DNS zones. - let external_dns_config = blueprint_external_dns_config( + // Now check a more typical case. + let external_dns_zone = blueprint_external_dns_config( &blueprint, - &nexus_external_ips, - std::slice::from_ref(&my_silo), - &[], - ); - assert_eq!( - external_dns_config.generation, - u64::from(initial_external_dns_generation.next()) - ); - assert!(external_dns_config.zones.is_empty()); - - // Same with external IPs. - let external_dns_config = blueprint_external_dns_config( - &blueprint, - &[], - std::slice::from_ref(&my_silo), - &[String::from("oxide.test")], - ); - assert_eq!( - external_dns_config.generation, - u64::from(initial_external_dns_generation.next()) - ); - - // Now check a more typical case. (Although we wouldn't normally have - // more than one external DNS zone, it's a more general case and pretty - // easy to test.) 
- let external_dns_config = blueprint_external_dns_config( - &blueprint, - &nexus_external_ips, - std::slice::from_ref(&my_silo), - &[String::from("oxide1.test"), String::from("oxide2.test")], - ); - assert_eq!( - external_dns_config.generation, - u64::from(initial_external_dns_generation.next()) + std::slice::from_ref(my_silo.name()), + String::from("oxide.test"), ); - assert_eq!(external_dns_config.zones.len(), 2); - assert_eq!( - external_dns_config.zones[0].records, - external_dns_config.zones[1].records - ); - assert_eq!( - external_dns_config.zones[0].zone_name, - String::from("oxide1.test"), - ); - assert_eq!( - external_dns_config.zones[1].zone_name, - String::from("oxide2.test"), - ); - let records = &external_dns_config.zones[0].records; + assert_eq!(external_dns_zone.zone_name, String::from("oxide.test")); + let records = &external_dns_zone.records; assert_eq!(records.len(), 1); let silo_records = records .get(&silo_dns_name(my_silo.name())) .expect("missing silo DNS records"); + // Helper for converting dns records for a given silo to IpAddrs + let records_to_ips = |silo_records: &Vec<_>| { + let mut ips: Vec<_> = silo_records + .into_iter() + .map(|record| match record { + DnsRecord::A(v) => IpAddr::V4(*v), + DnsRecord::Aaaa(v) => IpAddr::V6(*v), + DnsRecord::Srv(_) => panic!("unexpected SRV record"), + }) + .collect(); + ips.sort(); + ips + }; + // Here we're hardcoding the contents of the example blueprint. It // currently puts one Nexus zone on each sled. If we change the example // blueprint, change the expected set of IPs here. - let mut silo_record_ips: Vec<_> = silo_records - .into_iter() - .map(|record| match record { - DnsRecord::A(v) => IpAddr::V4(*v), - DnsRecord::Aaaa(v) => IpAddr::V6(*v), - DnsRecord::Srv(_) => panic!("unexpected SRV record"), - }) - .collect(); - silo_record_ips.sort(); + let silo_record_ips: Vec<_> = records_to_ips(silo_records); assert_eq!( silo_record_ips, &[ @@ -963,6 +907,42 @@ mod test { "192.0.2.6".parse::().unwrap(), ] ); + + // Change the zone disposition to quiesced for the nexus zone on the + // first sled. This should ensure we don't get an external DNS record + // back for that sled. + let (_, bp_zones_config) = + blueprint.blueprint_zones.iter_mut().next().unwrap(); + let nexus_zone = bp_zones_config + .zones + .iter_mut() + .find(|z| z.zone_type.is_nexus()) + .unwrap(); + nexus_zone.disposition = BlueprintZoneDisposition::Quiesced; + + // Retrieve the DNS config based on the modified blueprint + let external_dns_zone = blueprint_external_dns_config( + &blueprint, + std::slice::from_ref(my_silo.name()), + String::from("oxide.test"), + ); + let silo_records = &external_dns_zone + .records + .get(&silo_dns_name(my_silo.name())) + .expect("missing silo DNS records"); + let silo_record_ips: Vec<_> = records_to_ips(silo_records); + + // We shouldn't see the excluded Nexus address + assert_eq!( + silo_record_ips, + &[ + "192.0.2.3".parse::().unwrap(), + "192.0.2.4".parse::().unwrap(), + "192.0.2.5".parse::().unwrap(), + "192.0.2.6".parse::().unwrap(), + ] + ); + logctx.cleanup_successful(); } @@ -972,14 +952,14 @@ mod test { // Start with an empty DNS config. There's no database update needed // when updating the DNS config to itself. 
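// With this change, `dns_compute_update` compares individual `DnsConfigZone`s
// rather than whole `DnsConfigParams` values. A `DnsConfigZone` is just a zone
// name plus a map from relative names to record sets, so comparing an empty
// zone against itself should yield `Ok(None)`. A minimal sketch of such a
// zone, mirroring the literals used further down in this test:

    use std::collections::HashMap;

    let empty_zone = DnsConfigZone {
        zone_name: "my-zone".to_string(),
        records: HashMap::new(),
    };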
- let dns_empty = dns_config_empty(); + let dns_empty = &dns_config_empty().zones[0]; match dns_compute_update( &logctx.log, DnsGroup::Internal, "test-suite".to_string(), "test-suite".to_string(), - &dns_empty, - &dns_empty, + dns_empty, + dns_empty, ) { Ok(None) => (), Err(error) => { @@ -991,40 +971,26 @@ mod test { // Now let's do something a little less trivial. Set up two slightly // different DNS configurations, compute the database update, and make // sure it matches what we expect. - let dns_config1 = DnsConfigParams { - generation: 4, - time_created: chrono::Utc::now(), - zones: vec![DnsConfigZone { - zone_name: "my-zone".to_string(), - records: HashMap::from([ - ( - "ex1".to_string(), - vec![DnsRecord::A(Ipv4Addr::LOCALHOST)], - ), - ( - "ex2".to_string(), - vec![DnsRecord::A("192.168.1.3".parse().unwrap())], - ), - ]), - }], + let dns_zone1 = DnsConfigZone { + zone_name: "my-zone".to_string(), + records: HashMap::from([ + ("ex1".to_string(), vec![DnsRecord::A(Ipv4Addr::LOCALHOST)]), + ( + "ex2".to_string(), + vec![DnsRecord::A("192.168.1.3".parse().unwrap())], + ), + ]), }; - let dns_config2 = DnsConfigParams { - generation: 4, - time_created: chrono::Utc::now(), - zones: vec![DnsConfigZone { - zone_name: "my-zone".to_string(), - records: HashMap::from([ - ( - "ex2".to_string(), - vec![DnsRecord::A("192.168.1.4".parse().unwrap())], - ), - ( - "ex3".to_string(), - vec![DnsRecord::A(Ipv4Addr::LOCALHOST)], - ), - ]), - }], + let dns_zone2 = DnsConfigZone { + zone_name: "my-zone".to_string(), + records: HashMap::from([ + ( + "ex2".to_string(), + vec![DnsRecord::A("192.168.1.4".parse().unwrap())], + ), + ("ex3".to_string(), vec![DnsRecord::A(Ipv4Addr::LOCALHOST)]), + ]), }; let update = dns_compute_update( @@ -1032,8 +998,8 @@ mod test { DnsGroup::Internal, "test-suite".to_string(), "test-suite".to_string(), - &dns_config1, - &dns_config2, + &dns_zone1, + &dns_zone2, ) .expect("failed to compute update") .expect("unexpectedly produced no update"); @@ -1056,8 +1022,8 @@ mod test { ); // Test the difference between two configs whose SRV records differ. - let mut dns_config1 = dns_config1.clone(); - dns_config1.zones[0].records.insert( + let mut dns_zone1 = dns_zone1.clone(); + dns_zone1.records.insert( String::from("_nexus._tcp"), vec![ DnsRecord::Srv(Srv { @@ -1075,40 +1041,36 @@ mod test { ], ); // A clone of the same one should of course be the same as the original. - let mut dns_config2 = dns_config1.clone(); + let mut dns_zone2 = dns_zone1.clone(); let update = dns_compute_update( &logctx.log, DnsGroup::Internal, "test-suite".to_string(), "test-suite".to_string(), - &dns_config1, - &dns_config2, + &dns_zone1, + &dns_zone2, ) .expect("failed to compute update"); assert!(update.is_none()); // If we shift the order of the items, it should still reflect no // changes. - let records = - dns_config2.zones[0].records.get_mut("_nexus._tcp").unwrap(); + let records = dns_zone2.records.get_mut("_nexus._tcp").unwrap(); records.rotate_left(1); - assert!( - records != dns_config1.zones[0].records.get("_nexus._tcp").unwrap() - ); + assert!(records != dns_zone1.records.get("_nexus._tcp").unwrap()); let update = dns_compute_update( &logctx.log, DnsGroup::Internal, "test-suite".to_string(), "test-suite".to_string(), - &dns_config1, - &dns_config2, + &dns_zone1, + &dns_zone2, ) .expect("failed to compute update"); assert!(update.is_none()); // If we add another record, there should indeed be a new update. 
- let records = - dns_config2.zones[0].records.get_mut("_nexus._tcp").unwrap(); + let records = dns_zone2.records.get_mut("_nexus._tcp").unwrap(); records.push(DnsRecord::Srv(Srv { port: 123, prio: 1, @@ -1122,8 +1084,8 @@ mod test { DnsGroup::Internal, "test-suite".to_string(), "test-suite".to_string(), - &dns_config1, - &dns_config2, + &dns_zone1, + &dns_zone2, ) .expect("failed to compute update") .expect("expected an update"); @@ -1140,6 +1102,15 @@ mod test { logctx.cleanup_successful(); } + fn diff_sole_zones<'a>( + left: &'a DnsConfigParams, + right: &'a DnsConfigParams, + ) -> DnsDiff<'a> { + let left_zone = left.sole_zone().unwrap(); + let right_zone = right.sole_zone().unwrap(); + DnsDiff::new(left_zone, right_zone).unwrap() + } + // Tests end-to-end DNS behavior: // // - If we create a blueprint matching the current system, and then apply @@ -1161,7 +1132,7 @@ mod test { async fn test_silos_external_dns_end_to_end( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let log = &cptestctx.logctx.log; let opctx = OpContext::for_background( @@ -1182,12 +1153,14 @@ mod test { .expect("fetching initial external DNS"); // Fetch the initial blueprint installed during rack initialization. - let (_blueprint_target, blueprint) = datastore + let (_blueprint_target, mut blueprint) = datastore .blueprint_target_get_current_full(&opctx) .await - .expect("failed to read current target blueprint") - .expect("no target blueprint set"); - eprintln!("blueprint: {:?}", blueprint); + .expect("failed to read current target blueprint"); + eprintln!("blueprint: {}", blueprint.display()); + // Override the CockroachDB settings so that we don't try to set them. + blueprint.cockroachdb_setting_preserve_downgrade = + CockroachDbPreserveDowngrade::DoNotModify; // Now, execute the initial blueprint. let overrides = Overridables::for_test(cptestctx); @@ -1229,7 +1202,10 @@ mod test { // Now, go through the motions of provisioning a new Nexus zone. // We do this directly with BlueprintBuilder to avoid the planner // deciding to make other unrelated changes. - let sled_rows = datastore.sled_list_all_batched(&opctx).await.unwrap(); + let sled_rows = datastore + .sled_list_all_batched(&opctx, SledFilter::Commissioned) + .await + .unwrap(); let zpool_rows = datastore.zpool_list_all_external_batched(&opctx).await.unwrap(); let ip_pool_range_rows = { @@ -1240,29 +1216,45 @@ mod test { .await .unwrap() }; - let mut policy = policy_from_db( - &sled_rows, - &zpool_rows, - &ip_pool_range_rows, - // This is not used because we're not actually going through the - // planner. - NEXUS_REDUNDANCY, - ) - .unwrap(); - // We'll need another (fake) external IP for this new Nexus. - policy - .service_ip_pool_ranges - .push(IpRange::from(IpAddr::V4(Ipv4Addr::LOCALHOST))); + let planning_input = { + let mut builder = PlanningInputFromDb { + sled_rows: &sled_rows, + zpool_rows: &zpool_rows, + ip_pool_range_rows: &ip_pool_range_rows, + internal_dns_version: Generation::from( + u32::try_from(dns_initial_internal.generation).unwrap(), + ) + .into(), + external_dns_version: Generation::from( + u32::try_from(dns_latest_external.generation).unwrap(), + ) + .into(), + // These are not used because we're not actually going through + // the planner. 
+ cockroachdb_settings: &CockroachDbSettings::empty(), + external_ip_rows: &[], + service_nic_rows: &[], + target_nexus_zone_count: NEXUS_REDUNDANCY, + target_cockroachdb_cluster_version: + CockroachDbClusterVersion::POLICY, + log, + } + .build() + .unwrap() + .into_builder(); + + // We'll need another (fake) external IP for this new Nexus. + builder + .policy_mut() + .service_ip_pool_ranges + .push(IpRange::from(IpAddr::V4(Ipv4Addr::LOCALHOST))); + + builder.build() + }; let mut builder = BlueprintBuilder::new_based_on( &log, &blueprint, - Generation::from( - u32::try_from(dns_initial_internal.generation).unwrap(), - ), - Generation::from( - u32::try_from(dns_latest_external.generation).unwrap(), - ), - &policy, + &planning_input, "test suite", ) .unwrap(); @@ -1274,14 +1266,14 @@ mod test { .unwrap(); assert_eq!(rv, EnsureMultiple::Added(1)); let blueprint2 = builder.build(); - eprintln!("blueprint2: {:?}", blueprint2); + eprintln!("blueprint2: {}", blueprint2.display()); // Figure out the id of the new zone. let zones_before = blueprint - .all_omicron_zones() + .all_omicron_zones(BlueprintZoneFilter::All) .filter_map(|(_, z)| z.zone_type.is_nexus().then_some(z.id)) .collect::>(); let zones_after = blueprint2 - .all_omicron_zones() + .all_omicron_zones(BlueprintZoneFilter::All) .filter_map(|(_, z)| z.zone_type.is_nexus().then_some(z.id)) .collect::>(); let new_zones: Vec<_> = zones_after.difference(&zones_before).collect(); @@ -1328,16 +1320,14 @@ mod test { dns_initial_internal.generation + 1, ); - let diff = - DnsDiff::new(&dns_initial_internal, &dns_latest_internal).unwrap(); + let diff = diff_sole_zones(&dns_initial_internal, &dns_latest_internal); // There should be one new AAAA record for the zone itself. let new_records: Vec<_> = diff.names_added().collect(); let (new_name, &[DnsRecord::Aaaa(_)]) = new_records[0] else { panic!("did not find expected AAAA record for new Nexus zone"); }; let new_zone_host = internal_dns::config::Host::for_zone( - new_zone_id, - internal_dns::config::ZoneVariant::Other, + internal_dns::config::Zone::Other(new_zone_id), ); assert!(new_zone_host.fqdn().starts_with(new_name)); @@ -1368,7 +1358,7 @@ mod test { dns_previous_external.generation + 1, ); let diff = - DnsDiff::new(&dns_previous_external, &dns_latest_external).unwrap(); + diff_sole_zones(&dns_previous_external, &dns_latest_external); assert!(diff.names_added().next().is_none()); assert!(diff.names_removed().next().is_none()); let changed: Vec<_> = diff.names_changed().collect(); @@ -1494,7 +1484,7 @@ mod test { assert_eq!(old_external.generation + 1, dns_latest_external.generation); // Specifically, there should be one new name (for the new Silo). - let diff = DnsDiff::new(&old_external, &dns_latest_external).unwrap(); + let diff = diff_sole_zones(&old_external, &dns_latest_external); assert!(diff.names_removed().next().is_none()); assert!(diff.names_changed().next().is_none()); let added = diff.names_added().collect::>(); diff --git a/nexus/reconfigurator/execution/src/external_networking.rs b/nexus/reconfigurator/execution/src/external_networking.rs new file mode 100644 index 0000000000..13cf601135 --- /dev/null +++ b/nexus/reconfigurator/execution/src/external_networking.rs @@ -0,0 +1,1199 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Manages allocation and deallocation of external networking resources +//! 
required for blueprint realization + +use anyhow::bail; +use anyhow::Context; +use nexus_db_model::IncompleteNetworkInterface; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::fixed_data::vpc_subnet::DNS_VPC_SUBNET; +use nexus_db_queries::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; +use nexus_db_queries::db::fixed_data::vpc_subnet::NTP_VPC_SUBNET; +use nexus_db_queries::db::DataStore; +use nexus_types::deployment::BlueprintZoneConfig; +use nexus_types::deployment::OmicronZoneExternalIp; +use omicron_common::api::external::IdentityMetadataCreateParams; +use omicron_common::api::internal::shared::NetworkInterface; +use omicron_common::api::internal::shared::NetworkInterfaceKind; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::OmicronZoneUuid; +use sled_agent_client::ZoneKind; +use slog::debug; +use slog::error; +use slog::info; +use slog::warn; +use slog::Logger; +use slog_error_chain::InlineErrorChain; + +pub(crate) async fn ensure_zone_external_networking_allocated( + opctx: &OpContext, + datastore: &DataStore, + zones_to_allocate: impl Iterator, +) -> anyhow::Result<()> { + for z in zones_to_allocate { + let Some((external_ip, nic)) = z.zone_type.external_networking() else { + continue; + }; + + let log = opctx.log.new(slog::o!( + "action" => "allocate-external-networking", + "zone_kind" => z.zone_type.kind().to_string(), + "zone_id" => z.id.to_string(), + "ip" => format!("{external_ip:?}"), + "nic" => format!("{nic:?}"), + )); + + let kind = z.zone_type.kind(); + ensure_external_service_ip( + opctx, + datastore, + kind, + z.id, + external_ip, + &log, + ) + .await?; + ensure_service_nic(opctx, datastore, kind, z.id, nic, &log).await?; + } + + Ok(()) +} + +pub(crate) async fn ensure_zone_external_networking_deallocated( + opctx: &OpContext, + datastore: &DataStore, + zones_to_deallocate: impl Iterator, +) -> anyhow::Result<()> { + for z in zones_to_deallocate { + let Some((external_ip, nic)) = z.zone_type.external_networking() else { + continue; + }; + + let kind = z.zone_type.kind(); + let log = opctx.log.new(slog::o!( + "action" => "deallocate-external-networking", + "zone_kind" => z.zone_type.kind().to_string(), + "zone_id" => z.id.to_string(), + "ip" => format!("{external_ip:?}"), + "nic" => format!("{nic:?}"), + )); + + let deleted_ip = datastore + .deallocate_external_ip(opctx, external_ip.id().into_untyped_uuid()) + .await + .with_context(|| { + format!( + "failed to delete external IP {external_ip:?} \ + for {kind} zone {}", + z.id + ) + })?; + if deleted_ip { + info!(log, "successfully deleted Omicron zone external IP"); + } else { + debug!(log, "Omicron zone external IP already deleted"); + } + + let deleted_nic = datastore + .service_delete_network_interface( + opctx, + z.id.into_untyped_uuid(), + nic.id, + ) + .await + .with_context(|| { + format!( + "failed to delete service VNIC {nic:?} for {kind} zone {}", + z.id + ) + })?; + if deleted_nic { + info!(log, "successfully deleted Omicron zone vNIC"); + } else { + debug!(log, "Omicron zone vNIC already deleted"); + } + } + + Ok(()) +} + +// Helper function to determine whether a given external IP address is +// already allocated to a specific service zone. +async fn is_external_ip_already_allocated( + opctx: &OpContext, + datastore: &DataStore, + zone_kind: ZoneKind, + zone_id: OmicronZoneUuid, + external_ip: OmicronZoneExternalIp, + log: &Logger, +) -> anyhow::Result { + // localhost is used by many components in the test suite. 
We can't use + // the normal path because normally a given external IP must only be + // used once. Just treat localhost in the test suite as though it's + // already allocated. We do the same in is_nic_already_allocated(). + if cfg!(test) && external_ip.ip().is_loopback() { + return Ok(true); + } + + let allocated_ips = datastore + .external_ip_list_service(opctx, zone_id.into_untyped_uuid()) + .await + .with_context(|| { + format!("failed to look up external IPs for {zone_kind} {zone_id}") + })?; + + // We expect to find either 0 or exactly 1 IP for any given zone. If 0, + // we know the IP isn't allocated; if 1, we'll check that it matches + // below. + let existing_ip = match allocated_ips.as_slice() { + [] => { + info!(log, "external IP allocation required for zone"); + + return Ok(false); + } + [ip] => ip, + _ => { + warn!( + log, "zone has multiple IPs allocated"; + "allocated_ips" => ?allocated_ips, + ); + bail!( + "zone {zone_id} already has {} IPs allocated (expected 1)", + allocated_ips.len() + ); + } + }; + + // We expect this to always succeed; a failure here means we've stored + // an Omicron zone IP in the database that can't be converted back to an + // Omicron zone IP! + let existing_ip = match OmicronZoneExternalIp::try_from(existing_ip) { + Ok(existing_ip) => existing_ip, + Err(err) => { + error!(log, "invalid IP in database for zone"; &err); + bail!( + "zone {zone_id} has invalid IP database record: {}", + InlineErrorChain::new(&err) + ); + } + }; + + if existing_ip == external_ip { + info!(log, "found already-allocated external IP"); + Ok(true) + } else { + warn!( + log, "zone has unexpected IP allocated"; + "allocated_ip" => ?existing_ip, + ); + bail!("zone {zone_id} has a different IP allocated ({existing_ip:?})",); + } +} + +// Helper function to determine whether a given NIC is already allocated to +// a specific service zone. +async fn is_nic_already_allocated( + opctx: &OpContext, + datastore: &DataStore, + zone_kind: ZoneKind, + zone_id: OmicronZoneUuid, + nic: &NetworkInterface, + log: &Logger, +) -> anyhow::Result { + // See the comment in is_external_ip_already_allocated(). + if cfg!(test) && nic.ip.is_loopback() { + return Ok(true); + } + + let allocated_nics = datastore + .service_list_network_interfaces(opctx, zone_id.into_untyped_uuid()) + .await + .with_context(|| { + format!("failed to look up NICs for {zone_kind} {zone_id}") + })?; + + if !allocated_nics.is_empty() { + // All the service zones that want NICs only expect to have a single + // one. Bail out here if this zone already has one or more allocated + // NICs but not the one we think it needs. + // + // This doesn't check the allocated NIC's subnet against our NICs, + // because that would require an extra DB lookup. We'll assume if + // these main properties are correct, the subnet is too. 
+ for allocated_nic in &allocated_nics { + if allocated_nic.ip.ip() == nic.ip + && *allocated_nic.mac == nic.mac + && *allocated_nic.slot == nic.slot + && allocated_nic.primary == nic.primary + { + info!(log, "found already-allocated NIC"); + return Ok(true); + } + } + + warn!( + log, "zone has unexpected NICs allocated"; + "allocated_nics" => ?allocated_nics, + ); + + bail!( + "zone {zone_id} already has {} non-matching NIC(s) allocated", + allocated_nics.len() + ); + } + + info!(log, "NIC allocation required for zone"); + + Ok(false) +} + +async fn ensure_external_service_ip( + opctx: &OpContext, + datastore: &DataStore, + zone_kind: ZoneKind, + zone_id: OmicronZoneUuid, + external_ip: OmicronZoneExternalIp, + log: &Logger, +) -> anyhow::Result<()> { + // Only attempt to allocate `external_ip` if it isn't already assigned + // to this zone. + // + // Checking for the existing of the external IP and then creating it + // if not found inserts a classic TOCTOU race: what if another Nexus + // is running concurrently, we both check and see that the IP is not + // allocated, then both attempt to create it? We believe this is + // okay: the loser of the race (i.e., the one whose create tries to + // commit second) will fail to allocate the IP, which will bubble + // out and prevent realization of the current blueprint. That's + // exactly what we want if two Nexuses try to realize the same + // blueprint at the same time. + if is_external_ip_already_allocated( + opctx, + datastore, + zone_kind, + zone_id, + external_ip, + log, + ) + .await? + { + return Ok(()); + } + datastore + .external_ip_allocate_omicron_zone( + opctx, + zone_id, + zone_kind, + external_ip, + ) + .await + .with_context(|| { + format!( + "failed to allocate IP to {zone_kind} {zone_id}: \ + {external_ip:?}" + ) + })?; + + info!(log, "successfully allocated external IP"); + + Ok(()) +} + +// All service zones with external connectivity get service vNICs. +async fn ensure_service_nic( + opctx: &OpContext, + datastore: &DataStore, + zone_kind: ZoneKind, + service_id: OmicronZoneUuid, + nic: &NetworkInterface, + log: &Logger, +) -> anyhow::Result<()> { + // We don't pass `nic.kind` into the database below, but instead + // explicitly call `service_create_network_interface`. Ensure this is + // indeed a service NIC. + match &nic.kind { + NetworkInterfaceKind::Instance { .. } => { + bail!("invalid NIC kind (expected service, got instance)") + } + NetworkInterfaceKind::Probe { .. } => { + bail!("invalid NIC kind (expected service, got probe)") + } + NetworkInterfaceKind::Service { .. } => (), + } + + let nic_subnet = match zone_kind { + ZoneKind::BoundaryNtp => &*NTP_VPC_SUBNET, + ZoneKind::ExternalDns => &*DNS_VPC_SUBNET, + ZoneKind::Nexus => &*NEXUS_VPC_SUBNET, + ZoneKind::Clickhouse + | ZoneKind::ClickhouseKeeper + | ZoneKind::CockroachDb + | ZoneKind::Crucible + | ZoneKind::CruciblePantry + | ZoneKind::InternalDns + | ZoneKind::InternalNtp + | ZoneKind::Oximeter => { + bail!("no VPC subnet available for {zone_kind} zone") + } + }; + + // Only attempt to allocate `nic` if it isn't already assigned to this + // zone. + // + // This is subject to the same kind of TOCTOU race as described for IP + // allocation in `ensure_external_service_ip`, and we believe it's okay + // for the same reasons as described there. + if is_nic_already_allocated( + opctx, datastore, zone_kind, service_id, nic, log, + ) + .await? 
+ { + return Ok(()); + } + let nic_arg = IncompleteNetworkInterface::new_service( + nic.id, + service_id.into_untyped_uuid(), + nic_subnet.clone(), + IdentityMetadataCreateParams { + name: nic.name.clone(), + description: format!("{zone_kind} service vNIC"), + }, + nic.ip, + nic.mac, + nic.slot, + ) + .with_context(|| { + format!( + "failed to convert NIC into IncompleteNetworkInterface: {nic:?}" + ) + })?; + let created_nic = datastore + .service_create_network_interface(opctx, nic_arg) + .await + .map_err(|err| err.into_external()) + .with_context(|| { + format!( + "failed to allocate NIC to {zone_kind} {service_id}: \ + {nic:?}" + ) + })?; + + // We don't pass all the properties of `nic` into the create request + // above. Double-check that the properties the DB assigned match + // what we expect. + // + // We do not check `nic.vni`, because it's not stored in the + // database. (All services are given the constant vni + // `Vni::SERVICES_VNI`.) + if created_nic.primary != nic.primary || *created_nic.slot != nic.slot { + warn!( + log, "unexpected property on allocated NIC"; + "allocated_primary" => created_nic.primary, + "allocated_slot" => *created_nic.slot, + ); + + // Now what? We've allocated a NIC in the database but it's + // incorrect. Should we try to delete it? That would be best + // effort (we could fail to delete, or we could crash between + // creation and deletion). + // + // We only expect services to have one NIC, so the only way it + // should be possible to get a different primary/slot value is + // if somehow this same service got a _different_ NIC allocated + // to it in the TOCTOU race window above. That should be + // impossible with the way we generate blueprints, so we'll just + // return a scary error here and expect to never see it. 
+ bail!( + "database cleanup required: unexpected NIC ({created_nic:?}) \ + allocated for {zone_kind} {service_id}" + ); + } + + info!(log, "successfully allocated service vNIC"); + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use async_bb8_diesel::AsyncSimpleConnection; + use chrono::DateTime; + use chrono::Utc; + use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; + use nexus_db_model::SqlU16; + use nexus_db_queries::db::queries::ALLOW_FULL_TABLE_SCAN_SQL; + use nexus_test_utils_macros::nexus_test; + use nexus_types::deployment::blueprint_zone_type; + use nexus_types::deployment::BlueprintZoneConfig; + use nexus_types::deployment::BlueprintZoneDisposition; + use nexus_types::deployment::BlueprintZoneType; + use nexus_types::deployment::OmicronZoneDataset; + use nexus_types::deployment::OmicronZoneExternalFloatingAddr; + use nexus_types::deployment::OmicronZoneExternalFloatingIp; + use nexus_types::deployment::OmicronZoneExternalSnatIp; + use nexus_types::identity::Resource; + use nexus_types::inventory::SourceNatConfig; + use omicron_common::address::IpRange; + use omicron_common::address::IpRangeIter; + use omicron_common::address::DNS_OPTE_IPV4_SUBNET; + use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; + use omicron_common::address::NTP_OPTE_IPV4_SUBNET; + use omicron_common::address::NUM_SOURCE_NAT_PORTS; + use omicron_common::api::external::MacAddr; + use omicron_common::api::external::Vni; + use omicron_uuid_kinds::ExternalIpUuid; + use oxnet::IpNet; + use std::net::IpAddr; + use std::net::Ipv6Addr; + use std::net::SocketAddr; + use uuid::Uuid; + + type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + + struct Harness { + external_ips_range: IpRange, + external_ips: IpRangeIter, + + nexus_id: OmicronZoneUuid, + nexus_external_ip: OmicronZoneExternalFloatingIp, + nexus_nic: NetworkInterface, + dns_id: OmicronZoneUuid, + dns_external_addr: OmicronZoneExternalFloatingAddr, + dns_nic: NetworkInterface, + ntp_id: OmicronZoneUuid, + ntp_external_ip: OmicronZoneExternalSnatIp, + ntp_nic: NetworkInterface, + } + + impl Harness { + fn new() -> Self { + let external_ips_range = IpRange::try_from(( + "192.0.2.1".parse::().unwrap(), + "192.0.2.100".parse::().unwrap(), + )) + .expect("bad IP range"); + let mut external_ips = external_ips_range.iter(); + + let nexus_id = OmicronZoneUuid::new_v4(); + let nexus_external_ip = OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip: external_ips.next().expect("exhausted external_ips"), + }; + let nexus_nic = NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: nexus_id.into_untyped_uuid(), + }, + name: "test-nexus".parse().expect("bad name"), + ip: NEXUS_OPTE_IPV4_SUBNET + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) + .unwrap() + .into(), + mac: MacAddr::random_system(), + subnet: IpNet::from(*NEXUS_OPTE_IPV4_SUBNET), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }; + + let dns_id = OmicronZoneUuid::new_v4(); + let dns_external_addr = OmicronZoneExternalFloatingAddr { + id: ExternalIpUuid::new_v4(), + addr: SocketAddr::new( + external_ips.next().expect("exhausted external_ips"), + 0, + ), + }; + let dns_nic = NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: dns_id.into_untyped_uuid(), + }, + name: "test-external-dns".parse().expect("bad name"), + ip: DNS_OPTE_IPV4_SUBNET + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) + .unwrap() + .into(), + mac: MacAddr::random_system(), + subnet: IpNet::from(*DNS_OPTE_IPV4_SUBNET), + vni: 
Vni::SERVICES_VNI, + primary: true, + slot: 0, + }; + + // Boundary NTP: + let ntp_id = OmicronZoneUuid::new_v4(); + let ntp_external_ip = OmicronZoneExternalSnatIp { + id: ExternalIpUuid::new_v4(), + snat_cfg: SourceNatConfig::new( + external_ips.next().expect("exhausted external_ips"), + NUM_SOURCE_NAT_PORTS, + 2 * NUM_SOURCE_NAT_PORTS - 1, + ) + .unwrap(), + }; + let ntp_nic = NetworkInterface { + id: Uuid::new_v4(), + kind: NetworkInterfaceKind::Service { + id: ntp_id.into_untyped_uuid(), + }, + name: "test-external-ntp".parse().expect("bad name"), + ip: NTP_OPTE_IPV4_SUBNET + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) + .unwrap() + .into(), + mac: MacAddr::random_system(), + subnet: IpNet::from(*NTP_OPTE_IPV4_SUBNET), + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + }; + + Self { + external_ips_range, + external_ips, + nexus_id, + nexus_external_ip, + nexus_nic, + dns_id, + dns_external_addr, + dns_nic, + ntp_id, + ntp_external_ip, + ntp_nic, + } + } + + async fn set_up_service_ip_pool( + &self, + opctx: &OpContext, + datastore: &DataStore, + ) { + let (ip_pool, _) = datastore + .ip_pools_service_lookup(&opctx) + .await + .expect("failed to find service IP pool"); + datastore + .ip_pool_add_range(&opctx, &ip_pool, &self.external_ips_range) + .await + .expect("failed to expand service IP pool"); + } + + fn zone_configs(&self) -> Vec { + vec![ + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: self.nexus_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address: "[::1]:0".parse().unwrap(), + external_ip: self.nexus_external_ip, + nic: self.nexus_nic.clone(), + external_tls: false, + external_dns_servers: Vec::new(), + }, + ), + }, + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: self.dns_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { + dataset: OmicronZoneDataset { + pool_name: format!("oxp_{}", Uuid::new_v4()) + .parse() + .expect("bad name"), + }, + http_address: "[::1]:0".parse().unwrap(), + dns_address: self.dns_external_addr, + nic: self.dns_nic.clone(), + }, + ), + }, + BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: self.ntp_id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type: BlueprintZoneType::BoundaryNtp( + blueprint_zone_type::BoundaryNtp { + address: "[::1]:0".parse().unwrap(), + ntp_servers: Vec::new(), + dns_servers: Vec::new(), + domain: None, + nic: self.ntp_nic.clone(), + external_ip: self.ntp_external_ip, + }, + ), + }, + ] + } + + async fn assert_ips_exist_in_datastore( + &self, + opctx: &OpContext, + datastore: &DataStore, + ) { + let db_nexus_ips = datastore + .external_ip_list_service( + &opctx, + self.nexus_id.into_untyped_uuid(), + ) + .await + .expect("failed to get external IPs"); + assert_eq!(db_nexus_ips.len(), 1); + assert!(db_nexus_ips[0].is_service); + assert_eq!( + db_nexus_ips[0].parent_id, + Some(self.nexus_id.into_untyped_uuid()) + ); + assert_eq!( + db_nexus_ips[0].id, + self.nexus_external_ip.id.into_untyped_uuid() + ); + assert_eq!(db_nexus_ips[0].ip, self.nexus_external_ip.ip.into()); + assert_eq!(db_nexus_ips[0].first_port, SqlU16(0)); + assert_eq!(db_nexus_ips[0].last_port, SqlU16(65535)); + + let db_dns_ips = datastore + .external_ip_list_service( + &opctx, + self.dns_id.into_untyped_uuid(), + ) + .await + .expect("failed to get external IPs"); + assert_eq!(db_dns_ips.len(), 1); + 
assert!(db_dns_ips[0].is_service); + assert_eq!( + db_dns_ips[0].parent_id, + Some(self.dns_id.into_untyped_uuid()) + ); + assert_eq!( + db_dns_ips[0].id, + self.dns_external_addr.id.into_untyped_uuid() + ); + assert_eq!( + db_dns_ips[0].ip, + self.dns_external_addr.addr.ip().into() + ); + assert_eq!(db_dns_ips[0].first_port, SqlU16(0)); + assert_eq!(db_dns_ips[0].last_port, SqlU16(65535)); + + let db_ntp_ips = datastore + .external_ip_list_service( + &opctx, + self.ntp_id.into_untyped_uuid(), + ) + .await + .expect("failed to get external IPs"); + assert_eq!(db_ntp_ips.len(), 1); + assert!(db_ntp_ips[0].is_service); + assert_eq!( + db_ntp_ips[0].parent_id, + Some(self.ntp_id.into_untyped_uuid()) + ); + assert_eq!( + db_ntp_ips[0].id, + self.ntp_external_ip.id.into_untyped_uuid() + ); + assert_eq!( + db_ntp_ips[0].ip, + self.ntp_external_ip.snat_cfg.ip.into() + ); + assert_eq!( + db_ntp_ips[0].first_port.0..=db_ntp_ips[0].last_port.0, + self.ntp_external_ip.snat_cfg.port_range() + ); + } + + async fn assert_nics_exist_in_datastore( + &self, + opctx: &OpContext, + datastore: &DataStore, + ) { + let db_nexus_nics = datastore + .service_list_network_interfaces( + &opctx, + self.nexus_id.into_untyped_uuid(), + ) + .await + .expect("failed to get NICs"); + assert_eq!(db_nexus_nics.len(), 1); + assert_eq!(db_nexus_nics[0].id(), self.nexus_nic.id); + assert_eq!( + db_nexus_nics[0].service_id, + self.nexus_id.into_untyped_uuid() + ); + assert_eq!(db_nexus_nics[0].vpc_id, NEXUS_VPC_SUBNET.vpc_id); + assert_eq!(db_nexus_nics[0].subnet_id, NEXUS_VPC_SUBNET.id()); + assert_eq!(*db_nexus_nics[0].mac, self.nexus_nic.mac); + assert_eq!(db_nexus_nics[0].ip, self.nexus_nic.ip.into()); + assert_eq!(*db_nexus_nics[0].slot, self.nexus_nic.slot); + assert_eq!(db_nexus_nics[0].primary, self.nexus_nic.primary); + + let db_dns_nics = datastore + .service_list_network_interfaces( + &opctx, + self.dns_id.into_untyped_uuid(), + ) + .await + .expect("failed to get NICs"); + assert_eq!(db_dns_nics.len(), 1); + assert_eq!(db_dns_nics[0].id(), self.dns_nic.id); + assert_eq!( + db_dns_nics[0].service_id, + self.dns_id.into_untyped_uuid() + ); + assert_eq!(db_dns_nics[0].vpc_id, DNS_VPC_SUBNET.vpc_id); + assert_eq!(db_dns_nics[0].subnet_id, DNS_VPC_SUBNET.id()); + assert_eq!(*db_dns_nics[0].mac, self.dns_nic.mac); + assert_eq!(db_dns_nics[0].ip, self.dns_nic.ip.into()); + assert_eq!(*db_dns_nics[0].slot, self.dns_nic.slot); + assert_eq!(db_dns_nics[0].primary, self.dns_nic.primary); + + let db_ntp_nics = datastore + .service_list_network_interfaces( + &opctx, + self.ntp_id.into_untyped_uuid(), + ) + .await + .expect("failed to get NICs"); + assert_eq!(db_ntp_nics.len(), 1); + assert_eq!(db_ntp_nics[0].id(), self.ntp_nic.id); + assert_eq!( + db_ntp_nics[0].service_id, + self.ntp_id.into_untyped_uuid() + ); + assert_eq!(db_ntp_nics[0].vpc_id, NTP_VPC_SUBNET.vpc_id); + assert_eq!(db_ntp_nics[0].subnet_id, NTP_VPC_SUBNET.id()); + assert_eq!(*db_ntp_nics[0].mac, self.ntp_nic.mac); + assert_eq!(db_ntp_nics[0].ip, self.ntp_nic.ip.into()); + assert_eq!(*db_ntp_nics[0].slot, self.ntp_nic.slot); + assert_eq!(db_ntp_nics[0].primary, self.ntp_nic.primary); + } + + async fn assert_ips_are_deleted_in_datastore( + &self, + datastore: &DataStore, + ) { + use async_bb8_diesel::AsyncRunQueryDsl; + use diesel::prelude::*; + use nexus_db_model::schema::external_ip::dsl; + + let conn = datastore.pool_connection_for_tests().await.unwrap(); + let ips: Vec<(Uuid, Option>)> = datastore + .transaction_retry_wrapper("read_external_ips") + 
.transaction(&conn, |conn| async move { + conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL) + .await + .unwrap(); + Ok(dsl::external_ip + .filter(dsl::parent_id.eq_any([ + self.nexus_id.into_untyped_uuid(), + self.dns_id.into_untyped_uuid(), + self.ntp_id.into_untyped_uuid(), + ])) + .select((dsl::id, dsl::time_deleted)) + .get_results_async(&conn) + .await + .unwrap()) + }) + .await + .unwrap(); + + for (id, time_deleted) in &ips { + eprintln!("{id} {time_deleted:?}"); + } + + // We should have found records for all three zone IPs. + assert_eq!(ips.len(), 3); + assert!(ips.iter().any( + |(id, _)| id == self.nexus_external_ip.id.as_untyped_uuid() + )); + assert!(ips.iter().any( + |(id, _)| id == self.dns_external_addr.id.as_untyped_uuid() + )); + assert!( + ips.iter() + .any(|(id, _)| id + == self.ntp_external_ip.id.as_untyped_uuid()) + ); + + // All rows should indicate deleted records. + assert!(ips.iter().all(|(_, time_deleted)| time_deleted.is_some())); + } + + async fn assert_nics_are_deleted_in_datastore( + &self, + datastore: &DataStore, + ) { + use async_bb8_diesel::AsyncRunQueryDsl; + use diesel::prelude::*; + use nexus_db_model::schema::service_network_interface::dsl; + + let conn = datastore.pool_connection_for_tests().await.unwrap(); + let nics: Vec<(Uuid, Option>)> = datastore + .transaction_retry_wrapper("read_external_ips") + .transaction(&conn, |conn| async move { + conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL) + .await + .unwrap(); + Ok(dsl::service_network_interface + .filter(dsl::service_id.eq_any([ + self.nexus_id.into_untyped_uuid(), + self.dns_id.into_untyped_uuid(), + self.ntp_id.into_untyped_uuid(), + ])) + .select((dsl::id, dsl::time_deleted)) + .get_results_async(&conn) + .await + .unwrap()) + }) + .await + .unwrap(); + + for (id, time_deleted) in &nics { + eprintln!("{id} {time_deleted:?}"); + } + + // We should have found records for all three zone NICs. + assert_eq!(nics.len(), 3); + assert!(nics.iter().any(|(id, _)| *id == self.nexus_nic.id)); + assert!(nics.iter().any(|(id, _)| *id == self.dns_nic.id)); + assert!(nics.iter().any(|(id, _)| *id == self.ntp_nic.id)); + + // All rows should indicate deleted records. + assert!(nics + .iter() + .all(|(_, time_deleted)| time_deleted.is_some())); + } + } + + #[nexus_test] + async fn test_allocate_external_networking( + cptestctx: &ControlPlaneTestContext, + ) { + // Set up. + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + + // Generate the test values we care about. + let mut harness = Harness::new(); + harness.set_up_service_ip_pool(&opctx, datastore).await; + + // Build the `zones` map needed by `ensure_zone_resources_allocated`, + // with an arbitrary sled_id. + let zones = harness.zone_configs(); + + // Initialize resource allocation: this should succeed and create all + // the relevant db records. + ensure_zone_external_networking_allocated( + &opctx, + datastore, + zones.iter(), + ) + .await + .with_context(|| format!("{zones:#?}")) + .unwrap(); + + // Check that the external IP and NIC records were created. + harness.assert_ips_exist_in_datastore(&opctx, datastore).await; + harness.assert_nics_exist_in_datastore(&opctx, datastore).await; + + // We should be able to run the function again with the same inputs, and + // it should succeed without inserting any new records. 
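// This second pass should hit the `is_external_ip_already_allocated` and
// `is_nic_already_allocated` checks defined above: both should find the
// existing records and return early rather than attempting another insert.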
+ ensure_zone_external_networking_allocated( + &opctx, + datastore, + zones.iter(), + ) + .await + .with_context(|| format!("{zones:#?}")) + .unwrap(); + harness.assert_ips_exist_in_datastore(&opctx, datastore).await; + harness.assert_nics_exist_in_datastore(&opctx, datastore).await; + + // Now that we've tested the happy path, try some requests that ought to + // fail because the request includes an external IP that doesn't match + // the already-allocated external IPs from above. + let bogus_ip = + harness.external_ips.next().expect("exhausted external_ips"); + for mutate_zones_fn in [ + // non-matching IP on Nexus + (&|zones: &mut [BlueprintZoneConfig]| { + for zone in zones { + if let BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + ref mut external_ip, .. + }, + ) = &mut zone.zone_type + { + external_ip.ip = bogus_ip; + return format!( + "zone {} has a different IP allocated", + zone.id + ); + } + } + + panic!("didn't find expected zone"); + }) as &dyn Fn(&mut [BlueprintZoneConfig]) -> String, + // non-matching IP on External DNS + &|zones| { + for zone in zones { + if let BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { + ref mut dns_address, + .. + }, + ) = &mut zone.zone_type + { + dns_address.addr.set_ip(bogus_ip); + return format!( + "zone {} has a different IP allocated", + zone.id + ); + } + } + panic!("didn't find expected zone"); + }, + // non-matching SNAT port range on Boundary NTP + &|zones| { + for zone in zones { + if let BlueprintZoneType::BoundaryNtp( + blueprint_zone_type::BoundaryNtp { + ref mut external_ip, + .. + }, + ) = &mut zone.zone_type + { + let (mut first, mut last) = + external_ip.snat_cfg.port_range_raw(); + first += NUM_SOURCE_NAT_PORTS; + last += NUM_SOURCE_NAT_PORTS; + external_ip.snat_cfg = SourceNatConfig::new( + external_ip.snat_cfg.ip, + first, + last, + ) + .unwrap(); + return format!( + "zone {} has a different IP allocated", + zone.id + ); + } + } + panic!("didn't find expected zone"); + }, + ] { + // Run `mutate_zones_fn` on our config... + let (mutated_zones, expected_error) = { + let mut zones = zones.clone(); + let expected_error = mutate_zones_fn(&mut zones); + (zones, expected_error) + }; + + // and check that we get the error we expect. + let err = ensure_zone_external_networking_allocated( + &opctx, + datastore, + mutated_zones.iter(), + ) + .await + .expect_err("unexpected success"); + assert!( + err.to_string().contains(&expected_error), + "expected {expected_error:?}, got {err:#}" + ); + } + + // Also try some requests that ought to fail because the request + // includes a NIC that doesn't match the already-allocated NICs from + // above. + // + // All three zone types have a `nic` property, so here our mutating + // function only modifies that, and the body of our loop tries it on all + // three to ensure we get the errors we expect no matter the zone type. + for mutate_nic_fn in [ + // switch kind from Service to Instance + (&|_: OmicronZoneUuid, nic: &mut NetworkInterface| { + match &nic.kind { + NetworkInterfaceKind::Instance { .. } => { + panic!( + "invalid NIC kind (expected service, got instance)" + ) + } + NetworkInterfaceKind::Probe { .. 
} => { + panic!( + "invalid NIC kind (expected service, got instance)" + ) + } + NetworkInterfaceKind::Service { id } => { + let id = *id; + nic.kind = NetworkInterfaceKind::Instance { id }; + } + } + "invalid NIC kind".to_string() + }) + as &dyn Fn(OmicronZoneUuid, &mut NetworkInterface) -> String, + // non-matching IP + &|zone_id, nic| { + nic.ip = bogus_ip; + format!("zone {zone_id} already has 1 non-matching NIC") + }, + ] { + // Try this NIC mutation on Nexus... + let mut mutated_zones = zones.clone(); + for zone in &mut mutated_zones { + if let BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + ref mut nic, + .. + }) = &mut zone.zone_type + { + let expected_error = mutate_nic_fn(zone.id, nic); + + let err = ensure_zone_external_networking_allocated( + &opctx, + datastore, + mutated_zones.iter(), + ) + .await + .expect_err("unexpected success"); + + assert!( + err.to_string().contains(&expected_error), + "expected {expected_error:?}, got {err:#}" + ); + + break; + } + } + + // ... and again on ExternalDns + let mut mutated_zones = zones.clone(); + for zone in &mut mutated_zones { + if let BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { ref mut nic, .. }, + ) = &mut zone.zone_type + { + let expected_error = mutate_nic_fn(zone.id, nic); + + let err = ensure_zone_external_networking_allocated( + &opctx, + datastore, + mutated_zones.iter(), + ) + .await + .expect_err("unexpected success"); + + assert!( + err.to_string().contains(&expected_error), + "expected {expected_error:?}, got {err:#}" + ); + + break; + } + } + + // ... and again on BoundaryNtp + let mut mutated_zones = zones.clone(); + for zone in &mut mutated_zones { + if let BlueprintZoneType::BoundaryNtp( + blueprint_zone_type::BoundaryNtp { ref mut nic, .. }, + ) = &mut zone.zone_type + { + let expected_error = mutate_nic_fn(zone.id, nic); + + let err = ensure_zone_external_networking_allocated( + &opctx, + datastore, + mutated_zones.iter(), + ) + .await + .expect_err("unexpected success"); + + assert!( + err.to_string().contains(&expected_error), + "expected {expected_error:?}, got {err:#}" + ); + + break; + } + } + } + } + + #[nexus_test] + async fn test_deallocate_external_networking( + cptestctx: &ControlPlaneTestContext, + ) { + // Set up. + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + + // Generate the test values we care about. + let harness = Harness::new(); + harness.set_up_service_ip_pool(&opctx, datastore).await; + + // Build the `zones` map needed by `ensure_zone_resources_allocated`, + // with an arbitrary sled_id. + let zones = harness.zone_configs(); + + // Initialize resource allocation: this should succeed and create all + // the relevant db records. + ensure_zone_external_networking_allocated( + &opctx, + datastore, + zones.iter(), + ) + .await + .with_context(|| format!("{zones:#?}")) + .unwrap(); + + // Check that the external IP and NIC records were created. + harness.assert_ips_exist_in_datastore(&opctx, datastore).await; + harness.assert_nics_exist_in_datastore(&opctx, datastore).await; + + // Deallocate resources: this should succeed and mark all relevant db + // records deleted. 
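// Note that "deleted" here means soft-deleted: the assertion helpers below
// read the raw `external_ip` and `service_network_interface` tables and check
// that `time_deleted` is populated, not that the rows are gone.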
+ ensure_zone_external_networking_deallocated( + &opctx, + datastore, + zones.iter(), + ) + .await + .with_context(|| format!("{zones:#?}")) + .unwrap(); + + harness.assert_ips_are_deleted_in_datastore(datastore).await; + harness.assert_nics_are_deleted_in_datastore(datastore).await; + + // This operation should be idempotent: we can run it again, and the + // records remain deleted. + ensure_zone_external_networking_deallocated( + &opctx, + datastore, + zones.iter(), + ) + .await + .with_context(|| format!("{zones:#?}")) + .unwrap(); + + harness.assert_ips_are_deleted_in_datastore(datastore).await; + harness.assert_nics_are_deleted_in_datastore(datastore).await; + } +} diff --git a/nexus/reconfigurator/execution/src/lib.rs b/nexus/reconfigurator/execution/src/lib.rs index 1373c9a31f..63bb4b24f0 100644 --- a/nexus/reconfigurator/execution/src/lib.rs +++ b/nexus/reconfigurator/execution/src/lib.rs @@ -10,32 +10,50 @@ use anyhow::{anyhow, Context}; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintZoneFilter; +use nexus_types::deployment::SledFilter; +use nexus_types::external_api::views::SledState; use nexus_types::identity::Asset; use omicron_common::address::Ipv6Subnet; use omicron_common::address::SLED_PREFIX; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use overridables::Overridables; use slog::info; use slog_error_chain::InlineErrorChain; use std::collections::BTreeMap; use std::net::SocketAddrV6; -use uuid::Uuid; - -pub use dns::silo_dns_name; +mod cockroachdb; mod datasets; mod dns; +mod external_networking; +mod omicron_physical_disks; mod omicron_zones; mod overridables; -mod resource_allocation; +mod sled_state; + +pub use dns::blueprint_external_dns_config; +pub use dns::blueprint_internal_dns_config; +pub use dns::blueprint_nexus_external_ips; +pub use dns::silo_dns_name; -struct Sled { - id: Uuid, +pub struct Sled { + id: SledUuid, sled_agent_address: SocketAddrV6, is_scrimlet: bool, } impl Sled { - pub fn subnet(&self) -> Ipv6Subnet { + pub fn new( + id: SledUuid, + sled_agent_address: SocketAddrV6, + is_scrimlet: bool, + ) -> Sled { + Sled { id, sled_agent_address, is_scrimlet } + } + + pub(crate) fn subnet(&self) -> Ipv6Subnet { Ipv6Subnet::::new(*self.sled_agent_address.ip()) } } @@ -43,7 +61,7 @@ impl Sled { impl From for Sled { fn from(value: nexus_db_model::Sled) -> Self { Sled { - id: value.id(), + id: SledUuid::from_untyped_uuid(value.id()), sled_agent_address: value.address(), is_scrimlet: value.is_scrimlet(), } @@ -95,22 +113,50 @@ where "blueprint_id" => %blueprint.id ); - resource_allocation::ensure_zone_resources_allocated( + // Deallocate external networking resources for non-externally-reachable + // zones first. This will allow external networking resource allocation to + // succeed if we are swapping an external IP between two zones (e.g., moving + // a specific external IP from an old external DNS zone to a new one). 
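// Concretely: if an expunged external DNS zone still holds a floating IP
// (say 192.0.2.10, purely illustrative) and a zone in this blueprint is
// assigned that same address, allocating the new zone's IP first would fail
// because the address is still recorded as in use; deallocating first frees
// it for the allocation pass below.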
+ external_networking::ensure_zone_external_networking_deallocated( &opctx, datastore, - blueprint.all_omicron_zones().map(|(_sled_id, zone)| zone), + blueprint + .all_omicron_zones_not_in( + BlueprintZoneFilter::ShouldBeExternallyReachable, + ) + .map(|(_sled_id, zone)| zone), ) .await .map_err(|err| vec![err])?; - let sleds_by_id: BTreeMap = datastore - .sled_list_all_batched(&opctx) + external_networking::ensure_zone_external_networking_allocated( + &opctx, + datastore, + blueprint + .all_omicron_zones(BlueprintZoneFilter::ShouldBeExternallyReachable) + .map(|(_sled_id, zone)| zone), + ) + .await + .map_err(|err| vec![err])?; + + let sleds_by_id: BTreeMap = datastore + .sled_list_all_batched(&opctx, SledFilter::InService) .await .context("listing all sleds") .map_err(|e| vec![e])? .into_iter() - .map(|db_sled| (db_sled.id(), Sled::from(db_sled))) + .map(|db_sled| { + (SledUuid::from_untyped_uuid(db_sled.id()), Sled::from(db_sled)) + }) .collect(); + + omicron_physical_disks::deploy_disks( + &opctx, + &sleds_by_id, + &blueprint.blueprint_disks, + ) + .await?; + omicron_zones::deploy_zones( &opctx, &sleds_by_id, @@ -140,7 +186,9 @@ where datasets::ensure_crucible_dataset_records_exist( &opctx, datastore, - blueprint.all_omicron_zones().map(|(_sled_id, zone)| zone), + blueprint + .all_omicron_zones(BlueprintZoneFilter::ShouldBeRunning) + .map(|(_sled_id, zone)| zone), ) .await .map_err(|err| vec![err])?; @@ -151,10 +199,28 @@ where String::from(nexus_label), blueprint, &sleds_by_id, - &overrides, + overrides, ) .await .map_err(|e| vec![anyhow!("{}", InlineErrorChain::new(&e))])?; + sled_state::decommission_sleds( + &opctx, + datastore, + blueprint + .sled_state + .iter() + .filter(|&(_, &state)| state == SledState::Decommissioned) + .map(|(&sled_id, _)| sled_id), + ) + .await?; + + // This is likely to error if any cluster upgrades are in progress (which + // can take some time), so it should remain at the end so that other parts + // of the blueprint can progress normally. + cockroachdb::ensure_settings(&opctx, datastore, blueprint) + .await + .map_err(|err| vec![err])?; + Ok(()) } diff --git a/nexus/reconfigurator/execution/src/omicron_physical_disks.rs b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs new file mode 100644 index 0000000000..d7d8604e7e --- /dev/null +++ b/nexus/reconfigurator/execution/src/omicron_physical_disks.rs @@ -0,0 +1,353 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Manges deployment of Omicron physical disks to Sled Agents. 
+ +use crate::Sled; +use anyhow::anyhow; +use anyhow::Context; +use futures::stream; +use futures::StreamExt; +use nexus_db_queries::context::OpContext; +use nexus_types::deployment::BlueprintPhysicalDisksConfig; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; +use slog::info; +use slog::o; +use slog::warn; +use std::collections::BTreeMap; + +/// Idempotently ensure that the specified Omicron disks are deployed to the +/// corresponding sleds +pub(crate) async fn deploy_disks( + opctx: &OpContext, + sleds_by_id: &BTreeMap, + sled_configs: &BTreeMap, +) -> Result<(), Vec> { + let errors: Vec<_> = stream::iter(sled_configs) + .filter_map(|(sled_id, config)| async move { + let log = opctx.log.new(o!( + "sled_id" => sled_id.to_string(), + "generation" => config.generation.to_string(), + )); + + let db_sled = match sleds_by_id.get(&sled_id) { + Some(sled) => sled, + None => { + let err = anyhow!("sled not found in db list: {}", sled_id); + warn!(log, "{err:#}"); + return Some(err); + } + }; + + let client = nexus_networking::sled_client_from_address( + sled_id.into_untyped_uuid(), + db_sled.sled_agent_address, + &log, + ); + let result = + client.omicron_physical_disks_put(&config).await.with_context( + || format!("Failed to put {config:#?} to sled {sled_id}"), + ); + match result { + Err(error) => { + warn!(log, "{error:#}"); + Some(error) + } + Ok(result) => { + let (errs, successes): (Vec<_>, Vec<_>) = result + .into_inner() + .status + .into_iter() + .partition(|status| status.err.is_some()); + + if !errs.is_empty() { + warn!( + log, + "Failed to deploy storage for sled agent"; + "successfully configured disks" => successes.len(), + "failed disk configurations" => errs.len(), + ); + for err in &errs { + warn!(log, "{err:?}"); + } + return Some(anyhow!( + "failure deploying disks: {:?}", + errs + )); + } + + info!( + log, + "Successfully deployed storage for sled agent"; + "successfully configured disks" => successes.len(), + ); + None + } + } + }) + .collect() + .await; + + if errors.is_empty() { + Ok(()) + } else { + Err(errors) + } +} + +#[cfg(test)] +mod test { + use super::deploy_disks; + use crate::Sled; + use httptest::matchers::{all_of, json_decoded, request}; + use httptest::responders::json_encoded; + use httptest::responders::status_code; + use httptest::Expectation; + use nexus_db_queries::context::OpContext; + use nexus_test_utils_macros::nexus_test; + use nexus_types::deployment::{ + Blueprint, BlueprintPhysicalDiskConfig, BlueprintPhysicalDisksConfig, + BlueprintTarget, CockroachDbPreserveDowngrade, + }; + use omicron_common::api::external::Generation; + use omicron_common::disk::DiskIdentity; + use omicron_uuid_kinds::SledUuid; + use omicron_uuid_kinds::ZpoolUuid; + use std::collections::BTreeMap; + use std::net::SocketAddr; + use uuid::Uuid; + + type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + + fn create_blueprint( + blueprint_disks: BTreeMap, + ) -> (BlueprintTarget, Blueprint) { + let id = Uuid::new_v4(); + ( + BlueprintTarget { + target_id: id, + enabled: true, + time_made_target: chrono::Utc::now(), + }, + Blueprint { + id, + blueprint_zones: BTreeMap::new(), + blueprint_disks, + sled_state: BTreeMap::new(), + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, + parent_blueprint_id: None, + internal_dns_version: Generation::new(), + external_dns_version: Generation::new(), + cockroachdb_fingerprint: String::new(), + time_created: chrono::Utc::now(), + creator: "test".to_string(), + 
comment: "test blueprint".to_string(), + }, + ) + } + + #[nexus_test] + async fn test_deploy_omicron_disks(cptestctx: &ControlPlaneTestContext) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + + // Create some fake sled-agent servers to respond to disk puts and add + // sleds to CRDB. + let mut s1 = httptest::Server::run(); + let mut s2 = httptest::Server::run(); + let sled_id1 = SledUuid::new_v4(); + let sled_id2 = SledUuid::new_v4(); + let sleds_by_id: BTreeMap = + [(sled_id1, &s1), (sled_id2, &s2)] + .into_iter() + .map(|(sled_id, server)| { + let SocketAddr::V6(addr) = server.addr() else { + panic!("Expected Ipv6 address. Got {}", server.addr()); + }; + let sled = Sled { + id: sled_id, + sled_agent_address: addr, + is_scrimlet: false, + }; + (sled_id, sled) + }) + .collect(); + + // Get a success result back when the blueprint has an empty set of + // disks. + let (_, blueprint) = create_blueprint(BTreeMap::new()); + deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) + .await + .expect("failed to deploy no disks"); + + // Disks are updated in a particular order, but each request contains + // the full set of disks that must be running. + // See `rack_setup::service::ServiceInner::run` for more details. + fn make_disks() -> BlueprintPhysicalDisksConfig { + BlueprintPhysicalDisksConfig { + generation: Generation::new(), + disks: vec![BlueprintPhysicalDiskConfig { + identity: DiskIdentity { + vendor: "test-vendor".to_string(), + serial: "test-serial".to_string(), + model: "test-model".to_string(), + }, + id: Uuid::new_v4(), + pool_id: ZpoolUuid::new_v4(), + }], + } + } + + // Create a blueprint with only one disk for both servers + // We reuse the same `OmicronDisksConfig` because the details don't + // matter for this test. + let disks1 = make_disks(); + let disks2 = make_disks(); + let (_, blueprint) = create_blueprint(BTreeMap::from([ + (sled_id1, disks1.clone()), + (sled_id2, disks2.clone()), + ])); + + // Set expectations for the initial requests sent to the fake + // sled-agents. + for s in [&mut s1, &mut s2] { + s.expect( + Expectation::matching(all_of![ + request::method_path("PUT", "/omicron-physical-disks",), + // Our generation number should be 1 and there should + // be only a single disk. + request::body(json_decoded( + |c: &BlueprintPhysicalDisksConfig| { + c.generation == 1u32.into() && c.disks.len() == 1 + } + )) + ]) + .respond_with(json_encoded( + sled_agent_client::types::DisksManagementResult { + status: vec![], + }, + )), + ); + } + + // Execute it. + deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) + .await + .expect("failed to deploy initial disks"); + + s1.verify_and_clear(); + s2.verify_and_clear(); + + // Do it again. This should trigger the same request. + for s in [&mut s1, &mut s2] { + s.expect( + Expectation::matching(request::method_path( + "PUT", + "/omicron-physical-disks", + )) + .respond_with(json_encoded( + sled_agent_client::types::DisksManagementResult { + status: vec![], + }, + )), + ); + } + deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) + .await + .expect("failed to deploy same disks"); + s1.verify_and_clear(); + s2.verify_and_clear(); + + // Take another lap, but this time, have one server fail the request and + // try again. 
+ s1.expect( + Expectation::matching(request::method_path( + "PUT", + "/omicron-physical-disks", + )) + .respond_with(json_encoded( + sled_agent_client::types::DisksManagementResult { + status: vec![], + }, + )), + ); + s2.expect( + Expectation::matching(request::method_path( + "PUT", + "/omicron-physical-disks", + )) + .respond_with(status_code(500)), + ); + + let errors = + deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) + .await + .expect_err("unexpectedly succeeded in deploying disks"); + + println!("{:?}", errors); + assert_eq!(errors.len(), 1); + assert!(errors[0] + .to_string() + .starts_with("Failed to put OmicronPhysicalDisksConfig")); + s1.verify_and_clear(); + s2.verify_and_clear(); + + // We can also observe "partial failures", where the HTTP-evel response + // is successful, but it indicates that the disk provisioning ran into + // problems. + s1.expect( + Expectation::matching(request::method_path( + "PUT", + "/omicron-physical-disks", + )) + .respond_with(json_encoded( + sled_agent_client::types::DisksManagementResult { + status: vec![], + }, + )), + ); + s2.expect( + Expectation::matching(request::method_path( + "PUT", + "/omicron-physical-disks", + )) + .respond_with(json_encoded(sled_agent_client::types::DisksManagementResult { + status: vec![ + sled_agent_client::types::DiskManagementStatus { + identity: omicron_common::disk::DiskIdentity { + vendor: "v".to_string(), + serial: "s".to_string(), + model: "m".to_string(), + }, + + // This error could occur if a disk is removed + err: Some(sled_agent_client::types::DiskManagementError::NotFound), + } + ] + })), + ); + + let errors = + deploy_disks(&opctx, &sleds_by_id, &blueprint.blueprint_disks) + .await + .expect_err("unexpectedly succeeded in deploying disks"); + + println!("{:?}", errors); + assert_eq!(errors.len(), 1); + assert!( + errors[0].to_string().starts_with("failure deploying disks"), + "{}", + errors[0].to_string() + ); + s1.verify_and_clear(); + s2.verify_and_clear(); + } +} diff --git a/nexus/reconfigurator/execution/src/omicron_zones.rs b/nexus/reconfigurator/execution/src/omicron_zones.rs index 0150c40e9e..a40d65411b 100644 --- a/nexus/reconfigurator/execution/src/omicron_zones.rs +++ b/nexus/reconfigurator/execution/src/omicron_zones.rs @@ -2,7 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Manges deployment of Omicron zones to Sled Agents +//! 
Manages deployment of Omicron zones to Sled Agents use crate::Sled; use anyhow::anyhow; @@ -12,36 +12,45 @@ use futures::StreamExt; use nexus_db_queries::context::OpContext; use nexus_types::deployment::BlueprintZoneFilter; use nexus_types::deployment::BlueprintZonesConfig; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; use slog::info; use slog::warn; use std::collections::BTreeMap; -use uuid::Uuid; /// Idempotently ensure that the specified Omicron zones are deployed to the /// corresponding sleds pub(crate) async fn deploy_zones( opctx: &OpContext, - sleds_by_id: &BTreeMap, - zones: &BTreeMap, + sleds_by_id: &BTreeMap, + zones: &BTreeMap, ) -> Result<(), Vec> { let errors: Vec<_> = stream::iter(zones) .filter_map(|(sled_id, config)| async move { let db_sled = match sleds_by_id.get(sled_id) { Some(sled) => sled, None => { - let err = anyhow!("sled not found in db list: {}", sled_id); + if config.are_all_zones_expunged() { + info!( + opctx.log, + "Skipping zone deployment to expunged sled"; + "sled_id" => %sled_id + ); + return None; + } + let err = anyhow!("sled not found in db list: {sled_id}"); warn!(opctx.log, "{err:#}"); return Some(err); } }; let client = nexus_networking::sled_client_from_address( - *sled_id, + sled_id.into_untyped_uuid(), db_sled.sled_agent_address, &opctx.log, ); let omicron_zones = config - .to_omicron_zones_config(BlueprintZoneFilter::SledAgentPut); + .to_omicron_zones_config(BlueprintZoneFilter::ShouldBeRunning); let result = client .omicron_zones_put(&omicron_zones) .await @@ -85,15 +94,18 @@ mod test { use httptest::Expectation; use nexus_db_queries::context::OpContext; use nexus_test_utils_macros::nexus_test; - use nexus_types::deployment::OmicronZonesConfig; + use nexus_types::deployment::{ + blueprint_zone_type, BlueprintZoneType, CockroachDbPreserveDowngrade, + OmicronZonesConfig, + }; use nexus_types::deployment::{ Blueprint, BlueprintTarget, BlueprintZoneConfig, BlueprintZoneDisposition, BlueprintZonesConfig, }; - use nexus_types::inventory::{ - OmicronZoneConfig, OmicronZoneDataset, OmicronZoneType, - }; + use nexus_types::inventory::OmicronZoneDataset; use omicron_common::api::external::Generation; + use omicron_uuid_kinds::OmicronZoneUuid; + use omicron_uuid_kinds::SledUuid; use std::collections::BTreeMap; use std::net::SocketAddr; use uuid::Uuid; @@ -102,7 +114,7 @@ mod test { nexus_test_utils::ControlPlaneTestContext; fn create_blueprint( - blueprint_zones: BTreeMap, + blueprint_zones: BTreeMap, ) -> (BlueprintTarget, Blueprint) { let id = Uuid::new_v4(); ( @@ -114,9 +126,14 @@ mod test { Blueprint { id, blueprint_zones, + blueprint_disks: BTreeMap::new(), + sled_state: BTreeMap::new(), + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, parent_blueprint_id: None, internal_dns_version: Generation::new(), external_dns_version: Generation::new(), + cockroachdb_fingerprint: String::new(), time_created: chrono::Utc::now(), creator: "test".to_string(), comment: "test blueprint".to_string(), @@ -126,7 +143,7 @@ mod test { #[nexus_test] async fn test_deploy_omicron_zones(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), @@ -137,9 +154,9 @@ mod test { // sleds to CRDB. 
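// Editorial sketch (not part of this patch): the fake sled-agent servers
// created below are `httptest` servers. A minimal, standalone example of the
// expect/respond/verify cycle these tests rely on; the `/config` path,
// `tokio`, and `reqwest` used here are illustrative assumptions, not taken
// from the patch:

use httptest::{matchers::request, responders::status_code, Expectation, Server};

#[tokio::main]
async fn main() {
    // Start a local HTTP server standing in for a sled-agent.
    let mut server = Server::run();

    // Expect a PUT to this path and answer it with 204 No Content.
    server.expect(
        Expectation::matching(request::method_path("PUT", "/config"))
            .respond_with(status_code(204)),
    );

    // Drive the expectation with any HTTP client pointed at server.addr().
    let url = format!("http://{}/config", server.addr());
    let resp = reqwest::Client::new().put(url).send().await.unwrap();
    assert_eq!(resp.status().as_u16(), 204);

    // Panics if the expectation was not satisfied, then clears it for reuse.
    server.verify_and_clear();
}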
let mut s1 = httptest::Server::run(); let mut s2 = httptest::Server::run(); - let sled_id1 = Uuid::new_v4(); - let sled_id2 = Uuid::new_v4(); - let sleds_by_id: BTreeMap = + let sled_id1 = SledUuid::new_v4(); + let sled_id2 = SledUuid::new_v4(); + let sleds_by_id: BTreeMap = [(sled_id1, &s1), (sled_id2, &s2)] .into_iter() .map(|(sled_id, server)| { @@ -169,22 +186,22 @@ mod test { BlueprintZonesConfig { generation: Generation::new(), zones: vec![BlueprintZoneConfig { - config: OmicronZoneConfig { - id: Uuid::new_v4(), - underlay_address: "::1".parse().unwrap(), - zone_type: OmicronZoneType::InternalDns { + disposition: BlueprintZoneDisposition::InService, + id: OmicronZoneUuid::new_v4(), + underlay_address: "::1".parse().unwrap(), + zone_type: BlueprintZoneType::InternalDns( + blueprint_zone_type::InternalDns { dataset: OmicronZoneDataset { pool_name: format!("oxp_{}", Uuid::new_v4()) .parse() .unwrap(), }, - dns_address: "oh-hello-internal-dns".into(), + dns_address: "[::1]:0".parse().unwrap(), gz_address: "::1".parse().unwrap(), gz_address_index: 0, - http_address: "some-ipv6-address".into(), + http_address: "[::1]:0".parse().unwrap(), }, - }, - disposition: BlueprintZoneDisposition::InService, + ), }], } } @@ -274,27 +291,31 @@ mod test { zones: &mut BlueprintZonesConfig, disposition: BlueprintZoneDisposition, ) { - zones.generation = zones.generation.next(); zones.zones.push(BlueprintZoneConfig { - config: OmicronZoneConfig { - id: Uuid::new_v4(), - underlay_address: "::1".parse().unwrap(), - zone_type: OmicronZoneType::InternalNtp { - address: "::1".into(), + disposition, + id: OmicronZoneUuid::new_v4(), + underlay_address: "::1".parse().unwrap(), + zone_type: BlueprintZoneType::InternalNtp( + blueprint_zone_type::InternalNtp { + address: "[::1]:0".parse().unwrap(), dns_servers: vec!["::1".parse().unwrap()], domain: None, ntp_servers: vec!["some-ntp-server-addr".into()], }, - }, - disposition, + ), }); } // Both in-service and quiesced zones should be deployed. // - // TODO: add expunged zones to the test (should not be deployed). + // The expunged zone should not be deployed. 
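// Editorial sketch (not part of this patch): the expectation stated above
// (expunged zones are not deployed, while in-service and quiesced zones are)
// falls out of filtering by disposition before the PUT, per the switch to
// `BlueprintZoneFilter::ShouldBeRunning` earlier in this file. A simplified
// model with stand-in types rather than the real `nexus_types` definitions:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Disposition {
    InService,
    Quiesced,
    Expunged,
}

struct Zone {
    name: &'static str,
    disposition: Disposition,
}

// "Should be running" means anything not expunged: in-service and quiesced
// zones are both sent to the sled-agent; expunged zones are dropped.
fn zones_to_deploy(zones: &[Zone]) -> Vec<&Zone> {
    zones.iter().filter(|z| z.disposition != Disposition::Expunged).collect()
}

fn main() {
    let zones = [
        Zone { name: "ntp-in-service", disposition: Disposition::InService },
        Zone { name: "ntp-quiesced", disposition: Disposition::Quiesced },
        Zone { name: "ntp-expunged", disposition: Disposition::Expunged },
    ];
    let deployed = zones_to_deploy(&zones);
    assert_eq!(deployed.len(), 2); // the expunged zone is filtered out
    assert_eq!(deployed[0].name, "ntp-in-service");
}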
append_zone(&mut zones1, BlueprintZoneDisposition::InService); + append_zone(&mut zones1, BlueprintZoneDisposition::Expunged); append_zone(&mut zones2, BlueprintZoneDisposition::Quiesced); + // Bump the generation for each config + zones1.generation = zones1.generation.next(); + zones2.generation = zones2.generation.next(); + let (_, blueprint) = create_blueprint(BTreeMap::from([ (sled_id1, zones1), (sled_id2, zones2), diff --git a/nexus/reconfigurator/execution/src/overridables.rs b/nexus/reconfigurator/execution/src/overridables.rs index 5c4ce7dc6f..f59e3228f4 100644 --- a/nexus/reconfigurator/execution/src/overridables.rs +++ b/nexus/reconfigurator/execution/src/overridables.rs @@ -8,9 +8,9 @@ use omicron_common::address::DENDRITE_PORT; use omicron_common::address::MGD_PORT; use omicron_common::address::MGS_PORT; use omicron_common::address::SLED_PREFIX; +use omicron_uuid_kinds::SledUuid; use std::collections::BTreeMap; use std::net::Ipv6Addr; -use uuid::Uuid; /// Override values used during blueprint execution /// @@ -23,59 +23,59 @@ use uuid::Uuid; #[derive(Debug, Default)] pub struct Overridables { /// map: sled id -> TCP port on which that sled's Dendrite is listening - pub dendrite_ports: BTreeMap, + pub dendrite_ports: BTreeMap, /// map: sled id -> TCP port on which that sled's MGS is listening - pub mgs_ports: BTreeMap, + pub mgs_ports: BTreeMap, /// map: sled id -> TCP port on which that sled's MGD is listening - pub mgd_ports: BTreeMap, + pub mgd_ports: BTreeMap, /// map: sled id -> IP address of the sled's switch zone - pub switch_zone_ips: BTreeMap, + pub switch_zone_ips: BTreeMap, } impl Overridables { /// Specify the TCP port on which this sled's Dendrite is listening #[cfg(test)] - fn override_dendrite_port(&mut self, sled_id: Uuid, port: u16) { + fn override_dendrite_port(&mut self, sled_id: SledUuid, port: u16) { self.dendrite_ports.insert(sled_id, port); } /// Returns the TCP port on which this sled's Dendrite is listening - pub fn dendrite_port(&self, sled_id: Uuid) -> u16 { + pub fn dendrite_port(&self, sled_id: SledUuid) -> u16 { self.dendrite_ports.get(&sled_id).copied().unwrap_or(DENDRITE_PORT) } /// Specify the TCP port on which this sled's MGS is listening #[cfg(test)] - fn override_mgs_port(&mut self, sled_id: Uuid, port: u16) { + fn override_mgs_port(&mut self, sled_id: SledUuid, port: u16) { self.mgs_ports.insert(sled_id, port); } /// Returns the TCP port on which this sled's MGS is listening - pub fn mgs_port(&self, sled_id: Uuid) -> u16 { + pub fn mgs_port(&self, sled_id: SledUuid) -> u16 { self.mgs_ports.get(&sled_id).copied().unwrap_or(MGS_PORT) } /// Specify the TCP port on which this sled's MGD is listening #[cfg(test)] - fn override_mgd_port(&mut self, sled_id: Uuid, port: u16) { + fn override_mgd_port(&mut self, sled_id: SledUuid, port: u16) { self.mgd_ports.insert(sled_id, port); } /// Returns the TCP port on which this sled's MGD is listening - pub fn mgd_port(&self, sled_id: Uuid) -> u16 { + pub fn mgd_port(&self, sled_id: SledUuid) -> u16 { self.mgd_ports.get(&sled_id).copied().unwrap_or(MGD_PORT) } /// Specify the IP address of this switch zone #[cfg(test)] - fn override_switch_zone_ip(&mut self, sled_id: Uuid, addr: Ipv6Addr) { + fn override_switch_zone_ip(&mut self, sled_id: SledUuid, addr: Ipv6Addr) { self.switch_zone_ips.insert(sled_id, addr); } /// Returns the IP address of this sled's switch zone pub fn switch_zone_ip( &self, - sled_id: Uuid, + sled_id: SledUuid, sled_subnet: Ipv6Subnet, ) -> Ipv6Addr { self.switch_zone_ips diff --git 
a/nexus/reconfigurator/execution/src/resource_allocation.rs b/nexus/reconfigurator/execution/src/resource_allocation.rs deleted file mode 100644 index 92262ce133..0000000000 --- a/nexus/reconfigurator/execution/src/resource_allocation.rs +++ /dev/null @@ -1,987 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Manges allocation of resources required for blueprint realization - -use anyhow::bail; -use anyhow::Context; -use nexus_db_model::IncompleteNetworkInterface; -use nexus_db_model::Name; -use nexus_db_model::SqlU16; -use nexus_db_model::VpcSubnet; -use nexus_db_queries::context::OpContext; -use nexus_db_queries::db::fixed_data::vpc_subnet::DNS_VPC_SUBNET; -use nexus_db_queries::db::fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; -use nexus_db_queries::db::fixed_data::vpc_subnet::NTP_VPC_SUBNET; -use nexus_db_queries::db::DataStore; -use nexus_types::deployment::OmicronZoneType; -use nexus_types::deployment::SourceNatConfig; -use nexus_types::inventory::OmicronZoneConfig; -use omicron_common::api::external::IdentityMetadataCreateParams; -use omicron_common::api::internal::shared::NetworkInterface; -use omicron_common::api::internal::shared::NetworkInterfaceKind; -use slog::info; -use slog::warn; -use std::net::IpAddr; -use std::net::SocketAddr; -use uuid::Uuid; - -pub(crate) async fn ensure_zone_resources_allocated( - opctx: &OpContext, - datastore: &DataStore, - all_omicron_zones: impl Iterator, -) -> anyhow::Result<()> { - let allocator = ResourceAllocator { opctx, datastore }; - - for z in all_omicron_zones { - match &z.zone_type { - OmicronZoneType::Nexus { external_ip, nic, .. } => { - allocator - .ensure_nexus_external_networking_allocated( - z.id, - *external_ip, - nic, - ) - .await?; - } - OmicronZoneType::ExternalDns { dns_address, nic, .. } => { - allocator - .ensure_external_dns_external_networking_allocated( - z.id, - dns_address, - nic, - ) - .await?; - } - OmicronZoneType::BoundaryNtp { snat_cfg, nic, .. } => { - allocator - .ensure_boundary_ntp_external_networking_allocated( - z.id, snat_cfg, nic, - ) - .await?; - } - OmicronZoneType::InternalNtp { .. } - | OmicronZoneType::Clickhouse { .. } - | OmicronZoneType::ClickhouseKeeper { .. } - | OmicronZoneType::CockroachDb { .. } - | OmicronZoneType::Crucible { .. } - | OmicronZoneType::CruciblePantry { .. } - | OmicronZoneType::InternalDns { .. } - | OmicronZoneType::Oximeter { .. } => (), - } - } - - Ok(()) -} - -struct ResourceAllocator<'a> { - opctx: &'a OpContext, - datastore: &'a DataStore, -} - -impl<'a> ResourceAllocator<'a> { - // Helper function to determine whether a given external IP address is - // already allocated to a specific service zone. - async fn is_external_ip_already_allocated( - &self, - zone_type: &'static str, - zone_id: Uuid, - external_ip: IpAddr, - port_range: Option<(u16, u16)>, - ) -> anyhow::Result { - // localhost is used by many components in the test suite. We can't use - // the normal path because normally a given external IP must only be - // used once. Just treat localhost in the test suite as though it's - // already allocated. We do the same in is_nic_already_allocated(). 
- if cfg!(test) && external_ip.is_loopback() { - return Ok(true); - } - - let allocated_ips = self - .datastore - .service_lookup_external_ips(self.opctx, zone_id) - .await - .with_context(|| { - format!( - "failed to look up external IPs for {zone_type} {zone_id}" - ) - })?; - - if !allocated_ips.is_empty() { - // All the service zones that want external IP addresses only expect - // to have a single IP. This service already has (at least) one: - // make sure this list includes the one we want, or return an error. - for allocated_ip in &allocated_ips { - if allocated_ip.ip.ip() == external_ip - && port_range - .map(|(first, last)| { - allocated_ip.first_port == SqlU16(first) - && allocated_ip.last_port == SqlU16(last) - }) - .unwrap_or(true) - { - info!( - self.opctx.log, "found already-allocated external IP"; - "zone_type" => zone_type, - "zone_id" => %zone_id, - "ip" => %external_ip, - ); - return Ok(true); - } - } - - warn!( - self.opctx.log, "zone has unexpected IPs allocated"; - "zone_type" => zone_type, - "zone_id" => %zone_id, - "want_ip" => %external_ip, - "allocated_ips" => ?allocated_ips, - ); - bail!( - "zone {zone_id} already has {} non-matching IP(s) allocated", - allocated_ips.len() - ); - } - - info!( - self.opctx.log, "external IP allocation required for zone"; - "zone_type" => zone_type, - "zone_id" => %zone_id, - "ip" => %external_ip, - ); - - Ok(false) - } - - // Helper function to determine whether a given NIC is already allocated to - // a specific service zone. - async fn is_nic_already_allocated( - &self, - zone_type: &'static str, - zone_id: Uuid, - nic: &NetworkInterface, - ) -> anyhow::Result { - // See the comment in is_external_ip_already_allocated(). - if cfg!(test) && nic.ip.is_loopback() { - return Ok(true); - } - - let allocated_nics = self - .datastore - .service_list_network_interfaces(self.opctx, zone_id) - .await - .with_context(|| { - format!("failed to look up NICs for {zone_type} {zone_id}") - })?; - - if !allocated_nics.is_empty() { - // All the service zones that want NICs only expect to have a single - // one. Bail out here if this zone already has one or more allocated - // NICs but not the one we think it needs. - // - // This doesn't check the allocated NIC's subnet against our NICs, - // because that would require an extra DB lookup. We'll assume if - // these main properties are correct, the subnet is too. - for allocated_nic in &allocated_nics { - if allocated_nic.ip.ip() == nic.ip - && *allocated_nic.mac == nic.mac - && allocated_nic.slot == i16::from(nic.slot) - && allocated_nic.primary == nic.primary - { - info!( - self.opctx.log, "found already-allocated NIC"; - "zone_type" => zone_type, - "zone_id" => %zone_id, - "nic" => ?allocated_nic, - ); - return Ok(true); - } - } - - warn!( - self.opctx.log, "zone has unexpected NICs allocated"; - "zone_type" => zone_type, - "zone_id" => %zone_id, - "want_nic" => ?nic, - "allocated_nics" => ?allocated_nics, - ); - - bail!( - "zone {zone_id} already has {} non-matching NIC(s) allocated", - allocated_nics.len() - ); - } - - info!( - self.opctx.log, "NIC allocation required for zone"; - "zone_type" => zone_type, - "zone_id" => %zone_id, - "nid" => ?nic, - ); - - Ok(false) - } - - // Nexus and ExternalDns both use non-SNAT service IPs; this method is used - // to allocate external networking for both of them. 
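// Editorial sketch (not part of this patch): the `ensure_*` helpers in this
// removed file follow a check-then-create shape: return early when the wanted
// record already exists, refuse to continue when a conflicting record exists,
// and otherwise create it, accepting the TOCTOU race described below because
// the losing writer simply fails and blueprint realization is retried later.
// With an in-memory map standing in for the datastore:

use anyhow::{bail, Result};
use std::collections::BTreeMap;

// Map of zone name -> allocated value (an in-memory stand-in for the
// datastore records the real code checks and inserts).
type Allocations = BTreeMap<&'static str, u16>;

fn ensure_allocated(
    allocations: &mut Allocations,
    zone: &'static str,
    want: u16,
) -> Result<()> {
    match allocations.get(zone) {
        // Already allocated with the value we want: nothing to do.
        Some(have) if *have == want => Ok(()),
        // Allocated, but not what the blueprint asks for: surface an error
        // rather than silently "fixing" it.
        Some(have) => bail!("zone {zone} already has non-matching allocation {have}"),
        // Not allocated yet: create it. If another writer races us here, the
        // loser's insert fails and the blueprint is realized again later.
        None => {
            allocations.insert(zone, want);
            Ok(())
        }
    }
}

fn main() -> Result<()> {
    let mut allocations = Allocations::new();
    ensure_allocated(&mut allocations, "nexus", 443)?; // creates
    ensure_allocated(&mut allocations, "nexus", 443)?; // idempotent no-op
    assert!(ensure_allocated(&mut allocations, "nexus", 8443).is_err()); // conflict
    Ok(())
}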
- async fn ensure_external_service_ip( - &self, - zone_type: &'static str, - service_id: Uuid, - external_ip: IpAddr, - ip_name: &Name, - ) -> anyhow::Result<()> { - // Only attempt to allocate `external_ip` if it isn't already assigned - // to this zone. - // - // Checking for the existing of the external IP and then creating it - // if not found inserts a classic TOCTOU race: what if another Nexus - // is running concurrently, we both check and see that the IP is not - // allocated, then both attempt to create it? We believe this is - // okay: the loser of the race (i.e., the one whose create tries to - // commit second) will fail to allocate the IP, which will bubble - // out and prevent realization of the current blueprint. That's - // exactly what we want if two Nexuses try to realize the same - // blueprint at the same time. - if self - .is_external_ip_already_allocated( - zone_type, - service_id, - external_ip, - None, - ) - .await? - { - return Ok(()); - } - let ip_id = Uuid::new_v4(); - let description = zone_type; - self.datastore - .allocate_explicit_service_ip( - self.opctx, - ip_id, - ip_name, - description, - service_id, - external_ip, - ) - .await - .with_context(|| { - format!( - "failed to allocate IP to {zone_type} {service_id}: \ - {external_ip}" - ) - })?; - - info!( - self.opctx.log, "successfully allocated external IP"; - "zone_type" => zone_type, - "zone_id" => %service_id, - "ip" => %external_ip, - "ip_id" => %ip_id, - ); - - Ok(()) - } - - // BoundaryNtp uses a SNAT service IPs; this method is similar to - // `ensure_external_service_ip` but accounts for that. - async fn ensure_external_service_snat_ip( - &self, - zone_type: &'static str, - service_id: Uuid, - snat: &SourceNatConfig, - ) -> anyhow::Result<()> { - // Only attempt to allocate `external_ip` if it isn't already assigned - // to this zone. - // - // This is subject to the same kind of TOCTOU race as described for IP - // allocation in `ensure_external_service_ip`, and we believe it's okay - // for the same reasons as described there. - if self - .is_external_ip_already_allocated( - zone_type, - service_id, - snat.ip, - Some((snat.first_port, snat.last_port)), - ) - .await? - { - return Ok(()); - } - - let ip_id = Uuid::new_v4(); - self.datastore - .allocate_explicit_service_snat_ip( - self.opctx, - ip_id, - service_id, - snat.ip, - (snat.first_port, snat.last_port), - ) - .await - .with_context(|| { - format!( - "failed to allocate snat IP to {zone_type} {service_id}: \ - {snat:?}" - ) - })?; - - info!( - self.opctx.log, "successfully allocated external SNAT IP"; - "zone_type" => zone_type, - "zone_id" => %service_id, - "snat" => ?snat, - "ip_id" => %ip_id, - ); - - Ok(()) - } - - // All service zones with external connectivity get service vNICs. - async fn ensure_service_nic( - &self, - zone_type: &'static str, - service_id: Uuid, - nic: &NetworkInterface, - nic_subnet: &VpcSubnet, - ) -> anyhow::Result<()> { - // We don't pass `nic.kind` into the database below, but instead - // explicitly call `service_create_network_interface`. Ensure this is - // indeed a service NIC. - match &nic.kind { - NetworkInterfaceKind::Instance { .. } => { - bail!("invalid NIC kind (expected service, got instance)") - } - NetworkInterfaceKind::Service { .. } => (), - NetworkInterfaceKind::Probe { .. } => (), - } - - // Only attempt to allocate `nic` if it isn't already assigned to this - // zone. 
- // - // This is subject to the same kind of TOCTOU race as described for IP - // allocation in `ensure_external_service_ip`, and we believe it's okay - // for the same reasons as described there. - if self.is_nic_already_allocated(zone_type, service_id, nic).await? { - return Ok(()); - } - let nic_arg = IncompleteNetworkInterface::new_service( - nic.id, - service_id, - nic_subnet.clone(), - IdentityMetadataCreateParams { - name: nic.name.clone(), - description: format!("{zone_type} service vNIC"), - }, - nic.ip, - nic.mac, - nic.slot, - ) - .with_context(|| { - format!( - "failed to convert NIC into IncompleteNetworkInterface: {nic:?}" - ) - })?; - let created_nic = self - .datastore - .service_create_network_interface(self.opctx, nic_arg) - .await - .map_err(|err| err.into_external()) - .with_context(|| { - format!( - "failed to allocate NIC to {zone_type} {service_id}: \ - {nic:?}" - ) - })?; - - // We don't pass all the properties of `nic` into the create request - // above. Double-check that the properties the DB assigned match - // what we expect. - // - // We do not check `nic.vni`, because it's not stored in the - // database. (All services are given the constant vni - // `Vni::SERVICES_VNI`.) - if created_nic.primary != nic.primary - || created_nic.slot != i16::from(nic.slot) - { - warn!( - self.opctx.log, "unexpected property on allocated NIC"; - "db_primary" => created_nic.primary, - "expected_primary" => nic.primary, - "db_slot" => created_nic.slot, - "expected_slot" => nic.slot, - ); - - // Now what? We've allocated a NIC in the database but it's - // incorrect. Should we try to delete it? That would be best - // effort (we could fail to delete, or we could crash between - // creation and deletion). - // - // We only expect services to have one NIC, so the only way it - // should be possible to get a different primary/slot value is - // if somehow this same service got a _different_ NIC allocated - // to it in the TOCTOU race window above. That should be - // impossible with the way we generate blueprints, so we'll just - // return a scary error here and expect to never see it. 
- bail!( - "database cleanup required: \ - unexpected NIC ({created_nic:?}) \ - allocated for {zone_type} {service_id}" - ); - } - - info!( - self.opctx.log, "successfully allocated service vNIC"; - "zone_type" => zone_type, - "zone_id" => %service_id, - "nic" => ?nic, - ); - - Ok(()) - } - - async fn ensure_nexus_external_networking_allocated( - &self, - zone_id: Uuid, - external_ip: IpAddr, - nic: &NetworkInterface, - ) -> anyhow::Result<()> { - self.ensure_external_service_ip( - "nexus", - zone_id, - external_ip, - &Name(nic.name.clone()), - ) - .await?; - self.ensure_service_nic("nexus", zone_id, nic, &NEXUS_VPC_SUBNET) - .await?; - Ok(()) - } - - async fn ensure_external_dns_external_networking_allocated( - &self, - zone_id: Uuid, - dns_address: &str, - nic: &NetworkInterface, - ) -> anyhow::Result<()> { - let dns_address = - dns_address.parse::().with_context(|| { - format!("failed to parse ExternalDns address {dns_address}") - })?; - self.ensure_external_service_ip( - "external_dns", - zone_id, - dns_address.ip(), - &Name(nic.name.clone()), - ) - .await?; - self.ensure_service_nic("external_dns", zone_id, nic, &DNS_VPC_SUBNET) - .await?; - Ok(()) - } - - async fn ensure_boundary_ntp_external_networking_allocated( - &self, - zone_id: Uuid, - snat: &SourceNatConfig, - nic: &NetworkInterface, - ) -> anyhow::Result<()> { - self.ensure_external_service_snat_ip("ntp", zone_id, snat).await?; - self.ensure_service_nic("ntp", zone_id, nic, &NTP_VPC_SUBNET).await?; - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; - use nexus_test_utils_macros::nexus_test; - use nexus_types::deployment::OmicronZoneConfig; - use nexus_types::deployment::OmicronZoneDataset; - use nexus_types::identity::Resource; - use omicron_common::address::IpRange; - use omicron_common::address::DNS_OPTE_IPV4_SUBNET; - use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; - use omicron_common::address::NTP_OPTE_IPV4_SUBNET; - use omicron_common::address::NUM_SOURCE_NAT_PORTS; - use omicron_common::api::external::IpNet; - use omicron_common::api::external::MacAddr; - use omicron_common::api::external::Vni; - use std::net::IpAddr; - use std::net::Ipv6Addr; - use std::net::SocketAddrV6; - - type ControlPlaneTestContext = - nexus_test_utils::ControlPlaneTestContext; - - #[nexus_test] - async fn test_allocate_external_networking( - cptestctx: &ControlPlaneTestContext, - ) { - // Set up. - let nexus = &cptestctx.server.apictx().nexus; - let datastore = nexus.datastore(); - let opctx = OpContext::for_tests( - cptestctx.logctx.log.clone(), - datastore.clone(), - ); - - // Create an external IP range we can use for our services. - let external_ip_range = IpRange::try_from(( - "192.0.2.1".parse::().unwrap(), - "192.0.2.100".parse::().unwrap(), - )) - .expect("bad IP range"); - let mut external_ips = external_ip_range.iter(); - - // Add the external IP range to the services IP pool. - let (ip_pool, _) = datastore - .ip_pools_service_lookup(&opctx) - .await - .expect("failed to find service IP pool"); - datastore - .ip_pool_add_range(&opctx, &ip_pool, &external_ip_range) - .await - .expect("failed to expand service IP pool"); - - // Generate the values we care about. (Other required zone config params - // that we don't care about will be filled in below arbitrarily.) 
- - // Nexus: - let nexus_id = Uuid::new_v4(); - let nexus_external_ip = - external_ips.next().expect("exhausted external_ips"); - let nexus_nic = NetworkInterface { - id: Uuid::new_v4(), - kind: NetworkInterfaceKind::Service { id: nexus_id }, - name: "test-nexus".parse().expect("bad name"), - ip: NEXUS_OPTE_IPV4_SUBNET - .iter() - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) - .unwrap() - .into(), - mac: MacAddr::random_system(), - subnet: IpNet::from(*NEXUS_OPTE_IPV4_SUBNET), - vni: Vni::SERVICES_VNI, - primary: true, - slot: 0, - }; - - // External DNS: - let dns_id = Uuid::new_v4(); - let dns_external_ip = - external_ips.next().expect("exhausted external_ips"); - let dns_nic = NetworkInterface { - id: Uuid::new_v4(), - kind: NetworkInterfaceKind::Service { id: dns_id }, - name: "test-external-dns".parse().expect("bad name"), - ip: DNS_OPTE_IPV4_SUBNET - .iter() - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) - .unwrap() - .into(), - mac: MacAddr::random_system(), - subnet: IpNet::from(*DNS_OPTE_IPV4_SUBNET), - vni: Vni::SERVICES_VNI, - primary: true, - slot: 0, - }; - - // Boundary NTP: - let ntp_id = Uuid::new_v4(); - let ntp_snat = SourceNatConfig { - ip: external_ips.next().expect("exhausted external_ips"), - first_port: NUM_SOURCE_NAT_PORTS, - last_port: 2 * NUM_SOURCE_NAT_PORTS - 1, - }; - let ntp_nic = NetworkInterface { - id: Uuid::new_v4(), - kind: NetworkInterfaceKind::Service { id: ntp_id }, - name: "test-external-ntp".parse().expect("bad name"), - ip: NTP_OPTE_IPV4_SUBNET - .iter() - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES) - .unwrap() - .into(), - mac: MacAddr::random_system(), - subnet: IpNet::from(*NTP_OPTE_IPV4_SUBNET), - vni: Vni::SERVICES_VNI, - primary: true, - slot: 0, - }; - - // Build the `zones` map needed by `ensure_zone_resources_allocated`, - // with an arbitrary sled_id. - let zones = vec![ - OmicronZoneConfig { - id: nexus_id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::Nexus { - internal_address: Ipv6Addr::LOCALHOST.to_string(), - external_ip: nexus_external_ip, - nic: nexus_nic.clone(), - external_tls: false, - external_dns_servers: Vec::new(), - }, - }, - OmicronZoneConfig { - id: dns_id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::ExternalDns { - dataset: OmicronZoneDataset { - pool_name: format!("oxp_{}", Uuid::new_v4()) - .parse() - .expect("bad name"), - }, - http_address: SocketAddrV6::new( - Ipv6Addr::LOCALHOST, - 0, - 0, - 0, - ) - .to_string(), - dns_address: SocketAddr::new(dns_external_ip, 0) - .to_string(), - nic: dns_nic.clone(), - }, - }, - OmicronZoneConfig { - id: ntp_id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type: OmicronZoneType::BoundaryNtp { - address: SocketAddr::new(dns_external_ip, 0).to_string(), - ntp_servers: Vec::new(), - dns_servers: Vec::new(), - domain: None, - nic: ntp_nic.clone(), - snat_cfg: ntp_snat, - }, - }, - ]; - - // Initialize resource allocation: this should succeed and create all - // the relevant db records. - ensure_zone_resources_allocated(&opctx, datastore, zones.iter()) - .await - .with_context(|| format!("{zones:#?}")) - .unwrap(); - - // Check that the external IP records were created. 
- let db_nexus_ips = datastore - .service_lookup_external_ips(&opctx, nexus_id) - .await - .expect("failed to get external IPs"); - assert_eq!(db_nexus_ips.len(), 1); - assert!(db_nexus_ips[0].is_service); - assert_eq!(db_nexus_ips[0].parent_id, Some(nexus_id)); - assert_eq!(db_nexus_ips[0].ip, nexus_external_ip.into()); - assert_eq!(db_nexus_ips[0].first_port, SqlU16(0)); - assert_eq!(db_nexus_ips[0].last_port, SqlU16(65535)); - - let db_dns_ips = datastore - .service_lookup_external_ips(&opctx, dns_id) - .await - .expect("failed to get external IPs"); - assert_eq!(db_dns_ips.len(), 1); - assert!(db_dns_ips[0].is_service); - assert_eq!(db_dns_ips[0].parent_id, Some(dns_id)); - assert_eq!(db_dns_ips[0].ip, dns_external_ip.into()); - assert_eq!(db_dns_ips[0].first_port, SqlU16(0)); - assert_eq!(db_dns_ips[0].last_port, SqlU16(65535)); - - let db_ntp_ips = datastore - .service_lookup_external_ips(&opctx, ntp_id) - .await - .expect("failed to get external IPs"); - assert_eq!(db_ntp_ips.len(), 1); - assert!(db_ntp_ips[0].is_service); - assert_eq!(db_ntp_ips[0].parent_id, Some(ntp_id)); - assert_eq!(db_ntp_ips[0].ip, ntp_snat.ip.into()); - assert_eq!(db_ntp_ips[0].first_port, SqlU16(ntp_snat.first_port)); - assert_eq!(db_ntp_ips[0].last_port, SqlU16(ntp_snat.last_port)); - - // Check that the NIC records were created. - let db_nexus_nics = datastore - .service_list_network_interfaces(&opctx, nexus_id) - .await - .expect("failed to get NICs"); - assert_eq!(db_nexus_nics.len(), 1); - assert_eq!(db_nexus_nics[0].id(), nexus_nic.id); - assert_eq!(db_nexus_nics[0].service_id, nexus_id); - assert_eq!(db_nexus_nics[0].vpc_id, NEXUS_VPC_SUBNET.vpc_id); - assert_eq!(db_nexus_nics[0].subnet_id, NEXUS_VPC_SUBNET.id()); - assert_eq!(*db_nexus_nics[0].mac, nexus_nic.mac); - assert_eq!(db_nexus_nics[0].ip, nexus_nic.ip.into()); - assert_eq!(db_nexus_nics[0].slot, i16::from(nexus_nic.slot)); - assert_eq!(db_nexus_nics[0].primary, nexus_nic.primary); - - let db_dns_nics = datastore - .service_list_network_interfaces(&opctx, dns_id) - .await - .expect("failed to get NICs"); - assert_eq!(db_dns_nics.len(), 1); - assert_eq!(db_dns_nics[0].id(), dns_nic.id); - assert_eq!(db_dns_nics[0].service_id, dns_id); - assert_eq!(db_dns_nics[0].vpc_id, DNS_VPC_SUBNET.vpc_id); - assert_eq!(db_dns_nics[0].subnet_id, DNS_VPC_SUBNET.id()); - assert_eq!(*db_dns_nics[0].mac, dns_nic.mac); - assert_eq!(db_dns_nics[0].ip, dns_nic.ip.into()); - assert_eq!(db_dns_nics[0].slot, i16::from(dns_nic.slot)); - assert_eq!(db_dns_nics[0].primary, dns_nic.primary); - - let db_ntp_nics = datastore - .service_list_network_interfaces(&opctx, ntp_id) - .await - .expect("failed to get NICs"); - assert_eq!(db_ntp_nics.len(), 1); - assert_eq!(db_ntp_nics[0].id(), ntp_nic.id); - assert_eq!(db_ntp_nics[0].service_id, ntp_id); - assert_eq!(db_ntp_nics[0].vpc_id, NTP_VPC_SUBNET.vpc_id); - assert_eq!(db_ntp_nics[0].subnet_id, NTP_VPC_SUBNET.id()); - assert_eq!(*db_ntp_nics[0].mac, ntp_nic.mac); - assert_eq!(db_ntp_nics[0].ip, ntp_nic.ip.into()); - assert_eq!(db_ntp_nics[0].slot, i16::from(ntp_nic.slot)); - assert_eq!(db_ntp_nics[0].primary, ntp_nic.primary); - - // We should be able to run the function again with the same inputs, and - // it should succeed without inserting any new records. 
- ensure_zone_resources_allocated(&opctx, datastore, zones.iter()) - .await - .with_context(|| format!("{zones:#?}")) - .unwrap(); - assert_eq!( - db_nexus_ips, - datastore - .service_lookup_external_ips(&opctx, nexus_id) - .await - .expect("failed to get external IPs") - ); - assert_eq!( - db_dns_ips, - datastore - .service_lookup_external_ips(&opctx, dns_id) - .await - .expect("failed to get external IPs") - ); - assert_eq!( - db_ntp_ips, - datastore - .service_lookup_external_ips(&opctx, ntp_id) - .await - .expect("failed to get external IPs") - ); - assert_eq!( - db_nexus_nics, - datastore - .service_list_network_interfaces(&opctx, nexus_id) - .await - .expect("failed to get NICs") - ); - assert_eq!( - db_dns_nics, - datastore - .service_list_network_interfaces(&opctx, dns_id) - .await - .expect("failed to get NICs") - ); - assert_eq!( - db_ntp_nics, - datastore - .service_list_network_interfaces(&opctx, ntp_id) - .await - .expect("failed to get NICs") - ); - - // Now that we've tested the happy path, try some requests that ought to - // fail because the request includes an external IP that doesn't match - // the already-allocated external IPs from above. - let bogus_ip = external_ips.next().expect("exhausted external_ips"); - for mutate_zones_fn in [ - // non-matching IP on Nexus - (&|zones: &mut [OmicronZoneConfig]| { - for zone in zones { - if let OmicronZoneType::Nexus { - ref mut external_ip, .. - } = &mut zone.zone_type - { - *external_ip = bogus_ip; - return format!( - "zone {} already has 1 non-matching IP", - zone.id - ); - } - } - - panic!("didn't find expected zone"); - }) as &dyn Fn(&mut [OmicronZoneConfig]) -> String, - // non-matching IP on External DNS - &|zones| { - for zone in zones { - if let OmicronZoneType::ExternalDns { - ref mut dns_address, - .. - } = &mut zone.zone_type - { - *dns_address = SocketAddr::new(bogus_ip, 0).to_string(); - return format!( - "zone {} already has 1 non-matching IP", - zone.id - ); - } - } - panic!("didn't find expected zone"); - }, - // non-matching SNAT port range on Boundary NTP - &|zones| { - for zone in zones { - if let OmicronZoneType::BoundaryNtp { - ref mut snat_cfg, - .. - } = &mut zone.zone_type - { - snat_cfg.first_port += NUM_SOURCE_NAT_PORTS; - snat_cfg.last_port += NUM_SOURCE_NAT_PORTS; - return format!( - "zone {} already has 1 non-matching IP", - zone.id - ); - } - } - panic!("didn't find expected zone"); - }, - ] { - // Run `mutate_zones_fn` on our config... - let (mutated_zones, expected_error) = { - let mut zones = zones.clone(); - let expected_error = mutate_zones_fn(&mut zones); - (zones, expected_error) - }; - - // and check that we get the error we expect. - let err = ensure_zone_resources_allocated( - &opctx, - datastore, - mutated_zones.iter(), - ) - .await - .expect_err("unexpected success"); - assert!( - err.to_string().contains(&expected_error), - "expected {expected_error:?}, got {err:#}" - ); - } - - // Also try some requests that ought to fail because the request - // includes a NIC that doesn't match the already-allocated NICs from - // above. - // - // All three zone types have a `nic` property, so here our mutating - // function only modifies that, and the body of our loop tries it on all - // three to ensure we get the errors we expect no matter the zone type. - for mutate_nic_fn in [ - // switch kind from Service to Instance - (&|_: Uuid, nic: &mut NetworkInterface| { - match &nic.kind { - NetworkInterfaceKind::Instance { .. 
} => { - panic!( - "invalid NIC kind (expected service, got instance)" - ) - } - NetworkInterfaceKind::Probe { .. } => { - panic!( - "invalid NIC kind (expected service, got instance)" - ) - } - NetworkInterfaceKind::Service { id } => { - let id = *id; - nic.kind = NetworkInterfaceKind::Instance { id }; - } - } - "invalid NIC kind".to_string() - }) as &dyn Fn(Uuid, &mut NetworkInterface) -> String, - // non-matching IP - &|zone_id, nic| { - nic.ip = bogus_ip; - format!("zone {zone_id} already has 1 non-matching NIC") - }, - ] { - // Try this NIC mutation on Nexus... - let mut mutated_zones = zones.clone(); - for zone in &mut mutated_zones { - if let OmicronZoneType::Nexus { ref mut nic, .. } = - &mut zone.zone_type - { - let expected_error = mutate_nic_fn(zone.id, nic); - - let err = ensure_zone_resources_allocated( - &opctx, - datastore, - mutated_zones.iter(), - ) - .await - .expect_err("unexpected success"); - - assert!( - err.to_string().contains(&expected_error), - "expected {expected_error:?}, got {err:#}" - ); - - break; - } - } - - // ... and again on ExternalDns - let mut mutated_zones = zones.clone(); - for zone in &mut mutated_zones { - if let OmicronZoneType::ExternalDns { ref mut nic, .. } = - &mut zone.zone_type - { - let expected_error = mutate_nic_fn(zone.id, nic); - - let err = ensure_zone_resources_allocated( - &opctx, - datastore, - mutated_zones.iter(), - ) - .await - .expect_err("unexpected success"); - - assert!( - err.to_string().contains(&expected_error), - "expected {expected_error:?}, got {err:#}" - ); - - break; - } - } - - // ... and again on BoundaryNtp - let mut mutated_zones = zones.clone(); - for zone in &mut mutated_zones { - if let OmicronZoneType::BoundaryNtp { ref mut nic, .. } = - &mut zone.zone_type - { - let expected_error = mutate_nic_fn(zone.id, nic); - - let err = ensure_zone_resources_allocated( - &opctx, - datastore, - mutated_zones.iter(), - ) - .await - .expect_err("unexpected success"); - - assert!( - err.to_string().contains(&expected_error), - "expected {expected_error:?}, got {err:#}" - ); - - break; - } - } - } - } -} diff --git a/nexus/reconfigurator/execution/src/sled_state.rs b/nexus/reconfigurator/execution/src/sled_state.rs new file mode 100644 index 0000000000..fafc1c2e44 --- /dev/null +++ b/nexus/reconfigurator/execution/src/sled_state.rs @@ -0,0 +1,146 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Updates sled states required by a given blueprint + +use anyhow::Context; +use nexus_db_model::SledState; +use nexus_db_queries::authz::Action; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::datastore::TransitionError; +use nexus_db_queries::db::lookup::LookupPath; +use nexus_db_queries::db::DataStore; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; + +pub(crate) async fn decommission_sleds( + opctx: &OpContext, + datastore: &DataStore, + sled_ids_to_decommission: impl Iterator, +) -> Result<(), Vec> { + let mut errors = Vec::new(); + + for sled_id in sled_ids_to_decommission { + if let Err(err) = decommission_one_sled(opctx, datastore, sled_id).await + { + errors.push(err); + } + } + + if errors.is_empty() { + Ok(()) + } else { + Err(errors) + } +} + +async fn decommission_one_sled( + opctx: &OpContext, + datastore: &DataStore, + sled_id: SledUuid, +) -> anyhow::Result<()> { + let (authz_sled,) = LookupPath::new(opctx, datastore) + .sled_id(sled_id.into_untyped_uuid()) + .lookup_for(Action::Modify) + .await + .with_context(|| { + format!("failed to look up sled {sled_id} for modification") + })?; + match datastore.sled_set_state_to_decommissioned(opctx, &authz_sled).await { + Ok(_) => Ok(()), + // `sled_set_state_to_decommissioned` is not idempotent. If we're racing + // another Nexus (or we're repeating realization of a blueprint we've + // already realized), this sled may already be decommissioned; that's + // fine. + Err(TransitionError::InvalidTransition { current, .. }) + if current.state() == SledState::Decommissioned => + { + Ok(()) + } + Err(err) => Err(anyhow::Error::new(err) + .context(format!("failed to decommission sled {sled_id}"))), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use nexus_test_utils_macros::nexus_test; + use nexus_types::deployment::SledFilter; + use nexus_types::identity::Asset; + + type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + + async fn list_all_commissioned_sled_ids( + opctx: &OpContext, + datastore: &DataStore, + ) -> Vec { + datastore + .sled_list_all_batched(&opctx, SledFilter::Commissioned) + .await + .expect("listing sleds") + .into_iter() + .map(|sled| SledUuid::from_untyped_uuid(sled.id())) + .collect() + } + + #[nexus_test] + async fn test_decommission_is_idempotent( + cptestctx: &ControlPlaneTestContext, + ) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + + let mut commissioned_sled_ids = + list_all_commissioned_sled_ids(&opctx, datastore).await; + + // Pick a sled to decommission. + let decommissioned_sled_id = + commissioned_sled_ids.pop().expect("at least one sled"); + + // Expunge the sled (required prior to decommissioning). + let (authz_sled,) = LookupPath::new(&opctx, datastore) + .sled_id(decommissioned_sled_id.into_untyped_uuid()) + .lookup_for(Action::Modify) + .await + .expect("lookup authz_sled"); + datastore + .sled_set_policy_to_expunged(&opctx, &authz_sled) + .await + .expect("expunged sled"); + + // Decommission the sled. + decommission_sleds( + &opctx, + datastore, + std::iter::once(decommissioned_sled_id), + ) + .await + .expect("decommissioned sled"); + + // Ensure the sled was marked decommissioned in the db. + assert_eq!( + commissioned_sled_ids, + list_all_commissioned_sled_ids(&opctx, datastore).await + ); + + // Try to decommission the sled again; this should be fine. 
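// Editorial sketch (not part of this patch): the repeat call below succeeds
// because `decommission_one_sled` above treats "already decommissioned" as
// success. The same shape with a stand-in state machine (the real datastore
// also requires the sled to be expunged first, as the test above does; that
// step is elided here, and these are not the real SledState/TransitionError
// types):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum SledState {
    Active,
    Decommissioned,
}

#[derive(Debug)]
enum TransitionError {
    InvalidTransition { current: SledState },
}

// A strict transition: only Active -> Decommissioned is allowed.
fn set_decommissioned(state: &mut SledState) -> Result<(), TransitionError> {
    match *state {
        SledState::Active => {
            *state = SledState::Decommissioned;
            Ok(())
        }
        current => Err(TransitionError::InvalidTransition { current }),
    }
}

// Idempotent wrapper: a transition that fails only because the sled is
// already decommissioned is treated as success, so repeating blueprint
// realization (or racing another Nexus) is harmless.
fn decommission(state: &mut SledState) -> Result<(), TransitionError> {
    match set_decommissioned(state) {
        Ok(()) => Ok(()),
        Err(TransitionError::InvalidTransition { current })
            if current == SledState::Decommissioned =>
        {
            Ok(())
        }
        Err(err) => Err(err),
    }
}

fn main() {
    let mut state = SledState::Active;
    decommission(&mut state).unwrap(); // performs the transition
    decommission(&mut state).unwrap(); // repeat is a no-op, not an error
    assert_eq!(state, SledState::Decommissioned);
}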
+ decommission_sleds( + &opctx, + datastore, + std::iter::once(decommissioned_sled_id), + ) + .await + .expect("decommissioned sled"); + assert_eq!( + commissioned_sled_ids, + list_all_commissioned_sled_ids(&opctx, datastore).await + ); + } +} diff --git a/nexus/reconfigurator/planning/Cargo.toml b/nexus/reconfigurator/planning/Cargo.toml index 3c4ba12ee4..989ad6aa32 100644 --- a/nexus/reconfigurator/planning/Cargo.toml +++ b/nexus/reconfigurator/planning/Cargo.toml @@ -3,27 +3,35 @@ name = "nexus-reconfigurator-planning" version = "0.1.0" edition = "2021" +[lints] +workspace = true + [dependencies] anyhow.workspace = true chrono.workspace = true +debug-ignore.workspace = true gateway-client.workspace = true indexmap.workspace = true internal-dns.workspace = true ipnet.workspace = true -ipnetwork.workspace = true nexus-config.workspace = true nexus-inventory.workspace = true nexus-types.workspace = true omicron-common.workspace = true +omicron-uuid-kinds.workspace = true +oxnet.workspace = true rand.workspace = true -rand_seeder.workspace = true sled-agent-client.workspace = true slog.workspace = true thiserror.workspace = true +typed-rng.workspace = true uuid.workspace = true omicron-workspace-hack.workspace = true [dev-dependencies] expectorate.workspace = true +maplit.workspace = true omicron-test-utils.workspace = true +proptest.workspace = true +test-strategy.workspace = true diff --git a/nexus/reconfigurator/planning/proptest-regressions/planner/omicron_zone_placement.txt b/nexus/reconfigurator/planning/proptest-regressions/planner/omicron_zone_placement.txt new file mode 100644 index 0000000000..bb2ad481bc --- /dev/null +++ b/nexus/reconfigurator/planning/proptest-regressions/planner/omicron_zone_placement.txt @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc 72b902d1405681df2dd46efc097da6840ff1234dc9d0d7c0ecf07bed0b0e7d8d # shrinks to input = _TestPlaceOmicronZonesArgs { input: ArbitraryTestInput { existing_sleds: {[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]: ExistingSled { zones: ZonesToPlace { zones: [] }, waiting_for_ntp: false, num_disks: 1 }}, zones_to_place: ZonesToPlace { zones: [Nexus] } } } diff --git a/nexus/reconfigurator/planning/src/blueprint_builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder.rs deleted file mode 100644 index 0b0d422916..0000000000 --- a/nexus/reconfigurator/planning/src/blueprint_builder.rs +++ /dev/null @@ -1,1468 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! 
Low-level facility for generating Blueprints - -use crate::ip_allocator::IpAllocator; -use anyhow::anyhow; -use anyhow::bail; -use internal_dns::config::Host; -use internal_dns::config::ZoneVariant; -use ipnet::IpAdd; -use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; -use nexus_inventory::now_db_precision; -use nexus_types::deployment::Blueprint; -use nexus_types::deployment::BlueprintZoneConfig; -use nexus_types::deployment::BlueprintZoneDisposition; -use nexus_types::deployment::BlueprintZonesConfig; -use nexus_types::deployment::OmicronZoneConfig; -use nexus_types::deployment::OmicronZoneDataset; -use nexus_types::deployment::OmicronZoneType; -use nexus_types::deployment::Policy; -use nexus_types::deployment::SledResources; -use nexus_types::deployment::ZpoolName; -use nexus_types::inventory::Collection; -use omicron_common::address::get_internal_dns_server_addresses; -use omicron_common::address::get_sled_address; -use omicron_common::address::get_switch_zone_address; -use omicron_common::address::CP_SERVICES_RESERVED_ADDRESSES; -use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; -use omicron_common::address::NEXUS_OPTE_IPV6_SUBNET; -use omicron_common::address::NTP_PORT; -use omicron_common::address::SLED_RESERVED_ADDRESSES; -use omicron_common::api::external::Generation; -use omicron_common::api::external::IpNet; -use omicron_common::api::external::MacAddr; -use omicron_common::api::external::Vni; -use omicron_common::api::internal::shared::NetworkInterface; -use omicron_common::api::internal::shared::NetworkInterfaceKind; -use rand::rngs::StdRng; -use rand::RngCore; -use rand::SeedableRng; -use slog::o; -use slog::Logger; -use std::collections::BTreeMap; -use std::collections::HashSet; -use std::hash::Hash; -use std::net::IpAddr; -use std::net::Ipv4Addr; -use std::net::Ipv6Addr; -use std::net::SocketAddrV6; -use thiserror::Error; -use uuid::Uuid; - -/// Errors encountered while assembling blueprints -#[derive(Debug, Error)] -pub enum Error { - #[error("sled {sled_id}: ran out of available addresses for sled")] - OutOfAddresses { sled_id: Uuid }, - #[error("no Nexus zones exist in parent blueprint")] - NoNexusZonesInParentBlueprint, - #[error("no external service IP addresses are available")] - NoExternalServiceIpAvailable, - #[error("no system MAC addresses are available")] - NoSystemMacAddressAvailable, - #[error("exhausted available Nexus IP addresses")] - ExhaustedNexusIps, - #[error("programming error in planner")] - Planner(#[from] anyhow::Error), -} - -/// Describes whether an idempotent "ensure" operation resulted in action taken -/// or no action was necessary -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum Ensure { - /// action was taken - Added, - /// no action was necessary - NotNeeded, -} - -/// Describes whether an idempotent "ensure" operation resulted in multiple -/// actions taken or no action was necessary -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum EnsureMultiple { - /// action was taken, and multiple items were added - Added(usize), - /// no action was necessary - NotNeeded, -} - -/// Helper for assembling a blueprint -/// -/// There are two basic ways to assemble a new blueprint: -/// -/// 1. Build one directly from a collection. Such blueprints have no parent -/// blueprint. They are not customizable. Use -/// [`BlueprintBuilder::build_initial_from_collection`] for this. This would -/// generally only be used once in the lifetime of a rack, to assemble the -/// first blueprint. -/// -/// 2. 
Build one _from_ another blueprint, called the "parent", making changes -/// as desired. Use [`BlueprintBuilder::new_based_on`] for this. Once the -/// new blueprint is created, there is no dependency on the parent one. -/// However, the new blueprint can only be made the system's target if its -/// parent is the current target. -pub struct BlueprintBuilder<'a> { - #[allow(dead_code)] - log: Logger, - - /// previous blueprint, on which this one will be based - parent_blueprint: &'a Blueprint, - internal_dns_version: Generation, - external_dns_version: Generation, - - // These fields are used to allocate resources from sleds. - policy: &'a Policy, - sled_ip_allocators: BTreeMap, - - // These fields will become part of the final blueprint. See the - // corresponding fields in `Blueprint`. - zones: BlueprintZonesBuilder<'a>, - creator: String, - comments: Vec, - - // These fields mirror how RSS chooses addresses for zone NICs. - nexus_v4_ips: Box + Send>, - nexus_v6_ips: Box + Send>, - - // Iterator of available external IPs for service zones - available_external_ips: Box + Send + 'a>, - - // Iterator of available MAC addresses in the system address range - available_system_macs: Box>, - - // Random number generator for new UUIDs - rng: BlueprintBuilderRng, -} - -impl<'a> BlueprintBuilder<'a> { - /// Directly construct a `Blueprint` from the contents of a particular - /// collection (representing no changes from the collection state) - pub fn build_initial_from_collection( - collection: &Collection, - internal_dns_version: Generation, - external_dns_version: Generation, - policy: &Policy, - creator: &str, - ) -> Result { - Self::build_initial_impl( - collection, - internal_dns_version, - external_dns_version, - policy, - creator, - BlueprintBuilderRng::new(), - ) - } - - /// A version of [`Self::build_initial_from_collection`] that allows the - /// blueprint ID to be generated from a random seed. - pub fn build_initial_from_collection_seeded( - collection: &Collection, - internal_dns_version: Generation, - external_dns_version: Generation, - policy: &Policy, - creator: &str, - seed: H, - ) -> Result { - let mut rng = BlueprintBuilderRng::new(); - rng.set_seed(seed); - Self::build_initial_impl( - collection, - internal_dns_version, - external_dns_version, - policy, - creator, - rng, - ) - } - - fn build_initial_impl( - collection: &Collection, - internal_dns_version: Generation, - external_dns_version: Generation, - policy: &Policy, - creator: &str, - mut rng: BlueprintBuilderRng, - ) -> Result { - let blueprint_zones = policy - .sleds - .keys() - .map(|sled_id| { - let zones = collection - .omicron_zones - .get(sled_id) - .map(|z| &z.zones) - .ok_or_else(|| { - // We should not find a sled that's supposed to be - // in-service but is not part of the inventory. It's - // not that that can't ever happen. This could happen - // when a sled is first being added to the system. Of - // course it could also happen if this sled agent failed - // our inventory request. But this is the initial - // blueprint (so this shouldn't be the "add sled" case) - // and we want to get it right (so we don't want to - // leave out sleds whose sled agent happened to be down - // when we tried to do this). The operator (or, more - // likely, a support person) will have to sort out - // what's going on if this happens. 
- Error::Planner(anyhow!( - "building initial blueprint: sled {:?} is \ - supposed to be in service but has no zones \ - in inventory", - sled_id - )) - })?; - - Ok(( - *sled_id, - BlueprintZonesConfig::initial_from_collection(&zones), - )) - }) - .collect::>()?; - Ok(Blueprint { - id: rng.blueprint_rng.next_uuid(), - blueprint_zones, - parent_blueprint_id: None, - internal_dns_version, - external_dns_version, - time_created: now_db_precision(), - creator: creator.to_owned(), - comment: format!("from collection {}", collection.id), - }) - } - - /// Construct a new `BlueprintBuilder` based on a previous blueprint, - /// starting with no changes from that state - pub fn new_based_on( - log: &Logger, - parent_blueprint: &'a Blueprint, - internal_dns_version: Generation, - external_dns_version: Generation, - policy: &'a Policy, - creator: &str, - ) -> anyhow::Result> { - let log = log.new(o!( - "component" => "BlueprintBuilder", - "parent_id" => parent_blueprint.id.to_string(), - )); - - // Scan through the parent blueprint and build several sets of "used - // resources". When adding new control plane zones to a sled, we may - // need to allocate new resources to that zone. However, allocation at - // this point is entirely optimistic and theoretical: our caller may - // discard the blueprint we create without ever making it the new - // target, or it might be an arbitrarily long time before it becomes the - // target. We need to be able to make allocation decisions that we - // expect the blueprint executor to be able to realize successfully if - // and when we become the target, but we cannot _actually_ perform - // resource allocation. - // - // To do this, we look at our parent blueprint's used resources, and - // then choose new resources that aren't already in use (if possible; if - // we need to allocate a new resource and the parent blueprint appears - // to be using all the resources of that kind, our blueprint generation - // will fail). - // - // For example, RSS assigns Nexus NIC IPs by stepping through a list of - // addresses based on `NEXUS_OPTE_IPVx_SUBNET` (as in the iterators - // below). We use the same list of addresses, but additionally need to - // filter out the existing IPs for any Nexus instances that already - // exist. - // - // Note that by building these iterators up front based on - // `parent_blueprint`, we cannot reuse resources in a case where we - // remove a zone that used a resource and then add another zone that - // wants the same kind of resource. We don't support zone removal yet, - // but expect this to be okay: we don't anticipate removal and addition - // to frequently be combined into the exact same blueprint, particularly - // in a way that expects the addition to reuse resources from the - // removal; we won't want to attempt to reuse resources from a zone - // until we know it's been fully removed. - let mut existing_nexus_v4_ips: HashSet = HashSet::new(); - let mut existing_nexus_v6_ips: HashSet = HashSet::new(); - let mut used_external_ips: HashSet = HashSet::new(); - let mut used_macs: HashSet = HashSet::new(); - - for (_, z) in parent_blueprint.all_omicron_zones() { - let zone_type = &z.zone_type; - if let OmicronZoneType::Nexus { nic, .. 
} = zone_type { - match nic.ip { - IpAddr::V4(ip) => { - if !existing_nexus_v4_ips.insert(ip) { - bail!("duplicate Nexus NIC IP: {ip}"); - } - } - IpAddr::V6(ip) => { - if !existing_nexus_v6_ips.insert(ip) { - bail!("duplicate Nexus NIC IP: {ip}"); - } - } - } - } - - if let Some(external_ip) = zone_type.external_ip()? { - // For the test suite, ignore localhost. It gets reused many - // times and that's okay. We don't expect to see localhost - // outside the test suite. - if !external_ip.is_loopback() - && !used_external_ips.insert(external_ip) - { - bail!("duplicate external IP: {external_ip}"); - } - } - if let Some(nic) = zone_type.service_vnic() { - if !used_macs.insert(nic.mac) { - bail!("duplicate service vNIC MAC: {}", nic.mac); - } - } - } - - // TODO-performance Building these iterators as "walk through the list - // and skip anything we've used already" is fine as long as we're - // talking about a small number of resources (e.g., single-digit number - // of Nexus instances), but wouldn't be ideal if we have many resources - // we need to skip. We could do something smarter here based on the sets - // of used resources we built above if needed. - let nexus_v4_ips = Box::new( - NEXUS_OPTE_IPV4_SUBNET - .0 - .iter() - .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES) - .filter(move |ip| !existing_nexus_v4_ips.contains(ip)), - ); - let nexus_v6_ips = Box::new( - NEXUS_OPTE_IPV6_SUBNET - .0 - .iter() - .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES) - .filter(move |ip| !existing_nexus_v6_ips.contains(ip)), - ); - let available_external_ips = Box::new( - policy - .service_ip_pool_ranges - .iter() - .flat_map(|r| r.iter()) - .filter(move |ip| !used_external_ips.contains(ip)), - ); - let available_system_macs = Box::new( - MacAddr::iter_system().filter(move |mac| !used_macs.contains(mac)), - ); - - Ok(BlueprintBuilder { - log, - parent_blueprint, - internal_dns_version, - external_dns_version, - policy, - sled_ip_allocators: BTreeMap::new(), - zones: BlueprintZonesBuilder::new(parent_blueprint), - creator: creator.to_owned(), - comments: Vec::new(), - nexus_v4_ips, - nexus_v6_ips, - available_external_ips, - available_system_macs, - rng: BlueprintBuilderRng::new(), - }) - } - - /// Assemble a final [`Blueprint`] based on the contents of the builder - pub fn build(mut self) -> Blueprint { - // Collect the Omicron zones config for each in-service sled. - let blueprint_zones = - self.zones.into_zones_map(self.policy.sleds.keys().copied()); - Blueprint { - id: self.rng.blueprint_rng.next_uuid(), - blueprint_zones, - parent_blueprint_id: Some(self.parent_blueprint.id), - internal_dns_version: self.internal_dns_version, - external_dns_version: self.external_dns_version, - time_created: now_db_precision(), - creator: self.creator, - comment: self.comments.join(", "), - } - } - - /// Within tests, set a seeded RNG for deterministic results. - /// - /// This will ensure that tests that use this builder will produce the same - /// results each time they are run. - pub fn set_rng_seed(&mut self, seed: H) -> &mut Self { - self.rng.set_seed(seed); - self - } - - /// Sets the blueprints "comment" - /// - /// This is a short human-readable string summarizing the changes reflected - /// in the blueprint. This is only intended for debugging. - pub fn comment(&mut self, comment: S) - where - String: From, - { - self.comments.push(String::from(comment)); - } - - pub fn sled_ensure_zone_ntp( - &mut self, - sled_id: Uuid, - ) -> Result { - // If there's already an NTP zone on this sled, do nothing. 
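// Editorial sketch (not part of this patch): the allocation iterators built
// above (`nexus_v4_ips`, `available_external_ips`, `available_system_macs`)
// all follow the same idea: walk a fixed candidate range, skip the reserved
// prefix, and skip anything the parent blueprint already uses. With plain
// integers standing in for addresses:

use std::collections::HashSet;

fn main() {
    // Values already assigned in the parent blueprint.
    let used: HashSet<u16> = [10, 11, 13].into_iter().collect();

    // The candidate range mirrors how RSS steps through a subnet: skip the
    // reserved entries first, then skip anything already in use.
    let mut available = (0u16..=20)
        .skip(10) // stand-in for NUM_INITIAL_RESERVED_IP_ADDRESSES
        .filter(move |addr| !used.contains(addr));

    // New zones take the next unused candidate; running out is an error in
    // the real builder (e.g. Error::ExhaustedNexusIps).
    assert_eq!(available.next(), Some(12));
    assert_eq!(available.next(), Some(14));
}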
- let has_ntp = self - .zones - .current_sled_zones(sled_id) - .any(|z| z.config.zone_type.is_ntp()); - if has_ntp { - return Ok(Ensure::NotNeeded); - } - - let sled_info = self.sled_resources(sled_id)?; - let sled_subnet = sled_info.subnet; - let ip = self.sled_alloc_ip(sled_id)?; - let ntp_address = SocketAddrV6::new(ip, NTP_PORT, 0, 0); - - // Construct the list of internal DNS servers. - // - // It'd be tempting to get this list from the other internal NTP - // servers but there may not be any of those. We could also - // construct this list manually from the set of internal DNS servers - // actually deployed. Instead, we take the same approach as RSS: - // these are at known, fixed addresses relative to the AZ subnet - // (which itself is a known-prefix parent subnet of the sled subnet). - let dns_servers = - get_internal_dns_server_addresses(sled_subnet.net().network()); - - // The list of boundary NTP servers is not necessarily stored - // anywhere (unless there happens to be another internal NTP zone - // lying around). Recompute it based on what boundary servers - // currently exist. - let ntp_servers = self - .parent_blueprint - .all_omicron_zones() - .filter_map(|(_, z)| { - if matches!(z.zone_type, OmicronZoneType::BoundaryNtp { .. }) { - Some(Host::for_zone(z.id, ZoneVariant::Other).fqdn()) - } else { - None - } - }) - .collect(); - - let zone = OmicronZoneConfig { - id: self.rng.zone_rng.next_uuid(), - underlay_address: ip, - zone_type: OmicronZoneType::InternalNtp { - address: ntp_address.to_string(), - ntp_servers, - dns_servers, - domain: None, - }, - }; - let zone = BlueprintZoneConfig { - config: zone, - disposition: BlueprintZoneDisposition::InService, - }; - - self.sled_add_zone(sled_id, zone)?; - Ok(Ensure::Added) - } - - pub fn sled_ensure_zone_crucible( - &mut self, - sled_id: Uuid, - pool_name: ZpoolName, - ) -> Result { - // If this sled already has a Crucible zone on this pool, do nothing. - let has_crucible_on_this_pool = - self.zones.current_sled_zones(sled_id).any(|z| { - matches!( - &z.config.zone_type, - OmicronZoneType::Crucible { dataset, .. } - if dataset.pool_name == pool_name - ) - }); - if has_crucible_on_this_pool { - return Ok(Ensure::NotNeeded); - } - - let sled_info = self.sled_resources(sled_id)?; - if !sled_info.zpools.contains(&pool_name) { - return Err(Error::Planner(anyhow!( - "adding crucible zone for sled {:?}: \ - attempted to use unknown zpool {:?}", - sled_id, - pool_name - ))); - } - - let ip = self.sled_alloc_ip(sled_id)?; - let port = omicron_common::address::CRUCIBLE_PORT; - let address = SocketAddrV6::new(ip, port, 0, 0).to_string(); - let zone = OmicronZoneConfig { - id: self.rng.zone_rng.next_uuid(), - underlay_address: ip, - zone_type: OmicronZoneType::Crucible { - address, - dataset: OmicronZoneDataset { pool_name }, - }, - }; - - let zone = BlueprintZoneConfig { - config: zone, - disposition: BlueprintZoneDisposition::InService, - }; - self.sled_add_zone(sled_id, zone)?; - Ok(Ensure::Added) - } - - /// Return the number of Nexus zones that would be configured to run on the - /// given sled if this builder generated a blueprint - /// - /// This value may change before a blueprint is actually generated if - /// further changes are made to the builder. 
- pub fn sled_num_nexus_zones(&self, sled_id: Uuid) -> usize { - self.zones - .current_sled_zones(sled_id) - .filter(|z| z.config.zone_type.is_nexus()) - .count() - } - - pub fn sled_ensure_zone_multiple_nexus( - &mut self, - sled_id: Uuid, - desired_zone_count: usize, - ) -> Result { - // Whether Nexus should use TLS and what the external DNS servers it - // should use are currently provided at rack-setup time, and should be - // consistent across all Nexus instances. We'll assume we can copy them - // from any other Nexus zone in our parent blueprint. - // - // TODO-correctness Once these properties can be changed by a rack - // operator, this will need more work. At a minimum, if such a change - // goes through the blueprint system (which seems likely), we'll need to - // check that we're if this builder is being used to make such a change, - // that change is also reflected here in a new zone. Perhaps these - // settings should be part of `Policy` instead? - let (external_tls, external_dns_servers) = self - .parent_blueprint - .all_omicron_zones() - .find_map(|(_, z)| match &z.zone_type { - OmicronZoneType::Nexus { - external_tls, - external_dns_servers, - .. - } => Some((*external_tls, external_dns_servers.clone())), - _ => None, - }) - .ok_or(Error::NoNexusZonesInParentBlueprint)?; - self.sled_ensure_zone_multiple_nexus_with_config( - sled_id, - desired_zone_count, - external_tls, - external_dns_servers, - ) - } - - pub fn sled_ensure_zone_multiple_nexus_with_config( - &mut self, - sled_id: Uuid, - desired_zone_count: usize, - external_tls: bool, - external_dns_servers: Vec, - ) -> Result { - // How many Nexus zones do we need to add? - let nexus_count = self.sled_num_nexus_zones(sled_id); - let num_nexus_to_add = match desired_zone_count.checked_sub(nexus_count) - { - Some(0) => return Ok(EnsureMultiple::NotNeeded), - Some(n) => n, - None => { - return Err(Error::Planner(anyhow!( - "removing a Nexus zone not yet supported \ - (sled {sled_id} has {nexus_count}; \ - planner wants {desired_zone_count})" - ))); - } - }; - - for _ in 0..num_nexus_to_add { - let nexus_id = self.rng.zone_rng.next_uuid(); - let external_ip = self - .available_external_ips - .next() - .ok_or(Error::NoExternalServiceIpAvailable)?; - - let nic = { - let (ip, subnet) = match external_ip { - IpAddr::V4(_) => ( - self.nexus_v4_ips - .next() - .ok_or(Error::ExhaustedNexusIps)? - .into(), - IpNet::from(*NEXUS_OPTE_IPV4_SUBNET), - ), - IpAddr::V6(_) => ( - self.nexus_v6_ips - .next() - .ok_or(Error::ExhaustedNexusIps)? 
- .into(), - IpNet::from(*NEXUS_OPTE_IPV6_SUBNET), - ), - }; - let mac = self - .available_system_macs - .next() - .ok_or(Error::NoSystemMacAddressAvailable)?; - NetworkInterface { - id: self.rng.network_interface_rng.next_uuid(), - kind: NetworkInterfaceKind::Service { id: nexus_id }, - name: format!("nexus-{nexus_id}").parse().unwrap(), - ip, - mac, - subnet, - vni: Vni::SERVICES_VNI, - primary: true, - slot: 0, - } - }; - - let ip = self.sled_alloc_ip(sled_id)?; - let port = omicron_common::address::NEXUS_INTERNAL_PORT; - let internal_address = - SocketAddrV6::new(ip, port, 0, 0).to_string(); - let zone = OmicronZoneConfig { - id: nexus_id, - underlay_address: ip, - zone_type: OmicronZoneType::Nexus { - internal_address, - external_ip, - nic, - external_tls, - external_dns_servers: external_dns_servers.clone(), - }, - }; - let zone = BlueprintZoneConfig { - config: zone, - disposition: BlueprintZoneDisposition::InService, - }; - self.sled_add_zone(sled_id, zone)?; - } - - Ok(EnsureMultiple::Added(num_nexus_to_add)) - } - - fn sled_add_zone( - &mut self, - sled_id: Uuid, - zone: BlueprintZoneConfig, - ) -> Result<(), Error> { - // Check the sled id and return an appropriate error if it's invalid. - let _ = self.sled_resources(sled_id)?; - - let sled_zones = self.zones.change_sled_zones(sled_id); - // A sled should have a small number (< 20) of zones so a linear search - // should be very fast. - if sled_zones.zones.iter().any(|z| z.config.id == zone.config.id) { - return Err(Error::Planner(anyhow!( - "attempted to add zone that already exists: {}", - zone.config.id - ))); - } - sled_zones.zones.push(zone); - Ok(()) - } - - /// Returns a newly-allocated underlay address suitable for use by Omicron - /// zones - fn sled_alloc_ip(&mut self, sled_id: Uuid) -> Result { - let sled_subnet = self.sled_resources(sled_id)?.subnet; - let allocator = - self.sled_ip_allocators.entry(sled_id).or_insert_with(|| { - let sled_subnet_addr = sled_subnet.net().network(); - let minimum = sled_subnet_addr - .saturating_add(u128::from(SLED_RESERVED_ADDRESSES)); - let maximum = sled_subnet_addr - .saturating_add(u128::from(CP_SERVICES_RESERVED_ADDRESSES)); - assert!(sled_subnet.net().contains(minimum)); - assert!(sled_subnet.net().contains(maximum)); - let mut allocator = IpAllocator::new(minimum, maximum); - - // We shouldn't need to explicitly reserve the sled's global - // zone and switch addresses because they should be out of our - // range, but we do so just to be sure. - let sled_gz_addr = *get_sled_address(sled_subnet).ip(); - assert!(sled_subnet.net().contains(sled_gz_addr)); - assert!(minimum > sled_gz_addr); - assert!(maximum > sled_gz_addr); - let switch_zone_addr = get_switch_zone_address(sled_subnet); - assert!(sled_subnet.net().contains(switch_zone_addr)); - assert!(minimum > switch_zone_addr); - assert!(maximum > switch_zone_addr); - - // Record each of the sled's zones' underlay addresses as - // allocated. - for z in self.zones.current_sled_zones(sled_id) { - allocator.reserve(z.config.underlay_address); - } - - allocator - }); - - allocator.alloc().ok_or(Error::OutOfAddresses { sled_id }) - } - - fn sled_resources(&self, sled_id: Uuid) -> Result<&SledResources, Error> { - self.policy.sleds.get(&sled_id).ok_or_else(|| { - Error::Planner(anyhow!( - "attempted to use sled that is not in service: {}", - sled_id - )) - }) - } -} - -#[derive(Debug)] -struct BlueprintBuilderRng { - // Have separate RNGs for the different kinds of UUIDs we might add, - // generated from the main RNG. 
This is so that e.g. adding a new network - // interface doesn't alter the blueprint or sled UUID. - // - // In the future, when we switch to typed UUIDs, each of these will be - // associated with a specific `TypedUuidKind`. - blueprint_rng: UuidRng, - zone_rng: UuidRng, - network_interface_rng: UuidRng, -} - -impl BlueprintBuilderRng { - fn new() -> Self { - Self::new_from_rng(StdRng::from_entropy()) - } - - fn new_from_rng(mut root_rng: StdRng) -> Self { - let blueprint_rng = UuidRng::from_root_rng(&mut root_rng, "blueprint"); - let zone_rng = UuidRng::from_root_rng(&mut root_rng, "zone"); - let network_interface_rng = - UuidRng::from_root_rng(&mut root_rng, "network_interface"); - - BlueprintBuilderRng { blueprint_rng, zone_rng, network_interface_rng } - } - - fn set_seed(&mut self, seed: H) { - // Important to add some more bytes here, so that builders with the - // same seed but different purposes don't end up with the same UUIDs. - const SEED_EXTRA: &str = "blueprint-builder"; - let mut seeder = rand_seeder::Seeder::from((seed, SEED_EXTRA)); - *self = Self::new_from_rng(seeder.make_rng::()); - } -} - -#[derive(Debug)] -pub(crate) struct UuidRng { - rng: StdRng, -} - -impl UuidRng { - /// Returns a new `UuidRng` generated from the root RNG. - /// - /// `extra` is a string that should be unique to the purpose of the UUIDs. - fn from_root_rng(root_rng: &mut StdRng, extra: &'static str) -> Self { - let seed = root_rng.next_u64(); - let mut seeder = rand_seeder::Seeder::from((seed, extra)); - Self { rng: seeder.make_rng::() } - } - - /// `extra` is a string that should be unique to the purpose of the UUIDs. - pub(crate) fn from_seed(seed: H, extra: &'static str) -> Self { - let mut seeder = rand_seeder::Seeder::from((seed, extra)); - Self { rng: seeder.make_rng::() } - } - - /// Returns a new UUIDv4 generated from the RNG. - pub(crate) fn next_uuid(&mut self) -> Uuid { - let mut bytes = [0; 16]; - self.rng.fill_bytes(&mut bytes); - // Builder::from_random_bytes will turn the random bytes into a valid - // UUIDv4. (Parts of the system depend on the UUID actually being valid - // v4, so it's important that we don't just use `uuid::from_bytes`.) - uuid::Builder::from_random_bytes(bytes).into_uuid() - } -} - -/// Helper for working with sets of zones on each sled -/// -/// Tracking the set of zones is slightly non-trivial because we need to bump -/// the per-sled generation number iff the zones are changed. So we need to -/// keep track of whether we've changed the zones relative to the parent -/// blueprint. We do this by keeping a copy of any [`BlueprintZonesConfig`] -/// that we've changed and a _reference_ to the parent blueprint's zones. This -/// struct makes it easy for callers iterate over the right set of zones. -struct BlueprintZonesBuilder<'a> { - changed_zones: BTreeMap, - parent_zones: &'a BTreeMap, -} - -impl<'a> BlueprintZonesBuilder<'a> { - pub fn new(parent_blueprint: &'a Blueprint) -> BlueprintZonesBuilder { - BlueprintZonesBuilder { - changed_zones: BTreeMap::new(), - parent_zones: &parent_blueprint.blueprint_zones, - } - } - - /// Returns a mutable reference to a sled's Omicron zones *because* we're - /// going to change them. It's essential that the caller _does_ change them - /// because we will have bumped the generation number and we don't want to - /// do that if no changes are being made. 
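The doc comment above is the key invariant for the zones builder: clone a sled's config from the parent and bump its generation only on the first real mutation, never on a no-op pass. A rough sketch of that copy-on-write shape, with simplified stand-in types (ZonesConfig, ZonesBuilder, and u32 sled IDs are illustrative only):

use std::collections::BTreeMap;

#[derive(Clone, Debug)]
struct ZonesConfig {
    generation: u64,
    zones: Vec<String>,
}

struct ZonesBuilder<'a> {
    changed: BTreeMap<u32, ZonesConfig>,
    parent: &'a BTreeMap<u32, ZonesConfig>,
}

impl<'a> ZonesBuilder<'a> {
    // Copy-on-write: the first time a sled's zones are touched, clone the
    // parent config and bump its generation; later calls reuse that copy.
    fn change_sled_zones(&mut self, sled_id: u32) -> &mut ZonesConfig {
        let parent = self.parent;
        self.changed.entry(sled_id).or_insert_with(|| {
            match parent.get(&sled_id) {
                Some(old) => ZonesConfig {
                    generation: old.generation + 1,
                    zones: old.zones.clone(),
                },
                // The first generation is reserved to mean "no zones", so a
                // brand-new sled starts at the next one.
                None => ZonesConfig { generation: 2, zones: Vec::new() },
            }
        })
    }
}

fn main() {
    let parent = BTreeMap::from([(
        7,
        ZonesConfig { generation: 3, zones: vec!["ntp".to_string()] },
    )]);
    let mut b = ZonesBuilder { changed: BTreeMap::new(), parent: &parent };
    b.change_sled_zones(7).zones.push("crucible".to_string());
    assert_eq!(b.changed[&7].generation, 4); // bumped exactly once
    b.change_sled_zones(7).zones.push("nexus".to_string());
    assert_eq!(b.changed[&7].generation, 4); // not bumped again
}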
- pub fn change_sled_zones( - &mut self, - sled_id: Uuid, - ) -> &mut BlueprintZonesConfig { - self.changed_zones.entry(sled_id).or_insert_with(|| { - if let Some(old_sled_zones) = self.parent_zones.get(&sled_id) { - BlueprintZonesConfig { - generation: old_sled_zones.generation.next(), - zones: old_sled_zones.zones.clone(), - } - } else { - // The first generation is reserved to mean the one containing - // no zones. See OmicronZonesConfig::INITIAL_GENERATION. So - // we start with the next one. - BlueprintZonesConfig { - generation: Generation::new().next(), - zones: vec![], - } - } - }) - } - - /// Iterates over the list of Omicron zones currently configured for this - /// sled in the blueprint that's being built - pub fn current_sled_zones( - &self, - sled_id: Uuid, - ) -> Box + '_> { - if let Some(sled_zones) = self - .changed_zones - .get(&sled_id) - .or_else(|| self.parent_zones.get(&sled_id)) - { - Box::new(sled_zones.zones.iter()) - } else { - Box::new(std::iter::empty()) - } - } - - /// Produces an owned map of zones for the requested sleds - pub fn into_zones_map( - mut self, - sled_ids: impl Iterator, - ) -> BTreeMap { - sled_ids - .map(|sled_id| { - // Start with self.changed_zones, which contains entries for any - // sled whose zones config is changing in this blueprint. - let mut zones = self - .changed_zones - .remove(&sled_id) - // If it's not there, use the config from the parent - // blueprint. - .or_else(|| self.parent_zones.get(&sled_id).cloned()) - // If it's not there either, then this must be a new sled - // and we haven't added any zones to it yet. Use the - // standard initial config. - .unwrap_or_else(|| BlueprintZonesConfig { - generation: Generation::new(), - zones: vec![], - }); - - zones.sort(); - - (sled_id, zones) - }) - .collect() - } -} - -#[cfg(test)] -pub mod test { - use super::*; - use crate::example::example; - use crate::example::ExampleSystem; - use crate::system::SledBuilder; - use omicron_common::address::IpRange; - use omicron_test_utils::dev::test_setup_log; - use sled_agent_client::types::{OmicronZoneConfig, OmicronZoneType}; - use std::collections::BTreeSet; - - pub const DEFAULT_N_SLEDS: usize = 3; - - /// Checks various conditions that should be true for all blueprints - pub fn verify_blueprint(blueprint: &Blueprint) { - let mut underlay_ips: BTreeMap = - BTreeMap::new(); - for (_, zone) in blueprint.all_omicron_zones() { - if let Some(previous) = - underlay_ips.insert(zone.underlay_address, zone) - { - panic!( - "found duplicate underlay IP {} in zones {} and \ - {}\n\nblueprint: {:#?}", - zone.underlay_address, zone.id, previous.id, blueprint - ); - } - } - } - - #[test] - fn test_initial() { - // Test creating a blueprint from a collection and verifying that it - // describes no changes. 
- static TEST_NAME: &str = "blueprint_builder_test_initial"; - let logctx = test_setup_log(TEST_NAME); - let (collection, policy) = - example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - let blueprint_initial = - BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - &policy, - "the_test", - TEST_NAME, - ) - .expect("failed to create initial blueprint"); - verify_blueprint(&blueprint_initial); - - let diff = blueprint_initial.diff_sleds_from_collection(&collection); - println!( - "collection -> initial blueprint (expected no changes):\n{}", - diff.display() - ); - assert_eq!(diff.sleds_added().count(), 0); - assert_eq!(diff.sleds_removed().count(), 0); - assert_eq!(diff.sleds_changed().count(), 0); - - // Test a no-op blueprint. - let builder = BlueprintBuilder::new_based_on( - &logctx.log, - &blueprint_initial, - Generation::new(), - Generation::new(), - &policy, - "test_basic", - ) - .expect("failed to create builder"); - let blueprint = builder.build(); - verify_blueprint(&blueprint); - let diff = blueprint_initial.diff_sleds(&blueprint); - println!( - "initial blueprint -> next blueprint (expected no changes):\n{}", - diff.display() - ); - assert_eq!(diff.sleds_added().count(), 0); - assert_eq!(diff.sleds_removed().count(), 0); - assert_eq!(diff.sleds_changed().count(), 0); - - logctx.cleanup_successful(); - } - - #[test] - fn test_basic() { - static TEST_NAME: &str = "blueprint_builder_test_basic"; - let logctx = test_setup_log(TEST_NAME); - let mut example = - ExampleSystem::new(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - let blueprint1 = &example.blueprint; - verify_blueprint(blueprint1); - - let mut builder = BlueprintBuilder::new_based_on( - &logctx.log, - blueprint1, - Generation::new(), - Generation::new(), - &example.policy, - "test_basic", - ) - .expect("failed to create builder"); - - // The example blueprint should have internal NTP zones on all the - // existing sleds, plus Crucible zones on all pools. So if we ensure - // all these zones exist, we should see no change. - for (sled_id, sled_resources) in &example.policy.sleds { - builder.sled_ensure_zone_ntp(*sled_id).unwrap(); - for pool_name in &sled_resources.zpools { - builder - .sled_ensure_zone_crucible(*sled_id, pool_name.clone()) - .unwrap(); - } - } - - let blueprint2 = builder.build(); - verify_blueprint(&blueprint2); - let diff = blueprint1.diff_sleds(&blueprint2); - println!( - "initial blueprint -> next blueprint (expected no changes):\n{}", - diff.display() - ); - assert_eq!(diff.sleds_added().count(), 0); - assert_eq!(diff.sleds_removed().count(), 0); - assert_eq!(diff.sleds_changed().count(), 0); - - // The next step is adding these zones to a new sled. 
- let new_sled_id = example.sled_rng.next_uuid(); - let _ = - example.system.sled(SledBuilder::new().id(new_sled_id)).unwrap(); - let policy = example.system.to_policy().unwrap(); - let mut builder = BlueprintBuilder::new_based_on( - &logctx.log, - &blueprint2, - Generation::new(), - Generation::new(), - &policy, - "test_basic", - ) - .expect("failed to create builder"); - builder.sled_ensure_zone_ntp(new_sled_id).unwrap(); - let new_sled_resources = policy.sleds.get(&new_sled_id).unwrap(); - for pool_name in &new_sled_resources.zpools { - builder - .sled_ensure_zone_crucible(new_sled_id, pool_name.clone()) - .unwrap(); - } - - let blueprint3 = builder.build(); - verify_blueprint(&blueprint3); - let diff = blueprint2.diff_sleds(&blueprint3); - println!("expecting new NTP and Crucible zones:\n{}", diff.display()); - - // No sleds were changed or removed. - assert_eq!(diff.sleds_changed().count(), 0); - assert_eq!(diff.sleds_removed().count(), 0); - - // One sled was added. - let sleds: Vec<_> = diff.sleds_added().collect(); - assert_eq!(sleds.len(), 1); - let (sled_id, new_sled_zones) = sleds[0]; - assert_eq!(sled_id, new_sled_id); - // The generation number should be newer than the initial default. - assert!(new_sled_zones.generation > Generation::new()); - - // All zones' underlay addresses ought to be on the sled's subnet. - for z in &new_sled_zones.zones { - assert!(new_sled_resources - .subnet - .net() - .contains(z.config.underlay_address)); - } - - // Check for an NTP zone. Its sockaddr's IP should also be on the - // sled's subnet. - assert!(new_sled_zones.zones.iter().any(|z| { - if let OmicronZoneType::InternalNtp { address, .. } = - &z.config.zone_type - { - let sockaddr = address.parse::().unwrap(); - assert!(new_sled_resources - .subnet - .net() - .contains(*sockaddr.ip())); - true - } else { - false - } - })); - let crucible_pool_names = new_sled_zones - .zones - .iter() - .filter_map(|z| { - if let OmicronZoneType::Crucible { address, dataset } = - &z.config.zone_type - { - let sockaddr = address.parse::().unwrap(); - let ip = sockaddr.ip(); - assert!(new_sled_resources.subnet.net().contains(*ip)); - Some(dataset.pool_name.clone()) - } else { - None - } - }) - .collect::>(); - assert_eq!(crucible_pool_names, new_sled_resources.zpools); - - logctx.cleanup_successful(); - } - - #[test] - fn test_add_nexus_with_no_existing_nexus_zones() { - static TEST_NAME: &str = - "blueprint_builder_test_add_nexus_with_no_existing_nexus_zones"; - let logctx = test_setup_log(TEST_NAME); - let (mut collection, policy) = - example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - - // We don't care about the DNS versions here. - let internal_dns_version = Generation::new(); - let external_dns_version = Generation::new(); - - // Adding a new Nexus zone currently requires copying settings from an - // existing Nexus zone. If we remove all Nexus zones from the - // collection, create a blueprint, then try to add a Nexus zone, it - // should fail. - for zones in collection.omicron_zones.values_mut() { - zones.zones.zones.retain(|z| { - !matches!(z.zone_type, OmicronZoneType::Nexus { .. 
}) - }); - } - - let parent = BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - internal_dns_version, - external_dns_version, - &policy, - "test", - TEST_NAME, - ) - .expect("failed to create initial blueprint"); - - let mut builder = BlueprintBuilder::new_based_on( - &logctx.log, - &parent, - internal_dns_version, - external_dns_version, - &policy, - "test", - ) - .expect("failed to create builder"); - - let err = builder - .sled_ensure_zone_multiple_nexus( - collection - .omicron_zones - .keys() - .next() - .copied() - .expect("no sleds present"), - 1, - ) - .unwrap_err(); - - assert!( - matches!(err, Error::NoNexusZonesInParentBlueprint), - "unexpected error {err}" - ); - - logctx.cleanup_successful(); - } - - #[test] - fn test_add_nexus_error_cases() { - static TEST_NAME: &str = "blueprint_builder_test_add_nexus_error_cases"; - let logctx = test_setup_log(TEST_NAME); - let (mut collection, policy) = - example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - - // We don't care about the DNS versions here. - let internal_dns_version = Generation::new(); - let external_dns_version = Generation::new(); - - // Remove the Nexus zone from one of the sleds so that - // `sled_ensure_zone_nexus` can attempt to add a Nexus zone to - // `sled_id`. - let sled_id = { - let mut selected_sled_id = None; - for (sled_id, zones) in &mut collection.omicron_zones { - let nzones_before_retain = zones.zones.zones.len(); - zones.zones.zones.retain(|z| { - !matches!(z.zone_type, OmicronZoneType::Nexus { .. }) - }); - if zones.zones.zones.len() < nzones_before_retain { - selected_sled_id = Some(*sled_id); - break; - } - } - selected_sled_id.expect("found no sleds with Nexus zone") - }; - - let parent = BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - &policy, - "test", - TEST_NAME, - ) - .expect("failed to create initial blueprint"); - - { - // Attempting to add Nexus to the sled we removed it from (with no - // other changes to the environment) should succeed. - let mut builder = BlueprintBuilder::new_based_on( - &logctx.log, - &parent, - internal_dns_version, - external_dns_version, - &policy, - "test", - ) - .expect("failed to create builder"); - let added = builder - .sled_ensure_zone_multiple_nexus(sled_id, 1) - .expect("failed to ensure nexus zone"); - - assert_eq!(added, EnsureMultiple::Added(1)); - } - - { - // Attempting to add multiple Nexus zones to the sled we removed it - // from (with no other changes to the environment) should also - // succeed. - let mut builder = BlueprintBuilder::new_based_on( - &logctx.log, - &parent, - internal_dns_version, - external_dns_version, - &policy, - "test", - ) - .expect("failed to create builder"); - let added = builder - .sled_ensure_zone_multiple_nexus(sled_id, 3) - .expect("failed to ensure nexus zone"); - - assert_eq!(added, EnsureMultiple::Added(3)); - } - - { - // Replace the policy's external service IP pool ranges with ranges - // that are already in use by existing zones. Attempting to add a - // Nexus with no remaining external IPs should fail. 
- let mut policy = policy.clone(); - let mut used_ip_ranges = Vec::new(); - for (_, z) in parent.all_omicron_zones() { - if let Some(ip) = z - .zone_type - .external_ip() - .expect("failed to check for external IP") - { - used_ip_ranges.push(IpRange::from(ip)); - } - } - assert!(!used_ip_ranges.is_empty()); - policy.service_ip_pool_ranges = used_ip_ranges; - - let mut builder = BlueprintBuilder::new_based_on( - &logctx.log, - &parent, - internal_dns_version, - external_dns_version, - &policy, - "test", - ) - .expect("failed to create builder"); - let err = builder - .sled_ensure_zone_multiple_nexus(sled_id, 1) - .unwrap_err(); - - assert!( - matches!(err, Error::NoExternalServiceIpAvailable), - "unexpected error {err}" - ); - } - - // We're not testing the `ExhaustedNexusIps` error case (where we've run - // out of Nexus OPTE addresses), because it's fairly diffiult to induce - // that from outside: we would need to start from a parent blueprint - // that contained a Nexus instance for every IP in the - // `NEXUS_OPTE_*_SUBNET`. We could hack around that by creating the - // `BlueprintBuilder` and mucking with its internals, but that doesn't - // seem like a particularly useful test either. - - logctx.cleanup_successful(); - } - - #[test] - fn test_invalid_parent_blueprint_two_zones_with_same_external_ip() { - static TEST_NAME: &str = - "blueprint_builder_test_invalid_parent_blueprint_\ - two_zones_with_same_external_ip"; - let logctx = test_setup_log(TEST_NAME); - let (mut collection, policy) = - example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - - // We should fail if the parent blueprint claims to contain two - // zones with the same external IP. Skim through the zones, copy the - // external IP from one Nexus zone, then assign it to a later Nexus - // zone. - let mut found_second_nexus_zone = false; - let mut nexus_external_ip = None; - - 'outer: for zones in collection.omicron_zones.values_mut() { - for z in zones.zones.zones.iter_mut() { - if let OmicronZoneType::Nexus { external_ip, .. } = - &mut z.zone_type - { - if let Some(ip) = nexus_external_ip { - *external_ip = ip; - found_second_nexus_zone = true; - break 'outer; - } else { - nexus_external_ip = Some(*external_ip); - continue 'outer; - } - } - } - } - assert!(found_second_nexus_zone, "only one Nexus zone present?"); - - let parent = BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - &policy, - "test", - TEST_NAME, - ) - .unwrap(); - - match BlueprintBuilder::new_based_on( - &logctx.log, - &parent, - Generation::new(), - Generation::new(), - &policy, - "test", - ) { - Ok(_) => panic!("unexpected success"), - Err(err) => assert!( - err.to_string().contains("duplicate external IP"), - "unexpected error: {err:#}" - ), - }; - - logctx.cleanup_successful(); - } - - #[test] - fn test_invalid_parent_blueprint_two_nexus_zones_with_same_nic_ip() { - static TEST_NAME: &str = - "blueprint_builder_test_invalid_parent_blueprint_\ - two_nexus_zones_with_same_nic_ip"; - let logctx = test_setup_log(TEST_NAME); - let (mut collection, policy) = - example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - - // We should fail if the parent blueprint claims to contain two - // Nexus zones with the same NIC IP. Skim through the zones, copy - // the NIC IP from one Nexus zone, then assign it to a later - // Nexus zone. 
- let mut found_second_nexus_zone = false; - let mut nexus_nic_ip = None; - - 'outer: for zones in collection.omicron_zones.values_mut() { - for z in zones.zones.zones.iter_mut() { - if let OmicronZoneType::Nexus { nic, .. } = &mut z.zone_type { - if let Some(ip) = nexus_nic_ip { - nic.ip = ip; - found_second_nexus_zone = true; - break 'outer; - } else { - nexus_nic_ip = Some(nic.ip); - continue 'outer; - } - } - } - } - assert!(found_second_nexus_zone, "only one Nexus zone present?"); - - let parent = BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - &policy, - "test", - TEST_NAME, - ) - .unwrap(); - - match BlueprintBuilder::new_based_on( - &logctx.log, - &parent, - Generation::new(), - Generation::new(), - &policy, - "test", - ) { - Ok(_) => panic!("unexpected success"), - Err(err) => assert!( - err.to_string().contains("duplicate Nexus NIC IP"), - "unexpected error: {err:#}" - ), - }; - - logctx.cleanup_successful(); - } - - #[test] - fn test_invalid_parent_blueprint_two_zones_with_same_vnic_mac() { - static TEST_NAME: &str = - "blueprint_builder_test_invalid_parent_blueprint_\ - two_zones_with_same_vnic_mac"; - let logctx = test_setup_log(TEST_NAME); - let (mut collection, policy) = - example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - - // We should fail if the parent blueprint claims to contain two - // zones with the same service vNIC MAC address. Skim through the - // zones, copy the NIC MAC from one Nexus zone, then assign it to a - // later Nexus zone. - let mut found_second_nexus_zone = false; - let mut nexus_nic_mac = None; - - 'outer: for zones in collection.omicron_zones.values_mut() { - for z in zones.zones.zones.iter_mut() { - if let OmicronZoneType::Nexus { nic, .. } = &mut z.zone_type { - if let Some(mac) = nexus_nic_mac { - nic.mac = mac; - found_second_nexus_zone = true; - break 'outer; - } else { - nexus_nic_mac = Some(nic.mac); - continue 'outer; - } - } - } - } - assert!(found_second_nexus_zone, "only one Nexus zone present?"); - - let parent = BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - &policy, - "test", - TEST_NAME, - ) - .unwrap(); - - match BlueprintBuilder::new_based_on( - &logctx.log, - &parent, - Generation::new(), - Generation::new(), - &policy, - "test", - ) { - Ok(_) => panic!("unexpected success"), - Err(err) => assert!( - err.to_string().contains("duplicate service vNIC MAC"), - "unexpected error: {err:#}" - ), - }; - - logctx.cleanup_successful(); - } -} diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs new file mode 100644 index 0000000000..7e98b3906d --- /dev/null +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -0,0 +1,1790 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Low-level facility for generating Blueprints + +use crate::ip_allocator::IpAllocator; +use crate::planner::ZoneExpungeReason; +use anyhow::anyhow; +use internal_dns::config::Host; +use internal_dns::config::Zone; +use ipnet::IpAdd; +use nexus_inventory::now_db_precision; +use nexus_types::deployment::blueprint_zone_type; +use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintPhysicalDiskConfig; +use nexus_types::deployment::BlueprintPhysicalDisksConfig; +use nexus_types::deployment::BlueprintZoneConfig; +use nexus_types::deployment::BlueprintZoneDisposition; +use nexus_types::deployment::BlueprintZoneFilter; +use nexus_types::deployment::BlueprintZoneType; +use nexus_types::deployment::BlueprintZonesConfig; +use nexus_types::deployment::CockroachDbPreserveDowngrade; +use nexus_types::deployment::DiskFilter; +use nexus_types::deployment::OmicronZoneDataset; +use nexus_types::deployment::OmicronZoneExternalFloatingIp; +use nexus_types::deployment::PlanningInput; +use nexus_types::deployment::SledFilter; +use nexus_types::deployment::SledResources; +use nexus_types::deployment::ZpoolName; +use nexus_types::external_api::views::SledState; +use omicron_common::address::get_internal_dns_server_addresses; +use omicron_common::address::get_sled_address; +use omicron_common::address::get_switch_zone_address; +use omicron_common::address::CP_SERVICES_RESERVED_ADDRESSES; +use omicron_common::address::NTP_PORT; +use omicron_common::address::SLED_RESERVED_ADDRESSES; +use omicron_common::api::external::Generation; +use omicron_common::api::external::Vni; +use omicron_common::api::internal::shared::NetworkInterface; +use omicron_common::api::internal::shared::NetworkInterfaceKind; +use omicron_uuid_kinds::ExternalIpKind; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::OmicronZoneKind; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::PhysicalDiskUuid; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::ZpoolUuid; +use rand::rngs::StdRng; +use rand::SeedableRng; +use slog::debug; +use slog::error; +use slog::info; +use slog::o; +use slog::Logger; +use std::collections::BTreeMap; +use std::collections::BTreeSet; +use std::collections::HashSet; +use std::hash::Hash; +use std::net::IpAddr; +use std::net::Ipv6Addr; +use std::net::SocketAddrV6; +use thiserror::Error; +use typed_rng::TypedUuidRng; +use typed_rng::UuidRng; + +use super::external_networking::BuilderExternalNetworking; +use super::external_networking::ExternalNetworkingChoice; +use super::zones::is_already_expunged; +use super::zones::BuilderZoneState; +use super::zones::BuilderZonesConfig; + +/// Errors encountered while assembling blueprints +#[derive(Debug, Error)] +pub enum Error { + #[error("sled {sled_id}: ran out of available addresses for sled")] + OutOfAddresses { sled_id: SledUuid }, + #[error("no Nexus zones exist in parent blueprint")] + NoNexusZonesInParentBlueprint, + #[error("no external service IP addresses are available")] + NoExternalServiceIpAvailable, + #[error("no system MAC addresses are available")] + NoSystemMacAddressAvailable, + #[error("exhausted available Nexus IP addresses")] + ExhaustedNexusIps, + #[error( + "invariant violation: found decommissioned sled with \ + {num_zones} non-expunged zones: {sled_id}" + )] + DecommissionedSledWithNonExpungedZones { + sled_id: SledUuid, + num_zones: usize, + }, + #[error("programming error in planner")] + Planner(#[from] anyhow::Error), +} + +/// Describes whether an idempotent "ensure" operation resulted in action 
taken +/// or no action was necessary +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum Ensure { + /// action was taken + Added, + /// no action was necessary + NotNeeded, +} + +/// Describes whether an idempotent "ensure" operation resulted in multiple +/// actions taken or no action was necessary +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum EnsureMultiple { + /// action was taken, and multiple items were added + Added(usize), + /// no action was necessary + NotNeeded, +} + +/// Helper for assembling a blueprint +/// +/// There are two basic ways to assemble a new blueprint: +/// +/// 1. Build one directly. This would generally only be used once in the +/// lifetime of a rack, to assemble the first blueprint during rack setup. +/// It is also common in tests. To start with a blueprint that contains an +/// empty zone config for some number of sleds, use +/// [`BlueprintBuilder::build_empty_with_sleds`]. +/// +/// 2. Build one _from_ another blueprint, called the "parent", making changes +/// as desired. Use [`BlueprintBuilder::new_based_on`] for this. Once the +/// new blueprint is created, there is no dependency on the parent one. +/// However, the new blueprint can only be made the system's target if its +/// parent is the current target. +pub struct BlueprintBuilder<'a> { + #[allow(dead_code)] + log: Logger, + + /// previous blueprint, on which this one will be based + parent_blueprint: &'a Blueprint, + + // These fields are used to allocate resources for sleds. + input: &'a PlanningInput, + sled_ip_allocators: BTreeMap, + external_networking: BuilderExternalNetworking<'a>, + + // These fields will become part of the final blueprint. See the + // corresponding fields in `Blueprint`. + pub(super) zones: BlueprintZonesBuilder<'a>, + disks: BlueprintDisksBuilder<'a>, + sled_state: BTreeMap, + cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade, + + creator: String, + comments: Vec, + + // Random number generator for new UUIDs + rng: BlueprintBuilderRng, +} + +impl<'a> BlueprintBuilder<'a> { + /// Directly construct a `Blueprint` that contains an empty zone config for + /// the given sleds. + pub fn build_empty_with_sleds( + sled_ids: impl Iterator, + creator: &str, + ) -> Blueprint { + Self::build_empty_with_sleds_impl( + sled_ids, + creator, + BlueprintBuilderRng::new(), + ) + } + + /// A version of [`Self::build_empty_with_sleds`] that allows the + /// blueprint ID to be generated from a random seed. 
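Ensure and EnsureMultiple exist so that planning passes can be repeated safely: each "ensure" method checks first, acts only if needed, and reports which of the two happened. A tiny sketch of that idempotent shape, with a made-up Zone type and ensure_ntp_zone helper standing in for the real builder methods:

#[derive(Debug, PartialEq)]
enum Ensure {
    Added,
    NotNeeded,
}

#[derive(Debug, PartialEq)]
enum Zone {
    InternalNtp,
    Crucible,
}

// Check-then-act: only mutate (and only report Added) when the desired zone
// is actually missing.
fn ensure_ntp_zone(zones: &mut Vec<Zone>) -> Ensure {
    if zones.iter().any(|z| *z == Zone::InternalNtp) {
        return Ensure::NotNeeded;
    }
    zones.push(Zone::InternalNtp);
    Ensure::Added
}

fn main() {
    let mut zones = vec![Zone::Crucible];
    assert_eq!(ensure_ntp_zone(&mut zones), Ensure::Added);
    // Calling again is a no-op, which is what makes repeated planning
    // passes safe.
    assert_eq!(ensure_ntp_zone(&mut zones), Ensure::NotNeeded);
}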
+ pub fn build_empty_with_sleds_seeded( + sled_ids: impl Iterator, + creator: &str, + seed: H, + ) -> Blueprint { + let mut rng = BlueprintBuilderRng::new(); + rng.set_seed(seed); + Self::build_empty_with_sleds_impl(sled_ids, creator, rng) + } + + fn build_empty_with_sleds_impl( + sled_ids: impl Iterator, + creator: &str, + mut rng: BlueprintBuilderRng, + ) -> Blueprint { + let blueprint_zones = sled_ids + .map(|sled_id| { + let config = BlueprintZonesConfig { + generation: Generation::new(), + zones: Vec::new(), + }; + (sled_id, config) + }) + .collect::>(); + let num_sleds = blueprint_zones.len(); + let sled_state = blueprint_zones + .keys() + .copied() + .map(|sled_id| (sled_id, SledState::Active)) + .collect(); + Blueprint { + id: rng.blueprint_rng.next(), + blueprint_zones, + blueprint_disks: BTreeMap::new(), + sled_state, + parent_blueprint_id: None, + internal_dns_version: Generation::new(), + external_dns_version: Generation::new(), + cockroachdb_fingerprint: String::new(), + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, + time_created: now_db_precision(), + creator: creator.to_owned(), + comment: format!("starting blueprint with {num_sleds} empty sleds"), + } + } + + /// Construct a new `BlueprintBuilder` based on a previous blueprint, + /// starting with no changes from that state + pub fn new_based_on( + log: &Logger, + parent_blueprint: &'a Blueprint, + input: &'a PlanningInput, + creator: &str, + ) -> anyhow::Result> { + let log = log.new(o!( + "component" => "BlueprintBuilder", + "parent_id" => parent_blueprint.id.to_string(), + )); + + let external_networking = + BuilderExternalNetworking::new(parent_blueprint, input)?; + + // Prefer the sled state from our parent blueprint for sleds + // that were in it; there may be new sleds in `input`, in which + // case we'll use their current state as our starting point. + let mut sled_state = parent_blueprint.sled_state.clone(); + let mut commissioned_sled_ids = BTreeSet::new(); + for (sled_id, details) in input.all_sleds(SledFilter::Commissioned) { + commissioned_sled_ids.insert(sled_id); + sled_state.entry(sled_id).or_insert(details.state); + } + + // Make a garbage collection pass through `sled_state`. We want to keep + // any sleds which either: + // + // 1. do not have a desired state of `Decommissioned` + // 2. do have a desired state of `Decommissioned` and are still included + // in our input's list of commissioned sleds + // + // Sleds that don't fall into either of these cases have reached the + // actual `Decommissioned` state, which means we no longer need to carry + // forward that desired state. + sled_state.retain(|sled_id, state| { + *state != SledState::Decommissioned + || commissioned_sled_ids.contains(sled_id) + }); + + Ok(BlueprintBuilder { + log, + parent_blueprint, + input, + sled_ip_allocators: BTreeMap::new(), + external_networking, + zones: BlueprintZonesBuilder::new(parent_blueprint), + disks: BlueprintDisksBuilder::new(parent_blueprint), + sled_state, + cockroachdb_setting_preserve_downgrade: parent_blueprint + .cockroachdb_setting_preserve_downgrade, + creator: creator.to_owned(), + comments: Vec::new(), + rng: BlueprintBuilderRng::new(), + }) + } + + /// Iterates over the list of sled IDs for which we have zones. + /// + /// This may include decommissioned sleds. 
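The garbage-collection pass over sled_state described above keeps a desired state only while it is still meaningful: either the sled is not yet Decommissioned, or it is Decommissioned but the planning input still lists it as commissioned. A small sketch of that retain predicate, with u32 sled IDs and a simplified SledState standing in for the real types:

use std::collections::{BTreeMap, BTreeSet};

#[derive(Clone, Copy, Debug, PartialEq)]
enum SledState {
    Active,
    Decommissioned,
}

// Drop desired states for sleds that have fully left the system: they are
// marked Decommissioned and no longer appear among commissioned sleds.
fn gc_sled_state(
    sled_state: &mut BTreeMap<u32, SledState>,
    commissioned: &BTreeSet<u32>,
) {
    sled_state.retain(|sled_id, state| {
        *state != SledState::Decommissioned || commissioned.contains(sled_id)
    });
}

fn main() {
    let mut sled_state = BTreeMap::from([
        (1, SledState::Active),
        (2, SledState::Decommissioned), // still commissioned in the input
        (3, SledState::Decommissioned), // gone from the input entirely
    ]);
    let commissioned = BTreeSet::from([1, 2]);
    gc_sled_state(&mut sled_state, &commissioned);
    assert_eq!(sled_state.len(), 2);
    assert!(!sled_state.contains_key(&3));
}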
+ pub fn sled_ids_with_zones(&self) -> impl Iterator { + self.zones.sled_ids_with_zones() + } + + pub fn current_sled_zones( + &self, + sled_id: SledUuid, + ) -> impl Iterator { + self.zones.current_sled_zones(sled_id).map(|(config, _)| config) + } + + /// Assemble a final [`Blueprint`] based on the contents of the builder + pub fn build(mut self) -> Blueprint { + // Collect the Omicron zones config for all sleds, including sleds that + // are no longer in service and need expungement work. + let blueprint_zones = self + .zones + .into_zones_map(self.input.all_sled_ids(SledFilter::Commissioned)); + let blueprint_disks = self + .disks + .into_disks_map(self.input.all_sled_ids(SledFilter::InService)); + Blueprint { + id: self.rng.blueprint_rng.next(), + blueprint_zones, + blueprint_disks, + sled_state: self.sled_state, + parent_blueprint_id: Some(self.parent_blueprint.id), + internal_dns_version: self.input.internal_dns_version(), + external_dns_version: self.input.external_dns_version(), + cockroachdb_fingerprint: self + .input + .cockroachdb_settings() + .state_fingerprint + .clone(), + cockroachdb_setting_preserve_downgrade: self + .cockroachdb_setting_preserve_downgrade, + time_created: now_db_precision(), + creator: self.creator, + comment: self.comments.join(", "), + } + } + + /// Set the desired state of the given sled. + pub fn set_sled_state( + &mut self, + sled_id: SledUuid, + desired_state: SledState, + ) { + self.sled_state.insert(sled_id, desired_state); + } + + /// Within tests, set a seeded RNG for deterministic results. + /// + /// This will ensure that tests that use this builder will produce the same + /// results each time they are run. + pub fn set_rng_seed(&mut self, seed: H) -> &mut Self { + self.rng.set_seed(seed); + self + } + + /// Sets the blueprints "comment" + /// + /// This is a short human-readable string summarizing the changes reflected + /// in the blueprint. This is only intended for debugging. + pub fn comment(&mut self, comment: S) + where + String: From, + { + self.comments.push(String::from(comment)); + } + + /// Expunges all zones from a sled. + /// + /// Returns a list of zone IDs expunged (excluding zones that were already + /// expunged). If the list is empty, then the operation was a no-op. + pub(crate) fn expunge_all_zones_for_sled( + &mut self, + sled_id: SledUuid, + reason: ZoneExpungeReason, + ) -> Result, Error> { + let log = self.log.new(o!( + "sled_id" => sled_id.to_string(), + )); + + // Do any zones need to be marked expunged? + let mut zones_to_expunge = BTreeSet::new(); + + let sled_zones = self.zones.current_sled_zones(sled_id); + for (z, state) in sled_zones { + let is_expunged = + is_already_expunged(z, state).map_err(|error| { + Error::Planner(anyhow!(error).context(format!( + "for sled {sled_id}, error computing zones to expunge" + ))) + })?; + + if !is_expunged { + zones_to_expunge.insert(z.id); + } + } + + if zones_to_expunge.is_empty() { + debug!( + log, + "sled has no zones that need expungement; skipping"; + ); + return Ok(zones_to_expunge); + } + + match reason { + ZoneExpungeReason::SledDecommissioned { policy } => { + // A sled marked as decommissioned should have no resources + // allocated to it. If it does, it's an illegal state, possibly + // introduced by a bug elsewhere in the system -- we need to + // produce a loud warning (i.e. an ERROR-level log message) on + // this, while still removing the zones. 
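Expungement is made idempotent by first computing the set of zones that still need it and treating an empty set as a no-op, so repeated planning passes do not keep bumping generations. A rough sketch of that filtering step, with simplified Zone and Disposition types (illustrative only):

use std::collections::BTreeSet;

#[derive(Clone, Copy, Debug, PartialEq)]
enum Disposition {
    InService,
    Expunged,
}

struct Zone {
    id: u32,
    disposition: Disposition,
}

// Gather only the zones that still need expungement; an empty set means the
// whole operation is a no-op.
fn zones_to_expunge(zones: &[Zone]) -> BTreeSet<u32> {
    zones
        .iter()
        .filter(|z| z.disposition != Disposition::Expunged)
        .map(|z| z.id)
        .collect()
}

fn main() {
    let zones = vec![
        Zone { id: 10, disposition: Disposition::InService },
        Zone { id: 11, disposition: Disposition::Expunged },
    ];
    assert_eq!(zones_to_expunge(&zones), BTreeSet::from([10]));
    // Once everything is expunged, a second pass finds nothing to do.
    let done = vec![Zone { id: 10, disposition: Disposition::Expunged }];
    assert!(zones_to_expunge(&done).is_empty());
}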
+ error!( + &log, + "sled has state Decommissioned, yet has zones \ + allocated to it; will expunge them \ + (sled policy is \"{policy:?}\")" + ); + } + ZoneExpungeReason::SledExpunged => { + // This is the expected situation. + info!( + &log, + "expunged sled with {} non-expunged zones found \ + (will expunge all zones)", + zones_to_expunge.len() + ); + } + } + + // Now expunge all the zones that need it. + let change = self.zones.change_sled_zones(sled_id); + change.expunge_zones(zones_to_expunge.clone()).map_err(|error| { + anyhow!(error) + .context(format!("for sled {sled_id}, error expunging zones")) + })?; + + // Finally, add a comment describing what happened. + let reason = match reason { + ZoneExpungeReason::SledDecommissioned { .. } => { + "sled state is decommissioned" + } + ZoneExpungeReason::SledExpunged => "sled policy is expunged", + }; + + self.comment(format!( + "sled {} ({reason}): {} zones expunged", + sled_id, + zones_to_expunge.len(), + )); + + Ok(zones_to_expunge) + } + + /// Ensures that the blueprint contains disks for a sled which already + /// exists in the database. + /// + /// This operation must perform the following: + /// - Ensure that any disks / zpools that exist in the database + /// are propagated into the blueprint. + /// - Ensure that any disks that are expunged from the database are + /// removed from the blueprint. + pub fn sled_ensure_disks( + &mut self, + sled_id: SledUuid, + resources: &SledResources, + ) -> Result { + let (mut additions, removals) = { + // These are the disks known to our (last?) blueprint + let blueprint_disks: BTreeMap<_, _> = self + .disks + .current_sled_disks(sled_id) + .map(|disk| { + (PhysicalDiskUuid::from_untyped_uuid(disk.id), disk) + }) + .collect(); + + // These are the in-service disks as we observed them in the database, + // during the planning phase + let database_disks: BTreeMap<_, _> = resources + .all_disks(DiskFilter::InService) + .map(|(zpool, disk)| (disk.disk_id, (zpool, disk))) + .collect(); + + // Add any disks that appear in the database, but not the blueprint + let additions = database_disks + .iter() + .filter_map(|(disk_id, (zpool, disk))| { + if !blueprint_disks.contains_key(disk_id) { + Some(BlueprintPhysicalDiskConfig { + identity: disk.disk_identity.clone(), + id: disk_id.into_untyped_uuid(), + pool_id: **zpool, + }) + } else { + None + } + }) + .collect::>(); + + // Remove any disks that appear in the blueprint, but not the database + let removals: HashSet = blueprint_disks + .keys() + .filter_map(|disk_id| { + if !database_disks.contains_key(disk_id) { + Some(*disk_id) + } else { + None + } + }) + .collect(); + + (additions, removals) + }; + + if additions.is_empty() && removals.is_empty() { + return Ok(Ensure::NotNeeded); + } + + let disks = &mut self.disks.change_sled_disks(sled_id).disks; + + disks.append(&mut additions); + disks.retain(|config| { + !removals.contains(&PhysicalDiskUuid::from_untyped_uuid(config.id)) + }); + + Ok(Ensure::Added) + } + + pub fn sled_ensure_zone_ntp( + &mut self, + sled_id: SledUuid, + ) -> Result { + // If there's already an NTP zone on this sled, do nothing. + let has_ntp = self + .zones + .current_sled_zones(sled_id) + .any(|(z, _)| z.zone_type.is_ntp()); + if has_ntp { + return Ok(Ensure::NotNeeded); + } + + let sled_info = self.sled_resources(sled_id)?; + let sled_subnet = sled_info.subnet; + let ip = self.sled_alloc_ip(sled_id)?; + let ntp_address = SocketAddrV6::new(ip, NTP_PORT, 0, 0); + + // Construct the list of internal DNS servers. 
+ // + // It'd be tempting to get this list from the other internal NTP + // servers but there may not be any of those. We could also + // construct this list manually from the set of internal DNS servers + // actually deployed. Instead, we take the same approach as RSS: + // these are at known, fixed addresses relative to the AZ subnet + // (which itself is a known-prefix parent subnet of the sled subnet). + let dns_servers = + get_internal_dns_server_addresses(sled_subnet.net().prefix()); + + // The list of boundary NTP servers is not necessarily stored + // anywhere (unless there happens to be another internal NTP zone + // lying around). Recompute it based on what boundary servers + // currently exist. + let ntp_servers = self + .parent_blueprint + .all_omicron_zones(BlueprintZoneFilter::All) + .filter_map(|(_, z)| { + if matches!(z.zone_type, BlueprintZoneType::BoundaryNtp(_)) { + Some(Host::for_zone(Zone::Other(z.id)).fqdn()) + } else { + None + } + }) + .collect(); + + let zone = BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: self.rng.zone_rng.next(), + underlay_address: ip, + zone_type: BlueprintZoneType::InternalNtp( + blueprint_zone_type::InternalNtp { + address: ntp_address, + ntp_servers, + dns_servers, + domain: None, + }, + ), + }; + + self.sled_add_zone(sled_id, zone)?; + Ok(Ensure::Added) + } + + pub fn sled_ensure_zone_crucible( + &mut self, + sled_id: SledUuid, + zpool_id: ZpoolUuid, + ) -> Result { + let pool_name = ZpoolName::new_external(zpool_id); + + // If this sled already has a Crucible zone on this pool, do nothing. + let has_crucible_on_this_pool = + self.zones.current_sled_zones(sled_id).any(|(z, _)| { + matches!( + &z.zone_type, + BlueprintZoneType::Crucible(blueprint_zone_type::Crucible { + dataset, + .. + }) + if dataset.pool_name == pool_name + ) + }); + if has_crucible_on_this_pool { + return Ok(Ensure::NotNeeded); + } + + let sled_info = self.sled_resources(sled_id)?; + if !sled_info.zpools.contains_key(&zpool_id) { + return Err(Error::Planner(anyhow!( + "adding crucible zone for sled {:?}: \ + attempted to use unknown zpool {:?}", + sled_id, + pool_name + ))); + } + + let ip = self.sled_alloc_ip(sled_id)?; + let port = omicron_common::address::CRUCIBLE_PORT; + let address = SocketAddrV6::new(ip, port, 0, 0); + let zone = BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: self.rng.zone_rng.next(), + underlay_address: ip, + zone_type: BlueprintZoneType::Crucible( + blueprint_zone_type::Crucible { + address, + dataset: OmicronZoneDataset { pool_name }, + }, + ), + }; + + self.sled_add_zone(sled_id, zone)?; + Ok(Ensure::Added) + } + + /// Return the number of Nexus zones that would be configured to run on the + /// given sled if this builder generated a blueprint + /// + /// This value may change before a blueprint is actually generated if + /// further changes are made to the builder. + pub fn sled_num_nexus_zones(&self, sled_id: SledUuid) -> usize { + self.zones + .current_sled_zones(sled_id) + .filter(|(z, _)| z.zone_type.is_nexus()) + .count() + } + + pub fn sled_ensure_zone_multiple_nexus( + &mut self, + sled_id: SledUuid, + desired_zone_count: usize, + ) -> Result { + // Whether Nexus should use TLS and what the external DNS servers it + // should use are currently provided at rack-setup time, and should be + // consistent across all Nexus instances. We'll assume we can copy them + // from any other Nexus zone in our parent blueprint. 
+ // + // TODO-correctness Once these properties can be changed by a rack + // operator, this will need more work. At a minimum, if such a change + // goes through the blueprint system (which seems likely), we'll need to + // check that we're if this builder is being used to make such a change, + // that change is also reflected here in a new zone. Perhaps these + // settings should be part of `Policy` instead? + let (external_tls, external_dns_servers) = self + .parent_blueprint + .all_omicron_zones(BlueprintZoneFilter::All) + .find_map(|(_, z)| match &z.zone_type { + BlueprintZoneType::Nexus(nexus) => Some(( + nexus.external_tls, + nexus.external_dns_servers.clone(), + )), + _ => None, + }) + .ok_or(Error::NoNexusZonesInParentBlueprint)?; + self.sled_ensure_zone_multiple_nexus_with_config( + sled_id, + desired_zone_count, + external_tls, + external_dns_servers, + ) + } + + pub fn sled_ensure_zone_multiple_nexus_with_config( + &mut self, + sled_id: SledUuid, + desired_zone_count: usize, + external_tls: bool, + external_dns_servers: Vec, + ) -> Result { + // How many Nexus zones do we need to add? + let nexus_count = self.sled_num_nexus_zones(sled_id); + let num_nexus_to_add = match desired_zone_count.checked_sub(nexus_count) + { + Some(0) => return Ok(EnsureMultiple::NotNeeded), + Some(n) => n, + None => { + return Err(Error::Planner(anyhow!( + "removing a Nexus zone not yet supported \ + (sled {sled_id} has {nexus_count}; \ + planner wants {desired_zone_count})" + ))); + } + }; + + for _ in 0..num_nexus_to_add { + let nexus_id = self.rng.zone_rng.next(); + let ExternalNetworkingChoice { + external_ip, + nic_ip, + nic_subnet, + nic_mac, + } = self.external_networking.for_new_nexus()?; + let external_ip = OmicronZoneExternalFloatingIp { + id: self.rng.external_ip_rng.next(), + ip: external_ip, + }; + + let nic = { + NetworkInterface { + id: self.rng.network_interface_rng.next(), + kind: NetworkInterfaceKind::Service { + id: nexus_id.into_untyped_uuid(), + }, + name: format!("nexus-{nexus_id}").parse().unwrap(), + ip: nic_ip, + mac: nic_mac, + subnet: nic_subnet, + vni: Vni::SERVICES_VNI, + primary: true, + slot: 0, + } + }; + + let ip = self.sled_alloc_ip(sled_id)?; + let port = omicron_common::address::NEXUS_INTERNAL_PORT; + let internal_address = SocketAddrV6::new(ip, port, 0, 0); + let zone = BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: nexus_id, + underlay_address: ip, + zone_type: BlueprintZoneType::Nexus( + blueprint_zone_type::Nexus { + internal_address, + external_ip, + nic, + external_tls, + external_dns_servers: external_dns_servers.clone(), + }, + ), + }; + self.sled_add_zone(sled_id, zone)?; + } + + Ok(EnsureMultiple::Added(num_nexus_to_add)) + } + + pub fn cockroachdb_preserve_downgrade( + &mut self, + version: CockroachDbPreserveDowngrade, + ) { + self.cockroachdb_setting_preserve_downgrade = version; + } + + fn sled_add_zone( + &mut self, + sled_id: SledUuid, + zone: BlueprintZoneConfig, + ) -> Result<(), Error> { + // Check the sled id and return an appropriate error if it's invalid. 
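The "ensure multiple" arithmetic above boils down to a checked_sub: zero means nothing to do, a positive difference is how many zones to add, and underflow means a removal was requested, which the builder does not yet support. A minimal sketch of that decision, with a plain String error standing in for the real planner error:

// Sketch of the "ensure N instances" arithmetic used by the builder.
fn num_to_add(current: usize, desired: usize) -> Result<usize, String> {
    match desired.checked_sub(current) {
        Some(0) => Ok(0), // corresponds to EnsureMultiple::NotNeeded
        Some(n) => Ok(n), // corresponds to EnsureMultiple::Added(n)
        None => Err(format!(
            "removing zones not yet supported (have {current}, want {desired})"
        )),
    }
}

fn main() {
    assert_eq!(num_to_add(1, 3), Ok(2));
    assert_eq!(num_to_add(3, 3), Ok(0));
    assert!(num_to_add(3, 1).is_err());
}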
+ let _ = self.sled_resources(sled_id)?; + + let sled_zones = self.zones.change_sled_zones(sled_id); + sled_zones.add_zone(zone).map_err(|error| { + anyhow!(error) + .context(format!("error adding zone to sled {sled_id}")) + })?; + + Ok(()) + } + + /// Returns a newly-allocated underlay address suitable for use by Omicron + /// zones + fn sled_alloc_ip(&mut self, sled_id: SledUuid) -> Result { + let sled_subnet = self.sled_resources(sled_id)?.subnet; + let allocator = + self.sled_ip_allocators.entry(sled_id).or_insert_with(|| { + let sled_subnet_addr = sled_subnet.net().prefix(); + let minimum = sled_subnet_addr + .saturating_add(u128::from(SLED_RESERVED_ADDRESSES)); + let maximum = sled_subnet_addr + .saturating_add(u128::from(CP_SERVICES_RESERVED_ADDRESSES)); + assert!(sled_subnet.net().contains(minimum)); + assert!(sled_subnet.net().contains(maximum)); + let mut allocator = IpAllocator::new(minimum, maximum); + + // We shouldn't need to explicitly reserve the sled's global + // zone and switch addresses because they should be out of our + // range, but we do so just to be sure. + let sled_gz_addr = *get_sled_address(sled_subnet).ip(); + assert!(sled_subnet.net().contains(sled_gz_addr)); + assert!(minimum > sled_gz_addr); + assert!(maximum > sled_gz_addr); + let switch_zone_addr = get_switch_zone_address(sled_subnet); + assert!(sled_subnet.net().contains(switch_zone_addr)); + assert!(minimum > switch_zone_addr); + assert!(maximum > switch_zone_addr); + + // Record each of the sled's zones' underlay addresses as + // allocated. + for (z, _) in self.zones.current_sled_zones(sled_id) { + allocator.reserve(z.underlay_address); + } + + allocator + }); + + allocator.alloc().ok_or(Error::OutOfAddresses { sled_id }) + } + + fn sled_resources( + &self, + sled_id: SledUuid, + ) -> Result<&SledResources, Error> { + self.input.sled_resources(&sled_id).ok_or_else(|| { + Error::Planner(anyhow!( + "attempted to use sled that is not currently known: {}", + sled_id + )) + }) + } +} + +#[derive(Debug)] +struct BlueprintBuilderRng { + // Have separate RNGs for the different kinds of UUIDs we might add, + // generated from the main RNG. This is so that e.g. adding a new network + // interface doesn't alter the blueprint or sled UUID. + // + // In the future, when we switch to typed UUIDs, each of these will be + // associated with a specific `TypedUuidKind`. + blueprint_rng: UuidRng, + zone_rng: TypedUuidRng, + network_interface_rng: UuidRng, + external_ip_rng: TypedUuidRng, +} + +impl BlueprintBuilderRng { + fn new() -> Self { + Self::new_from_parent(StdRng::from_entropy()) + } + + fn new_from_parent(mut parent: StdRng) -> Self { + let blueprint_rng = UuidRng::from_parent_rng(&mut parent, "blueprint"); + let zone_rng = TypedUuidRng::from_parent_rng(&mut parent, "zone"); + let network_interface_rng = + UuidRng::from_parent_rng(&mut parent, "network_interface"); + let external_ip_rng = + TypedUuidRng::from_parent_rng(&mut parent, "external_ip"); + + BlueprintBuilderRng { + blueprint_rng, + zone_rng, + network_interface_rng, + external_ip_rng, + } + } + + fn set_seed(&mut self, seed: H) { + // Important to add some more bytes here, so that builders with the + // same seed but different purposes don't end up with the same UUIDs. 
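The RNG layering above derives one child RNG per kind of UUID from a single parent, so consuming values from one stream cannot shift the others, and an extra label keeps different builders from colliding on the same seed. A simplified sketch of the idea using rand's seed_from_u64 in place of the real typed_rng/rand_seeder plumbing (the per-purpose label is reduced to a fixed derivation order here):

use rand::rngs::StdRng;
use rand::{RngCore, SeedableRng};

// One child RNG per purpose, all derived from a single parent RNG.
struct Rngs {
    blueprint: StdRng,
    zone: StdRng,
}

fn derive(parent_seed: u64) -> Rngs {
    let mut parent = StdRng::seed_from_u64(parent_seed);
    // Each child gets its own seed drawn from the parent, in a fixed order.
    let blueprint = StdRng::seed_from_u64(parent.next_u64());
    let zone = StdRng::seed_from_u64(parent.next_u64());
    Rngs { blueprint, zone }
}

fn main() {
    let mut a = derive(42);
    let mut b = derive(42);
    // Consume some of `a`'s zone stream; its blueprint stream is unaffected,
    // which is what keeps e.g. adding a NIC from changing the blueprint ID.
    let _ = a.zone.next_u64();
    let _ = a.zone.next_u64();
    assert_eq!(a.blueprint.next_u64(), b.blueprint.next_u64());
}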
+        const SEED_EXTRA: &str = "blueprint-builder";
+        *self = Self::new_from_parent(typed_rng::from_seed(seed, SEED_EXTRA));
+    }
+}
+
+/// Helper for working with sets of zones on each sled
+///
+/// Tracking the set of zones is slightly non-trivial because we need to bump
+/// the per-sled generation number iff the zones are changed. So we need to
+/// keep track of whether we've changed the zones relative to the parent
+/// blueprint. We do this by keeping a copy of any [`BlueprintZonesConfig`]
+/// that we've changed and a _reference_ to the parent blueprint's zones. This
+/// struct makes it easy for callers to iterate over the right set of zones.
+pub(super) struct BlueprintZonesBuilder<'a> {
+    changed_zones: BTreeMap<SledUuid, BuilderZonesConfig>,
+    parent_zones: &'a BTreeMap<SledUuid, BlueprintZonesConfig>,
+}
+
+impl<'a> BlueprintZonesBuilder<'a> {
+    pub fn new(parent_blueprint: &'a Blueprint) -> BlueprintZonesBuilder {
+        BlueprintZonesBuilder {
+            changed_zones: BTreeMap::new(),
+            parent_zones: &parent_blueprint.blueprint_zones,
+        }
+    }
+
+    /// Returns a mutable reference to a sled's Omicron zones *because* we're
+    /// going to change them. It's essential that the caller _does_ change them
+    /// because we will have bumped the generation number and we don't want to
+    /// do that if no changes are being made.
+    pub fn change_sled_zones(
+        &mut self,
+        sled_id: SledUuid,
+    ) -> &mut BuilderZonesConfig {
+        self.changed_zones.entry(sled_id).or_insert_with(|| {
+            if let Some(old_sled_zones) = self.parent_zones.get(&sled_id) {
+                BuilderZonesConfig::from_parent(old_sled_zones)
+            } else {
+                BuilderZonesConfig::new()
+            }
+        })
+    }
+
+    /// Iterates over the list of sled IDs for which we have zones.
+    ///
+    /// This may include decommissioned sleds.
+    pub fn sled_ids_with_zones(&self) -> impl Iterator<Item = SledUuid> {
+        let mut sled_ids =
+            self.changed_zones.keys().copied().collect::<BTreeSet<_>>();
+        for &sled_id in self.parent_zones.keys() {
+            sled_ids.insert(sled_id);
+        }
+        sled_ids.into_iter()
+    }
+
+    /// Iterates over the list of Omicron zones currently configured for this
+    /// sled in the blueprint that's being built, along with each zone's state
+    /// in the builder.
+    pub fn current_sled_zones(
+        &self,
+        sled_id: SledUuid,
+    ) -> Box<dyn Iterator<Item = (&BlueprintZoneConfig, BuilderZoneState)> + '_>
+    {
+        if let Some(sled_zones) = self.changed_zones.get(&sled_id) {
+            Box::new(sled_zones.iter_zones().map(|z| (z.zone(), z.state())))
+        } else if let Some(parent_zones) = self.parent_zones.get(&sled_id) {
+            Box::new(
+                parent_zones
+                    .zones
+                    .iter()
+                    .map(|z| (z, BuilderZoneState::Unchanged)),
+            )
+        } else {
+            Box::new(std::iter::empty())
+        }
+    }
+
+    /// Produces an owned map of zones for the sleds recorded in this blueprint
+    /// plus any newly-added sleds
+    pub fn into_zones_map(
+        self,
+        added_sled_ids: impl Iterator<Item = SledUuid>,
+    ) -> BTreeMap<SledUuid, BlueprintZonesConfig> {
+        // Start with self.changed_zones, which contains entries for any
+        // sled whose zones config is changing in this blueprint.
+        let mut zones = self
+            .changed_zones
+            .into_iter()
+            .map(|(sled_id, zones)| (sled_id, zones.build()))
+            .collect::<BTreeMap<_, _>>();
+
+        // Carry forward any zones from our parent blueprint. This may include
+        // zones for decommissioned sleds.
+        for (sled_id, parent_zones) in self.parent_zones {
+            zones.entry(*sled_id).or_insert_with(|| parent_zones.clone());
+        }
+
+        // Finally, insert any newly-added sleds.
+        for sled_id in added_sled_ids {
+            zones.entry(sled_id).or_insert_with(|| BlueprintZonesConfig {
+                generation: Generation::new(),
+                zones: vec![],
+            });
+        }
+
+        zones
+    }
+}
+
+/// Helper for working with sets of disks on each sled
+///
+/// Tracking the set of disks is slightly non-trivial because we need to bump
+/// the per-sled generation number iff the disks are changed. So we need to
+/// keep track of whether we've changed the disks relative to the parent
+/// blueprint. We do this by keeping a copy of any [`BlueprintDisksConfig`]
+/// that we've changed and a _reference_ to the parent blueprint's disks. This
+/// struct makes it easy for callers to iterate over the right set of disks.
+struct BlueprintDisksBuilder<'a> {
+    changed_disks: BTreeMap<SledUuid, BlueprintPhysicalDisksConfig>,
+    parent_disks: &'a BTreeMap<SledUuid, BlueprintPhysicalDisksConfig>,
+}
+
+impl<'a> BlueprintDisksBuilder<'a> {
+    pub fn new(parent_blueprint: &'a Blueprint) -> BlueprintDisksBuilder {
+        BlueprintDisksBuilder {
+            changed_disks: BTreeMap::new(),
+            parent_disks: &parent_blueprint.blueprint_disks,
+        }
+    }
+
+    /// Returns a mutable reference to a sled's Omicron disks *because* we're
+    /// going to change them. It's essential that the caller _does_ change them
+    /// because we will have bumped the generation number and we don't want to
+    /// do that if no changes are being made.
+    pub fn change_sled_disks(
+        &mut self,
+        sled_id: SledUuid,
+    ) -> &mut BlueprintPhysicalDisksConfig {
+        self.changed_disks.entry(sled_id).or_insert_with(|| {
+            if let Some(old_sled_disks) = self.parent_disks.get(&sled_id) {
+                BlueprintPhysicalDisksConfig {
+                    generation: old_sled_disks.generation.next(),
+                    disks: old_sled_disks.disks.clone(),
+                }
+            } else {
+                // No requests have been sent to the disk previously, so we
+                // should be able to use the first generation.
+                BlueprintPhysicalDisksConfig {
+                    generation: Generation::new(),
+                    disks: vec![],
+                }
+            }
+        })
+    }
+
+    /// Iterates over the list of Omicron disks currently configured for this
+    /// sled in the blueprint that's being built
+    pub fn current_sled_disks(
+        &self,
+        sled_id: SledUuid,
+    ) -> Box<dyn Iterator<Item = &BlueprintPhysicalDiskConfig> + '_> {
+        if let Some(sled_disks) = self
+            .changed_disks
+            .get(&sled_id)
+            .or_else(|| self.parent_disks.get(&sled_id))
+        {
+            Box::new(sled_disks.disks.iter())
+        } else {
+            Box::new(std::iter::empty())
+        }
+    }
+
+    /// Produces an owned map of disks for the requested sleds
+    pub fn into_disks_map(
+        mut self,
+        sled_ids: impl Iterator<Item = SledUuid>,
+    ) -> BTreeMap<SledUuid, BlueprintPhysicalDisksConfig> {
+        sled_ids
+            .map(|sled_id| {
+                // Start with self.changed_disks, which contains entries for any
+                // sled whose disks config is changing in this blueprint.
+                let mut disks = self
+                    .changed_disks
+                    .remove(&sled_id)
+                    // If it's not there, use the config from the parent
+                    // blueprint.
+                    .or_else(|| self.parent_disks.get(&sled_id).cloned())
+                    // If it's not there either, then this must be a new sled
+                    // and we haven't added any disks to it yet. Use the
+                    // standard initial config.
+ .unwrap_or_else(|| BlueprintPhysicalDisksConfig { + generation: Generation::new(), + disks: vec![], + }); + disks.disks.sort_unstable_by_key(|d| d.id); + + (sled_id, disks) + }) + .collect() + } +} + +#[cfg(test)] +pub mod test { + use super::*; + use crate::example::example; + use crate::example::ExampleSystem; + use crate::system::SledBuilder; + use expectorate::assert_contents; + use nexus_types::deployment::BlueprintOrCollectionZoneConfig; + use nexus_types::deployment::BlueprintZoneFilter; + use nexus_types::deployment::OmicronZoneNetworkResources; + use nexus_types::external_api::views::SledPolicy; + use omicron_common::address::IpRange; + use omicron_test_utils::dev::test_setup_log; + use std::collections::BTreeSet; + use std::mem; + + pub const DEFAULT_N_SLEDS: usize = 3; + + /// Checks various conditions that should be true for all blueprints + pub fn verify_blueprint(blueprint: &Blueprint) { + let mut underlay_ips: BTreeMap = + BTreeMap::new(); + for (_, zone) in blueprint.all_omicron_zones(BlueprintZoneFilter::All) { + if let Some(previous) = + underlay_ips.insert(zone.underlay_address, zone) + { + panic!( + "found duplicate underlay IP {} in zones {} and {}\ + \n\n\ + blueprint: {}", + zone.underlay_address, + zone.id, + previous.id, + blueprint.display(), + ); + } + } + } + + #[test] + fn test_initial() { + // Test creating a blueprint from a collection and verifying that it + // describes no changes. + static TEST_NAME: &str = "blueprint_builder_test_initial"; + let logctx = test_setup_log(TEST_NAME); + let (collection, input, blueprint_initial) = + example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + verify_blueprint(&blueprint_initial); + + let diff = blueprint_initial.diff_since_collection(&collection); + // There are some differences with even a no-op diff between a + // collection and a blueprint, such as new data being added to + // blueprints like DNS generation numbers. + println!( + "collection -> initial blueprint \ + (expected no non-trivial changes):\n{}", + diff.display() + ); + assert_contents( + "tests/output/blueprint_builder_initial_diff.txt", + &diff.display().to_string(), + ); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); + + // Test a no-op blueprint. + let builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint_initial, + &input, + "test_basic", + ) + .expect("failed to create builder"); + let blueprint = builder.build(); + verify_blueprint(&blueprint); + let diff = blueprint.diff_since_blueprint(&blueprint_initial); + println!( + "initial blueprint -> next blueprint (expected no changes):\n{}", + diff.display() + ); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); + + logctx.cleanup_successful(); + } + + #[test] + fn test_basic() { + static TEST_NAME: &str = "blueprint_builder_test_basic"; + let logctx = test_setup_log(TEST_NAME); + let mut example = + ExampleSystem::new(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + let blueprint1 = &example.blueprint; + verify_blueprint(blueprint1); + + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + blueprint1, + &example.input, + "test_basic", + ) + .expect("failed to create builder"); + + // The example blueprint should have internal NTP zones on all the + // existing sleds, plus Crucible zones on all pools. So if we ensure + // all these zones exist, we should see no change. 
+ for (sled_id, sled_resources) in + example.input.all_sled_resources(SledFilter::Commissioned) + { + builder.sled_ensure_zone_ntp(sled_id).unwrap(); + for pool_id in sled_resources.zpools.keys() { + builder.sled_ensure_zone_crucible(sled_id, *pool_id).unwrap(); + } + } + + let blueprint2 = builder.build(); + verify_blueprint(&blueprint2); + let diff = blueprint2.diff_since_blueprint(&blueprint1); + println!( + "initial blueprint -> next blueprint (expected no changes):\n{}", + diff.display() + ); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); + + // The next step is adding these zones to a new sled. + let new_sled_id = example.sled_rng.next(); + let _ = + example.system.sled(SledBuilder::new().id(new_sled_id)).unwrap(); + let input = example.system.to_planning_input_builder().unwrap().build(); + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint2, + &input, + "test_basic", + ) + .expect("failed to create builder"); + builder.sled_ensure_zone_ntp(new_sled_id).unwrap(); + // TODO-cleanup use `TypedUuid` everywhere + let new_sled_resources = input.sled_resources(&new_sled_id).unwrap(); + for pool_id in new_sled_resources.zpools.keys() { + builder.sled_ensure_zone_crucible(new_sled_id, *pool_id).unwrap(); + } + + let blueprint3 = builder.build(); + verify_blueprint(&blueprint3); + let diff = blueprint3.diff_since_blueprint(&blueprint2); + println!("expecting new NTP and Crucible zones:\n{}", diff.display()); + + // No sleds were changed or removed. + assert_eq!(diff.sleds_modified.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + + // One sled was added. + assert_eq!(diff.sleds_added.len(), 1); + let sled_id = diff.sleds_added.first().unwrap(); + let new_sled_zones = diff.zones.added.get(sled_id).unwrap(); + assert_eq!(*sled_id, new_sled_id); + // The generation number should be newer than the initial default. + assert!(new_sled_zones.generation_after.unwrap() > Generation::new()); + + // All zones' underlay addresses ought to be on the sled's subnet. + for z in &new_sled_zones.zones { + assert!(new_sled_resources + .subnet + .net() + .contains(z.underlay_address())); + } + + // Check for an NTP zone. Its sockaddr's IP should also be on the + // sled's subnet. + assert!(new_sled_zones.zones.iter().any(|z| { + if let BlueprintOrCollectionZoneConfig::Blueprint( + BlueprintZoneConfig { + zone_type: + BlueprintZoneType::InternalNtp( + blueprint_zone_type::InternalNtp { + address, .. + }, + ), + .. + }, + ) = &z + { + assert!(new_sled_resources + .subnet + .net() + .contains(*address.ip())); + true + } else { + false + } + })); + let crucible_pool_names = new_sled_zones + .zones + .iter() + .filter_map(|z| { + if let BlueprintOrCollectionZoneConfig::Blueprint( + BlueprintZoneConfig { + zone_type: + BlueprintZoneType::Crucible( + blueprint_zone_type::Crucible { + address, + dataset, + }, + ), + .. 
+ }, + ) = &z + { + let ip = address.ip(); + assert!(new_sled_resources.subnet.net().contains(*ip)); + Some(dataset.pool_name.clone()) + } else { + None + } + }) + .collect::>(); + assert_eq!( + crucible_pool_names, + new_sled_resources + .zpools + .keys() + .map(|id| { ZpoolName::new_external(*id) }) + .collect() + ); + + logctx.cleanup_successful(); + } + + #[test] + fn test_prune_decommissioned_sleds() { + static TEST_NAME: &str = + "blueprint_builder_test_prune_decommissioned_sleds"; + let logctx = test_setup_log(TEST_NAME); + let (_, input, mut blueprint1) = + example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + verify_blueprint(&blueprint1); + + // Mark one sled as having a desired state of decommissioned. + let decommision_sled_id = blueprint1 + .sled_state + .keys() + .copied() + .next() + .expect("at least one sled"); + *blueprint1.sled_state.get_mut(&decommision_sled_id).unwrap() = + SledState::Decommissioned; + + // Change the input to note that the sled is expunged, but still active. + let mut builder = input.into_builder(); + builder.sleds_mut().get_mut(&decommision_sled_id).unwrap().policy = + SledPolicy::Expunged; + builder.sleds_mut().get_mut(&decommision_sled_id).unwrap().state = + SledState::Active; + let input = builder.build(); + + // Generate a new blueprint. This sled should still be included: even + // though the desired state is decommissioned, the current state is + // still active, so we should carry it forward. + let blueprint2 = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint1, + &input, + "test_prune_decommissioned_sleds", + ) + .expect("created builder") + .build(); + verify_blueprint(&blueprint2); + + // We carried forward the desired state. + assert_eq!( + blueprint2.sled_state.get(&decommision_sled_id).copied(), + Some(SledState::Decommissioned) + ); + + // Change the input to mark the sled decommissioned. (Normally realizing + // blueprint2 would make this change.) + let mut builder = input.into_builder(); + builder.sleds_mut().get_mut(&decommision_sled_id).unwrap().state = + SledState::Decommissioned; + let input = builder.build(); + + // Generate a new blueprint. This desired sled state should no longer be + // present: it has reached the terminal decommissioned state, so there's + // no more work to be done. + let blueprint3 = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint2, + &input, + "test_prune_decommissioned_sleds", + ) + .expect("created builder") + .build(); + verify_blueprint(&blueprint3); + + // Ensure we've dropped the decommissioned sled. (We may still have + // _zones_ for it that need cleanup work, but all state transitions for + // it are complete.) + assert_eq!( + blueprint3.sled_state.get(&decommision_sled_id).copied(), + None, + ); + + logctx.cleanup_successful(); + } + + #[test] + fn test_add_physical_disks() { + static TEST_NAME: &str = "blueprint_builder_test_add_physical_disks"; + let logctx = test_setup_log(TEST_NAME); + let (_, input, _) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + let input = { + // Clear out the external networking records from `input`, since + // we're building an empty blueprint. + let mut builder = input.into_builder(); + *builder.network_resources_mut() = + OmicronZoneNetworkResources::new(); + builder.build() + }; + + // Start with an empty blueprint (sleds with no zones). 
+ let parent = BlueprintBuilder::build_empty_with_sleds_seeded( + input.all_sled_ids(SledFilter::Commissioned), + "test", + TEST_NAME, + ); + + { + // We start empty, and can add a disk + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &parent, + &input, + "test", + ) + .expect("failed to create builder"); + + assert!(builder.disks.changed_disks.is_empty()); + assert!(builder.disks.parent_disks.is_empty()); + + for (sled_id, sled_resources) in + input.all_sled_resources(SledFilter::InService) + { + assert_eq!( + builder + .sled_ensure_disks(sled_id, &sled_resources) + .unwrap(), + Ensure::Added, + ); + } + + assert!(!builder.disks.changed_disks.is_empty()); + assert!(builder.disks.parent_disks.is_empty()); + } + + logctx.cleanup_successful(); + } + + #[test] + fn test_add_nexus_with_no_existing_nexus_zones() { + static TEST_NAME: &str = + "blueprint_builder_test_add_nexus_with_no_existing_nexus_zones"; + let logctx = test_setup_log(TEST_NAME); + + // Discard the example blueprint and start with an empty one. + let (collection, input, _) = + example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + let input = { + // Clear out the external networking records from `input`, since + // we're building an empty blueprint. + let mut builder = input.into_builder(); + *builder.network_resources_mut() = + OmicronZoneNetworkResources::new(); + builder.build() + }; + let parent = BlueprintBuilder::build_empty_with_sleds_seeded( + input.all_sled_ids(SledFilter::Commissioned), + "test", + TEST_NAME, + ); + + // Adding a new Nexus zone currently requires copying settings from an + // existing Nexus zone. `parent` has no zones, so we should fail if we + // try to add a Nexus zone. + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &parent, + &input, + "test", + ) + .expect("failed to create builder"); + + let err = builder + .sled_ensure_zone_multiple_nexus( + collection + .omicron_zones + .keys() + .next() + .copied() + .expect("no sleds present"), + 1, + ) + .unwrap_err(); + + assert!( + matches!(err, Error::NoNexusZonesInParentBlueprint), + "unexpected error {err}" + ); + + logctx.cleanup_successful(); + } + + #[test] + fn test_add_nexus_error_cases() { + static TEST_NAME: &str = "blueprint_builder_test_add_nexus_error_cases"; + let logctx = test_setup_log(TEST_NAME); + let (mut collection, mut input, mut parent) = + example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + + // Remove the Nexus zone from one of the sleds so that + // `sled_ensure_zone_nexus` can attempt to add a Nexus zone to + // `sled_id`. + let sled_id = { + let mut selected_sled_id = None; + for (sled_id, zones) in &mut collection.omicron_zones { + let nzones_before_retain = zones.zones.zones.len(); + zones.zones.zones.retain(|z| !z.zone_type.is_nexus()); + if zones.zones.zones.len() < nzones_before_retain { + selected_sled_id = Some(*sled_id); + // Also remove this zone from the blueprint. + let mut removed_nexus = None; + parent + .blueprint_zones + .get_mut(sled_id) + .expect("missing sled") + .zones + .retain(|z| match &z.zone_type { + BlueprintZoneType::Nexus(z) => { + removed_nexus = Some(z.clone()); + false + } + _ => true, + }); + let removed_nexus = + removed_nexus.expect("removed Nexus from blueprint"); + + // Also remove this Nexus's external networking resources + // from `input`. 
+ let mut builder = input.into_builder(); + let mut new_network_resources = + OmicronZoneNetworkResources::new(); + let old_network_resources = builder.network_resources_mut(); + for ip in old_network_resources.omicron_zone_external_ips() + { + if ip.ip.id() != removed_nexus.external_ip.id { + new_network_resources + .add_external_ip(ip.zone_id, ip.ip) + .expect("copied IP to new input"); + } + } + for nic in old_network_resources.omicron_zone_nics() { + if nic.nic.id.into_untyped_uuid() + != removed_nexus.nic.id + { + new_network_resources + .add_nic(nic.zone_id, nic.nic) + .expect("copied NIC to new input"); + } + } + mem::swap( + old_network_resources, + &mut new_network_resources, + ); + input = builder.build(); + + break; + } + } + selected_sled_id.expect("found no sleds with Nexus zone") + }; + + { + // Attempting to add Nexus to the sled we removed it from (with no + // other changes to the environment) should succeed. + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &parent, + &input, + "test", + ) + .expect("failed to create builder"); + let added = builder + .sled_ensure_zone_multiple_nexus(sled_id, 1) + .expect("failed to ensure nexus zone"); + + assert_eq!(added, EnsureMultiple::Added(1)); + } + + { + // Attempting to add multiple Nexus zones to the sled we removed it + // from (with no other changes to the environment) should also + // succeed. + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &parent, + &input, + "test", + ) + .expect("failed to create builder"); + let added = builder + .sled_ensure_zone_multiple_nexus(sled_id, 3) + .expect("failed to ensure nexus zone"); + + assert_eq!(added, EnsureMultiple::Added(3)); + } + + { + // Replace the policy's external service IP pool ranges with ranges + // that are already in use by existing zones. Attempting to add a + // Nexus with no remaining external IPs should fail. + let mut used_ip_ranges = Vec::new(); + for (_, z) in parent.all_omicron_zones(BlueprintZoneFilter::All) { + if let Some((external_ip, _)) = + z.zone_type.external_networking() + { + used_ip_ranges.push(IpRange::from(external_ip.ip())); + } + } + assert!(!used_ip_ranges.is_empty()); + let input = { + let mut builder = input.into_builder(); + builder.policy_mut().service_ip_pool_ranges = used_ip_ranges; + builder.build() + }; + + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &parent, + &input, + "test", + ) + .expect("failed to create builder"); + let err = builder + .sled_ensure_zone_multiple_nexus(sled_id, 1) + .unwrap_err(); + + assert!( + matches!(err, Error::NoExternalServiceIpAvailable), + "unexpected error {err}" + ); + } + + // We're not testing the `ExhaustedNexusIps` error case (where we've run + // out of Nexus OPTE addresses), because it's fairly diffiult to induce + // that from outside: we would need to start from a parent blueprint + // that contained a Nexus instance for every IP in the + // `NEXUS_OPTE_*_SUBNET`. We could hack around that by creating the + // `BlueprintBuilder` and mucking with its internals, but that doesn't + // seem like a particularly useful test either. 
+ + logctx.cleanup_successful(); + } + + #[test] + fn test_invalid_parent_blueprint_two_zones_with_same_external_ip() { + static TEST_NAME: &str = + "blueprint_builder_test_invalid_parent_blueprint_\ + two_zones_with_same_external_ip"; + let logctx = test_setup_log(TEST_NAME); + let (_, input, mut parent) = + example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + + // We should fail if the parent blueprint claims to contain two + // zones with the same external IP. Skim through the zones, copy the + // external IP from one Nexus zone, then assign it to a later Nexus + // zone. + let mut found_second_nexus_zone = false; + let mut nexus_external_ip = None; + + 'outer: for zones in parent.blueprint_zones.values_mut() { + for z in zones.zones.iter_mut() { + if let BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + external_ip, + .. + }) = &mut z.zone_type + { + if let Some(ip) = nexus_external_ip { + *external_ip = ip; + found_second_nexus_zone = true; + break 'outer; + } else { + nexus_external_ip = Some(*external_ip); + continue 'outer; + } + } + } + } + assert!(found_second_nexus_zone, "only one Nexus zone present?"); + + match BlueprintBuilder::new_based_on( + &logctx.log, + &parent, + &input, + "test", + ) { + Ok(_) => panic!("unexpected success"), + Err(err) => assert!( + err.to_string().contains("duplicate external IP"), + "unexpected error: {err:#}" + ), + }; + + logctx.cleanup_successful(); + } + + #[test] + fn test_invalid_parent_blueprint_two_nexus_zones_with_same_nic_ip() { + static TEST_NAME: &str = + "blueprint_builder_test_invalid_parent_blueprint_\ + two_nexus_zones_with_same_nic_ip"; + let logctx = test_setup_log(TEST_NAME); + let (_, input, mut parent) = + example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + + // We should fail if the parent blueprint claims to contain two + // Nexus zones with the same NIC IP. Skim through the zones, copy + // the NIC IP from one Nexus zone, then assign it to a later + // Nexus zone. + let mut found_second_nexus_zone = false; + let mut nexus_nic_ip = None; + + 'outer: for zones in parent.blueprint_zones.values_mut() { + for z in zones.zones.iter_mut() { + if let BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + nic, + .. + }) = &mut z.zone_type + { + if let Some(ip) = nexus_nic_ip { + nic.ip = ip; + found_second_nexus_zone = true; + break 'outer; + } else { + nexus_nic_ip = Some(nic.ip); + continue 'outer; + } + } + } + } + assert!(found_second_nexus_zone, "only one Nexus zone present?"); + + match BlueprintBuilder::new_based_on( + &logctx.log, + &parent, + &input, + "test", + ) { + Ok(_) => panic!("unexpected success"), + Err(err) => assert!( + err.to_string().contains("duplicate Nexus NIC IP"), + "unexpected error: {err:#}" + ), + }; + + logctx.cleanup_successful(); + } + + #[test] + fn test_invalid_parent_blueprint_two_zones_with_same_vnic_mac() { + static TEST_NAME: &str = + "blueprint_builder_test_invalid_parent_blueprint_\ + two_zones_with_same_vnic_mac"; + let logctx = test_setup_log(TEST_NAME); + let (_, input, mut parent) = + example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + + // We should fail if the parent blueprint claims to contain two + // zones with the same service vNIC MAC address. Skim through the + // zones, copy the NIC MAC from one Nexus zone, then assign it to a + // later Nexus zone. 
+ let mut found_second_nexus_zone = false; + let mut nexus_nic_mac = None; + + 'outer: for zones in parent.blueprint_zones.values_mut() { + for z in zones.zones.iter_mut() { + if let BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + nic, + .. + }) = &mut z.zone_type + { + if let Some(mac) = nexus_nic_mac { + nic.mac = mac; + found_second_nexus_zone = true; + break 'outer; + } else { + nexus_nic_mac = Some(nic.mac); + continue 'outer; + } + } + } + } + assert!(found_second_nexus_zone, "only one Nexus zone present?"); + + match BlueprintBuilder::new_based_on( + &logctx.log, + &parent, + &input, + "test", + ) { + Ok(_) => panic!("unexpected success"), + Err(err) => assert!( + err.to_string().contains("duplicate service vNIC MAC"), + "unexpected error: {err:#}" + ), + }; + + logctx.cleanup_successful(); + } +} diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/external_networking.rs b/nexus/reconfigurator/planning/src/blueprint_builder/external_networking.rs new file mode 100644 index 0000000000..950ce89c43 --- /dev/null +++ b/nexus/reconfigurator/planning/src/blueprint_builder/external_networking.rs @@ -0,0 +1,329 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use super::Error; +use anyhow::bail; +use debug_ignore::DebugIgnore; +use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; +use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintZoneFilter; +use nexus_types::deployment::BlueprintZoneType; +use nexus_types::deployment::PlanningInput; +use omicron_common::address::DNS_OPTE_IPV4_SUBNET; +use omicron_common::address::DNS_OPTE_IPV6_SUBNET; +use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; +use omicron_common::address::NEXUS_OPTE_IPV6_SUBNET; +use omicron_common::address::NTP_OPTE_IPV4_SUBNET; +use omicron_common::address::NTP_OPTE_IPV6_SUBNET; +use omicron_common::api::external::MacAddr; +use oxnet::IpNet; +use std::collections::HashSet; +use std::hash::Hash; +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::Ipv6Addr; + +#[derive(Debug)] +pub(super) struct BuilderExternalNetworking<'a> { + // These fields mirror how RSS chooses addresses for zone NICs. + nexus_v4_ips: AvailableIterator<'static, Ipv4Addr>, + nexus_v6_ips: AvailableIterator<'static, Ipv6Addr>, + + // Iterator of available external IPs for service zones + available_external_ips: AvailableIterator<'a, IpAddr>, + + // Iterator of available MAC addresses in the system address range + available_system_macs: AvailableIterator<'a, MacAddr>, +} + +impl<'a> BuilderExternalNetworking<'a> { + pub(super) fn new( + parent_blueprint: &'a Blueprint, + input: &'a PlanningInput, + ) -> anyhow::Result { + // Scan through the parent blueprint and build several sets of "used + // resources". When adding new control plane zones to a sled, we may + // need to allocate new resources to that zone. However, allocation at + // this point is entirely optimistic and theoretical: our caller may + // discard the blueprint we create without ever making it the new + // target, or it might be an arbitrarily long time before it becomes + // the target. We need to be able to make allocation decisions that we + // expect the blueprint executor to be able to realize successfully if + // and when we become the target, but we cannot _actually_ perform + // resource allocation. 
+        //
+        // To do this, we look at our parent blueprint's used resources, and
+        // then choose new resources that aren't already in use (if possible;
+        // if we need to allocate a new resource and the parent blueprint
+        // appears to be using all the resources of that kind, our blueprint
+        // generation will fail).
+        //
+        // For example, RSS assigns Nexus NIC IPs by stepping through a list of
+        // addresses based on `NEXUS_OPTE_IPVx_SUBNET` (as in the iterators
+        // below). We use the same list of addresses, but additionally need to
+        // filter out the existing IPs for any Nexus instances that already
+        // exist.
+        //
+        // Note that by building these iterators up front based on
+        // `parent_blueprint`, we cannot reuse resources in a case where we
+        // remove a zone that used a resource and then add another zone that
+        // wants the same kind of resource. That is mostly okay, but there are
+        // some cases in which we may have to do that -- particularly external
+        // DNS zones, which tend to have a small number of fixed IPs. Solving
+        // that is a TODO.
+        //
+        // Also note that currently, we don't perform any kind of garbage
+        // collection on sleds and zones that no longer have any attached
+        // resources. Once a sled or zone is marked expunged, it will always
+        // stay in that state.
+        // https://github.com/oxidecomputer/omicron/issues/5552 tracks
+        // implementing this kind of garbage collection, and we should do it
+        // very soon.
+
+        let mut existing_nexus_v4_ips: HashSet<Ipv4Addr> = HashSet::new();
+        let mut existing_nexus_v6_ips: HashSet<Ipv6Addr> = HashSet::new();
+        let mut used_external_ips: HashSet<IpAddr> = HashSet::new();
+        let mut used_macs: HashSet<MacAddr> = HashSet::new();
+
+        for (_, z) in
+            parent_blueprint.all_omicron_zones(BlueprintZoneFilter::All)
+        {
+            let zone_type = &z.zone_type;
+            if let BlueprintZoneType::Nexus(nexus) = zone_type {
+                match nexus.nic.ip {
+                    IpAddr::V4(ip) => {
+                        if !existing_nexus_v4_ips.insert(ip) {
+                            bail!("duplicate Nexus NIC IP: {ip}");
+                        }
+                    }
+                    IpAddr::V6(ip) => {
+                        if !existing_nexus_v6_ips.insert(ip) {
+                            bail!("duplicate Nexus NIC IP: {ip}");
+                        }
+                    }
+                }
+            }
+
+            if let Some((external_ip, nic)) = zone_type.external_networking() {
+                // For the test suite, ignore localhost. It gets reused many
+                // times and that's okay. We don't expect to see localhost
+                // outside the test suite.
+                if !external_ip.ip().is_loopback()
+                    && !used_external_ips.insert(external_ip.ip())
+                {
+                    bail!("duplicate external IP: {external_ip:?}");
+                }
+
+                if !used_macs.insert(nic.mac) {
+                    bail!("duplicate service vNIC MAC: {}", nic.mac);
+                }
+            }
+        }
+
+        // Check the planning input: there shouldn't be any external networking
+        // resources in the database (the source of `input`) that we don't know
+        // about from the parent blueprint.
+        for external_ip_entry in
+            input.network_resources().omicron_zone_external_ips()
+        {
+            // As above, ignore localhost (used by the test suite).
+ if external_ip_entry.ip.ip().is_loopback() { + continue; + } + if !used_external_ips.contains(&external_ip_entry.ip.ip()) { + bail!( + "planning input contains unexpected external IP \ + (IP not found in parent blueprint): {external_ip_entry:?}" + ); + } + } + for nic_entry in input.network_resources().omicron_zone_nics() { + if !used_macs.contains(&nic_entry.nic.mac) { + bail!( + "planning input contains unexpected NIC \ + (MAC not found in parent blueprint): {nic_entry:?}" + ); + } + match nic_entry.nic.ip { + IpAddr::V4(ip) if NEXUS_OPTE_IPV4_SUBNET.contains(ip) => { + if !existing_nexus_v4_ips.contains(&ip) { + bail!( + "planning input contains unexpected NIC \ + (IP not found in parent blueprint): {nic_entry:?}" + ); + } + } + IpAddr::V4(ip) if NTP_OPTE_IPV4_SUBNET.contains(ip) => { + // TODO check existing_ntp_v4_ips, once it exists + } + IpAddr::V4(ip) if DNS_OPTE_IPV4_SUBNET.contains(ip) => { + // TODO check existing_dns_v4_ips, once it exists + } + IpAddr::V6(ip) if NEXUS_OPTE_IPV6_SUBNET.contains(ip) => { + if !existing_nexus_v6_ips.contains(&ip) { + bail!( + "planning input contains unexpected NIC \ + (IP not found in parent blueprint): {nic_entry:?}" + ); + } + } + IpAddr::V6(ip) if NTP_OPTE_IPV6_SUBNET.contains(ip) => { + // TODO check existing_ntp_v6_ips, once it exists + } + IpAddr::V6(ip) if DNS_OPTE_IPV6_SUBNET.contains(ip) => { + // TODO check existing_dns_v6_ips, once it exists + } + _ => { + bail!( + "planning input contains unexpected NIC \ + (IP not contained in known OPTE subnet): {nic_entry:?}" + ) + } + } + } + + // TODO-performance Building these iterators as "walk through the list + // and skip anything we've used already" is fine as long as we're + // talking about a small number of resources (e.g., single-digit number + // of Nexus instances), but wouldn't be ideal if we have many resources + // we need to skip. We could do something smarter here based on the sets + // of used resources we built above if needed. + let nexus_v4_ips = AvailableIterator::new( + NEXUS_OPTE_IPV4_SUBNET + .addr_iter() + .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), + existing_nexus_v4_ips, + ); + let nexus_v6_ips = AvailableIterator::new( + NEXUS_OPTE_IPV6_SUBNET + .iter() + .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), + existing_nexus_v6_ips, + ); + let available_external_ips = AvailableIterator::new( + input.service_ip_pool_ranges().iter().flat_map(|r| r.iter()), + used_external_ips, + ); + let available_system_macs = + AvailableIterator::new(MacAddr::iter_system(), used_macs); + + Ok(Self { + nexus_v4_ips, + nexus_v6_ips, + available_external_ips, + available_system_macs, + }) + } + + pub(super) fn for_new_nexus( + &mut self, + ) -> Result { + let external_ip = self + .available_external_ips + .next() + .ok_or(Error::NoExternalServiceIpAvailable)?; + let (nic_ip, nic_subnet) = match external_ip { + IpAddr::V4(_) => ( + self.nexus_v4_ips + .next() + .ok_or(Error::ExhaustedNexusIps)? + .into(), + IpNet::from(*NEXUS_OPTE_IPV4_SUBNET), + ), + IpAddr::V6(_) => ( + self.nexus_v6_ips + .next() + .ok_or(Error::ExhaustedNexusIps)? 
+                    .into(),
+                IpNet::from(*NEXUS_OPTE_IPV6_SUBNET),
+            ),
+        };
+        let nic_mac = self
+            .available_system_macs
+            .next()
+            .ok_or(Error::NoSystemMacAddressAvailable)?;
+
+        Ok(ExternalNetworkingChoice {
+            external_ip,
+            nic_ip,
+            nic_subnet,
+            nic_mac,
+        })
+    }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(super) struct ExternalNetworkingChoice {
+    pub(super) external_ip: IpAddr,
+    pub(super) nic_ip: IpAddr,
+    pub(super) nic_subnet: IpNet,
+    pub(super) nic_mac: MacAddr,
+}
+
+/// Combines a base iterator with an `in_use` set, filtering out any elements
+/// that are in the "in_use" set.
+///
+/// This can be done with a chained `.filter` on the iterator, but
+/// `AvailableIterator` also allows for inspection of the `in_use` set.
+///
+/// Note that this is a stateful iterator -- i.e. it implements `Iterator`, not
+/// `IntoIterator`. That's what we currently need in the planner.
+#[derive(Debug)]
+struct AvailableIterator<'a, T> {
+    base: DebugIgnore<Box<dyn Iterator<Item = T> + Send + 'a>>,
+    in_use: HashSet<T>,
+}
+
+impl<'a, T: Hash + Eq> AvailableIterator<'a, T> {
+    /// Creates a new `AvailableIterator` from a base iterator and a set of
+    /// elements that are in use.
+    fn new<I>(base: I, in_use: impl IntoIterator<Item = T>) -> Self
+    where
+        I: Iterator<Item = T> + Send + 'a,
+    {
+        let in_use = in_use.into_iter().collect();
+        AvailableIterator { base: DebugIgnore(Box::new(base)), in_use }
+    }
+}
+
+impl<T: Hash + Eq> Iterator for AvailableIterator<'_, T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.base.find(|item| !self.in_use.contains(item))
+    }
+}
+
+#[cfg(test)]
+pub mod test {
+    use super::*;
+    use test_strategy::proptest;
+
+    /// Test that `AvailableIterator` correctly filters out items that are in
+    /// use.
+    #[proptest]
+    fn test_available_iterator(items: HashSet<(i32, bool)>) {
+        let mut in_use_map = HashSet::new();
+        let mut expected_available = Vec::new();
+        let items: Vec<_> = items
+            .into_iter()
+            .map(|(item, in_use)| {
+                if in_use {
+                    in_use_map.insert(item);
+                } else {
+                    expected_available.push(item);
+                }
+                item
+            })
+            .collect();
+
+        let available = AvailableIterator::new(items.into_iter(), in_use_map);
+        let actual_available = available.collect::<Vec<_>>();
+
+        assert_eq!(
+            expected_available, actual_available,
+            "available items match"
+        );
+    }
+}
diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/mod.rs b/nexus/reconfigurator/planning/src/blueprint_builder/mod.rs
new file mode 100644
index 0000000000..99d3b41772
--- /dev/null
+++ b/nexus/reconfigurator/planning/src/blueprint_builder/mod.rs
@@ -0,0 +1,11 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Low-level facility for generating Blueprints
+
+mod builder;
+mod external_networking;
+mod zones;
+
+pub use builder::*;
diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs b/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs
new file mode 100644
index 0000000000..68e2b9c2a2
--- /dev/null
+++ b/nexus/reconfigurator/planning/src/blueprint_builder/zones.rs
@@ -0,0 +1,411 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+ +use std::collections::BTreeSet; + +use nexus_types::deployment::{ + BlueprintZoneConfig, BlueprintZoneDisposition, BlueprintZonesConfig, +}; +use omicron_common::api::external::Generation; +use omicron_uuid_kinds::OmicronZoneUuid; +use thiserror::Error; + +#[derive(Debug)] +#[must_use] +pub(super) struct BuilderZonesConfig { + // The current generation -- this is bumped at blueprint build time and is + // otherwise not exposed to callers. + generation: Generation, + + // The list of zones, along with their state. + zones: Vec, +} + +impl BuilderZonesConfig { + pub(super) fn new() -> Self { + Self { + // Note that the first generation is reserved to mean the one + // containing no zones. See + // OmicronZonesConfig::INITIAL_GENERATION. + // + // Since we're currently assuming that creating a new + // `BuilderZonesConfig` means that we're going to add new zones + // shortly, we start with Generation::new() here. It'll get + // bumped up to the next one in `Self::build`. + generation: Generation::new(), + zones: vec![], + } + } + + pub(super) fn from_parent(parent: &BlueprintZonesConfig) -> Self { + Self { + // We'll bump this up at build time. + generation: parent.generation, + + zones: parent + .zones + .iter() + .map(|zone| BuilderZoneConfig { + zone: zone.clone(), + state: BuilderZoneState::Unchanged, + }) + .collect(), + } + } + + pub(super) fn add_zone( + &mut self, + zone: BlueprintZoneConfig, + ) -> Result<(), BuilderZonesConfigError> { + if self.zones.iter().any(|z| z.zone.id == zone.id) { + // We shouldn't be trying to add zones that already exist -- + // something went wrong in the planner logic. + return Err(BuilderZonesConfigError::AddExistingZone { + zone_id: zone.id, + }); + }; + + self.zones + .push(BuilderZoneConfig { zone, state: BuilderZoneState::Added }); + Ok(()) + } + + pub(super) fn expunge_zones( + &mut self, + mut zones: BTreeSet, + ) -> Result<(), BuilderZonesConfigError> { + for zone in &mut self.zones { + if zones.remove(&zone.zone.id) { + // Check that the zone is expungeable. Typically, zones passed + // in here should have had this check done to them already, but + // in case they're not, or in case something else about those + // zones changed in between, check again. + is_already_expunged(&zone.zone, zone.state)?; + zone.zone.disposition = BlueprintZoneDisposition::Expunged; + zone.state = BuilderZoneState::Modified; + } + } + + // All zones passed in should have been found -- are there any left + // over? + if !zones.is_empty() { + return Err(BuilderZonesConfigError::ExpungeUnmatchedZones { + unmatched: zones, + }); + } + + Ok(()) + } + + pub(super) fn iter_zones( + &self, + ) -> impl Iterator { + self.zones.iter() + } + + pub(super) fn build(self) -> BlueprintZonesConfig { + let mut ret = BlueprintZonesConfig { + // Something we could do here is to check if any zones have + // actually been modified, and if not, return the parent's + // generation. For now, we depend on callers to only call + // `BlueprintZonesBuilder::change_sled_zones` when they really + // mean it. 
+ generation: self.generation.next(), + zones: self.zones.into_iter().map(|z| z.zone).collect(), + }; + ret.sort(); + ret + } +} + +pub(super) fn is_already_expunged( + zone: &BlueprintZoneConfig, + state: BuilderZoneState, +) -> Result { + match zone.disposition { + BlueprintZoneDisposition::InService + | BlueprintZoneDisposition::Quiesced => { + if state != BuilderZoneState::Unchanged { + // We shouldn't be trying to expunge zones that have also been + // changed in this blueprint -- something went wrong in the planner + // logic. + return Err(BuilderZonesConfigError::ExpungeModifiedZone { + zone_id: zone.id, + state, + }); + } + Ok(false) + } + BlueprintZoneDisposition::Expunged => { + // Treat expungement as idempotent. + Ok(true) + } + } +} + +#[derive(Debug)] +pub(super) struct BuilderZoneConfig { + zone: BlueprintZoneConfig, + state: BuilderZoneState, +} + +impl BuilderZoneConfig { + pub(super) fn zone(&self) -> &BlueprintZoneConfig { + &self.zone + } + + pub(super) fn state(&self) -> BuilderZoneState { + self.state + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub(super) enum BuilderZoneState { + Unchanged, + Modified, + Added, +} + +#[derive(Clone, Debug, PartialEq, Eq, Error)] +pub(super) enum BuilderZonesConfigError { + #[error("attempted to add zone that already exists: {zone_id}")] + AddExistingZone { zone_id: OmicronZoneUuid }, + #[error( + "attempted to expunge zone {zone_id} that was in state {state:?} \ + (can only expunge unchanged zones)" + )] + ExpungeModifiedZone { zone_id: OmicronZoneUuid, state: BuilderZoneState }, + #[error( + "while expunging zones, not all zones provided were found: {unmatched:?}" + )] + ExpungeUnmatchedZones { unmatched: BTreeSet }, +} + +#[cfg(test)] +mod tests { + use std::{ + collections::BTreeMap, + net::{Ipv6Addr, SocketAddrV6}, + }; + + use maplit::btreeset; + use nexus_types::{ + deployment::{ + blueprint_zone_type, BlueprintZoneType, SledDetails, SledFilter, + SledResources, + }, + external_api::views::{SledPolicy, SledState}, + }; + use omicron_common::address::Ipv6Subnet; + use omicron_test_utils::dev::test_setup_log; + + use crate::{ + blueprint_builder::{ + test::{verify_blueprint, DEFAULT_N_SLEDS}, + BlueprintBuilder, Ensure, + }, + example::ExampleSystem, + }; + + use super::*; + + /// A test focusing on `BlueprintZonesBuilder` and its internal logic. + #[test] + fn test_builder_zones() { + static TEST_NAME: &str = "blueprint_test_builder_zones"; + let logctx = test_setup_log(TEST_NAME); + let mut example = + ExampleSystem::new(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + let blueprint_initial = example.blueprint; + + // Add a completely bare sled to the input. + let (new_sled_id, input2) = { + let mut input = example.input.clone().into_builder(); + let new_sled_id = example.sled_rng.next(); + input + .add_sled( + new_sled_id, + SledDetails { + policy: SledPolicy::provisionable(), + state: SledState::Active, + resources: SledResources { + subnet: Ipv6Subnet::new( + "fd00:1::".parse().unwrap(), + ), + zpools: BTreeMap::new(), + }, + }, + ) + .expect("adding new sled"); + + (new_sled_id, input.build()) + }; + + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint_initial, + &input2, + "the_test", + ) + .expect("creating blueprint builder"); + builder.set_rng_seed((TEST_NAME, "bp2")); + + // Test adding a new sled with an NTP zone. 
+ assert_eq!( + builder.sled_ensure_zone_ntp(new_sled_id).unwrap(), + Ensure::Added + ); + + // Iterate over the zones for the sled and ensure that the NTP zone is + // present. + { + let mut zones = builder.zones.current_sled_zones(new_sled_id); + let (_, state) = zones.next().expect("exactly one zone for sled"); + assert!(zones.next().is_none(), "exactly one zone for sled"); + assert_eq!( + state, + BuilderZoneState::Added, + "NTP zone should have been added" + ); + } + + // Now, test adding a new zone (Oximeter, picked arbitrarily) to an + // existing sled. + let existing_sled_id = example + .input + .all_sled_ids(SledFilter::Commissioned) + .next() + .expect("at least one sled present"); + let change = builder.zones.change_sled_zones(existing_sled_id); + + let new_zone_id = OmicronZoneUuid::new_v4(); + change + .add_zone(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: new_zone_id, + underlay_address: Ipv6Addr::UNSPECIFIED, + zone_type: BlueprintZoneType::Oximeter( + blueprint_zone_type::Oximeter { + address: SocketAddrV6::new( + Ipv6Addr::UNSPECIFIED, + 0, + 0, + 0, + ), + }, + ), + }) + .expect("adding new zone"); + + // Attempt to expunge one of the other zones on the sled. + let existing_zone_id = change + .iter_zones() + .find(|z| z.zone.id != new_zone_id) + .expect("at least one existing zone") + .zone + .id; + change + .expunge_zones(btreeset! { existing_zone_id }) + .expect("expunging existing zone"); + // Do it again to ensure that expunging an already-expunged zone is + // idempotent, even within the same blueprint. + change + .expunge_zones(btreeset! { existing_zone_id }) + .expect("expunging already-expunged zone"); + // But expunging a zone that doesn't exist should fail. + let non_existent_zone_id = OmicronZoneUuid::new_v4(); + let non_existent_set = btreeset! { non_existent_zone_id }; + let error = change + .expunge_zones(non_existent_set.clone()) + .expect_err("expunging non-existent zone"); + assert_eq!( + error, + BuilderZonesConfigError::ExpungeUnmatchedZones { + unmatched: non_existent_set + } + ); + + { + // Iterate over the zones and ensure that the Oximeter zone is + // present, and marked added. + let mut zones = builder.zones.current_sled_zones(existing_sled_id); + zones + .find_map(|(z, state)| { + if z.id == new_zone_id { + assert_eq!( + state, + BuilderZoneState::Added, + "new zone ID {new_zone_id} should be marked added" + ); + Some(()) + } else { + None + } + }) + .expect("new zone ID should be present"); + } + + // Attempt to expunge the newly added Oximeter zone. This should fail + // because we only support expunging zones that are unchanged from the + // parent blueprint. + let error = builder + .zones + .change_sled_zones(existing_sled_id) + .expunge_zones(btreeset! { new_zone_id }) + .expect_err("expunging a new zone should fail"); + assert_eq!( + error, + BuilderZonesConfigError::ExpungeModifiedZone { + zone_id: new_zone_id, + state: BuilderZoneState::Added + } + ); + + // Now build the blueprint and ensure that all the changes we described + // above are present. + let blueprint = builder.build(); + verify_blueprint(&blueprint); + let diff = blueprint.diff_since_blueprint(&blueprint_initial); + println!("expecting new NTP and Oximeter zones:\n{}", diff.display()); + + // No sleds were removed. + assert_eq!(diff.sleds_removed.len(), 0); + + // One sled was added. 
+ assert_eq!(diff.sleds_added.len(), 1); + let sled_id = diff.sleds_added.first().unwrap(); + assert_eq!(*sled_id, new_sled_id); + let new_sled_zones = diff.zones.added.get(sled_id).unwrap(); + // The generation number should be newer than the initial default. + assert_eq!( + new_sled_zones.generation_after.unwrap(), + Generation::new().next() + ); + assert_eq!(new_sled_zones.zones.len(), 1); + + // TODO: AJS - See comment above - we don't actually use the control sled anymore + // so the comparison was changed. + // One sled was modified: existing_sled_id + assert_eq!(diff.sleds_modified.len(), 1, "1 sled modified"); + for sled_id in &diff.sleds_modified { + assert_eq!(*sled_id, existing_sled_id); + let added = diff.zones.added.get(sled_id).unwrap(); + assert_eq!( + added.generation_after.unwrap(), + added.generation_before.unwrap().next() + ); + assert_eq!(added.zones.len(), 1); + let added_zone = &added.zones[0]; + assert_eq!(added_zone.id(), new_zone_id); + + assert!(!diff.zones.removed.contains_key(sled_id)); + let modified = diff.zones.modified.get(sled_id).unwrap(); + assert_eq!(modified.zones.len(), 1); + let modified_zone = &modified.zones[0]; + assert_eq!(modified_zone.zone.id(), existing_zone_id); + } + + logctx.cleanup_successful(); + } +} diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs index 23df35e9ae..e52fe3fc4b 100644 --- a/nexus/reconfigurator/planning/src/example.rs +++ b/nexus/reconfigurator/planning/src/example.rs @@ -5,19 +5,22 @@ //! Example blueprints use crate::blueprint_builder::BlueprintBuilder; -use crate::blueprint_builder::UuidRng; use crate::system::SledBuilder; use crate::system::SystemDescription; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintZoneFilter; -use nexus_types::deployment::Policy; +use nexus_types::deployment::OmicronZoneNic; +use nexus_types::deployment::PlanningInput; +use nexus_types::deployment::SledFilter; use nexus_types::inventory::Collection; -use omicron_common::api::external::Generation; -use sled_agent_client::types::OmicronZonesConfig; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledKind; +use omicron_uuid_kinds::VnicUuid; +use typed_rng::TypedUuidRng; pub struct ExampleSystem { pub system: SystemDescription, - pub policy: Policy, + pub input: PlanningInput, pub collection: Collection, pub blueprint: Blueprint, // If we add more types of RNGs than just sleds here, we'll need to @@ -27,7 +30,7 @@ pub struct ExampleSystem { // This is currently only used for tests, so it looks unused in normal // builds. But in the future it could be used by other consumers, too. #[allow(dead_code)] - pub(crate) sled_rng: UuidRng, + pub(crate) sled_rng: TypedUuidRng, } impl ExampleSystem { @@ -37,69 +40,49 @@ impl ExampleSystem { nsleds: usize, ) -> ExampleSystem { let mut system = SystemDescription::new(); - let mut sled_rng = UuidRng::from_seed(test_name, "ExampleSystem"); - let sled_ids: Vec<_> = - (0..nsleds).map(|_| sled_rng.next_uuid()).collect(); + let mut sled_rng = TypedUuidRng::from_seed(test_name, "ExampleSystem"); + let sled_ids: Vec<_> = (0..nsleds).map(|_| sled_rng.next()).collect(); for sled_id in &sled_ids { let _ = system.sled(SledBuilder::new().id(*sled_id)).unwrap(); } - let policy = system.to_policy().expect("failed to make policy"); - let mut inventory_builder = - system.to_collection_builder().expect("failed to build collection"); - - // For each sled, have it report 0 zones in the initial inventory. 
- // This will enable us to build a blueprint from the initial - // inventory, which we can then use to build new blueprints. - for sled_id in &sled_ids { - inventory_builder - .found_sled_omicron_zones( - "fake sled agent", - *sled_id, - OmicronZonesConfig { - generation: Generation::new(), - zones: vec![], - }, - ) - .expect("recording Omicron zones"); - } + let mut input_builder = system + .to_planning_input_builder() + .expect("failed to make planning input builder"); + let base_input = input_builder.clone().build(); - let empty_zone_inventory = inventory_builder.build(); - let initial_blueprint = - BlueprintBuilder::build_initial_from_collection_seeded( - &empty_zone_inventory, - Generation::new(), - Generation::new(), - &policy, - "test suite", - (test_name, "ExampleSystem initial"), - ) - .unwrap(); + // Start with an empty blueprint containing only our sleds, no zones. + let initial_blueprint = BlueprintBuilder::build_empty_with_sleds_seeded( + base_input.all_sled_ids(SledFilter::Commissioned), + "test suite", + (test_name, "ExampleSystem initial"), + ); // Now make a blueprint and collection with some zones on each sled. let mut builder = BlueprintBuilder::new_based_on( - &log, + log, &initial_blueprint, - Generation::new(), - Generation::new(), - &policy, + &base_input, "test suite", ) .unwrap(); builder.set_rng_seed((test_name, "ExampleSystem make_zones")); - for (sled_id, sled_resources) in &policy.sleds { - let _ = builder.sled_ensure_zone_ntp(*sled_id).unwrap(); + for (sled_id, sled_resources) in + base_input.all_sled_resources(SledFilter::Commissioned) + { + let _ = builder.sled_ensure_zone_ntp(sled_id).unwrap(); let _ = builder .sled_ensure_zone_multiple_nexus_with_config( - *sled_id, + sled_id, 1, false, vec![], ) .unwrap(); - for pool_name in &sled_resources.zpools { + let _ = builder.sled_ensure_disks(sled_id, sled_resources).unwrap(); + for pool_name in sled_resources.zpools.keys() { let _ = builder - .sled_ensure_zone_crucible(*sled_id, pool_name.clone()) + .sled_ensure_zone_crucible(sled_id, *pool_name) .unwrap(); } } @@ -107,17 +90,41 @@ impl ExampleSystem { let blueprint = builder.build(); let mut builder = system.to_collection_builder().expect("failed to build collection"); + builder.set_rng_seed((test_name, "ExampleSystem collection")); for sled_id in blueprint.sleds() { let Some(zones) = blueprint.blueprint_zones.get(&sled_id) else { continue; }; + for zone in zones.zones.iter() { + let service_id = zone.id; + if let Some((external_ip, nic)) = + zone.zone_type.external_networking() + { + input_builder + .add_omicron_zone_external_ip(service_id, external_ip) + .expect("failed to add Omicron zone external IP"); + input_builder + .add_omicron_zone_nic( + service_id, + OmicronZoneNic { + // TODO-cleanup use `TypedUuid` everywhere + id: VnicUuid::from_untyped_uuid(nic.id), + mac: nic.mac, + ip: nic.ip, + slot: nic.slot, + primary: nic.primary, + }, + ) + .expect("failed to add Omicron zone NIC"); + } + } builder .found_sled_omicron_zones( "fake sled agent", sled_id, zones.to_omicron_zones_config( - BlueprintZoneFilter::SledAgentPut, + BlueprintZoneFilter::ShouldBeRunning, ), ) .unwrap(); @@ -125,7 +132,7 @@ impl ExampleSystem { ExampleSystem { system, - policy, + input: input_builder.build(), collection: builder.build(), blueprint, sled_rng, @@ -133,7 +140,8 @@ impl ExampleSystem { } } -/// Returns a collection and policy describing a pretty simple system. +/// Returns a collection, planning input, and blueprint describing a pretty +/// simple system. 
/// /// The test name is used as the RNG seed. /// @@ -144,7 +152,7 @@ pub fn example( log: &slog::Logger, test_name: &str, nsleds: usize, -) -> (Collection, Policy) { +) -> (Collection, PlanningInput, Blueprint) { let example = ExampleSystem::new(log, test_name, nsleds); - (example.collection, example.policy) + (example.collection, example.input, example.blueprint) } diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 60eef225d3..6ed81cbb63 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -10,20 +10,35 @@ use crate::blueprint_builder::BlueprintBuilder; use crate::blueprint_builder::Ensure; use crate::blueprint_builder::EnsureMultiple; use crate::blueprint_builder::Error; +use crate::planner::omicron_zone_placement::PlacementError; use nexus_types::deployment::Blueprint; -use nexus_types::deployment::Policy; +use nexus_types::deployment::BlueprintZoneDisposition; +use nexus_types::deployment::CockroachDbClusterVersion; +use nexus_types::deployment::CockroachDbPreserveDowngrade; +use nexus_types::deployment::CockroachDbSettings; +use nexus_types::deployment::PlanningInput; +use nexus_types::deployment::SledFilter; +use nexus_types::deployment::ZpoolFilter; +use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledState; use nexus_types::inventory::Collection; -use omicron_common::api::external::Generation; +use omicron_uuid_kinds::SledUuid; +use slog::error; use slog::{info, warn, Logger}; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::hash::Hash; -use uuid::Uuid; +use std::str::FromStr; + +use self::omicron_zone_placement::DiscretionaryOmicronZone; +use self::omicron_zone_placement::OmicronZonePlacement; +use self::omicron_zone_placement::OmicronZonePlacementSledState; + +mod omicron_zone_placement; pub struct Planner<'a> { log: Logger, - policy: &'a Policy, + input: &'a PlanningInput, blueprint: BlueprintBuilder<'a>, // latest inventory collection // @@ -41,9 +56,7 @@ impl<'a> Planner<'a> { pub fn new_based_on( log: Logger, parent_blueprint: &'a Blueprint, - internal_dns_version: Generation, - external_dns_version: Generation, - policy: &'a Policy, + input: &'a PlanningInput, creator: &str, // NOTE: Right now, we just assume that this is the latest inventory // collection. See the comment on the corresponding field in `Planner`. @@ -52,12 +65,10 @@ impl<'a> Planner<'a> { let blueprint = BlueprintBuilder::new_based_on( &log, parent_blueprint, - internal_dns_version, - external_dns_version, - policy, + input, creator, )?; - Ok(Planner { log, policy, blueprint, inventory }) + Ok(Planner { log, input, blueprint, inventory }) } /// Within tests, set a seeded RNG for deterministic results. @@ -77,9 +88,128 @@ impl<'a> Planner<'a> { } fn do_plan(&mut self) -> Result<(), Error> { - // The only thing this planner currently knows how to do is add services - // to a sled that's missing them. So let's see if we're in that case. + // We perform planning in two loops: the first one turns expunged sleds + // into expunged zones, and the second one adds services. + + self.do_plan_expunge()?; + self.do_plan_add()?; + self.do_plan_decommission()?; + self.do_plan_cockroachdb_settings(); + + Ok(()) + } + + fn do_plan_decommission(&mut self) -> Result<(), Error> { + // Check for any sleds that are currently commissioned but can be + // decommissioned. Our gates for decommissioning are: + // + // 1. 
The policy indicates the sled has been removed (i.e., the policy + // is "expunged"; we may have other policies that satisfy this + // requirement in the future). + // 2. All zones associated with the sled have been marked expunged. + // 3. There are no instances assigned to this sled. This is blocked by + // omicron#4872, so today we omit this check entirely, as any sled + // that could be otherwise decommissioned that still has instances + // assigned to it needs support intervention for cleanup. + // 4. All disks associated with the sled have been marked expunged. This + // happens implicitly when a sled is expunged, so is covered by our + // first check. + for (sled_id, sled_details) in + self.input.all_sleds(SledFilter::Commissioned) + { + // Check 1: look for sleds that are expunged. + match (sled_details.policy, sled_details.state) { + // If the sled is still in service, don't decommission it. + (SledPolicy::InService { .. }, _) => continue, + // If the sled is already decommissioned it... why is it showing + // up when we ask for commissioned sleds? Warn, but don't try to + // decommission it again. + (SledPolicy::Expunged, SledState::Decommissioned) => { + error!( + self.log, + "decommissioned sled returned by \ + SledFilter::Commissioned"; + "sled_id" => %sled_id, + ); + continue; + } + // The sled is expunged but not yet decommissioned; fall through + // to check the rest of the criteria. + (SledPolicy::Expunged, SledState::Active) => (), + } + + // Check 2: have all this sled's zones been expunged? It's possible + // we ourselves have made this change, which is fine. + let all_zones_expunged = + self.blueprint.current_sled_zones(sled_id).all(|zone| { + zone.disposition == BlueprintZoneDisposition::Expunged + }); + + // Check 3: Are there any instances assigned to this sled? See + // comment above; while we wait for omicron#4872, we just assume + // there are no instances running. + let num_instances_assigned = 0; + + if all_zones_expunged && num_instances_assigned == 0 { + self.blueprint + .set_sled_state(sled_id, SledState::Decommissioned); + } + } + + Ok(()) + } + + fn do_plan_expunge(&mut self) -> Result<(), Error> { + let mut commissioned_sled_ids = BTreeSet::new(); + + // Remove services from sleds marked expunged. We use + // `SledFilter::Commissioned` and have a custom `needs_zone_expungement` + // function that allows us to produce better errors. + for (sled_id, sled_details) in + self.input.all_sleds(SledFilter::Commissioned) + { + commissioned_sled_ids.insert(sled_id); + + // Does this sled need zone expungement based on the details? + let Some(reason) = + needs_zone_expungement(sled_details.state, sled_details.policy) + else { + continue; + }; + + // Perform the expungement. + self.blueprint.expunge_all_zones_for_sled(sled_id, reason)?; + } + + // Check for any decommissioned sleds (i.e., sleds for which our + // blueprint has zones, but which are not in the input sled list). Any zones + // for decommissioned sleds must have already been expunged for + // decommissioning to have happened; fail if we find non-expunged zones + // associated with a decommissioned sled.
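// [Editorial sketch, not part of the change above] The decommissioning gates
// described in `do_plan_decommission` reduce to a small predicate. This is an
// illustration only: `may_decommission` is a hypothetical helper name (it is
// not introduced by this change), and the instance check (gate 3) is assumed
// to pass while omicron#4872 is open. It uses the same `SledPolicy` and
// `SledState` types already imported by this file.
fn may_decommission(
    policy: SledPolicy,
    state: SledState,
    all_zones_expunged: bool,
) -> bool {
    // Gate 1 (and, implicitly, gate 4): the sled must be expunged by policy
    // but not yet decommissioned.
    let expunged_but_active =
        matches!((policy, state), (SledPolicy::Expunged, SledState::Active));
    // Gate 2: every zone on the sled must already be marked expunged.
    expunged_but_active && all_zones_expunged
}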
+ for sled_id in self.blueprint.sled_ids_with_zones() { + if !commissioned_sled_ids.contains(&sled_id) { + let num_zones = self + .blueprint + .current_sled_zones(sled_id) + .filter(|zone| { + zone.disposition != BlueprintZoneDisposition::Expunged + }) + .count(); + if num_zones > 0 { + return Err( + Error::DecommissionedSledWithNonExpungedZones { + sled_id, + num_zones, + }, + ); + } + } + } + + Ok(()) + } + fn do_plan_add(&mut self) -> Result<(), Error> { // Internal DNS is a prerequisite for bringing up all other zones. At // this point, we assume that internal DNS (as a service) is already // functioning. At some point, this function will have to grow the @@ -96,25 +226,34 @@ impl<'a> Planner<'a> { // We will not mark sleds getting Crucible zones as ineligible; other // control plane service zones starting concurrently with Crucible zones // is fine. - let mut sleds_ineligible_for_services = BTreeSet::new(); - - for (sled_id, sled_info) in &self.policy.sleds { - // Decommissioned sleds don't get any services. (This is an - // explicit match so that when more states are added, this fails to - // compile.) - match sled_info.state { - SledState::Decommissioned => { - sleds_ineligible_for_services.insert(*sled_id); - continue; - } - SledState::Active => {} + let mut sleds_waiting_for_ntp_zone = BTreeSet::new(); + + for (sled_id, sled_resources) in + self.input.all_sled_resources(SledFilter::InService) + { + // First, we need to ensure that sleds are using their expected + // disks. This is necessary before we can allocate any zones. + if self.blueprint.sled_ensure_disks(sled_id, &sled_resources)? + == Ensure::Added + { + info!( + &self.log, + "altered physical disks"; + "sled_id" => %sled_id + ); + self.blueprint + .comment(&format!("sled {}: altered disks", sled_id)); + + // Note that this doesn't actually need to short-circuit the + // rest of the blueprint planning, as long as during execution + // we send this request first. } // Check for an NTP zone. Every sled should have one. If it's not // there, all we can do is provision that one zone. We have to wait // for that to succeed and synchronize the clock before we can // provision anything else. - if self.blueprint.sled_ensure_zone_ntp(*sled_id)? == Ensure::Added { + if self.blueprint.sled_ensure_zone_ntp(sled_id)? == Ensure::Added { info!( &self.log, "found sled missing NTP zone (will add one)"; @@ -125,7 +264,7 @@ impl<'a> Planner<'a> { // Don't make any other changes to this sled. However, this // change is compatible with any other changes to other sleds, // so we can "continue" here rather than "break". - sleds_ineligible_for_services.insert(*sled_id); + sleds_waiting_for_ntp_zone.insert(sled_id); continue; } @@ -149,7 +288,7 @@ impl<'a> Planner<'a> { let has_ntp_inventory = self .inventory .omicron_zones - .get(sled_id) + .get(&sled_id) .map(|sled_zones| { sled_zones.zones.zones.iter().any(|z| z.zone_type.is_ntp()) }) @@ -164,19 +303,19 @@ impl<'a> Planner<'a> { continue; } - // Every zpool on the sled should have a Crucible zone on it. + // Every provisionable zpool on the sled should have a Crucible zone on it. let mut ncrucibles_added = 0; - for zpool_name in &sled_info.zpools { + for zpool_id in sled_resources.all_zpools(ZpoolFilter::InService) { if self .blueprint - .sled_ensure_zone_crucible(*sled_id, zpool_name.clone())? + .sled_ensure_zone_crucible(sled_id, *zpool_id)? 
== Ensure::Added { info!( &self.log, "found sled zpool missing Crucible zone (will add one)"; "sled_id" => ?sled_id, - "zpool_name" => ?zpool_name, + "zpool_id" => ?zpool_id, ); ncrucibles_added += 1; } @@ -194,109 +333,103 @@ impl<'a> Planner<'a> { } } - // We've now placed all the services that should always exist on all - // sleds. Before moving on to make decisions about placing services that - // are _not_ present on all sleds, check the provision state of all our - // sleds so we can avoid any non-provisionable sleds under the - // assumption that there is something amiss with them. - sleds_ineligible_for_services.extend( - self.policy.sleds.iter().filter_map(|(sled_id, sled_info)| { - (!sled_info.is_eligible_for_discretionary_services()) - .then_some(*sled_id) - }), - ); - - self.ensure_correct_number_of_nexus_zones( - &sleds_ineligible_for_services, - )?; + self.ensure_correct_number_of_nexus_zones(&sleds_waiting_for_ntp_zone)?; Ok(()) } fn ensure_correct_number_of_nexus_zones( &mut self, - sleds_ineligible_for_services: &BTreeSet, + sleds_waiting_for_ntp_zone: &BTreeSet, ) -> Result<(), Error> { - // Bin every sled by the number of Nexus zones it currently has while - // counting the total number of Nexus zones. + // Count the number of Nexus zones on all in-service sleds. This will + // include sleds that are in service but not eligible for new services, + // but will not include sleds that have been expunged or decommissioned. let mut num_total_nexus = 0; - let mut sleds_by_num_nexus: BTreeMap> = - BTreeMap::new(); - for &sled_id in self.policy.sleds.keys() { + for sled_id in self.input.all_sled_ids(SledFilter::InService) { let num_nexus = self.blueprint.sled_num_nexus_zones(sled_id); num_total_nexus += num_nexus; - - // Only bin this sled if we're allowed to use it. If we have a sled - // we're not allowed to use that's already running a Nexus (seems - // fishy!), we counted its Nexus above but will ignore it here. - if !sleds_ineligible_for_services.contains(&sled_id) { - sleds_by_num_nexus.entry(num_nexus).or_default().push(sled_id); - } } // TODO-correctness What should we do if we have _too many_ Nexus // instances? For now, just log it the number of zones any time we have // at least the minimum number. - let nexus_to_add = - self.policy.target_nexus_zone_count.saturating_sub(num_total_nexus); + let mut nexus_to_add = self + .input + .target_nexus_zone_count() + .saturating_sub(num_total_nexus); if nexus_to_add == 0 { info!( self.log, "sufficient Nexus zones exist in plan"; - "desired_count" => self.policy.target_nexus_zone_count, + "desired_count" => self.input.target_nexus_zone_count(), "current_count" => num_total_nexus, ); return Ok(()); } - // Ensure we have at least one sled on which we can add Nexus zones. If - // we don't, we have nothing else to do. This isn't a hard error, - // because we might be waiting for NTP on all eligible sleds (although - // it would be weird, since we're presumably running from within Nexus - // on some sled). 
- if sleds_by_num_nexus.is_empty() { - warn!(self.log, "want to add Nexus zones, but no eligible sleds"); - return Ok(()); - } + let mut zone_placement = OmicronZonePlacement::new( + self.input + .all_sled_resources(SledFilter::Discretionary) + .filter(|(sled_id, _)| { + !sleds_waiting_for_ntp_zone.contains(&sled_id) + }) + .map(|(sled_id, sled_resources)| { + OmicronZonePlacementSledState { + sled_id, + num_zpools: sled_resources + .all_zpools(ZpoolFilter::InService) + .count(), + discretionary_zones: self + .blueprint + .current_sled_zones(sled_id) + .filter_map(|zone| { + DiscretionaryOmicronZone::from_zone_type( + &zone.zone_type, + ) + }) + .collect(), + } + }), + ); - // Build a map of sled -> new nexus zone count. - let mut sleds_to_change: BTreeMap = BTreeMap::new(); - - 'outer: for _ in 0..nexus_to_add { - // `sleds_by_num_nexus` is sorted by key already, and we want to - // pick from the lowest-numbered bin. We can just loop over its - // keys, expecting to stop on the first iteration, with the only - // exception being when we've removed all the sleds from a bin. - for (&num_nexus, sleds) in sleds_by_num_nexus.iter_mut() { - // `sleds` contains all sleds with the minimum number of Nexus - // zones. Pick one arbitrarily but deterministically. - let Some(sled_id) = sleds.pop() else { - // We already drained this bin; move on. - continue; - }; + // Build a map of sled -> new nexus zones to add. + let mut sleds_to_change: BTreeMap = BTreeMap::new(); - // This insert might overwrite an old value for this sled (e.g., - // in the "we have 1 sled and need to add many Nexus instances - // to it" case). That's fine. - sleds_to_change.insert(sled_id, num_nexus + 1); + for i in 0..nexus_to_add { + match zone_placement.place_zone(DiscretionaryOmicronZone::Nexus) { + Ok(sled_id) => { + *sleds_to_change.entry(sled_id).or_default() += 1; + } + Err(PlacementError::NoSledsEligible { .. }) => { + // We won't treat this as a hard error; it's possible + // (albeit unlikely?) we're in a weird state where we need + // more sleds or disks to come online, and we may need to be + // able to produce blueprints to achieve that status. + warn!( + self.log, + "failed to place all new desired Nexus instances"; + "placed" => i, + "wanted_to_place" => nexus_to_add, + ); - // Put this sled back in our map, but now with one more Nexus. - sleds_by_num_nexus - .entry(num_nexus + 1) - .or_default() - .push(sled_id); + // Adjust `nexus_to_add` downward so it's consistent with + // the number of Nexuses we're actually adding. + nexus_to_add = i; - continue 'outer; + break; + } } - - // This should be unreachable: it's only possible if we fail to find - // a nonempty vec in `sleds_by_num_nexus`, and we checked above that - // `sleds_by_num_nexus` is not empty. - unreachable!("logic error finding sleds for Nexus"); } // For each sled we need to change, actually do so. let mut total_added = 0; - for (sled_id, new_nexus_count) in sleds_to_change { + for (sled_id, additional_nexus_count) in sleds_to_change { + // TODO-cleanup This is awkward: the builder wants to know how many + // total Nexus zones go on a given sled, but we have a count of how + // many we want to add. Construct a new target count. Maybe the + // builder should provide a different interface here? + let new_nexus_count = self.blueprint.sled_num_nexus_zones(sled_id) + + additional_nexus_count; match self .blueprint .sled_ensure_zone_multiple_nexus(sled_id, new_nexus_count)? 
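// [Editorial sketch, not part of the change above] The Nexus-placement pass in
// `ensure_correct_number_of_nexus_zones` amounts to "add (target - current)
// zones, one at a time, always to the eligible sled that currently has the
// fewest, never exceeding one zone of this kind per zpool". A self-contained
// stand-in using only std types (the names `spread_zones` and
// `spread_zones_example` are illustrative, not code from this change):
use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn spread_zones(
    zones_per_sled: &[usize],
    zpools_per_sled: &[usize],
    to_add: usize,
) -> Vec<usize> {
    // Min-heap keyed on (current zone count, sled index); `Reverse` turns the
    // default max-heap into a min-heap and the index makes ties deterministic.
    let mut heap: BinaryHeap<Reverse<(usize, usize)>> = zones_per_sled
        .iter()
        .copied()
        .enumerate()
        .map(|(idx, zones)| Reverse((zones, idx)))
        .collect();
    let mut added = vec![0; zones_per_sled.len()];
    for _ in 0..to_add {
        let mut skipped = Vec::new();
        let mut placed = None;
        while let Some(Reverse((zones, idx))) = heap.pop() {
            // Enforce "at most one zone of this kind per zpool".
            if zones < zpools_per_sled[idx] {
                placed = Some((zones, idx));
                break;
            }
            skipped.push(Reverse((zones, idx)));
        }
        heap.extend(skipped);
        // No sled has spare capacity: keep what we could place and stop,
        // analogous to the `PlacementError::NoSledsEligible` arm above.
        let Some((zones, idx)) = placed else { break };
        added[idx] += 1;
        heap.push(Reverse((zones + 1, idx)));
    }
    added
}

// Worked example matching the spread expected by the tests below: three sleds
// with one Nexus each and a target of 14 means 11 additions, split 4/4/3.
#[test]
fn spread_zones_example() {
    let mut added = spread_zones(&[1, 1, 1], &[10, 10, 10], 14 - 3);
    added.sort_unstable();
    assert_eq!(added, vec![3, 4, 4]);
}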
@@ -327,6 +460,133 @@ impl<'a> Planner<'a> { Ok(()) } + + fn do_plan_cockroachdb_settings(&mut self) { + // Figure out what we should set the CockroachDB "preserve downgrade + // option" setting to based on the planning input. + // + // CockroachDB version numbers look like SemVer but are not. Major + // version numbers consist of the first *two* components, which + // represent the year and the Nth release that year. So the major + // version in "22.2.7" is "22.2". + // + // A given major version of CockroachDB is backward compatible with the + // storage format of the previous major version of CockroachDB. This is + // shown by the `version` setting, which displays the current storage + // format version. When `version` is '22.2', versions v22.2.x or v23.1.x + // can be used to run a node. This allows for rolling upgrades of nodes + // within the cluster and also preserves the ability to rollback until + // the new software version can be validated. + // + // By default, when all nodes of a cluster are upgraded to a new major + // version, the upgrade is "auto-finalized"; `version` is changed to the + // new major version, and rolling back to a previous major version of + // CockroachDB is no longer possible. + // + // The `cluster.preserve_downgrade_option` setting can be used to + // control this. This setting can only be set to the current value + // of the `version` setting, and when it is set, CockroachDB will not + // perform auto-finalization. To perform finalization and finish the + // upgrade, a client must reset the "preserve downgrade option" setting. + // Finalization occurs in the background, and the "preserve downgrade + // option" setting should not be changed again until finalization + // completes. + // + // We determine the appropriate value for `preserve_downgrade_option` + // based on: + // + // 1. the _target_ cluster version from the `Policy` (what we want to + // be running) + // 2. the `version` setting reported by CockroachDB (what we're + // currently running) + // + // by saying: + // + // - If we don't recognize the `version` CockroachDB reports, we will + // do nothing. + // - If our target version is _equal to_ what CockroachDB reports, + // we will ensure `preserve_downgrade_option` is set to the current + // `version`. This prevents auto-finalization when we deploy the next + // major version of CockroachDB as part of an update. + // - If our target version is _older than_ what CockroachDB reports, we + // will also ensure `preserve_downgrade_option` is set to the current + // `version`. (This will happen on newly-initialized clusters when + // we deploy a version of CockroachDB that is newer than our current + // policy.) + // - If our target version is _newer than_ what CockroachDB reports, we + // will ensure `preserve_downgrade_option` is set to the default value + // (the empty string). This will trigger finalization. + + let policy = self.input.target_cockroachdb_cluster_version(); + let CockroachDbSettings { version, .. } = + self.input.cockroachdb_settings(); + let value = match CockroachDbClusterVersion::from_str(version) { + // The current version is known to us. + Ok(version) => { + if policy > version { + // Ensure `cluster.preserve_downgrade_option` is reset so we + // can upgrade. + CockroachDbPreserveDowngrade::AllowUpgrade + } else { + // The cluster version is equal to or newer than the + // version we want by policy. In either case, ensure + // `cluster.preserve_downgrade_option` is set. 
+ CockroachDbPreserveDowngrade::Set(version) + } + } + // The current version is unknown to us; we are likely in the middle + // of an cluster upgrade. + Err(_) => CockroachDbPreserveDowngrade::DoNotModify, + }; + self.blueprint.cockroachdb_preserve_downgrade(value); + info!( + &self.log, + "will ensure cockroachdb setting"; + "setting" => "cluster.preserve_downgrade_option", + "value" => ?value, + ); + + // Hey! Listen! + // + // If we need to manage more CockroachDB settings, we should ensure + // that no settings will be modified if we don't recognize the current + // cluster version -- we're likely in the middle of an upgrade! + // + // https://www.cockroachlabs.com/docs/stable/cluster-settings#change-a-cluster-setting + } +} + +/// Returns `Some(reason)` if the sled needs its zones to be expunged, +/// based on the policy and state. +fn needs_zone_expungement( + state: SledState, + policy: SledPolicy, +) -> Option { + match state { + SledState::Active => {} + SledState::Decommissioned => { + // A decommissioned sled that still has resources attached to it is + // an illegal state, but representable. If we see a sled in this + // state, we should still expunge all zones in it, but parent code + // should warn on it. + return Some(ZoneExpungeReason::SledDecommissioned { policy }); + } + } + + match policy { + SledPolicy::InService { .. } => None, + SledPolicy::Expunged => Some(ZoneExpungeReason::SledExpunged), + } +} + +/// The reason a sled's zones need to be expunged. +/// +/// This is used only for introspection and logging -- it's not part of the +/// logical flow. +#[derive(Copy, Clone, Debug)] +pub(crate) enum ZoneExpungeReason { + SledDecommissioned { policy: SledPolicy }, + SledExpunged, } #[cfg(test)] @@ -334,20 +594,37 @@ mod test { use super::Planner; use crate::blueprint_builder::test::verify_blueprint; use crate::blueprint_builder::test::DEFAULT_N_SLEDS; - use crate::blueprint_builder::BlueprintBuilder; use crate::example::example; use crate::example::ExampleSystem; use crate::system::SledBuilder; + use chrono::NaiveDateTime; + use chrono::TimeZone; + use chrono::Utc; use expectorate::assert_contents; use nexus_inventory::now_db_precision; + use nexus_types::deployment::blueprint_zone_type; + use nexus_types::deployment::BlueprintDiff; + use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneFilter; + use nexus_types::deployment::BlueprintZoneType; + use nexus_types::deployment::CockroachDbClusterVersion; + use nexus_types::deployment::CockroachDbPreserveDowngrade; + use nexus_types::deployment::CockroachDbSettings; + use nexus_types::deployment::OmicronZoneNetworkResources; use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledProvisionPolicy; use nexus_types::external_api::views::SledState; - use nexus_types::inventory::OmicronZoneType; use nexus_types::inventory::OmicronZonesFound; use omicron_common::api::external::Generation; + use omicron_common::disk::DiskIdentity; use omicron_test_utils::dev::test_setup_log; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::PhysicalDiskUuid; + use omicron_uuid_kinds::SledUuid; + use omicron_uuid_kinds::ZpoolUuid; + use sled_agent_client::ZoneKind; + use std::mem; + use typed_rng::TypedUuidRng; /// Runs through a basic sequence of blueprints for adding a sled #[test] @@ -355,37 +632,21 @@ mod test { static TEST_NAME: &str = "planner_basic_add_sled"; let logctx = test_setup_log(TEST_NAME); - // For our purposes, we don't care about the DNS 
generations. - let internal_dns_version = Generation::new(); - let external_dns_version = Generation::new(); - - // Use our example inventory collection. + // Use our example system. let mut example = ExampleSystem::new(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + let blueprint1 = &example.blueprint; + verify_blueprint(blueprint1); - // Build the initial blueprint. We don't bother verifying it here - // because there's a separate test for that. - let blueprint1 = - BlueprintBuilder::build_initial_from_collection_seeded( - &example.collection, - internal_dns_version, - external_dns_version, - &example.policy, - "the_test", - (TEST_NAME, "bp1"), - ) - .expect("failed to create initial blueprint"); - verify_blueprint(&blueprint1); + println!("{}", blueprint1.display()); // Now run the planner. It should do nothing because our initial // system didn't have any issues that the planner currently knows how to // fix. let blueprint2 = Planner::new_based_on( logctx.log.clone(), - &blueprint1, - internal_dns_version, - external_dns_version, - &example.policy, + blueprint1, + &example.input, "no-op?", &example.collection, ) @@ -394,26 +655,30 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint1.diff_sleds(&blueprint2); + let diff = blueprint2.diff_since_blueprint(blueprint1); println!("1 -> 2 (expected no changes):\n{}", diff.display()); - assert_eq!(diff.sleds_added().count(), 0); - assert_eq!(diff.sleds_removed().count(), 0); - assert_eq!(diff.sleds_changed().count(), 0); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); + assert_eq!(diff.zones.added.len(), 0); + assert_eq!(diff.zones.removed.len(), 0); + assert_eq!(diff.zones.modified.len(), 0); + assert_eq!(diff.zones.errors.len(), 0); + assert_eq!(diff.physical_disks.added.len(), 0); + assert_eq!(diff.physical_disks.removed.len(), 0); verify_blueprint(&blueprint2); // Now add a new sled. - let new_sled_id = example.sled_rng.next_uuid(); + let new_sled_id = example.sled_rng.next(); let _ = example.system.sled(SledBuilder::new().id(new_sled_id)).unwrap(); - let policy = example.system.to_policy().unwrap(); + let input = example.system.to_planning_input_builder().unwrap().build(); // Check that the first step is to add an NTP zone let blueprint3 = Planner::new_based_on( logctx.log.clone(), &blueprint2, - internal_dns_version, - external_dns_version, - &policy, + &input, "test: add NTP?", &example.collection, ) @@ -422,7 +687,7 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint2.diff_sleds(&blueprint3); + let diff = blueprint3.diff_since_blueprint(&blueprint2); println!( "2 -> 3 (expect new NTP zone on new sled):\n{}", diff.display() @@ -431,20 +696,18 @@ mod test { "tests/output/planner_basic_add_sled_2_3.txt", &diff.display().to_string(), ); - let sleds = diff.sleds_added().collect::>(); - let (sled_id, sled_zones) = sleds[0]; + assert_eq!(diff.sleds_added.len(), 1); + let sled_id = *diff.sleds_added.first().unwrap(); + let sled_zones = diff.zones.added.get(&sled_id).unwrap(); // We have defined elsewhere that the first generation contains no // zones. So the first one with zones must be newer. See // OmicronZonesConfig::INITIAL_GENERATION. - assert!(sled_zones.generation > Generation::new()); + assert!(sled_zones.generation_after.unwrap() > Generation::new()); assert_eq!(sled_id, new_sled_id); assert_eq!(sled_zones.zones.len(), 1); - assert!(matches!( - sled_zones.zones[0].config.zone_type, - OmicronZoneType::InternalNtp { .. 
} - )); - assert_eq!(diff.sleds_removed().count(), 0); - assert_eq!(diff.sleds_changed().count(), 0); + assert!(matches!(sled_zones.zones[0].kind(), ZoneKind::InternalNtp)); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); verify_blueprint(&blueprint3); // Check that with no change in inventory, the planner makes no changes. @@ -453,9 +716,7 @@ let blueprint4 = Planner::new_based_on( logctx.log.clone(), &blueprint3, - internal_dns_version, - external_dns_version, - &policy, + &input, "test: add nothing more", &example.collection, ) @@ -463,11 +724,11 @@ .with_rng_seed((TEST_NAME, "bp4")) .plan() .expect("failed to plan"); - let diff = blueprint3.diff_sleds(&blueprint4); + let diff = blueprint4.diff_since_blueprint(&blueprint3); println!("3 -> 4 (expected no changes):\n{}", diff.display()); - assert_eq!(diff.sleds_added().count(), 0); - assert_eq!(diff.sleds_removed().count(), 0); - assert_eq!(diff.sleds_changed().count(), 0); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); verify_blueprint(&blueprint4); // Now update the inventory to have the requested NTP zone. @@ -485,7 +746,7 @@ .get(&new_sled_id) .expect("blueprint should contain zones for new sled") .to_omicron_zones_config( - BlueprintZoneFilter::SledAgentPut + BlueprintZoneFilter::ShouldBeRunning ) } ) @@ -495,9 +756,7 @@ let blueprint5 = Planner::new_based_on( logctx.log.clone(), &blueprint3, - internal_dns_version, - external_dns_version, - &policy, + &input, "test: add Crucible zones?", &collection, ) @@ -506,28 +765,30 @@ .plan() .expect("failed to plan"); - let diff = blueprint3.diff_sleds(&blueprint5); + let diff = blueprint5.diff_since_blueprint(&blueprint3); println!("3 -> 5 (expect Crucible zones):\n{}", diff.display()); assert_contents( "tests/output/planner_basic_add_sled_3_5.txt", &diff.display().to_string(), ); - assert_eq!(diff.sleds_added().count(), 0); - assert_eq!(diff.sleds_removed().count(), 0); - let sleds = diff.sleds_changed().collect::>(); - assert_eq!(sleds.len(), 1); - let (sled_id, sled_changes) = &sleds[0]; + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 1); + let sled_id = diff.sleds_modified.first().unwrap(); + assert_eq!(*sled_id, new_sled_id); + // No removed or modified zones on this sled + assert!(!diff.zones.removed.contains_key(sled_id)); + assert!(!diff.zones.modified.contains_key(sled_id)); + // 10 Crucible zones added + let zones_added = diff.zones.added.get(sled_id).unwrap(); assert_eq!( - sled_changes.generation_after, - sled_changes.generation_before.next() + zones_added.generation_after.unwrap(), + zones_added.generation_before.unwrap().next() ); - assert_eq!(*sled_id, new_sled_id); - assert_eq!(sled_changes.zones_removed().count(), 0); - assert_eq!(sled_changes.zones_changed().count(), 0); - let zones = sled_changes.zones_added().collect::>(); - assert_eq!(zones.len(), 10); - for zone in &zones { - if !zone.config.zone_type.is_crucible() { + + assert_eq!(zones_added.zones.len(), 10); + for zone in &zones_added.zones { + if zone.kind() != ZoneKind::Crucible { panic!("unexpectedly added a non-Crucible zone: {zone:?}"); } } @@ -537,9 +798,7 @@ let blueprint6 = Planner::new_based_on( logctx.log.clone(), &blueprint5, - internal_dns_version, - external_dns_version, - &policy, + &input, "test: no-op?", &collection, ) @@ -548,11 +807,11 @@
mod test { .plan() .expect("failed to plan"); - let diff = blueprint5.diff_sleds(&blueprint6); + let diff = blueprint6.diff_since_blueprint(&blueprint5); println!("5 -> 6 (expect no changes):\n{}", diff.display()); - assert_eq!(diff.sleds_added().count(), 0); - assert_eq!(diff.sleds_removed().count(), 0); - assert_eq!(diff.sleds_changed().count(), 0); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); verify_blueprint(&blueprint6); logctx.cleanup_successful(); @@ -565,41 +824,64 @@ mod test { static TEST_NAME: &str = "planner_add_multiple_nexus_to_one_sled"; let logctx = test_setup_log(TEST_NAME); - // For our purposes, we don't care about the DNS generations. - let internal_dns_version = Generation::new(); - let external_dns_version = Generation::new(); - - // Use our example inventory collection as a starting point, but strip - // it down to just one sled. - let (sled_id, collection, mut policy) = { - let (mut collection, mut policy) = + // Use our example system as a starting point, but strip it down to just + // one sled. + let (sled_id, blueprint1, collection, input) = { + let (mut collection, input, mut blueprint) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); // Pick one sled ID to keep and remove the rest. + let mut builder = input.into_builder(); let keep_sled_id = - policy.sleds.keys().next().copied().expect("no sleds"); - policy.sleds.retain(|&k, _v| keep_sled_id == k); + builder.sleds().keys().next().copied().expect("no sleds"); + builder.sleds_mut().retain(|&k, _v| keep_sled_id == k); collection.sled_agents.retain(|&k, _v| keep_sled_id == k); collection.omicron_zones.retain(|&k, _v| keep_sled_id == k); assert_eq!(collection.sled_agents.len(), 1); assert_eq!(collection.omicron_zones.len(), 1); + blueprint.blueprint_zones.retain(|k, _v| keep_sled_id == *k); + blueprint.blueprint_disks.retain(|k, _v| keep_sled_id == *k); + + // Also remove all the networking resources for the zones we just + // stripped out; i.e., only keep those for `keep_sled_id`. + let mut new_network_resources = OmicronZoneNetworkResources::new(); + let old_network_resources = builder.network_resources_mut(); + for old_ip in old_network_resources.omicron_zone_external_ips() { + if blueprint.all_omicron_zones(BlueprintZoneFilter::All).any( + |(_, zone)| { + zone.zone_type + .external_networking() + .map(|(ip, _nic)| ip.id() == old_ip.ip.id()) + .unwrap_or(false) + }, + ) { + new_network_resources + .add_external_ip(old_ip.zone_id, old_ip.ip) + .expect("copied IP to new input"); + } + } + for old_nic in old_network_resources.omicron_zone_nics() { + if blueprint.all_omicron_zones(BlueprintZoneFilter::All).any( + |(_, zone)| { + zone.zone_type + .external_networking() + .map(|(_ip, nic)| { + nic.id == old_nic.nic.id.into_untyped_uuid() + }) + .unwrap_or(false) + }, + ) { + new_network_resources + .add_nic(old_nic.zone_id, old_nic.nic) + .expect("copied NIC to new input"); + } + } + mem::swap(old_network_resources, &mut &mut new_network_resources); - (keep_sled_id, collection, policy) + (keep_sled_id, blueprint, collection, builder.build()) }; - // Build the initial blueprint. - let blueprint1 = - BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - internal_dns_version, - external_dns_version, - &policy, - "the_test", - (TEST_NAME, "bp1"), - ) - .expect("failed to create initial blueprint"); - // This blueprint should only have 1 Nexus instance on the one sled we // kept. 
assert_eq!(blueprint1.blueprint_zones.len(), 1); @@ -610,21 +892,21 @@ mod test { .expect("missing kept sled") .zones .iter() - .filter(|z| z.config.zone_type.is_nexus()) + .filter(|z| z.zone_type.is_nexus()) .count(), 1 ); // Now run the planner. It should add additional Nexus instances to the // one sled we have. - policy.target_nexus_zone_count = 5; + let mut builder = input.into_builder(); + builder.policy_mut().target_nexus_zone_count = 5; + let input = builder.build(); let blueprint2 = Planner::new_based_on( logctx.log.clone(), &blueprint1, - internal_dns_version, - external_dns_version, - &policy, - "add more Nexus", + &input, + "test_blueprint2", &collection, ) .expect("failed to create planner") @@ -632,20 +914,24 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint1.diff_sleds(&blueprint2); + let diff = blueprint2.diff_since_blueprint(&blueprint1); println!("1 -> 2 (added additional Nexus zones):\n{}", diff.display()); - assert_eq!(diff.sleds_added().count(), 0); - assert_eq!(diff.sleds_removed().count(), 0); - let mut sleds = diff.sleds_changed().collect::>(); - assert_eq!(sleds.len(), 1); - let (changed_sled_id, sled_changes) = sleds.pop().unwrap(); - assert_eq!(changed_sled_id, sled_id); - assert_eq!(sled_changes.zones_removed().count(), 0); - assert_eq!(sled_changes.zones_changed().count(), 0); - let zones = sled_changes.zones_added().collect::>(); - assert_eq!(zones.len(), policy.target_nexus_zone_count - 1); - for zone in &zones { - if !zone.config.zone_type.is_nexus() { + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 1); + let changed_sled_id = diff.sleds_modified.first().unwrap(); + + // TODO-cleanup use `TypedUuid` everywhere + assert_eq!(*changed_sled_id, sled_id); + assert_eq!(diff.zones.removed.len(), 0); + assert_eq!(diff.zones.modified.len(), 0); + let zones_added = diff.zones.added.get(changed_sled_id).unwrap(); + assert_eq!( + zones_added.zones.len(), + input.target_nexus_zone_count() - 1 + ); + for zone in &zones_added.zones { + if zone.kind() != ZoneKind::Nexus { panic!("unexpectedly added a non-Nexus zone: {zone:?}"); } } @@ -661,22 +947,10 @@ mod test { "planner_spread_additional_nexus_zones_across_sleds"; let logctx = test_setup_log(TEST_NAME); - // Use our example inventory collection as a starting point. - let (collection, mut policy) = + // Use our example system as a starting point. + let (collection, input, blueprint1) = example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); - // Build the initial blueprint. - let blueprint1 = - BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - &policy, - "the_test", - (TEST_NAME, "bp1"), - ) - .expect("failed to create initial blueprint"); - // This blueprint should only have 3 Nexus zones: one on each sled. assert_eq!(blueprint1.blueprint_zones.len(), 3); for sled_config in blueprint1.blueprint_zones.values() { @@ -684,21 +958,21 @@ mod test { sled_config .zones .iter() - .filter(|z| z.config.zone_type.is_nexus()) + .filter(|z| z.zone_type.is_nexus()) .count(), 1 ); } // Now run the planner with a high number of target Nexus zones. 
- policy.target_nexus_zone_count = 14; + let mut builder = input.into_builder(); + builder.policy_mut().target_nexus_zone_count = 14; + let input = builder.build(); let blueprint2 = Planner::new_based_on( logctx.log.clone(), &blueprint1, - Generation::new(), - Generation::new(), - &policy, - "add more Nexus", + &input, + "test_blueprint2", &collection, ) .expect("failed to create planner") @@ -706,22 +980,21 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint1.diff_sleds(&blueprint2); + let diff = blueprint2.diff_since_blueprint(&blueprint1); println!("1 -> 2 (added additional Nexus zones):\n{}", diff.display()); - assert_eq!(diff.sleds_added().count(), 0); - assert_eq!(diff.sleds_removed().count(), 0); - let sleds = diff.sleds_changed().collect::>(); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 3); // All 3 sleds should get additional Nexus zones. We expect a total of // 11 new Nexus zones, which should be spread evenly across the three // sleds (two should get 4 and one should get 3). - assert_eq!(sleds.len(), 3); let mut total_new_nexus_zones = 0; - for (sled_id, sled_changes) in sleds { - assert_eq!(sled_changes.zones_removed().count(), 0); - assert_eq!(sled_changes.zones_changed().count(), 0); - let zones = sled_changes.zones_added().collect::>(); - match zones.len() { + for sled_id in diff.sleds_modified { + assert!(!diff.zones.removed.contains_key(&sled_id)); + assert!(!diff.zones.modified.contains_key(&sled_id)); + let zones_added = &diff.zones.added.get(&sled_id).unwrap().zones; + match zones_added.len() { n @ (3 | 4) => { total_new_nexus_zones += n; } @@ -729,8 +1002,8 @@ mod test { panic!("unexpected number of zones added to {sled_id}: {n}") } } - for zone in &zones { - if !zone.config.zone_type.is_nexus() { + for zone in zones_added { + if zone.kind() != ZoneKind::Nexus { panic!("unexpectedly added a non-Nexus zone: {zone:?}"); } } @@ -740,6 +1013,89 @@ mod test { logctx.cleanup_successful(); } + #[test] + fn test_crucible_allocation_skips_nonprovisionable_disks() { + static TEST_NAME: &str = + "planner_crucible_allocation_skips_nonprovisionable_disks"; + let logctx = test_setup_log(TEST_NAME); + + // Create an example system with a single sled + let (collection, input, blueprint1) = + example(&logctx.log, TEST_NAME, 1); + + let mut builder = input.into_builder(); + + // Avoid churning on the quantity of Nexus zones - we're okay staying at + // one. + builder.policy_mut().target_nexus_zone_count = 1; + + // Make generated disk ids deterministic + let mut disk_rng = + TypedUuidRng::from_seed(TEST_NAME, "NewPhysicalDisks"); + let mut new_sled_disk = |policy| nexus_types::deployment::SledDisk { + disk_identity: DiskIdentity { + vendor: "test-vendor".to_string(), + serial: "test-serial".to_string(), + model: "test-model".to_string(), + }, + disk_id: PhysicalDiskUuid::from(disk_rng.next()), + policy, + state: nexus_types::external_api::views::PhysicalDiskState::Active, + }; + + let (_, sled_details) = builder.sleds_mut().iter_mut().next().unwrap(); + + // Inject some new disks into the input. + // + // These counts are arbitrary, as long as they're non-zero + // for the sake of the test. 
+ + const NEW_IN_SERVICE_DISKS: usize = 2; + const NEW_EXPUNGED_DISKS: usize = 1; + + let mut zpool_rng = TypedUuidRng::from_seed(TEST_NAME, "NewZpools"); + for _ in 0..NEW_IN_SERVICE_DISKS { + sled_details.resources.zpools.insert( + ZpoolUuid::from(zpool_rng.next()), + new_sled_disk(nexus_types::external_api::views::PhysicalDiskPolicy::InService), + ); + } + for _ in 0..NEW_EXPUNGED_DISKS { + sled_details.resources.zpools.insert( + ZpoolUuid::from(zpool_rng.next()), + new_sled_disk(nexus_types::external_api::views::PhysicalDiskPolicy::Expunged), + ); + } + + let input = builder.build(); + + let blueprint2 = Planner::new_based_on( + logctx.log.clone(), + &blueprint1, + &input, + "test: some new disks", + &collection, + ) + .expect("failed to create planner") + .with_rng_seed((TEST_NAME, "bp2")) + .plan() + .expect("failed to plan"); + + let diff = blueprint2.diff_since_blueprint(&blueprint1); + println!("1 -> 2 (some new disks, one expunged):\n{}", diff.display()); + assert_eq!(diff.sleds_modified.len(), 1); + let sled_id = diff.sleds_modified.first().unwrap(); + + // We should be adding a Crucible zone for each new in-service disk. + assert_eq!( + diff.zones.added.get(sled_id).unwrap().zones.len(), + NEW_IN_SERVICE_DISKS + ); + assert!(!diff.zones.removed.contains_key(sled_id)); + + logctx.cleanup_successful(); + } + /// Check that the planner will skip non-provisionable sleds when allocating /// extra Nexus zones #[test] @@ -748,25 +1104,14 @@ mod test { "planner_nexus_allocation_skips_nonprovisionable_sleds"; let logctx = test_setup_log(TEST_NAME); - // Use our example inventory collection as a starting point. + // Use our example system as a starting point. // // Request two extra sleds here so we test non-provisionable, expunged, // and decommissioned sleds. (When we add more kinds of // non-provisionable states in the future, we'll have to add more // sleds.) - let (collection, mut policy) = example(&logctx.log, TEST_NAME, 5); - - // Build the initial blueprint. - let blueprint1 = - BlueprintBuilder::build_initial_from_collection_seeded( - &collection, - Generation::new(), - Generation::new(), - &policy, - "the_test", - (TEST_NAME, "bp1"), - ) - .expect("failed to create initial blueprint"); + let (collection, input, mut blueprint1) = + example(&logctx.log, TEST_NAME, 5); // This blueprint should only have 5 Nexus zones: one on each sled. assert_eq!(blueprint1.blueprint_zones.len(), 5); @@ -775,7 +1120,7 @@ mod test { sled_config .zones .iter() - .filter(|z| z.config.zone_type.is_nexus()) + .filter(|z| z.zone_type.is_nexus()) .count(), 1 ); @@ -783,44 +1128,57 @@ mod test { // Arbitrarily choose some of the sleds and mark them non-provisionable // in various ways. 
- let mut sleds_iter = policy.sleds.iter_mut(); + let mut builder = input.into_builder(); + let mut sleds_iter = builder.sleds_mut().iter_mut(); let nonprovisionable_sled_id = { - let (sled_id, resources) = sleds_iter.next().expect("no sleds"); - resources.policy = SledPolicy::InService { + let (sled_id, details) = sleds_iter.next().expect("no sleds"); + details.policy = SledPolicy::InService { provision_policy: SledProvisionPolicy::NonProvisionable, }; *sled_id }; + println!("1 -> 2: marked non-provisionable {nonprovisionable_sled_id}"); let expunged_sled_id = { - let (sled_id, resources) = sleds_iter.next().expect("no sleds"); - resources.policy = SledPolicy::Expunged; + let (sled_id, details) = sleds_iter.next().expect("no sleds"); + details.policy = SledPolicy::Expunged; *sled_id }; + println!("1 -> 2: expunged {expunged_sled_id}"); let decommissioned_sled_id = { - let (sled_id, resources) = sleds_iter.next().expect("no sleds"); - resources.state = SledState::Decommissioned; + let (sled_id, details) = sleds_iter.next().expect("no sleds"); + details.state = SledState::Decommissioned; + + // Decommissioned sleds can only occur if their zones have been + // expunged, so lie and pretend like that already happened + // (otherwise the planner will rightfully fail to generate a new + // blueprint, because we're feeding it invalid inputs). + for zone in + &mut blueprint1.blueprint_zones.get_mut(sled_id).unwrap().zones + { + zone.disposition = BlueprintZoneDisposition::Expunged; + } + *sled_id }; + println!("1 -> 2: decommissioned {decommissioned_sled_id}"); // Now run the planner with a high number of target Nexus zones. The - // number (16) is chosen such that: - // - // * we start with 5 sleds - // * we need to add 11 Nexus zones - // * there are two sleds eligible for provisioning - // * => 5 or 6 new Nexus zones per sled + // number (9) is chosen such that: // - // When the planner gets smarter about removing zones from expunged - // and/or removed sleds, we'll have to adjust this number. - policy.target_nexus_zone_count = 16; - let blueprint2 = Planner::new_based_on( + // * we start with 5 sleds with 1 Nexus each + // * we take two sleds out of service (one expunged, one + // decommissioned), so we're down to 3 in-service Nexuses: we need to + // add 6 to get to the new policy target of 9 + // * of the remaining 3 sleds, only 2 are eligible for provisioning + // * each of those 2 sleds should get exactly 3 new Nexuses + builder.policy_mut().target_nexus_zone_count = 9; + let input = builder.build(); + let mut blueprint2 = Planner::new_based_on( logctx.log.clone(), &blueprint1, - Generation::new(), - Generation::new(), - &policy, - "add more Nexus", + &input, + "test_blueprint2", &collection, ) .expect("failed to create planner") @@ -828,45 +1186,406 @@ mod test { .plan() .expect("failed to plan"); - let diff = blueprint1.diff_sleds(&blueprint2); - println!("1 -> 2 (added additional Nexus zones):\n{}", diff.display()); + // Define a time_created for consistent output across runs. 
+ blueprint2.time_created = + Utc.from_utc_datetime(&NaiveDateTime::UNIX_EPOCH); + + assert_contents( + "tests/output/planner_nonprovisionable_bp2.txt", + &blueprint2.display().to_string(), + ); + + let diff = blueprint2.diff_since_blueprint(&blueprint1); + println!( + "1 -> 2 (added additional Nexus zones, take 2 sleds out of service):\n{}", + diff.display() + ); assert_contents( "tests/output/planner_nonprovisionable_1_2.txt", &diff.display().to_string(), ); - assert_eq!(diff.sleds_added().count(), 0); - assert_eq!(diff.sleds_removed().count(), 0); - let sleds = diff.sleds_changed().collect::>(); - // Only 2 of the 3 sleds should get additional Nexus zones. We expect a - // total of 12 new Nexus zones, which should be spread evenly across the - // two sleds (one gets 6 and the other gets 5), while the + // The expunged and decommissioned sleds should have had all zones be + // marked as expunged. (Not removed! Just marked as expunged.) + // + // Note that at this point we're neither removing zones from the + // blueprint nor marking sleds as decommissioned -- we still need to do + // cleanup, and we aren't performing garbage collection on zones or + // sleds at the moment. + + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + + assert_all_zones_expunged(&diff, expunged_sled_id, "expunged sled"); + + // Only 2 of the 3 remaining sleds (not the non-provisionable sled) + // should get additional Nexus zones. We expect a total of 6 new Nexus + // zones, which should be split evenly between the two sleds, while the // non-provisionable sled should be unchanged. - assert_eq!(sleds.len(), 2); + let mut remaining_modified_sleds = diff.sleds_modified.clone(); + remaining_modified_sleds.remove(&expunged_sled_id); + remaining_modified_sleds.remove(&decommissioned_sled_id); + + assert_eq!(remaining_modified_sleds.len(), 2); let mut total_new_nexus_zones = 0; - for (sled_id, sled_changes) in sleds { + for sled_id in remaining_modified_sleds { assert!(sled_id != nonprovisionable_sled_id); assert!(sled_id != expunged_sled_id); assert!(sled_id != decommissioned_sled_id); - assert_eq!(sled_changes.zones_removed().count(), 0); - assert_eq!(sled_changes.zones_changed().count(), 0); - let zones = sled_changes.zones_added().collect::>(); - match zones.len() { - n @ (5 | 6) => { - total_new_nexus_zones += n; - } - n => { - panic!("unexpected number of zones added to {sled_id}: {n}") - } - } - for zone in &zones { - let OmicronZoneType::Nexus { .. } = zone.config.zone_type - else { + assert!(!diff.zones.removed.contains_key(&sled_id)); + assert!(!diff.zones.modified.contains_key(&sled_id)); + let zones = &diff.zones.added.get(&sled_id).unwrap().zones; + for zone in zones { + if ZoneKind::Nexus != zone.kind() { panic!("unexpectedly added a non-Crucible zone: {zone:?}"); }; } + if zones.len() == 3 { + total_new_nexus_zones += 3; + } else { + panic!( + "unexpected number of zones added to {sled_id}: {}", + zones.len() + ); + } + } + assert_eq!(total_new_nexus_zones, 6); + + // --- + + // Also poke at some of the config by hand; we'll use this to test out + // diff output. This isn't a real blueprint, just one that we're + // creating to test diff output. 
+ // + // Some of the things we're testing here: + // + // * modifying zones + // * removing zones + // * removing sleds + // * for modified sleds' zone config generation, both a bump and the + // generation staying the same (the latter should produce a warning) + let mut blueprint2a = blueprint2.clone(); + + enum NextCrucibleMutate { + Modify, + Remove, + Done, + } + let mut next = NextCrucibleMutate::Modify; + + // Leave the non-provisionable sled's generation alone. + let zones = &mut blueprint2a + .blueprint_zones + .get_mut(&nonprovisionable_sled_id) + .unwrap() + .zones; + + zones.retain_mut(|zone| { + if let BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + internal_address, + .. + }) = &mut zone.zone_type + { + // Change the internal address. + let mut segments = internal_address.ip().segments(); + segments[0] = segments[0].wrapping_add(1); + internal_address.set_ip(segments.into()); + true + } else if let BlueprintZoneType::Crucible(_) = zone.zone_type { + match next { + NextCrucibleMutate::Modify => { + zone.disposition = BlueprintZoneDisposition::Quiesced; + next = NextCrucibleMutate::Remove; + true + } + NextCrucibleMutate::Remove => { + next = NextCrucibleMutate::Done; + false + } + NextCrucibleMutate::Done => true, + } + } else if let BlueprintZoneType::InternalNtp(_) = + &mut zone.zone_type + { + // Change the underlay IP. + let mut segments = zone.underlay_address.segments(); + segments[0] += 1; + zone.underlay_address = segments.into(); + true + } else { + true + } + }); + + let expunged_zones = + blueprint2a.blueprint_zones.get_mut(&expunged_sled_id).unwrap(); + expunged_zones.zones.clear(); + expunged_zones.generation = expunged_zones.generation.next(); + + blueprint2a.blueprint_zones.remove(&decommissioned_sled_id); + + blueprint2a.external_dns_version = + blueprint2a.external_dns_version.next(); + + let diff = blueprint2a.diff_since_blueprint(&blueprint2); + println!("2 -> 2a (manually modified zones):\n{}", diff.display()); + assert_contents( + "tests/output/planner_nonprovisionable_2_2a.txt", + &diff.display().to_string(), + ); + + // --- + + logctx.cleanup_successful(); + } + + fn assert_all_zones_expunged( + diff: &BlueprintDiff, + expunged_sled_id: SledUuid, + desc: &str, + ) { + assert!( + !diff.zones.added.contains_key(&expunged_sled_id), + "for {desc}, no zones should have been added to blueprint" + ); + + // A zone disposition going to expunged *does not* mean that the + // zone is actually removed, i.e. `zones_removed` is still 0. Any + // zone removal will be part of some future garbage collection + // process that isn't currently defined. + + assert!( + !diff.zones.removed.contains_key(&expunged_sled_id), + "for {desc}, no zones should have been removed from blueprint" + ); + + // Run through all the common zones and ensure that all of them + // have been marked expunged. + let modified_zones = + diff.zones.modified.get(&expunged_sled_id).unwrap(); + assert_eq!( + modified_zones.generation_before.next(), + modified_zones.generation_after, + "for {desc}, generation should have been bumped" + ); + + for modified_zone in &modified_zones.zones { + assert_eq!( + modified_zone.zone.disposition(), + BlueprintZoneDisposition::Expunged, + "for {desc}, zone {} should have been marked expunged", + modified_zone.zone.id() + ); + } + } + + #[test] + fn planner_decommissions_sleds() { + static TEST_NAME: &str = "planner_decommissions_sleds"; + let logctx = test_setup_log(TEST_NAME); + + // Use our example system as a starting point. 
+ let (collection, input, blueprint1) = + example(&logctx.log, TEST_NAME, DEFAULT_N_SLEDS); + + // Expunge one of the sleds. + let mut builder = input.into_builder(); + let expunged_sled_id = { + let mut iter = builder.sleds_mut().iter_mut(); + let (sled_id, details) = iter.next().expect("at least one sled"); + details.policy = SledPolicy::Expunged; + *sled_id + }; + + let input = builder.build(); + let mut blueprint2 = Planner::new_based_on( + logctx.log.clone(), + &blueprint1, + &input, + "test_blueprint2", + &collection, + ) + .expect("created planner") + .with_rng_seed((TEST_NAME, "bp2")) + .plan() + .expect("failed to plan"); + + // Define a time_created for consistent output across runs. + blueprint2.time_created = + Utc.from_utc_datetime(&NaiveDateTime::UNIX_EPOCH); + + assert_contents( + "tests/output/planner_decommissions_sleds_bp2.txt", + &blueprint2.display().to_string(), + ); + let diff = blueprint2.diff_since_blueprint(&blueprint1); + println!("1 -> 2 (expunged {expunged_sled_id}):\n{}", diff.display()); + assert_contents( + "tests/output/planner_decommissions_sleds_1_2.txt", + &diff.display().to_string(), + ); + + // All the zones of the expunged sled should be expunged, and the sled + // itself should be decommissioned. + assert!(blueprint2.blueprint_zones[&expunged_sled_id] + .are_all_zones_expunged()); + assert_eq!( + blueprint2.sled_state[&expunged_sled_id], + SledState::Decommissioned + ); + + // Remove the now-decommissioned sled from the planning input. + let mut builder = input.into_builder(); + builder.sleds_mut().remove(&expunged_sled_id); + let input = builder.build(); + + let blueprint3 = Planner::new_based_on( + logctx.log.clone(), + &blueprint2, + &input, + "test_blueprint3", + &collection, + ) + .expect("created planner") + .with_rng_seed((TEST_NAME, "bp3")) + .plan() + .expect("failed to plan"); + + // There should be no changes to the blueprint; we don't yet garbage + // collect zones, so we should still have the sled's expunged zones + // (even though the sled itself is no longer present in the list of + // commissioned sleds). + let diff = blueprint3.diff_since_blueprint(&blueprint2); + println!( + "2 -> 3 (decommissioned {expunged_sled_id}):\n{}", + diff.display() + ); + assert_eq!(diff.sleds_added.len(), 0); + assert_eq!(diff.sleds_removed.len(), 0); + assert_eq!(diff.sleds_modified.len(), 0); + assert_eq!(diff.sleds_unchanged.len(), DEFAULT_N_SLEDS); + + logctx.cleanup_successful(); + } + + #[test] + fn test_ensure_preserve_downgrade_option() { + static TEST_NAME: &str = "planner_ensure_preserve_downgrade_option"; + let logctx = test_setup_log(TEST_NAME); + + let (collection, input, bp1) = example(&logctx.log, TEST_NAME, 0); + let mut builder = input.into_builder(); + assert!(bp1.cockroachdb_fingerprint.is_empty()); + assert_eq!( + bp1.cockroachdb_setting_preserve_downgrade, + CockroachDbPreserveDowngrade::DoNotModify + ); + + // If `preserve_downgrade_option` is unset and the current cluster + // version matches `POLICY`, we ensure it is set. 
+ builder.set_cockroachdb_settings(CockroachDbSettings { + state_fingerprint: "bp2".to_owned(), + version: CockroachDbClusterVersion::POLICY.to_string(), + preserve_downgrade: String::new(), + }); + let bp2 = Planner::new_based_on( + logctx.log.clone(), + &bp1, + &builder.clone().build(), + "initial settings", + &collection, + ) + .expect("failed to create planner") + .with_rng_seed((TEST_NAME, "bp2")) + .plan() + .expect("failed to plan"); + assert_eq!(bp2.cockroachdb_fingerprint, "bp2"); + assert_eq!( + bp2.cockroachdb_setting_preserve_downgrade, + CockroachDbClusterVersion::POLICY.into() + ); + + // If `preserve_downgrade_option` is unset and the current cluster + // version is known to us and _newer_ than `POLICY`, we still ensure + // it is set. (During a "tock" release, `POLICY == NEWLY_INITIALIZED` + // and this won't be materially different than the above test, but it + // shouldn't need to change when moving to a "tick" release.) + builder.set_cockroachdb_settings(CockroachDbSettings { + state_fingerprint: "bp3".to_owned(), + version: CockroachDbClusterVersion::NEWLY_INITIALIZED.to_string(), + preserve_downgrade: String::new(), + }); + let bp3 = Planner::new_based_on( + logctx.log.clone(), + &bp1, + &builder.clone().build(), + "initial settings", + &collection, + ) + .expect("failed to create planner") + .with_rng_seed((TEST_NAME, "bp3")) + .plan() + .expect("failed to plan"); + assert_eq!(bp3.cockroachdb_fingerprint, "bp3"); + assert_eq!( + bp3.cockroachdb_setting_preserve_downgrade, + CockroachDbClusterVersion::NEWLY_INITIALIZED.into() + ); + + // When we run the planner again after setting the setting, the inputs + // will change; we should still be ensuring the setting. + builder.set_cockroachdb_settings(CockroachDbSettings { + state_fingerprint: "bp4".to_owned(), + version: CockroachDbClusterVersion::NEWLY_INITIALIZED.to_string(), + preserve_downgrade: CockroachDbClusterVersion::NEWLY_INITIALIZED + .to_string(), + }); + let bp4 = Planner::new_based_on( + logctx.log.clone(), + &bp1, + &builder.clone().build(), + "after ensure", + &collection, + ) + .expect("failed to create planner") + .with_rng_seed((TEST_NAME, "bp4")) + .plan() + .expect("failed to plan"); + assert_eq!(bp4.cockroachdb_fingerprint, "bp4"); + assert_eq!( + bp4.cockroachdb_setting_preserve_downgrade, + CockroachDbClusterVersion::NEWLY_INITIALIZED.into() + ); + + // When `version` isn't recognized, do nothing regardless of the value + // of `preserve_downgrade`. 
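// [Editorial sketch, not part of the change above] The setting the planner
// chooses in `do_plan_cockroachdb_settings`, and which these test cases walk
// through, can be summarized as one decision function.
// `decide_preserve_downgrade` is a hypothetical name used only for
// illustration; the real logic is the match in the planner above. It assumes
// the `CockroachDbClusterVersion` and `CockroachDbPreserveDowngrade` types
// imported by this test module.
fn decide_preserve_downgrade(
    target: CockroachDbClusterVersion,
    reported_version: &str,
) -> CockroachDbPreserveDowngrade {
    match reported_version.parse::<CockroachDbClusterVersion>() {
        // Unknown version: likely mid-upgrade, so change nothing.
        Err(_) => CockroachDbPreserveDowngrade::DoNotModify,
        // Policy wants something newer than what is running: clear the
        // setting so the upgrade can finalize.
        Ok(version) if target > version => {
            CockroachDbPreserveDowngrade::AllowUpgrade
        }
        // Otherwise pin the current version to prevent auto-finalization.
        Ok(version) => CockroachDbPreserveDowngrade::Set(version),
    }
}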
+ for preserve_downgrade in [ + String::new(), + CockroachDbClusterVersion::NEWLY_INITIALIZED.to_string(), + "definitely not a real cluster version".to_owned(), + ] { + builder.set_cockroachdb_settings(CockroachDbSettings { + state_fingerprint: "bp5".to_owned(), + version: "definitely not a real cluster version".to_owned(), + preserve_downgrade: preserve_downgrade.clone(), + }); + let bp5 = Planner::new_based_on( + logctx.log.clone(), + &bp1, + &builder.clone().build(), + "unknown version", + &collection, + ) + .expect("failed to create planner") + .with_rng_seed((TEST_NAME, format!("bp5-{}", preserve_downgrade))) + .plan() + .expect("failed to plan"); + assert_eq!(bp5.cockroachdb_fingerprint, "bp5"); + assert_eq!( + bp5.cockroachdb_setting_preserve_downgrade, + CockroachDbPreserveDowngrade::DoNotModify + ); } - assert_eq!(total_new_nexus_zones, 11); logctx.cleanup_successful(); } diff --git a/nexus/reconfigurator/planning/src/planner/omicron_zone_placement.rs b/nexus/reconfigurator/planning/src/planner/omicron_zone_placement.rs new file mode 100644 index 0000000000..26e72db434 --- /dev/null +++ b/nexus/reconfigurator/planning/src/planner/omicron_zone_placement.rs @@ -0,0 +1,493 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Omicron zone placement decisions + +use nexus_types::deployment::BlueprintZoneType; +use omicron_uuid_kinds::SledUuid; +use sled_agent_client::ZoneKind; +use std::cmp::Ordering; +use std::collections::BinaryHeap; +use std::mem; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[cfg_attr(test, derive(test_strategy::Arbitrary))] +pub(super) enum DiscretionaryOmicronZone { + Nexus, + // TODO expand this enum as we start to place more services +} + +impl DiscretionaryOmicronZone { + pub(super) fn from_zone_type( + zone_type: &BlueprintZoneType, + ) -> Option { + match zone_type { + BlueprintZoneType::Nexus(_) => Some(Self::Nexus), + // Zones that we should place but don't yet. + BlueprintZoneType::BoundaryNtp(_) + | BlueprintZoneType::Clickhouse(_) + | BlueprintZoneType::ClickhouseKeeper(_) + | BlueprintZoneType::CockroachDb(_) + | BlueprintZoneType::CruciblePantry(_) + | BlueprintZoneType::ExternalDns(_) + | BlueprintZoneType::InternalDns(_) + | BlueprintZoneType::Oximeter(_) => None, + // Zones that get special handling for placement (all sleds get + // them, although internal NTP has some interactions with boundary + // NTP that we don't yet handle, so this may change). + BlueprintZoneType::Crucible(_) + | BlueprintZoneType::InternalNtp(_) => None, + } + } +} + +impl From for ZoneKind { + fn from(zone: DiscretionaryOmicronZone) -> Self { + match zone { + DiscretionaryOmicronZone::Nexus => Self::Nexus, + } + } +} + +#[derive(Debug, thiserror::Error)] +pub(super) enum PlacementError { + #[error( + "no sleds eligible for placement of new {} zone", + ZoneKind::from(*zone_kind) + )] + NoSledsEligible { zone_kind: DiscretionaryOmicronZone }, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(super) struct OmicronZonePlacementSledState { + pub sled_id: SledUuid, + pub num_zpools: usize, + pub discretionary_zones: Vec, +} + +/// `OmicronZonePlacement` keeps an internal heap of sleds and their current +/// discretionary zones and chooses sleds for placement of additional +/// discretionary zones. 
+#[derive(Debug, Clone)] +pub(super) struct OmicronZonePlacement { + sleds: OrderedSleds, +} + +impl OmicronZonePlacement { + /// Construct a new `OmicronZonePlacement` with a given set of eligible + /// sleds. + /// + /// Sleds which are not eligible for discretionary services for reasons + /// outside the knowledge of `OmicronZonePlacement` (e.g., sleds with a + /// policy or state that makes them ineligible) should be omitted from this + /// list of sleds. For now, sleds that are waiting for an NTP zone should be + /// omitted as well, although that may change in the future when we add + /// support for boundary NTP zone placement. + pub(super) fn new( + sleds: impl Iterator<Item = OmicronZonePlacementSledState>, + ) -> Self { + // We rebuild our heap whenever the zone type we're placing changes. We + // need to pick _something_ to start; this only matters for performance, + // not correctness (we don't have to rebuild the heap if `place_zone` is + // called with a zone kind that matches the current sorting). + let ordered_by = DiscretionaryOmicronZone::Nexus; + Self { sleds: OrderedSleds::new(ordered_by, sleds) } + } + + /// Attempt to place a new zone of kind `zone_kind` on one of the sleds + /// provided when this `OmicronZonePlacement` was created. + /// + /// On success, the internal heap held by `self` is updated assuming that a + /// new zone of the given kind was added to the sled returned by + /// `place_zone()`. This allows one `OmicronZonePlacement` to be reused + /// across multiple zone placement decisions, but requires the caller to + /// accept its decisions. If the caller decides not to add a zone to the + /// returned sled, the `OmicronZonePlacement` instance should be discarded + /// and a new one should be created for future placement decisions. + /// + /// Placement is currently minimal. The only hard requirement we enforce is + /// that a sled may only run one instance of any given zone kind per + /// zpool it has (e.g., a sled with 5 zpools could run 5 Nexus instances and + /// 5 CockroachDb instances concurrently, but could not run 6 Nexus + /// instances). If there is at least one sled that satisfies this + /// requirement, this method will return `Ok(_)`. If there are multiple + /// sleds that satisfy this requirement, this method will return a sled + /// which has the fewest instances of `zone_kind`; if multiple sleds are + /// tied, it will pick the one with the fewest total discretionary zones; if + /// multiple sleds are still tied, it will pick deterministically (e.g., + /// choosing the lowest or highest sled ID). + /// + /// `OmicronZonePlacement` currently does not track _which_ zpools are + /// assigned to services. This could lead to it being overly conservative if + /// zpools that are not in service are hosting relevant zones. For example, + /// imagine a sled with two zpools: zpool-a and zpool-b. The sled has a + /// single Nexus instance with a transitory dataset on zpool-a. If zpool-a + /// is in a degraded state and considered not-in-service, + /// `OmicronZonePlacement` will be told by the planner that the sled has 1 + /// zpool. Our simple check of "at most one Nexus per zpool" would + /// erroneously fail to realize we could still add a Nexus (backed by + /// zpool-b), and would claim that the sled already has a Nexus for each + /// zpool. + /// + /// We punt on this problem for multiple reasons: + /// + /// 1. It's overly conservative; if we get into this state, we may refuse to + /// start services when we ought to be able to, but this isn't the worst + /// failure mode.
In practice we should have far more options for + /// placement than we need for any of our control plane services, so + /// skipping a sled in this state should be fine. + /// 2. We don't yet track transitory datasets, so even if we wanted to know + /// which zpool Nexus was using (in the above example), we can't. + /// 3. We don't (yet?) have a way for a zpool to be present, backing a zone, + /// and not considered to be in service. The only zpools that aren't in + /// service belong to expunged disks, which can't be backing live + /// services. + pub(super) fn place_zone( + &mut self, + zone_kind: DiscretionaryOmicronZone, + ) -> Result<SledUuid, PlacementError> { + self.sleds.ensure_ordered_by(zone_kind); + + let mut sleds_skipped = Vec::new(); + let mut chosen_sled = None; + while let Some(sled) = self.sleds.pop() { + // Ensure we have at least one zpool more than the number of + // `zone_kind` zones already placed on this sled. If we don't, we + // already have a zone of this kind on each zpool, so we'll skip + // this sled. + if sled + .discretionary_zones + .iter() + .filter(|&&z| z == zone_kind) + .count() + < sled.num_zpools + { + chosen_sled = Some(sled); + break; + } else { + sleds_skipped.push(sled); + } + } + + // Push any skipped sleds back onto our heap. + for sled in sleds_skipped { + self.sleds.push(sled); + } + + let mut sled = + chosen_sled.ok_or(PlacementError::NoSledsEligible { zone_kind })?; + let sled_id = sled.sled_id; + + // Update our internal state so future `place_zone` calls take the new + // zone we just placed into account. + sled.discretionary_zones.push(zone_kind); + self.sleds.push(sled); + + Ok(sled_id) + } +} + +// Wrapper around a binary heap that allows us to change the ordering at runtime +// (so we can sort for particular types of zones to place). +#[derive(Debug, Clone)] +struct OrderedSleds { + // The current zone type we're sorted to place. We maintain the invariant + // that every element of `heap` has the same `ordered_by` value as this + // field's current value. + ordered_by: DiscretionaryOmicronZone, + heap: BinaryHeap<OrderedSledState>, +} + +impl OrderedSleds { + fn new( + ordered_by: DiscretionaryOmicronZone, + sleds: impl Iterator<Item = OmicronZonePlacementSledState>, + ) -> Self { + Self { + ordered_by, + heap: sleds + .map(|sled| OrderedSledState { ordered_by, sled }) + .collect(), + } + } + + fn ensure_ordered_by(&mut self, ordered_by: DiscretionaryOmicronZone) { + if self.ordered_by == ordered_by { + return; + } + + // Rebuild our heap, sorting by a new zone kind, and maintaining the + // invariant that all our heap members have the same `ordered_by` value + // as we do. + let mut sleds = mem::take(&mut self.heap).into_vec(); + for s in &mut sleds { + s.ordered_by = ordered_by; + } + self.heap = BinaryHeap::from(sleds); + self.ordered_by = ordered_by; + } + + fn pop(&mut self) -> Option<OmicronZonePlacementSledState> { + self.heap.pop().map(|ordered| ordered.sled) + } + + fn push(&mut self, sled: OmicronZonePlacementSledState) { + self.heap.push(OrderedSledState { ordered_by: self.ordered_by, sled }) + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +struct OrderedSledState { + ordered_by: DiscretionaryOmicronZone, + sled: OmicronZonePlacementSledState, +} + +impl Ord for OrderedSledState { + fn cmp(&self, other: &Self) -> Ordering { + // Invariant: We should never compare other entries with a different + // `ordered_by`. This is enforced by `OrderedSleds`. + assert_eq!(self.ordered_by, other.ordered_by); + + // Count how many zones of our ordering type are in each side.
+ let our_zones_of_interest = self + .sled + .discretionary_zones + .iter() + .filter(|&&z| z == self.ordered_by) + .count(); + let other_zones_of_interest = other + .sled + .discretionary_zones + .iter() + .filter(|&&z| z == self.ordered_by) + .count(); + + // BinaryHeap is a max heap, and we want to be on the top of the heap if + // we have fewer zones of interest, so reverse the comparisons below. + our_zones_of_interest + .cmp(&other_zones_of_interest) + .reverse() + // If the zones of interest count is equal, we tiebreak by total + // discretionary zones, again reversing the order for our max heap + // to prioritize sleds with fewer total discretionary zones. + .then_with(|| { + self.sled + .discretionary_zones + .len() + .cmp(&other.sled.discretionary_zones.len()) + .reverse() + }) + // If we're still tied, tiebreak by sorting on sled ID for + // determinism. + .then_with(|| self.sled.sled_id.cmp(&other.sled.sled_id)) + } +} + +impl PartialOrd for OrderedSledState { + fn partial_cmp(&self, other: &Self) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +#[cfg(test)] +pub mod test { + use super::*; + use omicron_uuid_kinds::GenericUuid; + use proptest::arbitrary::any; + use proptest::collection::btree_map; + use proptest::sample::size_range; + use std::collections::BTreeMap; + use test_strategy::proptest; + use test_strategy::Arbitrary; + use uuid::Uuid; + + #[derive(Debug, Clone, Arbitrary)] + struct ZonesToPlace { + #[any(size_range(0..8).lift())] + zones: Vec<DiscretionaryOmicronZone>, + } + + #[derive(Debug, Clone, Arbitrary)] + struct ExistingSled { + zones: ZonesToPlace, + #[strategy(0_usize..8)] + num_zpools: usize, + } + + #[derive(Debug, Arbitrary)] + struct ArbitraryTestInput { + #[strategy(btree_map(any::<[u8; 16]>(), any::<ExistingSled>(), 1..8))] + existing_sleds: BTreeMap<[u8; 16], ExistingSled>, + zones_to_place: ZonesToPlace, + } + + #[derive(Debug)] + struct TestInput { + state: TestState, + zones_to_place: Vec<DiscretionaryOmicronZone>, + } + + impl From<ArbitraryTestInput> for TestInput { + fn from(input: ArbitraryTestInput) -> Self { + let mut sleds = BTreeMap::new(); + for (&raw_id, existing_sled) in input.existing_sleds.iter() { + let sled_id = + SledUuid::from_untyped_uuid(Uuid::from_bytes(raw_id)); + sleds.insert( + sled_id, + TestSledState { + zones: existing_sled.zones.zones.clone(), + num_zpools: existing_sled.num_zpools, + }, + ); + } + let state = TestState { sleds }; + Self { state, zones_to_place: input.zones_to_place.zones } + } + } + + #[derive(Debug)] + struct TestSledState { + zones: Vec<DiscretionaryOmicronZone>, + num_zpools: usize, + } + + impl TestSledState { + fn count_zones_of_kind(&self, kind: DiscretionaryOmicronZone) -> usize { + self.zones.iter().filter(|&&k| k == kind).count() + } + } + + #[derive(Debug)] + struct TestState { + sleds: BTreeMap<SledUuid, TestSledState>, + } + + impl TestState { + fn validate_sled_can_support_another_zone_of_kind( + &self, + sled_id: SledUuid, + kind: DiscretionaryOmicronZone, + ) -> Result<(), String> { + let sled_state = self.sleds.get(&sled_id).expect("valid sled_id"); + let existing_zones = sled_state.count_zones_of_kind(kind); + if existing_zones < sled_state.num_zpools { + Ok(()) + } else { + Err(format!( + "already have {existing_zones} \ + {kind:?} instances but only {} zpools", + sled_state.num_zpools + )) + } + } + + fn validate_placement( + &mut self, + sled_id: SledUuid, + kind: DiscretionaryOmicronZone, + ) -> Result<(), String> { + // Ensure this sled is eligible for this kind at all: We have to + // have at least one disk on which we can put the dataset for this + // zone that isn't already holding another zone of this same kind
// (i.e., at most one zone of any given kind per disk per sled). + self.validate_sled_can_support_another_zone_of_kind(sled_id, kind)?; + + let sled_state = self.sleds.get(&sled_id).expect("valid sled_id"); + let existing_zones = sled_state.count_zones_of_kind(kind); + + // Ensure this sled is (at least tied for) the best choice for this + // kind: it should have the minimum number of existing zones of this + // kind, and of all sleds tied for the minimum, it should have the + // fewest total discretionary services. + for (&other_sled_id, other_sled_state) in &self.sleds { + // Ignore other sleds that can't run another zone of `kind`. + if self + .validate_sled_can_support_another_zone_of_kind( + other_sled_id, + kind, + ) + .is_err() + { + continue; + } + + let other_zone_count = + other_sled_state.count_zones_of_kind(kind); + if other_zone_count < existing_zones { + return Err(format!( + "sled {other_sled_id} would be a better choice \ + (fewer existing {kind:?} instances: \ + {other_zone_count} < {existing_zones})" + )); + } + if other_zone_count == existing_zones + && other_sled_state.zones.len() < sled_state.zones.len() + { + return Err(format!( + "sled {other_sled_id} would be a better choice \ + (same number of existing {kind:?} instances, but \ + fewer total discretionary services: {} < {})", + other_sled_state.zones.len(), + sled_state.zones.len(), + )); + } + } + + // This placement is valid: update our state. + self.sleds.get_mut(&sled_id).unwrap().zones.push(kind); + Ok(()) + } + + fn validate_no_placement_possible( + &self, + kind: DiscretionaryOmicronZone, + ) -> Result<(), String> { + // Zones should be placeable unless every sled already has a zone of + // this kind on every disk. + for (sled_id, sled_state) in self.sleds.iter() { + if sled_state.count_zones_of_kind(kind) < sled_state.num_zpools + { + return Err(format!( + "sled {sled_id} is eligible for {kind:?} placement" + )); + } + } + Ok(()) + } + } + + #[proptest] + fn test_place_omicron_zones(input: ArbitraryTestInput) { + let mut input = TestInput::from(input); + + let mut placer = + OmicronZonePlacement::new(input.state.sleds.iter().map( + |(&sled_id, sled_state)| OmicronZonePlacementSledState { + sled_id, + num_zpools: sled_state.num_zpools, + discretionary_zones: sled_state.zones.clone(), + }, + )); + + for z in input.zones_to_place { + println!("placing {z:?}"); + match placer.place_zone(z) { + Ok(sled_id) => { + input + .state + .validate_placement(sled_id, z) + .expect("valid placement"); + } + Err(PlacementError::NoSledsEligible { zone_kind }) => { + assert_eq!(zone_kind, z); + input + .state + .validate_no_placement_possible(z) + .expect("no placement possible"); + } + } + } + } +} diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs index e224e3c6df..74c9313e05 100644 --- a/nexus/reconfigurator/planning/src/system.rs +++ b/nexus/reconfigurator/planning/src/system.rs @@ -10,8 +10,15 @@ use gateway_client::types::RotState; use gateway_client::types::SpState; use indexmap::IndexMap; use nexus_inventory::CollectionBuilder; +use nexus_types::deployment::CockroachDbClusterVersion; +use nexus_types::deployment::CockroachDbSettings; +use nexus_types::deployment::PlanningInputBuilder; use nexus_types::deployment::Policy; +use nexus_types::deployment::SledDetails; +use nexus_types::deployment::SledDisk; use nexus_types::deployment::SledResources; +use nexus_types::external_api::views::PhysicalDiskPolicy; +use nexus_types::external_api::views::PhysicalDiskState; use 
nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledProvisionPolicy; use nexus_types::external_api::views::SledState; @@ -20,7 +27,6 @@ use nexus_types::inventory::PowerState; use nexus_types::inventory::RotSlot; use nexus_types::inventory::SledRole; use nexus_types::inventory::SpType; -use nexus_types::inventory::ZpoolName; use omicron_common::address::get_sled_address; use omicron_common::address::IpRange; use omicron_common::address::Ipv6Subnet; @@ -28,11 +34,17 @@ use omicron_common::address::NEXUS_REDUNDANCY; use omicron_common::address::RACK_PREFIX; use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::ByteCount; +use omicron_common::api::external::Generation; +use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::PhysicalDiskUuid; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::ZpoolUuid; +use std::collections::BTreeMap; use std::collections::BTreeSet; use std::fmt::Debug; use std::net::Ipv4Addr; use std::net::Ipv6Addr; -use uuid::Uuid; trait SubnetIterator: Iterator<Item = Ipv6Subnet<SLED_PREFIX>> + Debug {} impl<T> SubnetIterator for T where @@ -42,8 +54,8 @@ impl<T> SubnetIterator for T where /// Describes an actual or synthetic Oxide rack for planning and testing /// -/// From this description, you can extract a `Policy` or inventory `Collection`. -/// There are a few intended purposes here: +/// From this description, you can extract a `PlanningInput` or inventory +/// `Collection`. There are a few intended purposes here: /// /// 1. to easily construct fake racks in automated tests for the Planner and /// other parts of Reconfigurator @@ -59,12 +71,15 @@ impl<T> SubnetIterator for T where #[derive(Debug)] pub struct SystemDescription { collector: Option<String>, - sleds: IndexMap<Uuid, Sled>, + sleds: IndexMap<SledUuid, Sled>, sled_subnets: Box<dyn SubnetIterator>, available_non_scrimlet_slots: BTreeSet<u16>, available_scrimlet_slots: BTreeSet<u16>, target_nexus_zone_count: usize, + target_cockroachdb_cluster_version: CockroachDbClusterVersion, service_ip_pool_ranges: Vec<IpRange>, + internal_dns_version: Generation, + external_dns_version: Generation, } impl SystemDescription { @@ -109,6 +124,8 @@ impl SystemDescription { // Policy defaults let target_nexus_zone_count = NEXUS_REDUNDANCY; + let target_cockroachdb_cluster_version = + CockroachDbClusterVersion::POLICY; // IPs from TEST-NET-1 (RFC 5737) let service_ip_pool_ranges = vec![IpRange::try_from(( "192.0.2.2".parse::<Ipv4Addr>().unwrap(), @@ -123,7 +140,10 @@ impl SystemDescription { available_non_scrimlet_slots, available_scrimlet_slots, target_nexus_zone_count, + target_cockroachdb_cluster_version, service_ip_pool_ranges, + internal_dns_version: Generation::new(), + external_dns_version: Generation::new(), } } @@ -171,7 +191,7 @@ impl SystemDescription { /// Add a sled to the system, as described by a SledBuilder pub fn sled(&mut self, sled: SledBuilder) -> anyhow::Result<&mut Self> { - let sled_id = sled.id.unwrap_or_else(Uuid::new_v4); + let sled_id = sled.id.unwrap_or_else(SledUuid::new_v4); ensure!( !self.sleds.contains_key(&sled_id), "attempted to add sled with the same id as an existing one: {}", @@ -222,7 +242,8 @@ impl SystemDescription { /// database of an existing system pub fn sled_full( &mut self, - sled_id: Uuid, + sled_id: SledUuid, + sled_policy: SledPolicy, sled_resources: SledResources, inventory_sp: Option<SledHwInventory<'_>>, inventory_sled_agent: &nexus_types::inventory::SledAgent, @@ -236,6 +257,7 @@ impl SystemDescription { sled_id, Sled::new_full( sled_id, + sled_policy, sled_resources, inventory_sp,
inventory_sled_agent, @@ -275,26 +297,39 @@ impl SystemDescription { Ok(builder) } - pub fn to_policy(&self) -> anyhow::Result<Policy> { - let sleds = self - .sleds - .values() - .map(|sled| { - let sled_resources = SledResources { - policy: sled.policy, - state: SledState::Active, - zpools: sled.zpools.iter().cloned().collect(), - subnet: sled.sled_subnet, - }; - (sled.sled_id, sled_resources) - }) - .collect(); - - Ok(Policy { - sleds, + /// Construct a [`PlanningInputBuilder`] primed with all this system's sleds + /// + /// Does not populate extra information like Omicron zone external IPs or + /// NICs. + pub fn to_planning_input_builder( + &self, + ) -> anyhow::Result<PlanningInputBuilder> { + let policy = Policy { service_ip_pool_ranges: self.service_ip_pool_ranges.clone(), target_nexus_zone_count: self.target_nexus_zone_count, - }) + target_cockroachdb_cluster_version: self + .target_cockroachdb_cluster_version, + }; + let mut builder = PlanningInputBuilder::new( + policy, + self.internal_dns_version, + self.external_dns_version, + CockroachDbSettings::empty(), + ); + + for sled in self.sleds.values() { + let sled_details = SledDetails { + policy: sled.policy, + state: SledState::Active, + resources: SledResources { + zpools: sled.zpools.clone(), + subnet: sled.sled_subnet, + }, + }; + builder.add_sled(sled.sled_id, sled_details)?; + } + + Ok(builder) } } @@ -308,7 +343,7 @@ pub enum SledHardware { #[derive(Clone, Debug)] pub struct SledBuilder { - id: Option<Uuid>, + id: Option<SledUuid>, unique: Option<String>, hardware: SledHardware, hardware_slot: Option<u16>, @@ -332,7 +367,7 @@ impl SledBuilder { /// Set the id of the sled /// /// Default: randomly generated - pub fn id(mut self, id: Uuid) -> Self { + pub fn id(mut self, id: SledUuid) -> Self { self.id = Some(id); self } @@ -391,21 +426,22 @@ pub struct SledHwInventory<'a> { /// Our abstract description of a `Sled` /// -/// This needs to be rich enough to generate a Policy and inventory Collection. +/// This needs to be rich enough to generate a PlanningInput and inventory +/// Collection.
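+///
+/// For orientation, a hedged sketch of how simulated `Sled`s typically come
+/// into being and feed planning (this assumes a `SledBuilder::new()`
+/// constructor, an `nzpools` setter, and `SystemDescription::new()`, which
+/// are not shown in this hunk):
+///
+/// ```ignore
+/// let mut system = SystemDescription::new();
+/// // Each simulated sled gets fake disks/zpools that the planner counts.
+/// system.sled(SledBuilder::new().nzpools(4))?;
+/// let planning_input = system.to_planning_input_builder()?.build();
+/// ```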
#[derive(Clone, Debug)] struct Sled { - sled_id: Uuid, + sled_id: SledUuid, sled_subnet: Ipv6Subnet, inventory_sp: Option<(u16, SpState)>, inventory_sled_agent: sled_agent_client::types::Inventory, - zpools: Vec, + zpools: BTreeMap, policy: SledPolicy, } impl Sled { /// Create a `Sled` using faked-up information based on a `SledBuilder` fn new_simulated( - sled_id: Uuid, + sled_id: SledUuid, sled_subnet: Ipv6Subnet, sled_role: SledRole, unique: Option, @@ -413,12 +449,28 @@ impl Sled { hardware_slot: u16, nzpools: u8, ) -> Sled { + use typed_rng::TypedUuidRng; let unique = unique.unwrap_or_else(|| hardware_slot.to_string()); let model = format!("model{}", unique); let serial = format!("serial{}", unique); let revision = 0; - let zpools = (0..nzpools) - .map(|_| format!("oxp_{}", Uuid::new_v4()).parse().unwrap()) + let mut zpool_rng = + TypedUuidRng::from_seed("SystemSimultatedSled", "ZpoolUuid"); + let zpools: BTreeMap<_, _> = (0..nzpools) + .map(|_| { + let zpool = ZpoolUuid::from(zpool_rng.next()); + let disk = SledDisk { + disk_identity: DiskIdentity { + vendor: String::from("fake-vendor"), + serial: format!("serial-{zpool}"), + model: String::from("fake-model"), + }, + disk_id: PhysicalDiskUuid::new_v4(), + policy: PhysicalDiskPolicy::InService, + state: PhysicalDiskState::Active, + }; + (zpool, disk) + }) .collect(); let inventory_sp = match hardware { SledHardware::Empty => None, @@ -472,10 +524,21 @@ impl Sled { reservoir_size: ByteCount::from(1024), sled_role, sled_agent_address, - sled_id, + sled_id: sled_id.into_untyped_uuid(), usable_hardware_threads: 10, usable_physical_ram: ByteCount::from(1024 * 1024), - disks: vec![], + // Populate disks, appearing like a real device. + disks: zpools + .values() + .enumerate() + .map(|(i, d)| sled_agent_client::types::InventoryDisk { + identity: d.disk_identity.clone(), + variant: sled_agent_client::types::DiskVariant::U2, + slot: i64::try_from(i).unwrap(), + }) + .collect(), + // Zpools won't necessarily show up until our first request + // to provision storage, so we omit them. 
zpools: vec![], } }; @@ -495,7 +558,8 @@ impl Sled { /// Create a `Sled` based on real information from another `Policy` and /// inventory `Collection` fn new_full( - sled_id: Uuid, + sled_id: SledUuid, + sled_policy: SledPolicy, sled_resources: SledResources, inventory_sp: Option>, inv_sled_agent: &nexus_types::inventory::SledAgent, @@ -555,7 +619,7 @@ impl Sled { reservoir_size: inv_sled_agent.reservoir_size, sled_role: inv_sled_agent.sled_role, sled_agent_address: inv_sled_agent.sled_agent_address.to_string(), - sled_id, + sled_id: sled_id.into_untyped_uuid(), usable_hardware_threads: inv_sled_agent.usable_hardware_threads, usable_physical_ram: inv_sled_agent.usable_physical_ram, disks: vec![], @@ -568,7 +632,7 @@ impl Sled { zpools: sled_resources.zpools.into_iter().collect(), inventory_sp, inventory_sled_agent, - policy: sled_resources.policy, + policy: sled_policy, } } diff --git a/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt b/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt new file mode 100644 index 0000000000..03e76422e9 --- /dev/null +++ b/nexus/reconfigurator/planning/tests/output/blueprint_builder_initial_diff.txt @@ -0,0 +1,120 @@ +from: collection 094d362b-7d79-49e7-a244-134276cca8fe +to: blueprint e4aeb3b3-272f-4967-be34-2d34daa46aa1 + UNCHANGED SLEDS: + + sled 08c7046b-c9c4-4368-881f-19a72df22143: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 44afce85-3377-4b20-a398-517c1579df4d in service fd00:1122:3344:103::23 + crucible 4644ea0c-0ec3-41be-a356-660308e1c3fc in service fd00:1122:3344:103::2c + crucible 55f4d117-0b9d-4256-a2c0-f46d3ed5fff9 in service fd00:1122:3344:103::25 + crucible 5c6a4628-8831-483b-995f-79b9126c4d04 in service fd00:1122:3344:103::28 + crucible 6a01210c-45ed-41a5-9230-8e05ecf5dd8f in service fd00:1122:3344:103::29 + crucible 7004cab9-dfc0-43ba-92d3-58d4ced66025 in service fd00:1122:3344:103::24 + crucible 79552859-fbd3-43bb-a9d3-6baba25558f8 in service fd00:1122:3344:103::26 + crucible 90696819-9b53-485a-9c65-ca63602e843e in service fd00:1122:3344:103::27 + crucible c99525b3-3680-4df6-9214-2ee3e1020e8b in service fd00:1122:3344:103::2a + crucible f42959d3-9eef-4e3b-b404-6177ce3ec7a1 in service fd00:1122:3344:103::2b + internal_ntp c81c9d4a-36d7-4796-9151-f564d3735152 in service fd00:1122:3344:103::21 + nexus b2573120-9c91-4ed7-8b4f-a7bfe8dbc807 in service fd00:1122:3344:103::22 + + + sled 
84ac367e-9b03-4e9d-a846-df1a08deee6c: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 0faa9350-2c02-47c7-a0a6-9f4afd69152c in service fd00:1122:3344:101::2c + crucible 5b44003e-1a3d-4152-b606-872c72efce0e in service fd00:1122:3344:101::25 + crucible 943fea7a-9458-4935-9dc7-01ee5cfe5a02 in service fd00:1122:3344:101::29 + crucible 95c3b6d1-2592-4252-b5c1-5d0faf3ce9c9 in service fd00:1122:3344:101::24 + crucible a5a0b7a9-37c9-4dbd-8393-ec7748ada3b0 in service fd00:1122:3344:101::2b + crucible a9a6a974-8953-4783-b815-da46884f2c02 in service fd00:1122:3344:101::23 + crucible aa25add8-60b0-4ace-ac60-15adcdd32d50 in service fd00:1122:3344:101::2a + crucible b6f2dd1e-7f98-4a68-9df2-b33c69d1f7ea in service fd00:1122:3344:101::27 + crucible dc22d470-dc46-436b-9750-25c8d7d369e2 in service fd00:1122:3344:101::26 + crucible f7e434f9-6d4a-476b-a9e2-48d6ee28a08e in service fd00:1122:3344:101::28 + internal_ntp 38b047ea-e3de-4859-b8e0-70cac5871446 in service fd00:1122:3344:101::21 + nexus fb36b9dc-273a-4bc3-aaa9-19ee4d0ef552 in service fd00:1122:3344:101::22 + + + sled be7f4375-2a6b-457f-b1a4-3074a715e5fe: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 248db330-56e6-4c7e-b5ff-9cd6cbcb210a in service fd00:1122:3344:102::2c + crucible 353b0aff-4c71-4fae-a6bd-adcb1d2a1a1d in service 
fd00:1122:3344:102::29 + crucible 4330134c-41b9-4097-aa0b-3eaefa06d473 in service fd00:1122:3344:102::24 + crucible 65d03287-e43f-45f4-902e-0a5e4638f31a in service fd00:1122:3344:102::25 + crucible 6a5901b1-f9d7-425c-8ecb-a786c900f217 in service fd00:1122:3344:102::27 + crucible 9b722fea-a186-4bc3-bc37-ce7f6de6a796 in service fd00:1122:3344:102::23 + crucible b3583b5f-4a62-4471-9be7-41e61578de4c in service fd00:1122:3344:102::2a + crucible bac92034-b9e6-4e8b-9ffb-dbba9caec88d in service fd00:1122:3344:102::28 + crucible d9653001-f671-4905-a410-6a7abc358318 in service fd00:1122:3344:102::2b + crucible edaca77e-5806-446a-b00c-125962cd551d in service fd00:1122:3344:102::26 + internal_ntp aac3ab51-9e2b-4605-9bf6-e3eb3681c2b5 in service fd00:1122:3344:102::21 + nexus 29278a22-1ba1-4117-bfdb-39fcb9ae7fd1 in service fd00:1122:3344:102::22 + + + COCKROACHDB SETTINGS: ++ state fingerprint::::::::::::::::: (not present in collection) -> (none) ++ cluster.preserve_downgrade_option: (not present in collection) -> (do not modify) + + METADATA: ++ internal DNS version: (not present in collection) -> 1 ++ external DNS version: (not present in collection) -> 1 + diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt index 9f7cab737f..0253baa9f8 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt @@ -1,48 +1,148 @@ -diff blueprint 979ef428-0bdd-4622-8a72-0719e942b415 blueprint 4171ad05-89dd-474b-846b-b007e4346366 ---- blueprint 979ef428-0bdd-4622-8a72-0719e942b415 -+++ blueprint 4171ad05-89dd-474b-846b-b007e4346366 - sled 41f45d9f-766e-4ca6-a881-61ee45c80f57 - zone config generation 2 - 267ed614-92af-4b9d-bdba-c2881c2e43a2 in service internal_ntp [underlay IP fd00:1122:3344:103::21] (unchanged) - 322ee9f1-8903-4542-a0a8-a54cefabdeca in service crucible [underlay IP fd00:1122:3344:103::24] (unchanged) - 4ab1650f-32c5-447f-939d-64b8103a7645 in service crucible [underlay IP fd00:1122:3344:103::2a] (unchanged) - 64aa65f8-1ccb-4cd6-9953-027aebdac8ff in service crucible [underlay IP fd00:1122:3344:103::27] (unchanged) - 6e811d86-8aa7-4660-935b-84b4b7721b10 in service crucible [underlay IP fd00:1122:3344:103::2b] (unchanged) - 747d2426-68bf-4c22-8806-41d290b5d5f5 in service crucible [underlay IP fd00:1122:3344:103::25] (unchanged) - 7fbd2c38-5dc3-48c4-b061-558a2041d70f in service crucible [underlay IP fd00:1122:3344:103::2c] (unchanged) - 8e9e923e-62b1-4cbc-9f59-d6397e338b6b in service crucible [underlay IP fd00:1122:3344:103::29] (unchanged) - b14d5478-1a0e-4b90-b526-36b06339dfc4 in service crucible [underlay IP fd00:1122:3344:103::28] (unchanged) - b40f7c7b-526c-46c8-ae33-67280c280eb7 in service crucible [underlay IP fd00:1122:3344:103::23] (unchanged) - be97b92b-38d6-422a-8c76-d37060f75bd2 in service crucible [underlay IP fd00:1122:3344:103::26] (unchanged) - cc816cfe-3869-4dde-b596-397d41198628 in service nexus [underlay IP fd00:1122:3344:103::22] (unchanged) - sled 43677374-8d2f-4deb-8a41-eeea506db8e0 - zone config generation 2 - 02acbe6a-1c88-47e3-94c3-94084cbde098 in service crucible [underlay IP fd00:1122:3344:101::27] (unchanged) - 07c3c805-8888-4fe5-9543-3d2479dbe6f3 in service crucible [underlay IP fd00:1122:3344:101::26] (unchanged) - 08c7f8aa-1ea9-469b-8cac-2fdbfc11ebcb in service internal_ntp [underlay IP fd00:1122:3344:101::21] (unchanged) - 10d98a73-ec88-4aff-a7e8-7db6a87880e6 
in service crucible [underlay IP fd00:1122:3344:101::24] (unchanged) - 2a455c35-eb3c-4c73-ab6c-d0a706e25316 in service crucible [underlay IP fd00:1122:3344:101::29] (unchanged) - 3eda924f-22a9-4f3e-9a1b-91d1c47601ab in service crucible [underlay IP fd00:1122:3344:101::23] (unchanged) - 587be699-a320-4c79-b320-128d9ecddc0b in service crucible [underlay IP fd00:1122:3344:101::2b] (unchanged) - 6fa06115-4959-4913-8e7b-dd70d7651f07 in service crucible [underlay IP fd00:1122:3344:101::2c] (unchanged) - 8f3a1cc5-9195-4a30-ad02-b804278fe639 in service crucible [underlay IP fd00:1122:3344:101::28] (unchanged) - a1696cd4-588c-484a-b95b-66e824c0ce05 in service crucible [underlay IP fd00:1122:3344:101::25] (unchanged) - a2079cbc-a69e-41a1-b1e0-fbcb972d03f6 in service crucible [underlay IP fd00:1122:3344:101::2a] (unchanged) - c66ab6d5-ff7a-46d1-9fd0-70cefa352d25 in service nexus [underlay IP fd00:1122:3344:101::22] (unchanged) - sled 590e3034-d946-4166-b0e5-2d0034197a07 - zone config generation 2 - 18f8fe40-646e-4962-b17a-20e201f3a6e5 in service crucible [underlay IP fd00:1122:3344:102::2a] (unchanged) - 47199d48-534c-4267-a654-d2d90e64b498 in service internal_ntp [underlay IP fd00:1122:3344:102::21] (unchanged) - 56d5d7cf-db2c-40a3-a775-003241ad4820 in service crucible [underlay IP fd00:1122:3344:102::29] (unchanged) - 6af7f4d6-33b6-4eb3-a146-d8e9e4ae9d66 in service crucible [underlay IP fd00:1122:3344:102::2b] (unchanged) - 704e1fed-f8d6-4cfa-a470-bad27fdc06d1 in service nexus [underlay IP fd00:1122:3344:102::22] (unchanged) - 7a9f60d3-2b66-4547-9b63-7d4f7a8b6382 in service crucible [underlay IP fd00:1122:3344:102::26] (unchanged) - 93f2f40c-5616-4d8d-8519-ec6debdcede0 in service crucible [underlay IP fd00:1122:3344:102::2c] (unchanged) - ab7ba6df-d401-40bd-940e-faf57c57aa2a in service crucible [underlay IP fd00:1122:3344:102::28] (unchanged) - af322036-371f-437c-8c08-7f40f3f1403b in service crucible [underlay IP fd00:1122:3344:102::23] (unchanged) - d637264f-6f40-44c2-8b7e-a179430210d2 in service crucible [underlay IP fd00:1122:3344:102::25] (unchanged) - dce226c9-7373-4bfa-8a94-79dc472857a6 in service crucible [underlay IP fd00:1122:3344:102::27] (unchanged) - edabedf3-839c-488d-ad6f-508ffa864674 in service crucible [underlay IP fd00:1122:3344:102::24] (unchanged) -+ sled b59ec570-2abb-4017-80ce-129d94e7a025 (added) -+ zone config generation 2 -+ 2d73d30e-ca47-46a8-9c12-917d4ab824b6 in service internal_ntp [underlay IP fd00:1122:3344:104::21] (added) +from: blueprint 979ef428-0bdd-4622-8a72-0719e942b415 +to: blueprint 4171ad05-89dd-474b-846b-b007e4346366 + + UNCHANGED SLEDS: + + sled 41f45d9f-766e-4ca6-a881-61ee45c80f57: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model 
serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 322ee9f1-8903-4542-a0a8-a54cefabdeca in service fd00:1122:3344:103::24 + crucible 4ab1650f-32c5-447f-939d-64b8103a7645 in service fd00:1122:3344:103::2a + crucible 64aa65f8-1ccb-4cd6-9953-027aebdac8ff in service fd00:1122:3344:103::27 + crucible 6e811d86-8aa7-4660-935b-84b4b7721b10 in service fd00:1122:3344:103::2b + crucible 747d2426-68bf-4c22-8806-41d290b5d5f5 in service fd00:1122:3344:103::25 + crucible 7fbd2c38-5dc3-48c4-b061-558a2041d70f in service fd00:1122:3344:103::2c + crucible 8e9e923e-62b1-4cbc-9f59-d6397e338b6b in service fd00:1122:3344:103::29 + crucible b14d5478-1a0e-4b90-b526-36b06339dfc4 in service fd00:1122:3344:103::28 + crucible b40f7c7b-526c-46c8-ae33-67280c280eb7 in service fd00:1122:3344:103::23 + crucible be97b92b-38d6-422a-8c76-d37060f75bd2 in service fd00:1122:3344:103::26 + internal_ntp 267ed614-92af-4b9d-bdba-c2881c2e43a2 in service fd00:1122:3344:103::21 + nexus cc816cfe-3869-4dde-b596-397d41198628 in service fd00:1122:3344:103::22 + + + sled 43677374-8d2f-4deb-8a41-eeea506db8e0: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 02acbe6a-1c88-47e3-94c3-94084cbde098 in service fd00:1122:3344:101::27 + crucible 07c3c805-8888-4fe5-9543-3d2479dbe6f3 in service fd00:1122:3344:101::26 + crucible 10d98a73-ec88-4aff-a7e8-7db6a87880e6 in service fd00:1122:3344:101::24 + crucible 2a455c35-eb3c-4c73-ab6c-d0a706e25316 in service fd00:1122:3344:101::29 + crucible 3eda924f-22a9-4f3e-9a1b-91d1c47601ab in service fd00:1122:3344:101::23 + crucible 587be699-a320-4c79-b320-128d9ecddc0b in service fd00:1122:3344:101::2b + crucible 6fa06115-4959-4913-8e7b-dd70d7651f07 in service fd00:1122:3344:101::2c + crucible 8f3a1cc5-9195-4a30-ad02-b804278fe639 in service fd00:1122:3344:101::28 + crucible a1696cd4-588c-484a-b95b-66e824c0ce05 in service fd00:1122:3344:101::25 + crucible a2079cbc-a69e-41a1-b1e0-fbcb972d03f6 in service fd00:1122:3344:101::2a + internal_ntp 08c7f8aa-1ea9-469b-8cac-2fdbfc11ebcb in service fd00:1122:3344:101::21 + nexus c66ab6d5-ff7a-46d1-9fd0-70cefa352d25 in service fd00:1122:3344:101::22 + + + sled 590e3034-d946-4166-b0e5-2d0034197a07: + + physical 
disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 18f8fe40-646e-4962-b17a-20e201f3a6e5 in service fd00:1122:3344:102::2a + crucible 56d5d7cf-db2c-40a3-a775-003241ad4820 in service fd00:1122:3344:102::29 + crucible 6af7f4d6-33b6-4eb3-a146-d8e9e4ae9d66 in service fd00:1122:3344:102::2b + crucible 7a9f60d3-2b66-4547-9b63-7d4f7a8b6382 in service fd00:1122:3344:102::26 + crucible 93f2f40c-5616-4d8d-8519-ec6debdcede0 in service fd00:1122:3344:102::2c + crucible ab7ba6df-d401-40bd-940e-faf57c57aa2a in service fd00:1122:3344:102::28 + crucible af322036-371f-437c-8c08-7f40f3f1403b in service fd00:1122:3344:102::23 + crucible d637264f-6f40-44c2-8b7e-a179430210d2 in service fd00:1122:3344:102::25 + crucible dce226c9-7373-4bfa-8a94-79dc472857a6 in service fd00:1122:3344:102::27 + crucible edabedf3-839c-488d-ad6f-508ffa864674 in service fd00:1122:3344:102::24 + internal_ntp 47199d48-534c-4267-a654-d2d90e64b498 in service fd00:1122:3344:102::21 + nexus 704e1fed-f8d6-4cfa-a470-bad27fdc06d1 in service fd00:1122:3344:102::22 + + + ADDED SLEDS: + + sled b59ec570-2abb-4017-80ce-129d94e7a025: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- ++ fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 ++ fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 ++ fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 ++ fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 ++ fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 ++ fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 ++ fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 ++ fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c ++ fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b ++ fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ ++ internal_ntp 2d73d30e-ca47-46a8-9c12-917d4ab824b6 in service fd00:1122:3344:104::21 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + 
cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version: 1 (unchanged) + external DNS version: 1 (unchanged) + diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt index 9d98daac36..5a824edf84 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt @@ -1,59 +1,158 @@ -diff blueprint 4171ad05-89dd-474b-846b-b007e4346366 blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 ---- blueprint 4171ad05-89dd-474b-846b-b007e4346366 -+++ blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 - sled 41f45d9f-766e-4ca6-a881-61ee45c80f57 - zone config generation 2 - 267ed614-92af-4b9d-bdba-c2881c2e43a2 in service internal_ntp [underlay IP fd00:1122:3344:103::21] (unchanged) - 322ee9f1-8903-4542-a0a8-a54cefabdeca in service crucible [underlay IP fd00:1122:3344:103::24] (unchanged) - 4ab1650f-32c5-447f-939d-64b8103a7645 in service crucible [underlay IP fd00:1122:3344:103::2a] (unchanged) - 64aa65f8-1ccb-4cd6-9953-027aebdac8ff in service crucible [underlay IP fd00:1122:3344:103::27] (unchanged) - 6e811d86-8aa7-4660-935b-84b4b7721b10 in service crucible [underlay IP fd00:1122:3344:103::2b] (unchanged) - 747d2426-68bf-4c22-8806-41d290b5d5f5 in service crucible [underlay IP fd00:1122:3344:103::25] (unchanged) - 7fbd2c38-5dc3-48c4-b061-558a2041d70f in service crucible [underlay IP fd00:1122:3344:103::2c] (unchanged) - 8e9e923e-62b1-4cbc-9f59-d6397e338b6b in service crucible [underlay IP fd00:1122:3344:103::29] (unchanged) - b14d5478-1a0e-4b90-b526-36b06339dfc4 in service crucible [underlay IP fd00:1122:3344:103::28] (unchanged) - b40f7c7b-526c-46c8-ae33-67280c280eb7 in service crucible [underlay IP fd00:1122:3344:103::23] (unchanged) - be97b92b-38d6-422a-8c76-d37060f75bd2 in service crucible [underlay IP fd00:1122:3344:103::26] (unchanged) - cc816cfe-3869-4dde-b596-397d41198628 in service nexus [underlay IP fd00:1122:3344:103::22] (unchanged) - sled 43677374-8d2f-4deb-8a41-eeea506db8e0 - zone config generation 2 - 02acbe6a-1c88-47e3-94c3-94084cbde098 in service crucible [underlay IP fd00:1122:3344:101::27] (unchanged) - 07c3c805-8888-4fe5-9543-3d2479dbe6f3 in service crucible [underlay IP fd00:1122:3344:101::26] (unchanged) - 08c7f8aa-1ea9-469b-8cac-2fdbfc11ebcb in service internal_ntp [underlay IP fd00:1122:3344:101::21] (unchanged) - 10d98a73-ec88-4aff-a7e8-7db6a87880e6 in service crucible [underlay IP fd00:1122:3344:101::24] (unchanged) - 2a455c35-eb3c-4c73-ab6c-d0a706e25316 in service crucible [underlay IP fd00:1122:3344:101::29] (unchanged) - 3eda924f-22a9-4f3e-9a1b-91d1c47601ab in service crucible [underlay IP fd00:1122:3344:101::23] (unchanged) - 587be699-a320-4c79-b320-128d9ecddc0b in service crucible [underlay IP fd00:1122:3344:101::2b] (unchanged) - 6fa06115-4959-4913-8e7b-dd70d7651f07 in service crucible [underlay IP fd00:1122:3344:101::2c] (unchanged) - 8f3a1cc5-9195-4a30-ad02-b804278fe639 in service crucible [underlay IP fd00:1122:3344:101::28] (unchanged) - a1696cd4-588c-484a-b95b-66e824c0ce05 in service crucible [underlay IP fd00:1122:3344:101::25] (unchanged) - a2079cbc-a69e-41a1-b1e0-fbcb972d03f6 in service crucible [underlay IP fd00:1122:3344:101::2a] (unchanged) - c66ab6d5-ff7a-46d1-9fd0-70cefa352d25 in service nexus [underlay IP fd00:1122:3344:101::22] (unchanged) - sled 590e3034-d946-4166-b0e5-2d0034197a07 - zone config generation 2 - 
18f8fe40-646e-4962-b17a-20e201f3a6e5 in service crucible [underlay IP fd00:1122:3344:102::2a] (unchanged) - 47199d48-534c-4267-a654-d2d90e64b498 in service internal_ntp [underlay IP fd00:1122:3344:102::21] (unchanged) - 56d5d7cf-db2c-40a3-a775-003241ad4820 in service crucible [underlay IP fd00:1122:3344:102::29] (unchanged) - 6af7f4d6-33b6-4eb3-a146-d8e9e4ae9d66 in service crucible [underlay IP fd00:1122:3344:102::2b] (unchanged) - 704e1fed-f8d6-4cfa-a470-bad27fdc06d1 in service nexus [underlay IP fd00:1122:3344:102::22] (unchanged) - 7a9f60d3-2b66-4547-9b63-7d4f7a8b6382 in service crucible [underlay IP fd00:1122:3344:102::26] (unchanged) - 93f2f40c-5616-4d8d-8519-ec6debdcede0 in service crucible [underlay IP fd00:1122:3344:102::2c] (unchanged) - ab7ba6df-d401-40bd-940e-faf57c57aa2a in service crucible [underlay IP fd00:1122:3344:102::28] (unchanged) - af322036-371f-437c-8c08-7f40f3f1403b in service crucible [underlay IP fd00:1122:3344:102::23] (unchanged) - d637264f-6f40-44c2-8b7e-a179430210d2 in service crucible [underlay IP fd00:1122:3344:102::25] (unchanged) - dce226c9-7373-4bfa-8a94-79dc472857a6 in service crucible [underlay IP fd00:1122:3344:102::27] (unchanged) - edabedf3-839c-488d-ad6f-508ffa864674 in service crucible [underlay IP fd00:1122:3344:102::24] (unchanged) - sled b59ec570-2abb-4017-80ce-129d94e7a025 -- zone config generation 2 -+ zone config generation 3 - 2d73d30e-ca47-46a8-9c12-917d4ab824b6 in service internal_ntp [underlay IP fd00:1122:3344:104::21] (unchanged) -+ 1a20ee3c-f66e-4fca-ab85-2a248aa3d79d in service crucible [underlay IP fd00:1122:3344:104::2b] (added) -+ 28852beb-d0e5-4cba-9adb-e7f0cd4bb864 in service crucible [underlay IP fd00:1122:3344:104::29] (added) -+ 45556184-7092-4a3d-873f-637976bb133b in service crucible [underlay IP fd00:1122:3344:104::22] (added) -+ 8215bf7a-10d6-4f40-aeb7-27a196307c37 in service crucible [underlay IP fd00:1122:3344:104::25] (added) -+ 9d75abfe-47ab-434a-93dd-af50dc0dddde in service crucible [underlay IP fd00:1122:3344:104::23] (added) -+ a36d291c-7f68-462f-830e-bc29e5841ce2 in service crucible [underlay IP fd00:1122:3344:104::27] (added) -+ b3a4d434-aaee-4752-8c99-69d88fbcb8c5 in service crucible [underlay IP fd00:1122:3344:104::2a] (added) -+ cf5b636b-a505-4db6-bc32-baf9f53f4371 in service crucible [underlay IP fd00:1122:3344:104::28] (added) -+ f6125d45-b9cc-4721-ba60-ed4dbb177e41 in service crucible [underlay IP fd00:1122:3344:104::26] (added) -+ f86e19d2-9145-41cf-be89-6aaa34a73873 in service crucible [underlay IP fd00:1122:3344:104::24] (added) +from: blueprint 4171ad05-89dd-474b-846b-b007e4346366 +to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 + + UNCHANGED SLEDS: + + sled 41f45d9f-766e-4ca6-a881-61ee45c80f57: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model 
serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 322ee9f1-8903-4542-a0a8-a54cefabdeca in service fd00:1122:3344:103::24 + crucible 4ab1650f-32c5-447f-939d-64b8103a7645 in service fd00:1122:3344:103::2a + crucible 64aa65f8-1ccb-4cd6-9953-027aebdac8ff in service fd00:1122:3344:103::27 + crucible 6e811d86-8aa7-4660-935b-84b4b7721b10 in service fd00:1122:3344:103::2b + crucible 747d2426-68bf-4c22-8806-41d290b5d5f5 in service fd00:1122:3344:103::25 + crucible 7fbd2c38-5dc3-48c4-b061-558a2041d70f in service fd00:1122:3344:103::2c + crucible 8e9e923e-62b1-4cbc-9f59-d6397e338b6b in service fd00:1122:3344:103::29 + crucible b14d5478-1a0e-4b90-b526-36b06339dfc4 in service fd00:1122:3344:103::28 + crucible b40f7c7b-526c-46c8-ae33-67280c280eb7 in service fd00:1122:3344:103::23 + crucible be97b92b-38d6-422a-8c76-d37060f75bd2 in service fd00:1122:3344:103::26 + internal_ntp 267ed614-92af-4b9d-bdba-c2881c2e43a2 in service fd00:1122:3344:103::21 + nexus cc816cfe-3869-4dde-b596-397d41198628 in service fd00:1122:3344:103::22 + + + sled 43677374-8d2f-4deb-8a41-eeea506db8e0: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 02acbe6a-1c88-47e3-94c3-94084cbde098 in service fd00:1122:3344:101::27 + crucible 07c3c805-8888-4fe5-9543-3d2479dbe6f3 in service fd00:1122:3344:101::26 + crucible 10d98a73-ec88-4aff-a7e8-7db6a87880e6 in service fd00:1122:3344:101::24 + crucible 2a455c35-eb3c-4c73-ab6c-d0a706e25316 in service fd00:1122:3344:101::29 + crucible 3eda924f-22a9-4f3e-9a1b-91d1c47601ab in service fd00:1122:3344:101::23 + crucible 587be699-a320-4c79-b320-128d9ecddc0b in service fd00:1122:3344:101::2b + crucible 6fa06115-4959-4913-8e7b-dd70d7651f07 in service fd00:1122:3344:101::2c + crucible 8f3a1cc5-9195-4a30-ad02-b804278fe639 in service fd00:1122:3344:101::28 + crucible a1696cd4-588c-484a-b95b-66e824c0ce05 in service fd00:1122:3344:101::25 + crucible a2079cbc-a69e-41a1-b1e0-fbcb972d03f6 in service fd00:1122:3344:101::2a + internal_ntp 08c7f8aa-1ea9-469b-8cac-2fdbfc11ebcb in service fd00:1122:3344:101::21 + nexus c66ab6d5-ff7a-46d1-9fd0-70cefa352d25 in service 
fd00:1122:3344:101::22 + + + sled 590e3034-d946-4166-b0e5-2d0034197a07: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 18f8fe40-646e-4962-b17a-20e201f3a6e5 in service fd00:1122:3344:102::2a + crucible 56d5d7cf-db2c-40a3-a775-003241ad4820 in service fd00:1122:3344:102::29 + crucible 6af7f4d6-33b6-4eb3-a146-d8e9e4ae9d66 in service fd00:1122:3344:102::2b + crucible 7a9f60d3-2b66-4547-9b63-7d4f7a8b6382 in service fd00:1122:3344:102::26 + crucible 93f2f40c-5616-4d8d-8519-ec6debdcede0 in service fd00:1122:3344:102::2c + crucible ab7ba6df-d401-40bd-940e-faf57c57aa2a in service fd00:1122:3344:102::28 + crucible af322036-371f-437c-8c08-7f40f3f1403b in service fd00:1122:3344:102::23 + crucible d637264f-6f40-44c2-8b7e-a179430210d2 in service fd00:1122:3344:102::25 + crucible dce226c9-7373-4bfa-8a94-79dc472857a6 in service fd00:1122:3344:102::27 + crucible edabedf3-839c-488d-ad6f-508ffa864674 in service fd00:1122:3344:102::24 + internal_ntp 47199d48-534c-4267-a654-d2d90e64b498 in service fd00:1122:3344:102::21 + nexus 704e1fed-f8d6-4cfa-a470-bad27fdc06d1 in service fd00:1122:3344:102::22 + + + MODIFIED SLEDS: + + sled b59ec570-2abb-4017-80ce-129d94e7a025: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones generation 2 -> 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + internal_ntp 2d73d30e-ca47-46a8-9c12-917d4ab824b6 in service fd00:1122:3344:104::21 ++ crucible 
1a20ee3c-f66e-4fca-ab85-2a248aa3d79d in service fd00:1122:3344:104::2b ++ crucible 28852beb-d0e5-4cba-9adb-e7f0cd4bb864 in service fd00:1122:3344:104::29 ++ crucible 45556184-7092-4a3d-873f-637976bb133b in service fd00:1122:3344:104::22 ++ crucible 8215bf7a-10d6-4f40-aeb7-27a196307c37 in service fd00:1122:3344:104::25 ++ crucible 9d75abfe-47ab-434a-93dd-af50dc0dddde in service fd00:1122:3344:104::23 ++ crucible a36d291c-7f68-462f-830e-bc29e5841ce2 in service fd00:1122:3344:104::27 ++ crucible b3a4d434-aaee-4752-8c99-69d88fbcb8c5 in service fd00:1122:3344:104::2a ++ crucible cf5b636b-a505-4db6-bc32-baf9f53f4371 in service fd00:1122:3344:104::28 ++ crucible f6125d45-b9cc-4721-ba60-ed4dbb177e41 in service fd00:1122:3344:104::26 ++ crucible f86e19d2-9145-41cf-be89-6aaa34a73873 in service fd00:1122:3344:104::24 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version: 1 (unchanged) + external DNS version: 1 (unchanged) + diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt new file mode 100644 index 0000000000..7219c300b7 --- /dev/null +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt @@ -0,0 +1,136 @@ +from: blueprint 516e80a3-b362-4fac-bd3c-4559717120dd +to: blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e + + UNCHANGED SLEDS: + + sled d67ce8f0-a691-4010-b414-420d82e80527: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 15dbaa30-1539-49d6-970d-ba5962960f33 in service fd00:1122:3344:101::27 + crucible 1ec4cc7b-2f00-4d13-8176-3b9815533ae9 in service fd00:1122:3344:101::24 + crucible 2e65b765-5c41-4519-bf4e-e2a68569afc1 in service fd00:1122:3344:101::23 + crucible 3d4143df-e212-4774-9258-7d9b421fac2e in service fd00:1122:3344:101::25 + crucible 5d9d8fa7-8379-470b-90ba-fe84a3c45512 in service fd00:1122:3344:101::2a + crucible 70232a6d-6c9d-4fa6-a34d-9c73d940db33 in service fd00:1122:3344:101::28 + crucible 8567a616-a709-4c8c-a323-4474675dad5c in service fd00:1122:3344:101::2c + crucible 8b0b8623-930a-41af-9f9b-ca28b1b11139 in service fd00:1122:3344:101::29 + crucible cf87d2a3-d323-44a3-a87e-adc4ef6c75f4 in service fd00:1122:3344:101::2b + crucible eac6c0a0-baa5-4490-9cee-65198b7fbd9c in service fd00:1122:3344:101::26 
+ internal_ntp ad76d200-5675-444b-b19c-684689ff421f in service fd00:1122:3344:101::21 + nexus e9bf2525-5fa0-4c1b-b52d-481225083845 in service fd00:1122:3344:101::22 + + + MODIFIED SLEDS: + + sled a1b477db-b629-48eb-911d-1ccdafca75b9: + + physical disks from generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- +- fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 +- fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 +- fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 +- fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 +- fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 +- fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 +- fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 +- fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c +- fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b +- fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones generation 2 -> 3: + ------------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------- +* crucible 1e1ed0cc-1adc-410f-943a-d1a3107de619 - in service fd00:1122:3344:103::27 + └─ + expunged +* crucible 2307bbed-02ba-493b-89e3-46585c74c8fc - in service fd00:1122:3344:103::28 + └─ + expunged +* crucible 4e36b7ef-5684-4304-b7c3-3c31aaf83d4f - in service fd00:1122:3344:103::23 + └─ + expunged +* crucible 603e629d-2599-400e-b879-4134d4cc426e - in service fd00:1122:3344:103::2c + └─ + expunged +* crucible 9179d6dc-387d-424e-8d62-ed59b2c728f6 - in service fd00:1122:3344:103::2a + └─ + expunged +* crucible c28d7b4b-a259-45ad-945d-f19ca3c6964c - in service fd00:1122:3344:103::29 + └─ + expunged +* crucible e29998e7-9ed2-46b6-bb70-4118159fe07f - in service fd00:1122:3344:103::26 + └─ + expunged +* crucible f06e91a1-0c17-4cca-adbc-1c9b67bdb11d - in service fd00:1122:3344:103::2b + └─ + expunged +* crucible f11f5c60-1ac7-4630-9a3a-a9bc85c75203 - in service fd00:1122:3344:103::25 + └─ + expunged +* crucible f231e4eb-3fc9-4964-9d71-2c41644852d9 - in service fd00:1122:3344:103::24 + └─ + expunged +* internal_ntp c62b87b6-b98d-4d22-ba4f-cee4499e2ba8 - in service fd00:1122:3344:103::21 + └─ + expunged +* nexus 6a70a233-1900-43c0-9c00-aa9d1f7adfbc - in service fd00:1122:3344:103::22 + └─ + expunged + + + sled fefcf4cf-f7e7-46b3-b629-058526ce440e: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model 
serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones generation 2 -> 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 0e2b035e-1de1-48af-8ac0-5316418e3de1 in service fd00:1122:3344:102::2a + crucible 4f8ce495-21dd-48a1-859c-80d34ce394ed in service fd00:1122:3344:102::23 + crucible 5c78756d-6182-4c27-a507-3419e8dbe76b in service fd00:1122:3344:102::28 + crucible a1ae92ac-e1f1-4654-ab54-5b75ba7c44d6 in service fd00:1122:3344:102::24 + crucible a308d3e1-118c-440a-947a-8b6ab7d833ab in service fd00:1122:3344:102::25 + crucible b7402110-d88f-4ca4-8391-4a2fda6ad271 in service fd00:1122:3344:102::29 + crucible b7ae596e-0c85-40b2-bb47-df9f76db3cca in service fd00:1122:3344:102::2b + crucible c552280f-ba02-4f8d-9049-bd269e6b7845 in service fd00:1122:3344:102::26 + crucible cf13b878-47f1-4ba0-b8c2-9f3e15f2ee87 in service fd00:1122:3344:102::2c + crucible e6d0df1f-9f98-4c5a-9540-8444d1185c7d in service fd00:1122:3344:102::27 + internal_ntp f68846ad-4619-4747-8293-a2b4aeeafc5b in service fd00:1122:3344:102::21 + nexus 99c6401d-9796-4ae1-bf0c-9a097cf21c33 in service fd00:1122:3344:102::22 ++ nexus c8851a11-a4f7-4b21-9281-6182fd15dc8d in service fd00:1122:3344:102::2d + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version: 1 (unchanged) + external DNS version: 1 (unchanged) + diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt new file mode 100644 index 0000000000..3ba829b1d2 --- /dev/null +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt @@ -0,0 +1,110 @@ +blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e +parent: 516e80a3-b362-4fac-bd3c-4559717120dd + + sled: d67ce8f0-a691-4010-b414-420d82e80527 + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 15dbaa30-1539-49d6-970d-ba5962960f33 in service fd00:1122:3344:101::27 + crucible 1ec4cc7b-2f00-4d13-8176-3b9815533ae9 in service fd00:1122:3344:101::24 + crucible 2e65b765-5c41-4519-bf4e-e2a68569afc1 in service fd00:1122:3344:101::23 + crucible 3d4143df-e212-4774-9258-7d9b421fac2e 
in service fd00:1122:3344:101::25 + crucible 5d9d8fa7-8379-470b-90ba-fe84a3c45512 in service fd00:1122:3344:101::2a + crucible 70232a6d-6c9d-4fa6-a34d-9c73d940db33 in service fd00:1122:3344:101::28 + crucible 8567a616-a709-4c8c-a323-4474675dad5c in service fd00:1122:3344:101::2c + crucible 8b0b8623-930a-41af-9f9b-ca28b1b11139 in service fd00:1122:3344:101::29 + crucible cf87d2a3-d323-44a3-a87e-adc4ef6c75f4 in service fd00:1122:3344:101::2b + crucible eac6c0a0-baa5-4490-9cee-65198b7fbd9c in service fd00:1122:3344:101::26 + internal_ntp ad76d200-5675-444b-b19c-684689ff421f in service fd00:1122:3344:101::21 + nexus e9bf2525-5fa0-4c1b-b52d-481225083845 in service fd00:1122:3344:101::22 + + + + sled: fefcf4cf-f7e7-46b3-b629-058526ce440e + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 0e2b035e-1de1-48af-8ac0-5316418e3de1 in service fd00:1122:3344:102::2a + crucible 4f8ce495-21dd-48a1-859c-80d34ce394ed in service fd00:1122:3344:102::23 + crucible 5c78756d-6182-4c27-a507-3419e8dbe76b in service fd00:1122:3344:102::28 + crucible a1ae92ac-e1f1-4654-ab54-5b75ba7c44d6 in service fd00:1122:3344:102::24 + crucible a308d3e1-118c-440a-947a-8b6ab7d833ab in service fd00:1122:3344:102::25 + crucible b7402110-d88f-4ca4-8391-4a2fda6ad271 in service fd00:1122:3344:102::29 + crucible b7ae596e-0c85-40b2-bb47-df9f76db3cca in service fd00:1122:3344:102::2b + crucible c552280f-ba02-4f8d-9049-bd269e6b7845 in service fd00:1122:3344:102::26 + crucible cf13b878-47f1-4ba0-b8c2-9f3e15f2ee87 in service fd00:1122:3344:102::2c + crucible e6d0df1f-9f98-4c5a-9540-8444d1185c7d in service fd00:1122:3344:102::27 + internal_ntp f68846ad-4619-4747-8293-a2b4aeeafc5b in service fd00:1122:3344:102::21 + nexus 99c6401d-9796-4ae1-bf0c-9a097cf21c33 in service fd00:1122:3344:102::22 + nexus c8851a11-a4f7-4b21-9281-6182fd15dc8d in service fd00:1122:3344:102::2d + + + +!a1b477db-b629-48eb-911d-1ccdafca75b9 +WARNING: Zones exist without physical disks! 
+ omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 1e1ed0cc-1adc-410f-943a-d1a3107de619 expunged fd00:1122:3344:103::27 + crucible 2307bbed-02ba-493b-89e3-46585c74c8fc expunged fd00:1122:3344:103::28 + crucible 4e36b7ef-5684-4304-b7c3-3c31aaf83d4f expunged fd00:1122:3344:103::23 + crucible 603e629d-2599-400e-b879-4134d4cc426e expunged fd00:1122:3344:103::2c + crucible 9179d6dc-387d-424e-8d62-ed59b2c728f6 expunged fd00:1122:3344:103::2a + crucible c28d7b4b-a259-45ad-945d-f19ca3c6964c expunged fd00:1122:3344:103::29 + crucible e29998e7-9ed2-46b6-bb70-4118159fe07f expunged fd00:1122:3344:103::26 + crucible f06e91a1-0c17-4cca-adbc-1c9b67bdb11d expunged fd00:1122:3344:103::2b + crucible f11f5c60-1ac7-4630-9a3a-a9bc85c75203 expunged fd00:1122:3344:103::25 + crucible f231e4eb-3fc9-4964-9d71-2c41644852d9 expunged fd00:1122:3344:103::24 + internal_ntp c62b87b6-b98d-4d22-ba4f-cee4499e2ba8 expunged fd00:1122:3344:103::21 + nexus 6a70a233-1900-43c0-9c00-aa9d1f7adfbc expunged fd00:1122:3344:103::22 + + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) + cluster.preserve_downgrade_option: (do not modify) + + METADATA: + created by::::::::::: test_blueprint2 + created at::::::::::: 1970-01-01T00:00:00.000Z + comment:::::::::::::: sled a1b477db-b629-48eb-911d-1ccdafca75b9 (sled policy is expunged): 12 zones expunged + internal DNS version: 1 + external DNS version: 1 + diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt index 17d3db6228..be2bf3c248 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt @@ -1,86 +1,213 @@ -diff blueprint 55502b1b-e255-438b-a16a-2680a4b5f962 blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 ---- blueprint 55502b1b-e255-438b-a16a-2680a4b5f962 -+++ blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 - sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9 - zone config generation 2 - 19fbc4f8-a683-4f22-8f5a-e74782b935be in service crucible [underlay IP fd00:1122:3344:105::26] (unchanged) - 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service crucible [underlay IP fd00:1122:3344:105::2c] (unchanged) - 6b53ab2e-d98c-485f-87a3-4d5df595390f in service crucible [underlay IP fd00:1122:3344:105::27] (unchanged) - 6dff7633-66bb-4924-a6ff-2c896e66964b in service nexus [underlay IP fd00:1122:3344:105::22] (unchanged) - 7f4e9f9f-08f8-4d14-885d-e977c05525ad in service internal_ntp [underlay IP fd00:1122:3344:105::21] (unchanged) - 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service crucible [underlay IP fd00:1122:3344:105::23] (unchanged) - 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service crucible [underlay IP fd00:1122:3344:105::25] (unchanged) - b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service crucible [underlay IP fd00:1122:3344:105::28] (unchanged) - c406da50-34b9-4bb4-a460-8f49875d2a6a in service crucible [underlay IP fd00:1122:3344:105::24] (unchanged) - d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service crucible [underlay IP fd00:1122:3344:105::2a] (unchanged) - e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service crucible [underlay IP fd00:1122:3344:105::2b] (unchanged) - f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service crucible [underlay IP 
fd00:1122:3344:105::29] (unchanged) - sled 48d95fef-bc9f-4f50-9a53-1e075836291d - zone config generation 2 - 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 in service crucible [underlay IP fd00:1122:3344:103::2c] (unchanged) - 0dcfdfc5-481e-4153-b97c-11cf02b648ea in service crucible [underlay IP fd00:1122:3344:103::25] (unchanged) - 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb in service nexus [underlay IP fd00:1122:3344:103::22] (unchanged) - 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f in service crucible [underlay IP fd00:1122:3344:103::27] (unchanged) - 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 in service crucible [underlay IP fd00:1122:3344:103::28] (unchanged) - 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb in service crucible [underlay IP fd00:1122:3344:103::24] (unchanged) - 67622d61-2df4-414d-aa0e-d1277265f405 in service crucible [underlay IP fd00:1122:3344:103::23] (unchanged) - 67d913e0-0005-4599-9b28-0abbf6cc2916 in service internal_ntp [underlay IP fd00:1122:3344:103::21] (unchanged) - b91b271d-8d80-4f49-99a0-34006ae86063 in service crucible [underlay IP fd00:1122:3344:103::2a] (unchanged) - d6ee1338-3127-43ec-9aaa-b973ccf05496 in service crucible [underlay IP fd00:1122:3344:103::26] (unchanged) - e39d7c9e-182b-48af-af87-58079d723583 in service crucible [underlay IP fd00:1122:3344:103::29] (unchanged) - f69f92a1-5007-4bb0-a85b-604dc217154b in service crucible [underlay IP fd00:1122:3344:103::2b] (unchanged) - sled 68d24ac5-f341-49ea-a92a-0381b52ab387 - zone config generation 2 - 01d58626-e1b0-480f-96be-ac784863c7dc in service nexus [underlay IP fd00:1122:3344:102::22] (unchanged) - 3b3c14b6-a8e2-4054-a577-8d96cb576230 in service crucible [underlay IP fd00:1122:3344:102::2c] (unchanged) - 47a87c6e-ef45-4d52-9a3e-69cdd96737cc in service crucible [underlay IP fd00:1122:3344:102::23] (unchanged) - 6464d025-4652-4948-919e-740bec5699b1 in service crucible [underlay IP fd00:1122:3344:102::24] (unchanged) - 6939ce48-b17c-4616-b176-8a419a7697be in service crucible [underlay IP fd00:1122:3344:102::29] (unchanged) - 878dfddd-3113-4197-a3ea-e0d4dbe9b476 in service crucible [underlay IP fd00:1122:3344:102::25] (unchanged) - 8d4d2b28-82bb-4e36-80da-1408d8c35d82 in service crucible [underlay IP fd00:1122:3344:102::2b] (unchanged) - 9fd52961-426f-4e62-a644-b70871103fca in service crucible [underlay IP fd00:1122:3344:102::26] (unchanged) - b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 in service crucible [underlay IP fd00:1122:3344:102::27] (unchanged) - b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 in service crucible [underlay IP fd00:1122:3344:102::28] (unchanged) - c407795c-6c8b-428e-8ab8-b962913c447f in service crucible [underlay IP fd00:1122:3344:102::2a] (unchanged) - f3f2e4f3-0985-4ef6-8336-ce479382d05d in service internal_ntp [underlay IP fd00:1122:3344:102::21] (unchanged) - sled 75bc286f-2b4b-482c-9431-59272af529da -- zone config generation 2 -+ zone config generation 3 - 15bb9def-69b8-4d2e-b04f-9fee1143387c in service crucible [underlay IP fd00:1122:3344:104::25] (unchanged) - 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service crucible [underlay IP fd00:1122:3344:104::2c] (unchanged) - 57b96d5c-b71e-43e4-8869-7d514003d00d in service internal_ntp [underlay IP fd00:1122:3344:104::21] (unchanged) - 621509d6-3772-4009-aca1-35eefd1098fb in service crucible [underlay IP fd00:1122:3344:104::28] (unchanged) - 85b8c68a-160d-461d-94dd-1baf175fa75c in service crucible [underlay IP fd00:1122:3344:104::2a] (unchanged) - 996d7570-b0df-46d5-aaa4-0c97697cf484 in service crucible [underlay IP fd00:1122:3344:104::26] (unchanged) - 
a732c489-d29a-4f75-b900-5966385943af in service crucible [underlay IP fd00:1122:3344:104::29] (unchanged) - b1783e95-9598-451d-b6ba-c50b52b428c3 in service crucible [underlay IP fd00:1122:3344:104::24] (unchanged) - b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service nexus [underlay IP fd00:1122:3344:104::22] (unchanged) - c6dd531e-2d1d-423b-acc8-358533dab78c in service crucible [underlay IP fd00:1122:3344:104::27] (unchanged) - e4b3e159-3dbe-48cb-8497-e3da92a90e5a in service crucible [underlay IP fd00:1122:3344:104::23] (unchanged) - f0ff59e8-4105-4980-a4bb-a1f4c58de1e3 in service crucible [underlay IP fd00:1122:3344:104::2b] (unchanged) -+ 2ec75441-3d7d-4b4b-9614-af03de5a3666 in service nexus [underlay IP fd00:1122:3344:104::2d] (added) -+ 3ca5292f-8a59-4475-bb72-0f43714d0fff in service nexus [underlay IP fd00:1122:3344:104::31] (added) -+ 508abd03-cbfe-4654-9a6d-7f15a1ad32e5 in service nexus [underlay IP fd00:1122:3344:104::2e] (added) -+ 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service nexus [underlay IP fd00:1122:3344:104::2f] (added) -+ 99f6d544-8599-4e2b-a55a-82d9e0034662 in service nexus [underlay IP fd00:1122:3344:104::30] (added) -+ c26b3bda-5561-44a1-a69f-22103fe209a1 in service nexus [underlay IP fd00:1122:3344:104::32] (added) - sled affab35f-600a-4109-8ea0-34a067a4e0bc -- zone config generation 2 -+ zone config generation 3 - 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service crucible [underlay IP fd00:1122:3344:101::27] (unchanged) - 15c103f0-ac63-423b-ba5d-1b5fcd563ba3 in service nexus [underlay IP fd00:1122:3344:101::22] (unchanged) - 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service crucible [underlay IP fd00:1122:3344:101::24] (unchanged) - 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service crucible [underlay IP fd00:1122:3344:101::29] (unchanged) - 72c5a909-077d-4ec1-a9d5-ae64ef9d716e in service crucible [underlay IP fd00:1122:3344:101::26] (unchanged) - 95482c25-1e7f-43e8-adf1-e3548a1b3ae0 in service crucible [underlay IP fd00:1122:3344:101::23] (unchanged) - a1c03689-fc62-4ea5-bb72-4d01f5138614 in service crucible [underlay IP fd00:1122:3344:101::2a] (unchanged) - a568e92e-4fbd-4b69-acd8-f16277073031 in service crucible [underlay IP fd00:1122:3344:101::2c] (unchanged) - bf79a56a-97af-4cc4-94a5-8b20d64c2cda in service crucible [underlay IP fd00:1122:3344:101::28] (unchanged) - c60379ba-4e30-4628-a79a-0ae509aef4c5 in service crucible [underlay IP fd00:1122:3344:101::25] (unchanged) - d47f4996-fac0-4657-bcea-01b1fee6404d in service crucible [underlay IP fd00:1122:3344:101::2b] (unchanged) - f1a7b9a7-fc6a-4b23-b829-045ff33117ff in service internal_ntp [underlay IP fd00:1122:3344:101::21] (unchanged) -+ 6f86d5cb-17d7-424b-9d4c-39f670532cbe in service nexus [underlay IP fd00:1122:3344:101::2e] (added) -+ 87c299eb-470e-4b6d-b8c7-6759694e66b6 in service nexus [underlay IP fd00:1122:3344:101::30] (added) -+ c72b7930-0580-4f00-93b9-8cba2c8d344e in service nexus [underlay IP fd00:1122:3344:101::2d] (added) -+ d0095508-bdb8-4faf-b091-964276a20b15 in service nexus [underlay IP fd00:1122:3344:101::31] (added) -+ ff422442-4b31-4ade-a11a-9e5a25f0404c in service nexus [underlay IP fd00:1122:3344:101::2f] (added) +from: blueprint 4d4e6c38-cd95-4c4e-8f45-6af4d686964b +to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 + + UNCHANGED SLEDS: + + sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor 
fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be in service fd00:1122:3344:105::26 + crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c + crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 + crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 + crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 + crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 + crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 + crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a + crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b + crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 + internal_ntp 7f4e9f9f-08f8-4d14-885d-e977c05525ad in service fd00:1122:3344:105::21 + nexus 6dff7633-66bb-4924-a6ff-2c896e66964b in service fd00:1122:3344:105::22 + + + MODIFIED SLEDS: + + sled 48d95fef-bc9f-4f50-9a53-1e075836291d: + + physical disks from generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- +- fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 +- fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 +- fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 +- fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 +- fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 +- fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 +- fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 +- fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c +- fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b +- fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones generation 2 -> 3: + ------------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------- +* crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 - in service fd00:1122:3344:103::2c + └─ + expunged +* crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea - in service fd00:1122:3344:103::25 + └─ + expunged +* crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f - in service fd00:1122:3344:103::27 + └─ + expunged +* crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 - in service 
fd00:1122:3344:103::28 + └─ + expunged +* crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb - in service fd00:1122:3344:103::24 + └─ + expunged +* crucible 67622d61-2df4-414d-aa0e-d1277265f405 - in service fd00:1122:3344:103::23 + └─ + expunged +* crucible b91b271d-8d80-4f49-99a0-34006ae86063 - in service fd00:1122:3344:103::2a + └─ + expunged +* crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 - in service fd00:1122:3344:103::26 + └─ + expunged +* crucible e39d7c9e-182b-48af-af87-58079d723583 - in service fd00:1122:3344:103::29 + └─ + expunged +* crucible f69f92a1-5007-4bb0-a85b-604dc217154b - in service fd00:1122:3344:103::2b + └─ + expunged +* internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 - in service fd00:1122:3344:103::21 + └─ + expunged +* nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb - in service fd00:1122:3344:103::22 + └─ + expunged + + + sled 68d24ac5-f341-49ea-a92a-0381b52ab387: + + physical disks from generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- +- fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 +- fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 +- fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 +- fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 +- fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 +- fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 +- fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 +- fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c +- fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b +- fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 expunged fd00:1122:3344:102::2c + crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc expunged fd00:1122:3344:102::23 + crucible 6464d025-4652-4948-919e-740bec5699b1 expunged fd00:1122:3344:102::24 + crucible 6939ce48-b17c-4616-b176-8a419a7697be expunged fd00:1122:3344:102::29 + crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 expunged fd00:1122:3344:102::25 + crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 expunged fd00:1122:3344:102::2b + crucible 9fd52961-426f-4e62-a644-b70871103fca expunged fd00:1122:3344:102::26 + crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 expunged fd00:1122:3344:102::27 + crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 expunged fd00:1122:3344:102::28 + crucible c407795c-6c8b-428e-8ab8-b962913c447f expunged fd00:1122:3344:102::2a + internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d expunged fd00:1122:3344:102::21 + nexus 01d58626-e1b0-480f-96be-ac784863c7dc expunged fd00:1122:3344:102::22 + + + sled 75bc286f-2b4b-482c-9431-59272af529da: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model 
serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones generation 2 -> 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 + crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c + crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 + crucible 85b8c68a-160d-461d-94dd-1baf175fa75c in service fd00:1122:3344:104::2a + crucible 996d7570-b0df-46d5-aaa4-0c97697cf484 in service fd00:1122:3344:104::26 + crucible a732c489-d29a-4f75-b900-5966385943af in service fd00:1122:3344:104::29 + crucible b1783e95-9598-451d-b6ba-c50b52b428c3 in service fd00:1122:3344:104::24 + crucible c6dd531e-2d1d-423b-acc8-358533dab78c in service fd00:1122:3344:104::27 + crucible e4b3e159-3dbe-48cb-8497-e3da92a90e5a in service fd00:1122:3344:104::23 + crucible f0ff59e8-4105-4980-a4bb-a1f4c58de1e3 in service fd00:1122:3344:104::2b + internal_ntp 57b96d5c-b71e-43e4-8869-7d514003d00d in service fd00:1122:3344:104::21 + nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 ++ nexus 2ec75441-3d7d-4b4b-9614-af03de5a3666 in service fd00:1122:3344:104::2d ++ nexus 508abd03-cbfe-4654-9a6d-7f15a1ad32e5 in service fd00:1122:3344:104::2e ++ nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f + + + sled affab35f-600a-4109-8ea0-34a067a4e0bc: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones generation 2 -> 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 + crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 + crucible 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 + crucible 72c5a909-077d-4ec1-a9d5-ae64ef9d716e in service fd00:1122:3344:101::26 + crucible 95482c25-1e7f-43e8-adf1-e3548a1b3ae0 in service 
fd00:1122:3344:101::23 + crucible a1c03689-fc62-4ea5-bb72-4d01f5138614 in service fd00:1122:3344:101::2a + crucible a568e92e-4fbd-4b69-acd8-f16277073031 in service fd00:1122:3344:101::2c + crucible bf79a56a-97af-4cc4-94a5-8b20d64c2cda in service fd00:1122:3344:101::28 + crucible c60379ba-4e30-4628-a79a-0ae509aef4c5 in service fd00:1122:3344:101::25 + crucible d47f4996-fac0-4657-bcea-01b1fee6404d in service fd00:1122:3344:101::2b + internal_ntp f1a7b9a7-fc6a-4b23-b829-045ff33117ff in service fd00:1122:3344:101::21 + nexus 15c103f0-ac63-423b-ba5d-1b5fcd563ba3 in service fd00:1122:3344:101::22 ++ nexus 3ca5292f-8a59-4475-bb72-0f43714d0fff in service fd00:1122:3344:101::2e ++ nexus 99f6d544-8599-4e2b-a55a-82d9e0034662 in service fd00:1122:3344:101::2d ++ nexus c26b3bda-5561-44a1-a69f-22103fe209a1 in service fd00:1122:3344:101::2f + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version: 1 (unchanged) + external DNS version: 1 (unchanged) + diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt new file mode 100644 index 0000000000..262bd14811 --- /dev/null +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt @@ -0,0 +1,225 @@ +from: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 +to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 + + UNCHANGED SLEDS: + + sled 75bc286f-2b4b-482c-9431-59272af529da: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 + crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c + crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 + crucible 85b8c68a-160d-461d-94dd-1baf175fa75c in service fd00:1122:3344:104::2a + crucible 996d7570-b0df-46d5-aaa4-0c97697cf484 in service fd00:1122:3344:104::26 + crucible a732c489-d29a-4f75-b900-5966385943af in service fd00:1122:3344:104::29 + crucible b1783e95-9598-451d-b6ba-c50b52b428c3 in service fd00:1122:3344:104::24 + crucible c6dd531e-2d1d-423b-acc8-358533dab78c in service fd00:1122:3344:104::27 + crucible e4b3e159-3dbe-48cb-8497-e3da92a90e5a in service fd00:1122:3344:104::23 + crucible f0ff59e8-4105-4980-a4bb-a1f4c58de1e3 in service 
fd00:1122:3344:104::2b + internal_ntp 57b96d5c-b71e-43e4-8869-7d514003d00d in service fd00:1122:3344:104::21 + nexus 2ec75441-3d7d-4b4b-9614-af03de5a3666 in service fd00:1122:3344:104::2d + nexus 508abd03-cbfe-4654-9a6d-7f15a1ad32e5 in service fd00:1122:3344:104::2e + nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f + nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 + + + sled affab35f-600a-4109-8ea0-34a067a4e0bc: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 + crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 + crucible 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 + crucible 72c5a909-077d-4ec1-a9d5-ae64ef9d716e in service fd00:1122:3344:101::26 + crucible 95482c25-1e7f-43e8-adf1-e3548a1b3ae0 in service fd00:1122:3344:101::23 + crucible a1c03689-fc62-4ea5-bb72-4d01f5138614 in service fd00:1122:3344:101::2a + crucible a568e92e-4fbd-4b69-acd8-f16277073031 in service fd00:1122:3344:101::2c + crucible bf79a56a-97af-4cc4-94a5-8b20d64c2cda in service fd00:1122:3344:101::28 + crucible c60379ba-4e30-4628-a79a-0ae509aef4c5 in service fd00:1122:3344:101::25 + crucible d47f4996-fac0-4657-bcea-01b1fee6404d in service fd00:1122:3344:101::2b + internal_ntp f1a7b9a7-fc6a-4b23-b829-045ff33117ff in service fd00:1122:3344:101::21 + nexus 15c103f0-ac63-423b-ba5d-1b5fcd563ba3 in service fd00:1122:3344:101::22 + nexus 3ca5292f-8a59-4475-bb72-0f43714d0fff in service fd00:1122:3344:101::2e + nexus 99f6d544-8599-4e2b-a55a-82d9e0034662 in service fd00:1122:3344:101::2d + nexus c26b3bda-5561-44a1-a69f-22103fe209a1 in service fd00:1122:3344:101::2f + + + REMOVED SLEDS: + + sled 68d24ac5-f341-49ea-a92a-0381b52ab387: + + omicron zones from generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ +- crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 expunged fd00:1122:3344:102::2c +- crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc expunged fd00:1122:3344:102::23 +- crucible 6464d025-4652-4948-919e-740bec5699b1 expunged fd00:1122:3344:102::24 +- crucible 6939ce48-b17c-4616-b176-8a419a7697be expunged fd00:1122:3344:102::29 +- crucible 
878dfddd-3113-4197-a3ea-e0d4dbe9b476 expunged fd00:1122:3344:102::25 +- crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 expunged fd00:1122:3344:102::2b +- crucible 9fd52961-426f-4e62-a644-b70871103fca expunged fd00:1122:3344:102::26 +- crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 expunged fd00:1122:3344:102::27 +- crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 expunged fd00:1122:3344:102::28 +- crucible c407795c-6c8b-428e-8ab8-b962913c447f expunged fd00:1122:3344:102::2a +- internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d expunged fd00:1122:3344:102::21 +- nexus 01d58626-e1b0-480f-96be-ac784863c7dc expunged fd00:1122:3344:102::22 + + + MODIFIED SLEDS: + + sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9: + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ---------------------------------------------------------------------------------------- + zone type zone id disposition underlay IP + ---------------------------------------------------------------------------------------- + crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in service fd00:1122:3344:105::27 + crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 + crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 + crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 + crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 + crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a + crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b + crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 +- crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c +* crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be - in service fd00:1122:3344:105::26 + └─ + quiesced + + + sled 48d95fef-bc9f-4f50-9a53-1e075836291d: + + omicron zones generation 3 -> 4: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ +- crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 expunged fd00:1122:3344:103::2c +- crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea expunged fd00:1122:3344:103::25 +- crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f expunged fd00:1122:3344:103::27 +- crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 expunged fd00:1122:3344:103::28 +- crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb expunged fd00:1122:3344:103::24 +- crucible 67622d61-2df4-414d-aa0e-d1277265f405 expunged fd00:1122:3344:103::23 +- crucible b91b271d-8d80-4f49-99a0-34006ae86063 
expunged fd00:1122:3344:103::2a +- crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 expunged fd00:1122:3344:103::26 +- crucible e39d7c9e-182b-48af-af87-58079d723583 expunged fd00:1122:3344:103::29 +- crucible f69f92a1-5007-4bb0-a85b-604dc217154b expunged fd00:1122:3344:103::2b +- internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 expunged fd00:1122:3344:103::21 +- nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb expunged fd00:1122:3344:103::22 + + +ERRORS: + + sled 2d1cb4f2-cf44-40fc-b118-85036eb732a9 + + zone diff errors: before gen 2, after gen 2 + + zone id: 6dff7633-66bb-4924-a6ff-2c896e66964b + reason: mismatched zone type: after: Nexus( + Nexus { + internal_address: [fd01:1122:3344:105::22]:12221, + external_ip: OmicronZoneExternalFloatingIp { + id: cd63774a-2e2f-49ce-a3df-33e3b5d02650 (external_ip), + ip: 192.0.2.2, + }, + nic: NetworkInterface { + id: 99402426-92dd-4975-9347-907e130d6b79, + kind: Service { + id: 6dff7633-66bb-4924-a6ff-2c896e66964b, + }, + name: Name( + "nexus-6dff7633-66bb-4924-a6ff-2c896e66964b", + ), + ip: 172.30.2.5, + mac: MacAddr( + MacAddr6( + [ + 168, + 64, + 37, + 255, + 128, + 0, + ], + ), + ), + subnet: V4( + Ipv4Net { + addr: 172.30.2.0, + width: 24, + }, + ), + vni: Vni( + 100, + ), + primary: true, + slot: 0, + }, + external_tls: false, + external_dns_servers: [], + }, +) + + zone id: 7f4e9f9f-08f8-4d14-885d-e977c05525ad + reason: mismatched underlay address: before: fd00:1122:3344:105::21, after: fd01:1122:3344:105::21 + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version: 1 (unchanged) +* external DNS version: 1 -> 2 + diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt new file mode 100644 index 0000000000..f7c0886dde --- /dev/null +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -0,0 +1,174 @@ +blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 +parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b + + sled: 2d1cb4f2-cf44-40fc-b118-85036eb732a9 + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 19fbc4f8-a683-4f22-8f5a-e74782b935be in service fd00:1122:3344:105::26 + crucible 4f1ce8a2-d3a5-4a38-be4c-9817de52db37 in service fd00:1122:3344:105::2c + crucible 6b53ab2e-d98c-485f-87a3-4d5df595390f in 
service fd00:1122:3344:105::27 + crucible 93b137a1-a1d6-4b5b-b2cb-21a9f11e2883 in service fd00:1122:3344:105::23 + crucible 9f0abbad-dbd3-4d43-9675-78092217ffd9 in service fd00:1122:3344:105::25 + crucible b0c63f48-01ea-4aae-bb26-fb0dd59d1662 in service fd00:1122:3344:105::28 + crucible c406da50-34b9-4bb4-a460-8f49875d2a6a in service fd00:1122:3344:105::24 + crucible d660d7ed-28c0-45ae-9ace-dc3ecf7e8786 in service fd00:1122:3344:105::2a + crucible e98cc0de-abf6-4da4-a20d-d05c7a9bb1d7 in service fd00:1122:3344:105::2b + crucible f55e6aaf-e8fc-4913-9e3c-8cd1bd4bdad3 in service fd00:1122:3344:105::29 + internal_ntp 7f4e9f9f-08f8-4d14-885d-e977c05525ad in service fd00:1122:3344:105::21 + nexus 6dff7633-66bb-4924-a6ff-2c896e66964b in service fd00:1122:3344:105::22 + + + + sled: 75bc286f-2b4b-482c-9431-59272af529da + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 15bb9def-69b8-4d2e-b04f-9fee1143387c in service fd00:1122:3344:104::25 + crucible 23a8fa2b-ef3e-4017-a43f-f7a83953bd7c in service fd00:1122:3344:104::2c + crucible 621509d6-3772-4009-aca1-35eefd1098fb in service fd00:1122:3344:104::28 + crucible 85b8c68a-160d-461d-94dd-1baf175fa75c in service fd00:1122:3344:104::2a + crucible 996d7570-b0df-46d5-aaa4-0c97697cf484 in service fd00:1122:3344:104::26 + crucible a732c489-d29a-4f75-b900-5966385943af in service fd00:1122:3344:104::29 + crucible b1783e95-9598-451d-b6ba-c50b52b428c3 in service fd00:1122:3344:104::24 + crucible c6dd531e-2d1d-423b-acc8-358533dab78c in service fd00:1122:3344:104::27 + crucible e4b3e159-3dbe-48cb-8497-e3da92a90e5a in service fd00:1122:3344:104::23 + crucible f0ff59e8-4105-4980-a4bb-a1f4c58de1e3 in service fd00:1122:3344:104::2b + internal_ntp 57b96d5c-b71e-43e4-8869-7d514003d00d in service fd00:1122:3344:104::21 + nexus 2ec75441-3d7d-4b4b-9614-af03de5a3666 in service fd00:1122:3344:104::2d + nexus 508abd03-cbfe-4654-9a6d-7f15a1ad32e5 in service fd00:1122:3344:104::2e + nexus 59950bc8-1497-44dd-8cbf-b6502ba921b2 in service fd00:1122:3344:104::2f + nexus b4947d31-f70e-4ee0-8817-0ca6cea9b16b in service fd00:1122:3344:104::22 + + + + sled: affab35f-600a-4109-8ea0-34a067a4e0bc + + physical disks at generation 1: + ---------------------------------------------------------------------- + vendor model serial + ---------------------------------------------------------------------- + fake-vendor fake-model serial-088f76ef-e985-41fd-8630-c321ed8cca37 + fake-vendor fake-model 
serial-30d0e693-dec4-402f-baa0-d6d9a93c98a7 + fake-vendor fake-model serial-32e90a17-7080-4c33-a94d-05f4bfb5d368 + fake-vendor fake-model serial-44473266-e28a-43fa-9314-c3416b8b3c14 + fake-vendor fake-model serial-53372ece-d666-4f5b-8f25-286e36242088 + fake-vendor fake-model serial-795061c9-db7b-404a-a2a3-0dad5fdfceb1 + fake-vendor fake-model serial-7b8bc126-4ff8-434f-a949-e98eda2709a5 + fake-vendor fake-model serial-b644318e-da11-46e1-b650-47a067e6024c + fake-vendor fake-model serial-bb2b397b-a3f5-4142-a433-4f2ab5fe284b + fake-vendor fake-model serial-bdbf1352-725d-4b17-98d5-4d7105726721 + + + omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 0dfbf374-9ef9-430f-b06d-f271bf7f84c4 in service fd00:1122:3344:101::27 + crucible 3aa07966-5899-4789-ace5-f8eeb375c6c3 in service fd00:1122:3344:101::24 + crucible 4ad0e9da-08f8-4d40-b4d3-d17e711b5bbf in service fd00:1122:3344:101::29 + crucible 72c5a909-077d-4ec1-a9d5-ae64ef9d716e in service fd00:1122:3344:101::26 + crucible 95482c25-1e7f-43e8-adf1-e3548a1b3ae0 in service fd00:1122:3344:101::23 + crucible a1c03689-fc62-4ea5-bb72-4d01f5138614 in service fd00:1122:3344:101::2a + crucible a568e92e-4fbd-4b69-acd8-f16277073031 in service fd00:1122:3344:101::2c + crucible bf79a56a-97af-4cc4-94a5-8b20d64c2cda in service fd00:1122:3344:101::28 + crucible c60379ba-4e30-4628-a79a-0ae509aef4c5 in service fd00:1122:3344:101::25 + crucible d47f4996-fac0-4657-bcea-01b1fee6404d in service fd00:1122:3344:101::2b + internal_ntp f1a7b9a7-fc6a-4b23-b829-045ff33117ff in service fd00:1122:3344:101::21 + nexus 15c103f0-ac63-423b-ba5d-1b5fcd563ba3 in service fd00:1122:3344:101::22 + nexus 3ca5292f-8a59-4475-bb72-0f43714d0fff in service fd00:1122:3344:101::2e + nexus 99f6d544-8599-4e2b-a55a-82d9e0034662 in service fd00:1122:3344:101::2d + nexus c26b3bda-5561-44a1-a69f-22103fe209a1 in service fd00:1122:3344:101::2f + + + +!48d95fef-bc9f-4f50-9a53-1e075836291d +WARNING: Zones exist without physical disks! + omicron zones at generation 3: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 094f27af-1acb-4d1e-ba97-1fc1377d4bf2 expunged fd00:1122:3344:103::2c + crucible 0dcfdfc5-481e-4153-b97c-11cf02b648ea expunged fd00:1122:3344:103::25 + crucible 2f5e8010-a94d-43a4-9c5c-3f52832f5f7f expunged fd00:1122:3344:103::27 + crucible 4a9a0a9d-87f0-4f1d-9181-27f6b435e637 expunged fd00:1122:3344:103::28 + crucible 56ac1706-9e2a-49ba-bd6f-a99c44cb2ccb expunged fd00:1122:3344:103::24 + crucible 67622d61-2df4-414d-aa0e-d1277265f405 expunged fd00:1122:3344:103::23 + crucible b91b271d-8d80-4f49-99a0-34006ae86063 expunged fd00:1122:3344:103::2a + crucible d6ee1338-3127-43ec-9aaa-b973ccf05496 expunged fd00:1122:3344:103::26 + crucible e39d7c9e-182b-48af-af87-58079d723583 expunged fd00:1122:3344:103::29 + crucible f69f92a1-5007-4bb0-a85b-604dc217154b expunged fd00:1122:3344:103::2b + internal_ntp 67d913e0-0005-4599-9b28-0abbf6cc2916 expunged fd00:1122:3344:103::21 + nexus 2aa0ea4f-3561-4989-a98c-9ab7d9a240fb expunged fd00:1122:3344:103::22 + + + + +!68d24ac5-f341-49ea-a92a-0381b52ab387 +WARNING: Zones exist without physical disks! 
+ omicron zones at generation 2: + ------------------------------------------------------------------------------------------ + zone type zone id disposition underlay IP + ------------------------------------------------------------------------------------------ + crucible 3b3c14b6-a8e2-4054-a577-8d96cb576230 expunged fd00:1122:3344:102::2c + crucible 47a87c6e-ef45-4d52-9a3e-69cdd96737cc expunged fd00:1122:3344:102::23 + crucible 6464d025-4652-4948-919e-740bec5699b1 expunged fd00:1122:3344:102::24 + crucible 6939ce48-b17c-4616-b176-8a419a7697be expunged fd00:1122:3344:102::29 + crucible 878dfddd-3113-4197-a3ea-e0d4dbe9b476 expunged fd00:1122:3344:102::25 + crucible 8d4d2b28-82bb-4e36-80da-1408d8c35d82 expunged fd00:1122:3344:102::2b + crucible 9fd52961-426f-4e62-a644-b70871103fca expunged fd00:1122:3344:102::26 + crucible b44cdbc0-0ce0-46eb-8b21-a09e113aa1d0 expunged fd00:1122:3344:102::27 + crucible b6b759d0-f60d-42b7-bbbc-9d61c9e895a9 expunged fd00:1122:3344:102::28 + crucible c407795c-6c8b-428e-8ab8-b962913c447f expunged fd00:1122:3344:102::2a + internal_ntp f3f2e4f3-0985-4ef6-8336-ce479382d05d expunged fd00:1122:3344:102::21 + nexus 01d58626-e1b0-480f-96be-ac784863c7dc expunged fd00:1122:3344:102::22 + + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) + cluster.preserve_downgrade_option: (do not modify) + + METADATA: + created by::::::::::: test_blueprint2 + created at::::::::::: 1970-01-01T00:00:00.000Z + comment:::::::::::::: sled 48d95fef-bc9f-4f50-9a53-1e075836291d (sled policy is expunged): 12 zones expunged + internal DNS version: 1 + external DNS version: 1 + diff --git a/nexus/reconfigurator/preparation/Cargo.toml b/nexus/reconfigurator/preparation/Cargo.toml index f95f9c4afe..d7a6d07a5d 100644 --- a/nexus/reconfigurator/preparation/Cargo.toml +++ b/nexus/reconfigurator/preparation/Cargo.toml @@ -3,10 +3,18 @@ name = "nexus-reconfigurator-preparation" version = "0.1.0" edition = "2021" +[lints] +workspace = true + [dependencies] -illumos-utils.workspace = true +anyhow.workspace = true +futures.workspace = true nexus-db-model.workspace = true +nexus-db-queries.workspace = true nexus-types.workspace = true omicron-common.workspace = true +omicron-uuid-kinds.workspace = true +slog.workspace = true +slog-error-chain.workspace = true omicron-workspace-hack.workspace = true diff --git a/nexus/reconfigurator/preparation/src/lib.rs b/nexus/reconfigurator/preparation/src/lib.rs index 77d4532023..24e9afddf8 100644 --- a/nexus/reconfigurator/preparation/src/lib.rs +++ b/nexus/reconfigurator/preparation/src/lib.rs @@ -4,71 +4,354 @@ //! 
Common facilities for assembling inputs to the planner +use anyhow::Context; +use futures::StreamExt; +use nexus_db_model::DnsGroup; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::datastore::DataStoreDnsTest; +use nexus_db_queries::db::datastore::DataStoreInventoryTest; +use nexus_db_queries::db::datastore::Discoverability; +use nexus_db_queries::db::datastore::SQL_BATCH_SIZE; +use nexus_db_queries::db::pagination::Paginator; +use nexus_db_queries::db::DataStore; +use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintMetadata; +use nexus_types::deployment::CockroachDbClusterVersion; +use nexus_types::deployment::CockroachDbSettings; +use nexus_types::deployment::OmicronZoneExternalIp; +use nexus_types::deployment::OmicronZoneNic; +use nexus_types::deployment::PlanningInput; +use nexus_types::deployment::PlanningInputBuilder; use nexus_types::deployment::Policy; +use nexus_types::deployment::SledDetails; +use nexus_types::deployment::SledDisk; +use nexus_types::deployment::SledFilter; use nexus_types::deployment::SledResources; -use nexus_types::deployment::ZpoolName; +use nexus_types::deployment::UnstableReconfiguratorState; use nexus_types::identity::Asset; +use nexus_types::identity::Resource; +use nexus_types::inventory::Collection; use omicron_common::address::IpRange; use omicron_common::address::Ipv6Subnet; +use omicron_common::address::NEXUS_REDUNDANCY; use omicron_common::address::SLED_PREFIX; use omicron_common::api::external::Error; +use omicron_common::api::external::LookupType; +use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::PhysicalDiskUuid; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::ZpoolUuid; +use slog::error; +use slog::Logger; +use slog_error_chain::InlineErrorChain; use std::collections::BTreeMap; use std::collections::BTreeSet; -use std::str::FromStr; /// Given various pieces of database state that go into the blueprint planning -/// process, produce a `Policy` object encapsulating what the planner needs to -/// generate a blueprint -pub fn policy_from_db( - sled_rows: &[nexus_db_model::Sled], - zpool_rows: &[nexus_db_model::Zpool], - ip_pool_range_rows: &[nexus_db_model::IpPoolRange], - target_nexus_zone_count: usize, -) -> Result { - let mut zpools_by_sled_id = { - let mut zpools = BTreeMap::new(); - for z in zpool_rows { - let sled_zpool_names = - zpools.entry(z.sled_id).or_insert_with(BTreeSet::new); - // It's unfortunate that Nexus knows how Sled Agent - // constructs zpool names, but there's not currently an - // alternative. 
- let zpool_name_generated = - illumos_utils::zpool::ZpoolName::new_external(z.id()) - .to_string(); - let zpool_name = ZpoolName::from_str(&zpool_name_generated) - .map_err(|e| { - Error::internal_error(&format!( - "unexpectedly failed to parse generated \ - zpool name: {}: {}", - zpool_name_generated, e - )) - })?; - sled_zpool_names.insert(zpool_name); - } - zpools - }; +/// process, produce a `PlanningInput` object encapsulating what the planner +/// needs to generate a blueprint +pub struct PlanningInputFromDb<'a> { + pub sled_rows: &'a [nexus_db_model::Sled], + pub zpool_rows: + &'a [(nexus_db_model::Zpool, nexus_db_model::PhysicalDisk)], + pub ip_pool_range_rows: &'a [nexus_db_model::IpPoolRange], + pub external_ip_rows: &'a [nexus_db_model::ExternalIp], + pub service_nic_rows: &'a [nexus_db_model::ServiceNetworkInterface], + pub target_nexus_zone_count: usize, + pub target_cockroachdb_cluster_version: CockroachDbClusterVersion, + pub internal_dns_version: nexus_db_model::Generation, + pub external_dns_version: nexus_db_model::Generation, + pub cockroachdb_settings: &'a CockroachDbSettings, + pub log: &'a Logger, +} - let sleds = sled_rows - .into_iter() - .map(|sled_row| { +impl PlanningInputFromDb<'_> { + pub fn build(&self) -> Result { + let service_ip_pool_ranges = + self.ip_pool_range_rows.iter().map(IpRange::from).collect(); + let policy = Policy { + service_ip_pool_ranges, + target_nexus_zone_count: self.target_nexus_zone_count, + target_cockroachdb_cluster_version: self + .target_cockroachdb_cluster_version, + }; + let mut builder = PlanningInputBuilder::new( + policy, + self.internal_dns_version.into(), + self.external_dns_version.into(), + self.cockroachdb_settings.clone(), + ); + + let mut zpools_by_sled_id = { + let mut zpools = BTreeMap::new(); + for (zpool, disk) in self.zpool_rows { + let sled_zpool_names = + zpools.entry(zpool.sled_id).or_insert_with(BTreeMap::new); + let zpool_id = ZpoolUuid::from_untyped_uuid(zpool.id()); + let disk = SledDisk { + disk_identity: DiskIdentity { + vendor: disk.vendor.clone(), + serial: disk.serial.clone(), + model: disk.model.clone(), + }, + disk_id: PhysicalDiskUuid::from_untyped_uuid(disk.id()), + policy: disk.disk_policy.into(), + state: disk.disk_state.into(), + }; + + sled_zpool_names.insert(zpool_id, disk); + } + zpools + }; + + for sled_row in self.sled_rows { let sled_id = sled_row.id(); let subnet = Ipv6Subnet::::new(sled_row.ip()); let zpools = zpools_by_sled_id .remove(&sled_id) - .unwrap_or_else(BTreeSet::new); - let sled_info = SledResources { + .unwrap_or_else(BTreeMap::new); + let sled_details = SledDetails { policy: sled_row.policy(), state: sled_row.state().into(), - subnet, - zpools, + resources: SledResources { subnet, zpools }, + }; + // TODO-cleanup use `TypedUuid` everywhere + let sled_id = SledUuid::from_untyped_uuid(sled_id); + builder.add_sled(sled_id, sled_details).map_err(|e| { + Error::internal_error(&format!( + "unexpectedly failed to add sled to planning input: {e}" + )) + })?; + } + + for external_ip_row in + self.external_ip_rows.iter().filter(|r| r.is_service) + { + let Some(zone_id) = external_ip_row.parent_id else { + error!( + self.log, + "internal database consistency error: service external IP \ + is missing parent_id (should be the Omicron zone ID)"; + "ip_row" => ?external_ip_row, + ); + continue; }; - (sled_id, sled_info) + + let zone_id = OmicronZoneUuid::from_untyped_uuid(zone_id); + + let external_ip = OmicronZoneExternalIp::try_from(external_ip_row) + .map_err(|e| { + 
Error::internal_error(&format!( + "invalid database IP record for \ + Omicron zone {zone_id}: {}", + InlineErrorChain::new(&e) + )) + })?; + + builder + .add_omicron_zone_external_ip(zone_id, external_ip) + .map_err(|e| { + Error::internal_error(&format!( + "unexpectedly failed to add external IP \ + to planning input: {e}" + )) + })?; + } + + for nic_row in self.service_nic_rows { + let zone_id = + OmicronZoneUuid::from_untyped_uuid(nic_row.service_id); + let nic = OmicronZoneNic::try_from(nic_row).map_err(|e| { + Error::internal_error(&format!( + "invalid Omicron zone NIC read from database: {e}" + )) + })?; + builder.add_omicron_zone_nic(zone_id, nic).map_err(|e| { + Error::internal_error(&format!( + "unexpectedly failed to add Omicron zone NIC \ + to planning input: {e}" + )) + })?; + } + + Ok(builder.build()) + } +} + +/// Loads state for import into `reconfigurator-cli` +/// +/// This is only to be used in omdb or tests. +pub async fn reconfigurator_state_load( + opctx: &OpContext, + datastore: &DataStore, +) -> Result { + opctx.check_complex_operations_allowed()?; + let sled_rows = datastore + .sled_list_all_batched(opctx, SledFilter::Commissioned) + .await + .context("listing sleds")?; + let zpool_rows = datastore + .zpool_list_all_external_batched(opctx) + .await + .context("listing zpools")?; + let ip_pool_range_rows = { + let (authz_service_ip_pool, _) = datastore + .ip_pools_service_lookup(opctx) + .await + .context("fetching IP services pool")?; + datastore + .ip_pool_list_ranges_batched(opctx, &authz_service_ip_pool) + .await + .context("listing services IP pool ranges")? + }; + let external_ip_rows = datastore + .external_ip_list_service_all_batched(opctx) + .await + .context("fetching service external IPs")?; + let service_nic_rows = datastore + .service_network_interfaces_all_list_batched(opctx) + .await + .context("fetching service NICs")?; + let internal_dns_version = datastore + .dns_group_latest_version(opctx, DnsGroup::Internal) + .await + .context("fetching internal DNS version")? + .version; + let external_dns_version = datastore + .dns_group_latest_version(opctx, DnsGroup::External) + .await + .context("fetching external DNS version")? + .version; + let cockroachdb_settings = datastore + .cockroachdb_settings(opctx) + .await + .context("fetching cockroachdb settings")?; + + let planning_input = PlanningInputFromDb { + sled_rows: &sled_rows, + zpool_rows: &zpool_rows, + ip_pool_range_rows: &ip_pool_range_rows, + target_nexus_zone_count: NEXUS_REDUNDANCY, + target_cockroachdb_cluster_version: CockroachDbClusterVersion::POLICY, + external_ip_rows: &external_ip_rows, + service_nic_rows: &service_nic_rows, + log: &opctx.log, + internal_dns_version, + external_dns_version, + cockroachdb_settings: &cockroachdb_settings, + } + .build() + .context("assembling planning_input")?; + + let collection_ids = datastore + .inventory_collections() + .await + .context("listing collections")? + .into_iter() + .map(|c| c.id()); + let collections = futures::stream::iter(collection_ids) + .filter_map(|id| async move { + let read = datastore + .inventory_collection_read(opctx, id) + .await + .with_context(|| format!("reading collection {}", id)); + // It's not necessarily a problem if we failed to read a collection. + // They can be removed since we fetched the list. 
+ read.ok() }) - .collect(); + .collect::>() + .await; - let service_ip_pool_ranges = - ip_pool_range_rows.iter().map(IpRange::from).collect(); + let mut blueprint_ids = Vec::new(); + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let batch = datastore + .blueprints_list(opctx, &p.current_pagparams()) + .await + .context("listing blueprints")?; + paginator = + p.found_batch(&blueprint_ids, &|b: &BlueprintMetadata| b.id); + blueprint_ids.extend(batch.into_iter()); + } - Ok(Policy { sleds, service_ip_pool_ranges, target_nexus_zone_count }) + let blueprints = futures::stream::iter(blueprint_ids) + .filter_map(|bpm| async move { + let blueprint_id = bpm.id; + let read = datastore + .blueprint_read( + opctx, + &nexus_db_queries::authz::Blueprint::new( + nexus_db_queries::authz::FLEET, + blueprint_id, + LookupType::ById(blueprint_id), + ), + ) + .await + .with_context(|| format!("reading blueprint {}", blueprint_id)); + // It's not necessarily a problem if we failed to read a blueprint. + // They can be removed since we fetched the list. + read.ok() + }) + .collect::>() + .await; + + // It's also useful to include information about any DNS generations + // mentioned in any blueprints. + let blueprints_list = &blueprints; + let fetch_dns_group = |dns_group: DnsGroup| async move { + let latest_version = datastore + .dns_group_latest_version(&opctx, dns_group) + .await + .with_context(|| { + format!("reading latest {:?} version", dns_group) + })?; + let dns_generations_needed: BTreeSet<_> = blueprints_list + .iter() + .map(|blueprint| match dns_group { + DnsGroup::Internal => blueprint.internal_dns_version, + DnsGroup::External => blueprint.external_dns_version, + }) + .chain(std::iter::once(*latest_version.version)) + .collect(); + let mut rv = BTreeMap::new(); + for gen in dns_generations_needed { + let config = datastore + .dns_config_read_version(&opctx, dns_group, gen) + .await + .with_context(|| { + format!("reading {:?} DNS version {}", dns_group, gen) + })?; + rv.insert(gen, config); + } + + Ok::, anyhow::Error>(rv) + }; + + let internal_dns = fetch_dns_group(DnsGroup::Internal).await?; + let external_dns = fetch_dns_group(DnsGroup::External).await?; + let silo_names = datastore + .silo_list_all_batched(&opctx, Discoverability::All) + .await + .context("listing all Silos")? + .into_iter() + .map(|s| s.name().clone()) + .collect(); + let external_dns_zone_names = datastore + .dns_zones_list_all(&opctx, DnsGroup::External) + .await + .context("listing external DNS zone names")? + .into_iter() + .map(|dns_zone| dns_zone.zone_name) + .collect(); + Ok(UnstableReconfiguratorState { + planning_input, + collections, + blueprints, + internal_dns, + external_dns, + silo_names, + external_dns_zone_names, + }) } diff --git a/nexus/src/app/allow_list.rs b/nexus/src/app/allow_list.rs new file mode 100644 index 0000000000..d25400a512 --- /dev/null +++ b/nexus/src/app/allow_list.rs @@ -0,0 +1,175 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Copyright 2024 Oxide Computer Company + +//! Nexus methods for operating on source IP allowlists. 
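The upsert path below enforces a handful of guardrails before persisting anything. As a minimal sketch of those rules over plain std types — the `Entry` type and its `contains` stub are hypothetical stand-ins for the real allowlist entry type, and the error text is abbreviated:

use std::net::IpAddr;

// Hypothetical stand-in for a real allowlist entry (an IP subnet).
struct Entry {
    addr: IpAddr,
    width: u8,
}

impl Entry {
    fn contains(&self, _ip: IpAddr) -> bool {
        // Real CIDR membership test elided in this sketch.
        false
    }
}

// Guardrails applied before an allowlist is persisted: a size cap, no
// unspecified addresses, no /0 entries, and (unless the request arrived via
// the techport or internal server) the caller's own address must remain
// covered so the operator cannot lock themselves out.
fn validate(
    list: &[Entry],
    remote_addr: IpAddr,
    check_remote_addr: bool,
) -> Result<(), String> {
    const MAX_ALLOWLIST_LENGTH: usize = 1000;
    if list.len() > MAX_ALLOWLIST_LENGTH {
        return Err(format!(
            "allowlist is limited to {MAX_ALLOWLIST_LENGTH} entries"
        ));
    }
    let mut contains_remote = false;
    for entry in list {
        contains_remote |= entry.contains(remote_addr);
        if entry.addr.is_unspecified() {
            return Err("allowlist may not contain the unspecified address".into());
        }
        if entry.width == 0 {
            return Err("allowlist entries may not have a netmask of /0".into());
        }
    }
    if check_remote_addr && !contains_remote {
        return Err("allowlist would block the current client".into());
    }
    Ok(())
}

In the real handler below, `check_remote_addr` is derived from the `ServerKind` that received the request, so requests proxied in through wicketd can still recover from a lockout.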
+ +use nexus_db_queries::context::OpContext; +use nexus_types::external_api::params; +use nexus_types::external_api::views::AllowList; +use omicron_common::api::external; +use omicron_common::api::external::Error; +use std::net::IpAddr; + +use crate::context::ServerKind; + +impl super::Nexus { + /// Fetch the allowlist of source IPs that can reach user-facing services. + pub async fn allow_list_view( + &self, + opctx: &OpContext, + ) -> Result<AllowList, Error> { + self.db_datastore + .allow_list_view(opctx) + .await + .and_then(AllowList::try_from) + } + + /// Upsert the allowlist of source IPs that can reach user-facing services. + pub async fn allow_list_upsert( + &self, + opctx: &OpContext, + remote_addr: IpAddr, + server_kind: ServerKind, + params: params::AllowListUpdate, + ) -> Result<AllowList, Error> { + if let external::AllowedSourceIps::List(list) = &params.allowed_ips { + // Size limits on the allowlist. + const MAX_ALLOWLIST_LENGTH: usize = 1000; + if list.len() > MAX_ALLOWLIST_LENGTH { + let message = format!( + "Source IP allowlist is limited to {} entries, found {}", + MAX_ALLOWLIST_LENGTH, + list.len(), + ); + return Err(Error::invalid_request(message)); + } + + // Some basic sanity-checks on the addresses in the allowlist. + // + // The most important part here is checking that the source address + // the request came from is on the allowlist. This is our only real + // guardrail against accidentally cutting off all future access to + // the rack! + // + // Note that we elide this check when handling a request proxied + // from `wicketd`. This is intentional and used as a safety + // mechanism in the event of lockout or other recovery scenarios. + let check_remote_addr = match server_kind { + ServerKind::External => true, + ServerKind::Techport | ServerKind::Internal => false, + }; + let mut contains_remote = false; + for entry in list.iter() { + contains_remote |= entry.contains(remote_addr); + if entry.addr().is_unspecified() { + return Err(Error::invalid_request( + "Source IP allowlist may not contain the \ + unspecified address. Use \"any\" to allow \ + any source to connect to user-facing services.", + )); + } + if entry.width() == 0 { + return Err(Error::invalid_request( + "Source IP allowlist entries may not have \ + a netmask of /0.", + )); + } + } + if check_remote_addr && !contains_remote { + return Err(Error::invalid_request( + "The source IP allow list would prevent access \ + from the current client! Ensure that the allowlist \ + contains an entry that continues to allow access \ + from this peer.", + )); + } + }; + + // Actually insert the new allowlist. + let list = self + .db_datastore + .allow_list_upsert(opctx, params.allowed_ips.clone()) + .await + .and_then(AllowList::try_from)?; + + // Notify the sled-agents of the updated firewall rules. + // + // Importantly, we need to use a different `opctx` from that we're + // passed in here. This call requires access to Oxide-internal data + // around our VPC, and so we must use a context that's authorized for + // that. + // + // TODO-debugging: It's unfortunate that we're using this new logger, + // since that means we lose things like the original actor and request + // ID. It would be great if we could insert additional key-value pairs + // into the logger itself here, or "merge" the two in some other way.
+ info!( + opctx.log, + "updated user-facing services allow list, switching to \ + internal opcontext to plumb rules to sled-agents"; + "new_allowlist" => ?params.allowed_ips, + ); + let new_opctx = self.opctx_for_internal_api(); + match nexus_networking::plumb_service_firewall_rules( + self.datastore(), + &new_opctx, + &[], + &new_opctx, + &new_opctx.log, + ) + .await + { + Ok(_) => { + info!(self.log, "plumbed updated IP allowlist to sled-agents"); + Ok(list) + } + Err(e) => { + error!( + self.log, + "failed to update sled-agents with new allowlist"; + "error" => ?e + ); + let message = "Failed to plumb allowlist as firewall rules \ + to relevant sled agents. The request must be retried for them \ + to take effect."; + Err(Error::unavail(message)) + } + } + } + + /// Wait until we've applied the user-facing services allowlist. + /// + /// This will block until we've plumbed this allowlist and passed it to the + /// sled-agents responsible. This should only be called from + /// rack-initialization handling. + pub(crate) async fn await_ip_allowlist_plumbing(&self) { + let opctx = self.opctx_for_internal_api(); + loop { + match nexus_networking::plumb_service_firewall_rules( + self.datastore(), + &opctx, + &[], + &opctx, + &opctx.log, + ) + .await + { + Ok(_) => { + info!(self.log, "plumbed initial IP allowlist"); + return; + } + Err(e) => { + error!( + self.log, + "failed to plumb initial IP allowlist"; + "error" => ?e + ); + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + } + } + } + } +} diff --git a/nexus/src/app/background/abandoned_vmm_reaper.rs b/nexus/src/app/background/abandoned_vmm_reaper.rs new file mode 100644 index 0000000000..b24c543575 --- /dev/null +++ b/nexus/src/app/background/abandoned_vmm_reaper.rs @@ -0,0 +1,467 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Ensures abandoned VMMs are fully destroyed. +//! +//! A VMM is considered "abandoned" if (and only if): +//! +//! - It is in the `Destroyed` state. +//! - It is not currently running an instance, and it is also not the +//! migration target of any instance (i.e. it is not pointed to by +//! any instance record's `active_propolis_id` and `target_propolis_id` +//! fields). +//! - It has not been deleted yet. +//! +//! VMMs are abandoned when the instance they are responsible for migrates. +//! Should the migration succeed, the previously occupied VMM process is now +//! abandoned. If a migration is attempted but fails, the *target* VMM is now +//! abandoned, as the instance remains on the source VMM. +//! +//! Such VMMs may be deleted fairly simply: any sled resources reserved for the +//! VMM process can be deallocated, and the VMM record in the database is then +//! marked as deleted. Note that reaping abandoned VMMs does not require +//! deallocating virtual provisioning resources, NAT entries, and other such +//! resources which are owned by the *instance*, rather than the VMM process; +//! this task is only responsible for cleaning up VMMs left behind by an +//! instance that has moved to *another* VMM process. The instance itself +//! remains alive and continues to own its virtual provisioning resources. +//! +//! Cleanup of instance resources when an instance's *active* VMM is destroyed +//! is handled elsewhere, by `notify_instance_updated` and (eventually) the +//! `instance-update` saga. 
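Concretely, the reaper applies a two-step, idempotent cleanup to each abandoned VMM it finds. A minimal sketch of that shape, using a hypothetical `Cleanup` trait in place of the real `DataStore` (its two methods mirror the `sled_reservation_delete` and `vmm_mark_deleted` calls used below):

// Hypothetical stand-in for the datastore operations the reaper relies on.
trait Cleanup {
    // Release any sled resources reserved for this VMM process.
    fn delete_sled_reservation(&self, vmm_id: &str) -> Result<(), String>;
    // Soft-delete the VMM record. Ok(true) means this call deleted it;
    // Ok(false) means it was already gone (e.g. another Nexus raced us).
    fn mark_vmm_deleted(&self, vmm_id: &str) -> Result<bool, String>;
}

#[derive(Debug, Default)]
struct Counts {
    sled_reservations_deleted: usize,
    vmms_deleted: usize,
    vmms_already_deleted: usize,
    errors: usize,
}

// Clean up one abandoned VMM. Failures are counted rather than aborted on,
// so a single bad record does not block reaping the rest of the batch.
fn reap_one(db: &impl Cleanup, vmm_id: &str, counts: &mut Counts) {
    match db.delete_sled_reservation(vmm_id) {
        Ok(()) => counts.sled_reservations_deleted += 1,
        Err(_) => counts.errors += 1,
    }
    match db.mark_vmm_deleted(vmm_id) {
        Ok(true) => counts.vmms_deleted += 1,
        Ok(false) => counts.vmms_already_deleted += 1,
        Err(_) => counts.errors += 1,
    }
}

Treating "already deleted" as success rather than an error is what lets multiple Nexus instances race on the same VMM safely; the tests at the end of this module exercise exactly that case.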
+ +use super::common::BackgroundTask; +use anyhow::Context; +use futures::future::BoxFuture; +use futures::FutureExt; +use nexus_db_model::Vmm; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::pagination::Paginator; +use nexus_db_queries::db::DataStore; +use std::num::NonZeroU32; +use std::sync::Arc; + +/// Background task that searches for abandoned VMM records and deletes them. +pub struct AbandonedVmmReaper { + datastore: Arc, +} + +#[derive(Debug, Default)] +struct ActivationResults { + found: usize, + sled_reservations_deleted: usize, + vmms_deleted: usize, + vmms_already_deleted: usize, + error_count: usize, +} + +const MAX_BATCH: NonZeroU32 = unsafe { + // Safety: last time I checked, 100 was greater than zero. + NonZeroU32::new_unchecked(100) +}; + +impl AbandonedVmmReaper { + pub fn new(datastore: Arc) -> Self { + Self { datastore } + } + + /// List abandoned VMMs and clean up all of their database records. + async fn reap_all( + &mut self, + results: &mut ActivationResults, + opctx: &OpContext, + ) -> Result<(), anyhow::Error> { + slog::info!(opctx.log, "Abandoned VMM reaper running"); + + let mut paginator = Paginator::new(MAX_BATCH); + let mut last_err = Ok(()); + while let Some(p) = paginator.next() { + let vmms = self + .datastore + .vmm_list_abandoned(opctx, &p.current_pagparams()) + .await + .context("failed to list abandoned VMMs")?; + paginator = p.found_batch(&vmms, &|vmm| vmm.id); + self.reap_batch(results, &mut last_err, opctx, &vmms).await; + } + + last_err + } + + /// Clean up a batch of abandoned VMMs. + /// + /// This is separated out from `reap_all` to facilitate testing situations + /// where we race with another Nexus instance to delete an abandoned VMM. In + /// order to deterministically simulate such cases, we have to perform the + /// query to list abandoned VMMs, ensure that the VMM record is deleted, and + /// *then* perform the cleanup with the stale list of abandoned VMMs, rather + /// than doing it all in one go. Thus, this is factored out. + async fn reap_batch( + &mut self, + results: &mut ActivationResults, + last_err: &mut Result<(), anyhow::Error>, + opctx: &OpContext, + vmms: &[Vmm], + ) { + results.found += vmms.len(); + slog::debug!(opctx.log, "Found abandoned VMMs"; "count" => vmms.len()); + + for vmm in vmms { + let vmm_id = vmm.id; + slog::trace!(opctx.log, "Deleting abandoned VMM"; "vmm" => %vmm_id); + // Attempt to remove the abandoned VMM's sled resource reservation. + match self.datastore.sled_reservation_delete(opctx, vmm_id).await { + Ok(_) => { + slog::trace!( + opctx.log, + "Deleted abandoned VMM's sled reservation"; + "vmm" => %vmm_id, + ); + results.sled_reservations_deleted += 1; + } + Err(e) => { + slog::warn!( + opctx.log, + "Failed to delete sled reservation for abandoned VMM"; + "vmm" => %vmm_id, + "error" => %e, + ); + results.error_count += 1; + *last_err = Err(e).with_context(|| { + format!( + "failed to delete sled reservation for VMM {vmm_id}" + ) + }); + } + } + + // Now, attempt to mark the VMM record as deleted. 
+ match self.datastore.vmm_mark_deleted(opctx, &vmm_id).await { + Ok(true) => { + slog::trace!( + opctx.log, + "Deleted abandoned VMM"; + "vmm" => %vmm_id, + ); + results.vmms_deleted += 1; + } + Ok(false) => { + slog::trace!( + opctx.log, + "Abandoned VMM was already deleted"; + "vmm" => %vmm_id, + ); + results.vmms_already_deleted += 1; + } + Err(e) => { + slog::warn!( + opctx.log, + "Failed to mark abandoned VMM as deleted"; + "vmm" => %vmm_id, + "error" => %e, + ); + results.error_count += 1; + *last_err = Err(e).with_context(|| { + format!("failed to mark VMM {vmm_id} as deleted") + }); + } + } + } + } +} + +impl BackgroundTask for AbandonedVmmReaper { + fn activate<'a>( + &'a mut self, + opctx: &'a OpContext, + ) -> BoxFuture<'a, serde_json::Value> { + async move { + let mut results = ActivationResults::default(); + let error = match self.reap_all(&mut results, opctx).await { + Ok(_) => { + slog::info!(opctx.log, "Abandoned VMMs reaped"; + "found" => results.found, + "sled_reservations_deleted" => results.sled_reservations_deleted, + "vmms_deleted" => results.vmms_deleted, + "vmms_already_deleted" => results.vmms_already_deleted, + ); + None + } + Err(err) => { + slog::error!(opctx.log, "Abandoned VMM reaper activation failed"; + "error" => %err, + "found" => results.found, + "sled_reservations_deleted" => results.sled_reservations_deleted, + "vmms_deleted" => results.vmms_deleted, + "vmms_already_deleted" => results.vmms_already_deleted, + ); + Some(err.to_string()) + } + }; + serde_json::json!({ + "found": results.found, + "vmms_deleted": results.vmms_deleted, + "vmms_already_deleted": results.vmms_already_deleted, + "sled_reservations_deleted": results.sled_reservations_deleted, + "error_count": results.error_count, + "error": error, + }) + } + .boxed() + } +} + +#[cfg(test)] +mod tests { + + use super::*; + use chrono::Utc; + use nexus_db_model::ByteCount; + use nexus_db_model::Generation; + use nexus_db_model::InstanceState; + use nexus_db_model::Resources; + use nexus_db_model::SledResource; + use nexus_db_model::SledResourceKind; + use nexus_db_model::Vmm; + use nexus_db_model::VmmRuntimeState; + use nexus_test_utils::resource_helpers; + use nexus_test_utils_macros::nexus_test; + use omicron_common::api::external::InstanceState as ApiInstanceState; + use uuid::Uuid; + + type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + + const PROJECT_NAME: &str = "carcosa"; + + struct TestFixture { + destroyed_vmm_id: Uuid, + } + + impl TestFixture { + async fn setup( + client: &dropshot::test_util::ClientTestContext, + datastore: &Arc, + opctx: &OpContext, + ) -> Self { + resource_helpers::create_default_ip_pool(&client).await; + + let _project = + resource_helpers::create_project(client, PROJECT_NAME).await; + let instance = resource_helpers::create_instance( + client, + PROJECT_NAME, + "cassilda", + ) + .await; + + let destroyed_vmm_id = Uuid::new_v4(); + datastore + .vmm_insert( + &opctx, + dbg!(Vmm { + id: destroyed_vmm_id, + time_created: Utc::now(), + time_deleted: None, + instance_id: instance.identity.id, + sled_id: Uuid::new_v4(), + propolis_ip: "::1".parse().unwrap(), + propolis_port: 12345.into(), + runtime: VmmRuntimeState { + state: InstanceState::new( + ApiInstanceState::Destroyed + ), + time_state_updated: Utc::now(), + gen: Generation::new(), + } + }), + ) + .await + .expect("destroyed vmm record should be created successfully"); + let resources = Resources::new( + 1, + // Just require the bare non-zero amount of RAM. 
+ ByteCount::try_from(1024).unwrap(), + ByteCount::try_from(1024).unwrap(), + ); + let constraints = + nexus_db_model::SledReservationConstraints::none(); + dbg!(datastore + .sled_reservation_create( + &opctx, + destroyed_vmm_id, + SledResourceKind::Instance, + resources.clone(), + constraints, + ) + .await + .expect("sled reservation should be created successfully")); + Self { destroyed_vmm_id } + } + + async fn assert_reaped(&self, datastore: &DataStore) { + use async_bb8_diesel::AsyncRunQueryDsl; + use diesel::{ + ExpressionMethods, OptionalExtension, QueryDsl, + SelectableHelper, + }; + use nexus_db_queries::db::schema::sled_resource::dsl as sled_resource_dsl; + use nexus_db_queries::db::schema::vmm::dsl as vmm_dsl; + + let conn = datastore.pool_connection_for_tests().await.unwrap(); + let fetched_vmm = vmm_dsl::vmm + .filter(vmm_dsl::id.eq(self.destroyed_vmm_id)) + .filter(vmm_dsl::time_deleted.is_null()) + .select(Vmm::as_select()) + .first_async::(&*conn) + .await + .optional() + .expect("VMM query should succeed"); + assert!( + dbg!(fetched_vmm).is_none(), + "VMM record should have been deleted" + ); + + let fetched_sled_resource = sled_resource_dsl::sled_resource + .filter(sled_resource_dsl::id.eq(self.destroyed_vmm_id)) + .select(SledResource::as_select()) + .first_async::(&*conn) + .await + .optional() + .expect("sled resource query should succeed"); + assert!( + dbg!(fetched_sled_resource).is_none(), + "sled resource record should have been deleted" + ); + } + } + + #[nexus_test(server = crate::Server)] + async fn test_abandoned_vmms_are_reaped( + cptestctx: &ControlPlaneTestContext, + ) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + let fixture = + TestFixture::setup(&cptestctx.external_client, datastore, &opctx) + .await; + + let mut task = AbandonedVmmReaper::new(datastore.clone()); + + let mut results = ActivationResults::default(); + dbg!(task.reap_all(&mut results, &opctx,).await) + .expect("activation completes successfully"); + dbg!(&results); + + assert_eq!(results.vmms_deleted, 1); + assert_eq!(results.sled_reservations_deleted, 1); + assert_eq!(results.vmms_already_deleted, 0); + assert_eq!(results.error_count, 0); + fixture.assert_reaped(datastore).await; + } + + #[nexus_test(server = crate::Server)] + async fn vmm_already_deleted(cptestctx: &ControlPlaneTestContext) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + let fixture = + TestFixture::setup(&cptestctx.external_client, datastore, &opctx) + .await; + + // For this test, we separate the database query run by the background + // task to list abandoned VMMs from the actual cleanup of those VMMs, in + // order to simulate a condition where the VMM record was deleted + // between when the listing query was run and when the bg task attempted + // to delete the VMM record. 
+ let paginator = Paginator::new(MAX_BATCH); + let p = paginator.next().unwrap(); + let abandoned_vmms = datastore + .vmm_list_abandoned(&opctx, &p.current_pagparams()) + .await + .expect("must list abandoned vmms"); + + assert!(!abandoned_vmms.is_empty()); + + datastore + .vmm_mark_deleted(&opctx, &fixture.destroyed_vmm_id) + .await + .expect("simulate another nexus marking the VMM deleted"); + + let mut results = ActivationResults::default(); + let mut last_err = Ok(()); + let mut task = AbandonedVmmReaper::new(datastore.clone()); + task.reap_batch(&mut results, &mut last_err, &opctx, &abandoned_vmms) + .await; + dbg!(last_err).expect("should not have errored"); + dbg!(&results); + + assert_eq!(results.found, 1); + assert_eq!(results.vmms_deleted, 0); + assert_eq!(results.sled_reservations_deleted, 1); + assert_eq!(results.vmms_already_deleted, 1); + assert_eq!(results.error_count, 0); + + fixture.assert_reaped(datastore).await + } + + #[nexus_test(server = crate::Server)] + async fn sled_resource_already_deleted( + cptestctx: &ControlPlaneTestContext, + ) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + let fixture = + TestFixture::setup(&cptestctx.external_client, datastore, &opctx) + .await; + + // For this test, we separate the database query run by the background + // task to list abandoned VMMs from the actual cleanup of those VMMs, in + // order to simulate a condition where the sled reservation record was + // deleted between when the listing query was run and when the bg task + // attempted to delete the sled reservation.. + let paginator = Paginator::new(MAX_BATCH); + let p = paginator.next().unwrap(); + let abandoned_vmms = datastore + .vmm_list_abandoned(&opctx, &p.current_pagparams()) + .await + .expect("must list abandoned vmms"); + + assert!(!abandoned_vmms.is_empty()); + + datastore + .sled_reservation_delete(&opctx, fixture.destroyed_vmm_id) + .await + .expect( + "simulate another nexus marking the sled reservation deleted", + ); + + let mut results = ActivationResults::default(); + let mut last_err = Ok(()); + let mut task = AbandonedVmmReaper::new(datastore.clone()); + task.reap_batch(&mut results, &mut last_err, &opctx, &abandoned_vmms) + .await; + dbg!(last_err).expect("should not have errored"); + dbg!(&results); + + assert_eq!(results.found, 1); + assert_eq!(results.vmms_deleted, 1); + assert_eq!(results.sled_reservations_deleted, 1); + assert_eq!(results.vmms_already_deleted, 0); + assert_eq!(results.error_count, 0); + + fixture.assert_reaped(datastore).await + } +} diff --git a/nexus/src/app/background/blueprint_execution.rs b/nexus/src/app/background/blueprint_execution.rs index 7db59bc966..69725acf1d 100644 --- a/nexus/src/app/background/blueprint_execution.rs +++ b/nexus/src/app/background/blueprint_execution.rs @@ -121,13 +121,16 @@ mod test { use nexus_db_queries::context::OpContext; use nexus_test_utils_macros::nexus_test; use nexus_types::deployment::{ - Blueprint, BlueprintTarget, BlueprintZoneConfig, - BlueprintZoneDisposition, BlueprintZonesConfig, - }; - use nexus_types::inventory::{ - OmicronZoneConfig, OmicronZoneDataset, OmicronZoneType, + blueprint_zone_type, Blueprint, BlueprintPhysicalDisksConfig, + BlueprintTarget, BlueprintZoneConfig, BlueprintZoneDisposition, + BlueprintZoneType, BlueprintZonesConfig, CockroachDbPreserveDowngrade, }; + use nexus_types::external_api::views::SledState; + use 
nexus_types::inventory::OmicronZoneDataset; use omicron_common::api::external::Generation; + use omicron_uuid_kinds::GenericUuid; + use omicron_uuid_kinds::OmicronZoneUuid; + use omicron_uuid_kinds::SledUuid; use serde::Deserialize; use serde_json::json; use std::collections::BTreeMap; @@ -140,10 +143,17 @@ mod test { nexus_test_utils::ControlPlaneTestContext; fn create_blueprint( - blueprint_zones: BTreeMap, + blueprint_zones: BTreeMap, + blueprint_disks: BTreeMap, dns_version: Generation, ) -> (BlueprintTarget, Blueprint) { let id = Uuid::new_v4(); + // Assume all sleds are active. + let sled_state = blueprint_zones + .keys() + .copied() + .map(|sled_id| (sled_id, SledState::Active)) + .collect::>(); ( BlueprintTarget { target_id: id, @@ -153,9 +163,14 @@ mod test { Blueprint { id, blueprint_zones, + blueprint_disks, + sled_state, + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, parent_blueprint_id: None, internal_dns_version: dns_version, external_dns_version: dns_version, + cockroachdb_fingerprint: String::new(), time_created: chrono::Utc::now(), creator: "test".to_string(), comment: "test blueprint".to_string(), @@ -166,7 +181,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_deploy_omicron_zones(cptestctx: &ControlPlaneTestContext) { // Set up the test. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_background( cptestctx.logctx.log.clone(), @@ -179,8 +194,8 @@ mod test { // sleds to CRDB. let mut s1 = httptest::Server::run(); let mut s2 = httptest::Server::run(); - let sled_id1 = Uuid::new_v4(); - let sled_id2 = Uuid::new_v4(); + let sled_id1 = SledUuid::new_v4(); + let sled_id2 = SledUuid::new_v4(); let rack_id = Uuid::new_v4(); for (i, (sled_id, server)) in [(sled_id1, &s1), (sled_id2, &s2)].iter().enumerate() @@ -189,7 +204,7 @@ mod test { panic!("Expected Ipv6 address. Got {}", server.addr()); }; let update = SledUpdate::new( - *sled_id, + sled_id.into_untyped_uuid(), addr, SledBaseboard { serial_number: i.to_string(), @@ -228,7 +243,11 @@ mod test { // With a target blueprint having no zones, the task should trivially // complete and report a successful (empty) summary. 
let generation = Generation::new(); - let blueprint = Arc::new(create_blueprint(BTreeMap::new(), generation)); + let blueprint = Arc::new(create_blueprint( + BTreeMap::new(), + BTreeMap::new(), + generation, + )); blueprint_tx.send(Some(blueprint)).unwrap(); let value = task.activate(&opctx).await; println!("activating with no zones: {:?}", value); @@ -243,22 +262,22 @@ mod test { BlueprintZonesConfig { generation: Generation::new(), zones: vec![BlueprintZoneConfig { - config: OmicronZoneConfig { - id: Uuid::new_v4(), - underlay_address: "::1".parse().unwrap(), - zone_type: OmicronZoneType::InternalDns { + disposition, + id: OmicronZoneUuid::new_v4(), + underlay_address: "::1".parse().unwrap(), + zone_type: BlueprintZoneType::InternalDns( + blueprint_zone_type::InternalDns { dataset: OmicronZoneDataset { pool_name: format!("oxp_{}", Uuid::new_v4()) .parse() .unwrap(), }, - dns_address: "oh-hello-internal-dns".into(), + dns_address: "[::1]:0".parse().unwrap(), gz_address: "::1".parse().unwrap(), gz_address_index: 0, - http_address: "[::1]:12345".into(), + http_address: "[::1]:12345".parse().unwrap(), }, - }, - disposition, + ), }], } } @@ -273,6 +292,7 @@ mod test { (sled_id1, make_zones(BlueprintZoneDisposition::InService)), (sled_id2, make_zones(BlueprintZoneDisposition::Quiesced)), ]), + BTreeMap::new(), generation, ); diff --git a/nexus/src/app/background/blueprint_load.rs b/nexus/src/app/background/blueprint_load.rs index 2afe2d2f97..baf86d655f 100644 --- a/nexus/src/app/background/blueprint_load.rs +++ b/nexus/src/app/background/blueprint_load.rs @@ -54,12 +54,15 @@ impl BackgroundTask for TargetBlueprintLoader { }; // Retrieve the latest target blueprint - let result = - self.datastore.blueprint_target_get_current_full(opctx).await; - - // Decide what to do with the result - match (&mut self.last, result) { - (_, Err(error)) => { + let (new_bp_target, new_blueprint) = match self + .datastore + .blueprint_target_get_current_full(opctx) + .await + { + Ok((new_bp_target, new_blueprint)) => { + (new_bp_target, new_blueprint) + } + Err(error) => { // We failed to read the blueprint. There's nothing to do // but log an error. We'll retry when we're activated again. let message = format!("{:#}", error); @@ -70,40 +73,84 @@ impl BackgroundTask for TargetBlueprintLoader { ); let e = format!("failed to read target blueprint: {message}"); - json!({"error": e}) - } - (None, Ok(None)) => { - // We haven't found a blueprint yet. Do nothing. - json!({"status": "no target blueprint"}) + return json!({"error": e}); } - (Some(old), Ok(None)) => { - // We have transitioned from having a blueprint to not - // having one. This should not happen. + }; + + // Decide what to do with the new blueprint + let Some((old_bp_target, old_blueprint)) = self.last.as_deref() + else { + // We've found a target blueprint for the first time. + // Save it and notify any watchers. 
+ let target_id = new_blueprint.id; + let time_created = new_blueprint.time_created; + info!( + log, + "found new target blueprint (first find)"; + "target_id" => %target_id, + "time_created" => %time_created + ); + self.last = Some(Arc::new((new_bp_target, new_blueprint))); + self.tx.send_replace(self.last.clone()); + return json!({ + "target_id": target_id, + "time_created": time_created, + "time_found": chrono::Utc::now(), + "status": "first target blueprint", + }); + }; + + let target_id = new_blueprint.id; + let time_created = new_blueprint.time_created; + if old_blueprint.id != new_blueprint.id { + // The current target blueprint has been updated + info!( + log, + "found new target blueprint"; + "target_id" => %target_id, + "time_created" => %time_created + ); + self.last = Some(Arc::new((new_bp_target, new_blueprint))); + self.tx.send_replace(self.last.clone()); + json!({ + "target_id": target_id, + "time_created": time_created, + "time_found": chrono::Utc::now(), + "status": "target blueprint updated" + }) + } else { + // The new target id matches the old target id + // + // Let's see if the blueprints hold the same contents. + // It should not be possible for the contents of a + // blueprint to change, but we check to catch possible + // bugs further up the stack. + if *old_blueprint != new_blueprint { let message = format!( - "target blueprint with id {} was removed. There is no \ - longer any target blueprint", - old.1.id + "blueprint for id {} changed. \ + Blueprints are supposed to be immutable.", + target_id ); - let old_id = old.1.id; - self.last = None; - self.tx.send_replace(self.last.clone()); - error!(&log, "{message:?}"); + error!(&log, "{}", message); json!({ - "removed_target_id": old_id, - "status": "no target blueprint (removed)", + "target_id": target_id, + "status": "target blueprint unchanged (error)", "error": message }) - } - (None, Ok(Some((new_bp_target, new_blueprint)))) => { - // We've found a target blueprint for the first time. - // Save it and notify any watchers. - let target_id = new_blueprint.id; - let time_created = new_blueprint.time_created; + } else if old_bp_target.enabled != new_bp_target.enabled { + // The blueprints have the same contents, but its + // enabled bit has flipped. + let status = if new_bp_target.enabled { + "enabled" + } else { + "disabled" + }; info!( log, - "found new target blueprint (first find)"; + "target blueprint enabled state changed"; "target_id" => %target_id, - "time_created" => %time_created + "time_created" => %time_created, + "state" => status, ); self.last = Some(Arc::new((new_bp_target, new_blueprint))); self.tx.send_replace(self.last.clone()); @@ -111,89 +158,23 @@ impl BackgroundTask for TargetBlueprintLoader { "target_id": target_id, "time_created": time_created, "time_found": chrono::Utc::now(), - "status": "first target blueprint", + "status": format!("target blueprint {status}"), + }) + } else { + // We found a new target blueprint that exactly + // matches the old target blueprint. This is the + // common case when we're activated by a timeout. 
+ debug!( + log, + "found latest target blueprint (unchanged)"; + "target_id" => %target_id, + "time_created" => %time_created.clone() + ); + json!({ + "target_id": target_id, + "time_created": time_created, + "status": "target blueprint unchanged" }) - } - (Some(old), Ok(Some((new_bp_target, new_blueprint)))) => { - let target_id = new_blueprint.id; - let time_created = new_blueprint.time_created; - if old.1.id != new_blueprint.id { - // The current target blueprint has been updated - info!( - log, - "found new target blueprint"; - "target_id" => %target_id, - "time_created" => %time_created - ); - self.last = - Some(Arc::new((new_bp_target, new_blueprint))); - self.tx.send_replace(self.last.clone()); - json!({ - "target_id": target_id, - "time_created": time_created, - "time_found": chrono::Utc::now(), - "status": "target blueprint updated" - }) - } else { - // The new target id matches the old target id - // - // Let's see if the blueprints hold the same contents. - // It should not be possible for the contents of a - // blueprint to change, but we check to catch possible - // bugs further up the stack. - if old.1 != new_blueprint { - let message = format!( - "blueprint for id {} changed. \ - Blueprints are supposed to be immutable.", - target_id - ); - error!(&log, "{}", message); - json!({ - "target_id": target_id, - "status": "target blueprint unchanged (error)", - "error": message - }) - } else if old.0.enabled != new_bp_target.enabled { - // The blueprints have the same contents, but its - // enabled bit has flipped. - let status = if new_bp_target.enabled { - "enabled" - } else { - "disabled" - }; - info!( - log, - "target blueprint enabled state changed"; - "target_id" => %target_id, - "time_created" => %time_created, - "state" => status, - ); - self.last = - Some(Arc::new((new_bp_target, new_blueprint))); - self.tx.send_replace(self.last.clone()); - json!({ - "target_id": target_id, - "time_created": time_created, - "time_found": chrono::Utc::now(), - "status": format!("target blueprint {status}"), - }) - } else { - // We found a new target blueprint that exactly - // matches the old target blueprint. This is the - // common case when we're activated by a timeout. 
- debug!( - log, - "found latest target blueprint (unchanged)"; - "target_id" => %target_id, - "time_created" => %time_created.clone() - ); - json!({ - "target_id": target_id, - "time_created": time_created, - "status": "target blueprint unchanged" - }) - } - } } } } @@ -207,7 +188,9 @@ mod test { use crate::app::background::common::BackgroundTask; use nexus_inventory::now_db_precision; use nexus_test_utils_macros::nexus_test; - use nexus_types::deployment::{Blueprint, BlueprintTarget}; + use nexus_types::deployment::{ + Blueprint, BlueprintTarget, CockroachDbPreserveDowngrade, + }; use omicron_common::api::external::Generation; use serde::Deserialize; use std::collections::BTreeMap; @@ -229,9 +212,14 @@ mod test { Blueprint { id, blueprint_zones: BTreeMap::new(), + blueprint_disks: BTreeMap::new(), + sled_state: BTreeMap::new(), + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, parent_blueprint_id: Some(parent_blueprint_id), internal_dns_version: Generation::new(), external_dns_version: Generation::new(), + cockroachdb_fingerprint: String::new(), time_created: now_db_precision(), creator: "test".to_string(), comment: "test blueprint".to_string(), @@ -250,7 +238,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_load_blueprints(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/common.rs b/nexus/src/app/background/common.rs index e0d8f32316..da595dc4e1 100644 --- a/nexus/src/app/background/common.rs +++ b/nexus/src/app/background/common.rs @@ -376,7 +376,7 @@ impl TaskExec { self.activate(ActivationReason::Signaled).await; } - _ = dependencies.next(), if dependencies.len() > 0 => { + _ = dependencies.next(), if !dependencies.is_empty() => { self.activate(ActivationReason::Dependency).await; } } @@ -533,7 +533,7 @@ mod test { // activated #[nexus_test(server = crate::Server)] async fn test_driver_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), @@ -698,7 +698,7 @@ mod test { // activated. 
#[nexus_test(server = crate::Server)] async fn test_activation_in_progress(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), @@ -843,7 +843,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_saga_request_flow(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/dns_config.rs b/nexus/src/app/background/dns_config.rs index be18ac3612..71e0a812a7 100644 --- a/nexus/src/app/background/dns_config.rs +++ b/nexus/src/app/background/dns_config.rs @@ -175,7 +175,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/dns_propagation.rs b/nexus/src/app/background/dns_propagation.rs index cf7a399999..7d650f6f27 100644 --- a/nexus/src/app/background/dns_propagation.rs +++ b/nexus/src/app/background/dns_propagation.rs @@ -196,7 +196,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/external_endpoints.rs b/nexus/src/app/background/external_endpoints.rs index ed530e0775..1a587298d5 100644 --- a/nexus/src/app/background/external_endpoints.rs +++ b/nexus/src/app/background/external_endpoints.rs @@ -131,7 +131,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index e3f2154046..a87c53860d 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -4,6 +4,7 @@ //! 
Background task initialization +use super::abandoned_vmm_reaper; use super::bfd; use super::blueprint_execution; use super::blueprint_load; @@ -12,18 +13,25 @@ use super::dns_config; use super::dns_propagation; use super::dns_servers; use super::external_endpoints; +use super::instance_watcher; use super::inventory_collection; +use super::metrics_producer_gc; use super::nat_cleanup; use super::phantom_disks; +use super::physical_disk_adoption; use super::region_replacement; +use super::service_firewall_rules; use super::sync_service_zone_nat::ServiceZoneNatTracker; use super::sync_switch_configuration::SwitchPortSettingsManager; +use super::v2p_mappings::V2PManager; +use crate::app::oximeter::PRODUCER_LEASE_DURATION; use crate::app::sagas::SagaRequest; use nexus_config::BackgroundTaskConfig; use nexus_config::DnsTasksConfig; use nexus_db_model::DnsGroup; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; +use oximeter::types::ProducerRegistry; use std::collections::BTreeMap; use std::sync::Arc; use tokio::sync::mpsc::Sender; @@ -48,6 +56,9 @@ pub struct BackgroundTasks { /// task handle for the external DNS servers background task pub task_external_dns_servers: common::TaskHandle, + /// task handle for pruning metrics producers with expired leases + pub task_metrics_producer_gc: common::TaskHandle, + /// task handle for the task that keeps track of external endpoints pub task_external_endpoints: common::TaskHandle, /// external endpoints read by the background task @@ -63,6 +74,9 @@ pub struct BackgroundTasks { /// task handle for the task that collects inventory pub task_inventory_collection: common::TaskHandle, + /// task handle for the task that collects inventory + pub task_physical_disk_adoption: common::TaskHandle, + /// task handle for the task that detects phantom disks pub task_phantom_disks: common::TaskHandle, @@ -78,9 +92,23 @@ pub struct BackgroundTasks { /// task handle for the switch port settings manager pub task_switch_port_settings_manager: common::TaskHandle, + /// task handle for the opte v2p manager + pub task_v2p_manager: common::TaskHandle, + /// task handle for the task that detects if regions need replacement and /// begins the process pub task_region_replacement: common::TaskHandle, + + /// task handle for the task that polls sled agents for instance states. + pub task_instance_watcher: common::TaskHandle, + + /// task handle for propagation of VPC firewall rules for Omicron services + /// with external network connectivity, + pub task_service_firewall_propagation: common::TaskHandle, + + /// task handle for deletion of database records for VMMs abandoned by their + /// instances. 
+ pub task_abandoned_vmm_reaper: common::TaskHandle, } impl BackgroundTasks { @@ -90,9 +118,15 @@ impl BackgroundTasks { opctx: &OpContext, datastore: Arc, config: &BackgroundTaskConfig, + rack_id: Uuid, nexus_id: Uuid, resolver: internal_dns::resolver::Resolver, saga_request: Sender, + v2p_watcher: ( + tokio::sync::watch::Sender<()>, + tokio::sync::watch::Receiver<()>, + ), + producer_registry: &ProducerRegistry, ) -> BackgroundTasks { let mut driver = common::Driver::new(); @@ -113,6 +147,24 @@ impl BackgroundTasks { &config.dns_external, ); + let task_metrics_producer_gc = { + let gc = metrics_producer_gc::MetricProducerGc::new( + datastore.clone(), + PRODUCER_LEASE_DURATION, + ); + driver.register( + String::from("metrics_producer_gc"), + String::from( + "unregisters Oximeter metrics producers that have not \ + renewed their lease", + ), + config.metrics_producer_gc.period_secs, + Box::new(gc), + opctx.child(BTreeMap::new()), + vec![], + ) + }; + // Background task: External endpoints list watcher let (task_external_endpoints, external_endpoints) = { let watcher = external_endpoints::ExternalEndpointsWatcher::new( @@ -222,7 +274,7 @@ impl BackgroundTasks { // because the blueprint executor might also depend indirectly on the // inventory collector. In that case, we may need to do something more // complicated. But for now, this works. - let task_inventory_collection = { + let (task_inventory_collection, inventory_watcher) = { let collector = inventory_collection::InventoryCollector::new( datastore.clone(), resolver.clone(), @@ -230,6 +282,7 @@ impl BackgroundTasks { config.inventory.nkeep, config.inventory.disable, ); + let inventory_watcher = collector.watcher(); let task = driver.register( String::from("inventory_collection"), String::from( @@ -242,7 +295,24 @@ impl BackgroundTasks { vec![Box::new(rx_blueprint_exec)], ); - task + (task, inventory_watcher) + }; + + let task_physical_disk_adoption = { + driver.register( + "physical_disk_adoption".to_string(), + "ensure new physical disks are automatically marked in-service" + .to_string(), + config.physical_disk_adoption.period_secs, + Box::new(physical_disk_adoption::PhysicalDiskAdoption::new( + datastore.clone(), + inventory_watcher.clone(), + config.physical_disk_adoption.disable, + rack_id, + )), + opctx.child(BTreeMap::new()), + vec![Box::new(inventory_watcher)], + ) }; let task_service_zone_nat_tracker = { @@ -275,11 +345,22 @@ impl BackgroundTasks { ) }; + let task_v2p_manager = { + driver.register( + "v2p_manager".to_string(), + String::from("manages opte v2p mappings for vpc networking"), + config.v2p_mapping_propagation.period_secs, + Box::new(V2PManager::new(datastore.clone())), + opctx.child(BTreeMap::new()), + vec![Box::new(v2p_watcher.1)], + ) + }; + // Background task: detect if a region needs replacement and begin the // process let task_region_replacement = { let detector = region_replacement::RegionReplacementDetector::new( - datastore, + datastore.clone(), saga_request.clone(), ); @@ -295,23 +376,75 @@ impl BackgroundTasks { task }; + let task_instance_watcher = { + let watcher = instance_watcher::InstanceWatcher::new( + datastore.clone(), + resolver.clone(), + producer_registry, + instance_watcher::WatcherIdentity { nexus_id, rack_id }, + v2p_watcher.0, + ); + driver.register( + "instance_watcher".to_string(), + "periodically checks instance states".to_string(), + config.instance_watcher.period_secs, + Box::new(watcher), + opctx.child(BTreeMap::new()), + vec![], + ) + }; + // Background task: service firewall rule 
propagation + let task_service_firewall_propagation = driver.register( + String::from("service_firewall_rule_propagation"), + String::from( + "propagates VPC firewall rules for Omicron \ + services with external network connectivity", + ), + config.service_firewall_propagation.period_secs, + Box::new(service_firewall_rules::ServiceRulePropagator::new( + datastore.clone(), + )), + opctx.child(BTreeMap::new()), + vec![], + ); + + // Background task: abandoned VMM reaping + let task_abandoned_vmm_reaper = driver.register( + String::from("abandoned_vmm_reaper"), + String::from( + "deletes sled reservations for VMMs that have been abandoned by their instances", + ), + config.abandoned_vmm_reaper.period_secs, + Box::new(abandoned_vmm_reaper::AbandonedVmmReaper::new( + datastore, + )), + opctx.child(BTreeMap::new()), + vec![], + ); + BackgroundTasks { driver, task_internal_dns_config, task_internal_dns_servers, task_external_dns_config, task_external_dns_servers, + task_metrics_producer_gc, task_external_endpoints, external_endpoints, nat_cleanup, bfd_manager, task_inventory_collection, + task_physical_disk_adoption, task_phantom_disks, task_blueprint_loader, task_blueprint_executor, task_service_zone_nat_tracker, task_switch_port_settings_manager, + task_v2p_manager, task_region_replacement, + task_instance_watcher, + task_service_firewall_propagation, + task_abandoned_vmm_reaper, } } @@ -411,7 +544,7 @@ pub mod test { // the new DNS configuration #[nexus_test(server = crate::Server)] async fn test_dns_propagation_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), diff --git a/nexus/src/app/background/instance_watcher.rs b/nexus/src/app/background/instance_watcher.rs new file mode 100644 index 0000000000..d473ea8e99 --- /dev/null +++ b/nexus/src/app/background/instance_watcher.rs @@ -0,0 +1,621 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Background task for pulling instance state from sled-agents. + +use super::common::BackgroundTask; +use futures::{future::BoxFuture, FutureExt}; +use http::StatusCode; +use nexus_db_model::Instance; +use nexus_db_model::Project; +use nexus_db_model::Sled; +use nexus_db_model::Vmm; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::pagination::Paginator; +use nexus_db_queries::db::DataStore; +use nexus_types::identity::Asset; +use nexus_types::identity::Resource; +use omicron_common::api::external::InstanceState; +use omicron_common::api::internal::nexus::SledInstanceState; +use oximeter::types::ProducerRegistry; +use sled_agent_client::Client as SledAgentClient; +use std::borrow::Cow; +use std::collections::BTreeMap; +use std::future::Future; +use std::net::IpAddr; +use std::num::NonZeroU32; +use std::sync::Arc; +use std::sync::Mutex; +use uuid::Uuid; + +/// Background task that periodically checks instance states. +pub(crate) struct InstanceWatcher { + datastore: Arc, + resolver: internal_dns::resolver::Resolver, + metrics: Arc>, + id: WatcherIdentity, + v2p_notification_tx: tokio::sync::watch::Sender<()>, +} + +const MAX_SLED_AGENTS: NonZeroU32 = unsafe { + // Safety: last time I checked, 100 was greater than zero. 
+ NonZeroU32::new_unchecked(100) +}; + +impl InstanceWatcher { + pub(crate) fn new( + datastore: Arc, + resolver: internal_dns::resolver::Resolver, + producer_registry: &ProducerRegistry, + id: WatcherIdentity, + v2p_notification_tx: tokio::sync::watch::Sender<()>, + ) -> Self { + let metrics = Arc::new(Mutex::new(metrics::Metrics::default())); + producer_registry + .register_producer(metrics::Producer(metrics.clone())) + .unwrap(); + Self { datastore, resolver, metrics, id, v2p_notification_tx } + } + + fn check_instance( + &self, + opctx: &OpContext, + client: &SledAgentClient, + target: VirtualMachine, + ) -> impl Future + Send + 'static { + let datastore = self.datastore.clone(); + let resolver = self.resolver.clone(); + + let opctx = opctx.child( + std::iter::once(( + "instance_id".to_string(), + target.instance_id.to_string(), + )) + .collect(), + ); + let client = client.clone(); + let v2p_notification_tx = self.v2p_notification_tx.clone(); + + async move { + slog::trace!(opctx.log, "checking on instance..."); + let rsp = client.instance_get_state(&target.instance_id).await; + let mut check = + Check { target, outcome: Default::default(), result: Ok(()) }; + let state = match rsp { + Ok(rsp) => rsp.into_inner(), + Err(ClientError::ErrorResponse(rsp)) => { + let status = rsp.status(); + if status == StatusCode::NOT_FOUND + && rsp.as_ref().error_code.as_deref() + == Some("NO_SUCH_INSTANCE") + { + slog::info!(opctx.log, "instance is wayyyyy gone"); + // TODO(eliza): eventually, we should attempt to put the + // instance in the `Failed` state here. + check.outcome = + CheckOutcome::Failure(Failure::NoSuchInstance); + return check; + } + if status.is_client_error() { + slog::warn!(opctx.log, "check failed due to client error"; + "status" => ?status, "error" => ?rsp.into_inner()); + check.result = + Err(Incomplete::ClientHttpError(status.as_u16())); + } else { + slog::info!(opctx.log, "check failed due to server error"; + "status" => ?status, "error" => ?rsp.into_inner()); + } + + check.outcome = CheckOutcome::Failure( + Failure::SledAgentResponse(status.as_u16()), + ); + return check; + } + Err(ClientError::CommunicationError(e)) => { + // TODO(eliza): eventually, we may want to transition the + // instance to the `Failed` state if the sled-agent has been + // unreachable for a while. We may also want to take other + // corrective actions or alert an operator in this case. + // + // TODO(eliza): because we have the purported IP address + // of the instance's VMM from our database query, we could + // also ask the VMM directly when the sled-agent is + // unreachable. We should start doing that here at some + // point.
+ slog::info!(opctx.log, "sled agent is unreachable"; "error" => ?e); + check.outcome = + CheckOutcome::Failure(Failure::SledAgentUnreachable); + return check; + } + Err(e) => { + slog::warn!( + opctx.log, + "error checking up on instance"; + "error" => ?e, + "status" => ?e.status(), + ); + check.result = Err(Incomplete::ClientError); + return check; + } + }; + + let new_runtime_state: SledInstanceState = state.into(); + check.outcome = + CheckOutcome::Success(new_runtime_state.vmm_state.state); + slog::debug!( + opctx.log, + "updating instance state"; + "state" => ?new_runtime_state.vmm_state.state, + ); + check.result = crate::app::instance::notify_instance_updated( + &datastore, + &resolver, + &opctx, + &opctx, + &opctx.log, + &target.instance_id, + &new_runtime_state, + v2p_notification_tx, + ) + .await + .map_err(|e| { + slog::warn!( + opctx.log, + "error updating instance"; + "error" => ?e, + "state" => ?new_runtime_state.vmm_state.state, + ); + Incomplete::UpdateFailed + }) + .and_then(|updated| { + updated.ok_or_else(|| { + slog::warn!( + opctx.log, + "error updating instance: not found in database"; + "state" => ?new_runtime_state.vmm_state.state, + ); + Incomplete::InstanceNotFound + }) + }) + .map(|updated| { + slog::debug!( + opctx.log, + "update successful"; + "instance_updated" => updated.instance_updated, + "vmm_updated" => updated.vmm_updated, + "state" => ?new_runtime_state.vmm_state.state, + ); + }); + + check + } + } +} + +/// The identity of the process performing the health check, for distinguishing +/// health check metrics emitted by different Nexus instances. +/// +/// This is a struct just to ensure that the two UUIDs are named arguments +/// (rather than positional arguments) and can't be swapped accidentally. +#[derive(Copy, Clone)] +pub struct WatcherIdentity { + pub nexus_id: Uuid, + pub rack_id: Uuid, +} + +#[derive( + Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, oximeter::Target, +)] +struct VirtualMachine { + /// The rack ID of the Nexus process which performed the health check. + rack_id: Uuid, + /// The ID of the Nexus process which performed the health check. + nexus_id: Uuid, + /// The instance's ID. + instance_id: Uuid, + /// The silo ID of the instance's silo. + silo_id: Uuid, + /// The project ID of the instance. + project_id: Uuid, + /// The VMM ID of the instance's virtual machine manager. + vmm_id: Uuid, + /// The sled-agent's ID. + sled_agent_id: Uuid, + /// The sled agent's IP address. + sled_agent_ip: IpAddr, + /// The sled agent's port. + sled_agent_port: u16, +} + +impl VirtualMachine { + fn new( + WatcherIdentity { rack_id, nexus_id }: WatcherIdentity, + sled: &Sled, + instance: &Instance, + vmm: &Vmm, + project: &Project, + ) -> Self { + let addr = sled.address(); + Self { + rack_id, + nexus_id, + instance_id: instance.id(), + silo_id: project.silo_id, + project_id: project.id(), + vmm_id: vmm.id, + sled_agent_id: sled.id(), + sled_agent_ip: (*addr.ip()).into(), + sled_agent_port: addr.port(), + } + } +} + +struct Check { + target: VirtualMachine, + + /// The outcome of performing this check. Either we were able to reach the + /// sled-agent that owns this instance and it told us the instance's state + /// and VMM, or the health check failed in a way that suggests a + /// potential issue with the sled-agent or instance. + /// + /// If we were not able to perform the request at all due to an error on + /// *our* end, this will be `Unknown`. + outcome: CheckOutcome, + + /// `Err` if the instance check was unsuccessful.
+ /// + /// This indicates that something went wrong *while performing the check* that + /// does not necessarily indicate that the instance itself is in a bad + /// state. For example, the sled-agent client may have constructed an + /// invalid request, or an error may have occurred while updating the + /// instance in the database. + /// + /// Depending on when the error occurred, the `outcome` field may also + /// be populated. + result: Result<(), Incomplete>, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)] +enum CheckOutcome { + Success(InstanceState), + Failure(Failure), + #[default] + Unknown, +} + +impl Check { + fn state_str(&self) -> Cow<'static, str> { + match self.outcome { + CheckOutcome::Success(state) => state.label().into(), + CheckOutcome::Failure(_) => InstanceState::Failed.label().into(), + CheckOutcome::Unknown => "unknown".into(), + } + } + + fn reason_str(&self) -> Cow<'static, str> { + match self.outcome { + CheckOutcome::Success(_) => "success".into(), + CheckOutcome::Failure(reason) => reason.as_str(), + CheckOutcome::Unknown => match self.result { + Ok(()) => "unknown".into(), // this shouldn't happen, but there's no way to prevent it from happening, + Err(e) => e.as_str(), + }, + } + } +} + +#[derive( + Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, +)] +enum Failure { + /// The sled-agent for the sled on which the instance is running was + /// unreachable. + /// + /// This may indicate a network partition between us and that sled, that + /// the sled-agent process has crashed, or that the sled is down. + SledAgentUnreachable, + /// The sled-agent responded with an unexpected HTTP error. + SledAgentResponse(u16), + /// The sled-agent indicated that it doesn't know about an instance ID that + /// we believe it *should* know about. This probably means the sled-agent, + /// and potentially the whole sled, has been restarted. + NoSuchInstance, +} + +impl Failure { + fn as_str(&self) -> Cow<'static, str> { + match self { + Self::SledAgentUnreachable => "unreachable".into(), + Self::SledAgentResponse(status) => status.to_string().into(), + Self::NoSuchInstance => "no_such_instance".into(), + } + } +} + +#[derive( + Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, +)] +enum Incomplete { + /// The sled-agent responded with an HTTP client error, indicating that our + /// request was somehow malformed. + ClientHttpError(u16), + /// Something else went wrong while making an HTTP request. + ClientError, + /// We attempted to update the instance state in the database, but no + /// instance with that UUID existed. + /// + /// Because the instance UUIDs that we perform checks on come from querying + /// the instances table, this would probably indicate that the instance was + /// removed from the database between when we listed instances and when the + /// check completed. + InstanceNotFound, + /// Something went wrong while updating the state of the instance in the + /// database.
+ UpdateFailed, +} + +impl Incomplete { + fn as_str(&self) -> Cow<'static, str> { + match self { + Self::ClientHttpError(status) => status.to_string().into(), + Self::ClientError => "client_error".into(), + Self::InstanceNotFound => "instance_not_found".into(), + Self::UpdateFailed => "update_failed".into(), + } + } +} + +type ClientError = sled_agent_client::Error; + +impl BackgroundTask for InstanceWatcher { + fn activate<'a>( + &'a mut self, + opctx: &'a OpContext, + ) -> BoxFuture<'a, serde_json::Value> { + async { + let mut tasks = tokio::task::JoinSet::new(); + let mut paginator = Paginator::new(MAX_SLED_AGENTS); + let mk_client = |sled: &Sled| { + nexus_networking::sled_client_from_address( + sled.id(), + sled.address(), + &opctx.log, + ) + }; + + while let Some(p) = paginator.next() { + let maybe_batch = self + .datastore + .instance_and_vmm_list_by_sled_agent( + opctx, + &p.current_pagparams(), + ) + .await; + let batch = match maybe_batch { + Ok(batch) => batch, + Err(e) => { + slog::error!( + opctx.log, + "sled instances by sled agent query failed: {e}" + ); + return serde_json::json!({ "error": e.to_string() }); + } + }; + paginator = p.found_batch(&batch, &|(sled, _, _, _)| sled.id()); + + // When we iterate over the batch of sled instances, we pop the + // first sled from the batch before looping over the rest, to + // ensure that the initial sled-agent client is created first, + // as we need the address of the first sled to construct it. + // We could, alternatively, make the sled-agent client an + // `Option`, but then every subsequent iteration would have to + // handle the case where it's `None`, and I thought this was a + // bit neater... + let mut batch = batch.into_iter(); + if let Some((mut curr_sled, instance, vmm, project)) = batch.next() { + let mut client = mk_client(&curr_sled); + let target = VirtualMachine::new(self.id, &curr_sled, &instance, &vmm, &project); + tasks.spawn(self.check_instance(opctx, &client, target)); + + for (sled, instance, vmm, project) in batch { + // We're now talking to a new sled agent; update the client. + if sled.id() != curr_sled.id() { + client = mk_client(&sled); + curr_sled = sled; + } + + let target = VirtualMachine::new(self.id, &curr_sled, &instance, &vmm, &project); + tasks.spawn(self.check_instance(opctx, &client, target)); + } + } + } + + // Now, wait for the check results to come back. + let mut total: usize = 0; + let mut instance_states: BTreeMap = + BTreeMap::new(); + let mut check_failures: BTreeMap = + BTreeMap::new(); + let mut check_errors: BTreeMap = BTreeMap::new(); + while let Some(result) = tasks.join_next().await { + total += 1; + let check = result.expect( + "a `JoinError` is returned if a spawned task \ + panics, or if the task is aborted. we never abort \ + tasks on this `JoinSet`, and nexus is compiled with \ + `panic=\"abort\"`, so neither of these cases should \ + ever occur", + ); + match check.outcome { + CheckOutcome::Success(state) => { + *instance_states + .entry(state.to_string()) + .or_default() += 1; + } + CheckOutcome::Failure(reason) => { + *check_failures.entry(reason.as_str().into_owned()).or_default() += 1; + } + CheckOutcome::Unknown => {} + } + if let Err(ref reason) = check.result { + *check_errors.entry(reason.as_str().into_owned()).or_default() += 1; + } + self.metrics.lock().unwrap().record_check(check); + } + + // All requests completed!
Prune any old instance metrics for + // instances that we didn't check --- if we didn't spawn a check for + // something, that means it wasn't present in the most recent + // database query. + let pruned = self.metrics.lock().unwrap().prune(); + + slog::info!(opctx.log, "all instance checks complete"; + "total_instances" => total, + "total_completed" => instance_states.len() + check_failures.len(), + "total_failed" => check_failures.len(), + "total_incomplete" => check_errors.len(), + "pruned_instances" => pruned, + ); + serde_json::json!({ + "total_instances": total, + "instance_states": instance_states, + "failed_checks": check_failures, + "incomplete_checks": check_errors, + "pruned_instances": pruned, + }) + } + .boxed() + } +} + +mod metrics { + use super::{CheckOutcome, Incomplete, VirtualMachine}; + use oximeter::types::Cumulative; + use oximeter::Metric; + use oximeter::MetricsError; + use oximeter::Sample; + use std::borrow::Cow; + use std::collections::BTreeMap; + use std::sync::Arc; + use std::sync::Mutex; + + #[derive(Debug, Default)] + pub(super) struct Metrics { + instances: BTreeMap, + } + + #[derive(Debug)] + pub(super) struct Producer(pub(super) Arc>); + + #[derive(Debug, Default)] + struct Instance { + checks: BTreeMap, + check_errors: BTreeMap, + touched: bool, + } + + impl Metrics { + pub(crate) fn record_check(&mut self, check: super::Check) { + let instance = self.instances.entry(check.target).or_default(); + instance + .checks + .entry(check.outcome) + .or_insert_with(|| Check { + state: check.state_str(), + reason: check.reason_str(), + datum: Cumulative::default(), + }) + .datum += 1; + if let Err(error) = check.result { + instance + .check_errors + .entry(error) + .or_insert_with(|| IncompleteCheck { + reason: error.as_str(), + datum: Cumulative::default(), + }) + .datum += 1; + } + instance.touched = true; + } + + pub(super) fn prune(&mut self) -> usize { + let len = self.instances.len(); + self.instances.retain(|_, instance| { + std::mem::replace(&mut instance.touched, false) + }); + len - self.instances.len() + } + + fn len(&self) -> usize { + self.instances.values().map(Instance::len).sum() + } + } + + impl oximeter::Producer for Producer { + fn produce( + &mut self, + ) -> Result>, MetricsError> { + let metrics = self.0.lock().unwrap(); + let mut v = Vec::with_capacity(metrics.len()); + for (target, instance) in &metrics.instances { + instance.sample_into(target, &mut v)?; + } + Ok(Box::new(v.into_iter())) + } + } + + impl Instance { + fn len(&self) -> usize { + self.checks.len() + self.check_errors.len() + } + + fn sample_into( + &self, + target: &VirtualMachine, + dest: &mut Vec, + ) -> Result<(), MetricsError> { + for metric in self.checks.values() { + dest.push(Sample::new(target, metric)?); + } + for metric in self.check_errors.values() { + dest.push(Sample::new(target, metric)?); + } + Ok(()) + } + } + + /// The number of successful checks for a single instance, VMM, and sled agent. + #[derive(Clone, Debug, Metric)] + struct Check { + /// The string representation of the instance's state as understood by + /// the VMM. If the check failed, this will generally be "failed". + state: Cow<'static, str>, + /// Why the instance was marked as being in this state. + /// + /// If an instance was marked as "failed" due to a check failure, this + /// will be a string representation of the failure reason. Otherwise, if + /// the check was successful, this will be "success". Note that this may
Note that this may + /// be "success" even if the instance's state is "failed", which + /// indicates that we successfully queried the instance's state from the + /// sled-agent, and the *sled-agent* reported that the instance has + /// failed --- which is distinct from the instance watcher marking an + /// instance as failed due to a failed check. + reason: Cow<'static, str>, + /// The number of checks for this instance and sled agent which recorded + /// this state for this reason. + datum: Cumulative, + } + + /// The number of unsuccessful checks for an instance and sled agent pair. + #[derive(Clone, Debug, Metric)] + struct IncompleteCheck { + /// The reason why the check was unsuccessful. + /// + /// This is generated from the [`Incomplete`] enum's `Display` implementation. + reason: Cow<'static, str>, + /// The number of failed checks for this instance and sled agent. + datum: Cumulative, + } +} diff --git a/nexus/src/app/background/inventory_collection.rs b/nexus/src/app/background/inventory_collection.rs index 0666c136fc..52ee8f6e13 100644 --- a/nexus/src/app/background/inventory_collection.rs +++ b/nexus/src/app/background/inventory_collection.rs @@ -11,17 +11,14 @@ use futures::future::BoxFuture; use futures::FutureExt; use internal_dns::ServiceName; use nexus_db_queries::context::OpContext; -use nexus_db_queries::db::pagination::Paginator; use nexus_db_queries::db::DataStore; use nexus_inventory::InventoryError; -use nexus_types::identity::Asset; +use nexus_types::deployment::SledFilter; use nexus_types::inventory::Collection; +use omicron_uuid_kinds::CollectionUuid; use serde_json::json; -use std::num::NonZeroU32; use std::sync::Arc; - -/// How many rows to request in each paginated database query -const DB_PAGE_SIZE: u32 = 1024; +use tokio::sync::watch; /// Background task that reads inventory for the rack pub struct InventoryCollector { @@ -30,6 +27,7 @@ pub struct InventoryCollector { creator: String, nkeep: u32, disable: bool, + tx: watch::Sender>, } impl InventoryCollector { @@ -40,14 +38,20 @@ impl InventoryCollector { nkeep: u32, disable: bool, ) -> InventoryCollector { + let (tx, _) = watch::channel(None); InventoryCollector { datastore, resolver, creator: creator.to_owned(), nkeep, disable, + tx, } } + + pub fn watcher(&self) -> watch::Receiver> { + self.tx.subscribe() + } } impl BackgroundTask for InventoryCollector { @@ -78,11 +82,13 @@ impl BackgroundTask for InventoryCollector { "collection_id" => collection.id.to_string(), "time_started" => collection.time_started.to_string(), ); - json!({ + let json = json!({ "collection_id": collection.id.to_string(), "time_started": collection.time_started.to_string(), "time_done": collection.time_done.to_string() - }) + }); + self.tx.send_replace(Some(collection.id)); + json } } } @@ -127,8 +133,7 @@ async fn inventory_activate( .collect::>(); // Create an enumerator to find sled agents. - let page_size = NonZeroU32::new(DB_PAGE_SIZE).unwrap(); - let sled_enum = DbSledAgentEnumerator { opctx, datastore, page_size }; + let sled_enum = DbSledAgentEnumerator { opctx, datastore }; // Run a collection. 
let inventory = nexus_inventory::Collector::new( @@ -156,7 +161,6 @@ async fn inventory_activate( struct DbSledAgentEnumerator<'a> { opctx: &'a OpContext, datastore: &'a DataStore, - page_size: NonZeroU32, } impl<'a> nexus_inventory::SledAgentEnumerator for DbSledAgentEnumerator<'a> { @@ -164,26 +168,17 @@ impl<'a> nexus_inventory::SledAgentEnumerator for DbSledAgentEnumerator<'a> { &self, ) -> BoxFuture<'_, Result, InventoryError>> { async { - let mut all_sleds = Vec::new(); - let mut paginator = Paginator::new(self.page_size); - while let Some(p) = paginator.next() { - let records_batch = self - .datastore - .sled_list(&self.opctx, &p.current_pagparams()) - .await - .context("listing sleds")?; - paginator = p.found_batch( - &records_batch, - &|s: &nexus_db_model::Sled| s.id(), - ); - all_sleds.extend( - records_batch - .into_iter() - .map(|sled| format!("http://{}", sled.address())), - ); - } - - Ok(all_sleds) + Ok(self + .datastore + .sled_list_all_batched( + &self.opctx, + SledFilter::QueryDuringInventory, + ) + .await + .context("listing sleds")? + .into_iter() + .map(|sled| format!("http://{}", sled.address())) + .collect()) } .boxed() } @@ -191,6 +186,7 @@ impl<'a> nexus_inventory::SledAgentEnumerator for DbSledAgentEnumerator<'a> { #[cfg(test)] mod test { + use crate::app::authz; use crate::app::background::common::BackgroundTask; use crate::app::background::inventory_collection::DbSledAgentEnumerator; use crate::app::background::inventory_collection::InventoryCollector; @@ -202,11 +198,13 @@ mod test { use nexus_db_queries::db::datastore::DataStoreInventoryTest; use nexus_inventory::SledAgentEnumerator; use nexus_test_utils_macros::nexus_test; + use nexus_types::identity::Asset; use omicron_common::api::external::ByteCount; - use omicron_test_utils::dev::poll; + use omicron_common::api::external::LookupType; + use omicron_uuid_kinds::CollectionUuid; + use std::collections::BTreeSet; use std::net::Ipv6Addr; use std::net::SocketAddrV6; - use std::num::NonZeroU32; use uuid::Uuid; type ControlPlaneTestContext = @@ -216,46 +214,22 @@ mod test { // collections, too. #[nexus_test(server = crate::Server)] async fn test_basic(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), datastore.clone(), ); - // Nexus starts the very background task that we're also testing - // manually here. As a result, we should find a collection in the - // database before too long. Wait for it so that after it appears, we - // can assume the rest of the collections came from the instance that - // we're testing. - let mut last_collections = - poll::wait_for_condition::<_, anyhow::Error, _, _>( - || async { - let collections = datastore - .inventory_collections() - .await - .map_err(poll::CondCheckError::Failed)?; - if collections.is_empty() { - Err(poll::CondCheckError::NotYet) - } else { - Ok(collections) - } - }, - &std::time::Duration::from_millis(50), - &std::time::Duration::from_secs(15), - ) - .await - .expect("background task did not populate initial collection"); - let resolver = internal_dns::resolver::Resolver::new_from_addrs( cptestctx.logctx.log.clone(), &[cptestctx.internal_dns.dns_server.local_address()], ) .unwrap(); - // Now we'll create our own copy of the background task and activate it - // a bunch and make sure that it always creates a new collection and - // does not allow a backlog to accumulate. 
+ // Create our own copy of the background task and activate it a bunch + // and make sure that it always creates a new collection and does not + // allow a backlog to accumulate. let nkeep = 3; let mut task = InventoryCollector::new( datastore.clone(), @@ -265,25 +239,60 @@ mod test { false, ); let nkeep = usize::try_from(nkeep).unwrap(); - for i in 0..10 { + let mut all_our_collection_ids = Vec::new(); + for i in 0..20 { let _ = task.activate(&opctx).await; let collections = datastore.inventory_collections().await.unwrap(); + + // Nexus is creating inventory collections concurrently with us, + // so our expectations here have to be flexible to account for the + // fact that there might be collections other than the ones we've + // activated interspersed with the ones we care about. + let num_collections = collections.len(); + + // We should have at least one collection (the one we just + // activated). + assert!(num_collections > 0); + + // Regardless of the activation source, we should have at + // most `nkeep + 1` collections. + assert!(num_collections <= nkeep + 1); + + // Filter down to just the collections we activated. (This could be + // empty if Nexus shoved several collections in!) + let our_collections = collections + .into_iter() + .filter(|c| c.collector == "me") + .map(|c| CollectionUuid::from(c.id)) + .collect::>(); + + // If we have no collections, we have nothing else to check; Nexus + // has pushed us out. + if our_collections.is_empty() { + println!( + "iter {i}: no test collections \ + ({num_collections} Nexus collections)", + ); + continue; + } + + // The most recent collection should be new. + let new_collection_id = our_collections.last().unwrap(); + assert!(!all_our_collection_ids.contains(new_collection_id)); + all_our_collection_ids.push(*new_collection_id); + + // Push this onto the collections we've seen, then assert that the + // tail of all IDs we've seen matches the ones we saw in this + // iteration (i.e., we're pushing out old collections in order). println!( - "iter {}: last = {:?}, current = {:?}", - i, last_collections, collections + "iter {i}: saw {our_collections:?}; \ + should match tail of {all_our_collection_ids:?}" + ); + assert_eq!( + all_our_collection_ids + [all_our_collection_ids.len() - our_collections.len()..], + our_collections ); - - let expected_from_last: Vec<_> = if last_collections.len() <= nkeep - { - last_collections - } else { - last_collections.into_iter().skip(1).collect() - }; - let expected_from_current: Vec<_> = - collections.iter().rev().skip(1).rev().cloned().collect(); - assert_eq!(expected_from_last, expected_from_current); - assert_eq!(collections.len(), std::cmp::min(i + 2, nkeep + 1)); - last_collections = collections; } // Create a disabled task and make sure that does nothing. @@ -294,29 +303,43 @@ mod test { 3, true, ); - let previous = datastore.inventory_collections().await.unwrap(); let _ = task.activate(&opctx).await; - let latest = datastore.inventory_collections().await.unwrap(); - assert_eq!(previous, latest); + + // It's possible that Nexus is concurrently running with us still, so + // we'll activate this task and ensure that: + // + // (a) at least one of the collections is from `"me"` above, and + // (b) there is no collection from `"disabled"` + // + // This is technically still racy if Nexus manages to collect `nkeep + + // 1` collections in between the loop above and this check, but we don't + // expect that to be the case. 
+ let latest_collectors = datastore + .inventory_collections() + .await + .unwrap() + .into_iter() + .map(|c| c.collector) + .collect::>(); + println!("latest_collectors: {latest_collectors:?}"); + assert!(latest_collectors.contains("me")); + assert!(!latest_collectors.contains("disabled")); } #[nexus_test(server = crate::Server)] async fn test_db_sled_enumerator(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests( cptestctx.logctx.log.clone(), datastore.clone(), ); - let db_enum = DbSledAgentEnumerator { - opctx: &opctx, - datastore: &datastore, - page_size: NonZeroU32::new(3).unwrap(), - }; + let db_enum = + DbSledAgentEnumerator { opctx: &opctx, datastore: &datastore }; // There will be two sled agents set up as part of the test context. - let found_urls = db_enum.list_sled_agents().await.unwrap(); - assert_eq!(found_urls.len(), 2); + let initial_found_urls = db_enum.list_sled_agents().await.unwrap(); + assert_eq!(initial_found_urls.len(), 2); // Insert some sleds. let rack_id = Uuid::new_v4(); @@ -340,13 +363,14 @@ mod test { rack_id, Generation::new(), ); - sleds.push(datastore.sled_upsert(sled).await.unwrap()); + let (sled, _) = datastore.sled_upsert(sled).await.unwrap(); + sleds.push(sled); } // The same enumerator should immediately find all the new sleds. - let mut expected_urls: Vec<_> = found_urls + let mut expected_urls: Vec<_> = initial_found_urls .into_iter() - .chain(sleds.into_iter().map(|s| format!("http://{}", s.address()))) + .chain(sleds.iter().map(|s| format!("http://{}", s.address()))) .collect(); expected_urls.sort(); println!("expected_urls: {:?}", expected_urls); @@ -355,25 +379,30 @@ mod test { found_urls.sort(); assert_eq!(expected_urls, found_urls); - // We should get the same result even with a page size of 1. - let db_enum = DbSledAgentEnumerator { - opctx: &opctx, - datastore: &datastore, - page_size: NonZeroU32::new(1).unwrap(), - }; - let mut found_urls = db_enum.list_sled_agents().await.unwrap(); - found_urls.sort(); - assert_eq!(expected_urls, found_urls); - - // We should get the same result even with a page size much larger than - // we need. - let db_enum = DbSledAgentEnumerator { - opctx: &opctx, - datastore: &datastore, - page_size: NonZeroU32::new(1024).unwrap(), - }; + // Now mark one expunged. We should not find that sled any more. 
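+ // (The enumerator above lists sleds with `SledFilter::QueryDuringInventory`, which should exclude sleds whose policy is expunged, so the expunged sled's URL is expected to drop out of the results below.)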
+ let expunged_sled = &sleds[0]; + let expunged_sled_id = expunged_sled.id(); + let authz_sled = authz::Sled::new( + authz::FLEET, + expunged_sled_id, + LookupType::ById(expunged_sled_id), + ); + datastore + .sled_set_policy_to_expunged(&opctx, &authz_sled) + .await + .expect("failed to mark sled expunged"); + let expunged_sled_url = format!("http://{}", expunged_sled.address()); + let (remaining_urls, removed_urls): (Vec<_>, Vec<_>) = expected_urls + .into_iter() + .partition(|sled_url| *sled_url != expunged_sled_url); + assert_eq!( + removed_urls.len(), + 1, + "expected to find exactly one sled URL matching our \ + expunged sled's URL" + ); let mut found_urls = db_enum.list_sled_agents().await.unwrap(); found_urls.sort(); - assert_eq!(expected_urls, found_urls); + assert_eq!(remaining_urls, found_urls); } } diff --git a/nexus/src/app/background/metrics_producer_gc.rs b/nexus/src/app/background/metrics_producer_gc.rs new file mode 100644 index 0000000000..2a8464b80f --- /dev/null +++ b/nexus/src/app/background/metrics_producer_gc.rs @@ -0,0 +1,243 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Background task for garbage collecting metrics producers that have not +//! renewed their lease + +use super::common::BackgroundTask; +use chrono::TimeDelta; +use chrono::Utc; +use futures::future::BoxFuture; +use futures::FutureExt; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; +use serde_json::json; +use slog_error_chain::InlineErrorChain; +use std::sync::Arc; +use std::time::Duration; + +/// Background task that prunes metrics producers that have failed to renew +/// their lease. 
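+/// +/// A producer is considered expired when its `time_modified` has not been refreshed within the lease duration; expired producers are removed from the database and their assigned Oximeter collector is notified so that it stops attempting to collect from them.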
+pub struct MetricProducerGc { + datastore: Arc, + lease_duration: Duration, +} + +impl MetricProducerGc { + pub fn new(datastore: Arc, lease_duration: Duration) -> Self { + Self { datastore, lease_duration } + } + + async fn activate(&mut self, opctx: &OpContext) -> serde_json::Value { + let Some(expiration) = TimeDelta::from_std(self.lease_duration) + .ok() + .and_then(|delta| Utc::now().checked_sub_signed(delta)) + else { + error!( + opctx.log, + "Metric producer GC: out of bounds lease_duration"; + "lease_duration" => ?self.lease_duration, + ); + return json!({ + "error": "out of bounds lease duration", + "lease_duration": self.lease_duration, + }); + }; + + info!( + opctx.log, "Metric producer GC running"; + "expiration" => %expiration, + ); + let pruned = match nexus_metrics_producer_gc::prune_expired_producers( + opctx, + &self.datastore, + expiration, + ) + .await + { + Ok(pruned) => pruned, + Err(err) => { + warn!(opctx.log, "Metric producer GC failed"; &err); + return json!({ + "error": InlineErrorChain::new(&err).to_string(), + }); + } + }; + + if pruned.failures.is_empty() { + info!( + opctx.log, "Metric producer GC complete (no errors)"; + "expiration" => %expiration, + "pruned" => ?pruned.successes, + ); + json!({ + "expiration": expiration, + "pruned": pruned.successes, + }) + } else { + warn!( + opctx.log, + "Metric producer GC complete ({} errors)", + pruned.failures.len(); + "expiration" => %expiration, + "pruned" => ?pruned.successes, + "failures" => ?pruned.failures, + ); + json!({ + "expiration": expiration, + "pruned": pruned.successes, + "errors": pruned.failures, + }) + } + } +} + +impl BackgroundTask for MetricProducerGc { + fn activate<'a>( + &'a mut self, + opctx: &'a OpContext, + ) -> BoxFuture<'a, serde_json::Value> { + self.activate(opctx).boxed() + } +} + +#[cfg(test)] +mod tests { + use crate::app::oximeter::PRODUCER_LEASE_DURATION; + + use super::*; + use async_bb8_diesel::AsyncRunQueryDsl; + use chrono::DateTime; + use chrono::Utc; + use diesel::ExpressionMethods; + use httptest::matchers::request; + use httptest::responders::status_code; + use httptest::Expectation; + use nexus_db_model::OximeterInfo; + use nexus_db_queries::context::OpContext; + use nexus_db_queries::db::model::ProducerEndpoint; + use nexus_test_utils_macros::nexus_test; + use nexus_types::identity::Asset; + use nexus_types::internal_api::params; + use omicron_common::api::internal::nexus; + use omicron_common::api::internal::nexus::ProducerRegistrationResponse; + use serde_json::json; + use uuid::Uuid; + + type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + + async fn set_time_modified( + datastore: &DataStore, + producer_id: Uuid, + time_modified: DateTime, + ) { + use nexus_db_queries::db::schema::metric_producer::dsl; + + let conn = datastore.pool_connection_for_tests().await.unwrap(); + if let Err(err) = diesel::update(dsl::metric_producer) + .filter(dsl::id.eq(producer_id)) + .set(dsl::time_modified.eq(time_modified)) + .execute_async(&*conn) + .await + { + panic!( + "failed to update time_modified for producer {producer_id}: \ + {err}" + ); + } + } + + #[nexus_test(server = crate::Server)] + async fn test_pruning(cptestctx: &ControlPlaneTestContext) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + + let mut collector = httptest::Server::run(); + + // Insert an Oximeter collector + let collector_info = 
OximeterInfo::new(¶ms::OximeterInfo { + collector_id: Uuid::new_v4(), + address: collector.addr(), + }); + datastore + .oximeter_create(&opctx, &collector_info) + .await + .expect("failed to insert collector"); + + // There are several producers which automatically register themselves + // during tests, from Nexus and the simulated sled-agent for example. We + // don't particularly care about these registrations, so ignore any such + // requests to our simulated collector server. + let body = serde_json::to_string(&ProducerRegistrationResponse { + lease_duration: PRODUCER_LEASE_DURATION, + }) + .unwrap(); + collector.expect( + Expectation::matching(request::method_path("POST", "/producers")) + .times(0..) + .respond_with(status_code(201).body(body)), + ); + + // Insert a producer. + let producer = ProducerEndpoint::new( + &nexus::ProducerEndpoint { + id: Uuid::new_v4(), + kind: nexus::ProducerKind::Service, + address: "[::1]:0".parse().unwrap(), // unused + interval: Duration::from_secs(0), // unused + }, + collector_info.id, + ); + datastore + .producer_endpoint_create(&opctx, &producer) + .await + .expect("failed to insert producer"); + + // Create the task and activate it. Technically this is racy in that it + // could prune the producer we just added, but if it's been an hour + // since then, we have bigger problems. This should _not_ prune the + // producer, since it's been active within the last hour. + let mut gc = + MetricProducerGc::new(datastore.clone(), Duration::from_secs(3600)); + let value = gc.activate(&opctx).await; + let value = value.as_object().expect("non-object"); + assert!(!value.contains_key("failures")); + assert!(value.contains_key("expiration")); + assert_eq!(*value.get("pruned").expect("missing `pruned`"), json!([])); + + // Move our producer backwards in time: pretend it registered two hours + // ago, which should result in it being pruned. + set_time_modified( + &datastore, + producer.id(), + Utc::now() - chrono::TimeDelta::hours(2), + ) + .await; + + // Pruning should also notify the collector. + collector.expect( + Expectation::matching(request::method_path( + "DELETE", + format!("/producers/{}", producer.id()), + )) + .respond_with(status_code(204)), + ); + + let value = gc.activate(&opctx).await; + let value = value.as_object().expect("non-object"); + assert!(!value.contains_key("failures")); + assert!(value.contains_key("expiration")); + assert_eq!( + *value.get("pruned").expect("missing `pruned`"), + json!([producer.id()]) + ); + + collector.verify_and_clear(); + } +} diff --git a/nexus/src/app/background/mod.rs b/nexus/src/app/background/mod.rs index 9867f1dc6d..6de9e6f4d3 100644 --- a/nexus/src/app/background/mod.rs +++ b/nexus/src/app/background/mod.rs @@ -4,6 +4,7 @@ //! 
Background tasks +mod abandoned_vmm_reaper; mod bfd; mod blueprint_execution; mod blueprint_load; @@ -13,13 +14,18 @@ mod dns_propagation; mod dns_servers; mod external_endpoints; mod init; +mod instance_watcher; mod inventory_collection; +mod metrics_producer_gc; mod nat_cleanup; mod networking; mod phantom_disks; +mod physical_disk_adoption; mod region_replacement; +mod service_firewall_rules; mod status; mod sync_service_zone_nat; mod sync_switch_configuration; +mod v2p_mappings; pub use init::BackgroundTasks; diff --git a/nexus/src/app/background/physical_disk_adoption.rs b/nexus/src/app/background/physical_disk_adoption.rs new file mode 100644 index 0000000000..05c53963de --- /dev/null +++ b/nexus/src/app/background/physical_disk_adoption.rs @@ -0,0 +1,181 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Background task for automatically adopting physical disks. +//! +//! Removable disks may be arbitrarily attached and detached from +//! Oxide racks. When this happens, if they had not previously +//! been part of a cluster, they need to be explicitly added +//! to become usable. +//! +//! In the future, this may become more explicitly operator-controlled. + +use super::common::BackgroundTask; +use futures::future::BoxFuture; +use futures::FutureExt; +use nexus_db_model::PhysicalDisk; +use nexus_db_model::Zpool; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; +use nexus_types::identity::Asset; +use omicron_common::api::external::DataPageParams; +use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::GenericUuid; +use serde_json::json; +use std::sync::Arc; +use tokio::sync::watch; +use uuid::Uuid; + +pub struct PhysicalDiskAdoption { + datastore: Arc, + disable: bool, + rack_id: Uuid, + rx_inventory_collection: watch::Receiver>, +} + +impl PhysicalDiskAdoption { + pub fn new( + datastore: Arc, + rx_inventory_collection: watch::Receiver>, + disable: bool, + rack_id: Uuid, + ) -> Self { + PhysicalDiskAdoption { + datastore, + disable, + rack_id, + rx_inventory_collection, + } + } +} + +impl BackgroundTask for PhysicalDiskAdoption { + fn activate<'a>( + &'a mut self, + opctx: &'a OpContext, + ) -> BoxFuture<'a, serde_json::Value> { + async { + if self.disable { + return json!({ "error": "task disabled" }); + } + + // Only adopt physical disks after rack handoff has completed. + // + // This prevents a race condition where the same physical disks + // are inserted simultaneously at handoff time and inside this + // background task. This is bad because the handoff transaction will + // fail if the same disk already exists. + // + // TODO-multirack: This will only work for clusters smaller than + // a page. 
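+ // (The query below fetches a single page of racks via `DataPageParams::max_page()`, which is where that limitation comes from.)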
+ let result = self.datastore.rack_list_initialized( + opctx, + &DataPageParams::max_page() + ).await; + match result { + Ok(racks) => { + if !racks.iter().any(|r| r.identity().id == self.rack_id) { + info!( + &opctx.log, + "Physical Disk Adoption: Rack not yet initialized"; + "rack_id" => %self.rack_id, + ); + let msg = format!("rack not yet initialized: {}", self.rack_id); + return json!({"error": msg}); + } + }, + Err(err) => { + warn!( + &opctx.log, + "Physical Disk Adoption: failed to query for initialized racks"; + "err" => %err, + ); + return json!({ "error": format!("failed to query database: {:#}", err) }); + } + } + + let mut disks_added = 0; + let log = &opctx.log; + warn!(&log, "physical disk adoption task started"); + + let collection_id = *self.rx_inventory_collection.borrow(); + let Some(collection_id) = collection_id else { + warn!( + &opctx.log, + "Physical Disk Adoption: skipped"; + "reason" => "no inventory" + ); + return json!({ "error": "no inventory" }); + }; + + let result = self.datastore.physical_disk_uninitialized_list( + opctx, + collection_id, + ).await; + + let uninitialized = match result { + Ok(uninitialized) => uninitialized, + Err(err) => { + warn!( + &opctx.log, + "Physical Disk Adoption: failed to query for insertable disks"; + "err" => %err, + ); + return json!({ "error": format!("failed to query database: {:#}", err) }); + }, + }; + + for inv_disk in uninitialized { + let disk = PhysicalDisk::new( + Uuid::new_v4(), + inv_disk.vendor, + inv_disk.serial, + inv_disk.model, + inv_disk.variant, + inv_disk.sled_id.into_untyped_uuid(), + ); + + let zpool = Zpool::new( + Uuid::new_v4(), + inv_disk.sled_id.into_untyped_uuid(), + disk.id() + ); + + let result = self.datastore.physical_disk_and_zpool_insert( + opctx, + disk.clone(), + zpool + ).await; + + if let Err(err) = result { + warn!( + &opctx.log, + "Physical Disk Adoption: failed to insert new disk and zpool"; + "err" => %err + ); + let msg = format!( + "failed to insert disk/zpool: {:#}; disk = {:#?}", + err, + disk + ); + return json!({ "error": msg}); + } + + disks_added += 1; + + info!( + &opctx.log, + "Physical Disk Adoption: Successfully added a new disk and zpool"; + "disk" => #?disk + ); + } + + warn!(&log, "physical disk adoption task done"); + json!({ + "physical_disks_added": disks_added, + }) + } + .boxed() + } +} diff --git a/nexus/src/app/background/service_firewall_rules.rs b/nexus/src/app/background/service_firewall_rules.rs new file mode 100644 index 0000000000..1a705d1fae --- /dev/null +++ b/nexus/src/app/background/service_firewall_rules.rs @@ -0,0 +1,73 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Background task for propagating VPC firewall rules for Omicron services. +//! +//! This is intended to propagate only the rules related to Oxide-managed +//! programs, like Nexus or external DNS. These are special -- they are very +//! unlikely to change and also relatively small. This task is not intended to +//! handle general changes to customer-visible VPC firewalls, and is mostly in +//! place to propagate changes in the IP allowlist for user-facing services. 
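+//! +//! Each activation simply re-propagates the current rules for Omicron services via `nexus_networking::plumb_service_firewall_rules`.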
+ +use super::common::BackgroundTask; +use futures::future::BoxFuture; +use futures::FutureExt; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; +use std::sync::Arc; + +pub struct ServiceRulePropagator { + datastore: Arc, +} + +impl ServiceRulePropagator { + pub fn new(datastore: Arc) -> Self { + Self { datastore } + } +} + +impl BackgroundTask for ServiceRulePropagator { + fn activate<'a>( + &'a mut self, + opctx: &'a OpContext, + ) -> BoxFuture<'a, serde_json::Value> { + async { + let log = opctx + .log + .new(slog::o!("component" => "service-firewall-rule-propagator")); + debug!( + log, + "starting background task for service \ + firewall rule propagation" + ); + let start = std::time::Instant::now(); + let res = nexus_networking::plumb_service_firewall_rules( + &self.datastore, + opctx, + &[], + opctx, + &log, + ) + .await; + if let Err(e) = res { + error!( + log, + "failed to propagate service firewall rules"; + "error" => ?e, + ); + serde_json::json!({"error" : e.to_string()}) + } else { + // No meaningful data to return; the duration is already + // captured by the driver itself. + debug!( + log, + "successfully propagated service firewall rules"; + "elapsed" => ?start.elapsed() + ); + serde_json::json!({}) + } + } + .boxed() + } +} diff --git a/nexus/src/app/background/status.rs b/nexus/src/app/background/status.rs index 120401c439..f4fb9e56e5 100644 --- a/nexus/src/app/background/status.rs +++ b/nexus/src/app/background/status.rs @@ -13,6 +13,7 @@ use omicron_common::api::external::LookupResult; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use std::collections::BTreeMap; +use std::collections::BTreeSet; impl Nexus { pub(crate) async fn bgtasks_list( @@ -53,4 +54,38 @@ impl Nexus { let period = driver.task_period(task); Ok(BackgroundTask::new(task.name(), description, period, status)) } + + pub(crate) async fn bgtask_activate( + &self, + opctx: &OpContext, + mut names: BTreeSet, + ) -> Result<(), Error> { + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + let driver = &self.background_tasks.driver; + + // Ensure all task names are valid by removing them from the set of + // names as we find them. + let tasks_to_activate: Vec<_> = + driver.tasks().filter(|t| names.remove(t.name())).collect(); + + // If any names weren't recognized, return an error.
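+ // (Whatever is left in `names` at this point was not matched against any registered task, so those names are reported back in the error below.)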
+ if !names.is_empty() { + let mut names_str = "background tasks: ".to_owned(); + for (i, name) in names.iter().enumerate() { + names_str.push_str(name); + if i < names.len() - 1 { + names_str.push_str(", "); + } + } + + return Err(LookupType::ByOther(names_str) + .into_not_found(ResourceType::BackgroundTask)); + } + + for task in tasks_to_activate { + driver.activate(task); + } + + Ok(()) + } } diff --git a/nexus/src/app/background/sync_service_zone_nat.rs b/nexus/src/app/background/sync_service_zone_nat.rs index e23621ed23..b0a4c8cef2 100644 --- a/nexus/src/app/background/sync_service_zone_nat.rs +++ b/nexus/src/app/background/sync_service_zone_nat.rs @@ -19,7 +19,7 @@ use nexus_db_queries::context::OpContext; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::DataStore; use omicron_common::address::{MAX_PORT, MIN_PORT}; -use omicron_common::api::external; +use omicron_uuid_kinds::GenericUuid; use serde_json::json; use sled_agent_client::types::OmicronZoneType; use std::net::{IpAddr, SocketAddr}; @@ -107,7 +107,7 @@ impl BackgroundTask for ServiceZoneNatTracker { for (sled_id, zones_found) in collection.omicron_zones { let (_, sled) = match LookupPath::new(opctx, &self.datastore) - .sled_id(sled_id) + .sled_id(sled_id.into_untyped_uuid()) .fetch() .await .context("failed to look up sled") @@ -124,9 +124,7 @@ impl BackgroundTask for ServiceZoneNatTracker { } }; - let sled_address = external::Ipv6Net( - ipnetwork::Ipv6Network::new(*sled.ip, 128).unwrap(), - ); + let sled_address = oxnet::Ipv6Net::host_net(*sled.ip); let zones_config: sled_agent_client::types::OmicronZonesConfig = zones_found.zones; @@ -151,17 +149,17 @@ impl BackgroundTask for ServiceZoneNatTracker { }; let external_address = - ipnetwork::Ipv4Network::new(external_ip, 32) + oxnet::Ipv4Net::new(external_ip, 32) .unwrap(); + let (snat_first_port, snat_last_port) = + snat_cfg.port_range_raw(); let nat_value = Ipv4NatValues { external_address: nexus_db_model::Ipv4Net( - omicron_common::api::external::Ipv4Net( external_address, - ), ), - first_port: snat_cfg.first_port.into(), - last_port: snat_cfg.last_port.into(), + first_port: snat_first_port.into(), + last_port: snat_last_port.into(), sled_address: sled_address.into(), vni: nexus_db_model::Vni(nic.vni), mac: nexus_db_model::MacAddr(nic.mac), @@ -184,14 +182,12 @@ impl BackgroundTask for ServiceZoneNatTracker { }; let external_address = - ipnetwork::Ipv4Network::new(external_ip, 32) + oxnet::Ipv4Net::new(external_ip, 32) .unwrap(); let nat_value = Ipv4NatValues { external_address: nexus_db_model::Ipv4Net( - omicron_common::api::external::Ipv4Net( external_address, - ), ), first_port: MIN_PORT.into(), last_port: MAX_PORT.into(), @@ -231,14 +227,12 @@ impl BackgroundTask for ServiceZoneNatTracker { }; let external_address = - ipnetwork::Ipv4Network::new(external_ip, 32) + oxnet::Ipv4Net::new(external_ip, 32) .unwrap(); let nat_value = Ipv4NatValues { external_address: nexus_db_model::Ipv4Net( - omicron_common::api::external::Ipv4Net( external_address, - ), ), first_port: MIN_PORT.into(), last_port: MAX_PORT.into(), diff --git a/nexus/src/app/background/sync_switch_configuration.rs b/nexus/src/app/background/sync_switch_configuration.rs index a53e366daa..54fc5b8be0 100644 --- a/nexus/src/app/background/sync_switch_configuration.rs +++ b/nexus/src/app/background/sync_switch_configuration.rs @@ -11,6 +11,7 @@ use crate::app::{ }, map_switch_zone_addrs, }; +use oxnet::Ipv4Net; use slog::o; use internal_dns::resolver::Resolver; @@ -23,12 +24,15 @@ use 
nexus_db_model::{ use uuid::Uuid; use super::common::BackgroundTask; +use display_error_chain::DisplayErrorChain; use dpd_client::types::PortId; use futures::future::BoxFuture; use futures::FutureExt; use mg_admin_client::types::{ - AddStaticRoute4Request, ApplyRequest, BgpPeerConfig, - DeleteStaticRoute4Request, Prefix4, StaticRoute4, StaticRoute4List, + AddStaticRoute4Request, ApplyRequest, BgpPeerConfig, CheckerSource, + DeleteStaticRoute4Request, ImportExportPolicy as MgImportExportPolicy, + Prefix as MgPrefix, Prefix4, Prefix6, ShaperSource, StaticRoute4, + StaticRoute4List, }; use nexus_db_queries::{ context::OpContext, @@ -40,15 +44,15 @@ use omicron_common::OMICRON_DPD_TAG; use omicron_common::{ address::{get_sled_address, Ipv6Subnet}, api::{ - external::{DataPageParams, SwitchLocation}, + external::{DataPageParams, ImportExportPolicy, SwitchLocation}, internal::shared::ParseSwitchLocationError, }, }; use serde_json::json; use sled_agent_client::types::{ BgpConfig as SledBgpConfig, BgpPeerConfig as SledBgpPeerConfig, - EarlyNetworkConfig, EarlyNetworkConfigBody, HostPortConfig, Ipv4Network, - PortConfigV1, RackNetworkConfigV1, RouteConfig as SledRouteConfig, + EarlyNetworkConfig, EarlyNetworkConfigBody, HostPortConfig, PortConfigV1, + RackNetworkConfigV1, RouteConfig as SledRouteConfig, }; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, @@ -282,13 +286,14 @@ impl BackgroundTask for SwitchPortSettingsManager { let racks = match self.datastore.rack_list_initialized(opctx, &DataPageParams::max_page()).await { Ok(racks) => racks, Err(e) => { - error!(log, "failed to retrieve racks from database"; "error" => ?e); + error!(log, "failed to retrieve racks from database"; + "error" => %DisplayErrorChain::new(&e) + ); return json!({ "error": format!( - "failed to retrieve racks from database : \ - {:#}", - e + "failed to retrieve racks from database : {}", + DisplayErrorChain::new(&e) ) }); }, @@ -311,7 +316,8 @@ impl BackgroundTask for SwitchPortSettingsManager { { Ok(addrs) => addrs, Err(e) => { - error!(log, "failed to resolve addresses for Dendrite services"; "error" => %e); + error!(log, "failed to resolve addresses for Dendrite services"; + "error" => %DisplayErrorChain::new(&e)); continue; }, }; @@ -428,7 +434,7 @@ impl BackgroundTask for SwitchPortSettingsManager { error!( log, "error fetching loopback addresses from db, skipping loopback config"; - "error" => %e + "error" => %DisplayErrorChain::new(&e) ); }, }; @@ -465,7 +471,7 @@ impl BackgroundTask for SwitchPortSettingsManager { log, "error while applying smf updates to switch zone"; "location" => %location, - "error" => %e, + "error" => %DisplayErrorChain::new(&e) ); } } @@ -530,7 +536,7 @@ impl BackgroundTask for SwitchPortSettingsManager { "error while fetching bgp peer config from db"; "location" => %location, "port_name" => %port.port_name, - "error" => %e, + "error" => %DisplayErrorChain::new(&e) ); continue; }, @@ -546,7 +552,8 @@ impl BackgroundTask for SwitchPortSettingsManager { // Same thing as above, check to see if we've already built the announce set, // if so we'll skip this step - if bgp_announce_prefixes.get(&bgp_config.bgp_announce_set_id).is_none() { + #[allow(clippy::map_entry)] + if !bgp_announce_prefixes.contains_key(&bgp_config.bgp_announce_set_id) { let announcements = match self .datastore .bgp_announce_list( @@ -566,7 +573,7 @@ impl BackgroundTask for SwitchPortSettingsManager { "error while fetching bgp announcements from db"; "location" => %location, "bgp_announce_set_id" => 
%bgp_config.bgp_announce_set_id, - "error" => %e, + "error" => %DisplayErrorChain::new(&e) ); continue; }, @@ -587,6 +594,133 @@ impl BackgroundTask for SwitchPortSettingsManager { bgp_announce_prefixes.insert(bgp_config.bgp_announce_set_id, prefixes); } + let ttl = peer.min_ttl.map(|x| x.0); + + //TODO consider awaiting in parallel and joining + let communities = match self.datastore.communities_for_peer( + opctx, + peer.port_settings_id, + &peer.interface_name, + peer.addr, + ).await { + Ok(cs) => cs, + Err(e) => { + error!(log, + "failed to get communities for peer"; + "peer" => ?peer, + "error" => %DisplayErrorChain::new(&e) + ); + return json!({ + "error": + format!( + "failed to get communities for peer {:?}: {}", + peer, + DisplayErrorChain::new(&e) + ) + }); + } + }; + + let allow_import = match self.datastore.allow_import_for_peer( + opctx, + peer.port_settings_id, + &peer.interface_name, + peer.addr, + ).await { + Ok(cs) => cs, + Err(e) => { + error!(log, + "failed to get peer allowed imports"; + "peer" => ?peer, + "error" => %DisplayErrorChain::new(&e) + ); + return json!({ + "error": + format!( + "failed to get allowed imports for peer {:?}: {}", + peer, + DisplayErrorChain::new(&e) + ) + }); + } + }; + + let import_policy = match allow_import { + Some(list) => { + MgImportExportPolicy::Allow(list + .into_iter() + .map(|x| + match x.prefix { + IpNetwork::V4(p) => MgPrefix::V4( + Prefix4{ + length: p.prefix(), + value: p.ip(), + } + ), + IpNetwork::V6(p) => MgPrefix::V6( + Prefix6{ + length: p.prefix(), + value: p.ip(), + } + ) + } + ) + .collect() + ) + } + None => MgImportExportPolicy::NoFiltering, + }; + + let allow_export = match self.datastore.allow_export_for_peer( + opctx, + peer.port_settings_id, + &peer.interface_name, + peer.addr, + ).await { + Ok(cs) => cs, + Err(e) => { + error!(log, + "failed to get peer allowed exports"; + "peer" => ?peer, + "error" => %DisplayErrorChain::new(&e), + ); + return json!({ + "error": + format!( + "failed to get allowed exports for peer {:?}: {}", + peer, + DisplayErrorChain::new(&e) + ) + }); + } + }; + + let export_policy = match allow_export { + Some(list) => { + MgImportExportPolicy::Allow(list + .into_iter() + .map(|x| + match x.prefix { + IpNetwork::V4(p) => MgPrefix::V4( + Prefix4{ + length: p.prefix(), + value: p.ip(), + } + ), + IpNetwork::V6(p) => MgPrefix::V6( + Prefix6{ + length: p.prefix(), + value: p.ip(), + } + ) + } + ) + .collect() + ) + } + None => MgImportExportPolicy::NoFiltering, + }; + // now that the peer passes the above validations, add it to the list for configuration + let peer_config = BgpPeerConfig { + name: format!("{}", peer.addr.ip()), @@ -598,6 +732,16 @@ impl BackgroundTask for SwitchPortSettingsManager { keepalive: peer.keepalive.0.into(), resolution: BGP_SESSION_RESOLUTION, passive: false, + remote_asn: peer.remote_asn.as_ref().map(|x| x.0), + min_ttl: ttl, + md5_auth_key: peer.md5_auth_key.clone(), + multi_exit_discriminator: peer.multi_exit_discriminator.as_ref().map(|x| x.0), + local_pref: peer.local_pref.as_ref().map(|x| x.0), + enforce_first_as: peer.enforce_first_as, + communities: communities.into_iter().map(|c| c.community.0).collect(), + allow_export: export_policy, + allow_import: import_policy, + vlan_id: peer.vlan_id.map(|x| x.0), }; // update the stored vec if it exists, create a new one if it doesn't exist @@ -637,6 +781,14 @@ impl BackgroundTask for SwitchPortSettingsManager { asn: *request_bgp_config.asn, peers, originate: request_prefixes.clone(), + checker:
request_bgp_config.checker.as_ref().map(|code| CheckerSource{ + asn: *request_bgp_config.asn, + code: code.clone(), + }), + shaper: request_bgp_config.shaper.as_ref().map(|code| ShaperSource{ + asn: *request_bgp_config.asn, + code: code.clone(), + }), }; match desired_bgp_configs.entry(*location) { @@ -717,7 +869,7 @@ impl BackgroundTask for SwitchPortSettingsManager { // build the desired bootstore config from the records we've fetched let subnet = match rack.rack_subnet { - Some(IpNetwork::V6(subnet)) => subnet, + Some(IpNetwork::V6(subnet)) => subnet.into(), Some(IpNetwork::V4(_)) => { error!(log, "rack subnet must be ipv6"; "rack" => ?rack); continue; @@ -728,21 +880,22 @@ impl BackgroundTask for SwitchPortSettingsManager { } }; - // TODO: @rcgoodfellow is this correct? Do we place the BgpConfig for both switches in a single Vec to send to the bootstore? + // TODO: is this correct? Do we place the BgpConfig for both switches in a single Vec to send to the bootstore? let mut bgp: Vec = switch_bgp_config.iter().map(|(_location, (_id, config))| { - let announcements: Vec = bgp_announce_prefixes + let announcements = bgp_announce_prefixes .get(&config.bgp_announce_set_id) .expect("bgp config is present but announce set is not populated") .iter() .map(|prefix| { - ipnetwork::Ipv4Network::new(prefix.value, prefix.length) - .expect("Prefix4 and Ipv4Network's value types have diverged") - .into() + Ipv4Net::new(prefix.value, prefix.length) + .expect("Prefix4 and Ipv4Net's value types have diverged") }).collect(); SledBgpConfig { asn: config.asn.0, originate: announcements, + checker: config.checker.clone(), + shaper: config.shaper.clone(), } }).collect(); @@ -763,14 +916,14 @@ impl BackgroundTask for SwitchPortSettingsManager { "failed to fetch bgp peer config for switch port"; "switch_location" => ?location, "port" => &port.port_name, - "error" => %e, + "error" => %DisplayErrorChain::new(&e) ); continue; }, }; - let port_config = PortConfigV1 { - addresses: info.addresses.iter().map(|a| a.address).collect(), + let mut port_config = PortConfigV1 { + addresses: info.addresses.iter().map(|a| a.address.into()).collect(), autoneg: info .links .get(0) //TODO breakout support @@ -792,6 +945,16 @@ impl BackgroundTask for SwitchPortSettingsManager { delay_open: Some(c.delay_open.0.into()), connect_retry: Some(c.connect_retry.0.into()), keepalive: Some(c.keepalive.0.into()), + enforce_first_as: c.enforce_first_as, + local_pref: c.local_pref.map(|x| x.into()), + md5_auth_key: c.md5_auth_key, + min_ttl: c.min_ttl.map(|x| x.0 as u8), //TODO avoid cast return error + multi_exit_discriminator: c.multi_exit_discriminator.map(|x| x.into()), + remote_asn: c.remote_asn.map(|x| x.into()), + communities: Vec::new(), + allowed_export: ImportExportPolicy::NoFiltering, + allowed_import: ImportExportPolicy::NoFiltering, + vlan_id: c.vlan_id.map(|x| x.0 as u16), } }).collect(), port: port.port_name.clone(), @@ -799,8 +962,9 @@ impl BackgroundTask for SwitchPortSettingsManager { .routes .iter() .map(|r| SledRouteConfig { - destination: r.dst, + destination: r.dst.into(), nexthop: r.gw.ip(), + vlan_id: r.vid.map(|x| x.0), }) .collect(), switch: *location, @@ -817,6 +981,76 @@ impl BackgroundTask for SwitchPortSettingsManager { .unwrap_or(SwitchLinkSpeed::Speed100G) .into(), }; + + for peer in port_config.bgp_peers.iter_mut() { + peer.communities = match self + .datastore + .communities_for_peer( + opctx, + port.port_settings_id.unwrap(), + &peer.port, + IpNetwork::from(IpAddr::from(peer.addr)) + ).await { + Ok(cs) => 
cs.iter().map(|c| c.community.0).collect(), + Err(e) => { + error!(log, + "failed to get communities for peer"; + "peer" => ?peer, + "error" => %DisplayErrorChain::new(&e) + ); + continue; + } + }; + + //TODO consider awaiting in parallel and joining + let allow_import = match self.datastore.allow_import_for_peer( + opctx, + port.port_settings_id.unwrap(), + &peer.port, + IpNetwork::from(IpAddr::from(peer.addr)), + ).await { + Ok(cs) => cs, + Err(e) => { + error!(log, + "failed to get peer allowed imports"; + "peer" => ?peer, + "error" => %DisplayErrorChain::new(&e) + ); + continue; + } + }; + + peer.allowed_import = match allow_import { + Some(list) => ImportExportPolicy::Allow( + list.clone().into_iter().map(|x| x.prefix.into()).collect() + ), + None => ImportExportPolicy::NoFiltering, + }; + + let allow_export = match self.datastore.allow_export_for_peer( + opctx, + port.port_settings_id.unwrap(), + &peer.port, + IpNetwork::from(IpAddr::from(peer.addr)), + ).await { + Ok(cs) => cs, + Err(e) => { + error!(log, + "failed to get peer allowed exports"; + "peer" => ?peer, + "error" => %DisplayErrorChain::new(&e) + ); + continue; + } + }; + + peer.allowed_export = match allow_export { + Some(list) => ImportExportPolicy::Allow( + list.clone().into_iter().map(|x| x.prefix.into()).collect() + ), + None => ImportExportPolicy::NoFiltering, + }; + } ports.push(port_config); } @@ -1167,7 +1401,7 @@ fn uplinks( }; let config = HostPortConfig { port: port.port_name.clone(), - addrs: config.addresses.iter().map(|a| a.address).collect(), + addrs: config.addresses.iter().map(|a| a.address.into()).collect(), }; match uplinks.entry(*location) { @@ -1202,15 +1436,11 @@ fn build_sled_agent_clients( sled_agent_clients } +type SwitchStaticRoutes = HashSet<(Ipv4Addr, Prefix4, Option)>; + fn static_routes_to_del( - current_static_routes: HashMap< - SwitchLocation, - HashSet<(Ipv4Addr, Prefix4)>, - >, - desired_static_routes: HashMap< - SwitchLocation, - HashSet<(Ipv4Addr, Prefix4)>, - >, + current_static_routes: HashMap, + desired_static_routes: HashMap, ) -> HashMap { let mut routes_to_del: HashMap = HashMap::new(); @@ -1222,9 +1452,10 @@ fn static_routes_to_del( // if it's on the switch but not desired (in our db), it should be removed let stale_routes = routes_on_switch .difference(routes_wanted) - .map(|(nexthop, prefix)| StaticRoute4 { + .map(|(nexthop, prefix, vlan_id)| StaticRoute4 { nexthop: *nexthop, prefix: *prefix, + vlan_id: *vlan_id, }) .collect::>(); @@ -1238,9 +1469,10 @@ fn static_routes_to_del( // if no desired routes are present, all routes on this switch should be deleted let stale_routes = routes_on_switch .iter() - .map(|(nexthop, prefix)| StaticRoute4 { + .map(|(nexthop, prefix, vlan_id)| StaticRoute4 { nexthop: *nexthop, prefix: *prefix, + vlan_id: *vlan_id, }) .collect::>(); @@ -1262,15 +1494,10 @@ fn static_routes_to_del( routes_to_del } +#[allow(clippy::type_complexity)] fn static_routes_to_add( - desired_static_routes: &HashMap< - SwitchLocation, - HashSet<(Ipv4Addr, Prefix4)>, - >, - current_static_routes: &HashMap< - SwitchLocation, - HashSet<(Ipv4Addr, Prefix4)>, - >, + desired_static_routes: &HashMap, + current_static_routes: &HashMap, log: &slog::Logger, ) -> HashMap { let mut routes_to_add: HashMap = @@ -1292,9 +1519,10 @@ fn static_routes_to_add( }; let missing_routes = routes_wanted .difference(routes_on_switch) - .map(|(nexthop, prefix)| StaticRoute4 { + .map(|(nexthop, prefix, vlan_id)| StaticRoute4 { nexthop: *nexthop, prefix: *prefix, + vlan_id: *vlan_id, }) .collect::>(); 
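
For illustration: the two helpers above now treat a static route's identity as the full (nexthop, prefix, vlan_id) tuple, so changing only the VLAN on an otherwise identical route surfaces as one delete plus one add rather than an in-place update. The following is a minimal sketch of that set-difference reconciliation; the Route alias and the routes_to_add/routes_to_del names are simplified stand-ins for mgd's Prefix4/StaticRoute4 types, not the actual Nexus code.

use std::collections::HashSet;
use std::net::Ipv4Addr;

// (nexthop, (prefix address, prefix length), vlan_id): a stand-in for the
// (Ipv4Addr, Prefix4, Option<u16>) key used by the reconciliation above.
type Route = (Ipv4Addr, (Ipv4Addr, u8), Option<u16>);

// Routes wanted (per the database) but missing on the switch.
fn routes_to_add(wanted: &HashSet<Route>, on_switch: &HashSet<Route>) -> Vec<Route> {
    wanted.difference(on_switch).cloned().collect()
}

// Routes present on the switch but no longer wanted.
fn routes_to_del(wanted: &HashSet<Route>, on_switch: &HashSet<Route>) -> Vec<Route> {
    on_switch.difference(wanted).cloned().collect()
}

fn main() {
    let nexthop = Ipv4Addr::new(192, 0, 2, 1);
    let default = (Ipv4Addr::new(0, 0, 0, 0), 0u8);
    let wanted: HashSet<Route> = [(nexthop, default, Some(200))].into_iter().collect();
    let on_switch: HashSet<Route> = [(nexthop, default, None)].into_iter().collect();
    // Changing only the VLAN ID yields one delete and one add, because the
    // VLAN is part of the route's identity.
    assert_eq!(routes_to_del(&wanted, &on_switch).len(), 1);
    assert_eq!(routes_to_add(&wanted, &on_switch).len(), 1);
}
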
@@ -1321,11 +1549,9 @@ fn static_routes_in_db( nexus_db_model::SwitchPort, PortSettingsChange, )], -) -> HashMap> { - let mut routes_from_db: HashMap< - SwitchLocation, - HashSet<(Ipv4Addr, Prefix4)>, - > = HashMap::new(); +) -> HashMap { + let mut routes_from_db: HashMap = + HashMap::new(); for (location, _port, change) in changes { // we only need to check for ports that have a configuration present. No config == no routes. @@ -1345,7 +1571,7 @@ fn static_routes_in_db( } IpAddr::V6(_) => continue, }; - routes.insert((nexthop, prefix)); + routes.insert((nexthop, prefix, route.vid.map(|x| x.0))); } match routes_from_db.entry(*location) { @@ -1519,24 +1745,49 @@ async fn apply_switch_port_changes( async fn static_routes_on_switch<'a>( mgd_clients: &HashMap, log: &slog::Logger, -) -> HashMap> { +) -> HashMap { let mut routes_on_switch = HashMap::new(); for (location, client) in mgd_clients { - let static_routes: HashSet<(Ipv4Addr, Prefix4)> = - match client.static_list_v4_routes().await { - Ok(routes) => { - routes.list.iter().map(|r| (r.nexthop, r.prefix)).collect() - } - Err(_) => { - error!( - &log, - "unable to retrieve routes from switch"; - "switch_location" => ?location, - ); - continue; + let static_routes: SwitchStaticRoutes = match client + .static_list_v4_routes() + .await + { + Ok(routes) => { + let mut flattened = HashSet::new(); + for (destination, paths) in routes.iter() { + let Ok(dst) = destination.parse() else { + error!( + log, + "failed to parse static route destination: {destination}" + ); + continue; + }; + for p in paths.iter() { + let nh = match p.nexthop { + IpAddr::V4(addr) => addr, + IpAddr::V6(addr) => { + error!( + log, + "ipv6 nexthops not supported: {addr}" + ); + continue; + } + }; + flattened.insert((nh, dst, p.vlan_id)); + } } - }; + flattened + } + Err(_) => { + error!( + &log, + "unable to retrieve routes from switch"; + "switch_location" => ?location, + ); + continue; + } + }; routes_on_switch.insert(*location, static_routes); } routes_on_switch diff --git a/nexus/src/app/background/v2p_mappings.rs b/nexus/src/app/background/v2p_mappings.rs new file mode 100644 index 0000000000..a53ac3442f --- /dev/null +++ b/nexus/src/app/background/v2p_mappings.rs @@ -0,0 +1,165 @@ +use std::{collections::HashSet, sync::Arc}; + +use futures::future::BoxFuture; +use futures::FutureExt; +use nexus_db_model::{Sled, SledState}; +use nexus_db_queries::{context::OpContext, db::DataStore}; +use nexus_networking::sled_client_from_address; +use nexus_types::{ + deployment::SledFilter, external_api::views::SledPolicy, identity::Asset, +}; +use omicron_common::api::external::Vni; +use serde_json::json; +use sled_agent_client::types::VirtualNetworkInterfaceHost; + +use super::common::BackgroundTask; + +pub struct V2PManager { + datastore: Arc, +} + +impl V2PManager { + pub fn new(datastore: Arc) -> Self { + Self { datastore } + } +} + +impl BackgroundTask for V2PManager { + fn activate<'a>( + &'a mut self, + opctx: &'a OpContext, + ) -> BoxFuture<'a, serde_json::Value> { + let log = opctx.log.clone(); + + async move { + // Get the v2p mappings + let v2p_mappings = match self.datastore.v2p_mappings(opctx).await { + Ok(v) => v, + Err(e) => { + let msg = format!("failed to list v2p mappings: {:#}", e); + error!(&log, "{msg}"); + return json!({"error": msg}); + } + }; + + // Get sleds + // we only care about sleds that are active && inservice + let sleds = match self.datastore.sled_list_all_batched(opctx, SledFilter::InService).await + { + Ok(v) => v, + Err(e) => { + let msg = 
format!("failed to enumerate sleds: {:#}", e); + error!(&log, "{msg}"); + return json!({"error": msg}); + } + } + .into_iter() + .filter(|sled| { + matches!(sled.state(), SledState::Active) + && matches!(sled.policy(), SledPolicy::InService { .. }) + }); + + // Map sled db records to sled-agent clients + let sled_clients: Vec<(Sled, sled_agent_client::Client)> = sleds + .map(|sled| { + let client = sled_client_from_address( + sled.id(), + sled.address(), + &log, + ); + (sled, client) + }) + .collect(); + + // create a set of updates from the v2p mappings + let desired_v2p: HashSet<_> = v2p_mappings + .into_iter() + .filter_map(|mapping| { + let physical_host_ip = match mapping.sled_ip.ip() { + std::net::IpAddr::V4(v) => { + // sled ip should never be ipv4 + error!( + &log, + "sled ip should be ipv6 but is ipv4: {v}" + ); + return None; + } + std::net::IpAddr::V6(v) => v, + }; + + let vni = mapping.vni.0; + + let mapping = VirtualNetworkInterfaceHost { + virtual_ip: mapping.ip.ip(), + virtual_mac: *mapping.mac, + physical_host_ip, + vni, + }; + Some(mapping) + }) + .collect(); + + for (sled, client) in sled_clients { + // + // Get the current mappings on each sled + // Ignore vopte interfaces that are used for services. Service zones only need + // an opte interface for external communication. For services zones, intra-sled + // communication is facilitated via zone underlay interfaces / addresses, + // not opte interfaces / v2p mappings. + // + let found_v2p: HashSet = match client.list_v2p().await { + Ok(v) => v.into_inner(), + Err(e) => { + error!( + &log, + "unable to list opte v2p mappings for sled"; + "sled" => sled.serial_number(), + "error" => ?e + ); + continue; + } + }.into_iter().filter(|vnic| vnic.vni != Vni::SERVICES_VNI).collect(); + + info!(&log, "found opte v2p mappings"; "sled" => sled.serial_number(), "interfaces" => ?found_v2p); + + let v2p_to_add: Vec<_> = desired_v2p.difference(&found_v2p).collect(); + + let v2p_to_del: Vec<_> = found_v2p.difference(&desired_v2p).collect(); + + // + // Generally, we delete stale entries before adding new entries in RPWs to prevent stale entries + // from causing a conflict with an incoming entry. In the case of opte it doesn't matter which + // order we perform the next two steps in, since conflicting stale entries are overwritten by the + // incoming entries. 
+ // + info!(&log, "v2p mappings to delete"; "sled" => sled.serial_number(), "mappings" => ?v2p_to_del); + for mapping in v2p_to_del { + if let Err(e) = client.del_v2p(&mapping).await { + error!( + &log, + "failed to delete v2p mapping from sled"; + "sled" => sled.serial_number(), + "mapping" => ?mapping, + "error" => ?e, + ); + } + } + + info!(&log, "v2p mappings to add"; "sled" => sled.serial_number(), "mappings" => ?v2p_to_add); + for mapping in v2p_to_add { + if let Err(e) = client.set_v2p(mapping).await { + error!( + &log, + "failed to add v2p mapping to sled"; + "sled" => sled.serial_number(), + "mapping" => ?mapping, + "error" => ?e, + ); + } + } + } + json!({}) + } + .boxed() + } +} diff --git a/nexus/src/app/bgp.rs b/nexus/src/app/bgp.rs index 4a85306d56..b6e3f25263 100644 --- a/nexus/src/app/bgp.rs +++ b/nexus/src/app/bgp.rs @@ -10,9 +10,9 @@ use nexus_db_queries::context::OpContext; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::{ self, BgpImportedRouteIpv4, BgpMessageHistory, BgpPeerStatus, CreateResult, - DeleteResult, Ipv4Net, ListResultVec, LookupResult, NameOrId, - SwitchBgpHistory, + DeleteResult, ListResultVec, LookupResult, NameOrId, SwitchBgpHistory, }; +use std::net::IpAddr; impl super::Nexus { pub async fn bgp_config_set( @@ -95,7 +95,7 @@ impl super::Nexus { "failed to get mg clients: {e}" )) })? { - let router_info = match client.get_routers().await { + let router_info = match client.read_routers().await { Ok(result) => result.into_inner(), Err(e) => { error!( @@ -107,13 +107,29 @@ impl super::Nexus { }; for r in &router_info { - for (addr, info) in &r.peers { - let Ok(addr) = addr.parse() else { + let asn = r.asn; + + let peers = match client.get_neighbors(asn).await { + Ok(result) => result.into_inner(), + Err(e) => { + error!( + self.log, + "failed to get peers for asn {asn} from {switch}: {e}" + ); + continue; + } + }; + for (host, info) in peers { + let Ok(host) = host.parse() else { + error!( + self.log, + "failed to parse peer host address {host}", + ); continue; }; result.push(BgpPeerStatus { switch: *switch, - addr, + addr: host, local_asn: r.asn, remote_asn: info.asn.unwrap_or(0), state: info.state.into(), @@ -176,28 +192,44 @@ impl super::Nexus { "failed to get mg clients: {e}" )) })? 
{ - let imported: Vec = match client - .get_imported4(&mg_admin_client::types::GetImported4Request { + let mut imported: Vec = Vec::new(); + match client + .get_imported(&mg_admin_client::types::AsnSelector { asn: sel.asn, }) .await { - Ok(result) => result - .into_inner() - .into_iter() - .map(|x| BgpImportedRouteIpv4 { - switch: *switch, - prefix: Ipv4Net( - ipnetwork::Ipv4Network::new( - x.prefix.value, - x.prefix.length, - ) - .unwrap(), - ), - nexthop: x.nexthop, - id: x.id, - }) - .collect(), + Ok(result) => { + for (prefix, paths) in result.into_inner().iter() { + let ipnet = match prefix.parse() { + Ok(p) => p, + Err(e) => { + error!( + self.log, + "failed to parse prefix {prefix}: {e}" + ); + continue; + } + }; + for p in paths.iter() { + let nexthop = match p.nexthop { + IpAddr::V4(addr) => addr, + IpAddr::V6(_) => continue, + }; + let x = BgpImportedRouteIpv4 { + switch: *switch, + prefix: ipnet, + id: p + .bgp + .as_ref() + .map(|bgp| bgp.id) + .unwrap_or(0), + nexthop, + }; + imported.push(x); + } + } + } Err(e) => { error!( self.log, diff --git a/nexus/src/app/deployment.rs b/nexus/src/app/deployment.rs index 48ed844f12..280f4306c7 100644 --- a/nexus/src/app/deployment.rs +++ b/nexus/src/app/deployment.rs @@ -7,21 +7,21 @@ use nexus_db_model::DnsGroup; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; -use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::planner::Planner; -use nexus_reconfigurator_preparation::policy_from_db; +use nexus_reconfigurator_preparation::PlanningInputFromDb; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintMetadata; use nexus_types::deployment::BlueprintTarget; use nexus_types::deployment::BlueprintTargetSet; -use nexus_types::deployment::Policy; +use nexus_types::deployment::CockroachDbClusterVersion; +use nexus_types::deployment::PlanningInput; +use nexus_types::deployment::SledFilter; use nexus_types::inventory::Collection; use omicron_common::address::NEXUS_REDUNDANCY; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; -use omicron_common::api::external::Generation; use omicron_common::api::external::InternalContext; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; @@ -31,11 +31,9 @@ use uuid::Uuid; /// Common structure for collecting information that the planner needs struct PlanningContext { - policy: Policy, + planning_input: PlanningInput, creator: String, inventory: Option, - internal_dns_version: Generation, - external_dns_version: Generation, } impl super::Nexus { @@ -76,7 +74,7 @@ impl super::Nexus { pub async fn blueprint_target_view( &self, opctx: &OpContext, - ) -> Result, Error> { + ) -> Result { self.db_datastore.blueprint_target_get_current(opctx).await } @@ -133,7 +131,9 @@ impl super::Nexus { let creator = self.id.to_string(); let datastore = self.datastore(); - let sled_rows = datastore.sled_list_all_batched(opctx).await?; + let sled_rows = datastore + .sled_list_all_batched(opctx, SledFilter::Commissioned) + .await?; let zpool_rows = datastore.zpool_list_all_external_batched(opctx).await?; let ip_pool_range_rows = { @@ -143,13 +143,46 @@ impl super::Nexus { .ip_pool_list_ranges_batched(opctx, &authz_service_ip_pool) .await? 
}; + let external_ip_rows = + datastore.external_ip_list_service_all_batched(opctx).await?; + let service_nic_rows = datastore + .service_network_interfaces_all_list_batched(opctx) + .await?; + + let internal_dns_version = datastore + .dns_group_latest_version(opctx, DnsGroup::Internal) + .await + .internal_context( + "fetching internal DNS version for blueprint planning", + )? + .version; + let external_dns_version = datastore + .dns_group_latest_version(opctx, DnsGroup::External) + .await + .internal_context( + "fetching external DNS version for blueprint planning", + )? + .version; + let cockroachdb_settings = + datastore.cockroachdb_settings(opctx).await.internal_context( + "fetching cockroachdb settings for blueprint planning", + )?; - let policy = policy_from_db( - &sled_rows, - &zpool_rows, - &ip_pool_range_rows, - NEXUS_REDUNDANCY, - )?; + let planning_input = PlanningInputFromDb { + sled_rows: &sled_rows, + zpool_rows: &zpool_rows, + ip_pool_range_rows: &ip_pool_range_rows, + external_ip_rows: &external_ip_rows, + service_nic_rows: &service_nic_rows, + target_nexus_zone_count: NEXUS_REDUNDANCY, + target_cockroachdb_cluster_version: + CockroachDbClusterVersion::POLICY, + log: &opctx.log, + internal_dns_version, + external_dns_version, + cockroachdb_settings: &cockroachdb_settings, + } + .build()?; // The choice of which inventory collection to use here is not // necessarily trivial. Inventory collections may be incomplete due to @@ -169,29 +202,7 @@ impl super::Nexus { "fetching latest inventory collection for blueprint planner", )?; - // Fetch the current DNS versions. This could be made part of - // inventory, but it's enough of a one-off that there's no particular - // advantage to doing that work now. - let internal_dns_version = datastore - .dns_group_latest_version(opctx, DnsGroup::Internal) - .await - .internal_context( - "fetching internal DNS version for blueprint planning", - )?; - let external_dns_version = datastore - .dns_group_latest_version(opctx, DnsGroup::External) - .await - .internal_context( - "fetching external DNS version for blueprint planning", - )?; - - Ok(PlanningContext { - creator, - policy, - inventory, - internal_dns_version: *internal_dns_version.version, - external_dns_version: *external_dns_version.version, - }) + Ok(PlanningContext { planning_input, creator, inventory }) } async fn blueprint_add( @@ -202,46 +213,12 @@ impl super::Nexus { self.db_datastore.blueprint_insert(opctx, blueprint).await } - pub async fn blueprint_generate_from_collection( - &self, - opctx: &OpContext, - collection_id: Uuid, - ) -> CreateResult { - let collection = self - .datastore() - .inventory_collection_read(opctx, collection_id) - .await?; - let planning_context = self.blueprint_planning_context(opctx).await?; - let blueprint = BlueprintBuilder::build_initial_from_collection( - &collection, - planning_context.internal_dns_version, - planning_context.external_dns_version, - &planning_context.policy, - &planning_context.creator, - ) - .map_err(|error| { - Error::internal_error(&format!( - "error generating initial blueprint from collection {}: {}", - collection_id, - InlineErrorChain::new(&error) - )) - })?; - - self.blueprint_add(&opctx, &blueprint).await?; - Ok(blueprint) - } - pub async fn blueprint_create_regenerate( &self, opctx: &OpContext, ) -> CreateResult { - let maybe_target = + let (_, parent_blueprint) = self.db_datastore.blueprint_target_get_current_full(opctx).await?; - let Some((_, parent_blueprint)) = maybe_target else { - return Err(Error::conflict( - 
"cannot regenerate blueprint without existing target", - )); - }; let planning_context = self.blueprint_planning_context(opctx).await?; let inventory = planning_context.inventory.ok_or_else(|| { @@ -250,9 +227,7 @@ impl super::Nexus { let planner = Planner::new_based_on( opctx.log.clone(), &parent_blueprint, - planning_context.internal_dns_version, - planning_context.external_dns_version, - &planning_context.policy, + &planning_context.planning_input, &planning_context.creator, &inventory, ) @@ -271,4 +246,13 @@ impl super::Nexus { self.blueprint_add(&opctx, &blueprint).await?; Ok(blueprint) } + + pub async fn blueprint_import( + &self, + opctx: &OpContext, + blueprint: Blueprint, + ) -> Result<(), Error> { + let _ = self.blueprint_add(&opctx, &blueprint).await?; + Ok(()) + } } diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index 5dd49a2efb..78ae002dd3 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -151,7 +151,7 @@ impl super::Nexus { // Reject disks where the size isn't at least // MIN_DISK_SIZE_BYTES - if params.size.to_bytes() < MIN_DISK_SIZE_BYTES as u64 { + if params.size.to_bytes() < u64::from(MIN_DISK_SIZE_BYTES) { return Err(Error::invalid_value( "size", format!( @@ -163,7 +163,7 @@ impl super::Nexus { // Reject disks where the MIN_DISK_SIZE_BYTES doesn't evenly // divide the size - if (params.size.to_bytes() % MIN_DISK_SIZE_BYTES as u64) != 0 { + if (params.size.to_bytes() % u64::from(MIN_DISK_SIZE_BYTES)) != 0 { return Err(Error::invalid_value( "size", format!( @@ -488,10 +488,10 @@ impl super::Nexus { // that user's program can act accordingly. In a way, the user's // program is an externally driven saga instead. - let client = crucible_pantry_client::Client::new(&format!( - "http://{}", - endpoint - )); + let client = crucible_pantry_client::Client::new_with_client( + &format!("http://{}", endpoint), + self.reqwest_client.clone(), + ); let request = crucible_pantry_client::types::BulkWriteRequest { offset: param.offset, base64_encoded_data: param.base64_encoded_data, diff --git a/nexus/src/app/external_endpoints.rs b/nexus/src/app/external_endpoints.rs index db87632bbf..18d2399eb5 100644 --- a/nexus/src/app/external_endpoints.rs +++ b/nexus/src/app/external_endpoints.rs @@ -26,7 +26,7 @@ //! "certificate resolver" object that impls //! [`rustls::server::ResolvesServerCert`]. See [`NexusCertResolver`]. -use crate::ServerContext; +use crate::context::ApiContext; use anyhow::anyhow; use anyhow::bail; use anyhow::Context; @@ -674,7 +674,7 @@ impl super::Nexus { /// case, we'll choose an arbitrary Silo. pub fn endpoint_for_request( &self, - rqctx: &dropshot::RequestContext>, + rqctx: &dropshot::RequestContext, ) -> Result, Error> { let log = &rqctx.log; let rqinfo = &rqctx.request; diff --git a/nexus/src/app/external_ip.rs b/nexus/src/app/external_ip.rs index 9908512c0f..62b8c4cbd6 100644 --- a/nexus/src/app/external_ip.rs +++ b/nexus/src/app/external_ip.rs @@ -111,10 +111,11 @@ impl super::Nexus { let params::FloatingIpCreate { identity, pool, ip } = params; + // resolve NameOrId into authz::IpPool let pool = match pool { Some(pool) => Some( self.ip_pool_lookup(opctx, &pool)? - .lookup_for(authz::Action::Read) + .lookup_for(authz::Action::CreateChild) .await? 
.0, ), diff --git a/nexus/src/app/image.rs b/nexus/src/app/image.rs index a7fe75a464..03c9c9d6a4 100644 --- a/nexus/src/app/image.rs +++ b/nexus/src/app/image.rs @@ -121,7 +121,10 @@ impl super::Nexus { let image_volume = self .db_datastore - .volume_checkout_randomize_ids(db_snapshot.volume_id) + .volume_checkout_randomize_ids( + db_snapshot.volume_id, + db::datastore::VolumeCheckoutReason::ReadOnlyCopy, + ) .await?; db::model::Image { @@ -147,7 +150,7 @@ impl super::Nexus { // allow users to boot that. This should go away when that blob // does. let db_block_size = db::model::BlockSize::Traditional; - let block_size: u64 = db_block_size.to_bytes() as u64; + let block_size: u64 = u64::from(db_block_size.to_bytes()); let image_id = Uuid::new_v4(); diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 2300bd56f2..63b080b436 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -28,6 +28,7 @@ use nexus_db_queries::db::datastore::InstanceAndActiveVmm; use nexus_db_queries::db::identity::Resource; use nexus_db_queries::db::lookup; use nexus_db_queries::db::lookup::LookupPath; +use nexus_db_queries::db::DataStore; use nexus_types::external_api::views; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::ByteCount; @@ -173,6 +174,13 @@ enum InstanceStateChangeRequestAction { SendToSled(Uuid), } +/// What is the higher level operation that is calling +/// `instance_ensure_registered`? +pub(crate) enum InstanceRegisterReason { + Start { vmm_id: Uuid }, + Migrate { vmm_id: Uuid, target_vmm_id: Uuid }, +} + impl super::Nexus { pub fn instance_lookup<'a>( &'a self, @@ -287,7 +295,7 @@ impl super::Nexus { // Reject instances where the memory is not at least // MIN_MEMORY_BYTES_PER_INSTANCE - if params.memory.to_bytes() < MIN_MEMORY_BYTES_PER_INSTANCE as u64 { + if params.memory.to_bytes() < u64::from(MIN_MEMORY_BYTES_PER_INSTANCE) { return Err(Error::invalid_value( "size", format!( @@ -299,7 +307,7 @@ impl super::Nexus { // Reject instances where the memory is not divisible by // MIN_MEMORY_BYTES_PER_INSTANCE - if (params.memory.to_bytes() % MIN_MEMORY_BYTES_PER_INSTANCE as u64) + if (params.memory.to_bytes() % u64::from(MIN_MEMORY_BYTES_PER_INSTANCE)) != 0 { return Err(Error::invalid_value( @@ -1010,6 +1018,7 @@ impl super::Nexus { db_instance: &db::model::Instance, propolis_id: &Uuid, initial_vmm: &db::model::Vmm, + operation: InstanceRegisterReason, ) -> Result<(), Error> { opctx.authorize(authz::Action::Modify, authz_instance).await?; @@ -1065,8 +1074,19 @@ impl super::Nexus { } }; - let volume = - self.db_datastore.volume_checkout(disk.volume_id).await?; + let volume = self + .db_datastore + .volume_checkout( + disk.volume_id, + match operation { + InstanceRegisterReason::Start { vmm_id } => + db::datastore::VolumeCheckoutReason::InstanceStart { vmm_id }, + InstanceRegisterReason::Migrate { vmm_id, target_vmm_id } => + db::datastore::VolumeCheckoutReason::InstanceMigrate { vmm_id, target_vmm_id }, + } + ) + .await?; + disk_reqs.push(sled_agent_client::types::DiskRequest { name: disk.name().to_string(), slot: sled_agent_client::types::Slot(slot.0), @@ -1140,7 +1160,12 @@ impl super::Nexus { )); } let source_nat = - SourceNatConfig::from(snat_ip.into_iter().next().unwrap()); + SourceNatConfig::try_from(snat_ip.into_iter().next().unwrap()) + .map_err(|err| { + Error::internal_error(&format!( + "read invalid SNAT config from db: {err}" + )) + })?; // Gather the firewall rules for the VPC this instance is in. 
// The NIC info we gathered above doesn't have VPC information @@ -1489,176 +1514,18 @@ impl super::Nexus { instance_id: &Uuid, new_runtime_state: &nexus::SledInstanceState, ) -> Result<(), Error> { - let log = &self.log; - let propolis_id = new_runtime_state.propolis_id; - - info!(log, "received new runtime state from sled agent"; - "instance_id" => %instance_id, - "instance_state" => ?new_runtime_state.instance_state, - "propolis_id" => %propolis_id, - "vmm_state" => ?new_runtime_state.vmm_state); - - // Grab the current state of the instance in the DB to reason about - // whether this update is stale or not. - let (.., authz_instance, db_instance) = - LookupPath::new(&opctx, &self.db_datastore) - .instance_id(*instance_id) - .fetch() - .await?; - - // Update OPTE and Dendrite if the instance's active sled assignment - // changed or a migration was retired. If these actions fail, sled agent - // is expected to retry this update. - // - // This configuration must be updated before updating any state in CRDB - // so that, if the instance was migrating or has shut down, it will not - // appear to be able to migrate or start again until the appropriate - // networking state has been written. Without this interlock, another - // thread or another Nexus can race with this routine to write - // conflicting configuration. - // - // In the future, this should be replaced by a call to trigger a - // networking state update RPW. - self.ensure_updated_instance_network_config( + notify_instance_updated( + &self.datastore(), + &self.resolver().await, + &self.opctx_alloc, opctx, - &authz_instance, - db_instance.runtime(), - &new_runtime_state.instance_state, + &self.log, + instance_id, + new_runtime_state, + self.v2p_notification_tx.clone(), ) .await?; - - // If the supplied instance state indicates that the instance no longer - // has an active VMM, attempt to delete the virtual provisioning record, - // and the assignment of the Propolis metric producer to an oximeter - // collector. - // - // As with updating networking state, this must be done before - // committing the new runtime state to the database: once the DB is - // written, a new start saga can arrive and start the instance, which - // will try to create its own virtual provisioning charges, which will - // race with this operation. - if new_runtime_state.instance_state.propolis_id.is_none() { - self.db_datastore - .virtual_provisioning_collection_delete_instance( - opctx, - *instance_id, - db_instance.project_id, - i64::from(db_instance.ncpus.0 .0), - db_instance.memory, - (&new_runtime_state.instance_state.gen).into(), - ) - .await?; - - // TODO-correctness: The `notify_instance_updated` method can run - // concurrently with itself in some situations, such as where a - // sled-agent attempts to update Nexus about a stopped instance; - // that times out; and it makes another request to a different - // Nexus. The call to `unassign_producer` is racy in those - // situations, and we may end with instances with no metrics. - // - // This unfortunate case should be handled as part of - // instance-lifecycle improvements, notably using a reliable - // persistent workflow to correctly update the oximete assignment as - // an instance's state changes. - // - // Tracked in https://github.com/oxidecomputer/omicron/issues/3742. - self.unassign_producer(instance_id).await?; - } - - // Write the new instance and VMM states back to CRDB. 
This needs to be - // done before trying to clean up the VMM, since the datastore will only - // allow a VMM to be marked as deleted if it is already in a terminal - // state. - let result = self - .db_datastore - .instance_and_vmm_update_runtime( - instance_id, - &db::model::InstanceRuntimeState::from( - new_runtime_state.instance_state.clone(), - ), - &propolis_id, - &db::model::VmmRuntimeState::from( - new_runtime_state.vmm_state.clone(), - ), - ) - .await; - - // If the VMM is now in a terminal state, make sure its resources get - // cleaned up. - // - // For idempotency, only check to see if the update was successfully - // processed and ignore whether the VMM record was actually updated. - // This is required to handle the case where this routine is called - // once, writes the terminal VMM state, fails before all per-VMM - // resources are released, returns a retriable error, and is retried: - // the per-VMM resources still need to be cleaned up, but the DB update - // will return Ok(_, false) because the database was already updated. - // - // Unlike the pre-update cases, it is legal to do this cleanup *after* - // committing state to the database, because a terminated VMM cannot be - // reused (restarting or migrating its former instance will use new VMM - // IDs). - if result.is_ok() { - let propolis_terminated = matches!( - new_runtime_state.vmm_state.state, - InstanceState::Destroyed | InstanceState::Failed - ); - - if propolis_terminated { - info!(log, "vmm is terminated, cleaning up resources"; - "instance_id" => %instance_id, - "propolis_id" => %propolis_id); - - self.db_datastore - .sled_reservation_delete(opctx, propolis_id) - .await?; - - if !self - .db_datastore - .vmm_mark_deleted(opctx, &propolis_id) - .await? - { - warn!(log, "failed to mark vmm record as deleted"; - "instance_id" => %instance_id, - "propolis_id" => %propolis_id, - "vmm_state" => ?new_runtime_state.vmm_state); - } - } - } - - match result { - Ok((instance_updated, vmm_updated)) => { - info!(log, "instance and vmm updated by sled agent"; - "instance_id" => %instance_id, - "propolis_id" => %propolis_id, - "instance_updated" => instance_updated, - "vmm_updated" => vmm_updated); - Ok(()) - } - - // The update command should swallow object-not-found errors and - // return them back as failures to update, so this error case is - // unexpected. There's no work to do if this occurs, however. - Err(Error::ObjectNotFound { .. }) => { - error!(log, "instance/vmm update unexpectedly returned \ - an object not found error"; - "instance_id" => %instance_id, - "propolis_id" => %propolis_id); - Ok(()) - } - - // If the datastore is unavailable, propagate that to the caller. - // TODO-robustness Really this should be any _transient_ error. How - // can we distinguish? Maybe datastore should emit something - // different from Error with an Into. - Err(error) => { - warn!(log, "failed to update instance from sled agent"; - "instance_id" => %instance_id, - "propolis_id" => %propolis_id, - "error" => ?error); - Err(error) - } - } + Ok(()) } /// Returns the requested range of serial console output bytes, @@ -2087,6 +1954,198 @@ impl super::Nexus { } } +/// Records what aspects of an instance's state were actually changed in a +/// [`notify_instance_updated`] call. +/// +/// This is (presently) used for debugging purposes only. 
+#[derive(Copy, Clone)] +pub(crate) struct InstanceUpdated { + pub instance_updated: bool, + pub vmm_updated: bool, +} + +/// Invoked by a sled agent to publish an updated runtime state for an +/// Instance. +#[allow(clippy::too_many_arguments)] // :( +pub(crate) async fn notify_instance_updated( + datastore: &DataStore, + resolver: &internal_dns::resolver::Resolver, + opctx_alloc: &OpContext, + opctx: &OpContext, + log: &slog::Logger, + instance_id: &Uuid, + new_runtime_state: &nexus::SledInstanceState, + v2p_notification_tx: tokio::sync::watch::Sender<()>, +) -> Result, Error> { + let propolis_id = new_runtime_state.propolis_id; + + info!(log, "received new runtime state from sled agent"; + "instance_id" => %instance_id, + "instance_state" => ?new_runtime_state.instance_state, + "propolis_id" => %propolis_id, + "vmm_state" => ?new_runtime_state.vmm_state); + + // Grab the current state of the instance in the DB to reason about + // whether this update is stale or not. + let (.., authz_instance, db_instance) = LookupPath::new(&opctx, &datastore) + .instance_id(*instance_id) + .fetch() + .await?; + + // Update OPTE and Dendrite if the instance's active sled assignment + // changed or a migration was retired. If these actions fail, sled agent + // is expected to retry this update. + // + // This configuration must be updated before updating any state in CRDB + // so that, if the instance was migrating or has shut down, it will not + // appear to be able to migrate or start again until the appropriate + // networking state has been written. Without this interlock, another + // thread or another Nexus can race with this routine to write + // conflicting configuration. + // + // In the future, this should be replaced by a call to trigger a + // networking state update RPW. + super::instance_network::ensure_updated_instance_network_config( + datastore, + log, + resolver, + opctx, + opctx_alloc, + &authz_instance, + db_instance.runtime(), + &new_runtime_state.instance_state, + v2p_notification_tx.clone(), + ) + .await?; + + // If the supplied instance state indicates that the instance no longer + // has an active VMM, attempt to delete the virtual provisioning record, + // and the assignment of the Propolis metric producer to an oximeter + // collector. + // + // As with updating networking state, this must be done before + // committing the new runtime state to the database: once the DB is + // written, a new start saga can arrive and start the instance, which + // will try to create its own virtual provisioning charges, which will + // race with this operation. + if new_runtime_state.instance_state.propolis_id.is_none() { + datastore + .virtual_provisioning_collection_delete_instance( + opctx, + *instance_id, + db_instance.project_id, + i64::from(db_instance.ncpus.0 .0), + db_instance.memory, + (&new_runtime_state.instance_state.gen).into(), + ) + .await?; + + // TODO-correctness: The `notify_instance_updated` method can run + // concurrently with itself in some situations, such as where a + // sled-agent attempts to update Nexus about a stopped instance; + // that times out; and it makes another request to a different + // Nexus. The call to `unassign_producer` is racy in those + // situations, and we may end with instances with no metrics. + // + // This unfortunate case should be handled as part of + // instance-lifecycle improvements, notably using a reliable + // persistent workflow to correctly update the oximete assignment as + // an instance's state changes. 
+ // + // Tracked in https://github.com/oxidecomputer/omicron/issues/3742. + super::oximeter::unassign_producer(datastore, log, opctx, instance_id) + .await?; + } + + // Write the new instance and VMM states back to CRDB. This needs to be + // done before trying to clean up the VMM, since the datastore will only + // allow a VMM to be marked as deleted if it is already in a terminal + // state. + let result = datastore + .instance_and_vmm_update_runtime( + instance_id, + &db::model::InstanceRuntimeState::from( + new_runtime_state.instance_state.clone(), + ), + &propolis_id, + &db::model::VmmRuntimeState::from( + new_runtime_state.vmm_state.clone(), + ), + ) + .await; + + // If the VMM is now in a terminal state, make sure its resources get + // cleaned up. + // + // For idempotency, only check to see if the update was successfully + // processed and ignore whether the VMM record was actually updated. + // This is required to handle the case where this routine is called + // once, writes the terminal VMM state, fails before all per-VMM + // resources are released, returns a retriable error, and is retried: + // the per-VMM resources still need to be cleaned up, but the DB update + // will return Ok(_, false) because the database was already updated. + // + // Unlike the pre-update cases, it is legal to do this cleanup *after* + // committing state to the database, because a terminated VMM cannot be + // reused (restarting or migrating its former instance will use new VMM + // IDs). + if result.is_ok() { + let propolis_terminated = matches!( + new_runtime_state.vmm_state.state, + InstanceState::Destroyed | InstanceState::Failed + ); + + if propolis_terminated { + info!(log, "vmm is terminated, cleaning up resources"; + "instance_id" => %instance_id, + "propolis_id" => %propolis_id); + + datastore.sled_reservation_delete(opctx, propolis_id).await?; + + if !datastore.vmm_mark_deleted(opctx, &propolis_id).await? { + warn!(log, "failed to mark vmm record as deleted"; + "instance_id" => %instance_id, + "propolis_id" => %propolis_id, + "vmm_state" => ?new_runtime_state.vmm_state); + } + } + } + + match result { + Ok((instance_updated, vmm_updated)) => { + info!(log, "instance and vmm updated by sled agent"; + "instance_id" => %instance_id, + "propolis_id" => %propolis_id, + "instance_updated" => instance_updated, + "vmm_updated" => vmm_updated); + Ok(Some(InstanceUpdated { instance_updated, vmm_updated })) + } + + // The update command should swallow object-not-found errors and + // return them back as failures to update, so this error case is + // unexpected. There's no work to do if this occurs, however. + Err(Error::ObjectNotFound { .. }) => { + error!(log, "instance/vmm update unexpectedly returned \ + an object not found error"; + "instance_id" => %instance_id, + "propolis_id" => %propolis_id); + Ok(None) + } + + // If the datastore is unavailable, propagate that to the caller. + // TODO-robustness Really this should be any _transient_ error. How + // can we distinguish? Maybe datastore should emit something + // different from Error with an Into. 
+ Err(error) => { + warn!(log, "failed to update instance from sled agent"; + "instance_id" => %instance_id, + "propolis_id" => %propolis_id, + "error" => ?error); + Err(error) + } + } +} + #[cfg(test)] mod tests { use super::super::Nexus; diff --git a/nexus/src/app/instance_network.rs b/nexus/src/app/instance_network.rs index c345809f4d..3c607bae78 100644 --- a/nexus/src/app/instance_network.rs +++ b/nexus/src/app/instance_network.rs @@ -4,8 +4,8 @@ //! Routines that manage instance-related networking state. +use crate::app::switch_port; use ipnetwork::IpNetwork; -use ipnetwork::Ipv6Network; use nexus_db_model::ExternalIp; use nexus_db_model::IpAttachState; use nexus_db_model::Ipv4NatEntry; @@ -14,22 +14,20 @@ use nexus_db_model::Vni as DbVni; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; -use nexus_db_queries::db::identity::Asset; use nexus_db_queries::db::lookup::LookupPath; -use omicron_common::api::external::DataPageParams; +use nexus_db_queries::db::DataStore; use omicron_common::api::external::Error; -use omicron_common::api::external::Ipv4Net; -use omicron_common::api::external::Ipv6Net; use omicron_common::api::internal::nexus; use omicron_common::api::internal::shared::NetworkInterface; use omicron_common::api::internal::shared::SwitchLocation; -use omicron_common::retry_until_known_result; -use sled_agent_client::types::DeleteVirtualNetworkInterfaceHost; -use sled_agent_client::types::SetVirtualNetworkInterfaceHost; +use oxnet::Ipv4Net; +use oxnet::Ipv6Net; use std::collections::HashSet; use std::str::FromStr; use uuid::Uuid; +use super::background::BackgroundTasks; + impl super::Nexus { /// Returns the set of switches with uplinks configured and boundary /// services enabled. @@ -37,243 +35,7 @@ impl super::Nexus { &self, opctx: &OpContext, ) -> Result, Error> { - let mut boundary_switches: HashSet = HashSet::new(); - let uplinks = self.list_switch_ports_with_uplinks(opctx).await?; - for uplink in &uplinks { - let location: SwitchLocation = - uplink.switch_location.parse().map_err(|_| { - Error::internal_error(&format!( - "invalid switch location in uplink config: {}", - uplink.switch_location - )) - })?; - boundary_switches.insert(location); - } - Ok(boundary_switches) - } - - /// Ensures that V2P mappings exist that indicate that the instance with ID - /// `instance_id` is resident on the sled with ID `sled_id`. - pub(crate) async fn create_instance_v2p_mappings( - &self, - opctx: &OpContext, - instance_id: Uuid, - sled_id: Uuid, - ) -> Result<(), Error> { - info!(&self.log, "creating V2P mappings for instance"; - "instance_id" => %instance_id, - "sled_id" => %sled_id); - - // For every sled that isn't the sled this instance was allocated to, create - // a virtual to physical mapping for each of this instance's NICs. - // - // For the mappings to be correct, a few invariants must hold: - // - // - mappings must be set whenever an instance's sled changes (eg. - // during instance creation, migration, stop + start) - // - // - an instances' sled must not change while its corresponding mappings - // are being created - // - // - the same mapping creation must be broadcast to all sleds - // - // A more targeted approach would be to see what other instances share - // the VPC this instance is in (or more generally, what instances should - // have connectivity to this one), see what sleds those are allocated - // to, and only create V2P mappings for those sleds. 
- // - // There's additional work with this approach: - // - // - it means that delete calls are required as well as set calls, - // meaning that now the ordering of those matters (this may also - // necessitate a generation number for V2P mappings) - // - // - V2P mappings have to be bidirectional in order for both instances's - // packets to make a round trip. This isn't a problem with the - // broadcast approach because one of the sides will exist already, but - // it is something to orchestrate with a more targeted approach. - // - // TODO-correctness Default firewall rules currently will block - // instances in different VPCs from connecting to each other. If it ever - // stops doing this, the broadcast approach will create V2P mappings - // that shouldn't exist. - let (.., authz_instance) = LookupPath::new(&opctx, &self.db_datastore) - .instance_id(instance_id) - .lookup_for(authz::Action::Read) - .await?; - - let instance_nics = self - .db_datastore - .derive_guest_network_interface_info(&opctx, &authz_instance) - .await?; - - // Look up the supplied sled's physical host IP. - let physical_host_ip = - *self.sled_lookup(&self.opctx_alloc, &sled_id)?.fetch().await?.1.ip; - - let mut last_sled_id: Option = None; - loop { - let pagparams = DataPageParams { - marker: last_sled_id.as_ref(), - direction: dropshot::PaginationOrder::Ascending, - limit: std::num::NonZeroU32::new(10).unwrap(), - }; - - let sleds_page = - self.sled_list(&self.opctx_alloc, &pagparams).await?; - let mut join_handles = - Vec::with_capacity(sleds_page.len() * instance_nics.len()); - - for sled in &sleds_page { - // set_v2p not required for sled instance was allocated to, OPTE - // currently does that automatically - // - // TODO(#3107): Remove this when XDE stops creating mappings - // implicitly. - if sled.id() == sled_id { - continue; - } - - for nic in &instance_nics { - let client = self.sled_client(&sled.id()).await?; - let nic_id = nic.id; - let mapping = SetVirtualNetworkInterfaceHost { - virtual_ip: nic.ip, - virtual_mac: nic.mac, - physical_host_ip, - vni: nic.vni, - }; - - let log = self.log.clone(); - - // This function is idempotent: calling the set_v2p ioctl with - // the same information is a no-op. - join_handles.push(tokio::spawn(futures::future::lazy( - move |_ctx| async move { - retry_until_known_result(&log, || async { - client.set_v2p(&nic_id, &mapping).await - }) - .await - }, - ))); - } - } - - // Concurrently run each future to completion, but return the last - // error seen. - let mut error = None; - for join_handle in join_handles { - let result = join_handle - .await - .map_err(|e| Error::internal_error(&e.to_string()))? - .await; - - if result.is_err() { - error!(self.log, "{:?}", result); - error = Some(result); - } - } - if let Some(e) = error { - return e.map(|_| ()).map_err(|e| e.into()); - } - - if sleds_page.len() < 10 { - break; - } - - if let Some(last) = sleds_page.last() { - last_sled_id = Some(last.id()); - } - } - - Ok(()) - } - - /// Ensure that the necessary v2p mappings for an instance are deleted - pub(crate) async fn delete_instance_v2p_mappings( - &self, - opctx: &OpContext, - instance_id: Uuid, - ) -> Result<(), Error> { - // For every sled that isn't the sled this instance was allocated to, delete - // the virtual to physical mapping for each of this instance's NICs. If - // there isn't a V2P mapping, del_v2p should be a no-op. 
- let (.., authz_instance) = LookupPath::new(&opctx, &self.db_datastore) - .instance_id(instance_id) - .lookup_for(authz::Action::Read) - .await?; - - let instance_nics = self - .db_datastore - .derive_guest_network_interface_info(&opctx, &authz_instance) - .await?; - - let mut last_sled_id: Option = None; - - loop { - let pagparams = DataPageParams { - marker: last_sled_id.as_ref(), - direction: dropshot::PaginationOrder::Ascending, - limit: std::num::NonZeroU32::new(10).unwrap(), - }; - - let sleds_page = - self.sled_list(&self.opctx_alloc, &pagparams).await?; - let mut join_handles = - Vec::with_capacity(sleds_page.len() * instance_nics.len()); - - for sled in &sleds_page { - for nic in &instance_nics { - let client = self.sled_client(&sled.id()).await?; - let nic_id = nic.id; - let mapping = DeleteVirtualNetworkInterfaceHost { - virtual_ip: nic.ip, - vni: nic.vni, - }; - - let log = self.log.clone(); - - // This function is idempotent: calling the set_v2p ioctl with - // the same information is a no-op. - join_handles.push(tokio::spawn(futures::future::lazy( - move |_ctx| async move { - retry_until_known_result(&log, || async { - client.del_v2p(&nic_id, &mapping).await - }) - .await - }, - ))); - } - } - - // Concurrently run each future to completion, but return the last - // error seen. - let mut error = None; - for join_handle in join_handles { - let result = join_handle - .await - .map_err(|e| Error::internal_error(&e.to_string()))? - .await; - - if result.is_err() { - error!(self.log, "{:?}", result); - error = Some(result); - } - } - if let Some(e) = error { - return e.map(|_| ()).map_err(|e| e.into()); - } - - if sleds_page.len() < 10 { - break; - } - - if let Some(last) = sleds_page.last() { - last_sled_id = Some(last.id()); - } - } - - Ok(()) + boundary_switches(&self.db_datastore, opctx).await } /// Ensures that the Dendrite configuration for the supplied instance is @@ -305,152 +67,17 @@ impl super::Nexus { sled_ip_address: &std::net::SocketAddrV6, ip_filter: Option, ) -> Result, Error> { - let log = &self.log; - - info!(log, "looking up instance's primary network interface"; - "instance_id" => %instance_id); - - let (.., authz_instance) = LookupPath::new(opctx, &self.db_datastore) - .instance_id(instance_id) - .lookup_for(authz::Action::ListChildren) - .await?; - - // XXX: Need to abstract over v6 and v4 entries here. - let mut nat_entries = vec![]; - - // All external IPs map to the primary network interface, so find that - // interface. If there is no such interface, there's no way to route - // traffic destined to those IPs, so there's nothing to configure and - // it's safe to return early. - let network_interface = match self - .db_datastore - .derive_guest_network_interface_info(&opctx, &authz_instance) - .await? 
- .into_iter() - .find(|interface| interface.primary) - { - Some(interface) => interface, - None => { - info!(log, "Instance has no primary network interface"; - "instance_id" => %instance_id); - return Ok(nat_entries); - } - }; - - let mac_address = - macaddr::MacAddr6::from_str(&network_interface.mac.to_string()) - .map_err(|e| { - Error::internal_error(&format!( - "failed to convert mac address: {e}" - )) - })?; - - info!(log, "looking up instance's external IPs"; - "instance_id" => %instance_id); - - let ips = self - .db_datastore - .instance_lookup_external_ips(&opctx, instance_id) - .await?; - - let (ips_of_interest, must_all_be_attached) = if let Some(wanted_id) = - ip_filter - { - if let Some(ip) = ips.iter().find(|v| v.id == wanted_id) { - (std::slice::from_ref(ip), false) - } else { - return Err(Error::internal_error(&format!( - "failed to find external ip address with id: {wanted_id}, saw {ips:?}", - ))); - } - } else { - (&ips[..], true) - }; - - // This is performed so that an IP attach/detach will block the - // instance_start saga. Return service unavailable to indicate - // the request is retryable. - if must_all_be_attached - && ips_of_interest - .iter() - .any(|ip| ip.state != IpAttachState::Attached) - { - return Err(Error::unavail( - "cannot push all DPD state: IP attach/detach in progress", - )); - } - - let sled_address = - Ipv6Net(Ipv6Network::new(*sled_ip_address.ip(), 128).unwrap()); - - // If all of our IPs are attached or are guaranteed to be owned - // by the saga calling this fn, then we need to disregard and - // remove conflicting rows. No other instance/service should be - // using these as its own, and we are dealing with detritus, e.g., - // the case where we have a concurrent stop -> detach followed - // by an attach to another instance, or other ongoing attach saga - // cleanup. - let mut err_and_limit = None; - for (i, external_ip) in ips_of_interest.iter().enumerate() { - // For each external ip, add a nat entry to the database - if let Ok(id) = self - .ensure_nat_entry( - external_ip, - sled_address, - &network_interface, - mac_address, - opctx, - ) - .await - { - nat_entries.push(id); - continue; - } - - // We seem to be blocked by a bad row -- take it out and retry. - // This will return Ok() for a non-existent row. - if let Err(e) = self - .external_ip_delete_dpd_config_inner(opctx, external_ip) - .await - { - err_and_limit = Some((e, i)); - break; - }; - - match self - .ensure_nat_entry( - external_ip, - sled_address, - &network_interface, - mac_address, - opctx, - ) - .await - { - Ok(id) => nat_entries.push(id), - Err(e) => { - err_and_limit = Some((e, i)); - break; - } - } - } - - // In the event of an unresolvable failure, we need to remove - // the entries we just added because the undo won't call into - // `instance_delete_dpd_config`. These entries won't stop a - // future caller, but it's better not to pollute switch state. 
- if let Some((e, max)) = err_and_limit { - for external_ip in &ips_of_interest[..max] { - let _ = self - .external_ip_delete_dpd_config_inner(opctx, external_ip) - .await; - } - return Err(e); - } - - self.notify_dendrite_nat_state(Some(instance_id), true).await?; - - Ok(nat_entries) + instance_ensure_dpd_config( + &self.db_datastore, + &self.log, + &self.resolver().await, + opctx, + &self.opctx_alloc, + instance_id, + sled_ip_address, + ip_filter, + ) + .await } // The logic of this function should follow very closely what @@ -465,120 +92,16 @@ impl super::Nexus { ip_index_filter: Option, dpd_client: &dpd_client::Client, ) -> Result<(), Error> { - let log = &self.log; - - // All external IPs map to the primary network interface, so find that - // interface. If there is no such interface, there's no way to route - // traffic destined to those IPs, so there's nothing to configure and - // it's safe to return early. - let network_interface = match self - .db_datastore - .derive_probe_network_interface_info(&opctx, probe_id) - .await? - .into_iter() - .find(|interface| interface.primary) - { - Some(interface) => interface, - None => { - info!(log, "probe has no primary network interface"; - "probe_id" => %probe_id); - return Ok(()); - } - }; - - let mac_address = - macaddr::MacAddr6::from_str(&network_interface.mac.to_string()) - .map_err(|e| { - Error::internal_error(&format!( - "failed to convert mac address: {e}" - )) - })?; - - info!(log, "looking up probe's external IPs"; - "probe_id" => %probe_id); - - let ips = self - .db_datastore - .probe_lookup_external_ips(&opctx, probe_id) - .await?; - - if let Some(wanted_index) = ip_index_filter { - if let None = ips.get(wanted_index) { - return Err(Error::internal_error(&format!( - "failed to find external ip address at index: {}", - wanted_index - ))); - } - } - - let sled_address = - Ipv6Net(Ipv6Network::new(sled_ip_address, 128).unwrap()); - - for target_ip in ips - .iter() - .enumerate() - .filter(|(index, _)| { - if let Some(wanted_index) = ip_index_filter { - *index == wanted_index - } else { - true - } - }) - .map(|(_, ip)| ip) - { - // For each external ip, add a nat entry to the database - self.ensure_nat_entry( - target_ip, - sled_address, - &network_interface, - mac_address, - opctx, - ) - .await?; - } - - // Notify dendrite that there are changes for it to reconcile. - // In the event of a failure to notify dendrite, we'll log an error - // and rely on dendrite's RPW timer to catch it up. - if let Err(e) = dpd_client.ipv4_nat_trigger_update().await { - error!(self.log, "failed to notify dendrite of nat updates"; "error" => ?e); - }; - - Ok(()) - } - - async fn ensure_nat_entry( - &self, - target_ip: &nexus_db_model::ExternalIp, - sled_address: Ipv6Net, - network_interface: &NetworkInterface, - mac_address: macaddr::MacAddr6, - opctx: &OpContext, - ) -> Result { - match target_ip.ip { - IpNetwork::V4(v4net) => { - let nat_entry = Ipv4NatValues { - external_address: Ipv4Net(v4net).into(), - first_port: target_ip.first_port, - last_port: target_ip.last_port, - sled_address: sled_address.into(), - vni: DbVni(network_interface.vni), - mac: nexus_db_model::MacAddr( - omicron_common::api::external::MacAddr(mac_address), - ), - }; - Ok(self - .db_datastore - .ensure_ipv4_nat_entry(opctx, nat_entry) - .await?) - } - IpNetwork::V6(_v6net) => { - // TODO: implement handling of v6 nat. 
- return Err(Error::InternalError { - internal_message: "ipv6 nat is not yet implemented".into(), - }); - } - } + probe_ensure_dpd_config( + &self.db_datastore, + &self.log, + opctx, + probe_id, + sled_ip_address, + ip_index_filter, + dpd_client, + ) + .await } /// Attempts to delete all of the Dendrite NAT configuration for the @@ -608,22 +131,16 @@ impl super::Nexus { opctx: &OpContext, authz_instance: &authz::Instance, ) -> Result<(), Error> { - let log = &self.log; - let instance_id = authz_instance.id(); - - info!(log, "deleting instance dpd configuration"; - "instance_id" => %instance_id); - - let external_ips = self - .db_datastore - .instance_lookup_external_ips(opctx, instance_id) - .await?; - - for entry in external_ips { - self.external_ip_delete_dpd_config_inner(opctx, &entry).await?; - } - - self.notify_dendrite_nat_state(Some(instance_id), false).await + let resolver = self.resolver().await; + instance_delete_dpd_config( + &self.db_datastore, + &self.log, + &resolver, + opctx, + &self.opctx_alloc, + authz_instance, + ) + .await } /// Attempts to delete Dendrite NAT configuration for a single external IP. @@ -635,16 +152,13 @@ impl super::Nexus { opctx: &OpContext, external_ip: &ExternalIp, ) -> Result<(), Error> { - let log = &self.log; - let instance_id = external_ip.parent_id; - - info!(log, "deleting individual NAT entry from dpd configuration"; - "instance_id" => ?instance_id, - "external_ip" => %external_ip.ip); - - self.external_ip_delete_dpd_config_inner(opctx, external_ip).await?; - - self.notify_dendrite_nat_state(instance_id, false).await + external_ip_delete_dpd_config_inner( + &self.db_datastore, + &self.log, + opctx, + external_ip, + ) + .await } /// Attempts to soft-delete Dendrite NAT configuration for a specific entry @@ -658,371 +172,859 @@ impl super::Nexus { opctx: &OpContext, nat_entry: &Ipv4NatEntry, ) -> Result<(), Error> { - let log = &self.log; - - info!(log, "deleting individual NAT entry from dpd configuration"; - "id" => ?nat_entry.id, - "version_added" => %nat_entry.external_address.0); - - match self.db_datastore.ipv4_nat_delete(&opctx, nat_entry).await { - Ok(_) => {} - Err(err) => match err { - Error::ObjectNotFound { .. } => { - warn!(log, "no matching nat entries to soft delete"); - } - _ => { - let message = format!( - "failed to delete nat entry due to error: {err:?}" - ); - error!(log, "{}", message); - return Err(Error::internal_error(&message)); - } - }, - } - - self.notify_dendrite_nat_state(None, false).await + delete_dpd_config_by_entry( + &self.db_datastore, + &self.resolver().await, + &self.log, + opctx, + &self.opctx_alloc, + nat_entry, + ) + .await } - /// Soft-delete an individual external IP from the NAT RPW, without - /// triggering a Dendrite notification. - async fn external_ip_delete_dpd_config_inner( + // The logic of this function should follow very closely what + // `instance_delete_dpd_config` does. However, there are enough differences + // in the mechanics of how the logic is being carried out to justify having + // this separate function, it seems. + pub(crate) async fn probe_delete_dpd_config( &self, opctx: &OpContext, - external_ip: &ExternalIp, + probe_id: Uuid, ) -> Result<(), Error> { - let log = &self.log; - - // Soft delete the NAT entry - match self - .db_datastore - .ipv4_nat_delete_by_external_ip(&opctx, external_ip) - .await - { - Ok(_) => Ok(()), - Err(err) => match err { - Error::ObjectNotFound { .. 
} => {
-                    warn!(log, "no matching nat entries to soft delete");
-                    Ok(())
-                }
-                _ => {
-                    let message = format!(
-                        "failed to delete nat entry due to error: {err:?}"
-                    );
-                    error!(log, "{}", message);
-                    Err(Error::internal_error(&message))
-                }
-            },
-        }
+        probe_delete_dpd_config(
+            &self.db_datastore,
+            &self.log,
+            &self.resolver().await,
+            opctx,
+            &self.opctx_alloc,
+            probe_id,
+            &self.background_tasks,
+        )
+        .await
     }
+}

-    /// Informs all available boundary switches that the set of NAT entries
-    /// has changed.
-    ///
-    /// When `fail_fast` is set, this function will return on any error when
-    /// acquiring a handle to a DPD client. Otherwise, it will attempt to notify
-    /// all clients and then finally return the first error.
-    async fn notify_dendrite_nat_state(
-        &self,
-        instance_id: Option<Uuid>,
-        fail_fast: bool,
-    ) -> Result<(), Error> {
-        // Querying boundary switches also requires fleet access and the use of the
-        // instance allocator context.
-        let boundary_switches =
-            self.boundary_switches(&self.opctx_alloc).await?;
-
-        let mut errors = vec![];
-        for switch in &boundary_switches {
-            debug!(&self.log, "notifying dendrite of updates";
-                "instance_id" => ?instance_id,
-                "switch" => switch.to_string());
-
-            let clients = self.dpd_clients().await.map_err(|e| {
+/// Returns the set of switches with uplinks configured and boundary
+/// services enabled.
+pub(crate) async fn boundary_switches(
+    datastore: &DataStore,
+    opctx: &OpContext,
+) -> Result<HashSet<SwitchLocation>, Error> {
+    let mut boundary_switches: HashSet<SwitchLocation> = HashSet::new();
+    let uplinks =
+        switch_port::list_switch_ports_with_uplinks(datastore, opctx).await?;
+    for uplink in &uplinks {
+        let location: SwitchLocation =
+            uplink.switch_location.parse().map_err(|_| {
                 Error::internal_error(&format!(
-                    "failed to get dpd clients: {e}"
+                    "invalid switch location in uplink config: {}",
+                    uplink.switch_location
                 ))
             })?;
-            let client_result = clients.get(switch).ok_or_else(|| {
-                Error::internal_error(&format!(
-                    "unable to find dendrite client for {switch}"
-                ))
-            });
+        boundary_switches.insert(location);
+    }
+    Ok(boundary_switches)
+}

-            let dpd_client = match client_result {
-                Ok(client) => client,
-                Err(new_error) => {
-                    errors.push(new_error);
-                    if fail_fast {
-                        break;
-                    } else {
-                        continue;
-                    }
-                }
-            };
+/// Given old and new instance runtime states, determines the desired
+/// networking configuration for a given instance and ensures it has been
+/// propagated to all relevant sleds.
+///
+/// # Arguments
+///
+/// - `datastore`: the datastore to use for lookups and updates.
+/// - `log`: the [`slog::Logger`] to log to.
+/// - `resolver`: an internal DNS resolver to look up DPD service addresses.
+/// - `opctx`: An operation context for this operation.
+/// - `opctx_alloc`: An operational context with permissions for all sleds. When
+///   called by methods on the [`Nexus`] type, this is the `OpContext` used for
+///   instance allocation. In a background task, this may be the background
+///   task's operational context; nothing stops you from passing the same
+///   `OpContext` as both `opctx` and `opctx_alloc`.
+/// - `authz_instance`: A resolved authorization context for the instance of
+///   interest.
+/// - `prev_instance_state`: The most-recently-recorded instance runtime
+///   state for this instance.
+/// - `new_instance_state`: The instance state that the caller of this routine
+///   has observed and that should be used to set up this instance's
+///   networking state.
+/// +/// # Return value +/// +/// `Ok(())` if this routine completed all the operations it wanted to +/// complete, or an appropriate `Err` otherwise. +#[allow(clippy::too_many_arguments)] // Yeah, I know, I know, Clippy... +pub(crate) async fn ensure_updated_instance_network_config( + datastore: &DataStore, + log: &slog::Logger, + resolver: &internal_dns::resolver::Resolver, + opctx: &OpContext, + opctx_alloc: &OpContext, + authz_instance: &authz::Instance, + prev_instance_state: &db::model::InstanceRuntimeState, + new_instance_state: &nexus::InstanceRuntimeState, + v2p_notification_tx: tokio::sync::watch::Sender<()>, +) -> Result<(), Error> { + let instance_id = authz_instance.id(); + + // If this instance update is stale, do nothing, since the superseding + // update may have allowed the instance's location to change further. + if prev_instance_state.gen >= new_instance_state.gen.into() { + debug!(log, + "instance state generation already advanced, \ + won't touch network config"; + "instance_id" => %instance_id); + + return Ok(()); + } - // Notify dendrite that there are changes for it to reconcile. - // In the event of a failure to notify dendrite, we'll log an error - // and rely on dendrite's RPW timer to catch it up. - if let Err(e) = dpd_client.ipv4_nat_trigger_update().await { - error!(self.log, "failed to notify dendrite of nat updates"; "error" => ?e); - }; + // If this update will retire the instance's active VMM, delete its + // networking state. It will be re-established the next time the + // instance starts. + if new_instance_state.propolis_id.is_none() { + info!(log, + "instance cleared its Propolis ID, cleaning network config"; + "instance_id" => %instance_id, + "propolis_id" => ?prev_instance_state.propolis_id); + + clear_instance_networking_state( + datastore, + log, + resolver, + opctx, + opctx_alloc, + authz_instance, + v2p_notification_tx, + ) + .await?; + return Ok(()); + } + + // If the instance still has a migration in progress, don't change + // any networking state until an update arrives that retires that + // migration. + // + // This is needed to avoid the following race: + // + // 1. Migration from S to T completes. + // 2. Migration source sends an update that changes the instance's + // active VMM but leaves the migration ID in place. + // 3. Meanwhile, migration target sends an update that changes the + // instance's active VMM and clears the migration ID. + // 4. The migration target's call updates networking state and commits + // the new instance record. + // 5. The instance migrates from T to T' and Nexus applies networking + // configuration reflecting that the instance is on T'. + // 6. The update in step 2 applies configuration saying the instance + // is on sled T. + if new_instance_state.migration_id.is_some() { + debug!(log, + "instance still has a migration in progress, won't touch \ + network config"; + "instance_id" => %instance_id, + "migration_id" => ?new_instance_state.migration_id); + + return Ok(()); + } + + let new_propolis_id = new_instance_state.propolis_id.unwrap(); + + // Updates that end live migration need to push OPTE V2P state even if + // the instance's active sled did not change (see below). 
+ let migration_retired = prev_instance_state.migration_id.is_some() + && new_instance_state.migration_id.is_none(); + + if (prev_instance_state.propolis_id == new_instance_state.propolis_id) + && !migration_retired + { + debug!(log, "instance didn't move, won't touch network config"; + "instance_id" => %instance_id); + + return Ok(()); + } + + // Either the instance moved from one sled to another, or it attempted + // to migrate and failed. Ensure the correct networking configuration + // exists for its current home. + // + // TODO(#3107) This is necessary even if the instance didn't move, + // because registering a migration target on a sled creates OPTE ports + // for its VNICs, and that creates new V2P mappings on that sled that + // place the relevant virtual IPs on the local sled. Once OPTE stops + // creating these mappings, this path only needs to be taken if an + // instance has changed sleds. + let new_sled_id = match datastore + .vmm_fetch(&opctx, authz_instance, &new_propolis_id) + .await + { + Ok(vmm) => vmm.sled_id, + + // A VMM in the active position should never be destroyed. If the + // sled sending this message is the owner of the instance's last + // active VMM and is destroying it, it should also have retired that + // VMM. + Err(Error::ObjectNotFound { .. }) => { + error!(log, "instance's active vmm unexpectedly not found"; + "instance_id" => %instance_id, + "propolis_id" => %new_propolis_id); + + return Ok(()); } - if let Some(e) = errors.into_iter().next() { - return Err(e); + Err(e) => return Err(e), + }; + + if let Err(e) = v2p_notification_tx.send(()) { + error!( + log, + "error notifying background task of v2p change"; + "error" => ?e + ) + }; + + let (.., sled) = + LookupPath::new(opctx, datastore).sled_id(new_sled_id).fetch().await?; + + instance_ensure_dpd_config( + datastore, + log, + resolver, + opctx, + opctx_alloc, + instance_id, + &sled.address(), + None, + ) + .await?; + + Ok(()) +} + +/// Ensures that the Dendrite configuration for the supplied instance is +/// up-to-date. +/// +/// Returns a list of live NAT RPW table entries from this call. Generally +/// these should only be needed for specific unwind operations, like in +/// the IP attach saga. +/// +/// # Parameters +/// +/// - `datastore`: the datastore to use for lookups and updates. +/// - `log`: the [`slog::Logger`] to log to. +/// - `resolver`: an internal DNS resolver to look up DPD service addresses. +/// - `opctx`: An operation context that grants read and list-children +/// permissions on the identified instance. +/// - `opctx_alloc`: An operational context list permissions for all sleds. When +/// called by methods on the [`Nexus`] type, this is the `OpContext` used for +/// instance allocation. In a background task, this may be the background +/// task's operational context; nothing stops you from passing the same +/// `OpContext` as both `opctx` and `opctx_alloc`. +/// - `instance_id`: The ID of the instance to act on. +/// - `sled_ip_address`: The internal IP address assigned to the sled's +/// sled agent. +/// - `ip_filter`: An optional filter on the index into the instance's +/// external IP array. +/// - If this is `Some(id)`, this routine configures DPD state for only the +/// external IP with `id` in the collection returned from CRDB. This will +/// proceed even when the target IP is 'attaching'. +/// - If this is `None`, this routine configures DPD for all external +/// IPs and *will back out* if any IPs are not yet fully attached to +/// the instance. 
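// Illustrative sketch only, not part of this change: given the `ip_filter`
// semantics described above, a hypothetical caller differs only in the last
// argument (`attached_ip_id: Uuid` is an assumed, in-scope variable here):
//
//     // IP-attach saga: push DPD state for one specific external IP, even
//     // if that IP is still in the 'attaching' state.
//     instance_ensure_dpd_config(
//         datastore, log, resolver, opctx, opctx_alloc,
//         instance_id, &sled_ip_address, Some(attached_ip_id),
//     )
//     .await?;
//
//     // Instance start: push DPD state for every external IP; returns a
//     // retryable "service unavailable" error if any attach/detach is
//     // still in progress.
//     instance_ensure_dpd_config(
//         datastore, log, resolver, opctx, opctx_alloc,
//         instance_id, &sled_ip_address, None,
//     )
//     .await?;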
+#[allow(clippy::too_many_arguments)] // I don't like it either, clippy...
+pub(crate) async fn instance_ensure_dpd_config(
+    datastore: &DataStore,
+    log: &slog::Logger,
+    resolver: &internal_dns::resolver::Resolver,
+    opctx: &OpContext,
+    opctx_alloc: &OpContext,
+    instance_id: Uuid,
+    sled_ip_address: &std::net::SocketAddrV6,
+    ip_filter: Option<Uuid>,
+) -> Result<Vec<Ipv4NatEntry>, Error> {
+    info!(log, "looking up instance's primary network interface";
+        "instance_id" => %instance_id);
+
+    let (.., authz_instance) = LookupPath::new(opctx, datastore)
+        .instance_id(instance_id)
+        .lookup_for(authz::Action::ListChildren)
+        .await?;
+
+    // XXX: Need to abstract over v6 and v4 entries here.
+    let mut nat_entries = vec![];
+
+    // All external IPs map to the primary network interface, so find that
+    // interface. If there is no such interface, there's no way to route
+    // traffic destined to those IPs, so there's nothing to configure and
+    // it's safe to return early.
+    let network_interface = match datastore
+        .derive_guest_network_interface_info(&opctx, &authz_instance)
+        .await?
+        .into_iter()
+        .find(|interface| interface.primary)
+    {
+        Some(interface) => interface,
+        None => {
+            info!(log, "Instance has no primary network interface";
+                "instance_id" => %instance_id);
+            return Ok(nat_entries);
+        }
+    };
+
+    let mac_address =
+        macaddr::MacAddr6::from_str(&network_interface.mac.to_string())
+            .map_err(|e| {
+                Error::internal_error(&format!(
+                    "failed to convert mac address: {e}"
+                ))
+            })?;
-        info!(log, "looking up instance's external IPs";
+    info!(log, "looking up instance's external IPs";
+        "instance_id" => %instance_id);
+
+    let ips =
+        datastore.instance_lookup_external_ips(&opctx, instance_id).await?;
+
+    let (ips_of_interest, must_all_be_attached) = if let Some(wanted_id) =
+        ip_filter
+    {
+        if let Some(ip) = ips.iter().find(|v| v.id == wanted_id) {
+            (std::slice::from_ref(ip), false)
+        } else {
+            return Err(Error::internal_error(&format!(
+                "failed to find external ip address with id: {wanted_id}, saw {ips:?}",
+            )));
+        }
+    } else {
+        (&ips[..], true)
+    };
+
+    // This is performed so that an IP attach/detach will block the
+    // instance_start saga. Return service unavailable to indicate
+    // the request is retryable.
+    if must_all_be_attached
+        && ips_of_interest.iter().any(|ip| ip.state != IpAttachState::Attached)
+    {
+        return Err(Error::unavail(
+            "cannot push all DPD state: IP attach/detach in progress",
+        ));
+    }
+
+    let sled_address = Ipv6Net::host_net(*sled_ip_address.ip());
+
+    // If all of our IPs are attached or are guaranteed to be owned
+    // by the saga calling this fn, then we need to disregard and
+    // remove conflicting rows. No other instance/service should be
+    // using these as its own, and we are dealing with detritus, e.g.,
+    // the case where we have a concurrent stop -> detach followed
+    // by an attach to another instance, or other ongoing attach saga
+    // cleanup.
+ let mut err_and_limit = None; + for (i, external_ip) in ips_of_interest.iter().enumerate() { + // For each external ip, add a nat entry to the database + if let Ok(id) = ensure_nat_entry( + datastore, + external_ip, + sled_address, + &network_interface, + mac_address, + opctx, + ) + .await + { + nat_entries.push(id); + continue; + } - info!(log, "deleting probe dpd configuration"; - "probe_id" => %probe_id); + // We seem to be blocked by a bad row -- take it out and retry. + // This will return Ok() for a non-existent row. + if let Err(e) = external_ip_delete_dpd_config_inner( + datastore, + log, + opctx, + external_ip, + ) + .await + { + err_and_limit = Some((e, i)); + break; + }; - let external_ips = self - .db_datastore - .probe_lookup_external_ips(opctx, probe_id) - .await?; + match ensure_nat_entry( + datastore, + external_ip, + sled_address, + &network_interface, + mac_address, + opctx, + ) + .await + { + Ok(id) => nat_entries.push(id), + Err(e) => { + err_and_limit = Some((e, i)); + break; + } + } + } - let mut errors = vec![]; - for entry in external_ips { - // Soft delete the NAT entry - match self - .db_datastore - .ipv4_nat_delete_by_external_ip(&opctx, &entry) - .await - { - Ok(_) => Ok(()), - Err(err) => match err { - Error::ObjectNotFound { .. } => { - warn!(log, "no matching nat entries to soft delete"); - Ok(()) - } - _ => { - let message = format!( - "failed to delete nat entry due to error: {err:?}" - ); - error!(log, "{}", message); - Err(Error::internal_error(&message)) - } - }, - }?; + // In the event of an unresolvable failure, we need to remove + // the entries we just added because the undo won't call into + // `instance_delete_dpd_config`. These entries won't stop a + // future caller, but it's better not to pollute switch state. + if let Some((e, max)) = err_and_limit { + for external_ip in &ips_of_interest[..max] { + let _ = external_ip_delete_dpd_config_inner( + datastore, + log, + opctx, + external_ip, + ) + .await; } + return Err(e); + } - let boundary_switches = - self.boundary_switches(&self.opctx_alloc).await?; + notify_dendrite_nat_state( + datastore, + log, + resolver, + opctx_alloc, + Some(instance_id), + true, + ) + .await?; + + Ok(nat_entries) +} - for switch in &boundary_switches { - debug!(&self.log, "notifying dendrite of updates"; - "probe_id" => %probe_id, - "switch" => switch.to_string()); +// The logic of this function should follow very closely what +// `instance_ensure_dpd_config` does. However, there are enough differences +// in the mechanics of how the logic is being carried out to justify having +// this separate function, it seems. +pub(crate) async fn probe_ensure_dpd_config( + datastore: &DataStore, + log: &slog::Logger, + opctx: &OpContext, + probe_id: Uuid, + sled_ip_address: std::net::Ipv6Addr, + ip_index_filter: Option, + dpd_client: &dpd_client::Client, +) -> Result<(), Error> { + // All external IPs map to the primary network interface, so find that + // interface. If there is no such interface, there's no way to route + // traffic destined to those IPs, so there's nothing to configure and + // it's safe to return early. + let network_interface = match datastore + .derive_probe_network_interface_info(&opctx, probe_id) + .await? 
+ .into_iter() + .find(|interface| interface.primary) + { + Some(interface) => interface, + None => { + info!(log, "probe has no primary network interface"; + "probe_id" => %probe_id); + return Ok(()); + } + }; - let dpd_clients = self.dpd_clients().await.map_err(|e| { + let mac_address = + macaddr::MacAddr6::from_str(&network_interface.mac.to_string()) + .map_err(|e| { Error::internal_error(&format!( - "unable to get dpd_clients: {e}" + "failed to convert mac address: {e}" )) })?; - let client_result = dpd_clients.get(switch).ok_or_else(|| { - Error::internal_error(&format!( - "unable to find dendrite client for {switch}" - )) - }); + info!(log, "looking up probe's external IPs"; + "probe_id" => %probe_id); - let dpd_client = match client_result { - Ok(client) => client, - Err(new_error) => { - errors.push(new_error); - continue; - } - }; + let ips = datastore.probe_lookup_external_ips(&opctx, probe_id).await?; - // Notify dendrite that there are changes for it to reconcile. - // In the event of a failure to notify dendrite, we'll log an error - // and rely on dendrite's RPW timer to catch it up. - if let Err(e) = dpd_client.ipv4_nat_trigger_update().await { - error!(self.log, "failed to notify dendrite of nat updates"; "error" => ?e); - }; + if let Some(wanted_index) = ip_index_filter { + if let None = ips.get(wanted_index) { + return Err(Error::internal_error(&format!( + "failed to find external ip address at index: {}", + wanted_index + ))); } + } - if let Some(e) = errors.into_iter().nth(0) { - return Err(e); - } + let sled_address = Ipv6Net::host_net(sled_ip_address); - Ok(()) + for target_ip in ips + .iter() + .enumerate() + .filter(|(index, _)| { + if let Some(wanted_index) = ip_index_filter { + *index == wanted_index + } else { + true + } + }) + .map(|(_, ip)| ip) + { + // For each external ip, add a nat entry to the database + ensure_nat_entry( + datastore, + target_ip, + sled_address, + &network_interface, + mac_address, + opctx, + ) + .await?; } - /// Deletes an instance's OPTE V2P mappings and the boundary switch NAT - /// entries for its external IPs. - /// - /// This routine returns immediately upon encountering any errors (and will - /// not try to destroy any more objects after the point of failure). - async fn clear_instance_networking_state( - &self, - opctx: &OpContext, - authz_instance: &authz::Instance, - ) -> Result<(), Error> { - self.delete_instance_v2p_mappings(opctx, authz_instance.id()).await?; + // Notify dendrite that there are changes for it to reconcile. + // In the event of a failure to notify dendrite, we'll log an error + // and rely on dendrite's RPW timer to catch it up. + if let Err(e) = dpd_client.ipv4_nat_trigger_update().await { + error!(log, "failed to notify dendrite of nat updates"; "error" => ?e); + }; + + Ok(()) +} - self.instance_delete_dpd_config(opctx, authz_instance).await?; +/// Deletes an instance's OPTE V2P mappings and the boundary switch NAT +/// entries for its external IPs. +/// +/// This routine returns immediately upon encountering any errors (and will +/// not try to destroy any more objects after the point of failure). 
+async fn clear_instance_networking_state( + datastore: &DataStore, + log: &slog::Logger, + resolver: &internal_dns::resolver::Resolver, + opctx: &OpContext, + opctx_alloc: &OpContext, + authz_instance: &authz::Instance, + v2p_notification_tx: tokio::sync::watch::Sender<()>, +) -> Result<(), Error> { + if let Err(e) = v2p_notification_tx.send(()) { + error!( + log, + "error notifying background task of v2p change"; + "error" => ?e + ) + }; + + instance_delete_dpd_config( + datastore, + log, + resolver, + opctx, + opctx_alloc, + authz_instance, + ) + .await?; + + notify_dendrite_nat_state( + datastore, + log, + resolver, + opctx_alloc, + Some(authz_instance.id()), + true, + ) + .await +} - self.notify_dendrite_nat_state(Some(authz_instance.id()), true).await +/// Attempts to delete all of the Dendrite NAT configuration for the +/// instance identified by `authz_instance`. +/// +/// Unlike `instance_ensure_dpd_config`, this function will disregard the +/// attachment states of any external IPs because likely callers (instance +/// delete) cannot be piecewise undone. +/// +/// # Return value +/// +/// - `Ok(())` if all NAT entries were successfully deleted. +/// - If an operation fails before this routine begins to walk and delete +/// individual NAT entries, this routine returns `Err` and reports that +/// error. +/// - If an operation fails while this routine is walking NAT entries, it +/// will continue trying to delete subsequent entries but will return the +/// first error it encountered. +/// - `ip_filter`: An optional filter on the index into the instance's +/// external IP array. +/// - If this is `Some(id)`, this routine configures DPD state for only the +/// external IP with `id` in the collection returned from CRDB. +/// - If this is `None`, this routine configures DPD for all external +/// IPs. +pub(crate) async fn instance_delete_dpd_config( + datastore: &DataStore, + log: &slog::Logger, + resolver: &internal_dns::resolver::Resolver, + opctx: &OpContext, + opctx_alloc: &OpContext, + authz_instance: &authz::Instance, +) -> Result<(), Error> { + let instance_id = authz_instance.id(); + + info!(log, "deleting instance dpd configuration"; + "instance_id" => %instance_id); + + let external_ips = + datastore.instance_lookup_external_ips(opctx, instance_id).await?; + + for entry in external_ips { + external_ip_delete_dpd_config_inner(&datastore, &log, opctx, &entry) + .await?; } - /// Given old and new instance runtime states, determines the desired - /// networking configuration for a given instance and ensures it has been - /// propagated to all relevant sleds. - /// - /// # Arguments - /// - /// - opctx: An operation context for this operation. - /// - authz_instance: A resolved authorization context for the instance of - /// interest. - /// - prev_instance_state: The most-recently-recorded instance runtime - /// state for this instance. - /// - new_instance_state: The instance state that the caller of this routine - /// has observed and that should be used to set up this instance's - /// networking state. - /// - /// # Return value - /// - /// `Ok(())` if this routine completed all the operations it wanted to - /// complete, or an appropriate `Err` otherwise. 
- pub(crate) async fn ensure_updated_instance_network_config( - &self, - opctx: &OpContext, - authz_instance: &authz::Instance, - prev_instance_state: &db::model::InstanceRuntimeState, - new_instance_state: &nexus::InstanceRuntimeState, - ) -> Result<(), Error> { - let log = &self.log; - let instance_id = authz_instance.id(); + notify_dendrite_nat_state( + datastore, + log, + resolver, + opctx_alloc, + Some(instance_id), + false, + ) + .await +} - // If this instance update is stale, do nothing, since the superseding - // update may have allowed the instance's location to change further. - if prev_instance_state.gen >= new_instance_state.gen.into() { - debug!(log, - "instance state generation already advanced, \ - won't touch network config"; - "instance_id" => %instance_id); +// The logic of this function should follow very closely what +// `instance_delete_dpd_config` does. However, there are enough differences +// in the mechanics of how the logic is being carried out to justify having +// this separate function, it seems. +pub(crate) async fn probe_delete_dpd_config( + datastore: &DataStore, + log: &slog::Logger, + resolver: &internal_dns::resolver::Resolver, + opctx: &OpContext, + opctx_alloc: &OpContext, + probe_id: Uuid, + background_tasks: &BackgroundTasks, +) -> Result<(), Error> { + info!(log, "deleting probe dpd configuration"; + "probe_id" => %probe_id); + + let external_ips = + datastore.probe_lookup_external_ips(opctx, probe_id).await?; + + let mut errors = vec![]; + for entry in external_ips { + // Soft delete the NAT entry + match datastore.ipv4_nat_delete_by_external_ip(&opctx, &entry).await { + Ok(_) => Ok(()), + Err(err) => match err { + Error::ObjectNotFound { .. } => { + warn!(log, "no matching nat entries to soft delete"); + Ok(()) + } + _ => { + let message = format!( + "failed to delete nat entry due to error: {err:?}" + ); + error!(log, "{}", message); + Err(Error::internal_error(&message)) + } + }, + }?; + } - return Ok(()); - } + let boundary_switches = boundary_switches(datastore, opctx_alloc).await?; - // If this update will retire the instance's active VMM, delete its - // networking state. It will be re-established the next time the - // instance starts. - if new_instance_state.propolis_id.is_none() { - info!(log, - "instance cleared its Propolis ID, cleaning network config"; - "instance_id" => %instance_id, - "propolis_id" => ?prev_instance_state.propolis_id); + for switch in &boundary_switches { + debug!(log, "notifying dendrite of updates"; + "probe_id" => %probe_id, + "switch" => switch.to_string()); - self.clear_instance_networking_state(opctx, authz_instance).await?; - return Ok(()); - } + let dpd_clients = + super::dpd_clients(resolver, log).await.map_err(|e| { + Error::internal_error(&format!( + "unable to get dpd_clients: {e}" + )) + })?; - // If the instance still has a migration in progress, don't change - // any networking state until an update arrives that retires that - // migration. - // - // This is needed to avoid the following race: - // - // 1. Migration from S to T completes. - // 2. Migration source sends an update that changes the instance's - // active VMM but leaves the migration ID in place. - // 3. Meanwhile, migration target sends an update that changes the - // instance's active VMM and clears the migration ID. - // 4. The migration target's call updates networking state and commits - // the new instance record. - // 5. 
The instance migrates from T to T' and Nexus applies networking - // configuration reflecting that the instance is on T'. - // 6. The update in step 2 applies configuration saying the instance - // is on sled T. - if new_instance_state.migration_id.is_some() { - debug!(log, - "instance still has a migration in progress, won't touch \ - network config"; - "instance_id" => %instance_id, - "migration_id" => ?new_instance_state.migration_id); + let client_result = dpd_clients.get(switch).ok_or_else(|| { + Error::internal_error(&format!( + "unable to find dendrite client for {switch}" + )) + }); - return Ok(()); - } + let dpd_client = match client_result { + Ok(client) => client, + Err(new_error) => { + errors.push(new_error); + continue; + } + }; - let new_propolis_id = new_instance_state.propolis_id.unwrap(); + background_tasks.activate(&background_tasks.task_v2p_manager); + // Notify dendrite that there are changes for it to reconcile. + // In the event of a failure to notify dendrite, we'll log an error + // and rely on dendrite's RPW timer to catch it up. + if let Err(e) = dpd_client.ipv4_nat_trigger_update().await { + error!(log, "failed to notify dendrite of nat updates"; "error" => ?e); + }; + } - // Updates that end live migration need to push OPTE V2P state even if - // the instance's active sled did not change (see below). - let migration_retired = prev_instance_state.migration_id.is_some() - && new_instance_state.migration_id.is_none(); + if let Some(e) = errors.into_iter().next() { + return Err(e); + } - if (prev_instance_state.propolis_id == new_instance_state.propolis_id) - && !migration_retired - { - debug!(log, "instance didn't move, won't touch network config"; - "instance_id" => %instance_id); + Ok(()) +} - return Ok(()); - } +/// Attempts to soft-delete Dendrite NAT configuration for a specific entry +/// via ID. +/// +/// This function is needed to safely cleanup in at least one unwind scenario +/// where a potential second user could need to use the same (IP, portset) pair, +/// e.g. a rapid reattach or a reallocated ephemeral IP. +pub(crate) async fn delete_dpd_config_by_entry( + datastore: &DataStore, + resolver: &internal_dns::resolver::Resolver, + log: &slog::Logger, + opctx: &OpContext, + opctx_alloc: &OpContext, + nat_entry: &Ipv4NatEntry, +) -> Result<(), Error> { + info!(log, "deleting individual NAT entry from dpd configuration"; + "id" => ?nat_entry.id, + "version_added" => %nat_entry.external_address.0); + + match datastore.ipv4_nat_delete(&opctx, nat_entry).await { + Ok(_) => {} + Err(err) => match err { + Error::ObjectNotFound { .. } => { + warn!(log, "no matching nat entries to soft delete"); + } + _ => { + let message = + format!("failed to delete nat entry due to error: {err:?}"); + error!(log, "{}", message); + return Err(Error::internal_error(&message)); + } + }, + } - // Either the instance moved from one sled to another, or it attempted - // to migrate and failed. Ensure the correct networking configuration - // exists for its current home. - // - // TODO(#3107) This is necessary even if the instance didn't move, - // because registering a migration target on a sled creates OPTE ports - // for its VNICs, and that creates new V2P mappings on that sled that - // place the relevant virtual IPs on the local sled. Once OPTE stops - // creating these mappings, this path only needs to be taken if an - // instance has changed sleds. 
- let new_sled_id = match self - .db_datastore - .vmm_fetch(&opctx, authz_instance, &new_propolis_id) - .await - { - Ok(vmm) => vmm.sled_id, - - // A VMM in the active position should never be destroyed. If the - // sled sending this message is the owner of the instance's last - // active VMM and is destroying it, it should also have retired that - // VMM. - Err(Error::ObjectNotFound { .. }) => { - error!(log, "instance's active vmm unexpectedly not found"; - "instance_id" => %instance_id, - "propolis_id" => %new_propolis_id); - - return Ok(()); + notify_dendrite_nat_state( + datastore, + log, + resolver, + opctx_alloc, + None, + false, + ) + .await +} + +/// Soft-delete an individual external IP from the NAT RPW, without +/// triggering a Dendrite notification. +async fn external_ip_delete_dpd_config_inner( + datastore: &DataStore, + log: &slog::Logger, + opctx: &OpContext, + external_ip: &ExternalIp, +) -> Result<(), Error> { + // Soft delete the NAT entry + match datastore.ipv4_nat_delete_by_external_ip(&opctx, external_ip).await { + Ok(_) => Ok(()), + Err(err) => match err { + Error::ObjectNotFound { .. } => { + warn!(log, "no matching nat entries to soft delete"); + Ok(()) + } + _ => { + let message = + format!("failed to delete nat entry due to error: {err:?}"); + error!(log, "{}", message); + Err(Error::internal_error(&message)) } + }, + } +} - Err(e) => return Err(e), +/// Informs all available boundary switches that the set of NAT entries +/// has changed. +/// +/// When `fail_fast` is set, this function will return on any error when +/// acquiring a handle to a DPD client. Otherwise, it will attempt to notify +/// all clients and then finally return the first error. +async fn notify_dendrite_nat_state( + datastore: &DataStore, + log: &slog::Logger, + resolver: &internal_dns::resolver::Resolver, + opctx_alloc: &OpContext, + instance_id: Option, + fail_fast: bool, +) -> Result<(), Error> { + // Querying boundary switches also requires fleet access and the use of the + // instance allocator context. + let boundary_switches = boundary_switches(datastore, opctx_alloc).await?; + + let mut errors = vec![]; + for switch in &boundary_switches { + debug!(log, "notifying dendrite of updates"; + "instance_id" => ?instance_id, + "switch" => switch.to_string()); + + let clients = super::dpd_clients(resolver, log).await.map_err(|e| { + Error::internal_error(&format!("failed to get dpd clients: {e}")) + })?; + let client_result = clients.get(switch).ok_or_else(|| { + Error::internal_error(&format!( + "unable to find dendrite client for {switch}" + )) + }); + + let dpd_client = match client_result { + Ok(client) => client, + Err(new_error) => { + errors.push(new_error); + if fail_fast { + break; + } else { + continue; + } + } }; - self.create_instance_v2p_mappings(opctx, instance_id, new_sled_id) - .await?; + // Notify dendrite that there are changes for it to reconcile. + // In the event of a failure to notify dendrite, we'll log an error + // and rely on dendrite's RPW timer to catch it up. 
+ if let Err(e) = dpd_client.ipv4_nat_trigger_update().await { + error!(log, "failed to notify dendrite of nat updates"; "error" => ?e); + }; + } - let (.., sled) = LookupPath::new(opctx, &self.db_datastore) - .sled_id(new_sled_id) - .fetch() - .await?; + if let Some(e) = errors.into_iter().next() { + return Err(e); + } - self.instance_ensure_dpd_config( - opctx, - instance_id, - &sled.address(), - None, - ) - .await?; + Ok(()) +} - Ok(()) +async fn ensure_nat_entry( + datastore: &DataStore, + target_ip: &nexus_db_model::ExternalIp, + sled_address: Ipv6Net, + network_interface: &NetworkInterface, + mac_address: macaddr::MacAddr6, + opctx: &OpContext, +) -> Result { + match target_ip.ip { + IpNetwork::V4(v4net) => { + let nat_entry = Ipv4NatValues { + external_address: Ipv4Net::from(v4net).into(), + first_port: target_ip.first_port, + last_port: target_ip.last_port, + sled_address: sled_address.into(), + vni: DbVni(network_interface.vni), + mac: nexus_db_model::MacAddr( + omicron_common::api::external::MacAddr(mac_address), + ), + }; + Ok(datastore.ensure_ipv4_nat_entry(opctx, nat_entry).await?) + } + IpNetwork::V6(_v6net) => { + // TODO: implement handling of v6 nat. + return Err(Error::InternalError { + internal_message: "ipv6 nat is not yet implemented".into(), + }); + } } } diff --git a/nexus/src/app/metrics.rs b/nexus/src/app/metrics.rs index 94fb232892..3728a3bdc1 100644 --- a/nexus/src/app/metrics.rs +++ b/nexus/src/app/metrics.rs @@ -13,7 +13,9 @@ use nexus_db_queries::{ db::{fixed_data::FLEET_ID, lookup}, }; use omicron_common::api::external::{Error, InternalContext}; -use oximeter_db::Measurement; +use oximeter_db::{ + oxql, Measurement, TimeseriesSchema, TimeseriesSchemaPaginationParams, +}; use std::num::NonZeroU32; impl super::Nexus { @@ -96,4 +98,85 @@ impl super::Nexus { ) .await } + + /// List available timeseries schema. + pub(crate) async fn timeseries_schema_list( + &self, + opctx: &OpContext, + pagination: &TimeseriesSchemaPaginationParams, + limit: NonZeroU32, + ) -> Result, Error> { + // Must be a fleet user to list timeseries schema. + // + // TODO-security: We need to figure out how to implement proper security + // checks here, letting less-privileged users fetch data for the + // resources they have access to. + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + self.timeseries_client + .get() + .await + .map_err(|e| { + Error::internal_error(&format!( + "Cannot access timeseries DB: {}", + e + )) + })? + .timeseries_schema_list(&pagination.page, limit) + .await + .map_err(|e| match e { + oximeter_db::Error::DatabaseUnavailable(_) => { + Error::ServiceUnavailable { + internal_message: e.to_string(), + } + } + _ => Error::InternalError { internal_message: e.to_string() }, + }) + } + + /// Run an OxQL query against the timeseries database. + pub(crate) async fn timeseries_query( + &self, + opctx: &OpContext, + query: impl AsRef, + ) -> Result, Error> { + // Must be a fleet user to list timeseries schema. + // + // TODO-security: We need to figure out how to implement proper security + // checks here, letting less-privileged users fetch data for the + // resources they have access to. + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + self.timeseries_client + .get() + .await + .map_err(|e| { + Error::internal_error(&format!( + "Cannot access timeseries DB: {}", + e + )) + })? 
+ .oxql_query(query) + .await + .map(|result| { + // TODO-observability: The query method returns information + // about the duration of the OxQL query and the database + // resource usage for each contained SQL query. We should + // publish this as a timeseries itself, so that we can track + // improvements to query processing. + // + // For now, simply return the tables alone. + result.tables + }) + .map_err(|e| match e { + oximeter_db::Error::DatabaseUnavailable(_) => { + Error::ServiceUnavailable { + internal_message: e.to_string(), + } + } + oximeter_db::Error::Oxql(_) + | oximeter_db::Error::TimeseriesNotFound(_) => { + Error::invalid_request(e.to_string()) + } + _ => Error::InternalError { internal_message: e.to_string() }, + }) + } } diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 9e0b12d83d..3083a8e761 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -28,6 +28,7 @@ use omicron_common::address::MGD_PORT; use omicron_common::address::MGS_PORT; use omicron_common::api::external::Error; use omicron_common::api::internal::shared::SwitchLocation; +use oximeter_producer::Server as ProducerServer; use slog::Logger; use std::collections::HashMap; use std::net::SocketAddrV6; @@ -38,6 +39,7 @@ use uuid::Uuid; // The implementation of Nexus is large, and split into a number of submodules // by resource. mod address_lot; +mod allow_list; pub(crate) mod background; mod bfd; mod bgp; @@ -55,7 +57,7 @@ mod instance_network; mod ip_pool; mod metrics; mod network_interface; -mod oximeter; +pub(crate) mod oximeter; mod probe; mod project; mod quota; @@ -147,6 +149,16 @@ pub struct Nexus { /// Status of background task to populate database populate_status: tokio::sync::watch::Receiver, + /// The metric producer server from which oximeter collects metric data. + producer_server: std::sync::Mutex>, + + /// Reusable `reqwest::Client`, to be cloned and used with the Progenitor- + /// generated `Client::new_with_client`. + /// + /// (This does not need to be in an `Arc` because `reqwest::Client` uses + /// `Arc` internally.) + reqwest_client: reqwest::Client, + /// Client to the timeseries database. timeseries_client: LazyTimeseriesClient, @@ -188,6 +200,9 @@ pub struct Nexus { /// Default Crucible region allocation strategy default_region_allocation_strategy: RegionAllocationStrategy, + + /// Channel for notifying background task of change to opte v2p state + v2p_notification_tx: tokio::sync::watch::Sender<()>, } impl Nexus { @@ -214,7 +229,7 @@ impl Nexus { db::DataStore::new(&log, Arc::clone(&pool), all_versions.as_ref()) .await?, ); - db_datastore.register_producers(&producer_registry); + db_datastore.register_producers(producer_registry); let my_sec_id = db::SecId::from(config.deployment.id); let sec_store = Arc::new(db::CockroachDbSecStore::new( @@ -338,6 +353,12 @@ impl Nexus { } } + let reqwest_client = reqwest::ClientBuilder::new() + .connect_timeout(std::time::Duration::from_secs(15)) + .timeout(std::time::Duration::from_secs(15)) + .build() + .map_err(|e| e.to_string())?; + // Connect to clickhouse - but do so lazily. // Clickhouse may not be executing when Nexus starts. 
let timeseries_client = if let Some(address) = @@ -372,15 +393,20 @@ impl Nexus { Arc::clone(&db_datastore), ); + let v2p_watcher_channel = tokio::sync::watch::channel(()); + let (saga_request, mut saga_request_recv) = SagaRequest::channel(); let background_tasks = background::BackgroundTasks::start( &background_ctx, Arc::clone(&db_datastore), &config.pkg.background_tasks, + rack_id, config.deployment.id, resolver.clone(), saga_request, + v2p_watcher_channel.clone(), + producer_registry, ); let external_resolver = { @@ -403,7 +429,9 @@ impl Nexus { external_server: std::sync::Mutex::new(None), techport_external_server: std::sync::Mutex::new(None), internal_server: std::sync::Mutex::new(None), + producer_server: std::sync::Mutex::new(None), populate_status, + reqwest_client, timeseries_client, updates_config: config.pkg.updates.clone(), tunables: config.pkg.tunables.clone(), @@ -431,16 +459,11 @@ impl Nexus { .pkg .default_region_allocation_strategy .clone(), + v2p_notification_tx: v2p_watcher_channel.0, }; // TODO-cleanup all the extra Arcs here seems wrong let nexus = Arc::new(nexus); - let bootstore_opctx = OpContext::for_background( - log.new(o!("component" => "Bootstore")), - Arc::clone(&authz), - authn::Context::internal_api(), - Arc::clone(&db_datastore), - ); let opctx = OpContext::for_background( log.new(o!("component" => "SagaRecoverer")), Arc::clone(&authz), @@ -480,12 +503,6 @@ impl Nexus { for task in task_nexus.background_tasks.driver.tasks() { task_nexus.background_tasks.driver.activate(task); } - if let Err(e) = task_nexus - .initial_bootstore_sync(&bootstore_opctx) - .await - { - error!(task_log, "failed to run bootstore sync: {e}"); - } } Err(_) => { error!(task_log, "populate failed"); @@ -519,6 +536,16 @@ impl Nexus { Ok(nexus) } + /// Return the ID for this Nexus instance. + pub fn id(&self) -> &Uuid { + &self.id + } + + /// Return the rack ID for this Nexus instance. + pub fn rack_id(&self) -> Uuid { + self.rack_id + } + /// Return the tunable configuration parameters, e.g. for use in tests. pub fn tunables(&self) -> &Tunables { &self.tunables @@ -576,6 +603,7 @@ impl Nexus { external_server: DropshotServer, techport_external_server: DropshotServer, internal_server: DropshotServer, + producer_server: ProducerServer, ) { // If any servers already exist, close them. let _ = self.close_servers().await; @@ -587,9 +615,13 @@ impl Nexus { .unwrap() .replace(techport_external_server); self.internal_server.lock().unwrap().replace(internal_server); + self.producer_server.lock().unwrap().replace(producer_server); } pub(crate) async fn close_servers(&self) -> Result<(), String> { + // NOTE: All these take the lock and swap out of the option immediately, + // because they are synchronous mutexes, which cannot be held across the + // await point these `close()` methods expose. 
let external_server = self.external_server.lock().unwrap().take(); if let Some(server) = external_server { server.close().await?; @@ -603,6 +635,10 @@ impl Nexus { if let Some(server) = internal_server { server.close().await?; } + let producer_server = self.producer_server.lock().unwrap().take(); + if let Some(server) = producer_server { + server.close().await.map_err(|e| e.to_string())?; + } Ok(()) } @@ -633,6 +669,16 @@ impl Nexus { .map(|server| server.local_addr()) } + pub(crate) async fn get_techport_server_address( + &self, + ) -> Option { + self.techport_external_server + .lock() + .unwrap() + .as_ref() + .map(|server| server.local_addr()) + } + pub(crate) async fn get_internal_server_address( &self, ) -> Option { @@ -881,33 +927,16 @@ impl Nexus { pub(crate) async fn dpd_clients( &self, ) -> Result, String> { - let mappings = self.switch_zone_address_mappings().await?; - let clients: HashMap = mappings - .iter() - .map(|(location, addr)| { - let port = DENDRITE_PORT; - - let client_state = dpd_client::ClientState { - tag: String::from("nexus"), - log: self.log.new(o!( - "component" => "DpdClient" - )), - }; - - let dpd_client = dpd_client::Client::new( - &format!("http://[{addr}]:{port}"), - client_state, - ); - (*location, dpd_client) - }) - .collect(); - Ok(clients) + let resolver = self.resolver().await; + dpd_clients(&resolver, &self.log).await } pub(crate) async fn mg_clients( &self, ) -> Result, String> { - let mappings = self.switch_zone_address_mappings().await?; + let resolver = self.resolver().await; + let mappings = + switch_zone_address_mappings(&resolver, &self.log).await?; let mut clients: Vec<(SwitchLocation, mg_admin_client::Client)> = vec![]; for (location, addr) in &mappings { @@ -922,24 +951,6 @@ impl Nexus { } Ok(clients.into_iter().collect::>()) } - - async fn switch_zone_address_mappings( - &self, - ) -> Result, String> { - let switch_zone_addresses = match self - .resolver() - .await - .lookup_all_ipv6(ServiceName::Dendrite) - .await - { - Ok(addrs) => addrs, - Err(e) => { - error!(self.log, "failed to resolve addresses for Dendrite services"; "error" => %e); - return Err(e.to_string()); - } - }; - Ok(map_switch_zone_addrs(&self.log, switch_zone_addresses).await) - } } /// For unimplemented endpoints, indicates whether the resource identified @@ -958,6 +969,50 @@ pub enum Unimpl { ProtectedLookup(Error), } +pub(crate) async fn dpd_clients( + resolver: &internal_dns::resolver::Resolver, + log: &slog::Logger, +) -> Result, String> { + let mappings = switch_zone_address_mappings(resolver, log).await?; + let clients: HashMap = mappings + .iter() + .map(|(location, addr)| { + let port = DENDRITE_PORT; + + let client_state = dpd_client::ClientState { + tag: String::from("nexus"), + log: log.new(o!( + "component" => "DpdClient" + )), + }; + + let dpd_client = dpd_client::Client::new( + &format!("http://[{addr}]:{port}"), + client_state, + ); + (*location, dpd_client) + }) + .collect(); + Ok(clients) +} + +async fn switch_zone_address_mappings( + resolver: &internal_dns::resolver::Resolver, + log: &slog::Logger, +) -> Result, String> { + let switch_zone_addresses = match resolver + .lookup_all_ipv6(ServiceName::Dendrite) + .await + { + Ok(addrs) => addrs, + Err(e) => { + error!(log, "failed to resolve addresses for Dendrite services"; "error" => %e); + return Err(e.to_string()); + } + }; + Ok(map_switch_zone_addrs(&log, switch_zone_addresses).await) +} + // TODO: #3596 Allow updating of Nexus from `handoff_to_nexus()` // This logic is duplicated from RSS // RSS needs 
to know which addresses are managing which slots, and so does Nexus, diff --git a/nexus/src/app/network_interface.rs b/nexus/src/app/network_interface.rs index 3c154806cd..d1fa87073e 100644 --- a/nexus/src/app/network_interface.rs +++ b/nexus/src/app/network_interface.rs @@ -167,7 +167,8 @@ impl super::Nexus { ) -> DeleteResult { let (.., authz_instance, authz_interface) = network_interface_lookup.lookup_for(authz::Action::Delete).await?; - self.db_datastore + let interface_was_deleted = self + .db_datastore .instance_delete_network_interface( opctx, &authz_instance, @@ -194,6 +195,19 @@ impl super::Nexus { // Convert other errors into an appropriate client error network_interface::DeleteError::into_external(e) } - }) + })?; + + // If the interface was already deleted, in general we'd expect to + // return an error on the `lookup_for(Delete)` above. However, we have a + // TOCTOU race here; if multiple simultaneous calls to delete the same + // interface arrive, all will pass the `lookup_for`, then one will get + // `interface_was_deleted=true` and the rest will get + // `interface_was_deleted=false`. Convert those falses into 404s to + // match what subsequent delete requests will see. + if interface_was_deleted { + Ok(()) + } else { + Err(authz_interface.not_found()) + } } } diff --git a/nexus/src/app/oximeter.rs b/nexus/src/app/oximeter.rs index a168b35293..9039d1b8fa 100644 --- a/nexus/src/app/oximeter.rs +++ b/nexus/src/app/oximeter.rs @@ -9,18 +9,16 @@ use crate::internal_api::params::OximeterInfo; use dropshot::PaginationParams; use internal_dns::resolver::{ResolveError, Resolver}; use internal_dns::ServiceName; +use nexus_db_queries::context::OpContext; use nexus_db_queries::db; -use nexus_db_queries::db::identity::Asset; +use nexus_db_queries::db::DataStore; use omicron_common::address::CLICKHOUSE_PORT; -use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; -use omicron_common::api::external::PaginationOrder; -use omicron_common::api::internal::nexus; -use omicron_common::backoff; +use omicron_common::api::external::{DataPageParams, ListResultVec}; +use omicron_common::api::internal::nexus::{self, ProducerEndpoint}; use oximeter_client::Client as OximeterClient; use oximeter_db::query::Timestamp; use oximeter_db::Measurement; -use oximeter_producer::register; use slog::Logger; use std::convert::TryInto; use std::net::SocketAddr; @@ -28,6 +26,12 @@ use std::num::NonZeroU32; use std::time::Duration; use uuid::Uuid; +/// How long a metrics producer remains registered to a collector. +/// +/// Producers are expected to renew their registration lease periodically, at +/// some interval of this overall duration. +pub const PRODUCER_LEASE_DURATION: Duration = Duration::from_secs(10 * 60); + /// A client which knows how to connect to Clickhouse, but does so /// only when a request is actually made. /// @@ -73,117 +77,45 @@ impl super::Nexus { /// Insert a new record of an Oximeter collector server. pub(crate) async fn upsert_oximeter_collector( &self, + opctx: &OpContext, oximeter_info: &OximeterInfo, ) -> Result<(), Error> { // Insert the Oximeter instance into the DB. Note that this _updates_ the record, // specifically, the time_modified, ip, and port columns, if the instance has already been // registered. 
let db_info = db::model::OximeterInfo::new(&oximeter_info); - self.db_datastore.oximeter_create(&db_info).await?; + self.db_datastore.oximeter_create(opctx, &db_info).await?; info!( self.log, "registered new oximeter metric collection server"; "collector_id" => ?oximeter_info.collector_id, "address" => oximeter_info.address, ); - - // Regardless, notify the collector of any assigned metric producers. - // - // This should be empty if this Oximeter collector is registering for - // the first time, but may not be if the service is re-registering after - // failure. - let client = self.build_oximeter_client( - &oximeter_info.collector_id, - oximeter_info.address, - ); - let mut last_producer_id = None; - loop { - let pagparams = DataPageParams { - marker: last_producer_id.as_ref(), - direction: PaginationOrder::Ascending, - limit: std::num::NonZeroU32::new(100).unwrap(), - }; - let producers = self - .db_datastore - .producers_list_by_oximeter_id( - oximeter_info.collector_id, - &pagparams, - ) - .await?; - if producers.is_empty() { - return Ok(()); - } - debug!( - self.log, - "re-assigning existing metric producers to a collector"; - "n_producers" => producers.len(), - "collector_id" => ?oximeter_info.collector_id, - ); - // Be sure to continue paginating from the last producer. - // - // Safety: We check just above if the list is empty, so there is a - // last element. - last_producer_id.replace(producers.last().unwrap().id()); - for producer in producers.into_iter() { - let producer_info = oximeter_client::types::ProducerEndpoint { - id: producer.id(), - kind: nexus::ProducerKind::from(producer.kind).into(), - address: SocketAddr::new( - producer.ip.ip(), - producer.port.try_into().unwrap(), - ) - .to_string(), - base_route: producer.base_route, - interval: oximeter_client::types::Duration::from( - Duration::from_secs_f64(producer.interval), - ), - }; - client - .producers_post(&producer_info) - .await - .map_err(Error::from)?; - } - } + Ok(()) } - /// Register as a metric producer with the oximeter metric collection server. - pub(crate) async fn register_as_producer(&self, address: SocketAddr) { - let producer_endpoint = nexus::ProducerEndpoint { - id: self.id, - kind: nexus::ProducerKind::Service, - address, - base_route: String::from("/metrics/collect"), - interval: Duration::from_secs(10), - }; - let register = || async { - debug!(self.log, "registering nexus as metric producer"); - register(address, &self.log, &producer_endpoint) - .await - .map_err(backoff::BackoffError::transient) - }; - let log_registration_failure = |error, delay| { - warn!( - self.log, - "failed to register nexus as a metric producer, will retry in {:?}", delay; - "error_message" => ?error, - ); - }; - backoff::retry_notify( - backoff::retry_policy_internal_service(), - register, - log_registration_failure, - ).await - .expect("expected an infinite retry loop registering nexus as a metric producer"); + /// List the producers assigned to an oximeter collector. + pub(crate) async fn list_assigned_producers( + &self, + opctx: &OpContext, + collector_id: Uuid, + pagparams: &DataPageParams<'_, Uuid>, + ) -> ListResultVec { + self.db_datastore + .producers_list_by_oximeter_id(opctx, collector_id, pagparams) + .await + .map(|list| list.into_iter().map(ProducerEndpoint::from).collect()) } /// Assign a newly-registered metric producer to an oximeter collector server. 
pub(crate) async fn assign_producer( &self, + opctx: &OpContext, producer_info: nexus::ProducerEndpoint, ) -> Result<(), Error> { - let (collector, id) = self.next_collector().await?; + let (collector, id) = self.next_collector(opctx).await?; let db_info = db::model::ProducerEndpoint::new(&producer_info, id); - self.db_datastore.producer_endpoint_create(&db_info).await?; + self.db_datastore.producer_endpoint_create(opctx, &db_info).await?; collector .producers_post(&oximeter_client::types::ProducerEndpoint::from( &producer_info, @@ -199,58 +131,6 @@ impl super::Nexus { Ok(()) } - /// Idempotently un-assign a producer from an oximeter collector. - pub(crate) async fn unassign_producer( - &self, - id: &Uuid, - ) -> Result<(), Error> { - if let Some(collector_id) = - self.db_datastore.producer_endpoint_delete(id).await? - { - debug!( - self.log, - "deleted metric producer assignment"; - "producer_id" => %id, - "collector_id" => %collector_id, - ); - let oximeter_info = - self.db_datastore.oximeter_lookup(&collector_id).await?; - let address = - SocketAddr::new(oximeter_info.ip.ip(), *oximeter_info.port); - let client = self.build_oximeter_client(&id, address); - if let Err(e) = client.producer_delete(&id).await { - error!( - self.log, - "failed to delete producer from collector"; - "producer_id" => %id, - "collector_id" => %collector_id, - "address" => %address, - "error" => ?e, - ); - return Err(Error::internal_error( - format!("failed to delete producer from collector: {e:?}") - .as_str(), - )); - } else { - debug!( - self.log, - "successfully deleted producer from collector"; - "producer_id" => %id, - "collector_id" => %collector_id, - "address" => %address, - ); - Ok(()) - } - } else { - trace!( - self.log, - "un-assigned non-existent metric producer"; - "producer_id" => %id, - ); - Ok(()) - } - } - /// Returns a results from the timeseries DB based on the provided query /// parameters. /// @@ -360,41 +240,79 @@ impl super::Nexus { .unwrap()) } - // Internal helper to build an Oximeter client from its ID and address (common data between - // model type and the API type). - fn build_oximeter_client( - &self, - id: &Uuid, - address: SocketAddr, - ) -> OximeterClient { - let client_log = - self.log.new(o!("oximeter-collector" => id.to_string())); - let client = - OximeterClient::new(&format!("http://{}", address), client_log); - info!( - self.log, - "registered oximeter collector client"; - "id" => id.to_string(), - ); - client - } - // Return an oximeter collector to assign a newly-registered producer - async fn next_collector(&self) -> Result<(OximeterClient, Uuid), Error> { + async fn next_collector( + &self, + opctx: &OpContext, + ) -> Result<(OximeterClient, Uuid), Error> { // TODO-robustness Replace with a real load-balancing strategy. let page_params = DataPageParams { marker: None, direction: dropshot::PaginationOrder::Ascending, limit: std::num::NonZeroU32::new(1).unwrap(), }; - let oxs = self.db_datastore.oximeter_list(&page_params).await?; + let oxs = self.db_datastore.oximeter_list(opctx, &page_params).await?; let info = oxs.first().ok_or_else(|| Error::ServiceUnavailable { internal_message: String::from("no oximeter collectors available"), })?; let address = SocketAddr::from((info.ip.ip(), info.port.try_into().unwrap())); let id = info.id; - Ok((self.build_oximeter_client(&id, address), id)) + Ok((build_oximeter_client(&self.log, &id, address), id)) + } +} + +/// Idempotently un-assign a producer from an oximeter collector. 
+pub(crate) async fn unassign_producer( + datastore: &DataStore, + log: &slog::Logger, + opctx: &OpContext, + id: &Uuid, +) -> Result<(), Error> { + if let Some(collector_id) = + datastore.producer_endpoint_delete(opctx, id).await? + { + debug!( + log, + "deleted metric producer assignment"; + "producer_id" => %id, + "collector_id" => %collector_id, + ); + let oximeter_info = + datastore.oximeter_lookup(opctx, &collector_id).await?; + let address = + SocketAddr::new(oximeter_info.ip.ip(), *oximeter_info.port); + let client = build_oximeter_client(&log, &id, address); + if let Err(e) = client.producer_delete(&id).await { + error!( + log, + "failed to delete producer from collector"; + "producer_id" => %id, + "collector_id" => %collector_id, + "address" => %address, + "error" => ?e, + ); + return Err(Error::internal_error( + format!("failed to delete producer from collector: {e:?}") + .as_str(), + )); + } else { + debug!( + log, + "successfully deleted producer from collector"; + "producer_id" => %id, + "collector_id" => %collector_id, + "address" => %address, + ); + Ok(()) + } + } else { + trace!( + log, + "un-assigned non-existent metric producer"; + "producer_id" => %id, + ); + Ok(()) } } @@ -406,3 +324,21 @@ fn map_oximeter_err(error: oximeter_db::Error) -> Error { _ => Error::InternalError { internal_message: error.to_string() }, } } + +// Internal helper to build an Oximeter client from its ID and address (common data between +// model type and the API type). +fn build_oximeter_client( + log: &slog::Logger, + id: &Uuid, + address: SocketAddr, +) -> OximeterClient { + let client_log = log.new(o!("oximeter-collector" => id.to_string())); + let client = + OximeterClient::new(&format!("http://{}", address), client_log); + info!( + log, + "registered oximeter collector client"; + "id" => id.to_string(), + ); + client +} diff --git a/nexus/src/app/probe.rs b/nexus/src/app/probe.rs index e85c040a28..41ea4eece2 100644 --- a/nexus/src/app/probe.rs +++ b/nexus/src/app/probe.rs @@ -60,9 +60,22 @@ impl super::Nexus { let (.., authz_project) = project_lookup.lookup_for(authz::Action::CreateChild).await?; + // resolve NameOrId into authz::IpPool + let pool = match &new_probe_params.ip_pool { + Some(pool) => Some( + self.ip_pool_lookup(opctx, &pool)? + .lookup_for(authz::Action::CreateChild) + .await? 
+ .0, + ), + None => None, + }; + + let new_probe = + Probe::from_create(new_probe_params, authz_project.id()); let probe = self .db_datastore - .probe_create(opctx, &authz_project, new_probe_params) + .probe_create(opctx, &authz_project, &new_probe, pool) .await?; let (.., sled) = diff --git a/nexus/src/app/rack.rs b/nexus/src/app/rack.rs index 4a4a61142e..da97c77c04 100644 --- a/nexus/src/app/rack.rs +++ b/nexus/src/app/rack.rs @@ -18,15 +18,20 @@ use nexus_db_queries::context::OpContext; use nexus_db_queries::db; use nexus_db_queries::db::datastore::DnsVersionUpdateBuilder; use nexus_db_queries::db::datastore::RackInit; +use nexus_db_queries::db::datastore::SledUnderlayAllocationResult; use nexus_db_queries::db::lookup::LookupPath; use nexus_reconfigurator_execution::silo_dns_name; +use nexus_types::deployment::blueprint_zone_type; +use nexus_types::deployment::BlueprintZoneFilter; +use nexus_types::deployment::BlueprintZoneType; +use nexus_types::deployment::CockroachDbClusterVersion; +use nexus_types::deployment::SledFilter; use nexus_types::external_api::params::Address; use nexus_types::external_api::params::AddressConfig; use nexus_types::external_api::params::AddressLotBlockCreate; use nexus_types::external_api::params::BgpAnnounceSetCreate; use nexus_types::external_api::params::BgpAnnouncementCreate; use nexus_types::external_api::params::BgpConfigCreate; -use nexus_types::external_api::params::BgpPeer; use nexus_types::external_api::params::LinkConfigCreate; use nexus_types::external_api::params::LldpServiceConfigCreate; use nexus_types::external_api::params::RouteConfig; @@ -45,14 +50,20 @@ use nexus_types::external_api::views; use nexus_types::internal_api::params::DnsRecord; use omicron_common::address::{get_64_subnet, Ipv6Subnet, RACK_PREFIX}; use omicron_common::api::external::AddressLotKind; +use omicron_common::api::external::BgpPeer; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; +use omicron_common::api::external::InternalContext; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::Name; use omicron_common::api::external::NameOrId; +use omicron_common::api::external::ResourceType; use omicron_common::api::internal::shared::ExternalPortDiscovery; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; +use oxnet::IpNet; use sled_agent_client::types::AddSledRequest; use sled_agent_client::types::StartSledAgentRequest; use sled_agent_client::types::StartSledAgentRequestBody; @@ -87,7 +98,7 @@ impl super::Nexus { Ok(db_rack) } - /// Marks the rack as initialized with a set of services. + /// Marks the rack as initialized with information supplied by RSS. /// /// This function is a no-op if the rack has already been initialized. 
pub(crate) async fn rack_initialize( @@ -96,8 +107,37 @@ impl super::Nexus { rack_id: Uuid, request: RackInitializationRequest, ) -> Result<(), Error> { + let log = &opctx.log; + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + let physical_disks: Vec<_> = request + .physical_disks + .into_iter() + .map(|disk| { + db::model::PhysicalDisk::new( + disk.id, + disk.vendor, + disk.serial, + disk.model, + disk.variant.into(), + disk.sled_id, + ) + }) + .collect(); + + let zpools: Vec<_> = request + .zpools + .into_iter() + .map(|pool| { + db::model::Zpool::new( + pool.id, + pool.sled_id, + pool.physical_disk_id, + ) + }) + .collect(); + let datasets: Vec<_> = request .datasets .into_iter() @@ -162,15 +202,15 @@ impl super::Nexus { let silo_name = &request.recovery_silo.silo_name; let dns_records = request - .services - .iter() - .filter_map(|s| match &s.kind { - nexus_types::internal_api::params::ServiceKind::Nexus { - external_address, + .blueprint + .all_omicron_zones(BlueprintZoneFilter::ShouldBeExternallyReachable) + .filter_map(|(_, zc)| match zc.zone_type { + BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + external_ip, .. - } => Some(match external_address { - IpAddr::V4(addr) => DnsRecord::A(*addr), - IpAddr::V6(addr) => DnsRecord::Aaaa(*addr), + }) => Some(match external_ip.ip { + IpAddr::V4(addr) => DnsRecord::A(addr), + IpAddr::V6(addr) => DnsRecord::Aaaa(addr), }), _ => None, }) @@ -191,6 +231,33 @@ impl super::Nexus { let mut blueprint = request.blueprint; blueprint.external_dns_version = blueprint.external_dns_version.next(); + // Fill in the CockroachDB metadata for the initial blueprint, and set + // the `cluster.preserve_downgrade_option` setting ahead of blueprint + // execution. + let cockroachdb_settings = self + .datastore() + .cockroachdb_settings(opctx) + .await + .internal_context( + "fetching cockroachdb settings for rack initialization", + )?; + self.datastore() + .cockroachdb_setting_set_string( + opctx, + cockroachdb_settings.state_fingerprint.clone(), + "cluster.preserve_downgrade_option", + CockroachDbClusterVersion::NEWLY_INITIALIZED.to_string(), + ) + .await + .internal_context( + "setting `cluster.preserve_downgrade_option` \ + for rack initialization", + )?; + blueprint.cockroachdb_fingerprint = + cockroachdb_settings.state_fingerprint; + blueprint.cockroachdb_setting_preserve_downgrade = + CockroachDbClusterVersion::NEWLY_INITIALIZED.into(); + // Administrators of the Recovery Silo are automatically made // administrators of the Fleet. let mapped_fleet_roles = BTreeMap::from([( @@ -217,6 +284,13 @@ impl super::Nexus { let rack_network_config = &request.rack_network_config; + // The `rack` row is created with the rack ID we know when Nexus starts, + // but we didn't know the rack subnet until now. Set it. 
+ let mut rack = self.rack_lookup(opctx, &self.rack_id).await?; + rack.rack_subnet = + Some(IpNet::from(rack_network_config.rack_subnet).into()); + self.datastore().update_rack_subnet(opctx, &rack).await?; + // TODO - https://github.com/oxidecomputer/omicron/pull/3359 // register all switches found during rack initialization // identify requested switch from config and associate @@ -224,10 +298,7 @@ impl super::Nexus { match request.external_port_count { ExternalPortDiscovery::Auto(switch_mgmt_addrs) => { use dpd_client::Client as DpdClient; - info!( - self.log, - "Using automatic external switchport discovery" - ); + info!(log, "Using automatic external switchport discovery"); for (switch, addr) in switch_mgmt_addrs { let dpd_client = DpdClient::new( @@ -238,7 +309,7 @@ impl super::Nexus { ), dpd_client::ClientState { tag: "nexus".to_string(), - log: self.log.new(o!("component" => "DpdClient")), + log: log.new(o!("component" => "DpdClient")), }, ); @@ -247,10 +318,7 @@ impl super::Nexus { Error::internal_error(&format!("encountered error while discovering ports for {switch:#?}: {e}")) })?; - info!( - self.log, - "discovered ports for {switch}: {all_ports:#?}" - ); + info!(log, "discovered ports for {switch}: {all_ports:#?}"); let qsfp_ports: Vec = all_ports .iter() @@ -261,7 +329,7 @@ impl super::Nexus { .collect(); info!( - self.log, + log, "populating ports for {switch}: {qsfp_ports:#?}" ); @@ -276,7 +344,7 @@ impl super::Nexus { // TODO: #3602 Eliminate need for static port mappings for switch ports ExternalPortDiscovery::Static(port_mappings) => { info!( - self.log, + log, "Using static configuration for external switchports" ); for (switch, ports) in port_mappings { @@ -295,7 +363,7 @@ impl super::Nexus { // Currently calling some of the apis directly, but should we be using sagas // going forward via self.run_saga()? Note that self.create_runnable_saga and // self.execute_saga are currently not available within this scope. 
- info!(self.log, "Recording Rack Network Configuration"); + info!(log, "Recording Rack Network Configuration"); let address_lot_name = Name::from_str(INFRA_LOT).map_err(|e| { Error::internal_error(&format!( "unable to use `initial-infra` as `Name`: {e}" @@ -361,8 +429,8 @@ impl super::Nexus { .originate .iter() .map(|o| AddressLotBlockCreate { - first_address: o.network().into(), - last_address: o.broadcast().into(), + first_address: o.first_addr().into(), + last_address: o.last_addr().into(), }) .collect(), }, @@ -394,13 +462,13 @@ impl super::Nexus { announcement: bgp_config .originate .iter() - .map(|x| BgpAnnouncementCreate { + .map(|ipv4_net| BgpAnnouncementCreate { address_lot_block: NameOrId::Name( format!("as{}", bgp_config.asn) .parse() .unwrap(), ), - network: IpNetwork::from(*x).into(), + network: (*ipv4_net).into(), }) .collect(), }, @@ -432,6 +500,8 @@ impl super::Nexus { asn: bgp_config.asn, bgp_announce_set_id: announce_set_name.into(), vrf: None, + shaper: bgp_config.shaper.clone(), + checker: bgp_config.checker.clone(), }, ) .await @@ -484,7 +554,7 @@ impl super::Nexus { .iter() .map(|a| Address { address_lot: NameOrId::Name(address_lot_name.clone()), - address: (*a).into(), + address: (*a), }) .collect(); @@ -495,11 +565,7 @@ impl super::Nexus { let routes: Vec = uplink_config .routes .iter() - .map(|r| Route { - dst: r.destination.into(), - gw: r.nexthop, - vid: None, - }) + .map(|r| Route { dst: r.destination, gw: r.nexthop, vid: None }) .collect(); port_settings_params @@ -510,19 +576,26 @@ impl super::Nexus { .bgp_peers .iter() .map(|r| BgpPeer { - bgp_announce_set: NameOrId::Name( - format!("as{}-announce", r.asn).parse().unwrap(), - ), bgp_config: NameOrId::Name( format!("as{}", r.asn).parse().unwrap(), ), interface_name: "phy0".into(), addr: r.addr.into(), - hold_time: r.hold_time.unwrap_or(6) as u32, - idle_hold_time: r.idle_hold_time.unwrap_or(3) as u32, - delay_open: r.delay_open.unwrap_or(0) as u32, - connect_retry: r.connect_retry.unwrap_or(3) as u32, - keepalive: r.keepalive.unwrap_or(2) as u32, + hold_time: r.hold_time() as u32, + idle_hold_time: r.idle_hold_time() as u32, + delay_open: r.delay_open() as u32, + connect_retry: r.connect_retry() as u32, + keepalive: r.keepalive() as u32, + remote_asn: r.remote_asn, + min_ttl: r.min_ttl, + md5_auth_key: r.md5_auth_key.clone(), + multi_exit_discriminator: r.multi_exit_discriminator, + local_pref: r.local_pref, + enforce_first_as: r.enforce_first_as, + communities: r.communities.clone(), + allowed_import: r.allowed_import.clone(), + allowed_export: r.allowed_export.clone(), + vlan_id: r.vlan_id, }) .collect(); @@ -581,16 +654,16 @@ impl super::Nexus { } // TODO - https://github.com/oxidecomputer/omicron/issues/3277 // record port speed - self.initial_bootstore_sync(&opctx).await?; - self.db_datastore .rack_set_initialized( opctx, RackInit { - rack_subnet: rack_network_config.rack_subnet.into(), + rack_subnet: IpNet::from(rack_network_config.rack_subnet) + .into(), rack_id, blueprint, - services: request.services, + physical_disks, + zpools, datasets, service_ip_pool_ranges, internal_dns, @@ -603,6 +676,7 @@ impl super::Nexus { .user_password_hash .into(), dns_update, + allowed_source_ips: request.allowed_source_ips, }, ) .await?; @@ -657,31 +731,6 @@ impl super::Nexus { } } - pub(crate) async fn initial_bootstore_sync( - &self, - opctx: &OpContext, - ) -> Result<(), Error> { - let mut rack = self.rack_lookup(opctx, &self.rack_id).await?; - if rack.rack_subnet.is_some() { - return Ok(()); - } - let sa = 
self.get_any_sled_agent_client(opctx).await?; - let result = sa - .read_network_bootstore_config_cache() - .await - .map_err(|e| Error::InternalError { - internal_message: format!("read bootstore network config: {e}"), - })? - .into_inner(); - - rack.rack_subnet = - result.body.rack_network_config.map(|x| x.rack_subnet.into()); - - self.datastore().update_rack_subnet(opctx, &rack).await?; - - Ok(()) - } - /// Return the list of sleds that are inserted into an initialized rack /// but not yet initialized as part of a rack. // @@ -710,7 +759,10 @@ impl super::Nexus { }; debug!(self.log, "Listing sleds"); - let sleds = self.db_datastore.sled_list(opctx, &pagparams).await?; + let sleds = self + .db_datastore + .sled_list(opctx, &pagparams, SledFilter::InService) + .await?; let mut uninitialized_sleds: Vec = collection .sps @@ -745,7 +797,7 @@ impl super::Nexus { &self, opctx: &OpContext, sled: UninitializedSledId, - ) -> Result<(), Error> { + ) -> Result { let baseboard_id = sled.clone().into(); let hw_baseboard_id = self .db_datastore @@ -756,14 +808,26 @@ impl super::Nexus { let rack_subnet = Ipv6Subnet::::from(rack_subnet(Some(subnet))?); - let allocation = self + let allocation = match self .db_datastore .allocate_sled_underlay_subnet_octets( opctx, self.rack_id, hw_baseboard_id, ) - .await?; + .await? + { + SledUnderlayAllocationResult::New(allocation) => allocation, + SledUnderlayAllocationResult::CommissionedSled(allocation) => { + return Err(Error::ObjectAlreadyExists { + type_name: ResourceType::Sled, + object_name: format!( + "{} ({}): {}", + sled.serial, sled.part, allocation.sled_id + ), + }); + } + }; // Convert `UninitializedSledId` to the sled-agent type let baseboard_id = sled_agent_client::types::BaseboardId { @@ -778,7 +842,7 @@ impl super::Nexus { generation: 0, schema_version: 1, body: StartSledAgentRequestBody { - id: allocation.sled_id, + id: allocation.sled_id.into_untyped_uuid(), rack_id: allocation.rack_id, use_trust_quorum: true, is_lrtq_learner: true, @@ -787,8 +851,7 @@ impl super::Nexus { rack_subnet, allocation.subnet_octet.try_into().unwrap(), ) - .net() - .into(), + .net(), }, }, }, @@ -821,11 +884,7 @@ impl super::Nexus { ), })?; - // Trigger an inventory collection so that the newly added sled is known - // about. - self.activate_inventory_collection(); - - Ok(()) + Ok(allocation.sled_id.into()) } async fn get_any_sled_agent_url( @@ -842,14 +901,6 @@ impl super::Nexus { .address(); Ok(format!("http://{}", addr)) } - - async fn get_any_sled_agent_client( - &self, - opctx: &OpContext, - ) -> Result { - let url = self.get_any_sled_agent_url(opctx).await?; - Ok(sled_agent_client::Client::new(&url, self.log.clone())) - } } pub fn rack_subnet( diff --git a/nexus/src/app/saga.rs b/nexus/src/app/saga.rs index 93d22df7e1..8a717839f0 100644 --- a/nexus/src/app/saga.rs +++ b/nexus/src/app/saga.rs @@ -213,6 +213,18 @@ impl super::Nexus { let runnable_saga = self.create_runnable_saga(dag).await?; // Actually run the saga to completion. + // + // XXX: This may loop forever in case `SecStore::record_event` fails. + // Ideally, `run_saga` wouldn't both start the saga and wait for it to + // be finished -- instead, it would start off the saga, and then return + // a notification channel that the caller could use to decide: + // + // - either to .await until completion + // - or to stop waiting after a certain period, while still letting the + // saga run in the background. 
+ // + // For more, see https://github.com/oxidecomputer/omicron/issues/5406 + // and the note in `sec_store.rs`'s `record_event`. self.run_saga(runnable_saga).await } } diff --git a/nexus/src/app/sagas/common_storage.rs b/nexus/src/app/sagas/common_storage.rs index 3b590f6205..51e9648592 100644 --- a/nexus/src/app/sagas/common_storage.rs +++ b/nexus/src/app/sagas/common_storage.rs @@ -17,7 +17,6 @@ use internal_dns::ServiceName; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; -use nexus_db_queries::db::identity::Asset; use nexus_db_queries::db::lookup::LookupPath; use omicron_common::api::external::Error; use omicron_common::backoff::{self, BackoffError}; @@ -37,10 +36,14 @@ pub(crate) async fn ensure_region_in_dataset( ) -> Result { let url = format!("http://{}", dataset.address()); let client = CrucibleAgentClient::new(&url); - + let Ok(extent_count) = u32::try_from(region.extent_count()) else { + return Err(Error::internal_error( + "Extent count out of range for a u32", + )); + }; let region_request = CreateRegion { block_size: region.block_size().to_bytes(), - extent_count: region.extent_count(), + extent_count, extent_size: region.blocks_per_extent(), // TODO: Can we avoid casting from UUID to string? // NOTE: This'll require updating the crucible agent client. @@ -49,6 +52,7 @@ pub(crate) async fn ensure_region_in_dataset( cert_pem: None, key_pem: None, root_pem: None, + source: None, }; let create_region = || async { @@ -769,7 +773,10 @@ pub(crate) async fn call_pantry_attach_for_disk( let disk_volume = nexus .datastore() - .volume_checkout(disk.volume_id) + .volume_checkout( + disk.volume_id, + db::datastore::VolumeCheckoutReason::Pantry, + ) .await .map_err(ActionError::action_failed)?; diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 9d52ec1501..5e1d386ed1 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -259,7 +259,7 @@ async fn sdc_alloc_regions( let datasets_and_regions = osagactx .datastore() - .region_allocate( + .disk_region_allocate( &opctx, volume_id, ¶ms.create_params.disk_source, @@ -390,7 +390,10 @@ async fn sdc_regions_ensure( let volume = osagactx .datastore() - .volume_checkout(db_snapshot.volume_id) + .volume_checkout( + db_snapshot.volume_id, + db::datastore::VolumeCheckoutReason::ReadOnlyCopy, + ) .await .map_err(ActionError::action_failed)?; @@ -433,7 +436,10 @@ async fn sdc_regions_ensure( let volume = osagactx .datastore() - .volume_checkout(image.volume_id) + .volume_checkout( + image.volume_id, + db::datastore::VolumeCheckoutReason::ReadOnlyCopy, + ) .await .map_err(ActionError::action_failed)?; @@ -479,7 +485,7 @@ async fn sdc_regions_ensure( sub_volumes: vec![VolumeConstructionRequest::Region { block_size, blocks_per_extent, - extent_count: extent_count.try_into().unwrap(), + extent_count, gen: 1, opts: CrucibleOpts { id: disk_id, @@ -876,7 +882,7 @@ pub(crate) mod test { pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -887,7 +893,7 @@ pub(crate) mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(&client, PROJECT_NAME).await.identity.id; @@ -1027,7 +1033,7 @@ 
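The XXX note added in `nexus/src/app/saga.rs` above sketches an alternative in which `run_saga` would only start the saga and hand the caller a completion channel to await or abandon. A minimal illustration of that shape, assuming tokio's `oneshot` channel; the `StartedSaga` type and its methods are hypothetical, not part of this change:

use std::time::Duration;
use tokio::sync::oneshot;

/// Hypothetical handle returned as soon as the saga has been submitted.
pub struct StartedSaga<T> {
    /// Resolves when the saga reaches a terminal state.
    completion: oneshot::Receiver<T>,
}

impl<T> StartedSaga<T> {
    /// Option 1: block until the saga finishes.
    pub async fn wait(self) -> Result<T, oneshot::error::RecvError> {
        self.completion.await
    }

    /// Option 2: wait only up to `timeout`; on expiry, stop waiting while the
    /// saga keeps running in the background.
    pub async fn wait_with_timeout(
        self,
        timeout: Duration,
    ) -> Option<Result<T, oneshot::error::RecvError>> {
        tokio::time::timeout(timeout, self.completion).await.ok()
    }
}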
pub(crate) mod test { test: &DiskTest, ) { let sled_agent = &cptestctx.sled_agent.sled_agent; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); crate::app::sagas::test_helpers::assert_no_failed_undo_steps( &cptestctx.logctx.log, @@ -1057,7 +1063,7 @@ pub(crate) mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(&client, PROJECT_NAME).await.identity.id; let opctx = test_opctx(cptestctx); @@ -1087,7 +1093,7 @@ pub(crate) mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(&client, PROJECT_NAME).await.identity.id; let opctx = test_opctx(&cptestctx); @@ -1105,7 +1111,7 @@ pub(crate) mod test { } async fn destroy_disk(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let disk_selector = params::DiskSelector { project: Some( @@ -1128,7 +1134,7 @@ pub(crate) mod test { let test = DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(&client, PROJECT_NAME).await.identity.id; diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index 333e6c1672..24cf331a34 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -201,12 +201,12 @@ pub(crate) mod test { pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx.nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } async fn create_disk(cptestctx: &ControlPlaneTestContext) -> Disk { - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let project_selector = params::ProjectSelector { @@ -232,7 +232,7 @@ pub(crate) mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(client, PROJECT_NAME).await.identity.id; let disk = create_disk(&cptestctx).await; @@ -258,7 +258,7 @@ pub(crate) mod test { let test = DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx.nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_project(client, PROJECT_NAME).await.identity.id; let disk = create_disk(&cptestctx).await; diff --git a/nexus/src/app/sagas/finalize_disk.rs b/nexus/src/app/sagas/finalize_disk.rs index d4f6fc39aa..89893fb703 100644 --- a/nexus/src/app/sagas/finalize_disk.rs +++ b/nexus/src/app/sagas/finalize_disk.rs @@ -79,7 +79,8 @@ impl NexusSaga for SagaFinalizeDisk { silo_id: params.silo_id, project_id: params.project_id, disk_id: params.disk_id, - attached_instance_and_sled: None, + attach_instance_id: None, + use_the_pantry: true, create_params: params::SnapshotCreate { identity: external::IdentityMetadataCreateParams { name: 
snapshot_name.clone(), diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 0950754572..a6771f65a0 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -428,12 +428,15 @@ async fn sic_create_network_interface_undo( .lookup_for(authz::Action::Modify) .await .map_err(ActionError::action_failed)?; - match LookupPath::new(&opctx, &datastore) + + let interface_deleted = match LookupPath::new(&opctx, &datastore) .instance_network_interface_id(interface_id) .lookup_for(authz::Action::Delete) .await { Ok((.., authz_interface)) => { + // The lookup succeeded, but we could still fail to delete the + // interface if we're racing another deleter. datastore .instance_delete_network_interface( &opctx, @@ -441,25 +444,26 @@ async fn sic_create_network_interface_undo( &authz_interface, ) .await - .map_err(|e| e.into_external())?; - Ok(()) - } - Err(Error::ObjectNotFound { .. }) => { - // The saga is attempting to delete the NIC by the ID cached - // in the saga log. If we're running this, the NIC already - // appears to be gone, which is odd, but not exactly an - // error. Swallowing the error allows the saga to continue, - // but this is another place we might want to consider - // bumping a counter or otherwise tracking things. - warn!( - osagactx.log(), - "During saga unwind, NIC already appears deleted"; - "interface_id" => %interface_id, - ); - Ok(()) + .map_err(|e| e.into_external())? } - Err(e) => Err(e.into()), + Err(Error::ObjectNotFound { .. }) => false, + Err(e) => return Err(e.into()), + }; + + if !interface_deleted { + // The saga is attempting to delete the NIC by the ID cached + // in the saga log. If we're running this, the NIC already + // appears to be gone, which is odd, but not exactly an + // error. Swallowing the error allows the saga to continue, + // but this is another place we might want to consider + // bumping a counter or otherwise tracking things. + warn!( + osagactx.log(), + "During saga unwind, NIC already appears deleted"; + "interface_id" => %interface_id, + ); } + Ok(()) } /// Create one custom (non-default) network interface for the provided instance. 
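The reworked undo step in `instance_create.rs` above turns "interface already gone" from a control-flow special case into a tracked boolean, so the warning is logged in exactly one place and the undo stays idempotent. A small standalone sketch of that pattern, with a made-up `DeleteError` type standing in for the real lookup/delete errors:

/// Hypothetical error type for this sketch; the real code distinguishes
/// `Error::ObjectNotFound` from other lookup/delete failures.
enum DeleteError {
    NotFound,
    Other(String),
}

async fn idempotent_undo<F, Fut>(delete: F) -> Result<(), String>
where
    F: FnOnce() -> Fut,
    Fut: std::future::Future<Output = Result<(), DeleteError>>,
{
    // Attempt the delete, mapping "not found" to "nothing left to do".
    let deleted = match delete().await {
        Ok(()) => true,
        Err(DeleteError::NotFound) => false,
        Err(DeleteError::Other(msg)) => return Err(msg),
    };
    if !deleted {
        // The record was already gone; the undo still succeeds, but this is
        // worth noting (the saga code logs a warning at this point).
        eprintln!("undo: resource already appears deleted");
    }
    Ok(())
}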
@@ -1133,7 +1137,7 @@ pub mod test { ) { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters @@ -1260,7 +1264,7 @@ pub mod test { cptestctx: &ControlPlaneTestContext, ) { let sled_agent = &cptestctx.sled_agent.sled_agent; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); // Check that no partial artifacts of instance creation exist assert!(no_instance_records_exist(datastore).await); @@ -1283,9 +1287,7 @@ pub mod test { assert!(no_instances_or_disks_on_sled(&sled_agent).await); let v2p_mappings = &*sled_agent.v2p_mappings.lock().await; - for (_nic_id, mappings) in v2p_mappings { - assert!(mappings.is_empty()); - } + assert!(v2p_mappings.is_empty()); } #[nexus_test(server = crate::Server)] @@ -1296,7 +1298,7 @@ pub mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters @@ -1325,7 +1327,7 @@ pub mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_project_and_disk(&client).await; let opctx = test_helpers::test_opctx(&cptestctx); @@ -1349,7 +1351,7 @@ pub mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters diff --git a/nexus/src/app/sagas/instance_delete.rs b/nexus/src/app/sagas/instance_delete.rs index 0e253913b0..b6fedc175d 100644 --- a/nexus/src/app/sagas/instance_delete.rs +++ b/nexus/src/app/sagas/instance_delete.rs @@ -102,6 +102,7 @@ async fn sid_delete_network_interfaces( sagactx: NexusActionContext, ) -> Result<(), ActionError> { let osagactx = sagactx.user_data(); + let nexus = osagactx.nexus(); let params = sagactx.saga_params::()?; let opctx = crate::context::op_context_for_saga_action( &sagactx, @@ -112,6 +113,7 @@ async fn sid_delete_network_interfaces( .instance_delete_all_network_interfaces(&opctx, ¶ms.authz_instance) .await .map_err(ActionError::action_failed)?; + nexus.background_tasks.activate(&nexus.background_tasks.task_v2p_manager); Ok(()) } @@ -210,7 +212,7 @@ mod test { instance_id: Uuid, ) -> Params { let opctx = test_opctx(&cptestctx); - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); let (.., authz_instance, instance) = LookupPath::new(&opctx, &datastore) @@ -253,7 +255,7 @@ mod test { pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -263,7 +265,7 @@ mod test { ) { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = 
&cptestctx.server.server_context().nexus; create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters @@ -290,7 +292,7 @@ mod test { cptestctx: &ControlPlaneTestContext, params: params::InstanceCreate, ) -> db::model::Instance { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let project_selector = params::ProjectSelector { @@ -304,7 +306,8 @@ mod test { .await .unwrap(); - let datastore = cptestctx.server.apictx().nexus.datastore().clone(); + let datastore = + cptestctx.server.server_context().nexus.datastore().clone(); let (.., db_instance) = LookupPath::new(&opctx, &datastore) .instance_id(instance_state.instance().id()) .fetch() @@ -321,7 +324,7 @@ mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; create_org_project_and_disk(&client).await; // Build the saga DAG with the provided test parameters diff --git a/nexus/src/app/sagas/instance_ip_attach.rs b/nexus/src/app/sagas/instance_ip_attach.rs index 3cd6ac1c46..3332b71274 100644 --- a/nexus/src/app/sagas/instance_ip_attach.rs +++ b/nexus/src/app/sagas/instance_ip_attach.rs @@ -410,7 +410,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let sled_agent = &cptestctx.sled_agent.sled_agent; @@ -460,7 +460,7 @@ pub(crate) mod test { use nexus_db_queries::db::schema::external_ip::dsl; let sled_agent = &cptestctx.sled_agent.sled_agent; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); @@ -500,7 +500,7 @@ pub(crate) mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); @@ -526,7 +526,7 @@ pub(crate) mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); @@ -555,7 +555,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); diff --git a/nexus/src/app/sagas/instance_ip_detach.rs b/nexus/src/app/sagas/instance_ip_detach.rs index 7a71824376..2f1d76c853 100644 --- a/nexus/src/app/sagas/instance_ip_detach.rs +++ b/nexus/src/app/sagas/instance_ip_detach.rs @@ -381,7 +381,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let sled_agent = &cptestctx.sled_agent.sled_agent; @@ -425,7 +425,7 @@ pub(crate) mod test { let opctx = test_helpers::test_opctx(cptestctx); let sled_agent = &cptestctx.sled_agent.sled_agent; - let datastore = 
cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); @@ -475,7 +475,7 @@ pub(crate) mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); @@ -503,7 +503,7 @@ pub(crate) mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); @@ -534,7 +534,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let opctx = test_helpers::test_opctx(cptestctx); diff --git a/nexus/src/app/sagas/instance_migrate.rs b/nexus/src/app/sagas/instance_migrate.rs index da3b3e93ea..1cfd170faf 100644 --- a/nexus/src/app/sagas/instance_migrate.rs +++ b/nexus/src/app/sagas/instance_migrate.rs @@ -4,7 +4,8 @@ use super::{NexusActionContext, NexusSaga, ACTION_GENERATE_ID}; use crate::app::instance::{ - InstanceStateChangeError, InstanceStateChangeRequest, + InstanceRegisterReason, InstanceStateChangeError, + InstanceStateChangeRequest, }; use crate::app::sagas::{ declare_saga_actions, instance_common::allocate_vmm_ipv6, @@ -152,7 +153,7 @@ async fn sim_reserve_sled_resources( let resource = super::instance_common::reserve_vmm_resources( osagactx.nexus(), propolis_id, - params.instance.ncpus.0 .0 as u32, + u32::from(params.instance.ncpus.0 .0), params.instance.memory, constraints, ) @@ -356,6 +357,10 @@ async fn sim_ensure_destination_propolis( &db_instance, &vmm.id, &vmm, + InstanceRegisterReason::Migrate { + vmm_id: params.src_vmm.id, + target_vmm_id: vmm.id, + }, ) .await .map_err(ActionError::action_failed)?; @@ -609,7 +614,7 @@ mod tests { ) { let other_sleds = add_sleds(cptestctx, 1).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); @@ -653,7 +658,7 @@ mod tests { let log = &cptestctx.logctx.log; let other_sleds = add_sleds(cptestctx, 1).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); diff --git a/nexus/src/app/sagas/instance_start.rs b/nexus/src/app/sagas/instance_start.rs index b1d9506c31..e7caedfc9c 100644 --- a/nexus/src/app/sagas/instance_start.rs +++ b/nexus/src/app/sagas/instance_start.rs @@ -10,6 +10,7 @@ use super::{ instance_common::allocate_vmm_ipv6, NexusActionContext, NexusSaga, SagaInitError, ACTION_GENERATE_ID, }; +use crate::app::instance::InstanceRegisterReason; use crate::app::instance::InstanceStateChangeError; use crate::app::sagas::declare_saga_actions; use chrono::Utc; @@ -131,7 +132,7 @@ async fn sis_alloc_server( let resource = super::instance_common::reserve_vmm_resources( osagactx.nexus(), propolis_id, - hardware_threads.0 as u32, + u32::from(hardware_threads.0), reservoir_ram, 
db::model::SledReservationConstraints::none(), ) @@ -446,50 +447,18 @@ async fn sis_dpd_ensure_undo( async fn sis_v2p_ensure( sagactx: NexusActionContext, ) -> Result<(), ActionError> { - let params = sagactx.saga_params::()?; let osagactx = sagactx.user_data(); - let instance_id = params.db_instance.id(); - - info!(osagactx.log(), "start saga: ensuring v2p mappings are configured"; - "instance_id" => %instance_id); - - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - let sled_uuid = sagactx.lookup::("sled_id")?; - osagactx - .nexus() - .create_instance_v2p_mappings(&opctx, instance_id, sled_uuid) - .await - .map_err(ActionError::action_failed)?; - + let nexus = osagactx.nexus(); + nexus.background_tasks.activate(&nexus.background_tasks.task_v2p_manager); Ok(()) } async fn sis_v2p_ensure_undo( sagactx: NexusActionContext, ) -> Result<(), anyhow::Error> { - let params = sagactx.saga_params::()?; let osagactx = sagactx.user_data(); - let instance_id = params.db_instance.id(); - let sled_id = sagactx.lookup::("sled_id")?; - info!(osagactx.log(), "start saga: undoing v2p configuration"; - "instance_id" => %instance_id, - "sled_id" => %sled_id); - - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - osagactx - .nexus() - .delete_instance_v2p_mappings(&opctx, instance_id) - .await - .map_err(ActionError::action_failed)?; - + let nexus = osagactx.nexus(); + nexus.background_tasks.activate(&nexus.background_tasks.task_v2p_manager); Ok(()) } @@ -527,6 +496,7 @@ async fn sis_ensure_registered( &db_instance, &propolis_id, &vmm_record, + InstanceRegisterReason::Start { vmm_id: propolis_id }, ) .await .map_err(ActionError::action_failed)?; @@ -765,7 +735,7 @@ mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); let instance = create_instance(client).await; @@ -804,7 +774,7 @@ mod test { ) { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); let instance = create_instance(client).await; @@ -866,7 +836,7 @@ mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); let instance = create_instance(client).await; @@ -908,7 +878,7 @@ mod test { #[nexus_test(server = crate::Server)] async fn test_ensure_running_unwind(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let _project_id = setup_test_project(&client).await; let opctx = test_helpers::test_opctx(cptestctx); let instance = create_instance(client).await; diff --git a/nexus/src/app/sagas/project_create.rs b/nexus/src/app/sagas/project_create.rs index b31dd821f0..6893590519 100644 --- a/nexus/src/app/sagas/project_create.rs +++ b/nexus/src/app/sagas/project_create.rs @@ -188,7 +188,7 @@ mod test { fn 
test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -258,7 +258,7 @@ mod test { async fn test_saga_basic_usage_succeeds( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; // Before running the test, confirm we have no records of any projects. verify_clean_slate(nexus.datastore()).await; @@ -279,7 +279,7 @@ mod test { cptestctx: &ControlPlaneTestContext, ) { let log = &cptestctx.logctx.log; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); crate::app::sagas::test_helpers::action_failure_can_unwind::< SagaProjectCreate, diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index 290868aae2..287571cfd5 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -130,7 +130,8 @@ pub(crate) struct Params { pub silo_id: Uuid, pub project_id: Uuid, pub disk_id: Uuid, - pub attached_instance_and_sled: Option<(Uuid, Uuid)>, + pub attach_instance_id: Option, + pub use_the_pantry: bool, pub create_params: params::SnapshotCreate, } @@ -251,8 +252,7 @@ impl NexusSaga for SagaSnapshotCreate { // (DB) Tracks virtual resource provisioning. builder.append(space_account_action()); - let use_the_pantry = params.attached_instance_and_sled.is_none(); - if !use_the_pantry { + if !params.use_the_pantry { // (Sleds) If the disk is attached to an instance, send a // snapshot request to sled-agent to create a ZFS snapshot. builder.append(send_snapshot_request_to_sled_agent_action()); @@ -284,7 +284,7 @@ impl NexusSaga for SagaSnapshotCreate { // (DB) Mark snapshot as "ready" builder.append(finalize_snapshot_record_action()); - if use_the_pantry { + if params.use_the_pantry { // (Pantry) Set the state back to Detached // // This has to be the last saga node! Otherwise, concurrent @@ -336,7 +336,7 @@ async fn ssc_alloc_regions( let datasets_and_regions = osagactx .datastore() - .region_allocate( + .disk_region_allocate( &opctx, destination_volume_id, ¶ms::DiskSource::Blank { @@ -400,7 +400,7 @@ async fn ssc_regions_ensure( sub_volumes: vec![VolumeConstructionRequest::Region { block_size, blocks_per_extent, - extent_count: extent_count.try_into().unwrap(), + extent_count, gen: 1, opts: CrucibleOpts { id: destination_volume_id, @@ -675,22 +675,47 @@ async fn ssc_send_snapshot_request_to_sled_agent( let snapshot_id = sagactx.lookup::("snapshot_id")?; // If this node was reached, the saga initiator thought the disk was - // attached to an instance that was running on a specific sled. Contact that - // sled and ask it to initiate a snapshot. Note that this is best-effort: - // the instance may have stopped (or may be have stopped, had the disk - // detached, and resumed running on the same sled) while the saga was - // executing. - let (instance_id, sled_id) = - params.attached_instance_and_sled.ok_or_else(|| { - ActionError::action_failed(Error::internal_error( - "snapshot saga in send_snapshot_request_to_sled_agent but no \ - instance/sled pair was provided", - )) - })?; + // attached to an instance that _may_ have a running Propolis. Contact that + // Propolis and ask it to initiate a snapshot. 
Note that this is + // best-effort: the instance may have stopped (or may be have stopped, had + // the disk detached, and resumed running on the same sled) while the saga + // was executing. + let Some(attach_instance_id) = params.attach_instance_id else { + return Err(ActionError::action_failed(Error::internal_error( + "attach instance id is None!", + ))); + }; + + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + ¶ms.serialized_authn, + ); + + let (.., authz_instance) = LookupPath::new(&opctx, &osagactx.datastore()) + .instance_id(attach_instance_id) + .lookup_for(authz::Action::Read) + .await + .map_err(ActionError::action_failed)?; + + let sled_id = osagactx + .datastore() + .instance_fetch_with_vmm(&opctx, &authz_instance) + .await + .map_err(ActionError::action_failed)? + .sled_id(); + + // If this instance does not currently have a sled, we can't continue this + // saga - the user will have to reissue the snapshot request and it will get + // run on a Pantry. + let Some(sled_id) = sled_id else { + return Err(ActionError::action_failed(Error::unavail( + "sled id is None!", + ))); + }; info!(log, "asking for disk snapshot from Propolis via sled agent"; "disk_id" => %params.disk_id, - "instance_id" => %instance_id, + "instance_id" => %attach_instance_id, "sled_id" => %sled_id); let sled_agent_client = osagactx @@ -702,7 +727,7 @@ async fn ssc_send_snapshot_request_to_sled_agent( retry_until_known_result(log, || async { sled_agent_client .instance_issue_disk_snapshot_request( - &instance_id, + &attach_instance_id, ¶ms.disk_id, &InstanceIssueDiskSnapshotRequestBody { snapshot_id }, ) @@ -838,6 +863,16 @@ async fn ssc_attach_disk_to_pantry( info!(log, "disk {} in state finalizing", params.disk_id); } + external::DiskState::Attached(attach_instance_id) => { + // No state change required + info!( + log, + "disk {} in state attached to instance id {}", + params.disk_id, + attach_instance_id + ); + } + _ => { // Return a 503 indicating that the user should retry return Err(ActionError::action_failed( @@ -1358,7 +1393,10 @@ async fn ssc_create_volume_record( let disk_volume = osagactx .datastore() - .volume_checkout(disk.volume_id) + .volume_checkout( + disk.volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) .await .map_err(ActionError::action_failed)?; @@ -1534,12 +1572,9 @@ fn create_snapshot_from_disk( if let Some(socket_map) = socket_map { for target in &mut opts.target { - *target = socket_map - .get(target) - .ok_or_else(|| { - anyhow!("target {} not found in map!", target) - })? 
- .clone(); + target.clone_from(socket_map.get(target).ok_or_else( + || anyhow!("target {} not found in map!", target), + )?); } } @@ -1815,14 +1850,16 @@ mod test { project_id: Uuid, disk_id: Uuid, disk: NameOrId, - instance_and_sled: Option<(Uuid, Uuid)>, + attach_instance_id: Option, + use_the_pantry: bool, ) -> Params { Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), silo_id, project_id, disk_id, - attached_instance_and_sled: instance_and_sled, + attach_instance_id, + use_the_pantry, create_params: params::SnapshotCreate { identity: IdentityMetadataCreateParams { name: "my-snapshot".parse().expect("Invalid disk name"), @@ -1836,7 +1873,7 @@ mod test { pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -1849,7 +1886,7 @@ mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters @@ -1871,7 +1908,8 @@ mod test { project_id, disk_id, Name::from_str(DISK_NAME).unwrap().into(), - None, + None, // not attached to an instance + true, // use the pantry ); let dag = create_saga_dag::(params).unwrap(); let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); @@ -1935,7 +1973,7 @@ mod test { // Verifies: // - No snapshot records exist // - No region snapshot records exist - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); assert!(no_snapshot_records_exist(datastore).await); assert!(no_region_snapshot_records_exist(datastore).await); } @@ -1975,7 +2013,7 @@ mod test { // Read out the instance's assigned sled, then poke the instance to get // it from the Starting state to the Running state so the test disk can // be snapshotted. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let (.., authz_instance) = LookupPath::new(&opctx, nexus.datastore()) .instance_id(instance.identity.id) @@ -2039,7 +2077,7 @@ mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters @@ -2079,7 +2117,7 @@ mod test { // since this is just a test, bypass the normal // attachment machinery and just update the disk's // database record directly. 
- let instance_and_sled = if !use_the_pantry { + let attach_instance_id = if !use_the_pantry { let state = setup_test_instance( cptestctx, client, @@ -2092,11 +2130,7 @@ mod test { ) .await; - let sled_id = state - .sled_id() - .expect("running instance should have a vmm"); - - Some((state.instance().id(), sled_id)) + Some(state.instance().id()) } else { None }; @@ -2107,7 +2141,8 @@ mod test { project_id, disk_id, Name::from_str(DISK_NAME).unwrap().into(), - instance_and_sled, + attach_instance_id, + use_the_pantry, ) } }) @@ -2181,7 +2216,7 @@ mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters @@ -2205,36 +2240,31 @@ mod test { Name::from_str(DISK_NAME).unwrap().into(), // The disk isn't attached at this time, so don't supply a sled. None, + true, // use the pantry ); let dag = create_saga_dag::(params).unwrap(); let runnable_saga = nexus.create_runnable_saga(dag).await.unwrap(); // Before running the saga, attach the disk to an instance! - let (.., authz_disk, db_disk) = - LookupPath::new(&opctx, nexus.datastore()) - .disk_id(disk_id) - .fetch_for(authz::Action::Read) - .await - .expect("Failed to look up created disk"); - - assert!(nexus - .datastore() - .disk_update_runtime( - &opctx, - &authz_disk, - &db_disk.runtime().attach(Uuid::new_v4()), - ) - .await - .expect("failed to attach disk")); + let _instance_and_vmm = setup_test_instance( + &cptestctx, + &client, + vec![params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { + name: Name::from_str(DISK_NAME).unwrap(), + }, + )], + ) + .await; // Actually run the saga let output = nexus.run_saga(runnable_saga).await; - // Expect to see 503 + // Expect to see 409 match output { Err(e) => { - assert!(matches!(e, Error::ServiceUnavailable { .. })); + assert!(matches!(e, Error::Conflict { .. })); } Ok(_) => { @@ -2269,6 +2299,7 @@ mod test { Name::from_str(DISK_NAME).unwrap().into(), // The disk isn't attached at this time, so don't supply a sled. None, + true, // use the pantry ); let dag = create_saga_dag::(params).unwrap(); @@ -2290,7 +2321,7 @@ mod test { DiskTest::new(cptestctx).await; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_id = create_project_and_disk_and_pool(&client).await; // Build the saga DAG with the provided test parameters @@ -2313,8 +2344,6 @@ mod test { // the saga, stopping the instance, detaching the disk, and then letting // the saga run. 
let fake_instance_id = Uuid::new_v4(); - let fake_sled_id = - Uuid::parse_str(nexus_test_utils::SLED_AGENT_UUID).unwrap(); let params = new_test_params( &opctx, @@ -2322,7 +2351,8 @@ mod test { project_id, disk_id, Name::from_str(DISK_NAME).unwrap().into(), - Some((fake_instance_id, fake_sled_id)), + Some(fake_instance_id), + false, // use the pantry ); let dag = create_saga_dag::(params).unwrap(); @@ -2363,10 +2393,6 @@ mod test { ) .await; - let sled_id = instance_state - .sled_id() - .expect("running instance should have a vmm"); - // Rerun the saga let params = new_test_params( &opctx, @@ -2374,7 +2400,8 @@ mod test { project_id, disk_id, Name::from_str(DISK_NAME).unwrap().into(), - Some((instance_state.instance().id(), sled_id)), + Some(instance_state.instance().id()), + false, // use the pantry ); let dag = create_saga_dag::(params).unwrap(); diff --git a/nexus/src/app/sagas/test_helpers.rs b/nexus/src/app/sagas/test_helpers.rs index 1b383d27bb..bacd0f1c9d 100644 --- a/nexus/src/app/sagas/test_helpers.rs +++ b/nexus/src/app/sagas/test_helpers.rs @@ -34,7 +34,7 @@ type ControlPlaneTestContext = pub fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -42,7 +42,7 @@ pub(crate) async fn instance_start( cptestctx: &ControlPlaneTestContext, id: &Uuid, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -62,7 +62,7 @@ pub(crate) async fn instance_stop( cptestctx: &ControlPlaneTestContext, id: &Uuid, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -83,7 +83,7 @@ pub(crate) async fn instance_stop_by_name( name: &str, project_name: &str, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -104,7 +104,7 @@ pub(crate) async fn instance_delete_by_name( name: &str, project_name: &str, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -126,7 +126,7 @@ pub(crate) async fn instance_simulate( ) { info!(&cptestctx.logctx.log, "Poking simulated instance"; "instance_id" => %instance_id); - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let sa = nexus .instance_sled_by_id(instance_id) .await @@ -145,7 +145,7 @@ pub(crate) async fn instance_simulate_by_name( "instance_name" => %name, "project_name" => %project_name); - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = test_opctx(&cptestctx); let instance_selector = nexus_types::external_api::params::InstanceSelector { @@ -168,7 +168,7 @@ pub async fn instance_fetch( cptestctx: &ControlPlaneTestContext, instance_id: Uuid, ) -> InstanceAndActiveVmm { - let datastore = cptestctx.server.apictx().nexus.datastore().clone(); + let datastore = 
cptestctx.server.server_context().nexus.datastore().clone(); let opctx = test_opctx(&cptestctx); let (.., authz_instance) = LookupPath::new(&opctx, &datastore) .instance_id(instance_id) @@ -194,7 +194,7 @@ pub async fn no_virtual_provisioning_resource_records_exist( use nexus_db_queries::db::model::VirtualProvisioningResource; use nexus_db_queries::db::schema::virtual_provisioning_resource::dsl; - let datastore = cptestctx.server.apictx().nexus.datastore().clone(); + let datastore = cptestctx.server.server_context().nexus.datastore().clone(); let conn = datastore.pool_connection_for_tests().await.unwrap(); datastore @@ -223,7 +223,7 @@ pub async fn no_virtual_provisioning_collection_records_using_instances( use nexus_db_queries::db::model::VirtualProvisioningCollection; use nexus_db_queries::db::schema::virtual_provisioning_collection::dsl; - let datastore = cptestctx.server.apictx().nexus.datastore().clone(); + let datastore = cptestctx.server.server_context().nexus.datastore().clone(); let conn = datastore.pool_connection_for_tests().await.unwrap(); datastore diff --git a/nexus/src/app/sagas/test_saga.rs b/nexus/src/app/sagas/test_saga.rs index 0520a17602..9ccdc4aebc 100644 --- a/nexus/src/app/sagas/test_saga.rs +++ b/nexus/src/app/sagas/test_saga.rs @@ -75,7 +75,7 @@ type ControlPlaneTestContext = #[nexus_test(server = crate::Server)] async fn test_saga_stuck(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let params = Params {}; let dag = create_saga_dag::(params).unwrap(); let runnable_saga = nexus.create_runnable_saga(dag.clone()).await.unwrap(); diff --git a/nexus/src/app/sagas/vpc_create.rs b/nexus/src/app/sagas/vpc_create.rs index 6b48e4087a..cc40a8d43a 100644 --- a/nexus/src/app/sagas/vpc_create.rs +++ b/nexus/src/app/sagas/vpc_create.rs @@ -291,15 +291,13 @@ async fn svc_create_subnet( // Allocate the first /64 sub-range from the requested or created // prefix. 
- let ipv6_block = external::Ipv6Net( - ipnetwork::Ipv6Network::new(db_vpc.ipv6_prefix.network(), 64) - .map_err(|_| { - external::Error::internal_error( - "Failed to allocate default IPv6 subnet", - ) - }) - .map_err(ActionError::action_failed)?, - ); + let ipv6_block = oxnet::Ipv6Net::new(db_vpc.ipv6_prefix.prefix(), 64) + .map_err(|_| { + external::Error::internal_error( + "Failed to allocate default IPv6 subnet", + ) + }) + .map_err(ActionError::action_failed)?; let subnet = db::model::VpcSubnet::new( default_subnet_id, @@ -496,7 +494,7 @@ pub(crate) mod test { fn test_opctx(cptestctx: &ControlPlaneTestContext) -> OpContext { OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ) } @@ -505,7 +503,7 @@ pub(crate) mod test { project_id: Uuid, action: authz::Action, ) -> authz::Project { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_selector = params::ProjectSelector { project: NameOrId::Id(project_id) }; let opctx = test_opctx(&cptestctx); @@ -523,7 +521,7 @@ pub(crate) mod test { project_id: Uuid, ) { let opctx = test_opctx(&cptestctx); - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); let default_name = Name::try_from("default".to_string()).unwrap(); let system_name = Name::try_from("system".to_string()).unwrap(); @@ -710,7 +708,7 @@ pub(crate) mod test { cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_and_project(&client).await; delete_project_vpc_defaults(&cptestctx, project_id).await; @@ -740,7 +738,7 @@ pub(crate) mod test { let log = &cptestctx.logctx.log; let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let project_id = create_org_and_project(&client).await; delete_project_vpc_defaults(&cptestctx, project_id).await; diff --git a/nexus/src/app/silo.rs b/nexus/src/app/silo.rs index 487af96aab..efde55cbd1 100644 --- a/nexus/src/app/silo.rs +++ b/nexus/src/app/silo.rs @@ -16,6 +16,7 @@ use nexus_db_queries::db::identity::{Asset, Resource}; use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::{self, lookup}; use nexus_db_queries::{authn, authz}; +use nexus_reconfigurator_execution::blueprint_nexus_external_ips; use nexus_reconfigurator_execution::silo_dns_name; use nexus_types::internal_api::params::DnsRecord; use omicron_common::api::external::http_pagination::PaginatedBy; @@ -96,13 +97,16 @@ impl super::Nexus { // Set up an external DNS name for this Silo's API and console // endpoints (which are the same endpoint). 
- let target_blueprint = datastore + let nexus_external_dns_zones = datastore + .dns_zones_list_all(nexus_opctx, DnsGroup::External) + .await + .internal_context("listing external DNS zones")?; + let (_, target_blueprint) = datastore .blueprint_target_get_current_full(opctx) .await .internal_context("loading target blueprint")?; - let target = target_blueprint.as_ref().map(|(_, blueprint)| blueprint); - let (nexus_external_ips, nexus_external_dns_zones) = - datastore.nexus_external_addresses(nexus_opctx, target).await?; + let nexus_external_ips = + blueprint_nexus_external_ips(&target_blueprint); let dns_records: Vec = nexus_external_ips .into_iter() .map(|addr| match addr { diff --git a/nexus/src/app/sled.rs b/nexus/src/app/sled.rs index 06e50f2ecd..c7fc651823 100644 --- a/nexus/src/app/sled.rs +++ b/nexus/src/app/sled.rs @@ -4,16 +4,16 @@ //! Sleds, and the hardware and services within them. +use crate::external_api::params; use crate::internal_api::params::{ - PhysicalDiskDeleteRequest, PhysicalDiskPutRequest, SledAgentInfo, SledRole, - ZpoolPutRequest, + PhysicalDiskPutRequest, SledAgentInfo, SledRole, ZpoolPutRequest, }; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; use nexus_db_queries::db::lookup; -use nexus_db_queries::db::lookup::LookupPath; use nexus_db_queries::db::model::DatasetKind; +use nexus_types::deployment::SledFilter; use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledProvisionPolicy; use omicron_common::api::external::DataPageParams; @@ -70,7 +70,17 @@ impl super::Nexus { self.rack_id, info.generation.into(), ); - self.db_datastore.sled_upsert(sled).await?; + let (_, was_modified) = self.db_datastore.sled_upsert(sled).await?; + + // If a new sled-agent just came online we want to trigger inventory + // collection. + // + // This will allow us to learn about disks so that they can be added to + // the control plane. + if was_modified { + self.activate_inventory_collection(); + } + Ok(()) } @@ -106,7 +116,9 @@ impl super::Nexus { opctx: &OpContext, pagparams: &DataPageParams<'_, Uuid>, ) -> ListResultVec { - self.db_datastore.sled_list(&opctx, pagparams).await + self.db_datastore + .sled_list(&opctx, pagparams, SledFilter::InService) + .await } pub async fn sled_client( @@ -173,6 +185,15 @@ impl super::Nexus { // Physical disks + pub async fn physical_disk_lookup<'a>( + &'a self, + opctx: &'a OpContext, + disk_selector: ¶ms::PhysicalDiskPath, + ) -> Result, Error> { + Ok(lookup::LookupPath::new(&opctx, &self.db_datastore) + .physical_disk(disk_selector.disk_id)) + } + pub(crate) async fn sled_list_physical_disks( &self, opctx: &OpContext, @@ -200,46 +221,21 @@ impl super::Nexus { ) -> Result<(), Error> { info!( self.log, "upserting physical disk"; - "sled_id" => request.sled_id.to_string(), - "vendor" => request.vendor.to_string(), - "serial" => request.serial.to_string(), - "model" => request.model.to_string() + "physical_disk_id" => %request.id, + "sled_id" => %request.sled_id, + "vendor" => %request.vendor, + "serial" => %request.serial, + "model" => %request.model, ); let disk = db::model::PhysicalDisk::new( + request.id, request.vendor, request.serial, request.model, request.variant.into(), request.sled_id, ); - self.db_datastore.physical_disk_upsert(&opctx, disk).await?; - Ok(()) - } - - /// Removes a physical disk from the database. - /// - /// TODO: Remove Zpools and datasets contained within this disk. 
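(Aside on the sled.rs hunk above: `sled_upsert` now reports whether the row actually changed, and Nexus uses that to kick an inventory collection as soon as a new or updated sled-agent checks in, so newly reported disks are discovered promptly. A minimal sketch of the pattern; `InventoryTask` and `activate` are hypothetical stand-ins, not the real Nexus background-task API.)

    /// Hypothetical stand-in for the background inventory-collection task.
    struct InventoryTask;

    impl InventoryTask {
        fn activate(&self) {
            // In Nexus this wakes the collector instead of waiting for its
            // next periodic run.
        }
    }

    fn on_sled_agent_checkin(row_was_modified: bool, inventory: &InventoryTask) {
        // Only trigger a collection when the upsert changed something; a
        // plain heartbeat should not generate extra work.
        if row_was_modified {
            inventory.activate();
        }
    }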
- pub(crate) async fn delete_physical_disk( - &self, - opctx: &OpContext, - request: PhysicalDiskDeleteRequest, - ) -> Result<(), Error> { - info!( - self.log, "deleting physical disk"; - "sled_id" => request.sled_id.to_string(), - "vendor" => request.vendor.to_string(), - "serial" => request.serial.to_string(), - "model" => request.model.to_string() - ); - self.db_datastore - .physical_disk_delete( - &opctx, - request.vendor, - request.serial, - request.model, - request.sled_id, - ) - .await?; + self.db_datastore.physical_disk_insert(&opctx, disk).await?; Ok(()) } @@ -249,23 +245,21 @@ impl super::Nexus { pub(crate) async fn upsert_zpool( &self, opctx: &OpContext, - id: Uuid, - sled_id: Uuid, - info: ZpoolPutRequest, + request: ZpoolPutRequest, ) -> Result<(), Error> { - info!(self.log, "upserting zpool"; "sled_id" => sled_id.to_string(), "zpool_id" => id.to_string()); + info!( + self.log, "upserting zpool"; + "sled_id" => %request.sled_id, + "zpool_id" => %request.id, + "physical_disk_id" => %request.physical_disk_id, + ); - let (_authz_disk, db_disk) = - LookupPath::new(&opctx, &self.db_datastore) - .physical_disk( - &info.disk_vendor, - &info.disk_serial, - &info.disk_model, - ) - .fetch() - .await?; - let zpool = db::model::Zpool::new(id, sled_id, db_disk.uuid()); - self.db_datastore.zpool_upsert(zpool).await?; + let zpool = db::model::Zpool::new( + request.id, + request.sled_id, + request.physical_disk_id, + ); + self.db_datastore.zpool_insert(&opctx, zpool).await?; Ok(()) } @@ -279,7 +273,13 @@ impl super::Nexus { address: SocketAddrV6, kind: DatasetKind, ) -> Result<(), Error> { - info!(self.log, "upserting dataset"; "zpool_id" => zpool_id.to_string(), "dataset_id" => id.to_string(), "address" => address.to_string()); + info!( + self.log, + "upserting dataset"; + "zpool_id" => zpool_id.to_string(), + "dataset_id" => id.to_string(), + "address" => address.to_string() + ); let dataset = db::model::Dataset::new(id, zpool_id, address, kind); self.db_datastore.dataset_upsert(dataset).await?; Ok(()) diff --git a/nexus/src/app/snapshot.rs b/nexus/src/app/snapshot.rs index 0c90ac31fb..c28d180d3c 100644 --- a/nexus/src/app/snapshot.rs +++ b/nexus/src/app/snapshot.rs @@ -12,7 +12,6 @@ use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; -use omicron_common::api::external::InstanceState; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::NameOrId; @@ -93,7 +92,7 @@ impl super::Nexus { // If there isn't a running propolis, Nexus needs to use the Crucible // Pantry to make this snapshot - let instance_and_sled = if let Some(attach_instance_id) = + let use_the_pantry = if let Some(attach_instance_id) = &db_disk.runtime_state.attach_instance_id { let (.., authz_instance) = @@ -107,29 +106,12 @@ impl super::Nexus { .instance_fetch_with_vmm(&opctx, &authz_instance) .await?; - match instance_state.vmm().as_ref() { - None => None, - Some(vmm) => match vmm.runtime.state.0 { - // If the VM might be running, or it's rebooting (which - // doesn't deactivate the volume), send the snapshot request - // to the relevant VMM. Otherwise, there's no way to know if - // the instance has attached the volume or is in the process - // of detaching it, so bail. 
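(Aside on the snapshot.rs change that begins above and concludes just below: the old per-state match is replaced by a single question, namely whether the attached instance currently has a VMM. If a Propolis may exist, the snapshot request goes to it; otherwise Nexus uses the Crucible Pantry. A sketch of that decision in isolation, where `has_vmm` stands in for `instance_state.vmm().is_some()`.)

    fn use_the_pantry(attached_to_instance: bool, has_vmm: bool) -> bool {
        if attached_to_instance {
            // A Propolis _may_ exist; only fall back to the pantry when it
            // does not.
            !has_vmm
        } else {
            // A disk that is not attached to any instance always goes
            // through the pantry.
            true
        }
    }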
- InstanceState::Running | InstanceState::Rebooting => { - Some((*attach_instance_id, vmm.sled_id)) - } - _ => { - return Err(Error::invalid_request(&format!( - "cannot snapshot attached disk for instance in \ - state {}", - vmm.runtime.state.0 - ))); - } - }, - } + // If a Propolis _may_ exist, send the snapshot request there, + // otherwise use the pantry. + !instance_state.vmm().is_some() } else { // This disk is not attached to an instance, use the pantry. - None + true }; let saga_params = sagas::snapshot_create::Params { @@ -137,7 +119,8 @@ impl super::Nexus { silo_id: authz_silo.id(), project_id: authz_project.id(), disk_id: authz_disk.id(), - attached_instance_and_sled: instance_and_sled, + attach_instance_id: db_disk.runtime_state.attach_instance_id, + use_the_pantry, create_params: params.clone(), }; diff --git a/nexus/src/app/switch_interface.rs b/nexus/src/app/switch_interface.rs index c3ce0f553c..bb4cba4c7b 100644 --- a/nexus/src/app/switch_interface.rs +++ b/nexus/src/app/switch_interface.rs @@ -11,8 +11,9 @@ use nexus_db_queries::db::lookup; use nexus_db_queries::db::lookup::LookupPath; use omicron_common::api::external::LookupResult; use omicron_common::api::external::{ - CreateResult, DataPageParams, DeleteResult, Error, IpNet, ListResultVec, + CreateResult, DataPageParams, DeleteResult, Error, ListResultVec, }; +use oxnet::IpNet; use std::sync::Arc; use uuid::Uuid; diff --git a/nexus/src/app/switch_port.rs b/nexus/src/app/switch_port.rs index c7d5272ae1..7a6d56252a 100644 --- a/nexus/src/app/switch_port.rs +++ b/nexus/src/app/switch_port.rs @@ -2,33 +2,24 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//XXX -#![allow(unused_imports)] - -use crate::app::sagas; use crate::external_api::params; +use crate::external_api::shared::SwitchLinkState; use db::datastore::SwitchPortSettingsCombinedResult; -use dropshot::HttpError; +use dpd_client::types::LinkId; +use dpd_client::types::PortId; use http::StatusCode; -use ipnetwork::IpNetwork; -use nexus_db_model::{SwitchLinkFec, SwitchLinkSpeed}; -use nexus_db_queries::authn; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; use nexus_db_queries::db::datastore::UpdatePrecondition; use nexus_db_queries::db::model::{SwitchPort, SwitchPortSettings}; -use nexus_types::identity::Resource; +use nexus_db_queries::db::DataStore; use omicron_common::api::external::http_pagination::PaginatedBy; +use omicron_common::api::external::SwitchLocation; use omicron_common::api::external::{ - self, CreateResult, DataPageParams, DeleteResult, ListResultVec, + self, CreateResult, DataPageParams, DeleteResult, Error, ListResultVec, LookupResult, Name, NameOrId, UpdateResult, }; -use sled_agent_client::types::BgpConfig; -use sled_agent_client::types::BgpPeerConfig; -use sled_agent_client::types::{ - EarlyNetworkConfig, PortConfigV1, RackNetworkConfigV1, RouteConfig, -}; use std::sync::Arc; use uuid::Uuid; @@ -168,14 +159,6 @@ impl super::Nexus { self.db_datastore.switch_port_list(opctx, pagparams).await } - pub(crate) async fn list_switch_ports_with_uplinks( - &self, - opctx: &OpContext, - ) -> ListResultVec { - opctx.authorize(authz::Action::Read, &authz::FLEET).await?; - self.db_datastore.switch_ports_with_uplinks(opctx).await - } - pub(crate) async fn set_switch_port_settings_id( &self, opctx: &OpContext, @@ -296,4 +279,80 @@ impl super::Nexus { Ok(()) } + + pub(crate) async fn switch_port_status( + &self, + opctx: 
&OpContext, + switch: Name, + port: Name, + ) -> Result { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + + let loc: SwitchLocation = switch.as_str().parse().map_err(|e| { + Error::invalid_request(&format!( + "invalid switch name {switch}: {e}" + )) + })?; + + let port_id = PortId::Qsfp(port.as_str().parse().map_err(|e| { + Error::invalid_request(&format!("invalid port name: {port} {e}")) + })?); + + // no breakout support yet, link id always 0 + let link_id = LinkId(0); + + let dpd_clients = self.dpd_clients().await.map_err(|e| { + Error::internal_error(&format!("dpd clients get: {e}")) + })?; + + let dpd = dpd_clients.get(&loc).ok_or(Error::internal_error( + &format!("no client for switch {switch}"), + ))?; + + let status = dpd + .link_get(&port_id, &link_id) + .await + .map_err(|e| { + Error::internal_error(&format!( + "failed to get port status for {port} {e}" + )) + })? + .into_inner(); + + let monitors = match dpd.transceiver_monitors_get(&port_id).await { + Ok(resp) => Some(resp.into_inner()), + Err(e) => { + if let Some(StatusCode::NOT_FOUND) = e.status() { + None + } else { + return Err(Error::internal_error(&format!( + "failed to get txr monitors for {port} {e}" + ))); + } + } + }; + + let link_json = serde_json::to_value(status).map_err(|e| { + Error::internal_error(&format!( + "failed to marshal link info to json: {e}" + )) + })?; + let monitors_json = match monitors { + Some(x) => Some(serde_json::to_value(x).map_err(|e| { + Error::internal_error(&format!( + "failed to marshal monitors to json: {e}" + )) + })?), + None => None, + }; + Ok(SwitchLinkState::new(link_json, monitors_json)) + } +} + +pub(crate) async fn list_switch_ports_with_uplinks( + datastore: &DataStore, + opctx: &OpContext, +) -> ListResultVec { + opctx.authorize(authz::Action::Read, &authz::FLEET).await?; + datastore.switch_ports_with_uplinks(opctx).await } diff --git a/nexus/src/app/vpc_subnet.rs b/nexus/src/app/vpc_subnet.rs index 4c5a569201..f081f351db 100644 --- a/nexus/src/app/vpc_subnet.rs +++ b/nexus/src/app/vpc_subnet.rs @@ -19,6 +19,7 @@ use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; +use omicron_common::api::external::Ipv6NetExt; use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupResult; use omicron_common::api::external::NameOrId; @@ -74,13 +75,13 @@ impl super::Nexus { let (.., authz_vpc, db_vpc) = vpc_lookup.fetch().await?; // Validate IPv4 range - if !params.ipv4_block.network().is_private() { + if !params.ipv4_block.prefix().is_private() { return Err(external::Error::invalid_request( "VPC Subnet IPv4 address ranges must be from a private range", )); } - if params.ipv4_block.prefix() < MIN_VPC_IPV4_SUBNET_PREFIX - || params.ipv4_block.prefix() + if params.ipv4_block.width() < MIN_VPC_IPV4_SUBNET_PREFIX + || params.ipv4_block.width() > self.tunables.max_vpc_ipv4_subnet_prefix { return Err(external::Error::invalid_request(&format!( @@ -116,7 +117,7 @@ impl super::Nexus { let ipv6_block = db_vpc .ipv6_prefix .random_subnet( - external::Ipv6Net::VPC_SUBNET_IPV6_PREFIX_LENGTH, + oxnet::Ipv6Net::VPC_SUBNET_IPV6_PREFIX_LENGTH, ) .map(|block| block.0) .ok_or_else(|| { @@ -148,7 +149,7 @@ impl super::Nexus { self.log, "autogenerated random IPv6 range overlap"; "subnet_id" => ?subnet_id, - "ipv6_block" => %ipv6_block.0 + "ipv6_block" => %ipv6_block ); retry += 1; continue; @@ -193,10 +194,10 @@ 
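(Aside on the vpc_subnet.rs hunk above: the ipnetwork accessors are swapped for oxnet's, where `prefix()` yields the network address and `width()` the prefix length. A stand-alone sketch of the same IPv4 validation under that assumption; the bounds here are illustrative constants, not the real `MIN_VPC_IPV4_SUBNET_PREFIX` or the Nexus tunable.)

    const MIN_PREFIX: u8 = 8; // illustrative lower bound
    const MAX_PREFIX: u8 = 26; // illustrative upper bound

    fn validate_ipv4_block(block: &oxnet::Ipv4Net) -> Result<(), String> {
        if !block.prefix().is_private() {
            return Err(
                "VPC Subnet IPv4 address ranges must be from a private range"
                    .to_string(),
            );
        }
        if block.width() < MIN_PREFIX || block.width() > MAX_PREFIX {
            return Err(format!(
                "VPC Subnet IPv4 prefix length must be between /{MIN_PREFIX} and /{MAX_PREFIX}"
            ));
        }
        Ok(())
    }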
impl super::Nexus { if !ipv6_block.is_vpc_subnet(&db_vpc.ipv6_prefix) { return Err(external::Error::invalid_request(&format!( concat!( - "VPC Subnet IPv6 address range '{}' is not valid for ", - "VPC with IPv6 prefix '{}'", - ), - ipv6_block, db_vpc.ipv6_prefix.0 .0, + "VPC Subnet IPv6 address range '{}' is not valid for ", + "VPC with IPv6 prefix '{}'", + ), + ipv6_block, db_vpc.ipv6_prefix.0, ))); } let subnet = db::model::VpcSubnet::new( diff --git a/nexus/src/context.rs b/nexus/src/context.rs index cf2b9d6f17..1512671056 100644 --- a/nexus/src/context.rs +++ b/nexus/src/context.rs @@ -29,6 +29,61 @@ use std::str::FromStr; use std::sync::Arc; use uuid::Uuid; +/// Indicates the kind of HTTP server. +#[derive(Clone, Copy)] +pub enum ServerKind { + /// This serves the internal API. + Internal, + /// This serves the external API over the normal public network. + External, + /// This serves the external API proxied over the technician port. + Techport, +} + +/// The API context for each distinct Dropshot server. +/// +/// This packages up the main server context, which is shared by all API servers +/// (e.g., internal, external, and techport). It also includes the +/// [`ServerKind`], which makes it possible to know which server is handling any +/// particular request. +#[derive(Clone)] +pub struct ApiContext { + /// The kind of server. + pub kind: ServerKind, + /// Shared state available to all endpoint handlers. + pub context: Arc, +} + +impl ApiContext { + /// Create a new context with a rack ID and logger. This creates the + /// underlying `Nexus` as well. + pub async fn for_internal( + rack_id: Uuid, + log: Logger, + config: &NexusConfig, + ) -> Result { + ServerContext::new(rack_id, log, config) + .await + .map(|context| Self { kind: ServerKind::Internal, context }) + } + + /// Clone self for use by the external Dropshot server. + pub fn for_external(&self) -> Self { + Self { kind: ServerKind::External, context: self.context.clone() } + } + + /// Clone self for use by the techport Dropshot server. 
+ pub fn for_techport(&self) -> Self { + Self { kind: ServerKind::Techport, context: self.context.clone() } + } +} + +impl std::borrow::Borrow for ApiContext { + fn borrow(&self) -> &ServerContext { + &self.context + } +} + /// Shared state available to all API request handlers pub struct ServerContext { /// reference to the underlying nexus @@ -157,7 +212,8 @@ impl ServerContext { // Set up DNS Client let resolver = match config.deployment.internal_dns { nexus_config::InternalDns::FromSubnet { subnet } => { - let az_subnet = Ipv6Subnet::::new(subnet.net().ip()); + let az_subnet = + Ipv6Subnet::::new(subnet.net().addr()); info!( log, "Setting up resolver using DNS servers for subnet: {:?}", @@ -262,18 +318,19 @@ impl ServerContext { /// Authenticates an incoming request to the external API and produces a new /// operation context for it pub(crate) async fn op_context_for_external_api( - rqctx: &dropshot::RequestContext>, + rqctx: &dropshot::RequestContext, ) -> Result { let apictx = rqctx.context(); OpContext::new_async( &rqctx.log, async { - let authn = - Arc::new(apictx.external_authn.authn_request(rqctx).await?); - let datastore = Arc::clone(apictx.nexus.datastore()); + let authn = Arc::new( + apictx.context.external_authn.authn_request(rqctx).await?, + ); + let datastore = Arc::clone(apictx.context.nexus.datastore()); let authz = authz::Context::new( Arc::clone(&authn), - Arc::clone(&apictx.authz), + Arc::clone(&apictx.context.authz), datastore, ); Ok((authn, authz)) @@ -285,17 +342,17 @@ pub(crate) async fn op_context_for_external_api( } pub(crate) async fn op_context_for_internal_api( - rqctx: &dropshot::RequestContext>, + rqctx: &dropshot::RequestContext, ) -> OpContext { - let apictx = rqctx.context(); + let apictx = &rqctx.context(); OpContext::new_async( &rqctx.log, async { - let authn = Arc::clone(&apictx.internal_authn); - let datastore = Arc::clone(apictx.nexus.datastore()); + let authn = Arc::clone(&apictx.context.internal_authn); + let datastore = Arc::clone(apictx.context.nexus.datastore()); let authz = authz::Context::new( Arc::clone(&authn), - Arc::clone(&apictx.authz), + Arc::clone(&apictx.context.authz), datastore, ); Ok::<_, std::convert::Infallible>((authn, authz)) diff --git a/nexus/src/external_api/console_api.rs b/nexus/src/external_api/console_api.rs index 86a808a47b..caff195047 100644 --- a/nexus/src/external_api/console_api.rs +++ b/nexus/src/external_api/console_api.rs @@ -8,7 +8,20 @@ //! external API, but in order to avoid CORS issues for now, we are serving //! these routes directly from the external API. -use crate::ServerContext; +// `HeaderName` and `HeaderValue` contain `bytes::Bytes`, which trips +// the `declare_interior_mutable_const` lint. But in a `const fn` +// context, the `AtomicPtr` that is used in `Bytes` only ever points +// to a `&'static str`, so does not have interior mutability in that +// context. +// +// A Clippy bug means that even if you ignore interior mutability of +// `Bytes` (the default behavior), it will still not ignore it for types +// where the only interior mutability is through `Bytes`. This is fixed +// in rust-lang/rust-clippy#12691, which should land in the Rust 1.80 +// toolchain; we can remove this attribute then. 
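(Aside on the comment above: the allow attribute it motivates follows immediately below. The pattern being protected is declaring header values as consts so that `HeaderValue::from_static` gets the chance to panic at build time instead of per request. A minimal sketch, assuming the same `http`-crate const behavior the hunk relies on.)

    use http::HeaderValue;

    // Evaluated at compile time; an invalid byte in the literal fails the
    // build instead of panicking while serving a request.
    #[allow(clippy::declare_interior_mutable_const)]
    const CONTENT_ENCODING_GZIP: HeaderValue = HeaderValue::from_static("gzip");

    fn maybe_content_encoding(serving_gzip: bool) -> Option<HeaderValue> {
        serving_gzip.then_some(CONTENT_ENCODING_GZIP)
    }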
+#![allow(clippy::declare_interior_mutable_const)] + +use crate::context::ApiContext; use anyhow::Context; use camino::{Utf8Path, Utf8PathBuf}; use dropshot::{ @@ -16,9 +29,8 @@ use dropshot::{ HttpResponseFound, HttpResponseHeaders, HttpResponseSeeOther, HttpResponseUpdatedNoContent, Path, Query, RequestContext, }; -use http::{header, Response, StatusCode, Uri}; +use http::{header, HeaderName, HeaderValue, Response, StatusCode, Uri}; use hyper::Body; -use mime_guess; use nexus_db_model::AuthenticationMode; use nexus_db_queries::authn::silos::IdentityProviderType; use nexus_db_queries::context::OpContext; @@ -41,9 +53,11 @@ use parse_display::Display; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_urlencoded; +use std::collections::HashMap; use std::num::NonZeroU32; use std::str::FromStr; -use std::{collections::HashSet, sync::Arc}; +use tokio::fs::File; +use tokio_util::codec::{BytesCodec, FramedRead}; // ----------------------------------------------------- // High-level overview of how login works in the console @@ -224,11 +238,11 @@ impl RelayState { unpublished = true, }] pub(crate) async fn login_saml_begin( - rqctx: RequestContext>, + rqctx: RequestContext, _path_params: Path, _query_params: Query, ) -> Result, HttpError> { - serve_console_index(rqctx.context()).await + serve_console_index(rqctx).await } /// Get a redirect straight to the IdP @@ -243,13 +257,13 @@ pub(crate) async fn login_saml_begin( unpublished = true, }] pub(crate) async fn login_saml_redirect( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path_params = path_params.into_inner(); // Use opctx_external_authn because this request will be @@ -288,7 +302,11 @@ pub(crate) async fn login_saml_redirect( } }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Authenticate a user via SAML @@ -298,13 +316,13 @@ pub(crate) async fn login_saml_redirect( tags = ["login"], }] pub(crate) async fn login_saml( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, body_bytes: dropshot::UntypedBody, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path_params = path_params.into_inner(); // By definition, this request is not authenticated. These operations @@ -364,14 +382,18 @@ pub(crate) async fn login_saml( // use absolute timeout even though session might idle out first. 
// browser expiration is mostly for convenience, as the API will // reject requests with an expired session regardless - apictx.session_absolute_timeout(), - apictx.external_tls_enabled, + apictx.context.session_absolute_timeout(), + apictx.context.external_tls_enabled, )?; headers.append(header::SET_COOKIE, cookie); } Ok(response) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Deserialize, JsonSchema)] @@ -386,15 +408,15 @@ pub struct LoginPathParam { unpublished = true, }] pub(crate) async fn login_local_begin( - rqctx: RequestContext>, + rqctx: RequestContext, _path_params: Path, _query_params: Query, ) -> Result, HttpError> { // TODO: figure out why instrumenting doesn't work // let apictx = rqctx.context(); // let handler = async { serve_console_index(rqctx.context()).await }; - // apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await - serve_console_index(rqctx.context()).await + // apictx.context.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + serve_console_index(rqctx).await } /// Authenticate a user via username and password @@ -404,13 +426,13 @@ pub(crate) async fn login_local_begin( tags = ["login"], }] pub(crate) async fn login_local( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, credentials: dropshot::TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let credentials = credentials.into_inner(); let silo = path.silo_name.into(); @@ -433,22 +455,26 @@ pub(crate) async fn login_local( // use absolute timeout even though session might idle out first. 
// browser expiration is mostly for convenience, as the API will // reject requests with an expired session regardless - apictx.session_absolute_timeout(), - apictx.external_tls_enabled, + apictx.context.session_absolute_timeout(), + apictx.context.external_tls_enabled, )?; headers.append(header::SET_COOKIE, cookie); } Ok(response) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } async fn create_session( opctx: &OpContext, - apictx: &ServerContext, + apictx: &ApiContext, user: Option, ) -> Result { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let session = match user { Some(user) => nexus.session_create(&opctx, user.id()).await?, None => Err(Error::Unauthenticated { @@ -468,12 +494,12 @@ async fn create_session( tags = ["hidden"], }] pub(crate) async fn logout( - rqctx: RequestContext>, + rqctx: RequestContext, cookies: Cookies, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await; let token = cookies.get(SESSION_COOKIE_COOKIE_NAME); @@ -498,14 +524,20 @@ pub(crate) async fn logout( let headers = response.headers_mut(); headers.append( header::SET_COOKIE, - clear_session_cookie_header_value(apictx.external_tls_enabled)?, + clear_session_cookie_header_value( + apictx.context.external_tls_enabled, + )?, ); }; Ok(response) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Deserialize, JsonSchema)] @@ -559,10 +591,10 @@ pub struct LoginUrlQuery { /// `redirect_uri` represents the URL to send the user back to after successful /// login, and is included in `state` query param if present async fn get_login_url( - rqctx: &RequestContext>, + rqctx: &RequestContext, redirect_uri: Option, ) -> Result { - let nexus = &rqctx.context().nexus; + let nexus = &rqctx.context().context.nexus; let endpoint = nexus.endpoint_for_request(rqctx)?; let silo = endpoint.silo(); @@ -628,7 +660,7 @@ async fn get_login_url( unpublished = true, }] pub(crate) async fn login_begin( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result { let apictx = rqctx.context(); @@ -637,18 +669,22 @@ pub(crate) async fn login_begin( let login_url = get_login_url(&rqctx, query.redirect_uri).await?; http_response_found(login_url) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } pub(crate) async fn console_index_or_login_redirect( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { let opctx = crate::context::op_context_for_external_api(&rqctx).await; // if authed, serve console index.html with JS bundle in script tag if let Ok(opctx) = opctx { if opctx.authn.actor().is_some() { - return serve_console_index(rqctx.context()).await; + return serve_console_index(rqctx).await; } } @@ -677,7 +713,7 @@ macro_rules! console_page { ($name:ident, $path:literal) => { #[endpoint { method = GET, path = $path, unpublished = true, }] pub(crate) async fn $name( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await } @@ -689,7 +725,7 @@ macro_rules! 
console_page_wildcard { ($name:ident, $path:literal) => { #[endpoint { method = GET, path = $path, unpublished = true, }] pub(crate) async fn $name( - rqctx: RequestContext>, + rqctx: RequestContext, _path_params: Path, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await @@ -707,6 +743,17 @@ console_page!(console_silo_images, "/images"); console_page!(console_silo_utilization, "/utilization"); console_page!(console_silo_access, "/access"); +/// Check if `gzip` is listed in the request's `Accept-Encoding` header. +fn accept_gz(header_value: &str) -> bool { + header_value.split(',').any(|c| { + c.split(';') + .next() + .expect("str::split always yields at least one item") + .trim() + == "gzip" + }) +} + /// Make a new Utf8PathBuf with `.gz` on the end fn with_gz_ext(path: &Utf8Path) -> Utf8PathBuf { let mut new_path = path.to_owned(); @@ -718,115 +765,148 @@ fn with_gz_ext(path: &Utf8Path) -> Utf8PathBuf { new_path } -/// Fetch a static asset from `/assets`. 404 on virtually all -/// errors. No auth. NO SENSITIVE FILES. Will serve a gzipped version if the -/// `.gz` file is present in the directory and `Accept-Encoding: gzip` is -/// present on the request. Cache in browser for a year because assets have -/// content hash in filename. -#[endpoint { - method = GET, - path = "/assets/{path:.*}", - unpublished = true, -}] -pub(crate) async fn asset( - rqctx: RequestContext>, - path_params: Path, +// Define header values as const so that `HeaderValue::from_static` is given the +// opportunity to panic at compile time +static ALLOWED_EXTENSIONS: Lazy> = { + const CONTENT_TYPES: [(&str, HeaderValue); 10] = [ + ("css", HeaderValue::from_static("text/css")), + ("html", HeaderValue::from_static("text/html; charset=utf-8")), + ("js", HeaderValue::from_static("text/javascript")), + ("map", HeaderValue::from_static("application/json")), + ("png", HeaderValue::from_static("image/png")), + ("svg", HeaderValue::from_static("image/svg+xml")), + ("txt", HeaderValue::from_static("text/plain; charset=utf-8")), + ("webp", HeaderValue::from_static("image/webp")), + ("woff", HeaderValue::from_static("application/font-woff")), + ("woff2", HeaderValue::from_static("font/woff2")), + ]; + + Lazy::new(|| HashMap::from(CONTENT_TYPES)) +}; +const CONTENT_ENCODING_GZIP: HeaderValue = HeaderValue::from_static("gzip"); +// Web application security headers; these should stay in sync with the headers +// listed in the console repo that are used in development. +// https://github.com/oxidecomputer/console/blob/main/docs/csp-headers.md +const WEB_SECURITY_HEADERS: [(HeaderName, HeaderValue); 3] = [ + ( + http::header::CONTENT_SECURITY_POLICY, + HeaderValue::from_static( + "default-src 'self'; style-src 'unsafe-inline' 'self'; \ + frame-src 'none'; object-src 'none'; \ + form-action 'none'; frame-ancestors 'none'", + ), + ), + (http::header::X_CONTENT_TYPE_OPTIONS, HeaderValue::from_static("nosniff")), + (http::header::X_FRAME_OPTIONS, HeaderValue::from_static("DENY")), +]; + +/// Serve a static asset from `static_dir`. 404 on virtually all errors. +/// No auth. NO SENSITIVE FILES. Will serve a gzipped version if the `.gz` +/// file is present in the directory and `gzip` is listed in the request's +/// `Accept-Encoding` header. 
+async fn serve_static( + rqctx: RequestContext, + path: &Utf8Path, + cache_control: HeaderValue, ) -> Result, HttpError> { let apictx = rqctx.context(); - let path = Utf8PathBuf::from_iter(path_params.into_inner().path); - - // Bail unless the extension is allowed - match path.extension() { - Some(ext) => { - if !ALLOWED_EXTENSIONS.contains(&ext) { - return Err(not_found("file extension not allowed")); - } - } - None => { - return Err(not_found( - "requested file does not have extension, not allowed", - )); - } - } - - // We only serve assets from assets/ within static_dir - let assets_dir = &apictx + let static_dir = apictx + .context .console_config .static_dir - .as_ref() - .ok_or_else(|| not_found("static_dir undefined"))? - .join("assets"); + .as_deref() + .ok_or_else(|| not_found("static_dir undefined"))?; - let request = &rqctx.request; - let accept_encoding = request.headers().get(http::header::ACCEPT_ENCODING); - let accept_gz = accept_encoding.map_or(false, |val| { - val.to_str().map_or(false, |s| s.contains("gzip")) - }); + // Bail unless the extension is allowed + let content_type = ALLOWED_EXTENSIONS + .get(path.extension().ok_or_else(|| { + not_found("requested file does not have extension, not allowed") + })?) + .ok_or_else(|| not_found("file extension not allowed"))?; + + let mut resp = Response::builder() + .status(StatusCode::OK) + .header(http::header::CONTENT_TYPE, content_type) + .header(http::header::CACHE_CONTROL, cache_control); + for (k, v) in WEB_SECURITY_HEADERS { + resp = resp.header(k, v); + } // If req accepts gzip and we have a gzipped version, serve that. Otherwise // fall back to non-gz. If neither file found, bubble up 404. - let (path_to_read, set_content_encoding_gzip) = - match accept_gz.then(|| find_file(&with_gz_ext(&path), &assets_dir)) { - Some(Ok(gzipped_path)) => (gzipped_path, true), - _ => (find_file(&path, &assets_dir)?, false), - }; + let request = &rqctx.request; + let accept_encoding = request + .headers() + .get(http::header::ACCEPT_ENCODING) + .and_then(|v| v.to_str().ok()) + .unwrap_or_default(); + let path_to_read = match accept_gz(accept_encoding) + .then(|| find_file(&with_gz_ext(&path), static_dir)) + { + Some(Ok(gzipped_path)) => { + resp = resp + .header(http::header::CONTENT_ENCODING, CONTENT_ENCODING_GZIP); + gzipped_path + } + _ => find_file(&path, static_dir)?, + }; - // File read is the same regardless of gzip - let file_contents = tokio::fs::read(&path_to_read).await.map_err(|e| { + let file = File::open(&path_to_read).await.map_err(|e| { not_found(&format!("accessing {:?}: {:#}", path_to_read, e)) })?; + let metadata = file.metadata().await.map_err(|e| { + not_found(&format!("accessing {:?}: {:#}", path_to_read, e)) + })?; + resp = resp.header(http::header::CONTENT_LENGTH, metadata.len()); - // Derive the MIME type from the file name (can't use path_to_read because - // it might end with .gz) - let content_type = path.file_name().map_or("text/plain", |f| { - mime_guess::from_path(f).first_raw().unwrap_or("text/plain") - }); - - let mut resp = Response::builder() - .status(StatusCode::OK) - .header(http::header::CONTENT_TYPE, content_type) - .header(http::header::CACHE_CONTROL, "max-age=31536000, immutable"); // 1 year + let stream = FramedRead::new(file, BytesCodec::new()); + let body = Body::wrap_stream(stream); + Ok(resp.body(body)?) +} - if set_content_encoding_gzip { - resp = resp.header(http::header::CONTENT_ENCODING, "gzip"); - } +/// Serve a static asset from `/assets` via [`serve_static`]. 
Cache +/// in browser for a year because assets have content hash in filename. +/// +/// Note that Dropshot protects us from directory traversal attacks (e.g. +/// `/assets/../../../etc/passwd`). This is tested in the `console_api` +/// integration tests. +#[endpoint { + method = GET, + path = "/assets/{path:.*}", + unpublished = true, +}] +pub(crate) async fn asset( + rqctx: RequestContext, + path_params: Path, +) -> Result, HttpError> { + // asset URLs contain hashes, so cache for 1 year + const CACHE_CONTROL: HeaderValue = + HeaderValue::from_static("max-age=31536000, immutable"); - Ok(resp.body(file_contents.into())?) + let mut path = Utf8PathBuf::from("assets"); + path.extend(path_params.into_inner().path); + serve_static(rqctx, &path, CACHE_CONTROL).await } +/// Serve `/index.html` via [`serve_static`]. Disallow caching. pub(crate) async fn serve_console_index( - apictx: &ServerContext, + rqctx: RequestContext, ) -> Result, HttpError> { - let static_dir = &apictx - .console_config - .static_dir - .to_owned() - .ok_or_else(|| not_found("static_dir undefined"))?; - let file = static_dir.join("index.html"); - let file_contents = tokio::fs::read(&file) - .await - .map_err(|e| not_found(&format!("accessing {:?}: {:#}", file, e)))?; - Ok(Response::builder() - .status(StatusCode::OK) - .header(http::header::CONTENT_TYPE, "text/html; charset=UTF-8") - // do not cache this response in browser - .header(http::header::CACHE_CONTROL, "no-store") - .body(file_contents.into())?) + // do not cache this response in browser + const CACHE_CONTROL: HeaderValue = HeaderValue::from_static("no-store"); + + serve_static(rqctx, Utf8Path::new("index.html"), CACHE_CONTROL).await } fn not_found(internal_msg: &str) -> HttpError { HttpError::for_not_found(None, internal_msg.to_string()) } -static ALLOWED_EXTENSIONS: Lazy> = Lazy::new(|| { - HashSet::from([ - "js", "css", "html", "ico", "map", "otf", "png", "svg", "ttf", "txt", - "webp", "woff", "woff2", - ]) -}); - /// Starting from `root_dir`, follow the segments of `path` down the file tree /// until we find a file (or not). Do not follow symlinks. +/// +/// WARNING: This function assumes that `..` path segments have already been +/// found and rejected. 
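(Aside summarizing the content negotiation in `serve_static` above: if the request's Accept-Encoding includes gzip and a sibling `.gz` file exists, that file is served with `Content-Encoding: gzip`; otherwise the plain file is used, and a missing file bubbles up as a 404. A stand-alone sketch of just that selection; `find` is a hypothetical stand-in for the module's `find_file`, which continues just below.)

    use camino::{Utf8Path, Utf8PathBuf};

    /// Returns the path to read plus whether to set `Content-Encoding: gzip`.
    fn pick_file(
        path: &Utf8Path,
        accepts_gzip: bool,
        find: impl Fn(&Utf8Path) -> Option<Utf8PathBuf>,
    ) -> Option<(Utf8PathBuf, bool)> {
        if accepts_gzip {
            // Look for "<name>.gz" next to the requested file.
            let gz = Utf8PathBuf::from(format!("{path}.gz"));
            if let Some(found) = find(&gz) {
                return Some((found, true));
            }
        }
        // Fall back to the uncompressed file; `None` becomes a 404 upstream.
        find(path).map(|p| (p, false))
    }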
fn find_file( path: &Utf8Path, root_dir: &Utf8Path, @@ -862,10 +942,20 @@ fn find_file( #[cfg(test)] mod test { - use super::{find_file, RelativeUri}; + use super::{accept_gz, find_file, RelativeUri}; use camino::{Utf8Path, Utf8PathBuf}; use http::StatusCode; + #[test] + fn test_accept_gz() { + assert!(!accept_gz("")); + assert!(accept_gz("gzip")); + assert!(accept_gz("deflate, gzip;q=1.0, *;q=0.5")); + assert!(accept_gz(" gzip ; q=0.9 ")); + assert!(!accept_gz("gzip2")); + assert!(accept_gz("gzip2, gzip;q=0.9")); + } + #[test] fn test_find_file_finds_file() { let root = current_dir(); diff --git a/nexus/src/external_api/device_auth.rs b/nexus/src/external_api/device_auth.rs index 1697722f6f..2aa1965e79 100644 --- a/nexus/src/external_api/device_auth.rs +++ b/nexus/src/external_api/device_auth.rs @@ -12,7 +12,7 @@ use super::console_api::console_index_or_login_redirect; use super::views::DeviceAccessTokenGrant; use crate::app::external_endpoints::authority_for_request; -use crate::ServerContext; +use crate::ApiContext; use dropshot::{ endpoint, HttpError, HttpResponseUpdatedNoContent, RequestContext, TypedBody, @@ -23,7 +23,6 @@ use nexus_db_queries::db::model::DeviceAccessToken; use omicron_common::api::external::InternalContext; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use std::sync::Arc; use uuid::Uuid; // Token granting à la RFC 8628 (OAuth 2.0 Device Authorization Grant) @@ -64,11 +63,11 @@ pub struct DeviceAuthRequest { tags = ["hidden"], // "token" }] pub(crate) async fn device_auth_request( - rqctx: RequestContext>, + rqctx: RequestContext, params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = params.into_inner(); let handler = async { let opctx = nexus.opctx_external_authn(); @@ -116,7 +115,7 @@ pub struct DeviceAuthVerify { unpublished = true, }] pub(crate) async fn device_auth_verify( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await } @@ -127,7 +126,7 @@ pub(crate) async fn device_auth_verify( unpublished = true, }] pub(crate) async fn device_auth_success( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await } @@ -143,11 +142,11 @@ pub(crate) async fn device_auth_success( tags = ["hidden"], // "token" }] pub(crate) async fn device_auth_confirm( - rqctx: RequestContext>, + rqctx: RequestContext, params: TypedBody, ) -> Result { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -163,7 +162,11 @@ pub(crate) async fn device_auth_confirm( .await?; Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] @@ -192,11 +195,11 @@ pub enum DeviceAccessTokenResponse { tags = ["hidden"], // "token" }] pub(crate) async fn device_access_token( - rqctx: RequestContext>, + rqctx: RequestContext, params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = params.into_inner(); let handler = async { // RFC 8628 §3.4 diff --git 
a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 551ef00817..350836441e 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -13,8 +13,7 @@ use super::{ Utilization, Vpc, VpcRouter, VpcSubnet, }, }; -use crate::external_api::shared; -use crate::ServerContext; +use crate::{context::ApiContext, external_api::shared}; use dropshot::HttpError; use dropshot::HttpResponseAccepted; use dropshot::HttpResponseCreated; @@ -83,6 +82,7 @@ use omicron_common::api::external::{ http_pagination::data_page_params_for, AggregateBgpMessageHistory, }; use omicron_common::bail_unless; +use omicron_uuid_kinds::GenericUuid; use parse_display::Display; use propolis_client::support::tungstenite::protocol::frame::coding::CloseCode; use propolis_client::support::tungstenite::protocol::{ @@ -94,10 +94,9 @@ use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; use std::net::IpAddr; -use std::sync::Arc; use uuid::Uuid; -type NexusApiDescription = ApiDescription>; +type NexusApiDescription = ApiDescription; /// Returns a description of the external nexus API pub(crate) fn external_api() -> NexusApiDescription { @@ -235,6 +234,7 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(sled_instance_list)?; api.register(sled_physical_disk_list)?; api.register(physical_disk_list)?; + api.register(physical_disk_view)?; api.register(switch_list)?; api.register(switch_view)?; api.register(sled_list_uninitialized)?; @@ -269,6 +269,7 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(networking_switch_port_settings_delete)?; api.register(networking_switch_port_list)?; + api.register(networking_switch_port_status)?; api.register(networking_switch_port_apply_settings)?; api.register(networking_switch_port_clear_settings)?; @@ -286,6 +287,9 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(networking_bfd_disable)?; api.register(networking_bfd_status)?; + api.register(networking_allow_list_view)?; + api.register(networking_allow_list_update)?; + api.register(utilization_view)?; // Fleet-wide API operations @@ -320,6 +324,8 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(system_metric)?; api.register(silo_metric)?; + api.register(timeseries_schema_list)?; + api.register(timeseries_query)?; api.register(system_update_put_repository)?; api.register(system_update_get_repository)?; @@ -364,9 +370,9 @@ pub(crate) fn external_api() -> NexusApiDescription { endpoint: T, ) -> Result<(), String> where - T: Into>>, + T: Into>, { - let mut ep: ApiEndpoint> = endpoint.into(); + let mut ep: ApiEndpoint = endpoint.into(); // only one tag is allowed ep.tags = vec![String::from("hidden")]; ep.path = String::from("/experimental") + &ep.path; @@ -442,7 +448,7 @@ pub(crate) fn external_api() -> NexusApiDescription { tags = ["system/status"], }] async fn ping( - _rqctx: RequestContext>, + _rqctx: RequestContext, ) -> Result, HttpError> { Ok(HttpResponseOk(views::Ping { status: views::PingStatus::Ok })) } @@ -454,16 +460,20 @@ async fn ping( tags = ["policy"], }] async fn system_policy_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let policy = nexus.fleet_fetch_policy(&opctx).await?; Ok(HttpResponseOk(policy)) }; - 
apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update top-level IAM policy @@ -473,12 +483,12 @@ async fn system_policy_view( tags = ["policy"], }] async fn system_policy_update( - rqctx: RequestContext>, + rqctx: RequestContext, new_policy: TypedBody>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let new_policy = new_policy.into_inner(); let nasgns = new_policy.role_assignments.len(); // This should have been validated during parsing. @@ -487,7 +497,11 @@ async fn system_policy_update( let policy = nexus.fleet_update_policy(&opctx, &new_policy).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch current silo's IAM policy @@ -497,11 +511,11 @@ async fn system_policy_update( tags = ["silos"], }] pub(crate) async fn policy_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let silo: NameOrId = opctx .authn @@ -514,7 +528,11 @@ pub(crate) async fn policy_view( let policy = nexus.silo_fetch_policy(&opctx, &silo_lookup).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update current silo's IAM policy @@ -524,12 +542,12 @@ pub(crate) async fn policy_view( tags = ["silos"], }] async fn policy_update( - rqctx: RequestContext>, + rqctx: RequestContext, new_policy: TypedBody>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let new_policy = new_policy.into_inner(); let nasgns = new_policy.role_assignments.len(); // This should have been validated during parsing. 
@@ -546,7 +564,11 @@ async fn policy_update( nexus.silo_update_policy(&opctx, &silo_lookup, &new_policy).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch resource utilization for user's current silo @@ -556,11 +578,11 @@ async fn policy_update( tags = ["silos"], }] async fn utilization_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let silo_lookup = nexus.current_silo_lookup(&opctx)?; let utilization = @@ -568,7 +590,11 @@ async fn utilization_view( Ok(HttpResponseOk(utilization.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch current utilization for given silo @@ -578,12 +604,12 @@ async fn utilization_view( tags = ["system/silos"], }] async fn silo_utilization_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let silo_lookup = @@ -592,7 +618,11 @@ async fn silo_utilization_view( Ok(HttpResponseOk(quotas.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List current utilization state for all silos #[endpoint { @@ -601,12 +631,12 @@ async fn silo_utilization_view( tags = ["system/silos"], }] async fn silo_utilization_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; @@ -627,7 +657,11 @@ async fn silo_utilization_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Lists resource quotas for all silos @@ -637,12 +671,12 @@ async fn silo_utilization_list( tags = ["system/silos"], }] async fn system_quotas_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; @@ -661,7 +695,11 @@ async fn system_quotas_list( &|_, quota: &SiloQuotas| quota.silo_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch resource quotas for silo @@ -671,12 +709,12 @@ async fn system_quotas_list( tags = ["system/silos"], }] async fn silo_quotas_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = 
rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let silo_lookup = @@ -684,7 +722,11 @@ async fn silo_quotas_view( let quota = nexus.silo_quotas_view(&opctx, &silo_lookup).await?; Ok(HttpResponseOk(quota.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update resource quotas for silo @@ -696,13 +738,13 @@ async fn silo_quotas_view( tags = ["system/silos"], }] async fn silo_quotas_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_quota: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let silo_lookup = @@ -712,7 +754,11 @@ async fn silo_quotas_update( .await?; Ok(HttpResponseOk(quota.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List silos @@ -724,12 +770,12 @@ async fn silo_quotas_update( tags = ["system/silos"], }] async fn silo_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -747,7 +793,11 @@ async fn silo_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create a silo @@ -757,18 +807,22 @@ async fn silo_list( tags = ["system/silos"], }] async fn silo_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_silo_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let silo = nexus.silo_create(&opctx, new_silo_params.into_inner()).await?; Ok(HttpResponseCreated(silo.try_into()?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch silo @@ -780,19 +834,23 @@ async fn silo_create( tags = ["system/silos"], }] async fn silo_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; let (.., silo) = silo_lookup.fetch().await?; Ok(HttpResponseOk(silo.try_into()?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List IP pools linked to silo @@ -806,14 +864,14 @@ async fn silo_view( tags = 
["system/silos"], }] async fn silo_ip_pool_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); @@ -838,7 +896,11 @@ async fn silo_ip_pool_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete a silo @@ -850,19 +912,23 @@ async fn silo_ip_pool_list( tags = ["system/silos"], }] async fn silo_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = path_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, params.silo)?; nexus.silo_delete(&opctx, &silo_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch silo IAM policy @@ -872,19 +938,23 @@ async fn silo_delete( tags = ["system/silos"], }] async fn silo_policy_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; let policy = nexus.silo_fetch_policy(&opctx, &silo_lookup).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update silo IAM policy @@ -894,7 +964,7 @@ async fn silo_policy_view( tags = ["system/silos"], }] async fn silo_policy_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_policy: TypedBody>, ) -> Result>, HttpError> { @@ -905,14 +975,18 @@ async fn silo_policy_update( // This should have been validated during parsing. 
bail_unless!(nasgns <= shared::MAX_ROLE_ASSIGNMENTS_PER_RESOURCE); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; let policy = nexus.silo_update_policy(&opctx, &silo_lookup, &new_policy).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Silo-specific user endpoints @@ -924,13 +998,13 @@ async fn silo_policy_update( tags = ["system/silos"], }] async fn silo_user_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanById::from_query(&query)?; @@ -948,7 +1022,11 @@ async fn silo_user_list( &|_, user: &User| user.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Path parameters for Silo User requests @@ -965,14 +1043,14 @@ struct UserParam { tags = ["system/silos"], }] async fn silo_user_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; @@ -980,7 +1058,11 @@ async fn silo_user_view( nexus.silo_user_fetch(&opctx, &silo_lookup, path.user_id).await?; Ok(HttpResponseOk(user.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Silo identity providers @@ -992,13 +1074,13 @@ async fn silo_user_view( tags = ["system/silos"], }] async fn silo_identity_provider_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -1017,7 +1099,11 @@ async fn silo_identity_provider_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Silo SAML identity providers @@ -1029,14 +1115,14 @@ async fn silo_identity_provider_list( tags = ["system/silos"], }] async fn saml_identity_provider_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_provider: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = 
crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; let provider = nexus @@ -1048,7 +1134,11 @@ async fn saml_identity_provider_create( .await?; Ok(HttpResponseCreated(provider.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch SAML IdP @@ -1058,14 +1148,14 @@ async fn saml_identity_provider_create( tags = ["system/silos"], }] async fn saml_identity_provider_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let saml_identity_provider_selector = @@ -1082,7 +1172,11 @@ async fn saml_identity_provider_view( .await?; Ok(HttpResponseOk(provider.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // TODO: no DELETE for identity providers? @@ -1100,14 +1194,14 @@ async fn saml_identity_provider_view( tags = ["system/silos"], }] async fn local_idp_user_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_user_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; let user = nexus @@ -1119,7 +1213,11 @@ async fn local_idp_user_create( .await?; Ok(HttpResponseCreated(user.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete user @@ -1129,21 +1227,25 @@ async fn local_idp_user_create( tags = ["system/silos"], }] async fn local_idp_user_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; nexus.local_idp_delete_user(&opctx, &silo_lookup, path.user_id).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Set or invalidate user's password @@ -1156,7 +1258,7 @@ async fn local_idp_user_delete( tags = ["system/silos"], }] async fn local_idp_user_set_password( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, update: TypedBody, @@ -1164,7 +1266,7 @@ async fn local_idp_user_set_password( let apictx = rqctx.context(); let handler = async { let opctx = 
crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; @@ -1178,7 +1280,11 @@ async fn local_idp_user_set_password( .await?; Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List projects @@ -1188,12 +1294,12 @@ async fn local_idp_user_set_password( tags = ["projects"], }] async fn project_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -1211,7 +1317,11 @@ async fn project_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create project @@ -1221,18 +1331,22 @@ async fn project_list( tags = ["projects"], }] async fn project_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_project: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let project = nexus.project_create(&opctx, &new_project.into_inner()).await?; Ok(HttpResponseCreated(project.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch project @@ -1242,11 +1356,11 @@ async fn project_create( tags = ["projects"], }] async fn project_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -1256,7 +1370,11 @@ async fn project_view( nexus.project_lookup(&opctx, project_selector)?.fetch().await?; Ok(HttpResponseOk(project.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete project @@ -1266,11 +1384,11 @@ async fn project_view( tags = ["projects"], }] async fn project_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -1280,7 +1398,11 @@ async fn project_delete( nexus.project_delete(&opctx, &project_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // TODO-correctness: Is it valid for PUT to 
accept application/json that's a @@ -1295,12 +1417,12 @@ async fn project_delete( tags = ["projects"], }] async fn project_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, updated_project: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let updated_project = updated_project.into_inner(); let handler = async { @@ -1313,7 +1435,11 @@ async fn project_update( .await?; Ok(HttpResponseOk(project.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch project's IAM policy @@ -1323,11 +1449,11 @@ async fn project_update( tags = ["projects"], }] async fn project_policy_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result>, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -1338,7 +1464,11 @@ async fn project_policy_view( nexus.project_fetch_policy(&opctx, &project_lookup).await?; Ok(HttpResponseOk(policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update project's IAM policy @@ -1348,12 +1478,12 @@ async fn project_policy_view( tags = ["projects"], }] async fn project_policy_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_policy: TypedBody>, ) -> Result>, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let new_policy = new_policy.into_inner(); let handler = async { @@ -1366,7 +1496,11 @@ async fn project_policy_update( .await?; Ok(HttpResponseOk(new_policy)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // IP Pools @@ -1378,12 +1512,12 @@ async fn project_policy_update( tags = ["projects"], }] async fn project_ip_pool_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -1404,7 +1538,11 @@ async fn project_ip_pool_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch IP pool @@ -1414,13 +1552,13 @@ async fn project_ip_pool_list( tags = ["projects"], }] async fn project_ip_pool_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let pool_selector = path_params.into_inner().pool; let (pool, silo_link) = nexus.silo_ip_pool_fetch(&opctx, 
&pool_selector).await?; @@ -1429,7 +1567,11 @@ async fn project_ip_pool_view( is_default: silo_link.is_default, })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List IP pools @@ -1439,12 +1581,12 @@ async fn project_ip_pool_view( tags = ["system/networking"], }] async fn ip_pool_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -1462,7 +1604,11 @@ async fn ip_pool_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Deserialize, JsonSchema)] @@ -1477,18 +1623,22 @@ pub struct IpPoolPathParam { tags = ["system/networking"], }] async fn ip_pool_create( - rqctx: RequestContext>, + rqctx: RequestContext, pool_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let pool_params = pool_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let pool = nexus.ip_pool_create(&opctx, &pool_params).await?; Ok(HttpResponseCreated(IpPool::from(pool))) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch IP pool @@ -1498,13 +1648,13 @@ async fn ip_pool_create( tags = ["system/networking"], }] async fn ip_pool_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let pool_selector = path_params.into_inner().pool; // We do not prevent the service pool from being fetched by name or ID // like we do for update, delete, associate. 
@@ -1512,7 +1662,11 @@ async fn ip_pool_view( nexus.ip_pool_lookup(&opctx, &pool_selector)?.fetch().await?; Ok(HttpResponseOk(IpPool::from(pool))) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete IP pool @@ -1522,19 +1676,23 @@ async fn ip_pool_view( tags = ["system/networking"], }] async fn ip_pool_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; nexus.ip_pool_delete(&opctx, &pool_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update IP pool @@ -1544,21 +1702,25 @@ async fn ip_pool_delete( tags = ["system/networking"], }] async fn ip_pool_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, updates: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let updates = updates.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; let pool = nexus.ip_pool_update(&opctx, &pool_lookup, &updates).await?; Ok(HttpResponseOk(pool.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch IP pool utilization @@ -1568,13 +1730,13 @@ async fn ip_pool_update( tags = ["system/networking"], }] async fn ip_pool_utilization_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let pool_selector = path_params.into_inner().pool; // We do not prevent the service pool from being fetched by name or ID // like we do for update, delete, associate. @@ -1583,7 +1745,11 @@ async fn ip_pool_utilization_view( nexus.ip_pool_utilization_view(&opctx, &pool_lookup).await?; Ok(HttpResponseOk(utilization.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List IP pool's linked silos @@ -1593,7 +1759,7 @@ async fn ip_pool_utilization_view( tags = ["system/networking"], }] async fn ip_pool_silo_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, // paginating by resource_id because they're unique per pool. 
most robust // option would be to paginate by a composite key representing the (pool, @@ -1610,7 +1776,7 @@ async fn ip_pool_silo_list( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; @@ -1631,7 +1797,11 @@ async fn ip_pool_silo_list( &|_, x: &views::IpPoolSiloLink| x.silo_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Link IP pool to silo @@ -1645,14 +1815,14 @@ async fn ip_pool_silo_list( tags = ["system/networking"], }] async fn ip_pool_silo_link( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, resource_assoc: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let resource_assoc = resource_assoc.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; @@ -1661,7 +1831,11 @@ async fn ip_pool_silo_link( .await?; Ok(HttpResponseCreated(assoc.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Unlink IP pool from silo @@ -1673,20 +1847,24 @@ async fn ip_pool_silo_link( tags = ["system/networking"], }] async fn ip_pool_silo_unlink( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; nexus.ip_pool_unlink_silo(&opctx, &pool_lookup, &silo_lookup).await?; Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Make IP pool default for silo @@ -1701,14 +1879,14 @@ async fn ip_pool_silo_unlink( tags = ["system/networking"], }] async fn ip_pool_silo_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, update: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let update = update.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; @@ -1718,7 +1896,11 @@ async fn ip_pool_silo_update( .await?; Ok(HttpResponseOk(assoc.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch Oxide service IP pool @@ -1728,16 +1910,20 @@ async fn ip_pool_silo_update( tags = ["system/networking"], }] async fn ip_pool_service_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { let apictx = 
rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let pool = nexus.ip_pool_service_fetch(&opctx).await?; Ok(HttpResponseOk(IpPool::from(pool))) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } type IpPoolRangePaginationParams = PaginationParams; @@ -1751,14 +1937,14 @@ type IpPoolRangePaginationParams = PaginationParams; tags = ["system/networking"], }] async fn ip_pool_range_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let path = path_params.into_inner(); let marker = match query.page { @@ -1785,7 +1971,11 @@ async fn ip_pool_range_list( }, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Add range to IP pool @@ -1797,21 +1987,25 @@ async fn ip_pool_range_list( tags = ["system/networking"], }] async fn ip_pool_range_add( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, range_params: TypedBody, ) -> Result, HttpError> { let apictx = &rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let range = range_params.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; let out = nexus.ip_pool_add_range(&opctx, &pool_lookup, &range).await?; Ok(HttpResponseCreated(out.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Remove range from IP pool @@ -1821,21 +2015,25 @@ async fn ip_pool_range_add( tags = ["system/networking"], }] async fn ip_pool_range_remove( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, range_params: TypedBody, ) -> Result { let apictx = &rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let range = range_params.into_inner(); let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; nexus.ip_pool_delete_range(&opctx, &pool_lookup, &range).await?; Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List IP ranges for the Oxide service pool @@ -1847,13 +2045,13 @@ async fn ip_pool_range_remove( tags = ["system/networking"], }] async fn ip_pool_service_range_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let 
marker = match query.page { WhichPage::First(_) => None, @@ -1878,7 +2076,11 @@ async fn ip_pool_service_range_list( }, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Add IP range to Oxide service pool @@ -1890,18 +2092,22 @@ async fn ip_pool_service_range_list( tags = ["system/networking"], }] async fn ip_pool_service_range_add( - rqctx: RequestContext>, + rqctx: RequestContext, range_params: TypedBody, ) -> Result, HttpError> { let apictx = &rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let range = range_params.into_inner(); let out = nexus.ip_pool_service_add_range(&opctx, &range).await?; Ok(HttpResponseCreated(out.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Remove IP range from Oxide service pool @@ -1911,18 +2117,22 @@ async fn ip_pool_service_range_add( tags = ["system/networking"], }] async fn ip_pool_service_range_remove( - rqctx: RequestContext>, + rqctx: RequestContext, range_params: TypedBody, ) -> Result { let apictx = &rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let range = range_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus.ip_pool_service_delete_range(&opctx, &range).await?; Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Floating IP Addresses @@ -1934,12 +2144,12 @@ async fn ip_pool_service_range_remove( tags = ["floating-ips"], }] async fn floating_ip_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; @@ -1956,7 +2166,11 @@ async fn floating_ip_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create floating IP @@ -1966,12 +2180,12 @@ async fn floating_ip_list( tags = ["floating-ips"], }] async fn floating_ip_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, floating_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let floating_params = floating_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -1982,7 +2196,11 @@ async fn floating_ip_create( .await?; Ok(HttpResponseCreated(ip)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update floating IP @@ -1992,14 +2210,14 @@ async fn floating_ip_create( tags = ["floating-ips"], }] async fn 
floating_ip_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, updated_floating_ip: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let updated_floating_ip_params = updated_floating_ip.into_inner(); @@ -2019,7 +2237,11 @@ async fn floating_ip_update( .await?; Ok(HttpResponseOk(floating_ip)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete floating IP @@ -2029,14 +2251,14 @@ async fn floating_ip_update( tags = ["floating-ips"], }] async fn floating_ip_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let floating_ip_selector = params::FloatingIpSelector { @@ -2049,7 +2271,11 @@ async fn floating_ip_delete( nexus.floating_ip_delete(&opctx, fip_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch floating IP @@ -2059,14 +2285,14 @@ async fn floating_ip_delete( tags = ["floating-ips"] }] async fn floating_ip_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let floating_ip_selector = params::FloatingIpSelector { @@ -2079,7 +2305,11 @@ async fn floating_ip_view( .await?; Ok(HttpResponseOk(fip.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Attach floating IP @@ -2091,7 +2321,7 @@ async fn floating_ip_view( tags = ["floating-ips"], }] async fn floating_ip_attach( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, target: TypedBody, @@ -2099,7 +2329,7 @@ async fn floating_ip_attach( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let floating_ip_selector = params::FloatingIpSelector { @@ -2115,7 +2345,11 @@ async fn floating_ip_attach( .await?; Ok(HttpResponseAccepted(ip)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Detach floating IP @@ -2127,14 +2361,14 @@ async fn floating_ip_attach( tags = ["floating-ips"], }] async fn floating_ip_detach( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> 
{ let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let floating_ip_selector = params::FloatingIpSelector { @@ -2146,7 +2380,11 @@ async fn floating_ip_detach( let ip = nexus.floating_ip_detach(&opctx, fip_lookup).await?; Ok(HttpResponseAccepted(ip)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Disks @@ -2158,13 +2396,13 @@ async fn floating_ip_detach( tags = ["disks"], }] async fn disk_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -2183,7 +2421,11 @@ async fn disk_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // TODO-correctness See note about instance create. This should be async. @@ -2194,14 +2436,14 @@ async fn disk_list( tags = ["disks"] }] async fn disk_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_disk: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let params = new_disk.into_inner(); let project_lookup = nexus.project_lookup(&opctx, query)?; @@ -2209,7 +2451,11 @@ async fn disk_create( nexus.project_create_disk(&opctx, &project_lookup, ¶ms).await?; Ok(HttpResponseCreated(disk.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch disk @@ -2219,14 +2465,14 @@ async fn disk_create( tags = ["disks"] }] async fn disk_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let disk_selector = @@ -2235,7 +2481,11 @@ async fn disk_view( nexus.disk_lookup(&opctx, disk_selector)?.fetch().await?; Ok(HttpResponseOk(disk.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete disk @@ -2245,14 +2495,14 @@ async fn disk_view( tags = ["disks"], }] async fn disk_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = 
&apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let disk_selector = @@ -2261,7 +2511,11 @@ async fn disk_delete( nexus.project_delete_disk(&opctx, &disk_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Display, Serialize, Deserialize, JsonSchema)] @@ -2289,7 +2543,7 @@ struct DiskMetricsPath { tags = ["disks"], }] async fn disk_metrics_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query< PaginationParams, @@ -2298,7 +2552,7 @@ async fn disk_metrics_list( ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); @@ -2323,7 +2577,11 @@ async fn disk_metrics_list( Ok(HttpResponseOk(result)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Start importing blocks into disk @@ -2335,14 +2593,14 @@ async fn disk_metrics_list( tags = ["disks"], }] async fn disk_bulk_write_import_start( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); @@ -2354,7 +2612,11 @@ async fn disk_bulk_write_import_start( Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Import blocks into disk @@ -2364,7 +2626,7 @@ async fn disk_bulk_write_import_start( tags = ["disks"], }] async fn disk_bulk_write_import( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, import_params: TypedBody, @@ -2372,7 +2634,7 @@ async fn disk_bulk_write_import( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let params = import_params.into_inner(); @@ -2385,7 +2647,11 @@ async fn disk_bulk_write_import( Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Stop importing blocks into disk @@ -2397,14 +2663,14 @@ async fn disk_bulk_write_import( tags = ["disks"], }] async fn disk_bulk_write_import_stop( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); @@ -2416,7 +2682,11 @@ async fn disk_bulk_write_import_stop( 
Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Confirm disk block import completion @@ -2426,7 +2696,7 @@ async fn disk_bulk_write_import_stop( tags = ["disks"], }] async fn disk_finalize_import( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, finalize_params: TypedBody, @@ -2434,7 +2704,7 @@ async fn disk_finalize_import( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let params = finalize_params.into_inner(); @@ -2446,7 +2716,11 @@ async fn disk_finalize_import( Ok(HttpResponseUpdatedNoContent()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Instances @@ -2458,12 +2732,12 @@ async fn disk_finalize_import( tags = ["instances"], }] async fn instance_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -2483,7 +2757,11 @@ async fn instance_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create instance @@ -2493,12 +2771,12 @@ async fn instance_list( tags = ["instances"], }] async fn instance_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_instance: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let project_selector = query_params.into_inner(); let new_instance_params = &new_instance.into_inner(); let handler = async { @@ -2513,7 +2791,11 @@ async fn instance_create( .await?; Ok(HttpResponseCreated(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch instance @@ -2523,12 +2805,12 @@ async fn instance_create( tags = ["instances"], }] async fn instance_view( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let handler = async { @@ -2547,7 +2829,11 @@ async fn instance_view( .await?; Ok(HttpResponseOk(instance_and_vmm.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete instance @@ -2557,12 +2843,12 @@ async fn instance_view( tags = ["instances"], }] async fn instance_delete( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result { let 
apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2576,7 +2862,11 @@ async fn instance_delete( nexus.project_destroy_instance(&opctx, &instance_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // TODO should this be in the public API? @@ -2587,13 +2877,13 @@ async fn instance_delete( tags = ["instances"], }] async fn instance_migrate( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, migrate_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let migrate_instance_params = migrate_params.into_inner(); @@ -2614,7 +2904,11 @@ async fn instance_migrate( .await?; Ok(HttpResponseOk(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Reboot an instance @@ -2624,12 +2918,12 @@ async fn instance_migrate( tags = ["instances"], }] async fn instance_reboot( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2643,7 +2937,11 @@ async fn instance_reboot( let instance = nexus.instance_reboot(&opctx, &instance_lookup).await?; Ok(HttpResponseAccepted(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Boot instance @@ -2653,12 +2951,12 @@ async fn instance_reboot( tags = ["instances"], }] async fn instance_start( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2672,7 +2970,11 @@ async fn instance_start( let instance = nexus.instance_start(&opctx, &instance_lookup).await?; Ok(HttpResponseAccepted(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Stop instance @@ -2682,12 +2984,12 @@ async fn instance_start( tags = ["instances"], }] async fn instance_stop( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2701,7 +3003,11 @@ async fn instance_stop( let instance = nexus.instance_stop(&opctx, &instance_lookup).await?; 
Ok(HttpResponseAccepted(instance.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch instance serial console @@ -2711,14 +3017,14 @@ async fn instance_stop( tags = ["instances"], }] async fn instance_serial_console( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -2732,7 +3038,11 @@ async fn instance_serial_console( .await?; Ok(HttpResponseOk(data)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Stream instance serial console @@ -2742,13 +3052,13 @@ async fn instance_serial_console( tags = ["instances"], }] async fn instance_serial_console_stream( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, conn: WebsocketConnection, ) -> WebsocketChannelResult { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -2798,13 +3108,13 @@ async fn instance_serial_console_stream( tags = ["instances"], }] async fn instance_ssh_public_key_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; @@ -2829,7 +3139,11 @@ async fn instance_ssh_public_key_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List disks for instance @@ -2839,13 +3153,13 @@ async fn instance_ssh_public_key_list( tags = ["instances"], }] async fn instance_disk_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, path_params: Path, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; @@ -2870,7 +3184,11 @@ async fn instance_disk_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Attach disk to instance @@ -2880,13 +3198,13 @@ async fn instance_disk_list( tags = ["instances"], }] async fn instance_disk_attach( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, disk_to_attach: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; 
let path = path_params.into_inner(); let query = query_params.into_inner(); let disk = disk_to_attach.into_inner().disk; @@ -2902,7 +3220,11 @@ async fn instance_disk_attach( nexus.instance_attach_disk(&opctx, &instance_lookup, disk).await?; Ok(HttpResponseAccepted(disk.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Detach disk from instance @@ -2912,7 +3234,7 @@ async fn instance_disk_attach( tags = ["instances"], }] async fn instance_disk_detach( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, disk_to_detach: TypedBody, @@ -2920,7 +3242,7 @@ async fn instance_disk_detach( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let disk = disk_to_detach.into_inner().disk; @@ -2934,7 +3256,11 @@ async fn instance_disk_detach( nexus.instance_detach_disk(&opctx, &instance_lookup, disk).await?; Ok(HttpResponseAccepted(disk.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Certificates @@ -2950,12 +3276,12 @@ async fn instance_disk_detach( tags = ["silos"], }] async fn certificate_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -2973,7 +3299,11 @@ async fn certificate_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create new system-wide x.509 certificate @@ -2986,18 +3316,22 @@ async fn certificate_list( tags = ["silos"] }] async fn certificate_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_cert: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let new_cert_params = new_cert.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let cert = nexus.certificate_create(&opctx, new_cert_params).await?; Ok(HttpResponseCreated(cert.try_into()?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Path parameters for Certificate requests @@ -3015,19 +3349,23 @@ struct CertificatePathParam { tags = ["silos"], }] async fn certificate_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let (.., cert) = nexus.certificate_lookup(&opctx, &path.certificate).fetch().await?; Ok(HttpResponseOk(cert.try_into()?)) }; - 
apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete certificate @@ -3039,12 +3377,12 @@ async fn certificate_view( tags = ["silos"], }] async fn certificate_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus @@ -3055,7 +3393,11 @@ async fn certificate_delete( .await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create address lot @@ -3065,12 +3407,12 @@ async fn certificate_delete( tags = ["system/networking"], }] async fn networking_address_lot_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_address_lot: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = new_address_lot.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.address_lot_create(&opctx, params).await?; @@ -3081,7 +3423,11 @@ async fn networking_address_lot_create( Ok(HttpResponseCreated(AddressLotCreateResponse { lot, blocks })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete address lot @@ -3091,20 +3437,24 @@ async fn networking_address_lot_create( tags = ["system/networking"], }] async fn networking_address_lot_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let address_lot_lookup = nexus.address_lot_lookup(&opctx, path.address_lot)?; nexus.address_lot_delete(&opctx, &address_lot_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List address lots @@ -3114,12 +3464,12 @@ async fn networking_address_lot_delete( tags = ["system/networking"], }] async fn networking_address_lot_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -3138,7 +3488,11 @@ async fn networking_address_lot_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List blocks in address lot @@ -3148,13 +3502,13 @@ async fn networking_address_lot_list( tags = ["system/networking"], }] async fn networking_address_lot_block_list( - rqctx: 
RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let path = path_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; @@ -3174,7 +3528,11 @@ async fn networking_address_lot_block_list( &|_, x: &AddressLotBlock| x.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create loopback address @@ -3184,12 +3542,12 @@ async fn networking_address_lot_block_list( tags = ["system/networking"], }] async fn networking_loopback_address_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_loopback_address: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let params = new_loopback_address.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.loopback_address_create(&opctx, params).await?; @@ -3198,7 +3556,11 @@ async fn networking_loopback_address_create( Ok(HttpResponseCreated(addr)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[derive(Serialize, Deserialize, JsonSchema)] @@ -3225,12 +3587,12 @@ pub struct LoopbackAddressPath { tags = ["system/networking"], }] async fn networking_loopback_address_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let addr = match IpNetwork::new(path.address, path.subnet_mask) { @@ -3250,7 +3612,11 @@ async fn networking_loopback_address_delete( .await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List loopback addresses @@ -3260,12 +3626,12 @@ async fn networking_loopback_address_delete( tags = ["system/networking"], }] async fn networking_loopback_address_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -3282,7 +3648,11 @@ async fn networking_loopback_address_list( &|_, x: &LoopbackAddress| x.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create switch port settings @@ -3292,12 +3662,12 @@ async fn networking_loopback_address_list( tags = ["system/networking"], }] async fn networking_switch_port_settings_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_settings: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + 
let nexus = &apictx.context.nexus; let params = new_settings.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.switch_port_settings_post(&opctx, params).await?; @@ -3305,7 +3675,11 @@ async fn networking_switch_port_settings_create( let settings: SwitchPortSettingsView = result.into(); Ok(HttpResponseCreated(settings)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete switch port settings @@ -3315,18 +3689,22 @@ async fn networking_switch_port_settings_create( tags = ["system/networking"], }] async fn networking_switch_port_settings_delete( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let selector = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus.switch_port_settings_delete(&opctx, &selector).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List switch port settings @@ -3336,14 +3714,14 @@ async fn networking_switch_port_settings_delete( tags = ["system/networking"], }] async fn networking_switch_port_settings_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query< PaginatedByNameOrId, >, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -3362,7 +3740,11 @@ async fn networking_switch_port_settings_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Get information about switch port @@ -3372,18 +3754,22 @@ async fn networking_switch_port_settings_list( tags = ["system/networking"], }] async fn networking_switch_port_settings_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = path_params.into_inner().port; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let settings = nexus.switch_port_settings_get(&opctx, &query).await?; Ok(HttpResponseOk(settings.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List switch ports @@ -3393,12 +3779,12 @@ async fn networking_switch_port_settings_view( tags = ["system/hardware"], }] async fn networking_switch_port_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -3415,7 
+3801,41 @@ async fn networking_switch_port_list(
             &|_, x: &SwitchPort| x.id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
+}
+
+/// Get switch port status
+#[endpoint {
+    method = GET,
+    path = "/v1/system/hardware/switch-port/{port}/status",
+    tags = ["system/hardware"],
+}]
+async fn networking_switch_port_status(
+    rqctx: RequestContext,
+    path_params: Path,
+    query_params: Query,
+) -> Result, HttpError> {
+    let apictx = rqctx.context();
+    let handler = async {
+        let nexus = &apictx.context.nexus;
+        let query = query_params.into_inner();
+        let path = path_params.into_inner();
+        let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
+        Ok(HttpResponseOk(
+            nexus
+                .switch_port_status(&opctx, query.switch_location, path.port)
+                .await?,
+        ))
+    };
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Apply switch port settings
@@ -3425,14 +3845,14 @@ async fn networking_switch_port_list(
     tags = ["system/hardware"],
 }]
 async fn networking_switch_port_apply_settings(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     path_params: Path,
     query_params: Query,
     settings_body: TypedBody,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let port = path_params.into_inner().port;
         let query = query_params.into_inner();
         let settings = settings_body.into_inner();
@@ -3442,7 +3862,11 @@ async fn networking_switch_port_apply_settings(
             .await?;
         Ok(HttpResponseUpdatedNoContent {})
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Clear switch port settings
@@ -3452,20 +3876,24 @@ async fn networking_switch_port_apply_settings(
     tags = ["system/hardware"],
 }]
 async fn networking_switch_port_clear_settings(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     path_params: Path,
     query_params: Query,
 ) -> Result {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let port = path_params.into_inner().port;
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         nexus.switch_port_clear_settings(&opctx, &port, &query).await?;
         Ok(HttpResponseUpdatedNoContent {})
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Create new BGP configuration
@@ -3475,18 +3903,22 @@ async fn networking_switch_port_clear_settings(
     tags = ["system/networking"],
 }]
 async fn networking_bgp_config_create(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     config: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let config = config.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let result = nexus.bgp_config_set(&opctx, &config).await?;
         Ok(HttpResponseCreated::(result.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// List BGP configurations
@@ -3496,12 +3928,12 @@ async fn
networking_bgp_config_create( tags = ["system/networking"], }] async fn networking_bgp_config_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -3520,7 +3952,11 @@ async fn networking_bgp_config_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } //TODO pagination? the normal by-name/by-id stuff does not work here @@ -3531,16 +3967,20 @@ async fn networking_bgp_config_list( tags = ["system/networking"], }] async fn networking_bgp_status( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { let apictx = rqctx.context(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let result = nexus.bgp_peer_status(&opctx).await?; Ok(HttpResponseOk(result)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Get BGP router message history @@ -3550,18 +3990,22 @@ async fn networking_bgp_status( tags = ["system/networking"], }] async fn networking_bgp_message_history( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let sel = query_params.into_inner(); let result = nexus.bgp_message_history(&opctx, &sel).await?; Ok(HttpResponseOk(AggregateBgpMessageHistory::new(result))) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } //TODO pagination? 
the normal by-name/by-id stuff does not work here @@ -3572,18 +4016,22 @@ async fn networking_bgp_message_history( tags = ["system/networking"], }] async fn networking_bgp_imported_routes_ipv4( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let sel = query_params.into_inner(); let result = nexus.bgp_imported_routes_ipv4(&opctx, &sel).await?; Ok(HttpResponseOk(result)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete BGP configuration @@ -3593,18 +4041,22 @@ async fn networking_bgp_imported_routes_ipv4( tags = ["system/networking"], }] async fn networking_bgp_config_delete( - rqctx: RequestContext>, + rqctx: RequestContext, sel: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let sel = sel.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus.bgp_config_delete(&opctx, &sel).await?; Ok(HttpResponseUpdatedNoContent {}) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create new BGP announce set @@ -3614,18 +4066,22 @@ async fn networking_bgp_config_delete( tags = ["system/networking"], }] async fn networking_bgp_announce_set_create( - rqctx: RequestContext>, + rqctx: RequestContext, config: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let config = config.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus.bgp_create_announce_set(&opctx, &config).await?; Ok(HttpResponseCreated::(result.0.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } //TODO pagination? 
the normal by-name/by-id stuff does not work here @@ -3636,12 +4092,12 @@ async fn networking_bgp_announce_set_create( tags = ["system/networking"], }] async fn networking_bgp_announce_set_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let sel = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let result = nexus @@ -3652,7 +4108,11 @@ async fn networking_bgp_announce_set_list( .collect(); Ok(HttpResponseOk(result)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete BGP announce set @@ -3662,18 +4122,22 @@ async fn networking_bgp_announce_set_list( tags = ["system/networking"], }] async fn networking_bgp_announce_set_delete( - rqctx: RequestContext>, + rqctx: RequestContext, selector: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let sel = selector.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; nexus.bgp_delete_announce_set(&opctx, &sel).await?; Ok(HttpResponseUpdatedNoContent {}) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Enable a BFD session @@ -3683,18 +4147,22 @@ async fn networking_bgp_announce_set_delete( tags = ["system/networking"], }] async fn networking_bfd_enable( - rqctx: RequestContext>, + rqctx: RequestContext, session: TypedBody, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; nexus.bfd_enable(&opctx, session.into_inner()).await?; Ok(HttpResponseUpdatedNoContent {}) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Disable a BFD session @@ -3704,18 +4172,22 @@ async fn networking_bfd_enable( tags = ["system/networking"], }] async fn networking_bfd_disable( - rqctx: RequestContext>, + rqctx: RequestContext, session: TypedBody, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; nexus.bfd_disable(&opctx, session.into_inner()).await?; Ok(HttpResponseUpdatedNoContent {}) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Get BFD status @@ -3725,17 +4197,77 @@ async fn networking_bfd_disable( tags = ["system/networking"], }] async fn networking_bfd_status( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; 
         opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?;
         let status = nexus.bfd_status(&opctx).await?;
         Ok(HttpResponseOk(status))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
+}
+
+/// Get user-facing services IP allowlist
+#[endpoint {
+    method = GET,
+    path = "/v1/system/networking/allow-list",
+    tags = ["system/networking"],
+}]
+async fn networking_allow_list_view(
+    rqctx: RequestContext,
+) -> Result, HttpError> {
+    let apictx = rqctx.context();
+    let handler = async {
+        let nexus = &apictx.context.nexus;
+        let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
+        nexus
+            .allow_list_view(&opctx)
+            .await
+            .map(HttpResponseOk)
+            .map_err(HttpError::from)
+    };
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
+}
+
+/// Update user-facing services IP allowlist
+#[endpoint {
+    method = PUT,
+    path = "/v1/system/networking/allow-list",
+    tags = ["system/networking"],
+}]
+async fn networking_allow_list_update(
+    rqctx: RequestContext,
+    params: TypedBody,
+) -> Result, HttpError> {
+    let apictx = rqctx.context();
+    let handler = async {
+        let nexus = &apictx.context.nexus;
+        let server_kind = apictx.kind;
+        let params = params.into_inner();
+        let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
+        let remote_addr = rqctx.request.remote_addr().ip();
+        nexus
+            .allow_list_upsert(&opctx, remote_addr, server_kind, params)
+            .await
+            .map(HttpResponseOk)
+            .map_err(HttpError::from)
+    };
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // Images
@@ -3750,13 +4282,13 @@ async fn networking_bfd_status(
     tags = ["images"],
 }]
 async fn image_list(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     query_params: Query>,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let pag_params = data_page_params_for(&rqctx, &query)?;
         let scan_params = ScanByNameOrId::from_query(&query)?;
@@ -3786,7 +4318,11 @@ async fn image_list(
             &marker_for_name_or_id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Create image
@@ -3798,14 +4334,14 @@ async fn image_list(
     tags = ["images"]
 }]
 async fn image_create(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     query_params: Query,
     new_image: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let params = &new_image.into_inner();
         let parent_lookup = match query.project.clone() {
@@ -3824,7 +4360,11 @@ async fn image_create(
         let image = nexus.image_create(&opctx, &parent_lookup, &params).await?;
         Ok(HttpResponseCreated(image.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch image
@@ -3836,14 +4376,14 @@ async fn image_create(
     tags = ["images"],
 }]
 async fn image_view(
-    rqctx: RequestContext>,
+ rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let image: nexus_db_model::Image = match nexus @@ -3867,7 +4407,11 @@ async fn image_view( }; Ok(HttpResponseOk(image.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete image @@ -3881,14 +4425,14 @@ async fn image_view( tags = ["images"], }] async fn image_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let image_lookup = nexus @@ -3903,7 +4447,11 @@ async fn image_delete( nexus.image_delete(&opctx, &image_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Promote project image @@ -3915,14 +4463,14 @@ async fn image_delete( tags = ["images"] }] async fn image_promote( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let image_lookup = nexus @@ -3937,7 +4485,11 @@ async fn image_promote( let image = nexus.image_promote(&opctx, &image_lookup).await?; Ok(HttpResponseAccepted(image.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Demote silo image @@ -3949,14 +4501,14 @@ async fn image_promote( tags = ["images"] }] async fn image_demote( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let image_lookup = nexus @@ -3972,7 +4524,11 @@ async fn image_demote( nexus.image_demote(&opctx, &image_lookup, &project_lookup).await?; Ok(HttpResponseAccepted(image.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List network interfaces @@ -3982,13 +4538,13 @@ async fn image_demote( tags = ["instances"], }] async fn instance_network_interface_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = 
crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -4011,7 +4567,11 @@ async fn instance_network_interface_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create network interface @@ -4021,14 +4581,14 @@ async fn instance_network_interface_list( tags = ["instances"], }] async fn instance_network_interface_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, interface_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let instance_lookup = nexus.instance_lookup(&opctx, query)?; let iface = nexus @@ -4040,7 +4600,11 @@ async fn instance_network_interface_create( .await?; Ok(HttpResponseCreated(iface.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete network interface @@ -4055,14 +4619,14 @@ async fn instance_network_interface_create( tags = ["instances"], }] async fn instance_network_interface_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let interface_selector = params::InstanceNetworkInterfaceSelector { @@ -4077,7 +4641,11 @@ async fn instance_network_interface_delete( .await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch network interface @@ -4087,14 +4655,14 @@ async fn instance_network_interface_delete( tags = ["instances"], }] async fn instance_network_interface_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let interface_selector = params::InstanceNetworkInterfaceSelector { @@ -4108,7 +4676,11 @@ async fn instance_network_interface_view( .await?; Ok(HttpResponseOk(interface.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update network interface @@ -4118,7 +4690,7 @@ async fn instance_network_interface_view( tags = ["instances"], }] async fn instance_network_interface_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, updated_iface: TypedBody, @@ -4126,7 +4698,7 @@ async 
fn instance_network_interface_update( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let updated_iface = updated_iface.into_inner(); @@ -4150,7 +4722,11 @@ async fn instance_network_interface_update( .await?; Ok(HttpResponseOk(InstanceNetworkInterface::from(interface))) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // External IP addresses for instances @@ -4162,13 +4738,13 @@ async fn instance_network_interface_update( tags = ["instances"], }] async fn instance_external_ip_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4182,7 +4758,11 @@ async fn instance_external_ip_list( nexus.instance_list_external_ips(&opctx, &instance_lookup).await?; Ok(HttpResponseOk(ResultsPage { items: ips, next_page: None })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Allocate and attach ephemeral IP to instance @@ -4192,7 +4772,7 @@ async fn instance_external_ip_list( tags = ["instances"], }] async fn instance_ephemeral_ip_attach( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ip_to_create: TypedBody, @@ -4200,7 +4780,7 @@ async fn instance_ephemeral_ip_attach( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -4218,7 +4798,11 @@ async fn instance_ephemeral_ip_attach( .await?; Ok(HttpResponseAccepted(ip)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Detach and deallocate ephemeral IP from instance @@ -4228,14 +4812,14 @@ async fn instance_ephemeral_ip_attach( tags = ["instances"], }] async fn instance_ephemeral_ip_detach( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { @@ -4253,7 +4837,11 @@ async fn instance_ephemeral_ip_detach( .await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Snapshots @@ -4265,13 +4853,13 @@ async fn instance_ephemeral_ip_detach( tags = ["snapshots"], }] async fn snapshot_list( - 
rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -4290,7 +4878,11 @@ async fn snapshot_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create snapshot @@ -4302,14 +4894,14 @@ async fn snapshot_list( tags = ["snapshots"], }] async fn snapshot_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_snapshot: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let new_snapshot_params = &new_snapshot.into_inner(); let project_lookup = nexus.project_lookup(&opctx, query)?; @@ -4318,7 +4910,11 @@ async fn snapshot_create( .await?; Ok(HttpResponseCreated(snapshot.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch snapshot @@ -4328,14 +4924,14 @@ async fn snapshot_create( tags = ["snapshots"], }] async fn snapshot_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let snapshot_selector = params::SnapshotSelector { @@ -4346,7 +4942,11 @@ async fn snapshot_view( nexus.snapshot_lookup(&opctx, snapshot_selector)?.fetch().await?; Ok(HttpResponseOk(snapshot.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete snapshot @@ -4356,14 +4956,14 @@ async fn snapshot_view( tags = ["snapshots"], }] async fn snapshot_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let snapshot_selector = params::SnapshotSelector { @@ -4375,7 +4975,11 @@ async fn snapshot_delete( nexus.snapshot_delete(&opctx, &snapshot_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // VPCs @@ -4387,12 +4991,12 @@ async fn snapshot_delete( tags = ["vpcs"], }] async fn vpc_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - 
let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -4413,7 +5017,11 @@ async fn vpc_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create VPC @@ -4423,12 +5031,12 @@ async fn vpc_list( tags = ["vpcs"], }] async fn vpc_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, body: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let new_vpc_params = body.into_inner(); let handler = async { @@ -4439,7 +5047,11 @@ async fn vpc_create( .await?; Ok(HttpResponseCreated(vpc.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch VPC @@ -4449,13 +5061,13 @@ async fn vpc_create( tags = ["vpcs"], }] async fn vpc_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4464,7 +5076,11 @@ async fn vpc_view( let (.., vpc) = nexus.vpc_lookup(&opctx, vpc_selector)?.fetch().await?; Ok(HttpResponseOk(vpc.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update a VPC @@ -4474,14 +5090,14 @@ async fn vpc_view( tags = ["vpcs"], }] async fn vpc_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, updated_vpc: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let updated_vpc_params = &updated_vpc.into_inner(); @@ -4494,7 +5110,11 @@ async fn vpc_update( .await?; Ok(HttpResponseOk(vpc.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete VPC @@ -4504,13 +5124,13 @@ async fn vpc_update( tags = ["vpcs"], }] async fn vpc_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4520,7 +5140,11 @@ async fn vpc_delete( nexus.project_delete_vpc(&opctx, &vpc_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List subnets @@ -4530,12 +5154,12 @@ async fn 
vpc_delete( tags = ["vpcs"], }] async fn vpc_subnet_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -4555,7 +5179,11 @@ async fn vpc_subnet_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create subnet @@ -4565,13 +5193,13 @@ async fn vpc_subnet_list( tags = ["vpcs"], }] async fn vpc_subnet_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, create_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let create = create_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4580,7 +5208,11 @@ async fn vpc_subnet_create( nexus.vpc_create_subnet(&opctx, &vpc_lookup, &create).await?; Ok(HttpResponseCreated(subnet.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch subnet @@ -4590,13 +5222,13 @@ async fn vpc_subnet_create( tags = ["vpcs"], }] async fn vpc_subnet_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4609,7 +5241,11 @@ async fn vpc_subnet_view( nexus.vpc_subnet_lookup(&opctx, subnet_selector)?.fetch().await?; Ok(HttpResponseOk(subnet.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete subnet @@ -4619,13 +5255,13 @@ async fn vpc_subnet_view( tags = ["vpcs"], }] async fn vpc_subnet_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4638,7 +5274,11 @@ async fn vpc_subnet_delete( nexus.vpc_delete_subnet(&opctx, &subnet_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update subnet @@ -4648,14 +5288,14 @@ async fn vpc_subnet_delete( tags = ["vpcs"], }] async fn vpc_subnet_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, subnet_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = 
path_params.into_inner(); let query = query_params.into_inner(); let subnet_params = subnet_params.into_inner(); @@ -4671,7 +5311,11 @@ async fn vpc_subnet_update( .await?; Ok(HttpResponseOk(subnet.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // This endpoint is likely temporary. We would rather list all IPs allocated in @@ -4685,13 +5329,13 @@ async fn vpc_subnet_update( tags = ["vpcs"], }] async fn vpc_subnet_list_network_interfaces( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let path = path_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; @@ -4720,7 +5364,11 @@ async fn vpc_subnet_list_network_interfaces( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // VPC Firewalls @@ -4733,7 +5381,7 @@ async fn vpc_subnet_list_network_interfaces( tags = ["vpcs"], }] async fn vpc_firewall_rules_view( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result, HttpError> { // TODO: Check If-Match and fail if the ETag doesn't match anymore. @@ -4742,7 +5390,7 @@ async fn vpc_firewall_rules_view( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let vpc_lookup = nexus.vpc_lookup(&opctx, query)?; let rules = nexus.vpc_list_firewall_rules(&opctx, &vpc_lookup).await?; @@ -4750,7 +5398,11 @@ async fn vpc_firewall_rules_view( rules: rules.into_iter().map(|rule| rule.into()).collect(), })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Replace firewall rules @@ -4760,7 +5412,7 @@ async fn vpc_firewall_rules_view( tags = ["vpcs"], }] async fn vpc_firewall_rules_update( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, router_params: TypedBody, ) -> Result, HttpError> { @@ -4769,7 +5421,7 @@ async fn vpc_firewall_rules_update( let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let router_params = router_params.into_inner(); let vpc_lookup = nexus.vpc_lookup(&opctx, query)?; @@ -4780,7 +5432,11 @@ async fn vpc_firewall_rules_update( rules: rules.into_iter().map(|rule| rule.into()).collect(), })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // VPC Routers @@ -4793,13 +5449,13 @@ async fn vpc_firewall_rules_update( unpublished = true, }] async fn vpc_router_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; 
- let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -4818,7 +5474,11 @@ async fn vpc_router_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch router @@ -4829,13 +5489,13 @@ async fn vpc_router_list( unpublished = true, }] async fn vpc_router_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4848,7 +5508,11 @@ async fn vpc_router_view( nexus.vpc_router_lookup(&opctx, router_selector)?.fetch().await?; Ok(HttpResponseOk(vpc_router.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create VPC router @@ -4859,13 +5523,13 @@ async fn vpc_router_view( unpublished = true, }] async fn vpc_router_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, create_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let create = create_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4880,7 +5544,11 @@ async fn vpc_router_create( .await?; Ok(HttpResponseCreated(router.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete router @@ -4891,13 +5559,13 @@ async fn vpc_router_create( unpublished = true, }] async fn vpc_router_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -4910,7 +5578,11 @@ async fn vpc_router_delete( nexus.vpc_delete_router(&opctx, &router_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Update router @@ -4921,14 +5593,14 @@ async fn vpc_router_delete( unpublished = true, }] async fn vpc_router_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, router_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let router_params = router_params.into_inner(); @@ -4944,7 +5616,11 @@ async fn vpc_router_update( .await?; Ok(HttpResponseOk(router.into())) }; - 
apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List routes @@ -4957,13 +5633,13 @@ async fn vpc_router_update( unpublished = true, }] async fn vpc_router_route_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -4982,7 +5658,11 @@ async fn vpc_router_route_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Vpc Router Routes @@ -4995,14 +5675,14 @@ async fn vpc_router_route_list( unpublished = true, }] async fn vpc_router_route_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let route_selector = params::RouteSelector { @@ -5017,7 +5697,11 @@ async fn vpc_router_route_view( .await?; Ok(HttpResponseOk(route.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create router @@ -5028,14 +5712,14 @@ async fn vpc_router_route_view( unpublished = true, }] async fn vpc_router_route_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, create_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let create = create_params.into_inner(); let router_lookup = nexus.vpc_router_lookup(&opctx, query)?; @@ -5049,7 +5733,11 @@ async fn vpc_router_route_create( .await?; Ok(HttpResponseCreated(route.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete route @@ -5060,14 +5748,14 @@ async fn vpc_router_route_create( unpublished = true, }] async fn vpc_router_route_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let route_selector = params::RouteSelector { @@ -5081,7 +5769,11 @@ async fn vpc_router_route_delete( nexus.router_delete_route(&opctx, &route_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + 
.instrument_dropshot_handler(&rqctx, handler) + .await } /// Update route @@ -5092,14 +5784,14 @@ async fn vpc_router_route_delete( unpublished = true, }] async fn vpc_router_route_update( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, router_params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let router_params = router_params.into_inner(); @@ -5117,7 +5809,11 @@ async fn vpc_router_route_update( .await?; Ok(HttpResponseOk(route.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Racks @@ -5129,12 +5825,12 @@ async fn vpc_router_route_update( tags = ["system/hardware"], }] async fn rack_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let racks = nexus @@ -5149,7 +5845,11 @@ async fn rack_list( &|_, rack: &Rack| rack.identity.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Path parameters for Rack requests @@ -5166,18 +5866,22 @@ struct RackPathParam { tags = ["system/hardware"], }] async fn rack_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let rack_info = nexus.rack_lookup(&opctx, &path.rack_id).await?; Ok(HttpResponseOk(rack_info.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List uninitialized sleds @@ -5187,7 +5891,7 @@ async fn rack_view( tags = ["system/hardware"] }] async fn sled_list_uninitialized( - rqctx: RequestContext>, + rqctx: RequestContext, query: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); @@ -5199,12 +5903,22 @@ async fn sled_list_uninitialized( ); } let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let sleds = nexus.sled_list_uninitialized(&opctx).await?; Ok(HttpResponseOk(ResultsPage { items: sleds, next_page: None })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await +} + +/// The unique ID of a sled. +#[derive(Clone, Debug, Serialize, JsonSchema)] +pub struct SledId { + pub id: Uuid, } /// Add sled to initialized rack @@ -5215,21 +5929,28 @@ async fn sled_list_uninitialized( // we are only operating on single rack systems. 
 #[endpoint {
     method = POST,
-    path = "/v1/system/hardware/sleds/",
+    path = "/v1/system/hardware/sleds",
     tags = ["system/hardware"]
 }]
 async fn sled_add(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     sled: TypedBody,
-) -> Result {
+) -> Result, HttpError> {
     let apictx = rqctx.context();
-    let nexus = &apictx.nexus;
+    let nexus = &apictx.context.nexus;
     let handler = async {
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
-        nexus.sled_add(&opctx, sled.into_inner()).await?;
-        Ok(HttpResponseUpdatedNoContent())
+        let id = nexus
+            .sled_add(&opctx, sled.into_inner())
+            .await?
+            .into_untyped_uuid();
+        Ok(HttpResponseCreated(SledId { id }))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 // Sleds
@@ -5241,12 +5962,12 @@ async fn sled_add(
     tags = ["system/hardware"],
 }]
 async fn sled_list(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let query = query_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let sleds = nexus
@@ -5261,7 +5982,11 @@ async fn sled_list(
             &|_, sled: &Sled| sled.identity.id,
         )?))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Fetch sled
@@ -5271,19 +5996,23 @@ async fn sled_list(
     tags = ["system/hardware"],
 }]
 async fn sled_view(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     path_params: Path,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
         let (.., sled) =
             nexus.sled_lookup(&opctx, &path.sled_id)?.fetch().await?;
         Ok(HttpResponseOk(sled.into()))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// Set sled provision policy
@@ -5293,13 +6022,13 @@ async fn sled_view(
     tags = ["system/hardware"],
 }]
 async fn sled_set_provision_policy(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     path_params: Path,
     new_provision_state: TypedBody,
 ) -> Result, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let new_state = new_provision_state.into_inner().state;
@@ -5317,7 +6046,11 @@ async fn sled_set_provision_policy(
         Ok(HttpResponseOk(response))
     };
-    apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await
+    apictx
+        .context
+        .external_latencies
+        .instrument_dropshot_handler(&rqctx, handler)
+        .await
 }
 
 /// List instances running on given sled
@@ -5327,13 +6060,13 @@ async fn sled_set_provision_policy(
     tags = ["system/hardware"],
 }]
 async fn sled_instance_list(
-    rqctx: RequestContext>,
+    rqctx: RequestContext,
     path_params: Path,
     query_params: Query,
 ) -> Result>, HttpError> {
     let apictx = rqctx.context();
     let handler = async {
-        let nexus = &apictx.nexus;
+        let nexus = &apictx.context.nexus;
         let path = path_params.into_inner();
         let query = query_params.into_inner();
         let opctx =
crate::context::op_context_for_external_api(&rqctx).await?; @@ -5354,7 +6087,11 @@ async fn sled_instance_list( &|_, sled_instance: &views::SledInstance| sled_instance.identity.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Physical disks @@ -5366,12 +6103,12 @@ async fn sled_instance_list( tags = ["system/hardware"], }] async fn physical_disk_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let disks = nexus @@ -5386,7 +6123,38 @@ async fn physical_disk_list( &|_, disk: &PhysicalDisk| disk.identity.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await +} + +/// Get a physical disk +#[endpoint { + method = GET, + path = "/v1/system/hardware/disks/{disk_id}", + tags = ["system/hardware"], +}] +async fn physical_disk_view( + rqctx: RequestContext, + path_params: Path, +) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + + let (.., physical_disk) = + nexus.physical_disk_lookup(&opctx, &path).await?.fetch().await?; + Ok(HttpResponseOk(physical_disk.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Switches @@ -5398,12 +6166,12 @@ async fn physical_disk_list( tags = ["system/hardware"], }] async fn switch_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let switches = nexus @@ -5418,7 +6186,11 @@ async fn switch_list( &|_, switch: &views::Switch| switch.identity.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch switch @@ -5428,12 +6200,12 @@ async fn switch_list( tags = ["system/hardware"], }] async fn switch_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let (.., switch) = nexus @@ -5445,7 +6217,11 @@ async fn switch_view( .await?; Ok(HttpResponseOk(switch.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List physical disks attached to sleds @@ -5455,13 +6231,13 @@ async fn switch_view( tags = ["system/hardware"], }] async fn sled_physical_disk_list( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) 
-> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -5481,20 +6257,24 @@ async fn sled_physical_disk_list( &|_, disk: &PhysicalDisk| disk.identity.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Metrics #[derive(Debug, Deserialize, JsonSchema)] pub struct SystemMetricParams { - /// A silo ID. If unspecified, get aggregrate metrics across all silos. + /// A silo ID. If unspecified, get aggregate metrics across all silos. pub silo_id: Option, } #[derive(Debug, Deserialize, JsonSchema)] pub struct SiloMetricParams { - /// A project ID. If unspecified, get aggregrate metrics across all projects + /// A project ID. If unspecified, get aggregate metrics across all projects /// in current silo. pub project_id: Option, } @@ -5513,14 +6293,16 @@ struct SystemMetricsPathParam { metric_name: SystemMetricName, } -/// Access metrics data +/// View metrics +/// +/// View CPU, memory, or storage utilization metrics at the fleet or silo level. #[endpoint { method = GET, path = "/v1/system/metrics/{metric_name}", tags = ["system/metrics"], }] async fn system_metric( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, pag_params: Query< PaginationParams, @@ -5529,7 +6311,7 @@ async fn system_metric( ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let metric_name = path_params.into_inner().metric_name; let pagination = pag_params.into_inner(); let limit = rqctx.page_limit(&pagination)?; @@ -5552,17 +6334,23 @@ async fn system_metric( Ok(HttpResponseOk(result)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } -/// Access metrics data +/// View metrics +/// +/// View CPU, memory, or storage utilization metrics at the silo or project level. 
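(Illustrative request for the silo-level endpoint below, assuming `cpus_provisioned` is one of the published metric names and that the usual `start_time`/`end_time` window parameters apply:

    GET /v1/metrics/cpus_provisioned?project_id=<uuid>&start_time=2024-05-01T00:00:00Z&end_time=2024-05-02T00:00:00Z

Omitting `project_id` aggregates across all projects in the current silo, per `SiloMetricParams` above; the metric name and parameters here are only an assumed example, not an exhaustive description of the API.)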
#[endpoint { method = GET, path = "/v1/metrics/{metric_name}", tags = ["metrics"], }] async fn silo_metric( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, pag_params: Query< PaginationParams, @@ -5571,7 +6359,7 @@ async fn silo_metric( ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let metric_name = path_params.into_inner().metric_name; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -5599,7 +6387,73 @@ async fn silo_metric( Ok(HttpResponseOk(result)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await +} + +/// List timeseries schemas +#[endpoint { + method = GET, + path = "/v1/timeseries/schema", + tags = ["metrics"], +}] +async fn timeseries_schema_list( + rqctx: RequestContext, + pag_params: Query, +) -> Result>, HttpError> +{ + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let pagination = pag_params.into_inner(); + let limit = rqctx.page_limit(&pagination)?; + nexus + .timeseries_schema_list(&opctx, &pagination, limit) + .await + .map(HttpResponseOk) + .map_err(HttpError::from) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await +} + +// TODO: can we link to an OxQL reference? Do we have one? Can we even do links? + +/// Run timeseries query +/// +/// Queries are written in OxQL. +#[endpoint { + method = POST, + path = "/v1/timeseries/query", + tags = ["metrics"], +}] +async fn timeseries_query( + rqctx: RequestContext, + body: TypedBody, +) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let query = body.into_inner().query; + nexus + .timeseries_query(&opctx, &query) + .await + .map(HttpResponseOk) + .map_err(HttpError::from) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Updates @@ -5612,12 +6466,12 @@ async fn silo_metric( unpublished = true, }] async fn system_update_put_repository( - rqctx: RequestContext>, + rqctx: RequestContext, query: Query, body: StreamingBody, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let query = query.into_inner(); @@ -5626,7 +6480,11 @@ async fn system_update_put_repository( nexus.updates_put_repository(&opctx, body, query.file_name).await?; Ok(HttpResponseOk(update)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch TUF repository description @@ -5639,11 +6497,11 @@ async fn system_update_put_repository( unpublished = true, }] async fn system_update_get_repository( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let params = path_params.into_inner(); @@ -5653,7 
+6511,11 @@ async fn system_update_get_repository( description: description.into_external(), })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Silo users @@ -5665,12 +6527,12 @@ async fn system_update_get_repository( tags = ["silos"], }] async fn user_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -5694,7 +6556,11 @@ async fn user_list( &|_, user: &User| user.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Silo groups @@ -5706,11 +6572,11 @@ async fn user_list( tags = ["silos"], }] async fn group_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?; let handler = async { @@ -5727,7 +6593,11 @@ async fn group_list( &|_, group: &Group| group.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch group @@ -5737,19 +6607,23 @@ async fn group_list( tags = ["silos"], }] async fn group_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let (.., group) = nexus.silo_group_lookup(&opctx, &path.group_id).fetch().await?; Ok(HttpResponseOk(group.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Built-in (system) users @@ -5761,11 +6635,11 @@ async fn group_view( tags = ["system/silos"], }] async fn user_builtin_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pagparams = data_page_params_for(&rqctx, &query)?.map_name(|n| Name::ref_cast(n)); @@ -5783,7 +6657,11 @@ async fn user_builtin_list( &marker_for_name, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch built-in user @@ -5793,19 +6671,23 @@ async fn user_builtin_list( tags = ["system/silos"], }] async fn user_builtin_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let user_selector = path_params.into_inner(); let opctx = 
crate::context::op_context_for_external_api(&rqctx).await?; let (.., user) = nexus.user_builtin_lookup(&opctx, &user_selector)?.fetch().await?; Ok(HttpResponseOk(user.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Built-in roles @@ -5831,11 +6713,11 @@ struct RolePathParam { tags = ["roles"], }] async fn role_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; @@ -5865,7 +6747,11 @@ async fn role_list( |role: &Role, _| RolePage { last_seen: role.name.to_string() }, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch built-in role @@ -5875,11 +6761,11 @@ async fn role_list( tags = ["roles"], }] async fn role_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let role_name = &path.role_name; let handler = async { @@ -5887,7 +6773,11 @@ async fn role_view( let role = nexus.role_builtin_fetch(&opctx, &role_name).await?; Ok(HttpResponseOk(role.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Current user @@ -5899,10 +6789,10 @@ async fn role_view( tags = ["session"], }] pub(crate) async fn current_user_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { let apictx = rqctx.context(); - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let user = nexus.silo_user_fetch_self(&opctx).await?; @@ -5912,7 +6802,11 @@ pub(crate) async fn current_user_view( silo_name: silo.name().clone(), })) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch current user's groups @@ -5922,13 +6816,13 @@ pub(crate) async fn current_user_view( tags = ["session"], }] pub(crate) async fn current_user_groups( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let groups = nexus .silo_user_fetch_groups_for_self( @@ -5945,7 +6839,11 @@ pub(crate) async fn current_user_groups( &|_, group: &views::Group| group.id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } // Per-user SSH public keys @@ -5959,13 +6857,13 @@ pub(crate) async fn current_user_groups( tags = ["session"], }] async fn current_user_ssh_key_list( - rqctx: RequestContext>, + rqctx: 
RequestContext, query_params: Query, ) -> Result>, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -5986,7 +6884,11 @@ async fn current_user_ssh_key_list( &marker_for_name_or_id, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create SSH public key @@ -5998,13 +6900,13 @@ async fn current_user_ssh_key_list( tags = ["session"], }] async fn current_user_ssh_key_create( - rqctx: RequestContext>, + rqctx: RequestContext, new_key: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let &actor = opctx .authn .actor_required() @@ -6014,7 +6916,11 @@ async fn current_user_ssh_key_create( .await?; Ok(HttpResponseCreated(ssh_key.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Fetch SSH public key @@ -6026,13 +6932,13 @@ async fn current_user_ssh_key_create( tags = ["session"], }] async fn current_user_ssh_key_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let &actor = opctx .authn @@ -6048,7 +6954,11 @@ async fn current_user_ssh_key_view( assert_eq!(silo_user.id(), actor.actor_id()); Ok(HttpResponseOk(ssh_key.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete SSH public key @@ -6060,13 +6970,13 @@ async fn current_user_ssh_key_view( tags = ["session"], }] async fn current_user_ssh_key_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let &actor = opctx .authn @@ -6080,7 +6990,11 @@ async fn current_user_ssh_key_delete( nexus.ssh_key_delete(&opctx, actor.actor_id(), &ssh_key_lookup).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// List instrumentation probes @@ -6090,7 +7004,7 @@ async fn current_user_ssh_key_delete( tags = ["system/probes"], }] async fn probe_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query>, ) -> Result>, HttpError> { let apictx = rqctx.context(); @@ -6098,7 +7012,7 @@ async fn probe_list( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - let 
nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let query = query_params.into_inner(); let pag_params = data_page_params_for(&rqctx, &query)?; let scan_params = ScanByNameOrId::from_query(&query)?; @@ -6118,7 +7032,11 @@ async fn probe_list( }, )?)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// View instrumentation probe @@ -6128,7 +7046,7 @@ async fn probe_list( tags = ["system/probes"], }] async fn probe_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result, HttpError> { @@ -6137,7 +7055,7 @@ async fn probe_view( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let project_selector = query_params.into_inner(); let project_lookup = nexus.project_lookup(&opctx, project_selector)?; @@ -6145,7 +7063,11 @@ async fn probe_view( nexus.probe_get(&opctx, &project_lookup, &path.probe).await?; Ok(HttpResponseOk(probe)) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Create instrumentation probe @@ -6155,7 +7077,7 @@ async fn probe_view( tags = ["system/probes"], }] async fn probe_create( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, new_probe: TypedBody, ) -> Result, HttpError> { @@ -6164,7 +7086,7 @@ async fn probe_create( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let new_probe_params = &new_probe.into_inner(); let project_selector = query_params.into_inner(); let project_lookup = nexus.project_lookup(&opctx, project_selector)?; @@ -6173,7 +7095,11 @@ async fn probe_create( .await?; Ok(HttpResponseCreated(probe.into())) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } /// Delete instrumentation probe @@ -6183,7 +7109,7 @@ async fn probe_create( tags = ["system/probes"], }] async fn probe_delete( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, path_params: Path, ) -> Result { @@ -6192,14 +7118,18 @@ async fn probe_delete( let opctx = crate::context::op_context_for_external_api(&rqctx).await?; opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; - let nexus = &apictx.nexus; + let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let project_selector = query_params.into_inner(); let project_lookup = nexus.project_lookup(&opctx, project_selector)?; nexus.probe_delete(&opctx, &project_lookup, path.probe).await?; Ok(HttpResponseDeleted()) }; - apictx.external_latencies.instrument_dropshot_handler(&rqctx, handler).await + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } #[cfg(test)] diff --git a/nexus/src/internal_api/http_entrypoints.rs b/nexus/src/internal_api/http_entrypoints.rs index 0676ace70c..ceafe7f103 100644 --- a/nexus/src/internal_api/http_entrypoints.rs +++ b/nexus/src/internal_api/http_entrypoints.rs @@ -4,17 +4,13 @@ //! 
Handler functions (entrypoints) for HTTP APIs internal to the control plane -use crate::ServerContext; - -use super::params::{ - OximeterInfo, PhysicalDiskDeleteRequest, PhysicalDiskPutRequest, - PhysicalDiskPutResponse, RackInitializationRequest, SledAgentInfo, - ZpoolPutRequest, ZpoolPutResponse, -}; +use super::params::{OximeterInfo, RackInitializationRequest}; +use crate::context::ApiContext; use dropshot::endpoint; use dropshot::ApiDescription; use dropshot::FreeformBody; use dropshot::HttpError; +use dropshot::HttpResponseCreated; use dropshot::HttpResponseDeleted; use dropshot::HttpResponseOk; use dropshot::HttpResponseUpdatedNoContent; @@ -34,6 +30,7 @@ use nexus_types::external_api::params::SledSelector; use nexus_types::external_api::params::UninitializedSledId; use nexus_types::external_api::shared::UninitializedSled; use nexus_types::external_api::views::SledPolicy; +use nexus_types::internal_api::params::SledAgentInfo; use nexus_types::internal_api::params::SwitchPutRequest; use nexus_types::internal_api::params::SwitchPutResponse; use nexus_types::internal_api::views::to_list; @@ -43,29 +40,29 @@ use omicron_common::api::external::http_pagination::data_page_params_for; use omicron_common::api::external::http_pagination::PaginatedById; use omicron_common::api::external::http_pagination::ScanById; use omicron_common::api::external::http_pagination::ScanParams; -use omicron_common::api::external::Error; use omicron_common::api::internal::nexus::DiskRuntimeState; use omicron_common::api::internal::nexus::DownstairsClientStopRequest; use omicron_common::api::internal::nexus::DownstairsClientStopped; use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::api::internal::nexus::ProducerRegistrationResponse; use omicron_common::api::internal::nexus::RepairFinishInfo; use omicron_common::api::internal::nexus::RepairProgress; use omicron_common::api::internal::nexus::RepairStartInfo; use omicron_common::api::internal::nexus::SledInstanceState; use omicron_common::update::ArtifactId; use omicron_uuid_kinds::DownstairsKind; +use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::TypedUuid; use omicron_uuid_kinds::UpstairsKind; use omicron_uuid_kinds::UpstairsRepairKind; -use oximeter::types::ProducerResults; -use oximeter_producer::{collect, ProducerIdPathParams}; use schemars::JsonSchema; use serde::Deserialize; +use serde::Serialize; use std::collections::BTreeMap; -use std::sync::Arc; +use std::collections::BTreeSet; use uuid::Uuid; -type NexusApiDescription = ApiDescription>; +type NexusApiDescription = ApiDescription; /// Returns a description of the internal nexus API pub(crate) fn internal_api() -> NexusApiDescription { @@ -75,16 +72,13 @@ pub(crate) fn internal_api() -> NexusApiDescription { api.register(sled_firewall_rules_request)?; api.register(switch_put)?; api.register(rack_initialization_complete)?; - api.register(physical_disk_put)?; - api.register(physical_disk_delete)?; - api.register(zpool_put)?; api.register(cpapi_instances_put)?; api.register(cpapi_disks_put)?; api.register(cpapi_volume_remove_read_only_parent)?; api.register(cpapi_disk_remove_read_only_parent)?; api.register(cpapi_producers_post)?; + api.register(cpapi_assigned_producers_list)?; api.register(cpapi_collectors_post)?; - api.register(cpapi_metrics_collect)?; api.register(cpapi_artifact_download)?; api.register(cpapi_upstairs_repair_start)?; @@ -100,6 +94,7 @@ pub(crate) fn internal_api() -> NexusApiDescription { api.register(bgtask_list)?; api.register(bgtask_view)?; + 
api.register(bgtask_activate)?; api.register(blueprint_list)?; api.register(blueprint_view)?; @@ -107,8 +102,8 @@ pub(crate) fn internal_api() -> NexusApiDescription { api.register(blueprint_target_view)?; api.register(blueprint_target_set)?; api.register(blueprint_target_set_enabled)?; - api.register(blueprint_generate_from_collection)?; api.register(blueprint_regenerate)?; + api.register(blueprint_import)?; api.register(sled_list_uninitialized)?; api.register(sled_add)?; @@ -138,10 +133,10 @@ struct SledAgentPathParam { path = "/sled-agents/{sled_id}", }] async fn sled_agent_get( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let path = path_params.into_inner(); @@ -159,11 +154,11 @@ async fn sled_agent_get( path = "/sled-agents/{sled_id}", }] async fn sled_agent_put( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, sled_info: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let path = path_params.into_inner(); @@ -186,10 +181,10 @@ async fn sled_agent_put( path = "/sled-agents/{sled_id}/firewall-rules-update", }] async fn sled_firewall_rules_request( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let path = path_params.into_inner(); @@ -215,11 +210,11 @@ struct RackPathParam { path = "/racks/{rack_id}/initialization-complete", }] async fn rack_initialization_complete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, info: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); let request = info.into_inner(); @@ -241,11 +236,11 @@ struct SwitchPathParam { path = "/switch/{switch_id}", }] async fn switch_put( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, body: TypedBody, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -256,77 +251,6 @@ async fn switch_put( apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/// Report that a physical disk for the specified sled has come online. -#[endpoint { - method = PUT, - path = "/physical-disk", - }] -async fn physical_disk_put( - rqctx: RequestContext>, - body: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.nexus; - let disk = body.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_internal_api(&rqctx).await; - nexus.upsert_physical_disk(&opctx, disk).await?; - Ok(HttpResponseOk(PhysicalDiskPutResponse {})) - }; - apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await -} - -/// Report that a physical disk for the specified sled has gone offline. 
-#[endpoint { - method = DELETE, - path = "/physical-disk", - }] -async fn physical_disk_delete( - rqctx: RequestContext>, - body: TypedBody, -) -> Result { - let apictx = rqctx.context(); - let nexus = &apictx.nexus; - let disk = body.into_inner(); - - let handler = async { - let opctx = crate::context::op_context_for_internal_api(&rqctx).await; - nexus.delete_physical_disk(&opctx, disk).await?; - Ok(HttpResponseDeleted()) - }; - apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await -} - -/// Path parameters for Zpool requests (internal API) -#[derive(Deserialize, JsonSchema)] -struct ZpoolPathParam { - sled_id: Uuid, - zpool_id: Uuid, -} - -/// Report that a pool for a specified sled has come online. -#[endpoint { - method = PUT, - path = "/sled-agents/{sled_id}/zpools/{zpool_id}", - }] -async fn zpool_put( - rqctx: RequestContext>, - path_params: Path, - pool_info: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.nexus; - let path = path_params.into_inner(); - let pi = pool_info.into_inner(); - - let handler = async { - let opctx = crate::context::op_context_for_internal_api(&rqctx).await; - nexus.upsert_zpool(&opctx, path.zpool_id, path.sled_id, pi).await?; - Ok(HttpResponseOk(ZpoolPutResponse {})) - }; - apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await -} - /// Path parameters for Instance requests (internal API) #[derive(Deserialize, JsonSchema)] struct InstancePathParam { @@ -339,11 +263,11 @@ struct InstancePathParam { path = "/instances/{instance_id}", }] async fn cpapi_instances_put( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_runtime_state: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); let new_state = new_runtime_state.into_inner(); @@ -369,11 +293,11 @@ struct DiskPathParam { path = "/disks/{disk_id}", }] async fn cpapi_disks_put( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, new_runtime_state: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); let new_state = new_runtime_state.into_inner(); @@ -404,10 +328,10 @@ struct VolumePathParam { path = "/volume/{volume_id}/remove-read-only-parent", }] async fn cpapi_volume_remove_read_only_parent( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -429,10 +353,10 @@ async fn cpapi_volume_remove_read_only_parent( path = "/disk/{disk_id}/remove-read-only-parent", }] async fn cpapi_disk_remove_read_only_parent( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -450,15 +374,25 @@ async fn cpapi_disk_remove_read_only_parent( path = "/metrics/producers", }] async fn cpapi_producers_post( - request_context: RequestContext>, + request_context: RequestContext, producer_info: TypedBody, -) -> Result { - let context = request_context.context(); - let nexus = &context.nexus; - let producer_info = producer_info.into_inner(); +) -> Result, HttpError> { + let context = &request_context.context().context; let handler = async { - 
nexus.assign_producer(producer_info).await?; - Ok(HttpResponseUpdatedNoContent()) + let nexus = &context.nexus; + let producer_info = producer_info.into_inner(); + let opctx = + crate::context::op_context_for_internal_api(&request_context).await; + nexus + .assign_producer(&opctx, producer_info) + .await + .map_err(HttpError::from) + .map(|_| { + HttpResponseCreated(ProducerRegistrationResponse { + lease_duration: + crate::app::oximeter::PRODUCER_LEASE_DURATION, + }) + }) }; context .internal_latencies @@ -466,21 +400,38 @@ async fn cpapi_producers_post( .await } -/// Accept a notification of a new oximeter collection server. +#[derive(Clone, Copy, Debug, Deserialize, JsonSchema, Serialize)] +pub struct CollectorIdPathParams { + /// The ID of the oximeter collector. + pub collector_id: Uuid, +} + +/// List all metric producers assigned to an oximeter collector. #[endpoint { - method = POST, - path = "/metrics/collectors", + method = GET, + path = "/metrics/collectors/{collector_id}/producers", }] -async fn cpapi_collectors_post( - request_context: RequestContext>, - oximeter_info: TypedBody, -) -> Result { - let context = request_context.context(); - let nexus = &context.nexus; - let oximeter_info = oximeter_info.into_inner(); +async fn cpapi_assigned_producers_list( + request_context: RequestContext, + path_params: Path, + query_params: Query, +) -> Result>, HttpError> { + let context = &request_context.context().context; let handler = async { - nexus.upsert_oximeter_collector(&oximeter_info).await?; - Ok(HttpResponseUpdatedNoContent()) + let nexus = &context.nexus; + let collector_id = path_params.into_inner().collector_id; + let query = query_params.into_inner(); + let pagparams = data_page_params_for(&request_context, &query)?; + let opctx = + crate::context::op_context_for_internal_api(&request_context).await; + let producers = nexus + .list_assigned_producers(&opctx, collector_id, &pagparams) + .await?; + Ok(HttpResponseOk(ScanById::results_page( + &query, + producers, + &|_, producer: &ProducerEndpoint| producer.id, + )?)) }; context .internal_latencies @@ -488,19 +439,24 @@ async fn cpapi_collectors_post( .await } -/// Endpoint for oximeter to collect nexus server metrics. +/// Accept a notification of a new oximeter collection server. 
#[endpoint { - method = GET, - path = "/metrics/collect/{producer_id}", -}] -async fn cpapi_metrics_collect( - request_context: RequestContext>, - path_params: Path, -) -> Result, HttpError> { - let context = request_context.context(); - let producer_id = path_params.into_inner().producer_id; - let handler = - async { collect(&context.producer_registry, producer_id).await }; + method = POST, + path = "/metrics/collectors", + }] +async fn cpapi_collectors_post( + request_context: RequestContext, + oximeter_info: TypedBody, +) -> Result { + let context = &request_context.context().context; + let handler = async { + let nexus = &context.nexus; + let oximeter_info = oximeter_info.into_inner(); + let opctx = + crate::context::op_context_for_internal_api(&request_context).await; + nexus.upsert_oximeter_collector(&opctx, &oximeter_info).await?; + Ok(HttpResponseUpdatedNoContent()) + }; context .internal_latencies .instrument_dropshot_handler(&request_context, handler) @@ -513,10 +469,10 @@ async fn cpapi_metrics_collect( path = "/artifacts/{kind}/{name}/{version}", }] async fn cpapi_artifact_download( - request_context: RequestContext>, + request_context: RequestContext, path_params: Path, ) -> Result, HttpError> { - let context = request_context.context(); + let context = &request_context.context().context; let nexus = &context.nexus; let opctx = crate::context::op_context_for_internal_api(&request_context).await; @@ -540,11 +496,11 @@ struct UpstairsPathParam { path = "/crucible/0/upstairs/{upstairs_id}/repair-start", }] async fn cpapi_upstairs_repair_start( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, repair_start_info: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -568,11 +524,11 @@ async fn cpapi_upstairs_repair_start( path = "/crucible/0/upstairs/{upstairs_id}/repair-finish", }] async fn cpapi_upstairs_repair_finish( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, repair_finish_info: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -603,11 +559,11 @@ struct UpstairsRepairPathParam { path = "/crucible/0/upstairs/{upstairs_id}/repair/{repair_id}/progress", }] async fn cpapi_upstairs_repair_progress( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, repair_progress: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -640,11 +596,11 @@ struct UpstairsDownstairsPathParam { path = "/crucible/0/upstairs/{upstairs_id}/downstairs/{downstairs_id}/stop-request", }] async fn cpapi_downstairs_client_stop_request( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, downstairs_client_stop_request: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = path_params.into_inner(); @@ -670,11 +626,11 @@ async fn cpapi_downstairs_client_stop_request( path = "/crucible/0/upstairs/{upstairs_id}/downstairs/{downstairs_id}/stopped", }] async fn cpapi_downstairs_client_stopped( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, downstairs_client_stopped: TypedBody, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let path = 
path_params.into_inner(); @@ -701,10 +657,10 @@ async fn cpapi_downstairs_client_stopped( path = "/sagas", }] async fn saga_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let query = query_params.into_inner(); @@ -733,10 +689,10 @@ struct SagaPathParam { path = "/sagas/{saga_id}", }] async fn saga_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -758,9 +714,9 @@ async fn saga_view( path = "/bgtasks", }] async fn bgtask_list( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; @@ -776,18 +732,24 @@ struct BackgroundTaskPathParam { bgtask_name: String, } +/// Query parameters for Background Task activation requests. +#[derive(Deserialize, JsonSchema)] +struct BackgroundTasksActivateRequest { + bgtask_names: BTreeSet, +} + /// Fetch status of one background task /// /// This is exposed for support and debugging. #[endpoint { method = GET, - path = "/bgtasks/{bgtask_name}", + path = "/bgtasks/view/{bgtask_name}", }] async fn bgtask_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -798,6 +760,27 @@ async fn bgtask_view( apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } +/// Activates one or more background tasks, causing them to be run immediately +/// if idle, or scheduled to run again as soon as possible if already running. 
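(Illustrative request for the activation endpoint below: a POST to `/bgtasks/activate` with a body such as

    { "bgtask_names": [ "inventory_collection" ] }

where `bgtask_names` matches the `BackgroundTasksActivateRequest` struct that follows; "inventory_collection" is an assumed example name, and the actual set of names is whatever background tasks are registered in this Nexus.)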
+#[endpoint { + method = POST, + path = "/bgtasks/activate", +}] +async fn bgtask_activate( + rqctx: RequestContext, + body: TypedBody, +) -> Result { + let apictx = &rqctx.context().context; + let handler = async { + let opctx = crate::context::op_context_for_internal_api(&rqctx).await; + let nexus = &apictx.nexus; + let body = body.into_inner(); + nexus.bgtask_activate(&opctx, body.bgtask_names).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await +} + // NAT RPW internal APIs /// Path parameters for NAT ChangeSet @@ -826,11 +809,11 @@ struct RpwNatQueryParam { path = "/nat/ipv4/changeset/{from_gen}" }] async fn ipv4_nat_changeset( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -859,10 +842,10 @@ async fn ipv4_nat_changeset( path = "/deployment/blueprints/all", }] async fn blueprint_list( - rqctx: RequestContext>, + rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let query = query_params.into_inner(); @@ -885,10 +868,10 @@ async fn blueprint_list( path = "/deployment/blueprints/all/{blueprint_id}", }] async fn blueprint_view( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -905,10 +888,10 @@ async fn blueprint_view( path = "/deployment/blueprints/all/{blueprint_id}", }] async fn blueprint_delete( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, ) -> Result { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -927,16 +910,13 @@ async fn blueprint_delete( path = "/deployment/blueprints/target", }] async fn blueprint_target_view( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; - let target = - nexus.blueprint_target_view(&opctx).await?.ok_or_else(|| { - Error::conflict("no target blueprint has been configured") - })?; + let target = nexus.blueprint_target_view(&opctx).await?; Ok(HttpResponseOk(target)) }; apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await @@ -948,10 +928,10 @@ async fn blueprint_target_view( path = "/deployment/blueprints/target", }] async fn blueprint_target_set( - rqctx: RequestContext>, + rqctx: RequestContext, target: TypedBody, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -968,10 +948,10 @@ async fn blueprint_target_set( path = "/deployment/blueprints/target/enabled", }] async fn blueprint_target_set_enabled( - rqctx: RequestContext>, + rqctx: RequestContext, target: 
TypedBody, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; @@ -984,48 +964,43 @@ async fn blueprint_target_set_enabled( // Generating blueprints -#[derive(Debug, Deserialize, JsonSchema)] -struct CollectionId { - collection_id: Uuid, -} - -/// Generates a new blueprint matching the specified inventory collection +/// Generates a new blueprint for the current system, re-evaluating anything +/// that's changed since the last one was generated #[endpoint { method = POST, - path = "/deployment/blueprints/generate-from-collection", + path = "/deployment/blueprints/regenerate", }] -async fn blueprint_generate_from_collection( - rqctx: RequestContext>, - params: TypedBody, +async fn blueprint_regenerate( + rqctx: RequestContext, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; - let collection_id = params.into_inner().collection_id; - let result = nexus - .blueprint_generate_from_collection(&opctx, collection_id) - .await?; + let result = nexus.blueprint_create_regenerate(&opctx).await?; Ok(HttpResponseOk(result)) }; apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } -/// Generates a new blueprint for the current system, re-evaluating anything -/// that's changed since the last one was generated +/// Imports a client-provided blueprint +/// +/// This is intended for development and support, not end users or operators. #[endpoint { method = POST, - path = "/deployment/blueprints/regenerate", + path = "/deployment/blueprints/import", }] -async fn blueprint_regenerate( - rqctx: RequestContext>, -) -> Result, HttpError> { - let apictx = rqctx.context(); +async fn blueprint_import( + rqctx: RequestContext, + blueprint: TypedBody, +) -> Result { + let apictx = &rqctx.context().context; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; let nexus = &apictx.nexus; - let result = nexus.blueprint_create_regenerate(&opctx).await?; - Ok(HttpResponseOk(result)) + let blueprint = blueprint.into_inner(); + nexus.blueprint_import(&opctx, blueprint).await?; + Ok(HttpResponseUpdatedNoContent()) }; apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } @@ -1036,9 +1011,9 @@ async fn blueprint_regenerate( path = "/sleds/uninitialized", }] async fn sled_list_uninitialized( - rqctx: RequestContext>, + rqctx: RequestContext, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let nexus = &apictx.nexus; let opctx = crate::context::op_context_for_internal_api(&rqctx).await; @@ -1048,6 +1023,11 @@ async fn sled_list_uninitialized( apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } +#[derive(Clone, Debug, Serialize, JsonSchema)] +pub struct SledId { + pub id: SledUuid, +} + /// Add sled to initialized rack // // TODO: In the future this should really be a PUT request, once we resolve @@ -1059,15 +1039,15 @@ async fn sled_list_uninitialized( path = "/sleds/add", }] async fn sled_add( - rqctx: RequestContext>, + rqctx: RequestContext, sled: TypedBody, -) -> Result { - let apictx = rqctx.context(); +) -> Result, HttpError> { + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let handler 
= async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; - nexus.sled_add(&opctx, sled.into_inner()).await?; - Ok(HttpResponseUpdatedNoContent()) + let id = nexus.sled_add(&opctx, sled.into_inner()).await?; + Ok(HttpResponseCreated(SledId { id })) }; apictx.internal_latencies.instrument_dropshot_handler(&rqctx, handler).await } @@ -1083,10 +1063,10 @@ async fn sled_add( path = "/sleds/expunge", }] async fn sled_expunge( - rqctx: RequestContext>, + rqctx: RequestContext, sled: TypedBody, ) -> Result, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let nexus = &apictx.nexus; let handler = async { let opctx = crate::context::op_context_for_internal_api(&rqctx).await; @@ -1109,11 +1089,11 @@ struct ProbePathParam { path = "/probes/{sled}" }] async fn probes_get( - rqctx: RequestContext>, + rqctx: RequestContext, path_params: Path, query_params: Query, ) -> Result>, HttpError> { - let apictx = rqctx.context(); + let apictx = &rqctx.context().context; let handler = async { let query = query_params.into_inner(); let path = path_params.into_inner(); diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index bd5a13dfd1..6a23048693 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -9,8 +9,6 @@ #![allow(rustdoc::private_intra_doc_links)] // TODO(#40): Remove this exception once resolved. #![allow(clippy::unnecessary_wraps)] -// Clippy's style lints are useful, but not worth running automatically. -#![allow(clippy::style)] pub mod app; // Public for documentation examples mod cidata; @@ -22,21 +20,30 @@ mod saga_interface; pub use app::test_interfaces::TestInterfaces; pub use app::Nexus; +use context::ApiContext; use context::ServerContext; use dropshot::ConfigDropshot; use external_api::http_entrypoints::external_api; use internal_api::http_entrypoints::internal_api; use nexus_config::NexusConfig; +use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::Blueprint; +use nexus_types::deployment::BlueprintZoneFilter; +use nexus_types::deployment::BlueprintZoneType; use nexus_types::external_api::views::SledProvisionPolicy; -use nexus_types::internal_api::params::ServiceKind; +use nexus_types::internal_api::params::{ + PhysicalDiskPutRequest, ZpoolPutRequest, +}; use nexus_types::inventory::Collection; use omicron_common::address::IpRange; use omicron_common::api::external::Error; +use omicron_common::api::internal::nexus::{ProducerEndpoint, ProducerKind}; use omicron_common::api::internal::shared::{ - ExternalPortDiscovery, RackNetworkConfig, SwitchLocation, + AllowedSourceIps, ExternalPortDiscovery, RackNetworkConfig, SwitchLocation, }; use omicron_common::FileKv; +use oximeter::types::ProducerRegistry; +use oximeter_producer::Server as ProducerServer; use slog::Logger; use std::collections::HashMap; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV6}; @@ -50,7 +57,7 @@ extern crate slog; /// to stdout. pub fn run_openapi_external() -> Result<(), String> { external_api() - .openapi("Oxide Region API", "20240327.0") + .openapi("Oxide Region API", "20240502.0") .description("API for interacting with the Oxide control plane") .contact_url("https://oxide.computer") .contact_email("api@oxide.computer") @@ -71,11 +78,10 @@ pub fn run_openapi_internal() -> Result<(), String> { /// A partially-initialized Nexus server, which exposes an internal interface, /// but is not ready to receive external requests. pub struct InternalServer { - /// shared state used by API request handlers - apictx: Arc, + /// Shared server state. 
+ apictx: ApiContext, /// dropshot server for internal API - http_server_internal: dropshot::HttpServer>, - + http_server_internal: dropshot::HttpServer, config: NexusConfig, log: Logger, } @@ -91,31 +97,39 @@ impl InternalServer { let ctxlog = log.new(o!("component" => "ServerContext")); - let apictx = - ServerContext::new(config.deployment.rack_id, ctxlog, &config) - .await?; + let context = ApiContext::for_internal( + config.deployment.rack_id, + ctxlog, + &config, + ) + .await?; // Launch the internal server. let server_starter_internal = dropshot::HttpServerStarter::new( &config.deployment.dropshot_internal, internal_api(), - Arc::clone(&apictx), + context.clone(), &log.new(o!("component" => "dropshot_internal")), ) .map_err(|error| format!("initializing internal server: {}", error))?; let http_server_internal = server_starter_internal.start(); - Ok(Self { apictx, http_server_internal, config: config.clone(), log }) + Ok(Self { + apictx: context, + http_server_internal, + config: config.clone(), + log, + }) } } -type DropshotServer = dropshot::HttpServer>; +type DropshotServer = dropshot::HttpServer; /// Packages up a [`Nexus`], running both external and internal HTTP API servers /// wired up to Nexus pub struct Server { /// shared state used by API request handlers - apictx: Arc, + apictx: ApiContext, } impl Server { @@ -126,11 +140,17 @@ impl Server { let config = internal.config; // Wait until RSS handoff completes. - let opctx = apictx.nexus.opctx_for_service_balancer(); - apictx.nexus.await_rack_initialization(&opctx).await; + let opctx = apictx.context.nexus.opctx_for_service_balancer(); + apictx.context.nexus.await_rack_initialization(&opctx).await; + + // While we've started our internal server, we need to wait until we've + // definitely implemented our source IP allowlist for making requests to + // the external server we're about to start. + apictx.context.nexus.await_ip_allowlist_plumbing().await; // Launch the external server. let tls_config = apictx + .context .nexus .external_tls_config(config.deployment.dropshot_external.tls) .await; @@ -156,7 +176,7 @@ impl Server { dropshot::HttpServerStarter::new_with_tls( &config.deployment.dropshot_external.dropshot, external_api(), - Arc::clone(&apictx), + apictx.for_external(), &log.new(o!("component" => "dropshot_external")), tls_config.clone().map(dropshot::ConfigTls::Dynamic), ) @@ -170,7 +190,7 @@ impl Server { dropshot::HttpServerStarter::new_with_tls( &techport_server_config, external_api(), - Arc::clone(&apictx), + apictx.for_techport(), &log.new(o!("component" => "dropshot_external_techport")), tls_config.map(dropshot::ConfigTls::Dynamic), ) @@ -180,20 +200,30 @@ impl Server { server_starter_external_techport.start() }; + // Start the metric producer server that oximeter uses to fetch our + // metric data. + let producer_server = start_producer_server( + &log, + &apictx.context.producer_registry, + http_server_internal.local_addr(), + )?; + apictx + .context .nexus .set_servers( http_server_external, http_server_techport_external, http_server_internal, + producer_server, ) .await; let server = Server { apictx: apictx.clone() }; Ok(server) } - pub fn apictx(&self) -> &Arc { - &self.apictx + pub fn server_context(&self) -> &Arc { + &self.apictx.context } /// Wait for the given server to shut down @@ -202,18 +232,7 @@ impl Server { /// immediately after calling `start()`, the program will block indefinitely /// or until something else initiates a graceful shutdown. 
pub(crate) async fn wait_for_finish(self) -> Result<(), String> { - self.apictx.nexus.wait_for_shutdown().await - } - - /// Register the Nexus server as a metric producer with oximeter. - pub async fn register_as_producer(&self) { - let nexus = &self.apictx.nexus; - - nexus - .register_as_producer( - nexus.get_internal_server_address().await.unwrap(), - ) - .await; + self.server_context().nexus.wait_for_shutdown().await } } @@ -227,7 +246,7 @@ impl nexus_test_interface::NexusServer for Server { ) -> (InternalServer, SocketAddr) { let internal_server = InternalServer::start(config, &log).await.unwrap(); - internal_server.apictx.nexus.wait_for_populate().await.unwrap(); + internal_server.apictx.context.nexus.wait_for_populate().await.unwrap(); let addr = internal_server.http_server_internal.local_addr(); (internal_server, addr) } @@ -236,7 +255,10 @@ impl nexus_test_interface::NexusServer for Server { internal_server: InternalServer, config: &NexusConfig, blueprint: Blueprint, - services: Vec, + physical_disks: Vec< + nexus_types::internal_api::params::PhysicalDiskPutRequest, + >, + zpools: Vec, datasets: Vec, internal_dns_zone_config: nexus_types::internal_api::params::DnsConfigParams, external_dns_zone_name: &str, @@ -247,7 +269,8 @@ impl nexus_test_interface::NexusServer for Server { // Perform the "handoff from RSS". // // However, RSS isn't running, so we'll do the handoff ourselves. - let opctx = internal_server.apictx.nexus.opctx_for_internal_api(); + let opctx = + internal_server.apictx.context.nexus.opctx_for_internal_api(); // Allocation of the initial Nexus's external IP is a little funny. In // a real system, it'd be allocated by RSS and provided with the rack @@ -262,26 +285,31 @@ impl nexus_test_interface::NexusServer for Server { // it's 127.0.0.1, having come straight from the stock testing config // file. Whatever it is, we fake up an IP pool range for use by system // services that includes solely this IP. - let internal_services_ip_pool_ranges = services - .iter() - .filter_map(|s| match s.kind { - ServiceKind::ExternalDns { external_address, .. } - | ServiceKind::Nexus { external_address, .. } => { - Some(IpRange::from(external_address)) - } + let internal_services_ip_pool_ranges = blueprint + .all_omicron_zones(BlueprintZoneFilter::ShouldBeExternallyReachable) + .filter_map(|(_, zc)| match &zc.zone_type { + BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { dns_address, .. }, + ) => Some(IpRange::from(dns_address.addr.ip())), + BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + external_ip, + .. + }) => Some(IpRange::from(external_ip.ip)), _ => None, }) .collect(); internal_server .apictx + .context .nexus .rack_initialize( &opctx, config.deployment.rack_id, internal_api::params::RackInitializationRequest { blueprint, - services, + physical_disks, + zpools, datasets, internal_services_ip_pool_ranges, certs, @@ -304,6 +332,7 @@ impl nexus_test_interface::NexusServer for Server { bgp: Vec::new(), bfd: Vec::new(), }, + allowed_source_ips: AllowedSourceIps::Any, }, ) .await @@ -315,7 +344,7 @@ impl nexus_test_interface::NexusServer for Server { // Historically, tests have assumed that there's only one provisionable // sled, and that's convenient for a lot of purposes. Mark our second // sled non-provisionable. 
- let nexus = &rv.apictx().nexus; + let nexus = &rv.server_context().nexus; nexus .sled_set_provision_policy( &opctx, @@ -332,23 +361,41 @@ impl nexus_test_interface::NexusServer for Server { } async fn get_http_server_external_address(&self) -> SocketAddr { - self.apictx.nexus.get_external_server_address().await.unwrap() + self.apictx.context.nexus.get_external_server_address().await.unwrap() + } + + async fn get_http_server_techport_address(&self) -> SocketAddr { + self.apictx.context.nexus.get_techport_server_address().await.unwrap() } async fn get_http_server_internal_address(&self) -> SocketAddr { - self.apictx.nexus.get_internal_server_address().await.unwrap() + self.apictx.context.nexus.get_internal_server_address().await.unwrap() } async fn upsert_crucible_dataset( &self, - id: Uuid, - zpool_id: Uuid, + physical_disk: PhysicalDiskPutRequest, + zpool: ZpoolPutRequest, + dataset_id: Uuid, address: SocketAddrV6, ) { + let opctx = self.apictx.context.nexus.opctx_for_internal_api(); + self.apictx + .context + .nexus + .upsert_physical_disk(&opctx, physical_disk) + .await + .unwrap(); + + let zpool_id = zpool.id; + + self.apictx.context.nexus.upsert_zpool(&opctx, zpool).await.unwrap(); + self.apictx + .context .nexus .upsert_dataset( - id, + dataset_id, zpool_id, address, nexus_db_queries::db::model::DatasetKind::Crucible, @@ -360,7 +407,7 @@ impl nexus_test_interface::NexusServer for Server { async fn inventory_collect_and_get_latest_collection( &self, ) -> Result, Error> { - let nexus = &self.apictx.nexus; + let nexus = &self.apictx.context.nexus; nexus.activate_inventory_collection(); @@ -370,6 +417,7 @@ impl nexus_test_interface::NexusServer for Server { async fn close(mut self) { self.apictx + .context .nexus .close_servers() .await @@ -397,6 +445,41 @@ pub async fn run_server(config: &NexusConfig) -> Result<(), String> { } let internal_server = InternalServer::start(config, &log).await?; let server = Server::start(internal_server).await?; - server.register_as_producer().await; server.wait_for_finish().await } + +/// Create a new metric producer server. +fn start_producer_server( + log: &Logger, + registry: &ProducerRegistry, + nexus_addr: SocketAddr, +) -> Result { + // The producer server should listen on any available port, using the + // same IP as the main Dropshot server. + let address = SocketAddr::new(nexus_addr.ip(), 0); + + // Create configuration for the server. + // + // Note that because we're registering with _ourselves_, the listening + // address for the producer server and the registration address use the + // same IP. + let config = oximeter_producer::Config { + server_info: ProducerEndpoint { + id: registry.producer_id(), + kind: ProducerKind::Service, + address, + interval: std::time::Duration::from_secs(10), + }, + // Some(_) here prevents DNS resolution, using our own address to + // register. + registration_address: Some(nexus_addr), + request_body_max_bytes: 1024 * 1024 * 10, + log: oximeter_producer::LogConfig::Logger( + log.new(o!("component" => "nexus-producer-server")), + ), + }; + + // Start the server, which will run the registration in a task. 
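// The `start_producer_server` helper above derives the producer's listen
// address from the Nexus internal address: same IP, port 0 so the OS picks a
// free port, since Nexus registers with itself rather than going through
// DNS. A trivial self-contained sketch of that derivation:

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

fn producer_listen_addr(nexus_internal_addr: SocketAddr) -> SocketAddr {
    // bind to port 0 on the same IP; the OS assigns the actual port
    SocketAddr::new(nexus_internal_addr.ip(), 0)
}

fn main() {
    let nexus = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 12221);
    let producer = producer_listen_addr(nexus);
    assert_eq!(producer.ip(), nexus.ip());
    assert_eq!(producer.port(), 0);
}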
+ ProducerServer::with_registry(registry.clone(), &config) + .map_err(|e| e.to_string()) +} diff --git a/nexus/test-interface/Cargo.toml b/nexus/test-interface/Cargo.toml index 004ce28545..03e38d3687 100644 --- a/nexus/test-interface/Cargo.toml +++ b/nexus/test-interface/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] async-trait.workspace = true nexus-config.workspace = true diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs index 2e3428a1dd..06c5570b7b 100644 --- a/nexus/test-interface/src/lib.rs +++ b/nexus/test-interface/src/lib.rs @@ -34,6 +34,9 @@ use async_trait::async_trait; use nexus_config::NexusConfig; use nexus_types::deployment::Blueprint; +use nexus_types::internal_api::params::{ + PhysicalDiskPutRequest, ZpoolPutRequest, +}; use nexus_types::inventory::Collection; use omicron_common::api::external::Error; use slog::Logger; @@ -54,7 +57,8 @@ pub trait NexusServer: Send + Sync + 'static { internal_server: Self::InternalServer, config: &NexusConfig, blueprint: Blueprint, - services: Vec, + physical_disks: Vec, + zpools: Vec, datasets: Vec, internal_dns_config: nexus_types::internal_api::params::DnsConfigParams, external_dns_zone_name: &str, @@ -64,6 +68,7 @@ pub trait NexusServer: Send + Sync + 'static { ) -> Self; async fn get_http_server_external_address(&self) -> SocketAddr; + async fn get_http_server_techport_address(&self) -> SocketAddr; async fn get_http_server_internal_address(&self) -> SocketAddr; // Previously, as a dataset was created (within the sled agent), @@ -75,6 +80,10 @@ pub trait NexusServer: Send + Sync + 'static { // control over dataset provisioning is shifting to Nexus. There is // a short window where RSS controls dataset provisioning, but afterwards, // Nexus should be calling the shots on "when to provision datasets". + // Furthermore, with https://github.com/oxidecomputer/omicron/pull/5172, + // physical disk and zpool provisioning has already moved into Nexus. This + // provides a "back-door" for tests to control the set of control plane + // disks that are considered active. // // For test purposes, we have many situations where we want to carve up // zpools and datasets precisely for disk-based tests. As a result, we @@ -88,8 +97,9 @@ pub trait NexusServer: Send + Sync + 'static { // However, doing so would let us remove this test-only API. async fn upsert_crucible_dataset( &self, - id: Uuid, - zpool_id: Uuid, + physical_disk: PhysicalDiskPutRequest, + zpool: ZpoolPutRequest, + dataset_id: Uuid, address: SocketAddrV6, ); diff --git a/nexus/test-utils-macros/Cargo.toml b/nexus/test-utils-macros/Cargo.toml index 5ed57b9c4a..d5094f84eb 100644 --- a/nexus/test-utils-macros/Cargo.toml +++ b/nexus/test-utils-macros/Cargo.toml @@ -7,6 +7,9 @@ license = "MPL-2.0" [lib] proc-macro = true +[lints] +workspace = true + [dependencies] quote.workspace = true syn = { workspace = true, features = [ "fold", "parsing" ] } diff --git a/nexus/test-utils-macros/src/lib.rs b/nexus/test-utils-macros/src/lib.rs index ac21768641..2b87b7a030 100644 --- a/nexus/test-utils-macros/src/lib.rs +++ b/nexus/test-utils-macros/src/lib.rs @@ -88,7 +88,7 @@ pub fn nexus_test(attrs: TokenStream, input: TokenStream) -> TokenStream { syn::ReturnType::Default => true, syn::ReturnType::Type(_, ref t) => { if let syn::Type::Tuple(syn::TypeTuple { elems, .. 
}) = &**t { - elems.len() == 0 + elems.is_empty() } else { false } diff --git a/nexus/test-utils/Cargo.toml b/nexus/test-utils/Cargo.toml index 861527108b..0eab038f91 100644 --- a/nexus/test-utils/Cargo.toml +++ b/nexus/test-utils/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true bytes.workspace = true @@ -30,6 +33,7 @@ omicron-common.workspace = true omicron-passwords.workspace = true omicron-sled-agent.workspace = true omicron-test-utils.workspace = true +omicron-uuid-kinds.workspace = true oximeter.workspace = true oximeter-collector.workspace = true oximeter-producer.workspace = true diff --git a/nexus/test-utils/src/http_testing.rs b/nexus/test-utils/src/http_testing.rs index ae62218c93..90c7dd43bc 100644 --- a/nexus/test-utils/src/http_testing.rs +++ b/nexus/test-utils/src/http_testing.rs @@ -65,8 +65,10 @@ pub struct RequestBuilder<'a> { expected_status: Option, allowed_headers: Option>, - // doesn't need Option<> because if it's empty, we don't check anything - expected_response_headers: http::HeaderMap, + // if an entry's value is `None`, we verify the header exists in the + // response, but we don't check the value + expected_response_headers: + http::HeaderMap>, } impl<'a> RequestBuilder<'a> { @@ -85,7 +87,6 @@ impl<'a> RequestBuilder<'a> { body: hyper::Body::empty(), expected_status: None, allowed_headers: Some(vec![ - http::header::CACHE_CONTROL, http::header::CONTENT_ENCODING, http::header::CONTENT_LENGTH, http::header::CONTENT_TYPE, @@ -94,7 +95,7 @@ impl<'a> RequestBuilder<'a> { http::header::SET_COOKIE, http::header::HeaderName::from_static("x-request-id"), ]), - expected_response_headers: http::HeaderMap::new(), + expected_response_headers: http::HeaderMap::default(), error: None, allow_non_dropshot_errors: false, } @@ -213,21 +214,6 @@ impl<'a> RequestBuilder<'a> { self } - /// Record a list of header names allowed in the response - /// - /// If this function is used, then [`Self::execute()`] will check each header in - /// the response against this list and raise an error if a header name is - /// found that's not in this list. - pub fn expect_allowed_headers< - I: IntoIterator, - >( - mut self, - allowed_headers: I, - ) -> Self { - self.allowed_headers = Some(allowed_headers.into_iter().collect()); - self - } - /// Add header and value to check for at execution time /// /// Behaves like header() rather than expect_allowed_headers() in that it @@ -248,7 +234,7 @@ impl<'a> RequestBuilder<'a> { self.error = Some(error); } Ok((name, value)) => { - self.expected_response_headers.append(name, value); + self.expected_response_headers.append(name, Some(value)); } } self @@ -279,6 +265,21 @@ impl<'a> RequestBuilder<'a> { ) } + /// Expect a successful console asset response. + pub fn expect_console_asset(mut self) -> Self { + let headers = [ + http::header::CACHE_CONTROL, + http::header::CONTENT_SECURITY_POLICY, + http::header::X_CONTENT_TYPE_OPTIONS, + http::header::X_FRAME_OPTIONS, + ]; + self.allowed_headers.as_mut().unwrap().extend(headers.clone()); + for header in headers { + self.expected_response_headers.entry(header).or_insert(None); + } + self.expect_status(Some(http::StatusCode::OK)) + } + /// Allow non-dropshot error responses, i.e., errors that are not compatible /// with `dropshot::HttpErrorResponseBody`. 
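// The request builder above now stores an `Option<HeaderValue>` per expected
// response header: `None` only asserts the header is present, `Some(v)` also
// compares the value. A self-contained sketch of that check (a simplified
// stand-in for the builder's own verification, assuming the `http` crate):

use http::header::{HeaderMap, HeaderValue};

fn check_headers(
    expected: &HeaderMap<Option<HeaderValue>>,
    actual: &HeaderMap<HeaderValue>,
) -> Result<(), String> {
    for (name, want) in expected {
        let got = actual
            .get(name)
            .ok_or_else(|| format!("response did not contain expected header {name:?}"))?;
        if let Some(want) = want {
            if got != want {
                return Err(format!(
                    "header {name:?}: got {got:?} instead of expected {want:?}"
                ));
            }
        }
    }
    Ok(())
}

fn main() {
    let mut expected = HeaderMap::default();
    // presence-only check
    expected.insert(http::header::CACHE_CONTROL, None);
    // exact-value check
    expected.insert(
        http::header::CONTENT_TYPE,
        Some(HeaderValue::from_static("text/html; charset=utf-8")),
    );

    let mut actual = HeaderMap::new();
    actual.insert(http::header::CACHE_CONTROL, HeaderValue::from_static("no-store"));
    actual.insert(
        http::header::CONTENT_TYPE,
        HeaderValue::from_static("text/html; charset=utf-8"),
    );

    assert!(check_headers(&expected, &actual).is_ok());
}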
pub fn allow_non_dropshot_errors(mut self) -> Self { @@ -357,14 +358,17 @@ impl<'a> RequestBuilder<'a> { "response did not contain expected header {:?}", header_name ); - let actual_value = headers.get(header_name).unwrap(); - ensure!( - actual_value == expected_value, - "response contained expected header {:?}, but with value {:?} instead of expected {:?}", - header_name, - actual_value, - expected_value, - ); + if let Some(expected_value) = expected_value { + let actual_value = headers.get(header_name).unwrap(); + ensure!( + actual_value == expected_value, + "response contained expected header {:?}, but with value \ + {:?} instead of expected {:?}", + header_name, + actual_value, + expected_value, + ); + } } // Sanity check the Date header in the response. This check assumes @@ -424,7 +428,7 @@ impl<'a> RequestBuilder<'a> { // the body. if status == http::StatusCode::NO_CONTENT { ensure!( - response_body.len() == 0, + response_body.is_empty(), "expected empty response for 204 status code" ) } @@ -587,6 +591,12 @@ impl<'a> NexusRequest<'a> { self } + /// Tells the request builder to expect headers specific to console assets. + pub fn console_asset(mut self) -> Self { + self.request_builder = self.request_builder.expect_console_asset(); + self + } + /// See [`RequestBuilder::execute()`]. pub async fn execute(self) -> Result { self.request_builder.execute().await diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index cc9c8c43df..deb43c42b6 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -11,7 +11,6 @@ use chrono::Utc; use dns_service_client::types::DnsConfigParams; use dropshot::test_util::ClientTestContext; use dropshot::test_util::LogContext; -use dropshot::ConfigDropshot; use dropshot::ConfigLogging; use dropshot::ConfigLoggingLevel; use dropshot::HandlerTaskMode; @@ -25,22 +24,23 @@ use nexus_config::MgdConfig; use nexus_config::NexusConfig; use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_test_interface::NexusServer; +use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; +use nexus_types::deployment::BlueprintZoneType; use nexus_types::deployment::BlueprintZonesConfig; +use nexus_types::deployment::CockroachDbPreserveDowngrade; +use nexus_types::deployment::OmicronZoneExternalFloatingAddr; +use nexus_types::deployment::OmicronZoneExternalFloatingIp; use nexus_types::external_api::params::UserId; +use nexus_types::external_api::views::SledState; use nexus_types::internal_api::params::Certificate; use nexus_types::internal_api::params::DatasetCreateRequest; use nexus_types::internal_api::params::DatasetKind; use nexus_types::internal_api::params::DatasetPutRequest; use nexus_types::internal_api::params::RecoverySiloConfig; -use nexus_types::internal_api::params::ServiceKind; -use nexus_types::internal_api::params::ServiceNic; -use nexus_types::internal_api::params::ServicePutRequest; -use nexus_types::inventory::OmicronZoneConfig; use nexus_types::inventory::OmicronZoneDataset; -use nexus_types::inventory::OmicronZoneType; use nexus_types::inventory::OmicronZonesConfig; use omicron_common::address::DNS_OPTE_IPV4_SUBNET; use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; @@ -55,9 +55,17 @@ use omicron_common::api::internal::shared::NetworkInterfaceKind; use omicron_common::api::internal::shared::SwitchLocation; use omicron_sled_agent::sim; use omicron_test_utils::dev; +use 
omicron_uuid_kinds::ExternalIpUuid; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::ZpoolUuid; use oximeter_collector::Oximeter; use oximeter_producer::LogConfig; use oximeter_producer::Server as ProducerServer; +use sled_agent_client::types::EarlyNetworkConfig; +use sled_agent_client::types::EarlyNetworkConfigBody; +use sled_agent_client::types::RackNetworkConfigV1; use slog::{debug, error, o, Logger}; use std::collections::BTreeMap; use std::collections::HashMap; @@ -82,6 +90,7 @@ pub const SLED_AGENT_UUID: &str = "b6d65341-167c-41df-9b5c-41cded99c229"; pub const SLED_AGENT2_UUID: &str = "039be560-54cc-49e3-88df-1a29dadbf913"; pub const RACK_UUID: &str = "c19a698f-c6f9-4a17-ae30-20d711b8f7dc"; pub const SWITCH_UUID: &str = "dae4e1f1-410e-4314-bff1-fec0504be07e"; +pub const PHYSICAL_DISK_UUID: &str = "fbf4e1f1-410e-4314-bff1-fec0504be07e"; pub const OXIMETER_UUID: &str = "39e6175b-4df2-4730-b11d-cbc1e60a2e78"; pub const PRODUCER_UUID: &str = "a6458b7d-87c3-4483-be96-854d814c20de"; pub const RACK_SUBNET: &str = "fd00:1122:3344:0100::/56"; @@ -96,6 +105,7 @@ pub const TEST_SUITE_PASSWORD: &str = "oxide"; pub struct ControlPlaneTestContext { pub start_time: chrono::DateTime, pub external_client: ClientTestContext, + pub techport_client: ClientTestContext, pub internal_client: ClientTestContext, pub server: N, pub database: dev::db::CockroachInstance, @@ -113,6 +123,7 @@ pub struct ControlPlaneTestContext { pub external_dns_zone_name: String, pub external_dns: dns_server::TransientServer, pub internal_dns: dns_server::TransientServer, + pub initial_blueprint_id: Uuid, pub silo_name: Name, pub user_name: UserId, } @@ -180,7 +191,6 @@ pub async fn test_setup( } struct RackInitRequestBuilder { - services: Vec, datasets: Vec, internal_dns_config: internal_dns::DnsConfigBuilder, mac_addrs: Box + Send>, @@ -189,31 +199,18 @@ struct RackInitRequestBuilder { impl RackInitRequestBuilder { fn new() -> Self { Self { - services: vec![], datasets: vec![], internal_dns_config: internal_dns::DnsConfigBuilder::new(), mac_addrs: Box::new(MacAddr::iter_system()), } } - // Keeps track of: - // - The "ServicePutRequest" (for handoff to Nexus) - // - The internal DNS configuration for this service - fn add_service_with_id( + fn add_service_to_dns( &mut self, - zone_id: Uuid, + zone_id: OmicronZoneUuid, address: SocketAddrV6, - kind: ServiceKind, service_name: internal_dns::ServiceName, - sled_id: Uuid, ) { - self.services.push(ServicePutRequest { - address, - kind, - service_id: zone_id, - sled_id, - zone_id: Some(zone_id), - }); let zone = self .internal_dns_config .host_zone(zone_id, *address.ip()) @@ -223,41 +220,29 @@ impl RackInitRequestBuilder { .expect("Failed to set up DNS for {kind}"); } - fn add_service_without_dns( - &mut self, - zone_id: Uuid, - address: SocketAddrV6, - kind: ServiceKind, - sled_id: Uuid, - ) { - self.services.push(ServicePutRequest { - address, - kind, - service_id: zone_id, - sled_id, - zone_id: Some(zone_id), - }); - } - // Keeps track of: // - The "DatasetPutRequest" (for handoff to Nexus) // - The internal DNS configuration for this service fn add_dataset( &mut self, - zpool_id: Uuid, + zpool_id: ZpoolUuid, dataset_id: Uuid, address: SocketAddrV6, kind: DatasetKind, service_name: internal_dns::ServiceName, ) { self.datasets.push(DatasetCreateRequest { - zpool_id, + zpool_id: zpool_id.into_untyped_uuid(), dataset_id, request: DatasetPutRequest { address, kind }, }); let zone = self 
.internal_dns_config - .host_zone(dataset_id, *address.ip()) + .host_zone( + // TODO-cleanup use TypedUuid everywhere + OmicronZoneUuid::from_untyped_uuid(dataset_id), + *address.ip(), + ) .expect("Failed to set up DNS for {kind}"); self.internal_dns_config .service_backend_zone(service_name, &zone, address.port()) @@ -274,6 +259,7 @@ pub struct ControlPlaneTestContextBuilder<'a, N: NexusServer> { pub logctx: LogContext, pub external_client: Option, + pub techport_client: Option, pub internal_client: Option, pub server: Option, @@ -298,8 +284,9 @@ pub struct ControlPlaneTestContextBuilder<'a, N: NexusServer> { pub external_dns: Option, pub internal_dns: Option, dns_config: Option, - omicron_zones: Vec, - omicron_zones2: Vec, + initial_blueprint_id: Option, + blueprint_zones: Vec, + blueprint_zones2: Vec, pub silo_name: Option, pub user_name: Option, @@ -323,6 +310,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { start_time, logctx, external_client: None, + techport_client: None, internal_client: None, server: None, database: None, @@ -342,8 +330,9 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { external_dns: None, internal_dns: None, dns_config: None, - omicron_zones: Vec::new(), - omicron_zones2: Vec::new(), + initial_blueprint_id: None, + blueprint_zones: Vec::new(), + blueprint_zones2: Vec::new(), silo_name: None, user_name: None, } @@ -414,7 +403,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .parse::() .expect("Failed to parse port"); - let zpool_id = Uuid::new_v4(); + let zpool_id = ZpoolUuid::new_v4(); let dataset_id = Uuid::new_v4(); eprintln!("DB address: {}", address); self.rack_init_builder.add_dataset( @@ -428,13 +417,16 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .to_string() .parse() .unwrap(); - self.omicron_zones.push(OmicronZoneConfig { - id: dataset_id, + self.blueprint_zones.push(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: OmicronZoneUuid::from_untyped_uuid(dataset_id), underlay_address: *address.ip(), - zone_type: OmicronZoneType::CockroachDb { - address: address.to_string(), - dataset: OmicronZoneDataset { pool_name }, - }, + zone_type: BlueprintZoneType::CockroachDb( + blueprint_zone_type::CockroachDb { + address, + dataset: OmicronZoneDataset { pool_name }, + }, + ), }); self.database = Some(database); } @@ -451,7 +443,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .unwrap(); let port = clickhouse.port(); - let zpool_id = Uuid::new_v4(); + let zpool_id = ZpoolUuid::new_v4(); let dataset_id = Uuid::new_v4(); let address = SocketAddrV6::new(Ipv6Addr::LOCALHOST, port, 0, 0); self.rack_init_builder.add_dataset( @@ -477,13 +469,16 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .to_string() .parse() .unwrap(); - self.omicron_zones.push(OmicronZoneConfig { - id: dataset_id, + self.blueprint_zones.push(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, + id: OmicronZoneUuid::from_untyped_uuid(dataset_id), underlay_address: *address.ip(), - zone_type: OmicronZoneType::Clickhouse { - address: address.to_string(), - dataset: OmicronZoneDataset { pool_name }, - }, + zone_type: BlueprintZoneType::Clickhouse( + blueprint_zone_type::Clickhouse { + address, + dataset: OmicronZoneDataset { pool_name }, + }, + ), }); } @@ -524,19 +519,6 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { // NOTE: If dendrite is started after Nexus, this is ignored. 
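// The builder code in this area moves from bare `Uuid`s to typed IDs
// (`ZpoolUuid`, `OmicronZoneUuid`, ...), converting back to untyped UUIDs
// only at boundaries that still expect them. A minimal sketch of that
// newtype pattern, with a local type standing in for the
// `omicron_uuid_kinds` ones (assumes the `uuid` crate with the v4 feature):

use uuid::Uuid;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ZpoolId(Uuid);

impl ZpoolId {
    fn new_v4() -> Self {
        Self(Uuid::new_v4())
    }
    fn from_untyped_uuid(id: Uuid) -> Self {
        Self(id)
    }
    fn into_untyped_uuid(self) -> Uuid {
        self.0
    }
}

fn main() {
    let typed = ZpoolId::new_v4();
    // round-trips losslessly, but a zpool ID can no longer be confused with
    // another kind of ID at compile time
    assert_eq!(ZpoolId::from_untyped_uuid(typed.into_untyped_uuid()), typed);
}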
let config = DpdConfig { address: std::net::SocketAddr::V6(address) }; self.config.pkg.dendrite.insert(switch_location, config); - - let sled_id = Uuid::parse_str(match switch_location { - SwitchLocation::Switch0 => SLED_AGENT_UUID, - SwitchLocation::Switch1 => SLED_AGENT2_UUID, - }) - .unwrap(); - - self.rack_init_builder.add_service_without_dns( - sled_id, - address, - ServiceKind::Dendrite, - sled_id, - ); } pub async fn start_mgd(&mut self, switch_location: SwitchLocation) { @@ -553,19 +535,6 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { let config = MgdConfig { address: std::net::SocketAddr::V6(address) }; self.config.pkg.mgd.insert(switch_location, config); - - let sled_id = Uuid::parse_str(match switch_location { - SwitchLocation::Switch0 => SLED_AGENT_UUID, - SwitchLocation::Switch1 => SLED_AGENT2_UUID, - }) - .unwrap(); - - self.rack_init_builder.add_service_without_dns( - sled_id, - address, - ServiceKind::Mgd, - sled_id, - ); } pub async fn record_switch_dns(&mut self) { @@ -631,9 +600,8 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { // Set up a test metric producer server let producer_id = Uuid::parse_str(PRODUCER_UUID).unwrap(); - let producer = start_producer_server(nexus_internal_addr, producer_id) - .await - .unwrap(); + let producer = + start_producer_server(nexus_internal_addr, producer_id).unwrap(); register_test_producer(&producer).unwrap(); self.producer = Some(producer); @@ -674,7 +642,6 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { 0, ); - let sled_id = Uuid::parse_str(SLED_AGENT_UUID).unwrap(); let mac = self .rack_init_builder .mac_addrs @@ -682,43 +649,39 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .expect("ran out of MAC addresses"); let external_address = self.config.deployment.dropshot_external.dropshot.bind_address.ip(); - let nexus_id = self.config.deployment.id; - self.rack_init_builder.add_service_with_id( + let nexus_id = + OmicronZoneUuid::from_untyped_uuid(self.config.deployment.id); + self.rack_init_builder.add_service_to_dns( nexus_id, address, - ServiceKind::Nexus { - external_address, - nic: ServiceNic { - id: Uuid::new_v4(), - name: "nexus".parse().unwrap(), - ip: NEXUS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) - .unwrap() - .into(), - mac, - slot: 0, - }, - }, internal_dns::ServiceName::Nexus, - sled_id, ); - self.omicron_zones.push(OmicronZoneConfig { + self.blueprint_zones.push(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, id: nexus_id, underlay_address: *address.ip(), - zone_type: OmicronZoneType::Nexus { + zone_type: BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { external_dns_servers: self .config .deployment .external_dns_servers .clone(), - external_ip: external_address, + external_ip: OmicronZoneExternalFloatingIp { + id: ExternalIpUuid::new_v4(), + ip: external_address, + }, external_tls: self.config.deployment.dropshot_external.tls, - internal_address: address.to_string(), + internal_address: address, nic: NetworkInterface { id: Uuid::new_v4(), - ip: external_address, - kind: NetworkInterfaceKind::Service { id: nexus_id }, + ip: NEXUS_OPTE_IPV4_SUBNET + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) + .unwrap() + .into(), + kind: NetworkInterfaceKind::Service { + id: nexus_id.into_untyped_uuid(), + }, mac, name: format!("nexus-{}", nexus_id).parse().unwrap(), primary: true, @@ -726,7 +689,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { subnet: 
(*NEXUS_OPTE_IPV4_SUBNET).into(), vni: Vni::SERVICES_VNI, }, - }, + }), }); self.nexus_internal = Some(nexus_internal); @@ -749,8 +712,11 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { log.clone(), ); - let dns_config = - self.rack_init_builder.internal_dns_config.clone().build(); + let dns_config = self + .rack_init_builder + .internal_dns_config + .clone() + .build_full_config_for_initial_generation(); slog::info!(log, "DNS population: {:#?}", dns_config); dns_config_client.dns_config_put(&dns_config).await.expect( @@ -793,45 +759,49 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { let blueprint = { let mut blueprint_zones = BTreeMap::new(); + let mut sled_state = BTreeMap::new(); for (maybe_sled_agent, zones) in [ - (self.sled_agent.as_ref(), &self.omicron_zones), - (self.sled_agent2.as_ref(), &self.omicron_zones2), + (self.sled_agent.as_ref(), &self.blueprint_zones), + (self.sled_agent2.as_ref(), &self.blueprint_zones2), ] { if let Some(sa) = maybe_sled_agent { + let sled_id = SledUuid::from_untyped_uuid(sa.sled_agent.id); blueprint_zones.insert( - sa.sled_agent.id, + sled_id, BlueprintZonesConfig { generation: Generation::new().next(), - zones: zones - .iter() - .map(|z| { - BlueprintZoneConfig { - config: z.clone(), - // All initial zones are in-service - disposition: - BlueprintZoneDisposition::InService, - } - }) - .collect(), + zones: zones.clone(), }, ); + sled_state.insert(sled_id, SledState::Active); } } Blueprint { id: Uuid::new_v4(), blueprint_zones, + // NOTE: We'll probably need to actually add disks here + // when the Blueprint contains "which disks back zones". + // + // However, for now, this isn't necessary. + blueprint_disks: BTreeMap::new(), + sled_state, parent_blueprint_id: None, internal_dns_version: dns_config .generation .try_into() .expect("bad internal DNS generation"), external_dns_version: Generation::new(), + cockroachdb_fingerprint: String::new(), + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, time_created: Utc::now(), creator: "nexus-test-utils".to_string(), comment: "initial test blueprint".to_string(), } }; + self.initial_blueprint_id = Some(blueprint.id); + // Handoff all known service information to Nexus let server = N::start( self.nexus_internal @@ -839,7 +809,6 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .expect("Must launch internal nexus first"), self.config, blueprint, - self.rack_init_builder.services.clone(), // NOTE: We should probably hand off // "self.rack_init_builder.datasets" here, but Nexus won't be happy // if we pass it right now: @@ -858,6 +827,8 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { // asynchronously, and we're not making any effort (currently) to // wait for them to be known to Nexus. 
vec![], + vec![], + vec![], dns_config, &external_dns_zone_name, recovery_silo, @@ -868,6 +839,8 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { let external_server_addr = server.get_http_server_external_address().await; + let techport_external_server_addr = + server.get_http_server_techport_address().await; let internal_server_addr = server.get_http_server_internal_address().await; let testctx_external = ClientTestContext::new( @@ -876,6 +849,12 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .log .new(o!("component" => "external client test context")), ); + let testctx_techport = ClientTestContext::new( + techport_external_server_addr, + self.logctx.log.new( + o!("component" => "techport external client test context"), + ), + ); let testctx_internal = ClientTestContext::new( internal_server_addr, self.logctx @@ -885,6 +864,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { self.external_dns_zone_name = Some(external_dns_zone_name); self.external_client = Some(testctx_external); + self.techport_client = Some(testctx_techport); self.internal_client = Some(testctx_internal); self.silo_name = Some(silo_name); self.user_name = Some(user_name); @@ -935,9 +915,9 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { switch_location: SwitchLocation, ) { let (field, zones) = if switch_location == SwitchLocation::Switch0 { - (&self.sled_agent, &self.omicron_zones) + (&self.sled_agent, &self.blueprint_zones) } else { - (&self.sled_agent2, &self.omicron_zones2) + (&self.sled_agent2, &self.blueprint_zones2) }; // Tell our Sled Agent to report the zones that we configured. @@ -950,11 +930,31 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { ); client .omicron_zones_put(&OmicronZonesConfig { - zones: zones.clone(), + zones: zones.clone().into_iter().map(From::from).collect(), generation: Generation::new().next(), }) .await .expect("Failed to configure sled agent with our zones"); + client + .write_network_bootstore_config(&EarlyNetworkConfig { + body: EarlyNetworkConfigBody { + ntp_servers: Vec::new(), + rack_network_config: Some(RackNetworkConfigV1 { + bfd: Vec::new(), + bgp: Vec::new(), + infra_ip_first: "192.0.2.10".parse().unwrap(), + infra_ip_last: "192.0.2.100".parse().unwrap(), + ports: Vec::new(), + rack_subnet: "fd00:1122:3344:0100::/56" + .parse() + .unwrap(), + }), + }, + generation: 1, + schema_version: 1, + }) + .await + .expect("Failed to write early networking config to bootstore"); } // Set up the Crucible Pantry on an existing Sled Agent. @@ -971,28 +971,25 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { panic!("Expected IPv6 Pantry Address"); }; - let sled_id = Uuid::parse_str(SLED_AGENT_UUID).unwrap(); - let zone_id = Uuid::new_v4(); - self.rack_init_builder.add_service_with_id( + let zone_id = OmicronZoneUuid::new_v4(); + self.rack_init_builder.add_service_to_dns( zone_id, address, - ServiceKind::CruciblePantry, internal_dns::ServiceName::CruciblePantry, - sled_id, ); - self.omicron_zones.push(OmicronZoneConfig { + self.blueprint_zones.push(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, id: zone_id, underlay_address: *address.ip(), - zone_type: OmicronZoneType::CruciblePantry { - address: address.to_string(), - }, + zone_type: BlueprintZoneType::CruciblePantry( + blueprint_zone_type::CruciblePantry { address }, + ), }); } // Set up an external DNS server. 
pub async fn start_external_dns(&mut self) { let log = self.logctx.log.new(o!("component" => "external_dns_server")); - let sled_id = Uuid::parse_str(SLED_AGENT_UUID).unwrap(); let dns = dns_server::TransientServer::new(&log).await.unwrap(); @@ -1009,51 +1006,50 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { .mac_addrs .next() .expect("ran out of MAC addresses"); - let zone_id = Uuid::new_v4(); - self.rack_init_builder.add_service_with_id( + let zone_id = OmicronZoneUuid::new_v4(); + self.rack_init_builder.add_service_to_dns( zone_id, dropshot_address, - ServiceKind::ExternalDns { - external_address: (*dns_address.ip()).into(), - nic: ServiceNic { - id: Uuid::new_v4(), - name: "external-dns".parse().unwrap(), - ip: DNS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) - .unwrap() - .into(), - mac, - slot: 0, - }, - }, internal_dns::ServiceName::ExternalDns, - sled_id, ); - let zpool_id = Uuid::new_v4(); + let zpool_id = ZpoolUuid::new_v4(); let pool_name = illumos_utils::zpool::ZpoolName::new_external(zpool_id) .to_string() .parse() .unwrap(); - self.omicron_zones.push(OmicronZoneConfig { + self.blueprint_zones.push(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, id: zone_id, underlay_address: *dropshot_address.ip(), - zone_type: OmicronZoneType::ExternalDns { - dataset: OmicronZoneDataset { pool_name }, - dns_address: dns_address.to_string(), - http_address: dropshot_address.to_string(), - nic: NetworkInterface { - id: Uuid::new_v4(), - ip: (*dns_address.ip()).into(), - kind: NetworkInterfaceKind::Service { id: zone_id }, - mac, - name: format!("external-dns-{}", zone_id).parse().unwrap(), - primary: true, - slot: 0, - subnet: (*DNS_OPTE_IPV4_SUBNET).into(), - vni: Vni::SERVICES_VNI, + zone_type: BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { + dataset: OmicronZoneDataset { pool_name }, + dns_address: OmicronZoneExternalFloatingAddr { + id: ExternalIpUuid::new_v4(), + addr: dns_address.into(), + }, + http_address: dropshot_address, + nic: NetworkInterface { + id: Uuid::new_v4(), + ip: DNS_OPTE_IPV4_SUBNET + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) + .unwrap() + .into(), + kind: NetworkInterfaceKind::Service { + id: zone_id.into_untyped_uuid(), + }, + mac, + name: format!("external-dns-{}", zone_id) + .parse() + .unwrap(), + primary: true, + slot: 0, + subnet: (*DNS_OPTE_IPV4_SUBNET).into(), + vni: Vni::SERVICES_VNI, + }, }, - }, + ), }); self.external_dns = Some(dns); @@ -1062,36 +1058,40 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { // Set up an internal DNS server. 
pub async fn start_internal_dns(&mut self) { let log = self.logctx.log.new(o!("component" => "internal_dns_server")); - let sled_id = Uuid::parse_str(SLED_AGENT_UUID).unwrap(); let dns = dns_server::TransientServer::new(&log).await.unwrap(); - let SocketAddr::V6(address) = dns.dropshot_server.local_addr() else { + let SocketAddr::V6(dns_address) = dns.dns_server.local_address() else { panic!("Unsupported IPv4 DNS address"); }; - let zone_id = Uuid::new_v4(); - self.rack_init_builder.add_service_with_id( + let SocketAddr::V6(http_address) = dns.dropshot_server.local_addr() + else { + panic!("Unsupported IPv4 DNS address"); + }; + let zone_id = OmicronZoneUuid::new_v4(); + self.rack_init_builder.add_service_to_dns( zone_id, - address, - ServiceKind::InternalDns, + http_address, internal_dns::ServiceName::InternalDns, - sled_id, ); - let zpool_id = Uuid::new_v4(); + let zpool_id = ZpoolUuid::new_v4(); let pool_name = illumos_utils::zpool::ZpoolName::new_external(zpool_id) .to_string() .parse() .unwrap(); - self.omicron_zones.push(OmicronZoneConfig { + self.blueprint_zones.push(BlueprintZoneConfig { + disposition: BlueprintZoneDisposition::InService, id: zone_id, - underlay_address: *address.ip(), - zone_type: OmicronZoneType::InternalDns { - dataset: OmicronZoneDataset { pool_name }, - dns_address: dns.dns_server.local_address().to_string(), - http_address: address.to_string(), - gz_address: Ipv6Addr::LOCALHOST, - gz_address_index: 0, - }, + underlay_address: *http_address.ip(), + zone_type: BlueprintZoneType::InternalDns( + blueprint_zone_type::InternalDns { + dataset: OmicronZoneDataset { pool_name }, + dns_address, + http_address, + gz_address: Ipv6Addr::LOCALHOST, + gz_address_index: 0, + }, + ), }); self.internal_dns = Some(dns); @@ -1102,6 +1102,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { start_time: self.start_time, server: self.server.unwrap(), external_client: self.external_client.unwrap(), + techport_client: self.techport_client.unwrap(), internal_client: self.internal_client.unwrap(), database: self.database.unwrap(), clickhouse: self.clickhouse.unwrap(), @@ -1118,6 +1119,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { external_dns_zone_name: self.external_dns_zone_name.unwrap(), external_dns: self.external_dns.unwrap(), internal_dns: self.internal_dns.unwrap(), + initial_blueprint_id: self.initial_blueprint_id.unwrap(), silo_name: self.silo_name.unwrap(), user_name: self.user_name.unwrap(), } @@ -1411,6 +1413,7 @@ pub async fn start_oximeter( let config = oximeter_collector::Config { nexus_address: Some(nexus_address), db, + refresh_interval: oximeter_collector::default_refresh_interval(), log: ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Error }, }; let args = oximeter_collector::OximeterArguments { @@ -1456,7 +1459,7 @@ impl oximeter::Producer for IntegrationProducer { /// /// Actual producers can be registered with the [`register_producer`] /// helper function. 
-pub async fn start_producer_server( +pub fn start_producer_server( nexus_address: SocketAddr, id: Uuid, ) -> Result { @@ -1469,23 +1472,17 @@ pub async fn start_producer_server( id, kind: ProducerKind::Service, address: producer_address, - base_route: "/collect".to_string(), interval: Duration::from_secs(1), }; let config = oximeter_producer::Config { server_info, - registration_address: nexus_address, - dropshot: ConfigDropshot { - bind_address: producer_address, - ..Default::default() - }, + registration_address: Some(nexus_address), + request_body_max_bytes: 1024, log: LogConfig::Config(ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Error, }), }; - let server = - ProducerServer::start(&config).await.map_err(|e| e.to_string())?; - Ok(server) + ProducerServer::start(&config).map_err(|e| e.to_string()) } /// Registers an arbitrary producer with the test server. diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index b67028a996..2aef32d37c 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -15,7 +15,6 @@ use http::StatusCode; use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_test_interface::NexusServer; use nexus_types::external_api::params; -use nexus_types::external_api::params::PhysicalDiskKind; use nexus_types::external_api::params::UserId; use nexus_types::external_api::shared; use nexus_types::external_api::shared::Baseboard; @@ -37,9 +36,12 @@ use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceCpuCount; use omicron_common::api::external::NameOrId; +use omicron_common::disk::DiskIdentity; use omicron_sled_agent::sim::SledAgent; use omicron_test_utils::dev::poll::wait_for_condition; use omicron_test_utils::dev::poll::CondCheckError; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::ZpoolUuid; use slog::debug; use std::net::IpAddr; use std::sync::Arc; @@ -340,55 +342,6 @@ pub async fn create_switch( .await } -pub async fn create_physical_disk( - client: &ClientTestContext, - vendor: &str, - serial: &str, - model: &str, - variant: PhysicalDiskKind, - sled_id: Uuid, -) -> internal_params::PhysicalDiskPutResponse { - object_put( - client, - "/physical-disk", - &internal_params::PhysicalDiskPutRequest { - vendor: vendor.to_string(), - serial: serial.to_string(), - model: model.to_string(), - variant, - sled_id, - }, - ) - .await -} - -pub async fn delete_physical_disk( - client: &ClientTestContext, - vendor: &str, - serial: &str, - model: &str, - sled_id: Uuid, -) { - let body = internal_params::PhysicalDiskDeleteRequest { - vendor: vendor.to_string(), - serial: serial.to_string(), - model: model.to_string(), - sled_id, - }; - - NexusRequest::new( - RequestBuilder::new(client, http::Method::DELETE, "/physical-disk") - .body(Some(&body)) - .expect_status(Some(http::StatusCode::NO_CONTENT)), - ) - .authn_as(AuthnMode::PrivilegedUser) - .execute() - .await - .unwrap_or_else(|_| { - panic!("failed to make \"delete\" request of physical disk") - }); -} - pub async fn create_silo( client: &ClientTestContext, silo_name: &str, @@ -745,7 +698,7 @@ pub struct TestDataset { } pub struct TestZpool { - pub id: Uuid, + pub id: ZpoolUuid, pub size: ByteCount, pub datasets: Vec, } @@ -757,8 +710,17 @@ pub struct DiskTest { impl DiskTest { pub const DEFAULT_ZPOOL_SIZE_GIB: u32 = 10; + pub const DEFAULT_ZPOOL_COUNT: u32 = 3; + + /// Creates a new "DiskTest", but does 
not actually add any zpools. + pub async fn empty( + cptestctx: &ControlPlaneTestContext, + ) -> Self { + let sled_agent = cptestctx.sled_agent.sled_agent.clone(); + + Self { sled_agent, zpools: vec![] } + } - // Creates fake physical storage, an organization, and a project. pub async fn new( cptestctx: &ControlPlaneTestContext, ) -> Self { @@ -767,10 +729,8 @@ impl DiskTest { let mut disk_test = Self { sled_agent, zpools: vec![] }; // Create three Zpools, each 10 GiB, each with one Crucible dataset. - for _ in 0..3 { - disk_test - .add_zpool_with_dataset(cptestctx, Self::DEFAULT_ZPOOL_SIZE_GIB) - .await; + for _ in 0..Self::DEFAULT_ZPOOL_COUNT { + disk_test.add_zpool_with_dataset(cptestctx).await; } disk_test @@ -779,38 +739,77 @@ impl DiskTest { pub async fn add_zpool_with_dataset( &mut self, cptestctx: &ControlPlaneTestContext, + ) { + self.add_zpool_with_dataset_ext( + cptestctx, + Uuid::new_v4(), + ZpoolUuid::new_v4(), + Uuid::new_v4(), + Self::DEFAULT_ZPOOL_SIZE_GIB, + ) + .await + } + + pub async fn add_zpool_with_dataset_ext( + &mut self, + cptestctx: &ControlPlaneTestContext, + physical_disk_id: Uuid, + zpool_id: ZpoolUuid, + dataset_id: Uuid, gibibytes: u32, ) { + // To get a dataset, we actually need to create a new simulated physical + // disk, zpool, and dataset, all contained within one another. let zpool = TestZpool { - id: Uuid::new_v4(), + id: zpool_id, size: ByteCount::from_gibibytes_u32(gibibytes), - datasets: vec![TestDataset { id: Uuid::new_v4() }], + datasets: vec![TestDataset { id: dataset_id }], + }; + + let disk_identity = DiskIdentity { + vendor: "test-vendor".into(), + serial: format!("totally-unique-serial: {}", physical_disk_id), + model: "test-model".into(), }; + let physical_disk_request = + nexus_types::internal_api::params::PhysicalDiskPutRequest { + id: physical_disk_id, + vendor: disk_identity.vendor.clone(), + serial: disk_identity.serial.clone(), + model: disk_identity.model.clone(), + variant: + nexus_types::external_api::params::PhysicalDiskKind::U2, + sled_id: self.sled_agent.id, + }; + + let zpool_request = + nexus_types::internal_api::params::ZpoolPutRequest { + id: zpool.id.into_untyped_uuid(), + physical_disk_id, + sled_id: self.sled_agent.id, + }; + + // Tell the simulated sled agent to create the disk and zpool containing + // these datasets. + self.sled_agent .create_external_physical_disk( - "test-vendor".into(), - "test-serial".into(), - "test-model".into(), + physical_disk_id, + disk_identity.clone(), ) .await; self.sled_agent - .create_zpool( - zpool.id, - "test-vendor".into(), - "test-serial".into(), - "test-model".into(), - zpool.size.to_bytes(), - ) + .create_zpool(zpool.id, physical_disk_id, zpool.size.to_bytes()) .await; for dataset in &zpool.datasets { + // Sled Agent side: Create the Dataset, make sure regions can be + // created immediately if Nexus requests anything. let address = self .sled_agent .create_crucible_dataset(zpool.id, dataset.id) .await; - - // By default, regions are created immediately. let crucible = self .sled_agent .get_crucible_dataset(zpool.id, dataset.id) @@ -819,6 +818,9 @@ impl DiskTest { .set_create_callback(Box::new(|_| RegionState::Created)) .await; + // Nexus side: Notify Nexus of the physical disk/zpool/dataset + // combination that exists. 
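// `add_zpool_with_dataset_ext` above builds one simulated physical disk per
// zpool, and the disk's serial embeds its UUID so every disk in a test run
// is unique. A self-contained sketch of that identity scheme; the struct
// here stands in for `omicron_common::disk::DiskIdentity` (assumes the
// `uuid` crate with the v4 feature):

use uuid::Uuid;

#[derive(Clone, Debug, PartialEq, Eq)]
struct DiskIdentity {
    vendor: String,
    serial: String,
    model: String,
}

fn test_disk_identity(physical_disk_id: Uuid) -> DiskIdentity {
    DiskIdentity {
        vendor: "test-vendor".into(),
        serial: format!("totally-unique-serial: {}", physical_disk_id),
        model: "test-model".into(),
    }
}

fn main() {
    let a = test_disk_identity(Uuid::new_v4());
    let b = test_disk_identity(Uuid::new_v4());
    // same vendor/model, distinct serials
    assert_eq!(a.vendor, b.vendor);
    assert_ne!(a.serial, b.serial);
}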
+ let address = match address { std::net::SocketAddr::V6(addr) => addr, _ => panic!("Unsupported address type: {address} "), @@ -826,7 +828,12 @@ impl DiskTest { cptestctx .server - .upsert_crucible_dataset(dataset.id, zpool.id, address) + .upsert_crucible_dataset( + physical_disk_request.clone(), + zpool_request.clone(), + dataset.id, + address, + ) .await; } @@ -860,7 +867,7 @@ impl DiskTest { .flat_map(|sled_agent| { sled_agent.zpools.iter().map(|z| z.id) }) - .collect::>(); + .collect::>(); if all_zpools.contains(&zpool.id) { Ok(()) diff --git a/nexus/tests/config.test.toml b/nexus/tests/config.test.toml index de3aa4c1f4..861d78e20c 100644 --- a/nexus/tests/config.test.toml +++ b/nexus/tests/config.test.toml @@ -85,6 +85,7 @@ dns_external.period_secs_config = 60 dns_external.period_secs_servers = 60 dns_external.period_secs_propagation = 60 dns_external.max_concurrent_server_updates = 5 +metrics_producer_gc.period_secs = 60 # How frequently we check the list of stored TLS certificates. This is # approximately an upper bound on how soon after updating the list of # certificates it will take _other_ Nexus instances to notice and stop serving @@ -100,11 +101,18 @@ inventory.nkeep = 3 # Disable inventory collection altogether (for emergencies) inventory.disable = false phantom_disks.period_secs = 30 +physical_disk_adoption.period_secs = 30 +# Disable automatic disk adoption to avoid interfering with tests. +physical_disk_adoption.disable = true blueprints.period_secs_load = 100 blueprints.period_secs_execute = 600 sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 +instance_watcher.period_secs = 30 +service_firewall_propagation.period_secs = 300 +v2p_mapping_propagation.period_secs = 30 +abandoned_vmm_reaper.period_secs = 60 [default_region_allocation_strategy] # we only have one sled in the test environment, so we need to use the diff --git a/nexus/tests/integration_tests/allow_list.rs b/nexus/tests/integration_tests/allow_list.rs new file mode 100644 index 0000000000..336a33273d --- /dev/null +++ b/nexus/tests/integration_tests/allow_list.rs @@ -0,0 +1,131 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Integration tests for IP allow list endpoints. + +use dropshot::test_util::ClientTestContext; +use nexus_test_utils::http_testing::{AuthnMode, NexusRequest}; +use nexus_test_utils_macros::nexus_test; +use nexus_types::external_api::{params, views}; +use omicron_common::api::external::AllowedSourceIps; +use oxnet::IpNet; +use std::net::IpAddr; +use std::net::Ipv4Addr; + +type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + +const URL: &str = "/v1/system/networking/allow-list"; + +#[nexus_test] +async fn test_allow_list(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + // We should start with the default of any. + let list: views::AllowList = NexusRequest::object_get(client, URL) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("failed to make GET request") + .parsed_body() + .unwrap(); + assert_eq!( + list.allowed_ips, + AllowedSourceIps::Any, + "Should start with the default allow list of Any" + ); + + // All these requests use localhost, which makes things pretty easy. 
+ let our_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); + + // Set the allowlist, and assert it's equal to what we set. + async fn update_list_and_compare( + client: &ClientTestContext, + allowed_ips: AllowedSourceIps, + ) { + let new_list = + params::AllowListUpdate { allowed_ips: allowed_ips.clone() }; + + // PUT the list, which returns it, and ensure we get back what we set. + let list: views::AllowList = + NexusRequest::object_put(client, URL, Some(&new_list)) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("failed to make PUT request") + .parsed_body() + .unwrap(); + assert_eq!( + list.allowed_ips, allowed_ips, + "Failed to update the allow list", + ); + + // GET it as well. + let get_list: views::AllowList = NexusRequest::object_get(client, URL) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("failed to make GET request") + .parsed_body() + .unwrap(); + assert_eq!( + list.allowed_ips, get_list.allowed_ips, + "List returned from PUT and GET should be the same", + ); + } + + // Set the list with exactly one IP, make sure it's the same. + let allowed_ips = + AllowedSourceIps::try_from(vec![IpNet::host_net(our_addr)]) + .expect("Expected a valid IP list"); + update_list_and_compare(client, allowed_ips).await; + + // Add our IP in the front and end, and still make sure that works. + // + // This is a regression for + // https://github.com/oxidecomputer/omicron/issues/5727. + let addrs = vec![ + IpNet::host_net(our_addr), + IpNet::host_net(IpAddr::from(Ipv4Addr::new(10, 0, 0, 1))), + ]; + let allowed_ips = AllowedSourceIps::try_from(addrs.clone()) + .expect("Expected a valid IP list"); + update_list_and_compare(client, allowed_ips).await; + + let addrs = addrs.into_iter().rev().collect::>(); + let allowed_ips = + AllowedSourceIps::try_from(addrs).expect("Expected a valid IP list"); + update_list_and_compare(client, allowed_ips).await; + + // Set back to any + update_list_and_compare(client, AllowedSourceIps::Any).await; + + // Check that we cannot make the request with a list that doesn't include + // us. + let addrs = vec![IpNet::host_net(IpAddr::from(Ipv4Addr::new(1, 1, 1, 1)))]; + let allowed_ips = AllowedSourceIps::try_from(addrs.clone()) + .expect("Expected a valid IP list"); + let new_list = params::AllowListUpdate { allowed_ips: allowed_ips.clone() }; + let err: dropshot::HttpErrorResponseBody = + NexusRequest::expect_failure_with_body( + client, + http::StatusCode::BAD_REQUEST, + http::Method::PUT, + URL, + &new_list, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("failed to make PUT request") + .parsed_body() + .expect("failed to parse error response"); + assert!(err + .message + .contains("would prevent access from the current client")); + + // But we _should_ be able to make this self-defeating request through the + // techport proxy server. 
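// The last part of this test checks that a PUT which would lock out the
// requesting client is rejected (except via the techport proxy, exercised
// just below). A simplified, self-contained sketch of that invariant; the
// real check operates on `AllowedSourceIps` with IP networks rather than a
// flat list of addresses:

use std::net::{IpAddr, Ipv4Addr};

fn check_not_self_defeating(client_ip: IpAddr, allowed: &[IpAddr]) -> Result<(), String> {
    if allowed.contains(&client_ip) {
        Ok(())
    } else {
        Err("allow list would prevent access from the current client".to_string())
    }
}

fn main() {
    let me = IpAddr::V4(Ipv4Addr::LOCALHOST);
    let other = IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1));
    assert!(check_not_self_defeating(me, &[me, other]).is_ok());
    assert!(check_not_self_defeating(me, &[other]).is_err());
}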
+ let client = &cptestctx.techport_client; + update_list_and_compare(client, allowed_ips).await; +} diff --git a/nexus/tests/integration_tests/basic.rs b/nexus/tests/integration_tests/basic.rs index 282ec0cd96..cd23b7dd87 100644 --- a/nexus/tests/integration_tests/basic.rs +++ b/nexus/tests/integration_tests/basic.rs @@ -205,13 +205,13 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { assert_eq!(initial_projects.len(), 3); assert_eq!(initial_projects[0].identity.id, new_project_ids[0]); assert_eq!(initial_projects[0].identity.name, "simproject1"); - assert!(initial_projects[0].identity.description.len() > 0); + assert!(!initial_projects[0].identity.description.is_empty()); assert_eq!(initial_projects[1].identity.id, new_project_ids[1]); assert_eq!(initial_projects[1].identity.name, "simproject2"); - assert!(initial_projects[1].identity.description.len() > 0); + assert!(!initial_projects[1].identity.description.is_empty()); assert_eq!(initial_projects[2].identity.id, new_project_ids[2]); assert_eq!(initial_projects[2].identity.name, "simproject3"); - assert!(initial_projects[2].identity.description.len() > 0); + assert!(!initial_projects[2].identity.description.is_empty()); // Basic test of out-of-the-box GET project let project = project_get(&client, "/v1/projects/simproject2").await; @@ -219,7 +219,7 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { assert_eq!(project.identity.id, expected.identity.id); assert_eq!(project.identity.name, expected.identity.name); assert_eq!(project.identity.description, expected.identity.description); - assert!(project.identity.description.len() > 0); + assert!(!project.identity.description.is_empty()); // Delete "simproject2", but first delete: // - The default subnet within the default VPC @@ -440,7 +440,7 @@ async fn test_projects_basic(cptestctx: &ControlPlaneTestContext) { assert_eq!(projects[1].identity.name, "lil-lightnin"); assert_eq!(projects[1].identity.description, "little lightning"); assert_eq!(projects[2].identity.name, "simproject1"); - assert!(projects[2].identity.description.len() > 0); + assert!(!projects[2].identity.description.is_empty()); } #[nexus_test] diff --git a/nexus/tests/integration_tests/commands.rs b/nexus/tests/integration_tests/commands.rs index fd7a6c60c0..2eaf24d907 100644 --- a/nexus/tests/integration_tests/commands.rs +++ b/nexus/tests/integration_tests/commands.rs @@ -109,7 +109,7 @@ fn test_nexus_openapi() { .expect("stdout was not valid OpenAPI"); assert_eq!(spec.openapi, "3.0.3"); assert_eq!(spec.info.title, "Oxide Region API"); - assert_eq!(spec.info.version, "20240327.0"); + assert_eq!(spec.info.version, "20240502.0"); // Spot check a couple of items. 
assert!(!spec.paths.paths.is_empty()); diff --git a/nexus/tests/integration_tests/console_api.rs b/nexus/tests/integration_tests/console_api.rs index 78b5ee21d5..8daaf44733 100644 --- a/nexus/tests/integration_tests/console_api.rs +++ b/nexus/tests/integration_tests/console_api.rs @@ -113,7 +113,7 @@ async fn test_sessions(cptestctx: &ControlPlaneTestContext) { RequestBuilder::new(&testctx, Method::GET, "/projects/whatever") .header(header::COOKIE, &session_token) - .expect_status(Some(StatusCode::OK)) + .expect_console_asset() .execute() .await .expect("failed to get console page with session cookie"); @@ -167,10 +167,10 @@ async fn expect_console_page( } let console_page = builder - .expect_status(Some(StatusCode::OK)) + .expect_console_asset() .expect_response_header( http::header::CONTENT_TYPE, - "text/html; charset=UTF-8", + "text/html; charset=utf-8", ) .expect_response_header(http::header::CACHE_CONTROL, "no-store") .execute() @@ -258,11 +258,12 @@ async fn test_assets(cptestctx: &ControlPlaneTestContext) { // existing file is returned let resp = RequestBuilder::new(&testctx, Method::GET, "/assets/hello.txt") - .expect_status(Some(StatusCode::OK)) + .expect_console_asset() .expect_response_header( http::header::CACHE_CONTROL, "max-age=31536000, immutable", ) + .expect_response_header(http::header::CONTENT_LENGTH, 11) .execute() .await .expect("failed to get existing file"); @@ -277,11 +278,12 @@ async fn test_assets(cptestctx: &ControlPlaneTestContext) { Method::GET, "/assets/a_directory/another_file.txt", ) - .expect_status(Some(StatusCode::OK)) + .expect_console_asset() .expect_response_header( http::header::CACHE_CONTROL, "max-age=31536000, immutable", ) + .expect_response_header(http::header::CONTENT_LENGTH, 10) .execute() .await .expect("failed to get existing file"); @@ -297,12 +299,30 @@ async fn test_assets(cptestctx: &ControlPlaneTestContext) { .await .expect("failed to 404 on gzip file without accept-encoding: gzip"); + // file with only non-gzipped version is returned even if accept requests gzip + let resp = RequestBuilder::new(&testctx, Method::GET, "/assets/hello.txt") + .header(http::header::ACCEPT_ENCODING, "gzip") + .expect_console_asset() + .expect_response_header( + http::header::CACHE_CONTROL, + "max-age=31536000, immutable", + ) + .expect_response_header(http::header::CONTENT_LENGTH, 11) + .execute() + .await + .expect("failed to get existing file"); + + assert_eq!(resp.body, "hello there".as_bytes()); + // make sure we're not including the gzip header on non-gzipped files + assert_eq!(resp.headers.get(http::header::CONTENT_ENCODING), None); + // file with only gzipped version is returned if request accepts gzip let resp = RequestBuilder::new(&testctx, Method::GET, "/assets/gzip-only.txt") .header(http::header::ACCEPT_ENCODING, "gzip") - .expect_status(Some(StatusCode::OK)) + .expect_console_asset() .expect_response_header(http::header::CONTENT_ENCODING, "gzip") + .expect_response_header(http::header::CONTENT_LENGTH, 16) .execute() .await .expect("failed to get existing file"); @@ -313,12 +333,13 @@ async fn test_assets(cptestctx: &ControlPlaneTestContext) { let resp = RequestBuilder::new(&testctx, Method::GET, "/assets/gzip-and-not.txt") .header(http::header::ACCEPT_ENCODING, "gzip") - .expect_status(Some(StatusCode::OK)) + .expect_console_asset() .expect_response_header(http::header::CONTENT_ENCODING, "gzip") .expect_response_header( http::header::CACHE_CONTROL, "max-age=31536000, immutable", ) + .expect_response_header(http::header::CONTENT_LENGTH, 33) 
.execute() .await .expect("failed to get existing file"); @@ -328,7 +349,8 @@ async fn test_assets(cptestctx: &ControlPlaneTestContext) { // returns non-gzipped if request doesn't accept gzip let resp = RequestBuilder::new(&testctx, Method::GET, "/assets/gzip-and-not.txt") - .expect_status(Some(StatusCode::OK)) + .expect_console_asset() + .expect_response_header(http::header::CONTENT_LENGTH, 28) .execute() .await .expect("failed to get existing file"); @@ -336,6 +358,18 @@ async fn test_assets(cptestctx: &ControlPlaneTestContext) { assert_eq!(resp.body, "not gzipped but I know a guy".as_bytes()); // make sure we're not including the gzip header on non-gzipped files assert_eq!(resp.headers.get(http::header::CONTENT_ENCODING), None); + + // test that `..` is not allowed in paths. (Dropshot handles this, so we + // test to ensure this hasn't gone away.) + let _ = RequestBuilder::new( + &testctx, + Method::GET, + "/assets/../assets/hello.txt", + ) + .expect_status(Some(StatusCode::BAD_REQUEST)) + .execute() + .await + .expect("failed to 400 on `..` traversal"); } #[tokio::test] @@ -356,6 +390,7 @@ async fn test_absolute_static_dir() { // existing file is returned let resp = RequestBuilder::new(&testctx, Method::GET, "/assets/hello.txt") + .expect_console_asset() .execute() .await .expect("failed to get existing file"); diff --git a/nexus/tests/integration_tests/device_auth.rs b/nexus/tests/integration_tests/device_auth.rs index fafa857645..5bb34eb19e 100644 --- a/nexus/tests/integration_tests/device_auth.rs +++ b/nexus/tests/integration_tests/device_auth.rs @@ -82,6 +82,7 @@ async fn test_device_auth_flow(cptestctx: &ControlPlaneTestContext) { // Authenticated requests get the console verification page. assert!(NexusRequest::object_get(testctx, "/device/verify") + .console_asset() .authn_as(AuthnMode::PrivilegedUser) .execute() .await diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index 5464d7e589..ed4fd59277 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -10,7 +10,9 @@ use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; use http::method::Method; use http::StatusCode; +use nexus_config::RegionAllocationStrategy; use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::datastore::REGION_REDUNDANCY_THRESHOLD; use nexus_db_queries::db::fixed_data::{silo::DEFAULT_SILO_ID, FLEET_ID}; use nexus_db_queries::db::lookup::LookupPath; use nexus_test_utils::http_testing::AuthnMode; @@ -40,6 +42,7 @@ use omicron_nexus::TestInterfaces as _; use oximeter::types::Datum; use oximeter::types::Measurement; use sled_agent_client::TestInterfaces as _; +use std::collections::HashSet; use std::sync::Arc; use uuid::Uuid; @@ -187,7 +190,7 @@ async fn test_disk_create_attach_detach_delete( let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; let project_id = create_project_and_pool(client).await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disks_url = get_disks_url(); // Create a disk. 
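// The console asset tests above exercise a simple content-negotiation rule:
// serve the gzipped variant only when the client sends
// `Accept-Encoding: gzip` and a gzipped file exists, otherwise fall back to
// the plain file (404 if neither applies). A self-contained sketch of that
// rule as the tests observe it, not the console server's actual code:

fn pick_variant<'a>(
    accepts_gzip: bool,
    plain: Option<&'a [u8]>,
    gzipped: Option<&'a [u8]>,
) -> Option<(&'a [u8], Option<&'static str>)> {
    match (accepts_gzip, plain, gzipped) {
        // gzip accepted and available: serve it with Content-Encoding: gzip
        (true, _, Some(gz)) => Some((gz, Some("gzip"))),
        // otherwise serve the plain file with no Content-Encoding header
        (_, Some(p), _) => Some((p, None)),
        // gzip-only asset but the client did not accept gzip: not found
        _ => None,
    }
}

fn main() {
    let plain = b"hello there".as_slice();
    let gz = b"gzipped bytes".as_slice();
    assert_eq!(pick_variant(true, Some(plain), None), Some((plain, None)));
    assert_eq!(pick_variant(true, Some(plain), Some(gz)), Some((gz, Some("gzip"))));
    assert_eq!(pick_variant(false, None, Some(gz)), None);
}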
@@ -361,7 +364,7 @@ async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let disk_names = ["a", "b", "c", "d"]; let mut disks = Vec::new(); @@ -387,7 +390,7 @@ async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) { get_disk_attach_url(&instance.identity.id.into()); async fn get_disk_slot(ctx: &ControlPlaneTestContext, disk_id: Uuid) -> u8 { - let apictx = &ctx.server.apictx(); + let apictx = &ctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); let opctx = @@ -465,7 +468,7 @@ async fn test_disk_slot_assignment(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_disk_move_between_instances(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(&client).await; let disks_url = get_disks_url(); @@ -797,7 +800,7 @@ async fn test_disk_reject_total_size_not_divisible_by_block_size( // divisible by block size. assert!( disk_size.to_bytes() - < DiskTest::DEFAULT_ZPOOL_SIZE_GIB as u64 * 1024 * 1024 * 1024 + < u64::from(DiskTest::DEFAULT_ZPOOL_SIZE_GIB) * 1024 * 1024 * 1024 ); let disks_url = get_disks_url(); @@ -967,9 +970,9 @@ async fn test_disk_backed_by_multiple_region_sets( assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); // Create another three zpools, all 10 gibibytes, each with one dataset - test.add_zpool_with_dataset(cptestctx, 10).await; - test.add_zpool_with_dataset(cptestctx, 10).await; - test.add_zpool_with_dataset(cptestctx, 10).await; + test.add_zpool_with_dataset(cptestctx).await; + test.add_zpool_with_dataset(cptestctx).await; + test.add_zpool_with_dataset(cptestctx).await; create_project_and_pool(client).await; @@ -1039,7 +1042,7 @@ async fn test_disk_virtual_provisioning_collection( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let _test = DiskTest::new(&cptestctx).await; @@ -1247,7 +1250,7 @@ async fn test_disk_virtual_provisioning_collection_failed_delete( ) { // Confirm that there's no panic deleting a project if a disk deletion fails let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let disk_test = DiskTest::new(&cptestctx).await; @@ -1387,7 +1390,7 @@ async fn test_phantom_disk_rename(cptestctx: &ControlPlaneTestContext) { // faulted let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let _disk_test = DiskTest::new(&cptestctx).await; @@ -1508,7 +1511,7 @@ async fn test_phantom_disk_rename(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_disk_size_accounting(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); // Create three 10 GiB zpools, each with one dataset. 
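One small but deliberate change in the disks.rs hunks above is `u64::from(DiskTest::DEFAULT_ZPOOL_SIZE_GIB)` in place of `DEFAULT_ZPOOL_SIZE_GIB as u64`: `From` only compiles for lossless widenings, whereas `as` would silently truncate if the constant's type were ever widened. A minimal sketch of the pattern (the constant and its u32 type here are hypothetical stand-ins, not the actual DiskTest definition):

const DEFAULT_ZPOOL_SIZE_GIB: u32 = 10;

fn zpool_size_bytes() -> u64 {
    // `u64::from` is infallible and lossless for u32 -> u64; if the constant
    // were changed to a wider type this line would fail to compile instead of
    // truncating at runtime the way an `as` cast could.
    u64::from(DEFAULT_ZPOOL_SIZE_GIB) * 1024 * 1024 * 1024
}

fn main() {
    assert_eq!(zpool_size_bytes(), 10 * 1024 * 1024 * 1024);
}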
@@ -1682,9 +1685,9 @@ async fn test_multiple_disks_multiple_zpools( // Assert default is still 10 GiB assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); - test.add_zpool_with_dataset(cptestctx, 10).await; - test.add_zpool_with_dataset(cptestctx, 10).await; - test.add_zpool_with_dataset(cptestctx, 10).await; + test.add_zpool_with_dataset(cptestctx).await; + test.add_zpool_with_dataset(cptestctx).await; + test.add_zpool_with_dataset(cptestctx).await; create_project_and_pool(client).await; @@ -1757,10 +1760,6 @@ const ALL_METRICS: [&'static str; 6] = #[nexus_test] async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { - // Normally, Nexus is not registered as a producer for tests. - // Turn this bit on so we can also test some metrics from Nexus itself. - cptestctx.server.register_as_producer().await; - let oximeter = &cptestctx.oximeter; let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; @@ -1831,10 +1830,6 @@ async fn test_disk_metrics(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_disk_metrics_paginated(cptestctx: &ControlPlaneTestContext) { - // Normally, Nexus is not registered as a producer for tests. - // Turn this bit on so we can also test some metrics from Nexus itself. - cptestctx.server.register_as_producer().await; - let client = &cptestctx.external_client; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -1977,7 +1972,7 @@ async fn test_project_delete_disk_no_auth_idempotent( // Call project_delete_disk_no_auth twice, ensuring that the disk is either // there before deleting and not afterwards. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -2014,6 +2009,454 @@ async fn test_project_delete_disk_no_auth_idempotent( .unwrap(); } +// Test allocating a single region +#[nexus_test] +async fn test_single_region_allocate(cptestctx: &ControlPlaneTestContext) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + // Create three 10 GiB zpools, each with one dataset. + let disk_test = DiskTest::new(&cptestctx).await; + + // Assert default is still 10 GiB + assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); + + // Allocate a single 1 GB region + let volume_id = Uuid::new_v4(); + + let datasets_and_regions = datastore + .arbitrary_region_allocate( + &opctx, + volume_id, + ¶ms::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, + ByteCount::from_gibibytes_u32(1), + &RegionAllocationStrategy::Random { seed: None }, + 1, + ) + .await + .unwrap(); + + assert_eq!(datasets_and_regions.len(), 1); + + // Double check! + let allocated_regions = + datastore.get_allocated_regions(volume_id).await.unwrap(); + + assert_eq!(allocated_regions.len(), 1); + + // Triple check! + let allocated_region = + datastore.get_region(datasets_and_regions[0].1.id()).await.unwrap(); + assert_eq!(allocated_region.block_size().to_bytes(), 512); + assert_eq!(allocated_region.blocks_per_extent(), 131072); // based on EXTENT_SIZE const + assert_eq!(allocated_region.extent_count(), 16); + + // Quadruple check! 
Only one Crucible agent should have received a region + // request + let mut number_of_matching_regions = 0; + + for zpool in &disk_test.zpools { + for dataset in &zpool.datasets { + let total_size = datastore + .regions_total_occupied_size(dataset.id) + .await + .unwrap(); + + if total_size == 1073741824 { + number_of_matching_regions += 1; + } else if total_size == 0 { + // ok, unallocated + } else { + panic!("unexpected regions total size of {total_size}"); + } + } + } + + assert_eq!(number_of_matching_regions, 1); +} + +// Ensure that `disk_region_allocate` is idempotent. +#[nexus_test] +async fn test_region_allocation_strategy_random_is_idempotent( + cptestctx: &ControlPlaneTestContext, +) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + // Create four 10 GiB zpools, each with one dataset. + let mut disk_test = DiskTest::new(&cptestctx).await; + disk_test.add_zpool_with_dataset(&cptestctx).await; + + // Assert default is still 10 GiB + assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); + + // Create a disk + let client = &cptestctx.external_client; + let _project_id = create_project_and_pool(client).await; + + let disk = create_disk(&client, PROJECT_NAME, DISK_NAME).await; + + // Assert disk has three allocated regions + let disk_id = disk.identity.id; + let (.., db_disk) = LookupPath::new(&opctx, &datastore) + .disk_id(disk_id) + .fetch() + .await + .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + + let allocated_regions = + datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD); + + // Call `disk_region_allocate` again + let region: &nexus_db_model::Region = &allocated_regions[0].1; + + let region_total_size: ByteCount = ByteCount::try_from( + region.block_size().to_bytes() + * region.blocks_per_extent() + * region.extent_count(), + ) + .unwrap(); + + assert_eq!(region_total_size, ByteCount::from_gibibytes_u32(1)); + + let datasets_and_regions = datastore + .disk_region_allocate( + &opctx, + db_disk.volume_id, + ¶ms::DiskSource::Blank { + block_size: params::BlockSize::try_from( + region.block_size().to_bytes() as u32, + ) + .unwrap(), + }, + region_total_size, + &RegionAllocationStrategy::Random { seed: None }, + ) + .await + .unwrap(); + + // There should be the same amount + assert_eq!(allocated_regions.len(), datasets_and_regions.len()); +} + +// Ensure that adjusting redundancy level with `arbitrary_region_allocate` works +#[nexus_test] +async fn test_region_allocation_strategy_random_is_idempotent_arbitrary( + cptestctx: &ControlPlaneTestContext, +) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + // Create four 10 GiB zpools, each with one dataset. 
+ let mut disk_test = DiskTest::new(&cptestctx).await; + disk_test.add_zpool_with_dataset(&cptestctx).await; + + // Assert default is still 10 GiB + assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); + + // Call region allocation in isolation + let volume_id = Uuid::new_v4(); + + let datasets_and_regions = datastore + .arbitrary_region_allocate( + &opctx, + volume_id, + ¶ms::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, + ByteCount::from_gibibytes_u32(1), + &RegionAllocationStrategy::Random { seed: None }, + REGION_REDUNDANCY_THRESHOLD, + ) + .await + .unwrap(); + + // There should be the same amount as we requested + assert_eq!(REGION_REDUNDANCY_THRESHOLD, datasets_and_regions.len()); + + // Bump up the number of required regions + let datasets_and_regions = datastore + .arbitrary_region_allocate( + &opctx, + volume_id, + ¶ms::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, + ByteCount::from_gibibytes_u32(1), + &RegionAllocationStrategy::Random { seed: None }, + REGION_REDUNDANCY_THRESHOLD + 1, + ) + .await + .unwrap(); + + // There should be the same amount as we requested + assert_eq!(REGION_REDUNDANCY_THRESHOLD + 1, datasets_and_regions.len()); +} + +// Test allocating a single region to replace a disk's region +#[nexus_test] +async fn test_single_region_allocate_for_replace( + cptestctx: &ControlPlaneTestContext, +) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + // Create three 10 GiB zpools, each with one dataset. + let mut disk_test = DiskTest::new(&cptestctx).await; + + // One more zpool and dataset is required to meet `region_allocate`'s + // redundancy requirement. 
+ disk_test.add_zpool_with_dataset(&cptestctx).await; + + // Assert default is still 10 GiB + assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); + + // Create a disk + let client = &cptestctx.external_client; + let _project_id = create_project_and_pool(client).await; + + let disk = create_disk(&client, PROJECT_NAME, DISK_NAME).await; + + // Assert disk has three allocated regions + let disk_id = disk.identity.id; + let (.., db_disk) = LookupPath::new(&opctx, &datastore) + .disk_id(disk_id) + .fetch() + .await + .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + + let allocated_regions = + datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD); + + // Allocate one more single 1 GB region to replace one of the disk's regions + let region_to_replace: &nexus_db_model::Region = &allocated_regions[0].1; + + let one_more = allocated_regions.len() + 1; + assert_eq!(one_more, REGION_REDUNDANCY_THRESHOLD + 1); + + let region_total_size: ByteCount = ByteCount::try_from( + region_to_replace.block_size().to_bytes() + * region_to_replace.blocks_per_extent() + * region_to_replace.extent_count(), + ) + .unwrap(); + + assert_eq!(region_total_size, ByteCount::from_gibibytes_u32(1)); + + let datasets_and_regions = datastore + .arbitrary_region_allocate( + &opctx, + db_disk.volume_id, + ¶ms::DiskSource::Blank { + block_size: params::BlockSize::try_from( + region_to_replace.block_size().to_bytes() as u32, + ) + .unwrap(), + }, + region_total_size, + &RegionAllocationStrategy::Random { seed: None }, + one_more, + ) + .await + .unwrap(); + + eprintln!("{:?}", datasets_and_regions); + + assert_eq!(datasets_and_regions.len(), one_more); + + // There should be `one_more` regions for this disk's volume id. + let allocated_regions = + datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + assert_eq!(allocated_regions.len(), one_more); + + // Each region should be on a different pool + let pools_used: HashSet = datasets_and_regions + .iter() + .map(|(dataset, _)| dataset.pool_id) + .collect(); + + assert_eq!(pools_used.len(), REGION_REDUNDANCY_THRESHOLD + 1); +} + +// Confirm allocating a single region to replace a disk's region fails if +// there's not enough unique zpools +#[nexus_test] +async fn test_single_region_allocate_for_replace_not_enough_zpools( + cptestctx: &ControlPlaneTestContext, +) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + // Create three 10 GiB zpools, each with one dataset. 
+ let _disk_test = DiskTest::new(&cptestctx).await; + + // Assert default is still 10 GiB + assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); + + // Create a disk + let client = &cptestctx.external_client; + let _project_id = create_project_and_pool(client).await; + + let disk = create_disk(&client, PROJECT_NAME, DISK_NAME).await; + + // Assert disk has three allocated regions + let disk_id = disk.identity.id; + let (.., db_disk) = LookupPath::new(&opctx, &datastore) + .disk_id(disk_id) + .fetch() + .await + .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + + let allocated_regions = + datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD); + + // Allocate one more single 1 GB region to replace one of the disk's regions + let region_to_replace: &nexus_db_model::Region = &allocated_regions[0].1; + + let one_more = allocated_regions.len() + 1; + assert_eq!(one_more, REGION_REDUNDANCY_THRESHOLD + 1); + + let region_total_size: ByteCount = ByteCount::try_from( + region_to_replace.block_size().to_bytes() + * region_to_replace.blocks_per_extent() + * region_to_replace.extent_count(), + ) + .unwrap(); + + assert_eq!(region_total_size, ByteCount::from_gibibytes_u32(1)); + + // Trying to allocate one more should fail + let result = datastore + .arbitrary_region_allocate( + &opctx, + db_disk.volume_id, + ¶ms::DiskSource::Blank { + block_size: params::BlockSize::try_from( + region_to_replace.block_size().to_bytes() as u32, + ) + .unwrap(), + }, + region_total_size, + &RegionAllocationStrategy::Random { seed: None }, + one_more, + ) + .await; + + assert!(result.is_err()); + + // Confirm calling `arbitrary_region_allocate` still idempotently works + let datasets_and_regions = datastore + .arbitrary_region_allocate( + &opctx, + db_disk.volume_id, + ¶ms::DiskSource::Blank { + block_size: params::BlockSize::try_from( + region_to_replace.block_size().to_bytes() as u32, + ) + .unwrap(), + }, + region_total_size, + &RegionAllocationStrategy::Random { seed: None }, + allocated_regions.len(), + ) + .await + .unwrap(); + + assert_eq!(datasets_and_regions.len(), REGION_REDUNDANCY_THRESHOLD); +} + +// Confirm that a region set can start at N, a region can be deleted, and the +// allocation CTE can bring the redundancy back to N. +#[nexus_test] +async fn test_region_allocation_after_delete( + cptestctx: &ControlPlaneTestContext, +) { + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + // Create three 10 GiB zpools, each with one dataset. 
+ let _disk_test = DiskTest::new(&cptestctx).await; + + // Assert default is still 10 GiB + assert_eq!(10, DiskTest::DEFAULT_ZPOOL_SIZE_GIB); + + // Create a disk + let client = &cptestctx.external_client; + let _project_id = create_project_and_pool(client).await; + + let disk = create_disk(&client, PROJECT_NAME, DISK_NAME).await; + + // Assert disk has three allocated regions + let disk_id = disk.identity.id; + let (.., db_disk) = LookupPath::new(&opctx, &datastore) + .disk_id(disk_id) + .fetch() + .await + .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + + let allocated_regions = + datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD); + + // Delete one of the regions + let region_to_delete: &nexus_db_model::Region = &allocated_regions[0].1; + datastore + .regions_hard_delete(&opctx.log, vec![region_to_delete.id()]) + .await + .unwrap(); + + // Assert disk's volume has one less allocated region + let allocated_regions = + datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD - 1); + + let region_total_size: ByteCount = ByteCount::try_from( + region_to_delete.block_size().to_bytes() + * region_to_delete.blocks_per_extent() + * region_to_delete.extent_count(), + ) + .unwrap(); + + // Rerun disk region allocation + datastore + .disk_region_allocate( + &opctx, + db_disk.volume_id, + ¶ms::DiskSource::Blank { + block_size: params::BlockSize::try_from( + region_to_delete.block_size().to_bytes() as u32, + ) + .unwrap(), + }, + region_total_size, + &RegionAllocationStrategy::Random { seed: None }, + ) + .await + .unwrap(); + + // Assert redundancy was restored + let allocated_regions = + datastore.get_allocated_regions(db_disk.volume_id).await.unwrap(); + assert_eq!(allocated_regions.len(), REGION_REDUNDANCY_THRESHOLD); +} + async fn disk_get(client: &ClientTestContext, disk_url: &str) -> Disk { NexusRequest::object_get(client, disk_url) .authn_as(AuthnMode::PrivilegedUser) diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index b2b1e72c23..7672bbc034 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -15,6 +15,7 @@ use nexus_db_queries::authn; use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_db_queries::db::identity::Resource; use nexus_test_utils::resource_helpers::DiskTest; +use nexus_test_utils::PHYSICAL_DISK_UUID; use nexus_test_utils::RACK_UUID; use nexus_test_utils::SLED_AGENT_UUID; use nexus_test_utils::SWITCH_UUID; @@ -24,11 +25,11 @@ use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::Ipv4Range; use nexus_types::external_api::views::SledProvisionPolicy; use omicron_common::api::external::AddressLotKind; +use omicron_common::api::external::AllowedSourceIps; use omicron_common::api::external::ByteCount; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::IdentityMetadataUpdateParams; use omicron_common::api::external::InstanceCpuCount; -use omicron_common::api::external::Ipv4Net; use omicron_common::api::external::Name; use omicron_common::api::external::NameOrId; use omicron_common::api::external::RouteDestination; @@ -56,7 +57,9 @@ pub static DEMO_SLED_PROVISION_POLICY: Lazy = pub static HARDWARE_SWITCH_URL: Lazy = Lazy::new(|| format!("/v1/system/hardware/switches/{}", SWITCH_UUID)); -pub const 
HARDWARE_DISK_URL: &'static str = "/v1/system/hardware/disks"; +pub const HARDWARE_DISKS_URL: &'static str = "/v1/system/hardware/disks"; +pub static HARDWARE_DISK_URL: Lazy = + Lazy::new(|| format!("/v1/system/hardware/disks/{}", PHYSICAL_DISK_UUID)); pub static HARDWARE_SLED_DISK_URL: Lazy = Lazy::new(|| { format!("/v1/system/hardware/sleds/{}/disks", SLED_AGENT_UUID) }); @@ -197,7 +200,7 @@ pub static DEMO_VPC_SUBNET_CREATE: Lazy = name: DEMO_VPC_SUBNET_NAME.clone(), description: String::from(""), }, - ipv4_block: Ipv4Net("10.1.2.3/8".parse().unwrap()), + ipv4_block: "10.1.2.3/8".parse().unwrap(), ipv6_block: None, }); @@ -494,6 +497,15 @@ pub static DEMO_SWITCH_PORT_SETTINGS: Lazy = Lazy::new(|| params::SwitchPortApplySettings { port_settings: NameOrId::Name("portofino".parse().unwrap()), }); +/* TODO requires dpd access +pub static DEMO_SWITCH_PORT_STATUS_URL: Lazy = Lazy::new(|| { + format!( + "/v1/system/hardware/switch-port/qsfp7/status?rack_id={}&switch_location={}", + uuid::Uuid::new_v4(), + "switch0", + ) +}); +*/ pub static DEMO_LOOPBACK_CREATE_URL: Lazy = Lazy::new(|| "/v1/system/networking/loopback-address".into()); @@ -558,6 +570,8 @@ pub static DEMO_BGP_CONFIG: Lazy = bgp_announce_set_id: NameOrId::Name("instances".parse().unwrap()), asn: 47, vrf: None, + checker: None, + shaper: None, }); pub const DEMO_BGP_ANNOUNCE_SET_URL: &'static str = "/v1/system/networking/bgp-announce?name_or_id=a-bag-of-addrs"; @@ -845,6 +859,17 @@ pub static DEMO_SILO_METRICS_URL: Lazy = Lazy::new(|| { ) }); +pub static TIMESERIES_LIST_URL: Lazy = + Lazy::new(|| String::from("/v1/timeseries/schema")); + +pub static TIMESERIES_QUERY_URL: Lazy = + Lazy::new(|| String::from("/v1/timeseries/query")); + +pub static DEMO_TIMESERIES_QUERY: Lazy = + Lazy::new(|| params::TimeseriesQuery { + query: String::from("get http_service:request_latency_histogram"), + }); + // Users pub static DEMO_USER_CREATE: Lazy = Lazy::new(|| params::UserCreate { @@ -852,6 +877,13 @@ pub static DEMO_USER_CREATE: Lazy = password: params::UserPassword::LoginDisallowed, }); +// Allowlist for user-facing services. 
+pub static ALLOW_LIST_URL: Lazy = + Lazy::new(|| String::from("/v1/system/networking/allow-list")); +pub static ALLOW_LIST_UPDATE: Lazy = Lazy::new(|| { + params::AllowListUpdate { allowed_ips: AllowedSourceIps::Any } +}); + /// Describes an API endpoint to be verified by the "unauthorized" test /// /// These structs are also used to check whether we're covering all endpoints in @@ -1956,12 +1988,20 @@ pub static VERIFY_ENDPOINTS: Lazy> = Lazy::new(|| { }, VerifyEndpoint { - url: &HARDWARE_DISK_URL, + url: &HARDWARE_DISKS_URL, visibility: Visibility::Public, unprivileged_access: UnprivilegedAccess::None, allowed_methods: vec![AllowedMethod::Get], }, + VerifyEndpoint { + url: &HARDWARE_DISK_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + + VerifyEndpoint { url: &HARDWARE_SLED_DISK_URL, visibility: Visibility::Public, @@ -2012,6 +2052,26 @@ pub static VERIFY_ENDPOINTS: Lazy> = Lazy::new(|| { ], }, + VerifyEndpoint { + url: &TIMESERIES_LIST_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + ], + }, + + VerifyEndpoint { + url: &TIMESERIES_QUERY_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Post( + serde_json::to_value(&*DEMO_TIMESERIES_QUERY).unwrap() + ), + ], + }, + /* Silo identity providers */ VerifyEndpoint { @@ -2119,6 +2179,17 @@ pub static VERIFY_ENDPOINTS: Lazy> = Lazy::new(|| { ], }, + /* TODO requires dpd access + VerifyEndpoint { + url: &DEMO_SWITCH_PORT_STATUS_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + ], + }, + */ + VerifyEndpoint { url: &DEMO_SWITCH_PORT_SETTINGS_APPLY_URL, @@ -2336,5 +2407,18 @@ pub static VERIFY_ENDPOINTS: Lazy> = Lazy::new(|| { ), ], }, + + // User-facing services IP allowlist + VerifyEndpoint { + url: &ALLOW_LIST_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&*ALLOW_LIST_UPDATE).unwrap(), + ), + ], + }, ] }); diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index 203a1f35f7..396edddc41 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -22,8 +22,10 @@ use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_floating_ip; use nexus_test_utils::resource_helpers::create_instance_with; use nexus_test_utils::resource_helpers::create_ip_pool; +use nexus_test_utils::resource_helpers::create_local_user; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils::resource_helpers::create_silo; +use nexus_test_utils::resource_helpers::grant_iam; use nexus_test_utils::resource_helpers::link_ip_pool; use nexus_test_utils::resource_helpers::object_create; use nexus_test_utils::resource_helpers::object_create_error; @@ -34,6 +36,7 @@ use nexus_test_utils::resource_helpers::object_put; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use nexus_types::external_api::shared; +use nexus_types::external_api::shared::SiloRole; use nexus_types::external_api::views; use nexus_types::external_api::views::FloatingIp; use nexus_types::identity::Resource; @@ -150,7 +153,7 @@ async 
fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; // automatically linked to current silo - create_default_ip_pool(&client).await; + let default_pool = create_default_ip_pool(&client).await; assert_ip_pool_utilization(client, "default", 0, 65536, 0, 0).await; @@ -159,7 +162,8 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { .unwrap(), ); // not automatically linked to currently silo. see below - create_ip_pool(&client, "other-pool", Some(other_pool_range)).await; + let (other_pool, ..) = + create_ip_pool(&client, "other-pool", Some(other_pool_range)).await; assert_ip_pool_utilization(client, "other-pool", 0, 5, 0, 0).await; @@ -179,6 +183,7 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(fip.project_id, project.identity.id); assert_eq!(fip.instance_id, None); assert_eq!(fip.ip, IpAddr::from(Ipv4Addr::new(10, 0, 0, 0))); + assert_eq!(fip.ip_pool_id, default_pool.identity.id); assert_ip_pool_utilization(client, "default", 1, 65536, 0, 0).await; @@ -197,6 +202,7 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(fip.project_id, project.identity.id); assert_eq!(fip.instance_id, None); assert_eq!(fip.ip, ip_addr); + assert_eq!(fip.ip_pool_id, default_pool.identity.id); assert_ip_pool_utilization(client, "default", 2, 65536, 0, 0).await; @@ -227,10 +233,11 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(fip.project_id, project.identity.id); assert_eq!(fip.instance_id, None); assert_eq!(fip.ip, IpAddr::from(Ipv4Addr::new(10, 1, 0, 1))); + assert_eq!(fip.ip_pool_id, other_pool.identity.id); assert_ip_pool_utilization(client, "other-pool", 1, 5, 0, 0).await; - // Create with chosen IP from fleet-scoped named pool. + // Create with chosen IP from non-default pool. 
let fip_name = FIP_NAMES[3]; let ip_addr = "10.1.0.5".parse().unwrap(); let fip = create_floating_ip( @@ -245,10 +252,133 @@ async fn test_floating_ip_create(cptestctx: &ControlPlaneTestContext) { assert_eq!(fip.project_id, project.identity.id); assert_eq!(fip.instance_id, None); assert_eq!(fip.ip, ip_addr); + assert_eq!(fip.ip_pool_id, other_pool.identity.id); assert_ip_pool_utilization(client, "other-pool", 2, 5, 0, 0).await; } +#[nexus_test] +async fn test_floating_ip_create_non_admin( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + let silo_url = format!("/v1/system/silos/{}", cptestctx.silo_name); + let silo: views::Silo = object_get(client, &silo_url).await; + + // manually create default pool and link to test silo, as opposed to default + // silo, which is what the helper would do + let _ = create_ip_pool(&client, "default", None).await; + link_ip_pool(&client, "default", &silo.identity.id, true).await; + + // create other pool and link to silo + let other_pool_range = IpRange::V4( + Ipv4Range::new(Ipv4Addr::new(10, 1, 0, 1), Ipv4Addr::new(10, 1, 0, 5)) + .unwrap(), + ); + create_ip_pool(&client, "other-pool", Some(other_pool_range)).await; + link_ip_pool(&client, "other-pool", &silo.identity.id, false).await; + + // create third pool and don't link to silo + let unlinked_pool_range = IpRange::V4( + Ipv4Range::new(Ipv4Addr::new(10, 2, 0, 1), Ipv4Addr::new(10, 2, 0, 5)) + .unwrap(), + ); + create_ip_pool(&client, "unlinked-pool", Some(unlinked_pool_range)).await; + + // Create a silo user + let user = create_local_user( + client, + &silo, + &"user".parse().unwrap(), + params::UserPassword::LoginDisallowed, + ) + .await; + + // Make silo collaborator + grant_iam( + client, + &silo_url, + SiloRole::Collaborator, + user.id, + AuthnMode::PrivilegedUser, + ) + .await; + + // create project as user (i.e., in their silo) + NexusRequest::objects_post( + client, + "/v1/projects", + ¶ms::ProjectCreate { + identity: IdentityMetadataCreateParams { + name: PROJECT_NAME.parse().unwrap(), + description: "floating ip project".to_string(), + }, + }, + ) + .authn_as(AuthnMode::SiloUser(user.id)) + .execute() + .await + .expect("Failed to create project"); + + let create_url = get_floating_ips_url(PROJECT_NAME); + + // create a floating IP as this user, first with default pool + let body = params::FloatingIpCreate { + identity: IdentityMetadataCreateParams { + name: "root-beer".parse().unwrap(), + description: String::from("a floating ip"), + }, + pool: None, + ip: None, + }; + let fip: views::FloatingIp = + NexusRequest::objects_post(client, &create_url, &body) + .authn_as(AuthnMode::SiloUser(user.id)) + .execute_and_parse_unwrap() + .await; + assert_eq!(fip.identity.name.to_string(), "root-beer"); + + // now with other pool linked to my silo + let body = params::FloatingIpCreate { + identity: IdentityMetadataCreateParams { + name: "another-soda".parse().unwrap(), + description: String::from("a floating ip"), + }, + pool: Some(NameOrId::Name("other-pool".parse().unwrap())), + ip: None, + }; + let fip: views::FloatingIp = + NexusRequest::objects_post(client, &create_url, &body) + .authn_as(AuthnMode::SiloUser(user.id)) + .execute_and_parse_unwrap() + .await; + assert_eq!(fip.identity.name.to_string(), "another-soda"); + + // now with pool not linked to my silo (fails with 404) + let body = params::FloatingIpCreate { + identity: IdentityMetadataCreateParams { + name: "secret-third-soda".parse().unwrap(), + description: String::from("a floating ip"), + }, + 
pool: Some(NameOrId::Name("unlinked-pool".parse().unwrap())), + ip: None, + }; + let error = NexusRequest::new( + RequestBuilder::new(client, Method::POST, &create_url) + .body(Some(&body)) + .expect_status(Some(StatusCode::NOT_FOUND)), + ) + .authn_as(AuthnMode::SiloUser(user.id)) + .execute() + .await + .unwrap() + .parsed_body::() + .unwrap(); + + assert_eq!(error.message, "not found: ip-pool with name \"unlinked-pool\""); +} + #[nexus_test] async fn test_floating_ip_create_fails_in_other_silo_pool( cptestctx: &ControlPlaneTestContext, @@ -498,7 +628,7 @@ async fn test_floating_ip_create_attachment( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; create_default_ip_pool(&client).await; @@ -595,7 +725,7 @@ async fn test_external_ip_live_attach_detach( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; create_default_ip_pool(&client).await; @@ -804,7 +934,7 @@ async fn test_floating_ip_attach_fail_between_projects( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let _nexus = &apictx.nexus; create_default_ip_pool(&client).await; @@ -879,7 +1009,7 @@ async fn test_external_ip_attach_fail_if_in_use_by_other( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; create_default_ip_pool(&client).await; diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index d5c4a1e7af..565e2fbafb 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -4,6 +4,8 @@ //! 
Tests basic instance support in the API +use crate::integration_tests::metrics::wait_for_producer; + use super::external_ips::floating_ip_get; use super::external_ips::get_floating_ip_by_id_url; use super::metrics::{get_latest_silo_metric, get_latest_system_metric}; @@ -54,7 +56,6 @@ use omicron_common::api::external::Instance; use omicron_common::api::external::InstanceCpuCount; use omicron_common::api::external::InstanceNetworkInterface; use omicron_common::api::external::InstanceState; -use omicron_common::api::external::Ipv4Net; use omicron_common::api::external::Name; use omicron_common::api::external::NameOrId; use omicron_common::api::external::Vni; @@ -64,6 +65,7 @@ use omicron_nexus::app::MIN_MEMORY_BYTES_PER_INSTANCE; use omicron_nexus::Nexus; use omicron_nexus::TestInterfaces as _; use omicron_sled_agent::sim::SledAgent; +use omicron_test_utils::dev::poll::wait_for_condition; use sled_agent_client::TestInterfaces as _; use std::convert::TryFrom; use std::net::Ipv4Addr; @@ -114,7 +116,9 @@ fn default_vpc_subnets_url() -> String { format!("/v1/vpc-subnets?{}&vpc=default", get_project_selector()) } -async fn create_project_and_pool(client: &ClientTestContext) -> views::Project { +pub async fn create_project_and_pool( + client: &ClientTestContext, +) -> views::Project { create_default_ip_pool(client).await; create_project(client, PROJECT_NAME).await } @@ -276,7 +280,7 @@ async fn test_instances_create_reboot_halt( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; @@ -581,7 +585,7 @@ async fn test_instance_start_creates_networking_state( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "series-of-tubes"; @@ -656,14 +660,6 @@ async fn test_instance_start_creates_networking_state( .await .unwrap(); - let instance_state = datastore - .instance_fetch_with_vmm(&opctx, &authz_instance) - .await - .unwrap(); - - let sled_id = - instance_state.sled_id().expect("running instance should have a sled"); - let guest_nics = datastore .derive_guest_network_interface_info(&opctx, &authz_instance) .await @@ -671,20 +667,14 @@ async fn test_instance_start_creates_networking_state( assert_eq!(guest_nics.len(), 1); for agent in &sled_agents { - // TODO(#3107) Remove this bifurcation when Nexus programs all mappings - // itself. 
- if agent.id != sled_id { - assert_sled_v2p_mappings(agent, &nics[0], guest_nics[0].vni).await; - } else { - assert!(agent.v2p_mappings.lock().await.is_empty()); - } + assert_sled_v2p_mappings(agent, &nics[0], guest_nics[0].vni).await; } } #[nexus_test] async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "bird-ecology"; @@ -780,7 +770,7 @@ async fn test_instance_migrate(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); let opctx = @@ -857,24 +847,7 @@ async fn test_instance_migrate_v2p(cptestctx: &ControlPlaneTestContext) { let mut sled_agents = vec![cptestctx.sled_agent.sled_agent.clone()]; sled_agents.extend(other_sleds.iter().map(|tup| tup.1.sled_agent.clone())); for sled_agent in &sled_agents { - // Starting the instance should have programmed V2P mappings to all the - // sleds except the one where the instance is running. - // - // TODO(#3107): In practice, the instance's sled also has V2P mappings, but - // these are established during VMM setup (i.e. as part of creating the - // instance's OPTE ports) instead of being established by explicit calls - // from Nexus. Simulated sled agent handles the latter calls but does - // not currently update any mappings during simulated instance creation, - // so the check below verifies that no mappings exist on the instance's - // own sled instead of checking for a real mapping. Once Nexus programs - // all mappings explicitly (without skipping the instance's current - // sled) this bifurcation should be removed. - if sled_agent.id != original_sled_id { - assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni) - .await; - } else { - assert!(sled_agent.v2p_mappings.lock().await.is_empty()); - } + assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni).await; } let dst_sled_id = if original_sled_id == cptestctx.sled_agent.sled_agent.id @@ -932,7 +905,7 @@ async fn test_instance_failed_after_sled_agent_error( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "losing-is-fun"; @@ -1058,12 +1031,8 @@ async fn assert_metrics( #[nexus_test] async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { - // Normally, Nexus is not registered as a producer for tests. - // Turn this bit on so we can also test some metrics from Nexus itself. - cptestctx.server.register_as_producer().await; - let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); @@ -1071,6 +1040,9 @@ async fn test_instance_metrics(cptestctx: &ControlPlaneTestContext) { let project = create_project_and_pool(&client).await; let project_id = project.identity.id; + // Wait until Nexus is registered as a metric producer with Oximeter. 
+ wait_for_producer(&cptestctx.oximeter, nexus.id()).await; + // Query the view of these metrics stored within CRDB let opctx = OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); @@ -1140,10 +1112,17 @@ async fn test_instance_metrics_with_migration( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "bird-ecology"; + // Wait until Nexus registers as a producer with Oximeter. + wait_for_producer( + &cptestctx.oximeter, + cptestctx.server.server_context().nexus.id(), + ) + .await; + // Create a second sled to migrate to/from. let default_sled_id: Uuid = nexus_test_utils::SLED_AGENT_UUID.parse().unwrap(); @@ -1266,7 +1245,7 @@ async fn test_instances_create_stopped_start( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; @@ -1317,7 +1296,7 @@ async fn test_instances_delete_fails_when_running_succeeds_when_stopped( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "just-rainsticks"; @@ -1704,7 +1683,7 @@ async fn test_instance_with_new_custom_network_interfaces( name: non_default_subnet_name.clone(), description: String::from("A non-default subnet"), }, - ipv4_block: Ipv4Net("172.31.0.0/24".parse().unwrap()), + ipv4_block: "172.31.0.0/24".parse().unwrap(), ipv6_block: None, }; let _response = NexusRequest::objects_post( @@ -1839,7 +1818,7 @@ async fn test_instance_create_delete_network_interface( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let instance_name = "nic-attach-test-inst"; create_project_and_pool(&client).await; @@ -1850,7 +1829,7 @@ async fn test_instance_create_delete_network_interface( name: Name::try_from(String::from("secondary")).unwrap(), description: String::from("A secondary VPC subnet"), }, - ipv4_block: Ipv4Net("172.31.0.0/24".parse().unwrap()), + ipv4_block: "172.31.0.0/24".parse().unwrap(), ipv6_block: None, }; let _response = NexusRequest::objects_post( @@ -2080,7 +2059,7 @@ async fn test_instance_update_network_interfaces( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let instance_name = "nic-update-test-inst"; create_project_and_pool(&client).await; @@ -2091,7 +2070,7 @@ async fn test_instance_update_network_interfaces( name: Name::try_from(String::from("secondary")).unwrap(), description: String::from("A secondary VPC subnet"), }, - ipv4_block: Ipv4Net("172.31.0.0/24".parse().unwrap()), + ipv4_block: "172.31.0.0/24".parse().unwrap(), ipv6_block: None, }; let _response = NexusRequest::objects_post( @@ -2700,7 +2679,7 @@ async fn test_instance_create_attach_disks_undo( let faulted_disk = create_disk(&client, PROJECT_NAME, "faulted-disk").await; // set `faulted_disk` to the faulted state - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; assert!(nexus 
.set_disk_as_faulted(&faulted_disk.identity.id) @@ -2961,7 +2940,7 @@ async fn test_cannot_attach_faulted_disks(cptestctx: &ControlPlaneTestContext) { assert_eq!(disks.len(), 8); // Set the 7th to FAULTED - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; assert!(nexus.set_disk_as_faulted(&disks[6].identity.id).await.unwrap()); @@ -3119,7 +3098,7 @@ async fn test_disks_detached_when_instance_destroyed( // sled. let instance_url = format!("/v1/instances/nfs?project={}", PROJECT_NAME); let instance = instance_get(&client, &instance_url).await; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let sa = nexus .instance_sled_by_id(&instance.identity.id) @@ -3646,7 +3625,7 @@ async fn test_cannot_provision_instance_beyond_cpu_capacity( // Make the started instance transition to Running, shut it down, and verify // that the other reasonably-sized instance can now start. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instances[1].identity.id).await; instances[1] = instance_post(client, configs[1].0, InstanceOp::Stop).await; instance_simulate(nexus, &instances[1].identity.id).await; @@ -3752,7 +3731,7 @@ async fn test_cannot_provision_instance_beyond_ram_capacity( // Make the started instance transition to Running, shut it down, and verify // that the other reasonably-sized instance can now start. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instances[1].identity.id).await; instances[1] = instance_post(client, configs[1].0, InstanceOp::Stop).await; instance_simulate(nexus, &instances[1].identity.id).await; @@ -3762,7 +3741,7 @@ async fn test_cannot_provision_instance_beyond_ram_capacity( #[nexus_test] async fn test_instance_serial(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let instance_name = "kris-picks"; @@ -4042,7 +4021,7 @@ async fn stop_and_delete_instance( let client = &cptestctx.external_client; let instance = instance_post(&client, instance_name, InstanceOp::Stop).await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instance.identity.id).await; let url = format!("/v1/instances/{}?project={}", instance_name, PROJECT_NAME); @@ -4426,7 +4405,7 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) { // Make sure the instance can actually start even though a collaborator // created it. 
- let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let authn = AuthnMode::SiloUser(user_id); let instance_url = get_instance_url(instance_name); @@ -4523,7 +4502,7 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { // Validate that every sled (except the instance's sled) now has a V2P // mapping for this instance - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let datastore = nexus.datastore(); let opctx = @@ -4535,14 +4514,6 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - let instance_state = datastore - .instance_fetch_with_vmm(&opctx, &authz_instance) - .await - .unwrap(); - - let sled_id = - instance_state.sled_id().expect("running instance should have a sled"); - let guest_nics = datastore .derive_guest_network_interface_info(&opctx, &authz_instance) .await @@ -4555,14 +4526,7 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { sled_agents.push(&cptestctx.sled_agent.sled_agent); for sled_agent in &sled_agents { - // TODO(#3107) Remove this bifurcation when Nexus programs all mappings - // itself. - if sled_agent.id != sled_id { - assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni) - .await; - } else { - assert!(sled_agent.v2p_mappings.lock().await.is_empty()); - } + assert_sled_v2p_mappings(sled_agent, &nics[0], guest_nics[0].vni).await; } // Delete the instance @@ -4579,8 +4543,21 @@ async fn test_instance_v2p_mappings(cptestctx: &ControlPlaneTestContext) { // Validate that every sled no longer has the V2P mapping for this instance for sled_agent in &sled_agents { - let v2p_mappings = sled_agent.v2p_mappings.lock().await; - assert!(v2p_mappings.is_empty()); + let condition = || async { + let v2p_mappings = sled_agent.v2p_mappings.lock().await; + if v2p_mappings.is_empty() { + Ok(()) + } else { + Err(CondCheckError::NotYet::<()>) + } + }; + wait_for_condition( + condition, + &Duration::from_secs(1), + &Duration::from_secs(30), + ) + .await + .expect("v2p mappings should be empty"); } } @@ -4677,14 +4654,28 @@ async fn assert_sled_v2p_mappings( nic: &InstanceNetworkInterface, vni: Vni, ) { - let v2p_mappings = sled_agent.v2p_mappings.lock().await; - assert!(!v2p_mappings.is_empty()); - - let mapping = v2p_mappings.get(&nic.identity.id).unwrap().last().unwrap(); - assert_eq!(mapping.virtual_ip, nic.ip); - assert_eq!(mapping.virtual_mac, nic.mac); - assert_eq!(mapping.physical_host_ip, sled_agent.ip); - assert_eq!(mapping.vni, vni); + let condition = || async { + let v2p_mappings = sled_agent.v2p_mappings.lock().await; + let mapping = v2p_mappings.iter().find(|mapping| { + mapping.virtual_ip == nic.ip + && mapping.virtual_mac == nic.mac + && mapping.physical_host_ip == sled_agent.ip + && mapping.vni == vni + }); + + if mapping.is_some() { + Ok(()) + } else { + Err(CondCheckError::NotYet::<()>) + } + }; + wait_for_condition( + condition, + &Duration::from_secs(1), + &Duration::from_secs(30), + ) + .await + .expect("matching v2p mapping should be present"); } /// Simulate completion of an ongoing instance state transition. 
To do this, we diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index cb5eade735..38cfd25844 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -791,7 +791,7 @@ async fn test_ip_pool_utilization_total(cptestctx: &ControlPlaneTestContext) { // allowed. It's worth doing because we want this code to correctly handle // IPv6 ranges when they are allowed again. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let log = cptestctx.logctx.log.new(o!()); let opctx = OpContext::for_tests(log, datastore.clone()); @@ -1147,7 +1147,7 @@ async fn test_ip_range_delete_with_allocated_external_ip_fails( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; let ip_pools_url = "/v1/system/ip-pools"; let pool_name = "mypool"; diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs index 73f11ce49a..5fbff216d9 100644 --- a/nexus/tests/integration_tests/metrics.rs +++ b/nexus/tests/integration_tests/metrics.rs @@ -2,6 +2,11 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +use std::time::Duration; + +use crate::integration_tests::instances::{ + create_project_and_pool, instance_post, instance_simulate, InstanceOp, +}; use chrono::Utc; use dropshot::test_util::ClientTestContext; use dropshot::ResultsPage; @@ -14,8 +19,10 @@ use nexus_test_utils::resource_helpers::{ }; use nexus_test_utils::ControlPlaneTestContext; use nexus_test_utils_macros::nexus_test; +use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError}; use oximeter::types::Datum; use oximeter::types::Measurement; +use oximeter::TimeseriesSchema; use uuid::Uuid; pub async fn query_for_metrics( @@ -39,7 +46,7 @@ pub async fn get_latest_system_metric( None => "".to_string(), }; let url = format!( - "/v1/system/metrics/{metric_name}?start_time={:?}&end_time={:?}&order=descending&limit=1{}", + "/v1/system/metrics/{metric_name}?start_time={:?}&end_time={:?}&order=descending&limit=1{}", cptestctx.start_time, Utc::now(), id_param, @@ -69,7 +76,7 @@ pub async fn get_latest_silo_metric( None => "".to_string(), }; let url = format!( - "/v1/metrics/{metric_name}?start_time={:?}&end_time={:?}&order=descending&limit=1{}", + "/v1/metrics/{metric_name}?start_time={:?}&end_time={:?}&order=descending&limit=1{}", cptestctx.start_time, Utc::now(), id_param, @@ -78,7 +85,7 @@ pub async fn get_latest_silo_metric( objects_list_page_authz::(client, &url).await; // prevent more confusing error on next line - assert!(measurements.items.len() == 1, "Expected exactly one measurement"); + assert_eq!(measurements.items.len(), 1, "Expected exactly one measurement"); let item = &measurements.items[0]; let datum = match item.datum() { @@ -166,11 +173,16 @@ async fn test_metrics( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - - cptestctx.server.register_as_producer().await; // needed for oximeter metrics to work create_default_ip_pool(&client).await; // needed for instance create to work DiskTest::new(cptestctx).await; // needed for disk create to work + // Wait until Nexus registers as a producer with Oximeter. 
+ wait_for_producer( + &cptestctx.oximeter, + cptestctx.server.server_context().nexus.id(), + ) + .await; + // silo metrics start out zero assert_system_metrics(&cptestctx, None, 0, 0, 0).await; assert_system_metrics(&cptestctx, Some(*DEFAULT_SILO_ID), 0, 0, 0).await; @@ -186,14 +198,14 @@ async fn test_metrics( // obvious, but all the different resources are stored the same way in // Clickhouse, so we need to be careful. let bad_silo_metrics_url = format!( - "/v1/metrics/cpus_provisioned?start_time={:?}&end_time={:?}&order=descending&limit=1&project={}", + "/v1/metrics/cpus_provisioned?start_time={:?}&end_time={:?}&order=descending&limit=1&project={}", cptestctx.start_time, Utc::now(), *DEFAULT_SILO_ID, ); assert_404(&cptestctx, &bad_silo_metrics_url).await; let bad_system_metrics_url = format!( - "/v1/system/metrics/cpus_provisioned?start_time={:?}&end_time={:?}&order=descending&limit=1&silo={}", + "/v1/system/metrics/cpus_provisioned?start_time={:?}&end_time={:?}&order=descending&limit=1&silo={}", cptestctx.start_time, Utc::now(), project1_id, @@ -238,3 +250,365 @@ async fn test_metrics( // project 1 unaffected by project 2's resources assert_silo_metrics(&cptestctx, Some(project1_id), GIB, 4, GIB).await; } + +/// Test that we can correctly list some timeseries schema. +#[nexus_test] +async fn test_timeseries_schema_list( + cptestctx: &ControlPlaneTestContext, +) { + // Nexus registers itself as a metric producer on startup, with its own UUID + // as the producer ID. Wait for this to show up in the registered lists of + // producers. + let nexus_id = cptestctx.server.server_context().nexus.id(); + wait_for_producer(&cptestctx.oximeter, nexus_id).await; + + // We should be able to fetch the list of timeseries, and it should include + // Nexus's HTTP latency distribution. This is defined in Nexus itself, and + // should always exist after we've registered as a producer and start + // producing data. Force a collection to ensure that happens. + cptestctx.oximeter.force_collect().await; + let client = &cptestctx.external_client; + let url = "/v1/timeseries/schema"; + let schema = + objects_list_page_authz::(client, &url).await; + schema + .items + .iter() + .find(|sc| { + sc.timeseries_name == "http_service:request_latency_histogram" + }) + .expect("Failed to find HTTP request latency histogram schema"); +} + +pub async fn timeseries_query( + cptestctx: &ControlPlaneTestContext, + query: impl ToString, +) -> Vec { + // first, make sure the latest timeseries have been collected. + cptestctx.oximeter.force_collect().await; + + // okay, do the query + let body = nexus_types::external_api::params::TimeseriesQuery { + query: query.to_string(), + }; + let query = &body.query; + let rsp = NexusRequest::new( + nexus_test_utils::http_testing::RequestBuilder::new( + &cptestctx.external_client, + http::Method::POST, + "/v1/timeseries/query", + ) + .body(Some(&body)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap_or_else(|e| { + panic!("timeseries query failed: {e:?}\nquery: {query}") + }); + rsp.parsed_body().unwrap_or_else(|e| { + panic!( + "could not parse timeseries query response: {e:?}\n\ + query: {query}\nresponse: {rsp:#?}" + ); + }) +} + +#[nexus_test] +async fn test_instance_watcher_metrics( + cptestctx: &ControlPlaneTestContext, +) { + macro_rules! 
assert_gte { + ($a:expr, $b:expr) => {{ + let a = $a; + let b = $b; + assert!( + $a >= $b, + concat!( + "assertion failed: ", + stringify!($a), + " >= ", + stringify!($b), + ", ", + stringify!($a), + " = {:?}, ", + stringify!($b), + " = {:?}", + ), + a, + b + ); + }}; + } + use oximeter::types::FieldValue; + const INSTANCE_ID_FIELD: &str = "instance_id"; + const STATE_FIELD: &str = "state"; + const STATE_STARTING: &str = "starting"; + const STATE_RUNNING: &str = "running"; + const STATE_STOPPING: &str = "stopping"; + const OXQL_QUERY: &str = "get virtual_machine:check"; + + let client = &cptestctx.external_client; + let internal_client = &cptestctx.internal_client; + let nexus = &cptestctx.server.server_context().nexus; + let oximeter = &cptestctx.oximeter; + + // TODO(eliza): consider factoring this out to a generic + // `activate_background_task` function in `nexus-test-utils` eventually? + let activate_instance_watcher = || async { + use nexus_client::types::BackgroundTask; + use nexus_client::types::CurrentStatus; + use nexus_client::types::CurrentStatusRunning; + use nexus_client::types::LastResult; + use nexus_client::types::LastResultCompleted; + + fn most_recent_start_time( + task: &BackgroundTask, + ) -> Option> { + match task.current { + CurrentStatus::Idle => match task.last { + LastResult::Completed(LastResultCompleted { + start_time, + .. + }) => Some(start_time), + LastResult::NeverCompleted => None, + }, + CurrentStatus::Running(CurrentStatusRunning { + start_time, + .. + }) => Some(start_time), + } + } + + eprintln!("\n --- activating instance watcher ---\n"); + let task = NexusRequest::object_get( + internal_client, + "/bgtasks/view/instance_watcher", + ) + .execute_and_parse_unwrap::() + .await; + let last_start = most_recent_start_time(&task); + + internal_client + .make_request( + http::Method::POST, + "/bgtasks/activate", + Some(serde_json::json!({ + "bgtask_names": vec![String::from("instance_watcher")] + })), + http::StatusCode::NO_CONTENT, + ) + .await + .unwrap(); + // Wait for the instance watcher task to finish + wait_for_condition( + || async { + let task = NexusRequest::object_get( + internal_client, + "/bgtasks/view/instance_watcher", + ) + .execute_and_parse_unwrap::() + .await; + if matches!(&task.current, CurrentStatus::Idle) + && most_recent_start_time(&task) > last_start + { + Ok(()) + } else { + Err(CondCheckError::<()>::NotYet) + } + }, + &Duration::from_millis(500), + &Duration::from_secs(60), + ) + .await + .unwrap(); + // Make sure that the latest metrics have been collected. 
+ oximeter.force_collect().await; + }; + + #[track_caller] + fn count_state( + table: &oximeter_db::oxql::Table, + instance_id: Uuid, + state: &'static str, + ) -> i64 { + use oximeter_db::oxql::point::ValueArray; + let uuid = FieldValue::Uuid(instance_id); + let state = FieldValue::String(state.into()); + let mut timeserieses = table.timeseries().filter(|ts| { + ts.fields.get(INSTANCE_ID_FIELD) == Some(&uuid) + && ts.fields.get(STATE_FIELD) == Some(&state) + }); + let Some(timeseries) = timeserieses.next() else { + panic!( + "missing timeseries for instance {instance_id}, state {state}\n\ + found: {table:#?}" + ) + }; + if let Some(timeseries) = timeserieses.next() { + panic!( + "multiple timeseries for instance {instance_id}, state {state}: \ + {timeseries:?}, {timeseries:?}, ...\n\ + found: {table:#?}" + ) + } + match timeseries.points.values(0) { + Some(ValueArray::Integer(ref vals)) => { + vals.iter().filter_map(|&v| v).sum() + } + x => panic!( + "expected timeseries for instance {instance_id}, \ + state {state} to be an integer, but found: {x:?}" + ), + } + } + + // N.B. that we've gotta use the project name that this function hardcodes + // if we're going to use the `instance_post` test helper later. + let project = create_project_and_pool(&client).await; + let project_name = project.identity.name.as_str(); + // Wait until Nexus registers as a producer with Oximeter. + wait_for_producer(&oximeter, cptestctx.server.server_context().nexus.id()) + .await; + + eprintln!("--- creating instance 1 ---"); + let instance1 = create_instance(&client, project_name, "i-1").await; + let instance1_uuid = instance1.identity.id; + + // activate the instance watcher background task. + activate_instance_watcher().await; + + let metrics = timeseries_query(&cptestctx, OXQL_QUERY).await; + let checks = metrics + .iter() + .find(|t| t.name() == "virtual_machine:check") + .expect("missing virtual_machine:check"); + let ts = dbg!(count_state(&checks, instance1_uuid, STATE_STARTING)); + assert_gte!(ts, 1); + + // okay, make another instance + eprintln!("--- creating instance 2 ---"); + let instance2 = create_instance(&client, project_name, "i-2").await; + let instance2_uuid = instance2.identity.id; + + // activate the instance watcher background task. + activate_instance_watcher().await; + + let metrics = timeseries_query(&cptestctx, OXQL_QUERY).await; + let checks = metrics + .iter() + .find(|t| t.name() == "virtual_machine:check") + .expect("missing virtual_machine:check"); + let ts1 = dbg!(count_state(&checks, instance1_uuid, STATE_STARTING)); + let ts2 = dbg!(count_state(&checks, instance2_uuid, STATE_STARTING)); + assert_gte!(ts1, 2); + assert_gte!(ts2, 1); + + // poke instance 1 to get it into the running state + eprintln!("--- starting instance 1 ---"); + instance_simulate(nexus, &instance1_uuid).await; + + // activate the instance watcher background task. + activate_instance_watcher().await; + + let metrics = timeseries_query(&cptestctx, OXQL_QUERY).await; + let checks = metrics + .iter() + .find(|t| t.name() == "virtual_machine:check") + .expect("missing virtual_machine:check"); + let ts1_starting = + dbg!(count_state(&checks, instance1_uuid, STATE_STARTING)); + let ts1_running = dbg!(count_state(&checks, instance1_uuid, STATE_RUNNING)); + let ts2 = dbg!(count_state(&checks, instance2_uuid, STATE_STARTING)); + assert_gte!(ts1_starting, 2); + assert_gte!(ts1_running, 1); + assert_gte!(ts2, 2); + + // poke instance 2 to get it into the Running state. 
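+ // As with instance 1 above, `instance_simulate` asks the simulated sled-agent to complete the instance's in-progress state transition.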
+ eprintln!("--- starting instance 2 ---"); + instance_simulate(nexus, &instance2_uuid).await; + // stop instance 1 + eprintln!("--- start stopping instance 1 ---"); + instance_simulate(nexus, &instance1_uuid).await; + instance_post(&client, &instance1.identity.name.as_str(), InstanceOp::Stop) + .await; + + // activate the instance watcher background task. + activate_instance_watcher().await; + + let metrics = timeseries_query(&cptestctx, OXQL_QUERY).await; + let checks = metrics + .iter() + .find(|t| t.name() == "virtual_machine:check") + .expect("missing virtual_machine:check"); + + let ts1_starting = + dbg!(count_state(&checks, instance1_uuid, STATE_STARTING)); + let ts1_running = dbg!(count_state(&checks, instance1_uuid, STATE_RUNNING)); + let ts1_stopping = + dbg!(count_state(&checks, instance1_uuid, STATE_STOPPING)); + let ts2_starting = + dbg!(count_state(&checks, instance2_uuid, STATE_STARTING)); + let ts2_running = dbg!(count_state(&checks, instance2_uuid, STATE_RUNNING)); + assert_gte!(ts1_starting, 2); + assert_gte!(ts1_running, 1); + assert_gte!(ts1_stopping, 1); + assert_gte!(ts2_starting, 2); + assert_gte!(ts2_running, 1); + + // simulate instance 1 completing its stop, which will remove it from the + // set of active instances in CRDB. now, it won't be checked again. + + eprintln!("--- finish stopping instance 1 ---"); + instance_simulate(nexus, &instance1_uuid).await; + + // activate the instance watcher background task. + activate_instance_watcher().await; + + let metrics = timeseries_query(&cptestctx, OXQL_QUERY).await; + let checks = metrics + .iter() + .find(|t| t.name() == "virtual_machine:check") + .expect("missing virtual_machine:check"); + let ts1_starting = + dbg!(count_state(&checks, instance1_uuid, STATE_STARTING)); + let ts1_running = dbg!(count_state(&checks, instance1_uuid, STATE_RUNNING)); + let ts1_stopping = + dbg!(count_state(&checks, instance1_uuid, STATE_STOPPING)); + let ts2_starting = + dbg!(count_state(&checks, instance2_uuid, STATE_STARTING)); + let ts2_running = dbg!(count_state(&checks, instance2_uuid, STATE_RUNNING)); + assert_gte!(ts1_starting, 2); + assert_gte!(ts1_running, 1); + assert_gte!(ts1_stopping, 1); + assert_gte!(ts2_starting, 2); + assert_gte!(ts2_running, 2); +} + +/// Wait until a producer is registered with Oximeter. +/// +/// This blocks until the producer is registered, for up to 60s. It panics if +/// the retry loop hits a permanent error. +pub async fn wait_for_producer( + oximeter: &oximeter_collector::Oximeter, + producer_id: &Uuid, +) { + wait_for_condition( + || async { + if oximeter + .list_producers(None, usize::MAX) + .await + .iter() + .any(|p| &p.id == producer_id) + { + Ok(()) + } else { + Err(CondCheckError::<()>::NotYet) + } + }, + &Duration::from_secs(1), + &Duration::from_secs(60), + ) + .await + .expect("Failed to find producer within time limit"); +} diff --git a/nexus/tests/integration_tests/mod.rs b/nexus/tests/integration_tests/mod.rs index 804694c0b2..5054527c63 100644 --- a/nexus/tests/integration_tests/mod.rs +++ b/nexus/tests/integration_tests/mod.rs @@ -4,6 +4,7 @@ //! the way it is. mod address_lots; +mod allow_list; mod authn_http; mod authz; mod basic; @@ -50,7 +51,6 @@ mod vpc_firewall; mod vpc_routers; mod vpc_subnets; mod vpcs; -mod zpools; // This module is used only for shared data, not test cases. 
mod endpoints; diff --git a/nexus/tests/integration_tests/oximeter.rs b/nexus/tests/integration_tests/oximeter.rs index 9663e10fa0..a0e1b4413a 100644 --- a/nexus/tests/integration_tests/oximeter.rs +++ b/nexus/tests/integration_tests/oximeter.rs @@ -4,18 +4,12 @@ //! Integration tests for oximeter collectors and producers. -use dropshot::Method; -use http::StatusCode; +use crate::integration_tests::metrics::wait_for_producer; use nexus_test_interface::NexusServer; use nexus_test_utils_macros::nexus_test; -use omicron_common::api::internal::nexus::ProducerEndpoint; -use omicron_common::api::internal::nexus::ProducerKind; use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError}; use oximeter_db::DbWrite; -use std::collections::BTreeSet; use std::net; -use std::net::Ipv6Addr; -use std::net::SocketAddr; use std::time::Duration; use uuid::Uuid; @@ -46,23 +40,31 @@ async fn test_oximeter_database_records(context: &ControlPlaneTestContext) { "Oximeter ID does not match the ID returned from the database" ); + // Kind of silly, but let's wait until the producer is actually registered + // with Oximeter. + let producer_id = nexus_test_utils::PRODUCER_UUID.parse().unwrap(); + wait_for_producer(&context.oximeter, &producer_id).await; + // Verify that the producer lives in the DB. - let result = conn + let results = conn .query("SELECT * FROM omicron.public.metric_producer;", &[]) .await .unwrap(); - assert_eq!( - result.len(), - 1, - "Expected a single metric producer instance in the database" - ); - let actual_id = result[0].get::<&str, Uuid>("id"); - assert_eq!( - actual_id, - nexus_test_utils::PRODUCER_UUID.parse().unwrap(), - "Producer ID does not match the ID returned from the database" + assert!( + !results.is_empty(), + "Expected at least 1 metric producer instance in the database" ); - let actual_oximeter_id = result[0].get::<&str, Uuid>("oximeter_id"); + let actual_oximeter_id = results + .iter() + .find_map(|row| { + let id = row.get::<&str, Uuid>("id"); + if id == producer_id { + Some(row.get::<&str, Uuid>("oximeter_id")) + } else { + None + } + }) + .expect("The database doesn't contain a record of our producer"); assert_eq!( actual_oximeter_id, nexus_test_utils::OXIMETER_UUID.parse().unwrap(), @@ -77,36 +79,43 @@ async fn test_oximeter_reregistration() { ) .await; let db = &context.database; - let producer_id = nexus_test_utils::PRODUCER_UUID.parse().unwrap(); - let oximeter_id = nexus_test_utils::OXIMETER_UUID.parse().unwrap(); + let producer_id: Uuid = nexus_test_utils::PRODUCER_UUID.parse().unwrap(); + let oximeter_id: Uuid = nexus_test_utils::OXIMETER_UUID.parse().unwrap(); // Get a handle to the DB, for various tests let conn = db.connect().await.unwrap(); - // Helper to get a record for a single metric producer + // Helper to get the record for our test metric producer let get_record = || async { let result = conn .query("SELECT * FROM omicron.public.metric_producer;", &[]) .await .unwrap(); - assert_eq!( - result.len(), - 1, - "Expected a single metric producer instance in the database" - ); - let actual_id = result[0].get::<&str, Uuid>("id"); - assert_eq!( - actual_id, producer_id, - "Producer ID does not match the ID returned from the database" - ); + + // There may be multiple producers in the DB, since Nexus and the + // simulated sled agent register their own. We just care about the + // actual integration test producer here.
result + .into_iter() + .find(|row| row.get::<&str, Uuid>("id") == producer_id) + .ok_or_else(|| CondCheckError::<()>::NotYet) }; // Get the original time modified, for comparison later. - let original_time_modified = { - let result = get_record().await; - result[0].get::<&str, chrono::DateTime<chrono::Utc>>("time_modified") - }; + // + // Note that the record may not show up right away, so we'll wait for it + // here. + const PRODUCER_POLL_INTERVAL: Duration = Duration::from_secs(1); + const PRODUCER_POLL_DURATION: Duration = Duration::from_secs(60); + let row = wait_for_condition( + get_record, + &PRODUCER_POLL_INTERVAL, + &PRODUCER_POLL_DURATION, + ) + .await + .expect("Integration test producer is not in the database"); + let original_time_modified = + row.get::<&str, chrono::DateTime<chrono::Utc>>("time_modified"); // ClickHouse client for verifying collection. let ch_address = net::SocketAddrV6::new( @@ -224,7 +233,6 @@ async fn test_oximeter_reregistration() { context.server.get_http_server_internal_address().await, nexus_test_utils::PRODUCER_UUID.parse().unwrap(), ) - .await .expect("Failed to restart metric producer server"); nexus_test_utils::register_test_producer(&context.producer) .expect("Failed to register producer"); @@ -268,11 +276,11 @@ async fn test_oximeter_reregistration() { // Note that it's _probably_ not the case that the port is the same as the original, but it is // possible. We can verify that the modification time has been changed. let (new_port, new_time_modified) = { - let result = get_record().await; + let row = + get_record().await.expect("Expected the producer record to exist"); ( - result[0].get::<&str, i32>("port") as u16, - result[0] - .get::<&str, chrono::DateTime<chrono::Utc>>("time_modified"), + row.get::<&str, i32>("port") as u16, + row.get::<&str, chrono::DateTime<chrono::Utc>>("time_modified"), ) }; assert_eq!(new_port, context.producer.address().port()); @@ -339,88 +347,3 @@ async fn test_oximeter_reregistration() { ); context.teardown().await; } - -// A regression test for https://github.com/oxidecomputer/omicron/issues/4498 -#[tokio::test] -async fn test_oximeter_collector_reregistration_gets_all_assignments() { - let mut context = nexus_test_utils::test_setup::<omicron_nexus::Server>( - "test_oximeter_collector_reregistration_gets_all_assignments", - ) - .await; - let oximeter_id = nexus_test_utils::OXIMETER_UUID.parse().unwrap(); - - // Create a bunch of producer records. - // - // Note that the actual count is arbitrary, but it should be larger than the - // internal pagination limit used in `Nexus::upsert_oximeter_collector()`, - // which is currently 100. - const N_PRODUCERS: usize = 150; - let mut ids = BTreeSet::new(); - for _ in 0..N_PRODUCERS { - let id = Uuid::new_v4(); - ids.insert(id); - let info = ProducerEndpoint { - id, - kind: ProducerKind::Service, - address: SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 12345), - base_route: String::from("/collect"), - interval: Duration::from_secs(1), - }; - context - .internal_client - .make_request( - Method::POST, - "/metrics/producers", - Some(&info), - StatusCode::NO_CONTENT, - ) - .await - .expect("failed to register test producer"); - } - - // Check that `oximeter` has these registered. - let producers = - context.oximeter.list_producers(None, N_PRODUCERS * 2).await; - let actual_ids: BTreeSet<_> = - producers.iter().map(|info| info.id).collect(); - - // There is an additional producer that's created as part of the normal test - // setup, so we'll check that all of the new producers exist, and that - // there's exactly 1 additional one.
- assert!( - ids.is_subset(&actual_ids), - "oximeter did not get the right set of producers" - ); - assert_eq!( - ids.len(), - actual_ids.len() - 1, - "oximeter did not get the right set of producers" - ); - - // Drop and restart oximeter, which should result in the exact same set of - // producers again. - drop(context.oximeter); - context.oximeter = nexus_test_utils::start_oximeter( - context.logctx.log.new(o!("component" => "oximeter")), - context.server.get_http_server_internal_address().await, - context.clickhouse.port(), - oximeter_id, - ) - .await - .expect("failed to restart oximeter"); - - let producers = - context.oximeter.list_producers(None, N_PRODUCERS * 2).await; - let actual_ids: BTreeSet<_> = - producers.iter().map(|info| info.id).collect(); - assert!( - ids.is_subset(&actual_ids), - "oximeter did not get the right set of producers after re-registering" - ); - assert_eq!( - ids.len(), - actual_ids.len() - 1, - "oximeter did not get the right set of producers after re-registering" - ); - context.teardown().await; -} diff --git a/nexus/tests/integration_tests/pantry.rs b/nexus/tests/integration_tests/pantry.rs index 1a3908affa..c5d98709ac 100644 --- a/nexus/tests/integration_tests/pantry.rs +++ b/nexus/tests/integration_tests/pantry.rs @@ -393,7 +393,7 @@ async fn test_cannot_mount_import_ready_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -424,7 +424,7 @@ async fn test_cannot_mount_import_from_bulk_writes_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -448,7 +448,7 @@ async fn test_import_blocks_with_bulk_write( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -489,7 +489,7 @@ async fn test_import_blocks_with_bulk_write_with_snapshot( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -732,7 +732,7 @@ async fn test_cannot_bulk_write_start_attached_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -762,7 +762,7 @@ async fn test_cannot_bulk_write_attached_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; @@ -792,7 +792,7 @@ async fn test_cannot_bulk_write_stop_attached_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; 
create_project_and_pool(client).await; @@ -821,7 +821,7 @@ async fn test_cannot_finalize_attached_disk( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; DiskTest::new(&cptestctx).await; create_project_and_pool(client).await; diff --git a/nexus/tests/integration_tests/quotas.rs b/nexus/tests/integration_tests/quotas.rs index 4e3335d04c..d8f72ba666 100644 --- a/nexus/tests/integration_tests/quotas.rs +++ b/nexus/tests/integration_tests/quotas.rs @@ -20,6 +20,7 @@ use nexus_types::external_api::views::{Silo, SiloQuotas}; use omicron_common::api::external::ByteCount; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::InstanceCpuCount; +use serde_json::json; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -315,3 +316,69 @@ async fn test_quotas(cptestctx: &ControlPlaneTestContext) { .await .expect("Disk should be provisioned"); } + +#[nexus_test] +async fn test_quota_limits(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + let system = setup_silo_with_quota( + &client, + "quota-test-silo", + params::SiloQuotasCreate::empty(), + ) + .await; + + // Maximal legal limits should be allowed. + let quota_limit = params::SiloQuotasUpdate { + cpus: Some(i64::MAX), + memory: Some(i64::MAX.try_into().unwrap()), + storage: Some(i64::MAX.try_into().unwrap()), + }; + system + .set_quotas(client, quota_limit.clone()) + .await + .expect("set max quotas"); + let quotas = system.get_quotas(client).await; + assert_eq!(quotas.limits.cpus, quota_limit.cpus.unwrap()); + assert_eq!(quotas.limits.memory, quota_limit.memory.unwrap()); + assert_eq!(quotas.limits.storage, quota_limit.storage.unwrap()); + + // Construct a value that fits in a u64 but not an i64. + let out_of_bounds = u64::try_from(i64::MAX).unwrap() + 1; + + for key in ["cpus", "memory", "storage"] { + // We can't construct a `SiloQuotasUpdate` with higher-than-maximal + // values, but we can construct the equivalent JSON blob of such a + // request. + let request = json!({ key: out_of_bounds }); + + let err = NexusRequest::expect_failure_with_body( + client, + http::StatusCode::BAD_REQUEST, + http::Method::PUT, + "/v1/system/silos/quota-test-silo/quotas", + &request, + ) + .authn_as(system.auth.clone()) + .execute() + .await + .expect("sent quota update") + .parsed_body::<dropshot::HttpErrorResponseBody>() + .expect("parsed error body"); + assert!( + err.message.contains(key) + && (err.message.contains("invalid value") + || err + .message + .contains("value is too large for a byte count")), + "Unexpected error: {0}", + err.message + ); + + // The quota limits we set above should be unchanged.
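+ // A rejected out-of-range update must not be applied even partially, so re-fetching the quotas should still return the maximal limits set earlier.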
+ let quotas = system.get_quotas(client).await; + assert_eq!(quotas.limits.cpus, quota_limit.cpus.unwrap()); + assert_eq!(quotas.limits.memory, quota_limit.memory.unwrap()); + assert_eq!(quotas.limits.storage, quota_limit.storage.unwrap()); + } +} diff --git a/nexus/tests/integration_tests/rack.rs b/nexus/tests/integration_tests/rack.rs index 1148655195..c72c59b6f7 100644 --- a/nexus/tests/integration_tests/rack.rs +++ b/nexus/tests/integration_tests/rack.rs @@ -5,6 +5,10 @@ use dropshot::ResultsPage; use http::Method; use http::StatusCode; +use nexus_client::types::SledId; +use nexus_db_model::SledBaseboard; +use nexus_db_model::SledSystemHardware; +use nexus_db_model::SledUpdate; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; use nexus_test_utils::http_testing::RequestBuilder; @@ -17,7 +21,7 @@ use nexus_types::internal_api::params::SledAgentInfo; use nexus_types::internal_api::params::SledRole; use omicron_common::api::external::ByteCount; use omicron_common::api::external::Generation; -use omicron_nexus::TestInterfaces; +use omicron_uuid_kinds::GenericUuid; use uuid::Uuid; type ControlPlaneTestContext = @@ -35,14 +39,17 @@ async fn test_list_own_rack(cptestctx: &ControlPlaneTestContext) { .all_items; assert_eq!(1, racks.len()); - assert_eq!(cptestctx.server.apictx().nexus.rack_id(), racks[0].identity.id); + assert_eq!( + cptestctx.server.server_context().nexus.rack_id(), + racks[0].identity.id + ); } #[nexus_test] async fn test_get_own_rack(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let expected_id = cptestctx.server.apictx().nexus.rack_id(); + let expected_id = cptestctx.server.server_context().nexus.rack_id(); let rack_url = format!("/v1/system/hardware/racks/{}", expected_id); let rack = NexusRequest::object_get(client, &rack_url) .authn_as(AuthnMode::PrivilegedUser) @@ -144,3 +151,109 @@ async fn test_sled_list_uninitialized(cptestctx: &ControlPlaneTestContext) { assert_eq!(1, uninitialized_sleds_2.len()); assert_eq!(uninitialized_sleds, uninitialized_sleds_2); } + +#[nexus_test] +async fn test_sled_add(cptestctx: &ControlPlaneTestContext) { + let external_client = &cptestctx.external_client; + let list_url = "/v1/system/hardware/sleds-uninitialized"; + let mut uninitialized_sleds = + NexusRequest::object_get(external_client, list_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("failed to get uninitialized sleds") + .parsed_body::<ResultsPage<UninitializedSled>>() + .unwrap() + .items; + debug!(cptestctx.logctx.log, "{:#?}", uninitialized_sleds); + + // There are currently two fake sim gimlets created in the latest inventory + // collection as part of test setup. + assert_eq!(2, uninitialized_sleds.len()); + + // Add one of these sleds. + let add_url = "/v1/system/hardware/sleds/"; + let baseboard = uninitialized_sleds.pop().unwrap().baseboard; + let sled_id = NexusRequest::objects_post( + external_client, + add_url, + &params::UninitializedSledId { + serial: baseboard.serial.clone(), + part: baseboard.part.clone(), + }, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::<SledId>() + .await + .id; + + // Attempting to add the same sled again should succeed with the same sled + // ID: this operation should be idempotent up until the point at which the + // sled is inserted in the db.
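+ // The repeated POST below exercises that path: it should return the same sled ID as the first request rather than a conflict error.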
+ let repeat_sled_id = NexusRequest::objects_post( + external_client, + add_url, + &params::UninitializedSledId { + serial: baseboard.serial.clone(), + part: baseboard.part.clone(), + }, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::<SledId>() + .await + .id; + assert_eq!(sled_id, repeat_sled_id); + + // Now upsert the sled. + let nexus = &cptestctx.server.server_context().nexus; + nexus + .datastore() + .sled_upsert(SledUpdate::new( + sled_id.into_untyped_uuid(), + "[::1]:0".parse().unwrap(), + SledBaseboard { + serial_number: baseboard.serial.clone(), + part_number: baseboard.part.clone(), + revision: 0, + }, + SledSystemHardware { + is_scrimlet: false, + usable_hardware_threads: 8, + usable_physical_ram: (1 << 30).try_into().unwrap(), + reservoir_size: (1 << 20).try_into().unwrap(), + }, + nexus.rack_id(), + Generation::new().into(), + )) + .await + .expect("inserted sled"); + + // The sled has been commissioned as part of the rack, so adding it should + // fail. + let error: dropshot::HttpErrorResponseBody = + NexusRequest::expect_failure_with_body( + external_client, + http::StatusCode::BAD_REQUEST, + http::Method::POST, + add_url, + &params::UninitializedSledId { + serial: baseboard.serial.clone(), + part: baseboard.part.clone(), + }, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("adding sled") + .parsed_body() + .expect("parsing error body"); + assert_eq!(error.error_code, Some("ObjectAlreadyExists".to_string())); + assert!( + error.message.contains(&baseboard.serial) + && error.message.contains(&baseboard.part), + "expected to find {} and {} within error message: {}", + baseboard.serial, + baseboard.part, + error.message + ); +} diff --git a/nexus/tests/integration_tests/saml.rs b/nexus/tests/integration_tests/saml.rs index b1b0429c2e..80816f2ea2 100644 --- a/nexus/tests/integration_tests/saml.rs +++ b/nexus/tests/integration_tests/saml.rs @@ -91,7 +91,7 @@ async fn test_create_a_saml_idp(cptestctx: &ControlPlaneTestContext) { .await; // Assert external authenticator opctx can read it - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let (.., _retrieved_silo_nexus) = nexus .silo_lookup( &nexus.opctx_external_authn(), @@ -1167,7 +1167,7 @@ async fn test_post_saml_response(cptestctx: &ControlPlaneTestContext) { ) .await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; nexus.set_samael_max_issue_delay( chrono::Utc::now() - "2022-05-04T15:36:12.631Z" @@ -1298,7 +1298,7 @@ async fn test_post_saml_response_with_relay_state( ) .await; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; nexus.set_samael_max_issue_delay( chrono::Utc::now() - "2022-05-04T15:36:12.631Z" diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index 44c91bbacf..89d2e274c5 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -941,8 +941,28 @@ async fn dbinit_equals_sum_of_all_up() { let all_versions = read_all_schema_versions(); - // Go from the first version to the latest version. - for version in all_versions.iter_versions() { + // Apply the very first schema migration. In particular, this creates the + // `omicron` database, which allows us to construct a `db::Pool` below.
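+ // Only the first migration is applied at this point; the remaining migrations run further below, after the pool has handed out a connection, which is what lets this test catch a stale type-OID cache on that connection.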
+ for version in all_versions.iter_versions().take(1) { + apply_update(log, &crdb, version, 1).await; + assert_eq!( + version.semver().to_string(), + query_crdb_schema_version(&crdb).await + ); + } + + // Create a connection pool after we apply the first schema version but + // before applying the rest, and grab a connection from that pool. We'll use + // it for an extra check later. + let pool = nexus_db_queries::db::Pool::new( + log, + &nexus_db_queries::db::Config { url: crdb.pg_config().clone() }, + ); + let conn_from_pool = + pool.pool().get().await.expect("failed to get pooled connection"); + + // Go from the second version to the latest version. + for version in all_versions.iter_versions().skip(1) { apply_update(log, &crdb, version, 1).await; assert_eq!( version.semver().to_string(), @@ -957,6 +977,38 @@ async fn dbinit_equals_sum_of_all_up() { // Query the newly constructed DB for information about its schema let observed_schema = InformationSchema::new(&crdb).await; let observed_data = observed_schema.query_all_tables(log, &crdb).await; + + // Using the connection we got from the connection pool prior to applying + // the schema migrations, attempt to insert a sled resource. This involves + // the `sled_resource_kind` enum, whose OID was changed by the schema + // migration in version 53.0.0 (by virtue of the enum being dropped and + // added back with a different set of variants). If the diesel OID cache was + // populated when we acquired the connection from the pool, this will fail + // with a `type with ID $NUM does not exist` error. + { + use async_bb8_diesel::AsyncRunQueryDsl; + use nexus_db_model::schema::sled_resource::dsl; + use nexus_db_model::Resources; + use nexus_db_model::SledResource; + use nexus_db_model::SledResourceKind; + + diesel::insert_into(dsl::sled_resource) + .values(SledResource { + id: Uuid::new_v4(), + sled_id: Uuid::new_v4(), + kind: SledResourceKind::Instance, + resources: Resources { + hardware_threads: 8_u32.into(), + rss_ram: 1024_i64.try_into().unwrap(), + reservoir_ram: 1024_i64.try_into().unwrap(), + }, + }) + .execute_async(&*conn_from_pool) + .await + .expect("failed to insert - did we poison the OID cache?"); + } + std::mem::drop(conn_from_pool); + std::mem::drop(pool); crdb.cleanup().await.unwrap(); // Create a new DB with data populated from dbinit.sql for comparison diff --git a/nexus/tests/integration_tests/silo_users.rs b/nexus/tests/integration_tests/silo_users.rs index 099a186a2c..598d2a28a4 100644 --- a/nexus/tests/integration_tests/silo_users.rs +++ b/nexus/tests/integration_tests/silo_users.rs @@ -26,10 +26,10 @@ type ControlPlaneTestContext = #[nexus_test] async fn test_silo_group_users(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let opctx = OpContext::for_tests( cptestctx.logctx.log.new(o!()), - cptestctx.server.apictx().nexus.datastore().clone(), + cptestctx.server.server_context().nexus.datastore().clone(), ); // we start out with the two default users diff --git a/nexus/tests/integration_tests/silos.rs b/nexus/tests/integration_tests/silos.rs index 6dfddb12e1..e95b2870ca 100644 --- a/nexus/tests/integration_tests/silos.rs +++ b/nexus/tests/integration_tests/silos.rs @@ -55,7 +55,7 @@ type ControlPlaneTestContext = #[nexus_test] async fn test_silos(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = 
&cptestctx.server.server_context().nexus; // Verify that we cannot create a name with the same name as the recovery // Silo that was created during rack initialization. @@ -277,7 +277,7 @@ async fn test_silos(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_silo_admin_group(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let silo: Silo = object_create( client, @@ -523,7 +523,7 @@ async fn test_deleting_a_silo_deletes_the_idp( .expect("failed to make request"); // Expect that the silo is gone - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let response = IdentityProviderType::lookup( &nexus.datastore(), @@ -747,7 +747,7 @@ struct TestSiloUserProvisionTypes { #[nexus_test] async fn test_silo_user_provision_types(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let test_cases: Vec = vec![ @@ -844,7 +844,7 @@ async fn test_silo_user_fetch_by_external_id( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let silo = create_silo( &client, @@ -1026,7 +1026,7 @@ async fn test_silo_users_list(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_silo_groups_jit(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let silo = create_silo( @@ -1095,7 +1095,7 @@ async fn test_silo_groups_jit(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_silo_groups_fixed(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let silo = create_silo( &client, @@ -1156,7 +1156,7 @@ async fn test_silo_groups_remove_from_one_group( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let silo = create_silo( @@ -1269,7 +1269,7 @@ async fn test_silo_groups_remove_from_both_groups( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let silo = create_silo( @@ -1381,7 +1381,7 @@ async fn test_silo_groups_remove_from_both_groups( #[nexus_test] async fn test_silo_delete_clean_up_groups(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; // Create a silo let silo = create_silo( @@ -1463,7 +1463,7 @@ async fn test_silo_delete_clean_up_groups(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_ensure_same_silo_group(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; // Create a silo let silo = 
create_silo( @@ -1525,7 +1525,7 @@ async fn test_ensure_same_silo_group(cptestctx: &ControlPlaneTestContext) { #[nexus_test] async fn test_silo_user_views(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let datastore = cptestctx.server.apictx().nexus.datastore(); + let datastore = cptestctx.server.server_context().nexus.datastore(); // Create the two Silos. let silo1 = @@ -1741,7 +1741,7 @@ async fn create_jit_user( #[nexus_test] async fn test_jit_silo_constraints(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let silo = create_silo(&client, "jit", true, shared::SiloIdentityMode::SamlJit) diff --git a/nexus/tests/integration_tests/sleds.rs b/nexus/tests/integration_tests/sleds.rs index 743a76be17..97dbb39bc6 100644 --- a/nexus/tests/integration_tests/sleds.rs +++ b/nexus/tests/integration_tests/sleds.rs @@ -6,17 +6,17 @@ use camino::Utf8Path; use dropshot::test_util::ClientTestContext; +use nexus_db_model::PhysicalDisk as DbPhysicalDisk; +use nexus_db_model::PhysicalDiskKind as DbPhysicalDiskKind; +use nexus_db_queries::context::OpContext; use nexus_test_interface::NexusServer; use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_instance; -use nexus_test_utils::resource_helpers::create_physical_disk; use nexus_test_utils::resource_helpers::create_project; -use nexus_test_utils::resource_helpers::delete_physical_disk; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils::start_sled_agent; use nexus_test_utils::SLED_AGENT_UUID; use nexus_test_utils_macros::nexus_test; -use nexus_types::external_api::params::PhysicalDiskKind; use nexus_types::external_api::views::SledInstance; use nexus_types::external_api::views::{PhysicalDisk, Sled}; use omicron_sled_agent::sim; @@ -95,7 +95,6 @@ async fn test_physical_disk_create_list_delete( cptestctx: &ControlPlaneTestContext, ) { let external_client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; // Verify that there are two sleds to begin with. 
let sleds_url = "/v1/system/hardware/sleds"; @@ -106,17 +105,26 @@ async fn test_physical_disk_create_list_delete( format!("/v1/system/hardware/sleds/{SLED_AGENT_UUID}/disks"); let disks_initial = physical_disks_list(&external_client, &disks_url).await; - // Insert a new disk using the internal API, observe it in the external API + // Inject a disk into the database, observe it in the external API + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); let sled_id = Uuid::from_str(&SLED_AGENT_UUID).unwrap(); - create_physical_disk( - &internal_client, - "v", - "s", - "m", - PhysicalDiskKind::U2, + let physical_disk = DbPhysicalDisk::new( + Uuid::new_v4(), + "v".into(), + "s".into(), + "m".into(), + DbPhysicalDiskKind::U2, sled_id, - ) - .await; + ); + + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + let _disk_id = datastore + .physical_disk_insert(&opctx, physical_disk.clone()) + .await + .expect("Failed to upsert physical disk"); + let disks = physical_disks_list(&external_client, &disks_url).await; assert_eq!(disks.len(), disks_initial.len() + 1); let _new_disk = disks @@ -129,11 +137,19 @@ async fn test_physical_disk_create_list_delete( .expect("did not find the new disk"); // Delete that disk using the internal API, observe it in the external API - delete_physical_disk(&internal_client, "v", "s", "m", sled_id).await; - assert_eq!( - physical_disks_list(&external_client, &disks_url).await, - disks_initial - ); + datastore + .physical_disk_delete( + &opctx, + "v".into(), + "s".into(), + "m".into(), + sled_id, + ) + .await + .expect("Failed to upsert physical disk"); + + let list = physical_disks_list(&external_client, &disks_url).await; + assert_eq!(list, disks_initial, "{:#?}", list,); } #[nexus_test] diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs index 63ea81f13f..3fb6f8f6ec 100644 --- a/nexus/tests/integration_tests/snapshots.rs +++ b/nexus/tests/integration_tests/snapshots.rs @@ -136,7 +136,7 @@ async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) { .await; // cannot snapshot attached disk for instance in state starting - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; instance_simulate(nexus, &instance.identity.id).await; // Issue snapshot request @@ -256,10 +256,113 @@ async fn test_snapshot_without_instance(cptestctx: &ControlPlaneTestContext) { assert_eq!(disk.state, DiskState::Detached); } +#[nexus_test] +async fn test_snapshot_stopped_instance(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + DiskTest::new(&cptestctx).await; + create_project_and_pool(client).await; + let disks_url = get_disks_url(); + + // Define a global image + let image_create_params = params::ImageCreate { + identity: IdentityMetadataCreateParams { + name: "alpine-edge".parse().unwrap(), + description: String::from( + "you can boot any image, as long as it's alpine", + ), + }, + source: params::ImageSource::YouCanBootAnythingAsLongAsItsAlpine, + os: "alpine".to_string(), + version: "edge".to_string(), + }; + + let images_url = format!("/v1/images?project={}", PROJECT_NAME); + let image = + NexusRequest::objects_post(client, &images_url, &image_create_params) + .authn_as(AuthnMode::PrivilegedUser) + .execute_and_parse_unwrap::() + .await; + + // Create a disk from this image + let disk_size = ByteCount::from_gibibytes_u32(2); + let base_disk_name: Name = 
"base-disk".parse().unwrap(); + let base_disk = params::DiskCreate { + identity: IdentityMetadataCreateParams { + name: base_disk_name.clone(), + description: String::from("sells rainsticks"), + }, + disk_source: params::DiskSource::Image { image_id: image.identity.id }, + size: disk_size, + }; + + let base_disk: Disk = NexusRequest::new( + RequestBuilder::new(client, Method::POST, &disks_url) + .body(Some(&base_disk)) + .expect_status(Some(StatusCode::CREATED)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + + // Create a stopped instance with attached disk + let instances_url = format!("/v1/instances?project={}", PROJECT_NAME,); + let instance_name = "base-instance"; + + let instance: Instance = object_create( + client, + &instances_url, + ¶ms::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: instance_name.parse().unwrap(), + description: format!("instance {:?}", instance_name), + }, + ncpus: InstanceCpuCount(2), + memory: ByteCount::from_gibibytes_u32(1), + hostname: "base-instance".parse().unwrap(), + user_data: + b"#cloud-config\nsystem_info:\n default_user:\n name: oxide" + .to_vec(), + ssh_public_keys: Some(Vec::new()), + network_interfaces: + params::InstanceNetworkInterfaceAttachment::None, + disks: vec![params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { name: base_disk_name.clone() }, + )], + external_ips: vec![], + start: false, + }, + ) + .await; + + assert_eq!(instance.runtime.run_state, external::InstanceState::Stopped); + + // Issue snapshot request + let snapshots_url = format!("/v1/snapshots?project={}", PROJECT_NAME); + + let snapshot: views::Snapshot = object_create( + client, + &snapshots_url, + ¶ms::SnapshotCreate { + identity: IdentityMetadataCreateParams { + name: instance_name.parse().unwrap(), + description: format!("instance {:?}", instance_name), + }, + disk: base_disk_name.into(), + }, + ) + .await; + + assert_eq!(snapshot.disk_id, base_disk.identity.id); + assert_eq!(snapshot.size, base_disk.size); +} + #[nexus_test] async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); DiskTest::new(&cptestctx).await; let project_id = create_project_and_pool(client).await; @@ -418,7 +521,7 @@ async fn test_reject_creating_disk_from_snapshot( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let project_id = create_project_and_pool(&client).await; @@ -571,7 +674,7 @@ async fn test_reject_creating_disk_from_illegal_snapshot( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let project_id = create_project_and_pool(&client).await; @@ -667,7 +770,7 @@ async fn test_reject_creating_disk_from_other_project_snapshot( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let project_id = create_project_and_pool(&client).await; @@ -754,7 +857,7 @@ async fn 
test_cannot_snapshot_if_no_space(cptestctx: &ControlPlaneTestContext) { let disks_url = get_disks_url(); // Create a disk at just over half the capacity of what DiskTest allocates - let gibibytes: u64 = DiskTest::DEFAULT_ZPOOL_SIZE_GIB as u64 / 2 + 1; + let gibibytes: u64 = u64::from(DiskTest::DEFAULT_ZPOOL_SIZE_GIB) / 2 + 1; let disk_size = ByteCount::try_from(gibibytes * 1024 * 1024 * 1024).unwrap(); let base_disk_name: Name = "base-disk".parse().unwrap(); @@ -899,7 +1002,7 @@ async fn test_create_snapshot_record_idempotent( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let project_id = create_project_and_pool(&client).await; @@ -1091,7 +1194,7 @@ async fn test_create_snapshot_record_idempotent( async fn test_region_snapshot_create_idempotent( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let region_snapshot = db::model::RegionSnapshot { @@ -1115,7 +1218,7 @@ async fn test_region_snapshot_create_idempotent( #[nexus_test] async fn test_multiple_deletes_not_sent(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); DiskTest::new(&cptestctx).await; let _project_id = create_project_and_pool(client).await; diff --git a/nexus/tests/integration_tests/subnet_allocation.rs b/nexus/tests/integration_tests/subnet_allocation.rs index d9d015bf26..794c769da4 100644 --- a/nexus/tests/integration_tests/subnet_allocation.rs +++ b/nexus/tests/integration_tests/subnet_allocation.rs @@ -9,7 +9,6 @@ use dropshot::test_util::ClientTestContext; use dropshot::HttpErrorResponseBody; use http::method::Method; use http::StatusCode; -use ipnetwork::Ipv4Network; use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; @@ -22,8 +21,9 @@ use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params; use omicron_common::api::external::{ ByteCount, IdentityMetadataCreateParams, InstanceCpuCount, - InstanceNetworkInterface, Ipv4Net, + InstanceNetworkInterface, }; +use oxnet::Ipv4Net; use std::net::Ipv4Addr; type ControlPlaneTestContext = @@ -91,13 +91,17 @@ async fn test_subnet_allocation(cptestctx: &ControlPlaneTestContext) { // Create a new, small VPC Subnet, so we don't need to issue many requests // to test address exhaustion. 
- let subnet_size = - cptestctx.server.apictx().nexus.tunables().max_vpc_ipv4_subnet_prefix; + let subnet_size = cptestctx + .server + .server_context() + .nexus + .tunables() + .max_vpc_ipv4_subnet_prefix; let vpc_selector = format!("project={}&vpc=default", project_name); let subnets_url = format!("/v1/vpc-subnets?{}", vpc_selector); let subnet_name = "small"; let network_address = Ipv4Addr::new(192, 168, 42, 0); - let subnet = Ipv4Network::new(network_address, subnet_size) + let subnet = Ipv4Net::new(network_address, subnet_size) .expect("Invalid IPv4 network"); let subnet_create = params::VpcSubnetCreate { identity: IdentityMetadataCreateParams { @@ -105,7 +109,7 @@ async fn test_subnet_allocation(cptestctx: &ControlPlaneTestContext) { description: String::from("a small subnet"), }, // Use the minimum subnet size - ipv4_block: Ipv4Net(subnet), + ipv4_block: subnet, ipv6_block: None, }; NexusRequest::objects_post(client, &subnets_url, &Some(&subnet_create)) @@ -128,12 +132,13 @@ async fn test_subnet_allocation(cptestctx: &ControlPlaneTestContext) { }, ]); - // Create enough instances to fill the subnet. There are subnet.size() total - // addresses, 6 of which are reserved. - let n_final_reserved_addresses = 1; - let n_reserved_addresses = - NUM_INITIAL_RESERVED_IP_ADDRESSES + n_final_reserved_addresses; - let subnet_size = subnet.size() as usize - n_reserved_addresses; + // Create enough instances to fill the subnet. There are subnet.size() + // total addresses, 6 of which are reserved. + let subnet_size_minus_1 = match subnet.size() { + Some(n) => n - 1, + None => u32::MAX, + } as usize; + let subnet_size = subnet_size_minus_1 - NUM_INITIAL_RESERVED_IP_ADDRESSES; for i in 0..subnet_size { create_instance_with( client, @@ -174,7 +179,7 @@ async fn test_subnet_allocation(cptestctx: &ControlPlaneTestContext) { network_interfaces.sort_by(|a, b| a.ip.cmp(&b.ip)); for (iface, addr) in network_interfaces .iter() - .zip(subnet.iter().skip(NUM_INITIAL_RESERVED_IP_ADDRESSES)) + .zip(subnet.addr_iter().skip(NUM_INITIAL_RESERVED_IP_ADDRESSES)) { assert_eq!( iface.ip, diff --git a/nexus/tests/integration_tests/switch_port.rs b/nexus/tests/integration_tests/switch_port.rs index c6e774be09..41542d8554 100644 --- a/nexus/tests/integration_tests/switch_port.rs +++ b/nexus/tests/integration_tests/switch_port.rs @@ -10,15 +10,16 @@ use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::params::{ Address, AddressConfig, AddressLotBlockCreate, AddressLotCreate, - BgpAnnounceSetCreate, BgpAnnouncementCreate, BgpConfigCreate, BgpPeer, - BgpPeerConfig, LinkConfigCreate, LinkFec, LinkSpeed, - LldpServiceConfigCreate, Route, RouteConfig, SwitchInterfaceConfigCreate, - SwitchInterfaceKind, SwitchPortApplySettings, SwitchPortSettingsCreate, + BgpAnnounceSetCreate, BgpAnnouncementCreate, BgpConfigCreate, + BgpPeerConfig, LinkConfigCreate, LldpServiceConfigCreate, Route, + RouteConfig, SwitchInterfaceConfigCreate, SwitchInterfaceKind, + SwitchPortApplySettings, SwitchPortSettingsCreate, }; use nexus_types::external_api::views::Rack; +use omicron_common::api::external::ImportExportPolicy; use omicron_common::api::external::{ - self, AddressLotKind, IdentityMetadataCreateParams, NameOrId, SwitchPort, - SwitchPortSettingsView, + self, AddressLotKind, BgpPeer, IdentityMetadataCreateParams, LinkFec, + LinkSpeed, NameOrId, SwitchPort, SwitchPortSettingsView, }; type ControlPlaneTestContext = @@ -92,6 +93,8 @@ async fn 
test_port_settings_basic_crud(ctx: &ControlPlaneTestContext) { bgp_announce_set_id: NameOrId::Name("instances".parse().unwrap()), asn: 47, vrf: None, + checker: None, + shaper: None, }; NexusRequest::objects_post( @@ -255,7 +258,6 @@ async fn test_port_settings_basic_crud(ctx: &ControlPlaneTestContext) { BgpPeerConfig { peers: vec![BgpPeer { bgp_config: NameOrId::Name("as47".parse().unwrap()), - bgp_announce_set: NameOrId::Name("instances".parse().unwrap()), interface_name: "phy0".to_string(), addr: "1.2.3.4".parse().unwrap(), hold_time: 6, @@ -263,6 +265,16 @@ async fn test_port_settings_basic_crud(ctx: &ControlPlaneTestContext) { delay_open: 0, connect_retry: 3, keepalive: 2, + remote_asn: None, + min_ttl: None, + md5_auth_key: None, + multi_exit_discriminator: None, + communities: Vec::new(), + local_pref: None, + enforce_first_as: false, + allowed_export: ImportExportPolicy::NoFiltering, + allowed_import: ImportExportPolicy::NoFiltering, + vlan_id: None, }], }, ); diff --git a/nexus/tests/integration_tests/switches.rs b/nexus/tests/integration_tests/switches.rs index f56d42f6d1..d665d6ff8e 100644 --- a/nexus/tests/integration_tests/switches.rs +++ b/nexus/tests/integration_tests/switches.rs @@ -6,15 +6,11 @@ use dropshot::test_util::ClientTestContext; use nexus_test_interface::NexusServer; -use nexus_test_utils::resource_helpers::create_physical_disk; -use nexus_test_utils::resource_helpers::delete_physical_disk; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils::start_sled_agent; use nexus_test_utils::SLED_AGENT_UUID; use nexus_test_utils_macros::nexus_test; -use nexus_types::external_api::views::{ - PhysicalDisk, PhysicalDiskType, Sled, -}; +use nexus_types::external_api::views::Sled; use nexus_types::internal_api::params as internal_params; use omicron_sled_agent::sim; use std::str::FromStr; @@ -75,42 +71,3 @@ async fn test_switches_list(cptestctx: &ControlPlaneTestContext) { sa.http_server.close().await.unwrap(); } } - -#[nexus_test] -async fn test_physical_disk_create_list_delete( - cptestctx: &ControlPlaneTestContext, -) { - let external_client = &cptestctx.external_client; - let internal_client = &cptestctx.internal_client; - - // Verify that there is one sled to begin with. - let switches_url = "/v1/system/hardware/switches"; - assert_eq!(switches_list(&external_client, &switches_url).await.len(), 1); - - // Verify that there are no disks. 
- let disks_url = - format!("/v1/system/hardware/switches/{SLED_AGENT_UUID}/disks"); - assert!(physical_disks_list(&external_client, &disks_url).await.is_empty()); - - // Insert a new disk using the internal API, observe it in the external API - let sled_id = Uuid::from_str(&SLED_AGENT_UUID).unwrap(); - create_physical_disk( - &internal_client, - "v", - "s", - "m", - internal_params::PhysicalDiskKind::U2, - sled_id, - ) - .await; - let disks = physical_disks_list(&external_client, &disks_url).await; - assert_eq!(disks.len(), 1); - assert_eq!(disks[0].vendor, "v"); - assert_eq!(disks[0].serial, "s"); - assert_eq!(disks[0].model, "m"); - assert_eq!(disks[0].disk_type, PhysicalDiskType::External); - - // Delete that disk using the internal API, observe it in the external API - delete_physical_disk(&internal_client, "v", "s", "m", sled_id).await; - assert!(physical_disks_list(&external_client, &disks_url).await.is_empty()); -} diff --git a/nexus/tests/integration_tests/unauthorized.rs b/nexus/tests/integration_tests/unauthorized.rs index 3671564866..4f9f75c770 100644 --- a/nexus/tests/integration_tests/unauthorized.rs +++ b/nexus/tests/integration_tests/unauthorized.rs @@ -20,6 +20,7 @@ use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::http_testing::TestResponse; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils_macros::nexus_test; +use omicron_uuid_kinds::ZpoolUuid; use once_cell::sync::Lazy; type ControlPlaneTestContext = @@ -54,7 +55,17 @@ type ControlPlaneTestContext = // 403). #[nexus_test] async fn test_unauthorized(cptestctx: &ControlPlaneTestContext) { - DiskTest::new(cptestctx).await; + let mut disk_test = DiskTest::new(cptestctx).await; + disk_test + .add_zpool_with_dataset_ext( + cptestctx, + nexus_test_utils::PHYSICAL_DISK_UUID.parse().unwrap(), + ZpoolUuid::new_v4(), + uuid::Uuid::new_v4(), + DiskTest::DEFAULT_ZPOOL_SIZE_GIB, + ) + .await; + let client = &cptestctx.external_client; let log = &cptestctx.logctx.log; let mut setup_results = std::collections::BTreeMap::new(); diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index 289446fe85..ae348e775d 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -9,6 +9,7 @@ use chrono::Utc; use dropshot::test_util::ClientTestContext; use http::method::Method; use http::StatusCode; +use nexus_db_queries::db; use nexus_db_queries::db::DataStore; use nexus_test_utils::http_testing::AuthnMode; use nexus_test_utils::http_testing::NexusRequest; @@ -1350,7 +1351,7 @@ async fn test_volume_remove_read_only_parent_base( ) { // Test the removal of a volume with a read only parent. // The ROP should end up on the t_vid volume. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1375,7 +1376,13 @@ async fn test_volume_remove_read_only_parent_base( // Go and get the volume from the database, verify it no longer // has a read only parent. - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); let vcr: VolumeConstructionRequest = serde_json::from_str(new_vol.data()).unwrap(); @@ -1394,7 +1401,13 @@ async fn test_volume_remove_read_only_parent_base( } // Verify the t_vid now has a ROP. 
- let new_vol = datastore.volume_checkout(t_vid).await.unwrap(); + let new_vol = datastore + .volume_checkout( + t_vid, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); let vcr: VolumeConstructionRequest = serde_json::from_str(new_vol.data()).unwrap(); @@ -1421,7 +1434,13 @@ async fn test_volume_remove_read_only_parent_base( // We want to verify we can call volume_remove_rop twice and the second // time through it won't change what it did the first time. This is // critical to supporting replay of the saga, should it be needed. - let new_vol = datastore.volume_checkout(t_vid).await.unwrap(); + let new_vol = datastore + .volume_checkout( + t_vid, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); let vcr: VolumeConstructionRequest = serde_json::from_str(new_vol.data()).unwrap(); @@ -1446,7 +1465,7 @@ async fn test_volume_remove_read_only_parent_no_parent( ) { // Test the removal of a read only parent from a volume // without a read only parent. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1464,7 +1483,7 @@ async fn test_volume_remove_read_only_parent_volume_not_volume( ) { // test removal of a read only volume for a volume that is not // of a type to have a read only parent. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1493,7 +1512,7 @@ async fn test_volume_remove_read_only_parent_bad_volume( ) { // Test the removal of a read only parent from a volume // that does not exist - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1509,7 +1528,7 @@ async fn test_volume_remove_read_only_parent_volume_deleted( cptestctx: &ControlPlaneTestContext, ) { // Test the removal of a read_only_parent from a deleted volume. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1539,7 +1558,7 @@ async fn test_volume_remove_read_only_parent_volume_deleted( async fn test_volume_remove_rop_saga(cptestctx: &ControlPlaneTestContext) { // Test the saga for removal of a volume with a read only parent. // We create a volume with a read only parent, then call the saga on it. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1570,7 +1589,13 @@ async fn test_volume_remove_rop_saga(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); let vcr: VolumeConstructionRequest = serde_json::from_str(new_vol.data()).unwrap(); @@ -1596,7 +1621,7 @@ async fn test_volume_remove_rop_saga_twice( // Test calling the saga for removal of a volume with a read only parent // two times, the first will remove the read_only_parent, the second will // do nothing. 
- let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); @@ -1628,7 +1653,13 @@ async fn test_volume_remove_rop_saga_twice( .unwrap(); println!("first returns {:?}", res); - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); let vcr: VolumeConstructionRequest = serde_json::from_str(new_vol.data()).unwrap(); @@ -1689,7 +1720,7 @@ async fn test_volume_remove_rop_saga_volume_not_volume( ) { // Test saga removal of a read only volume for a volume that is not // of a type to have a read only parent. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let volume_id = Uuid::new_v4(); let datastore = nexus.datastore(); @@ -1728,7 +1759,7 @@ async fn test_volume_remove_rop_saga_deleted_volume( ) { // Test that a saga removal of a read_only_parent from a deleted volume // takes no action on that deleted volume. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1762,7 +1793,13 @@ async fn test_volume_remove_rop_saga_deleted_volume( .await .unwrap(); - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); let vcr: VolumeConstructionRequest = serde_json::from_str(new_vol.data()).unwrap(); @@ -1786,7 +1823,7 @@ async fn test_volume_remove_rop_saga_deleted_volume( async fn test_volume_checkout(cptestctx: &ControlPlaneTestContext) { // Verify that a volume_checkout will update the generation number in the // database when the volume type is Volume with sub_volume Region. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1811,11 +1848,23 @@ async fn test_volume_checkout(cptestctx: &ControlPlaneTestContext) { // The first time back, we get 1 but internally the generation number goes // to 2. - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); volume_match_gen(new_vol, vec![Some(1)]); // Request again, we should get 2 now. - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); volume_match_gen(new_vol, vec![Some(2)]); } @@ -1825,7 +1874,7 @@ async fn test_volume_checkout_updates_nothing( ) { // Verify that a volume_checkout will do nothing for a volume that does // not contain a sub_volume with a generation field. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1853,9 +1902,21 @@ async fn test_volume_checkout_updates_nothing( .unwrap(); // Verify nothing happens to our non generation number volume. 
- let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); volume_match_gen(new_vol, vec![None]); - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); volume_match_gen(new_vol, vec![None]); } @@ -1866,7 +1927,7 @@ async fn test_volume_checkout_updates_multiple_gen( // Verify that a volume_checkout will update the generation number in the // database when the volume type is Volume with multiple sub_volumes of // type Region. - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1894,15 +1955,33 @@ async fn test_volume_checkout_updates_multiple_gen( // The first time back, we get our original values, but internally the // generation number goes up. - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); volume_match_gen(new_vol, vec![Some(3), Some(8)]); // Request again, we should see the incremented values now.. - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); volume_match_gen(new_vol, vec![Some(4), Some(9)]); // Request one more, because why not. - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); volume_match_gen(new_vol, vec![Some(5), Some(10)]); } @@ -1914,7 +1993,7 @@ async fn test_volume_checkout_updates_sparse_multiple_gen( // database when the volume type is Volume with multiple sub_volumes of // type Region and also verify that a non generation sub_volume won't be a // problem - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1947,11 +2026,23 @@ async fn test_volume_checkout_updates_sparse_multiple_gen( // The first time back, we get our original values, but internally the // generation number goes up. - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); volume_match_gen(new_vol, vec![None, Some(7), Some(9)]); // Request again, we should see the incremented values now.. 
- let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); volume_match_gen(new_vol, vec![None, Some(8), Some(10)]); } @@ -1963,7 +2054,7 @@ async fn test_volume_checkout_updates_sparse_mid_multiple_gen( // database when the volume type is Volume with multiple sub_volumes of // type Region and also verify that a non generation sub_volume in the // middle of the sub_volumes won't be a problem - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -1996,11 +2087,23 @@ async fn test_volume_checkout_updates_sparse_mid_multiple_gen( // The first time back, we get our original values, but internally the // generation number goes up. - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); volume_match_gen(new_vol, vec![Some(7), None, Some(9)]); // Request again, we should see the incremented values now.. - let new_vol = datastore.volume_checkout(volume_id).await.unwrap(); + let new_vol = datastore + .volume_checkout( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await + .unwrap(); volume_match_gen(new_vol, vec![Some(8), None, Some(10)]); } @@ -2010,7 +2113,7 @@ async fn test_volume_checkout_randomize_ids_only_read_only( ) { // Verify that a volume_checkout_randomize_ids will not work for // non-read-only Regions - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); let block_size = 512; @@ -2038,7 +2141,12 @@ async fn test_volume_checkout_randomize_ids_only_read_only( .unwrap(); // volume_checkout_randomize_ids should fail - let r = datastore.volume_checkout_randomize_ids(volume_id).await; + let r = datastore + .volume_checkout_randomize_ids( + volume_id, + db::datastore::VolumeCheckoutReason::CopyAndModify, + ) + .await; assert!(r.is_err()); } @@ -2047,14 +2155,12 @@ async fn test_volume_checkout_randomize_ids_only_read_only( /// `[ipv6]:port` targets being reused. #[nexus_test] async fn test_keep_your_targets_straight(cptestctx: &ControlPlaneTestContext) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); // Four zpools, one dataset each let mut disk_test = DiskTest::new(&cptestctx).await; - disk_test - .add_zpool_with_dataset(&cptestctx, DiskTest::DEFAULT_ZPOOL_SIZE_GIB) - .await; + disk_test.add_zpool_with_dataset(&cptestctx).await; // This bug occurs when region_snapshot records share a snapshot_addr, so // insert those here manually. 
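For reference, the two `DiskTest` zpool helpers touched by this change now have different shapes: `add_zpool_with_dataset` takes only the test context and picks its own identifiers, while `add_zpool_with_dataset_ext` accepts explicit physical-disk, zpool, and dataset IDs plus a size in GiB. A sketch of both call forms; the test function is hypothetical and the `ControlPlaneTestContext` alias is assumed to follow the same pattern as the surrounding test files:

```rust
use nexus_test_utils::resource_helpers::DiskTest;
use nexus_test_utils_macros::nexus_test;
use omicron_uuid_kinds::ZpoolUuid;
use uuid::Uuid;

type ControlPlaneTestContext =
    nexus_test_utils::ControlPlaneTestContext<omicron_nexus::Server>;

// Hypothetical test showing both call forms used in this diff.
#[nexus_test]
async fn test_add_pools(cptestctx: &ControlPlaneTestContext) {
    let mut disk_test = DiskTest::new(cptestctx).await;

    // Simple form: the helper chooses its own IDs and the default size.
    disk_test.add_zpool_with_dataset(cptestctx).await;

    // Extended form: explicit physical-disk, zpool, and dataset IDs.
    disk_test
        .add_zpool_with_dataset_ext(
            cptestctx,
            nexus_test_utils::PHYSICAL_DISK_UUID.parse().unwrap(),
            ZpoolUuid::new_v4(),
            Uuid::new_v4(),
            DiskTest::DEFAULT_ZPOOL_SIZE_GIB,
        )
        .await;
}
```
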
@@ -2540,7 +2646,7 @@ fn volume_match_gen( async fn test_volume_hard_delete_idempotent( cptestctx: &ControlPlaneTestContext, ) { - let nexus = &cptestctx.server.apictx().nexus; + let nexus = &cptestctx.server.server_context().nexus; let datastore = nexus.datastore(); let volume_id = Uuid::new_v4(); diff --git a/nexus/tests/integration_tests/vpc_subnets.rs b/nexus/tests/integration_tests/vpc_subnets.rs index 76cff9ac79..dcc96d08bf 100644 --- a/nexus/tests/integration_tests/vpc_subnets.rs +++ b/nexus/tests/integration_tests/vpc_subnets.rs @@ -20,8 +20,8 @@ use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::{params, views::VpcSubnet}; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::IdentityMetadataUpdateParams; -use omicron_common::api::external::Ipv4Net; -use omicron_common::api::external::Ipv6Net; +use omicron_common::api::external::Ipv6NetExt; +use oxnet::Ipv6Net; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -31,7 +31,7 @@ async fn test_delete_vpc_subnet_with_interfaces_fails( cptestctx: &ControlPlaneTestContext, ) { let client = &cptestctx.external_client; - let apictx = &cptestctx.server.apictx(); + let apictx = &cptestctx.server.server_context(); let nexus = &apictx.nexus; // Create a project that we'll use for testing. @@ -160,16 +160,15 @@ async fn test_vpc_subnets(cptestctx: &ControlPlaneTestContext) { assert_eq!(error.message, "not found: vpc-subnet with name \"subnet1\""); // Create a VPC Subnet. - let ipv4_block = Ipv4Net("10.0.0.0/24".parse().unwrap()); - let other_ipv4_block = Ipv4Net("172.31.0.0/16".parse().unwrap()); - // Create the first two available IPv6 address ranges. */ - let prefix = vpc.ipv6_prefix.network(); - let ipv6_block = Ipv6Net(ipnetwork::Ipv6Network::new(prefix, 64).unwrap()); + let ipv4_block = "10.0.0.0/24".parse().unwrap(); + let other_ipv4_block = "172.31.0.0/16".parse().unwrap(); + // Create the first two available IPv6 address ranges. + let prefix = vpc.ipv6_prefix.prefix(); + let ipv6_block = Ipv6Net::new(prefix, 64).unwrap(); let mut segments = prefix.segments(); segments[3] = 1; let addr = std::net::Ipv6Addr::from(segments); - let other_ipv6_block = - Some(Ipv6Net(ipnetwork::Ipv6Network::new(addr, 64).unwrap())); + let other_ipv6_block = Some(Ipv6Net::new(addr, 64).unwrap()); let new_subnet = params::VpcSubnetCreate { identity: IdentityMetadataCreateParams { name: subnet_name.parse().unwrap(), @@ -291,7 +290,7 @@ async fn test_vpc_subnets(cptestctx: &ControlPlaneTestContext) { assert_eq!(error.message, "not found: vpc-subnet with name \"subnet2\""); // create second subnet, this time with an autogenerated IPv6 range. 
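These hunks show the shape of the `oxnet` migration in the subnet tests: the old tuple-struct wrappers around `ipnetwork` types are replaced by direct string parsing and `Ipv6Net::new`, and the prefix-length accessor is `width()` rather than `prefix()`. A minimal sketch, assuming `oxnet`'s `Ipv4Net` and `Ipv6Net` types:

```rust
use oxnet::{Ipv4Net, Ipv6Net};
use std::net::Ipv6Addr;

fn subnet_examples() {
    // New style: parse CIDR strings directly into oxnet types...
    let ipv4_block: Ipv4Net = "10.0.0.0/24".parse().unwrap();
    // ...or construct them from an address plus a prefix width.
    let ipv6_block = Ipv6Net::new(Ipv6Addr::UNSPECIFIED, 64).unwrap();

    // Parsed values compare directly, and the prefix length is read with
    // `width()` (formerly `prefix()` on the old wrapper types).
    assert_eq!(ipv4_block, "10.0.0.0/24".parse().unwrap());
    assert_eq!(ipv6_block.width(), 64);
}
```
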
- let ipv4_block = Ipv4Net("192.168.0.0/16".parse().unwrap()); + let ipv4_block = "192.168.0.0/16".parse().unwrap(); let new_subnet = params::VpcSubnetCreate { identity: IdentityMetadataCreateParams { name: subnet2_name.parse().unwrap(), @@ -435,10 +434,7 @@ async fn test_vpc_subnets(cptestctx: &ControlPlaneTestContext) { "it's also below the net" ); assert_eq!(subnet_same_name.vpc_id, vpc2.identity.id); - assert_eq!( - subnet_same_name.ipv4_block, - Ipv4Net("192.168.0.0/16".parse().unwrap()) - ); + assert_eq!(subnet_same_name.ipv4_block, "192.168.0.0/16".parse().unwrap()); assert!(subnet_same_name.ipv6_block.is_unique_local()); } diff --git a/nexus/tests/integration_tests/vpcs.rs b/nexus/tests/integration_tests/vpcs.rs index cc9aea4d11..1ceebd8cff 100644 --- a/nexus/tests/integration_tests/vpcs.rs +++ b/nexus/tests/integration_tests/vpcs.rs @@ -18,7 +18,6 @@ use nexus_test_utils_macros::nexus_test; use nexus_types::external_api::{params, views::Vpc}; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::IdentityMetadataUpdateParams; -use omicron_common::api::external::Ipv6Net; type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext; @@ -76,7 +75,7 @@ async fn test_vpcs(cptestctx: &ControlPlaneTestContext) { // Make sure creating a VPC fails if we specify an IPv6 prefix that is // not a valid ULA range. - let bad_prefix = Ipv6Net("2000:1000::/48".parse().unwrap()); + let bad_prefix = "2000:1000::/48".parse().unwrap(); NexusRequest::new( RequestBuilder::new(client, Method::POST, &vpcs_url) .expect_status(Some(StatusCode::BAD_REQUEST)) @@ -101,7 +100,7 @@ async fn test_vpcs(cptestctx: &ControlPlaneTestContext) { assert_eq!(vpc.identity.description, "vpc description"); assert_eq!(vpc.dns_name, "abc"); assert_eq!( - vpc.ipv6_prefix.prefix(), + vpc.ipv6_prefix.width(), 48, "Expected a 48-bit ULA IPv6 address prefix" ); diff --git a/nexus/tests/integration_tests/zpools.rs b/nexus/tests/integration_tests/zpools.rs deleted file mode 100644 index 8e058f9349..0000000000 --- a/nexus/tests/integration_tests/zpools.rs +++ /dev/null @@ -1,128 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -use dropshot::test_util::ClientTestContext; -use http::method::Method; -use http::StatusCode; -use nexus_types::external_api::params::PhysicalDiskKind; -use nexus_types::internal_api::params::PhysicalDiskPutRequest; -use nexus_types::internal_api::params::ZpoolPutRequest; -use omicron_common::api::external::ByteCount; -use uuid::Uuid; - -use nexus_test_utils::SLED_AGENT_UUID; -use nexus_test_utils_macros::nexus_test; - -type ControlPlaneTestContext = - nexus_test_utils::ControlPlaneTestContext; - -const VENDOR: &str = "test-vendor"; -const SERIAL: &str = "test-serial"; -const MODEL: &str = "test-model"; - -async fn create_test_physical_disk(client: &ClientTestContext) { - let request = PhysicalDiskPutRequest { - vendor: VENDOR.into(), - serial: SERIAL.into(), - model: MODEL.into(), - variant: PhysicalDiskKind::U2, - sled_id: SLED_AGENT_UUID.parse().unwrap(), - }; - let physical_disk_put_url = "/physical-disk"; - client - .make_request( - Method::PUT, - &physical_disk_put_url, - Some(request), - StatusCode::OK, - ) - .await - .unwrap(); -} - -// Tests the "normal" case of zpool_put: inserting a known Zpool. -// -// This will typically be invoked by the Sled Agent, after performing inventory. 
-#[nexus_test] -async fn test_zpool_put_success(cptestctx: &ControlPlaneTestContext) { - let client = &cptestctx.internal_client; - create_test_physical_disk(&client).await; - - let zpool_id = Uuid::new_v4(); - let zpool_put_url = - format!("/sled-agents/{}/zpools/{}", SLED_AGENT_UUID, zpool_id); - - let request = ZpoolPutRequest { - size: ByteCount::from_gibibytes_u32(1), - disk_vendor: VENDOR.into(), - disk_serial: SERIAL.into(), - disk_model: MODEL.into(), - }; - client - .make_request( - Method::PUT, - &zpool_put_url, - Some(request), - StatusCode::OK, - ) - .await - .unwrap(); -} - -// Tests a failure case of zpool_put: Inserting a zpool into a sled agent that -// does not exist. -#[nexus_test] -async fn test_zpool_put_bad_sled_returns_not_found( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.internal_client; - create_test_physical_disk(&client).await; - - // A sled with the "nil" UUID should not exist. - let sled_id = Uuid::nil(); - let zpool_id = Uuid::new_v4(); - let zpool_put_url = format!("/sled_agents/{}/zpools/{}", sled_id, zpool_id); - - let request = ZpoolPutRequest { - size: ByteCount::from_gibibytes_u32(1), - disk_vendor: VENDOR.into(), - disk_serial: SERIAL.into(), - disk_model: MODEL.into(), - }; - client - .make_request_error_body( - Method::PUT, - &zpool_put_url, - request, - StatusCode::NOT_FOUND, - ) - .await; -} - -// Tests a failure case of zpool_put: Inserting a zpool into a sled agent that -// exists, but into a disk that does not exist -#[nexus_test] -async fn test_zpool_put_bad_physical_disk_returns_not_found( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.internal_client; - let zpool_id = Uuid::new_v4(); - let zpool_put_url = - format!("/sled_agents/{}/zpools/{}", SLED_AGENT_UUID, zpool_id); - - let request = ZpoolPutRequest { - size: ByteCount::from_gibibytes_u32(1), - disk_vendor: VENDOR.into(), - disk_serial: SERIAL.into(), - disk_model: MODEL.into(), - }; - client - .make_request_error_body( - Method::PUT, - &zpool_put_url, - request, - StatusCode::NOT_FOUND, - ) - .await; -} diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 64413f396e..a32fe5c4b9 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -73,6 +73,8 @@ login_saml POST /login/{silo_name}/saml/{provi API operations found with tag "metrics" OPERATION ID METHOD URL PATH silo_metric GET /v1/metrics/{metric_name} +timeseries_query POST /v1/timeseries/query +timeseries_schema_list GET /v1/timeseries/schema API operations found with tag "policy" OPERATION ID METHOD URL PATH @@ -130,7 +132,9 @@ OPERATION ID METHOD URL PATH networking_switch_port_apply_settings POST /v1/system/hardware/switch-port/{port}/settings networking_switch_port_clear_settings DELETE /v1/system/hardware/switch-port/{port}/settings networking_switch_port_list GET /v1/system/hardware/switch-port +networking_switch_port_status GET /v1/system/hardware/switch-port/{port}/status physical_disk_list GET /v1/system/hardware/disks +physical_disk_view GET /v1/system/hardware/disks/{disk_id} rack_list GET /v1/system/hardware/racks rack_view GET /v1/system/hardware/racks/{rack_id} sled_add POST /v1/system/hardware/sleds @@ -170,6 +174,8 @@ networking_address_lot_block_list GET /v1/system/networking/address- networking_address_lot_create POST /v1/system/networking/address-lot networking_address_lot_delete DELETE /v1/system/networking/address-lot/{address_lot} networking_address_lot_list GET /v1/system/networking/address-lot 
+networking_allow_list_update PUT /v1/system/networking/allow-list +networking_allow_list_view GET /v1/system/networking/allow-list networking_bfd_disable POST /v1/system/networking/bfd-disable networking_bfd_enable POST /v1/system/networking/bfd-enable networking_bfd_status GET /v1/system/networking/bfd-status diff --git a/nexus/tests/output/uncovered-authz-endpoints.txt b/nexus/tests/output/uncovered-authz-endpoints.txt index d19c7970d0..c5091c5a3b 100644 --- a/nexus/tests/output/uncovered-authz-endpoints.txt +++ b/nexus/tests/output/uncovered-authz-endpoints.txt @@ -3,6 +3,7 @@ probe_delete (delete "/experimental/v1/probes/{probe probe_list (get "/experimental/v1/probes") probe_view (get "/experimental/v1/probes/{probe}") ping (get "/v1/ping") +networking_switch_port_status (get "/v1/system/hardware/switch-port/{port}/status") device_auth_request (post "/device/auth") device_auth_confirm (post "/device/confirm") device_access_token (post "/device/token") diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index ecc180b6db..df976e2444 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -4,22 +4,33 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true chrono.workspace = true +clap.workspace = true base64.workspace = true +derive-where.workspace = true +derive_more.workspace = true futures.workspace = true humantime.workspace = true +ipnetwork.workspace = true omicron-uuid-kinds.workspace = true openssl.workspace = true +oxnet.workspace = true parse-display.workspace = true schemars = { workspace = true, features = ["chrono", "uuid1"] } serde.workspace = true serde_json.workspace = true serde_with.workspace = true +slog.workspace = true +slog-error-chain.workspace = true steno.workspace = true strum.workspace = true thiserror.workspace = true +newtype-uuid.workspace = true uuid.workspace = true api_identity.workspace = true @@ -29,3 +40,7 @@ omicron-common.workspace = true omicron-passwords.workspace = true omicron-workspace-hack.workspace = true sled-agent-client.workspace = true + +[dev-dependencies] +proptest.workspace = true +test-strategy.workspace = true diff --git a/nexus/types/proptest-regressions/deployment/tri_map.txt b/nexus/types/proptest-regressions/deployment/tri_map.txt new file mode 100644 index 0000000000..c3f4260f52 --- /dev/null +++ b/nexus/types/proptest-regressions/deployment/tri_map.txt @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc bafcbc817cff65814a6f3233f1ef3d6c36f75c37ad35175d17d1c8484a734034 # shrinks to input = _ProptestOpsArgs { initial: {(0, '$', ""): "", (0, ' ', ""): ""}, ops: [] } diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index b435964b53..4fcd49a254 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -12,8 +12,8 @@ //! nexus/db-model, but nexus/reconfigurator/planning does not currently know //! about nexus/db-model and it's convenient to separate these concerns.) 
-use crate::external_api::views::SledPolicy; use crate::external_api::views::SledState; +use crate::internal_api::params::DnsConfigParams; use crate::inventory::Collection; pub use crate::inventory::OmicronZoneConfig; pub use crate::inventory::OmicronZoneDataset; @@ -21,81 +21,70 @@ pub use crate::inventory::OmicronZoneType; pub use crate::inventory::OmicronZonesConfig; pub use crate::inventory::SourceNatConfig; pub use crate::inventory::ZpoolName; -use omicron_common::address::IpRange; -use omicron_common::address::Ipv6Subnet; -use omicron_common::address::SLED_PREFIX; +use derive_more::From; +use newtype_uuid::GenericUuid; use omicron_common::api::external::Generation; +use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::ExternalIpUuid; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::SledUuid; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; +use sled_agent_client::types::OmicronPhysicalDisksConfig; +use sled_agent_client::ZoneKind; +use slog_error_chain::SlogInlineError; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::fmt; +use std::net::AddrParseError; +use std::net::Ipv6Addr; use strum::EnumIter; use strum::IntoEnumIterator; +use thiserror::Error; use uuid::Uuid; -/// Fleet-wide deployment policy -/// -/// The **policy** represents the deployment controls that people (operators and -/// support engineers) can modify directly under normal operation. In the -/// limit, this would include things like: which sleds are supposed to be part -/// of the system, how many CockroachDB nodes should be part of the cluster, -/// what system version the system should be running, etc. It would _not_ -/// include things like which services should be running on which sleds or which -/// host OS version should be on each sled because that's up to the control -/// plane to decide. (To be clear, the intent is that for extenuating -/// circumstances, people could exercise control over such things, but that -/// would not be part of normal operation.) -/// -/// The current policy is pretty limited. It's aimed primarily at supporting -/// the add/remove sled use case. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Policy { - /// set of sleds that are supposed to be part of the control plane, along - /// with information about resources available to the planner - pub sleds: BTreeMap, - - /// ranges specified by the IP pool for externally-visible control plane - /// services (e.g., external DNS, Nexus, boundary NTP) - pub service_ip_pool_ranges: Vec, - - /// desired total number of deployed Nexus zones - pub target_nexus_zone_count: usize, -} - -/// Describes the resources available on each sled for the planner -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct SledResources { - /// current sled policy - pub policy: SledPolicy, - - /// current sled state - pub state: SledState, - - /// zpools on this sled - /// - /// (used to allocate storage for control plane zones with persistent - /// storage) - pub zpools: BTreeSet, - - /// the IPv6 subnet of this sled on the underlay network - /// - /// (implicitly specifies the whole range of addresses that the planner can - /// use for control plane components) - pub subnet: Ipv6Subnet, -} - -impl SledResources { - /// Returns true if the sled can have services provisioned on it that - /// aren't required to be on every sled. - /// - /// For example, NTP must exist on every sled, but Nexus does not have to. 
- pub fn is_eligible_for_discretionary_services(&self) -> bool { - self.policy.is_provisionable() - && self.state.is_eligible_for_discretionary_services() - } -} +mod blueprint_diff; +mod blueprint_display; +mod network_resources; +mod planning_input; +mod tri_map; +mod zone_type; + +pub use network_resources::AddNetworkResourceError; +pub use network_resources::OmicronZoneExternalFloatingAddr; +pub use network_resources::OmicronZoneExternalFloatingIp; +pub use network_resources::OmicronZoneExternalIp; +pub use network_resources::OmicronZoneExternalIpEntry; +pub use network_resources::OmicronZoneExternalIpKey; +pub use network_resources::OmicronZoneExternalSnatIp; +pub use network_resources::OmicronZoneNetworkResources; +pub use network_resources::OmicronZoneNic; +pub use network_resources::OmicronZoneNicEntry; +pub use planning_input::CockroachDbClusterVersion; +pub use planning_input::CockroachDbPreserveDowngrade; +pub use planning_input::CockroachDbSettings; +pub use planning_input::DiskFilter; +pub use planning_input::PlanningInput; +pub use planning_input::PlanningInputBuildError; +pub use planning_input::PlanningInputBuilder; +pub use planning_input::Policy; +pub use planning_input::SledDetails; +pub use planning_input::SledDisk; +pub use planning_input::SledFilter; +pub use planning_input::SledResources; +pub use planning_input::ZpoolFilter; +pub use zone_type::blueprint_zone_type; +pub use zone_type::BlueprintZoneType; + +use blueprint_display::{ + constants::*, BpDiffState, BpGeneration, BpOmicronZonesSubtableSchema, + BpPhysicalDisksSubtableSchema, BpSledSubtable, BpSledSubtableData, + BpSledSubtableRow, KvListWithHeading, +}; + +pub use blueprint_diff::BlueprintDiff; /// Describes a complete set of software and configuration for the system // Blueprints are a fundamental part of how the system modifies itself. Each @@ -140,12 +129,23 @@ pub struct Blueprint { /// unique identifier for this blueprint pub id: Uuid, - /// A map of sled id -> zones deployed on each sled, along with the - /// [`BlueprintZoneDisposition`] for each zone. + /// A map of sled id -> desired state of the sled. /// /// A sled is considered part of the control plane cluster iff it has an /// entry in this map. - pub blueprint_zones: BTreeMap, + pub sled_state: BTreeMap, + + /// A map of sled id -> zones deployed on each sled, along with the + /// [`BlueprintZoneDisposition`] for each zone. + /// + /// Unlike `sled_state`, this map may contain entries for sleds that are no + /// longer a part of the control plane cluster (e.g., sleds that have been + /// decommissioned, but still have expunged zones where cleanup has not yet + /// completed). + pub blueprint_zones: BTreeMap, + + /// A map of sled id -> disks in use on each sled. + pub blueprint_disks: BTreeMap, /// which blueprint this blueprint is based on pub parent_blueprint_id: Option, @@ -158,6 +158,14 @@ pub struct Blueprint { // See blueprint execution for more on this. pub external_dns_version: Generation, + /// CockroachDB state fingerprint when this blueprint was created + // See `nexus/db-queries/src/db/datastore/cockroachdb_settings.rs` for more + // on this. 
+ pub cockroachdb_fingerprint: String, + + /// Whether to set `cluster.preserve_downgrade_option` and what to set it to + pub cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade, + /// when this blueprint was generated (for debugging) pub time_created: chrono::DateTime, /// identity of the component that generated the blueprint (for debugging) @@ -169,89 +177,126 @@ pub struct Blueprint { } impl Blueprint { + /// Return metadata for this blueprint. + pub fn metadata(&self) -> BlueprintMetadata { + BlueprintMetadata { + id: self.id, + parent_blueprint_id: self.parent_blueprint_id, + internal_dns_version: self.internal_dns_version, + external_dns_version: self.external_dns_version, + cockroachdb_fingerprint: self.cockroachdb_fingerprint.clone(), + cockroachdb_setting_preserve_downgrade: Some( + self.cockroachdb_setting_preserve_downgrade, + ), + time_created: self.time_created, + creator: self.creator.clone(), + comment: self.comment.clone(), + } + } + /// Iterate over the [`BlueprintZoneConfig`] instances in the blueprint /// that match the provided filter, along with the associated sled id. - pub fn all_blueprint_zones( + pub fn all_omicron_zones( &self, filter: BlueprintZoneFilter, - ) -> impl Iterator { + ) -> impl Iterator { self.blueprint_zones.iter().flat_map(move |(sled_id, z)| { - z.zones.iter().filter_map(move |z| { - z.disposition.matches(filter).then_some((*sled_id, z)) - }) + z.zones + .iter() + .filter(move |z| z.disposition.matches(filter)) + .map(|z| (*sled_id, z)) }) } - /// Iterate over all the [`OmicronZoneConfig`] instances in the blueprint, - /// along with the associated sled id. - pub fn all_omicron_zones( + /// Iterate over the [`BlueprintZoneConfig`] instances in the blueprint + /// that do not match the provided filter, along with the associated sled + /// id. + pub fn all_omicron_zones_not_in( &self, - ) -> impl Iterator { - self.blueprint_zones.iter().flat_map(|(sled_id, z)| { - z.zones.iter().map(|z| (*sled_id, &z.config)) + filter: BlueprintZoneFilter, + ) -> impl Iterator { + self.blueprint_zones.iter().flat_map(move |(sled_id, z)| { + z.zones + .iter() + .filter(move |z| !z.disposition.matches(filter)) + .map(|z| (*sled_id, z)) }) } /// Iterate over the ids of all sleds in the blueprint - pub fn sleds(&self) -> impl Iterator + '_ { + pub fn sleds(&self) -> impl Iterator + '_ { self.blueprint_zones.keys().copied() } - /// Summarize the difference between sleds and zones between two blueprints - pub fn diff_sleds<'a>( - &'a self, - other: &'a Blueprint, - ) -> OmicronZonesDiff<'a> { - OmicronZonesDiff { - before_label: format!("blueprint {}", self.id), - before_zones: self.blueprint_zones.clone(), - after_label: format!("blueprint {}", other.id), - after_zones: &other.blueprint_zones, - } + /// Summarize the difference between sleds and zones between two + /// blueprints. + /// + /// The argument provided is the "before" side, and `self` is the "after" + /// side. This matches the order of arguments to + /// [`Blueprint::diff_since_collection`]. 
+ pub fn diff_since_blueprint(&self, before: &Blueprint) -> BlueprintDiff { + BlueprintDiff::new( + DiffBeforeMetadata::Blueprint(Box::new(before.metadata())), + before + .blueprint_zones + .iter() + .map(|(sled_id, zones)| (*sled_id, zones.clone().into())) + .collect(), + self.metadata(), + self.blueprint_zones.clone(), + before + .blueprint_disks + .iter() + .map(|(sled_id, disks)| (*sled_id, disks.clone().into())) + .collect(), + self.blueprint_disks.clone(), + ) } /// Summarize the differences in sleds and zones between a collection and a - /// blueprint + /// blueprint. /// /// This gives an idea about what would change about a running system if /// one were to execute the blueprint. /// - /// Note that collections do not currently include information about what - /// zones are in-service, so it is assumed that all zones in the collection - /// are in-service. (This is the same assumption made by - /// [`BlueprintZonesConfig::initial_from_collection`]. The logic here may - /// also be expanded to handle cases where not all zones in the collection - /// are in-service.) - pub fn diff_sleds_from_collection( - &self, - collection: &Collection, - ) -> OmicronZonesDiff<'_> { - let before_zones = collection + /// Note that collections do not include information about zone + /// disposition, so it is assumed that all zones in the collection have the + /// [`InService`](BlueprintZoneDisposition::InService) disposition. + pub fn diff_since_collection(&self, before: &Collection) -> BlueprintDiff { + let before_zones = before .omicron_zones .iter() .map(|(sled_id, zones_found)| { - let zones = zones_found - .zones - .zones - .iter() - .map(|z| BlueprintZoneConfig { - config: z.clone(), - disposition: BlueprintZoneDisposition::InService, - }) - .collect(); - let zones = BlueprintZonesConfig { - generation: zones_found.zones.generation, - zones, - }; - (*sled_id, zones) + (*sled_id, zones_found.zones.clone().into()) }) .collect(); - OmicronZonesDiff { - before_label: format!("collection {}", collection.id), + + let before_disks = before + .sled_agents + .iter() + .map(|(sled_id, sa)| { + ( + *sled_id, + CollectionPhysicalDisksConfig { + disks: sa + .disks + .iter() + .map(|d| d.identity.clone()) + .collect::>(), + } + .into(), + ) + }) + .collect(); + + BlueprintDiff::new( + DiffBeforeMetadata::Collection { id: before.id }, before_zones, - after_label: format!("blueprint {}", self.id), - after_zones: &self.blueprint_zones, - } + self.metadata(), + self.blueprint_zones.clone(), + before_disks, + self.blueprint_disks.clone(), + ) } /// Return a struct that can be displayed to present information about the @@ -261,6 +306,50 @@ impl Blueprint { } } +impl BpSledSubtableData for &OmicronPhysicalDisksConfig { + fn bp_generation(&self) -> BpGeneration { + BpGeneration::Value(self.generation) + } + + fn rows( + &self, + state: BpDiffState, + ) -> impl Iterator { + let sorted_disk_ids: BTreeSet = + self.disks.iter().map(|d| d.identity.clone()).collect(); + + sorted_disk_ids.into_iter().map(move |d| { + BpSledSubtableRow::from_strings( + state, + vec![d.vendor, d.model, d.serial], + ) + }) + } +} + +impl BpSledSubtableData for BlueprintOrCollectionZonesConfig { + fn bp_generation(&self) -> BpGeneration { + BpGeneration::Value(self.generation()) + } + + fn rows( + &self, + state: BpDiffState, + ) -> impl Iterator { + self.zones().map(move |zone| { + BpSledSubtableRow::from_strings( + state, + vec![ + zone.kind().to_string(), + zone.id().to_string(), + zone.disposition().to_string(), + 
zone.underlay_address().to_string(), + ], + ) + }) + } +} + /// Wrapper to allow a [`Blueprint`] to be displayed with information. /// /// Returned by [`Blueprint::display()`]. @@ -271,6 +360,60 @@ pub struct BlueprintDisplay<'a> { // TODO: add colorization with a stylesheet } +impl<'a> BlueprintDisplay<'a> { + fn make_cockroachdb_table(&self) -> KvListWithHeading { + let fingerprint = if self.blueprint.cockroachdb_fingerprint.is_empty() { + NONE_PARENS.to_string() + } else { + self.blueprint.cockroachdb_fingerprint.clone() + }; + + KvListWithHeading::new_unchanged( + COCKROACHDB_HEADING, + vec![ + (COCKROACHDB_FINGERPRINT, fingerprint), + ( + COCKROACHDB_PRESERVE_DOWNGRADE, + self.blueprint + .cockroachdb_setting_preserve_downgrade + .to_string(), + ), + ], + ) + } + + fn make_metadata_table(&self) -> KvListWithHeading { + let comment = if self.blueprint.comment.is_empty() { + NONE_PARENS.to_string() + } else { + self.blueprint.comment.clone() + }; + + KvListWithHeading::new_unchanged( + METADATA_HEADING, + vec![ + (CREATED_BY, self.blueprint.creator.clone()), + ( + CREATED_AT, + humantime::format_rfc3339_millis( + self.blueprint.time_created.into(), + ) + .to_string(), + ), + (COMMENT, comment), + ( + INTERNAL_DNS_VERSION, + self.blueprint.internal_dns_version.to_string(), + ), + ( + EXTERNAL_DNS_VERSION, + self.blueprint.external_dns_version.to_string(), + ), + ], + ) + } +} + impl<'a> fmt::Display for BlueprintDisplay<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let b = self.blueprint; @@ -282,36 +425,66 @@ impl<'a> fmt::Display for BlueprintDisplay<'a> { .map(|u| u.to_string()) .unwrap_or_else(|| String::from("")) )?; - writeln!( - f, - "created by {}{}", - b.creator, - if b.creator.parse::().is_ok() { - " (likely a Nexus instance)" - } else { - "" + + // Keep track of any sled_ids that have been seen in the first loop. + let mut seen_sleds = BTreeSet::new(); + + // Loop through all sleds that have physical disks and print a table of + // those physical disks. + // + // If there are corresponding zones, print those as well. + for (sled_id, disks) in &self.blueprint.blueprint_disks { + // Construct the disks subtable + let disks_table = BpSledSubtable::new( + BpPhysicalDisksSubtableSchema {}, + disks.bp_generation(), + disks.rows(BpDiffState::Unchanged).collect(), + ); + + // Construct the zones subtable + match self.blueprint.blueprint_zones.get(sled_id) { + Some(zones) => { + let zones = + BlueprintOrCollectionZonesConfig::from(zones.clone()); + let zones_tab = BpSledSubtable::new( + BpOmicronZonesSubtableSchema {}, + zones.bp_generation(), + zones.rows(BpDiffState::Unchanged).collect(), + ); + writeln!( + f, + "\n sled: {sled_id}\n\n{disks_table}\n\n{zones_tab}\n" + )?; + } + None => writeln!(f, "\n sled: {sled_id}\n\n{disks_table}\n")?, } - )?; - writeln!( - f, - "created at {}", - humantime::format_rfc3339_millis(b.time_created.into(),) - )?; - writeln!(f, "internal DNS version: {}", b.internal_dns_version)?; - writeln!(f, "comment: {}", b.comment)?; - writeln!(f, "zones:\n")?; - - for (sled_id, sled_zones) in &b.blueprint_zones { - writeln!( - f, - " sled {}: Omicron zones at generation {}", - sled_id, sled_zones.generation - )?; - for z in &sled_zones.zones { - writeln!(f, " {}", z.display())?; + seen_sleds.insert(sled_id); + } + + // Now create and display a table of zones on sleds that don't + // yet have physical disks. + // + // This should basically be impossible, so we warn if it occurs. 
+ for (sled_id, zones) in &self.blueprint.blueprint_zones { + if !seen_sleds.contains(sled_id) && !zones.zones.is_empty() { + let zones = + BlueprintOrCollectionZonesConfig::from(zones.clone()); + writeln!( + f, + "\n!{sled_id}\n{}\n{}\n\n", + "WARNING: Zones exist without physical disks!", + BpSledSubtable::new( + BpOmicronZonesSubtableSchema {}, + zones.bp_generation(), + zones.rows(BpDiffState::Unchanged).collect() + ) + )?; } } + writeln!(f, "{}", self.make_cockroachdb_table())?; + writeln!(f, "{}", self.make_metadata_table())?; + Ok(()) } } @@ -334,39 +507,22 @@ pub struct BlueprintZonesConfig { pub zones: Vec, } -impl BlueprintZonesConfig { - /// Constructs a new [`BlueprintZonesConfig`] from a collection's zones. - /// - /// For the initial blueprint, all zones within a collection are assumed to - /// be in-service. - pub fn initial_from_collection(collection: &OmicronZonesConfig) -> Self { - let zones = collection - .zones - .iter() - .map(|z| BlueprintZoneConfig { - config: z.clone(), - disposition: BlueprintZoneDisposition::InService, - }) - .collect(); - - let mut ret = Self { - // An initial `BlueprintZonesConfig` reuses the generation from - // `OmicronZonesConfig`. - generation: collection.generation, - zones, - }; - // For testing, it's helpful for zones to be in sorted order. - ret.sort(); - - ret +impl From for OmicronZonesConfig { + fn from(config: BlueprintZonesConfig) -> Self { + Self { + generation: config.generation, + zones: config.zones.into_iter().map(From::from).collect(), + } } +} +impl BlueprintZonesConfig { /// Sorts the list of zones stored in this configuration. /// - /// This is not strictly necessary. But for testing, it's helpful for - /// zones to be in sorted order. + /// This is not strictly necessary. But for testing (particularly snapshot + /// testing), it's helpful for zones to be in sorted order. pub fn sort(&mut self) { - self.zones.sort_unstable_by_key(|z| z.config.id); + self.zones.sort_unstable_by_key(zone_sort_key); } /// Converts self to an [`OmicronZonesConfig`], applying the provided @@ -383,59 +539,352 @@ impl BlueprintZonesConfig { zones: self .zones .iter() - .filter_map(|z| { - z.disposition.matches(filter).then(|| z.config.clone()) - }) + .filter(|z| z.disposition.matches(filter)) + .cloned() + .map(OmicronZoneConfig::from) .collect(), } } + + /// Returns true if all zones in the blueprint have a disposition of + // `Expunged`, false otherwise. + pub fn are_all_zones_expunged(&self) -> bool { + self.zones + .iter() + .all(|c| c.disposition == BlueprintZoneDisposition::Expunged) + } +} + +trait ZoneSortKey { + fn kind(&self) -> ZoneKind; + fn id(&self) -> OmicronZoneUuid; +} + +impl ZoneSortKey for BlueprintZoneConfig { + fn kind(&self) -> ZoneKind { + self.zone_type.kind() + } + + fn id(&self) -> OmicronZoneUuid { + self.id + } +} + +impl ZoneSortKey for OmicronZoneConfig { + fn kind(&self) -> ZoneKind { + self.zone_type.kind() + } + + fn id(&self) -> OmicronZoneUuid { + OmicronZoneUuid::from_untyped_uuid(self.id) + } +} + +impl ZoneSortKey for BlueprintOrCollectionZoneConfig { + fn kind(&self) -> ZoneKind { + BlueprintOrCollectionZoneConfig::kind(self) + } + + fn id(&self) -> OmicronZoneUuid { + BlueprintOrCollectionZoneConfig::id(self) + } +} + +fn zone_sort_key(z: &T) -> impl Ord { + // First sort by kind, then by ID. This makes it so that zones of the same + // kind (e.g. Crucible zones) are grouped together. 
+ (z.kind(), z.id()) +} + +/// "Should never happen" errors from converting an [`OmicronZoneType`] into a +/// [`BlueprintZoneType`]. +// Removing this error type would be a side effect of fixing +// https://github.com/oxidecomputer/omicron/issues/4988. +#[derive(Debug, Clone, Error, SlogInlineError)] +pub enum InvalidOmicronZoneType { + #[error("invalid socket address for Omicron zone {kind} ({addr})")] + ParseSocketAddr { + kind: ZoneKind, + addr: String, + #[source] + err: AddrParseError, + }, + #[error("Omicron zone {kind} requires an external IP ID")] + ExternalIpIdRequired { kind: ZoneKind }, } /// Describes one Omicron-managed zone in a blueprint. /// -/// This is a wrapper around an [`OmicronZoneConfig`] that also includes a -/// [`BlueprintZoneDisposition`]. -/// /// Part of [`BlueprintZonesConfig`]. #[derive(Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize)] pub struct BlueprintZoneConfig { - /// The underlying zone configuration. - pub config: OmicronZoneConfig, - /// The disposition (desired state) of this zone recorded in the blueprint. pub disposition: BlueprintZoneDisposition, + + pub id: OmicronZoneUuid, + pub underlay_address: Ipv6Addr, + pub zone_type: BlueprintZoneType, } impl BlueprintZoneConfig { - /// Return a struct that can be displayed to present information about the - /// zone. - pub fn display(&self) -> BlueprintZoneConfigDisplay<'_> { - BlueprintZoneConfigDisplay { zone: self } + /// Convert from an [`OmicronZoneConfig`]. + /// + /// This method is annoying to call correctly and will become more so over + /// time. Ideally we'd remove all callers and then remove this method, but + /// for now we keep it. + /// + /// # Errors + /// + /// If `config.zone_type` is a zone that has an external IP address (Nexus, + /// boundary NTP, external DNS), `external_ip_id` must be `Some(_)` or this + /// method will return an error. 
+ pub fn from_omicron_zone_config( + config: OmicronZoneConfig, + disposition: BlueprintZoneDisposition, + external_ip_id: Option, + ) -> Result { + let kind = config.zone_type.kind(); + let zone_type = match config.zone_type { + OmicronZoneType::BoundaryNtp { + address, + dns_servers, + domain, + nic, + ntp_servers, + snat_cfg, + } => { + let external_ip_id = external_ip_id.ok_or( + InvalidOmicronZoneType::ExternalIpIdRequired { kind }, + )?; + let address = address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: address.clone(), + err, + } + })?; + BlueprintZoneType::BoundaryNtp( + blueprint_zone_type::BoundaryNtp { + address, + ntp_servers, + dns_servers, + domain, + nic, + external_ip: OmicronZoneExternalSnatIp { + id: external_ip_id, + snat_cfg, + }, + }, + ) + } + OmicronZoneType::Clickhouse { address, dataset } => { + let address = address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: address.clone(), + err, + } + })?; + BlueprintZoneType::Clickhouse(blueprint_zone_type::Clickhouse { + address, + dataset, + }) + } + OmicronZoneType::ClickhouseKeeper { address, dataset } => { + let address = address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: address.clone(), + err, + } + })?; + BlueprintZoneType::ClickhouseKeeper( + blueprint_zone_type::ClickhouseKeeper { address, dataset }, + ) + } + OmicronZoneType::CockroachDb { address, dataset } => { + let address = address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: address.clone(), + err, + } + })?; + BlueprintZoneType::CockroachDb( + blueprint_zone_type::CockroachDb { address, dataset }, + ) + } + OmicronZoneType::Crucible { address, dataset } => { + let address = address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: address.clone(), + err, + } + })?; + BlueprintZoneType::Crucible(blueprint_zone_type::Crucible { + address, + dataset, + }) + } + OmicronZoneType::CruciblePantry { address } => { + let address = address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: address.clone(), + err, + } + })?; + BlueprintZoneType::CruciblePantry( + blueprint_zone_type::CruciblePantry { address }, + ) + } + OmicronZoneType::ExternalDns { + dataset, + dns_address, + http_address, + nic, + } => { + let external_ip_id = external_ip_id.ok_or( + InvalidOmicronZoneType::ExternalIpIdRequired { kind }, + )?; + let dns_address = dns_address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: dns_address.clone(), + err, + } + })?; + let http_address = http_address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: http_address.clone(), + err, + } + })?; + BlueprintZoneType::ExternalDns( + blueprint_zone_type::ExternalDns { + dataset, + http_address, + dns_address: OmicronZoneExternalFloatingAddr { + id: external_ip_id, + addr: dns_address, + }, + nic, + }, + ) + } + OmicronZoneType::InternalDns { + dataset, + dns_address, + gz_address, + gz_address_index, + http_address, + } => { + let dns_address = dns_address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: dns_address.clone(), + err, + } + })?; + let http_address = http_address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: http_address.clone(), + err, + } + })?; + BlueprintZoneType::InternalDns( + blueprint_zone_type::InternalDns { + dataset, + 
http_address, + dns_address, + gz_address, + gz_address_index, + }, + ) + } + OmicronZoneType::InternalNtp { + address, + dns_servers, + domain, + ntp_servers, + } => { + let address = address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: address.clone(), + err, + } + })?; + BlueprintZoneType::InternalNtp( + blueprint_zone_type::InternalNtp { + address, + ntp_servers, + dns_servers, + domain, + }, + ) + } + OmicronZoneType::Nexus { + external_dns_servers, + external_ip, + external_tls, + internal_address, + nic, + } => { + let external_ip_id = external_ip_id.ok_or( + InvalidOmicronZoneType::ExternalIpIdRequired { kind }, + )?; + let internal_address = + internal_address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: internal_address.clone(), + err, + } + })?; + BlueprintZoneType::Nexus(blueprint_zone_type::Nexus { + internal_address, + external_ip: OmicronZoneExternalFloatingIp { + id: external_ip_id, + ip: external_ip, + }, + nic, + external_tls, + external_dns_servers, + }) + } + OmicronZoneType::Oximeter { address } => { + let address = address.parse().map_err(|err| { + InvalidOmicronZoneType::ParseSocketAddr { + kind, + addr: address.clone(), + err, + } + })?; + BlueprintZoneType::Oximeter(blueprint_zone_type::Oximeter { + address, + }) + } + }; + Ok(Self { + disposition, + id: OmicronZoneUuid::from_untyped_uuid(config.id), + underlay_address: config.underlay_address, + zone_type, + }) } } -/// A wrapper to allow a [`BlueprintZoneConfig`] to be displayed with -/// information. -/// -/// Returned by [`BlueprintZoneConfig::display()`]. -#[derive(Clone, Debug)] -#[must_use = "this struct does nothing unless displayed"] -pub struct BlueprintZoneConfigDisplay<'a> { - zone: &'a BlueprintZoneConfig, -} - -impl<'a> fmt::Display for BlueprintZoneConfigDisplay<'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let z = self.zone; - write!( - f, - "{} {: for OmicronZoneConfig { + fn from(z: BlueprintZoneConfig) -> Self { + Self { + id: z.id.into_untyped_uuid(), + underlay_address: z.underlay_address, + zone_type: z.zone_type.into(), + } } } @@ -463,12 +912,12 @@ pub enum BlueprintZoneDisposition { /// The zone is not in service. Quiesced, + + /// The zone is permanently gone. + Expunged, } impl BlueprintZoneDisposition { - /// The maximum width of `Display` output. - const DISPLAY_WIDTH: usize = 10; - /// Returns true if the zone disposition matches this filter. pub fn matches(self, filter: BlueprintZoneFilter) -> bool { // This code could be written in three ways: @@ -482,21 +931,33 @@ impl BlueprintZoneDisposition { match self { Self::InService => match filter { BlueprintZoneFilter::All => true, - BlueprintZoneFilter::SledAgentPut => true, - BlueprintZoneFilter::InternalDns => true, - BlueprintZoneFilter::VpcFirewall => true, + BlueprintZoneFilter::ShouldBeRunning => true, + BlueprintZoneFilter::ShouldBeExternallyReachable => true, + BlueprintZoneFilter::ShouldBeInInternalDns => true, + BlueprintZoneFilter::ShouldDeployVpcFirewallRules => true, }, Self::Quiesced => match filter { BlueprintZoneFilter::All => true, - // Quiesced zones should not be exposed in DNS. - BlueprintZoneFilter::InternalDns => false, + // Quiesced zones are still running. + BlueprintZoneFilter::ShouldBeRunning => true, - // Quiesced zones are expected to be deployed by sled-agent. 
- BlueprintZoneFilter::SledAgentPut => true, + // Quiesced zones should not have external resources -- we do + // not want traffic to be directed to them. + BlueprintZoneFilter::ShouldBeExternallyReachable => false, + + // Quiesced zones should not be exposed in DNS. + BlueprintZoneFilter::ShouldBeInInternalDns => false, // Quiesced zones should get firewall rules. - BlueprintZoneFilter::VpcFirewall => true, + BlueprintZoneFilter::ShouldDeployVpcFirewallRules => true, + }, + Self::Expunged => match filter { + BlueprintZoneFilter::All => true, + BlueprintZoneFilter::ShouldBeRunning => false, + BlueprintZoneFilter::ShouldBeExternallyReachable => false, + BlueprintZoneFilter::ShouldBeInInternalDns => false, + BlueprintZoneFilter::ShouldDeployVpcFirewallRules => false, }, } } @@ -516,6 +977,7 @@ impl fmt::Display for BlueprintZoneDisposition { // and alignment (used above), but this does. BlueprintZoneDisposition::InService => "in service".fmt(f), BlueprintZoneDisposition::Quiesced => "quiesced".fmt(f), + BlueprintZoneDisposition::Expunged => "expunged".fmt(f), } } } @@ -536,16 +998,28 @@ pub enum BlueprintZoneFilter { /// All zones. All, - /// Filter by zones that should be in internal DNS. - InternalDns, + /// Zones that are desired to be in the RUNNING state + ShouldBeRunning, - /// Filter by zones that we should tell sled-agent to deploy. - SledAgentPut, + /// Filter by zones that should have external IP and DNS resources. + ShouldBeExternallyReachable, + + /// Filter by zones that should be in internal DNS. + ShouldBeInInternalDns, /// Filter by zones that should be sent VPC firewall rules. - VpcFirewall, + ShouldDeployVpcFirewallRules, } +/// Information about an Omicron physical disk as recorded in a blueprint. +/// +/// Part of [`Blueprint`]. +pub type BlueprintPhysicalDisksConfig = + sled_agent_client::types::OmicronPhysicalDisksConfig; + +pub type BlueprintPhysicalDiskConfig = + sled_agent_client::types::OmicronPhysicalDiskConfig; + /// Describe high-level metadata about a blueprint // These fields are a subset of [`Blueprint`], and include only the data we can // quickly fetch from the main blueprint table (e.g., when listing all @@ -561,6 +1035,12 @@ pub struct BlueprintMetadata { pub internal_dns_version: Generation, /// external DNS version when this blueprint was created pub external_dns_version: Generation, + /// CockroachDB state fingerprint when this blueprint was created + pub cockroachdb_fingerprint: String, + /// Whether to set `cluster.preserve_downgrade_option` and what to set it to + /// (`None` if this value was retrieved from the database and was invalid) + pub cockroachdb_setting_preserve_downgrade: + Option, /// when this blueprint was generated (for debugging) pub time_created: chrono::DateTime, @@ -572,6 +1052,12 @@ pub struct BlueprintMetadata { pub comment: String, } +impl BlueprintMetadata { + pub fn display_id(&self) -> String { + format!("blueprint {}", self.id) + } +} + /// Describes what blueprint, if any, the system is currently working toward #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, JsonSchema)] pub struct BlueprintTarget { @@ -592,328 +1078,206 @@ pub struct BlueprintTargetSet { pub enabled: bool, } -/// Summarizes the differences between two blueprints -#[derive(Debug)] -pub struct OmicronZonesDiff<'a> { - before_label: String, - // We store an owned copy of "before_zones" to make it easier to support - // collections here, where we need to assemble this map ourselves. 
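The reworked dispositions and filters above act as predicates: `Expunged` zones only appear under `BlueprintZoneFilter::All`, while `Quiesced` zones still count as running but are excluded from external reachability and internal DNS. A small sketch of iterating a blueprint with these filters; the summary function is illustrative only, not part of this change:

```rust
use nexus_types::deployment::{
    Blueprint, BlueprintZoneDisposition, BlueprintZoneFilter,
};

fn summarize(blueprint: &Blueprint) {
    // Zones the plan expects to be running (excludes Expunged, keeps Quiesced).
    let running = blueprint
        .all_omicron_zones(BlueprintZoneFilter::ShouldBeRunning)
        .count();

    // Every zone recorded in the blueprint, regardless of disposition.
    let expunged = blueprint
        .all_omicron_zones(BlueprintZoneFilter::All)
        .filter(|(_sled_id, zone)| {
            zone.disposition == BlueprintZoneDisposition::Expunged
        })
        .count();

    println!("{running} running zone(s), {expunged} expunged zone(s)");
}
```
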
- before_zones: BTreeMap, - after_label: String, - after_zones: &'a BTreeMap, -} - -/// Describes a sled that appeared on both sides of a diff (possibly changed) -#[derive(Debug)] -pub struct DiffSledCommon<'a> { - /// id of the sled - pub sled_id: Uuid, - /// generation of the "zones" configuration on the left side - pub generation_before: Generation, - /// generation of the "zones" configuration on the right side - pub generation_after: Generation, - zones_added: Vec<&'a BlueprintZoneConfig>, - zones_removed: Vec<&'a BlueprintZoneConfig>, - zones_common: Vec>, -} - -impl<'a> DiffSledCommon<'a> { - /// Iterate over zones added between the blueprints - pub fn zones_added( - &self, - ) -> impl Iterator + '_ { - self.zones_added.iter().copied() - } - - /// Iterate over zones removed between the blueprints - pub fn zones_removed( - &self, - ) -> impl Iterator + '_ { - self.zones_removed.iter().copied() - } - - /// Iterate over zones that are common to both blueprints - pub fn zones_in_common( - &self, - ) -> impl Iterator> + '_ { - self.zones_common.iter().copied() - } +/// Data about the "before" version within a [`BlueprintDiff`]. +#[derive(Clone, Debug)] +pub enum DiffBeforeMetadata { + /// The diff was made from a collection. + Collection { id: CollectionUuid }, + /// The diff was made from a blueprint. + Blueprint(Box), +} - /// Iterate over zones that changed between the blue prints - pub fn zones_changed( - &self, - ) -> impl Iterator> + '_ { - self.zones_in_common().filter(|z| z.is_changed()) +impl DiffBeforeMetadata { + pub fn display_id(&self) -> String { + match self { + DiffBeforeMetadata::Collection { id } => format!("collection {id}"), + DiffBeforeMetadata::Blueprint(b) => b.display_id(), + } } } -/// Describes a zone that was common to both sides of a diff -#[derive(Debug, Copy, Clone)] -pub struct DiffZoneCommon<'a> { - /// full zone configuration before - pub zone_before: &'a BlueprintZoneConfig, - /// full zone configuration after - pub zone_after: &'a BlueprintZoneConfig, +/// Single sled's zones config for "before" version within a [`BlueprintDiff`]. +#[derive(Clone, Debug)] +pub enum BlueprintOrCollectionZonesConfig { + /// The diff was made from a collection. + Collection(OmicronZonesConfig), + /// The diff was made from a blueprint. + Blueprint(BlueprintZonesConfig), } -impl<'a> DiffZoneCommon<'a> { - /// Returns true if there are any differences between `zone_before` and - /// `zone_after`. - /// - /// This is equivalent to `config_changed() || disposition_changed()`. - #[inline] - pub fn is_changed(&self) -> bool { - // state is smaller and easier to compare than config. - self.disposition_changed() || self.config_changed() +impl BlueprintOrCollectionZonesConfig { + pub fn sort(&mut self) { + match self { + BlueprintOrCollectionZonesConfig::Collection(z) => { + z.zones.sort_unstable_by_key(zone_sort_key) + } + BlueprintOrCollectionZonesConfig::Blueprint(z) => z.sort(), + } } - /// Returns true if the zone configuration (excluding the disposition) - /// changed. - #[inline] - pub fn config_changed(&self) -> bool { - self.zone_before.config != self.zone_after.config + pub fn generation(&self) -> Generation { + match self { + BlueprintOrCollectionZonesConfig::Collection(z) => z.generation, + BlueprintOrCollectionZonesConfig::Blueprint(z) => z.generation, + } } - /// Returns true if the [`BlueprintZoneDisposition`] for the zone changed. 
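With the old `OmicronZonesDiff` machinery replaced by `BlueprintDiff`, the two entry points are `diff_since_blueprint` and `diff_since_collection`; in both, the argument is the "before" side and `self` is the "after" side. A hedged usage sketch (variable names are assumptions; only the method names and argument order come from this change):

```rust
use nexus_types::deployment::{Blueprint, BlueprintDiff};
use nexus_types::inventory::Collection;

// Sketch: `new_bp` is assumed to have been generated with `old_bp` as its
// parent, and `inventory` to be a recent inventory collection.
fn diff_examples(old_bp: &Blueprint, new_bp: &Blueprint, inventory: &Collection) {
    // Blueprint-to-blueprint: `old_bp` is the "before" side.
    let _bp_diff: BlueprintDiff = new_bp.diff_since_blueprint(old_bp);

    // Collection-to-blueprint: what would change if `new_bp` were executed
    // against what inventory observed running.
    let _inv_diff: BlueprintDiff = new_bp.diff_since_collection(inventory);

    // Per-sled disk and zone tables for the new blueprint itself.
    println!("{}", new_bp.display());
}
```
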
- #[inline] - pub fn disposition_changed(&self) -> bool { - self.zone_before.disposition != self.zone_after.disposition + pub fn zones( + &self, + ) -> Box + '_> { + match self { + BlueprintOrCollectionZonesConfig::Collection(zc) => { + Box::new(zc.zones.iter().map(|z| z.clone().into())) + } + BlueprintOrCollectionZonesConfig::Blueprint(zc) => { + Box::new(zc.zones.iter().map(|z| z.clone().into())) + } + } } } -impl<'a> OmicronZonesDiff<'a> { - fn sleds_before(&self) -> BTreeSet { - self.before_zones.keys().copied().collect() +impl From for BlueprintOrCollectionZonesConfig { + fn from(zc: OmicronZonesConfig) -> Self { + Self::Collection(zc) } +} - fn sleds_after(&self) -> BTreeSet { - self.after_zones.keys().copied().collect() - } - - /// Iterate over sleds only present in the second blueprint of a diff - pub fn sleds_added( - &self, - ) -> impl Iterator + '_ { - let sled_ids = self - .sleds_after() - .difference(&self.sleds_before()) - .copied() - .collect::>(); - - sled_ids - .into_iter() - .map(|sled_id| (sled_id, self.after_zones.get(&sled_id).unwrap())) +impl From for BlueprintOrCollectionZonesConfig { + fn from(zc: BlueprintZonesConfig) -> Self { + Self::Blueprint(zc) } +} - /// Iterate over sleds only present in the first blueprint of a diff - pub fn sleds_removed( - &self, - ) -> impl Iterator + '_ { - let sled_ids = self - .sleds_before() - .difference(&self.sleds_after()) - .copied() - .collect::>(); - sled_ids - .into_iter() - .map(|sled_id| (sled_id, self.before_zones.get(&sled_id).unwrap())) - } - - /// Iterate over sleds present in both blueprints in a diff - pub fn sleds_in_common( - &'a self, - ) -> impl Iterator)> + '_ { - let sled_ids = self - .sleds_before() - .intersection(&self.sleds_after()) - .copied() - .collect::>(); - sled_ids.into_iter().map(|sled_id| { - let b1sledzones = self.before_zones.get(&sled_id).unwrap(); - let b2sledzones = self.after_zones.get(&sled_id).unwrap(); - - // Assemble separate summaries of the zones, indexed by zone id. - let b1_zones: BTreeMap = b1sledzones - .zones - .iter() - .map(|zone| (zone.config.id, zone)) - .collect(); - let mut b2_zones: BTreeMap = - b2sledzones - .zones - .iter() - .map(|zone| (zone.config.id, zone)) - .collect(); - let mut zones_removed = vec![]; - let mut zones_common = vec![]; - - // Now go through each zone and compare them. - for (zone_id, zone_before) in &b1_zones { - if let Some(zone_after) = b2_zones.remove(zone_id) { - zones_common - .push(DiffZoneCommon { zone_before, zone_after }); - } else { - zones_removed.push(*zone_before); - } +impl PartialEq for BlueprintOrCollectionZonesConfig { + fn eq(&self, other: &BlueprintZonesConfig) -> bool { + match self { + BlueprintOrCollectionZonesConfig::Collection(z) => { + // BlueprintZonesConfig contains more information than + // OmicronZonesConfig. We compare them by lowering the + // BlueprintZonesConfig into an OmicronZonesConfig. + let lowered = OmicronZonesConfig::from(other.clone()); + z.eq(&lowered) } - - // Since we removed common zones above, anything else exists only in - // b2 and was therefore added. 
- let zones_added = b2_zones.into_values().collect(); - - ( - sled_id, - DiffSledCommon { - sled_id, - generation_before: b1sledzones.generation, - generation_after: b2sledzones.generation, - zones_added, - zones_removed, - zones_common, - }, - ) - }) - } - - pub fn sleds_changed( - &'a self, - ) -> impl Iterator)> + '_ { - self.sleds_in_common().filter(|(_, sled_changes)| { - sled_changes.zones_added().next().is_some() - || sled_changes.zones_removed().next().is_some() - || sled_changes.zones_changed().next().is_some() - }) - } - - /// Return a struct that can be used to display the diff in a - /// unified `diff(1)`-like format. - pub fn display(&self) -> OmicronZonesDiffDisplay<'_, 'a> { - OmicronZonesDiffDisplay::new(self) + BlueprintOrCollectionZonesConfig::Blueprint(z) => z.eq(other), + } } } -/// Wrapper to allow a [`OmicronZonesDiff`] to be displayed in a unified -/// `diff(1)`-like format. -/// -/// Returned by [`OmicronZonesDiff::display()`]. +/// Single zone config for "before" version within a [`BlueprintDiff`]. #[derive(Clone, Debug)] -#[must_use = "this struct does nothing unless displayed"] -pub struct OmicronZonesDiffDisplay<'diff, 'a> { - diff: &'diff OmicronZonesDiff<'a>, - // TODO: add colorization with a stylesheet +pub enum BlueprintOrCollectionZoneConfig { + /// The diff was made from a collection. + Collection(OmicronZoneConfig), + /// The diff was made from a blueprint. + Blueprint(BlueprintZoneConfig), } -impl<'diff, 'a> OmicronZonesDiffDisplay<'diff, 'a> { - #[inline] - fn new(diff: &'diff OmicronZonesDiff<'a>) -> Self { - Self { diff } +impl From for BlueprintOrCollectionZoneConfig { + fn from(zc: OmicronZoneConfig) -> Self { + Self::Collection(zc) } +} - fn print_whole_sled( - &self, - f: &mut fmt::Formatter<'_>, - prefix: char, - label: &str, - bbsledzones: &BlueprintZonesConfig, - sled_id: Uuid, - ) -> fmt::Result { - writeln!(f, "{} sled {} ({})", prefix, sled_id, label)?; - writeln!( - f, - "{} zone config generation {}", - prefix, bbsledzones.generation - )?; - for z in &bbsledzones.zones { - writeln!(f, "{prefix} {} ({label})", z.display())?; - } +impl From for BlueprintOrCollectionZoneConfig { + fn from(zc: BlueprintZoneConfig) -> Self { + Self::Blueprint(zc) + } +} - Ok(()) +impl PartialEq for BlueprintOrCollectionZoneConfig { + fn eq(&self, other: &BlueprintZoneConfig) -> bool { + self.kind() == other.kind() + && self.disposition() == other.disposition + && self.underlay_address() == other.underlay_address + && self.is_zone_type_equal(&other.zone_type) } } -impl<'diff, 'a> fmt::Display for OmicronZonesDiffDisplay<'diff, 'a> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let diff = self.diff; - writeln!(f, "diff {} {}", diff.before_label, diff.after_label)?; - writeln!(f, "--- {}", diff.before_label)?; - writeln!(f, "+++ {}", diff.after_label)?; +impl BlueprintOrCollectionZoneConfig { + pub fn id(&self) -> OmicronZoneUuid { + match self { + BlueprintOrCollectionZoneConfig::Collection(z) => z.id(), + BlueprintOrCollectionZoneConfig::Blueprint(z) => z.id(), + } + } - for (sled_id, sled_zones) in diff.sleds_removed() { - self.print_whole_sled(f, '-', "removed", sled_zones, sled_id)?; + pub fn kind(&self) -> ZoneKind { + match self { + BlueprintOrCollectionZoneConfig::Collection(z) => z.kind(), + BlueprintOrCollectionZoneConfig::Blueprint(z) => z.kind(), } + } - for (sled_id, sled_changes) in diff.sleds_in_common() { - // Print a line about the sled itself and zone config generation, - // regardless of whether anything has changed. 
- writeln!(f, " sled {}", sled_id)?; - if sled_changes.generation_before != sled_changes.generation_after { - writeln!( - f, - "- zone config generation {}", - sled_changes.generation_before - )?; - writeln!( - f, - "+ zone config generation {}", - sled_changes.generation_after - )?; - } else { - writeln!( - f, - " zone config generation {}", - sled_changes.generation_before - )?; + pub fn disposition(&self) -> BlueprintZoneDisposition { + match self { + // All zones from inventory collection are assumed to be in-service. + BlueprintOrCollectionZoneConfig::Collection(_) => { + BlueprintZoneDisposition::InService } + BlueprintOrCollectionZoneConfig::Blueprint(z) => z.disposition, + } + } - for zone in sled_changes.zones_removed() { - writeln!(f, "- {} (removed)", zone.display())?; + pub fn underlay_address(&self) -> Ipv6Addr { + match self { + BlueprintOrCollectionZoneConfig::Collection(z) => { + z.underlay_address } + BlueprintOrCollectionZoneConfig::Blueprint(z) => z.underlay_address, + } + } - for zone_changes in sled_changes.zones_in_common() { - if zone_changes.config_changed() { - writeln!( - f, - "- {} (changed)", - zone_changes.zone_before.display(), - )?; - writeln!( - f, - "+ {} (changed)", - zone_changes.zone_after.display(), - )?; - } else if zone_changes.disposition_changed() { - writeln!( - f, - "- {} (disposition changed)", - zone_changes.zone_before.display(), - )?; - writeln!( - f, - "+ {} (disposition changed)", - zone_changes.zone_after.display(), - )?; - } else { - writeln!( - f, - " {} (unchanged)", - zone_changes.zone_before.display(), - )?; - } + pub fn is_zone_type_equal(&self, other: &BlueprintZoneType) -> bool { + match self { + BlueprintOrCollectionZoneConfig::Collection(z) => { + // BlueprintZoneType contains more information than + // OmicronZoneType. We compare them by lowering the + // BlueprintZoneType into an OmicronZoneType. + let lowered = OmicronZoneType::from(other.clone()); + z.zone_type == lowered } - - for zone in sled_changes.zones_added() { - writeln!(f, "+ {} (added)", zone.display())?; + BlueprintOrCollectionZoneConfig::Blueprint(z) => { + z.zone_type == *other } } + } +} + +/// Single sled's disks config for "before" version within a [`BlueprintDiff`]. +#[derive(Clone, Debug, From)] +pub enum BlueprintOrCollectionDisksConfig { + /// The diff was made from a collection. + Collection(CollectionPhysicalDisksConfig), + /// The diff was made from a blueprint. + Blueprint(BlueprintPhysicalDisksConfig), +} - for (sled_id, sled_zones) in diff.sleds_added() { - self.print_whole_sled(f, '+', "added", sled_zones, sled_id)?; +impl BlueprintOrCollectionDisksConfig { + pub fn generation(&self) -> Option { + match self { + BlueprintOrCollectionDisksConfig::Collection(_) => None, + BlueprintOrCollectionDisksConfig::Blueprint(c) => { + Some(c.generation) + } } + } - Ok(()) + pub fn disks(&self) -> BTreeSet { + match self { + BlueprintOrCollectionDisksConfig::Collection(c) => c.disks.clone(), + BlueprintOrCollectionDisksConfig::Blueprint(c) => { + c.disks.iter().map(|d| d.identity.clone()).collect() + } + } } } +/// Single sled's disk config for "before" version within a [`BlueprintDiff`]. 
+#[derive(Clone, Debug, From)] +pub struct CollectionPhysicalDisksConfig { + disks: BTreeSet, +} + /// Encapsulates Reconfigurator state /// /// This serialized from is intended for saving state from hand-constructed or @@ -923,7 +1287,11 @@ impl<'diff, 'a> fmt::Display for OmicronZonesDiffDisplay<'diff, 'a> { /// backwards-compatibility guarantees.** #[derive(Debug, Clone, Serialize, Deserialize)] pub struct UnstableReconfiguratorState { - pub policy: Policy, + pub planning_input: PlanningInput, pub collections: Vec, pub blueprints: Vec, + pub internal_dns: BTreeMap, + pub external_dns: BTreeMap, + pub silo_names: Vec, + pub external_dns_zone_names: Vec, } diff --git a/nexus/types/src/deployment/blueprint_diff.rs b/nexus/types/src/deployment/blueprint_diff.rs new file mode 100644 index 0000000000..0ee039b50f --- /dev/null +++ b/nexus/types/src/deployment/blueprint_diff.rs @@ -0,0 +1,877 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Types helpful for diffing [`Blueprints`]. + +use super::blueprint_display::{ + constants::*, linear_table_modified, linear_table_unchanged, BpDiffState, + BpGeneration, BpOmicronZonesSubtableSchema, BpPhysicalDisksSubtableSchema, + BpSledSubtable, BpSledSubtableColumn, BpSledSubtableData, + BpSledSubtableRow, KvListWithHeading, KvPair, +}; +use super::{zone_sort_key, CockroachDbPreserveDowngrade}; +use omicron_common::api::external::Generation; +use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::SledUuid; +use sled_agent_client::ZoneKind; +use std::collections::{BTreeMap, BTreeSet}; +use std::fmt; + +use crate::deployment::{ + BlueprintMetadata, BlueprintOrCollectionDisksConfig, + BlueprintOrCollectionZoneConfig, BlueprintOrCollectionZonesConfig, + BlueprintPhysicalDisksConfig, BlueprintZoneConfig, + BlueprintZoneDisposition, BlueprintZonesConfig, DiffBeforeMetadata, + ZoneSortKey, +}; + +/// Diffs for omicron zones on a given sled with a given `BpDiffState` +#[derive(Debug)] +pub struct BpDiffZoneDetails { + pub generation_before: Option, + pub generation_after: Option, + pub zones: Vec, +} + +impl BpSledSubtableData for BpDiffZoneDetails { + fn bp_generation(&self) -> BpGeneration { + BpGeneration::Diff { + before: self.generation_before, + after: self.generation_after, + } + } + + fn rows( + &self, + state: BpDiffState, + ) -> impl Iterator { + self.zones.iter().map(move |zone| { + BpSledSubtableRow::from_strings( + state, + vec![ + zone.kind().to_string(), + zone.id().to_string(), + zone.disposition().to_string(), + zone.underlay_address().to_string(), + ], + ) + }) + } +} + +/// A modified omicron zone +/// +/// A zone is considered modified if its `disposition` changes. All +/// modifications to other fields are considered errors, and will be recorded +/// as such. +#[derive(Debug)] +pub struct ModifiedZone { + pub prior_disposition: BlueprintZoneDisposition, + pub zone: BlueprintOrCollectionZoneConfig, +} + +impl ZoneSortKey for ModifiedZone { + fn kind(&self) -> ZoneKind { + self.zone.kind() + } + + fn id(&self) -> OmicronZoneUuid { + self.zone.id() + } +} + +impl ModifiedZone { + #[allow(clippy::result_large_err)] + pub fn new( + before: BlueprintOrCollectionZoneConfig, + after: BlueprintZoneConfig, + ) -> Result { + // Do we have any errors? If so, create a "reason" string. 
+ let mut reason = String::new(); + if before.kind() != after.kind() { + let msg = format!( + "mismatched zone kind: before: {}, after: {}\n", + before.kind(), + after.kind() + ); + reason.push_str(&msg); + } + if before.underlay_address() != after.underlay_address { + let msg = format!( + "mismatched underlay address: before: {}, after: {}\n", + before.underlay_address(), + after.underlay_address + ); + reason.push_str(&msg); + } + if !before.is_zone_type_equal(&after.zone_type) { + let msg = format!( + "mismatched zone type: after: {:#?}\n", + after.zone_type + ); + reason.push_str(&msg); + } + if reason.is_empty() { + Ok(ModifiedZone { + prior_disposition: before.disposition(), + zone: after.into(), + }) + } else { + Err(BpDiffZoneError { + zone_before: before, + zone_after: after.into(), + reason, + }) + } + } +} + +/// Details of modified zones on a given sled +#[derive(Debug)] +pub struct BpDiffZonesModified { + pub generation_before: Generation, + pub generation_after: Generation, + pub zones: Vec, +} + +impl BpSledSubtableData for BpDiffZonesModified { + fn bp_generation(&self) -> BpGeneration { + BpGeneration::Diff { + before: Some(self.generation_before), + after: Some(self.generation_after), + } + } + + fn rows( + &self, + state: BpDiffState, + ) -> impl Iterator { + self.zones.iter().map(move |zone| { + BpSledSubtableRow::new( + state, + vec![ + BpSledSubtableColumn::value(zone.zone.kind().to_string()), + BpSledSubtableColumn::value(zone.zone.id().to_string()), + BpSledSubtableColumn::diff( + zone.prior_disposition.to_string(), + zone.zone.disposition().to_string(), + ), + BpSledSubtableColumn::value( + zone.zone.underlay_address().to_string(), + ), + ], + ) + }) + } +} + +#[derive(Debug)] +/// Errors arising from illegally modified zone fields +pub struct BpDiffZoneErrors { + pub generation_before: Generation, + pub generation_after: Generation, + pub errors: Vec, +} + +#[derive(Debug)] +pub struct BpDiffZoneError { + pub zone_before: BlueprintOrCollectionZoneConfig, + pub zone_after: BlueprintOrCollectionZoneConfig, + pub reason: String, +} + +/// All known zones across all known sleds, their various states, and errors +#[derive(Debug, Default)] +pub struct BpDiffZones { + pub added: BTreeMap, + pub removed: BTreeMap, + pub unchanged: BTreeMap, + pub modified: BTreeMap, + pub errors: BTreeMap, +} + +impl BpDiffZones { + pub fn new( + before: BTreeMap, + mut after: BTreeMap, + ) -> Self { + let mut diffs = BpDiffZones::default(); + for (sled_id, before_zones) in before { + let before_generation = before_zones.generation(); + let mut removed = vec![]; + if let Some(after_zones) = after.remove(&sled_id) { + let after_generation = after_zones.generation; + let mut unchanged = vec![]; + let mut modified = vec![]; + let mut errors = vec![]; + let mut added = vec![]; + + // Compare `before_zones` and `after_zones` to look + // for additions, deletions, modifications, and errors. + let before_by_id: BTreeMap<_, BlueprintOrCollectionZoneConfig> = + before_zones.zones().map(|z| (z.id(), z)).collect(); + let mut after_by_id: BTreeMap<_, BlueprintZoneConfig> = + after_zones.zones.into_iter().map(|z| (z.id, z)).collect(); + + for (zone_id, zone_before) in before_by_id { + if let Some(zone_after) = after_by_id.remove(&zone_id) { + // Are the zones equal? + if zone_before == zone_after { + unchanged.push(zone_after.into()); + } else { + // The zones are different. They are only allowed to differ in terms + // of `disposition`, otherwise we have an error. 
+ match ModifiedZone::new(zone_before, zone_after) { + Ok(modified_zone) => { + modified.push(modified_zone) + } + Err(error) => errors.push(error), + } + } + } else { + // This zone doesn't exist in `zone_after` so it must have + // been removed. + removed.push(zone_before); + } + } + // Any remaining zones in `after_by_id` are newly added + for (_, zone_after) in after_by_id { + added.push(zone_after.into()); + } + + // Add all records to `diffs` that come from either `before` or `after` + // for this `sled_id`. + if !unchanged.is_empty() { + unchanged.sort_unstable_by_key(zone_sort_key); + diffs.unchanged.insert( + sled_id, + BpDiffZoneDetails { + generation_before: Some(before_generation), + generation_after: Some(after_generation), + zones: unchanged, + }, + ); + } + if !removed.is_empty() { + removed.sort_unstable_by_key(zone_sort_key); + diffs.removed.insert( + sled_id, + BpDiffZoneDetails { + generation_before: Some(before_generation), + generation_after: Some(after_generation), + zones: removed, + }, + ); + } + if !added.is_empty() { + added.sort_unstable_by_key(zone_sort_key); + diffs.added.insert( + sled_id, + BpDiffZoneDetails { + generation_before: Some(before_generation), + generation_after: Some(after_generation), + zones: added, + }, + ); + } + if !modified.is_empty() { + modified.sort_unstable_by_key(zone_sort_key); + diffs.modified.insert( + sled_id, + BpDiffZonesModified { + generation_before: before_generation, + generation_after: after_generation, + zones: modified, + }, + ); + } + if !errors.is_empty() { + diffs.errors.insert( + sled_id, + BpDiffZoneErrors { + generation_before: before_generation, + generation_after: after_generation, + errors, + }, + ); + } + } else { + // No `after_zones` for this `sled_id`, so `before_zones` are removed + assert!(removed.is_empty()); + for zone in before_zones.zones() { + removed.push(zone); + } + + if !removed.is_empty() { + removed.sort_unstable_by_key(zone_sort_key); + diffs.removed.insert( + sled_id, + BpDiffZoneDetails { + generation_before: Some(before_generation), + generation_after: None, + zones: removed, + }, + ); + } + } + } + + // Any sleds remaining in `after` have just been added, since we remove + // sleds from `after`, that were also in `before`, in the above loop. + for (sled_id, after_zones) in after { + if !after_zones.zones.is_empty() { + diffs.added.insert( + sled_id, + BpDiffZoneDetails { + generation_before: None, + generation_after: Some(after_zones.generation), + zones: after_zones + .zones + .into_iter() + .map(|z| z.into()) + .collect(), + }, + ); + } + } + + diffs + } + + /// Return a [`BpSledSubtable`] for the given `sled_id` + /// + /// We collate all the data from each category to produce a single table. + /// The order is: + /// + /// 1. Unchanged + /// 2. Removed + /// 3. Modified + /// 4. Added + /// + /// The idea behind the order is to (a) group all changes together + /// and (b) put changes towards the bottom, so people have to scroll + /// back less. + /// + /// Errors are printed in a more freeform manner after the table is + /// displayed. 
+ pub fn to_bp_sled_subtable( + &self, + sled_id: &SledUuid, + ) -> Option { + let mut generation = BpGeneration::Diff { before: None, after: None }; + let mut rows = vec![]; + if let Some(diff) = self.unchanged.get(sled_id) { + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Unchanged)); + } + if let Some(diff) = self.removed.get(sled_id) { + // Generations never vary for the same sled, so this is harmless + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Removed)); + } + + if let Some(diff) = self.modified.get(sled_id) { + // Generations never vary for the same sled, so this is harmless + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Modified)); + } + + if let Some(diff) = self.added.get(sled_id) { + // Generations never vary for the same sled, so this is harmless + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Added)); + } + + if rows.is_empty() { + None + } else { + Some(BpSledSubtable::new( + BpOmicronZonesSubtableSchema {}, + generation, + rows, + )) + } + } +} + +#[derive(Debug)] +pub struct DiffPhysicalDisksDetails { + // Disks that come from inventory don't have generation numbers + pub before_generation: Option, + + // Disks that are removed don't have "after" generation numbers + pub after_generation: Option, + + // Disks added, removed, or unmodified + pub disks: BTreeSet, +} + +impl BpSledSubtableData for DiffPhysicalDisksDetails { + fn bp_generation(&self) -> BpGeneration { + BpGeneration::Diff { + before: self.before_generation, + after: self.after_generation, + } + } + + fn rows( + &self, + state: BpDiffState, + ) -> impl Iterator { + self.disks.iter().map(move |d| { + BpSledSubtableRow::from_strings( + state, + vec![d.vendor.clone(), d.model.clone(), d.serial.clone()], + ) + }) + } +} + +#[derive(Debug, Default)] +pub struct BpDiffPhysicalDisks { + pub added: BTreeMap, + pub removed: BTreeMap, + pub unchanged: BTreeMap, +} + +impl BpDiffPhysicalDisks { + pub fn new( + before: BTreeMap, + mut after: BTreeMap, + ) -> Self { + let mut diffs = BpDiffPhysicalDisks::default(); + for (sled_id, before_disks) in before { + let before_generation = before_disks.generation(); + if let Some(after_disks) = after.remove(&sled_id) { + let after_generation = Some(after_disks.generation); + let a: BTreeSet = + after_disks.disks.into_iter().map(|d| d.identity).collect(); + let b = before_disks.disks(); + let added: BTreeSet<_> = a.difference(&b).cloned().collect(); + let removed: BTreeSet<_> = b.difference(&a).cloned().collect(); + let unchanged: BTreeSet<_> = + a.intersection(&b).cloned().collect(); + if !added.is_empty() { + diffs.added.insert( + sled_id, + DiffPhysicalDisksDetails { + before_generation, + after_generation, + disks: added, + }, + ); + } + if !removed.is_empty() { + diffs.removed.insert( + sled_id, + DiffPhysicalDisksDetails { + before_generation, + after_generation, + disks: removed, + }, + ); + } + if !unchanged.is_empty() { + diffs.unchanged.insert( + sled_id, + DiffPhysicalDisksDetails { + before_generation, + after_generation, + disks: unchanged, + }, + ); + } + } else { + diffs.removed.insert( + sled_id, + DiffPhysicalDisksDetails { + before_generation, + after_generation: None, + disks: before_disks.disks().into_iter().collect(), + }, + ); + } + } + + // Any sleds remaining in `after` have just been added, since we remove + // sleds from `after`, that were also in `before`, in the above loop. 
+ for (sled_id, after_disks) in after { + let added: BTreeSet = + after_disks.disks.into_iter().map(|d| d.identity).collect(); + if !added.is_empty() { + diffs.added.insert( + sled_id, + DiffPhysicalDisksDetails { + before_generation: None, + after_generation: Some(after_disks.generation), + disks: added, + }, + ); + } + } + + diffs + } + + /// Return a [`BpSledSubtable`] for the given `sled_id` + pub fn to_bp_sled_subtable( + &self, + sled_id: &SledUuid, + ) -> Option { + let mut generation = BpGeneration::Diff { before: None, after: None }; + let mut rows = vec![]; + if let Some(diff) = self.unchanged.get(sled_id) { + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Unchanged)); + } + if let Some(diff) = self.removed.get(sled_id) { + // Generations never vary for the same sled, so this is harmless + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Removed)); + } + + if let Some(diff) = self.added.get(sled_id) { + // Generations never vary for the same sled, so this is harmless + generation = diff.bp_generation(); + rows.extend(diff.rows(BpDiffState::Added)); + } + + if rows.is_empty() { + None + } else { + Some(BpSledSubtable::new( + BpPhysicalDisksSubtableSchema {}, + generation, + rows, + )) + } + } +} + +/// Summarizes the differences between two blueprints +#[derive(Debug)] +pub struct BlueprintDiff { + pub before_meta: DiffBeforeMetadata, + pub after_meta: BlueprintMetadata, + pub zones: BpDiffZones, + pub physical_disks: BpDiffPhysicalDisks, + pub sleds_added: BTreeSet, + pub sleds_removed: BTreeSet, + pub sleds_unchanged: BTreeSet, + pub sleds_modified: BTreeSet, +} + +impl BlueprintDiff { + /// Build a diff with the provided contents, verifying that the provided + /// data is valid. + pub fn new( + before_meta: DiffBeforeMetadata, + before_zones: BTreeMap, + after_meta: BlueprintMetadata, + after_zones: BTreeMap, + before_disks: BTreeMap, + after_disks: BTreeMap, + ) -> Self { + let before_sleds: BTreeSet<_> = + before_zones.keys().chain(before_disks.keys()).collect(); + let after_sleds: BTreeSet<_> = + after_zones.keys().chain(after_disks.keys()).collect(); + let all_sleds: BTreeSet<_> = + before_sleds.union(&after_sleds).map(|&sled_id| *sled_id).collect(); + + // All sleds that have zones or disks in `after_*`, but not `before_*` + // have been added. + let sleds_added: BTreeSet<_> = after_sleds + .difference(&before_sleds) + .map(|&sled_id| *sled_id) + .collect(); + + // All sleds that have zones or disks in `before_*`, but not `after_*` + // have been removed. + let sleds_removed: BTreeSet<_> = before_sleds + .difference(&after_sleds) + .map(|&sled_id| *sled_id) + .collect(); + + let zones = BpDiffZones::new(before_zones, after_zones); + let physical_disks = + BpDiffPhysicalDisks::new(before_disks, after_disks); + + // Sleds that haven't been added or removed are either unchanged or + // modified. + let sleds_unchanged_or_modified: BTreeSet<_> = all_sleds + .iter() + .filter(|&sled_id| { + !sleds_added.contains(sled_id) + && !sleds_removed.contains(sled_id) + }) + .map(|s| *s) + .collect(); + + // Sleds are modified if any zones or disks on those sleds are anything + // other than unchanged. 
+ let mut sleds_modified = sleds_unchanged_or_modified.clone(); + sleds_modified.retain(|sled_id| { + physical_disks.added.contains_key(sled_id) + || physical_disks.removed.contains_key(sled_id) + || zones.added.contains_key(sled_id) + || zones.removed.contains_key(sled_id) + || zones.modified.contains_key(sled_id) + || zones.errors.contains_key(sled_id) + }); + + // The rest of the sleds must be unchanged. + let unchanged_sleds: BTreeSet<_> = sleds_unchanged_or_modified + .difference(&sleds_modified) + .map(|sled_id| *sled_id) + .collect(); + + BlueprintDiff { + before_meta, + after_meta, + zones, + physical_disks, + sleds_added, + sleds_removed, + sleds_unchanged: unchanged_sleds, + sleds_modified, + } + } + + /// Return a struct that can be used to display the diff. + pub fn display(&self) -> BlueprintDiffDisplay<'_> { + BlueprintDiffDisplay::new(self) + } +} + +/// Wrapper to allow a [`BlueprintDiff`] to be displayed. +/// +/// Returned by [`BlueprintDiff::display()`]. +#[derive(Clone, Debug)] +#[must_use = "this struct does nothing unless displayed"] +pub struct BlueprintDiffDisplay<'diff> { + pub diff: &'diff BlueprintDiff, + // TODO: add colorization with a stylesheet +} + +impl<'diff> BlueprintDiffDisplay<'diff> { + #[inline] + fn new(diff: &'diff BlueprintDiff) -> Self { + Self { diff } + } + + pub fn make_metadata_diff_tables( + &self, + ) -> impl IntoIterator { + macro_rules! diff_row { + ($member:ident, $label:expr) => { + diff_row!($member, $label, |value| value) + }; + + ($member:ident, $label:expr, $display:expr) => { + match &self.diff.before_meta { + DiffBeforeMetadata::Collection { .. } => { + // Collections have no metadata, so this is new + KvPair::new( + BpDiffState::Added, + $label, + linear_table_modified( + &NOT_PRESENT_IN_COLLECTION_PARENS, + &$display(&self.diff.after_meta.$member), + ), + ) + } + DiffBeforeMetadata::Blueprint(before) => { + if before.$member == self.diff.after_meta.$member { + KvPair::new( + BpDiffState::Unchanged, + $label, + linear_table_unchanged(&$display( + &self.diff.after_meta.$member, + )), + ) + } else { + KvPair::new( + BpDiffState::Modified, + $label, + linear_table_modified( + &$display(&before.$member), + &$display(&self.diff.after_meta.$member), + ), + ) + } + } + } + }; + } + + [ + KvListWithHeading::new( + COCKROACHDB_HEADING, + vec![ + diff_row!( + cockroachdb_fingerprint, + COCKROACHDB_FINGERPRINT, + display_none_if_empty + ), + diff_row!( + cockroachdb_setting_preserve_downgrade, + COCKROACHDB_PRESERVE_DOWNGRADE, + display_optional_preserve_downgrade + ), + ], + ), + KvListWithHeading::new( + METADATA_HEADING, + vec![ + diff_row!(internal_dns_version, INTERNAL_DNS_VERSION), + diff_row!(external_dns_version, EXTERNAL_DNS_VERSION), + ], + ), + ] + } + + /// Write out physical disk and zone tables for a given `sled_id` + fn write_tables( + &self, + f: &mut fmt::Formatter<'_>, + sled_id: &SledUuid, + ) -> fmt::Result { + // Write the physical disks table if it exists + if let Some(table) = + self.diff.physical_disks.to_bp_sled_subtable(sled_id) + { + writeln!(f, "{table}\n")?; + } + + // Write the zones table if it exists + if let Some(table) = self.diff.zones.to_bp_sled_subtable(sled_id) { + writeln!(f, "{table}\n")?; + } + + Ok(()) + } +} + +impl<'diff> fmt::Display for BlueprintDiffDisplay<'diff> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let diff = self.diff; + + // Print things differently based on whether the diff is between a + // collection and a blueprint, or a blueprint and a blueprint. 
+ match &diff.before_meta { + DiffBeforeMetadata::Collection { id } => { + writeln!( + f, + "from: collection {}\n\ + to: blueprint {}", + id, diff.after_meta.id, + )?; + } + DiffBeforeMetadata::Blueprint(before) => { + writeln!( + f, + "from: blueprint {}\n\ + to: blueprint {}\n", + before.id, diff.after_meta.id + )?; + } + } + + // Write out sled information + // + // The order is: + // + // 1. Unchanged + // 2. Removed + // 3. Modified + // 4. Added + // 5. Errors + // + // The idea behind the order is to (a) group all changes together + // and (b) put changes towards the bottom, so people have to scroll + // back less. + // + // We put errors at the bottom to ensure they are seen immediately. + + // Write out tables for unchanged sleds + if !diff.sleds_unchanged.is_empty() { + writeln!(f, " UNCHANGED SLEDS:\n")?; + for sled_id in &diff.sleds_unchanged { + writeln!(f, " sled {sled_id}:\n")?; + self.write_tables(f, sled_id)?; + } + } + + // Write out tables for removed sleds + if !diff.sleds_removed.is_empty() { + writeln!(f, " REMOVED SLEDS:\n")?; + for sled_id in &diff.sleds_removed { + writeln!(f, " sled {sled_id}:\n")?; + self.write_tables(f, sled_id)?; + } + } + + // Write out tables for modified sleds + if !diff.sleds_modified.is_empty() { + writeln!(f, " MODIFIED SLEDS:\n")?; + for sled_id in &diff.sleds_modified { + writeln!(f, " sled {sled_id}:\n")?; + self.write_tables(f, sled_id)?; + } + } + + // Write out tables for added sleds + if !diff.sleds_added.is_empty() { + writeln!(f, " ADDED SLEDS:\n")?; + for sled_id in &diff.sleds_added { + writeln!(f, " sled {sled_id}:\n")?; + self.write_tables(f, sled_id)?; + } + } + + // Write out zone errors. + if !diff.zones.errors.is_empty() { + writeln!(f, "ERRORS:")?; + for (sled_id, errors) in &diff.zones.errors { + writeln!(f, "\n sled {sled_id}\n")?; + writeln!( + f, + " zone diff errors: before gen {}, after gen {}\n", + errors.generation_before, errors.generation_after + )?; + + for err in &errors.errors { + writeln!(f, " zone id: {}", err.zone_before.id())?; + writeln!(f, " reason: {}", err.reason)?; + } + } + } + + // Write out metadata diff table + for table in self.make_metadata_diff_tables() { + writeln!(f, "{}", table)?; + } + + Ok(()) + } +} + +fn display_none_if_empty(value: &str) -> &str { + if value.is_empty() { + NONE_PARENS + } else { + value + } +} + +fn display_optional_preserve_downgrade( + value: &Option, +) -> String { + match value { + Some(v) => v.to_string(), + None => INVALID_VALUE_PARENS.to_string(), + } +} diff --git a/nexus/types/src/deployment/blueprint_display.rs b/nexus/types/src/deployment/blueprint_display.rs new file mode 100644 index 0000000000..5d106b6ef3 --- /dev/null +++ b/nexus/types/src/deployment/blueprint_display.rs @@ -0,0 +1,423 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Types helpful for rendering [`Blueprints`]. 
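As an illustrative sketch of how the diffing pieces above fit together (the re-export path `nexus_types::deployment::BlueprintDiff` and the helper name below are assumptions for illustration, not taken from this change): a `BlueprintDiff` produced by `BlueprintDiff::new` is rendered through its `display()` wrapper, whose `Display` impl emits the from/to header, the per-sled tables in unchanged/removed/modified/added order, any zone diff errors, and finally the metadata tables.

// Hedged sketch: assumes BlueprintDiff is re-exported from
// nexus_types::deployment and that `diff` was built elsewhere via
// BlueprintDiff::new(...).
fn print_blueprint_diff(diff: &nexus_types::deployment::BlueprintDiff) {
    // display() returns a #[must_use] wrapper; formatting it walks sleds in
    // the order UNCHANGED, REMOVED, MODIFIED, ADDED, then prints zone diff
    // errors and the CockroachDB/metadata key-value tables.
    println!("{}", diff.display());
}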
+ +use omicron_common::api::external::Generation; +use std::fmt; + +pub mod constants { + pub(super) const ADDED_PREFIX: char = '+'; + pub(super) const REMOVED_PREFIX: char = '-'; + pub(super) const MODIFIED_PREFIX: char = '*'; + pub(super) const UNCHANGED_PREFIX: char = ' '; + + #[allow(unused)] + pub(super) const SUB_NOT_LAST: &str = "├─"; + pub(super) const SUB_LAST: &str = "└─"; + + pub const ARROW: &str = "->"; + pub const COCKROACHDB_HEADING: &str = "COCKROACHDB SETTINGS"; + pub const COCKROACHDB_FINGERPRINT: &str = "state fingerprint"; + pub const COCKROACHDB_PRESERVE_DOWNGRADE: &str = + "cluster.preserve_downgrade_option"; + pub const METADATA_HEADING: &str = "METADATA"; + pub const CREATED_BY: &str = "created by"; + pub const CREATED_AT: &str = "created at"; + pub const INTERNAL_DNS_VERSION: &str = "internal DNS version"; + pub const EXTERNAL_DNS_VERSION: &str = "external DNS version"; + pub const COMMENT: &str = "comment"; + + pub const UNCHANGED_PARENS: &str = "(unchanged)"; + pub const NONE_PARENS: &str = "(none)"; + pub const NOT_PRESENT_IN_COLLECTION_PARENS: &str = + "(not present in collection)"; + pub const INVALID_VALUE_PARENS: &str = "(invalid value)"; +} +use constants::*; + +/// The state of a sled or resource (e.g. zone or physical disk) in this +/// blueprint, with regards to the parent blueprint +#[derive(Debug, Clone, Copy)] +pub enum BpDiffState { + Unchanged, + Removed, + Modified, + Added, +} + +impl BpDiffState { + pub fn prefix(&self) -> char { + match self { + BpDiffState::Unchanged => UNCHANGED_PREFIX, + BpDiffState::Removed => REMOVED_PREFIX, + BpDiffState::Modified => MODIFIED_PREFIX, + BpDiffState::Added => ADDED_PREFIX, + } + } +} + +impl fmt::Display for BpDiffState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + BpDiffState::Unchanged => "UNCHANGED", + BpDiffState::Removed => "REMOVED", + BpDiffState::Modified => "MODIFIED", + BpDiffState::Added => "ADDED", + }; + write!(f, "{s}") + } +} + +/// A wrapper aound generation numbers for blueprints or blueprint diffs +#[derive(Debug, Clone, Copy)] +pub enum BpGeneration { + // A value in a single blueprint + Value(Generation), + + // A diff between two blueprints + Diff { before: Option, after: Option }, +} + +impl fmt::Display for BpGeneration { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BpGeneration::Value(generation) => { + write!(f, "at generation {generation}") + } + BpGeneration::Diff { before: None, after: Some(after) } => { + write!(f, "at generation {after}") + } + BpGeneration::Diff { before: Some(before), after: None } => { + write!(f, "from generation {before}") + } + BpGeneration::Diff { before: Some(before), after: Some(after) } => { + if before == after { + write!(f, "at generation {after}") + } else { + write!(f, "generation {before} -> {after}") + } + } + BpGeneration::Diff { before: None, after: None } => { + write!(f, "Error: unknown generation") + } + } + } +} + +pub enum BpSledSubtableColumn { + Value(String), + Diff { before: String, after: String }, +} + +impl BpSledSubtableColumn { + pub fn value(s: String) -> BpSledSubtableColumn { + BpSledSubtableColumn::Value(s) + } + + pub fn diff(before: String, after: String) -> BpSledSubtableColumn { + BpSledSubtableColumn::Diff { before, after } + } + + pub fn len(&self) -> usize { + match self { + BpSledSubtableColumn::Value(s) => s.len(), + BpSledSubtableColumn::Diff { before, after } => { + // Add 1 for the added/removed prefix and 1 for a space + // + // This will 
need to change if we change how we render diffs in + // the `Display` impl for `BpSledSubtable`. However, putting it + // here allows to minimize any extra horizontal spacing in case + // other values for the same column are already longer than the + // the before or after values + 2. + usize::max(before.len(), after.len()) + 2 + } + } + } +} + +/// A row in a [`BpSledSubtable`] +pub struct BpSledSubtableRow { + state: BpDiffState, + columns: Vec, +} + +impl BpSledSubtableRow { + pub fn new(state: BpDiffState, columns: Vec) -> Self { + BpSledSubtableRow { state, columns } + } + + pub fn from_strings(state: BpDiffState, columns: Vec) -> Self { + BpSledSubtableRow { + state, + columns: columns + .into_iter() + .map(BpSledSubtableColumn::Value) + .collect(), + } + } +} + +/// Metadata about all instances of specific type of [`BpSledSubtable`], +/// such as omicron zones or physical disks. +pub trait BpSledSubtableSchema { + fn table_name(&self) -> &'static str; + fn column_names(&self) -> &'static [&'static str]; +} + +// Provide data specific to an instance of a [`BpSledSubtable`] +pub trait BpSledSubtableData { + fn bp_generation(&self) -> BpGeneration; + fn rows( + &self, + state: BpDiffState, + ) -> impl Iterator; +} + +/// A table specific to a sled resource, such as a zone or disk. +/// `BpSledSubtable`s are always nested under [`BpSledTable`]s. +pub struct BpSledSubtable { + table_name: &'static str, + column_names: &'static [&'static str], + generation: BpGeneration, + rows: Vec, +} + +impl BpSledSubtable { + pub fn new( + schema: impl BpSledSubtableSchema, + generation: BpGeneration, + rows: Vec, + ) -> BpSledSubtable { + BpSledSubtable { + table_name: schema.table_name(), + column_names: schema.column_names(), + generation, + rows, + } + } + + /// Compute the max column widths based on the contents of `column_names` + // and `rows`. + fn column_widths(&self) -> Vec { + let mut widths: Vec = + self.column_names.iter().map(|s| s.len()).collect(); + + for row in &self.rows { + assert_eq!(row.columns.len(), widths.len()); + for (i, s) in row.columns.iter().enumerate() { + widths[i] = usize::max(s.len(), widths[i]); + } + } + + widths + } +} + +const SUBTABLE_INDENT: usize = 4; +const COLUMN_GAP: usize = 3; + +impl fmt::Display for BpSledSubtable { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let widths = self.column_widths(); + let mut total_width = + widths.iter().fold(0, |acc, i| acc + i + COLUMN_GAP); + total_width -= COLUMN_GAP; + + // Write the name of the subtable + writeln!( + f, + "{: (s.clone(), false), + BpSledSubtableColumn::Diff { before, .. } => { + // If we remove the prefix and space, we'll need to also + // modify `BpSledSubtableColumn::len` to reflect this. 
+ (format!("{REMOVED_PREFIX} {before}"), true) + } + }; + multiline_row |= needs_multiline; + + if i == 0 { + write!(f, "{column: &'static str { + "physical disks" + } + + fn column_names(&self) -> &'static [&'static str] { + &["vendor", "model", "serial"] + } +} + +/// The [`BpSledSubtable`] schema for omicron zones +pub struct BpOmicronZonesSubtableSchema {} +impl BpSledSubtableSchema for BpOmicronZonesSubtableSchema { + fn table_name(&self) -> &'static str { + "omicron zones" + } + fn column_names(&self) -> &'static [&'static str] { + &["zone type", "zone id", "disposition", "underlay IP"] + } +} + +// An entry in a [`KvListWithHeading`] +#[derive(Debug)] +pub struct KvPair { + state: BpDiffState, + key: String, + val: String, +} + +impl KvPair { + pub fn new_unchanged, S2: Into>( + key: S1, + val: S2, + ) -> KvPair { + KvPair { + state: BpDiffState::Unchanged, + key: key.into(), + val: val.into(), + } + } + + pub fn new, S2: Into>( + state: BpDiffState, + key: S1, + val: S2, + ) -> KvPair { + KvPair { state, key: key.into(), val: val.into() } + } +} + +// A top-to-bottom list of KV pairs with a heading +#[derive(Debug)] +pub struct KvListWithHeading { + heading: &'static str, + kv: Vec, +} + +impl KvListWithHeading { + pub fn new_unchanged, S2: Into>( + heading: &'static str, + kv: Vec<(S1, S2)>, + ) -> KvListWithHeading { + let kv = + kv.into_iter().map(|(k, v)| KvPair::new_unchanged(k, v)).collect(); + KvListWithHeading { heading, kv } + } + + pub fn new(heading: &'static str, kv: Vec) -> KvListWithHeading { + KvListWithHeading { heading, kv } + } + + /// Compute the max width of the keys for alignment purposes + fn max_key_width(&self) -> usize { + self.kv.iter().fold(0, |acc, kv| usize::max(acc, kv.key.len())) + } +} + +impl fmt::Display for KvListWithHeading { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Write the heading + writeln!(f, " {}:", self.heading)?; + + // Write the rows + let key_width = self.max_key_width() + 1; + for kv in &self.kv { + let prefix = kv.state.prefix(); + writeln!( + f, + "{prefix: String { + format!("{before} {ARROW} {after}") +} + +pub fn linear_table_unchanged(value: &dyn fmt::Display) -> String { + format!("{value} {UNCHANGED_PARENS}") +} diff --git a/nexus/types/src/deployment/network_resources.rs b/nexus/types/src/deployment/network_resources.rs new file mode 100644 index 0000000000..c93e604af9 --- /dev/null +++ b/nexus/types/src/deployment/network_resources.rs @@ -0,0 +1,319 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use super::tri_map::TriMap; +use super::tri_map::TriMapEntry; +use anyhow::anyhow; +use omicron_common::api::external::MacAddr; +use omicron_common::api::internal::shared::SourceNatConfig; +use omicron_uuid_kinds::ExternalIpUuid; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::VnicUuid; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use std::net::IpAddr; +use std::net::SocketAddr; +use thiserror::Error; + +/// Tracker and validator for network resources allocated to Omicron-managed +/// zones. +/// +/// ## Implementation notes +/// +/// `OmicronZoneNetworkResources` consists of two 1:1:1 "trijective" maps: +/// +/// 1. Providing a unique map for Omicron zone IDs, external IP IDs, and +/// external IPs. +/// 2. Providing a unique map for Omicron zone IDs, vNIC IDs, and vNICs. 
+/// +/// One question that arises: should there instead be a single 1:1:1:1:1 map? +/// In other words, is there a 1:1 mapping between external IPs and vNICs as +/// well? The answer is "generally yes", but: +/// +/// - They're not stored in the database that way, and it's possible that +/// there's some divergence. +/// - We currently don't plan to get any utility out of asserting the 1:1:1:1:1 +/// map. The main planned use of this is for expunged zone garbage collection +/// -- while that benefits from trijective maps tremendously, there's no +/// additional value in asserting a unique mapping between external IPs and +/// vNICs. +/// +/// So we use two separate maps for now. But a single map is always a +/// possibility in the future, if required. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct OmicronZoneNetworkResources { + /// external IPs allocated to Omicron zones + omicron_zone_external_ips: TriMap, + + /// vNICs allocated to Omicron zones + omicron_zone_nics: TriMap, +} + +impl OmicronZoneNetworkResources { + pub fn new() -> Self { + Self { + omicron_zone_external_ips: TriMap::new(), + omicron_zone_nics: TriMap::new(), + } + } + + pub fn omicron_zone_external_ips( + &self, + ) -> impl Iterator + '_ { + self.omicron_zone_external_ips.iter().copied() + } + + pub fn omicron_zone_nics( + &self, + ) -> impl Iterator + '_ { + self.omicron_zone_nics.iter().copied() + } + + pub fn add_external_ip( + &mut self, + zone_id: OmicronZoneUuid, + ip: OmicronZoneExternalIp, + ) -> Result<(), AddNetworkResourceError> { + let entry = OmicronZoneExternalIpEntry { zone_id, ip }; + self.omicron_zone_external_ips.insert_no_dups(entry).map_err(|err| { + AddNetworkResourceError::DuplicateOmicronZoneExternalIp { + zone_id, + ip, + err: anyhow!(err), + } + }) + } + + pub fn add_nic( + &mut self, + zone_id: OmicronZoneUuid, + nic: OmicronZoneNic, + ) -> Result<(), AddNetworkResourceError> { + let entry = OmicronZoneNicEntry { zone_id, nic }; + self.omicron_zone_nics.insert_no_dups(entry).map_err(|err| { + AddNetworkResourceError::DuplicateOmicronZoneNic { + zone_id, + nic, + err: anyhow!(err), + } + }) + } + + pub fn get_external_ip_by_zone_id( + &self, + zone_id: OmicronZoneUuid, + ) -> Option<&OmicronZoneExternalIpEntry> { + self.omicron_zone_external_ips.get1(&zone_id) + } + + pub fn get_external_ip_by_external_ip_id( + &self, + ip: ExternalIpUuid, + ) -> Option<&OmicronZoneExternalIpEntry> { + self.omicron_zone_external_ips.get2(&ip) + } + + pub fn get_external_ip_by_ip( + &self, + ip: OmicronZoneExternalIpKey, + ) -> Option<&OmicronZoneExternalIpEntry> { + self.omicron_zone_external_ips.get3(&ip) + } + + pub fn get_nic_by_zone_id( + &self, + zone_id: OmicronZoneUuid, + ) -> Option<&OmicronZoneNicEntry> { + self.omicron_zone_nics.get1(&zone_id) + } + + pub fn get_nic_by_vnic_id( + &self, + vnic_id: VnicUuid, + ) -> Option<&OmicronZoneNicEntry> { + self.omicron_zone_nics.get2(&vnic_id) + } + + pub fn get_nic_by_mac(&self, mac: MacAddr) -> Option<&OmicronZoneNicEntry> { + self.omicron_zone_nics.get3(&mac) + } +} + +/// External IP variants possible for Omicron-managed zones. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum OmicronZoneExternalIp { + Floating(OmicronZoneExternalFloatingIp), + Snat(OmicronZoneExternalSnatIp), + // We may eventually want `Ephemeral(_)` too (arguably Nexus could be + // ephemeral?), but for now we only have Floating and Snat uses. 
+} + +impl OmicronZoneExternalIp { + pub fn id(&self) -> ExternalIpUuid { + match self { + OmicronZoneExternalIp::Floating(ext) => ext.id, + OmicronZoneExternalIp::Snat(ext) => ext.id, + } + } + + pub fn ip(&self) -> IpAddr { + match self { + OmicronZoneExternalIp::Floating(ext) => ext.ip, + OmicronZoneExternalIp::Snat(ext) => ext.snat_cfg.ip, + } + } + + pub fn ip_key(&self) -> OmicronZoneExternalIpKey { + match self { + OmicronZoneExternalIp::Floating(ip) => { + OmicronZoneExternalIpKey::Floating(ip.ip) + } + OmicronZoneExternalIp::Snat(snat) => { + OmicronZoneExternalIpKey::Snat(snat.snat_cfg) + } + } + } +} + +/// An IP-based key suitable for uniquely identifying an +/// [`OmicronZoneExternalIp`]. +/// +/// We can't use the IP itself to uniquely identify an external IP because SNAT +/// IPs can have overlapping addresses. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum OmicronZoneExternalIpKey { + Floating(IpAddr), + Snat(SourceNatConfig), +} + +/// Floating external IP allocated to an Omicron-managed zone. +/// +/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields +/// necessary for blueprint planning, and requires that the zone have a single +/// IP. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, +)] +pub struct OmicronZoneExternalFloatingIp { + pub id: ExternalIpUuid, + pub ip: IpAddr, +} + +/// Floating external address with port allocated to an Omicron-managed zone. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, +)] +pub struct OmicronZoneExternalFloatingAddr { + pub id: ExternalIpUuid, + pub addr: SocketAddr, +} + +impl OmicronZoneExternalFloatingAddr { + pub fn into_ip(self) -> OmicronZoneExternalFloatingIp { + OmicronZoneExternalFloatingIp { id: self.id, ip: self.addr.ip() } + } +} + +/// SNAT (outbound) external IP allocated to an Omicron-managed zone. +/// +/// This is a slimmer `nexus_db_model::ExternalIp` that only stores the fields +/// necessary for blueprint planning, and requires that the zone have a single +/// IP. +#[derive( + Debug, Clone, Copy, PartialEq, Eq, JsonSchema, Serialize, Deserialize, +)] +pub struct OmicronZoneExternalSnatIp { + pub id: ExternalIpUuid, + pub snat_cfg: SourceNatConfig, +} + +/// Network interface allocated to an Omicron-managed zone. +/// +/// This is a slimmer `nexus_db_model::ServiceNetworkInterface` that only stores +/// the fields necessary for blueprint planning. +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub struct OmicronZoneNic { + pub id: VnicUuid, + pub mac: MacAddr, + pub ip: IpAddr, + pub slot: u8, + pub primary: bool, +} + +/// A pair of an Omicron zone ID and an external IP. +/// +/// Part of [`OmicronZoneNetworkResources`]. +#[derive(Clone, Copy, Debug, Deserialize, Serialize)] +pub struct OmicronZoneExternalIpEntry { + pub zone_id: OmicronZoneUuid, + pub ip: OmicronZoneExternalIp, +} + +/// Specification for the tri-map of Omicron zone external IPs. +impl TriMapEntry for OmicronZoneExternalIpEntry { + type K1 = OmicronZoneUuid; + type K2 = ExternalIpUuid; + + // Note: cannot use IpAddr here, because SNAT IPs can overlap as long as + // their port blocks are disjoint. + type K3 = OmicronZoneExternalIpKey; + + fn key1(&self) -> Self::K1 { + self.zone_id + } + + fn key2(&self) -> Self::K2 { + self.ip.id() + } + + fn key3(&self) -> Self::K3 { + self.ip.ip_key() + } +} + +/// A pair of an Omicron zone ID and a network interface. +/// +/// Part of [`OmicronZoneNetworkResources`]. 
+#[derive(Clone, Copy, Debug, Deserialize, Serialize)] +pub struct OmicronZoneNicEntry { + pub zone_id: OmicronZoneUuid, + pub nic: OmicronZoneNic, +} + +impl TriMapEntry for OmicronZoneNicEntry { + type K1 = OmicronZoneUuid; + type K2 = VnicUuid; + type K3 = MacAddr; + + fn key1(&self) -> Self::K1 { + self.zone_id + } + + fn key2(&self) -> Self::K2 { + self.nic.id + } + + fn key3(&self) -> Self::K3 { + self.nic.mac + } +} + +#[derive(Debug, Error)] +pub enum AddNetworkResourceError { + #[error("associating Omicron zone {zone_id} with {ip:?} failed due to duplicates")] + DuplicateOmicronZoneExternalIp { + zone_id: OmicronZoneUuid, + ip: OmicronZoneExternalIp, + #[source] + err: anyhow::Error, + }, + #[error("associating Omicron zone {zone_id} with {nic:?} failed due to duplicates")] + DuplicateOmicronZoneNic { + zone_id: OmicronZoneUuid, + nic: OmicronZoneNic, + #[source] + err: anyhow::Error, + }, +} diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs new file mode 100644 index 0000000000..bb74c3655e --- /dev/null +++ b/nexus/types/src/deployment/planning_input.rs @@ -0,0 +1,821 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Types describing inputs the Reconfigurator needs to plan and produce new +//! blueprints. + +use super::AddNetworkResourceError; +use super::OmicronZoneExternalIp; +use super::OmicronZoneNetworkResources; +use super::OmicronZoneNic; +use crate::external_api::views::PhysicalDiskPolicy; +use crate::external_api::views::PhysicalDiskState; +use crate::external_api::views::SledPolicy; +use crate::external_api::views::SledProvisionPolicy; +use crate::external_api::views::SledState; +use clap::ValueEnum; +use ipnetwork::IpNetwork; +use omicron_common::address::IpRange; +use omicron_common::address::Ipv6Subnet; +use omicron_common::address::SLED_PREFIX; +use omicron_common::api::external::Generation; +use omicron_common::api::internal::shared::SourceNatConfigError; +use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::PhysicalDiskUuid; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::ZpoolUuid; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; +use std::fmt; +use strum::IntoEnumIterator; + +/// Policy and database inputs to the Reconfigurator planner +/// +/// The primary inputs to the planner are the parent (either a parent blueprint +/// or an inventory collection) and this structure. This type holds the +/// fleet-wide policy as well as any additional information fetched from CRDB +/// that the planner needs to make decisions. +/// +/// The current policy is pretty limited. It's aimed primarily at supporting +/// the add/remove sled use case. +/// +/// The planning input has some internal invariants that code outside of this +/// module can rely on. They include: +/// +/// - Each Omicron zone has at most one external IP and at most one vNIC. +/// - A given external IP or vNIC is only associated with a single Omicron +/// zone. 
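A brief sketch of how the first of these invariants surfaces through the `OmicronZoneNetworkResources` tracker defined in network_resources.rs above. The `new_v4()` constructors on the typed UUIDs and the `nexus_types::deployment` re-export paths are assumptions made for illustration; the `add_external_ip` and lookup signatures follow the definitions shown earlier in this change.

use nexus_types::deployment::{
    OmicronZoneExternalFloatingIp, OmicronZoneExternalIp,
    OmicronZoneNetworkResources,
};
use omicron_uuid_kinds::{ExternalIpUuid, OmicronZoneUuid};

fn network_resources_invariants() -> anyhow::Result<()> {
    let mut resources = OmicronZoneNetworkResources::new();
    let zone_id = OmicronZoneUuid::new_v4();

    let first = OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp {
        id: ExternalIpUuid::new_v4(),
        ip: "192.0.2.10".parse()?,
    });
    // The first association is accepted and can then be looked up by any of
    // the three keys (zone ID, external IP ID, or the IP itself).
    resources.add_external_ip(zone_id, first)?;
    assert!(resources.get_external_ip_by_zone_id(zone_id).is_some());

    // A second external IP for the same zone breaks the 1:1:1 mapping and is
    // rejected (AddNetworkResourceError::DuplicateOmicronZoneExternalIp).
    let second = OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp {
        id: ExternalIpUuid::new_v4(),
        ip: "192.0.2.11".parse()?,
    });
    assert!(resources.add_external_ip(zone_id, second).is_err());
    Ok(())
}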
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PlanningInput { + /// fleet-wide policy + policy: Policy, + + /// current internal DNS version + internal_dns_version: Generation, + + /// current external DNS version + external_dns_version: Generation, + + /// current CockroachDB settings + cockroachdb_settings: CockroachDbSettings, + + /// per-sled policy and resources + sleds: BTreeMap, + + /// per-zone network resources + network_resources: OmicronZoneNetworkResources, +} + +impl PlanningInput { + /// current internal DNS version + pub fn internal_dns_version(&self) -> Generation { + self.internal_dns_version + } + + /// current external DNS version + pub fn external_dns_version(&self) -> Generation { + self.external_dns_version + } + + /// current CockroachDB settings + pub fn cockroachdb_settings(&self) -> &CockroachDbSettings { + &self.cockroachdb_settings + } + + pub fn target_nexus_zone_count(&self) -> usize { + self.policy.target_nexus_zone_count + } + + pub fn target_cockroachdb_cluster_version( + &self, + ) -> CockroachDbClusterVersion { + self.policy.target_cockroachdb_cluster_version + } + + pub fn service_ip_pool_ranges(&self) -> &[IpRange] { + &self.policy.service_ip_pool_ranges + } + + pub fn all_sleds( + &self, + filter: SledFilter, + ) -> impl Iterator + '_ { + self.sleds.iter().filter_map(move |(&sled_id, details)| { + filter + .matches_policy_and_state(details.policy, details.state) + .then_some((sled_id, details)) + }) + } + + pub fn all_sled_ids( + &self, + filter: SledFilter, + ) -> impl Iterator + '_ { + self.all_sleds(filter).map(|(sled_id, _)| sled_id) + } + + pub fn all_sled_resources( + &self, + filter: SledFilter, + ) -> impl Iterator + '_ { + self.all_sleds(filter) + .map(|(sled_id, details)| (sled_id, &details.resources)) + } + + pub fn sled_policy(&self, sled_id: &SledUuid) -> Option { + self.sleds.get(sled_id).map(|details| details.policy) + } + + pub fn sled_resources(&self, sled_id: &SledUuid) -> Option<&SledResources> { + self.sleds.get(sled_id).map(|details| &details.resources) + } + + pub fn network_resources(&self) -> &OmicronZoneNetworkResources { + &self.network_resources + } + + /// Convert this `PlanningInput` back into a [`PlanningInputBuilder`] + /// + /// This is primarily useful for tests that want to mutate an existing + /// [`PlanningInput`]. + pub fn into_builder(self) -> PlanningInputBuilder { + PlanningInputBuilder { + policy: self.policy, + internal_dns_version: self.internal_dns_version, + external_dns_version: self.external_dns_version, + cockroachdb_settings: self.cockroachdb_settings, + sleds: self.sleds, + network_resources: self.network_resources, + } + } +} + +/// Describes the current values for any CockroachDB settings that we care +/// about. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct CockroachDbSettings { + /// A fingerprint representing the current state of the cluster. This must + /// be recorded in a blueprint and passed to the `DataStore` function when + /// changing settings. + pub state_fingerprint: String, + + /// `version` + /// + /// WARNING: This value should _not_ be used to set the + /// `cluster.preserve_downgrade_option` setting. It can potentially reflect + /// an internal, intermediate upgrade version (e.g. "22.1-12"). 
+ pub version: String, + /// `cluster.preserve_downgrade_option` + pub preserve_downgrade: String, +} + +impl CockroachDbSettings { + pub const fn empty() -> CockroachDbSettings { + CockroachDbSettings { + state_fingerprint: String::new(), + version: String::new(), + preserve_downgrade: String::new(), + } + } +} + +/// CockroachDB cluster versions we are aware of. +/// +/// CockroachDB can be upgraded from one major version to the next, e.g. v22.1 +/// -> v22.2. Each major version introduces changes in how it stores data on +/// disk to support new features, and each major version has support for reading +/// the previous version's data so that it can perform an upgrade. The version +/// of the data format is called the "cluster version", which is distinct from +/// but related to the software version that's being run. +/// +/// While software version v22.2 is using cluster version v22.1, it's possible +/// to downgrade back to v22.1. Once the cluster version is upgraded, there's no +/// going back. +/// +/// To give us some time to evaluate new versions of the software while +/// retaining a downgrade path, we currently deploy new versions of CockroachDB +/// across two releases of the Oxide software, in a "tick-tock" model: +/// +/// - In "tick" releases, we upgrade the version of the +/// CockroachDB software to a new major version, and update +/// `CockroachDbClusterVersion::NEWLY_INITIALIZED`. On upgraded racks, the new +/// version is running with the previous cluster version; on newly-initialized +/// racks, the new version is running with the new cluser version. +/// - In "tock" releases, we change `CockroachDbClusterVersion::POLICY` to the +/// major version we upgraded to in the last "tick" release. This results in a +/// new blueprint that upgrades the cluster version, destroying the downgrade +/// path but allowing us to eventually upgrade to the next release. +/// +/// These presently describe major versions of CockroachDB. The order of these +/// must be maintained in the correct order (the first variant must be the +/// earliest version). +#[derive( + Debug, + Clone, + Copy, + PartialEq, + Eq, + PartialOrd, + Ord, + parse_display::Display, + parse_display::FromStr, + Deserialize, + Serialize, + JsonSchema, +)] +pub enum CockroachDbClusterVersion { + #[display("22.1")] + V22_1, +} + +impl CockroachDbClusterVersion { + /// The hardcoded CockroachDB cluster version we want to be on, used in + /// [`Policy`]. + /// + /// /!\ WARNING: If you change this, there is no going back. /!\ + pub const POLICY: CockroachDbClusterVersion = + CockroachDbClusterVersion::V22_1; + + /// The CockroachDB cluster version created as part of newly-initialized + /// racks. + /// + /// CockroachDB knows how to create a new cluster with the current cluster + /// version, and how to upgrade the cluster version from the previous major + /// release, but it does not have any ability to create a new cluster with + /// the previous major release's cluster version. + /// + /// During "tick" releases, newly-initialized racks will be running + /// this cluster version, which will be one major version newer than the + /// version specified by `CockroachDbClusterVersion::POLICY`. During "tock" + /// releases, these versions are the same. + pub const NEWLY_INITIALIZED: CockroachDbClusterVersion = + CockroachDbClusterVersion::V22_1; +} + +/// Whether to set `cluster.preserve_downgrade_option` and what to set it to. 
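Given the tick-tock scheme described above, the relationship between the two constants can be captured in a short test-style sketch. The `nexus_types::deployment` import path is an assumption for illustration; the ordering relies on the derived `Ord` and on keeping the variants declared oldest-first, as the comment above requires.

use nexus_types::deployment::CockroachDbClusterVersion;

fn check_cockroachdb_version_constants() {
    // POLICY never runs ahead of NEWLY_INITIALIZED: in a "tick" release the
    // newly-initialized cluster version moves forward first; in a "tock"
    // release POLICY catches up and the two constants are equal.
    assert!(
        CockroachDbClusterVersion::POLICY
            <= CockroachDbClusterVersion::NEWLY_INITIALIZED
    );
    // The display form round-trips through parse_display, e.g. "22.1".
    assert_eq!(CockroachDbClusterVersion::V22_1.to_string(), "22.1");
}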
+#[derive( + Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize, JsonSchema, +)] +#[serde(tag = "action", content = "data", rename_all = "snake_case")] +pub enum CockroachDbPreserveDowngrade { + /// Do not modify the setting. + DoNotModify, + /// Ensure the setting is set to an empty string. + AllowUpgrade, + /// Ensure the setting is set to a given cluster version. + Set(CockroachDbClusterVersion), +} + +impl CockroachDbPreserveDowngrade { + pub fn from_optional_string( + value: &Option, + ) -> Result { + Ok(match value { + Some(version) => { + if version.is_empty() { + CockroachDbPreserveDowngrade::AllowUpgrade + } else { + CockroachDbPreserveDowngrade::Set(version.parse()?) + } + } + None => CockroachDbPreserveDowngrade::DoNotModify, + }) + } + + pub fn to_optional_string(self) -> Option { + match self { + CockroachDbPreserveDowngrade::DoNotModify => None, + CockroachDbPreserveDowngrade::AllowUpgrade => Some(String::new()), + CockroachDbPreserveDowngrade::Set(version) => { + Some(version.to_string()) + } + } + } +} + +impl fmt::Display for CockroachDbPreserveDowngrade { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + CockroachDbPreserveDowngrade::DoNotModify => { + write!(f, "(do not modify)") + } + CockroachDbPreserveDowngrade::AllowUpgrade => { + write!(f, "\"\" (allow upgrade)") + } + CockroachDbPreserveDowngrade::Set(version) => { + write!(f, "\"{}\"", version) + } + } + } +} + +impl From for CockroachDbPreserveDowngrade { + fn from(value: CockroachDbClusterVersion) -> Self { + CockroachDbPreserveDowngrade::Set(value) + } +} + +/// Describes a single disk already managed by the sled. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SledDisk { + pub disk_identity: DiskIdentity, + pub disk_id: PhysicalDiskUuid, + pub policy: PhysicalDiskPolicy, + pub state: PhysicalDiskState, +} + +impl SledDisk { + fn provisionable(&self) -> bool { + DiskFilter::InService.matches_policy_and_state(self.policy, self.state) + } +} + +/// Filters that apply to disks. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum DiskFilter { + /// All disks + All, + + /// All disks which are in-service. + InService, +} + +impl DiskFilter { + fn matches_policy_and_state( + self, + policy: PhysicalDiskPolicy, + state: PhysicalDiskState, + ) -> bool { + match self { + DiskFilter::All => true, + DiskFilter::InService => match (policy, state) { + (PhysicalDiskPolicy::InService, PhysicalDiskState::Active) => { + true + } + _ => false, + }, + } + } +} + +/// Filters that apply to zpools. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum ZpoolFilter { + /// All zpools + All, + + /// All zpools which are in-service. 
+ InService, +} + +impl ZpoolFilter { + fn matches_policy_and_state( + self, + policy: PhysicalDiskPolicy, + state: PhysicalDiskState, + ) -> bool { + match self { + ZpoolFilter::All => true, + ZpoolFilter::InService => match (policy, state) { + (PhysicalDiskPolicy::InService, PhysicalDiskState::Active) => { + true + } + _ => false, + }, + } + } +} + +/// Describes the resources available on each sled for the planner +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SledResources { + /// zpools (and their backing disks) on this sled + /// + /// (used to allocate storage for control plane zones with persistent + /// storage) + pub zpools: BTreeMap, + + /// the IPv6 subnet of this sled on the underlay network + /// + /// (implicitly specifies the whole range of addresses that the planner can + /// use for control plane components) + pub subnet: Ipv6Subnet, +} + +impl SledResources { + /// Returns if the zpool is provisionable (known, in-service, and active). + pub fn zpool_is_provisionable(&self, zpool: &ZpoolUuid) -> bool { + let Some(disk) = self.zpools.get(zpool) else { return false }; + disk.provisionable() + } + + /// Returns all zpools matching the given filter. + pub fn all_zpools( + &self, + filter: ZpoolFilter, + ) -> impl Iterator + '_ { + self.zpools.iter().filter_map(move |(zpool, disk)| { + filter + .matches_policy_and_state(disk.policy, disk.state) + .then_some(zpool) + }) + } + + pub fn all_disks( + &self, + filter: DiskFilter, + ) -> impl Iterator + '_ { + self.zpools.iter().filter_map(move |(zpool, disk)| { + filter + .matches_policy_and_state(disk.policy, disk.state) + .then_some((zpool, disk)) + }) + } +} + +/// Filters that apply to sleds. +/// +/// This logic lives here rather than within the individual components making +/// decisions, so that this is easier to read. +/// +/// The meaning of a particular filter should not be overloaded -- each time a +/// new use case wants to make a decision based on the zone disposition, a new +/// variant should be added to this enum. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, ValueEnum)] +pub enum SledFilter { + // --- + // Prefer to keep this list in alphabetical order. + // --- + /// All sleds that are currently part of the control plane cluster. + /// + /// Intentionally omits decommissioned sleds, but is otherwise the filter to + /// fetch "all sleds regardless of current policy or state". + Commissioned, + + /// All sleds that were previously part of the control plane cluster but + /// have been decommissioned. + /// + /// Any sleds matching this filter are expected to no longer be present. + /// This filter is only useful for historical or debugging purposes, such as + /// listing decommissioned sleds via `omdb`. + Decommissioned, + + /// Sleds that are eligible for discretionary services. + Discretionary, + + /// Sleds that are in service (even if they might not be eligible for + /// discretionary services). + InService, + + /// Sleds whose sled agents should be queried for inventory + QueryDuringInventory, + + /// Sleds on which reservations can be created. + ReservationCreate, + + /// Sleds which should be sent VPC firewall rules. + VpcFirewall, +} + +impl SledFilter { + /// Returns true if self matches the provided policy and state. + pub fn matches_policy_and_state( + self, + policy: SledPolicy, + state: SledState, + ) -> bool { + policy.matches(self) && state.matches(self) + } +} + +impl SledPolicy { + /// Returns true if self matches the filter. 
+ /// + /// Any users of this must also compare against the [`SledState`], if + /// relevant: a sled filter is fully matched when it matches both the + /// policy and the state. See [`SledFilter::matches_policy_and_state`]. + pub fn matches(self, filter: SledFilter) -> bool { + // Some notes: + // + // # Match style + // + // This code could be written in three ways: + // + // 1. match self { match filter { ... } } + // 2. match filter { match self { ... } } + // 3. match (self, filter) { ... } + // + // We choose 1 here because we expect many filters and just a few + // policies, and 1 is the easiest form to represent that. + // + // # Illegal states + // + // Some of the code that checks against both policies and filters is + // effectively checking for illegal states. We shouldn't be able to + // have a policy+state combo where the policy says the sled is in + // service but the state is decommissioned, for example, but the two + // separate types let us represent that. Code that ANDs + // policy.matches(filter) and state.matches(filter) naturally guards + // against those states. + match self { + SledPolicy::InService { + provision_policy: SledProvisionPolicy::Provisionable, + } => match filter { + SledFilter::Commissioned => true, + SledFilter::Decommissioned => false, + SledFilter::Discretionary => true, + SledFilter::InService => true, + SledFilter::QueryDuringInventory => true, + SledFilter::ReservationCreate => true, + SledFilter::VpcFirewall => true, + }, + SledPolicy::InService { + provision_policy: SledProvisionPolicy::NonProvisionable, + } => match filter { + SledFilter::Commissioned => true, + SledFilter::Decommissioned => false, + SledFilter::Discretionary => false, + SledFilter::InService => true, + SledFilter::QueryDuringInventory => true, + SledFilter::ReservationCreate => false, + SledFilter::VpcFirewall => true, + }, + SledPolicy::Expunged => match filter { + SledFilter::Commissioned => true, + SledFilter::Decommissioned => true, + SledFilter::Discretionary => false, + SledFilter::InService => false, + SledFilter::QueryDuringInventory => false, + SledFilter::ReservationCreate => false, + SledFilter::VpcFirewall => false, + }, + } + } + + /// Returns all policies matching the given filter. + /// + /// This is meant for database access, and is generally paired with + /// [`SledState::all_matching`]. See `ApplySledFilterExt` in + /// nexus-db-model. + pub fn all_matching(filter: SledFilter) -> impl Iterator { + Self::iter().filter(move |policy| policy.matches(filter)) + } +} + +impl SledState { + /// Returns true if self matches the filter. + /// + /// Any users of this must also compare against the [`SledPolicy`], if + /// relevant: a sled filter is fully matched when both the policy and the + /// state match. See [`SledFilter::matches_policy_and_state`]. + pub fn matches(self, filter: SledFilter) -> bool { + // See `SledFilter::matches` above for some notes. 
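+        // As with `SledPolicy::matches`, this is written as an exhaustive
+        // matrix rather than with catch-all arms, so adding a new filter or
+        // sled state forces an explicit decision here.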
+ match self { + SledState::Active => match filter { + SledFilter::Commissioned => true, + SledFilter::Decommissioned => false, + SledFilter::Discretionary => true, + SledFilter::InService => true, + SledFilter::QueryDuringInventory => true, + SledFilter::ReservationCreate => true, + SledFilter::VpcFirewall => true, + }, + SledState::Decommissioned => match filter { + SledFilter::Commissioned => false, + SledFilter::Decommissioned => true, + SledFilter::Discretionary => false, + SledFilter::InService => false, + SledFilter::QueryDuringInventory => false, + SledFilter::ReservationCreate => false, + SledFilter::VpcFirewall => false, + }, + } + } + + /// Returns all policies matching the given filter. + /// + /// This is meant for database access, and is generally paired with + /// [`SledPolicy::all_matching`]. See `ApplySledFilterExt` in + /// nexus-db-model. + pub fn all_matching(filter: SledFilter) -> impl Iterator { + Self::iter().filter(move |state| state.matches(filter)) + } +} + +/// Fleet-wide deployment policy +/// +/// The **policy** represents the deployment controls that people (operators and +/// support engineers) can modify directly under normal operation. In the +/// limit, this would include things like: how many CockroachDB nodes should be +/// part of the cluster, what system version the system should be running, etc. +/// It would _not_ include things like which services should be running on which +/// sleds or which host OS version should be on each sled because that's up to +/// the control plane to decide. (To be clear, the intent is that for +/// extenuating circumstances, people could exercise control over such things, +/// but that would not be part of normal operation.) +/// +/// Conceptually the policy should also include the set of sleds that are +/// supposed to be part of the system and their individual [`SledPolicy`]s; +/// however, those are tracked as a separate part of [`PlanningInput`] as each +/// sled additionally has non-policy [`SledResources`] needed for planning. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Policy { + /// ranges specified by the IP pool for externally-visible control plane + /// services (e.g., external DNS, Nexus, boundary NTP) + pub service_ip_pool_ranges: Vec, + + /// desired total number of deployed Nexus zones + pub target_nexus_zone_count: usize, + + /// desired CockroachDB `cluster.preserve_downgrade_option` setting. + /// at present this is hardcoded based on the version of CockroachDB we + /// presently ship and the tick-tock pattern described in RFD 469. 
+ pub target_cockroachdb_cluster_version: CockroachDbClusterVersion, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SledDetails { + /// current sled policy + pub policy: SledPolicy, + /// current sled state + pub state: SledState, + /// current resources allocated to this sled + pub resources: SledResources, +} + +#[derive(Debug, thiserror::Error)] +pub enum PlanningInputBuildError { + #[error("duplicate sled ID: {0}")] + DuplicateSledId(SledUuid), + #[error("Omicron zone {zone_id} has a range of IPs ({ip:?}), only a single IP is supported")] + NotSingleIp { zone_id: OmicronZoneUuid, ip: IpNetwork }, + #[error(transparent)] + AddNetworkResource(#[from] AddNetworkResourceError), + #[error("Omicron zone {0} has an ephemeral IP (unsupported)")] + EphemeralIpUnsupported(OmicronZoneUuid), + #[error("Omicron zone {zone_id} has a bad SNAT config")] + BadSnatConfig { + zone_id: OmicronZoneUuid, + #[source] + err: SourceNatConfigError, + }, +} + +/// Constructor for [`PlanningInput`]. +#[derive(Clone, Debug)] +pub struct PlanningInputBuilder { + policy: Policy, + internal_dns_version: Generation, + external_dns_version: Generation, + cockroachdb_settings: CockroachDbSettings, + sleds: BTreeMap, + network_resources: OmicronZoneNetworkResources, +} + +impl PlanningInputBuilder { + pub fn empty_input() -> PlanningInput { + // This empty input is known to be valid. + PlanningInput { + policy: Policy { + service_ip_pool_ranges: Vec::new(), + target_nexus_zone_count: 0, + target_cockroachdb_cluster_version: + CockroachDbClusterVersion::POLICY, + }, + internal_dns_version: Generation::new(), + external_dns_version: Generation::new(), + cockroachdb_settings: CockroachDbSettings::empty(), + sleds: BTreeMap::new(), + network_resources: OmicronZoneNetworkResources::new(), + } + } + + pub fn new( + policy: Policy, + internal_dns_version: Generation, + external_dns_version: Generation, + cockroachdb_settings: CockroachDbSettings, + ) -> Self { + Self { + policy, + internal_dns_version, + external_dns_version, + cockroachdb_settings, + sleds: BTreeMap::new(), + network_resources: OmicronZoneNetworkResources::new(), + } + } + + pub fn add_sled( + &mut self, + sled_id: SledUuid, + details: SledDetails, + ) -> Result<(), PlanningInputBuildError> { + match self.sleds.entry(sled_id) { + Entry::Vacant(slot) => { + slot.insert(details); + Ok(()) + } + Entry::Occupied(_) => { + Err(PlanningInputBuildError::DuplicateSledId(sled_id)) + } + } + } + + pub fn add_omicron_zone_external_ip( + &mut self, + zone_id: OmicronZoneUuid, + ip: OmicronZoneExternalIp, + ) -> Result<(), PlanningInputBuildError> { + Ok(self.network_resources.add_external_ip(zone_id, ip)?) + } + + pub fn add_omicron_zone_nic( + &mut self, + zone_id: OmicronZoneUuid, + nic: OmicronZoneNic, + ) -> Result<(), PlanningInputBuildError> { + Ok(self.network_resources.add_nic(zone_id, nic)?) 
+ } + + pub fn network_resources_mut( + &mut self, + ) -> &mut OmicronZoneNetworkResources { + &mut self.network_resources + } + + pub fn policy_mut(&mut self) -> &mut Policy { + &mut self.policy + } + + pub fn sleds(&mut self) -> &BTreeMap { + &self.sleds + } + + pub fn sleds_mut(&mut self) -> &mut BTreeMap { + &mut self.sleds + } + + pub fn set_internal_dns_version(&mut self, new_version: Generation) { + self.internal_dns_version = new_version; + } + + pub fn set_external_dns_version(&mut self, new_version: Generation) { + self.external_dns_version = new_version; + } + + pub fn set_cockroachdb_settings( + &mut self, + cockroachdb_settings: CockroachDbSettings, + ) { + self.cockroachdb_settings = cockroachdb_settings; + } + + pub fn build(self) -> PlanningInput { + PlanningInput { + policy: self.policy, + internal_dns_version: self.internal_dns_version, + external_dns_version: self.external_dns_version, + cockroachdb_settings: self.cockroachdb_settings, + sleds: self.sleds, + network_resources: self.network_resources, + } + } +} + +#[cfg(test)] +mod tests { + use super::CockroachDbClusterVersion; + + #[test] + fn cockroachdb_cluster_versions() { + // This should always be true. + assert!( + CockroachDbClusterVersion::POLICY + <= CockroachDbClusterVersion::NEWLY_INITIALIZED + ); + + let cockroachdb_version = + include_str!("../../../../tools/cockroachdb_version") + .trim_start_matches('v') + .rsplit_once('.') + .unwrap() + .0; + assert_eq!( + CockroachDbClusterVersion::NEWLY_INITIALIZED.to_string(), + cockroachdb_version + ); + + // In the next "tick" release, this version will be stored in a + // different file. + assert_eq!( + CockroachDbClusterVersion::POLICY.to_string(), + cockroachdb_version + ); + } +} diff --git a/nexus/types/src/deployment/tri_map.rs b/nexus/types/src/deployment/tri_map.rs new file mode 100644 index 0000000000..e4ef320b4f --- /dev/null +++ b/nexus/types/src/deployment/tri_map.rs @@ -0,0 +1,515 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::{ + borrow::Borrow, + collections::{hash_map, BTreeSet, HashMap}, + fmt, + hash::Hash, +}; + +use derive_where::derive_where; +use serde::{Deserialize, Serialize, Serializer}; + +/// An append-only 1:1:1 (trijective) map for three keys and a value. +/// +/// The storage mechanism is a vector of entries, with indexes into that vector +/// stored in three hashmaps. This allows for efficient lookups by any of the +/// three keys, while preventing duplicates. +/// +/// Not totally generic yet, just meant for the deployment use case. +#[derive_where(Clone, Debug, Default)] +pub(crate) struct TriMap { + entries: Vec, + // Invariant: the value (usize) in these maps are valid indexes into + // `entries`, and are a 1:1 mapping. + k1_to_entry: HashMap, + k2_to_entry: HashMap, + k3_to_entry: HashMap, +} + +// Note: Eq and PartialEq are not implemented for TriMap. Implementing them +// would need to be done with care, because TriMap is not semantically like an +// IndexMap: two maps are equivalent even if their entries are in a different +// order. + +/// The `Serialize` impl for `TriMap` serializes just the list of entries. +impl Serialize for TriMap +where + T: Serialize, +{ + fn serialize( + &self, + serializer: S, + ) -> Result { + // Serialize just the entries -- don't serialize the indexes. We'll + // rebuild the indexes on deserialization. 
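+        // (The three key-to-index maps are purely derived state, so nothing
+        // is lost by round-tripping only the entries.)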
+ self.entries.serialize(serializer) + } +} + +/// The `Deserialize` impl for `TriMap` deserializes the list of entries and +/// then rebuilds the indexes, producing an error if there are any duplicates. +impl<'de, T: TriMapEntry> Deserialize<'de> for TriMap +where + T: Deserialize<'de>, +{ + fn deserialize>( + deserializer: D, + ) -> Result { + // First, deserialize the entries. + let entries = Vec::::deserialize(deserializer)?; + + // Now build a map from scratch, inserting the entries sequentially. + // This will catch issues with duplicates. + let mut map = TriMap::new(); + for entry in entries { + map.insert_no_dups(entry).map_err(serde::de::Error::custom)?; + } + + Ok(map) + } +} + +pub(crate) trait TriMapEntry: Clone + fmt::Debug { + type K1: Eq + Hash + Clone + fmt::Debug; + type K2: Eq + Hash + Clone + fmt::Debug; + type K3: Eq + Hash + Clone + fmt::Debug; + + fn key1(&self) -> Self::K1; + fn key2(&self) -> Self::K2; + fn key3(&self) -> Self::K3; +} + +impl TriMap { + pub(crate) fn new() -> Self { + Self { + entries: Vec::new(), + k1_to_entry: HashMap::new(), + k2_to_entry: HashMap::new(), + k3_to_entry: HashMap::new(), + } + } + + pub(crate) fn iter(&self) -> impl Iterator { + self.entries.iter() + } + + /// Checks general invariants of the map. + /// + /// The code below always upholds these invariants, but it's useful to have + /// an explicit check for tests. + #[cfg(test)] + fn validate(&self) -> anyhow::Result<()> { + use anyhow::{ensure, Context}; + + // Check that all the maps are of the right size. + ensure!( + self.entries.len() == self.k1_to_entry.len(), + "key1 index has {} entries, but there are {} entries", + self.k1_to_entry.len(), + self.entries.len() + ); + ensure!( + self.entries.len() == self.k2_to_entry.len(), + "key2 index has {} entries, but there are {} entries", + self.k2_to_entry.len(), + self.entries.len() + ); + ensure!( + self.entries.len() == self.k3_to_entry.len(), + "key3 index has {} entries, but there are {} entries", + self.k3_to_entry.len(), + self.entries.len() + ); + + // Check that the indexes are all correct. + for (ix, entry) in self.entries.iter().enumerate() { + let key1 = entry.key1(); + let key2 = entry.key2(); + let key3 = entry.key3(); + + let ix1 = self.k1_to_entry.get(&key1).context(format!( + "entry at index {ix} ({entry:?}) has no key1 index" + ))?; + let ix2 = self.k2_to_entry.get(&key2).context(format!( + "entry at index {ix} ({entry:?}) has no key2 index" + ))?; + let ix3 = self.k3_to_entry.get(&key3).context(format!( + "entry at index {ix} ({entry:?}) has no key3 index" + ))?; + + if *ix1 != ix || *ix2 != ix || *ix3 != ix { + return Err(anyhow::anyhow!( + "entry at index {} has mismatched indexes: key1: {}, key2: {}, key3: {}", + ix, + ix1, + ix2, + ix3 + )); + } + } + + Ok(()) + } + + /// Inserts a value into the set, returning an error if any duplicates were + /// added. + pub(crate) fn insert_no_dups( + &mut self, + value: T, + ) -> Result<(), DuplicateEntry> { + let mut dups = BTreeSet::new(); + + // Check for duplicates *before* inserting the new entry, because we + // don't want to partially insert the new entry and then have to roll + // back. 
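+        // Each `detect_dup_or_insert` call below either hands back the vacant
+        // index slot for the key (so we can fill it in once all three checks
+        // pass) or records the index of the conflicting entry in `dups`.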
+ let e1 = detect_dup_or_insert( + self.k1_to_entry.entry(value.key1()), + &mut dups, + ); + let e2 = detect_dup_or_insert( + self.k2_to_entry.entry(value.key2()), + &mut dups, + ); + let e3 = detect_dup_or_insert( + self.k3_to_entry.entry(value.key3()), + &mut dups, + ); + + if !dups.is_empty() { + return Err(DuplicateEntry { + new: value, + dups: dups.iter().map(|ix| self.entries[*ix].clone()).collect(), + }); + } + + let next_index = self.entries.len(); + self.entries.push(value); + // e1, e2 and e3 are all Some because if they were None, dups would be + // non-empty, and we'd have bailed out earlier. + e1.unwrap().insert(next_index); + e2.unwrap().insert(next_index); + e3.unwrap().insert(next_index); + + Ok(()) + } + + pub(crate) fn get1(&self, key1: &Q) -> Option<&T> + where + T::K1: Borrow, + Q: Eq + Hash + ?Sized, + { + self.k1_to_entry.get(key1).map(|ix| &self.entries[*ix]) + } + + pub(crate) fn get2(&self, key2: &Q) -> Option<&T> + where + T::K2: Borrow, + Q: Eq + Hash + ?Sized, + { + self.k2_to_entry.get(key2).map(|ix| &self.entries[*ix]) + } + + pub(crate) fn get3(&self, key3: &Q) -> Option<&T> + where + T::K3: Borrow, + Q: Eq + Hash + ?Sized, + { + self.k3_to_entry.get(key3).map(|ix| &self.entries[*ix]) + } +} + +fn detect_dup_or_insert<'a, K>( + entry: hash_map::Entry<'a, K, usize>, + dups: &mut BTreeSet, +) -> Option> { + match entry { + hash_map::Entry::Vacant(slot) => Some(slot), + hash_map::Entry::Occupied(slot) => { + dups.insert(*slot.get()); + None + } + } +} + +#[derive(Debug)] +pub struct DuplicateEntry { + new: T, + dups: Vec, +} + +impl fmt::Display for DuplicateEntry { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "duplicate entry: {:?} conflicts with existing: {:?}", + self.new, self.dups + ) + } +} + +impl std::error::Error for DuplicateEntry {} + +#[cfg(test)] +mod tests { + use super::*; + use proptest::prelude::*; + use test_strategy::{proptest, Arbitrary}; + + #[derive( + Clone, Debug, Eq, PartialEq, Arbitrary, Serialize, Deserialize, + )] + struct TestEntry { + key1: u8, + key2: char, + key3: String, + value: String, + } + + impl TriMapEntry for TestEntry { + // These types are chosen to represent various kinds of keys in the + // proptest below. + // + // We use u8 since there can only be 256 values, increasing the + // likelihood of collisions in the proptest below. + type K1 = u8; + // char is chosen because the Arbitrary impl for it is biased towards + // ASCII, increasing the likelihood of collisions. + type K2 = char; + // String is a generally open-ended type that probably won't have many + // collisions. + type K3 = String; + + fn key1(&self) -> Self::K1 { + self.key1 + } + + fn key2(&self) -> Self::K2 { + self.key2 + } + + fn key3(&self) -> Self::K3 { + self.key3.clone() + } + } + + #[test] + fn test_insert_entry_no_dups() { + let mut map = TriMap::::new(); + + // Add an element. + let v1 = TestEntry { + key1: 0, + key2: 'a', + key3: "x".to_string(), + value: "v".to_string(), + }; + map.insert_no_dups(v1.clone()).unwrap(); + + // Add an exact duplicate, which should error out. + let error = map.insert_no_dups(v1.clone()).unwrap_err(); + assert_eq!(&error.new, &v1); + assert_eq!(error.dups, vec![v1.clone()]); + + // Add a duplicate against just key1, which should error out. 
+ let v2 = TestEntry { + key1: 0, + key2: 'b', + key3: "y".to_string(), + value: "v".to_string(), + }; + let error = map.insert_no_dups(v2.clone()).unwrap_err(); + assert_eq!(&error.new, &v2); + assert_eq!(error.dups, vec![v1.clone()]); + + // Add a duplicate against just key2, which should error out. + let v3 = TestEntry { + key1: 1, + key2: 'a', + key3: "y".to_string(), + value: "v".to_string(), + }; + let error = map.insert_no_dups(v3.clone()).unwrap_err(); + assert_eq!(&error.new, &v3); + + // Add a duplicate against just key3, which should error out. + let v4 = TestEntry { + key1: 1, + key2: 'b', + key3: "x".to_string(), + value: "v".to_string(), + }; + let error = map.insert_no_dups(v4.clone()).unwrap_err(); + assert_eq!(&error.new, &v4); + + // Add an entry that doesn't have any conflicts. + let v5 = TestEntry { + key1: 1, + key2: 'b', + key3: "y".to_string(), + value: "v".to_string(), + }; + map.insert_no_dups(v5.clone()).unwrap(); + } + + /// Represents a naive version of `TriMap` that doesn't have any indexes + /// and does linear scans. + #[derive(Debug)] + struct NaiveTriMap { + entries: Vec, + } + + impl NaiveTriMap { + fn new() -> Self { + Self { entries: Vec::new() } + } + + fn insert_entry_no_dups( + &mut self, + entry: TestEntry, + ) -> Result<(), DuplicateEntry> { + let dups = self + .entries + .iter() + .filter(|e| { + e.key1 == entry.key1 + || e.key2 == entry.key2 + || e.key3 == entry.key3 + }) + .cloned() + .collect::>(); + + if !dups.is_empty() { + return Err(DuplicateEntry { new: entry, dups }); + } + + self.entries.push(entry); + Ok(()) + } + } + + #[derive(Debug, Arbitrary)] + enum Operation { + // Make inserts a bit more common to try and fill up the map. + #[weight(3)] + Insert(TestEntry), + Get1(u8), + Get2(char), + Get3(String), + } + + #[proptest] + fn proptest_serialize_roundtrip(values: Vec) { + let mut map = TriMap::::new(); + let mut first_error = None; + for value in values.clone() { + // Ignore errors from duplicates which are quite possible to occur + // here, since we're just testing serialization. But store the + // first error to ensure that deserialization returns errors. + if let Err(error) = map.insert_no_dups(value) { + if first_error.is_none() { + first_error = Some(error); + } + } + } + + let serialized = serde_json::to_string(&map).unwrap(); + let deserialized: TriMap = + serde_json::from_str(&serialized).unwrap(); + + assert_eq!(map.entries, deserialized.entries, "entries match"); + // All of the indexes should be the same too. + assert_eq!( + map.k1_to_entry, deserialized.k1_to_entry, + "k1 indexes match" + ); + assert_eq!( + map.k2_to_entry, deserialized.k2_to_entry, + "k2 indexes match" + ); + assert_eq!( + map.k3_to_entry, deserialized.k3_to_entry, + "k3 indexes match" + ); + + // Try deserializing the full list of values directly, and see that the + // error reported is the same as first_error. + // + // Here we rely on the fact that a TriMap is serialized as just a + // vector. 
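+        // Serializing the raw `values` (duplicates included) therefore
+        // produces input that the `Deserialize` impl pushes through the same
+        // `insert_no_dups` path, so it should hit the same first error.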
+ let serialized = serde_json::to_string(&values).unwrap(); + let res: Result, _> = + serde_json::from_str(&serialized); + match (first_error, res) { + (None, Ok(_)) => {} // No error, should be fine + (Some(first_error), Ok(_)) => { + panic!( + "expected error ({first_error}), but deserialization succeeded" + ) + } + (None, Err(error)) => { + panic!("unexpected error: {error}, deserialization should have succeeded") + } + (Some(first_error), Err(error)) => { + // first_error is the error from the map, and error is the + // deserialization error (which should always be a custom + // error, stored as a string). + let expected = first_error.to_string(); + let actual = error.to_string(); + assert_eq!(actual, expected, "error matches"); + } + } + } + + #[proptest(cases = 16)] + fn proptest_ops( + #[strategy(prop::collection::vec(any::(), 0..1024))] + ops: Vec, + ) { + let mut map = TriMap::::new(); + let mut naive_map = NaiveTriMap::new(); + + // Now perform the operations on both maps. + for op in ops { + match op { + Operation::Insert(entry) => { + let map_res = map.insert_no_dups(entry.clone()); + let naive_res = + naive_map.insert_entry_no_dups(entry.clone()); + + assert_eq!(map_res.is_ok(), naive_res.is_ok()); + if let Err(map_err) = map_res { + let naive_err = naive_res.unwrap_err(); + assert_eq!(map_err.new, naive_err.new); + assert_eq!(map_err.dups, naive_err.dups); + } + + map.validate().expect("map should be valid"); + } + Operation::Get1(key1) => { + let map_res = map.get1(&key1); + let naive_res = + naive_map.entries.iter().find(|e| e.key1 == key1); + + assert_eq!(map_res, naive_res); + } + Operation::Get2(key2) => { + let map_res = map.get2(&key2); + let naive_res = + naive_map.entries.iter().find(|e| e.key2 == key2); + + assert_eq!(map_res, naive_res); + } + Operation::Get3(key3) => { + let map_res = map.get3(&key3); + let naive_res = + naive_map.entries.iter().find(|e| e.key3 == key3); + + assert_eq!(map_res, naive_res); + } + } + } + } +} diff --git a/nexus/types/src/deployment/zone_type.rs b/nexus/types/src/deployment/zone_type.rs new file mode 100644 index 0000000000..9f663015cd --- /dev/null +++ b/nexus/types/src/deployment/zone_type.rs @@ -0,0 +1,328 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Types representing types of Omicron zones managed by blueprints +//! +//! These types are closely related to the `OmicronZoneType` in sled-agent's +//! internal API, but include additional information needed by Reconfigurator +//! that is not needed by sled-agent. 
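// A minimal usage sketch, assuming `BlueprintZoneType`, the
// `blueprint_zone_type` structs, `ZoneKind`, and `OmicronZoneType` defined or
// imported below are in scope; the NTP server name is a placeholder.
fn internal_ntp_zone_sketch() -> OmicronZoneType {
    use std::net::{Ipv6Addr, SocketAddrV6};

    let zone =
        BlueprintZoneType::InternalNtp(blueprint_zone_type::InternalNtp {
            address: SocketAddrV6::new(Ipv6Addr::LOCALHOST, 123, 0, 0),
            ntp_servers: vec!["ntp.example.com".to_string()],
            dns_servers: vec![],
            domain: None,
        });

    // Helpers defined on `BlueprintZoneType` below.
    assert!(zone.is_ntp());
    assert!(matches!(zone.kind(), ZoneKind::InternalNtp));

    // The `From` impl below maps each variant onto the sled-agent
    // representation.
    zone.into()
}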
+ +use super::OmicronZoneExternalIp; +use omicron_common::api::internal::shared::NetworkInterface; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use sled_agent_client::types::OmicronZoneType; +use sled_agent_client::ZoneKind; + +#[derive(Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum BlueprintZoneType { + BoundaryNtp(blueprint_zone_type::BoundaryNtp), + Clickhouse(blueprint_zone_type::Clickhouse), + ClickhouseKeeper(blueprint_zone_type::ClickhouseKeeper), + CockroachDb(blueprint_zone_type::CockroachDb), + Crucible(blueprint_zone_type::Crucible), + CruciblePantry(blueprint_zone_type::CruciblePantry), + ExternalDns(blueprint_zone_type::ExternalDns), + InternalDns(blueprint_zone_type::InternalDns), + InternalNtp(blueprint_zone_type::InternalNtp), + Nexus(blueprint_zone_type::Nexus), + Oximeter(blueprint_zone_type::Oximeter), +} + +impl BlueprintZoneType { + pub fn external_networking( + &self, + ) -> Option<(OmicronZoneExternalIp, &NetworkInterface)> { + match self { + BlueprintZoneType::Nexus(nexus) => Some(( + OmicronZoneExternalIp::Floating(nexus.external_ip), + &nexus.nic, + )), + BlueprintZoneType::ExternalDns(dns) => Some(( + OmicronZoneExternalIp::Floating(dns.dns_address.into_ip()), + &dns.nic, + )), + BlueprintZoneType::BoundaryNtp(ntp) => { + Some((OmicronZoneExternalIp::Snat(ntp.external_ip), &ntp.nic)) + } + BlueprintZoneType::Clickhouse(_) + | BlueprintZoneType::ClickhouseKeeper(_) + | BlueprintZoneType::CockroachDb(_) + | BlueprintZoneType::Crucible(_) + | BlueprintZoneType::CruciblePantry(_) + | BlueprintZoneType::InternalDns(_) + | BlueprintZoneType::InternalNtp(_) + | BlueprintZoneType::Oximeter(_) => None, + } + } + + /// Identifies whether this is an NTP zone (any flavor) + pub fn is_ntp(&self) -> bool { + match self { + BlueprintZoneType::InternalNtp(_) + | BlueprintZoneType::BoundaryNtp(_) => true, + BlueprintZoneType::Nexus(_) + | BlueprintZoneType::ExternalDns(_) + | BlueprintZoneType::Clickhouse(_) + | BlueprintZoneType::ClickhouseKeeper(_) + | BlueprintZoneType::CockroachDb(_) + | BlueprintZoneType::Crucible(_) + | BlueprintZoneType::CruciblePantry(_) + | BlueprintZoneType::InternalDns(_) + | BlueprintZoneType::Oximeter(_) => false, + } + } + + /// Identifies whether this is a Nexus zone + pub fn is_nexus(&self) -> bool { + match self { + BlueprintZoneType::Nexus(_) => true, + BlueprintZoneType::BoundaryNtp(_) + | BlueprintZoneType::ExternalDns(_) + | BlueprintZoneType::Clickhouse(_) + | BlueprintZoneType::ClickhouseKeeper(_) + | BlueprintZoneType::CockroachDb(_) + | BlueprintZoneType::Crucible(_) + | BlueprintZoneType::CruciblePantry(_) + | BlueprintZoneType::InternalDns(_) + | BlueprintZoneType::InternalNtp(_) + | BlueprintZoneType::Oximeter(_) => false, + } + } + + /// Identifies whether this a Crucible (not Crucible pantry) zone + pub fn is_crucible(&self) -> bool { + match self { + BlueprintZoneType::Crucible(_) => true, + BlueprintZoneType::BoundaryNtp(_) + | BlueprintZoneType::Clickhouse(_) + | BlueprintZoneType::ClickhouseKeeper(_) + | BlueprintZoneType::CockroachDb(_) + | BlueprintZoneType::CruciblePantry(_) + | BlueprintZoneType::ExternalDns(_) + | BlueprintZoneType::InternalDns(_) + | BlueprintZoneType::InternalNtp(_) + | BlueprintZoneType::Nexus(_) + | BlueprintZoneType::Oximeter(_) => false, + } + } +} + +impl From for OmicronZoneType { + fn from(zone_type: BlueprintZoneType) -> Self { + match zone_type { + BlueprintZoneType::BoundaryNtp(zone) => 
Self::BoundaryNtp { + address: zone.address.to_string(), + ntp_servers: zone.ntp_servers, + dns_servers: zone.dns_servers, + domain: zone.domain, + nic: zone.nic, + snat_cfg: zone.external_ip.snat_cfg, + }, + BlueprintZoneType::Clickhouse(zone) => Self::Clickhouse { + address: zone.address.to_string(), + dataset: zone.dataset, + }, + BlueprintZoneType::ClickhouseKeeper(zone) => { + Self::ClickhouseKeeper { + address: zone.address.to_string(), + dataset: zone.dataset, + } + } + BlueprintZoneType::CockroachDb(zone) => Self::CockroachDb { + address: zone.address.to_string(), + dataset: zone.dataset, + }, + BlueprintZoneType::Crucible(zone) => Self::Crucible { + address: zone.address.to_string(), + dataset: zone.dataset, + }, + BlueprintZoneType::CruciblePantry(zone) => { + Self::CruciblePantry { address: zone.address.to_string() } + } + BlueprintZoneType::ExternalDns(zone) => Self::ExternalDns { + dataset: zone.dataset, + http_address: zone.http_address.to_string(), + dns_address: zone.dns_address.addr.to_string(), + nic: zone.nic, + }, + BlueprintZoneType::InternalDns(zone) => Self::InternalDns { + dataset: zone.dataset, + http_address: zone.http_address.to_string(), + dns_address: zone.dns_address.to_string(), + gz_address: zone.gz_address, + gz_address_index: zone.gz_address_index, + }, + BlueprintZoneType::InternalNtp(zone) => Self::InternalNtp { + address: zone.address.to_string(), + ntp_servers: zone.ntp_servers, + dns_servers: zone.dns_servers, + domain: zone.domain, + }, + BlueprintZoneType::Nexus(zone) => Self::Nexus { + internal_address: zone.internal_address.to_string(), + external_ip: zone.external_ip.ip, + nic: zone.nic, + external_tls: zone.external_tls, + external_dns_servers: zone.external_dns_servers, + }, + BlueprintZoneType::Oximeter(zone) => { + Self::Oximeter { address: zone.address.to_string() } + } + } + } +} + +impl BlueprintZoneType { + /// Returns the [`ZoneKind`] corresponding to this variant. + pub fn kind(&self) -> ZoneKind { + match self { + Self::BoundaryNtp(_) => ZoneKind::BoundaryNtp, + Self::Clickhouse(_) => ZoneKind::Clickhouse, + Self::ClickhouseKeeper(_) => ZoneKind::ClickhouseKeeper, + Self::CockroachDb(_) => ZoneKind::CockroachDb, + Self::Crucible(_) => ZoneKind::Crucible, + Self::CruciblePantry(_) => ZoneKind::CruciblePantry, + Self::ExternalDns(_) => ZoneKind::ExternalDns, + Self::InternalDns(_) => ZoneKind::InternalDns, + Self::InternalNtp(_) => ZoneKind::InternalNtp, + Self::Nexus(_) => ZoneKind::Nexus, + Self::Oximeter(_) => ZoneKind::Oximeter, + } + } +} + +pub mod blueprint_zone_type { + use crate::deployment::OmicronZoneExternalFloatingAddr; + use crate::deployment::OmicronZoneExternalFloatingIp; + use crate::deployment::OmicronZoneExternalSnatIp; + use crate::inventory::OmicronZoneDataset; + use omicron_common::api::internal::shared::NetworkInterface; + use schemars::JsonSchema; + use serde::Deserialize; + use serde::Serialize; + use std::net::IpAddr; + use std::net::Ipv6Addr; + use std::net::SocketAddrV6; + + #[derive( + Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize, + )] + pub struct BoundaryNtp { + pub address: SocketAddrV6, + pub ntp_servers: Vec, + pub dns_servers: Vec, + pub domain: Option, + /// The service vNIC providing outbound connectivity using OPTE. 
+ pub nic: NetworkInterface, + pub external_ip: OmicronZoneExternalSnatIp, + } + + #[derive( + Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize, + )] + pub struct Clickhouse { + pub address: SocketAddrV6, + pub dataset: OmicronZoneDataset, + } + + #[derive( + Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize, + )] + pub struct ClickhouseKeeper { + pub address: SocketAddrV6, + pub dataset: OmicronZoneDataset, + } + + #[derive( + Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize, + )] + pub struct CockroachDb { + pub address: SocketAddrV6, + pub dataset: OmicronZoneDataset, + } + + #[derive( + Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize, + )] + pub struct Crucible { + pub address: SocketAddrV6, + pub dataset: OmicronZoneDataset, + } + + #[derive( + Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize, + )] + pub struct CruciblePantry { + pub address: SocketAddrV6, + } + + #[derive( + Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize, + )] + pub struct ExternalDns { + pub dataset: OmicronZoneDataset, + /// The address at which the external DNS server API is reachable. + pub http_address: SocketAddrV6, + /// The address at which the external DNS server is reachable. + pub dns_address: OmicronZoneExternalFloatingAddr, + /// The service vNIC providing external connectivity using OPTE. + pub nic: NetworkInterface, + } + + #[derive( + Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize, + )] + pub struct InternalDns { + pub dataset: OmicronZoneDataset, + pub http_address: SocketAddrV6, + pub dns_address: SocketAddrV6, + /// The addresses in the global zone which should be created + /// + /// For the DNS service, which exists outside the sleds's typical subnet + /// - adding an address in the GZ is necessary to allow inter-zone + /// traffic routing. + pub gz_address: Ipv6Addr, + + /// The address is also identified with an auxiliary bit of information + /// to ensure that the created global zone address can have a unique + /// name. + pub gz_address_index: u32, + } + + #[derive( + Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize, + )] + pub struct InternalNtp { + pub address: SocketAddrV6, + pub ntp_servers: Vec, + pub dns_servers: Vec, + pub domain: Option, + } + + #[derive( + Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize, + )] + pub struct Nexus { + /// The address at which the internal nexus server is reachable. + pub internal_address: SocketAddrV6, + /// The address at which the external nexus server is reachable. + pub external_ip: OmicronZoneExternalFloatingIp, + /// The service vNIC providing external connectivity using OPTE. + pub nic: NetworkInterface, + /// Whether Nexus's external endpoint should use TLS + pub external_tls: bool, + /// External DNS servers Nexus can use to resolve external hosts. 
+ pub external_dns_servers: Vec, + } + + #[derive( + Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize, + )] + pub struct Oximeter { + pub address: SocketAddrV6, + } +} diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 51d0869821..3f53503cc2 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -9,11 +9,12 @@ use crate::external_api::shared; use base64::Engine; use chrono::{DateTime, Utc}; use omicron_common::api::external::{ - AddressLotKind, BfdMode, ByteCount, Hostname, IdentityMetadataCreateParams, - IdentityMetadataUpdateParams, InstanceCpuCount, IpNet, Ipv4Net, Ipv6Net, - Name, NameOrId, PaginationOrder, RouteDestination, RouteTarget, - SemverVersion, + AddressLotKind, AllowedSourceIps, BfdMode, BgpPeer, ByteCount, Hostname, + IdentityMetadataCreateParams, IdentityMetadataUpdateParams, + InstanceCpuCount, LinkFec, LinkSpeed, Name, NameOrId, PaginationOrder, + RouteDestination, RouteTarget, SemverVersion, }; +use oxnet::{IpNet, Ipv4Net, Ipv6Net}; use schemars::JsonSchema; use serde::{ de::{self, Visitor}, @@ -88,6 +89,7 @@ id_path_param!(GroupPath, group_id, "group"); // ID that can be used to deterministically generate the UUID. id_path_param!(SledPath, sled_id, "sled"); id_path_param!(SwitchPath, switch_id, "switch"); +id_path_param!(PhysicalDiskPath, disk_id, "physical disk"); // Internal API parameters id_path_param!(BlueprintPath, blueprint_id, "blueprint"); @@ -1288,7 +1290,7 @@ impl Into for BlockSize { impl From for u64 { fn from(bs: BlockSize) -> u64 { - bs.0 as u64 + u64::from(bs.0) } } @@ -1518,88 +1520,6 @@ pub enum SwitchPortGeometry { Sfp28x4, } -/// The forward error correction mode of a link. -#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema)] -#[serde(rename_all = "snake_case")] -pub enum LinkFec { - /// Firecode foward error correction. - Firecode, - /// No forward error correction. - None, - /// Reed-Solomon forward error correction. - Rs, -} - -impl From for LinkFec { - fn from(x: omicron_common::api::internal::shared::PortFec) -> LinkFec { - match x { - omicron_common::api::internal::shared::PortFec::Firecode => { - Self::Firecode - } - omicron_common::api::internal::shared::PortFec::None => Self::None, - omicron_common::api::internal::shared::PortFec::Rs => Self::Rs, - } - } -} - -/// The speed of a link. -#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema)] -#[serde(rename_all = "snake_case")] -pub enum LinkSpeed { - /// Zero gigabits per second. - Speed0G, - /// 1 gigabit per second. - Speed1G, - /// 10 gigabits per second. - Speed10G, - /// 25 gigabits per second. - Speed25G, - /// 40 gigabits per second. - Speed40G, - /// 50 gigabits per second. - Speed50G, - /// 100 gigabits per second. - Speed100G, - /// 200 gigabits per second. - Speed200G, - /// 400 gigabits per second. 
- Speed400G, -} - -impl From for LinkSpeed { - fn from(x: omicron_common::api::internal::shared::PortSpeed) -> Self { - match x { - omicron_common::api::internal::shared::PortSpeed::Speed0G => { - Self::Speed0G - } - omicron_common::api::internal::shared::PortSpeed::Speed1G => { - Self::Speed1G - } - omicron_common::api::internal::shared::PortSpeed::Speed10G => { - Self::Speed10G - } - omicron_common::api::internal::shared::PortSpeed::Speed25G => { - Self::Speed25G - } - omicron_common::api::internal::shared::PortSpeed::Speed40G => { - Self::Speed40G - } - omicron_common::api::internal::shared::PortSpeed::Speed50G => { - Self::Speed50G - } - omicron_common::api::internal::shared::PortSpeed::Speed100G => { - Self::Speed100G - } - omicron_common::api::internal::shared::PortSpeed::Speed200G => { - Self::Speed200G - } - omicron_common::api::internal::shared::PortSpeed::Speed400G => { - Self::Speed400G - } - } - } -} - /// Switch link configuration. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct LinkConfigCreate { @@ -1709,46 +1629,6 @@ pub struct BgpPeerConfig { pub peers: Vec, } -/// A BGP peer configuration for an interface. Includes the set of announcements -/// that will be advertised to the peer identified by `addr`. The `bgp_config` -/// parameter is a reference to global BGP parameters. The `interface_name` -/// indicates what interface the peer should be contacted on. -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct BgpPeer { - /// The set of announcements advertised by the peer. - pub bgp_announce_set: NameOrId, - - /// The global BGP configuration used for establishing a session with this - /// peer. - pub bgp_config: NameOrId, - - /// The name of interface to peer on. This is relative to the port - /// configuration this BGP peer configuration is a part of. For example this - /// value could be phy0 to refer to a primary physical interface. Or it - /// could be vlan47 to refer to a VLAN interface. - pub interface_name: String, - - /// The address of the host to peer with. - pub addr: IpAddr, - - /// How long to hold peer connections between keppalives (seconds). - pub hold_time: u32, - - /// How long to hold a peer in idle before attempting a new session - /// (seconds). - pub idle_hold_time: u32, - - /// How long to delay sending an open request after establishing a TCP - /// session (seconds). - pub delay_open: u32, - - /// How long to to wait between TCP connection retries (seconds). - pub connect_retry: u32, - - /// How often to send keepalive requests (seconds). - pub keepalive: u32, -} - /// Parameters for creating a named set of BGP announcements. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct BgpAnnounceSetCreate { @@ -1805,6 +1685,14 @@ pub struct BgpConfigCreate { /// Optional virtual routing and forwarding identifier for this BGP /// configuration. pub vrf: Option, + + // Dynamic BGP policy is not yet available so we skip adding it to the API + /// A shaper program to apply to outgoing open and update messages. + #[serde(skip)] + pub shaper: Option, + /// A checker program to apply to incoming open and update messages. + #[serde(skip)] + pub checker: Option, } /// Select a BGP status information by BGP config id. @@ -2054,3 +1942,19 @@ pub struct ProbeListSelector { /// A name or id to use when selecting a probe. pub name_or_id: Option, } + +/// A timeseries query string, written in the Oximeter query language. 
+#[derive(Deserialize, JsonSchema, Serialize)] +pub struct TimeseriesQuery { + /// A timeseries query string, written in the Oximeter query language. + pub query: String, +} + +// Allowed source IPs + +/// Parameters for updating allowed source IPs +#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] +pub struct AllowListUpdate { + /// The new list of allowed source IPs. + pub allowed_ips: AllowedSourceIps, +} diff --git a/nexus/types/src/external_api/shared.rs b/nexus/types/src/external_api/shared.rs index 2f65f09c29..96843ba6a4 100644 --- a/nexus/types/src/external_api/shared.rs +++ b/nexus/types/src/external_api/shared.rs @@ -335,6 +335,39 @@ pub struct BfdStatus { pub mode: BfdMode, } +/// Opaque object representing link state. The contents of this object are not +/// yet stable. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct SwitchLinkState { + link: serde_json::Value, + monitors: Option, +} + +impl SwitchLinkState { + pub fn new( + link: serde_json::Value, + monitors: Option, + ) -> Self { + Self { link, monitors } + } +} + +impl JsonSchema for SwitchLinkState { + fn json_schema( + gen: &mut schemars::gen::SchemaGenerator, + ) -> schemars::schema::Schema { + let obj = schemars::schema::Schema::Object( + schemars::schema::SchemaObject::default(), + ); + gen.definitions_mut().insert(Self::schema_name(), obj.clone()); + obj + } + + fn schema_name() -> String { + "SwitchLinkState".to_owned() + } +} + #[cfg(test)] mod test { use super::Policy; diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index fcea302f72..2fa94b0e80 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -12,9 +12,11 @@ use api_identity::ObjectIdentity; use chrono::DateTime; use chrono::Utc; use omicron_common::api::external::{ - ByteCount, Digest, Error, IdentityMetadata, InstanceState, Ipv4Net, - Ipv6Net, Name, ObjectIdentity, RoleName, SimpleIdentity, + AllowedSourceIps as ExternalAllowedSourceIps, ByteCount, Digest, Error, + IdentityMetadata, InstanceState, Name, ObjectIdentity, RoleName, + SimpleIdentity, }; +use oxnet::{Ipv4Net, Ipv6Net}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; @@ -449,6 +451,8 @@ pub struct FloatingIp { pub identity: IdentityMetadata, /// The IP address held by this resource. pub ip: IpAddr, + /// The ID of the IP pool this resource belongs to. + pub ip_pool_id: Uuid, /// The project this resource exists within. pub project_id: Uuid, /// The ID of the instance that this Floating IP is attached to, @@ -592,31 +596,6 @@ impl SledPolicy { Self::InService { provision_policy: SledProvisionPolicy::Provisionable } } - /// Returns the list of all in-service policies. - pub fn all_in_service() -> &'static [Self] { - &[ - Self::InService { - provision_policy: SledProvisionPolicy::Provisionable, - }, - Self::InService { - provision_policy: SledProvisionPolicy::NonProvisionable, - }, - ] - } - - /// Returns true if the sled can have services provisioned on it. - pub fn is_provisionable(&self) -> bool { - match self { - Self::InService { - provision_policy: SledProvisionPolicy::Provisionable, - } => true, - Self::InService { - provision_policy: SledProvisionPolicy::NonProvisionable, - } - | Self::Expunged => false, - } - } - /// Returns the provision policy, if the sled is in service. pub fn provision_policy(&self) -> Option { match self { @@ -625,9 +604,13 @@ impl SledPolicy { } } - /// Returns true if the sled can be decommissioned in this state. 
+ /// Returns true if the sled can be decommissioned with this policy + /// + /// This is a method here, rather than being a variant on `SledFilter`, + /// because the "decommissionable" condition only has meaning for policies, + /// not states. pub fn is_decommissionable(&self) -> bool { - // This should be kept in sync with decommissionable_states below. + // This should be kept in sync with `all_decommissionable` below. match self { Self::InService { .. } => false, Self::Expunged => true, @@ -636,6 +619,10 @@ impl SledPolicy { /// Returns all the possible policies a sled can have for it to be /// decommissioned. + /// + /// This is a method here, rather than being a variant on `SledFilter`, + /// because the "decommissionable" condition only has meaning for policies, + /// not states. pub fn all_decommissionable() -> &'static [Self] { &[Self::Expunged] } @@ -649,7 +636,7 @@ impl fmt::Display for SledPolicy { } => write!(f, "in service"), SledPolicy::InService { provision_policy: SledProvisionPolicy::NonProvisionable, - } => write!(f, "in service (not provisionable)"), + } => write!(f, "not provisionable"), SledPolicy::Expunged => write!(f, "expunged"), } } @@ -680,21 +667,6 @@ pub enum SledState { Decommissioned, } -impl SledState { - /// Returns true if the sled state makes it eligible for services that - /// aren't required to be on every sled. - /// - /// For example, NTP must exist on every sled, but Nexus does not have to. - pub fn is_eligible_for_discretionary_services(&self) -> bool { - // (Explicit match, so that this fails to compile if a new state is - // added.) - match self { - SledState::Active => true, - SledState::Decommissioned => false, - } - } -} - impl fmt::Display for SledState { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { @@ -742,6 +714,11 @@ pub struct PhysicalDisk { #[serde(flatten)] pub identity: AssetIdentityMetadata, + /// The operator-defined policy for a physical disk. + pub policy: PhysicalDiskPolicy, + /// The current state Nexus believes the disk to be in. + pub state: PhysicalDiskState, + /// The sled to which this disk is attached, if any. pub sled_id: Option, @@ -752,6 +729,97 @@ pub struct PhysicalDisk { pub form_factor: PhysicalDiskKind, } +/// The operator-defined policy of a physical disk. +#[derive( + Copy, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, +)] +#[serde(rename_all = "snake_case", tag = "kind")] +pub enum PhysicalDiskPolicy { + /// The operator has indicated that the disk is in-service. + InService, + + /// The operator has indicated that the disk has been permanently removed + /// from service. + /// + /// This is a terminal state: once a particular disk ID is expunged, it + /// will never return to service. (The actual hardware may be reused, but + /// it will be treated as a brand-new disk.) + /// + /// An expunged disk is always non-provisionable. + Expunged, + // NOTE: if you add a new value here, be sure to add it to + // the `IntoEnumIterator` impl below! +} + +// Can't automatically derive strum::EnumIter because that doesn't provide a +// way to iterate over nested enums. +impl IntoEnumIterator for PhysicalDiskPolicy { + type Iterator = std::array::IntoIter; + + fn iter() -> Self::Iterator { + [Self::InService, Self::Expunged].into_iter() + } +} + +impl PhysicalDiskPolicy { + /// Creates a new `PhysicalDiskPolicy` that is in-service. + pub fn in_service() -> Self { + Self::InService + } + + /// Returns true if the disk can be decommissioned in this state. 
+ pub fn is_decommissionable(&self) -> bool { + // This should be kept in sync with decommissionable_states below. + match self { + Self::InService => false, + Self::Expunged => true, + } + } +} + +impl fmt::Display for PhysicalDiskPolicy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PhysicalDiskPolicy::InService => write!(f, "in service"), + PhysicalDiskPolicy::Expunged => write!(f, "expunged"), + } + } +} + +/// The current state of the disk, as determined by Nexus. +#[derive( + Copy, + Clone, + Debug, + Deserialize, + Serialize, + JsonSchema, + PartialEq, + Eq, + EnumIter, +)] +#[serde(rename_all = "snake_case")] +pub enum PhysicalDiskState { + /// The disk is currently active, and has resources allocated on it. + Active, + + /// The disk has been permanently removed from service. + /// + /// This is a terminal state: once a particular disk ID is decommissioned, + /// it will never return to service. (The actual hardware may be reused, + /// but it will be treated as a brand-new disk.) + Decommissioned, +} + +impl fmt::Display for PhysicalDiskState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PhysicalDiskState::Active => write!(f, "active"), + PhysicalDiskState::Decommissioned => write!(f, "decommissioned"), + } + } +} + // SILO USERS /// View of a User @@ -884,3 +952,16 @@ pub struct Ping { /// returns anything at all. pub status: PingStatus, } + +// ALLOWED SOURCE IPS + +/// Allowlist of IPs or subnets that can make requests to user-facing services. +#[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] +pub struct AllowList { + /// Time the list was created. + pub time_created: DateTime, + /// Time the list was last modified. + pub time_modified: DateTime, + /// The allowlist of IPs or subnets. + pub allowed_ips: ExternalAllowedSourceIps, +} diff --git a/nexus/types/src/internal_api/params.rs b/nexus/types/src/internal_api/params.rs index 9f80d313fd..143ca1be8b 100644 --- a/nexus/types/src/internal_api/params.rs +++ b/nexus/types/src/internal_api/params.rs @@ -13,6 +13,7 @@ use omicron_common::api::external::ByteCount; use omicron_common::api::external::Generation; use omicron_common::api::external::MacAddr; use omicron_common::api::external::Name; +use omicron_common::api::internal::shared::AllowedSourceIps; use omicron_common::api::internal::shared::ExternalPortDiscovery; use omicron_common::api::internal::shared::RackNetworkConfig; use omicron_common::api::internal::shared::SourceNatConfig; @@ -82,43 +83,25 @@ pub struct SwitchPutResponse {} #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] pub struct PhysicalDiskPutRequest { - pub vendor: String, - pub serial: String, - pub model: String, - - pub variant: PhysicalDiskKind, - pub sled_id: Uuid, -} - -#[derive(Serialize, Deserialize, JsonSchema)] -pub struct PhysicalDiskPutResponse {} + pub id: Uuid, -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] -pub struct PhysicalDiskDeleteRequest { pub vendor: String, pub serial: String, pub model: String, + pub variant: PhysicalDiskKind, pub sled_id: Uuid, } -/// Sent by a sled agent on startup to Nexus to request further instruction +/// Identifies information about a Zpool that should be part of the control +/// plane. #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] pub struct ZpoolPutRequest { - /// Total size of the pool. 
- pub size: ByteCount, - - // Information to identify the disk to which this zpool belongs - pub disk_vendor: String, - pub disk_serial: String, - pub disk_model: String, - // TODO: We could include any other data from `ZpoolInfo` we want, - // such as "allocated/free" space and pool health? + pub id: Uuid, + pub sled_id: Uuid, + pub physical_disk_id: Uuid, } -#[derive(Serialize, Deserialize, JsonSchema)] -pub struct ZpoolPutResponse {} - /// Describes the purpose of the dataset. #[derive( Debug, Serialize, Deserialize, JsonSchema, Clone, Copy, PartialEq, Eq, @@ -211,20 +194,6 @@ impl fmt::Display for ServiceKind { } } -/// Describes a service on a sled -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] -pub struct ServicePutRequest { - pub service_id: Uuid, - pub sled_id: Uuid, - pub zone_id: Option, - - /// Address on which a service is responding to requests. - pub address: SocketAddrV6, - - /// Type of service being inserted. - pub kind: ServiceKind, -} - #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] pub struct DatasetCreateRequest { pub zpool_id: Uuid, @@ -251,8 +220,13 @@ impl std::fmt::Debug for Certificate { pub struct RackInitializationRequest { /// Blueprint describing services initialized by RSS. pub blueprint: Blueprint, - /// Services on the rack which have been created by RSS. - pub services: Vec, + + /// "Managed" physical disks owned by the control plane + pub physical_disks: Vec, + + /// Zpools created within the physical disks created by the control plane. + pub zpools: Vec, + /// Datasets on the rack which have been provisioned by RSS. pub datasets: Vec, /// Ranges of the service IP pool which may be used for internal services, @@ -270,6 +244,8 @@ pub struct RackInitializationRequest { pub external_port_count: ExternalPortDiscovery, /// Initial rack network configuration pub rack_network_config: RackNetworkConfig, + /// IPs or subnets allowed to make requests to user-facing services + pub allowed_source_ips: AllowedSourceIps, } pub type DnsConfigParams = dns_service_client::types::DnsConfigParams; diff --git a/nexus/types/src/internal_api/views.rs b/nexus/types/src/internal_api/views.rs index b7a097431b..fde2d07072 100644 --- a/nexus/types/src/internal_api/views.rs +++ b/nexus/types/src/internal_api/views.rs @@ -141,7 +141,7 @@ impl From for SagaState { }, .. 
} => SagaState::Failed { - error_node_name: error_node_name, + error_node_name, error_info: SagaErrorInfo::from(error_source), }, } diff --git a/nexus/types/src/inventory.rs b/nexus/types/src/inventory.rs index 40da26047b..6acbcaca6a 100644 --- a/nexus/types/src/inventory.rs +++ b/nexus/types/src/inventory.rs @@ -21,6 +21,10 @@ use omicron_common::api::external::ByteCount; pub use omicron_common::api::internal::shared::NetworkInterface; pub use omicron_common::api::internal::shared::NetworkInterfaceKind; pub use omicron_common::api::internal::shared::SourceNatConfig; +pub use omicron_common::zpool_name::ZpoolName; +use omicron_uuid_kinds::CollectionUuid; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::ZpoolUuid; use serde::{Deserialize, Serialize}; use serde_with::serde_as; pub use sled_agent_client::types::OmicronZoneConfig; @@ -28,13 +32,11 @@ pub use sled_agent_client::types::OmicronZoneDataset; pub use sled_agent_client::types::OmicronZoneType; pub use sled_agent_client::types::OmicronZonesConfig; pub use sled_agent_client::types::SledRole; -pub use sled_agent_client::types::ZpoolName; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::net::SocketAddrV6; use std::sync::Arc; use strum::EnumIter; -use uuid::Uuid; /// Results of collecting hardware/software inventory from various Omicron /// components @@ -55,7 +57,7 @@ use uuid::Uuid; #[derive(Debug, Eq, PartialEq, Clone, Serialize, Deserialize)] pub struct Collection { /// unique identifier for this collection - pub id: Uuid, + pub id: CollectionUuid, /// errors encountered during collection pub errors: Vec, /// time the collection started @@ -109,10 +111,10 @@ pub struct Collection { BTreeMap, RotPageFound>>, /// Sled Agent information, by *sled* id - pub sled_agents: BTreeMap, + pub sled_agents: BTreeMap, /// Omicron zones found, by *sled* id - pub omicron_zones: BTreeMap, + pub omicron_zones: BTreeMap, } impl Collection { @@ -144,7 +146,7 @@ impl Collection { } /// Iterate over the sled ids of sleds identified as Scrimlets - pub fn scrimlets(&self) -> impl Iterator + '_ { + pub fn scrimlets(&self) -> impl Iterator + '_ { self.sled_agents .iter() .filter(|(_, inventory)| inventory.sled_role == SledRole::Scrimlet) @@ -360,7 +362,7 @@ pub struct PhysicalDisk { impl From for PhysicalDisk { fn from(disk: sled_agent_client::types::InventoryDisk) -> PhysicalDisk { PhysicalDisk { - identity: disk.identity.into(), + identity: disk.identity, variant: disk.variant.into(), slot: disk.slot, } @@ -371,7 +373,7 @@ impl From for PhysicalDisk { #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct Zpool { pub time_collected: DateTime, - pub id: Uuid, + pub id: ZpoolUuid, pub total_size: ByteCount, } @@ -394,7 +396,7 @@ impl Zpool { pub struct SledAgent { pub time_collected: DateTime, pub source: String, - pub sled_id: Uuid, + pub sled_id: SledUuid, pub baseboard_id: Option>, pub sled_agent_address: SocketAddrV6, pub sled_role: SledRole, @@ -409,6 +411,6 @@ pub struct SledAgent { pub struct OmicronZonesFound { pub time_collected: DateTime, pub source: String, - pub sled_id: Uuid, + pub sled_id: SledUuid, pub zones: OmicronZonesConfig, } diff --git a/openapi/bootstrap-agent.json b/openapi/bootstrap-agent.json index 688e444053..b09f34ea9e 100644 --- a/openapi/bootstrap-agent.json +++ b/openapi/bootstrap-agent.json @@ -161,6 +161,48 @@ }, "components": { "schemas": { + "AllowedSourceIps": { + "description": "Description of source IPs allowed to reach rack services.", + "oneOf": [ + { + "description": 
"Allow traffic from any external IP address.", + "type": "object", + "properties": { + "allow": { + "type": "string", + "enum": [ + "any" + ] + } + }, + "required": [ + "allow" + ] + }, + { + "description": "Restrict access to a specific set of source IP addresses or subnets.\n\nAll others are prevented from reaching rack services.", + "type": "object", + "properties": { + "allow": { + "type": "string", + "enum": [ + "list" + ] + }, + "ips": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + } + }, + "required": [ + "allow", + "ips" + ] + } + ] + }, "Baseboard": { "description": "Describes properties that should uniquely identify a Gimlet.", "oneOf": [ @@ -283,12 +325,22 @@ "format": "uint32", "minimum": 0 }, + "checker": { + "nullable": true, + "description": "Checker to apply to incoming messages.", + "type": "string" + }, "originate": { "description": "The set of prefixes for the BGP router to originate.", "type": "array", "items": { - "$ref": "#/components/schemas/Ipv4Network" + "$ref": "#/components/schemas/Ipv4Net" } + }, + "shaper": { + "nullable": true, + "description": "Shaper to apply to outgoing messages.", + "type": "string" } }, "required": [ @@ -304,12 +356,44 @@ "type": "string", "format": "ipv4" }, + "allowed_export": { + "description": "Define export policy for a peer.", + "default": { + "type": "no_filtering" + }, + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" + } + ] + }, + "allowed_import": { + "description": "Define import policy for a peer.", + "default": { + "type": "no_filtering" + }, + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" + } + ] + }, "asn": { - "description": "The autonomous sysetm number of the router the peer belongs to.", + "description": "The autonomous system number of the router the peer belongs to.", "type": "integer", "format": "uint32", "minimum": 0 }, + "communities": { + "description": "Include the provided communities in updates sent to the peer.", + "default": [], + "type": "array", + "items": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, "connect_retry": { "nullable": true, "description": "The interval in seconds between peer connection retry attempts.", @@ -324,6 +408,11 @@ "format": "uint64", "minimum": 0 }, + "enforce_first_as": { + "description": "Enforce that the first AS in paths received from this peer is the peer's AS.", + "default": false, + "type": "boolean" + }, "hold_time": { "nullable": true, "description": "How long to keep a session alive without a keepalive in seconds. 
Defaults to 6.", @@ -345,9 +434,49 @@ "format": "uint64", "minimum": 0 }, + "local_pref": { + "nullable": true, + "description": "Apply a local preference to routes received from this peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "md5_auth_key": { + "nullable": true, + "description": "Use the given key for TCP-MD5 authentication with the peer.", + "type": "string" + }, + "min_ttl": { + "nullable": true, + "description": "Require messages from a peer have a minimum IP time to live field.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "multi_exit_discriminator": { + "nullable": true, + "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, "port": { "description": "Switch port the peer is reachable on.", "type": "string" + }, + "remote_asn": { + "nullable": true, + "description": "Require that a peer has a specified ASN.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "Associate a VLAN ID with a BGP peer session.", + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ @@ -449,13 +578,59 @@ "request_id" ] }, - "IpNetwork": { + "ImportExportPolicy": { + "description": "Define policy relating to the import and export of prefixes from a BGP peer.", + "oneOf": [ + { + "description": "Do not perform any filtering.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "no_filtering" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "allow" + ] + }, + "value": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "IpNet": { + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::IpNet", + "version": "0.1.0" + }, "oneOf": [ { "title": "v4", "allOf": [ { - "$ref": "#/components/schemas/Ipv4Network" + "$ref": "#/components/schemas/Ipv4Net" } ] }, @@ -463,7 +638,7 @@ "title": "v6", "allOf": [ { - "$ref": "#/components/schemas/Ipv6Network" + "$ref": "#/components/schemas/Ipv6Net" } ] } @@ -489,9 +664,17 @@ } ] }, - "Ipv4Network": { + "Ipv4Net": { + "example": "192.168.1.0/24", + "title": "An IPv4 subnet", + "description": "An IPv4 subnet, including prefix and prefix length", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + }, "type": "string", - "pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\/(3[0-2]|[0-2]?[0-9])$" + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" }, "Ipv4Range": { "description": "A non-decreasing IPv4 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", @@ -511,9 +694,17 @@ "last" ] }, - "Ipv6Network": { + "Ipv6Net": { + "example": "fd12:3456::/64", + "title": "An IPv6 subnet", + "description": "An IPv6 subnet, including prefix and subnet mask", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + }, "type": "string", - "pattern": 
"^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\")[/](12[0-8]|1[0-1][0-9]|[0-9]?[0-9])$" + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" }, "Ipv6Range": { "description": "A non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", @@ -553,7 +744,7 @@ "description": "This port's addresses.", "type": "array", "items": { - "$ref": "#/components/schemas/IpNetwork" + "$ref": "#/components/schemas/IpNet" } }, "autoneg": { @@ -646,6 +837,17 @@ "description": "Configuration for the \"rack setup service\".\n\nThe Rack Setup Service should be responsible for one-time setup actions, such as CockroachDB placement and initialization. 
Without operator intervention, however, these actions need a way to be automated in our deployment.", "type": "object", "properties": { + "allowed_source_ips": { + "description": "IPs or subnets allowed to make requests to user-facing services", + "default": { + "allow": "any" + }, + "allOf": [ + { + "$ref": "#/components/schemas/AllowedSourceIps" + } + ] + }, "bootstrap_discovery": { "description": "Describes how bootstrap addresses should be collected during RSS.", "allOf": [ @@ -769,7 +971,7 @@ } }, "rack_subnet": { - "$ref": "#/components/schemas/Ipv6Network" + "$ref": "#/components/schemas/Ipv6Net" } }, "required": [ @@ -978,7 +1180,7 @@ "description": "The destination of the route.", "allOf": [ { - "$ref": "#/components/schemas/IpNetwork" + "$ref": "#/components/schemas/IpNet" } ] }, @@ -986,6 +1188,13 @@ "description": "The nexthop/gateway address.", "type": "string", "format": "ip" + }, + "vlan_id": { + "nullable": true, + "description": "The VLAN id associated with this route.", + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ @@ -1034,4 +1243,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/dns-server.json b/openapi/dns-server.json index 41b351d4c1..1b02199b76 100644 --- a/openapi/dns-server.json +++ b/openapi/dns-server.json @@ -253,4 +253,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/gateway.json b/openapi/gateway.json index 5961b670ed..c5d0eab0b1 100644 --- a/openapi/gateway.json +++ b/openapi/gateway.json @@ -1300,6 +1300,62 @@ } } }, + "/sp/{type}/{slot}/sensor/{sensor_id}/value": { + "get": { + "summary": "Read the current value of a sensor by ID", + "description": "Sensor IDs come from the host topo tree.", + "operationId": "sp_sensor_read_value", + "parameters": [ + { + "in": "path", + "name": "sensor_id", + "description": "ID for the sensor on the SP.", + "required": true, + "schema": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + { + "in": "path", + "name": "slot", + "required": true, + "schema": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + { + "in": "path", + "name": "type", + "required": true, + "schema": { + "$ref": "#/components/schemas/SpType" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SpSensorReading" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/sp/{type}/{slot}/startup-options": { "get": { "summary": "Get host startup options for a sled", @@ -2788,6 +2844,124 @@ } ] }, + "SpSensorReading": { + "description": "Result of reading an SP sensor.", + "type": "object", + "properties": { + "result": { + "description": "Value (or error) from the sensor.", + "allOf": [ + { + "$ref": "#/components/schemas/SpSensorReadingResult" + } + ] + }, + "timestamp": { + "description": "SP-centric timestamp of when `result` was recorded from this sensor.\n\nCurrently this value represents \"milliseconds since the last SP boot\" and is primarily useful as a delta between sensors on this SP (assuming no reboot in between). 
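
Because the SpSensorReading timestamp counts milliseconds since the last SP boot, it is mainly useful as a delta between two readings from the same SP. A small sketch of that comparison, using a simplified stand-in for the generated client type (the real `result` field is an SpSensorReadingResult, not shown here):

    // Simplified stand-in for SpSensorReading; `timestamp_ms` is milliseconds
    // since the last SP boot, per the schema above.
    struct Reading {
        timestamp_ms: u64,
    }

    // A timestamp that goes backwards suggests the SP rebooted in between,
    // in which case no meaningful delta exists.
    fn millis_between(earlier: &Reading, later: &Reading) -> Option<u64> {
        later.timestamp_ms.checked_sub(earlier.timestamp_ms)
    }
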
The meaning could change with future SP releases.", + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "result", + "timestamp" + ] + }, + "SpSensorReadingResult": { + "description": "Single reading (or error) from an SP sensor.", + "oneOf": [ + { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "success" + ] + }, + "value": { + "type": "number", + "format": "float" + } + }, + "required": [ + "kind", + "value" + ] + }, + { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "device_off" + ] + } + }, + "required": [ + "kind" + ] + }, + { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "device_error" + ] + } + }, + "required": [ + "kind" + ] + }, + { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "device_not_present" + ] + } + }, + "required": [ + "kind" + ] + }, + { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "device_unavailable" + ] + } + }, + "required": [ + "kind" + ] + }, + { + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "device_timeout" + ] + } + }, + "required": [ + "kind" + ] + } + ] + }, "SpState": { "type": "object", "properties": { @@ -3111,4 +3285,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/installinator-artifactd.json b/openapi/installinator-artifactd.json index 136e60a8c4..61f555e10d 100644 --- a/openapi/installinator-artifactd.json +++ b/openapi/installinator-artifactd.json @@ -2325,4 +2325,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index cba8063b7e..828378eaba 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -90,7 +90,35 @@ } } }, - "/bgtasks/{bgtask_name}": { + "/bgtasks/activate": { + "post": { + "summary": "Activates one or more background tasks, causing them to be run immediately", + "description": "if idle, or scheduled to run again as soon as possible if already running.", + "operationId": "bgtask_activate", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BackgroundTasksActivateRequest" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/bgtasks/view/{bgtask_name}": { "get": { "summary": "Fetch status of one background task", "description": "This is exposed for support and debugging.", @@ -455,30 +483,24 @@ } } }, - "/deployment/blueprints/generate-from-collection": { + "/deployment/blueprints/import": { "post": { - "summary": "Generates a new blueprint matching the specified inventory collection", - "operationId": "blueprint_generate_from_collection", + "summary": "Imports a client-provided blueprint", + "description": "This is intended for development and support, not end users or operators.", + "operationId": "blueprint_import", "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/CollectionId" + "$ref": "#/components/schemas/Blueprint" } } }, "required": true }, "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Blueprint" - } - } - } + "204": { + "description": "resource updated" }, "4XX": { "$ref": 
"#/components/responses/Error" @@ -709,19 +731,74 @@ } } }, - "/metrics/collect/{producer_id}": { + "/metrics/collectors": { + "post": { + "summary": "Accept a notification of a new oximeter collection server.", + "operationId": "cpapi_collectors_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OximeterInfo" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/metrics/collectors/{collector_id}/producers": { "get": { - "summary": "Endpoint for oximeter to collect nexus server metrics.", - "operationId": "cpapi_metrics_collect", + "summary": "List all metric producers assigned to an oximeter collector.", + "operationId": "cpapi_assigned_producers_list", "parameters": [ { "in": "path", - "name": "producer_id", + "name": "collector_id", + "description": "The ID of the oximeter collector.", "required": true, "schema": { "type": "string", "format": "uuid" } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/IdSortMode" + } } ], "responses": { @@ -730,11 +807,7 @@ "content": { "application/json": { "schema": { - "title": "Array_of_ProducerResultsItem", - "type": "array", - "items": { - "$ref": "#/components/schemas/ProducerResultsItem" - } + "$ref": "#/components/schemas/ProducerEndpointResultsPage" } } } @@ -745,33 +818,9 @@ "5XX": { "$ref": "#/components/responses/Error" } - } - } - }, - "/metrics/collectors": { - "post": { - "summary": "Accept a notification of a new oximeter collection server.", - "operationId": "cpapi_collectors_post", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OximeterInfo" - } - } - }, - "required": true }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } + "x-dropshot-pagination": { + "required": [] } } }, @@ -790,8 +839,15 @@ "required": true }, "responses": { - "204": { - "description": "resource updated" + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProducerRegistrationResponse" + } + } + } }, "4XX": { "$ref": "#/components/responses/Error" @@ -853,65 +909,6 @@ } } }, - "/physical-disk": { - "put": { - "summary": "Report that a physical disk for the specified sled has come online.", - "operationId": "physical_disk_put", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PhysicalDiskPutRequest" - } - } - }, - "required": true - }, - "responses": { - "200": { - "description": "successful operation", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PhysicalDiskPutResponse" - } - } - } - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - }, - "delete": { - 
"summary": "Report that a physical disk for the specified sled has gone offline.", - "operationId": "physical_disk_delete", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PhysicalDiskDeleteRequest" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "successful deletion" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, "/probes/{sled}": { "get": { "summary": "Get all the probes associated with a given sled.", @@ -1211,47 +1208,27 @@ } } }, - "/sled-agents/{sled_id}/zpools/{zpool_id}": { - "put": { - "summary": "Report that a pool for a specified sled has come online.", - "operationId": "zpool_put", - "parameters": [ - { - "in": "path", - "name": "sled_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - }, - { - "in": "path", - "name": "zpool_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], + "/sleds/add": { + "post": { + "summary": "Add sled to initialized rack", + "operationId": "sled_add", "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ZpoolPutRequest" + "$ref": "#/components/schemas/UninitializedSledId" } } }, "required": true }, "responses": { - "200": { - "description": "successful operation", + "201": { + "description": "successful creation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ZpoolPutResponse" + "$ref": "#/components/schemas/SledId" } } } @@ -1265,33 +1242,6 @@ } } }, - "/sleds/add": { - "post": { - "summary": "Add sled to initialized rack", - "operationId": "sled_add", - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UninitializedSledId" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } - } - }, "/sleds/expunge": { "post": { "summary": "Mark a sled as expunged", @@ -1436,23 +1386,65 @@ "dependency" ] }, - "BackgroundTask": { - "description": "Background tasks\n\nThese are currently only intended for observability by developers. 
We will eventually want to flesh this out into something more observable for end users.", - "type": "object", - "properties": { - "current": { - "description": "Describes the current task status", - "allOf": [ - { - "$ref": "#/components/schemas/CurrentStatus" + "AllowedSourceIps": { + "description": "Description of source IPs allowed to reach rack services.", + "oneOf": [ + { + "description": "Allow traffic from any external IP address.", + "type": "object", + "properties": { + "allow": { + "type": "string", + "enum": [ + "any" + ] } + }, + "required": [ + "allow" ] }, - "description": { - "description": "brief summary (for developers) of what this task does", - "type": "string" - }, - "last": { + { + "description": "Restrict access to a specific set of source IP addresses or subnets.\n\nAll others are prevented from reaching rack services.", + "type": "object", + "properties": { + "allow": { + "type": "string", + "enum": [ + "list" + ] + }, + "ips": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + } + }, + "required": [ + "allow", + "ips" + ] + } + ] + }, + "BackgroundTask": { + "description": "Background tasks\n\nThese are currently only intended for observability by developers. We will eventually want to flesh this out into something more observable for end users.", + "type": "object", + "properties": { + "current": { + "description": "Describes the current task status", + "allOf": [ + { + "$ref": "#/components/schemas/CurrentStatus" + } + ] + }, + "description": { + "description": "brief summary (for developers) of what this task does", + "type": "string" + }, + "last": { "description": "Describes the last completed activation", "allOf": [ { @@ -1481,6 +1473,22 @@ "period" ] }, + "BackgroundTasksActivateRequest": { + "description": "Query parameters for Background Task activation requests.", + "type": "object", + "properties": { + "bgtask_names": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "required": [ + "bgtask_names" + ] + }, "Baseboard": { "description": "Properties that uniquely identify an Oxide hardware component", "type": "object", @@ -1556,12 +1564,22 @@ "format": "uint32", "minimum": 0 }, + "checker": { + "nullable": true, + "description": "Checker to apply to incoming messages.", + "type": "string" + }, "originate": { "description": "The set of prefixes for the BGP router to originate.", "type": "array", "items": { - "$ref": "#/components/schemas/Ipv4Network" + "$ref": "#/components/schemas/Ipv4Net" } + }, + "shaper": { + "nullable": true, + "description": "Shaper to apply to outgoing messages.", + "type": "string" } }, "required": [ @@ -1577,12 +1595,44 @@ "type": "string", "format": "ipv4" }, + "allowed_export": { + "description": "Define export policy for a peer.", + "default": { + "type": "no_filtering" + }, + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" + } + ] + }, + "allowed_import": { + "description": "Define import policy for a peer.", + "default": { + "type": "no_filtering" + }, + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" + } + ] + }, "asn": { - "description": "The autonomous sysetm number of the router the peer belongs to.", + "description": "The autonomous system number of the router the peer belongs to.", "type": "integer", "format": "uint32", "minimum": 0 }, + "communities": { + "description": "Include the provided communities in updates sent to the peer.", + "default": [], + "type": "array", + "items": { + "type": "integer", + "format": 
"uint32", + "minimum": 0 + } + }, "connect_retry": { "nullable": true, "description": "The interval in seconds between peer connection retry attempts.", @@ -1597,6 +1647,11 @@ "format": "uint64", "minimum": 0 }, + "enforce_first_as": { + "description": "Enforce that the first AS in paths received from this peer is the peer's AS.", + "default": false, + "type": "boolean" + }, "hold_time": { "nullable": true, "description": "How long to keep a session alive without a keepalive in seconds. Defaults to 6.", @@ -1618,9 +1673,49 @@ "format": "uint64", "minimum": 0 }, + "local_pref": { + "nullable": true, + "description": "Apply a local preference to routes received from this peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "md5_auth_key": { + "nullable": true, + "description": "Use the given key for TCP-MD5 authentication with the peer.", + "type": "string" + }, + "min_ttl": { + "nullable": true, + "description": "Require messages from a peer have a minimum IP time to live field.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "multi_exit_discriminator": { + "nullable": true, + "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, "port": { "description": "Switch port the peer is reachable on.", "type": "string" + }, + "remote_asn": { + "nullable": true, + "description": "Require that a peer has a specified ASN.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "Associate a VLAN ID with a BGP peer session.", + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ @@ -1629,4313 +1724,2038 @@ "port" ] }, - "BinRangedouble": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", + "Blueprint": { + "description": "Describes a complete set of software and configuration for the system", + "type": "object", + "properties": { + "blueprint_disks": { + "description": "A map of sled id -> disks in use on each sled.", "type": "object", - "properties": { - "end": { - "type": "number", - "format": "double" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] + "additionalProperties": { + "$ref": "#/components/schemas/OmicronPhysicalDisksConfig" + } + }, + "blueprint_zones": { + "description": "A map of sled id -> zones deployed on each sled, along with the [`BlueprintZoneDisposition`] for each zone.\n\nUnlike `sled_state`, this map may contain entries for sleds that are no longer a part of the control plane cluster (e.g., sleds that have been decommissioned, but still have expunged zones where cleanup has not yet completed).", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/BlueprintZonesConfig" + } + }, + "cockroachdb_fingerprint": { + "description": "CockroachDB state fingerprint when this blueprint was created", + "type": "string" + }, + "cockroachdb_setting_preserve_downgrade": { + "description": "Whether to set `cluster.preserve_downgrade_option` and what to set it to", + "allOf": [ + { + "$ref": "#/components/schemas/CockroachDbPreserveDowngrade" } - }, - "required": [ - "end", - "type" ] }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "number", - "format": "double" - }, - "start": { - "type": "number", - "format": "double" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] + "comment": { + "description": "human-readable string describing why this blueprint was created (for debugging)", + "type": "string" + }, + "creator": { + "description": "identity of the component that generated the blueprint (for debugging) This would generally be the Uuid of a Nexus instance.", + "type": "string" + }, + "external_dns_version": { + "description": "external DNS version when thi blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" } - }, - "required": [ - "end", - "start", - "type" ] }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "number", - "format": "double" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] + "id": { + "description": "unique identifier for this blueprint", + "type": "string", + "format": "uuid" + }, + "internal_dns_version": { + "description": "internal DNS version when this blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" } - }, - "required": [ - "start", - "type" ] + }, + "parent_blueprint_id": { + "nullable": true, + "description": "which blueprint this blueprint is based on", + "type": "string", + "format": "uuid" + }, + "sled_state": { + "description": "A map of sled id -> desired state of the sled.\n\nA sled is considered part of the control plane cluster iff it has an entry in this map.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/SledState" + } + }, + "time_created": { + "description": "when this blueprint was generated (for debugging)", + "type": "string", + "format": "date-time" } + }, + "required": [ + 
"blueprint_disks", + "blueprint_zones", + "cockroachdb_fingerprint", + "cockroachdb_setting_preserve_downgrade", + "comment", + "creator", + "external_dns_version", + "id", + "internal_dns_version", + "sled_state", + "time_created" ] }, - "BinRangefloat": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "number", - "format": "float" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] + "BlueprintMetadata": { + "description": "Describe high-level metadata about a blueprint", + "type": "object", + "properties": { + "cockroachdb_fingerprint": { + "description": "CockroachDB state fingerprint when this blueprint was created", + "type": "string" + }, + "cockroachdb_setting_preserve_downgrade": { + "nullable": true, + "description": "Whether to set `cluster.preserve_downgrade_option` and what to set it to (`None` if this value was retrieved from the database and was invalid)", + "allOf": [ + { + "$ref": "#/components/schemas/CockroachDbPreserveDowngrade" } - }, - "required": [ - "end", - "type" ] }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "number", - "format": "float" - }, - "start": { - "type": "number", - "format": "float" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] + "comment": { + "description": "human-readable string describing why this blueprint was created (for debugging)", + "type": "string" + }, + "creator": { + "description": "identity of the component that generated the blueprint (for debugging) This would generally be the Uuid of a Nexus instance.", + "type": "string" + }, + "external_dns_version": { + "description": "external DNS version when this blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" } - }, - "required": [ - "end", - "start", - "type" ] }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "number", - "format": "float" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" + "id": { + "description": "unique identifier for this blueprint", + "type": "string", + "format": "uuid" + }, + "internal_dns_version": { + "description": "internal DNS version when this blueprint was created", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "parent_blueprint_id": { + "nullable": true, + "description": "which blueprint this blueprint is based on", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "when this blueprint was generated (for debugging)", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "cockroachdb_fingerprint", + "comment", + "creator", + "external_dns_version", + "id", + "internal_dns_version", + "time_created" + ] + }, + "BlueprintMetadataResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/BlueprintMetadata" + } + }, + "next_page": { + "nullable": 
true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "BlueprintTarget": { + "description": "Describes what blueprint, if any, the system is currently working toward", + "type": "object", + "properties": { + "enabled": { + "description": "policy: should the system actively work towards this blueprint\n\nThis should generally be left enabled.", + "type": "boolean" + }, + "target_id": { + "description": "id of the blueprint that the system is trying to make real", + "type": "string", + "format": "uuid" + }, + "time_made_target": { + "description": "when this blueprint was made the target", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "enabled", + "target_id", + "time_made_target" + ] + }, + "BlueprintTargetSet": { + "description": "Specifies what blueprint, if any, the system should be working toward", + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "target_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "enabled", + "target_id" + ] + }, + "BlueprintZoneConfig": { + "description": "Describes one Omicron-managed zone in a blueprint.\n\nPart of [`BlueprintZonesConfig`].", + "type": "object", + "properties": { + "disposition": { + "description": "The disposition (desired state) of this zone recorded in the blueprint.", + "allOf": [ + { + "$ref": "#/components/schemas/BlueprintZoneDisposition" + } ] + }, + "id": { + "$ref": "#/components/schemas/TypedUuidForOmicronZoneKind" + }, + "underlay_address": { + "type": "string", + "format": "ipv6" + }, + "zone_type": { + "$ref": "#/components/schemas/BlueprintZoneType" } + }, + "required": [ + "disposition", + "id", + "underlay_address", + "zone_type" ] }, - "BinRangeint16": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
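
The BlueprintTargetSet schema above specifies which blueprint the system should work toward and whether it should actively be executed. A minimal body sketch, assuming the caller already has the id of an existing blueprint:

    // Sketch of a BlueprintTargetSet body; `target_id` must name an existing
    // blueprint, and `enabled` controls whether the system acts on it.
    fn set_target_body(target_id: &str) -> serde_json::Value {
        serde_json::json!({
            "target_id": target_id,   // a blueprint UUID (placeholder here)
            "enabled": true
        })
    }
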
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "BlueprintZoneDisposition": { + "description": "The desired state of an Omicron-managed zone in a blueprint.\n\nPart of [`BlueprintZoneConfig`].", "oneOf": [ { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int16" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" + "description": "The zone is in-service.", + "type": "string", + "enum": [ + "in_service" ] }, { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int16" - }, - "start": { - "type": "integer", - "format": "int16" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" + "description": "The zone is not in service.", + "type": "string", + "enum": [ + "quiesced" ] }, { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "int16" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" + "description": "The zone is permanently gone.", + "type": "string", + "enum": [ + "expunged" ] } ] }, - "BinRangeint32": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "BlueprintZoneType": { "oneOf": [ { - "description": "A range unbounded below and exclusively above, `..end`.", "type": "object", "properties": { - "end": { - "type": "integer", - "format": "int32" + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "nullable": true, + "type": "string" + }, + "external_ip": { + "$ref": "#/components/schemas/OmicronZoneExternalSnatIp" + }, + "nic": { + "description": "The service vNIC providing outbound connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } }, "type": { "type": "string", "enum": [ - "range_to" + "boundary_ntp" ] } }, "required": [ - "end", + "address", + "dns_servers", + "external_ip", + "nic", + "ntp_servers", "type" ] }, { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", "type": "object", "properties": { - "end": { - "type": "integer", - "format": "int32" + "address": { + "type": "string" }, - "start": { - "type": "integer", - "format": "int32" + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" }, "type": { "type": "string", "enum": [ - "range" + "clickhouse" ] } }, "required": [ - "end", - "start", + "address", + "dataset", "type" ] }, { - "description": "A range bounded inclusively below and unbounded above, `start..`.", "type": "object", "properties": { - "start": { - "type": "integer", - "format": "int32" + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" }, "type": { "type": "string", "enum": [ - "range_from" + "clickhouse_keeper" ] } }, "required": [ - "start", + "address", + "dataset", "type" ] - } - ] - }, - 
"BinRangeint64": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ + }, { - "description": "A range unbounded below and exclusively above, `..end`.", "type": "object", "properties": { - "end": { - "type": "integer", - "format": "int64" + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" }, "type": { "type": "string", "enum": [ - "range_to" + "cockroach_db" ] } }, "required": [ - "end", + "address", + "dataset", "type" ] }, { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", "type": "object", "properties": { - "end": { - "type": "integer", - "format": "int64" + "address": { + "type": "string" }, - "start": { - "type": "integer", - "format": "int64" + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" }, "type": { "type": "string", "enum": [ - "range" + "crucible" ] } }, "required": [ - "end", - "start", + "address", + "dataset", "type" ] }, { - "description": "A range bounded inclusively below and unbounded above, `start..`.", "type": "object", "properties": { - "start": { - "type": "integer", - "format": "int64" + "address": { + "type": "string" }, "type": { "type": "string", "enum": [ - "range_from" + "crucible_pantry" ] } }, "required": [ - "start", + "address", "type" ] - } - ] - }, - "BinRangeint8": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ + }, { - "description": "A range unbounded below and exclusively above, `..end`.", "type": "object", "properties": { - "end": { - "type": "integer", - "format": "int8" + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" }, - "type": { - "type": "string", - "enum": [ - "range_to" + "dns_address": { + "description": "The address at which the external DNS server is reachable.", + "allOf": [ + { + "$ref": "#/components/schemas/OmicronZoneExternalFloatingAddr" + } ] - } - }, - "required": [ - "end", - "type" - ] - }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int8" }, - "start": { - "type": "integer", - "format": "int8" + "http_address": { + "description": "The address at which the external DNS server API is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] }, "type": { "type": "string", "enum": [ - "range" + "external_dns" ] } }, "required": [ - "end", - "start", + "dataset", + "dns_address", + "http_address", + "nic", "type" ] }, { - "description": "A range bounded inclusively below and unbounded above, `start..`.", "type": "object", "properties": { - "start": { + "dataset": { + "$ref": "#/components/schemas/OmicronZoneDataset" + }, + "dns_address": { + "type": "string" + }, + "gz_address": { + "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", + "type": "string", + "format": 
"ipv6" + }, + "gz_address_index": { + "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", "type": "integer", - "format": "int8" + "format": "uint32", + "minimum": 0 + }, + "http_address": { + "type": "string" }, "type": { "type": "string", "enum": [ - "range_from" + "internal_dns" ] } }, "required": [ - "start", + "dataset", + "dns_address", + "gz_address", + "gz_address_index", + "http_address", "type" ] - } - ] - }, - "BinRangeuint16": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ + }, { - "description": "A range unbounded below and exclusively above, `..end`.", "type": "object", "properties": { - "end": { - "type": "integer", - "format": "uint16", - "minimum": 0 + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "nullable": true, + "type": "string" + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } }, "type": { "type": "string", "enum": [ - "range_to" + "internal_ntp" ] } }, "required": [ - "end", + "address", + "dns_servers", + "ntp_servers", "type" ] }, { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", "type": "object", "properties": { - "end": { - "type": "integer", - "format": "uint16", - "minimum": 0 + "external_dns_servers": { + "description": "External DNS servers Nexus can use to resolve external hosts.", + "type": "array", + "items": { + "type": "string", + "format": "ip" + } }, - "start": { - "type": "integer", - "format": "uint16", - "minimum": 0 + "external_ip": { + "description": "The address at which the external nexus server is reachable.", + "allOf": [ + { + "$ref": "#/components/schemas/OmicronZoneExternalFloatingIp" + } + ] + }, + "external_tls": { + "description": "Whether Nexus's external endpoint should use TLS", + "type": "boolean" + }, + "internal_address": { + "description": "The address at which the internal nexus server is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/components/schemas/NetworkInterface" + } + ] }, "type": { "type": "string", "enum": [ - "range" + "nexus" ] } }, "required": [ - "end", - "start", + "external_dns_servers", + "external_ip", + "external_tls", + "internal_address", + "nic", "type" ] }, { - "description": "A range bounded inclusively below and unbounded above, `start..`.", "type": "object", "properties": { - "start": { - "type": "integer", - "format": "uint16", - "minimum": 0 + "address": { + "type": "string" }, "type": { "type": "string", "enum": [ - "range_from" + "oximeter" ] } }, "required": [ - "start", + "address", "type" ] } ] }, - "BinRangeuint32": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "BlueprintZonesConfig": { + "description": "Information about an Omicron zone as recorded in a blueprint.\n\nCurrently, this is similar to [`OmicronZonesConfig`], but also contains a per-zone [`BlueprintZoneDisposition`].\n\nPart of [`Blueprint`].", + "type": "object", + "properties": { + "generation": { + "description": "Generation number of this configuration.\n\nThis generation number is owned by the control plane. See [`OmicronZonesConfig::generation`] for more details.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "zones": { + "description": "The list of running zones.", + "type": "array", + "items": { + "$ref": "#/components/schemas/BlueprintZoneConfig" + } + } + }, + "required": [ + "generation", + "zones" + ] + }, + "ByteCount": { + "description": "Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "Certificate": { + "type": "object", + "properties": { + "cert": { + "type": "string" + }, + "key": { + "type": "string" + } + }, + "required": [ + "cert", + "key" + ] + }, + "CockroachDbClusterVersion": { + "description": "CockroachDB cluster versions we are aware of.\n\nCockroachDB can be upgraded from one major version to the next, e.g. v22.1 -> v22.2. Each major version introduces changes in how it stores data on disk to support new features, and each major version has support for reading the previous version's data so that it can perform an upgrade. The version of the data format is called the \"cluster version\", which is distinct from but related to the software version that's being run.\n\nWhile software version v22.2 is using cluster version v22.1, it's possible to downgrade back to v22.1. Once the cluster version is upgraded, there's no going back.\n\nTo give us some time to evaluate new versions of the software while retaining a downgrade path, we currently deploy new versions of CockroachDB across two releases of the Oxide software, in a \"tick-tock\" model:\n\n- In \"tick\" releases, we upgrade the version of the CockroachDB software to a new major version, and update `CockroachDbClusterVersion::NEWLY_INITIALIZED`. On upgraded racks, the new version is running with the previous cluster version; on newly-initialized racks, the new version is running with the new cluser version. - In \"tock\" releases, we change `CockroachDbClusterVersion::POLICY` to the major version we upgraded to in the last \"tick\" release. This results in a new blueprint that upgrades the cluster version, destroying the downgrade path but allowing us to eventually upgrade to the next release.\n\nThese presently describe major versions of CockroachDB. 
The order of these must be maintained in the correct order (the first variant must be the earliest version).", + "type": "string", + "enum": [ + "V22_1" + ] + }, + "CockroachDbPreserveDowngrade": { + "description": "Whether to set `cluster.preserve_downgrade_option` and what to set it to.", "oneOf": [ { - "description": "A range unbounded below and exclusively above, `..end`.", + "description": "Do not modify the setting.", "type": "object", "properties": { - "end": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "type": { + "action": { "type": "string", "enum": [ - "range_to" + "do_not_modify" ] } }, "required": [ - "end", - "type" + "action" ] }, { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "description": "Ensure the setting is set to an empty string.", "type": "object", "properties": { - "end": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "start": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "type": { + "action": { "type": "string", "enum": [ - "range" + "allow_upgrade" ] } }, "required": [ - "end", - "start", - "type" + "action" ] }, { - "description": "A range bounded inclusively below and unbounded above, `start..`.", + "description": "Ensure the setting is set to a given cluster version.", "type": "object", "properties": { - "start": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "type": { + "action": { "type": "string", "enum": [ - "range_from" + "set" ] + }, + "data": { + "$ref": "#/components/schemas/CockroachDbClusterVersion" } }, "required": [ - "start", - "type" + "action", + "data" ] } ] }, - "BinRangeuint64": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
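
The CockroachDbPreserveDowngrade schema above admits three shapes: leave `cluster.preserve_downgrade_option` untouched, clear it to allow an upgrade, or pin it to a specific cluster version (the only version currently listed is V22_1). A sketch of the three possible bodies:

    // The three CockroachDbPreserveDowngrade variants from the schema above.
    fn preserve_downgrade_examples() -> Vec<serde_json::Value> {
        vec![
            serde_json::json!({ "action": "do_not_modify" }),
            serde_json::json!({ "action": "allow_upgrade" }),
            serde_json::json!({ "action": "set", "data": "V22_1" }),
        ]
    }
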
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "CurrentStatus": { + "description": "Describes the current status of a background task", "oneOf": [ { - "description": "A range unbounded below and exclusively above, `..end`.", + "description": "The background task is not running\n\nTypically, the task would be waiting for its next activation, which would happen after a timeout or some other event that triggers activation", "type": "object", "properties": { - "end": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "type": { + "current_status": { "type": "string", "enum": [ - "range_to" + "idle" ] } }, "required": [ - "end", - "type" + "current_status" ] }, { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "description": "The background task is currently running\n\nMore precisely, the task has been activated and has not yet finished this activation", "type": "object", "properties": { - "end": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "type": { + "current_status": { "type": "string", "enum": [ - "range" + "running" ] - } - }, - "required": [ - "end", - "start", - "type" - ] - }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "uint64", - "minimum": 0 }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] + "details": { + "$ref": "#/components/schemas/CurrentStatusRunning" } }, "required": [ - "start", - "type" + "current_status", + "details" ] } ] }, - "BinRangeuint8": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] - }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "start": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" - ] - }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" - ] - } - ] - }, - "Bindouble": { - "description": "Type storing bin edges and a count of samples within it.", + "CurrentStatusRunning": { "type": "object", "properties": { - "count": { - "description": "The total count of samples in this bin.", + "iteration": { + "description": "which iteration this was (counter)", "type": "integer", "format": "uint64", "minimum": 0 }, - "range": { - "description": "The range of the support covered by this bin.", + "reason": { + "description": "what kind of event triggered this activation", "allOf": [ { - "$ref": "#/components/schemas/BinRangedouble" + "$ref": "#/components/schemas/ActivationReason" } ] + }, + "start_time": { + "description": "wall-clock time when the current activation started", + "type": "string", + "format": "date-time" } }, "required": [ - "count", - "range" + "iteration", + "reason", + "start_time" ] }, - "Binfloat": { - "description": "Type storing bin edges and a count of samples within it.", + "DatasetCreateRequest": { "type": "object", "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 + "dataset_id": { + "type": "string", + "format": "uuid" }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangefloat" - } - ] + "request": { + "$ref": "#/components/schemas/DatasetPutRequest" + }, + "zpool_id": { + "type": "string", + "format": "uuid" } }, "required": [ - "count", - "range" + "dataset_id", + "request", + "zpool_id" ] }, - "Binint16": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeint16" - } - ] - } - }, - "required": [ - "count", - "range" + "DatasetKind": { + "description": "Describes the purpose of the dataset.", + "type": "string", + "enum": [ + "crucible", + "cockroach", + "clickhouse", + "clickhouse_keeper", + "external_dns", + "internal_dns" ] }, - "Binint32": { - "description": "Type storing bin edges and a count of samples within it.", + "DatasetPutRequest": { + "description": "Describes a dataset within a pool.", "type": "object", 
"properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 + "address": { + "description": "Address on which a service is responding to requests for the dataset.", + "type": "string" }, - "range": { - "description": "The range of the support covered by this bin.", + "kind": { + "description": "Type of dataset being inserted.", "allOf": [ { - "$ref": "#/components/schemas/BinRangeint32" + "$ref": "#/components/schemas/DatasetKind" } ] } }, "required": [ - "count", - "range" + "address", + "kind" ] }, - "Binint64": { - "description": "Type storing bin edges and a count of samples within it.", + "DiskIdentity": { + "description": "Uniquely identifies a disk.", "type": "object", "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 + "model": { + "type": "string" }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeint64" - } - ] + "serial": { + "type": "string" + }, + "vendor": { + "type": "string" } }, "required": [ - "count", - "range" + "model", + "serial", + "vendor" ] }, - "Binint8": { - "description": "Type storing bin edges and a count of samples within it.", + "DiskRuntimeState": { + "description": "Runtime state of the Disk, which includes its attach state and some minimal metadata", "type": "object", "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", + "disk_state": { + "description": "runtime state of the Disk", "allOf": [ { - "$ref": "#/components/schemas/BinRangeint8" + "$ref": "#/components/schemas/DiskState" } ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binuint16": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 }, - "range": { - "description": "The range of the support covered by this bin.", + "gen": { + "description": "generation number for this state", "allOf": [ { - "$ref": "#/components/schemas/BinRangeuint16" + "$ref": "#/components/schemas/Generation" } ] + }, + "time_updated": { + "description": "timestamp for this information", + "type": "string", + "format": "date-time" } }, "required": [ - "count", - "range" + "disk_state", + "gen", + "time_updated" ] }, - "Binuint32": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeuint32" + "DiskState": { + "description": "State of a Disk", + "oneOf": [ + { + "description": "Disk is being initialized", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "creating" + ] } + }, + "required": [ + "state" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binuint64": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": 
"The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeuint64" + { + "description": "Disk is ready but detached from any Instance", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "detached" + ] } + }, + "required": [ + "state" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binuint8": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeuint8" + { + "description": "Disk is ready to receive blocks from an external source", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "import_ready" + ] } + }, + "required": [ + "state" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Blueprint": { - "description": "Describes a complete set of software and configuration for the system", - "type": "object", - "properties": { - "blueprint_zones": { - "description": "A map of sled id -> zones deployed on each sled, along with the [`BlueprintZoneDisposition`] for each zone.\n\nA sled is considered part of the control plane cluster iff it has an entry in this map.", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/BlueprintZonesConfig" - } }, - "comment": { - "description": "human-readable string describing why this blueprint was created (for debugging)", - "type": "string" - }, - "creator": { - "description": "identity of the component that generated the blueprint (for debugging) This would generally be the Uuid of a Nexus instance.", - "type": "string" - }, - "external_dns_version": { - "description": "external DNS version when thi blueprint was created", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" + { + "description": "Disk is importing blocks from a URL", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "importing_from_url" + ] } + }, + "required": [ + "state" ] }, - "id": { - "description": "unique identifier for this blueprint", - "type": "string", - "format": "uuid" - }, - "internal_dns_version": { - "description": "internal DNS version when this blueprint was created", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" + { + "description": "Disk is importing blocks from bulk writes", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "importing_from_bulk_writes" + ] } + }, + "required": [ + "state" ] }, - "parent_blueprint_id": { - "nullable": true, - "description": "which blueprint this blueprint is based on", - "type": "string", - "format": "uuid" + { + "description": "Disk is being finalized to state Detached", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "finalizing" + ] + } + }, + "required": [ + "state" + ] }, - "time_created": { - "description": "when this blueprint was generated (for debugging)", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "blueprint_zones", - "comment", - "creator", - "external_dns_version", - "id", - "internal_dns_version", - "time_created" - ] - }, - "BlueprintMetadata": { - 
"description": "Describe high-level metadata about a blueprint", - "type": "object", - "properties": { - "comment": { - "description": "human-readable string describing why this blueprint was created (for debugging)", - "type": "string" + { + "description": "Disk is undergoing maintenance", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "maintenance" + ] + } + }, + "required": [ + "state" + ] }, - "creator": { - "description": "identity of the component that generated the blueprint (for debugging) This would generally be the Uuid of a Nexus instance.", - "type": "string" + { + "description": "Disk is being attached to the given Instance", + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { + "type": "string", + "enum": [ + "attaching" + ] + } + }, + "required": [ + "instance", + "state" + ] }, - "external_dns_version": { - "description": "external DNS version when this blueprint was created", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" + { + "description": "Disk is attached to the given Instance", + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { + "type": "string", + "enum": [ + "attached" + ] } + }, + "required": [ + "instance", + "state" ] }, - "id": { - "description": "unique identifier for this blueprint", - "type": "string", - "format": "uuid" + { + "description": "Disk is being detached from the given Instance", + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { + "type": "string", + "enum": [ + "detaching" + ] + } + }, + "required": [ + "instance", + "state" + ] }, - "internal_dns_version": { - "description": "internal DNS version when this blueprint was created", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" + { + "description": "Disk has been destroyed", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "destroyed" + ] } + }, + "required": [ + "state" ] }, - "parent_blueprint_id": { - "nullable": true, - "description": "which blueprint this blueprint is based on", - "type": "string", - "format": "uuid" + { + "description": "Disk is unavailable", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "faulted" + ] + } + }, + "required": [ + "state" + ] + } + ] + }, + "DnsConfigParams": { + "description": "DnsConfigParams\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"generation\", \"time_created\", \"zones\" ], \"properties\": { \"generation\": { \"type\": \"integer\", \"format\": \"uint64\", \"minimum\": 0.0 }, \"time_created\": { \"type\": \"string\", \"format\": \"date-time\" }, \"zones\": { \"type\": \"array\", \"items\": { \"$ref\": \"#/components/schemas/DnsConfigZone\" } } } } ```
", + "type": "object", + "properties": { + "generation": { + "type": "integer", + "format": "uint64", + "minimum": 0 }, "time_created": { - "description": "when this blueprint was generated (for debugging)", "type": "string", "format": "date-time" + }, + "zones": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DnsConfigZone" + } } }, "required": [ - "comment", - "creator", - "external_dns_version", - "id", - "internal_dns_version", - "time_created" + "generation", + "time_created", + "zones" ] }, - "BlueprintMetadataResultsPage": { - "description": "A single page of results", + "DnsConfigZone": { + "description": "DnsConfigZone\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"records\", \"zone_name\" ], \"properties\": { \"records\": { \"type\": \"object\", \"additionalProperties\": { \"type\": \"array\", \"items\": { \"$ref\": \"#/components/schemas/DnsRecord\" } } }, \"zone_name\": { \"type\": \"string\" } } } ```
", "type": "object", "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/BlueprintMetadata" + "records": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DnsRecord" + } } }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", + "zone_name": { "type": "string" } }, "required": [ - "items" + "records", + "zone_name" ] }, - "BlueprintTarget": { - "description": "Describes what blueprint, if any, the system is currently working toward", - "type": "object", - "properties": { - "enabled": { - "description": "policy: should the system actively work towards this blueprint\n\nThis should generally be left enabled.", - "type": "boolean" - }, - "target_id": { - "description": "id of the blueprint that the system is trying to make real", - "type": "string", - "format": "uuid" - }, - "time_made_target": { - "description": "when this blueprint was made the target", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "enabled", - "target_id", - "time_made_target" - ] - }, - "BlueprintTargetSet": { - "description": "Specifies what blueprint, if any, the system should be working toward", - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - }, - "target_id": { - "type": "string", - "format": "uuid" - } - }, - "required": [ - "enabled", - "target_id" - ] - }, - "BlueprintZoneConfig": { - "description": "Describes one Omicron-managed zone in a blueprint.\n\nThis is a wrapper around an [`OmicronZoneConfig`] that also includes a [`BlueprintZoneDisposition`].\n\nPart of [`BlueprintZonesConfig`].", - "type": "object", - "properties": { - "config": { - "description": "The underlying zone configuration.", - "allOf": [ - { - "$ref": "#/components/schemas/OmicronZoneConfig" + "DnsRecord": { + "description": "DnsRecord\n\n
JSON schema\n\n```json { \"oneOf\": [ { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"type\": \"string\", \"format\": \"ipv4\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"A\" ] } } }, { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"type\": \"string\", \"format\": \"ipv6\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"AAAA\" ] } } }, { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"$ref\": \"#/components/schemas/Srv\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"SRV\" ] } } } ] } ```
", + "oneOf": [ + { + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "ipv4" + }, + "type": { + "type": "string", + "enum": [ + "A" + ] } + }, + "required": [ + "data", + "type" ] }, - "disposition": { - "description": "The disposition (desired state) of this zone recorded in the blueprint.", - "allOf": [ - { - "$ref": "#/components/schemas/BlueprintZoneDisposition" - } - ] - } - }, - "required": [ - "config", - "disposition" - ] - }, - "BlueprintZoneDisposition": { - "description": "The desired state of an Omicron-managed zone in a blueprint.\n\nPart of [`BlueprintZoneConfig`].", - "oneOf": [ { - "description": "The zone is in-service.", - "type": "string", - "enum": [ - "in_service" + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "ipv6" + }, + "type": { + "type": "string", + "enum": [ + "AAAA" + ] + } + }, + "required": [ + "data", + "type" ] }, { - "description": "The zone is not in service.", - "type": "string", - "enum": [ - "quiesced" - ] - } - ] - }, - "BlueprintZonesConfig": { - "description": "Information about an Omicron zone as recorded in a blueprint.\n\nCurrently, this is similar to [`OmicronZonesConfig`], but also contains a per-zone [`BlueprintZoneDisposition`].\n\nPart of [`Blueprint`].", - "type": "object", - "properties": { - "generation": { - "description": "Generation number of this configuration.\n\nThis generation number is owned by the control plane. See [`OmicronZonesConfig::generation`] for more details.", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" + "type": "object", + "properties": { + "data": { + "$ref": "#/components/schemas/Srv" + }, + "type": { + "type": "string", + "enum": [ + "SRV" + ] } + }, + "required": [ + "data", + "type" ] - }, - "zones": { - "description": "The list of running zones.", - "type": "array", - "items": { - "$ref": "#/components/schemas/BlueprintZoneConfig" - } } - }, - "required": [ - "generation", - "zones" ] }, - "ByteCount": { - "description": "Byte count to express memory or storage capacity.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "Certificate": { + "DownstairsClientStopRequest": { "type": "object", "properties": { - "cert": { - "type": "string" + "reason": { + "$ref": "#/components/schemas/DownstairsClientStopRequestReason" }, - "key": { - "type": "string" + "time": { + "type": "string", + "format": "date-time" } }, "required": [ - "cert", - "key" + "reason", + "time" ] }, - "CollectionId": { - "type": "object", - "properties": { - "collection_id": { - "type": "string", - "format": "uuid" - } - }, - "required": [ - "collection_id" + "DownstairsClientStopRequestReason": { + "type": "string", + "enum": [ + "replacing", + "disabled", + "failed_reconcile", + "i_o_error", + "bad_negotiation_order", + "incompatible", + "failed_live_repair", + "too_many_outstanding_jobs", + "deactivated" ] }, - "Cumulativedouble": { - "description": "A cumulative or counter data type.", + "DownstairsClientStopped": { "type": "object", "properties": { - "start_time": { + "reason": { + "$ref": "#/components/schemas/DownstairsClientStoppedReason" + }, + "time": { "type": "string", "format": "date-time" - }, - "value": { - "type": "number", - "format": "double" } }, "required": [ - "start_time", - "value" + "reason", + "time" + ] + }, + "DownstairsClientStoppedReason": { + "type": "string", + "enum": [ + "connection_timeout", + "connection_failed", + "timeout", + "write_failed", + "read_failed", + "requested_stop", + "finished", + "queue_closed", + 
"receive_task_cancelled" ] }, - "Cumulativefloat": { - "description": "A cumulative or counter data type.", + "DownstairsUnderRepair": { "type": "object", "properties": { - "start_time": { - "type": "string", - "format": "date-time" + "region_uuid": { + "$ref": "#/components/schemas/TypedUuidForDownstairsRegionKind" }, - "value": { - "type": "number", - "format": "float" + "target_addr": { + "type": "string" } }, "required": [ - "start_time", - "value" + "region_uuid", + "target_addr" ] }, - "Cumulativeint64": { - "description": "A cumulative or counter data type.", + "Duration": { "type": "object", "properties": { - "start_time": { - "type": "string", - "format": "date-time" + "nanos": { + "type": "integer", + "format": "uint32", + "minimum": 0 }, - "value": { + "secs": { "type": "integer", - "format": "int64" + "format": "uint64", + "minimum": 0 } }, "required": [ - "start_time", - "value" + "nanos", + "secs" ] }, - "Cumulativeuint64": { - "description": "A cumulative or counter data type.", + "Error": { + "description": "Error information from a response.", "type": "object", "properties": { - "start_time": { - "type": "string", - "format": "date-time" + "error_code": { + "type": "string" }, - "value": { - "type": "integer", - "format": "uint64", - "minimum": 0 + "message": { + "type": "string" + }, + "request_id": { + "type": "string" } }, "required": [ - "start_time", - "value" + "message", + "request_id" ] }, - "CurrentStatus": { - "description": "Describes the current status of a background task", + "ExternalPortDiscovery": { "oneOf": [ { - "description": "The background task is not running\n\nTypically, the task would be waiting for its next activation, which would happen after a timeout or some other event that triggers activation", "type": "object", "properties": { - "current_status": { - "type": "string", - "enum": [ - "idle" - ] - } - }, + "auto": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "ipv6" + } + } + }, "required": [ - "current_status" + "auto" + ], + "additionalProperties": false + }, + { + "type": "object", + "properties": { + "static": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Name" + } + } + } + }, + "required": [ + "static" + ], + "additionalProperties": false + } + ] + }, + "Generation": { + "description": "Generation numbers stored in the database, used for optimistic concurrency control", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "ImportExportPolicy": { + "description": "Define policy relating to the import and export of prefixes from a BGP peer.", + "oneOf": [ + { + "description": "Do not perform any filtering.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "no_filtering" + ] + } + }, + "required": [ + "type" ] }, { - "description": "The background task is currently running\n\nMore precisely, the task has been activated and has not yet finished this activation", "type": "object", "properties": { - "current_status": { + "type": { "type": "string", "enum": [ - "running" + "allow" ] }, - "details": { - "$ref": "#/components/schemas/CurrentStatusRunning" + "value": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } } }, "required": [ - "current_status", - "details" + "type", + "value" ] } ] }, - "CurrentStatusRunning": { + "InstanceRuntimeState": { + "description": "The dynamic runtime properties of an instance: its current VMM ID (if any), migration information (if any), 
and the instance state to report if there is no active VMM.", "type": "object", "properties": { - "iteration": { - "description": "which iteration this was (counter)", - "type": "integer", - "format": "uint64", - "minimum": 0 + "dst_propolis_id": { + "nullable": true, + "description": "If a migration is active, the ID of the target VMM.", + "type": "string", + "format": "uuid" }, - "reason": { - "description": "what kind of event triggered this activation", + "gen": { + "description": "Generation number for this state.", "allOf": [ { - "$ref": "#/components/schemas/ActivationReason" + "$ref": "#/components/schemas/Generation" } ] }, - "start_time": { - "description": "wall-clock time when the current activation started", + "migration_id": { + "nullable": true, + "description": "If a migration is active, the ID of that migration.", + "type": "string", + "format": "uuid" + }, + "propolis_id": { + "nullable": true, + "description": "The instance's currently active VMM ID.", + "type": "string", + "format": "uuid" + }, + "time_updated": { + "description": "Timestamp for this information.", "type": "string", "format": "date-time" } }, "required": [ - "iteration", - "reason", - "start_time" + "gen", + "time_updated" ] }, - "DatasetCreateRequest": { - "type": "object", - "properties": { - "dataset_id": { + "InstanceState": { + "description": "Running state of an Instance (primarily: booted or stopped)\n\nThis typically reflects whether it's starting, running, stopping, or stopped, but also includes states related to the Instance's lifecycle", + "oneOf": [ + { + "description": "The instance is being created.", "type": "string", - "format": "uuid" + "enum": [ + "creating" + ] }, - "request": { - "$ref": "#/components/schemas/DatasetPutRequest" + { + "description": "The instance is currently starting up.", + "type": "string", + "enum": [ + "starting" + ] }, - "zpool_id": { + { + "description": "The instance is currently running.", "type": "string", - "format": "uuid" + "enum": [ + "running" + ] + }, + { + "description": "The instance has been requested to stop and a transition to \"Stopped\" is imminent.", + "type": "string", + "enum": [ + "stopping" + ] + }, + { + "description": "The instance is currently stopped.", + "type": "string", + "enum": [ + "stopped" + ] + }, + { + "description": "The instance is in the process of rebooting - it will remain in the \"rebooting\" state until the VM is starting once more.", + "type": "string", + "enum": [ + "rebooting" + ] + }, + { + "description": "The instance is in the process of migrating - it will remain in the \"migrating\" state until the migration process is complete and the destination propolis is ready to continue execution.", + "type": "string", + "enum": [ + "migrating" + ] + }, + { + "description": "The instance is attempting to recover from a failure.", + "type": "string", + "enum": [ + "repairing" + ] + }, + { + "description": "The instance has encountered a failure.", + "type": "string", + "enum": [ + "failed" + ] + }, + { + "description": "The instance has been deleted.", + "type": "string", + "enum": [ + "destroyed" + ] } - }, - "required": [ - "dataset_id", - "request", - "zpool_id" ] }, - "DatasetKind": { - "description": "Describes the purpose of the dataset.", + "IpKind": { "type": "string", "enum": [ - "crucible", - "cockroach", - "clickhouse", - "clickhouse_keeper", - "external_dns", - "internal_dns" + "snat", + "floating", + "ephemeral" ] }, - "DatasetPutRequest": { - "description": "Describes a dataset within a pool.", - "type": 
"object", - "properties": { - "address": { - "description": "Address on which a service is responding to requests for the dataset.", - "type": "string" + "IpNet": { + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::IpNet", + "version": "0.1.0" + }, + "oneOf": [ + { + "title": "v4", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Net" + } + ] }, - "kind": { - "description": "Type of dataset being inserted.", + { + "title": "v6", "allOf": [ { - "$ref": "#/components/schemas/DatasetKind" + "$ref": "#/components/schemas/Ipv6Net" } ] } - }, - "required": [ - "address", - "kind" ] }, - "Datum": { - "description": "A `Datum` is a single sampled data point from a metric.", + "IpRange": { "oneOf": [ { - "type": "object", - "properties": { - "datum": { - "type": "boolean" - }, - "type": { - "type": "string", - "enum": [ - "bool" - ] + "title": "v4", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Range" } - }, - "required": [ - "datum", - "type" ] }, { - "type": "object", - "properties": { - "datum": { - "type": "integer", - "format": "int8" - }, - "type": { - "type": "string", - "enum": [ - "i8" - ] + "title": "v6", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Range" } - }, - "required": [ - "datum", - "type" ] + } + ] + }, + "Ipv4NatEntryView": { + "description": "NAT Record", + "type": "object", + "properties": { + "deleted": { + "type": "boolean" + }, + "external_address": { + "type": "string", + "format": "ipv4" + }, + "first_port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "gen": { + "type": "integer", + "format": "int64" + }, + "last_port": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "mac": { + "$ref": "#/components/schemas/MacAddr" + }, + "sled_address": { + "type": "string", + "format": "ipv6" + }, + "vni": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "deleted", + "external_address", + "first_port", + "gen", + "last_port", + "mac", + "sled_address", + "vni" + ] + }, + "Ipv4Net": { + "example": "192.168.1.0/24", + "title": "An IPv4 subnet", + "description": "An IPv4 subnet, including prefix and prefix length", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + }, + "type": "string", + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" + }, + "Ipv4Range": { + "description": "A non-decreasing IPv4 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", + "type": "object", + "properties": { + "first": { + "type": "string", + "format": "ipv4" + }, + "last": { + "type": "string", + "format": "ipv4" + } + }, + "required": [ + "first", + "last" + ] + }, + "Ipv6Net": { + "example": "fd12:3456::/64", + "title": "An IPv6 subnet", + "description": "An IPv6 subnet, including prefix and subnet mask", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + }, + "type": "string", + "pattern": 
"^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" + }, + "Ipv6Range": { + "description": "A non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", + "type": "object", + "properties": { + "first": { + "type": "string", + "format": "ipv6" }, + "last": { + "type": "string", + "format": "ipv6" + } + }, + "required": [ + "first", + "last" + ] + }, + "LastResult": { + "oneOf": [ { + "description": "The task has never completed an activation", "type": "object", "properties": { - "datum": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "type": { + "last_result": { "type": "string", "enum": [ - "u8" + "never_completed" ] } }, "required": [ - "datum", - "type" + "last_result" ] }, { + "description": "The task has completed at least one activation", "type": "object", "properties": { - "datum": { - "type": "integer", - "format": "int16" + "details": { + "$ref": "#/components/schemas/LastResultCompleted" }, - "type": { + "last_result": { "type": "string", "enum": [ - "i16" + "completed" ] } }, "required": [ - "datum", - "type" + "details", + "last_result" + ] + } + ] + }, + "LastResultCompleted": { + "type": "object", + "properties": { + "details": { + "description": "arbitrary datum emitted by the background task" + }, + "elapsed": { + "description": "total time elapsed during the activation", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "iteration": { + "description": "which iteration this was (counter)", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "reason": { + "description": "what kind of event triggered this activation", + "allOf": [ + { + "$ref": "#/components/schemas/ActivationReason" + } ] }, + "start_time": { + "description": "wall-clock time when the activation started", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "details", + "elapsed", + "iteration", + "reason", + "start_time" + ] + }, + "MacAddr": { + "example": "ff:ff:ff:ff:ff:ff", + "title": "A MAC address", + "description": "A Media Access Control address, in EUI-48 format", + "type": "string", + "pattern": "^([0-9a-fA-F]{0,2}:){5}[0-9a-fA-F]{0,2}$", + "minLength": 5, + "maxLength": 17 + }, + "Name": { + "title": "A name unique within the parent collection", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. 
Names cannot be a UUID though they may contain a UUID.", + "type": "string", + "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", + "minLength": 1, + "maxLength": 63 + }, + "NetworkInterface": { + "description": "Information required to construct a virtual network interface", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "$ref": "#/components/schemas/NetworkInterfaceKind" + }, + "mac": { + "$ref": "#/components/schemas/MacAddr" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "primary": { + "type": "boolean" + }, + "slot": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "subnet": { + "$ref": "#/components/schemas/IpNet" + }, + "vni": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "id", + "ip", + "kind", + "mac", + "name", + "primary", + "slot", + "subnet", + "vni" + ] + }, + "NetworkInterfaceKind": { + "description": "The type of network interface", + "oneOf": [ { + "description": "A vNIC attached to a guest instance", "type": "object", "properties": { - "datum": { - "type": "integer", - "format": "uint16", - "minimum": 0 + "id": { + "type": "string", + "format": "uuid" }, "type": { "type": "string", "enum": [ - "u16" + "instance" ] } }, "required": [ - "datum", + "id", "type" ] }, { + "description": "A vNIC associated with an internal service", "type": "object", "properties": { - "datum": { - "type": "integer", - "format": "int32" + "id": { + "type": "string", + "format": "uuid" }, "type": { "type": "string", "enum": [ - "i32" + "service" ] } }, "required": [ - "datum", + "id", "type" ] }, { + "description": "A vNIC associated with a probe", "type": "object", "properties": { - "datum": { - "type": "integer", - "format": "uint32", - "minimum": 0 + "id": { + "type": "string", + "format": "uuid" }, "type": { "type": "string", "enum": [ - "u32" + "probe" ] } }, "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "integer", - "format": "int64" - }, - "type": { - "type": "string", - "enum": [ - "i64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "u64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "number", - "format": "float" - }, - "type": { - "type": "string", - "enum": [ - "f32" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "number", - "format": "double" - }, - "type": { - "type": "string", - "enum": [ - "f64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "string" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "array", - "items": { - "type": "integer", - "format": "uint8", - "minimum": 0 - } - }, - "type": { - "type": "string", - "enum": [ - "bytes" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativeint64" - }, - "type": { - "type": "string", - "enum": [ - 
"cumulative_i64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativeuint64" - }, - "type": { - "type": "string", - "enum": [ - "cumulative_u64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativefloat" - }, - "type": { - "type": "string", - "enum": [ - "cumulative_f32" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativedouble" - }, - "type": { - "type": "string", - "enum": [ - "cumulative_f64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint8" - }, - "type": { - "type": "string", - "enum": [ - "histogram_i8" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramuint8" - }, - "type": { - "type": "string", - "enum": [ - "histogram_u8" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint16" - }, - "type": { - "type": "string", - "enum": [ - "histogram_i16" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramuint16" - }, - "type": { - "type": "string", - "enum": [ - "histogram_u16" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint32" - }, - "type": { - "type": "string", - "enum": [ - "histogram_i32" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramuint32" - }, - "type": { - "type": "string", - "enum": [ - "histogram_u32" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint64" - }, - "type": { - "type": "string", - "enum": [ - "histogram_i64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramuint64" - }, - "type": { - "type": "string", - "enum": [ - "histogram_u64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramfloat" - }, - "type": { - "type": "string", - "enum": [ - "histogram_f32" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramdouble" - }, - "type": { - "type": "string", - "enum": [ - "histogram_f64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/MissingDatum" - }, - "type": { - "type": "string", - "enum": [ - "missing" - ] - } - }, - "required": [ - "datum", - "type" - ] - } - ] - }, - "DatumType": { - "description": "The type of an individual datum of a metric.", - "type": "string", - "enum": [ - "bool", - "i8", - "u8", - "i16", - "u16", - "i32", - "u32", - "i64", - "u64", - "f32", - "f64", - "string", - "bytes", - "cumulative_i64", - "cumulative_u64", - 
"cumulative_f32", - "cumulative_f64", - "histogram_i8", - "histogram_u8", - "histogram_i16", - "histogram_u16", - "histogram_i32", - "histogram_u32", - "histogram_i64", - "histogram_u64", - "histogram_f32", - "histogram_f64" - ] - }, - "DiskRuntimeState": { - "description": "Runtime state of the Disk, which includes its attach state and some minimal metadata", - "type": "object", - "properties": { - "disk_state": { - "description": "runtime state of the Disk", - "allOf": [ - { - "$ref": "#/components/schemas/DiskState" - } - ] - }, - "gen": { - "description": "generation number for this state", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "time_updated": { - "description": "timestamp for this information", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "disk_state", - "gen", - "time_updated" - ] - }, - "DiskState": { - "description": "State of a Disk", - "oneOf": [ - { - "description": "Disk is being initialized", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "creating" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is ready but detached from any Instance", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "detached" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is ready to receive blocks from an external source", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "import_ready" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is importing blocks from a URL", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "importing_from_url" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is importing blocks from bulk writes", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "importing_from_bulk_writes" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is being finalized to state Detached", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "finalizing" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is undergoing maintenance", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "maintenance" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is being attached to the given Instance", - "type": "object", - "properties": { - "instance": { - "type": "string", - "format": "uuid" - }, - "state": { - "type": "string", - "enum": [ - "attaching" - ] - } - }, - "required": [ - "instance", - "state" - ] - }, - { - "description": "Disk is attached to the given Instance", - "type": "object", - "properties": { - "instance": { - "type": "string", - "format": "uuid" - }, - "state": { - "type": "string", - "enum": [ - "attached" - ] - } - }, - "required": [ - "instance", - "state" - ] - }, - { - "description": "Disk is being detached from the given Instance", - "type": "object", - "properties": { - "instance": { - "type": "string", - "format": "uuid" - }, - "state": { - "type": "string", - "enum": [ - "detaching" - ] - } - }, - "required": [ - "instance", - "state" - ] - }, - { - "description": "Disk has been destroyed", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "destroyed" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is unavailable", - "type": "object", - 
"properties": { - "state": { - "type": "string", - "enum": [ - "faulted" - ] - } - }, - "required": [ - "state" - ] - } - ] - }, - "DnsConfigParams": { - "description": "DnsConfigParams\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"generation\", \"time_created\", \"zones\" ], \"properties\": { \"generation\": { \"type\": \"integer\", \"format\": \"uint64\", \"minimum\": 0.0 }, \"time_created\": { \"type\": \"string\", \"format\": \"date-time\" }, \"zones\": { \"type\": \"array\", \"items\": { \"$ref\": \"#/components/schemas/DnsConfigZone\" } } } } ```
", - "type": "object", - "properties": { - "generation": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "time_created": { - "type": "string", - "format": "date-time" - }, - "zones": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DnsConfigZone" - } - } - }, - "required": [ - "generation", - "time_created", - "zones" - ] - }, - "DnsConfigZone": { - "description": "DnsConfigZone\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"records\", \"zone_name\" ], \"properties\": { \"records\": { \"type\": \"object\", \"additionalProperties\": { \"type\": \"array\", \"items\": { \"$ref\": \"#/components/schemas/DnsRecord\" } } }, \"zone_name\": { \"type\": \"string\" } } } ```
", - "type": "object", - "properties": { - "records": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DnsRecord" - } - } - }, - "zone_name": { - "type": "string" - } - }, - "required": [ - "records", - "zone_name" - ] - }, - "DnsRecord": { - "description": "DnsRecord\n\n
JSON schema\n\n```json { \"oneOf\": [ { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"type\": \"string\", \"format\": \"ipv4\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"A\" ] } } }, { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"type\": \"string\", \"format\": \"ipv6\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"AAAA\" ] } } }, { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"$ref\": \"#/components/schemas/Srv\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"SRV\" ] } } } ] } ```
", - "oneOf": [ - { - "type": "object", - "properties": { - "data": { - "type": "string", - "format": "ipv4" - }, - "type": { - "type": "string", - "enum": [ - "A" - ] - } - }, - "required": [ - "data", - "type" - ] - }, - { - "type": "object", - "properties": { - "data": { - "type": "string", - "format": "ipv6" - }, - "type": { - "type": "string", - "enum": [ - "AAAA" - ] - } - }, - "required": [ - "data", - "type" - ] - }, - { - "type": "object", - "properties": { - "data": { - "$ref": "#/components/schemas/Srv" - }, - "type": { - "type": "string", - "enum": [ - "SRV" - ] - } - }, - "required": [ - "data", - "type" - ] - } - ] - }, - "DownstairsClientStopRequest": { - "type": "object", - "properties": { - "reason": { - "$ref": "#/components/schemas/DownstairsClientStopRequestReason" - }, - "time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "reason", - "time" - ] - }, - "DownstairsClientStopRequestReason": { - "type": "string", - "enum": [ - "replacing", - "disabled", - "failed_reconcile", - "i_o_error", - "bad_negotiation_order", - "incompatible", - "failed_live_repair", - "too_many_outstanding_jobs", - "deactivated" - ] - }, - "DownstairsClientStopped": { - "type": "object", - "properties": { - "reason": { - "$ref": "#/components/schemas/DownstairsClientStoppedReason" - }, - "time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "reason", - "time" - ] - }, - "DownstairsClientStoppedReason": { - "type": "string", - "enum": [ - "connection_timeout", - "connection_failed", - "timeout", - "write_failed", - "read_failed", - "requested_stop", - "finished", - "queue_closed", - "receive_task_cancelled" - ] - }, - "DownstairsUnderRepair": { - "type": "object", - "properties": { - "region_uuid": { - "$ref": "#/components/schemas/TypedUuidForDownstairsRegionKind" - }, - "target_addr": { - "type": "string" - } - }, - "required": [ - "region_uuid", - "target_addr" - ] - }, - "Duration": { - "type": "object", - "properties": { - "nanos": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "secs": { - "type": "integer", - "format": "uint64", - "minimum": 0 - } - }, - "required": [ - "nanos", - "secs" - ] - }, - "Error": { - "description": "Error information from a response.", - "type": "object", - "properties": { - "error_code": { - "type": "string" - }, - "message": { - "type": "string" - }, - "request_id": { - "type": "string" - } - }, - "required": [ - "message", - "request_id" - ] - }, - "ExternalPortDiscovery": { - "oneOf": [ - { - "type": "object", - "properties": { - "auto": { - "type": "object", - "additionalProperties": { - "type": "string", - "format": "ipv6" - } - } - }, - "required": [ - "auto" - ], - "additionalProperties": false - }, - { - "type": "object", - "properties": { - "static": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Name" - } - } - } - }, - "required": [ - "static" - ], - "additionalProperties": false - } - ] - }, - "Field": { - "description": "A `Field` is a named aspect of a target or metric.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "value": { - "$ref": "#/components/schemas/FieldValue" - } - }, - "required": [ - "name", - "value" - ] - }, - "FieldSet": { - "type": "object", - "properties": { - "fields": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/Field" - } - }, - "name": { - "type": "string" - } - }, - "required": [ - "fields", - "name" - ] - }, - 
"FieldValue": { - "description": "The `FieldValue` contains the value of a target or metric field.", - "oneOf": [ - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "string" - ] - }, - "value": { - "type": "string" - } - }, - "required": [ - "type", - "value" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "i8" - ] - }, - "value": { - "type": "integer", - "format": "int8" - } - }, - "required": [ - "type", - "value" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "u8" - ] - }, - "value": { - "type": "integer", - "format": "uint8", - "minimum": 0 - } - }, - "required": [ - "type", - "value" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "i16" - ] - }, - "value": { - "type": "integer", - "format": "int16" - } - }, - "required": [ - "type", - "value" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "u16" - ] - }, - "value": { - "type": "integer", - "format": "uint16", - "minimum": 0 - } - }, - "required": [ - "type", - "value" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "i32" - ] - }, - "value": { - "type": "integer", - "format": "int32" - } - }, - "required": [ - "type", - "value" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "u32" - ] - }, - "value": { - "type": "integer", - "format": "uint32", - "minimum": 0 - } - }, - "required": [ - "type", - "value" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "i64" - ] - }, - "value": { - "type": "integer", - "format": "int64" - } - }, - "required": [ - "type", - "value" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "u64" - ] - }, - "value": { - "type": "integer", - "format": "uint64", - "minimum": 0 - } - }, - "required": [ - "type", - "value" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "ip_addr" - ] - }, - "value": { - "type": "string", - "format": "ip" - } - }, - "required": [ - "type", - "value" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "uuid" - ] - }, - "value": { - "type": "string", - "format": "uuid" - } - }, - "required": [ - "type", - "value" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "bool" - ] - }, - "value": { - "type": "boolean" - } - }, - "required": [ - "type", - "value" - ] - } - ] - }, - "Generation": { - "description": "Generation numbers stored in the database, used for optimistic concurrency control", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "HistogramError": { - "description": "Errors related to constructing histograms or adding samples into them.", - "oneOf": [ - { - "description": "An attempt to construct a histogram with an empty set of bins.", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "empty_bins" - ] - } - }, - "required": [ - "type" - ] - }, - { - "description": "An attempt to construct a histogram with non-monotonic bins.", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "nonmonotonic_bins" - ] - } - }, - "required": [ - "type" - ] - }, - { - "description": "A non-finite was encountered, either as a bin edge or a sample.", - "type": 
"object", - "properties": { - "content": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "non_finite_value" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "description": "Error returned when two neighboring bins are not adjoining (there's space between them)", - "type": "object", - "properties": { - "content": { - "type": "object", - "properties": { - "left": { - "type": "string" - }, - "right": { - "type": "string" - } - }, - "required": [ - "left", - "right" - ] - }, - "type": { - "type": "string", - "enum": [ - "non_adjoining_bins" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "description": "Bin and count arrays are of different sizes.", - "type": "object", - "properties": { - "content": { - "type": "object", - "properties": { - "n_bins": { - "type": "integer", - "format": "uint", - "minimum": 0 - }, - "n_counts": { - "type": "integer", - "format": "uint", - "minimum": 0 - } - }, - "required": [ - "n_bins", - "n_counts" - ] - }, - "type": { - "type": "string", - "enum": [ - "array_size_mismatch" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "type": "object", - "properties": { - "content": { - "$ref": "#/components/schemas/QuantizationError" - }, - "type": { - "type": "string", - "enum": [ - "quantization" - ] - } - }, - "required": [ - "content", - "type" - ] - } - ] - }, - "Histogramdouble": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Bindouble" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" - ] - }, - "Histogramfloat": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binfloat" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" - ] - }, - "Histogramint16": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binint16" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" - ] - }, - "Histogramint32": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binint32" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" - ] - }, - "Histogramint64": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binint64" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" - ] - }, - "Histogramint8": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binint8" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" - ] - }, - "Histogramuint16": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binuint16" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" - ] - }, - "Histogramuint32": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binuint32" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" - ] - }, - "Histogramuint64": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binuint64" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" - ] - }, - "Histogramuint8": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binuint8" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" - ] - }, - "InstanceRuntimeState": { - "description": "The dynamic runtime properties of an instance: its current VMM ID (if any), migration information (if any), and the instance state to report if there is no active VMM.", - "type": "object", - "properties": { - "dst_propolis_id": { - "nullable": true, - "description": "If a migration is active, the ID of the target VMM.", - "type": "string", - "format": "uuid" - }, - "gen": { - "description": "Generation number for this state.", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "migration_id": { - "nullable": true, - "description": "If a migration is active, the ID of that migration.", - "type": "string", - "format": "uuid" - }, - "propolis_id": { - "nullable": true, - "description": "The instance's currently active VMM ID.", - "type": "string", - "format": "uuid" - }, - "time_updated": { - "description": "Timestamp for this information.", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "gen", - "time_updated" - ] - }, - "InstanceState": { - "description": "Running state of an Instance (primarily: booted or stopped)\n\nThis typically reflects whether it's starting, running, stopping, or stopped, but also includes states related to the Instance's lifecycle", - "oneOf": [ - { - "description": "The instance is being created.", - "type": "string", - "enum": [ - "creating" - ] - }, - { - "description": "The instance is currently starting up.", - "type": "string", - "enum": [ - "starting" - ] - }, - { - "description": "The instance is currently running.", - "type": "string", - "enum": [ - "running" - ] - }, - { - "description": "The instance has been requested to stop and a transition to \"Stopped\" is imminent.", - "type": "string", - "enum": [ - "stopping" - ] - }, - { - "description": "The instance is currently stopped.", - "type": "string", - "enum": [ - "stopped" - ] - }, - { - "description": "The instance is in the process of rebooting - it will remain in the \"rebooting\" state until the VM is starting once more.", - "type": "string", - "enum": [ - "rebooting" - ] - }, - { - "description": "The instance is in the process of migrating - it will remain in the \"migrating\" state until the migration process is complete and the destination propolis is ready to continue execution.", - "type": "string", - "enum": [ - "migrating" - ] - }, - { - "description": "The instance is attempting to recover from a failure.", - "type": "string", - "enum": [ - "repairing" - ] - }, - { - "description": "The instance has encountered a failure.", - "type": "string", - "enum": [ - "failed" - ] - }, - { - "description": "The instance has been deleted.", - "type": "string", - "enum": [ - "destroyed" - ] - } - ] - }, - "IpKind": { - "type": "string", - "enum": [ - "snat", - "floating", - "ephemeral" - ] - }, - "IpNet": { - "oneOf": [ - { - "title": "v4", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv4Net" - } - ] - }, - { - 
"title": "v6", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv6Net" - } - ] - } - ] - }, - "IpNetwork": { - "oneOf": [ - { - "title": "v4", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv4Network" - } - ] - }, - { - "title": "v6", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv6Network" - } - ] - } - ] - }, - "IpRange": { - "oneOf": [ - { - "title": "v4", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv4Range" - } - ] - }, - { - "title": "v6", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv6Range" - } - ] - } - ] - }, - "Ipv4NatEntryView": { - "description": "NAT Record", - "type": "object", - "properties": { - "deleted": { - "type": "boolean" - }, - "external_address": { - "type": "string", - "format": "ipv4" - }, - "first_port": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "gen": { - "type": "integer", - "format": "int64" - }, - "last_port": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "mac": { - "$ref": "#/components/schemas/MacAddr" - }, - "sled_address": { - "type": "string", - "format": "ipv6" - }, - "vni": { - "$ref": "#/components/schemas/Vni" - } - }, - "required": [ - "deleted", - "external_address", - "first_port", - "gen", - "last_port", - "mac", - "sled_address", - "vni" - ] - }, - "Ipv4Net": { - "example": "192.168.1.0/24", - "title": "An IPv4 subnet", - "description": "An IPv4 subnet, including prefix and subnet mask", - "type": "string", - "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" - }, - "Ipv4Network": { - "type": "string", - "pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\/(3[0-2]|[0-2]?[0-9])$" - }, - "Ipv4Range": { - "description": "A non-decreasing IPv4 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", - "type": "object", - "properties": { - "first": { - "type": "string", - "format": "ipv4" - }, - "last": { - "type": "string", - "format": "ipv4" - } - }, - "required": [ - "first", - "last" - ] - }, - "Ipv6Net": { - "example": "fd12:3456::/64", - "title": "An IPv6 subnet", - "description": "An IPv6 subnet, including prefix and subnet mask", - "type": "string", - "pattern": "^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" - }, - "Ipv6Network": { - "type": "string", - "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\")[/](12[0-8]|1[0-1][0-9]|[0-9]?[0-9])$" - }, - "Ipv6Range": { - "description": "A non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", - "type": "object", - "properties": { - "first": { - "type": "string", - "format": "ipv6" - }, - "last": { - "type": "string", - "format": "ipv6" - 
} - }, - "required": [ - "first", - "last" - ] - }, - "LastResult": { - "oneOf": [ - { - "description": "The task has never completed an activation", - "type": "object", - "properties": { - "last_result": { - "type": "string", - "enum": [ - "never_completed" - ] - } - }, - "required": [ - "last_result" - ] - }, - { - "description": "The task has completed at least one activation", - "type": "object", - "properties": { - "details": { - "$ref": "#/components/schemas/LastResultCompleted" - }, - "last_result": { - "type": "string", - "enum": [ - "completed" - ] - } - }, - "required": [ - "details", - "last_result" - ] - } - ] - }, - "LastResultCompleted": { - "type": "object", - "properties": { - "details": { - "description": "arbitrary datum emitted by the background task" - }, - "elapsed": { - "description": "total time elapsed during the activation", - "allOf": [ - { - "$ref": "#/components/schemas/Duration" - } - ] - }, - "iteration": { - "description": "which iteration this was (counter)", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "reason": { - "description": "what kind of event triggered this activation", - "allOf": [ - { - "$ref": "#/components/schemas/ActivationReason" - } - ] - }, - "start_time": { - "description": "wall-clock time when the activation started", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "details", - "elapsed", - "iteration", - "reason", - "start_time" - ] - }, - "MacAddr": { - "example": "ff:ff:ff:ff:ff:ff", - "title": "A MAC address", - "description": "A Media Access Control address, in EUI-48 format", - "type": "string", - "pattern": "^([0-9a-fA-F]{0,2}:){5}[0-9a-fA-F]{0,2}$", - "minLength": 5, - "maxLength": 17 - }, - "Measurement": { - "description": "A `Measurement` is a timestamped datum from a single metric", - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Datum" - }, - "timestamp": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "datum", - "timestamp" - ] - }, - "MetricsError": { - "description": "Errors related to the generation or collection of metrics.", - "oneOf": [ - { - "description": "An error related to generating metric data points", - "type": "object", - "properties": { - "content": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "datum_error" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "description": "An error running an `Oximeter` server", - "type": "object", - "properties": { - "content": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "oximeter_server" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "description": "An error related to creating or sampling a [`histogram::Histogram`] metric.", - "type": "object", - "properties": { - "content": { - "$ref": "#/components/schemas/HistogramError" - }, - "type": { - "type": "string", - "enum": [ - "histogram_error" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "description": "An error parsing a field or measurement from a string.", - "type": "object", - "properties": { - "content": { - "type": "object", - "properties": { - "src": { - "type": "string" - }, - "typ": { - "type": "string" - } - }, - "required": [ - "src", - "typ" - ] - }, - "type": { - "type": "string", - "enum": [ - "parse_error" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "description": "A field name is duplicated between the target and metric.", - "type": "object", - "properties": { - "content": { - 
"type": "object", - "properties": { - "name": { - "type": "string" - } - }, - "required": [ - "name" - ] - }, - "type": { - "type": "string", - "enum": [ - "duplicate_field_name" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "type": "object", - "properties": { - "content": { - "type": "object", - "properties": { - "datum_type": { - "$ref": "#/components/schemas/DatumType" - } - }, - "required": [ - "datum_type" - ] - }, - "type": { - "type": "string", - "enum": [ - "missing_datum_requires_start_time" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "type": "object", - "properties": { - "content": { - "type": "object", - "properties": { - "datum_type": { - "$ref": "#/components/schemas/DatumType" - } - }, - "required": [ - "datum_type" - ] - }, - "type": { - "type": "string", - "enum": [ - "missing_datum_cannot_have_start_time" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "invalid_timeseries_name" - ] - } - }, - "required": [ - "type" - ] - } - ] - }, - "MissingDatum": { - "type": "object", - "properties": { - "datum_type": { - "$ref": "#/components/schemas/DatumType" - }, - "start_time": { - "nullable": true, - "type": "string", - "format": "date-time" - } - }, - "required": [ - "datum_type" - ] - }, - "Name": { - "title": "A name unique within the parent collection", - "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", - "type": "string", - "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", - "minLength": 1, - "maxLength": 63 - }, - "NetworkInterface": { - "description": "Information required to construct a virtual network interface", - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "ip": { - "type": "string", - "format": "ip" - }, - "kind": { - "$ref": "#/components/schemas/NetworkInterfaceKind" - }, - "mac": { - "$ref": "#/components/schemas/MacAddr" - }, - "name": { - "$ref": "#/components/schemas/Name" - }, - "primary": { - "type": "boolean" - }, - "slot": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "subnet": { - "$ref": "#/components/schemas/IpNet" - }, - "vni": { - "$ref": "#/components/schemas/Vni" - } - }, - "required": [ - "id", - "ip", - "kind", - "mac", - "name", - "primary", - "slot", - "subnet", - "vni" - ] - }, - "NetworkInterfaceKind": { - "description": "The type of network interface", - "oneOf": [ - { - "description": "A vNIC attached to a guest instance", - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "type": { - "type": "string", - "enum": [ - "instance" - ] - } - }, - "required": [ - "id", - "type" - ] - }, - { - "description": "A vNIC associated with an internal service", - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "type": { - "type": "string", - "enum": [ - "service" - ] - } - }, - "required": [ - "id", - "type" - ] - }, - { - "description": "A vNIC associated with a probe", - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "type": { - "type": "string", - "enum": [ - "probe" - ] - } - }, - "required": [ - "id", - "type" - ] - } - ] - }, - "NewPasswordHash": { - "title": "A 
password hash in PHC string format", - "description": "Password hashes must be in PHC (Password Hashing Competition) string format. Passwords must be hashed with Argon2id. Password hashes may be rejected if the parameters appear not to be secure enough.", - "type": "string" - }, - "NodeName": { - "description": "Unique name for a saga [`Node`]\n\nEach node requires a string name that's unique within its DAG. The name is used to identify its output. Nodes that depend on a given node (either directly or indirectly) can access the node's output using its name.", - "type": "string" - }, - "OmicronZoneConfig": { - "description": "Describes one Omicron-managed zone running on a sled\n\n
JSON schema\n\n```json { \"description\": \"Describes one Omicron-managed zone running on a sled\", \"type\": \"object\", \"required\": [ \"id\", \"underlay_address\", \"zone_type\" ], \"properties\": { \"id\": { \"type\": \"string\", \"format\": \"uuid\" }, \"underlay_address\": { \"type\": \"string\", \"format\": \"ipv6\" }, \"zone_type\": { \"$ref\": \"#/components/schemas/OmicronZoneType\" } } } ```
", - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "underlay_address": { - "type": "string", - "format": "ipv6" - }, - "zone_type": { - "$ref": "#/components/schemas/OmicronZoneType" - } - }, - "required": [ - "id", - "underlay_address", - "zone_type" - ] - }, - "OmicronZoneDataset": { - "description": "Describes a persistent ZFS dataset associated with an Omicron zone\n\n
JSON schema\n\n```json { \"description\": \"Describes a persistent ZFS dataset associated with an Omicron zone\", \"type\": \"object\", \"required\": [ \"pool_name\" ], \"properties\": { \"pool_name\": { \"$ref\": \"#/components/schemas/ZpoolName\" } } } ```
", - "type": "object", - "properties": { - "pool_name": { - "$ref": "#/components/schemas/ZpoolName" - } - }, - "required": [ - "pool_name" - ] - }, - "OmicronZoneType": { - "description": "Describes what kind of zone this is (i.e., what component is running in it) as well as any type-specific configuration\n\n
JSON schema\n\n```json { \"description\": \"Describes what kind of zone this is (i.e., what component is running in it) as well as any type-specific configuration\", \"oneOf\": [ { \"type\": \"object\", \"required\": [ \"address\", \"dns_servers\", \"nic\", \"ntp_servers\", \"snat_cfg\", \"type\" ], \"properties\": { \"address\": { \"type\": \"string\" }, \"dns_servers\": { \"type\": \"array\", \"items\": { \"type\": \"string\", \"format\": \"ip\" } }, \"domain\": { \"type\": [ \"string\", \"null\" ] }, \"nic\": { \"description\": \"The service vNIC providing outbound connectivity using OPTE.\", \"allOf\": [ { \"$ref\": \"#/components/schemas/NetworkInterface\" } ] }, \"ntp_servers\": { \"type\": \"array\", \"items\": { \"type\": \"string\" } }, \"snat_cfg\": { \"description\": \"The SNAT configuration for outbound connections.\", \"allOf\": [ { \"$ref\": \"#/components/schemas/SourceNatConfig\" } ] }, \"type\": { \"type\": \"string\", \"enum\": [ \"boundary_ntp\" ] } } }, { \"type\": \"object\", \"required\": [ \"address\", \"dataset\", \"type\" ], \"properties\": { \"address\": { \"type\": \"string\" }, \"dataset\": { \"$ref\": \"#/components/schemas/OmicronZoneDataset\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"clickhouse\" ] } } }, { \"type\": \"object\", \"required\": [ \"address\", \"dataset\", \"type\" ], \"properties\": { \"address\": { \"type\": \"string\" }, \"dataset\": { \"$ref\": \"#/components/schemas/OmicronZoneDataset\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"clickhouse_keeper\" ] } } }, { \"type\": \"object\", \"required\": [ \"address\", \"dataset\", \"type\" ], \"properties\": { \"address\": { \"type\": \"string\" }, \"dataset\": { \"$ref\": \"#/components/schemas/OmicronZoneDataset\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"cockroach_db\" ] } } }, { \"type\": \"object\", \"required\": [ \"address\", \"dataset\", \"type\" ], \"properties\": { \"address\": { \"type\": \"string\" }, \"dataset\": { \"$ref\": \"#/components/schemas/OmicronZoneDataset\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"crucible\" ] } } }, { \"type\": \"object\", \"required\": [ \"address\", \"type\" ], \"properties\": { \"address\": { \"type\": \"string\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"crucible_pantry\" ] } } }, { \"type\": \"object\", \"required\": [ \"dataset\", \"dns_address\", \"http_address\", \"nic\", \"type\" ], \"properties\": { \"dataset\": { \"$ref\": \"#/components/schemas/OmicronZoneDataset\" }, \"dns_address\": { \"description\": \"The address at which the external DNS server is reachable.\", \"type\": \"string\" }, \"http_address\": { \"description\": \"The address at which the external DNS server API is reachable.\", \"type\": \"string\" }, \"nic\": { \"description\": \"The service vNIC providing external connectivity using OPTE.\", \"allOf\": [ { \"$ref\": \"#/components/schemas/NetworkInterface\" } ] }, \"type\": { \"type\": \"string\", \"enum\": [ \"external_dns\" ] } } }, { \"type\": \"object\", \"required\": [ \"dataset\", \"dns_address\", \"gz_address\", \"gz_address_index\", \"http_address\", \"type\" ], \"properties\": { \"dataset\": { \"$ref\": \"#/components/schemas/OmicronZoneDataset\" }, \"dns_address\": { \"type\": \"string\" }, \"gz_address\": { \"description\": \"The addresses in the global zone which should be created\\n\\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.\", \"type\": \"string\", \"format\": 
\"ipv6\" }, \"gz_address_index\": { \"description\": \"The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.\", \"type\": \"integer\", \"format\": \"uint32\", \"minimum\": 0.0 }, \"http_address\": { \"type\": \"string\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"internal_dns\" ] } } }, { \"type\": \"object\", \"required\": [ \"address\", \"dns_servers\", \"ntp_servers\", \"type\" ], \"properties\": { \"address\": { \"type\": \"string\" }, \"dns_servers\": { \"type\": \"array\", \"items\": { \"type\": \"string\", \"format\": \"ip\" } }, \"domain\": { \"type\": [ \"string\", \"null\" ] }, \"ntp_servers\": { \"type\": \"array\", \"items\": { \"type\": \"string\" } }, \"type\": { \"type\": \"string\", \"enum\": [ \"internal_ntp\" ] } } }, { \"type\": \"object\", \"required\": [ \"external_dns_servers\", \"external_ip\", \"external_tls\", \"internal_address\", \"nic\", \"type\" ], \"properties\": { \"external_dns_servers\": { \"description\": \"External DNS servers Nexus can use to resolve external hosts.\", \"type\": \"array\", \"items\": { \"type\": \"string\", \"format\": \"ip\" } }, \"external_ip\": { \"description\": \"The address at which the external nexus server is reachable.\", \"type\": \"string\", \"format\": \"ip\" }, \"external_tls\": { \"description\": \"Whether Nexus's external endpoint should use TLS\", \"type\": \"boolean\" }, \"internal_address\": { \"description\": \"The address at which the internal nexus server is reachable.\", \"type\": \"string\" }, \"nic\": { \"description\": \"The service vNIC providing external connectivity using OPTE.\", \"allOf\": [ { \"$ref\": \"#/components/schemas/NetworkInterface\" } ] }, \"type\": { \"type\": \"string\", \"enum\": [ \"nexus\" ] } } }, { \"type\": \"object\", \"required\": [ \"address\", \"type\" ], \"properties\": { \"address\": { \"type\": \"string\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"oximeter\" ] } } } ] } ```
", - "oneOf": [ - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "dns_servers": { - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "domain": { - "nullable": true, - "type": "string" - }, - "nic": { - "description": "The service vNIC providing outbound connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/components/schemas/NetworkInterface" - } - ] - }, - "ntp_servers": { - "type": "array", - "items": { - "type": "string" - } - }, - "snat_cfg": { - "description": "The SNAT configuration for outbound connections.", - "allOf": [ - { - "$ref": "#/components/schemas/SourceNatConfig" - } - ] - }, - "type": { - "type": "string", - "enum": [ - "boundary_ntp" - ] - } - }, - "required": [ - "address", - "dns_servers", - "nic", - "ntp_servers", - "snat_cfg", - "type" - ] - }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "type": { - "type": "string", - "enum": [ - "clickhouse" - ] - } - }, - "required": [ - "address", - "dataset", - "type" - ] - }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "type": { - "type": "string", - "enum": [ - "clickhouse_keeper" - ] - } - }, - "required": [ - "address", - "dataset", - "type" - ] - }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "type": { - "type": "string", - "enum": [ - "cockroach_db" - ] - } - }, - "required": [ - "address", - "dataset", - "type" - ] - }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "type": { - "type": "string", - "enum": [ - "crucible" - ] - } - }, - "required": [ - "address", - "dataset", - "type" - ] - }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "crucible_pantry" - ] - } - }, - "required": [ - "address", - "type" - ] - }, - { - "type": "object", - "properties": { - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "dns_address": { - "description": "The address at which the external DNS server is reachable.", - "type": "string" - }, - "http_address": { - "description": "The address at which the external DNS server API is reachable.", - "type": "string" - }, - "nic": { - "description": "The service vNIC providing external connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/components/schemas/NetworkInterface" - } - ] - }, - "type": { - "type": "string", - "enum": [ - "external_dns" - ] - } - }, - "required": [ - "dataset", - "dns_address", - "http_address", - "nic", - "type" - ] - }, - { - "type": "object", - "properties": { - "dataset": { - "$ref": "#/components/schemas/OmicronZoneDataset" - }, - "dns_address": { - "type": "string" - }, - "gz_address": { - "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", - "type": "string", - "format": "ipv6" - }, - "gz_address_index": { - "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", - "type": "integer", - 
"format": "uint32", - "minimum": 0 - }, - "http_address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "internal_dns" - ] - } - }, - "required": [ - "dataset", - "dns_address", - "gz_address", - "gz_address_index", - "http_address", - "type" - ] - }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "dns_servers": { - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "domain": { - "nullable": true, - "type": "string" - }, - "ntp_servers": { - "type": "array", - "items": { - "type": "string" - } - }, - "type": { - "type": "string", - "enum": [ - "internal_ntp" - ] - } - }, - "required": [ - "address", - "dns_servers", - "ntp_servers", - "type" - ] - }, - { - "type": "object", - "properties": { - "external_dns_servers": { - "description": "External DNS servers Nexus can use to resolve external hosts.", - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "external_ip": { - "description": "The address at which the external nexus server is reachable.", - "type": "string", - "format": "ip" - }, - "external_tls": { - "description": "Whether Nexus's external endpoint should use TLS", - "type": "boolean" - }, - "internal_address": { - "description": "The address at which the internal nexus server is reachable.", - "type": "string" - }, - "nic": { - "description": "The service vNIC providing external connectivity using OPTE.", - "allOf": [ - { - "$ref": "#/components/schemas/NetworkInterface" - } - ] - }, - "type": { - "type": "string", - "enum": [ - "nexus" - ] - } - }, - "required": [ - "external_dns_servers", - "external_ip", - "external_tls", - "internal_address", - "nic", + "id", "type" ] + } + ] + }, + "NewPasswordHash": { + "title": "A password hash in PHC string format", + "description": "Password hashes must be in PHC (Password Hashing Competition) string format. Passwords must be hashed with Argon2id. Password hashes may be rejected if the parameters appear not to be secure enough.", + "type": "string" + }, + "NodeName": { + "description": "Unique name for a saga [`Node`]\n\nEach node requires a string name that's unique within its DAG. The name is used to identify its output. Nodes that depend on a given node (either directly or indirectly) can access the node's output using its name.", + "type": "string" + }, + "OmicronPhysicalDiskConfig": { + "description": "OmicronPhysicalDiskConfig\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"id\", \"identity\", \"pool_id\" ], \"properties\": { \"id\": { \"type\": \"string\", \"format\": \"uuid\" }, \"identity\": { \"$ref\": \"#/components/schemas/DiskIdentity\" }, \"pool_id\": { \"$ref\": \"#/components/schemas/TypedUuidForZpoolKind\" } } } ```
", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" }, - { - "type": "object", - "properties": { - "address": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "oximeter" - ] + "identity": { + "$ref": "#/components/schemas/DiskIdentity" + }, + "pool_id": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + }, + "required": [ + "id", + "identity", + "pool_id" + ] + }, + "OmicronPhysicalDisksConfig": { + "description": "OmicronPhysicalDisksConfig\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"disks\", \"generation\" ], \"properties\": { \"disks\": { \"type\": \"array\", \"items\": { \"$ref\": \"#/components/schemas/OmicronPhysicalDiskConfig\" } }, \"generation\": { \"description\": \"generation number of this configuration\\n\\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). It should not be bumped within Sled Agent.\\n\\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.\", \"allOf\": [ { \"$ref\": \"#/components/schemas/Generation\" } ] } } } ```
", + "type": "object", + "properties": { + "disks": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OmicronPhysicalDiskConfig" + } + }, + "generation": { + "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" } - }, - "required": [ - "address", - "type" ] } + }, + "required": [ + "disks", + "generation" ] }, - "OximeterInfo": { - "description": "Message used to notify Nexus that this oximeter instance is up and running.", + "OmicronZoneDataset": { + "description": "Describes a persistent ZFS dataset associated with an Omicron zone\n\n
JSON schema\n\n```json { \"description\": \"Describes a persistent ZFS dataset associated with an Omicron zone\", \"type\": \"object\", \"required\": [ \"pool_name\" ], \"properties\": { \"pool_name\": { \"$ref\": \"#/components/schemas/ZpoolName\" } } } ```
", "type": "object", "properties": { - "address": { - "description": "The address on which this oximeter instance listens for requests", + "pool_name": { + "$ref": "#/components/schemas/ZpoolName" + } + }, + "required": [ + "pool_name" + ] + }, + "OmicronZoneExternalFloatingAddr": { + "description": "Floating external address with port allocated to an Omicron-managed zone.", + "type": "object", + "properties": { + "addr": { "type": "string" }, - "collector_id": { - "description": "The ID for this oximeter instance.", + "id": { + "$ref": "#/components/schemas/TypedUuidForExternalIpKind" + } + }, + "required": [ + "addr", + "id" + ] + }, + "OmicronZoneExternalFloatingIp": { + "description": "Floating external IP allocated to an Omicron-managed zone.\n\nThis is a slimmer `nexus_db_model::ExternalIp` that only stores the fields necessary for blueprint planning, and requires that the zone have a single IP.", + "type": "object", + "properties": { + "id": { + "$ref": "#/components/schemas/TypedUuidForExternalIpKind" + }, + "ip": { "type": "string", - "format": "uuid" + "format": "ip" } }, "required": [ - "address", - "collector_id" + "id", + "ip" ] }, - "PhysicalDiskDeleteRequest": { + "OmicronZoneExternalSnatIp": { + "description": "SNAT (outbound) external IP allocated to an Omicron-managed zone.\n\nThis is a slimmer `nexus_db_model::ExternalIp` that only stores the fields necessary for blueprint planning, and requires that the zone have a single IP.", "type": "object", "properties": { - "model": { - "type": "string" + "id": { + "$ref": "#/components/schemas/TypedUuidForExternalIpKind" }, - "serial": { + "snat_cfg": { + "$ref": "#/components/schemas/SourceNatConfig" + } + }, + "required": [ + "id", + "snat_cfg" + ] + }, + "OximeterInfo": { + "description": "Message used to notify Nexus that this oximeter instance is up and running.", + "type": "object", + "properties": { + "address": { + "description": "The address on which this oximeter instance listens for requests", "type": "string" }, - "sled_id": { + "collector_id": { + "description": "The ID for this oximeter instance.", "type": "string", "format": "uuid" - }, - "vendor": { - "type": "string" } }, "required": [ - "model", - "serial", - "sled_id", - "vendor" + "address", + "collector_id" ] }, "PhysicalDiskKind": { @@ -5949,6 +3769,10 @@ "PhysicalDiskPutRequest": { "type": "object", "properties": { + "id": { + "type": "string", + "format": "uuid" + }, "model": { "type": "string" }, @@ -5967,6 +3791,7 @@ } }, "required": [ + "id", "model", "serial", "sled_id", @@ -5974,9 +3799,6 @@ "vendor" ] }, - "PhysicalDiskPutResponse": { - "type": "object" - }, "PortConfigV1": { "type": "object", "properties": { @@ -5984,7 +3806,7 @@ "description": "This port's addresses.", "type": "array", "items": { - "$ref": "#/components/schemas/IpNetwork" + "$ref": "#/components/schemas/IpNet" } }, "autoneg": { @@ -6137,10 +3959,6 @@ "description": "The IP address and port at which `oximeter` can collect metrics from the producer.", "type": "string" }, - "base_route": { - "description": "The API base route from which `oximeter` can collect metrics.\n\nThe full route is `{base_route}/{id}`.", - "type": "string" - }, "id": { "description": "A unique ID for this producer.", "type": "string", @@ -6165,12 +3983,32 @@ }, "required": [ "address", - "base_route", "id", "interval", "kind" ] }, + "ProducerEndpointResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of 
results", + "type": "array", + "items": { + "$ref": "#/components/schemas/ProducerEndpoint" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, "ProducerKind": { "description": "The kind of metric producer this is.", "oneOf": [ @@ -6178,160 +4016,53 @@ "description": "The producer is a sled-agent.", "type": "string", "enum": [ - "sled_agent" - ] - }, - { - "description": "The producer is an Omicron-managed service.", - "type": "string", - "enum": [ - "service" - ] - }, - { - "description": "The producer is a Propolis VMM managing a guest instance.", - "type": "string", - "enum": [ - "instance" - ] - } - ] - }, - "ProducerResultsItem": { - "oneOf": [ - { - "type": "object", - "properties": { - "info": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Sample" - } - }, - "status": { - "type": "string", - "enum": [ - "ok" - ] - } - }, - "required": [ - "info", - "status" - ] - }, - { - "type": "object", - "properties": { - "info": { - "$ref": "#/components/schemas/MetricsError" - }, - "status": { - "type": "string", - "enum": [ - "err" - ] - } - }, - "required": [ - "info", - "status" - ] - } - ] - }, - "QuantizationError": { - "description": "Errors occurring during quantizated bin generation.", - "oneOf": [ - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "overflow" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "precision" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "invalid_base" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "invalid_steps" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "uneven_steps_for_base" - ] - } - }, - "required": [ - "type" + "sled_agent" ] }, { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "powers_out_of_order" - ] + "description": "The producer is an Omicron-managed service.", + "type": "string", + "enum": [ + "service" + ] + }, + { + "description": "The producer is a Propolis VMM managing a guest instance.", + "type": "string", + "enum": [ + "instance" + ] + } + ] + }, + "ProducerRegistrationResponse": { + "description": "Response to a successful producer registration.", + "type": "object", + "properties": { + "lease_duration": { + "description": "Period within which producers must renew their lease.\n\nProducers are required to periodically re-register with Nexus, to ensure that they are still collected from by `oximeter`.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" } - }, - "required": [ - "type" ] } + }, + "required": [ + "lease_duration" ] }, "RackInitializationRequest": { "type": "object", "properties": { + "allowed_source_ips": { + "description": "IPs or subnets allowed to make requests to user-facing services", + "allOf": [ + { + "$ref": "#/components/schemas/AllowedSourceIps" + } + ] + }, "blueprint": { "description": "Blueprint describing services initialized by RSS.", "allOf": [ @@ -6381,6 +4112,13 @@ "$ref": "#/components/schemas/IpRange" } }, + "physical_disks": { + "description": "\"Managed\" physical disks owned by the control plane", + "type": "array", + "items": 
{ + "$ref": "#/components/schemas/PhysicalDiskPutRequest" + } + }, "rack_network_config": { "description": "Initial rack network configuration", "allOf": [ @@ -6397,15 +4135,16 @@ } ] }, - "services": { - "description": "Services on the rack which have been created by RSS.", + "zpools": { + "description": "Zpools created within the physical disks created by the control plane.", "type": "array", "items": { - "$ref": "#/components/schemas/ServicePutRequest" + "$ref": "#/components/schemas/ZpoolPutRequest" } } }, "required": [ + "allowed_source_ips", "blueprint", "certs", "datasets", @@ -6413,9 +4152,10 @@ "external_port_count", "internal_dns_zone_config", "internal_services_ip_pool_ranges", + "physical_disks", "rack_network_config", "recovery_silo", - "services" + "zpools" ] }, "RackNetworkConfigV1": { @@ -6455,7 +4195,7 @@ } }, "rack_subnet": { - "$ref": "#/components/schemas/Ipv6Network" + "$ref": "#/components/schemas/Ipv6Net" } }, "required": [ @@ -6482,679 +4222,336 @@ "required": [ "silo_name", "user_name", - "user_password_hash" - ] - }, - "RepairFinishInfo": { - "type": "object", - "properties": { - "aborted": { - "type": "boolean" - }, - "repair_id": { - "$ref": "#/components/schemas/TypedUuidForUpstairsRepairKind" - }, - "repair_type": { - "$ref": "#/components/schemas/UpstairsRepairType" - }, - "repairs": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DownstairsUnderRepair" - } - }, - "session_id": { - "$ref": "#/components/schemas/TypedUuidForUpstairsSessionKind" - }, - "time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "aborted", - "repair_id", - "repair_type", - "repairs", - "session_id", - "time" - ] - }, - "RepairProgress": { - "type": "object", - "properties": { - "current_item": { - "type": "integer", - "format": "int64" - }, - "time": { - "type": "string", - "format": "date-time" - }, - "total_items": { - "type": "integer", - "format": "int64" - } - }, - "required": [ - "current_item", - "time", - "total_items" - ] - }, - "RepairStartInfo": { - "type": "object", - "properties": { - "repair_id": { - "$ref": "#/components/schemas/TypedUuidForUpstairsRepairKind" - }, - "repair_type": { - "$ref": "#/components/schemas/UpstairsRepairType" - }, - "repairs": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DownstairsUnderRepair" - } - }, - "session_id": { - "$ref": "#/components/schemas/TypedUuidForUpstairsSessionKind" - }, - "time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "repair_id", - "repair_type", - "repairs", - "session_id", - "time" - ] - }, - "RouteConfig": { - "type": "object", - "properties": { - "destination": { - "description": "The destination of the route.", - "allOf": [ - { - "$ref": "#/components/schemas/IpNetwork" - } - ] - }, - "nexthop": { - "description": "The nexthop/gateway address.", - "type": "string", - "format": "ip" - } - }, - "required": [ - "destination", - "nexthop" - ] - }, - "Saga": { - "description": "Sagas\n\nThese are currently only intended for observability by developers. 
We will eventually want to flesh this out into something more observable for end users.", - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "state": { - "$ref": "#/components/schemas/SagaState" - } - }, - "required": [ - "id", - "state" - ] - }, - "SagaErrorInfo": { - "oneOf": [ - { - "type": "object", - "properties": { - "error": { - "type": "string", - "enum": [ - "action_failed" - ] - }, - "source_error": {} - }, - "required": [ - "error", - "source_error" - ] - }, - { - "type": "object", - "properties": { - "error": { - "type": "string", - "enum": [ - "deserialize_failed" - ] - }, - "message": { - "type": "string" - } - }, - "required": [ - "error", - "message" - ] - }, - { - "type": "object", - "properties": { - "error": { - "type": "string", - "enum": [ - "injected_error" - ] - } - }, - "required": [ - "error" - ] - }, - { - "type": "object", - "properties": { - "error": { - "type": "string", - "enum": [ - "serialize_failed" - ] - }, - "message": { - "type": "string" - } - }, - "required": [ - "error", - "message" - ] - }, - { - "type": "object", - "properties": { - "error": { - "type": "string", - "enum": [ - "subsaga_create_failed" - ] - }, - "message": { - "type": "string" - } - }, - "required": [ - "error", - "message" - ] - } + "user_password_hash" ] }, - "SagaResultsPage": { - "description": "A single page of results", + "RepairFinishInfo": { "type": "object", "properties": { - "items": { - "description": "list of items on this page of results", + "aborted": { + "type": "boolean" + }, + "repair_id": { + "$ref": "#/components/schemas/TypedUuidForUpstairsRepairKind" + }, + "repair_type": { + "$ref": "#/components/schemas/UpstairsRepairType" + }, + "repairs": { "type": "array", "items": { - "$ref": "#/components/schemas/Saga" + "$ref": "#/components/schemas/DownstairsUnderRepair" } }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" + "session_id": { + "$ref": "#/components/schemas/TypedUuidForUpstairsSessionKind" + }, + "time": { + "type": "string", + "format": "date-time" } }, "required": [ - "items" + "aborted", + "repair_id", + "repair_type", + "repairs", + "session_id", + "time" ] }, - "SagaState": { - "oneOf": [ - { - "description": "Saga is currently executing", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "running" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Saga completed successfully", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "succeeded" - ] - } - }, - "required": [ - "state" - ] + "RepairProgress": { + "type": "object", + "properties": { + "current_item": { + "type": "integer", + "format": "int64" }, - { - "description": "One or more saga actions failed and the saga was successfully unwound (i.e., undo actions were executed for any actions that were completed). The saga is no longer running.", - "type": "object", - "properties": { - "error_info": { - "$ref": "#/components/schemas/SagaErrorInfo" - }, - "error_node_name": { - "$ref": "#/components/schemas/NodeName" - }, - "state": { - "type": "string", - "enum": [ - "failed" - ] - } - }, - "required": [ - "error_info", - "error_node_name", - "state" - ] + "time": { + "type": "string", + "format": "date-time" }, - { - "description": "One or more saga actions failed, *and* one or more undo actions failed during unwinding. State managed by the saga may now be inconsistent. 
Support may be required to repair the state. The saga is no longer running.", - "type": "object", - "properties": { - "error_info": { - "$ref": "#/components/schemas/SagaErrorInfo" - }, - "error_node_name": { - "$ref": "#/components/schemas/NodeName" - }, - "state": { - "type": "string", - "enum": [ - "stuck" - ] - }, - "undo_error_node_name": { - "$ref": "#/components/schemas/NodeName" - }, - "undo_source_error": {} - }, - "required": [ - "error_info", - "error_node_name", - "state", - "undo_error_node_name", - "undo_source_error" - ] + "total_items": { + "type": "integer", + "format": "int64" } + }, + "required": [ + "current_item", + "time", + "total_items" ] }, - "Sample": { - "description": "A concrete type representing a single, timestamped measurement from a timeseries.", + "RepairStartInfo": { "type": "object", "properties": { - "measurement": { - "description": "The measured value of the metric at this sample", - "allOf": [ - { - "$ref": "#/components/schemas/Measurement" - } - ] + "repair_id": { + "$ref": "#/components/schemas/TypedUuidForUpstairsRepairKind" }, - "metric": { - "$ref": "#/components/schemas/FieldSet" + "repair_type": { + "$ref": "#/components/schemas/UpstairsRepairType" }, - "target": { - "$ref": "#/components/schemas/FieldSet" + "repairs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DownstairsUnderRepair" + } }, - "timeseries_name": { - "description": "The name of the timeseries this sample belongs to", - "type": "string" + "session_id": { + "$ref": "#/components/schemas/TypedUuidForUpstairsSessionKind" + }, + "time": { + "type": "string", + "format": "date-time" } }, "required": [ - "measurement", - "metric", - "target", - "timeseries_name" + "repair_id", + "repair_type", + "repairs", + "session_id", + "time" ] }, - "ServiceKind": { - "description": "Describes the purpose of the service.", - "oneOf": [ - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "clickhouse_keeper" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "cockroach" - ] + "RouteConfig": { + "type": "object", + "properties": { + "destination": { + "description": "The destination of the route.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" } - }, - "required": [ - "type" ] }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "crucible" - ] - } - }, - "required": [ - "type" - ] + "nexthop": { + "description": "The nexthop/gateway address.", + "type": "string", + "format": "ip" }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "crucible_pantry" - ] - } - }, - "required": [ - "type" - ] + "vlan_id": { + "nullable": true, + "description": "The VLAN id associated with this route.", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "destination", + "nexthop" + ] + }, + "Saga": { + "description": "Sagas\n\nThese are currently only intended for observability by developers. 
We will eventually want to flesh this out into something more observable for end users.", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" }, + "state": { + "$ref": "#/components/schemas/SagaState" + } + }, + "required": [ + "id", + "state" + ] + }, + "SagaErrorInfo": { + "oneOf": [ { "type": "object", "properties": { - "content": { - "type": "object", - "properties": { - "external_address": { - "type": "string", - "format": "ip" - }, - "nic": { - "$ref": "#/components/schemas/ServiceNic" - } - }, - "required": [ - "external_address", - "nic" - ] - }, - "type": { + "error": { "type": "string", "enum": [ - "external_dns" + "action_failed" ] - } + }, + "source_error": {} }, "required": [ - "content", - "type" + "error", + "source_error" ] }, { "type": "object", "properties": { - "type": { + "error": { "type": "string", "enum": [ - "internal_dns" + "deserialize_failed" ] + }, + "message": { + "type": "string" } }, "required": [ - "type" + "error", + "message" ] }, { "type": "object", "properties": { - "content": { - "type": "object", - "properties": { - "external_address": { - "type": "string", - "format": "ip" - }, - "nic": { - "$ref": "#/components/schemas/ServiceNic" - } - }, - "required": [ - "external_address", - "nic" - ] - }, - "type": { + "error": { "type": "string", "enum": [ - "nexus" + "injected_error" ] } }, "required": [ - "content", - "type" + "error" ] }, { "type": "object", "properties": { - "type": { + "error": { "type": "string", "enum": [ - "oximeter" + "serialize_failed" ] + }, + "message": { + "type": "string" } }, "required": [ - "type" + "error", + "message" ] }, { "type": "object", "properties": { - "type": { + "error": { "type": "string", "enum": [ - "dendrite" + "subsaga_create_failed" ] + }, + "message": { + "type": "string" } }, "required": [ - "type" + "error", + "message" ] + } + ] + }, + "SagaResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Saga" + } }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SagaState": { + "oneOf": [ { + "description": "Saga is currently executing", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "tfport" + "running" ] } }, "required": [ - "type" + "state" ] }, { + "description": "Saga completed successfully", "type": "object", "properties": { - "content": { - "type": "object", - "properties": { - "nic": { - "$ref": "#/components/schemas/ServiceNic" - }, - "snat": { - "$ref": "#/components/schemas/SourceNatConfig" - } - }, - "required": [ - "nic", - "snat" - ] - }, - "type": { + "state": { "type": "string", "enum": [ - "boundary_ntp" + "succeeded" ] } }, "required": [ - "content", - "type" + "state" ] }, { + "description": "One or more saga actions failed and the saga was successfully unwound (i.e., undo actions were executed for any actions that were completed). 
The saga is no longer running.", "type": "object", "properties": { - "type": { + "error_info": { + "$ref": "#/components/schemas/SagaErrorInfo" + }, + "error_node_name": { + "$ref": "#/components/schemas/NodeName" + }, + "state": { "type": "string", "enum": [ - "internal_ntp" + "failed" ] } }, "required": [ - "type" + "error_info", + "error_node_name", + "state" ] }, { + "description": "One or more saga actions failed, *and* one or more undo actions failed during unwinding. State managed by the saga may now be inconsistent. Support may be required to repair the state. The saga is no longer running.", "type": "object", "properties": { - "type": { + "error_info": { + "$ref": "#/components/schemas/SagaErrorInfo" + }, + "error_node_name": { + "$ref": "#/components/schemas/NodeName" + }, + "state": { "type": "string", "enum": [ - "mgd" + "stuck" ] - } + }, + "undo_error_node_name": { + "$ref": "#/components/schemas/NodeName" + }, + "undo_source_error": {} }, "required": [ - "type" - ] - } - ] - }, - "ServiceNic": { - "description": "Describes the RSS allocated values for a service vnic", - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uuid" - }, - "ip": { - "type": "string", - "format": "ip" - }, - "mac": { - "$ref": "#/components/schemas/MacAddr" - }, - "name": { - "$ref": "#/components/schemas/Name" - }, - "slot": { - "type": "integer", - "format": "uint8", - "minimum": 0 - } - }, - "required": [ - "id", - "ip", - "mac", - "name", - "slot" - ] - }, - "ServicePutRequest": { - "description": "Describes a service on a sled", - "type": "object", - "properties": { - "address": { - "description": "Address on which a service is responding to requests.", - "type": "string" - }, - "kind": { - "description": "Type of service being inserted.", - "allOf": [ - { - "$ref": "#/components/schemas/ServiceKind" - } + "error_info", + "error_node_name", + "state", + "undo_error_node_name", + "undo_source_error" ] - }, - "service_id": { - "type": "string", - "format": "uuid" - }, - "sled_id": { - "type": "string", - "format": "uuid" - }, - "zone_id": { - "nullable": true, - "type": "string", - "format": "uuid" } - }, - "required": [ - "address", - "kind", - "service_id", - "sled_id" ] }, "SledAgentInfo": { @@ -7227,6 +4624,17 @@ "usable_physical_ram" ] }, + "SledId": { + "type": "object", + "properties": { + "id": { + "$ref": "#/components/schemas/TypedUuidForSledKind" + } + }, + "required": [ + "id" + ] + }, "SledInstanceState": { "description": "A wrapper type containing a sled's total knowledge of the state of a specific VMM and the instance it incarnates.", "type": "object", @@ -7354,6 +4762,25 @@ "sled" ] }, + "SledState": { + "description": "The current state of the sled, as determined by Nexus.", + "oneOf": [ + { + "description": "The sled is currently active, and has resources allocated on it.", + "type": "string", + "enum": [ + "active" + ] + }, + { + "description": "The sled has been permanently removed from service.\n\nThis is a terminal state: once a particular sled ID is decommissioned, it will never return to service. 
(The actual hardware may be reused, but it will be treated as a brand-new sled.)", + "type": "string", + "enum": [ + "decommissioned" + ] + } + ] + }, "SourceNatConfig": { "description": "An IP address and port range used for source NAT, i.e., making outbound network connections from guests or services.", "type": "object", @@ -7454,6 +4881,18 @@ "type": "string", "format": "uuid" }, + "TypedUuidForExternalIpKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForOmicronZoneKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForSledKind": { + "type": "string", + "format": "uuid" + }, "TypedUuidForUpstairsRepairKind": { "type": "string", "format": "uuid" @@ -7462,6 +4901,10 @@ "type": "string", "format": "uuid" }, + "TypedUuidForZpoolKind": { + "type": "string", + "format": "uuid" + }, "UninitializedSled": { "description": "A sled that has not been added to an initialized rack yet", "type": "object", @@ -7576,41 +5019,34 @@ "minimum": 0 }, "ZpoolName": { - "description": "Zpool names are of the format ox{i,p}_. They are either Internal or External, and should be unique\n\n
JSON schema\n\n```json { \"title\": \"The name of a Zpool\", \"description\": \"Zpool names are of the format ox{i,p}_. They are either Internal or External, and should be unique\", \"type\": \"string\", \"pattern\": \"^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$\" } ```
", - "type": "string" + "title": "The name of a Zpool", + "description": "Zpool names are of the format ox{i,p}_. They are either Internal or External, and should be unique", + "type": "string", + "pattern": "^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" }, "ZpoolPutRequest": { - "description": "Sent by a sled agent on startup to Nexus to request further instruction", + "description": "Identifies information about a Zpool that should be part of the control plane.", "type": "object", "properties": { - "disk_model": { - "type": "string" - }, - "disk_serial": { - "type": "string" + "id": { + "type": "string", + "format": "uuid" }, - "disk_vendor": { - "type": "string" + "physical_disk_id": { + "type": "string", + "format": "uuid" }, - "size": { - "description": "Total size of the pool.", - "allOf": [ - { - "$ref": "#/components/schemas/ByteCount" - } - ] + "sled_id": { + "type": "string", + "format": "uuid" } }, "required": [ - "disk_model", - "disk_serial", - "disk_vendor", - "size" + "id", + "physical_disk_id", + "sled_id" ] }, - "ZpoolPutResponse": { - "type": "object" - }, "SemverVersion": { "type": "string", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" @@ -7649,4 +5085,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/nexus.json b/openapi/nexus.json index 7d236de7a3..a0789aecde 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -7,7 +7,7 @@ "url": "https://oxide.computer", "email": "api@oxide.computer" }, - "version": "20240327.0" + "version": "20240502.0" }, "paths": { "/device/auth": { @@ -3089,7 +3089,8 @@ "tags": [ "metrics" ], - "summary": "Access metrics data", + "summary": "View metrics", + "description": "View CPU, memory, or storage utilization metrics at the silo or project level.", "operationId": "silo_metric", "parameters": [ { @@ -4113,6 +4114,45 @@ } } }, + "/v1/system/hardware/disks/{disk_id}": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "Get a physical disk", + "operationId": "physical_disk_view", + "parameters": [ + { + "in": "path", + "name": "disk_id", + "description": "ID of the physical disk", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PhysicalDisk" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/system/hardware/racks": { "get": { "tags": [ @@ -4286,8 +4326,15 @@ "required": true }, "responses": { - "204": { - "description": "resource updated" + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledId" + } + } + } }, "4XX": { "$ref": "#/components/responses/Error" @@ -4753,6 +4800,63 @@ } } }, + "/v1/system/hardware/switch-port/{port}/status": { + "get": { + "tags": [ + "system/hardware" + ], + "summary": "Get switch port status", + "operationId": "networking_switch_port_status", + "parameters": [ + { + "in": "path", + "name": "port", + "description": "A name to use when selecting switch ports.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + }, + { + "in": "query", + "name": "rack_id", + "description": "A rack id to use when 
selecting switch ports.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "switch_location", + "description": "A switch location to use when selecting switch ports.", + "required": true, + "schema": { + "$ref": "#/components/schemas/Name" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwitchLinkState" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/system/hardware/switches": { "get": { "tags": [ @@ -5915,7 +6019,8 @@ "tags": [ "system/metrics" ], - "summary": "Access metrics data", + "summary": "View metrics", + "description": "View CPU, memory, or storage utilization metrics at the fleet or silo level.", "operationId": "system_metric", "parameters": [ { @@ -6200,6 +6305,68 @@ } } }, + "/v1/system/networking/allow-list": { + "get": { + "tags": [ + "system/networking" + ], + "summary": "Get user-facing services IP allowlist", + "operationId": "networking_allow_list_view", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AllowList" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "system/networking" + ], + "summary": "Update user-facing services IP allowlist", + "operationId": "networking_allow_list_update", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AllowListUpdate" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AllowList" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/v1/system/networking/bfd-disable": { "post": { "tags": [ @@ -7890,6 +8057,100 @@ } } }, + "/v1/timeseries/query": { + "post": { + "tags": [ + "metrics" + ], + "summary": "Run timeseries query", + "description": "Queries are written in OxQL.", + "operationId": "timeseries_query", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TimeseriesQuery" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_Table", + "type": "array", + "items": { + "$ref": "#/components/schemas/Table" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/timeseries/schema": { + "get": { + "tags": [ + "metrics" + ], + "summary": "List timeseries schemas", + "operationId": "timeseries_schema_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + 
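The new `POST /v1/timeseries/query` operation above takes a `TimeseriesQuery` body (a single `query` string written in OxQL) and returns an array of `Table` objects. A hedged sketch of invoking it with `requests`; the Nexus base URL, the auth header, and the OxQL string itself are placeholders rather than values taken from this change:

```python
import requests

# Placeholder deployment address and credentials; the OxQL string is only
# an illustrative query, not one defined by this schema change.
NEXUS = "https://nexus.example.com"
HEADERS = {"Authorization": "Bearer <token>"}

body = {"query": "get some_target:some_metric"}
resp = requests.post(f"{NEXUS}/v1/timeseries/query", json=body, headers=HEADERS)
resp.raise_for_status()

tables = resp.json()  # list of Table objects, one per matching schema
for table in tables:
    print(table["name"], "->", len(table["timeseries"]), "timeseries")
```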
"content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TimeseriesSchemaResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, "/v1/users": { "get": { "tags": [ @@ -8988,26 +9249,114 @@ "switch_histories" ] }, - "Baseboard": { - "description": "Properties that uniquely identify an Oxide hardware component", + "AllowList": { + "description": "Allowlist of IPs or subnets that can make requests to user-facing services.", "type": "object", "properties": { - "part": { - "type": "string" + "allowed_ips": { + "description": "The allowlist of IPs or subnets.", + "allOf": [ + { + "$ref": "#/components/schemas/AllowedSourceIps" + } + ] }, - "revision": { - "type": "integer", - "format": "int64" + "time_created": { + "description": "Time the list was created.", + "type": "string", + "format": "date-time" }, - "serial": { - "type": "string" + "time_modified": { + "description": "Time the list was last modified.", + "type": "string", + "format": "date-time" } }, "required": [ - "part", - "revision", - "serial" - ] + "allowed_ips", + "time_created", + "time_modified" + ] + }, + "AllowListUpdate": { + "description": "Parameters for updating allowed source IPs", + "type": "object", + "properties": { + "allowed_ips": { + "description": "The new list of allowed source IPs.", + "allOf": [ + { + "$ref": "#/components/schemas/AllowedSourceIps" + } + ] + } + }, + "required": [ + "allowed_ips" + ] + }, + "AllowedSourceIps": { + "description": "Description of source IPs allowed to reach rack services.", + "oneOf": [ + { + "description": "Allow traffic from any external IP address.", + "type": "object", + "properties": { + "allow": { + "type": "string", + "enum": [ + "any" + ] + } + }, + "required": [ + "allow" + ] + }, + { + "description": "Restrict access to a specific set of source IP addresses or subnets.\n\nAll others are prevented from reaching rack services.", + "type": "object", + "properties": { + "allow": { + "type": "string", + "enum": [ + "list" + ] + }, + "ips": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + } + }, + "required": [ + "allow", + "ips" + ] + } + ] + }, + "Baseboard": { + "description": "Properties that uniquely identify an Oxide hardware component", + "type": "object", + "properties": { + "part": { + "type": "string" + }, + "revision": { + "type": "integer", + "format": "int64" + }, + "serial": { + "type": "string" + } + }, + "required": [ + "part", + "revision", + "serial" + ] }, "BfdMode": { "description": "BFD connection mode.", @@ -9442,11 +9791,19 @@ "type": "string", "format": "ip" }, - "bgp_announce_set": { - "description": "The set of announcements advertised by the peer.", + "allowed_export": { + "description": "Define export policy for a peer.", "allOf": [ { - "$ref": "#/components/schemas/NameOrId" + "$ref": "#/components/schemas/ImportExportPolicy" + } + ] + }, + "allowed_import": { + "description": "Define import policy for a peer.", + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" } ] }, @@ -9458,6 +9815,15 @@ } ] }, + "communities": { + "description": "Include the provided communities in updates sent to the peer.", + "type": "array", + "items": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, "connect_retry": { "description": "How long to to wait between TCP connection retries (seconds).", "type": "integer", @@ -9470,8 
+9836,12 @@ "format": "uint32", "minimum": 0 }, + "enforce_first_as": { + "description": "Enforce that the first AS in paths received from this peer is the peer's AS.", + "type": "boolean" + }, "hold_time": { - "description": "How long to hold peer connections between keppalives (seconds).", + "description": "How long to hold peer connections between keepalives (seconds).", "type": "integer", "format": "uint32", "minimum": 0 @@ -9491,14 +9861,57 @@ "type": "integer", "format": "uint32", "minimum": 0 + }, + "local_pref": { + "nullable": true, + "description": "Apply a local preference to routes received from this peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "md5_auth_key": { + "nullable": true, + "description": "Use the given key for TCP-MD5 authentication with the peer.", + "type": "string" + }, + "min_ttl": { + "nullable": true, + "description": "Require messages from a peer have a minimum IP time to live field.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "multi_exit_discriminator": { + "nullable": true, + "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "remote_asn": { + "nullable": true, + "description": "Require that a peer has a specified ASN.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "Associate a VLAN ID with a peer.", + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ "addr", - "bgp_announce_set", + "allowed_export", + "allowed_import", "bgp_config", + "communities", "connect_retry", "delay_open", + "enforce_first_as", "hold_time", "idle_hold_time", "interface_name", @@ -9523,7 +9936,7 @@ "description": "The current state of a BGP peer.", "oneOf": [ { - "description": "Initial state. Refuse all incomming BGP connections. No resources allocated to peer.", + "description": "Initial state. Refuse all incoming BGP connections. No resources allocated to peer.", "type": "string", "enum": [ "idle" @@ -9565,7 +9978,7 @@ ] }, { - "description": "Session established. Able to exchange update, notification and keepliave messages with peers.", + "description": "Session established. 
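Taken together, the `BgpPeer` additions above (import/export policy, communities, `enforce_first_as`, local preference, TCP-MD5 key, minimum TTL, MED, remote ASN, VLAN ID) let a peer's policy and security knobs be expressed as in the fragment below. The field names come from the schema; all concrete values are placeholders, and the pre-existing required fields (address, `bgp_config`, timers, interface name) are omitted for brevity:

```python
import json

# Illustrative fragment of a BgpPeer object exercising the newly added
# fields. Every value here is a placeholder; merge with the unchanged
# required peer fields to form a complete object.
peer_additions = {
    "allowed_import": {"type": "allow", "value": ["10.0.0.0/8"]},
    "allowed_export": {"type": "no_filtering"},
    "communities": [65010, 65020],
    "enforce_first_as": True,
    "local_pref": 200,
    "md5_auth_key": "example-secret",
    "min_ttl": 255,
    "multi_exit_discriminator": 100,
    "remote_asn": 65001,
    "vlan_id": 300,
}
print(json.dumps(peer_additions, indent=2))
```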
Able to exchange update, notification and keepalive messages with peers.", "type": "string", "enum": [ "established" @@ -11878,6 +12291,56 @@ } ] }, + "Distributiondouble": { + "description": "A distribution is a sequence of bins and counts in those bins.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "type": "number", + "format": "double" + } + }, + "counts": { + "type": "array", + "items": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + } + }, + "required": [ + "bins", + "counts" + ] + }, + "Distributionint64": { + "description": "A distribution is a sequence of bins and counts in those bins.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "type": "integer", + "format": "int64" + } + }, + "counts": { + "type": "array", + "items": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + } + }, + "required": [ + "bins", + "counts" + ] + }, "EphemeralIpCreate": { "description": "Parameters for creating an ephemeral IP address for an instance.", "type": "object", @@ -11957,6 +12420,11 @@ "type": "string", "format": "ip" }, + "ip_pool_id": { + "description": "The ID of the IP pool this resource belongs to.", + "type": "string", + "format": "uuid" + }, "kind": { "type": "string", "enum": [ @@ -11991,6 +12459,7 @@ "description", "id", "ip", + "ip_pool_id", "kind", "name", "project_id", @@ -12068,67 +12537,348 @@ "items" ] }, - "FinalizeDisk": { - "description": "Parameters for finalizing a disk", + "FieldSchema": { + "description": "The name and type information for a field of a timeseries schema.", "type": "object", "properties": { - "snapshot_name": { - "nullable": true, - "description": "If specified a snapshot of the disk will be created with the given name during finalization. If not specified, a snapshot for the disk will _not_ be created. A snapshot can be manually created once the disk transitions into the `Detached` state.", - "allOf": [ - { - "$ref": "#/components/schemas/Name" - } - ] + "field_type": { + "$ref": "#/components/schemas/FieldType" + }, + "name": { + "type": "string" + }, + "source": { + "$ref": "#/components/schemas/FieldSource" } - } + }, + "required": [ + "field_type", + "name", + "source" + ] }, - "FleetRole": { + "FieldSource": { + "description": "The source from which a field is derived, the target or metric.", "type": "string", "enum": [ - "admin", - "collaborator", - "viewer" + "target", + "metric" ] }, - "FleetRolePolicy": { - "description": "Policy for a particular resource\n\nNote that the Policy only describes access granted explicitly for this resource. The policies of parent resources can also cause a user to have access to this resource.", - "type": "object", - "properties": { - "role_assignments": { - "description": "Roles directly assigned on this resource", - "type": "array", - "items": { - "$ref": "#/components/schemas/FleetRoleRoleAssignment" - } - } - }, - "required": [ - "role_assignments" + "FieldType": { + "description": "The `FieldType` identifies the data type of a target or metric field.", + "type": "string", + "enum": [ + "string", + "i8", + "u8", + "i16", + "u16", + "i32", + "u32", + "i64", + "u64", + "ip_addr", + "uuid", + "bool" ] }, - "FleetRoleRoleAssignment": { - "description": "Describes the assignment of a particular role on a particular resource to a particular identity (user, group, etc.)\n\nThe resource is not part of this structure. 
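The `AllowList`, `AllowListUpdate`, and `AllowedSourceIps` schemas introduced a little earlier pair with the new `GET`/`PUT` `/v1/system/networking/allow-list` operations. A sketch of restricting the user-facing services allowlist to two subnets and reading the result back; the Nexus address, credentials, and subnets are placeholders:

```python
import requests

NEXUS = "https://nexus.example.com"            # placeholder deployment address
HEADERS = {"Authorization": "Bearer <token>"}  # placeholder credentials

# AllowedSourceIps is a tagged union: {"allow": "any"} or
# {"allow": "list", "ips": [...]}.
update = {"allowed_ips": {"allow": "list",
                          "ips": ["192.168.1.0/24", "10.1.0.0/16"]}}
resp = requests.put(f"{NEXUS}/v1/system/networking/allow-list",
                    json=update, headers=HEADERS)
resp.raise_for_status()

current = requests.get(f"{NEXUS}/v1/system/networking/allow-list",
                       headers=HEADERS).json()
print(current["allowed_ips"], current["time_modified"])
```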
Rather, `RoleAssignment`s are put into a `Policy` and that Policy is applied to a particular resource.", - "type": "object", - "properties": { - "identity_id": { - "type": "string", - "format": "uuid" + "FieldValue": { + "description": "The `FieldValue` contains the value of a target or metric field.", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "string" + ] + }, + "value": { + "type": "string" + } + }, + "required": [ + "type", + "value" + ] }, - "identity_type": { - "$ref": "#/components/schemas/IdentityType" + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i8" + ] + }, + "value": { + "type": "integer", + "format": "int8" + } + }, + "required": [ + "type", + "value" + ] }, - "role_name": { - "$ref": "#/components/schemas/FleetRole" - } - }, - "required": [ - "identity_id", - "identity_type", - "role_name" - ] - }, - "FloatingIp": { + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "u8" + ] + }, + "value": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i16" + ] + }, + "value": { + "type": "integer", + "format": "int16" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "u16" + ] + }, + "value": { + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i32" + ] + }, + "value": { + "type": "integer", + "format": "int32" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "u32" + ] + }, + "value": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i64" + ] + }, + "value": { + "type": "integer", + "format": "int64" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "u64" + ] + }, + "value": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip_addr" + ] + }, + "value": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "uuid" + ] + }, + "value": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "bool" + ] + }, + "value": { + "type": "boolean" + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, + "FinalizeDisk": { + "description": "Parameters for finalizing a disk", + "type": "object", + "properties": { + "snapshot_name": { + "nullable": true, + "description": "If specified a snapshot of the disk will be created with the given name during finalization. If not specified, a snapshot for the disk will _not_ be created. 
A snapshot can be manually created once the disk transitions into the `Detached` state.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, + "FleetRole": { + "type": "string", + "enum": [ + "admin", + "collaborator", + "viewer" + ] + }, + "FleetRolePolicy": { + "description": "Policy for a particular resource\n\nNote that the Policy only describes access granted explicitly for this resource. The policies of parent resources can also cause a user to have access to this resource.", + "type": "object", + "properties": { + "role_assignments": { + "description": "Roles directly assigned on this resource", + "type": "array", + "items": { + "$ref": "#/components/schemas/FleetRoleRoleAssignment" + } + } + }, + "required": [ + "role_assignments" + ] + }, + "FleetRoleRoleAssignment": { + "description": "Describes the assignment of a particular role on a particular resource to a particular identity (user, group, etc.)\n\nThe resource is not part of this structure. Rather, `RoleAssignment`s are put into a `Policy` and that Policy is applied to a particular resource.", + "type": "object", + "properties": { + "identity_id": { + "type": "string", + "format": "uuid" + }, + "identity_type": { + "$ref": "#/components/schemas/IdentityType" + }, + "role_name": { + "$ref": "#/components/schemas/FleetRole" + } + }, + "required": [ + "identity_id", + "identity_type", + "role_name" + ] + }, + "FloatingIp": { "description": "A Floating IP is a well-known IP address which can be attached and detached from instances.", "type": "object", "properties": { @@ -12152,6 +12902,11 @@ "type": "string", "format": "ip" }, + "ip_pool_id": { + "description": "The ID of the IP pool this resource belongs to.", + "type": "string", + "format": "uuid" + }, "name": { "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ @@ -12180,6 +12935,7 @@ "description", "id", "ip", + "ip_pool_id", "name", "project_id", "time_created", @@ -12927,6 +13683,47 @@ "offset" ] }, + "ImportExportPolicy": { + "description": "Define policy relating to the import and export of prefixes from a BGP peer.", + "oneOf": [ + { + "description": "Do not perform any filtering.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "no_filtering" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "allow" + ] + }, + "value": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, "Instance": { "description": "View of an Instance", "type": "object", @@ -13517,6 +14314,11 @@ ] }, "IpNet": { + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::IpNet", + "version": "0.1.0" + }, "oneOf": [ { "title": "v4", @@ -13798,7 +14600,12 @@ "Ipv4Net": { "example": "192.168.1.0/24", "title": "An IPv4 subnet", - "description": "An IPv4 subnet, including prefix and subnet mask", + "description": "An IPv4 subnet, including prefix and prefix length", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + }, "type": "string", "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" }, @@ -13845,8 +14652,13 @@ "example": "fd12:3456::/64", "title": "An IPv6 subnet", "description": "An IPv6 subnet, including prefix and subnet mask", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + 
"version": "0.1.0" + }, "type": "string", - "pattern": "^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" }, "Ipv6Range": { "description": "A non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", @@ -13945,7 +14757,7 @@ "description": "The forward error correction mode of a link.", "oneOf": [ { - "description": "Firecode foward error correction.", + "description": "Firecode forward error correction.", "type": "string", "enum": [ "firecode" @@ -14240,6 +15052,32 @@ "items" ] }, + "MetricType": { + "description": "The type of the metric itself, indicating what its values represent.", + "oneOf": [ + { + "description": "The value represents an instantaneous measurement in time.", + "type": "string", + "enum": [ + "gauge" + ] + }, + { + "description": "The value represents a difference between two points in time.", + "type": "string", + "enum": [ + "delta" + ] + }, + { + "description": "The value represents an accumulation between two points in time.", + "type": "string", + "enum": [ + "cumulative" + ] + } + ] + }, "MissingDatum": { "type": "object", "properties": { @@ -14419,6 +15257,14 @@ "model": { "type": "string" }, + "policy": { + "description": "The operator-defined policy for a physical disk.", + "allOf": [ + { + "$ref": "#/components/schemas/PhysicalDiskPolicy" + } + ] + }, "serial": { "type": "string" }, @@ -14428,6 +15274,14 @@ "type": "string", "format": "uuid" }, + "state": { + "description": "The current state Nexus believes the disk to be in.", + "allOf": [ + { + "$ref": "#/components/schemas/PhysicalDiskState" + } + ] + }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -14446,7 +15300,9 @@ "form_factor", "id", "model", + "policy", "serial", + "state", "time_created", "time_modified", "vendor" @@ -14460,6 +15316,41 @@ "u2" ] }, + "PhysicalDiskPolicy": { + "description": "The operator-defined policy of a physical disk.", + "oneOf": [ + { + "description": "The operator has indicated that the disk is in-service.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "in_service" + ] + } + }, + "required": [ + "kind" + ] + }, + { + "description": "The operator has indicated that the disk has been permanently removed from service.\n\nThis is a terminal state: once a particular disk ID is expunged, it will never return to service. 
(The actual hardware may be reused, but it will be treated as a brand-new disk.)\n\nAn expunged disk is always non-provisionable.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "expunged" + ] + } + }, + "required": [ + "kind" + ] + } + ] + }, "PhysicalDiskResultsPage": { "description": "A single page of results", "type": "object", @@ -14481,6 +15372,25 @@ "items" ] }, + "PhysicalDiskState": { + "description": "The current state of the disk, as determined by Nexus.", + "oneOf": [ + { + "description": "The disk is currently active, and has resources allocated on it.", + "type": "string", + "enum": [ + "active" + ] + }, + { + "description": "The disk has been permanently removed from service.\n\nThis is a terminal state: once a particular disk ID is decommissioned, it will never return to service. (The actual hardware may be reused, but it will be treated as a brand-new disk.)", + "type": "string", + "enum": [ + "decommissioned" + ] + } + ] + }, "Ping": { "type": "object", "properties": { @@ -14494,13 +15404,44 @@ } }, "required": [ - "status" - ] - }, - "PingStatus": { - "type": "string", - "enum": [ - "ok" + "status" + ] + }, + "PingStatus": { + "type": "string", + "enum": [ + "ok" + ] + }, + "Points": { + "description": "Timepoints and values for one timeseries.", + "type": "object", + "properties": { + "start_times": { + "nullable": true, + "type": "array", + "items": { + "type": "string", + "format": "date-time" + } + }, + "timestamps": { + "type": "array", + "items": { + "type": "string", + "format": "date-time" + } + }, + "values": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Values" + } + } + }, + "required": [ + "timestamps", + "values" ] }, "Probe": { @@ -15044,7 +15985,6 @@ "signing_keypair": { "nullable": true, "description": "request signing key pair", - "default": null, "allOf": [ { "$ref": "#/components/schemas/DerEncodedKeyPair" @@ -15608,6 +16548,19 @@ "usable_physical_ram" ] }, + "SledId": { + "description": "The unique ID of a sled.", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "id" + ] + }, "SledInstance": { "description": "An operator's view of an instance running on a given sled", "type": "object", @@ -16248,6 +17201,7 @@ } ] }, + "SwitchLinkState": {}, "SwitchLocation": { "description": "Identifies switch physical location", "oneOf": [ @@ -16354,37 +17308,6 @@ "port_settings" ] }, - "SwitchPortBgpPeerConfig": { - "description": "A BGP peer configuration for a port settings object.", - "type": "object", - "properties": { - "addr": { - "description": "The address of the peer.", - "type": "string", - "format": "ip" - }, - "bgp_config_id": { - "description": "The id of the global BGP configuration referenced by this peer configuration.", - "type": "string", - "format": "uuid" - }, - "interface_name": { - "description": "The interface name used to establish a peer session.", - "type": "string" - }, - "port_settings_id": { - "description": "The port settings object this BGP configuration belongs to.", - "type": "string", - "format": "uuid" - } - }, - "required": [ - "addr", - "bgp_config_id", - "interface_name", - "port_settings_id" - ] - }, "SwitchPortConfig": { "description": "A physical port configuration for a port settings object.", "type": "object", @@ -16481,6 +17404,18 @@ "description": "A link configuration for a port settings object.", "type": "object", "properties": { + "autoneg": { + "description": "Whether or not the link has 
autonegotiation enabled.", + "type": "boolean" + }, + "fec": { + "description": "The forward error correction mode of the link.", + "allOf": [ + { + "$ref": "#/components/schemas/LinkFec" + } + ] + }, "link_name": { "description": "The name of this link.", "type": "string" @@ -16500,13 +17435,24 @@ "description": "The port settings this link configuration belongs to.", "type": "string", "format": "uuid" + }, + "speed": { + "description": "The configured speed of the link.", + "allOf": [ + { + "$ref": "#/components/schemas/LinkSpeed" + } + ] } }, "required": [ + "autoneg", + "fec", "link_name", "lldp_service_config_id", "mtu", - "port_settings_id" + "port_settings_id", + "speed" ] }, "SwitchPortResultsPage": { @@ -16737,7 +17683,7 @@ "description": "BGP peer settings.", "type": "array", "items": { - "$ref": "#/components/schemas/SwitchPortBgpPeerConfig" + "$ref": "#/components/schemas/BgpPeer" } }, "groups": { @@ -16854,6 +17800,113 @@ "vlan_id" ] }, + "Table": { + "description": "A table represents one or more timeseries with the same schema.\n\nA table is the result of an OxQL query. It contains a name, usually the name of the timeseries schema from which the data is derived, and any number of timeseries, which contain the actual data.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "timeseries": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/Timeseries" + } + } + }, + "required": [ + "name", + "timeseries" + ] + }, + "Timeseries": { + "description": "A timeseries contains a timestamped set of values from one source.\n\nThis includes the typed key-value pairs that uniquely identify it, and the set of timestamps and data values from it.", + "type": "object", + "properties": { + "fields": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/FieldValue" + } + }, + "points": { + "$ref": "#/components/schemas/Points" + } + }, + "required": [ + "fields", + "points" + ] + }, + "TimeseriesName": { + "title": "The name of a timeseries", + "description": "Names are constructed by concatenating the target and metric names with ':'. 
Target and metric names must be lowercase alphanumeric characters with '_' separating words.", + "type": "string", + "pattern": "^(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*):(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*)$" + }, + "TimeseriesQuery": { + "description": "A timeseries query string, written in the Oximeter query language.", + "type": "object", + "properties": { + "query": { + "description": "A timeseries query string, written in the Oximeter query language.", + "type": "string" + } + }, + "required": [ + "query" + ] + }, + "TimeseriesSchema": { + "description": "The schema for a timeseries.\n\nThis includes the name of the timeseries, as well as the datum type of its metric and the schema for each field.", + "type": "object", + "properties": { + "created": { + "type": "string", + "format": "date-time" + }, + "datum_type": { + "$ref": "#/components/schemas/DatumType" + }, + "field_schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FieldSchema" + }, + "uniqueItems": true + }, + "timeseries_name": { + "$ref": "#/components/schemas/TimeseriesName" + } + }, + "required": [ + "created", + "datum_type", + "field_schema", + "timeseries_name" + ] + }, + "TimeseriesSchemaResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/TimeseriesSchema" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, "UninitializedSled": { "description": "A sled that has not been added to an initialized rack yet", "type": "object", @@ -17135,6 +18188,169 @@ "provisioned" ] }, + "ValueArray": { + "description": "List of data values for one timeseries.\n\nEach element is an option, where `None` represents a missing sample.", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "integer" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "type": "integer", + "format": "int64" + } + } + }, + "required": [ + "type", + "values" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "double" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "type": "number", + "format": "double" + } + } + }, + "required": [ + "type", + "values" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "boolean" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "type": "boolean" + } + } + }, + "required": [ + "type", + "values" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "string" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "type": "string" + } + } + }, + "required": [ + "type", + "values" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "integer_distribution" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Distributionint64" + } + ] + } + } + }, + "required": [ + "type", + "values" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "double_distribution" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "allOf": [ + { + "$ref": 
"#/components/schemas/Distributiondouble" + } + ] + } + } + }, + "required": [ + "type", + "values" + ] + } + ] + }, + "Values": { + "description": "A single list of values, for one dimension of a timeseries.", + "type": "object", + "properties": { + "metric_type": { + "$ref": "#/components/schemas/MetricType" + }, + "values": { + "$ref": "#/components/schemas/ValueArray" + } + }, + "required": [ + "metric_type", + "values" + ] + }, "VirtualResourceCounts": { "description": "A collection of resource counts used to describe capacity and utilization", "type": "object", @@ -18154,4 +19370,4 @@ } } ] -} \ No newline at end of file +} diff --git a/openapi/oximeter.json b/openapi/oximeter.json index f5c78d53cd..d4a37957ab 100644 --- a/openapi/oximeter.json +++ b/openapi/oximeter.json @@ -142,6 +142,12 @@ "description": "The collector's UUID.", "type": "string", "format": "uuid" + }, + "last_refresh": { + "nullable": true, + "description": "Last time we refreshed our producer list with Nexus.", + "type": "string", + "format": "date-time" } }, "required": [ @@ -194,10 +200,6 @@ "description": "The IP address and port at which `oximeter` can collect metrics from the producer.", "type": "string" }, - "base_route": { - "description": "The API base route from which `oximeter` can collect metrics.\n\nThe full route is `{base_route}/{id}`.", - "type": "string" - }, "id": { "description": "A unique ID for this producer.", "type": "string", @@ -222,7 +224,6 @@ }, "required": [ "address", - "base_route", "id", "interval", "kind" @@ -289,4 +290,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json index e5b3a1c56f..b975f16484 100644 --- a/openapi/sled-agent.json +++ b/openapi/sled-agent.json @@ -468,6 +468,38 @@ } }, "/instances/{instance_id}/state": { + "get": { + "operationId": "instance_get_state", + "parameters": [ + { + "in": "path", + "name": "instance_id", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SledInstanceState" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, "put": { "operationId": "instance_put_state", "parameters": [ @@ -535,32 +567,18 @@ } } }, - "/metrics/collect/{producer_id}": { + "/network-bootstore-config": { "get": { - "summary": "Collect oximeter samples from the sled agent.", - "operationId": "metrics_collect", - "parameters": [ - { - "in": "path", - "name": "producer_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], + "summary": "This API endpoint is only reading the local sled agent's view of the", + "description": "bootstore. The boostore is a distributed data store that is eventually consistent. 
Reads from individual nodes may not represent the latest state.", + "operationId": "read_network_bootstore_config_cache", "responses": { "200": { "description": "successful operation", "content": { "application/json": { "schema": { - "title": "Array_of_ProducerResultsItem", - "type": "array", - "items": { - "$ref": "#/components/schemas/ProducerResultsItem" - } + "$ref": "#/components/schemas/EarlyNetworkConfig" } } } @@ -572,20 +590,42 @@ "$ref": "#/components/responses/Error" } } + }, + "put": { + "operationId": "write_network_bootstore_config", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EarlyNetworkConfig" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } } }, - "/network-bootstore-config": { + "/omicron-physical-disks": { "get": { - "summary": "This API endpoint is only reading the local sled agent's view of the", - "description": "bootstore. The boostore is a distributed data store that is eventually consistent. Reads from individual nodes may not represent the latest state.", - "operationId": "read_network_bootstore_config_cache", + "operationId": "omicron_physical_disks_get", "responses": { "200": { "description": "successful operation", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/EarlyNetworkConfig" + "$ref": "#/components/schemas/OmicronPhysicalDisksConfig" } } } @@ -599,20 +639,27 @@ } }, "put": { - "operationId": "write_network_bootstore_config", + "operationId": "omicron_physical_disks_put", "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/EarlyNetworkConfig" + "$ref": "#/components/schemas/OmicronPhysicalDisksConfig" } } }, "required": true }, "responses": { - "204": { - "description": "resource updated" + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DisksManagementResult" + } + } + } }, "4XX": { "$ref": "#/components/responses/Error" @@ -795,26 +842,41 @@ } } }, - "/v2p/{interface_id}": { + "/v2p": { + "get": { + "summary": "List v2p mappings present on sled", + "operationId": "list_v2p", + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "title": "Array_of_VirtualNetworkInterfaceHost", + "type": "array", + "items": { + "$ref": "#/components/schemas/VirtualNetworkInterfaceHost" + } + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, "put": { "summary": "Create a mapping from a virtual NIC to a physical host", "operationId": "set_v2p", - "parameters": [ - { - "in": "path", - "name": "interface_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/SetVirtualNetworkInterfaceHost" + "$ref": "#/components/schemas/VirtualNetworkInterfaceHost" } } }, @@ -835,22 +897,11 @@ "delete": { "summary": "Delete a mapping from a virtual NIC to a physical host", "operationId": "del_v2p", - "parameters": [ - { - "in": "path", - "name": "interface_id", - "required": true, - "schema": { - "type": "string", - "format": "uuid" - } - } - ], "requestBody": { "content": { "application/json": { "schema": { - "$ref": 
"#/components/schemas/DeleteVirtualNetworkInterfaceHost" + "$ref": "#/components/schemas/VirtualNetworkInterfaceHost" } } }, @@ -1415,12 +1466,22 @@ "format": "uint32", "minimum": 0 }, + "checker": { + "nullable": true, + "description": "Checker to apply to incoming messages.", + "type": "string" + }, "originate": { "description": "The set of prefixes for the BGP router to originate.", "type": "array", "items": { - "$ref": "#/components/schemas/Ipv4Network" + "$ref": "#/components/schemas/Ipv4Net" } + }, + "shaper": { + "nullable": true, + "description": "Shaper to apply to outgoing messages.", + "type": "string" } }, "required": [ @@ -1436,12 +1497,44 @@ "type": "string", "format": "ipv4" }, + "allowed_export": { + "description": "Define export policy for a peer.", + "default": { + "type": "no_filtering" + }, + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" + } + ] + }, + "allowed_import": { + "description": "Define import policy for a peer.", + "default": { + "type": "no_filtering" + }, + "allOf": [ + { + "$ref": "#/components/schemas/ImportExportPolicy" + } + ] + }, "asn": { - "description": "The autonomous sysetm number of the router the peer belongs to.", + "description": "The autonomous system number of the router the peer belongs to.", "type": "integer", "format": "uint32", "minimum": 0 }, + "communities": { + "description": "Include the provided communities in updates sent to the peer.", + "default": [], + "type": "array", + "items": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, "connect_retry": { "nullable": true, "description": "The interval in seconds between peer connection retry attempts.", @@ -1456,6 +1549,11 @@ "format": "uint64", "minimum": 0 }, + "enforce_first_as": { + "description": "Enforce that the first AS in paths received from this peer is the peer's AS.", + "default": false, + "type": "boolean" + }, "hold_time": { "nullable": true, "description": "How long to keep a session alive without a keepalive in seconds. Defaults to 6.", @@ -1477,9 +1575,49 @@ "format": "uint64", "minimum": 0 }, + "local_pref": { + "nullable": true, + "description": "Apply a local preference to routes received from this peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "md5_auth_key": { + "nullable": true, + "description": "Use the given key for TCP-MD5 authentication with the peer.", + "type": "string" + }, + "min_ttl": { + "nullable": true, + "description": "Require messages from a peer have a minimum IP time to live field.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "multi_exit_discriminator": { + "nullable": true, + "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, "port": { "description": "Switch port the peer is reachable on.", "type": "string" + }, + "remote_asn": { + "nullable": true, + "description": "Require that a peer has a specified ASN.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "Associate a VLAN ID with a BGP peer session.", + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ @@ -1488,2566 +1626,477 @@ "port" ] }, - "BinRangedouble": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "BootDiskOsWriteProgress": { + "description": "Current progress of an OS image being written to disk.", "oneOf": [ { - "description": "A range unbounded below and exclusively above, `..end`.", + "description": "The image is still being uploaded.", "type": "object", "properties": { - "end": { - "type": "number", - "format": "double" + "bytes_received": { + "type": "integer", + "format": "uint", + "minimum": 0 }, - "type": { + "state": { "type": "string", "enum": [ - "range_to" + "receiving_uploaded_image" ] } }, "required": [ - "end", - "type" + "bytes_received", + "state" ] }, { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "description": "The image is being written to disk.", "type": "object", "properties": { - "end": { - "type": "number", - "format": "double" - }, - "start": { - "type": "number", - "format": "double" + "bytes_written": { + "type": "integer", + "format": "uint", + "minimum": 0 }, - "type": { + "state": { "type": "string", "enum": [ - "range" + "writing_image_to_disk" ] } }, "required": [ - "end", - "start", - "type" + "bytes_written", + "state" ] }, { - "description": "A range bounded inclusively below and unbounded above, `start..`.", + "description": "The image is being read back from disk for validation.", "type": "object", "properties": { - "start": { - "type": "number", - "format": "double" + "bytes_read": { + "type": "integer", + "format": "uint", + "minimum": 0 }, - "type": { + "state": { "type": "string", "enum": [ - "range_from" + "validating_written_image" ] } }, "required": [ - "start", - "type" + "bytes_read", + "state" ] } ] }, - "BinRangefloat": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "BootDiskOsWriteStatus": { + "description": "Status of an update to a boot disk OS.", "oneOf": [ { - "description": "A range unbounded below and exclusively above, `..end`.", + "description": "No update has been started for this disk, or any previously-started update has completed and had its status cleared.", "type": "object", "properties": { - "end": { - "type": "number", - "format": "float" - }, - "type": { + "status": { "type": "string", "enum": [ - "range_to" + "no_update_started" ] } }, "required": [ - "end", - "type" + "status" ] }, { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "description": "An update is currently running.", "type": "object", "properties": { - "end": { - "type": "number", - "format": "float" - }, - "start": { - "type": "number", - "format": "float" + "progress": { + "$ref": "#/components/schemas/BootDiskOsWriteProgress" }, - "type": { + "status": { "type": "string", "enum": [ - "range" + "in_progress" ] + }, + "update_id": { + "type": "string", + "format": "uuid" } }, "required": [ - "end", - "start", - "type" + "progress", + "status", + "update_id" ] }, { - "description": "A range bounded inclusively below and unbounded above, `start..`.", + "description": "The most recent update completed successfully.", "type": "object", "properties": { - "start": { - "type": "number", - "format": "float" - }, - "type": { + "status": { "type": "string", "enum": [ - "range_from" + "complete" ] + }, + "update_id": { + "type": "string", + "format": "uuid" } }, "required": [ - "start", - "type" - ] - } - ] - }, - "BinRangeint16": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int16" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] - }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int16" - }, - "start": { - "type": "integer", - "format": "int16" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" - ] - }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "int16" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" - ] - } - ] - }, - "BinRangeint32": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
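The `BootDiskOsWriteProgress` and `BootDiskOsWriteStatus` schemas being added here are tagged unions keyed on `state` and `status` respectively. A small helper that turns a progress object into a one-line summary; the example status object is hypothetical but matches the variants shown above:

```python
def describe_progress(progress: dict) -> str:
    """Summarize a BootDiskOsWriteProgress object from the sled-agent API."""
    state = progress["state"]
    if state == "receiving_uploaded_image":
        return f"receiving image: {progress['bytes_received']} bytes uploaded"
    if state == "writing_image_to_disk":
        return f"writing image: {progress['bytes_written']} bytes written"
    if state == "validating_written_image":
        return f"validating image: {progress['bytes_read']} bytes read back"
    return f"unknown state {state!r}"


# Hypothetical in-progress BootDiskOsWriteStatus object.
status = {
    "status": "in_progress",
    "update_id": "00000000-0000-0000-0000-000000000000",
    "progress": {"state": "writing_image_to_disk", "bytes_written": 1048576},
}
if status["status"] == "in_progress":
    print(describe_progress(status["progress"]))
```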
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int32" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] - }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int32" - }, - "start": { - "type": "integer", - "format": "int32" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" - ] - }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "int32" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" - ] - } - ] - }, - "BinRangeint64": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int64" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] - }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int64" - }, - "start": { - "type": "integer", - "format": "int64" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" - ] - }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "int64" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" - ] - } - ] - }, - "BinRangeint8": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int8" - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] - }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "int8" - }, - "start": { - "type": "integer", - "format": "int8" - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" - ] - }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "int8" - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" - ] - } - ] - }, - "BinRangeuint16": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] - }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "start": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" - ] - }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" - ] - } - ] - }, - "BinRangeuint32": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] - }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "start": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" - ] - }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" - ] - } - ] - }, - "BinRangeuint64": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] - }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" - ] - }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" - ] - } - ] - }, - "BinRangeuint8": { - "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", - "oneOf": [ - { - "description": "A range unbounded below and exclusively above, `..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_to" - ] - } - }, - "required": [ - "end", - "type" - ] - }, - { - "description": "A range bounded inclusively below and exclusively above, `start..end`.", - "type": "object", - "properties": { - "end": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "start": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range" - ] - } - }, - "required": [ - "end", - "start", - "type" - ] - }, - { - "description": "A range bounded inclusively below and unbounded above, `start..`.", - "type": "object", - "properties": { - "start": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "range_from" - ] - } - }, - "required": [ - "start", - "type" - ] - } - ] - }, - "Bindouble": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangedouble" - } - ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binfloat": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangefloat" - } - ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binint16": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeint16" - } - ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binint32": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeint32" - } - ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binint64": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeint64" - } - ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binint8": { - "description": "Type storing bin edges and a count 
of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeint8" - } - ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binuint16": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeuint16" - } - ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binuint32": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeuint32" - } - ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binuint64": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeuint64" - } - ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binuint8": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeuint8" - } - ] - } - }, - "required": [ - "count", - "range" - ] - }, - "BootDiskOsWriteProgress": { - "description": "Current progress of an OS image being written to disk.", - "oneOf": [ - { - "description": "The image is still being uploaded.", - "type": "object", - "properties": { - "bytes_received": { - "type": "integer", - "format": "uint", - "minimum": 0 - }, - "state": { - "type": "string", - "enum": [ - "receiving_uploaded_image" - ] - } - }, - "required": [ - "bytes_received", - "state" - ] - }, - { - "description": "The image is being written to disk.", - "type": "object", - "properties": { - "bytes_written": { - "type": "integer", - "format": "uint", - "minimum": 0 - }, - "state": { - "type": "string", - "enum": [ - "writing_image_to_disk" - ] - } - }, - "required": [ - "bytes_written", - "state" - ] - }, - { - "description": "The image is being read back from disk for validation.", - "type": "object", - "properties": { - "bytes_read": { - "type": "integer", - "format": "uint", - "minimum": 0 - }, - "state": { - "type": "string", - "enum": [ - "validating_written_image" - ] - } - }, - "required": [ - "bytes_read", - "state" - ] - } - ] - }, - "BootDiskOsWriteStatus": { - "description": "Status of an update to a boot disk OS.", - "oneOf": [ - { - "description": "No update has been started 
for this disk, or any previously-started update has completed and had its status cleared.", - "type": "object", - "properties": { - "status": { - "type": "string", - "enum": [ - "no_update_started" - ] - } - }, - "required": [ - "status" - ] - }, - { - "description": "An update is currently running.", - "type": "object", - "properties": { - "progress": { - "$ref": "#/components/schemas/BootDiskOsWriteProgress" - }, - "status": { - "type": "string", - "enum": [ - "in_progress" - ] - }, - "update_id": { - "type": "string", - "format": "uuid" - } - }, - "required": [ - "progress", - "status", - "update_id" - ] - }, - { - "description": "The most recent update completed successfully.", - "type": "object", - "properties": { - "status": { - "type": "string", - "enum": [ - "complete" - ] - }, - "update_id": { - "type": "string", - "format": "uuid" - } - }, - "required": [ - "status", - "update_id" - ] - }, - { - "description": "The most recent update failed.", - "type": "object", - "properties": { - "message": { - "type": "string" - }, - "status": { - "type": "string", - "enum": [ - "failed" - ] - }, - "update_id": { - "type": "string", - "format": "uuid" - } - }, - "required": [ - "message", - "status", - "update_id" - ] - } - ] - }, - "BootstoreStatus": { - "type": "object", - "properties": { - "accepted_connections": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "established_connections": { - "type": "array", - "items": { - "$ref": "#/components/schemas/EstablishedConnection" - } - }, - "fsm_ledger_generation": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "fsm_state": { - "type": "string" - }, - "negotiating_connections": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "network_config_ledger_generation": { - "nullable": true, - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "peers": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "required": [ - "accepted_connections", - "established_connections", - "fsm_ledger_generation", - "fsm_state", - "negotiating_connections", - "peers" - ] - }, - "BundleUtilization": { - "description": "The portion of a debug dataset used for zone bundles.", - "type": "object", - "properties": { - "bytes_available": { - "description": "The total number of bytes available for zone bundles.\n\nThis is `dataset_quota` multiplied by the context's storage limit.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "bytes_used": { - "description": "Total bundle usage, in bytes.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "dataset_quota": { - "description": "The total dataset quota, in bytes.", - "type": "integer", - "format": "uint64", - "minimum": 0 - } - }, - "required": [ - "bytes_available", - "bytes_used", - "dataset_quota" - ] - }, - "ByteCount": { - "description": "Byte count to express memory or storage capacity.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "CleanupContext": { - "description": "Context provided for the zone bundle cleanup task.", - "type": "object", - "properties": { - "period": { - "description": "The period on which automatic checks and cleanup is performed.", - "allOf": [ - { - "$ref": "#/components/schemas/CleanupPeriod" - } - ] - }, - "priority": { - "description": "The priority ordering for keeping old bundles.", - "allOf": [ - { - "$ref": "#/components/schemas/PriorityOrder" - } - ] - }, - "storage_limit": { - "description": 
"The limit on the dataset quota available for zone bundles.", - "allOf": [ - { - "$ref": "#/components/schemas/StorageLimit" - } - ] - } - }, - "required": [ - "period", - "priority", - "storage_limit" - ] - }, - "CleanupContextUpdate": { - "description": "Parameters used to update the zone bundle cleanup context.", - "type": "object", - "properties": { - "period": { - "nullable": true, - "description": "The new period on which automatic cleanups are run.", - "allOf": [ - { - "$ref": "#/components/schemas/Duration" - } - ] - }, - "priority": { - "nullable": true, - "description": "The priority ordering for preserving old zone bundles.", - "allOf": [ - { - "$ref": "#/components/schemas/PriorityOrder" - } - ] - }, - "storage_limit": { - "nullable": true, - "description": "The new limit on the underlying dataset quota allowed for bundles.", - "type": "integer", - "format": "uint8", - "minimum": 0 - } - } - }, - "CleanupCount": { - "description": "The count of bundles / bytes removed during a cleanup operation.", - "type": "object", - "properties": { - "bundles": { - "description": "The number of bundles removed.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "bytes": { - "description": "The number of bytes removed.", - "type": "integer", - "format": "uint64", - "minimum": 0 - } - }, - "required": [ - "bundles", - "bytes" - ] - }, - "CleanupPeriod": { - "description": "A period on which bundles are automatically cleaned up.", - "allOf": [ - { - "$ref": "#/components/schemas/Duration" - } - ] - }, - "CrucibleOpts": { - "description": "CrucibleOpts\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"id\", \"lossy\", \"read_only\", \"target\" ], \"properties\": { \"cert_pem\": { \"type\": [ \"string\", \"null\" ] }, \"control\": { \"type\": [ \"string\", \"null\" ] }, \"flush_timeout\": { \"type\": [ \"number\", \"null\" ], \"format\": \"float\" }, \"id\": { \"type\": \"string\", \"format\": \"uuid\" }, \"key\": { \"type\": [ \"string\", \"null\" ] }, \"key_pem\": { \"type\": [ \"string\", \"null\" ] }, \"lossy\": { \"type\": \"boolean\" }, \"read_only\": { \"type\": \"boolean\" }, \"root_cert_pem\": { \"type\": [ \"string\", \"null\" ] }, \"target\": { \"type\": \"array\", \"items\": { \"type\": \"string\" } } } } ```
", - "type": "object", - "properties": { - "cert_pem": { - "nullable": true, - "type": "string" - }, - "control": { - "nullable": true, - "type": "string" - }, - "flush_timeout": { - "nullable": true, - "type": "number", - "format": "float" - }, - "id": { - "type": "string", - "format": "uuid" - }, - "key": { - "nullable": true, - "type": "string" - }, - "key_pem": { - "nullable": true, - "type": "string" - }, - "lossy": { - "type": "boolean" - }, - "read_only": { - "type": "boolean" - }, - "root_cert_pem": { - "nullable": true, - "type": "string" - }, - "target": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "required": [ - "id", - "lossy", - "read_only", - "target" - ] - }, - "Cumulativedouble": { - "description": "A cumulative or counter data type.", - "type": "object", - "properties": { - "start_time": { - "type": "string", - "format": "date-time" - }, - "value": { - "type": "number", - "format": "double" - } - }, - "required": [ - "start_time", - "value" - ] - }, - "Cumulativefloat": { - "description": "A cumulative or counter data type.", - "type": "object", - "properties": { - "start_time": { - "type": "string", - "format": "date-time" - }, - "value": { - "type": "number", - "format": "float" - } - }, - "required": [ - "start_time", - "value" - ] - }, - "Cumulativeint64": { - "description": "A cumulative or counter data type.", - "type": "object", - "properties": { - "start_time": { - "type": "string", - "format": "date-time" - }, - "value": { - "type": "integer", - "format": "int64" - } - }, - "required": [ - "start_time", - "value" - ] - }, - "Cumulativeuint64": { - "description": "A cumulative or counter data type.", - "type": "object", - "properties": { - "start_time": { - "type": "string", - "format": "date-time" - }, - "value": { - "type": "integer", - "format": "uint64", - "minimum": 0 - } - }, - "required": [ - "start_time", - "value" - ] - }, - "Datum": { - "description": "A `Datum` is a single sampled data point from a metric.", - "oneOf": [ - { - "type": "object", - "properties": { - "datum": { - "type": "boolean" - }, - "type": { - "type": "string", - "enum": [ - "bool" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "integer", - "format": "int8" - }, - "type": { - "type": "string", - "enum": [ - "i8" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "u8" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "integer", - "format": "int16" - }, - "type": { - "type": "string", - "enum": [ - "i16" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "integer", - "format": "uint16", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "u16" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "integer", - "format": "int32" - }, - "type": { - "type": "string", - "enum": [ - "i32" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "u32" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - 
"properties": { - "datum": { - "type": "integer", - "format": "int64" - }, - "type": { - "type": "string", - "enum": [ - "i64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "type": { - "type": "string", - "enum": [ - "u64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "number", - "format": "float" - }, - "type": { - "type": "string", - "enum": [ - "f32" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "number", - "format": "double" - }, - "type": { - "type": "string", - "enum": [ - "f64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "string" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "type": "array", - "items": { - "type": "integer", - "format": "uint8", - "minimum": 0 - } - }, - "type": { - "type": "string", - "enum": [ - "bytes" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativeint64" - }, - "type": { - "type": "string", - "enum": [ - "cumulative_i64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativeuint64" - }, - "type": { - "type": "string", - "enum": [ - "cumulative_u64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativefloat" - }, - "type": { - "type": "string", - "enum": [ - "cumulative_f32" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativedouble" - }, - "type": { - "type": "string", - "enum": [ - "cumulative_f64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint8" - }, - "type": { - "type": "string", - "enum": [ - "histogram_i8" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramuint8" - }, - "type": { - "type": "string", - "enum": [ - "histogram_u8" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint16" - }, - "type": { - "type": "string", - "enum": [ - "histogram_i16" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramuint16" - }, - "type": { - "type": "string", - "enum": [ - "histogram_u16" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint32" - }, - "type": { - "type": "string", - "enum": [ - "histogram_i32" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramuint32" - }, - "type": { - "type": "string", - "enum": [ - "histogram_u32" - ] - } - }, - "required": [ - 
"datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint64" - }, - "type": { - "type": "string", - "enum": [ - "histogram_i64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramuint64" - }, - "type": { - "type": "string", - "enum": [ - "histogram_u64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramfloat" - }, - "type": { - "type": "string", - "enum": [ - "histogram_f32" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramdouble" - }, - "type": { - "type": "string", - "enum": [ - "histogram_f64" - ] - } - }, - "required": [ - "datum", - "type" - ] - }, - { - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/MissingDatum" - }, - "type": { - "type": "string", - "enum": [ - "missing" - ] - } - }, - "required": [ - "datum", - "type" - ] - } - ] - }, - "DatumType": { - "description": "The type of an individual datum of a metric.", - "type": "string", - "enum": [ - "bool", - "i8", - "u8", - "i16", - "u16", - "i32", - "u32", - "i64", - "u64", - "f32", - "f64", - "string", - "bytes", - "cumulative_i64", - "cumulative_u64", - "cumulative_f32", - "cumulative_f64", - "histogram_i8", - "histogram_u8", - "histogram_i16", - "histogram_u16", - "histogram_i32", - "histogram_u32", - "histogram_i64", - "histogram_u64", - "histogram_f32", - "histogram_f64" - ] - }, - "DeleteVirtualNetworkInterfaceHost": { - "description": "The data needed to identify a virtual IP for which a sled maintains an OPTE virtual-to-physical mapping such that that mapping can be deleted.", - "type": "object", - "properties": { - "virtual_ip": { - "description": "The virtual IP whose mapping should be deleted.", - "type": "string", - "format": "ip" - }, - "vni": { - "description": "The VNI for the network containing the virtual IP whose mapping should be deleted.", - "allOf": [ - { - "$ref": "#/components/schemas/Vni" - } - ] - } - }, - "required": [ - "virtual_ip", - "vni" - ] - }, - "DhcpConfig": { - "description": "DHCP configuration for a port\n\nNot present here: Hostname (DHCPv4 option 12; used in DHCPv6 option 39); we use `InstanceRuntimeState::hostname` for this value.", - "type": "object", - "properties": { - "dns_servers": { - "description": "DNS servers to send to the instance\n\n(DHCPv4 option 6; DHCPv6 option 23)", - "type": "array", - "items": { - "type": "string", - "format": "ip" - } - }, - "host_domain": { - "nullable": true, - "description": "DNS zone this instance's hostname belongs to (e.g. 
the `project.example` part of `instance1.project.example`)\n\n(DHCPv4 option 15; used in DHCPv6 option 39)", - "type": "string" - }, - "search_domains": { - "description": "DNS search domains\n\n(DHCPv4 option 119; DHCPv6 option 24)", - "type": "array", - "items": { - "type": "string" - } - } - }, - "required": [ - "dns_servers", - "search_domains" - ] - }, - "DiskEnsureBody": { - "description": "Sent from to a sled agent to establish the runtime state of a Disk", - "type": "object", - "properties": { - "initial_runtime": { - "description": "Last runtime state of the Disk known to Nexus (used if the agent has never seen this Disk before).", - "allOf": [ - { - "$ref": "#/components/schemas/DiskRuntimeState" - } - ] - }, - "target": { - "description": "requested runtime state of the Disk", - "allOf": [ - { - "$ref": "#/components/schemas/DiskStateRequested" - } - ] - } - }, - "required": [ - "initial_runtime", - "target" - ] - }, - "DiskIdentity": { - "description": "Uniquely identifies a disk.", - "type": "object", - "properties": { - "model": { - "type": "string" - }, - "serial": { - "type": "string" - }, - "vendor": { - "type": "string" - } - }, - "required": [ - "model", - "serial", - "vendor" - ] - }, - "DiskRequest": { - "description": "DiskRequest\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"device\", \"name\", \"read_only\", \"slot\", \"volume_construction_request\" ], \"properties\": { \"device\": { \"type\": \"string\" }, \"name\": { \"type\": \"string\" }, \"read_only\": { \"type\": \"boolean\" }, \"slot\": { \"$ref\": \"#/components/schemas/Slot\" }, \"volume_construction_request\": { \"$ref\": \"#/components/schemas/VolumeConstructionRequest\" } } } ```
", - "type": "object", - "properties": { - "device": { - "type": "string" - }, - "name": { - "type": "string" - }, - "read_only": { - "type": "boolean" - }, - "slot": { - "$ref": "#/components/schemas/Slot" - }, - "volume_construction_request": { - "$ref": "#/components/schemas/VolumeConstructionRequest" - } - }, - "required": [ - "device", - "name", - "read_only", - "slot", - "volume_construction_request" - ] - }, - "DiskRuntimeState": { - "description": "Runtime state of the Disk, which includes its attach state and some minimal metadata", - "type": "object", - "properties": { - "disk_state": { - "description": "runtime state of the Disk", - "allOf": [ - { - "$ref": "#/components/schemas/DiskState" - } - ] - }, - "gen": { - "description": "generation number for this state", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] - }, - "time_updated": { - "description": "timestamp for this information", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "disk_state", - "gen", - "time_updated" - ] - }, - "DiskState": { - "description": "State of a Disk", - "oneOf": [ - { - "description": "Disk is being initialized", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "creating" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is ready but detached from any Instance", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "detached" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is ready to receive blocks from an external source", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "import_ready" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is importing blocks from a URL", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "importing_from_url" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is importing blocks from bulk writes", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "importing_from_bulk_writes" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is being finalized to state Detached", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "finalizing" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is undergoing maintenance", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "maintenance" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is being attached to the given Instance", - "type": "object", - "properties": { - "instance": { - "type": "string", - "format": "uuid" - }, - "state": { - "type": "string", - "enum": [ - "attaching" - ] - } - }, - "required": [ - "instance", - "state" - ] - }, - { - "description": "Disk is attached to the given Instance", - "type": "object", - "properties": { - "instance": { - "type": "string", - "format": "uuid" - }, - "state": { - "type": "string", - "enum": [ - "attached" - ] - } - }, - "required": [ - "instance", - "state" - ] - }, - { - "description": "Disk is being detached from the given Instance", - "type": "object", - "properties": { - "instance": { - "type": "string", - "format": "uuid" - }, - "state": { - "type": "string", - "enum": [ - "detaching" - ] - } - }, - "required": [ - "instance", - "state" - ] - }, - { - "description": "Disk has been destroyed", - "type": "object", - "properties": { 
- "state": { - "type": "string", - "enum": [ - "destroyed" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is unavailable", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "faulted" - ] - } - }, - "required": [ - "state" - ] - } - ] - }, - "DiskStateRequested": { - "description": "Used to request a Disk state change", - "oneOf": [ - { - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "detached" - ] - } - }, - "required": [ - "state" - ] - }, - { - "type": "object", - "properties": { - "instance": { - "type": "string", - "format": "uuid" - }, - "state": { - "type": "string", - "enum": [ - "attached" - ] - } - }, - "required": [ - "instance", - "state" - ] - }, - { - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "destroyed" - ] - } - }, - "required": [ - "state" + "status", + "update_id" ] }, { + "description": "The most recent update failed.", "type": "object", "properties": { - "state": { + "message": { + "type": "string" + }, + "status": { "type": "string", "enum": [ - "faulted" + "failed" ] + }, + "update_id": { + "type": "string", + "format": "uuid" } }, "required": [ - "state" + "message", + "status", + "update_id" ] } ] }, - "DiskType": { - "type": "string", - "enum": [ - "U2", - "M2" - ] - }, - "DiskVariant": { - "type": "string", - "enum": [ - "U2", - "M2" - ] - }, - "Duration": { + "BootstoreStatus": { "type": "object", "properties": { - "nanos": { + "accepted_connections": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "established_connections": { + "type": "array", + "items": { + "$ref": "#/components/schemas/EstablishedConnection" + } + }, + "fsm_ledger_generation": { "type": "integer", - "format": "uint32", + "format": "uint64", "minimum": 0 }, - "secs": { + "fsm_state": { + "type": "string" + }, + "negotiating_connections": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "network_config_ledger_generation": { + "nullable": true, "type": "integer", "format": "uint64", "minimum": 0 + }, + "peers": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true } }, "required": [ - "nanos", - "secs" + "accepted_connections", + "established_connections", + "fsm_ledger_generation", + "fsm_state", + "negotiating_connections", + "peers" ] }, - "EarlyNetworkConfig": { - "description": "Network configuration required to bring up the control plane\n\nThe fields in this structure are those from [`super::params::RackInitializeRequest`] necessary for use beyond RSS. This is just for the initial rack configuration and cold boot purposes. 
Updates come from Nexus.", + "BundleUtilization": { + "description": "The portion of a debug dataset used for zone bundles.", "type": "object", "properties": { - "body": { - "$ref": "#/components/schemas/EarlyNetworkConfigBody" + "bytes_available": { + "description": "The total number of bytes available for zone bundles.\n\nThis is `dataset_quota` multiplied by the context's storage limit.", + "type": "integer", + "format": "uint64", + "minimum": 0 }, - "generation": { + "bytes_used": { + "description": "Total bundle usage, in bytes.", "type": "integer", "format": "uint64", "minimum": 0 }, - "schema_version": { + "dataset_quota": { + "description": "The total dataset quota, in bytes.", "type": "integer", - "format": "uint32", + "format": "uint64", "minimum": 0 } }, "required": [ - "body", - "generation", - "schema_version" + "bytes_available", + "bytes_used", + "dataset_quota" ] }, - "EarlyNetworkConfigBody": { - "description": "This is the actual configuration of EarlyNetworking.\n\nWe nest it below the \"header\" of `generation` and `schema_version` so that we can perform partial deserialization of `EarlyNetworkConfig` to only read the header and defer deserialization of the body once we know the schema version. This is possible via the use of [`serde_json::value::RawValue`] in future (post-v1) deserialization paths.", + "ByteCount": { + "description": "Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "CleanupContext": { + "description": "Context provided for the zone bundle cleanup task.", "type": "object", "properties": { - "ntp_servers": { - "description": "The external NTP server addresses.", - "type": "array", - "items": { - "type": "string" - } + "period": { + "description": "The period on which automatic checks and cleanup is performed.", + "allOf": [ + { + "$ref": "#/components/schemas/CleanupPeriod" + } + ] }, - "rack_network_config": { + "priority": { + "description": "The priority ordering for keeping old bundles.", + "allOf": [ + { + "$ref": "#/components/schemas/PriorityOrder" + } + ] + }, + "storage_limit": { + "description": "The limit on the dataset quota available for zone bundles.", + "allOf": [ + { + "$ref": "#/components/schemas/StorageLimit" + } + ] + } + }, + "required": [ + "period", + "priority", + "storage_limit" + ] + }, + "CleanupContextUpdate": { + "description": "Parameters used to update the zone bundle cleanup context.", + "type": "object", + "properties": { + "period": { "nullable": true, + "description": "The new period on which automatic cleanups are run.", "allOf": [ { - "$ref": "#/components/schemas/RackNetworkConfigV1" + "$ref": "#/components/schemas/Duration" + } + ] + }, + "priority": { + "nullable": true, + "description": "The priority ordering for preserving old zone bundles.", + "allOf": [ + { + "$ref": "#/components/schemas/PriorityOrder" } ] + }, + "storage_limit": { + "nullable": true, + "description": "The new limit on the underlying dataset quota allowed for bundles.", + "type": "integer", + "format": "uint8", + "minimum": 0 + } + } + }, + "CleanupCount": { + "description": "The count of bundles / bytes removed during a cleanup operation.", + "type": "object", + "properties": { + "bundles": { + "description": "The number of bundles removed.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "bytes": { + "description": "The number of bytes removed.", + "type": "integer", + "format": "uint64", + "minimum": 0 } }, "required": [ - "ntp_servers" + "bundles", + 
"bytes" ] }, - "Error": { - "description": "Error information from a response.", + "CleanupPeriod": { + "description": "A period on which bundles are automatically cleaned up.", + "allOf": [ + { + "$ref": "#/components/schemas/Duration" + } + ] + }, + "CrucibleOpts": { + "description": "CrucibleOpts\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"id\", \"lossy\", \"read_only\", \"target\" ], \"properties\": { \"cert_pem\": { \"type\": [ \"string\", \"null\" ] }, \"control\": { \"type\": [ \"string\", \"null\" ] }, \"flush_timeout\": { \"type\": [ \"number\", \"null\" ], \"format\": \"float\" }, \"id\": { \"type\": \"string\", \"format\": \"uuid\" }, \"key\": { \"type\": [ \"string\", \"null\" ] }, \"key_pem\": { \"type\": [ \"string\", \"null\" ] }, \"lossy\": { \"type\": \"boolean\" }, \"read_only\": { \"type\": \"boolean\" }, \"root_cert_pem\": { \"type\": [ \"string\", \"null\" ] }, \"target\": { \"type\": \"array\", \"items\": { \"type\": \"string\" } } } } ```
", "type": "object", "properties": { - "error_code": { + "cert_pem": { + "nullable": true, "type": "string" }, - "message": { + "control": { + "nullable": true, "type": "string" }, - "request_id": { + "flush_timeout": { + "nullable": true, + "type": "number", + "format": "float" + }, + "id": { + "type": "string", + "format": "uuid" + }, + "key": { + "nullable": true, + "type": "string" + }, + "key_pem": { + "nullable": true, + "type": "string" + }, + "lossy": { + "type": "boolean" + }, + "read_only": { + "type": "boolean" + }, + "root_cert_pem": { + "nullable": true, "type": "string" + }, + "target": { + "type": "array", + "items": { + "type": "string" + } } }, "required": [ - "message", - "request_id" + "id", + "lossy", + "read_only", + "target" ] }, - "EstablishedConnection": { + "DhcpConfig": { + "description": "DHCP configuration for a port\n\nNot present here: Hostname (DHCPv4 option 12; used in DHCPv6 option 39); we use `InstanceRuntimeState::hostname` for this value.", "type": "object", "properties": { - "addr": { + "dns_servers": { + "description": "DNS servers to send to the instance\n\n(DHCPv4 option 6; DHCPv6 option 23)", + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "host_domain": { + "nullable": true, + "description": "DNS zone this instance's hostname belongs to (e.g. the `project.example` part of `instance1.project.example`)\n\n(DHCPv4 option 15; used in DHCPv6 option 39)", "type": "string" }, - "baseboard": { - "$ref": "#/components/schemas/Baseboard" + "search_domains": { + "description": "DNS search domains\n\n(DHCPv4 option 119; DHCPv6 option 24)", + "type": "array", + "items": { + "type": "string" + } } }, "required": [ - "addr", - "baseboard" + "dns_servers", + "search_domains" ] }, - "Field": { - "description": "A `Field` is a named aspect of a target or metric.", + "DiskEnsureBody": { + "description": "Sent from to a sled agent to establish the runtime state of a Disk", "type": "object", "properties": { - "name": { - "type": "string" + "initial_runtime": { + "description": "Last runtime state of the Disk known to Nexus (used if the agent has never seen this Disk before).", + "allOf": [ + { + "$ref": "#/components/schemas/DiskRuntimeState" + } + ] }, - "value": { - "$ref": "#/components/schemas/FieldValue" + "target": { + "description": "requested runtime state of the Disk", + "allOf": [ + { + "$ref": "#/components/schemas/DiskStateRequested" + } + ] } }, "required": [ - "name", - "value" + "initial_runtime", + "target" ] }, - "FieldSet": { + "DiskIdentity": { + "description": "Uniquely identifies a disk.", "type": "object", "properties": { - "fields": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/Field" - } + "model": { + "type": "string" }, - "name": { + "serial": { + "type": "string" + }, + "vendor": { "type": "string" } }, "required": [ - "fields", - "name" + "model", + "serial", + "vendor" ] }, - "FieldValue": { - "description": "The `FieldValue` contains the value of a target or metric field.", + "DiskManagementError": { "oneOf": [ { "type": "object", @@ -4055,16 +2104,12 @@ "type": { "type": "string", "enum": [ - "string" + "not_found" ] - }, - "value": { - "type": "string" } }, "required": [ - "type", - "value" + "type" ] }, { @@ -4073,12 +2118,23 @@ "type": { "type": "string", "enum": [ - "i8" + "zpool_uuid_mismatch" ] }, "value": { - "type": "integer", - "format": "int8" + "type": "object", + "properties": { + "expected": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + }, + 
"observed": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + }, + "required": [ + "expected", + "observed" + ] } }, "required": [ @@ -4092,13 +2148,11 @@ "type": { "type": "string", "enum": [ - "u8" + "key_manager" ] }, "value": { - "type": "integer", - "format": "uint8", - "minimum": 0 + "type": "string" } }, "required": [ @@ -4112,579 +2166,502 @@ "type": { "type": "string", "enum": [ - "i16" + "other" ] }, "value": { - "type": "integer", - "format": "int16" + "type": "string" } }, "required": [ "type", "value" ] + } + ] + }, + "DiskManagementStatus": { + "description": "Identifies how a single disk management operation may have succeeded or failed.", + "type": "object", + "properties": { + "err": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/DiskManagementError" + } + ] + }, + "identity": { + "$ref": "#/components/schemas/DiskIdentity" + } + }, + "required": [ + "identity" + ] + }, + "DiskRequest": { + "description": "DiskRequest\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"device\", \"name\", \"read_only\", \"slot\", \"volume_construction_request\" ], \"properties\": { \"device\": { \"type\": \"string\" }, \"name\": { \"type\": \"string\" }, \"read_only\": { \"type\": \"boolean\" }, \"slot\": { \"$ref\": \"#/components/schemas/Slot\" }, \"volume_construction_request\": { \"$ref\": \"#/components/schemas/VolumeConstructionRequest\" } } } ```
", + "type": "object", + "properties": { + "device": { + "type": "string" + }, + "name": { + "type": "string" + }, + "read_only": { + "type": "boolean" + }, + "slot": { + "$ref": "#/components/schemas/Slot" + }, + "volume_construction_request": { + "$ref": "#/components/schemas/VolumeConstructionRequest" + } + }, + "required": [ + "device", + "name", + "read_only", + "slot", + "volume_construction_request" + ] + }, + "DiskRuntimeState": { + "description": "Runtime state of the Disk, which includes its attach state and some minimal metadata", + "type": "object", + "properties": { + "disk_state": { + "description": "runtime state of the Disk", + "allOf": [ + { + "$ref": "#/components/schemas/DiskState" + } + ] + }, + "gen": { + "description": "generation number for this state", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] }, + "time_updated": { + "description": "timestamp for this information", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "disk_state", + "gen", + "time_updated" + ] + }, + "DiskState": { + "description": "State of a Disk", + "oneOf": [ { + "description": "Disk is being initialized", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "u16" + "creating" ] - }, - "value": { - "type": "integer", - "format": "uint16", - "minimum": 0 } }, "required": [ - "type", - "value" + "state" ] }, { + "description": "Disk is ready but detached from any Instance", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "i32" + "detached" ] - }, - "value": { - "type": "integer", - "format": "int32" } }, "required": [ - "type", - "value" + "state" ] }, { + "description": "Disk is ready to receive blocks from an external source", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "u32" + "import_ready" ] - }, - "value": { - "type": "integer", - "format": "uint32", - "minimum": 0 } }, "required": [ - "type", - "value" + "state" ] }, { + "description": "Disk is importing blocks from a URL", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "i64" + "importing_from_url" ] - }, - "value": { - "type": "integer", - "format": "int64" } }, "required": [ - "type", - "value" + "state" ] }, { + "description": "Disk is importing blocks from bulk writes", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "u64" + "importing_from_bulk_writes" ] - }, - "value": { - "type": "integer", - "format": "uint64", - "minimum": 0 } }, "required": [ - "type", - "value" + "state" ] }, { + "description": "Disk is being finalized to state Detached", "type": "object", "properties": { - "type": { + "state": { + "type": "string", + "enum": [ + "finalizing" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is undergoing maintenance", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "maintenance" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is being attached to the given Instance", + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { "type": "string", "enum": [ - "ip_addr" + "attaching" ] - }, - "value": { - "type": "string", - "format": "ip" } }, "required": [ - "type", - "value" + "instance", + "state" ] }, { + "description": "Disk is attached to the given Instance", "type": "object", "properties": { - "type": { + "instance": { "type": "string", - 
"enum": [ - "uuid" - ] + "format": "uuid" }, - "value": { + "state": { "type": "string", - "format": "uuid" + "enum": [ + "attached" + ] } }, "required": [ - "type", - "value" + "instance", + "state" ] }, { + "description": "Disk is being detached from the given Instance", "type": "object", "properties": { - "type": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { "type": "string", "enum": [ - "bool" + "detaching" ] - }, - "value": { - "type": "boolean" } }, "required": [ - "type", - "value" + "instance", + "state" ] - } - ] - }, - "Generation": { - "description": "Generation numbers stored in the database, used for optimistic concurrency control", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "HistogramError": { - "description": "Errors related to constructing histograms or adding samples into them.", - "oneOf": [ + }, { - "description": "An attempt to construct a histogram with an empty set of bins.", + "description": "Disk has been destroyed", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "empty_bins" + "destroyed" ] } }, "required": [ - "type" + "state" ] }, { - "description": "An attempt to construct a histogram with non-monotonic bins.", + "description": "Disk is unavailable", "type": "object", "properties": { - "type": { + "state": { "type": "string", "enum": [ - "nonmonotonic_bins" + "faulted" ] } }, "required": [ - "type" + "state" ] - }, + } + ] + }, + "DiskStateRequested": { + "description": "Used to request a Disk state change", + "oneOf": [ { - "description": "A non-finite was encountered, either as a bin edge or a sample.", "type": "object", "properties": { - "content": { - "type": "string" - }, - "type": { + "state": { "type": "string", "enum": [ - "non_finite_value" + "detached" ] } }, "required": [ - "content", - "type" + "state" ] }, { - "description": "Error returned when two neighboring bins are not adjoining (there's space between them)", "type": "object", "properties": { - "content": { - "type": "object", - "properties": { - "left": { - "type": "string" - }, - "right": { - "type": "string" - } - }, - "required": [ - "left", - "right" - ] + "instance": { + "type": "string", + "format": "uuid" }, - "type": { + "state": { "type": "string", "enum": [ - "non_adjoining_bins" + "attached" ] } }, "required": [ - "content", - "type" + "instance", + "state" ] }, { - "description": "Bin and count arrays are of different sizes.", "type": "object", "properties": { - "content": { - "type": "object", - "properties": { - "n_bins": { - "type": "integer", - "format": "uint", - "minimum": 0 - }, - "n_counts": { - "type": "integer", - "format": "uint", - "minimum": 0 - } - }, - "required": [ - "n_bins", - "n_counts" - ] - }, - "type": { + "state": { "type": "string", "enum": [ - "array_size_mismatch" + "destroyed" ] } }, "required": [ - "content", - "type" + "state" ] }, { "type": "object", "properties": { - "content": { - "$ref": "#/components/schemas/QuantizationError" - }, - "type": { + "state": { "type": "string", "enum": [ - "quantization" + "faulted" ] } }, "required": [ - "content", - "type" + "state" ] } ] }, - "Histogramdouble": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Bindouble" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" + "DiskType": { + "type": "string", + "enum": [ + "U2", + "M2" ] }, - "Histogramfloat": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binfloat" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" + "DiskVariant": { + "type": "string", + "enum": [ + "U2", + "M2" ] }, - "Histogramint16": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "DisksManagementResult": { + "description": "The result from attempting to manage underlying disks.\n\nThis is more complex than a simple \"Error\" type because it's possible for some disks to be initialized correctly, while others can fail.\n\nThis structure provides a mechanism for callers to learn about partial failures, and handle them appropriately on a per-disk basis.", "type": "object", "properties": { - "bins": { + "status": { "type": "array", "items": { - "$ref": "#/components/schemas/Binint16" + "$ref": "#/components/schemas/DiskManagementStatus" } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" } }, "required": [ - "bins", - "n_samples", - "start_time" + "status" ] }, - "Histogramint32": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "Duration": { "type": "object", "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binint32" - } - }, - "n_samples": { + "nanos": { "type": "integer", - "format": "uint64", + "format": "uint32", "minimum": 0 }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" - ] - }, - "Histogramint64": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binint64" - } - }, - "n_samples": { + "secs": { "type": "integer", "format": "uint64", "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" } }, "required": [ - "bins", - "n_samples", - "start_time" + "nanos", + "secs" ] }, - "Histogramint8": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "EarlyNetworkConfig": { + "description": "Network configuration required to bring up the control plane\n\nThe fields in this structure are those from [`super::params::RackInitializeRequest`] necessary for use beyond RSS. This is just for the initial rack configuration and cold boot purposes. Updates come from Nexus.", "type": "object", "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binint8" - } + "body": { + "$ref": "#/components/schemas/EarlyNetworkConfigBody" }, - "n_samples": { + "generation": { "type": "integer", "format": "uint64", "minimum": 0 }, - "start_time": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "bins", - "n_samples", - "start_time" - ] - }, - "Histogramuint16": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", - "type": "object", - "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binuint16" - } - }, - "n_samples": { + "schema_version": { "type": "integer", - "format": "uint64", + "format": "uint32", "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" } }, "required": [ - "bins", - "n_samples", - "start_time" + "body", + "generation", + "schema_version" ] }, - "Histogramuint32": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "EarlyNetworkConfigBody": { + "description": "This is the actual configuration of EarlyNetworking.\n\nWe nest it below the \"header\" of `generation` and `schema_version` so that we can perform partial deserialization of `EarlyNetworkConfig` to only read the header and defer deserialization of the body once we know the schema version. This is possible via the use of [`serde_json::value::RawValue`] in future (post-v1) deserialization paths.", "type": "object", "properties": { - "bins": { + "ntp_servers": { + "description": "The external NTP server addresses.", "type": "array", "items": { - "$ref": "#/components/schemas/Binuint32" + "type": "string" } }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "start_time": { - "type": "string", - "format": "date-time" + "rack_network_config": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/RackNetworkConfigV1" + } + ] } }, "required": [ - "bins", - "n_samples", - "start_time" + "ntp_servers" ] }, - "Histogramuint64": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "Error": { + "description": "Error information from a response.", "type": "object", "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binuint64" - } + "error_code": { + "type": "string" }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 + "message": { + "type": "string" }, - "start_time": { - "type": "string", - "format": "date-time" + "request_id": { + "type": "string" } }, "required": [ - "bins", - "n_samples", - "start_time" + "message", + "request_id" ] }, - "Histogramuint8": { - "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "EstablishedConnection": { "type": "object", "properties": { - "bins": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Binuint8" - } - }, - "n_samples": { - "type": "integer", - "format": "uint64", - "minimum": 0 + "addr": { + "type": "string" }, - "start_time": { - "type": "string", - "format": "date-time" + "baseboard": { + "$ref": "#/components/schemas/Baseboard" } }, "required": [ - "bins", - "n_samples", - "start_time" + "addr", + "baseboard" ] }, + "Generation": { + "description": "Generation numbers stored in the database, used for optimistic concurrency control", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, "HostIdentifier": { "description": "A `HostIdentifier` represents either an IP host or network (v4 or v6), or an entire VPC (identified by its VNI). It is used in firewall rule host filters.", "oneOf": [ @@ -4733,7 +2710,7 @@ "description": "IP Address and prefix (e.g., `192.168.0.1/16`) to apply to switchport (must be in infra_ip pool)", "type": "array", "items": { - "$ref": "#/components/schemas/IpNetwork" + "$ref": "#/components/schemas/IpNet" } }, "port": { @@ -4754,6 +2731,47 @@ "minLength": 1, "maxLength": 253 }, + "ImportExportPolicy": { + "description": "Define policy relating to the import and export of prefixes from a BGP peer.", + "oneOf": [ + { + "description": "Do not perform any filtering.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "no_filtering" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "allow" + ] + }, + "value": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + } + }, + "required": [ + "type", + "value" + ] + } + ] + }, "InstanceCpuCount": { "description": "The number of CPUs in an Instance", "type": "integer", @@ -5358,8 +3376,7 @@ "type": "object", "properties": { "id": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/TypedUuidForZpoolKind" }, "total_size": { "$ref": "#/components/schemas/ByteCount" @@ -5371,6 +3388,11 @@ ] }, "IpNet": { + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::IpNet", + "version": "0.1.0" + }, "oneOf": [ { "title": "v4", @@ -5390,50 +3412,32 @@ } ] }, - "IpNetwork": { - "oneOf": [ - { - "title": "v4", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv4Network" - } - ] - }, - { - "title": "v6", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv6Network" - } - ] - } - ] - }, "Ipv4Net": { "example": "192.168.1.0/24", "title": "An IPv4 subnet", - "description": "An IPv4 subnet, including prefix and subnet mask", + "description": "An IPv4 subnet, including prefix and prefix length", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + }, "type": "string", "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" }, - "Ipv4Network": { - "type": "string", - "pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\/(3[0-2]|[0-2]?[0-9])$" - }, "Ipv6Net": { "example": "fd12:3456::/64", "title": "An IPv6 subnet", "description": "An IPv6 subnet, including prefix and subnet mask", + "x-rust-type": 
{ + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + }, "type": "string", - "pattern": "^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" - }, - "Ipv6Network": { - "type": "string", - "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\")[/](12[0-8]|1[0-1][0-9]|[0-9]?[0-9])$" + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" }, "Ipv6Subnet": { - "description": "Wraps an [`Ipv6Network`] with a compile-time prefix length.", + "description": "Wraps an [`Ipv6Net`] with a compile-time prefix length.", "type": "object", "properties": { "net": { @@ -5456,245 +3460,26 @@ "psc_sp", "psc_rot", "switch_sp", - "switch_rot" - ] - }, - "L4PortRange": { - "example": "22", - "title": "A range of IP ports", - "description": "An inclusive-inclusive range of IP ports. 
The second port may be omitted to represent a single port", - "type": "string", - "pattern": "^[0-9]{1,5}(-[0-9]{1,5})?$", - "minLength": 1, - "maxLength": 11 - }, - "MacAddr": { - "example": "ff:ff:ff:ff:ff:ff", - "title": "A MAC address", - "description": "A Media Access Control address, in EUI-48 format", - "type": "string", - "pattern": "^([0-9a-fA-F]{0,2}:){5}[0-9a-fA-F]{0,2}$", - "minLength": 5, - "maxLength": 17 - }, - "Measurement": { - "description": "A `Measurement` is a timestamped datum from a single metric", - "type": "object", - "properties": { - "datum": { - "$ref": "#/components/schemas/Datum" - }, - "timestamp": { - "type": "string", - "format": "date-time" - } - }, - "required": [ - "datum", - "timestamp" - ] - }, - "MetricsError": { - "description": "Errors related to the generation or collection of metrics.", - "oneOf": [ - { - "description": "An error related to generating metric data points", - "type": "object", - "properties": { - "content": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "datum_error" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "description": "An error running an `Oximeter` server", - "type": "object", - "properties": { - "content": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "oximeter_server" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "description": "An error related to creating or sampling a [`histogram::Histogram`] metric.", - "type": "object", - "properties": { - "content": { - "$ref": "#/components/schemas/HistogramError" - }, - "type": { - "type": "string", - "enum": [ - "histogram_error" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "description": "An error parsing a field or measurement from a string.", - "type": "object", - "properties": { - "content": { - "type": "object", - "properties": { - "src": { - "type": "string" - }, - "typ": { - "type": "string" - } - }, - "required": [ - "src", - "typ" - ] - }, - "type": { - "type": "string", - "enum": [ - "parse_error" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "description": "A field name is duplicated between the target and metric.", - "type": "object", - "properties": { - "content": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - }, - "required": [ - "name" - ] - }, - "type": { - "type": "string", - "enum": [ - "duplicate_field_name" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "type": "object", - "properties": { - "content": { - "type": "object", - "properties": { - "datum_type": { - "$ref": "#/components/schemas/DatumType" - } - }, - "required": [ - "datum_type" - ] - }, - "type": { - "type": "string", - "enum": [ - "missing_datum_requires_start_time" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "type": "object", - "properties": { - "content": { - "type": "object", - "properties": { - "datum_type": { - "$ref": "#/components/schemas/DatumType" - } - }, - "required": [ - "datum_type" - ] - }, - "type": { - "type": "string", - "enum": [ - "missing_datum_cannot_have_start_time" - ] - } - }, - "required": [ - "content", - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "invalid_timeseries_name" - ] - } - }, - "required": [ - "type" - ] - } + "switch_rot" ] }, - "MissingDatum": { - "type": "object", - "properties": { - "datum_type": { - "$ref": "#/components/schemas/DatumType" - }, - "start_time": { - "nullable": true, - "type": 
"string", - "format": "date-time" - } - }, - "required": [ - "datum_type" - ] + "L4PortRange": { + "example": "22", + "title": "A range of IP ports", + "description": "An inclusive-inclusive range of IP ports. The second port may be omitted to represent a single port", + "type": "string", + "pattern": "^[0-9]{1,5}(-[0-9]{1,5})?$", + "minLength": 1, + "maxLength": 11 + }, + "MacAddr": { + "example": "ff:ff:ff:ff:ff:ff", + "title": "A MAC address", + "description": "A Media Access Control address, in EUI-48 format", + "type": "string", + "pattern": "^([0-9a-fA-F]{0,2}:){5}[0-9a-fA-F]{0,2}$", + "minLength": 5, + "maxLength": 17 }, "Name": { "title": "A name unique within the parent collection", @@ -5817,6 +3602,49 @@ } ] }, + "OmicronPhysicalDiskConfig": { + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "identity": { + "$ref": "#/components/schemas/DiskIdentity" + }, + "pool_id": { + "$ref": "#/components/schemas/TypedUuidForZpoolKind" + } + }, + "required": [ + "id", + "identity", + "pool_id" + ] + }, + "OmicronPhysicalDisksConfig": { + "type": "object", + "properties": { + "disks": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OmicronPhysicalDiskConfig" + } + }, + "generation": { + "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + } + }, + "required": [ + "disks", + "generation" + ] + }, "OmicronZoneConfig": { "description": "Describes one Omicron-managed zone running on a sled", "type": "object", @@ -6228,7 +4056,7 @@ "description": "This port's addresses.", "type": "array", "items": { - "$ref": "#/components/schemas/IpNetwork" + "$ref": "#/components/schemas/IpNet" } }, "autoneg": { @@ -6341,138 +4169,6 @@ "minItems": 2, "maxItems": 2 }, - "ProducerResultsItem": { - "oneOf": [ - { - "type": "object", - "properties": { - "info": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Sample" - } - }, - "status": { - "type": "string", - "enum": [ - "ok" - ] - } - }, - "required": [ - "info", - "status" - ] - }, - { - "type": "object", - "properties": { - "info": { - "$ref": "#/components/schemas/MetricsError" - }, - "status": { - "type": "string", - "enum": [ - "err" - ] - } - }, - "required": [ - "info", - "status" - ] - } - ] - }, - "QuantizationError": { - "description": "Errors occurring during quantizated bin generation.", - "oneOf": [ - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "overflow" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "precision" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "invalid_base" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "invalid_steps" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "uneven_steps_for_base" - ] - } - }, - "required": [ - "type" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", 
- "enum": [ - "powers_out_of_order" - ] - } - }, - "required": [ - "type" - ] - } - ] - }, "RackNetworkConfigV1": { "description": "Initial network configuration", "type": "object", @@ -6510,7 +4206,7 @@ } }, "rack_subnet": { - "$ref": "#/components/schemas/Ipv6Network" + "$ref": "#/components/schemas/Ipv6Net" } }, "required": [ @@ -6528,7 +4224,7 @@ "description": "The destination of the route.", "allOf": [ { - "$ref": "#/components/schemas/IpNetwork" + "$ref": "#/components/schemas/IpNet" } ] }, @@ -6536,6 +4232,13 @@ "description": "The nexthop/gateway address.", "type": "string", "format": "ip" + }, + "vlan_id": { + "nullable": true, + "description": "The VLAN id associated with this route.", + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ @@ -6543,66 +4246,10 @@ "nexthop" ] }, - "Sample": { - "description": "A concrete type representing a single, timestamped measurement from a timeseries.", - "type": "object", - "properties": { - "measurement": { - "description": "The measured value of the metric at this sample", - "allOf": [ - { - "$ref": "#/components/schemas/Measurement" - } - ] - }, - "metric": { - "$ref": "#/components/schemas/FieldSet" - }, - "target": { - "$ref": "#/components/schemas/FieldSet" - }, - "timeseries_name": { - "description": "The name of the timeseries this sample belongs to", - "type": "string" - } - }, - "required": [ - "measurement", - "metric", - "target", - "timeseries_name" - ] - }, "SemverVersion": { "type": "string", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" }, - "SetVirtualNetworkInterfaceHost": { - "description": "A mapping from a virtual NIC to a physical host", - "type": "object", - "properties": { - "physical_host_ip": { - "type": "string", - "format": "ipv6" - }, - "virtual_ip": { - "type": "string", - "format": "ip" - }, - "virtual_mac": { - "$ref": "#/components/schemas/MacAddr" - }, - "vni": { - "$ref": "#/components/schemas/Vni" - } - }, - "required": [ - "physical_host_ip", - "virtual_ip", - "virtual_mac", - "vni" - ] - }, "SledInstanceState": { "description": "A wrapper type containing a sled's total knowledge of the state of a specific VMM and the instance it incarnates.", "type": "object", @@ -6836,6 +4483,10 @@ "sync" ] }, + "TypedUuidForZpoolKind": { + "type": "string", + "format": "uuid" + }, "UpdateArtifactId": { "description": "An identifier for a single update artifact.", "type": "object", @@ -6867,6 +4518,32 @@ "version" ] }, + "VirtualNetworkInterfaceHost": { + "description": "A mapping from a virtual NIC to a physical host", + "type": "object", + "properties": { + "physical_host_ip": { + "type": "string", + "format": "ipv6" + }, + "virtual_ip": { + "type": "string", + "format": "ip" + }, + "virtual_mac": { + "$ref": "#/components/schemas/MacAddr" + }, + "vni": { + "$ref": "#/components/schemas/Vni" + } + }, + "required": [ + "physical_host_ip", + "virtual_ip", + "virtual_mac", + "vni" + ] + }, "VmmRuntimeState": { "description": "The dynamic runtime properties of an individual VMM process.", "type": "object", @@ -7251,8 +4928,7 @@ "$ref": "#/components/schemas/DiskType" }, "id": { - "type": "string", - "format": "uuid" + "$ref": "#/components/schemas/TypedUuidForZpoolKind" } }, "required": [ @@ -7288,4 +4964,4 @@ } } } -} \ No newline at end of file +} diff --git a/openapi/wicketd.json b/openapi/wicketd.json index b9645a174f..fd8e49b6e3 100644 
--- a/openapi/wicketd.json +++ b/openapi/wicketd.json @@ -457,6 +457,85 @@ } } }, + "/rack-setup/config/bgp/auth-key": { + "get": { + "summary": "Return information about BGP authentication keys, including checking", + "description": "validity of keys.\nProduces an error if the rack setup config wasn't set, or if any of the requested key IDs weren't found.", + "operationId": "get_bgp_auth_key_info", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetBgpAuthKeyParams" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetBgpAuthKeyInfoResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/rack-setup/config/bgp/auth-key/{key_id}": { + "put": { + "summary": "Set the BGP authentication key for a particular key ID.", + "operationId": "put_bgp_auth_key", + "parameters": [ + { + "in": "path", + "name": "key_id", + "required": true, + "schema": { + "$ref": "#/components/schemas/BgpAuthKeyId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PutBgpAuthKeyBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PutBgpAuthKeyResponse" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/rack-setup/config/cert": { "post": { "summary": "Add an external certificate.", @@ -699,6 +778,48 @@ "message" ] }, + "AllowedSourceIps": { + "description": "Description of source IPs allowed to reach rack services.", + "oneOf": [ + { + "description": "Allow traffic from any external IP address.", + "type": "object", + "properties": { + "allow": { + "type": "string", + "enum": [ + "any" + ] + } + }, + "required": [ + "allow" + ] + }, + { + "description": "Restrict access to a specific set of source IP addresses or subnets.\n\nAll others are prevented from reaching rack services.", + "type": "object", + "properties": { + "allow": { + "type": "string", + "enum": [ + "list" + ] + }, + "ips": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + } + }, + "required": [ + "allow", + "ips" + ] + } + ] + }, "ArtifactHashId": { "description": "A hash-based identifier for an artifact.\n\nSome places, e.g. the installinator, request artifacts by hash rather than by name and version. 
This type indicates that.", "type": "object", @@ -813,6 +934,109 @@ } ] }, + "BgpAuthKey": { + "description": "Describes the actual authentication key to use with a BGP peer.\n\nCurrently, only TCP-MD5 authentication is supported.", + "oneOf": [ + { + "description": "TCP-MD5 authentication.", + "type": "object", + "properties": { + "key": { + "description": "The pre-shared key.", + "type": "string" + }, + "kind": { + "type": "string", + "enum": [ + "tcp_md5" + ] + } + }, + "required": [ + "key", + "kind" + ] + } + ] + }, + "BgpAuthKeyId": { + "description": "The key identifier for authentication to use with a BGP peer.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "BgpAuthKeyInfo": { + "description": "Describes insensitive information about a BGP authentication key.\n\nThis information is considered okay to display in the UI.", + "oneOf": [ + { + "description": "TCP-MD5 authentication.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "tcp_md5" + ] + }, + "sha256": { + "description": "A SHA-256 digest of the key.", + "type": "string", + "format": "hex string (32 bytes)" + } + }, + "required": [ + "kind", + "sha256" + ] + } + ] + }, + "BgpAuthKeyStatus": { + "description": "The status of a BGP authentication key.\n\nThis is part of a wicketd response, but is returned here because our tooling turns BTreeMaps into HashMaps. So we use a `replace` directive instead.", + "oneOf": [ + { + "description": "The key was specified but hasn't been set yet.", + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "unset" + ] + } + }, + "required": [ + "status" + ] + }, + { + "description": "The key has been set.", + "type": "object", + "properties": { + "info": { + "description": "Information about the key.", + "allOf": [ + { + "$ref": "#/components/schemas/BgpAuthKeyInfo" + } + ] + }, + "status": { + "type": "string", + "enum": [ + "set" + ] + } + }, + "required": [ + "info", + "status" + ] + } + ] + }, "BgpConfig": { "type": "object", "properties": { @@ -822,77 +1046,27 @@ "format": "uint32", "minimum": 0 }, + "checker": { + "nullable": true, + "description": "Checker to apply to incoming messages.", + "type": "string" + }, "originate": { "description": "The set of prefixes for the BGP router to originate.", "type": "array", "items": { - "$ref": "#/components/schemas/Ipv4Network" + "$ref": "#/components/schemas/Ipv4Net" } - } - }, - "required": [ - "asn", - "originate" - ] - }, - "BgpPeerConfig": { - "type": "object", - "properties": { - "addr": { - "description": "Address of the peer.", - "type": "string", - "format": "ipv4" - }, - "asn": { - "description": "The autonomous sysetm number of the router the peer belongs to.", - "type": "integer", - "format": "uint32", - "minimum": 0 }, - "connect_retry": { - "nullable": true, - "description": "The interval in seconds between peer connection retry attempts.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "delay_open": { - "nullable": true, - "description": "How long to delay sending open messages to a peer. In seconds.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "hold_time": { - "nullable": true, - "description": "How long to keep a session alive without a keepalive in seconds. 
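Editor's note: the new wicketd endpoints above let the user stage BGP authentication keys (currently TCP-MD5 only) separately from the insensitive config, and the PUT handler reports whether a key was added, replaced, or unchanged via `SetBgpAuthKeyStatus`, defined further down in this spec. A hedged sketch of how that status could be derived from a simple key store; the store and types here are illustrative, not the real wicketd internals.

```rust
use std::collections::BTreeMap;

/// Illustrative mirror of the `BgpAuthKey` schema: TCP-MD5 with a pre-shared key.
#[derive(Clone, Debug, PartialEq, Eq)]
enum BgpAuthKey {
    TcpMd5 { key: String },
}

/// Mirror of the `SetBgpAuthKeyStatus` responses.
#[derive(Debug, PartialEq, Eq)]
enum SetBgpAuthKeyStatus {
    Replaced,
    Unchanged,
    Added,
}

fn put_auth_key(
    store: &mut BTreeMap<String, BgpAuthKey>,
    key_id: String,
    key: BgpAuthKey,
) -> SetBgpAuthKeyStatus {
    match store.insert(key_id, key.clone()) {
        None => SetBgpAuthKeyStatus::Added,
        Some(old) if old == key => SetBgpAuthKeyStatus::Unchanged,
        Some(_) => SetBgpAuthKeyStatus::Replaced,
    }
}

fn main() {
    let mut store = BTreeMap::new();
    let key = BgpAuthKey::TcpMd5 { key: "hunter2".to_string() };
    assert_eq!(
        put_auth_key(&mut store, "peer-a".into(), key.clone()),
        SetBgpAuthKeyStatus::Added
    );
    assert_eq!(
        put_auth_key(&mut store, "peer-a".into(), key),
        SetBgpAuthKeyStatus::Unchanged
    );
}
```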
Defaults to 6.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "idle_hold_time": { - "nullable": true, - "description": "How long to keep a peer in idle after a state machine reset in seconds.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "keepalive": { + "shaper": { "nullable": true, - "description": "The interval to send keepalive messages at.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "port": { - "description": "Switch port the peer is reachable on.", + "description": "Shaper to apply to outgoing messages.", "type": "string" } }, "required": [ - "addr", "asn", - "port" + "originate" ] }, "BootstrapSledDescription": { @@ -1090,8 +1264,17 @@ ] }, "CurrentRssUserConfigInsensitive": { + "description": "The subset of `RackInitializeRequest` that the user fills in as clear text (e.g., via an uploaded config file).", "type": "object", "properties": { + "allowed_source_ips": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/AllowedSourceIps" + } + ] + }, "bootstrap_sleds": { "type": "array", "items": { @@ -1149,6 +1332,9 @@ "CurrentRssUserConfigSensitive": { "type": "object", "properties": { + "bgp_auth_keys": { + "$ref": "#/components/schemas/GetBgpAuthKeyInfoResponse" + }, "num_external_certificates": { "type": "integer", "format": "uint", @@ -1159,6 +1345,7 @@ } }, "required": [ + "bgp_auth_keys", "num_external_certificates", "recovery_silo_password_set" ] @@ -1322,6 +1509,38 @@ } } }, + "GetBgpAuthKeyInfoResponse": { + "description": "Returns information about BGP keys for rack network setup.\n\nThis is part of a wicketd response, but is returned here because our tooling turns BTreeMaps into HashMaps. So we use a `replace` directive instead.", + "type": "object", + "properties": { + "data": { + "description": "Information about the requested keys.\n\nNone indicates that the key ID has not been set yet. 
An error indicates that the key was not specified in the rack setup config.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/BgpAuthKeyStatus" + } + } + }, + "required": [ + "data" + ] + }, + "GetBgpAuthKeyParams": { + "type": "object", + "properties": { + "check_valid": { + "description": "Checks that these keys are valid.", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpAuthKeyId" + }, + "uniqueItems": true + } + }, + "required": [ + "check_valid" + ] + }, "GetInventoryParams": { "type": "object", "properties": { @@ -1446,13 +1665,18 @@ "installable" ] }, - "IpNetwork": { + "IpNet": { + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::IpNet", + "version": "0.1.0" + }, "oneOf": [ { "title": "v4", "allOf": [ { - "$ref": "#/components/schemas/Ipv4Network" + "$ref": "#/components/schemas/Ipv4Net" } ] }, @@ -1460,7 +1684,7 @@ "title": "v6", "allOf": [ { - "$ref": "#/components/schemas/Ipv6Network" + "$ref": "#/components/schemas/Ipv6Net" } ] } @@ -1486,9 +1710,17 @@ } ] }, - "Ipv4Network": { + "Ipv4Net": { + "example": "192.168.1.0/24", + "title": "An IPv4 subnet", + "description": "An IPv4 subnet, including prefix and prefix length", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + }, "type": "string", - "pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\/(3[0-2]|[0-2]?[0-9])$" + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" }, "Ipv4Range": { "description": "A non-decreasing IPv4 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", @@ -1508,9 +1740,17 @@ "last" ] }, - "Ipv6Network": { + "Ipv6Net": { + "example": "fd12:3456::/64", + "title": "An IPv6 subnet", + "description": "An IPv6 subnet, including prefix and subnet mask", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + }, "type": "string", - "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\")[/](12[0-8]|1[0-1][0-9]|[0-9]?[0-9])$" + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" }, "Ipv6Range": { "description": "A 
non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", @@ -1530,79 +1770,19 @@ "last" ] }, + "Name": { + "title": "A name unique within the parent collection", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Names cannot be a UUID though they may contain a UUID.", + "type": "string", + "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", + "minLength": 1, + "maxLength": 63 + }, "NewPasswordHash": { "title": "A password hash in PHC string format", "description": "Password hashes must be in PHC (Password Hashing Competition) string format. Passwords must be hashed with Argon2id. Password hashes may be rejected if the parameters appear not to be secure enough.", "type": "string" }, - "PortConfigV1": { - "type": "object", - "properties": { - "addresses": { - "description": "This port's addresses.", - "type": "array", - "items": { - "$ref": "#/components/schemas/IpNetwork" - } - }, - "autoneg": { - "description": "Whether or not to set autonegotiation", - "default": false, - "type": "boolean" - }, - "bgp_peers": { - "description": "BGP peers on this port", - "type": "array", - "items": { - "$ref": "#/components/schemas/BgpPeerConfig" - } - }, - "port": { - "description": "Nmae of the port this config applies to.", - "type": "string" - }, - "routes": { - "description": "The set of routes associated with this port.", - "type": "array", - "items": { - "$ref": "#/components/schemas/RouteConfig" - } - }, - "switch": { - "description": "Switch the port belongs to.", - "allOf": [ - { - "$ref": "#/components/schemas/SwitchLocation" - } - ] - }, - "uplink_port_fec": { - "description": "Port forward error correction type.", - "allOf": [ - { - "$ref": "#/components/schemas/PortFec" - } - ] - }, - "uplink_port_speed": { - "description": "Port speed.", - "allOf": [ - { - "$ref": "#/components/schemas/PortSpeed" - } - ] - } - }, - "required": [ - "addresses", - "bgp_peers", - "port", - "routes", - "switch", - "uplink_port_fec", - "uplink_port_speed" - ] - }, "PortFec": { "description": "Switchport FEC options", "type": "string", @@ -2118,6 +2298,28 @@ } ] }, + "PutBgpAuthKeyBody": { + "type": "object", + "properties": { + "key": { + "$ref": "#/components/schemas/BgpAuthKey" + } + }, + "required": [ + "key" + ] + }, + "PutBgpAuthKeyResponse": { + "type": "object", + "properties": { + "status": { + "$ref": "#/components/schemas/SetBgpAuthKeyStatus" + } + }, + "required": [ + "status" + ] + }, "PutRssRecoveryUserPasswordHash": { "type": "object", "properties": { @@ -2130,8 +2332,12 @@ ] }, "PutRssUserConfigInsensitive": { + "description": "The portion of `CurrentRssUserConfig` that can be posted in one shot; it is provided by the wicket user uploading a TOML file, currently.\n\nThis is the \"write\" version of [`CurrentRssUserConfigInsensitive`], with some different fields.", "type": "object", "properties": { + "allowed_source_ips": { + "$ref": "#/components/schemas/AllowedSourceIps" + }, "bootstrap_sleds": { "description": "List of slot numbers only.\n\n`wicketd` will map this back to sleds with the correct `SpIdentifier` based on the `bootstrap_sleds` it provides in `CurrentRssUserConfigInsensitive`.", "type": "array", @@ -2176,6 +2382,7 @@ } }, "required": [ + "allowed_source_ips", "bootstrap_sleds", "dns_servers", "external_dns_ips", 
@@ -2514,7 +2721,7 @@ "description": "The destination of the route.", "allOf": [ { - "$ref": "#/components/schemas/IpNetwork" + "$ref": "#/components/schemas/IpNet" } ] }, @@ -2522,6 +2729,13 @@ "description": "The nexthop/gateway address.", "type": "string", "format": "ip" + }, + "vlan_id": { + "nullable": true, + "description": "The VLAN id associated with this route.", + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ @@ -2533,6 +2747,31 @@ "type": "string", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" }, + "SetBgpAuthKeyStatus": { + "oneOf": [ + { + "description": "The key was accepted and replaced an old key.", + "type": "string", + "enum": [ + "replaced" + ] + }, + { + "description": "The key was accepted, and is the same as the existing key.", + "type": "string", + "enum": [ + "unchanged" + ] + }, + { + "description": "The key was accepted and is new.", + "type": "string", + "enum": [ + "added" + ] + } + ] + }, "SpComponentCaboose": { "description": "SpComponentCaboose\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"board\", \"git_commit\", \"name\", \"version\" ], \"properties\": { \"board\": { \"type\": \"string\" }, \"git_commit\": { \"type\": \"string\" }, \"name\": { \"type\": \"string\" }, \"version\": { \"type\": \"string\" } } } ```
", "type": "object", @@ -4372,25 +4611,6 @@ } ] }, - "SwitchLocation": { - "description": "Identifies switch physical location", - "oneOf": [ - { - "description": "Switch in upper slot", - "type": "string", - "enum": [ - "switch0" - ] - }, - { - "description": "Switch in lower slot", - "type": "string", - "enum": [ - "switch1" - ] - } - ] - }, "UpdateComponent": { "oneOf": [ { @@ -4658,6 +4878,192 @@ } ] }, + "UserSpecifiedBgpPeerConfig": { + "description": "User-specified version of [`BgpPeerConfig`].\n\nThis is similar to [`BgpPeerConfig`], except it doesn't have the sensitive `md5_auth_key` parameter, instead requiring that the user provide the key separately.\n\n[`BgpPeerConfig`]: omicron_common::api::internal::shared::BgpPeerConfig", + "type": "object", + "properties": { + "addr": { + "description": "Address of the peer.", + "type": "string", + "format": "ipv4" + }, + "allowed_export": { + "description": "Apply export policy to this peer with an allow list.", + "allOf": [ + { + "$ref": "#/components/schemas/UserSpecifiedImportExportPolicy" + } + ] + }, + "allowed_import": { + "description": "Apply import policy to this peer with an allow list.", + "allOf": [ + { + "$ref": "#/components/schemas/UserSpecifiedImportExportPolicy" + } + ] + }, + "asn": { + "description": "The autonomous sysetm number of the router the peer belongs to.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "auth_key_id": { + "nullable": true, + "description": "The key identifier for authentication to use with the peer.", + "allOf": [ + { + "$ref": "#/components/schemas/BgpAuthKeyId" + } + ] + }, + "communities": { + "description": "Include the provided communities in updates sent to the peer.", + "default": [], + "type": "array", + "items": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "connect_retry": { + "nullable": true, + "description": "The interval in seconds between peer connection retry attempts. Defaults to 3 seconds.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "delay_open": { + "nullable": true, + "description": "How long to delay sending open messages to a peer in seconds. Defaults to 0.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "enforce_first_as": { + "description": "Enforce that the first AS in paths received from this peer is the peer's AS.", + "default": false, + "type": "boolean" + }, + "hold_time": { + "nullable": true, + "description": "How long to keep a session alive without a keepalive in seconds. Defaults to 6 seconds.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "idle_hold_time": { + "nullable": true, + "description": "How long to keep a peer in idle after a state machine reset in seconds. Defaults to 3 seconds.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "keepalive": { + "nullable": true, + "description": "The interval to send keepalive messages at, in seconds. 
Defaults to 2 seconds.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "local_pref": { + "nullable": true, + "description": "Apply a local preference to routes received from this peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "min_ttl": { + "nullable": true, + "description": "Require messages from a peer have a minimum IP time to live field.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "multi_exit_discriminator": { + "nullable": true, + "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "port": { + "description": "Switch port the peer is reachable on.", + "type": "string" + }, + "remote_asn": { + "nullable": true, + "description": "Require that a peer has a specified ASN.", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "Associate a VLAN ID with a BGP peer session.", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "addr", + "asn", + "port" + ], + "additionalProperties": false + }, + "UserSpecifiedImportExportPolicy": { + "nullable": true, + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + }, + "UserSpecifiedPortConfig": { + "description": "User-specified version of [`PortConfigV1`].\n\nAll of [`PortConfigV1`] is user-specified. But we expect the port name to be a key, rather than a field as in [`PortConfigV1`]. So this has all of the fields other than the port name.\n\n[`PortConfigV1`]: omicron_common::api::internal::shared::PortConfigV1", + "type": "object", + "properties": { + "addresses": { + "type": "array", + "items": { + "$ref": "#/components/schemas/IpNet" + } + }, + "autoneg": { + "type": "boolean" + }, + "bgp_peers": { + "default": [], + "type": "array", + "items": { + "$ref": "#/components/schemas/UserSpecifiedBgpPeerConfig" + } + }, + "routes": { + "type": "array", + "items": { + "$ref": "#/components/schemas/RouteConfig" + } + }, + "uplink_port_fec": { + "$ref": "#/components/schemas/PortFec" + }, + "uplink_port_speed": { + "$ref": "#/components/schemas/PortSpeed" + } + }, + "required": [ + "addresses", + "autoneg", + "routes", + "uplink_port_fec", + "uplink_port_speed" + ], + "additionalProperties": false + }, "UserSpecifiedRackNetworkConfig": { "description": "User-specified parts of [`RackNetworkConfig`](omicron_common::api::internal::shared::RackNetworkConfig).", "type": "object", @@ -4676,10 +5082,16 @@ "type": "string", "format": "ipv4" }, - "ports": { - "type": "array", - "items": { - "$ref": "#/components/schemas/PortConfigV1" + "switch0": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/UserSpecifiedPortConfig" + } + }, + "switch1": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/UserSpecifiedPortConfig" } } }, @@ -4687,8 +5099,10 @@ "bgp", "infra_ip_first", "infra_ip_last", - "ports" - ] + "switch0", + "switch1" + ], + "additionalProperties": false }, "IgnitionCommand": { "description": "Ignition command.\n\n
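Editor's note: `UserSpecifiedBgpPeerConfig` leaves the session timers optional and documents their defaults (connect_retry 3 s, delay_open 0 s, hold_time 6 s, idle_hold_time 3 s, keepalive 2 s). A hedged sketch of resolving those defaults when building an effective peer config; the field names mirror the schema but the types are illustrative.

```rust
use std::time::Duration;

/// Optional timers as a user might supply them (all in seconds).
#[derive(Default)]
struct UserTimers {
    connect_retry: Option<u64>,
    delay_open: Option<u64>,
    hold_time: Option<u64>,
    idle_hold_time: Option<u64>,
    keepalive: Option<u64>,
}

/// Fully-resolved timers after applying the documented defaults.
#[derive(Debug, PartialEq)]
struct EffectiveTimers {
    connect_retry: Duration,
    delay_open: Duration,
    hold_time: Duration,
    idle_hold_time: Duration,
    keepalive: Duration,
}

fn resolve(t: &UserTimers) -> EffectiveTimers {
    let secs = |v: Option<u64>, default| Duration::from_secs(v.unwrap_or(default));
    EffectiveTimers {
        connect_retry: secs(t.connect_retry, 3),
        delay_open: secs(t.delay_open, 0),
        hold_time: secs(t.hold_time, 6),
        idle_hold_time: secs(t.idle_hold_time, 3),
        keepalive: secs(t.keepalive, 2),
    }
}

fn main() {
    // Leave everything unset and confirm the documented defaults apply.
    let effective = resolve(&UserTimers::default());
    assert_eq!(effective.hold_time, Duration::from_secs(6));
    assert_eq!(effective.keepalive, Duration::from_secs(2));
    println!("{effective:?}");
}
```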
JSON schema\n\n```json { \"description\": \"Ignition command.\", \"type\": \"string\", \"enum\": [ \"power_on\", \"power_off\", \"power_reset\" ] } ```
", @@ -4713,4 +5127,4 @@ } } } -} \ No newline at end of file +} diff --git a/oximeter/collector/Cargo.toml b/oximeter/collector/Cargo.toml index 92c91ca101..01f484f5f4 100644 --- a/oximeter/collector/Cargo.toml +++ b/oximeter/collector/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" description = "The oximeter metric collection server" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true camino.workspace = true @@ -13,7 +16,6 @@ clap.workspace = true dropshot.workspace = true futures.workspace = true internal-dns.workspace = true -nexus-client.workspace = true nexus-types.workspace = true omicron-common.workspace = true oximeter.workspace = true @@ -33,6 +35,7 @@ tokio.workspace = true toml.workspace = true uuid.workspace = true omicron-workspace-hack.workspace = true +nexus-client.workspace = true [dev-dependencies] expectorate.workspace = true diff --git a/oximeter/collector/src/agent.rs b/oximeter/collector/src/agent.rs index 8fff44bb2d..5da9a1dfa8 100644 --- a/oximeter/collector/src/agent.rs +++ b/oximeter/collector/src/agent.rs @@ -11,9 +11,16 @@ use crate::DbConfig; use crate::Error; use crate::ProducerEndpoint; use anyhow::anyhow; +use chrono::DateTime; +use chrono::Utc; +use futures::TryStreamExt; use internal_dns::resolver::Resolver; use internal_dns::ServiceName; +use nexus_client::types::IdSortMode; use omicron_common::address::CLICKHOUSE_PORT; +use omicron_common::address::NEXUS_INTERNAL_PORT; +use omicron_common::backoff; +use omicron_common::backoff::BackoffError; use oximeter::types::ProducerResults; use oximeter::types::ProducerResultsItem; use oximeter_db::Client; @@ -31,10 +38,12 @@ use std::net::SocketAddr; use std::net::SocketAddrV6; use std::ops::Bound; use std::sync::Arc; +use std::sync::Mutex as StdMutex; use std::time::Duration; use tokio::sync::mpsc; use tokio::sync::oneshot; use tokio::sync::Mutex; +use tokio::sync::MutexGuard; use tokio::task::JoinHandle; use tokio::time::interval; use uuid::Uuid; @@ -71,11 +80,7 @@ async fn perform_collection( ) { debug!(log, "collecting from producer"); let res = client - .get(format!( - "http://{}{}", - producer.address, - producer.collection_route() - )) + .get(format!("http://{}/{}", producer.address, producer.id,)) .send() .await; match res { @@ -140,12 +145,13 @@ async fn perform_collection( // also send a `CollectionMessage`, for example to update the collection interval. This is not // currently used, but will likely be exposed via control plane interfaces in the future. async fn collection_task( - log: Logger, + orig_log: Logger, collector: self_stats::OximeterCollector, mut producer: ProducerEndpoint, mut inbox: mpsc::Receiver, outbox: mpsc::Sender<(Option, ProducerResults)>, ) { + let mut log = orig_log.new(o!("address" => producer.address)); let client = reqwest::Client::new(); let mut collection_timer = interval(producer.interval); collection_timer.tick().await; // completes immediately @@ -184,6 +190,9 @@ async fn collection_task( "interval" => ?producer.interval, "address" => producer.address, ); + + // Update the logger with the new information as well. + log = orig_log.new(o!("address" => producer.address)); collection_timer = interval(producer.interval); collection_timer.tick().await; // completes immediately } @@ -343,7 +352,7 @@ async fn results_sink( } /// The internal agent the oximeter server uses to collect metrics from producers. 
-#[derive(Debug)] +#[derive(Clone, Debug)] pub struct OximeterAgent { /// The collector ID for this agent pub id: Uuid, @@ -355,6 +364,12 @@ pub struct OximeterAgent { // The actual tokio tasks running the collection on a timer. collection_tasks: Arc>>, + // The interval on which we refresh our list of producers from Nexus + refresh_interval: Duration, + // Handle to the task used to periodically refresh the list of producers. + refresh_task: Arc>>>, + /// The last time we've refreshed our list of producers from Nexus. + pub last_refresh_time: Arc>>>, } impl OximeterAgent { @@ -362,6 +377,7 @@ impl OximeterAgent { pub async fn with_id( id: Uuid, address: SocketAddrV6, + refresh_interval: Duration, db_config: DbConfig, resolver: &Resolver, log: &Logger, @@ -370,6 +386,7 @@ impl OximeterAgent { let log = log.new(o!( "component" => "oximeter-agent", "collector_id" => id.to_string(), + "collector_ip" => address.ip().to_string(), )); let insertion_log = log.new(o!("component" => "results-sink")); @@ -435,13 +452,30 @@ impl OximeterAgent { ) .await }); - Ok(Self { + + let self_ = Self { id, log, collection_target, result_sender, collection_tasks: Arc::new(Mutex::new(BTreeMap::new())), - }) + refresh_interval, + refresh_task: Arc::new(StdMutex::new(None)), + last_refresh_time: Arc::new(StdMutex::new(None)), + }; + + Ok(self_) + } + + /// Ensure the background task that polls Nexus periodically for our list of + /// assigned producers is running. + pub(crate) fn ensure_producer_refresh_task(&self, resolver: Resolver) { + let mut task = self.refresh_task.lock().unwrap(); + if task.is_none() { + let refresh_task = + tokio::spawn(refresh_producer_list(self.clone(), resolver)); + *task = Some(refresh_task); + } } /// Construct a new standalone `oximeter` collector. @@ -455,6 +489,7 @@ impl OximeterAgent { pub async fn new_standalone( id: Uuid, address: SocketAddrV6, + refresh_interval: Duration, db_config: Option, log: &Logger, ) -> Result { @@ -462,6 +497,7 @@ impl OximeterAgent { let log = log.new(o!( "component" => "oximeter-standalone", "collector_id" => id.to_string(), + "collector_ip" => address.ip().to_string(), )); // If we have configuration for ClickHouse, we'll spawn the results @@ -503,12 +539,21 @@ impl OximeterAgent { collector_ip: (*address.ip()).into(), collector_port: address.port(), }; + + // We don't spawn the task to periodically refresh producers when run + // in standalone mode. We can just pretend we registered once, and + // that's it. + let last_refresh_time = Arc::new(StdMutex::new(Some(Utc::now()))); + Ok(Self { id, log, collection_target, result_sender, collection_tasks: Arc::new(Mutex::new(BTreeMap::new())), + refresh_interval, + refresh_task: Arc::new(StdMutex::new(None)), + last_refresh_time, }) } @@ -517,8 +562,23 @@ impl OximeterAgent { &self, info: ProducerEndpoint, ) -> Result<(), Error> { + let mut tasks = self.collection_tasks.lock().await; + self.register_producer_locked(&mut tasks, info).await; + Ok(()) + } + + // Internal implementation that registers a producer, assuming the lock on + // the map is held. + async fn register_producer_locked( + &self, + tasks: &mut MutexGuard< + '_, + BTreeMap, + >, + info: ProducerEndpoint, + ) { let id = info.id; - match self.collection_tasks.lock().await.entry(id) { + match tasks.entry(id) { Entry::Vacant(value) => { debug!( self.log, @@ -530,7 +590,10 @@ impl OximeterAgent { // Build channel to control the task and receive results. 
let (tx, rx) = mpsc::channel(4); let q = self.result_sender.clone(); - let log = self.log.new(o!("component" => "collection-task", "producer_id" => id.to_string())); + let log = self.log.new(o!( + "component" => "collection-task", + "producer_id" => id.to_string(), + )); let info_clone = info.clone(); let target = self.collection_target; let task = tokio::spawn(async move { @@ -557,7 +620,6 @@ impl OximeterAgent { .unwrap(); } } - Ok(()) } /// Forces a collection from all producers. @@ -607,12 +669,24 @@ impl OximeterAgent { /// Delete a producer by ID, stopping its collection task. pub async fn delete_producer(&self, id: Uuid) -> Result<(), Error> { - let (_info, task) = self - .collection_tasks - .lock() - .await - .remove(&id) - .ok_or_else(|| Error::NoSuchProducer(id))?; + let mut tasks = self.collection_tasks.lock().await; + self.delete_producer_locked(&mut tasks, id).await + } + + // Internal implementation that deletes a producer, assuming the lock on + // the map is held. + async fn delete_producer_locked( + &self, + tasks: &mut MutexGuard< + '_, + BTreeMap, + >, + id: Uuid, + ) -> Result<(), Error> { + let Some((_info, task)) = tasks.remove(&id) else { + // We have no such producer, so good news, we've removed it! + return Ok(()); + }; debug!( self.log, "removed collection task from set"; @@ -633,6 +707,140 @@ impl OximeterAgent { } Ok(()) } + + // Ensure that exactly the set of producers is registered with `self`. + // + // Errors logged, but not returned, and an attempt to register all producers + // is made, even if an error is encountered part-way through. + // + // This returns the number of pruned tasks. + async fn ensure_producers( + &self, + expected_producers: BTreeMap, + ) -> usize { + let mut tasks = self.collection_tasks.lock().await; + + // First prune unwanted collection tasks. + // + // This is set of all producers that we currently have, which are not in + // the new list from Nexus. + let ids_to_prune: Vec<_> = tasks + .keys() + .filter(|id| !expected_producers.contains_key(id)) + .copied() + .collect(); + let n_pruned = ids_to_prune.len(); + for id in ids_to_prune.into_iter() { + // This method only returns an error if the provided ID does not + // exist in the current tasks. That is impossible, because we hold + // the lock, and we've just computed this as the set that _is_ in + // the map, and not in the new set from Nexus. + self.delete_producer_locked(&mut tasks, id).await.unwrap(); + } + + // And then ensure everything in the list. + // + // This will insert new tasks, and update any that we already know + // about. + for info in expected_producers.into_values() { + self.register_producer_locked(&mut tasks, info).await; + } + n_pruned + } +} + +// A task which periodically updates our list of producers from Nexus. +async fn refresh_producer_list(agent: OximeterAgent, resolver: Resolver) { + let mut interval = tokio::time::interval(agent.refresh_interval); + loop { + interval.tick().await; + info!(agent.log, "refreshing list of producers from Nexus"); + let nexus_addr = + resolve_nexus_with_backoff(&agent.log, &resolver).await; + let url = format!("http://{}", nexus_addr); + let client = nexus_client::Client::new(&url, agent.log.clone()); + let mut stream = client.cpapi_assigned_producers_list_stream( + &agent.id, + // This is a _total_ limit, not a page size, so `None` means "get + // all entries". 
+ None, + Some(IdSortMode::IdAscending), + ); + let mut expected_producers = BTreeMap::new(); + loop { + match stream.try_next().await { + Err(e) => { + error!( + agent.log, + "error fetching next assigned producer"; + "err" => ?e, + ); + } + Ok(Some(p)) => { + let endpoint = match ProducerEndpoint::try_from(p) { + Ok(ep) => ep, + Err(e) => { + error!( + agent.log, + "failed to convert producer description \ + from Nexus, skipping producer"; + "err" => e + ); + continue; + } + }; + let old = expected_producers.insert(endpoint.id, endpoint); + if let Some(ProducerEndpoint { id, .. }) = old { + error!( + agent.log, + "Nexus appears to have sent duplicate producer info"; + "producer_id" => %id, + ); + } + } + Ok(None) => break, + } + } + let n_current_tasks = expected_producers.len(); + let n_pruned_tasks = agent.ensure_producers(expected_producers).await; + *agent.last_refresh_time.lock().unwrap() = Some(Utc::now()); + info!( + agent.log, + "refreshed list of producers from Nexus"; + "n_pruned_tasks" => n_pruned_tasks, + "n_current_tasks" => n_current_tasks, + ); + } +} + +async fn resolve_nexus_with_backoff( + log: &Logger, + resolver: &Resolver, +) -> SocketAddr { + let log_failure = |error, delay| { + warn!( + log, + "failed to lookup Nexus IP, will retry"; + "delay" => ?delay, + "error" => ?error, + ); + }; + let do_lookup = || async { + resolver + .lookup_ipv6(ServiceName::Nexus) + .await + .map_err(|e| BackoffError::transient(e.to_string())) + .map(|ip| { + SocketAddr::V6(SocketAddrV6::new(ip, NEXUS_INTERNAL_PORT, 0, 0)) + }) + }; + backoff::retry_notify( + backoff::retry_policy_internal_service(), + do_lookup, + log_failure, + ) + .await + .expect("Expected infinite retry loop resolving Nexus address") } #[cfg(test)] @@ -696,6 +904,7 @@ mod tests { let collector = OximeterAgent::new_standalone( Uuid::new_v4(), SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0), + crate::default_refresh_interval(), None, log, ) @@ -721,7 +930,6 @@ mod tests { id: Uuid::new_v4(), kind: ProducerKind::Service, address, - base_route: String::from("/"), interval: COLLECTION_INTERVAL, }; collector @@ -772,6 +980,7 @@ mod tests { let collector = OximeterAgent::new_standalone( Uuid::new_v4(), SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0), + crate::default_refresh_interval(), None, log, ) @@ -789,7 +998,6 @@ mod tests { 0, 0, )), - base_route: String::from("/"), interval: COLLECTION_INTERVAL, }; collector @@ -842,6 +1050,7 @@ mod tests { let collector = OximeterAgent::new_standalone( Uuid::new_v4(), SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0), + crate::default_refresh_interval(), None, log, ) @@ -868,7 +1077,6 @@ mod tests { id: Uuid::new_v4(), kind: ProducerKind::Service, address, - base_route: String::from("/"), interval: COLLECTION_INTERVAL, }; collector @@ -917,4 +1125,27 @@ mod tests { assert_eq!(stats.failed_collections.len(), 1); logctx.cleanup_successful(); } + + #[tokio::test] + async fn test_delete_nonexistent_producer_succeeds() { + let logctx = + test_setup_log("test_delete_nonexistent_producer_succeeds"); + let log = &logctx.log; + + // Spawn an oximeter collector ... 
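Editor's note: the refresh path above fetches the full set of producers Nexus believes are assigned to this collector, prunes collection tasks that are no longer in that set, and then registers (or refreshes) everything that is. A stripped-down sketch of that reconcile step over a plain map; IDs are integers and the "tasks" are plain values here, standing in for the real collection tasks.

```rust
use std::collections::BTreeMap;

/// Illustrative stand-in for a producer endpoint.
#[derive(Clone, Debug, PartialEq)]
struct Producer {
    id: u64,
    address: String,
}

/// Reconcile running "tasks" against the set Nexus says should exist:
/// prune anything no longer assigned, then insert or refresh the rest.
/// Returns the number of pruned entries, like `ensure_producers`.
fn reconcile(
    tasks: &mut BTreeMap<u64, Producer>,
    expected: BTreeMap<u64, Producer>,
) -> usize {
    let to_prune: Vec<u64> = tasks
        .keys()
        .filter(|id| !expected.contains_key(id))
        .copied()
        .collect();
    let n_pruned = to_prune.len();
    for id in to_prune {
        tasks.remove(&id);
    }
    for (id, producer) in expected {
        tasks.insert(id, producer);
    }
    n_pruned
}

fn main() {
    let p = |id: u64| Producer { id, address: format!("[::1]:{}", 12000 + id) };
    let mut tasks = BTreeMap::from([(1, p(1)), (2, p(2))]);
    // Nexus now says only producers 2 and 3 are assigned to us.
    let expected = BTreeMap::from([(2, p(2)), (3, p(3))]);
    assert_eq!(reconcile(&mut tasks, expected), 1);
    assert!(tasks.contains_key(&3) && !tasks.contains_key(&1));
}
```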
+ let collector = OximeterAgent::new_standalone( + Uuid::new_v4(), + SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0), + crate::default_refresh_interval(), + None, + log, + ) + .await + .unwrap(); + assert!( + collector.delete_producer(Uuid::new_v4()).await.is_ok(), + "Deleting a non-existent producer should be OK" + ); + logctx.cleanup_successful(); + } } diff --git a/oximeter/collector/src/http_entrypoints.rs b/oximeter/collector/src/http_entrypoints.rs index 493083a40d..e876ed047d 100644 --- a/oximeter/collector/src/http_entrypoints.rs +++ b/oximeter/collector/src/http_entrypoints.rs @@ -7,6 +7,8 @@ // Copyright 2023 Oxide Computer Company use crate::OximeterAgent; +use chrono::DateTime; +use chrono::Utc; use dropshot::endpoint; use dropshot::ApiDescription; use dropshot::EmptyScanParams; @@ -117,6 +119,8 @@ async fn producer_delete( pub struct CollectorInfo { /// The collector's UUID. pub id: Uuid, + /// Last time we refreshed our producer list with Nexus. + pub last_refresh: Option>, } // Return identifying information about this collector @@ -128,6 +132,8 @@ async fn collector_info( request_context: RequestContext>, ) -> Result, HttpError> { let agent = request_context.context(); - let info = CollectorInfo { id: agent.id }; + let id = agent.id; + let last_refresh = *agent.last_refresh_time.lock().unwrap(); + let info = CollectorInfo { id, last_refresh }; Ok(HttpResponseOk(info)) } diff --git a/oximeter/collector/src/lib.rs b/oximeter/collector/src/lib.rs index f3c793d5c2..367a2066a1 100644 --- a/oximeter/collector/src/lib.rs +++ b/oximeter/collector/src/lib.rs @@ -31,6 +31,7 @@ use std::net::SocketAddr; use std::net::SocketAddrV6; use std::path::Path; use std::sync::Arc; +use std::time::Duration; use thiserror::Error; use uuid::Uuid; @@ -59,22 +60,13 @@ pub enum Error { #[error(transparent)] ResolveError(#[from] ResolveError), - #[error("No producer is registered with ID")] - NoSuchProducer(Uuid), - #[error("Error running standalone")] Standalone(#[from] anyhow::Error), } impl From for HttpError { fn from(e: Error) -> Self { - match e { - Error::NoSuchProducer(id) => HttpError::for_not_found( - None, - format!("No such producer: {id}"), - ), - _ => HttpError::for_internal_error(e.to_string()), - } + HttpError::for_internal_error(e.to_string()) } } @@ -114,6 +106,11 @@ impl DbConfig { } } +/// Default interval on which we refresh our list of producers from Nexus. +pub const fn default_refresh_interval() -> Duration { + Duration::from_secs(60 * 10) +} + /// Configuration used to initialize an oximeter server #[derive(Clone, Debug, Deserialize, Serialize)] pub struct Config { @@ -123,6 +120,11 @@ pub struct Config { #[serde(default, skip_serializing_if = "Option::is_none")] pub nexus_address: Option, + /// The interval on which we periodically refresh our list of producers from + /// Nexus. + #[serde(default = "default_refresh_interval")] + pub refresh_interval: Duration, + /// Configuration for working with ClickHouse pub db: DbConfig, @@ -202,6 +204,7 @@ impl Oximeter { OximeterAgent::with_id( args.id, args.address, + config.refresh_interval, config.db, &resolver, &log, @@ -239,7 +242,10 @@ impl Oximeter { .start(); // Notify Nexus that this oximeter instance is available. 
- let client = reqwest::Client::new(); + let our_info = nexus_client::types::OximeterInfo { + address: server.local_addr().to_string(), + collector_id: agent.id, + }; let notify_nexus = || async { debug!(log, "contacting nexus"); let nexus_address = if let Some(address) = config.nexus_address { @@ -254,18 +260,25 @@ impl Oximeter { 0, )) }; - - client - .post(format!("http://{}/metrics/collectors", nexus_address,)) - .json(&nexus_client::types::OximeterInfo { - address: server.local_addr().to_string(), - collector_id: agent.id, - }) - .send() - .await - .map_err(|e| backoff::BackoffError::transient(e.to_string()))? - .error_for_status() - .map_err(|e| backoff::BackoffError::transient(e.to_string())) + let client = nexus_client::Client::new( + &format!("http://{nexus_address}"), + log.clone(), + ); + client.cpapi_collectors_post(&our_info).await.map_err(|e| { + match &e { + // Failures to reach nexus, or server errors on its side + // are retryable. Everything else is permanent. + nexus_client::Error::CommunicationError(_) => { + backoff::BackoffError::transient(e.to_string()) + } + nexus_client::Error::ErrorResponse(inner) + if inner.status().is_server_error() => + { + backoff::BackoffError::transient(e.to_string()) + } + _ => backoff::BackoffError::permanent(e.to_string()), + } + }) }; let log_notification_failure = |error, delay| { warn!( @@ -282,6 +295,10 @@ impl Oximeter { .await .expect("Expected an infinite retry loop contacting Nexus"); + // Now that we've successfully registered, we'll start periodically + // polling for our list of producers from Nexus. + agent.ensure_producer_refresh_task(resolver); + info!(log, "oximeter registered with nexus"; "id" => ?agent.id); Ok(Self { agent, server }) } @@ -298,6 +315,7 @@ impl Oximeter { OximeterAgent::new_standalone( args.id, args.address, + crate::default_refresh_interval(), db_config, &log, ) @@ -371,6 +389,9 @@ impl Oximeter { } /// List producers. + /// + /// This returns up to `limit` producers, whose ID is _strictly greater_ + /// than `start`, or all producers if `start` is `None`. pub async fn list_producers( &self, start: Option, @@ -383,4 +404,9 @@ impl Oximeter { pub async fn delete_producer(&self, id: Uuid) -> Result<(), Error> { self.agent.delete_producer(id).await } + + /// Return the ID of this collector. + pub fn collector_id(&self) -> &Uuid { + &self.agent.id + } } diff --git a/oximeter/collector/src/self_stats.rs b/oximeter/collector/src/self_stats.rs index 8d39e6e282..ab9e5bedf4 100644 --- a/oximeter/collector/src/self_stats.rs +++ b/oximeter/collector/src/self_stats.rs @@ -14,6 +14,7 @@ use oximeter::MetricsError; use oximeter::Sample; use oximeter::Target; use reqwest::StatusCode; +use std::borrow::Cow; use std::collections::BTreeMap; use std::net::IpAddr; use std::time::Duration; @@ -45,6 +46,10 @@ pub struct Collections { /// The base route in the producer server used to collect metrics. /// /// The full route is `{base_route}/{producer_id}`. + /// + // TODO-cleanup: This is no longer relevant, but removing it entirely + // relies on nonexistent functionality for updating timeseries schema. When + // that lands, we should remove this. 
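Editor's note: the registration call above now distinguishes retryable failures (Nexus unreachable, or a 5xx response) from permanent ones, where retrying cannot help. A small sketch of that classification; the error enum here is illustrative, not the progenitor-generated `nexus_client::Error`.

```rust
use std::time::Duration;

/// Illustrative stand-in for a client error: either we couldn't reach the
/// server at all, or it answered with an HTTP status code.
#[derive(Debug)]
enum ClientError {
    Communication(String),
    ErrorResponse { status: u16 },
}

/// Transient errors are retried after a delay; permanent ones abort the loop.
enum BackoffDecision {
    Transient(Duration),
    Permanent,
}

fn classify(err: &ClientError) -> BackoffDecision {
    match err {
        // Failures to reach the server are worth retrying.
        ClientError::Communication(_) => {
            BackoffDecision::Transient(Duration::from_secs(1))
        }
        // So are server-side errors (5xx).
        ClientError::ErrorResponse { status }
            if (500u16..600).contains(status) =>
        {
            BackoffDecision::Transient(Duration::from_secs(1))
        }
        // Everything else (notably 4xx) is permanent.
        _ => BackoffDecision::Permanent,
    }
}

fn main() {
    assert!(matches!(
        classify(&ClientError::Communication("connection refused".into())),
        BackoffDecision::Transient(_)
    ));
    assert!(matches!(
        classify(&ClientError::ErrorResponse { status: 503 }),
        BackoffDecision::Transient(_)
    ));
    assert!(matches!(
        classify(&ClientError::ErrorResponse { status: 400 }),
        BackoffDecision::Permanent
    ));
}
```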
pub base_route: String, pub datum: Cumulative, } @@ -65,13 +70,26 @@ pub enum FailureReason { impl std::fmt::Display for FailureReason { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { - Self::Unreachable => write!(f, "unreachable"), - Self::Deserialization => write!(f, "deserialization"), + Self::Unreachable => f.write_str(Self::UNREACHABLE), + Self::Deserialization => f.write_str(Self::DESERIALIZATION), Self::Other(c) => write!(f, "{}", c.as_u16()), } } } +impl FailureReason { + const UNREACHABLE: &'static str = "unreachable"; + const DESERIALIZATION: &'static str = "deserialization"; + + fn as_string(&self) -> Cow<'static, str> { + match self { + Self::Unreachable => Cow::Borrowed(Self::UNREACHABLE), + Self::Deserialization => Cow::Borrowed(Self::DESERIALIZATION), + Self::Other(c) => Cow::Owned(c.as_u16().to_string()), + } + } +} + /// The number of failed collections from a single producer. #[derive(Clone, Debug, Metric)] pub struct FailedCollections { @@ -84,11 +102,15 @@ pub struct FailedCollections { /// The base route in the producer server used to collect metrics. /// /// The full route is `{base_route}/{producer_id}`. + /// + // TODO-cleanup: This is no longer relevant, but removing it entirely + // relies on nonexistent functionality for updating timeseries schema. When + // that lands, we should remove this. pub base_route: String, /// The reason we could not collect. // // NOTE: This should always be generated through a `FailureReason`. - pub reason: String, + pub reason: Cow<'static, str>, pub datum: Cumulative, } @@ -111,7 +133,7 @@ impl CollectionTaskStats { producer_id: producer.id, producer_ip: producer.address.ip(), producer_port: producer.address.port(), - base_route: producer.base_route.clone(), + base_route: String::new(), datum: Cumulative::new(0), }, failed_collections: BTreeMap::new(), @@ -128,7 +150,7 @@ impl CollectionTaskStats { producer_ip: self.collections.producer_ip, producer_port: self.collections.producer_port, base_route: self.collections.base_route.clone(), - reason: reason.to_string(), + reason: reason.as_string(), datum: Cumulative::new(0), } }) @@ -189,7 +211,7 @@ mod tests { producer_id: uuid::uuid!("718452ab-7cca-42f6-b8b1-1aaaa1b09104"), producer_ip: IpAddr::V6(Ipv6Addr::LOCALHOST), producer_port: 12345, - base_route: String::from("/"), + base_route: String::new(), datum: Cumulative::new(0), } } @@ -199,8 +221,8 @@ mod tests { producer_id: uuid::uuid!("718452ab-7cca-42f6-b8b1-1aaaa1b09104"), producer_ip: IpAddr::V6(Ipv6Addr::LOCALHOST), producer_port: 12345, - base_route: String::from("/"), - reason: FailureReason::Unreachable.to_string(), + base_route: String::new(), + reason: FailureReason::Unreachable.as_string(), datum: Cumulative::new(0), } } diff --git a/oximeter/collector/src/standalone.rs b/oximeter/collector/src/standalone.rs index 826a5f4663..fa040f5f54 100644 --- a/oximeter/collector/src/standalone.rs +++ b/oximeter/collector/src/standalone.rs @@ -5,13 +5,14 @@ //! Implementation of a standalone fake Nexus, simply for registering producers //! and collectors with one another. 
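Editor's note: switching the `reason` field to `Cow<'static, str>` lets the fixed failure kinds reuse a static string while only the HTTP-status case allocates. A minimal sketch of that pattern, separate from the real `FailureReason` type.

```rust
use std::borrow::Cow;

/// Illustrative copy of the pattern: static strings for fixed variants,
/// an owned `String` only when the value is dynamic.
enum Reason {
    Unreachable,
    Deserialization,
    Status(u16),
}

impl Reason {
    fn as_string(&self) -> Cow<'static, str> {
        match self {
            Reason::Unreachable => Cow::Borrowed("unreachable"),
            Reason::Deserialization => Cow::Borrowed("deserialization"),
            Reason::Status(code) => Cow::Owned(code.to_string()),
        }
    }
}

fn main() {
    // No allocation for the static variants...
    assert!(matches!(Reason::Unreachable.as_string(), Cow::Borrowed(_)));
    assert_eq!(Reason::Deserialization.as_string(), "deserialization");
    // ...and an owned string only for the dynamic one.
    assert_eq!(Reason::Status(503).as_string(), "503");
}
```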
-// Copyright 2023 Oxide Computer Company +// Copyright 2024 Oxide Computer Company use crate::Error; use dropshot::endpoint; use dropshot::ApiDescription; use dropshot::ConfigDropshot; use dropshot::HttpError; +use dropshot::HttpResponseCreated; use dropshot::HttpResponseUpdatedNoContent; use dropshot::HttpServer; use dropshot::HttpServerStarter; @@ -19,6 +20,7 @@ use dropshot::RequestContext; use dropshot::TypedBody; use nexus_types::internal_api::params::OximeterInfo; use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::api::internal::nexus::ProducerRegistrationResponse; use omicron_common::FileKv; use oximeter_client::Client; use rand::seq::IteratorRandom; @@ -32,6 +34,7 @@ use slog::Logger; use std::collections::HashMap; use std::net::SocketAddr; use std::sync::Arc; +use std::time::Duration; use tokio::sync::Mutex; use uuid::Uuid; @@ -60,6 +63,16 @@ impl Inner { } } +// The period on which producers must renew their lease. +// +// This is different from the one we actually use in Nexus (and shorter). That's +// fine, since this is really a testing interface more than anything. +const PRODUCER_RENEWAL_INTERVAL: Duration = Duration::from_secs(60); + +const fn default_producer_response() -> ProducerRegistrationResponse { + ProducerRegistrationResponse { lease_duration: PRODUCER_RENEWAL_INTERVAL } +} + // A stripped-down Nexus server, with only the APIs for registering metric // producers and collectors. #[derive(Debug)] @@ -79,10 +92,11 @@ impl StandaloneNexus { } } + /// Register an oximeter producer, returning the lease period. async fn register_producer( &self, info: &ProducerEndpoint, - ) -> Result<(), HttpError> { + ) -> Result { let mut inner = self.inner.lock().await; let assignment = match inner.producers.get_mut(&info.id) { None => { @@ -113,7 +127,7 @@ impl StandaloneNexus { // We have a record, first check if it matches the assignment we // have. 
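The standalone Nexus answers registrations with a `ProducerRegistrationResponse` whose `lease_duration` (60 seconds here) tells the producer how long the registration remains valid. One plausible renewal policy, shown purely as a sketch and not necessarily what the producer side implements, is to re-register once a fixed fraction of the lease has elapsed, so a single missed attempt does not let the lease expire:

```rust
use std::time::Duration;

// Hypothetical policy: renew after three quarters of the lease has elapsed,
// leaving a quarter of the lease as slack for a failed or slow renewal.
fn renewal_interval(lease_duration: Duration) -> Duration {
    lease_duration * 3 / 4
}

fn main() {
    // The standalone server above hands out a 60-second lease.
    let lease = Duration::from_secs(60);
    let renew_every = renewal_interval(lease);
    assert_eq!(renew_every, Duration::from_secs(45));
    println!("renew every {renew_every:?} for a {lease:?} lease");
}
```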
if &existing_assignment.producer == info { - return Ok(()); + return Ok(default_producer_response()); } // This appears to be a re-registration, e.g., the producer @@ -133,7 +147,7 @@ impl StandaloneNexus { } }; inner.producers.insert(info.id, assignment); - Ok(()) + Ok(default_producer_response()) } async fn register_collector( @@ -183,13 +197,13 @@ pub fn standalone_nexus_api() -> ApiDescription> { async fn cpapi_producers_post( request_context: RequestContext>, producer_info: TypedBody, -) -> Result { +) -> Result, HttpError> { let context = request_context.context(); let producer_info = producer_info.into_inner(); context .register_producer(&producer_info) .await - .map(|_| HttpResponseUpdatedNoContent()) + .map(HttpResponseCreated) .map_err(|e| HttpError::for_internal_error(e.to_string())) } diff --git a/oximeter/collector/tests/output/self-stat-schema.json b/oximeter/collector/tests/output/self-stat-schema.json index 8017d61880..286ac63405 100644 --- a/oximeter/collector/tests/output/self-stat-schema.json +++ b/oximeter/collector/tests/output/self-stat-schema.json @@ -39,7 +39,7 @@ } ], "datum_type": "cumulative_u64", - "created": "2024-02-05T23:03:00.842290108Z" + "created": "2024-05-21T18:32:24.199619581Z" }, "oximeter_collector:failed_collections": { "timeseries_name": "oximeter_collector:failed_collections", @@ -86,6 +86,6 @@ } ], "datum_type": "cumulative_u64", - "created": "2024-02-05T23:03:00.842943988Z" + "created": "2024-05-21T18:32:24.200514936Z" } } \ No newline at end of file diff --git a/oximeter/db/Cargo.toml b/oximeter/db/Cargo.toml index c4ee44acb6..c446bc7822 100644 --- a/oximeter/db/Cargo.toml +++ b/oximeter/db/Cargo.toml @@ -5,8 +5,12 @@ edition = "2021" description = "Tools for interacting with the Oxide control plane telemetry database" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true +async-recursion = "1.1.1" async-trait.workspace = true bcs.workspace = true camino.workspace = true @@ -15,21 +19,16 @@ clap.workspace = true dropshot.workspace = true futures.workspace = true highway.workspace = true -indexmap.workspace = true omicron-common.workspace = true omicron-workspace-hack.workspace = true oximeter.workspace = true -reedline.workspace = true regex.workspace = true -rustyline.workspace = true serde.workspace = true serde_json.workspace = true slog.workspace = true slog-async.workspace = true +slog-dtrace.workspace = true slog-term.workspace = true -sqlparser.workspace = true -sqlformat = "0.2.3" -tabled.workspace = true thiserror.workspace = true usdt.workspace = true uuid.workspace = true @@ -38,26 +37,82 @@ uuid.workspace = true workspace = true features = [ "serde" ] +[dependencies.crossterm] +workspace = true +optional = true + +[dependencies.indexmap] +workspace = true +optional = true + +[dependencies.num] +workspace = true +optional = true + +[dependencies.peg] +workspace = true +optional = true + +[dependencies.reedline] +workspace = true +optional = true + [dependencies.reqwest] workspace = true features = [ "json" ] +[dependencies.rustyline] +workspace = true +optional = true + [dependencies.schemars] workspace = true features = [ "uuid1", "bytes", "chrono" ] +[dependencies.sqlformat] +workspace = true +optional = true + +[dependencies.sqlparser] +workspace = true +optional = true + [dependencies.tokio] workspace = true features = [ "rt-multi-thread", "macros" ] +[dependencies.tabled] +workspace = true +optional = true + [dev-dependencies] expectorate.workspace = true +indexmap.workspace = true 
itertools.workspace = true omicron-test-utils.workspace = true slog-dtrace.workspace = true +sqlparser.workspace = true strum.workspace = true tempfile.workspace = true +[features] +default = [ "oxql", "sql" ] +sql = [ + "dep:indexmap", + "dep:reedline", + "dep:rustyline", + "dep:sqlformat", + "dep:sqlparser", + "dep:tabled" +] +oxql = [ + "dep:crossterm", + "dep:num", + "dep:peg", + "dep:reedline", + "dep:tabled", +] + [[bin]] name = "oxdb" doc = false diff --git a/oximeter/db/src/bin/oxdb.rs b/oximeter/db/src/bin/oxdb/main.rs similarity index 50% rename from oximeter/db/src/bin/oxdb.rs rename to oximeter/db/src/bin/oxdb/main.rs index 02a8054da0..ca11dd18a3 100644 --- a/oximeter/db/src/bin/oxdb.rs +++ b/oximeter/db/src/bin/oxdb/main.rs @@ -4,31 +4,27 @@ //! Tool for developing against the Oximeter timeseries database, populating data and querying. -// Copyright 2023 Oxide Computer Company +// Copyright 2024 Oxide Computer Company use anyhow::{bail, Context}; use chrono::{DateTime, Utc}; use clap::{Args, Parser}; -use dropshot::EmptyScanParams; -use dropshot::WhichPage; use oximeter::{ types::{Cumulative, Sample}, Metric, Target, }; -use oximeter_db::sql::function_allow_list; -use oximeter_db::QueryMetadata; -use oximeter_db::QueryResult; -use oximeter_db::Table; use oximeter_db::{query, Client, DbWrite}; -use reedline::DefaultPrompt; -use reedline::DefaultPromptSegment; -use reedline::Reedline; -use reedline::Signal; use slog::{debug, info, o, Drain, Level, Logger}; use std::net::IpAddr; use std::net::SocketAddr; use uuid::Uuid; +#[cfg(feature = "sql")] +mod sql; + +#[cfg(feature = "oxql")] +mod oxql; + // Samples are inserted in chunks of this size, to avoid large allocations when inserting huge // numbers of timeseries. const INSERT_CHUNK_SIZE: usize = 100_000; @@ -151,9 +147,17 @@ enum Subcommand { }, /// Enter a SQL shell for interactive querying. + #[cfg(feature = "sql")] Sql { #[clap(flatten)] - opts: ShellOptions, + opts: crate::sql::ShellOptions, + }, + + /// Enter the Oximeter Query Language shell for interactive querying. + #[cfg(feature = "oxql")] + Oxql { + #[clap(flatten)] + opts: crate::oxql::ShellOptions, }, } @@ -312,281 +316,6 @@ async fn query( Ok(()) } -fn print_basic_commands() { - println!("Basic commands:"); - println!(" \\?, \\h, help - Print this help"); - println!(" \\q, quit, exit, ^D - Exit the shell"); - println!(" \\l - List tables"); - println!(" \\d - Describe a table"); - println!( - " \\f - List or describe ClickHouse SQL functions" - ); - println!(); - println!("Or try entering a SQL `SELECT` statement"); -} - -async fn list_virtual_tables(client: &Client) -> anyhow::Result<()> { - let mut page = WhichPage::First(EmptyScanParams {}); - let limit = 100.try_into().unwrap(); - loop { - let results = client.timeseries_schema_list(&page, limit).await?; - for schema in results.items.iter() { - println!("{}", schema.timeseries_name); - } - if results.next_page.is_some() { - if let Some(last) = results.items.last() { - page = WhichPage::Next(last.timeseries_name.clone()); - } else { - return Ok(()); - } - } else { - return Ok(()); - } - } -} - -async fn describe_virtual_table( - client: &Client, - table: &str, -) -> anyhow::Result<()> { - match table.parse() { - Err(_) => println!("Invalid timeseries name: {table}"), - Ok(name) => { - if let Some(schema) = client.schema_for_timeseries(&name).await? 
{ - let mut cols = - Vec::with_capacity(schema.field_schema.len() + 2); - let mut types = cols.clone(); - for field in schema.field_schema.iter() { - cols.push(field.name.clone()); - types.push(field.field_type.to_string()); - } - cols.push("timestamp".into()); - types.push("DateTime64".into()); - - if schema.datum_type.is_histogram() { - cols.push("start_time".into()); - types.push("DateTime64".into()); - - cols.push("bins".into()); - types.push(format!( - "Array[{}]", - schema - .datum_type - .to_string() - .strip_prefix("Histogram") - .unwrap() - .to_lowercase(), - )); - - cols.push("counts".into()); - types.push("Array[u64]".into()); - } else if schema.datum_type.is_cumulative() { - cols.push("start_time".into()); - types.push("DateTime64".into()); - cols.push("datum".into()); - types.push(schema.datum_type.to_string()); - } else { - cols.push("datum".into()); - types.push(schema.datum_type.to_string()); - } - - let mut builder = tabled::builder::Builder::default(); - builder.push_record(cols); // first record is the header - builder.push_record(types); - println!( - "{}", - builder.build().with(tabled::settings::Style::psql()) - ); - } else { - println!("No such timeseries: {table}"); - } - } - } - Ok(()) -} - -#[derive(Clone, Debug, Args)] -struct ShellOptions { - /// Print query metadata. - #[clap(long = "metadata")] - print_metadata: bool, - /// Print the original SQL query. - #[clap(long = "original")] - print_original_query: bool, - /// Print the rewritten SQL query that is actually run on the DB. - #[clap(long = "rewritten")] - print_rewritten_query: bool, - /// Print the transformed query, but do not run it. - #[clap(long)] - transform: Option, -} - -impl Default for ShellOptions { - fn default() -> Self { - Self { - print_metadata: true, - print_original_query: false, - print_rewritten_query: false, - transform: None, - } - } -} - -fn list_supported_functions() { - println!("Subset of ClickHouse SQL functions currently supported"); - println!( - "See https://clickhouse.com/docs/en/sql-reference/functions for more" - ); - println!(); - for func in function_allow_list().iter() { - println!(" {func}"); - } -} - -fn show_supported_function(name: &str) { - if let Some(func) = function_allow_list().iter().find(|f| f.name == name) { - println!("{}", func.name); - println!(" {}", func.usage); - println!(" {}", func.description); - } else { - println!("No supported function '{name}'"); - } -} - -fn print_sql_query(query: &str) { - println!( - "{}", - sqlformat::format( - &query, - &sqlformat::QueryParams::None, - sqlformat::FormatOptions { uppercase: true, ..Default::default() } - ) - ); - println!(); -} - -fn print_query_metadata(table: &Table, metadata: &QueryMetadata) { - println!("Metadata"); - println!(" Query ID: {}", metadata.id); - println!(" Result rows: {}", table.rows.len()); - println!(" Time: {:?}", metadata.elapsed); - println!(" Read: {}\n", metadata.summary.read); -} - -async fn sql_shell( - address: IpAddr, - port: u16, - log: Logger, - opts: ShellOptions, -) -> anyhow::Result<()> { - let client = make_client(address, port, &log).await?; - - // A workaround to ensure the client has all available timeseries when the - // shell starts. - let dummy = "foo:bar".parse().unwrap(); - let _ = client.schema_for_timeseries(&dummy).await; - - // Possibly just transform the query, but do not execute it. 
- if let Some(query) = &opts.transform { - let transformed = client.transform_query(query).await?; - println!( - "{}", - sqlformat::format( - &transformed, - &sqlformat::QueryParams::None, - sqlformat::FormatOptions { - uppercase: true, - ..Default::default() - } - ) - ); - return Ok(()); - } - - let mut ed = Reedline::create(); - let prompt = DefaultPrompt::new( - DefaultPromptSegment::Basic("0x".to_string()), - DefaultPromptSegment::Empty, - ); - println!("Oximeter SQL shell"); - println!(); - print_basic_commands(); - loop { - let sig = ed.read_line(&prompt); - match sig { - Ok(Signal::Success(buf)) => { - let cmd = buf.as_str().trim(); - match cmd { - "" => continue, - "\\?" | "\\h" | "help" => print_basic_commands(), - "\\q" | "quit" | "exit" => return Ok(()), - "\\l" | "\\d" => list_virtual_tables(&client).await?, - _ => { - if let Some(table_name) = cmd.strip_prefix("\\d") { - if table_name.is_empty() { - list_virtual_tables(&client).await?; - } else { - describe_virtual_table( - &client, - table_name.trim().trim_end_matches(';'), - ) - .await?; - } - } else if let Some(func_name) = cmd.strip_prefix("\\f") - { - if func_name.is_empty() { - list_supported_functions(); - } else { - show_supported_function( - func_name.trim().trim_end_matches(';'), - ); - } - } else { - match client.query(&buf).await { - Err(e) => println!("Query failed: {e:#?}"), - Ok(QueryResult { - original_query, - rewritten_query, - metadata, - table, - }) => { - println!(); - let mut builder = - tabled::builder::Builder::default(); - builder.push_record(&table.column_names); // first record is the header - for row in table.rows.iter() { - builder.push_record( - row.iter().map(ToString::to_string), - ); - } - if opts.print_original_query { - print_sql_query(&original_query); - } - if opts.print_rewritten_query { - print_sql_query(&rewritten_query); - } - println!( - "{}\n", - builder.build().with( - tabled::settings::Style::psql() - ) - ); - if opts.print_metadata { - print_query_metadata(&table, &metadata); - } - } - } - } - } - } - } - Ok(Signal::CtrlD) => return Ok(()), - Ok(Signal::CtrlC) => continue, - err => println!("err: {err:?}"), - } - } -} - #[tokio::main] async fn main() -> anyhow::Result<()> { usdt::register_probes().context("Failed to register USDT probes")?; @@ -598,6 +327,7 @@ async fn main() -> anyhow::Result<()> { .filter_level(args.log_level) .fuse(); let drain = slog_async::Async::new(drain).build().fuse(); + let drain = slog_dtrace::with_drain(drain).0.fuse(); let log = Logger::root(drain, o!("component" => "oxdb")); match args.cmd { Subcommand::Describe => describe_data(), @@ -636,8 +366,13 @@ async fn main() -> anyhow::Result<()> { ) .await?; } + #[cfg(feature = "sql")] Subcommand::Sql { opts } => { - sql_shell(args.address, args.port, log, opts).await? + crate::sql::sql_shell(args.address, args.port, log, opts).await? + } + #[cfg(feature = "oxql")] + Subcommand::Oxql { opts } => { + crate::oxql::oxql_shell(args.address, args.port, log, opts).await? } } Ok(()) diff --git a/oximeter/db/src/bin/oxdb/oxql.rs b/oximeter/db/src/bin/oxdb/oxql.rs new file mode 100644 index 0000000000..54e40afa15 --- /dev/null +++ b/oximeter/db/src/bin/oxdb/oxql.rs @@ -0,0 +1,333 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! OxQL shell. 
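The `oxdb` binary now compiles its two shells conditionally: the `sql` and `oxql` Cargo features gate both the clap subcommand variants and the match arms that dispatch to them. A minimal, std-only sketch of that shape (hand-rolled dispatch instead of clap, and hypothetical variant names):

```rust
// Each optional shell lives behind its own feature; the enum variant and its
// match arm simply disappear from the binary when the feature is disabled.
enum Subcommand {
    Describe,
    #[cfg(feature = "sql")]
    Sql,
    #[cfg(feature = "oxql")]
    Oxql,
}

fn run(cmd: Subcommand) {
    match cmd {
        Subcommand::Describe => println!("describe the schema"),
        #[cfg(feature = "sql")]
        Subcommand::Sql => println!("enter the SQL shell"),
        #[cfg(feature = "oxql")]
        Subcommand::Oxql => println!("enter the OxQL shell"),
    }
}

fn main() {
    run(Subcommand::Describe);
}
```

With the default feature set both shells are present; building with `--no-default-features` plus just one of the two features would drop the other shell and its optional dependencies (reedline, sqlparser, tabled, and so on) from the binary.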
+ +// Copyright 2024 Oxide Computer + +use crate::make_client; +use clap::Args; +use crossterm::style::Stylize; +use dropshot::EmptyScanParams; +use dropshot::WhichPage; +use oximeter_db::oxql::query::special_idents; +use oximeter_db::oxql::Table; +use oximeter_db::Client; +use oximeter_db::OxqlResult; +use reedline::DefaultPrompt; +use reedline::DefaultPromptSegment; +use reedline::Reedline; +use reedline::Signal; +use slog::Logger; +use std::net::IpAddr; + +#[derive(Clone, Debug, Args)] +pub struct ShellOptions { + /// Print summaries of each SQL query run against the database. + #[clap(long = "summaries")] + print_summaries: bool, + /// Print the total elapsed query duration. + #[clap(long = "elapsed")] + print_elapsed: bool, +} + +// Print help for the basic OxQL commands. +fn print_basic_commands() { + println!("Basic commands:"); + println!(" \\?, \\h, help - Print this help"); + println!(" \\q, quit, exit, ^D - Exit the shell"); + println!(" \\l - List timeseries"); + println!(" \\d - Describe a timeseries"); + println!(" \\ql [] - Get OxQL help about an operation"); + println!(); + println!("Or try entering an OxQL `get` query"); +} + +// Print high-level information about OxQL. +fn print_general_oxql_help() { + const HELP: &str = r#"Oximeter Query Language + +The Oximeter Query Language (OxQL) implements queries as +as sequence of operations. Each of these takes zero or more +timeseries as inputs, and produces zero or more timeseries +as outputs. Operations are chained together with the pipe +operator, "|". + +All queries start with a `get` operation, which selects a +timeseries from the database, by name. For example: + +`get physical_data_link:bytes_received` + +The supported timeseries operations are: + +- get: Select a timeseries by name +- filter: Filter timeseries by field or sample values +- group_by: Group timeseries by fields, applying a reducer. +- join: Join two or more timeseries together + +Run `\ql ` to get specific help about that operation. + "#; + println!("{HELP}"); +} + +// Print help for a specific OxQL operation. +fn print_oxql_operation_help(op: &str) { + match op { + "get" => { + const HELP: &str = r#"get "); + +Get instances of a timeseries by name"#; + println!("{HELP}"); + } + "filter" => { + const HELP: &str = r#"filter "); + +Filter timeseries based on their attributes. + can be a logical combination of filtering +\"atoms\", such as `field_foo > 0`. Expressions +may use any of the usual comparison operators, and +can be nested and combined with && or ||. + +Expressions must refer to the name of a field +for a timeseries at this time, and must compare +against literals. For example, `some_field > 0` +is supported, but `some_field > other_field` is not."#; + println!("{HELP}"); + } + "group_by" => { + const HELP: &str = r#"group_by [, ... ] +group_by [, ... ], + +Group timeseries by the named fields, optionally +specifying a reducer to use when aggregating the +timeseries within each group. If no reducer is +specified, `mean` is used, averaging the values +within each group. + +Current supported reducers: + - mean + - sum"#; + println!("{HELP}"); + } + "join" => { + const HELP: &str = r#"join + +Combine 2 or more tables by peforming a natural +inner join, matching up those with fields of the +same value. Currently, joining does not take into +account the timestamps, and does not align the outputs +directly."#; + println!("{HELP}"); + } + _ => eprintln!("unrecognized OxQL operation: '{op}'"), + } +} + +// List the known timeseries. 
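The help text above sketches the language: every query starts with `get`, and operations are chained with `|`. For a concrete flavor, here are example inputs as they might be typed at the shell prompt, held as Rust string constants. The timeseries name `physical_data_link:bytes_received` and the `some_field > 0` comparison come from the help text itself; `other_field` is a hypothetical field name, and the exact `group_by` spelling should be checked against the OxQL grammar:

```rust
// A bare `get` pulls every timeseries with the given name.
const SIMPLE: &str = "get physical_data_link:bytes_received";

// Operations are chained with `|`: filter on a field, then group and reduce.
const PIPELINE: &str =
    "get physical_data_link:bytes_received | filter some_field > 0 | group_by [other_field], mean";

fn main() {
    for q in [SIMPLE, PIPELINE] {
        println!("oxql> {q}");
    }
}
```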
+async fn list_timeseries(client: &Client) -> anyhow::Result<()> { + let mut page = WhichPage::First(EmptyScanParams {}); + let limit = 100.try_into().unwrap(); + loop { + let results = client.timeseries_schema_list(&page, limit).await?; + for schema in results.items.iter() { + println!("{}", schema.timeseries_name); + } + if results.next_page.is_some() { + if let Some(last) = results.items.last() { + page = WhichPage::Next(last.timeseries_name.clone()); + } else { + return Ok(()); + } + } else { + return Ok(()); + } + } +} + +// Describe a single timeseries. +async fn describe_timeseries( + client: &Client, + timeseries: &str, +) -> anyhow::Result<()> { + match timeseries.parse() { + Err(_) => eprintln!( + "Invalid timeseries name '{timeseries}, \ + use \\l to list available timeseries by name + " + ), + Ok(name) => { + if let Some(schema) = client.schema_for_timeseries(&name).await? { + let mut cols = + Vec::with_capacity(schema.field_schema.len() + 2); + let mut types = cols.clone(); + for field in schema.field_schema.iter() { + cols.push(field.name.clone()); + types.push(field.field_type.to_string()); + } + cols.push(special_idents::TIMESTAMP.into()); + types.push(special_idents::DATETIME64.into()); + + if schema.datum_type.is_histogram() { + cols.push(special_idents::START_TIME.into()); + types.push(special_idents::DATETIME64.into()); + + cols.push(special_idents::BINS.into()); + types.push( + special_idents::array_type_name_from_histogram_type( + schema.datum_type, + ) + .unwrap(), + ); + + cols.push(special_idents::COUNTS.into()); + types.push(special_idents::ARRAYU64.into()); + } else if schema.datum_type.is_cumulative() { + cols.push(special_idents::START_TIME.into()); + types.push(special_idents::DATETIME64.into()); + cols.push(special_idents::DATUM.into()); + types.push(schema.datum_type.to_string()); + } else { + cols.push(special_idents::DATUM.into()); + types.push(schema.datum_type.to_string()); + } + + let mut builder = tabled::builder::Builder::default(); + builder.push_record(cols); // first record is the header + builder.push_record(types); + println!( + "{}", + builder.build().with(tabled::settings::Style::psql()) + ); + } else { + eprintln!("No such timeseries: {timeseries}"); + } + } + } + Ok(()) +} + +/// Run the OxQL shell. +pub async fn oxql_shell( + address: IpAddr, + port: u16, + log: Logger, + opts: ShellOptions, +) -> anyhow::Result<()> { + let client = make_client(address, port, &log).await?; + + // A workaround to ensure the client has all available timeseries when the + // shell starts. + let dummy = "foo:bar".parse().unwrap(); + let _ = client.schema_for_timeseries(&dummy).await; + + // Create the line-editor. + let mut ed = Reedline::create(); + let prompt = DefaultPrompt::new( + DefaultPromptSegment::Basic("0x".to_string()), + DefaultPromptSegment::Empty, + ); + println!("Oximeter Query Language shell"); + println!(); + print_basic_commands(); + loop { + let sig = ed.read_line(&prompt); + match sig { + Ok(Signal::Success(buf)) => { + let cmd = buf.as_str().trim(); + match cmd { + "" => continue, + "\\?" 
| "\\h" | "help" => print_basic_commands(), + "\\q" | "quit" | "exit" => return Ok(()), + "\\l" | "\\d" => list_timeseries(&client).await?, + _ => { + if let Some(timeseries_name) = cmd.strip_prefix("\\d") { + if timeseries_name.is_empty() { + list_timeseries(&client).await?; + } else { + describe_timeseries( + &client, + timeseries_name + .trim() + .trim_end_matches(';'), + ) + .await?; + } + } else if let Some(stmt) = cmd.strip_prefix("\\ql") { + let stmt = stmt.trim(); + if stmt.is_empty() { + print_general_oxql_help(); + } else { + print_oxql_operation_help(stmt); + } + } else { + match client + .oxql_query(cmd.trim().trim_end_matches(';')) + .await + { + Ok(result) => { + print_query_summary( + &result, + opts.print_elapsed, + opts.print_summaries, + ); + print_tables(&result.tables); + } + Err(e) => { + eprintln!("{}", "Error".underlined().red()); + eprintln!("{e}"); + } + } + } + } + } + } + Ok(Signal::CtrlD) => return Ok(()), + Ok(Signal::CtrlC) => continue, + err => eprintln!("err: {err:?}"), + } + } +} + +fn print_query_summary( + result: &OxqlResult, + print_elapsed: bool, + print_summaries: bool, +) { + if !print_elapsed && !print_summaries { + return; + } + println!("{}", "Query summary".underlined().bold()); + println!(" {}: {}", "ID".bold(), result.query_id); + if print_elapsed { + println!(" {}: {:?}\n", "Total duration".bold(), result.total_duration); + } + if print_summaries { + println!(" {}:", "SQL queries".bold()); + for summary in result.query_summaries.iter() { + println!(" {}: {}", "ID".bold(), summary.id); + println!(" {}: {:?}", "Duration".bold(), summary.elapsed); + println!(" {}: {}", "Read".bold(), summary.io_summary.read); + println!(); + } + } +} + +fn print_tables(tables: &[Table]) { + for table in tables.iter() { + println!(); + println!("{}", table.name().underlined().bold()); + for timeseries in table.iter() { + if timeseries.points.is_empty() { + continue; + } + println!(); + for (name, value) in timeseries.fields.iter() { + println!(" {}: {}", name.as_str().bold(), value); + } + for point in timeseries.points.iter_points() { + println!(" {point}"); + } + } + } +} diff --git a/oximeter/db/src/bin/oxdb/sql.rs b/oximeter/db/src/bin/oxdb/sql.rs new file mode 100644 index 0000000000..d50a60f4d7 --- /dev/null +++ b/oximeter/db/src/bin/oxdb/sql.rs @@ -0,0 +1,298 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! SQL shell subcommand for `oxdb`. + +// Copyright 2024 Oxide Computer Company + +use crate::make_client; +use clap::Args; +use dropshot::EmptyScanParams; +use dropshot::WhichPage; +use oximeter_db::sql::function_allow_list; +use oximeter_db::sql::QueryResult; +use oximeter_db::sql::Table; +use oximeter_db::Client; +use oximeter_db::QuerySummary; +use reedline::DefaultPrompt; +use reedline::DefaultPromptSegment; +use reedline::Reedline; +use reedline::Signal; +use slog::Logger; +use std::net::IpAddr; + +fn print_basic_commands() { + println!("Basic commands:"); + println!(" \\?, \\h, help - Print this help"); + println!(" \\q, quit, exit, ^D - Exit the shell"); + println!(" \\l - List tables"); + println!(" \\d
- Describe a table"); + println!( + " \\f - List or describe ClickHouse SQL functions" + ); + println!(); + println!("Or try entering a SQL `SELECT` statement"); +} + +async fn list_virtual_tables(client: &Client) -> anyhow::Result<()> { + let mut page = WhichPage::First(EmptyScanParams {}); + let limit = 100.try_into().unwrap(); + loop { + let results = client.timeseries_schema_list(&page, limit).await?; + for schema in results.items.iter() { + println!("{}", schema.timeseries_name); + } + if results.next_page.is_some() { + if let Some(last) = results.items.last() { + page = WhichPage::Next(last.timeseries_name.clone()); + } else { + return Ok(()); + } + } else { + return Ok(()); + } + } +} + +async fn describe_virtual_table( + client: &Client, + table: &str, +) -> anyhow::Result<()> { + match table.parse() { + Err(_) => println!("Invalid timeseries name: {table}"), + Ok(name) => { + if let Some(schema) = client.schema_for_timeseries(&name).await? { + let mut cols = + Vec::with_capacity(schema.field_schema.len() + 2); + let mut types = cols.clone(); + for field in schema.field_schema.iter() { + cols.push(field.name.clone()); + types.push(field.field_type.to_string()); + } + cols.push("timestamp".into()); + types.push("DateTime64".into()); + + if schema.datum_type.is_histogram() { + cols.push("start_time".into()); + types.push("DateTime64".into()); + + cols.push("bins".into()); + types.push(format!( + "Array[{}]", + schema + .datum_type + .to_string() + .strip_prefix("Histogram") + .unwrap() + .to_lowercase(), + )); + + cols.push("counts".into()); + types.push("Array[u64]".into()); + } else if schema.datum_type.is_cumulative() { + cols.push("start_time".into()); + types.push("DateTime64".into()); + cols.push("datum".into()); + types.push(schema.datum_type.to_string()); + } else { + cols.push("datum".into()); + types.push(schema.datum_type.to_string()); + } + + let mut builder = tabled::builder::Builder::default(); + builder.push_record(cols); // first record is the header + builder.push_record(types); + println!( + "{}", + builder.build().with(tabled::settings::Style::psql()) + ); + } else { + println!("No such timeseries: {table}"); + } + } + } + Ok(()) +} + +#[derive(Clone, Debug, Args)] +pub struct ShellOptions { + /// Print query metadata. + #[clap(long = "metadata")] + print_metadata: bool, + /// Print the original SQL query. + #[clap(long = "original")] + print_original_query: bool, + /// Print the rewritten SQL query that is actually run on the DB. + #[clap(long = "rewritten")] + print_rewritten_query: bool, + /// Print the transformed query, but do not run it. 
+ #[clap(long)] + transform: Option, +} + +impl Default for ShellOptions { + fn default() -> Self { + Self { + print_metadata: true, + print_original_query: false, + print_rewritten_query: false, + transform: None, + } + } +} + +fn list_supported_functions() { + println!("Subset of ClickHouse SQL functions currently supported"); + println!( + "See https://clickhouse.com/docs/en/sql-reference/functions for more" + ); + println!(); + for func in function_allow_list().iter() { + println!(" {func}"); + } +} + +fn show_supported_function(name: &str) { + if let Some(func) = function_allow_list().iter().find(|f| f.name == name) { + println!("{}", func.name); + println!(" {}", func.usage); + println!(" {}", func.description); + } else { + println!("No supported function '{name}'"); + } +} + +fn print_sql_query(query: &str) { + println!( + "{}", + sqlformat::format( + &query, + &sqlformat::QueryParams::None, + sqlformat::FormatOptions { uppercase: true, ..Default::default() } + ) + ); + println!(); +} + +fn print_query_summary(table: &Table, summary: &QuerySummary) { + println!("Summary"); + println!(" Query ID: {}", summary.id); + println!(" Result rows: {}", table.rows.len()); + println!(" Time: {:?}", summary.elapsed); + println!(" Read: {}\n", summary.io_summary.read); +} + +pub async fn sql_shell( + address: IpAddr, + port: u16, + log: Logger, + opts: ShellOptions, +) -> anyhow::Result<()> { + let client = make_client(address, port, &log).await?; + + // A workaround to ensure the client has all available timeseries when the + // shell starts. + let dummy = "foo:bar".parse().unwrap(); + let _ = client.schema_for_timeseries(&dummy).await; + + // Possibly just transform the query, but do not execute it. + if let Some(query) = &opts.transform { + let transformed = client.transform_query(query).await?; + println!( + "{}", + sqlformat::format( + &transformed, + &sqlformat::QueryParams::None, + sqlformat::FormatOptions { + uppercase: true, + ..Default::default() + } + ) + ); + return Ok(()); + } + + let mut ed = Reedline::create(); + let prompt = DefaultPrompt::new( + DefaultPromptSegment::Basic("0x".to_string()), + DefaultPromptSegment::Empty, + ); + println!("Oximeter SQL shell"); + println!(); + print_basic_commands(); + loop { + let sig = ed.read_line(&prompt); + match sig { + Ok(Signal::Success(buf)) => { + let cmd = buf.as_str().trim(); + match cmd { + "" => continue, + "\\?" 
| "\\h" | "help" => print_basic_commands(), + "\\q" | "quit" | "exit" => return Ok(()), + "\\l" | "\\d" => list_virtual_tables(&client).await?, + _ => { + if let Some(table_name) = cmd.strip_prefix("\\d") { + if table_name.is_empty() { + list_virtual_tables(&client).await?; + } else { + describe_virtual_table( + &client, + table_name.trim().trim_end_matches(';'), + ) + .await?; + } + } else if let Some(func_name) = cmd.strip_prefix("\\f") + { + if func_name.is_empty() { + list_supported_functions(); + } else { + show_supported_function( + func_name.trim().trim_end_matches(';'), + ); + } + } else { + match client.query(&buf).await { + Err(e) => println!("Query failed: {e:#?}"), + Ok(QueryResult { + original_query, + rewritten_query, + summary, + table, + }) => { + println!(); + let mut builder = + tabled::builder::Builder::default(); + builder.push_record(&table.column_names); // first record is the header + for row in table.rows.iter() { + builder.push_record( + row.iter().map(ToString::to_string), + ); + } + if opts.print_original_query { + print_sql_query(&original_query); + } + if opts.print_rewritten_query { + print_sql_query(&rewritten_query); + } + println!( + "{}\n", + builder.build().with( + tabled::settings::Style::psql() + ) + ); + if opts.print_metadata { + print_query_summary(&table, &summary); + } + } + } + } + } + } + } + Ok(Signal::CtrlD) => return Ok(()), + Ok(Signal::CtrlC) => continue, + err => eprintln!("err: {err:?}"), + } + } +} diff --git a/oximeter/db/src/client/dbwrite.rs b/oximeter/db/src/client/dbwrite.rs new file mode 100644 index 0000000000..f21880f314 --- /dev/null +++ b/oximeter/db/src/client/dbwrite.rs @@ -0,0 +1,266 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Implementation of client methods that write to the ClickHouse database. + +// Copyright 2024 Oxide Computer Company + +use crate::client::Client; +use crate::model; +use crate::Error; +use oximeter::Sample; +use oximeter::TimeseriesName; +use slog::debug; +use std::collections::BTreeMap; +use std::collections::BTreeSet; + +#[derive(Debug)] +pub(super) struct UnrolledSampleRows { + /// The timeseries schema rows, keyed by timeseries name. + pub new_schema: BTreeMap, + /// The rows to insert in all the other tables, keyed by the table name. + pub rows: BTreeMap>, +} + +/// A trait allowing a [`Client`] to write data into the timeseries database. +/// +/// The vanilla [`Client`] object allows users to query the timeseries database, returning +/// timeseries samples corresponding to various filtering criteria. This trait segregates the +/// methods required for _writing_ new data into the database, and is intended only for use by the +/// `oximeter-collector` crate. +#[async_trait::async_trait] +pub trait DbWrite { + /// Insert the given samples into the database. + async fn insert_samples(&self, samples: &[Sample]) -> Result<(), Error>; + + /// Initialize the replicated telemetry database, creating tables as needed. + async fn init_replicated_db(&self) -> Result<(), Error>; + + /// Initialize a single node telemetry database, creating tables as needed. + async fn init_single_node_db(&self) -> Result<(), Error>; + + /// Wipe the ClickHouse database entirely from a single node set up. + async fn wipe_single_node_db(&self) -> Result<(), Error>; + + /// Wipe the ClickHouse database entirely from a replicated set up. 
+ async fn wipe_replicated_db(&self) -> Result<(), Error>; +} + +#[async_trait::async_trait] +impl DbWrite for Client { + /// Insert the given samples into the database. + async fn insert_samples(&self, samples: &[Sample]) -> Result<(), Error> { + debug!(self.log, "unrolling {} total samples", samples.len()); + let UnrolledSampleRows { new_schema, rows } = + self.unroll_samples(samples).await; + self.save_new_schema_or_remove(new_schema).await?; + self.insert_unrolled_samples(rows).await + } + + /// Initialize the replicated telemetry database, creating tables as needed. + async fn init_replicated_db(&self) -> Result<(), Error> { + debug!(self.log, "initializing ClickHouse database"); + self.run_many_sql_statements(include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/schema/replicated/db-init.sql" + ))) + .await + } + + /// Wipe the ClickHouse database entirely from a replicated set up. + async fn wipe_replicated_db(&self) -> Result<(), Error> { + debug!(self.log, "wiping ClickHouse database"); + self.run_many_sql_statements(include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/schema/replicated/db-wipe.sql" + ))) + .await + } + + /// Initialize a single node telemetry database, creating tables as needed. + async fn init_single_node_db(&self) -> Result<(), Error> { + debug!(self.log, "initializing ClickHouse database"); + self.run_many_sql_statements(include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/schema/single-node/db-init.sql" + ))) + .await + } + + /// Wipe the ClickHouse database entirely from a single node set up. + async fn wipe_single_node_db(&self) -> Result<(), Error> { + debug!(self.log, "wiping ClickHouse database"); + self.run_many_sql_statements(include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/schema/single-node/db-wipe.sql" + ))) + .await + } +} + +impl Client { + // Unroll each sample into its consituent rows, after verifying the schema. + // + // Note that this also inserts the schema into the internal cache, if it + // does not already exist there. + pub(super) async fn unroll_samples( + &self, + samples: &[Sample], + ) -> UnrolledSampleRows { + let mut seen_timeseries = BTreeSet::new(); + let mut rows = BTreeMap::new(); + let mut new_schema = BTreeMap::new(); + + for sample in samples.iter() { + match self.verify_or_cache_sample_schema(sample).await { + Err(_) => { + // Skip the sample, but otherwise do nothing. The error is logged in the above + // call. + continue; + } + Ok(None) => {} + Ok(Some((name, schema))) => { + debug!( + self.log, + "new timeseries schema"; + "timeseries_name" => %name, + "schema" => %schema + ); + new_schema.insert(name, schema); + } + } + + // Key on both the timeseries name and key, as timeseries may actually share keys. + let key = ( + sample.timeseries_name.as_str(), + crate::timeseries_key(sample), + ); + if !seen_timeseries.contains(&key) { + for (table_name, table_rows) in model::unroll_field_rows(sample) + { + rows.entry(table_name) + .or_insert_with(Vec::new) + .extend(table_rows); + } + } + + let (table_name, measurement_row) = + model::unroll_measurement_row(sample); + + rows.entry(table_name) + .or_insert_with(Vec::new) + .push(measurement_row); + + seen_timeseries.insert(key); + } + + UnrolledSampleRows { new_schema, rows } + } + + // Insert unrolled sample rows into the corresponding tables. 
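`unroll_samples` writes field rows only once per (timeseries name, timeseries key) pair, tracked by the `seen_timeseries` set, while every sample contributes a measurement row. A self-contained sketch of that dedup rule, with a stripped-down stand-in for `Sample` and plain strings for the rows:

```rust
use std::collections::{BTreeMap, BTreeSet};

// Hypothetical, stripped-down sample: a timeseries name, a key derived from
// its field values, and one measurement row.
struct Sample {
    timeseries_name: String,
    timeseries_key: u64,
    field_row: String,
    measurement_row: String,
}

// Field rows are written once per (name, key); measurement rows always.
fn unroll(samples: &[Sample]) -> BTreeMap<&'static str, Vec<String>> {
    let mut seen = BTreeSet::new();
    let mut rows: BTreeMap<&'static str, Vec<String>> = BTreeMap::new();
    for s in samples {
        let key = (s.timeseries_name.clone(), s.timeseries_key);
        if seen.insert(key) {
            rows.entry("fields").or_default().push(s.field_row.clone());
        }
        rows.entry("measurements").or_default().push(s.measurement_row.clone());
    }
    rows
}

fn main() {
    let s = |k| Sample {
        timeseries_name: "foo:bar".to_string(),
        timeseries_key: k,
        field_row: format!("{{\"key\":{k}}}"),
        measurement_row: format!("{{\"key\":{k},\"datum\":1}}"),
    };
    let rows = unroll(&[s(1), s(1), s(2)]);
    assert_eq!(rows["fields"].len(), 2);       // deduplicated per (name, key)
    assert_eq!(rows["measurements"].len(), 3); // one per sample
}
```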
+ async fn insert_unrolled_samples( + &self, + rows: BTreeMap>, + ) -> Result<(), Error> { + for (table_name, rows) in rows { + let body = format!( + "INSERT INTO {table_name} FORMAT JSONEachRow\n{row_data}\n", + table_name = table_name, + row_data = rows.join("\n") + ); + // TODO-robustness We've verified the schema, so this is likely a transient failure. + // But we may want to check the actual error condition, and, if possible, continue + // inserting any remaining data. + self.execute(body).await?; + debug!( + self.log, + "inserted rows into table"; + "n_rows" => rows.len(), + "table_name" => table_name, + ); + } + + // TODO-correctness We'd like to return all errors to clients here, and there may be as + // many as one per sample. It's not clear how to structure this in a way that's useful. + Ok(()) + } + + // Save new schema to the database, or remove them from the cache on + // failure. + // + // This attempts to insert the provided schema into the timeseries schema + // table. If that fails, those schema are _also_ removed from the internal + // cache. + // + // TODO-robustness There's still a race possible here. If two distinct clients receive new + // but conflicting schema, they will both try to insert those at some point into the schema + // tables. It's not clear how to handle this, since ClickHouse provides no transactions. + // This is unlikely to happen at this point, because the design is such that there will be + // a single `oximeter` instance, which has one client object, connected to a single + // ClickHouse server. But once we start replicating data, the window within which the race + // can occur is much larger, since it includes the time it takes ClickHouse to replicate + // data between nodes. + // + // NOTE: This is an issue even in the case where the schema don't conflict. Two clients may + // receive a sample with a new schema, and both would then try to insert that schema. + pub(super) async fn save_new_schema_or_remove( + &self, + new_schema: BTreeMap, + ) -> Result<(), Error> { + if !new_schema.is_empty() { + debug!( + self.log, + "inserting {} new timeseries schema", + new_schema.len() + ); + const APPROX_ROW_SIZE: usize = 64; + let mut body = String::with_capacity( + APPROX_ROW_SIZE + APPROX_ROW_SIZE * new_schema.len(), + ); + body.push_str("INSERT INTO "); + body.push_str(crate::DATABASE_NAME); + body.push_str(".timeseries_schema FORMAT JSONEachRow\n"); + for row_data in new_schema.values() { + body.push_str(row_data); + body.push('\n'); + } + + // Try to insert the schema. + // + // If this fails, be sure to remove the schema we've added from the + // internal cache. Since we check the internal cache first for + // schema, if we fail here but _don't_ remove the schema, we'll + // never end up inserting the schema, but we will insert samples. + if let Err(e) = self.execute(body).await { + debug!( + self.log, + "failed to insert new schema, removing from cache"; + "error" => ?e, + ); + let mut schema = self.schema.lock().await; + for name in new_schema.keys() { + schema + .remove(name) + .expect("New schema should have been cached"); + } + return Err(e); + } + } + Ok(()) + } + + // Run one or more SQL statements. + // + // This is intended to be used for the methods which run SQL from one of the + // SQL files in the crate, e.g., the DB initialization or update files. 
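`save_new_schema_or_remove` keeps the in-memory schema cache consistent with the database: schema are cached optimistically when first seen, and if the INSERT into `timeseries_schema` fails they are evicted again so a later sample will retry the insert. A std-only sketch of that rollback-on-failure shape, with a fake insert standing in for the ClickHouse call:

```rust
use std::collections::BTreeMap;

// Pretend INSERT: succeeds or fails wholesale.
fn insert_into_db(_rows: &BTreeMap<String, String>, fail: bool) -> Result<(), String> {
    if fail { Err("insert failed".to_string()) } else { Ok(()) }
}

// Cache-consistency rule from the method above: if the database insert fails,
// evict the newly-cached schema so they will be retried on a later sample.
fn save_or_evict(
    cache: &mut BTreeMap<String, String>,
    new_schema: BTreeMap<String, String>,
    fail: bool,
) -> Result<(), String> {
    if new_schema.is_empty() {
        return Ok(());
    }
    if let Err(e) = insert_into_db(&new_schema, fail) {
        for name in new_schema.keys() {
            cache.remove(name).expect("new schema should have been cached");
        }
        return Err(e);
    }
    Ok(())
}

fn main() {
    let mut cache =
        BTreeMap::from([("foo:bar".to_string(), "schema-row".to_string())]);
    let new = cache.clone();
    assert!(save_or_evict(&mut cache, new, true).is_err());
    assert!(cache.is_empty()); // evicted on failure, so the insert is retried later
}
```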
+ async fn run_many_sql_statements( + &self, + sql: impl AsRef, + ) -> Result<(), Error> { + for stmt in sql.as_ref().split(';').filter(|s| !s.trim().is_empty()) { + self.execute(stmt).await?; + } + Ok(()) + } +} diff --git a/oximeter/db/src/client.rs b/oximeter/db/src/client/mod.rs similarity index 88% rename from oximeter/db/src/client.rs rename to oximeter/db/src/client/mod.rs index abea11aa06..9a2b7b1bd3 100644 --- a/oximeter/db/src/client.rs +++ b/oximeter/db/src/client/mod.rs @@ -4,11 +4,19 @@ //! Rust client to ClickHouse database -// Copyright 2023 Oxide Computer Company +// Copyright 2024 Oxide Computer Company +pub(crate) mod dbwrite; +#[cfg(any(feature = "oxql", test))] +pub(crate) mod oxql; +pub(crate) mod query_summary; +#[cfg(any(feature = "sql", test))] +mod sql; + +pub use self::dbwrite::DbWrite; +use crate::client::query_summary::QuerySummary; use crate::model; use crate::query; -use crate::sql::RestrictedQuery; use crate::Error; use crate::Metric; use crate::Target; @@ -18,16 +26,13 @@ use crate::TimeseriesName; use crate::TimeseriesPageSelector; use crate::TimeseriesScanParams; use crate::TimeseriesSchema; -use async_trait::async_trait; use dropshot::EmptyScanParams; use dropshot::PaginationOrder; use dropshot::ResultsPage; use dropshot::WhichPage; -use indexmap::IndexMap; use oximeter::types::Sample; use regex::Regex; use regex::RegexBuilder; -use reqwest::header::HeaderMap; use slog::debug; use slog::error; use slog::info; @@ -44,7 +49,6 @@ use std::ops::Bound; use std::path::Path; use std::path::PathBuf; use std::sync::OnceLock; -use std::time::Duration; use std::time::Instant; use tokio::fs; use tokio::sync::Mutex; @@ -56,139 +60,11 @@ const CLICKHOUSE_DB_VERSION_MISSING: &'static str = #[usdt::provider(provider = "clickhouse_client")] mod probes { - fn query__start(_: &usdt::UniqueId, sql: &str) {} - fn query__done(_: &usdt::UniqueId) {} -} - -/// A count of bytes / rows accessed during a query. -#[derive(Clone, Copy, Debug)] -pub struct IoCount { - pub bytes: u64, - pub rows: u64, -} - -impl std::fmt::Display for IoCount { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{} rows ({} bytes)", self.rows, self.bytes) - } -} + /// Fires when a SQL query begins, with the query string. + fn sql__query__start(_: &usdt::UniqueId, sql: &str) {} -/// Summary of the I/O and duration of a query. -#[derive(Clone, Copy, Debug, serde::Deserialize)] -#[serde(try_from = "serde_json::Value")] -pub struct QuerySummary { - /// The bytes and rows read by the query. - pub read: IoCount, - /// The bytes and rows written by the query. 
- pub written: IoCount, -} - -impl TryFrom for QuerySummary { - type Error = Error; - - fn try_from(j: serde_json::Value) -> Result { - use serde_json::Map; - use serde_json::Value; - use std::str::FromStr; - - let Value::Object(map) = j else { - return Err(Error::Database(String::from( - "Expected a JSON object for a metadata summary", - ))); - }; - - fn unpack_summary_value( - map: &Map, - key: &str, - ) -> Result - where - T: FromStr, - ::Err: std::error::Error, - { - let value = map.get(key).ok_or_else(|| { - Error::MissingHeaderKey { key: key.to_string() } - })?; - let Value::String(v) = value else { - return Err(Error::BadMetadata { - key: key.to_string(), - msg: String::from("Expected a string value"), - }); - }; - v.parse::().map_err(|e| Error::BadMetadata { - key: key.to_string(), - msg: e.to_string(), - }) - } - let rows_read: u64 = unpack_summary_value(&map, "read_rows")?; - let bytes_read: u64 = unpack_summary_value(&map, "read_bytes")?; - let rows_written: u64 = unpack_summary_value(&map, "written_rows")?; - let bytes_written: u64 = unpack_summary_value(&map, "written_bytes")?; - Ok(Self { - read: IoCount { bytes: bytes_read, rows: rows_read }, - written: IoCount { bytes: bytes_written, rows: rows_written }, - }) - } -} - -/// Basic metadata about the resource usage of a single SQL query. -#[derive(Clone, Copy, Debug)] -pub struct QueryMetadata { - /// The database-assigned query ID. - pub id: Uuid, - /// The total duration of the query (network plus execution). - pub elapsed: Duration, - /// Summary of the data read and written. - pub summary: QuerySummary, -} - -impl QueryMetadata { - fn from_headers( - elapsed: Duration, - headers: &HeaderMap, - ) -> Result { - fn get_header<'a>( - map: &'a HeaderMap, - key: &'a str, - ) -> Result<&'a str, Error> { - let hdr = map.get(key).ok_or_else(|| Error::MissingHeaderKey { - key: key.to_string(), - })?; - std::str::from_utf8(hdr.as_bytes()) - .map_err(|err| Error::Database(err.to_string())) - } - let summary = - serde_json::from_str(get_header(headers, "X-ClickHouse-Summary")?) - .map_err(|err| Error::Database(err.to_string()))?; - let id = get_header(headers, "X-ClickHouse-Query-Id")? - .parse() - .map_err(|err: uuid::Error| Error::Database(err.to_string()))?; - Ok(Self { id, elapsed, summary }) - } -} - -/// A tabular result from a SQL query against a timeseries. -#[derive(Clone, Debug, Default, serde::Serialize)] -pub struct Table { - /// The name of each column in the result set. - pub column_names: Vec, - /// The rows of the result set, one per column. - pub rows: Vec>, -} - -/// The full result of running a SQL query against a timeseries. -#[derive(Clone, Debug)] -pub struct QueryResult { - /// The query as written by the client. - pub original_query: String, - /// The rewritten query, run against the JOINed representation of the - /// timeseries. - /// - /// This is the query that is actually run in the database itself. - pub rewritten_query: String, - /// Metadata about the resource usage of the query. - pub metadata: QueryMetadata, - /// The result of the query, with column names and rows. - pub table: Table, + /// Fires when a SQL query ends, either in success or failure. + fn sql__query__done(_: &usdt::UniqueId) {} } /// A `Client` to the ClickHouse metrics database. @@ -229,76 +105,6 @@ impl Client { Ok(()) } - /// Transform a SQL query against a timeseries, but do not execute it. 
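The removed `QuerySummary`/`QueryMetadata` parsing (now living in the `query_summary` module) reads the `X-ClickHouse-Summary` header, a small JSON object whose values are strings. A sketch of pulling the read counts out of such a header value with `serde_json`, which is already a dependency of this crate; the header literal below is made up for illustration:

```rust
use std::collections::HashMap;

fn main() -> Result<(), serde_json::Error> {
    // Shape of the X-ClickHouse-Summary header per the removed parser above:
    // counts of rows and bytes read and written, all encoded as JSON strings.
    let header =
        r#"{"read_rows":"8","read_bytes":"512","written_rows":"0","written_bytes":"0"}"#;
    let map: HashMap<String, String> = serde_json::from_str(header)?;
    let read_rows: u64 = map["read_rows"].parse().unwrap();
    let read_bytes: u64 = map["read_bytes"].parse().unwrap();
    println!("read {read_rows} rows ({read_bytes} bytes)");
    Ok(())
}
```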
- pub async fn transform_query( - &self, - query: impl AsRef, - ) -> Result { - let restricted = RestrictedQuery::new(query.as_ref())?; - restricted.to_oximeter_sql(&*self.schema.lock().await) - } - - /// Run a SQL query against a timeseries. - pub async fn query( - &self, - query: impl AsRef, - ) -> Result { - let original_query = query.as_ref().trim_end_matches(';'); - let ox_sql = self.transform_query(original_query).await?; - let rewritten = format!("{ox_sql} FORMAT JSONEachRow"); - debug!( - self.log, - "rewrote restricted query"; - "original_sql" => &original_query, - "rewritten_sql" => &rewritten, - ); - let request = self - .client - .post(&self.url) - .query(&[ - ("output_format_json_quote_64bit_integers", "0"), - ("database", crate::DATABASE_NAME), - ]) - .body(rewritten.clone()); - let query_start = Instant::now(); - let response = handle_db_response( - request - .send() - .await - .map_err(|err| Error::DatabaseUnavailable(err.to_string()))?, - ) - .await?; - let metadata = QueryMetadata::from_headers( - query_start.elapsed(), - response.headers(), - )?; - let text = response.text().await.unwrap(); - let mut table = Table::default(); - for line in text.lines() { - let row = - serde_json::from_str::>( - line.trim(), - ) - .unwrap(); - if table.column_names.is_empty() { - table.column_names.extend(row.keys().cloned()) - } else { - assert!(table - .column_names - .iter() - .zip(row.keys()) - .all(|(k1, k2)| k1 == k2)); - } - table.rows.push(row.into_values().collect()); - } - Ok(QueryResult { - original_query: original_query.to_string(), - rewritten_query: rewritten, - metadata, - table, - }) - } - /// Select timeseries from criteria on the fields and start/end timestamps. pub async fn select_timeseries_with( &self, @@ -348,6 +154,7 @@ impl Client { Some(field_query) => { self.select_matching_timeseries_info(&field_query, &schema) .await? + .1 } None => BTreeMap::new(), }; @@ -367,6 +174,7 @@ impl Client { } } + /// Return a page of timeseries schema from the database. pub async fn list_timeseries( &self, page: &WhichPage, @@ -401,6 +209,7 @@ impl Client { Some(field_query) => { self.select_matching_timeseries_info(&field_query, &schema) .await? 
+ .1 } None => BTreeMap::new(), }; @@ -445,6 +254,7 @@ impl Client { concat!( "SELECT * ", "FROM {}.timeseries_schema ", + "ORDER BY timeseries_name ", "LIMIT {} ", "FORMAT JSONEachRow;", ), @@ -457,6 +267,7 @@ impl Client { concat!( "SELECT * FROM {}.timeseries_schema ", "WHERE timeseries_name > '{}' ", + "ORDER BY timeseries_name ", "LIMIT {} ", "FORMAT JSONEachRow;", ), @@ -466,7 +277,7 @@ impl Client { ) } }; - let body = self.execute_with_body(sql).await?; + let body = self.execute_with_body(sql).await?.1; let schema = body .lines() .map(|line| { @@ -848,14 +659,14 @@ impl Client { ); let version = match self.execute_with_body(sql).await { - Ok(body) if body.is_empty() => { + Ok((_, body)) if body.is_empty() => { warn!( self.log, "no version in database (treated as 'version 0')" ); 0 } - Ok(body) => body.trim().parse::().map_err(|err| { + Ok((_, body)) => body.trim().parse::().map_err(|err| { Error::Database(format!("Cannot read version: {err}")) })?, Err(Error::Database(err)) @@ -895,14 +706,13 @@ impl Client { "INSERT INTO {db_name}.version (*) VALUES ({version}, now());", db_name = crate::DATABASE_NAME, ); - self.execute_with_body(sql).await?; - Ok(()) + self.execute(sql).await } /// Verifies if instance is part of oximeter_cluster pub async fn is_oximeter_cluster(&self) -> Result { let sql = "SHOW CLUSTERS FORMAT JSONEachRow;"; - let res = self.execute_with_body(sql).await?; + let res = self.execute_with_body(sql).await?.1; Ok(res.contains("oximeter_cluster")) } @@ -972,8 +782,9 @@ impl Client { &self, field_query: &str, schema: &TimeseriesSchema, - ) -> Result, Error> { - let body = self.execute_with_body(field_query).await?; + ) -> Result<(QuerySummary, BTreeMap), Error> + { + let (summary, body) = self.execute_with_body(field_query).await?; let mut results = BTreeMap::new(); for line in body.lines() { let row: model::FieldSelectRow = serde_json::from_str(line) @@ -982,7 +793,7 @@ impl Client { model::parse_field_select_row(&row, schema); results.insert(id, (target, metric)); } - Ok(results) + Ok((summary, results)) } // Given information returned from `select_matching_timeseries_info`, select the actual @@ -996,7 +807,8 @@ impl Client { let mut timeseries_by_key = BTreeMap::new(); let keys = info.keys().copied().collect::>(); let measurement_query = query.measurement_query(&keys); - for line in self.execute_with_body(&measurement_query).await?.lines() { + for line in self.execute_with_body(&measurement_query).await?.1.lines() + { let (key, measurement) = model::parse_measurement_from_row(line, schema.datum_type); let timeseries = timeseries_by_key.entry(key).or_insert_with( @@ -1032,7 +844,10 @@ impl Client { // Execute a generic SQL statement, awaiting the response as text // // TODO-robustness This currently does no validation of the statement. - async fn execute_with_body(&self, sql: S) -> Result + async fn execute_with_body( + &self, + sql: S, + ) -> Result<(QuerySummary, String), Error> where S: AsRef, { @@ -1042,24 +857,57 @@ impl Client { "executing SQL query"; "sql" => &sql, ); + + // Run the SQL query itself. + // + // This code gets a bit convoluted, so that we can fire the USDT probe + // in all situations, even when the various fallible operations + // complete. let id = usdt::UniqueId::new(); - probes::query__start!(|| (&id, &sql)); - let response = handle_db_response( - self.client - .post(&self.url) - // See regression test `test_unquoted_64bit_integers` for details. 
- .query(&[("output_format_json_quote_64bit_integers", "0")]) - .body(sql) - .send() - .await - .map_err(|err| Error::DatabaseUnavailable(err.to_string()))?, - ) - .await? - .text() - .await - .map_err(|err| Error::Database(err.to_string())); - probes::query__done!(|| (&id)); - response + probes::sql__query__start!(|| (&id, &sql)); + let start = Instant::now(); + + // Submit the SQL request itself. + let response = self + .client + .post(&self.url) + .query(&[ + ("output_format_json_quote_64bit_integers", "0"), + // TODO-performance: This is needed to get the correct counts of + // rows/bytes accessed during a query, but implies larger memory + // consumption on the server and higher latency for the request. + // We may want to sacrifice accuracy of those counts. + ("wait_end_of_query", "1"), + ]) + .body(sql) + .send() + .await + .map_err(|err| { + probes::sql__query__done!(|| (&id)); + Error::DatabaseUnavailable(err.to_string()) + })?; + + // Convert the HTTP response into a database response. + let response = handle_db_response(response).await.map_err(|err| { + probes::sql__query__done!(|| (&id)); + err + })?; + + // Extract the query summary, measuring resource usage and duration. + let summary = + QuerySummary::from_headers(start.elapsed(), response.headers()) + .map_err(|err| { + probes::sql__query__done!(|| (&id)); + err + })?; + + // Extract the actual text of the response. + let text = response.text().await.map_err(|err| { + probes::sql__query__done!(|| (&id)); + Error::Database(err.to_string()) + })?; + probes::sql__query__done!(|| (&id)); + Ok((summary, text)) } // Get timeseries schema from the database. @@ -1095,7 +943,7 @@ impl Client { ) } }; - let body = self.execute_with_body(sql).await?; + let body = self.execute_with_body(sql).await?.1; if body.is_empty() { trace!(self.log, "no new timeseries schema in database"); } else { @@ -1113,167 +961,6 @@ impl Client { } Ok(()) } - - // Unroll each sample into its consituent rows, after verifying the schema. - // - // Note that this also inserts the schema into the internal cache, if it - // does not already exist there. - async fn unroll_samples(&self, samples: &[Sample]) -> UnrolledSampleRows { - let mut seen_timeseries = BTreeSet::new(); - let mut rows = BTreeMap::new(); - let mut new_schema = BTreeMap::new(); - - for sample in samples.iter() { - match self.verify_or_cache_sample_schema(sample).await { - Err(_) => { - // Skip the sample, but otherwise do nothing. The error is logged in the above - // call. - continue; - } - Ok(None) => {} - Ok(Some((name, schema))) => { - debug!( - self.log, - "new timeseries schema"; - "timeseries_name" => %name, - "schema" => %schema - ); - new_schema.insert(name, schema); - } - } - - // Key on both the timeseries name and key, as timeseries may actually share keys. - let key = ( - sample.timeseries_name.as_str(), - crate::timeseries_key(&sample), - ); - if !seen_timeseries.contains(&key) { - for (table_name, table_rows) in model::unroll_field_rows(sample) - { - rows.entry(table_name) - .or_insert_with(Vec::new) - .extend(table_rows); - } - } - - let (table_name, measurement_row) = - model::unroll_measurement_row(sample); - - rows.entry(table_name) - .or_insert_with(Vec::new) - .push(measurement_row); - - seen_timeseries.insert(key); - } - - UnrolledSampleRows { new_schema, rows } - } - - // Save new schema to the database, or remove them from the cache on - // failure. - // - // This attempts to insert the provided schema into the timeseries schema - // table. 
If that fails, those schema are _also_ removed from the internal - // cache. - // - // TODO-robustness There's still a race possible here. If two distinct clients receive new - // but conflicting schema, they will both try to insert those at some point into the schema - // tables. It's not clear how to handle this, since ClickHouse provides no transactions. - // This is unlikely to happen at this point, because the design is such that there will be - // a single `oximeter` instance, which has one client object, connected to a single - // ClickHouse server. But once we start replicating data, the window within which the race - // can occur is much larger, since it includes the time it takes ClickHouse to replicate - // data between nodes. - // - // NOTE: This is an issue even in the case where the schema don't conflict. Two clients may - // receive a sample with a new schema, and both would then try to insert that schema. - async fn save_new_schema_or_remove( - &self, - new_schema: BTreeMap, - ) -> Result<(), Error> { - if !new_schema.is_empty() { - debug!( - self.log, - "inserting {} new timeseries schema", - new_schema.len() - ); - const APPROX_ROW_SIZE: usize = 64; - let mut body = String::with_capacity( - APPROX_ROW_SIZE + APPROX_ROW_SIZE * new_schema.len(), - ); - body.push_str("INSERT INTO "); - body.push_str(crate::DATABASE_NAME); - body.push_str(".timeseries_schema FORMAT JSONEachRow\n"); - for row_data in new_schema.values() { - body.push_str(row_data); - body.push_str("\n"); - } - - // Try to insert the schema. - // - // If this fails, be sure to remove the schema we've added from the - // internal cache. Since we check the internal cache first for - // schema, if we fail here but _don't_ remove the schema, we'll - // never end up inserting the schema, but we will insert samples. - if let Err(e) = self.execute(body).await { - debug!( - self.log, - "failed to insert new schema, removing from cache"; - "error" => ?e, - ); - let mut schema = self.schema.lock().await; - for name in new_schema.keys() { - schema - .remove(name) - .expect("New schema should have been cached"); - } - return Err(e); - } - } - Ok(()) - } - - // Insert unrolled sample rows into the corresponding tables. - async fn insert_unrolled_samples( - &self, - rows: BTreeMap>, - ) -> Result<(), Error> { - for (table_name, rows) in rows { - let body = format!( - "INSERT INTO {table_name} FORMAT JSONEachRow\n{row_data}\n", - table_name = table_name, - row_data = rows.join("\n") - ); - // TODO-robustness We've verified the schema, so this is likely a transient failure. - // But we may want to check the actual error condition, and, if possible, continue - // inserting any remaining data. - self.execute(body).await?; - debug!( - self.log, - "inserted rows into table"; - "n_rows" => rows.len(), - "table_name" => table_name, - ); - } - - // TODO-correctness We'd like to return all errors to clients here, and there may be as - // many as one per sample. It's not clear how to structure this in a way that's useful. - Ok(()) - } - - // Run one or more SQL statements. - // - // This is intended to be used for the methods which run SQL from one of the - // SQL files in the crate, e.g., the DB initialization or update files. - async fn run_many_sql_statements( - &self, - sql: impl AsRef, - ) -> Result<(), Error> { - for stmt in sql.as_ref().split(';').filter(|s| !s.trim().is_empty()) { - self.execute(stmt).await?; - } - Ok(()) - } } // A regex used to validate supported schema updates. 
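The rewritten `execute_with_body` above takes care to fire the `sql__query__done` USDT probe on every early return by wrapping each fallible step in `map_err`. An alternative way to get "always runs on exit" behavior, shown here only as a sketch and not what the diff does, is a small drop guard:

```rust
// A tiny drop guard that runs a closure on every exit path, including early
// returns. The diff instead fires its probe explicitly in each map_err.
struct OnDrop<F: FnMut()>(F);

impl<F: FnMut()> Drop for OnDrop<F> {
    fn drop(&mut self) {
        (self.0)();
    }
}

fn query(fail: bool) -> Result<String, String> {
    let _done = OnDrop(|| println!("query done"));
    if fail {
        return Err("database unavailable".to_string());
    }
    Ok("result".to_string())
}

fn main() {
    let _ = query(false); // prints "query done"
    let _ = query(true);  // also prints "query done"
}
```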
@@ -1297,87 +984,6 @@ fn schema_validation_regex() -> &'static Regex { .expect("Invalid regex") }) } - -#[derive(Debug)] -struct UnrolledSampleRows { - // The timeseries schema rows, keyed by timeseries name. - new_schema: BTreeMap, - // The rows to insert in all the other tables, keyed by the table name. - rows: BTreeMap>, -} - -/// A trait allowing a [`Client`] to write data into the timeseries database. -/// -/// The vanilla [`Client`] object allows users to query the timeseries database, returning -/// timeseries samples corresponding to various filtering criteria. This trait segregates the -/// methods required for _writing_ new data into the database, and is intended only for use by the -/// `oximeter-collector` crate. -#[async_trait] -pub trait DbWrite { - /// Insert the given samples into the database. - async fn insert_samples(&self, samples: &[Sample]) -> Result<(), Error>; - - /// Initialize the replicated telemetry database, creating tables as needed. - async fn init_replicated_db(&self) -> Result<(), Error>; - - /// Initialize a single node telemetry database, creating tables as needed. - async fn init_single_node_db(&self) -> Result<(), Error>; - - /// Wipe the ClickHouse database entirely from a single node set up. - async fn wipe_single_node_db(&self) -> Result<(), Error>; - - /// Wipe the ClickHouse database entirely from a replicated set up. - async fn wipe_replicated_db(&self) -> Result<(), Error>; -} - -#[async_trait] -impl DbWrite for Client { - /// Insert the given samples into the database. - async fn insert_samples(&self, samples: &[Sample]) -> Result<(), Error> { - debug!(self.log, "unrolling {} total samples", samples.len()); - let UnrolledSampleRows { new_schema, rows } = - self.unroll_samples(samples).await; - self.save_new_schema_or_remove(new_schema).await?; - self.insert_unrolled_samples(rows).await - } - - /// Initialize the replicated telemetry database, creating tables as needed. - async fn init_replicated_db(&self) -> Result<(), Error> { - debug!(self.log, "initializing ClickHouse database"); - self.run_many_sql_statements(include_str!( - "../schema/replicated/db-init.sql" - )) - .await - } - - /// Wipe the ClickHouse database entirely from a replicated set up. - async fn wipe_replicated_db(&self) -> Result<(), Error> { - debug!(self.log, "wiping ClickHouse database"); - self.run_many_sql_statements(include_str!( - "../schema/replicated/db-wipe.sql" - )) - .await - } - - /// Initialize a single node telemetry database, creating tables as needed. - async fn init_single_node_db(&self) -> Result<(), Error> { - debug!(self.log, "initializing ClickHouse database"); - self.run_many_sql_statements(include_str!( - "../schema/single-node/db-init.sql" - )) - .await - } - - /// Wipe the ClickHouse database entirely from a single node set up. - async fn wipe_single_node_db(&self) -> Result<(), Error> { - debug!(self.log, "wiping ClickHouse database"); - self.run_many_sql_statements(include_str!( - "../schema/single-node/db-wipe.sql" - )) - .await - } -} - // Return Ok if the response indicates success, otherwise return either the reqwest::Error, if this // is a client-side error, or the body of the actual error retrieved from ClickHouse if the error // was generated there. 
@@ -1397,6 +1003,7 @@ async fn handle_db_response( #[cfg(test)] mod tests { + use super::dbwrite::UnrolledSampleRows; use super::*; use crate::model::OXIMETER_VERSION; use crate::query; @@ -1933,7 +1540,7 @@ mod tests { let mut result = String::from(""); let tries = 5; for _ in 0..tries { - result = client_2.execute_with_body(sql.clone()).await.unwrap(); + result = client_2.execute_with_body(sql.clone()).await.unwrap().1; if !result.contains("oximeter") { sleep(Duration::from_secs(1)).await; continue; @@ -1948,21 +1555,21 @@ mod tests { let sql = String::from( "INSERT INTO oximeter.measurements_string (datum) VALUES ('hiya');", ); - let result = client_2.execute_with_body(sql.clone()).await.unwrap(); + let result = client_2.execute_with_body(sql.clone()).await.unwrap().1; info!(log, "Inserted datum to client #2"; "sql" => sql, "result" => result); // Make sure replicas are synched let sql = String::from( "SYSTEM SYNC REPLICA oximeter.measurements_string_local;", ); - let result = client_1.execute_with_body(sql.clone()).await.unwrap(); + let result = client_1.execute_with_body(sql.clone()).await.unwrap().1; info!(log, "Synced replicas via client #1"; "sql" => sql, "result" => result); // Make sure data exists in the other replica let sql = String::from( "SELECT * FROM oximeter.measurements_string FORMAT JSONEachRow;", ); - let result = client_1.execute_with_body(sql.clone()).await.unwrap(); + let result = client_1.execute_with_body(sql.clone()).await.unwrap().1; info!(log, "Retrieved values via client #1"; "sql" => sql, "result" => result.clone()); assert!(result.contains("hiya")); @@ -2124,7 +1731,7 @@ mod tests { let sql = String::from( "SELECT * FROM oximeter.timeseries_schema FORMAT JSONEachRow;", ); - let result = client.execute_with_body(sql).await.unwrap(); + let result = client.execute_with_body(sql).await.unwrap().1; let schema = result .lines() .map(|line| { @@ -2253,7 +1860,8 @@ mod tests { table )) .await - .unwrap(); + .unwrap() + .1; let actual_count = body.lines().next().unwrap().trim().parse::().expect( "Expected a count of the number of rows from ClickHouse", @@ -2301,7 +1909,8 @@ mod tests { "SELECT toUInt64(1) AS foo FORMAT JSONEachRow;".to_string(), ) .await - .unwrap(); + .unwrap() + .1; let json: Value = serde_json::from_str(&output).unwrap(); assert_eq!(json["foo"], Value::Number(1u64.into())); @@ -3167,7 +2776,8 @@ mod tests { let body = client .execute_with_body(select_sql) .await - .expect("Failed to select field row"); + .expect("Failed to select field row") + .1; let actual_row: serde_json::Value = serde_json::from_str(&body) .expect("Failed to parse field row JSON"); println!("{actual_row:?}"); @@ -3507,7 +3117,8 @@ mod tests { let body = client .execute_with_body(select_sql) .await - .expect("Failed to select measurement row"); + .expect("Failed to select measurement row") + .1; let (_, actual_row) = crate::model::parse_measurement_from_row( &body, measurement.datum_type(), @@ -3528,6 +3139,7 @@ mod tests { ) .await .expect("Failed to SELECT from database") + .1 .lines() .count() } @@ -3749,7 +3361,7 @@ mod tests { // one. let response = client.execute_with_body( "SELECT COUNT() FROM oximeter.timeseries_schema FORMAT JSONEachRow; - ").await.unwrap(); + ").await.unwrap().1; assert_eq!(response.lines().count(), 1, "Expected exactly 1 schema"); assert_eq!(client.schema.lock().await.len(), 1); @@ -3766,7 +3378,7 @@ mod tests { // only the one schema. 
let response = client.execute_with_body( "SELECT COUNT() FROM oximeter.timeseries_schema FORMAT JSONEachRow; - ").await.unwrap(); + ").await.unwrap().1; assert_eq!( response.lines().count(), 1, @@ -3804,7 +3416,7 @@ mod tests { crate::DATABASE_NAME, crate::model::DbDatumType::from(ty), ); - let res = client.execute_with_body(sql).await.unwrap(); + let res = client.execute_with_body(sql).await.unwrap().1; let count = res.trim().parse::().unwrap(); assert_eq!(count, 0); } @@ -4099,7 +3711,8 @@ mod tests { " )) .await - .unwrap(); + .unwrap() + .1; let mut lines = body.lines(); assert_eq!(lines.next().unwrap(), "\"col0\",\"UInt8\""); assert_eq!(lines.next().unwrap(), "\"col1\",\"UInt16\""); @@ -4319,7 +3932,8 @@ mod tests { " )) .await - .unwrap(); + .unwrap() + .1; let mut lines = body.lines(); assert_eq!(lines.next().unwrap(), "\"col0\",\"UInt8\""); assert_eq!(lines.next().unwrap(), "\"col1\",\"UInt16\""); @@ -4480,7 +4094,7 @@ mod tests { crate::DATABASE_NAME, crate::model::DbFieldType::from(ty), ); - let res = client.execute_with_body(sql).await.unwrap(); + let res = client.execute_with_body(sql).await.unwrap().1; let count = res.trim().parse::().unwrap(); assert_eq!(count, 0); } @@ -4488,6 +4102,7 @@ mod tests { logctx.cleanup_successful(); } + #[cfg(any(feature = "sql", test))] #[tokio::test] async fn test_sql_query_output() { let logctx = test_setup_log("test_sql_query_output"); diff --git a/oximeter/db/src/client/oxql.rs b/oximeter/db/src/client/oxql.rs new file mode 100644 index 0000000000..d1ce131581 --- /dev/null +++ b/oximeter/db/src/client/oxql.rs @@ -0,0 +1,1645 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Client methods for running OxQL queries against the timeseries database. + +// Copyright 2024 Oxide Computer Company + +use super::query_summary::QuerySummary; +use crate::client::Client; +use crate::model; +use crate::oxql; +use crate::oxql::ast::table_ops::filter; +use crate::oxql::ast::table_ops::filter::Filter; +use crate::oxql::ast::table_ops::limit::Limit; +use crate::oxql::ast::table_ops::limit::LimitKind; +use crate::query::field_table_name; +use crate::Error; +use crate::Metric; +use crate::Target; +use crate::TimeseriesKey; +use oximeter::TimeseriesSchema; +use slog::debug; +use slog::trace; +use slog::Logger; +use std::collections::BTreeMap; +use std::time::Duration; +use std::time::Instant; +use uuid::Uuid; + +#[usdt::provider(provider = "clickhouse_client")] +mod probes { + /// Fires when an OxQL query starts, with the query ID and string. + fn oxql__query__start(_: &usdt::UniqueId, _: &Uuid, query: &str) {} + + /// Fires when an OxQL query ends, either in success or failure. + fn oxql__query__done(_: &usdt::UniqueId, _: &Uuid) {} + + /// Fires when an OxQL table operation starts, with the query ID and details + /// of the operation itself. + fn oxql__table__op__start(_: &usdt::UniqueId, _: &Uuid, op: &str) {} + + /// Fires when an OxQL table operation ends. + fn oxql__table__op__done(_: &usdt::UniqueId, _: &Uuid) {} +} + +/// The full result of an OxQL query. +#[derive(Clone, Debug)] +pub struct OxqlResult { + /// A query ID assigned to this OxQL query. + pub query_id: Uuid, + + /// The total duration of the OxQL query. + /// + /// This includes the time to run SQL queries against the database, and the + /// internal processing for each transformation in the query pipeline. 
+ pub total_duration: Duration, + + /// The summary for each SQL query run against the ClickHouse database. + /// + /// Each OxQL query translates into many calls to ClickHouse. We fetch the + /// fields; count the number of samples; and finally fetch the samples + /// themselves. In the future, more may be needed as well. + /// + /// This returns a list of summaries, one for each SQL query that was run. + /// It includes the ClickHouse-assigned query ID for correlation and looking + /// up in the logs. + pub query_summaries: Vec, + + /// The list of OxQL tables returned from the query. + pub tables: Vec, +} + +/// The maximum number of data values fetched from the database for an OxQL +/// query. +// +// The `Client::oxql_query()` API is currently unpaginated. It's also not clear +// _how_ to paginate it. The objects contributing to the size of the returned +// value, the actual data points, are nested several layers deep, inside the +// `Timeseries` and `Table`s. A page size is supposed to refer to the top-level +// object, so we'd need to flatten this hierarchy for that to work. That's +// undesirable because it will lead to a huge amount of duplication of the table +// / timeseries-level information, once for each point. +// +// Also, since we cannot use a cursor-based pagination, we're stuck with +// limit-offset. That means we may need to run substantially all of the query, +// just to know how to retrieve the next page, sidestepping one of the main +// goals of pagination (to limit resource usage). +// +// Note that it's also hard or impossible to _predict_ how much data a query +// will use. We need to count the number of rows in the database, for example, +// _and also_ understand how table operations might change that size. For +// example, alignment is allowed to upsample the data (within limits), so the +// number of rows in the database are not the only factor. +// +// This limit here is a crude attempt to limit just the raw data fetched from +// ClickHouse itself. For any OxQL query, we may retrieve many measurements from +// the database. Each time we do so, we increment a counter, and compare it to +// this. If we exceed it, the whole query fails. +pub const MAX_DATABASE_ROWS: u64 = 1_000_000; + +// When running an OxQL query, we may need to separately run several field +// queries, to get the consistent keys independently for a range of time. +// +// This type stores the predicates used to generate the keys, and the keys +// consistent with it. +#[derive(Clone, Debug, PartialEq)] +struct ConsistentKeyGroup { + predicates: Option, + consistent_keys: BTreeMap, +} + +impl Client { + /// Run a OxQL query. + pub async fn oxql_query( + &self, + query: impl AsRef, + ) -> Result { + // TODO-security: Need a way to implement authz checks for things like + // viewing resources in another project or silo. + // + // I think one way to do that is look at the predicates and make sure + // they refer to things the user has access to. Another is to add some + // implicit predicates here, indicating the subset of fields that the + // query should be able to access. + // + // This probably means we'll need to parse the query in Nexus, so that + // we can attach the other filters ourselves. + // + // See https://github.com/oxidecomputer/omicron/issues/5298. 
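+ //
+ // For reference, a purely illustrative example of a query string accepted
+ // here (the timeseries name is hypothetical; the filter syntax mirrors the
+ // examples used in the tests below):
+ //
+ //   get some_target:some_metric | filter timestamp > @now() - 1m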
+ let query = query.as_ref(); + let parsed_query = oxql::Query::new(query)?; + let query_id = Uuid::new_v4(); + let query_log = + self.log.new(slog::o!("query_id" => query_id.to_string())); + debug!( + query_log, + "parsed OxQL query"; + "query" => query, + "parsed_query" => ?parsed_query, + ); + let id = usdt::UniqueId::new(); + probes::oxql__query__start!(|| (&id, &query_id, query)); + let mut total_rows_fetched = 0; + let result = self + .run_oxql_query( + &query_log, + query_id, + parsed_query, + &mut total_rows_fetched, + None, + None, + ) + .await; + probes::oxql__query__done!(|| (&id, &query_id)); + result + } + + /// Rewrite the predicates from an OxQL query so that they apply only to the + /// field tables. + fn rewrite_predicate_for_fields( + schema: &TimeseriesSchema, + preds: &filter::Filter, + ) -> Result, Error> { + // Walk the set of predicates, keeping those which apply to this schema. + match &preds.expr { + filter::FilterExpr::Simple(inner) => { + // If the predicate names a field in this timeseries schema, + // return that predicate printed as a string. If not, we return + // None. + let Some(field_schema) = + schema.schema_for_field(inner.ident.as_str()) + else { + return Ok(None); + }; + if !inner.value_type_is_compatible_with_field( + field_schema.field_type, + ) { + return Err(Error::from(anyhow::anyhow!( + "Expression for field {} is not compatible with \ + its type {}", + field_schema.name, + field_schema.field_type, + ))); + } + Ok(Some(inner.as_db_safe_string())) + } + filter::FilterExpr::Compound(inner) => { + let left_pred = + Self::rewrite_predicate_for_fields(schema, &inner.left)?; + let right_pred = + Self::rewrite_predicate_for_fields(schema, &inner.right)?; + let out = match (left_pred, right_pred) { + (Some(left), Some(right)) => Some(format!( + "{}({left}, {right})", + inner.op.as_db_function_name() + )), + (Some(single), None) | (None, Some(single)) => Some(single), + (None, None) => None, + }; + Ok(out) + } + } + } + + /// Rewrite the predicates from an OxQL query so that they apply only to the + /// measurement table. + fn rewrite_predicate_for_measurements( + schema: &TimeseriesSchema, + preds: &oxql::ast::table_ops::filter::Filter, + ) -> Result, Error> { + // Walk the set of predicates, keeping those which apply to this schema. + match &preds.expr { + filter::FilterExpr::Simple(inner) => { + // The relevant columns on which we filter depend on the datum + // type of the timeseries. All timeseries support "timestamp". + let ident = inner.ident.as_str(); + if ident == "timestamp" { + if matches!( + inner.value, + oxql::ast::literal::Literal::Timestamp(_) + ) { + return Ok(Some(inner.as_db_safe_string())); + } + return Err(Error::from(anyhow::anyhow!( + "Literal cannot be compared with a timestamp" + ))); + } + + // We do not currently support filtering in the database on + // values, only the `timestamp` and possibly `start_time` (if + // the metric is cumulative). + if ident == "start_time" { + if !schema.datum_type.is_cumulative() { + return Err(Error::from(anyhow::anyhow!( + "Start time can only be compared if the metric \ + is cumulative, but found one of type {}", + schema.datum_type, + ))); + } + if matches!( + inner.value, + oxql::ast::literal::Literal::Timestamp(_) + ) { + return Ok(Some(inner.as_db_safe_string())); + } + return Err(Error::from(anyhow::anyhow!( + "Literal cannot be compared with a timestamp" + ))); + } + + // We'll delegate to the actual table op to filter on any of the + // data columns. 
+ Ok(None) + } + filter::FilterExpr::Compound(inner) => { + let left_pred = Self::rewrite_predicate_for_measurements( + schema, + &inner.left, + )?; + let right_pred = Self::rewrite_predicate_for_measurements( + schema, + &inner.right, + )?; + let out = match (left_pred, right_pred) { + (Some(left), Some(right)) => Some(format!( + "{}({left}, {right})", + inner.op.as_db_function_name() + )), + (Some(single), None) | (None, Some(single)) => Some(single), + (None, None) => None, + }; + Ok(out) + } + } + } + + // Run one query. + // + // If the query is flat, run it directly. If it's nested, run each of them; + // concatenate the results; and then apply all the remaining + // transformations. + #[async_recursion::async_recursion] + async fn run_oxql_query( + &self, + query_log: &Logger, + query_id: Uuid, + query: oxql::Query, + total_rows_fetched: &mut u64, + outer_predicates: Option, + outer_limit: Option, + ) -> Result { + let split = query.split(); + if let oxql::ast::SplitQuery::Nested { subqueries, transformations } = + split + { + trace!( + query_log, + "OxQL query contains subqueries, running recursively" + ); + // Create the new set of outer predicates to pass in to the + // subquery, by merging the previous outer predicates with those of + // the transformation portion of this nested query. + let new_outer_predicates = + query.coalesced_predicates(outer_predicates.clone()); + let new_outer_limit = query.coalesced_limits(outer_limit); + + // Run each subquery recursively, and extend the results + // accordingly. + let mut query_summaries = Vec::with_capacity(subqueries.len()); + let mut tables = Vec::with_capacity(subqueries.len()); + let query_start = Instant::now(); + for subq in subqueries.into_iter() { + let res = self + .run_oxql_query( + query_log, + query_id, + subq, + total_rows_fetched, + new_outer_predicates.clone(), + new_outer_limit, + ) + .await?; + query_summaries.extend(res.query_summaries); + tables.extend(res.tables); + } + for tr in transformations.into_iter() { + trace!( + query_log, + "applying query transformation"; + "transformation" => ?tr, + ); + let id = usdt::UniqueId::new(); + probes::oxql__table__op__start!(|| ( + &id, + &query_id, + format!("{tr:?}") + )); + let new_tables = tr.apply(&tables, query.end_time()); + probes::oxql__table__op__done!(|| (&id, &query_id)); + tables = new_tables?; + } + let result = OxqlResult { + query_id, + total_duration: query_start.elapsed(), + query_summaries, + tables, + }; + return Ok(result); + } + + // This is a flat query, let's just run it directly. First step is + // getting the schema itself. + let query_start = Instant::now(); + let oxql::ast::SplitQuery::Flat(query) = split else { + unreachable!(); + }; + let name = query.timeseries_name(); + let Some(schema) = self.schema_for_timeseries(name).await? else { + return Err(Error::TimeseriesNotFound(name.to_string())); + }; + debug!( + query_log, + "running flat OxQL query"; + "query" => ?query, + "timeseries_name" => %name, + ); + + // Fetch the consistent fields (including keys) for this timeseries, + // including filtering them based on the predicates in the query + // that apply to this timeseries in particular. We also need to merge + // them in with the predicates passed in from a possible outer query. 
+ let preds = query.coalesced_predicates(outer_predicates.clone()); + debug!( + query_log, + "coalesced predicates from flat query"; + "outer_predicates" => ?&outer_predicates, + "coalesced" => ?&preds, + ); + let limit = query.coalesced_limits(outer_limit); + debug!( + query_log, + "coalesced limit operations from flat query"; + "outer_limit" => ?&outer_limit, + "coalesced" => ?&limit, + ); + + // We generally run a few SQL queries for each OxQL query: + // + // - Some number of queries to fetch the timeseries keys that are + // consistent with it. + // - Fetch the consistent samples. + // + // Note that there are often 2 or more queries needed for the first + // case. In particular, there is one query required for each independent + // time range in the query (including when a time range isn't + // specified). + // + // For example, consider the filter operation: + // + // ``` + // filter some_predicate || (timestamp > @now() - 1m && other_predicate) + // ``` + // + // That is, we return all timepoints for things where `some_predicate` + // is true, and only the last minute for those satisfying + // `other_predicate`. If we simply drop the timestamp filter, and run + // the two predicates conjoined, we would erroneously return only the + // last minute for everything, including those satisfying + // `some_predicate`. + // + // So instead, we need to run one query for each of those, fetch the + // keys associated with it, and then independently select the + // measurements satisfying both the time range and key-consistency + // constraints. Thankfully that can be done in one query, albeit a + // complicated one. + // + // Convert any outer predicates to DNF, and split into disjoint key + // groups for the measurement queries. + let disjoint_predicates = if let Some(preds) = preds.as_ref() { + let simplified = preds.simplify_to_dnf()?; + debug!( + query_log, + "simplified filtering predicates to disjunctive normal form"; + "original" => %preds, + "DNF" => %simplified, + ); + simplified + .flatten_disjunctions() + .into_iter() + .map(Option::Some) + .collect() + } else { + // There are no outer predicates, so we have 1 disjoint key group, + // with no predicates. + vec![None] + }; + + // Run each query group indepdendently, keeping the predicates and the + // timeseries keys corresponding to it. + let mut consistent_key_groups = + Vec::with_capacity(1 + disjoint_predicates.len()); + let mut query_summaries = + Vec::with_capacity(1 + disjoint_predicates.len()); + for predicates in disjoint_predicates.into_iter() { + debug!( + query_log, + "running disjoint query predicate"; + "predicate" => predicates.as_ref().map(|s| s.to_string()).unwrap_or("none".into()), + ); + let all_fields_query = + self.all_fields_query(&schema, predicates.as_ref())?; + let (summary, consistent_keys) = self + .select_matching_timeseries_info(&all_fields_query, &schema) + .await?; + debug!( + query_log, + "fetched information for matching timeseries keys"; + "n_keys" => consistent_keys.len(), + ); + query_summaries.push(summary); + + // If there are no consistent keys, move to the next independent + // query chunk. + if consistent_keys.is_empty() { + continue; + } + + // Push the disjoint filter itself, plus the keys consistent with + // it. + consistent_key_groups + .push(ConsistentKeyGroup { predicates, consistent_keys }); + } + + // If there are no consistent keys _at all_, we can just return an empty + // table. 
+ if consistent_key_groups.is_empty() { + let result = OxqlResult { + query_id, + total_duration: query_start.elapsed(), + query_summaries, + tables: vec![oxql::Table::new(schema.timeseries_name.as_str())], + }; + return Ok(result); + } + + // Fetch the consistent measurements for this timeseries, by key group. + // + // We'll keep track of all the measurements for this timeseries schema, + // organized by timeseries key. That's because we fetch all consistent + // samples at once, so we get many concrete _timeseries_ in the returned + // response, even though they're all from the same schema. + let (summaries, timeseries_by_key) = self + .select_matching_samples( + query_log, + &schema, + &consistent_key_groups, + limit, + total_rows_fetched, + ) + .await?; + query_summaries.extend(summaries); + + // At this point, let's construct a set of tables and run the results + // through the transformation pipeline. + let mut tables = vec![oxql::Table::from_timeseries( + schema.timeseries_name.as_str(), + timeseries_by_key.into_values(), + )?]; + + let transformations = query.transformations(); + debug!( + query_log, + "constructed OxQL table, starting transformation pipeline"; + "name" => tables[0].name(), + "n_timeseries" => tables[0].n_timeseries(), + "n_transformations" => transformations.len(), + ); + for tr in transformations { + trace!( + query_log, + "applying query transformation"; + "transformation" => ?tr, + ); + let id = usdt::UniqueId::new(); + probes::oxql__table__op__start!(|| ( + &id, + &query_id, + format!("{tr:?}") + )); + let new_tables = tr.apply(&tables, query.end_time()); + probes::oxql__table__op__done!(|| (&id, &query_id)); + tables = new_tables?; + } + let result = OxqlResult { + query_id, + total_duration: query_start.elapsed(), + query_summaries, + tables, + }; + Ok(result) + } + + // Select samples matching the set of predicates and consistent keys. + // + // Note that this also implements the conversion from cumulative to gauge + // samples, depending on how data was requested. + async fn select_matching_samples( + &self, + query_log: &Logger, + schema: &TimeseriesSchema, + consistent_key_groups: &[ConsistentKeyGroup], + limit: Option, + total_rows_fetched: &mut u64, + ) -> Result< + (Vec, BTreeMap), + Error, + > { + // We'll create timeseries for each key on the fly. To enable computing + // deltas, we need to track the last measurement we've seen as well. + let mut measurements_by_key: BTreeMap<_, Vec<_>> = BTreeMap::new(); + + // If the set of consistent keys is quite large, we may run into + // ClickHouse's SQL query size limit, which is 256KiB by default. + // See https://clickhouse.com/docs/en/operations/settings/settings#max_query_size + // for that limit. + // + // To avoid this, we have to split large groups of keys into pages, and + // concatenate the results ourself. 
+ let mut n_measurements: u64 = 0; + let mut summaries = Vec::new(); + for key_group_chunk in + chunk_consistent_key_groups(consistent_key_groups) + { + let measurements_query = self.measurements_query( + schema, + &key_group_chunk, + limit, + total_rows_fetched, + )?; + let (summary, body) = + self.execute_with_body(&measurements_query).await?; + summaries.push(summary); + for line in body.lines() { + let (key, measurement) = + model::parse_measurement_from_row(line, schema.datum_type); + measurements_by_key.entry(key).or_default().push(measurement); + n_measurements += 1; + } + } + debug!( + query_log, + "fetched measurements for OxQL query"; + "n_keys" => measurements_by_key.len(), + "n_measurements" => n_measurements, + ); + + // At this point, we need to check that we're still within our maximum + // result size. The measurement query we issued limited the returned + // result to 1 more than the remainder on our allotment. So if we get + // exactly that limit, we know that there are more rows than we can + // allow. We don't know how many more, but we don't care, and we fail + // the query regardless. + update_total_rows_and_check( + query_log, + total_rows_fetched, + n_measurements, + )?; + + // At this point, we no longer care about the consistent_key groups. We + // throw away the predicates that distinguished them, and merge the + // timeseries information together. + let info = consistent_key_groups + .iter() + .map(|group| group.consistent_keys.clone()) + .reduce(|mut acc, current| { + acc.extend(current); + acc + }) + .expect("Should have at least one key-group for every query"); + + // Remove the last measurement, returning just the keys and timeseries. + let mut out = BTreeMap::new(); + for (key, measurements) in measurements_by_key.into_iter() { + // Constuct a new timeseries, from the target/metric info. + let (target, metric) = info.get(&key).unwrap(); + let mut timeseries = oxql::Timeseries::new( + target + .fields + .iter() + .chain(metric.fields.iter()) + .map(|field| (field.name.clone(), field.value.clone())), + oxql::point::DataType::try_from(schema.datum_type)?, + if schema.datum_type.is_cumulative() { + oxql::point::MetricType::Delta + } else { + oxql::point::MetricType::Gauge + }, + )?; + + // Covert its oximeter measurements into OxQL data types. + let points = if schema.datum_type.is_cumulative() { + oxql::point::Points::delta_from_cumulative(&measurements)? + } else { + oxql::point::Points::gauge_from_gauge(&measurements)? + }; + timeseries.points = points; + debug!( + query_log, + "inserted new OxQL timeseries"; + "key" => key, + "metric_type" => ?timeseries.points.metric_type(), + "n_points" => timeseries.points.len(), + ); + out.insert(key, timeseries); + } + Ok((summaries, out)) + } + + fn measurements_query( + &self, + schema: &TimeseriesSchema, + consistent_key_groups: &[ConsistentKeyGroup], + limit: Option, + total_rows_fetched: &mut u64, + ) -> Result { + use std::fmt::Write; + + // Build the base query, which just selects the timeseries by name based + // on the datum type. + let mut query = self.measurements_query_raw(schema.datum_type); + query.push_str(" WHERE timeseries_name = '"); + write!(query, "{}", schema.timeseries_name).unwrap(); + query.push('\''); + + // Filter down the fields to those which apply to the data itself, which + // includes the timestamps and data values. The supported fields here + // depend on the datum type. + // + // We join all the consistent key groups with OR, which mirrors how they + // were split originally. 
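+ //
+ // As a rough illustration (the literal keys and timestamp value are made
+ // up), two key groups, one carrying a timestamp predicate and one without,
+ // would produce a clause appended below of the form:
+ //
+ //   AND ((timestamp > '...' AND timeseries_key IN (1,2)) OR timeseries_key IN (3,4))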
+ let all_predicates = consistent_key_groups + .iter() + .map(|group| { + // Write out the predicates on the measurements themselves, + // which really refers to the timestamps (and possibly start + // times). + let maybe_predicates = group + .predicates + .as_ref() + .map(|preds| { + Self::rewrite_predicate_for_measurements(schema, preds) + }) + .transpose()? + .flatten(); + + // Push the predicate that selects the timeseries keys, which + // are unique to this group. + let maybe_key_set = if !group.consistent_keys.is_empty() { + let mut chunk = String::from("timeseries_key IN ("); + let keys = group + .consistent_keys + .keys() + .map(ToString::to_string) + .collect::>() + .join(","); + chunk.push_str(&keys); + chunk.push(')'); + Some(chunk) + } else { + None + }; + + let chunk = match (maybe_predicates, maybe_key_set) { + (Some(preds), None) => preds, + (None, Some(key_set)) => key_set, + (Some(preds), Some(key_set)) => { + format!("({preds} AND {key_set})") + } + (None, None) => String::new(), + }; + Ok(chunk) + }) + .collect::, Error>>()? + .join(" OR "); + if !all_predicates.is_empty() { + query.push_str(" AND ("); + query.push_str(&all_predicates); + query.push(')'); + } + + // Always impose a strong order on these fields. + // + // The tables are all sorted by: + // + // - timeseries_name + // - timeseries_key + // - start_time, if present + // - timestamp + // + // We care most about the timestamp ordering, since that is assumed (and + // asserted) by downstream table operations. + // + // Note that although the tables are sorted by start_time, we _omit_ + // that if the query includes a limiting operation, like `first k`. This + // is an unfortunate interaction between the `LIMIT BY` clause that + // implements this in ClickHouse and the fact that the start times for + // some metrics are not monotonic. In particular, those metrics + // collected before a sled syncs with upstream NTP servers may have + // wildly inaccurate start times. Using the `LIMIT BY` clause in + // ClickHouse along with this sort order means we may end up taking the + // latest samples from a block of metrics with an early start time, even + // if there is a sample with a globally later, and accurate, timestamp, + // but with a start_time _after_ that previous block. + query.push_str(" ORDER BY timeseries_key"); + if schema.datum_type.is_cumulative() && limit.is_none() { + query.push_str(", start_time"); + } + query.push_str(", timestamp"); + + // If provided, push a `LIMIT BY` clause, which implements the `first` + // or `last` table operations directly in ClickHouse. + // + // This clause limits the number of rows _within each group_, which here + // is always the `timeseries_key`. Note that the clause is completely + // independent of the the traditional SQL `LIMIT` clause, pushed below + // to avoid selecting too many rows at once. + if let Some(limit) = limit { + // If this limit takes the _last_ samples, we need to invert the + // sorting by timestamp to be descending. + let is_last = matches!(limit.kind, LimitKind::Last); + if is_last { + query.push_str(" DESC"); + } + + // In either case, add the limit-by clause itself. + query.push_str(" LIMIT "); + write!(query, "{}", limit.count).unwrap(); + query.push_str(" BY timeseries_key"); + + // Possibly invert the timestamp ordering again. + // + // To implement a `last k` operation, above we sort by descending + // timestamps and use the `LIMIT k BY timeseries_key` clause. 
+ // However, this inverts the ordering by timestamp that we need for + // all downstream operations to work correctly. + // + // Restore that ordering here, by putting the now-complete query + // inside a CTE and selecting from that ordered by timestamp. Note + if is_last { + query = format!( + "WITH another_sort_bites_the_dust \ + AS ({query}) \ + SELECT * FROM another_sort_bites_the_dust \ + ORDER BY timeseries_key, timestamp" + ); + } + } + + // Push a limit clause, which restricts the number of records we could + // return. + // + // This is used to ensure that we never go above the limit in + // `MAX_RESULT_SIZE`. That restricts the _total_ number of rows we want + // to retch from the database. So we set our limit to be one more than + // the remainder on our allotment. If we get exactly as many as we set + // in the limit, then we fail the query because there are more rows that + // _would_ be returned. We don't know how many more, but there is at + // least 1 that pushes us over the limit. This prevents tricky + // TOCTOU-like bugs where we need to check the limit twice, and improves + // performance, since we don't return much more than we could possibly + // handle. + let remainder = MAX_DATABASE_ROWS - *total_rows_fetched; + query.push_str(" LIMIT "); + write!(query, "{}", remainder + 1).unwrap(); + + // Finally, use JSON format. + query.push_str(" FORMAT "); + query.push_str(crate::DATABASE_SELECT_FORMAT); + Ok(query) + } + + fn measurements_query_raw( + &self, + datum_type: oximeter::DatumType, + ) -> String { + let value_columns = if datum_type.is_histogram() { + "timeseries_key, start_time, timestamp, bins, counts" + } else if datum_type.is_cumulative() { + "timeseries_key, start_time, timestamp, datum" + } else { + "timeseries_key, timestamp, datum" + }; + format!( + "SELECT {} \ + FROM {}.{}", + value_columns, + crate::DATABASE_NAME, + crate::query::measurement_table_name(datum_type), + ) + } + + fn all_fields_query( + &self, + schema: &TimeseriesSchema, + preds: Option<&oxql::ast::table_ops::filter::Filter>, + ) -> Result { + // Filter down the fields to those which apply to this timeseries + // itself, and rewrite as a DB-safe WHERE clause. + let preds_for_fields = preds + .map(|p| Self::rewrite_predicate_for_fields(schema, p)) + .transpose()? + .flatten(); + let (already_has_where, mut query) = self.all_fields_query_raw(schema); + if let Some(preds) = preds_for_fields { + // If the raw field has only a single select query, then we've + // already added a "WHERE" clause. Simply tack these predicates onto + // that one. 
+ if already_has_where { + query.push_str(" AND "); + } else { + query.push_str(" WHERE "); + } + query.push_str(&preds); + } + query.push_str(" FORMAT "); + query.push_str(crate::DATABASE_SELECT_FORMAT); + Ok(query) + } + + fn all_fields_query_raw( + &self, + schema: &TimeseriesSchema, + ) -> (bool, String) { + match schema.field_schema.len() { + 0 => unreachable!(), + 1 => { + let field_schema = schema.field_schema.first().unwrap(); + ( + true, + format!( + "SELECT DISTINCT timeseries_key, field_value AS {field_name} \ + FROM {db_name}.{field_table} \ + WHERE \ + timeseries_name = '{timeseries_name}' AND \ + field_name = '{field_name}'", + field_name = field_schema.name, + db_name = crate::DATABASE_NAME, + field_table = field_table_name(field_schema.field_type), + timeseries_name = schema.timeseries_name, + ) + ) + } + _ => { + let mut top_level_columns = + Vec::with_capacity(schema.field_schema.len()); + let mut field_subqueries = + Vec::with_capacity(schema.field_schema.len()); + + // Select each field value, aliasing it to its field name. + for field_schema in schema.field_schema.iter() { + top_level_columns.push(format!( + "filter_on_{}.field_value AS {}", + field_schema.name, field_schema.name + )); + field_subqueries.push(( + format!( + "SELECT DISTINCT timeseries_key, field_value \ + FROM {db_name}.{field_table} \ + WHERE \ + timeseries_name = '{timeseries_name}' AND \ + field_name = '{field_name}' \ + ", + db_name = crate::DATABASE_NAME, + field_table = + field_table_name(field_schema.field_type), + timeseries_name = schema.timeseries_name, + field_name = field_schema.name, + ), + format!("filter_on_{}", field_schema.name), + )); + } + + // Write the top-level select statement, starting by selecting + // the timeseries key from the first field schema. + let mut out = format!( + "SELECT {}.timeseries_key AS timeseries_key, {} FROM ", + field_subqueries[0].1, + top_level_columns.join(", "), + ); + + // Then add all the subqueries selecting each field. + // + // We need to add these, along with their aliases. The first + // such subquery has no join conditions, but the later ones all + // refer to the previous via: + // + // `ON .timeseries_key = .timeseries_key` + for (i, (subq, alias)) in field_subqueries.iter().enumerate() { + // Push the subquery itself, aliased. + out.push('('); + out.push_str(subq); + out.push_str(") AS "); + out.push_str(alias); + + // Push the join conditions. + if i > 0 { + let previous_alias = &field_subqueries[i - 1].1; + out.push_str(" ON "); + out.push_str(alias); + out.push_str(".timeseries_key = "); + out.push_str(previous_alias); + out.push_str(".timeseries_key"); + } + + // Push the "INNER JOIN" expression itself, for all but the + // last subquery. + if i < field_subqueries.len() - 1 { + out.push_str(" INNER JOIN "); + } + } + (false, out) + } + } + } +} + +// Split the list of consistent key groups, ensuring none exceeds ClickHouse's +// query limit. +// +// The set of consistent keys for an OxQL query can be quite large. When stuffed +// into a giant list of keys and used in a SQL query like so: +// +// ``` +// timeseries_key IN (list, of, many, keys) +// ``` +// +// this can hit ClickHouse's SQL query size limit (defaulting to 256KiB, see +// https://clickhouse.com/docs/en/operations/settings/settings#max_query_size). +// +// This function chunks the list of consistent keys, ensuring that each group is +// small enough to fit within that query limit. 
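+//
+// As a worked example with the constants chosen below: each key needs at most
+// 21 characters (20 digits for a u64 plus a separating comma), and roughly
+// 250KiB of the query is budgeted for keys, so each chunk holds on the order
+// of 12,000 keys.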
+// +// Note that this unfortunately needs to chunk and reallocate the groups, +// because it may entail splitting each key group. That requires a copy of the +// internal map, to split it at a particular size. +fn chunk_consistent_key_groups( + consistent_key_groups: &[ConsistentKeyGroup], +) -> Vec> { + // The max number of keys allowed in each measurement query. + // + // Keys are u64s, so their max is 18446744073709551615, which has 20 base-10 + // digits. We also separate the keys by a `,`, so let's call it 21 digits. + // + // ClickHouse's max query size is 256KiB, but we allow for 6KiB of overhead + // for the other parts of the query (select, spaces, column names, etc). + // That's very conservative. + const MAX_QUERY_SIZE_FOR_KEYS: usize = 250 * 1024; + const DIGITS_PER_KEY: usize = 21; + const MAX_KEYS_PER_MEASUREMENT_QUERY: usize = + MAX_QUERY_SIZE_FOR_KEYS / DIGITS_PER_KEY; + chunk_consistent_key_groups_impl( + consistent_key_groups, + MAX_KEYS_PER_MEASUREMENT_QUERY, + ) +} + +fn chunk_consistent_key_groups_impl( + consistent_key_groups: &[ConsistentKeyGroup], + chunk_size: usize, +) -> Vec> { + // Create the output vec-of-vec of key groups. We'll always push to the last + // one, so grab a reference to it. + let mut out = vec![vec![]]; + let mut current_chunk = out.last_mut().unwrap(); + let mut room = chunk_size; + 'group: for next_group in consistent_key_groups.iter().cloned() { + // If we have room for it in this chunk, push it onto the current chunk, + // and then continue to the next group. + let group_size = next_group.consistent_keys.len(); + if room >= group_size { + current_chunk.push(next_group); + room -= group_size; + continue; + } + + // If we don't have enough room for this entire group, then we need to + // split it up and push whatever we can. It's actually possible that the + // next group needs to be split multiple times. So we'll do that until + // it's empty, possibly adding new chunks to the output array. + // + // It's tricky to iterate over a map by the index / count, and since + // we're operating on a clone anyway, convert this to a vec. + let predicates = next_group.predicates; + let mut group_keys: Vec<_> = + next_group.consistent_keys.into_iter().collect(); + while !group_keys.is_empty() { + // On a previous pass through this loop, we may have exhausted all + // the remaining room. As we have re-entered it, we still have items + // in this current group of keys. So "close" the last chunk and push + // a new one, onto which we'll start adding the remaining items. + if room == 0 { + out.push(vec![]); + current_chunk = out.last_mut().unwrap(); + room = chunk_size; + } + + // Fetch up to the remaining set of keys. + let ix = room.min(group_keys.len()); + let consistent_keys: BTreeMap<_, _> = + group_keys.drain(..ix).collect(); + + // There are no more keys in this group, we need to continue to the + // next one. + if consistent_keys.is_empty() { + continue 'group; + } + + // We need to update the amount of room we have left, to be sure we + // don't push this whole group if the chunk boundary falls in the + // middle of it. + room -= consistent_keys.len(); + + // Push this set of keys onto the current chunk. + let this_group_chunk = ConsistentKeyGroup { + predicates: predicates.clone(), + consistent_keys, + }; + current_chunk.push(this_group_chunk); + } + } + out +} + +// Helper to update the number of total rows fetched so far, and check it's +// still under the limit. 
+fn update_total_rows_and_check( + query_log: &Logger, + total_rows_fetched: &mut u64, + count: u64, +) -> Result<(), Error> { + *total_rows_fetched += count; + if *total_rows_fetched > MAX_DATABASE_ROWS { + return Err(Error::from(anyhow::anyhow!( + "Query requires fetching more than the \ + current limit of {} data points from the \ + timeseries database", + MAX_DATABASE_ROWS, + ))); + } + trace!( + query_log, + "verified OxQL measurement query returns few enough results"; + "n_new_measurements" => count, + "n_total" => *total_rows_fetched, + "limit" => MAX_DATABASE_ROWS, + ); + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::ConsistentKeyGroup; + use crate::client::oxql::chunk_consistent_key_groups_impl; + use crate::{ + oxql::{point::Points, Table, Timeseries}, + Client, DbWrite, + }; + use crate::{Metric, Target}; + use chrono::{DateTime, Utc}; + use dropshot::test_util::LogContext; + use omicron_test_utils::dev::clickhouse::ClickHouseInstance; + use omicron_test_utils::dev::test_setup_log; + use oximeter::{types::Cumulative, FieldValue}; + use oximeter::{DatumType, Sample}; + use std::collections::BTreeMap; + use std::time::Duration; + + #[derive( + Clone, Debug, Eq, PartialEq, PartialOrd, Ord, oximeter::Target, + )] + struct SomeTarget { + name: String, + index: u32, + } + + #[derive(Clone, Debug, oximeter::Metric)] + struct SomeMetric { + foo: i32, + datum: Cumulative, + } + + #[derive(Clone, Debug)] + #[allow(dead_code)] + struct TestData { + targets: Vec, + // Note that we really want all the samples per metric _field_, not the + // full metric. That would give us a 1-element sample array for each. + samples_by_timeseries: BTreeMap<(SomeTarget, i32), Vec>, + first_timestamp: DateTime, + } + + struct TestContext { + logctx: LogContext, + clickhouse: ClickHouseInstance, + client: Client, + test_data: TestData, + } + + impl TestContext { + async fn cleanup_successful(mut self) { + self.clickhouse + .cleanup() + .await + .expect("Failed to cleanup ClickHouse server"); + self.logctx.cleanup_successful(); + } + } + + const N_SAMPLES_PER_TIMESERIES: usize = 16; + const SAMPLE_INTERVAL: Duration = Duration::from_secs(1); + const SHIFT: Duration = Duration::from_secs(1); + + fn format_timestamp(t: DateTime) -> String { + format!("{}", t.format("%Y-%m-%dT%H:%M:%S.%f")) + } + + fn generate_test_samples() -> TestData { + // We'll test with 4 different targets, each with two values for its + // fields. + let mut targets = Vec::with_capacity(4); + let names = &["first-target", "second-target"]; + let indices = 1..3; + for (name, index) in itertools::iproduct!(names, indices) { + let target = SomeTarget { name: name.to_string(), index }; + targets.push(target); + } + + // Create a start time for all samples. + // + // IMPORTANT: There is a TTL of 30 days on all data currently. I would + // love this to be a fixed, well-known start time, to make tests easier, + // but that's in conflict with the TTL. Instead, we'll use midnight on + // the current day, and then store it in the test data context. + let first_timestamp = + Utc::now().date_naive().and_hms_opt(0, 0, 0).unwrap().and_utc(); + + // For simplicity, we'll also assume all the cumulative measurements + // start at the first timestamp as well. + let datum = Cumulative::with_start_time(first_timestamp, 0); + + // We'll create two separate metrics, with 16 samples each. 
+ let foos = [-1, 1]; + let mut samples_by_timeseries = BTreeMap::new(); + let mut timeseries_index = 0; + for target in targets.iter() { + for foo in foos.iter() { + // Shift this timeseries relative to the others, to ensure we + // have some different timestamps. + let timeseries_start = + first_timestamp + timeseries_index * SHIFT; + + // Create the first metric, starting from a count of 0. + let mut metric = SomeMetric { foo: *foo, datum }; + + // Create all the samples,, incrementing the datum and sample + // time. + for i in 0..N_SAMPLES_PER_TIMESERIES { + let sample_time = + timeseries_start + SAMPLE_INTERVAL * i as u32; + let sample = Sample::new_with_timestamp( + sample_time, + target, + &metric, + ) + .unwrap(); + samples_by_timeseries + .entry((target.clone(), *foo)) + .or_insert_with(|| { + Vec::with_capacity(N_SAMPLES_PER_TIMESERIES) + }) + .push(sample); + metric.datum += 1; + } + timeseries_index += 1; + } + } + TestData { targets, samples_by_timeseries, first_timestamp } + } + + async fn setup_oxql_test(name: &str) -> TestContext { + let logctx = test_setup_log(name); + let db = ClickHouseInstance::new_single_node(&logctx, 0) + .await + .expect("Failed to start ClickHouse"); + let client = Client::new(db.address, &logctx.log); + client + .init_single_node_db() + .await + .expect("Failed to init single-node oximeter database"); + let test_data = generate_test_samples(); + let samples: Vec<_> = test_data + .samples_by_timeseries + .values() + .flatten() + .cloned() + .collect(); + client + .insert_samples(&samples) + .await + .expect("Failed to insert test data"); + TestContext { logctx, clickhouse: db, client, test_data } + } + + #[tokio::test] + async fn test_get_entire_table() { + let ctx = setup_oxql_test("test_get_entire_table").await; + let query = "get some_target:some_metric"; + let result = ctx + .client + .oxql_query(query) + .await + .expect("failed to run OxQL query"); + assert_eq!(result.tables.len(), 1, "Should be exactly 1 table"); + let table = result.tables.get(0).unwrap(); + assert_eq!( + table.n_timeseries(), + ctx.test_data.samples_by_timeseries.len(), + "Should have fetched every timeseries" + ); + assert!( + table.iter().all(|t| t.points.len() == N_SAMPLES_PER_TIMESERIES), + "Should have fetched all points for all timeseries" + ); + + // Let's build the expected point array, from each timeseries we + // inserted. + let mut matched_timeseries = 0; + for ((target, foo), samples) in + ctx.test_data.samples_by_timeseries.iter() + { + let measurements: Vec<_> = + samples.iter().map(|s| s.measurement.clone()).collect(); + let expected_points = Points::delta_from_cumulative(&measurements) + .expect( + "failed to create expected points from inserted measurements", + ); + let expected_timeseries = + find_timeseries_in_table(&table, target, foo) + .expect("Table did not contain an expected timeseries"); + assert_eq!( + expected_timeseries.points, expected_points, + "Did not reconstruct the correct points for this timeseries" + ); + matched_timeseries += 1; + } + assert_eq!(matched_timeseries, table.len()); + assert_eq!( + matched_timeseries, + ctx.test_data.samples_by_timeseries.len() + ); + + ctx.cleanup_successful().await; + } + + #[tokio::test] + async fn test_get_one_timeseries() { + let ctx = setup_oxql_test("test_get_one_timeseries").await; + + // Specify exactly one timeseries we _want_ to fetch, by picking the + // first timeseries we inserted. 
+ let ((expected_target, expected_foo), expected_samples) = + ctx.test_data.samples_by_timeseries.first_key_value().unwrap(); + let query = format!( + "get some_target:some_metric | filter {}", + exact_filter_for(expected_target, *expected_foo) + ); + let result = ctx + .client + .oxql_query(&query) + .await + .expect("failed to run OxQL query"); + assert_eq!(result.tables.len(), 1, "Should be exactly 1 table"); + let table = result.tables.get(0).unwrap(); + assert_eq!( + table.n_timeseries(), + 1, + "Should have fetched exactly the target timeseries" + ); + assert!( + table.iter().all(|t| t.points.len() == N_SAMPLES_PER_TIMESERIES), + "Should have fetched all points for all timeseries" + ); + + let expected_timeseries = + find_timeseries_in_table(&table, expected_target, expected_foo) + .expect("Table did not contain expected timeseries"); + let measurements: Vec<_> = + expected_samples.iter().map(|s| s.measurement.clone()).collect(); + let expected_points = Points::delta_from_cumulative(&measurements) + .expect("failed to build expected points from measurements"); + assert_eq!( + expected_points, expected_timeseries.points, + "Did not reconstruct the correct points for the one \ + timeseries the query fetched" + ); + + ctx.cleanup_successful().await; + } + + // In this test, we'll fetch the entire history of one timeseries, and only + // the last few samples of another. + // + // This checks that we correctly do complex logical operations that require + // fetching different sets of fields at different times. + #[tokio::test] + async fn test_get_entire_timeseries_and_part_of_another() { + usdt::register_probes().unwrap(); + let ctx = + setup_oxql_test("test_get_entire_timeseries_and_part_of_another") + .await; + + let mut it = ctx.test_data.samples_by_timeseries.iter(); + let (entire, only_part) = (it.next().unwrap(), it.next().unwrap()); + + let entire_filter = exact_filter_for(&entire.0 .0, entire.0 .1); + let only_part_filter = + exact_filter_for(&only_part.0 .0, only_part.0 .1); + let start_timestamp = only_part.1[6].measurement.timestamp(); + let only_part_timestamp_filter = format_timestamp(start_timestamp); + + let query = format!( + "get some_target:some_metric | filter ({}) || (timestamp >= @{} && {})", + entire_filter, + only_part_timestamp_filter, + only_part_filter, + ); + let result = ctx + .client + .oxql_query(&query) + .await + .expect("failed to run OxQL query"); + assert_eq!(result.tables.len(), 1, "Should be exactly 1 table"); + let table = result.tables.get(0).unwrap(); + assert_eq!( + table.n_timeseries(), + 2, + "Should have fetched exactly the two target timeseries" + ); + + // Check that we fetched the entire timeseries for the first one. + let expected_timeseries = + find_timeseries_in_table(table, &entire.0 .0, &entire.0 .1) + .expect("failed to fetch all of the first timeseries"); + let measurements: Vec<_> = + entire.1.iter().map(|s| s.measurement.clone()).collect(); + let expected_points = Points::delta_from_cumulative(&measurements) + .expect("failed to build expected points"); + assert_eq!( + expected_timeseries.points, expected_points, + "Did not collect the entire set of points for the first timeseries", + ); + + // And that we only get the last portion of the second timeseries. 
+ let expected_timeseries = + find_timeseries_in_table(table, &only_part.0 .0, &only_part.0 .1) + .expect("failed to fetch part of the second timeseries"); + let measurements: Vec<_> = only_part + .1 + .iter() + .filter_map(|sample| { + let meas = &sample.measurement; + if meas.timestamp() >= start_timestamp { + Some(meas.clone()) + } else { + None + } + }) + .collect(); + let expected_points = Points::delta_from_cumulative(&measurements) + .expect("failed to build expected points"); + assert_eq!( + expected_timeseries.points, expected_points, + "Did not collect the last few points for the second timeseries", + ); + + ctx.cleanup_successful().await; + } + + // Return an OxQL filter item that will exactly select the provided + // timeseries by its target / metric. + fn exact_filter_for(target: &SomeTarget, foo: i32) -> String { + format!( + "name == '{}' && index == {} && foo == {}", + target.name, target.index, foo, + ) + } + + // Given a table from an OxQL query, look up the timeseries for the inserted + // target / metric, if it exists + fn find_timeseries_in_table<'a>( + table: &'a Table, + target: &'a SomeTarget, + foo: &'a i32, + ) -> Option<&'a Timeseries> { + for timeseries in table.iter() { + let fields = ×eries.fields; + + // Look up each field in turn, and compare it. + let FieldValue::String(val) = fields.get("name")? else { + unreachable!(); + }; + if val != &target.name { + continue; + } + let FieldValue::U32(val) = fields.get("index")? else { + unreachable!(); + }; + if val != &target.index { + continue; + } + let FieldValue::I32(val) = fields.get("foo")? else { + unreachable!(); + }; + if val != foo { + continue; + } + + // We done matched it. + return Some(timeseries); + } + None + } + + fn make_consistent_key_group(size: u64) -> ConsistentKeyGroup { + let consistent_keys = (0..size) + .map(|key| { + let target = Target { name: "foo".to_string(), fields: vec![] }; + let metric = Metric { + name: "bar".to_string(), + fields: vec![], + datum_type: DatumType::U8, + }; + (key, (target, metric)) + }) + .collect(); + ConsistentKeyGroup { predicates: None, consistent_keys } + } + + #[test] + fn test_chunk_consistent_key_groups_all_in_one_chunk() { + // Create two key groups, each with 5 keys. + // + // With a chunk size of 12, these should all be in the same chunk, so + // we're really just cloning the inputs. They do go into an outer vec + // though, because we can have multiple chunks in theory. + let keys = + vec![make_consistent_key_group(5), make_consistent_key_group(5)]; + let chunks = chunk_consistent_key_groups_impl(&keys, 12); + assert_eq!( + chunks.len(), + 1, + "All key groups should fit into one chunk when their \ + total size is less than the chunk size" + ); + assert_eq!( + keys, chunks[0], + "All key groups should fit into one chunk when their \ + total size is less than the chunk size" + ); + } + + #[test] + fn test_chunk_consistent_key_groups_split_middle_of_key_group() { + // Create one key group, with 10 keys. + // + // With a chunk size of 5, this should be split in half across two + // chunks. 
+ let keys = vec![make_consistent_key_group(10)]; + let chunks = chunk_consistent_key_groups_impl(&keys, 5); + assert_eq!( + chunks.len(), + 2, + "Consistent key group should be split into two chunks", + ); + + let first = keys[0] + .consistent_keys + .range(..5) + .map(|(k, v)| (*k, v.clone())) + .collect(); + assert_eq!( + chunks[0][0].consistent_keys, first, + "The first chunk of the consistent keys should be \ + the first half of the input keys" + ); + + let second = keys[0] + .consistent_keys + .range(5..) + .map(|(k, v)| (*k, v.clone())) + .collect(); + assert_eq!( + chunks[1][0].consistent_keys, second, + "The second chunk of the consistent keys should be \ + the second half of the input keys" + ); + } + + #[test] + fn test_chunk_consistent_key_groups_split_key_group_multiple_times() { + // Create one key group, with 10 keys. + // + // With a chunk size of 4, this should be split 3 times, with the first + // two having 4 items and the last the remaining 2. + let keys = vec![make_consistent_key_group(10)]; + let chunks = chunk_consistent_key_groups_impl(&keys, 4); + assert_eq!( + chunks.len(), + 3, + "Consistent key group should be split into three chunks", + ); + + let first = keys[0] + .consistent_keys + .range(..4) + .map(|(k, v)| (*k, v.clone())) + .collect(); + assert_eq!( + chunks[0][0].consistent_keys, first, + "The first chunk of the consistent keys should be \ + the first 4 input keys" + ); + + let second = keys[0] + .consistent_keys + .range(4..8) + .map(|(k, v)| (*k, v.clone())) + .collect(); + assert_eq!( + chunks[1][0].consistent_keys, second, + "The second chunk of the consistent keys should be \ + the next 4 input keys", + ); + + let third = keys[0] + .consistent_keys + .range(8..) + .map(|(k, v)| (*k, v.clone())) + .collect(); + assert_eq!( + chunks[2][0].consistent_keys, third, + "The second chunk of the consistent keys should be \ + the remaining 2 input keys", + ); + } + + #[tokio::test] + async fn test_limit_operations() { + let ctx = setup_oxql_test("test_limit_operations").await; + + // Specify exactly one timeseries we _want_ to fetch, by picking the + // first timeseries we inserted. 
+ let ((expected_target, expected_foo), expected_samples) = + ctx.test_data.samples_by_timeseries.first_key_value().unwrap(); + let query = format!( + "get some_target:some_metric | filter {} | first 1", + exact_filter_for(expected_target, *expected_foo) + ); + let result = ctx + .client + .oxql_query(&query) + .await + .expect("failed to run OxQL query"); + assert_eq!(result.tables.len(), 1, "Should be exactly 1 table"); + let table = result.tables.get(0).unwrap(); + assert_eq!( + table.n_timeseries(), + 1, + "Should have fetched exactly the target timeseries" + ); + assert!( + table.iter().all(|t| t.points.len() == 1), + "Should have fetched exactly 1 point for this timeseries", + ); + + let expected_timeseries = + find_timeseries_in_table(&table, expected_target, expected_foo) + .expect("Table did not contain expected timeseries"); + let measurements: Vec<_> = expected_samples + .iter() + .take(1) + .map(|s| s.measurement.clone()) + .collect(); + let expected_points = Points::delta_from_cumulative(&measurements) + .expect("failed to build expected points from measurements"); + assert_eq!( + expected_points, expected_timeseries.points, + "Did not reconstruct the correct points for the one \ + timeseries the query fetched" + ); + + ctx.cleanup_successful().await; + } +} diff --git a/oximeter/db/src/client/query_summary.rs b/oximeter/db/src/client/query_summary.rs new file mode 100644 index 0000000000..b00a11c38e --- /dev/null +++ b/oximeter/db/src/client/query_summary.rs @@ -0,0 +1,123 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Types representing summaries of queries against the timeseries database. + +// Copyright 2024 Oxide Computer Company + +use crate::Error; +use reqwest::header::HeaderMap; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use std::time::Duration; +use uuid::Uuid; + +/// A count of bytes / rows accessed during a query. +#[derive(Clone, Copy, Debug, Deserialize, JsonSchema, Serialize)] +pub struct IoCount { + /// The number of bytes accessed. + pub bytes: u64, + /// The number of rows accessed. + pub rows: u64, +} + +impl std::fmt::Display for IoCount { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{} rows ({} bytes)", self.rows, self.bytes) + } +} + +/// Summary of the I/O resources used by a query. +#[derive(Clone, Copy, Debug, Deserialize, JsonSchema, Serialize)] +#[serde(try_from = "serde_json::Value")] +pub struct IoSummary { + /// The bytes and rows read by the query. + pub read: IoCount, + /// The bytes and rows written by the query. 
+    pub written: IoCount,
+}
+
+impl TryFrom<serde_json::Value> for IoSummary {
+    type Error = Error;
+
+    fn try_from(j: serde_json::Value) -> Result<Self, Self::Error> {
+        use serde_json::Map;
+        use serde_json::Value;
+        use std::str::FromStr;
+
+        let Value::Object(map) = j else {
+            return Err(Error::Database(String::from(
+                "Expected a JSON object for a metadata summary",
+            )));
+        };
+
+        fn unpack_summary_value<T>(
+            map: &Map<String, Value>,
+            key: &str,
+        ) -> Result<T, Error>
+        where
+            T: FromStr,
+            <T as FromStr>::Err: std::error::Error,
+        {
+            let value = map.get(key).ok_or_else(|| {
+                Error::MissingHeaderKey { key: key.to_string() }
+            })?;
+            let Value::String(v) = value else {
+                return Err(Error::BadMetadata {
+                    key: key.to_string(),
+                    msg: String::from("Expected a string value"),
+                });
+            };
+            v.parse::<T>().map_err(|e| Error::BadMetadata {
+                key: key.to_string(),
+                msg: e.to_string(),
+            })
+        }
+        let rows_read: u64 = unpack_summary_value(&map, "read_rows")?;
+        let bytes_read: u64 = unpack_summary_value(&map, "read_bytes")?;
+        let rows_written: u64 = unpack_summary_value(&map, "written_rows")?;
+        let bytes_written: u64 = unpack_summary_value(&map, "written_bytes")?;
+        Ok(Self {
+            read: IoCount { bytes: bytes_read, rows: rows_read },
+            written: IoCount { bytes: bytes_written, rows: rows_written },
+        })
+    }
+}
+
+/// Basic metadata about the resource usage of a single SQL query.
+#[derive(Clone, Copy, Debug, Deserialize, JsonSchema, Serialize)]
+pub struct QuerySummary {
+    /// The database-assigned query ID.
+    pub id: Uuid,
+    /// The total duration of the query (network plus execution).
+    pub elapsed: Duration,
+    /// Summary of the data read and written.
+    pub io_summary: IoSummary,
+}
+
+impl QuerySummary {
+    /// Construct a SQL query summary from the headers received from the DB.
+    pub(crate) fn from_headers(
+        elapsed: Duration,
+        headers: &HeaderMap,
+    ) -> Result<Self, Error> {
+        fn get_header<'a>(
+            map: &'a HeaderMap,
+            key: &'a str,
+        ) -> Result<&'a str, Error> {
+            let hdr = map.get(key).ok_or_else(|| Error::MissingHeaderKey {
+                key: key.to_string(),
+            })?;
+            std::str::from_utf8(hdr.as_bytes())
+                .map_err(|err| Error::Database(err.to_string()))
+        }
+        let summary =
+            serde_json::from_str(get_header(headers, "X-ClickHouse-Summary")?)
+                .map_err(|err| Error::Database(err.to_string()))?;
+        let id = get_header(headers, "X-ClickHouse-Query-Id")?
+            .parse()
+            .map_err(|err: uuid::Error| Error::Database(err.to_string()))?;
+        Ok(Self { id, elapsed, io_summary: summary })
+    }
+}
diff --git a/oximeter/db/src/client/sql.rs b/oximeter/db/src/client/sql.rs
new file mode 100644
index 0000000000..236faa7aa4
--- /dev/null
+++ b/oximeter/db/src/client/sql.rs
@@ -0,0 +1,104 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Client methods for running SQL queries against timeseries themselves.
+//!
+//! This implements a prototype system for creating "virtual tables" from each
+//! timeseries, letting us run SQL queries directly against them. These tables
+//! are constructed via huge joins, which effectively reconstruct the entire
+//! history of samples as received from the producers. Each row is the original
+//! sample. This denormalization comes at a big cost, both in cycles and memory
+//! usage, since we need to build the entire join in ClickHouse and send it all
+//! to the client for deserialization.
+//!
+//! Thus this prototype is very useful for development, running analyses on
+//! small datasets. It's less helpful on real deployments, where the size of
+//! data makes this approach prohibitive.
+
+// Copyright 2024 Oxide Computer Company
+
+use super::query_summary::QuerySummary;
+pub use crate::sql::RestrictedQuery;
+use crate::Error;
+use crate::{
+    client::Client,
+    sql::{QueryResult, Table},
+};
+pub use indexmap::IndexMap;
+use slog::debug;
+pub use std::time::Instant;
+
+impl Client {
+    /// Transform a SQL query against a timeseries, but do not execute it.
+    pub async fn transform_query(
+        &self,
+        query: impl AsRef<str>,
+    ) -> Result<String, Error> {
+        let restricted = RestrictedQuery::new(query.as_ref())?;
+        restricted.to_oximeter_sql(&*self.schema.lock().await)
+    }
+
+    /// Run a SQL query against a timeseries.
+    pub async fn query(
+        &self,
+        query: impl AsRef<str>,
+    ) -> Result<QueryResult, Error> {
+        use crate::client::handle_db_response;
+
+        let original_query = query.as_ref().trim_end_matches(';');
+        let ox_sql = self.transform_query(original_query).await?;
+        let rewritten = format!("{ox_sql} FORMAT JSONEachRow");
+        debug!(
+            self.log,
+            "rewrote restricted query";
+            "original_sql" => &original_query,
+            "rewritten_sql" => &rewritten,
+        );
+        let request = self
+            .client
+            .post(&self.url)
+            .query(&[
+                ("output_format_json_quote_64bit_integers", "0"),
+                ("database", crate::DATABASE_NAME),
+            ])
+            .body(rewritten.clone());
+        let query_start = Instant::now();
+        let response = handle_db_response(
+            request
+                .send()
+                .await
+                .map_err(|err| Error::DatabaseUnavailable(err.to_string()))?,
+        )
+        .await?;
+        let summary = QuerySummary::from_headers(
+            query_start.elapsed(),
+            response.headers(),
+        )?;
+        let text = response.text().await.unwrap();
+        let mut table = Table::default();
+        for line in text.lines() {
+            let row =
+                serde_json::from_str::<IndexMap<String, serde_json::Value>>(
+                    line.trim(),
+                )
+                .unwrap();
+            if table.column_names.is_empty() {
+                table.column_names.extend(row.keys().cloned())
+            } else {
+                assert!(table
+                    .column_names
+                    .iter()
+                    .zip(row.keys())
+                    .all(|(k1, k2)| k1 == k2));
+            }
+            table.rows.push(row.into_values().collect());
+        }
+        Ok(QueryResult {
+            original_query: original_query.to_string(),
+            rewritten_query: rewritten,
+            summary,
+            table,
+        })
+    }
+}
diff --git a/oximeter/db/src/lib.rs b/oximeter/db/src/lib.rs
index 24f7d8c2d0..e1570ee0c3 100644
--- a/oximeter/db/src/lib.rs
+++ b/oximeter/db/src/lib.rs
@@ -4,7 +4,7 @@
 //! Tools for interacting with the control plane telemetry database.
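A hypothetical usage sketch of the SQL prototype above. The crate path, the `sql` feature, the quoted virtual-table name, and the visibility of `QueryResult`'s fields outside the crate are assumptions for illustration; `Client::query` and the `QuerySummary` fields are the ones defined above.

```rust
// Hypothetical sketch only: assumes a `Client` already connected to
// ClickHouse (construction elided), the crate name `oximeter_db`, and the
// optional `sql` feature enabled. The timeseries name is made up.
async fn run_example(
    client: &oximeter_db::Client,
) -> Result<(), oximeter_db::Error> {
    // The SQL is first rewritten into one large join over the field and
    // measurement tables, then sent to ClickHouse as JSONEachRow.
    let result = client
        .query("SELECT count() FROM \"virtual_machine:cpu_busy\"")
        .await?;
    println!("rewritten SQL: {}", result.rewritten_query);

    // Each response also carries the resource usage parsed from the
    // X-ClickHouse-Summary and X-ClickHouse-Query-Id headers.
    println!(
        "query {} read {}",
        result.summary.id, result.summary.io_summary.read
    );
    Ok(())
}
```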
-// Copyright 2023 Oxide Computer Company +// Copyright 2024 Oxide Computer Company use crate::query::StringFieldSelector; use chrono::DateTime; @@ -32,14 +32,17 @@ use thiserror::Error; mod client; pub mod model; +#[cfg(feature = "oxql")] +pub mod oxql; pub mod query; +#[cfg(any(feature = "sql", test))] pub mod sql; +#[cfg(feature = "oxql")] +pub use client::oxql::OxqlResult; +pub use client::query_summary::QuerySummary; pub use client::Client; pub use client::DbWrite; -pub use client::QueryMetadata; -pub use client::QueryResult; -pub use client::Table; pub use model::OXIMETER_VERSION; #[derive(Debug, Error)] @@ -58,7 +61,7 @@ pub enum Error { BadMetadata { key: String, msg: String }, /// An error interacting with the telemetry database - #[error("Error interacting with telemetry database")] + #[error("Error interacting with telemetry database: {0}")] Database(String), /// A schema provided when collecting samples did not match the expected schema @@ -134,8 +137,20 @@ pub enum Error { #[error("Schema update versions must be sequential without gaps")] NonSequentialSchemaVersions, + #[cfg(any(feature = "sql", test))] #[error("SQL error")] Sql(#[from] sql::Error), + + #[cfg(any(feature = "oxql", test))] + #[error(transparent)] + Oxql(oxql::Error), +} + +#[cfg(any(feature = "oxql", test))] +impl From for Error { + fn from(e: crate::oxql::Error) -> Self { + Error::Oxql(e) + } } impl From for TimeseriesSchema { @@ -281,6 +296,7 @@ mod tests { use super::*; use crate::model::DbFieldList; use crate::model::DbTimeseriesSchema; + use std::borrow::Cow; use uuid::Uuid; // Validates that the timeseries_key stability for a sample is stable. @@ -317,7 +333,7 @@ mod tests { use strum::EnumCount; let values = [ - ("string", FieldValue::String(String::default())), + ("string", FieldValue::String(Cow::Owned(String::default()))), ("i8", FieldValue::I8(-0x0A)), ("u8", FieldValue::U8(0x0A)), ("i16", FieldValue::I16(-0x0ABC)), diff --git a/oximeter/db/src/model.rs b/oximeter/db/src/model.rs index b1b45eabc4..106c347ef6 100644 --- a/oximeter/db/src/model.rs +++ b/oximeter/db/src/model.rs @@ -64,7 +64,7 @@ impl From for DbBool { impl From for DbBool { fn from(b: bool) -> Self { - DbBool { inner: b as _ } + DbBool { inner: u8::from(b) } } } @@ -391,7 +391,7 @@ declare_field_row! {I32FieldRow, i32, "i32"} declare_field_row! {U32FieldRow, u32, "u32"} declare_field_row! {I64FieldRow, i64, "i64"} declare_field_row! {U64FieldRow, u64, "u64"} -declare_field_row! {StringFieldRow, String, "string"} +declare_field_row! {StringFieldRow, std::borrow::Cow<'static, str>, "string"} declare_field_row! {IpAddrFieldRow, Ipv6Addr, "ipaddr"} declare_field_row! {UuidFieldRow, Uuid, "uuid"} @@ -1600,30 +1600,23 @@ pub(crate) fn parse_field_select_row( ) -> (TimeseriesKey, Target, Metric) { assert_eq!( row.fields.len(), - 2 * schema.field_schema.len(), - "Expected pairs of (field_name, field_value) from the field query" + schema.field_schema.len(), + "Expected the same number of fields in each row as the schema itself", ); let (target_name, metric_name) = schema.component_names(); let mut target_fields = Vec::new(); let mut metric_fields = Vec::new(); - let mut actual_fields = row.fields.values(); + let mut actual_fields = row.fields.iter(); for _ in 0..schema.field_schema.len() { // Extract the field name from the row and find a matching expected field. 
- let actual_field_name = actual_fields + let (actual_field_name, actual_field_value) = actual_fields .next() .expect("Missing a field name from a field select query"); - let name = actual_field_name - .as_str() - .expect("Expected a string field name") - .to_string(); - let expected_field = schema.schema_for_field(&name).expect( + let expected_field = schema.schema_for_field(actual_field_name).expect( "Found field with name that is not part of the timeseries schema", ); // Parse the field value as the expected type - let actual_field_value = actual_fields - .next() - .expect("Missing a field value from a field select query"); let value = match expected_field.field_type { FieldType::Bool => { FieldValue::Bool(bool::from(DbBool::from( @@ -1723,10 +1716,11 @@ pub(crate) fn parse_field_select_row( .as_str() .expect("Expected a UUID string for a Uuid field from the database") .to_string() + .into() ) } }; - let field = Field { name, value }; + let field = Field { name: actual_field_name.to_string(), value }; match expected_field.source { FieldSource::Target => target_fields.push(field), FieldSource::Metric => metric_fields.push(field), diff --git a/oximeter/db/src/oxql/ast/cmp.rs b/oximeter/db/src/oxql/ast/cmp.rs new file mode 100644 index 0000000000..ea33056c1f --- /dev/null +++ b/oximeter/db/src/oxql/ast/cmp.rs @@ -0,0 +1,76 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! An AST node describing comparison operators + +// Copyright 2024 Oxide Computer Company + +use std::fmt; + +/// Comparison operators. +// TODO-completeness: Operators for other types, like IP containment ('<<'). +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum Comparison { + /// Equality comparison. + Eq, + /// Inequality comparison. + Ne, + /// Greater-than comparison + Gt, + /// Greater-than or equals comparison + Ge, + /// Lesser-than comparison + Lt, + /// Lesser-than or equals comparison + Le, + /// Regular expression pattern matching. + Like, +} + +impl Comparison { + // Return the _function name_ of the comparison that is safe for use in + // ClickHouse. + // + // Note that we're always using the functional form for these comparisons, + // even when they have obvious operators. E.g., we return `"equals"` for the + // `Comparison::Eq` rather than `"=="`. + // + // This is to normalize the different comparisons we support, which do not + // all have operator formats. `Comparison::Like` is the best example, but we + // may also want to support things like IP address containment. While DBs + // like PostgreSQL have the `<<` operator for that, ClickHouse supports only + // the function `isIPAddressInRange()`. + // + // One consequence of this is that the caller needs to wrap the argument in + // parentheses manually. 
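A rough, standalone illustration of the functional form described above (this is a sketch, not the crate's own rendering code): a comparison atom is emitted as `function(argument, argument)` rather than with an infix operator, and the caller supplies the parentheses and both arguments.

```rust
// Standalone sketch: mimic the operator-to-function convention described
// above and render one comparison atom in ClickHouse's functional form.
fn render_comparison(function_name: &str, column: &str, literal: &str) -> String {
    format!("{function_name}({column}, {literal})")
}

fn main() {
    // `foo == 'bar'` is rendered as `equals(foo, 'bar')` ...
    assert_eq!(
        render_comparison("equals", "foo", "'bar'"),
        "equals(foo, 'bar')"
    );
    // ... and `name ~= 'web.*'` as `match(name, 'web.*')`.
    assert_eq!(
        render_comparison("match", "name", "'web.*'"),
        "match(name, 'web.*')"
    );
}
```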
+ pub(crate) fn as_db_function_name(&self) -> &'static str { + match self { + Comparison::Eq => "equals", + Comparison::Ne => "notEquals", + Comparison::Gt => "greater", + Comparison::Ge => "greaterOrEquals", + Comparison::Lt => "less", + Comparison::Le => "lessOrEquals", + Comparison::Like => "match", + } + } +} + +impl fmt::Display for Comparison { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{}", + match self { + Comparison::Eq => "==", + Comparison::Ne => "!=", + Comparison::Gt => ">", + Comparison::Ge => ">=", + Comparison::Lt => "<", + Comparison::Le => "<=", + Comparison::Like => "~=", + } + ) + } +} diff --git a/oximeter/db/src/oxql/ast/grammar.rs b/oximeter/db/src/oxql/ast/grammar.rs new file mode 100644 index 0000000000..a644dff41d --- /dev/null +++ b/oximeter/db/src/oxql/ast/grammar.rs @@ -0,0 +1,1350 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Grammar for the Oximeter Query Language (OxQL). + +// Copyright 2024 Oxide Computer + +peg::parser! { + pub grammar query_parser() for str { + use crate::oxql::ast::cmp::Comparison; + use crate::oxql::ast::table_ops::align::Align; + use crate::oxql::ast::table_ops::align::AlignmentMethod; + use crate::oxql::ast::table_ops::filter::SimpleFilter; + use crate::oxql::ast::table_ops::filter::FilterExpr; + use crate::oxql::ast::table_ops::filter::Filter; + use crate::oxql::ast::table_ops::filter::CompoundFilter; + use crate::oxql::ast::table_ops::get::Get; + use crate::oxql::ast::table_ops::group_by::GroupBy; + use crate::oxql::ast::ident::Ident; + use crate::oxql::ast::literal::Literal; + use crate::oxql::ast::logical_op::LogicalOp; + use crate::oxql::ast::Query; + use crate::oxql::ast::table_ops::join::Join; + use crate::oxql::ast::table_ops::GroupedTableOp; + use crate::oxql::ast::table_ops::BasicTableOp; + use crate::oxql::ast::table_ops::TableOp; + use crate::oxql::ast::table_ops::group_by::Reducer; + use crate::oxql::ast::table_ops::limit::Limit; + use crate::oxql::ast::table_ops::limit::LimitKind; + use crate::oxql::ast::literal::duration_consts; + use oximeter::TimeseriesName; + use std::time::Duration; + use uuid::Uuid; + use chrono::Utc; + use chrono::DateTime; + use chrono::NaiveDateTime; + use chrono::NaiveDate; + use chrono::NaiveTime; + use std::net::IpAddr; + use std::net::Ipv4Addr; + use std::net::Ipv6Addr; + + rule _ = quiet!{[' ' | '\n' | '\t']+} / expected!("whitespace") + + // Parse boolean literals. + rule true_literal() -> bool = "true" { true } + rule false_literal() -> bool = "false" { false } + pub(super) rule boolean_literal_impl() -> bool + = quiet! { true_literal() / false_literal() } / expected!("boolean literal") + + pub rule boolean_literal() -> Literal + = b:boolean_literal_impl() { Literal::Boolean(b) } + + // Parse duration literals. 
+ rule year() -> Duration + = "Y" { duration_consts::YEAR } + rule month() -> Duration + = "M" { duration_consts::MONTH } + rule week() -> Duration + = "w" { duration_consts::WEEK } + rule day() -> Duration + = "d" { duration_consts::DAY } + rule hour() -> Duration + = "h" { duration_consts::HOUR } + rule minute() -> Duration + = "m" { duration_consts::MINUTE } + rule second() -> Duration + = "s" { duration_consts::SECOND } + rule millisecond() -> Duration + = "ms" { duration_consts::MILLISECOND } + rule microsecond() -> Duration + = "us" { duration_consts::MICROSECOND } + rule nanosecond() -> Duration + = "ns" { duration_consts::NANOSECOND } + pub(super) rule duration_literal_impl() -> Duration + = count:integer_literal_impl() base:( + year() / + month() / + week() / day() / + hour() / + millisecond() / + minute() / + second() / + microsecond() / + nanosecond() + ) + {? + // NOTE: This count is the factor by which we multiply the base + // unit. So it counts the number of nanos, millis, or days, etc. It + // does not limit the total duration itself. + let Ok(count) = u32::try_from(count) else { + return Err("invalid count for duration literal"); + }; + base.checked_mul(count).ok_or("overflowed duration literal") + } + + /// Parse a literal duration from a string. + /// + /// Durations are written as a positive integer multiple of a base time + /// unit. For example, `7s` is interpreted as 7 seconds. Supported units + /// are: + /// + /// - 'y': an approximate year, 365 days + /// - 'M': an approximate month, 30 days + /// - 'w': an approximate week, 7 days + /// - 'h': an hour, 3600 seconds + /// - 'm': a minute, 60 seconds + /// - 's': seconds + /// - 'ms': milliseconds + /// - 'us': microseconds + /// - 'ns': nanoseconds + pub rule duration_literal() -> Literal + = d:duration_literal_impl() { Literal::Duration(d) } + + /// Parse a literal timestamp. + /// + /// Timestamps are literals prefixed with `@`. They can be in one of + /// several formats: + /// + /// - YYYY-MM-DD + /// - HH:MM:SS[.f] + /// - RFC 3339, `YYYY-MM-DDTHH:MM:SS.f` + /// - The literal `now()`, possibly with some simple offset expression, + /// such as `now() - 5m`. The offset must be a duration. + /// + /// All timestamps are in UTC. + pub rule timestamp_literal() -> Literal + = t:timestamp_literal_impl() { Literal::Timestamp(t) } + + rule timestamp_literal_impl() -> DateTime + = timestamp_string() + / now_timestamp() + + pub(super) rule timestamp_string() -> DateTime + = "@" s:$(['0'..='9' | '-' | 'T' | ':' | '.']+) + {? + if let Ok(t) = NaiveDate::parse_from_str(s, "%F") { + return Ok(t.and_hms_opt(0, 0, 0).unwrap().and_utc()); + } + if let Ok(t) = NaiveTime::parse_from_str(s, "%H:%M:%S%.f") { + return Ok(NaiveDateTime::new(Utc::now().date_naive(), t).and_utc()); + } + if let Ok(t) = NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S%.f") { + return Ok(t.and_utc()); + } + Err("a recognized timestamp format") + } + + rule now_offset() -> (bool, Duration) + = _? sign:['+' | '-'] _? dur:duration_literal_impl() + { + let negative = matches!(sign, '-'); + (negative, dur) + } + + pub(super) rule now_timestamp() -> DateTime + = "@now()" maybe_offset:now_offset()? 
+ { + let now = Utc::now(); + if let Some((negative, offset)) = maybe_offset { + if negative { + now - offset + } else { + now + offset + } + } else { + now + } + } + + /// Parse an IP address literal, either IPv4 or IPv6 + pub rule ip_literal() -> Literal + = ip:ipv4_literal() { Literal::IpAddr(IpAddr::V4(ip)) } + / ip:ipv6_literal() { Literal::IpAddr(IpAddr::V6(ip)) } + + pub(super) rule ipv4_literal() -> Ipv4Addr + = "\"" s:$((['0'..='9']*<1,3>)**<4> ".") "\"" + {? + s.parse().map_err(|_| "an IPv4 address") + } + + pub(super) rule ipv6_literal() -> Ipv6Addr + = "\"" s:$(['a'..='f' | '0'..='9' | ':']+) "\"" + {? + s.parse().map_err(|_| "an IPv6 address") + } + + rule dashed_uuid_literal() -> Uuid + = s:$( + "\"" + ['a'..='f' | '0'..='9']*<8> "-" + ['a'..='f' | '0'..='9']*<4> "-" + ['a'..='f' | '0'..='9']*<4> "-" + ['a'..='f' | '0'..='9']*<4> "-" + ['a'..='f' | '0'..='9']*<12> + "\"" + ) {? + let Some(middle) = s.get(1..37) else { + return Err("invalid UUID literal"); + }; + middle.parse().or(Err("invalid UUID literal")) + } + rule undashed_uuid_literal() -> Uuid + = s:$("\"" ['a'..='f' | '0'..='9']*<32> "\"") {? + let Some(middle) = s.get(1..33) else { + return Err("invalid UUID literal"); + }; + middle.parse().or(Err("invalid UUID literal")) + } + pub(super) rule uuid_literal_impl() -> Uuid + = dashed_uuid_literal() / undashed_uuid_literal() + + /// Parse UUID literals. + /// + /// UUIDs should be quoted with `"` and can include or omit dashes + /// between the segments. Both of the following are equivalent. + /// + /// "fc59ab26-f1d8-44ca-abbc-dd8f61321433" + /// "fc59ab26f1d844caabbcdd8f61321433" + pub rule uuid_literal() -> Literal + = id:uuid_literal_impl() { Literal::Uuid(id) } + + // Parse string literals. + rule any_but_single_quote() -> String + = s:$([^'\'']*) + {? + recognize_escape_sequences(s).ok_or("invalid single quoted string") + } + + rule any_but_double_quote() -> String + = s:$([^'"']*) + {? + recognize_escape_sequences(s).ok_or("invalid double quoted string") + } + + rule single_quoted_string() -> String + = "'" s:any_but_single_quote() "'" { s } + + rule double_quoted_string() -> String + = "\"" s:any_but_double_quote() "\"" { s } + + pub(super) rule string_literal_impl() -> String + = single_quoted_string() / double_quoted_string() + + /// Parse a string literal, either single- or double-quoted. + /// + /// Parsing string literals is pretty tricky, but we add several + /// constraints to simplify things. First strings must be quoted, either + /// with single- or double-quotes. E.g., the strings `"this"` and + /// `'this'` parse the same way. + /// + /// We require that the string not _contain_ its quote-style, so there + /// can't be any embedded single-quotes in a single-quoted string, or + /// double-quotes in a double-quoted string. Each quote-style may contain + /// the quote from the other style. + /// + /// We support the following common escape sequences: + /// + /// ```ignore + /// \n + /// \r + /// \t + /// \\ + /// \0 + /// ``` + /// + /// Beyond this, any valid Unicode code point, written in the usual Rust + /// style, is supported. For example, `\u{1234}` is accepted and mapped + /// to `ሴ` upon parsing. This also allows users to write both quote + /// styles if required, by writing them as their Unicode escape + /// sequences. For example, this string: + /// + /// ```ignore + /// "this string has \u{22} in it" + /// ``` + /// + /// Will be parsed as `this string has " in it`. 
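Summarizing the literal rules above, these are the forms the grammar accepts (only a restatement of the rules already shown; no new syntax):

```
true, false                              // booleans
5, -5                                    // integers
3.14, -0.5, 1e3                          // doubles
30s, 5m, 2h, 1d                          // durations (integer count + unit)
@2024-01-01                              // timestamp: midnight UTC that day
@12:34:56.789                            // timestamp: today at that UTC time
@2024-01-01T12:34:56.789                 // timestamp: RFC 3339 style
@now() - 5m                              // timestamp: offset from now
"fd00::1", "127.0.0.1"                   // IP addresses (quoted)
"9f8900bd-886d-4988-b623-95b7fda36d23"   // UUIDs (quoted, dashes optional)
'single' or "double" quoted strings      // strings, with \n, \t, \u{..} escapes
```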
+ pub rule string_literal() -> Literal + = s:string_literal_impl() { Literal::String(s) } + + pub(super) rule integer_literal_impl() -> i128 + = n:$("-"? ['0'..='9']+ !['e' | 'E' | '.']) + {? + let Ok(x) = n.parse() else { + return Err("integer literal"); + }; + if x < i128::from(i64::MIN) { + Err("negative overflow") + } else if x > i128::from(u64::MAX) { + Err("positive overflow") + } else { + Ok(x) + } + } + + /// Parse integer literals. + pub rule integer_literal() -> Literal + = n:integer_literal_impl() { Literal::Integer(n) } + + // We're being a bit lazy here, since the rule expression isn't exactly + // right. But we rely on calling `f64`'s `FromStr` implementation to + // actually verify the values can be parsed. + pub(super) rule double_literal_impl() -> f64 + = n:$("-"? ['0'..='9']* "."? ['0'..='9']* (['e' | 'E'] "-"? ['0'..='9']+)*) {? + n.parse().or(Err("double literal")) + } + + // Parse double literals. + pub rule double_literal() -> Literal + = d:double_literal_impl() { Literal::Double(d) } + + /// Parse a literal. + /// + /// Literals are typed, with support for bools, durations, integers and + /// doubles, UUIDs, and general strings. See the rules for each type of + /// literal for details on supported formats. + pub rule literal() -> Literal + = lit:( + boolean_literal() / + duration_literal() / + integer_literal() / + double_literal() / + uuid_literal() / + ip_literal() / + string_literal() / + timestamp_literal() + ) + { + lit + } + + /// Parse a logical operator. + pub(super) rule logical_op_impl() -> LogicalOp + = "||" { LogicalOp::Or} + / "&&" { LogicalOp::And } + / "^" { LogicalOp::Xor } + + + // NOTES: + // + // The rules below are all used to parse a filtering expression. This + // turns out to be surprisingly complicated to express succinctly in + // `peg`, but there are a few tricks. First, it's important that we do + // not try to parse negation ("!") inside the filtering atoms -- it's a + // higher-level concept, and not part of the atom itself. + // + // Second, it's not clear how to use `peg`'s precendence macro to + // correctly describe the precedence. Things are recursive, but we + // choose to define that in the rules themselves, rather than explicitly + // with precedence levels. This is common in PEG definitions, and the + // main trick is force things _not_ to be left-recursive, and use two + // rules tried in sequence. The `factor` rule is a good example of this. + // + // Another example is the logical OR / AND / XOR parsing. We start with + // OR, which is the lowest precedence, and move to the others in + // sequence. Each is defined as parsing either the "thing itself", e.g., + // `foo || bar` for the OR rule; or the rule with next-higher + // precedence. + // + // IMPORTANT: The #[cache] directives on the rules below are _critical_ + // to avoiding wildly exponential runtime with nested expressions. + + /// Parse a logical negation + pub rule not() = "!" + + /// A factor is a logically negated expression, or a primary expression. + #[cache] + pub rule factor() -> Filter + = not() _? factor:factor() + { + Filter { + negated: !factor.negated, + expr: factor.expr + } + } + / p:primary() { p } + + /// A primary expression is either a comparison "atom", e.g., `foo == + /// "bar"`, or a grouping around a sequence of such things. + #[cache] + pub rule primary() -> Filter + = atom:comparison_atom() + {? 
+ if matches!(atom.cmp, Comparison::Like) && !matches!(atom.value, Literal::String(_)) { + Err("~= comparison is only supported for string literals") + } else { + Ok(Filter { negated: false, expr: FilterExpr::Simple(atom) }) + } + } + / "(" _? or:logical_or_expr() _? ")" { or } + + /// A comparison atom is a base-case for all this recursion. + /// + /// It specifies a single comparison between an identifier and a value, + /// using a specific comparison operator. For example, this parses `foo + /// == "bar"`. + pub rule comparison_atom() -> SimpleFilter + = ident:ident() _? cmp:comparison() _? value:literal() + { + SimpleFilter { ident, cmp, value } + } + + /// Two filtering expressions combined with a logical OR. + /// + /// An OR expression is two logical ANDs joined with "||", or just a + /// bare logical AND expression. + #[cache] + pub rule logical_or_expr() -> Filter + = left:logical_and_expr() _? "||" _? right:logical_or_expr() + { + let compound = CompoundFilter { + left: Box::new(left), + op: LogicalOp::Or, + right: Box::new(right), + }; + Filter { negated: false, expr: FilterExpr::Compound(compound) } + } + / logical_and_expr() + + /// Two filtering expressions combined with a logical AND. + /// + /// A logical AND expression is two logical XORs joined with "&&", or + /// just a bare logical XOR expression. + #[cache] + pub rule logical_and_expr() -> Filter + = left:logical_xor_expr() _? "&&" _? right:logical_and_expr() + { + let compound = CompoundFilter { + left: Box::new(left), + op: LogicalOp::And, + right: Box::new(right), + }; + Filter { negated: false, expr: FilterExpr::Compound(compound) } + } + / logical_xor_expr() + + /// Two filtering expressions combined with a logical XOR. + /// + /// A logical XOR expression is two logical XORs joined with "^ or + /// just a bare factor. Note that this either hits the base case, if + /// `factor` is actually an atom, or recurses again if its a logical OR + /// expression. + /// + /// Note that this is the highest-precedence logical operator. + #[cache] + pub rule logical_xor_expr() -> Filter + = left:factor() _? "^" _? right:logical_xor_expr() + { + let compound = CompoundFilter { + left: Box::new(left), + op: LogicalOp::Xor, + right: Box::new(right), + }; + Filter { negated: false, expr: FilterExpr::Compound(compound) } + } + / factor:factor() { factor } + + /// Parse the _logical expression_ part of a `filter` table operation. + pub rule filter_expr() -> Filter = logical_or_expr() + + /// Parse a "filter" table operation. + pub rule filter() -> Filter + = "filter" _ expr:filter_expr() _? + { + expr + } + + pub(super) rule ident_impl() -> &'input str + = quiet!{ inner:$(['a'..='z']+ ['a'..='z' | '0'..='9']* ("_" ['a'..='z' | '0'..='9']+)*) } / + expected!("A valid identifier") + + /// Parse an identifier, usually a column name. + pub rule ident() -> Ident + = inner:ident_impl() { Ident(inner.to_string()) } + + pub(super) rule comparison() -> Comparison + = "==" { Comparison::Eq } + / "!=" { Comparison::Ne } + / ">=" { Comparison::Ge } + / ">" { Comparison::Gt } + / "<=" { Comparison::Le } + / "<" { Comparison::Lt } + / "~=" { Comparison::Like } + + pub rule timeseries_name() -> TimeseriesName + = target_name:ident_impl() ":" metric_name:ident_impl() + {? + format!("{target_name}:{metric_name}") + .try_into() + .map_err(|_| "invalid timeseries name") + } + + rule get_delim() = quiet!{ _? "," _? } + + /// Parse a "get" table operation. 
+ pub rule get() -> Vec + = "get" _ names:(timeseries_name() **<1,> get_delim()) + { + names.into_iter().map(|t| Get { timeseries_name: t }).collect() + } + + /// Parse a reducing operation by name. + pub rule reducer() -> Reducer + = "mean" { Reducer::Mean } + / "sum" { Reducer::Sum } + / expected!("a reducer name") + + rule ws_with_comma() = _? "," _? + pub rule group_by() -> GroupBy + = "group_by" + _ + "[" _? identifiers:(ident() ** ws_with_comma()) ","? _? "]" + reducer:("," _? red:reducer() { red })? + { + GroupBy { + identifiers, + reducer: reducer.unwrap_or_default(), + } + } + + /// Parse a `join` table operation. + pub rule join() = "join" {} + + pub(super) rule alignment_method() -> AlignmentMethod + = "interpolate" { AlignmentMethod::Interpolate } + / "mean_within" { AlignmentMethod::MeanWithin } + + /// Parse an alignment table operation. + pub rule align() -> Align + = "align" _ method:alignment_method() "(" period:duration_literal_impl() ")" + { + Align { method, period } + } + + /// Parse a limit kind + pub rule limit_kind() -> LimitKind + = "first" { LimitKind::First } + / "last" { LimitKind::Last } + + /// Parse a limit table operation + pub rule limit() -> Limit + = kind:limit_kind() _ count:integer_literal_impl() + {? + if count <= 0 || count > usize::MAX as i128 { + return Err("limit count must be a nonzero usize") + }; + let count = std::num::NonZeroUsize::new(count.try_into().unwrap()).unwrap(); + Ok(Limit { kind, count }) + } + + pub(super) rule basic_table_op() -> TableOp + = g:"get" _ t:timeseries_name() { TableOp::Basic(BasicTableOp::Get(t)) } + / f:filter() { TableOp::Basic(BasicTableOp::Filter(f)) } + / g:group_by() { TableOp::Basic(BasicTableOp::GroupBy(g)) } + / join() { TableOp::Basic(BasicTableOp::Join(Join)) } + / a:align() { TableOp::Basic(BasicTableOp::Align(a)) } + / l:limit() { TableOp::Basic(BasicTableOp::Limit(l)) } + + pub(super) rule grouped_table_op() -> TableOp + = "{" _? ops:(query() ++ grouped_table_op_delim()) _? "}" + { + TableOp::Grouped(GroupedTableOp { ops }) + } + + /// Parse a top-level OxQL query. + /// + /// Queries always start with a "get" operation, and may be followed by + /// any number of other timeseries transformations + pub rule query() -> Query + = ops:(basic_table_op() / grouped_table_op()) ++ query_delim() + {? + let query = Query { ops }; + if query.all_gets_at_query_start() { + Ok(query) + } else { + Err("every subquery must start with a `get` operation") + } + } + + rule grouped_table_op_delim() = quiet!{ _? ";" _? } + rule query_delim() = quiet!{ _? "|" _? } + } +} + +// Recognize escape sequences and convert them into the intended Unicode point +// they represent. +// +// For example, the string containing ASCII "abcd" is returned unchanged. +// +// The string containing "\u{1234}" is returned as the string "ሴ". Note that the +// Unicode bytes must be enclosed in {}, and can have length 1-6. +// +// If the string contains an invalid escape sequence, such as "\uFFFF", or a +// control code, such as `\u07`, `None` is returned. +// +// Note that the main goal of this method is to _unescape_ relevant sequences. +// We will get queries that may contain escaped sequences, like `\\\n`, which +// this method will unescape to `\n`. 
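Putting the table operations defined above together, a query is a `|`-separated pipeline that must begin with `get` (or a `{ ...; ... }` group of subqueries, each beginning with `get`). The timeseries and field names below are hypothetical; only the operation syntax comes from the grammar above:

```
get virtual_machine:cpu_busy
    | filter vcpu_id == 0 && timestamp > @now() - 1h
    | align mean_within(1m)
    | group_by [instance_id], sum
    | last 10
```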
+fn recognize_escape_sequences(s: &str) -> Option { + let mut out = String::with_capacity(s.len()); + + let mut chars = s.chars().peekable(); + while let Some(ch) = chars.next() { + match ch { + '\\' => { + let Some(next_ch) = chars.next() else { + // Escape at the end of the string + return None; + }; + match next_ch { + 'n' => out.push('\n'), + 'r' => out.push('\r'), + 't' => out.push('\t'), + '\\' => out.push('\\'), + '0' => out.push('\0'), + 'u' => { + // We need this to be delimited by {}, and between 1 and + // 6 characters long. + if !matches!(chars.next(), Some('{')) { + return None; + } + + let mut digits = String::with_capacity(6); + let mut found_closing_brace = false; + while !found_closing_brace && digits.len() < 7 { + // Take the next value, if it's a hex digit or the + // closing brace. + let Some(next) = chars.next_if(|ch| { + ch.is_ascii_hexdigit() || *ch == '}' + }) else { + break; + }; + if next.is_ascii_hexdigit() { + digits.push(next); + continue; + } + found_closing_brace = true; + } + if !found_closing_brace { + return None; + } + let val = u32::from_str_radix(&digits, 16).ok()?; + let decoded = char::from_u32(val)?; + out.push(decoded) + } + _ => return None, + } + } + _ => out.push(ch), + } + } + Some(out) +} + +#[cfg(test)] +mod tests { + use super::query_parser; + use crate::oxql::ast::cmp::Comparison; + use crate::oxql::ast::grammar::recognize_escape_sequences; + use crate::oxql::ast::ident::Ident; + use crate::oxql::ast::literal::Literal; + use crate::oxql::ast::logical_op::LogicalOp; + use crate::oxql::ast::table_ops::align::Align; + use crate::oxql::ast::table_ops::align::AlignmentMethod; + use crate::oxql::ast::table_ops::filter::CompoundFilter; + use crate::oxql::ast::table_ops::filter::Filter; + use crate::oxql::ast::table_ops::filter::FilterExpr; + use crate::oxql::ast::table_ops::filter::SimpleFilter; + use crate::oxql::ast::table_ops::group_by::Reducer; + use crate::oxql::ast::table_ops::limit::Limit; + use crate::oxql::ast::table_ops::limit::LimitKind; + use chrono::NaiveDate; + use chrono::NaiveDateTime; + use chrono::NaiveTime; + use chrono::TimeZone; + use chrono::Utc; + use std::net::IpAddr; + use std::net::Ipv4Addr; + use std::net::Ipv6Addr; + use std::time::Duration; + use uuid::Uuid; + + #[test] + fn test_boolean_literal() { + assert_eq!(query_parser::boolean_literal_impl("true").unwrap(), true); + assert_eq!(query_parser::boolean_literal_impl("false").unwrap(), false); + } + + #[test] + fn test_duration_literal() { + for (as_str, dur) in [ + ("7Y", Duration::from_secs(60 * 60 * 24 * 365 * 7)), + ("7M", Duration::from_secs(60 * 60 * 24 * 30 * 7)), + ("7w", Duration::from_secs(60 * 60 * 24 * 7 * 7)), + ("7d", Duration::from_secs(60 * 60 * 24 * 7)), + ("7h", Duration::from_secs(60 * 60 * 7)), + ("7m", Duration::from_secs(60 * 7)), + ("7s", Duration::from_secs(7)), + ("7ms", Duration::from_millis(7)), + ("7us", Duration::from_micros(7)), + ("7ns", Duration::from_nanos(7)), + ] { + assert_eq!( + query_parser::duration_literal_impl(as_str).unwrap(), + dur + ); + } + + assert!(query_parser::duration_literal_impl("-1m").is_err()); + let too_big: i64 = i64::from(u32::MAX) + 1; + assert!(query_parser::duration_literal_impl(&format!("{too_big}s")) + .is_err()); + } + + #[test] + fn test_uuid_literal() { + const ID: Uuid = uuid::uuid!("9f8900bd-886d-4988-b623-95b7fda36d23"); + let as_string = format!("\"{}\"", ID); + assert_eq!(query_parser::uuid_literal_impl(&as_string).unwrap(), ID); + let without_dashes = as_string.replace('-', ""); + assert_eq!( + 
query_parser::uuid_literal_impl(&without_dashes).unwrap(), + ID + ); + + assert!(query_parser::uuid_literal_impl( + &as_string[1..as_string.len() - 2] + ) + .is_err()); + assert!(query_parser::uuid_literal_impl( + &without_dashes[1..without_dashes.len() - 2] + ) + .is_err()); + } + + #[test] + fn test_integer_literal() { + assert_eq!(query_parser::integer_literal_impl("1").unwrap(), 1); + assert_eq!(query_parser::integer_literal_impl("-1").unwrap(), -1); + assert_eq!(query_parser::integer_literal_impl("-1").unwrap(), -1); + + assert!(query_parser::integer_literal_impl("-1.0").is_err()); + assert!(query_parser::integer_literal_impl("-1.").is_err()); + assert!(query_parser::integer_literal_impl("1e3").is_err()); + } + + #[test] + fn test_double_literal() { + assert_eq!(query_parser::double_literal_impl("1.0").unwrap(), 1.0); + assert_eq!(query_parser::double_literal_impl("-1.0").unwrap(), -1.0); + assert_eq!(query_parser::double_literal_impl("1.").unwrap(), 1.0); + assert_eq!(query_parser::double_literal_impl("-1.").unwrap(), -1.0); + assert_eq!(query_parser::double_literal_impl(".5").unwrap(), 0.5); + assert_eq!(query_parser::double_literal_impl("-.5").unwrap(), -0.5); + assert_eq!(query_parser::double_literal_impl("1e3").unwrap(), 1e3); + assert_eq!(query_parser::double_literal_impl("-1e3").unwrap(), -1e3); + assert_eq!(query_parser::double_literal_impl("-1e-3").unwrap(), -1e-3); + assert_eq!( + query_parser::double_literal_impl("0.5e-3").unwrap(), + 0.5e-3 + ); + + assert!(query_parser::double_literal_impl("-.e4").is_err()); + assert!(query_parser::double_literal_impl("-.e-4").is_err()); + assert!(query_parser::double_literal_impl("1e").is_err()); + } + + #[test] + fn test_recognize_escape_sequences_with_none() { + for each in ["", "abc", "$%("] { + assert_eq!(recognize_escape_sequences(each).unwrap(), each); + } + } + + #[test] + fn test_recognize_escape_sequence_with_valid_unicode_sequence() { + // Welp, let's just test every possible code point. + for x in 0..=0x10FFFF { + let expected = char::from_u32(x); + let as_hex = format!("{x:0x}"); + let sequence = format!("\\u{{{as_hex}}}"); + let recognized = recognize_escape_sequences(&sequence) + .map(|s| s.chars().next().unwrap()); + assert_eq!( + expected, recognized, + "did not correctly recognized Unicode escape sequence" + ); + } + } + + #[test] + fn test_recognize_escape_sequences_with_invalid_unicode_sequence() { + for each in [ + r#"\uFFFF"#, // Valid, but not using {} delimiters + r#"\u{}"#, // Not enough characters. + r#"\u{12345678}"#, // Too many characters + r#"\u{ZZZZ}"#, // Not hex digits + r#"\u{d800}"#, // A surrogate code point, not valid. + r#"\u{1234"#, // Valid, but missing closing brace. 
+ ] { + println!("{each}"); + assert!(recognize_escape_sequences(each).is_none()); + } + } + + #[test] + fn test_recognize_escape_sequences_with_valid_escape_sequence() { + for (as_str, expected) in [ + (r#"\n"#, '\n'), + (r#"\r"#, '\r'), + (r#"\t"#, '\t'), + (r#"\0"#, '\0'), + (r#"\\"#, '\\'), + ] { + let recognized = recognize_escape_sequences(as_str).unwrap(); + assert_eq!(recognized.chars().next().unwrap(), expected); + } + } + + #[test] + fn test_single_quoted_string_literal() { + for (input, expected) in [ + ("''", String::new()), + ("'simple'", String::from("simple")), + ("'袈►♖'", String::from("袈►♖")), + (r#"'escapes \n handled'"#, String::from("escapes \n handled")), + (r#"'may contain " in it'"#, String::from("may contain \" in it")), + ( + r#"'may contain "\u{1234}" in it'"#, + String::from("may contain \"ሴ\" in it"), + ), + ] { + assert_eq!( + query_parser::string_literal_impl(input).unwrap(), + expected + ); + } + assert!(query_parser::string_literal_impl(r#"' cannot have ' in it'"#) + .is_err()); + } + + #[test] + fn test_double_quoted_string_literal() { + for (input, expected) in [ + ("\"\"", String::new()), + ("\"simple\"", String::from("simple")), + ("\"袈►♖\"", String::from("袈►♖")), + (r#""escapes \n handled""#, String::from("escapes \n handled")), + (r#""may contain ' in it""#, String::from("may contain ' in it")), + ( + r#""may contain '\u{1234}' in it""#, + String::from("may contain 'ሴ' in it"), + ), + ] { + assert_eq!( + query_parser::string_literal_impl(input).unwrap(), + expected + ); + } + + assert!(query_parser::string_literal_impl(r#"" cannot have " in it""#) + .is_err()); + } + + #[test] + fn test_comparison() { + for (as_str, cmp) in [ + ("==", Comparison::Eq), + ("!=", Comparison::Ne), + (">=", Comparison::Ge), + (">", Comparison::Gt), + ("<=", Comparison::Le), + ("<", Comparison::Lt), + ("~=", Comparison::Like), + ] { + assert_eq!(query_parser::comparison(as_str).unwrap(), cmp); + } + } + + #[test] + fn test_filter_expr_single_simple_expression() { + let expr = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("a".to_string()), + cmp: Comparison::Eq, + value: Literal::Boolean(true), + }), + }; + assert_eq!(query_parser::filter_expr("a == true").unwrap(), expr); + assert_eq!(query_parser::filter_expr("(a == true)").unwrap(), expr); + + assert!(query_parser::filter_expr("(a == true").is_err()); + } + + #[test] + fn test_filter_expr_single_negated_simple_expression() { + let expr = Filter { + negated: true, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("a".to_string()), + cmp: Comparison::Gt, + value: Literal::Double(1.0), + }), + }; + assert_eq!(query_parser::filter_expr("!(a > 1.)").unwrap(), expr,); + + assert!(query_parser::filter_expr("!(a > 1.0").is_err()); + } + + #[test] + fn test_filter_expr_two_simple_filter_expressions() { + let left = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("a".to_string()), + cmp: Comparison::Eq, + value: Literal::Boolean(true), + }), + }; + + for op in [LogicalOp::And, LogicalOp::Or] { + let expected = left.merge(&left, op); + // Match with either parenthesized. 
+ let as_str = format!("a == true {op} (a == true)"); + assert_eq!(query_parser::filter_expr(&as_str).unwrap(), expected); + let as_str = format!("(a == true) {op} a == true"); + assert_eq!(query_parser::filter_expr(&as_str).unwrap(), expected); + let as_str = format!("(a == true) {op} (a == true)"); + assert_eq!(query_parser::filter_expr(&as_str).unwrap(), expected); + } + } + + #[test] + fn test_filter_expr_operator_precedence() { + // We'll combine the following simple expression in a number of + // different sequences, to check that we correctly group by operator + // precedence. + let atom = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("a".to_string()), + cmp: Comparison::Eq, + value: Literal::Boolean(true), + }), + }; + let as_str = "a == true || a == true && a == true ^ a == true"; + let parsed = query_parser::filter_expr(as_str).unwrap(); + assert_eq!( + parsed.to_string(), + "((a == true) || ((a == true) && ((a == true) ^ (a == true))))" + ); + + // This should bind most tighty from right to left: XOR, then AND, then + // OR. Since we're destructuring from out to in, though, we check in the + // opposite order, weakest to strongest, or left to right. + // + // Start with OR, which should bind the most weakly. + assert!(!parsed.negated); + let FilterExpr::Compound(CompoundFilter { left, op, right }) = + parsed.expr + else { + unreachable!(); + }; + assert!(!left.negated); + assert!(!right.negated); + assert_eq!(op, LogicalOp::Or); + assert_eq!(atom, *left); + + // && should bind next-most tightly + let FilterExpr::Compound(CompoundFilter { left, op, right }) = + right.expr + else { + unreachable!(); + }; + assert!(!left.negated); + assert!(!right.negated); + assert_eq!(op, LogicalOp::And); + assert_eq!(atom, *left); + + // Followed by XOR, the tightest binding operator. + let FilterExpr::Compound(CompoundFilter { left, op, right }) = + right.expr + else { + unreachable!(); + }; + assert!(!left.negated); + assert!(!right.negated); + assert_eq!(op, LogicalOp::Xor); + assert_eq!(atom, *left); + assert_eq!(atom, *right); + } + + #[test] + fn test_filter_expr_overridden_precedence() { + // Similar to above, we'll test with a single atom, and group in a + // number of ways. + let atom = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("a".to_string()), + cmp: Comparison::Eq, + value: Literal::Boolean(true), + }), + }; + let as_str = "(a == true || a == true) && a == true"; + let parsed = query_parser::filter_expr(as_str).unwrap(); + + // Now, || should bind more tightly, so we should have (a && b) at the + // top-level, where b is the test atom. We're comparing the atom at the + // _right_ now with the original expressions. + assert!(!parsed.negated); + let FilterExpr::Compound(CompoundFilter { left, op, right }) = + parsed.expr + else { + unreachable!(); + }; + assert!(!left.negated); + assert!(!right.negated); + assert_eq!(op, LogicalOp::And); + assert_eq!(atom, *right); + + // Destructure the LHS and check it. 
+ let FilterExpr::Compound(CompoundFilter { left, op, right }) = + left.expr + else { + unreachable!(); + }; + assert!(!left.negated); + assert!(!right.negated); + assert_eq!(op, LogicalOp::Or); + assert_eq!(atom, *left); + assert_eq!(atom, *right); + } + + #[test] + fn test_negated_filter_expr() { + let left = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("a".into()), + cmp: Comparison::Eq, + value: Literal::Boolean(true), + }), + }; + let right = left.negate(); + let top = left.merge(&right, LogicalOp::Xor).negate(); + let as_str = "!(a == true ^ !(a == true))"; + let parsed = query_parser::filter_expr(as_str).unwrap(); + assert_eq!(top, parsed); + } + + #[test] + fn test_filter_table_op() { + for expr in [ + "filter field == 0", + "filter baz == 'quux'", + "filter other_field != 'yes'", + "filter id != \"45c937fb-5e99-4a86-a95b-22bf30bf1507\"", + "filter (foo == 'bar') || ((yes != \"no\") && !(maybe > 'so'))", + ] { + let parsed = query_parser::filter(expr).unwrap_or_else(|_| { + panic!("failed to parse query: '{}'", expr) + }); + println!("{parsed:#?}"); + } + } + + #[test] + fn test_get_table_op() { + for expr in [ + "get foo:bar", + "get target_name:metric_name", + "get target_name_0:metric_name000", + ] { + let parsed = query_parser::get(expr).unwrap_or_else(|_| { + panic!("failed to parse get expr: '{}'", expr) + }); + println!("{parsed:#?}"); + } + + assert!(query_parser::get("get foo").is_err()); + assert!(query_parser::get("get foo:").is_err()); + assert!(query_parser::get("get :bar").is_err()); + assert!(query_parser::get("get 0:0").is_err()); + } + + #[test] + fn test_ident() { + for id in ["foo", "foo0", "foo_0_1_2"] { + query_parser::ident(id) + .unwrap_or_else(|_| panic!("failed to identifier: '{id}'")); + } + + for id in ["0foo", "0", "A", "", "%", "foo_"] { + query_parser::ident(id).expect_err(&format!( + "should not have parsed as identifier: '{}'", + id + )); + } + } + + #[test] + fn test_group_by() { + for q in [ + "group_by []", + "group_by [baz]", + "group_by [baz,]", + "group_by [baz,another_field]", + "group_by [baz,another_field,]", + ] { + let parsed = query_parser::group_by(q) + .unwrap_or_else(|_| panic!("failed to parse group_by: '{q}'")); + println!("{parsed:#?}"); + } + } + + #[test] + fn test_query() { + for q in [ + "get foo:bar", + "get foo:bar | group_by []", + "get foo:bar | group_by [baz]", + "get foo:bar | filter baz == 'quuz'", + "get foo:bar | filter (some == 0) && (id == false || a == -1.0)", + "get foo:bar | group_by [baz] | filter baz == 'yo'", + "{ get foo:bar | filter x == 0; get x:y } | join", + "{ get foo:bar ; get x:y } | join | filter baz == 0", + "get foo:bar | align interpolate(10s)", + ] { + let parsed = query_parser::query(q) + .unwrap_or_else(|_| panic!("failed to parse query: '{q}'")); + println!("{parsed:#?}"); + } + } + + #[test] + fn test_reducer() { + assert_eq!(query_parser::reducer("mean").unwrap(), Reducer::Mean); + assert!(query_parser::reducer("foo").is_err()); + } + + #[test] + fn test_parse_literal_timestamp_string() { + assert_eq!( + query_parser::timestamp_string("@2020-01-01").unwrap(), + Utc.with_ymd_and_hms(2020, 1, 1, 0, 0, 0).unwrap(), + ); + assert_eq!( + query_parser::timestamp_string("@01:01:01").unwrap().time(), + NaiveTime::from_hms_opt(1, 1, 1).unwrap(), + ); + assert_eq!( + query_parser::timestamp_string("@01:01:01.123456").unwrap().time(), + NaiveTime::from_hms_micro_opt(1, 1, 1, 123456).unwrap(), + ); + assert_eq!( + 
query_parser::timestamp_string("@2020-01-01T01:01:01.123456") + .unwrap(), + NaiveDateTime::new( + NaiveDate::from_ymd_opt(2020, 1, 1).unwrap(), + NaiveTime::from_hms_micro_opt(1, 1, 1, 123456).unwrap(), + ) + .and_utc(), + ); + } + + #[test] + fn test_parse_ipv4_literal() { + let check = |s: &str, addr: IpAddr| { + let Literal::IpAddr(ip) = query_parser::ip_literal(s).unwrap() + else { + panic!("expected '{}' to be parsed into {}", s, addr); + }; + assert_eq!(ip, addr); + }; + check("\"100.100.100.100\"", Ipv4Addr::new(100, 100, 100, 100).into()); + check("\"1.2.3.4\"", Ipv4Addr::new(1, 2, 3, 4).into()); + check("\"0.0.0.0\"", Ipv4Addr::UNSPECIFIED.into()); + + assert!(query_parser::ip_literal("\"abcd\"").is_err()); + assert!(query_parser::ip_literal("\"1.1.1.\"").is_err()); + assert!(query_parser::ip_literal("\"1.1.1.1.1.1\"").is_err()); + assert!(query_parser::ip_literal("\"2555.1.1.1\"").is_err()); + assert!(query_parser::ip_literal("1.2.3.4").is_err()); // no quotes + } + + #[test] + fn test_parse_ipv6_literal() { + let check = |s: &str, addr: IpAddr| { + let Literal::IpAddr(ip) = query_parser::ip_literal(s).unwrap() + else { + panic!("expected '{}' to be parsed into {}", s, addr); + }; + assert_eq!(ip, addr); + }; + + // IPv6 is nuts, let's just check a few common patterns. + check("\"::1\"", Ipv6Addr::LOCALHOST.into()); + check("\"::\"", Ipv6Addr::UNSPECIFIED.into()); + check("\"fd00::1\"", Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 1).into()); + check( + "\"fd00:1:2:3:4:5:6:7\"", + Ipv6Addr::new(0xfd00, 1, 2, 3, 4, 5, 6, 7).into(), + ); + + // Don't currently support IPv6-mapped IPv4 addresses + assert!(query_parser::ip_literal("\"::ffff:127.0.0.1\"").is_err()); + + // Other obviously bad patterns. + assert!(query_parser::ip_literal("\"1\"").is_err()); + assert!(query_parser::ip_literal("\":1::1::1\"").is_err()); + assert!(query_parser::ip_literal("\"::g\"").is_err()); + assert!(query_parser::ip_literal("\":::\"").is_err()); + assert!(query_parser::ip_literal("::1").is_err()); // no quotes + } + + #[test] + fn test_query_starts_with_get() { + assert!(query_parser::query("{ get a:b }") + .unwrap() + .all_gets_at_query_start()); + assert!(query_parser::query("{ get a:b; get a:b } | join") + .unwrap() + .all_gets_at_query_start()); + assert!(query_parser::query( + "{ { get a:b ; get a:b } | join; get c:d } | join" + ) + .unwrap() + .all_gets_at_query_start()); + + assert!(query_parser::query("{ get a:b; filter foo == 0 }").is_err()); + assert!(query_parser::query("{ get a:b; filter foo == 0 }").is_err()); + assert!(query_parser::query("get a:b | get a:b").is_err()); + } + + #[test] + fn test_like_only_available_for_strings() { + assert!(query_parser::filter_expr("foo ~= 0").is_err()); + assert!(query_parser::filter_expr("foo ~= \"something\"").is_ok()); + } + + #[test] + fn test_align_table_op() { + assert_eq!( + query_parser::align("align interpolate(1m)").unwrap(), + Align { + method: AlignmentMethod::Interpolate, + period: Duration::from_secs(60) + } + ); + assert_eq!( + query_parser::align("align mean_within(100s)").unwrap(), + Align { + method: AlignmentMethod::MeanWithin, + period: Duration::from_secs(100) + } + ); + + assert!(query_parser::align("align whatever(100s)").is_err()); + assert!(query_parser::align("align interpolate('foo')").is_err()); + } + + #[test] + fn test_complicated_logical_combinations() { + let parsed = + query_parser::logical_or_expr("a == 'b' ^ !(c == 0) && d == false") + .unwrap(); + + // Build up this expected expression from its components. 
+ let left = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("a".to_string()), + cmp: Comparison::Eq, + value: Literal::String("b".into()), + }), + }; + let middle = Filter { + negated: true, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("c".to_string()), + cmp: Comparison::Eq, + value: Literal::Integer(0), + }), + }; + let right = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("d".to_string()), + cmp: Comparison::Eq, + value: Literal::Boolean(false), + }), + }; + + // The left and right are bound most tightly, by the XOR operator. + let xor = Filter { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: Box::new(left), + op: LogicalOp::Xor, + right: Box::new(middle), + }), + }; + + // And then those two together are joined with the AND. + let expected = Filter { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: Box::new(xor), + op: LogicalOp::And, + right: Box::new(right), + }), + }; + assert_eq!(parsed, expected); + } + + #[test] + fn test_multiple_negation() { + let negated = + query_parser::filter_expr("(a == 0) || !!!(a == 0 && a == 0)") + .unwrap(); + let expected = + query_parser::filter_expr("(a == 0) || !(a == 0 && a == 0)") + .unwrap(); + assert_eq!(negated, expected, "Failed to handle multiple negations"); + } + + #[test] + fn test_limiting_table_ops() { + assert_eq!( + query_parser::limit("first 100").unwrap(), + Limit { kind: LimitKind::First, count: 100.try_into().unwrap() }, + ); + assert_eq!( + query_parser::limit("last 100").unwrap(), + Limit { kind: LimitKind::Last, count: 100.try_into().unwrap() }, + ); + + assert!(query_parser::limit(&format!( + "first {}", + usize::MAX as i128 + 1 + )) + .is_err()); + assert!(query_parser::limit("first 0").is_err()); + assert!(query_parser::limit("first -1").is_err()); + assert!(query_parser::limit("first \"foo\"").is_err()); + } +} diff --git a/oximeter/db/src/oxql/ast/ident.rs b/oximeter/db/src/oxql/ast/ident.rs new file mode 100644 index 0000000000..6fb2dab85a --- /dev/null +++ b/oximeter/db/src/oxql/ast/ident.rs @@ -0,0 +1,25 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! OxQL identifiers, such as column names. + +// Copyright 2024 Oxide Computer Company + +use std::fmt; + +/// An identifier, such as a column or function name. +#[derive(Clone, Debug, PartialEq)] +pub struct Ident(pub(in crate::oxql) String); + +impl Ident { + pub fn as_str(&self) -> &str { + self.0.as_str() + } +} + +impl fmt::Display for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/oximeter/db/src/oxql/ast/literal.rs b/oximeter/db/src/oxql/ast/literal.rs new file mode 100644 index 0000000000..d80977fe49 --- /dev/null +++ b/oximeter/db/src/oxql/ast/literal.rs @@ -0,0 +1,390 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! AST node for literal values. 
+ +// Copyright 2024 Oxide Computer Company + +use crate::oxql::ast::cmp::Comparison; +use crate::oxql::Error; +use anyhow::Context; +use chrono::DateTime; +use chrono::Utc; +use oximeter::FieldType; +use oximeter::FieldValue; +use regex::Regex; +use std::borrow::Borrow; +use std::fmt; +use std::net::IpAddr; +use std::time::Duration; +use uuid::Uuid; + +/// A literal value. +#[derive(Clone, Debug, PartialEq)] +pub enum Literal { + // TODO-performance: An i128 here is a bit gratuitous. + Integer(i128), + Double(f64), + String(String), + Boolean(bool), + Uuid(Uuid), + Duration(Duration), + Timestamp(DateTime), + IpAddr(IpAddr), +} + +impl Literal { + // Format the literal as a safe, typed string for ClickHouse. + pub(crate) fn as_db_safe_string(&self) -> String { + match self { + Literal::Integer(inner) => format!("{inner}"), + Literal::Double(inner) => format!("{inner}"), + Literal::String(inner) => format!("'{inner}'"), + Literal::Boolean(inner) => format!("{inner}"), + Literal::Uuid(inner) => format!("'{inner}'"), + Literal::Duration(inner) => { + let (count, interval) = duration_to_db_interval(inner); + format!("INTERVAL {} {}", count, interval) + } + Literal::Timestamp(inner) => { + format!("'{}'", inner.format(crate::DATABASE_TIMESTAMP_FORMAT)) + } + Literal::IpAddr(inner) => { + // NOTE: We store all IP addresses in ClickHouse as IPv6, with + // IPv4 addresses mapped to that. To run a comparison against a + // literal in Rust, we can use the value directly, since we + // decode it an convert to the right type during + // deserialization. But to compare in the DB itself, we need to + // do that with an IPv4-mapped IPv6 address. + // + // Helpfully, ClickHouse's `toIPv6` function takes a string of + // either family, and maps IPv4 into the IPv6 space, if needed. + format!("toIPv6('{inner}')") + } + } + } + + // Return true if this literal can be compared to a field of the provided + // type. + pub(crate) fn is_compatible_with_field( + &self, + field_type: FieldType, + ) -> bool { + match self { + Literal::Integer(_) => matches!( + field_type, + FieldType::U8 + | FieldType::I8 + | FieldType::U16 + | FieldType::I16 + | FieldType::U32 + | FieldType::I32 + | FieldType::U64 + | FieldType::I64 + ), + Literal::Double(_) => false, + Literal::String(_) => matches!(field_type, FieldType::String), + Literal::Boolean(_) => matches!(field_type, FieldType::Bool), + Literal::Uuid(_) => matches!(field_type, FieldType::Uuid), + Literal::Duration(_) => false, + Literal::Timestamp(_) => false, + Literal::IpAddr(_) => matches!(field_type, FieldType::IpAddr), + } + } + + /// Apply the comparison op between self and the provided field. + /// + /// Return None if the comparison cannot be applied, either because the type + /// is not compatible or the comparison doesn't make sense. + pub(crate) fn compare_field( + &self, + value: &FieldValue, + cmp: Comparison, + ) -> Result, Error> { + anyhow::ensure!( + self.is_compatible_with_field(value.field_type()), + "Field value of type {} is cannot be compared to \ + the value in this filter", + value.field_type(), + ); + macro_rules! generate_cmp_match { + ($lhs:ident, $rhs:ident) => { + match cmp { + Comparison::Eq => Ok(Some($lhs == $rhs)), + Comparison::Ne => Ok(Some($lhs != $rhs)), + Comparison::Gt => Ok(Some($lhs > $rhs)), + Comparison::Ge => Ok(Some($lhs >= $rhs)), + Comparison::Lt => Ok(Some($lhs < $rhs)), + Comparison::Le => Ok(Some($lhs <= $rhs)), + Comparison::Like => Ok(None), + } + }; + } + // Filter expressions are currently written as ` + // `. 
That means the literal stored in `self` is the RHS of + // the comparison, and the field value passed in is the LHS. + match (value, self) { + (FieldValue::Bool(lhs), Literal::Boolean(rhs)) => { + generate_cmp_match!(rhs, lhs) + } + (FieldValue::String(lhs), Literal::String(rhs)) => { + let lhs = lhs.borrow(); + let rhs = rhs.as_ref(); + match cmp { + Comparison::Eq => Ok(Some(lhs == rhs)), + Comparison::Ne => Ok(Some(lhs != rhs)), + Comparison::Gt => Ok(Some(lhs > rhs)), + Comparison::Ge => Ok(Some(lhs >= rhs)), + Comparison::Lt => Ok(Some(lhs < rhs)), + Comparison::Le => Ok(Some(lhs <= rhs)), + Comparison::Like => { + let re = Regex::new(rhs).context( + "failed to create regex for string matching", + )?; + Ok(Some(re.is_match(lhs))) + } + } + } + (FieldValue::IpAddr(lhs), Literal::IpAddr(rhs)) => { + generate_cmp_match!(rhs, lhs) + } + (FieldValue::Uuid(lhs), Literal::Uuid(rhs)) => { + generate_cmp_match!(rhs, lhs) + } + (FieldValue::U8(lhs), Literal::Integer(rhs)) => { + let lhs = i128::from(*lhs); + let rhs = *rhs; + generate_cmp_match!(lhs, rhs) + } + (FieldValue::I8(lhs), Literal::Integer(rhs)) => { + let lhs = i128::from(*lhs); + let rhs = *rhs; + generate_cmp_match!(lhs, rhs) + } + (FieldValue::U16(lhs), Literal::Integer(rhs)) => { + let lhs = i128::from(*lhs); + let rhs = *rhs; + generate_cmp_match!(lhs, rhs) + } + (FieldValue::I16(lhs), Literal::Integer(rhs)) => { + let lhs = i128::from(*lhs); + let rhs = *rhs; + generate_cmp_match!(lhs, rhs) + } + (FieldValue::U32(lhs), Literal::Integer(rhs)) => { + let lhs = i128::from(*lhs); + let rhs = *rhs; + generate_cmp_match!(lhs, rhs) + } + (FieldValue::I32(lhs), Literal::Integer(rhs)) => { + let lhs = i128::from(*lhs); + let rhs = *rhs; + generate_cmp_match!(lhs, rhs) + } + (FieldValue::U64(lhs), Literal::Integer(rhs)) => { + let lhs = i128::from(*lhs); + let rhs = *rhs; + generate_cmp_match!(lhs, rhs) + } + (FieldValue::I64(lhs), Literal::Integer(rhs)) => { + let lhs = i128::from(*lhs); + let rhs = *rhs; + generate_cmp_match!(lhs, rhs) + } + (_, _) => unreachable!(), + } + } +} + +/// Duration constants used for interpreting duration literals. +/// +/// Many of the values here are **approximate**. For example, a "year" is always +/// 365 24-hour periods, regardless of leap years, the current time, or any +/// other context. +pub(crate) mod duration_consts { + use std::time::Duration; + + /// Approximately 1 year, 365 24-hour periods. + pub const YEAR: Duration = Duration::from_secs(60 * 60 * 24 * 365); + + /// Approximately 1 month, 30 24-hour periods. + pub const MONTH: Duration = Duration::from_secs(60 * 60 * 24 * 30); + + /// Approximately 1 week, 7 24-hour periods. + pub const WEEK: Duration = Duration::from_secs(60 * 60 * 24 * 7); + + /// One day, equal to 24 hours. + pub const DAY: Duration = Duration::from_secs(60 * 60 * 24); + + /// An hour, exactly 3600 seconds. + pub const HOUR: Duration = Duration::from_secs(60 * 60); + + /// A minute, exactly 60 seconds. + pub const MINUTE: Duration = Duration::from_secs(60); + + /// One second. + pub const SECOND: Duration = Duration::from_secs(1); + + /// One millisecond, a thousandth of a second. + pub const MILLISECOND: Duration = Duration::from_millis(1); + + /// One microsecond, a millionth of a second. + pub const MICROSECOND: Duration = Duration::from_micros(1); + + /// One nanosecond, a billionth of a second. + pub const NANOSECOND: Duration = Duration::from_nanos(1); +} + +// Convert a duration into an appropriate interval for a database query. 
+// +// This converts the provided duration into the largest interval type for which +// the value is an integer. For example: +// +// `1us` -> (1, "MICROSECOND"), +// `3.4s` -> (3400, "MILLISECOND") +fn duration_to_db_interval(dur: &Duration) -> (u64, &'static str) { + fn as_whole_multiple(dur: &Duration, base: &Duration) -> Option { + let d = dur.as_nanos(); + let base = base.as_nanos(); + if d % base == 0 { + Some(u64::try_from(d / base).unwrap()) + } else { + None + } + } + use duration_consts::*; + const INTERVALS: [(Duration, &str); 10] = [ + (YEAR, "YEAR"), + (MONTH, "MONTH"), + (WEEK, "WEEK"), + (DAY, "DAY"), + (HOUR, "HOUR"), + (MINUTE, "MINUTE"), + (SECOND, "SECOND"), + (MILLISECOND, "MILLISECOND"), + (MICROSECOND, "MICROSECOND"), + (NANOSECOND, "NANOSECOND"), + ]; + for (base, interval) in &INTERVALS { + if let Some(count) = as_whole_multiple(dur, base) { + return (count, interval); + } + } + + // Durations must be a whole number of nanoseconds, so we will never fall + // past the last interval in the array above. + unreachable!(); +} + +impl fmt::Display for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Literal::Integer(inner) => write!(f, "{inner}"), + Literal::Double(inner) => write!(f, "{inner}"), + Literal::String(inner) => write!(f, "{inner:?}"), + Literal::Boolean(inner) => write!(f, "{inner}"), + Literal::Uuid(inner) => write!(f, "\"{inner}\""), + Literal::Duration(inner) => write!(f, "{inner:?}"), + Literal::Timestamp(inner) => write!(f, "@{inner}"), + Literal::IpAddr(inner) => write!(f, "{inner}"), + } + } +} + +#[cfg(test)] +mod tests { + use super::duration_consts::*; + use super::duration_to_db_interval; + use super::Literal; + use crate::oxql::ast::cmp::Comparison; + use oximeter::FieldValue; + + #[test] + fn test_duration_to_db_interval() { + for base in [1_u32, 2, 3] { + let b = u64::from(base); + assert_eq!(duration_to_db_interval(&(base * YEAR)), (b, "YEAR")); + assert_eq!(duration_to_db_interval(&(base * MONTH)), (b, "MONTH")); + assert_eq!(duration_to_db_interval(&(base * WEEK)), (b, "WEEK")); + assert_eq!(duration_to_db_interval(&(base * DAY)), (b, "DAY")); + assert_eq!(duration_to_db_interval(&(base * HOUR)), (b, "HOUR")); + assert_eq!( + duration_to_db_interval(&(base * MINUTE)), + (b, "MINUTE") + ); + assert_eq!( + duration_to_db_interval(&(base * SECOND)), + (b, "SECOND") + ); + assert_eq!( + duration_to_db_interval(&(base * MILLISECOND)), + (b, "MILLISECOND") + ); + assert_eq!( + duration_to_db_interval(&(base * MICROSECOND)), + (b, "MICROSECOND") + ); + assert_eq!( + duration_to_db_interval(&(base * NANOSECOND)), + (b, "NANOSECOND") + ); + } + assert_eq!(duration_to_db_interval(&(YEAR / 2)), (4380, "HOUR")); + assert_eq!(duration_to_db_interval(&(HOUR / 60)), (1, "MINUTE")); + assert_eq!(duration_to_db_interval(&(HOUR / 10)), (6, "MINUTE")); + assert_eq!(duration_to_db_interval(&(HOUR / 12)), (5, "MINUTE")); + assert_eq!(duration_to_db_interval(&(HOUR / 120)), (30, "SECOND")); + assert_eq!(duration_to_db_interval(&(MINUTE / 2)), (30, "SECOND")); + assert_eq!(duration_to_db_interval(&(MINUTE / 10)), (6, "SECOND")); + assert_eq!( + duration_to_db_interval(&MINUTE.mul_f64(1.5)), + (90, "SECOND") + ); + assert_eq!( + duration_to_db_interval(&MICROSECOND.mul_f64(1.5)), + (1500, "NANOSECOND") + ); + assert_eq!( + duration_to_db_interval(&(YEAR + NANOSECOND)), + (31536000000000001, "NANOSECOND") + ); + } + + #[test] + fn test_literal_compare_field() { + let value = FieldValue::I64(3); + let lit = Literal::Integer(4); + + 
// The literal comparison would be written like: `field >= 4` where + // `field` has a value of 3 here. So the comparison is false. + assert_eq!( + lit.compare_field(&value, Comparison::Ge).unwrap(), + Some(false) + ); + + // Reversing this, we should have true. + assert_eq!( + lit.compare_field(&value, Comparison::Lt).unwrap(), + Some(true) + ); + + // It should not be equal. + assert_eq!( + lit.compare_field(&value, Comparison::Eq).unwrap(), + Some(false) + ); + assert_eq!( + lit.compare_field(&value, Comparison::Ne).unwrap(), + Some(true) + ); + } + + #[test] + fn test_literal_compare_field_wrong_type() { + let value = + FieldValue::String(std::borrow::Cow::Owned(String::from("foo"))); + let lit = Literal::Integer(4); + assert!(lit.compare_field(&value, Comparison::Eq).is_err()); + } +} diff --git a/oximeter/db/src/oxql/ast/logical_op.rs b/oximeter/db/src/oxql/ast/logical_op.rs new file mode 100644 index 0000000000..60fc5d134f --- /dev/null +++ b/oximeter/db/src/oxql/ast/logical_op.rs @@ -0,0 +1,41 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! An AST node describing logical operators. + +// Copyright 2024 Oxide Computer Company + +use std::fmt; + +/// Logical operators. +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum LogicalOp { + And, + Or, + Xor, +} + +impl LogicalOp { + pub(crate) fn as_db_function_name(&self) -> &'static str { + match self { + LogicalOp::And => "and", + LogicalOp::Or => "or", + LogicalOp::Xor => "xor", + } + } +} + +impl fmt::Display for LogicalOp { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{}", + match self { + LogicalOp::And => "&&", + LogicalOp::Or => "||", + LogicalOp::Xor => "^", + } + ) + } +} diff --git a/oximeter/db/src/oxql/ast/mod.rs b/oximeter/db/src/oxql/ast/mod.rs new file mode 100644 index 0000000000..7037b74a7f --- /dev/null +++ b/oximeter/db/src/oxql/ast/mod.rs @@ -0,0 +1,152 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! AST for the Oximeter Query Language. + +// Copyright 2024 Oxide Computer Company + +use chrono::DateTime; +use chrono::Utc; +use oximeter::TimeseriesName; + +use self::table_ops::BasicTableOp; +use self::table_ops::GroupedTableOp; +use self::table_ops::TableOp; +pub mod cmp; +pub(super) mod grammar; +pub mod ident; +pub mod literal; +pub mod logical_op; +pub mod table_ops; + +/// An OxQL query. +#[derive(Clone, Debug, PartialEq)] +pub struct Query { + ops: Vec, +} + +impl Query { + // Return the first operation in the query, which is always a form of `get`. + fn first_op(&self) -> &TableOp { + self.ops.first().expect("Should have parsed at least 1 operation") + } + + pub(crate) fn timeseries_name(&self) -> &TimeseriesName { + match self.first_op() { + TableOp::Basic(BasicTableOp::Get(n)) => n, + TableOp::Basic(_) => unreachable!(), + TableOp::Grouped(GroupedTableOp { ops }) => { + ops.first().unwrap().timeseries_name() + } + } + } + + // Check that this query (and any subqueries) start with a get table op, and + // that there are no following get operations. I.e., we have: + // + // get ... | + // { get .. } | + // { get .. ; get .. 
} | + pub(crate) fn all_gets_at_query_start(&self) -> bool { + fn all_gets_at_query_start(ops: &[TableOp]) -> bool { + let (head, tail) = ops.split_at(1); + match &head[0] { + // If the head is a get, check that there are no following get + // operations. + TableOp::Basic(BasicTableOp::Get(_)) => { + !tail.iter().any(|op| { + matches!(op, TableOp::Basic(BasicTableOp::Get(_))) + }) + } + // Cannot start with any other basic op. + TableOp::Basic(_) => false, + // Recurse for grouped ops. + TableOp::Grouped(GroupedTableOp { ops }) => { + ops.iter().all(Query::all_gets_at_query_start) + } + } + } + all_gets_at_query_start(&self.ops) + } + + // Return the non-get table transformations. + pub(crate) fn transformations(&self) -> &[TableOp] { + &self.ops[1..] + } + + // Split the query into either: + // + // - a list of nested queries and the remaining table ops in self, or + // - the flat query contained in self. + pub(crate) fn split(&self, query_end_time: DateTime) -> SplitQuery { + match &self.ops[0] { + TableOp::Basic(BasicTableOp::Get(_)) => { + SplitQuery::Flat(crate::oxql::Query { + parsed: self.clone(), + end_time: query_end_time, + }) + } + TableOp::Basic(_) => unreachable!(), + TableOp::Grouped(GroupedTableOp { ops }) => SplitQuery::Nested { + subqueries: ops + .iter() + .cloned() + .map(|parsed| crate::oxql::Query { + parsed, + end_time: query_end_time, + }) + .collect(), + transformations: self.ops[1..].to_vec(), + }, + } + } + + // Return the last referenced timestamp in the query, if any. + pub(crate) fn query_end_time(&self) -> Option> { + match &self.ops[0] { + TableOp::Basic(BasicTableOp::Get(_)) => self + .transformations() + .iter() + .filter_map(|op| { + let TableOp::Basic(BasicTableOp::Filter(filter)) = op + else { + return None; + }; + filter.last_timestamp() + }) + .max(), + TableOp::Basic(_) => unreachable!(), + TableOp::Grouped(GroupedTableOp { ops }) => { + let grouped_max = + ops.iter().filter_map(Self::query_end_time).max(); + let op_max = self + .transformations() + .iter() + .filter_map(|op| { + let TableOp::Basic(BasicTableOp::Filter(filter)) = op + else { + return None; + }; + filter.last_timestamp() + }) + .max(); + grouped_max.max(op_max) + } + } + } +} + +// Either a flat query or one with nested subqueries. +// +// OxQL supports subqueries. Though they can be nested, they must always be at +// the front of a query. This represents either a query that is flat, _or_ that +// prefix of subqueries and the following transformations. +#[derive(Clone, Debug, PartialEq)] +pub(crate) enum SplitQuery { + Flat(crate::oxql::Query), + Nested { + subqueries: Vec, + transformations: Vec, + }, +} diff --git a/oximeter/db/src/oxql/ast/table_ops/align.rs b/oximeter/db/src/oxql/ast/table_ops/align.rs new file mode 100644 index 0000000000..cf54ebc312 --- /dev/null +++ b/oximeter/db/src/oxql/ast/table_ops/align.rs @@ -0,0 +1,753 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! An AST node describing timeseries alignment operations. 
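Before turning to alignment, one more note on the `split` method and `SplitQuery` type introduced in `mod.rs` just above. The test-style sketch below is an editorial illustration, not part of the change; it assumes the grammar accepts a flat `get ... | filter ...` pipeline of the form shown in the filter module documentation, and it uses the crate-internal grammar module the same way the parser's unit tests do.

```rust
// Editorial sketch: a flat query stays flat, while a query that starts
// with grouped subqueries splits into those subqueries plus the trailing
// transformations (here, the `join`).
use chrono::Utc;

let end_time = Utc::now();

let flat = query_parser::query("get a:b | filter foo == 0").unwrap();
assert!(matches!(flat.split(end_time), SplitQuery::Flat(_)));

let nested = query_parser::query("{ get a:b; get c:d } | join").unwrap();
let SplitQuery::Nested { subqueries, transformations } =
    nested.split(end_time)
else {
    panic!("expected a nested split");
};
assert_eq!(subqueries.len(), 2);
assert_eq!(transformations.len(), 1);
```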
+ +// Copyright 2024 Oxide Computer Company + +use crate::oxql::point::DataType; +use crate::oxql::point::MetricType; +use crate::oxql::point::Points; +use crate::oxql::point::ValueArray; +use crate::oxql::point::Values; +use crate::oxql::query::Alignment; +use crate::oxql::Error; +use crate::oxql::Table; +use crate::oxql::Timeseries; +use anyhow::Context; +use chrono::DateTime; +use chrono::TimeDelta; +use chrono::Utc; +use std::time::Duration; + +// The maximum factor by which an alignment operation may upsample data. +// +// This is a crude way to limit the size of a query result. We do not currently +// paginate the results of OxQL queries, so we need to find other ways to avoid +// DOS attacks due to large query results. +// +// While we also apply limits on the total number of samples fetched from the +// ClickHouse database, this alone is insufficient. For example, suppose we have +// two samples, spaced 1 second apart, which are then passed to an alignment +// table operation with a period of 1 nanosecond. Now you have a billion points! +// +// To prevent this, we restrict the total amount by which any alignment +// operation can upsample the data. Another way to think of it is that this +// limits the ratio between the requested period and the largest interval +// between timestamps in the data. +const MAX_UPSAMPLING_RATIO: u128 = 10; + +fn verify_max_upsampling_ratio( + timestamps: &[DateTime], + period: &Duration, +) -> Result<(), Error> { + let period = period.as_nanos(); + let max = MAX_UPSAMPLING_RATIO * period; + for (t1, t0) in timestamps.iter().skip(1).zip(timestamps.iter()) { + let Some(nanos) = t1.signed_duration_since(t0).num_nanoseconds() else { + anyhow::bail!("Overflow computing timestamp delta"); + }; + assert!(nanos > 0, "Timestamps should be sorted"); + let nanos = nanos as u128; + anyhow::ensure!( + nanos <= max, + "A table alignment operation may not upsample data by \ + more than a factor of {MAX_UPSAMPLING_RATIO}" + ); + } + Ok(()) +} + +/// An `align` table operation, used to produce data at well-defined periods. +/// +/// Alignment is important for any kind of aggregation. Data is actually +/// produced at variable intervals, under the control of the producer itself. +/// This means that in general, two timeseries that are related (say, the same +/// schema) may have data samples at slightly different timestamps. +/// +/// Alignment is used to produce data at the defined timestamps, so that samples +/// from multiple timeseries may be combined or correlated in meaningful ways. +#[derive(Clone, Debug, PartialEq)] +pub struct Align { + /// The alignment method, used to describe how data over the input period + /// is used to generate an output sample. + pub method: AlignmentMethod, + // TODO-completeness. We'd like to separate the concept of the period, the + // interval on which data is produced by this alignment, and the input + // window, the range of time in the past over which data is considered to + // produce the output values. + // + // For example, we might want to produce a moving average, by considering + // the last 1h of data, and produce an output value every 10m. Each of those + // output values would share 50m of data with the points on either side. + // + // For now, we'll enforce that the output period and input window are the + // same. + pub period: Duration, +} + +impl Align { + // Apply the alignment function to the set of tables. 
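A quick worked number for the guard above (an editorial aside, not part of the change): the ratio caps how small the requested period may be relative to the spacing of the input samples.

```rust
// Editorial sketch: with input samples spaced 1 second apart, the smallest
// period `verify_max_upsampling_ratio` accepts is
// 1s / MAX_UPSAMPLING_RATIO = 100ms. A 1ns period would turn two input
// samples into a billion output points, and is rejected.
use std::time::Duration;

let sample_spacing = Duration::from_secs(1);
let smallest_allowed = sample_spacing / (MAX_UPSAMPLING_RATIO as u32);
assert_eq!(smallest_allowed, Duration::from_millis(100));
```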
+ pub(crate) fn apply( + &self, + tables: &[Table], + query_end: &DateTime, + ) -> Result, Error> { + match self.method { + AlignmentMethod::Interpolate => tables + .iter() + .map(|table| align_interpolate(table, query_end, &self.period)) + .collect(), + AlignmentMethod::MeanWithin => tables + .iter() + .map(|table| align_mean_within(table, query_end, &self.period)) + .collect(), + } + } +} + +/// An alignment method. +#[derive(Clone, Debug, PartialEq)] +pub enum AlignmentMethod { + /// Alignment is done by interpolating the output data at the specified + /// period. + Interpolate, + /// Alignment is done by computing the mean of the output data within the + /// specified period. + MeanWithin, +} + +// Align the timeseries in a table by computing the average within each output +// period. +fn align_mean_within( + table: &Table, + query_end: &DateTime, + period: &Duration, +) -> Result { + let mut output_table = Table::new(table.name()); + for timeseries in table.iter() { + let points = ×eries.points; + anyhow::ensure!( + points.dimensionality() == 1, + "Aligning multidimensional timeseries is not yet supported" + ); + let data_type = points.data_types().next().unwrap(); + anyhow::ensure!( + data_type.is_numeric(), + "Alignment by mean requires numeric data type, not {}", + data_type + ); + let metric_type = points.metric_type().unwrap(); + anyhow::ensure!( + matches!(metric_type, MetricType::Gauge | MetricType::Delta), + "Alignment by mean requires a gauge or delta metric, not {}", + metric_type, + ); + verify_max_upsampling_ratio(&points.timestamps, &period)?; + + // Always convert the output to doubles, when computing the mean. The + // output is always a gauge, so we do not need the start times of the + // input either. + // + // IMPORTANT: We compute the mean in the loop below from the back of the + // array (latest timestamp) to the front (earliest timestamp). They are + // appended to these arrays here in that _reversed_ order. These arrays + // are flipped before pushing them onto the timeseries at the end of the + // loop below. + let mut output_values = Vec::with_capacity(points.len()); + let mut output_timestamps = Vec::with_capacity(points.len()); + + // Convert the input to doubles now, so the tight loop below does less + // conversion / matching inside. + let input_points = match points.values(0).unwrap() { + ValueArray::Integer(values) => values + .iter() + .map(|maybe_int| maybe_int.map(|int| int as f64)) + .collect(), + ValueArray::Double(values) => values.clone(), + _ => unreachable!(), + }; + + // Alignment works as follows: + // + // - Start at the end of the timestamp array, working our way backwards + // in time. + // - Create the output timestamp from the current step. + // - Find all points in the input array that are within the alignment + // period. + // - Compute the mean of those. + let period_ = + TimeDelta::from_std(*period).context("time delta out of range")?; + let first_timestamp = points.timestamps[0]; + let mut ix: u32 = 0; + loop { + // Compute the next output timestamp, by shifting the query end time + // by the period and the index. + let time_offset = TimeDelta::from_std(ix * *period) + .context("time delta out of range")?; + let output_time = query_end + .checked_sub_signed(time_offset) + .context("overflow computing next output timestamp")?; + let window_start = output_time + .checked_sub_signed(period_) + .context("overflow computing next output window start")?; + + // The output time is before any of the data in the input array, + // we're done. 
It's OK for the _start time_ to be before any input + // timestamps. + if output_time < first_timestamp { + break; + } + + // Aggregate all values within this time window. + // + // This works a bit differently for gauge timeseries and deltas. + // Gauges are simpler, so let's consider them first. A point is + // "within" the window if the timestamp is within the window. Every + // point is either completely within or completely without the + // window, so we just add the values. + // + // Deltas have a start time, which makes things a bit more + // complicated. In that case, a point can overlap _partially_ with + // the output time window, and we'd like to take that partial + // overlap into account. To do that, we find relevant values which + // have either a start time or timestamp within the output window. + // We compute the fraction of overlap with the window, which is in + // [0.0, 1.0], and multiply the value by that fraction. One can + // think of this as a dot-product between the interval-overlap array + // and the value array, divided by the 1-norm, or number of nonzero + // entries. + let output_value = if matches!(metric_type, MetricType::Gauge) { + mean_gauge_value_in_window( + &points.timestamps, + &input_points, + window_start, + output_time, + ) + } else { + mean_delta_value_in_window( + points.start_times.as_ref().unwrap(), + &points.timestamps, + &input_points, + window_start, + output_time, + ) + }; + output_values.push(output_value); + + // In any case, we push the window's end time and increment to the + // next period. + output_timestamps.push(output_time); + ix += 1; + } + + // We've accumulated our input values into the output arrays, but in + // reverse order. Flip them and push onto the existing table, as a gauge + // timeseries. + let mut new_timeseries = Timeseries::new( + timeseries.fields.clone().into_iter(), + DataType::Double, + MetricType::Gauge, + ) + .unwrap(); + let values = + ValueArray::Double(output_values.into_iter().rev().collect()); + let timestamps = output_timestamps.into_iter().rev().collect(); + let values = Values { values, metric_type: MetricType::Gauge }; + new_timeseries.points = + Points { start_times: None, timestamps, values: vec![values] }; + new_timeseries.alignment = + Some(Alignment { end_time: *query_end, period: *period }); + output_table.insert(new_timeseries).unwrap(); + } + Ok(output_table) +} + +// Given an interval start and end, and a window start and end, compute the +// fraction of the _interval_ that the time window represents. +fn fraction_overlap_with_window( + interval_start: DateTime, + interval_end: DateTime, + window_start: DateTime, + window_end: DateTime, +) -> f64 { + assert!(interval_start < interval_end); + assert!(window_start < window_end); + let end = window_end.min(interval_end); + let start = window_start.max(interval_start); + let contained_size = (end - start).num_nanoseconds().unwrap() as f64; + if contained_size < 0.0 { + return 0.0; + } + let interval_size = + (interval_end - interval_start).num_nanoseconds().unwrap() as f64; + let fraction = contained_size / interval_size; + assert!(fraction >= 0.0); + assert!(fraction <= 1.0); + fraction +} + +// For a delta metric, compute the mean of points falling within the provided +// window. +// +// This uses both the start and end times when considering each point. Each +// point's value is weighted by the faction of overlap with the window. 
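Since the overlap weighting is easy to get backwards, here is a tiny worked example (editorial, mirroring the unit tests at the bottom of this file) of the `fraction_overlap_with_window` helper defined above: the fraction is measured relative to the interval, not the window.

```rust
// Editorial sketch: an input interval covering [now - 1s, now] overlaps a
// window covering [now - 0.5s, now] over half of the *interval*, so the
// fraction is 0.5 and a delta value of 3.0 would contribute 1.5.
use chrono::Utc;
use std::time::Duration;

let now = Utc::now();
let fraction = fraction_overlap_with_window(
    now - Duration::from_secs(1),       // interval start
    now,                                // interval end
    now - Duration::from_secs_f64(0.5), // window start
    now,                                // window end
);
assert_eq!(fraction, 0.5);
```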
+fn mean_delta_value_in_window( + start_times: &[DateTime], + timestamps: &[DateTime], + input_points: &[Option], + window_start: DateTime, + window_end: DateTime, +) -> Option { + // We can find the indices where the timestamp and start times separately + // overlap the window of interest. Then any interval is potentially of + // interest if _either_ its start time or timestamp is within the window. + // + // Since the start times are <= the timestamps, we can take the min of those + // two to get the first point that overlaps at all, and the max to get the + // last. + let first_timestamp = timestamps.partition_point(|t| t <= &window_start); + let last_timestamp = timestamps.partition_point(|t| t <= &window_end); + let first_start_time = start_times.partition_point(|t| t <= &window_start); + let last_start_time = start_times.partition_point(|t| t <= &window_end); + let first_index = first_timestamp.min(first_start_time); + let last_index = last_timestamp.max(last_start_time); + + // Detect the possible case where the interval is entirely before or + // entirely after the window. + if first_index == last_index { + let t = *timestamps.get(first_timestamp)?; + let s = *start_times.get(first_timestamp)?; + if t < window_start || s > window_end { + return None; + } + let Some(val) = input_points[first_timestamp] else { + return None; + }; + let fraction = fraction_overlap_with_window( + start_times[first_start_time], + timestamps[first_timestamp], + window_start, + window_end, + ); + return Some(fraction * val); + } + + // Compute the overlap for all points which have some overlap. + let starts = &start_times[first_index..last_index]; + let times = ×tamps[first_index..last_index]; + let vals = &input_points[first_index..last_index]; + let iter = starts + .into_iter() + .copied() + .zip(times.into_iter().copied()) + .zip(vals.into_iter().copied()); + let count = (last_timestamp - first_timestamp).max(1) as f64; + let mut maybe_sum = None; + for it in iter.filter_map(|((start, time), maybe_val)| { + let Some(val) = maybe_val else { + return None; + }; + let fraction = + fraction_overlap_with_window(start, time, window_start, window_end); + Some(fraction * val) + }) { + *maybe_sum.get_or_insert(0.0) += it; + } + maybe_sum.map(|sum| sum / count) +} + +// For a gauge metric, compute the mean of points falling within the provided +// window. +fn mean_gauge_value_in_window( + timestamps: &[DateTime], + input_points: &[Option], + window_start: DateTime, + window_end: DateTime, +) -> Option { + // Find the position of the window start and end in the sorted + // array of input timestamps. The `partition_point()` method accepts + // a closure, which partitions the input into a prefix where the + // closure evaluates to true, and a suffix where it's false. It + // returns the first element in the suffix. + // + // So the first closure returns true for all timestamps we want to + // exclude, which are those up to and including the window start time. + // So we get the index of the first point strictly later than the + // window start. + // + // The second closure returns true for all points up to and + // including the output time as well. + let start_index = timestamps.partition_point(|t| t <= &window_start); + let output_index = timestamps.partition_point(|t| t <= &window_end); + assert!(output_index >= start_index); + + // Accumulate the values over this set of indices. + // + // If there are really zero points in this time interval, we add + // a missing value. 
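As an aside on the index arithmetic in this function (editorial, not part of the change): `partition_point` returns the index of the first element for which the predicate is false, which is what gives the window its half-open semantics.

```rust
// Editorial sketch of `partition_point` on a sorted slice: with integers
// standing in for timestamps, a window of (0, 2] selects the first two
// elements, i.e. indices [0, 2).
let times = [1, 2, 3, 4];
assert_eq!(times.partition_point(|&t| t <= 0), 0); // start of the window
assert_eq!(times.partition_point(|&t| t <= 2), 2); // end of the window
```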
+ if start_index != output_index { + let mut maybe_sum = None; + for it in input_points[start_index..output_index] + .iter() + .filter_map(|x| x.as_ref().copied()) + { + *maybe_sum.get_or_insert(0.0) += it; + } + maybe_sum.map(|output_value| { + output_value / (output_index - start_index) as f64 + }) + } else { + None + } +} + +fn align_interpolate( + _table: &Table, + _query_end: &DateTime, + _period: &Duration, +) -> Result { + anyhow::bail!("Alignment with interpolation not yet implemented") +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_fraction_overlap_with_window() { + let now = Utc::now(); + let window_start = now - Duration::from_secs(1); + let window_end = now; + let interval_start = window_start; + let interval_end = window_end; + assert_eq!( + fraction_overlap_with_window( + interval_start, + interval_end, + window_start, + window_end, + ), + 1.0 + ); + + let window_start = now - Duration::from_secs(1); + let window_end = now; + let interval_start = window_start; + let interval_end = now - Duration::from_secs_f64(0.5); + assert_eq!( + fraction_overlap_with_window( + interval_start, + interval_end, + window_start, + window_end, + ), + 1.0, + "This interval is aligned with the start time \ + of the window, and contained entirely within it, \ + so the fraction should be 1.0", + ); + + // If we reverse the window and interval, then the interval entirely + // contains the window, which is 50% of the interval. + let (window_start, window_end, interval_start, interval_end) = + (interval_start, interval_end, window_start, window_end); + assert_eq!( + fraction_overlap_with_window( + interval_start, + interval_end, + window_start, + window_end, + ), + 0.5, + "The window is entirely contained within the interval, \ + and covers 50% of it", + ); + + // If the interval is entirely contained in the window, we should have + // the entire interval as our fraction. + let window_start = now - Duration::from_secs(1); + let window_end = now; + let interval_start = window_start + Duration::from_secs_f64(0.25); + let interval_end = window_start + Duration::from_secs_f64(0.5); + assert_eq!( + fraction_overlap_with_window( + interval_start, + interval_end, + window_start, + window_end, + ), + 1.0, + "The interval is entirely contained within the window", + ); + + // This is aligned at the right with the window end. + let window_start = now - Duration::from_secs(1); + let window_end = now; + let interval_start = window_start + Duration::from_secs_f64(0.25); + let interval_end = window_end; + assert_eq!( + fraction_overlap_with_window( + interval_start, + interval_end, + window_start, + window_end, + ), + 1.0, + "The interval is aligned at right with the window, and \ + entirely contained within it, so the fraction should still \ + be 1.0", + ); + + // But if we reverse it again, the fraction should reveal itself. + let (window_start, window_end, interval_start, interval_end) = + (interval_start, interval_end, window_start, window_end); + assert_eq!( + fraction_overlap_with_window( + interval_start, + interval_end, + window_start, + window_end, + ), + 0.75, + "The window represents 75% of the interval", + ); + + // This interval does not overlap at all, to the left. 
+ let window_start = now - Duration::from_secs(1); + let window_end = now; + let interval_start = window_start - Duration::from_secs(2); + let interval_end = window_start - Duration::from_secs(1); + assert_eq!( + fraction_overlap_with_window( + interval_start, + interval_end, + window_start, + window_end, + ), + 0.0, + ); + + // This interval does not overlap at all, to the right. + let window_start = now - Duration::from_secs(1); + let window_end = now; + let interval_start = window_start + Duration::from_secs(1); + let interval_end = window_start + Duration::from_secs(2); + assert_eq!( + fraction_overlap_with_window( + interval_start, + interval_end, + window_start, + window_end, + ), + 0.0, + ); + } + + #[test] + fn test_mean_delta_value_in_window() { + let now = Utc::now(); + let start_times = &[ + now - Duration::from_secs(4), + now - Duration::from_secs(3), + now - Duration::from_secs(2), + now - Duration::from_secs(1), + ]; + let timestamps = &[ + now - Duration::from_secs(3), + now - Duration::from_secs(2), + now - Duration::from_secs(1), + now, + ]; + let input_points = &[Some(0.0), Some(1.0), Some(2.0), Some(3.0)]; + + let window_start = now - Duration::from_secs_f64(0.5); + let window_end = now; + let mean = mean_delta_value_in_window( + start_times, + timestamps, + input_points, + window_start, + window_end, + ) + .expect("This should overlap the last interval"); + assert_eq!( + mean, + input_points.last().unwrap().unwrap() / 2.0, + "This overlaps the last interval by half", + ); + } + + #[test] + fn test_mean_gauge_value_in_window() { + let now = Utc::now(); + let timestamps = &[ + now - Duration::from_secs(3), + now - Duration::from_secs(2), + now - Duration::from_secs(1), + now, + ]; + let input_points = &[Some(0.0), Some(1.0), Some(2.0), Some(3.0)]; + + let window_start = now - Duration::from_secs(4); + let window_end = now - Duration::from_secs(3); + let mean = mean_gauge_value_in_window( + timestamps, + input_points, + window_start, + window_end, + ) + .expect("This window should overlap the first timestamp"); + assert_eq!( + mean, 0.0, + "This window should overlap the first timestamp, so the \ + mean value should be the mean of the first point only" + ); + + let window_start = now - Duration::from_secs(4); + let window_end = now - Duration::from_secs(2); + let mean = mean_gauge_value_in_window( + timestamps, + input_points, + window_start, + window_end, + ) + .expect("This window should overlap the first two timestamps"); + assert_eq!( + mean, 0.5, + "This window should overlap the first two timestamps, so the \ + mean value should be the mean of the first two points" + ); + + let window_start = now - Duration::from_secs(3); + let window_end = now - Duration::from_secs(2); + let mean = mean_gauge_value_in_window( + timestamps, + input_points, + window_start, + window_end, + ) + .expect("This window should overlap the second timestamps"); + assert_eq!( + mean, 1.0, + "This window should overlap the second timestamp, so the \ + mean value should be the mean of the second point only." 
+ ); + + let window_start = now - Duration::from_secs(4); + let window_end = *timestamps.last().unwrap(); + let mean = mean_gauge_value_in_window( + timestamps, + input_points, + window_start, + window_end, + ) + .expect("This window should overlap the all timestamps"); + assert_eq!( + mean, + input_points.iter().map(|x| x.unwrap()).sum::() + / input_points.len() as f64, + "This window should overlap the all timestamps, so the \ + mean value should be the mean of all points", + ); + + let window_start = now - Duration::from_secs(3); + let window_end = now - Duration::from_secs_f64(2.5); + assert!( + mean_gauge_value_in_window( + timestamps, + input_points, + window_start, + window_end, + ) + .is_none(), + "This window should overlap none of the points" + ); + } + + #[test] + fn test_verify_max_upsampling_ratio() { + // We'll use a 1 second period, and ensure that we allow downsampling, + // and upsampling up to the max factor. That's 1/10th of a second, + // currently. + let now = Utc::now(); + let timestamps = &[now - Duration::from_secs(1), now]; + + // All values within the threshold. + for period in [ + Duration::from_secs_f64(0.5), + Duration::from_secs(10), + Duration::from_millis(100), + ] { + assert!(verify_max_upsampling_ratio(timestamps, &period).is_ok()); + } + + // Just below the threshold. + assert!(verify_max_upsampling_ratio( + timestamps, + &Duration::from_millis(99), + ) + .is_err()); + + // Sanity check for way below the threshold. + assert!(verify_max_upsampling_ratio( + timestamps, + &Duration::from_nanos(1), + ) + .is_err()); + + // Arrays where we can't compute an interval are fine. + assert!(verify_max_upsampling_ratio( + ×tamps[..1], + &Duration::from_nanos(1), + ) + .is_ok()); + assert!( + verify_max_upsampling_ratio(&[], &Duration::from_nanos(1),).is_ok() + ); + } + + #[test] + fn test_mean_delta_does_not_modify_missing_values() { + let now = Utc::now(); + let start_times = + &[now - Duration::from_secs(2), now - Duration::from_secs(1)]; + let timestamps = &[now - Duration::from_secs(1), now]; + let input_points = &[Some(1.0), None]; + let window_start = now - Duration::from_secs(1); + let window_end = now; + let mean = mean_delta_value_in_window( + start_times, + timestamps, + input_points, + window_start, + window_end, + ); + assert!( + mean.is_none(), + "This time window contains only a None value, which should not be \ + included in the sum" + ); + } + + #[test] + fn test_mean_gauge_does_not_modify_missing_values() { + let now = Utc::now(); + let timestamps = &[now - Duration::from_secs(1), now]; + let input_points = &[Some(1.0), None]; + let window_start = now - Duration::from_secs(1); + let window_end = now; + let mean = mean_gauge_value_in_window( + timestamps, + input_points, + window_start, + window_end, + ); + assert!( + mean.is_none(), + "This time window contains only a None value, which should not be \ + included in the sum" + ); + } +} diff --git a/oximeter/db/src/oxql/ast/table_ops/filter.rs b/oximeter/db/src/oxql/ast/table_ops/filter.rs new file mode 100644 index 0000000000..e5963fe69c --- /dev/null +++ b/oximeter/db/src/oxql/ast/table_ops/filter.rs @@ -0,0 +1,1356 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! An AST node describing filtering table operations. 
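As a brief illustration of the API this module exposes (an editorial sketch, not part of the change), filters can be parsed from the same text accepted by the `filter` table operation via the `FromStr` impl below, and negation wraps the whole expression.

```rust
// Editorial sketch: parsing a compound filter and negating it as a whole.
let filter: Filter =
    "hostname == \"web0\" || !(active == false)".parse().unwrap();
assert!(!filter.negated);

// `negate` flips only the outer flag; the inner expression is unchanged.
let negated = filter.negate();
assert!(negated.negated);
assert_eq!(negated.expr, filter.expr);
```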
+ +// Copyright 2024 Oxide Computer Company + +use crate::oxql::ast::cmp::Comparison; +use crate::oxql::ast::ident::Ident; +use crate::oxql::ast::literal::Literal; +use crate::oxql::ast::logical_op::LogicalOp; +use crate::oxql::ast::table_ops::limit::Limit; +use crate::oxql::ast::table_ops::limit::LimitKind; +use crate::oxql::point::DataType; +use crate::oxql::point::MetricType; +use crate::oxql::point::Points; +use crate::oxql::point::ValueArray; +use crate::oxql::query::special_idents; +use crate::oxql::Error; +use crate::oxql::Table; +use crate::oxql::Timeseries; +use chrono::DateTime; +use chrono::Utc; +use oximeter::FieldType; +use oximeter::FieldValue; +use regex::Regex; +use std::collections::BTreeSet; +use std::fmt; + +/// An AST node for the `filter` table operation. +/// +/// This can be a simple operation like `foo == "bar"` or a more complex +/// expression, such as: `filter hostname == "foo" || (hostname == "bar" +/// && id == "baz")`. +#[derive(Clone, Debug, PartialEq)] +pub struct Filter { + /// True if the whole expression is negated. + pub negated: bool, + /// The contained filtering expression, which may contain many expressions + /// joined by logical operators. + pub expr: FilterExpr, +} + +impl fmt::Display for Filter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}({})", if self.negated { "!" } else { "" }, self.expr,) + } +} + +impl core::str::FromStr for Filter { + type Err = Error; + fn from_str(s: &str) -> Result { + crate::oxql::ast::grammar::query_parser::filter_expr(s) + .map_err(|e| anyhow::anyhow!("invalid filter expression: {e}")) + } +} + +// A crude limit on expression complexity, governing how many times we +// iteratively apply a DNF simplification before bailing out. +const EXPR_COMPLEXITY_ITERATIVE_LIMIT: usize = 32; + +// A crude limit on expression complexity, governing how many times we +// recurisvely apply a DNF simplification before bailing out. +const EXPR_COMPLEXITY_RECURSIVE_LIMIT: usize = 32; + +impl Filter { + /// Return the negation of this filter. + pub fn negate(&self) -> Filter { + Self { negated: !self.negated, ..self.clone() } + } + + /// Split the filter at top-level disjunctions. + /// + /// This is likely only useful after simplifying to DNF with + /// `simplify_to_dnf()`. + pub fn flatten_disjunctions(&self) -> Vec { + let mut out = vec![]; + self.flatten_disjunctions_inner(&mut out); + out + } + + fn flatten_disjunctions_inner(&self, dis: &mut Vec) { + // Recursion is only needed if this is an OR expression. In that case, + // we split the left and push it, and then recurse on the right. + // + // Note that we don't need left-recursion because the parser is strictly + // non-left-recursive. + if let FilterExpr::Compound(CompoundFilter { + left, + op: LogicalOp::Or, + right, + }) = &self.expr + { + dis.push(*left.clone()); + right.flatten_disjunctions_inner(dis); + } else { + // It's not an OR expression, or it is a simple filter expression. + // In either case, just push it directly, withouth recursing. + dis.push(self.clone()); + } + } + + /// Simplfy a filter expression to disjunctive normal form (DNF). + /// + /// Disjunctive normal form is one of a few canonical ways of writing a + /// boolean expression. It simplifies to a disjunction of conjunctions, + /// i.e., only has terms like `(a && b) || (c && d) || ...`. 
+ /// + /// This method exists for the purposes of creating _independent_ pieces of + /// a filtering expression, each of which can be used to generate a new SQL + /// query run against ClickHouse. This is critical to support complicated + /// OxQL queries. Consider: + /// + /// ```ignore + /// get some_timeseries + /// | filter (foo == "bar") || (timestamp > @now() - 1m && foo == "baz") + /// ``` + /// + /// This requires fetching part of one timeseries, and all of another. One + /// cannot run this as a conjunction on the fields and then a query on the + /// measurements. It must be run in such a way to get the sets of keys + /// consistent with each term in the disjunction _independently_, so that + /// one can apply the timestamp filter to only the correct one. + /// + /// We use this method to generate the DNF, a form with only disjunctions of + /// conjunctions. That is, it's not possible to further distribute + /// conjunctions over disjunctions. + /// + /// Each disjunction is then a separate query against the fields table, where + /// we keep track of the keys in each. Each set of predicates and consistent + /// keys is then used later to fetch the measurements. + /// + /// # Notes + /// + /// There is a huge academic literature on this topic, part of the study of + /// formal languages and other areas theoretical computer science. These + /// references are mostly pretty dense and formal, though a few are really + /// useful. This [paper](https://www.researchgate.net/publication/220154187_A_Survey_of_Strategies_in_Program_Transformation_Systems) + /// is a good and accessible survey to the idea of translation systems -- + /// it's mostly focused on programming languages and compilers, but Figures + /// 7-9 in particular are about DNF. + /// + /// As usual, the Wikipedia page is a reasonable overview as well, + /// [here](https://en.wikipedia.org/wiki/Disjunctive_normal_form). We're + /// using the "syntactic" DNF conversion algorithm, essentially. This + /// involves a recursive application of + /// [de Morgan's rules](https://en.wikipedia.org/wiki/De_Morgan%27s_laws), + /// [involution / double-negation](https://en.wikipedia.org/wiki/Involution_(mathematics)), + /// distributivity of [Boolean operators](https://en.wikipedia.org/wiki/Boolean_algebra#Monotone_laws), + /// etc. + pub fn simplify_to_dnf(&self) -> Result { + self.simplify_to_dnf_impl(0) + } + + fn simplify_to_dnf_impl(&self, level: usize) -> Result { + anyhow::ensure!( + level < EXPR_COMPLEXITY_RECURSIVE_LIMIT, + "Maximum recursion level exceeded trying to simplify \ + logical expression to disjunctive normal form" + ); + let mut out = self.simplify_to_dnf_inner(level)?; + if &out == self { + return Ok(out); + } + // Continually apply simplifications as long as able. + // + // This makes me really nervous, so I'm adding an escape hatch that we + // only allow a few iterations. If we've not simplified within that, + // we'll just declare the expression too complicated to handle. 
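To make the target form concrete, here is an editorial sketch (not part of the change, and assuming the grammar accepts the integer comparisons used in the unit tests below) of the rewrite this method performs.

```rust
// Editorial sketch: DNF simplification distributes conjunctions over
// disjunctions, so
//
//     a == 0 && (b == 1 || c == 2)
//
// becomes the logical equivalent of
//
//     (a == 0 && b == 1) || (a == 0 && c == 2)
//
// and `flatten_disjunctions` can then split the top-level OR into terms
// that are queried independently.
let filter: Filter = "a == 0 && (b == 1 || c == 2)".parse().unwrap();
let dnf = filter.simplify_to_dnf().unwrap();
assert_eq!(dnf.flatten_disjunctions().len(), 2);
```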
+ for _ in 0..EXPR_COMPLEXITY_ITERATIVE_LIMIT { + let out_ = out.simplify_to_dnf_inner(level)?; + if out_ == out { + return Ok(out_); + } + out = out_; + } + anyhow::bail!("Logical expression is too complicated to simplify") + } + + fn simplify_to_dnf_inner(&self, level: usize) -> Result { + let new = self.expr.simplify_to_dnf(level)?; + + // This matches the rule: + // + // !!x -> x + if self.negated && new.negated && new.is_simple() { + return Ok(new.negate()); + } + + // These two blocks match de Morgan's rules, which distribute a negation + // down and swap the logical operator. + if self.negated { + // This matches one of de Morgan's rules: + // + // !(x && y) -> !x || !y + if let FilterExpr::Compound(CompoundFilter { + left: x, + op: LogicalOp::And, + right: y, + }) = &new.expr + { + let expr = FilterExpr::Compound(CompoundFilter { + left: Box::new(x.negate()), + op: LogicalOp::Or, + right: Box::new(y.negate()), + }); + return Ok(Filter { negated: false, expr }); + } + + // This matches the other of de Morgan's rules: + // + // !(x || y) -> !x && !y + if let FilterExpr::Compound(CompoundFilter { + left: x, + op: LogicalOp::And, + right: y, + }) = &new.expr + { + let expr = FilterExpr::Compound(CompoundFilter { + left: Box::new(x.negate()), + op: LogicalOp::Or, + right: Box::new(y.negate()), + }); + return Ok(Filter { negated: false, expr }); + } + } + + // Nothing else to do, just return ourself, though we do need to make + // sure we copy the negation from self as well. + Ok(Self { negated: self.negated, ..new }) + } + + // Merge this filter with another one, using the provided operator. + pub(crate) fn merge(&self, other: &Filter, op: LogicalOp) -> Self { + Self { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: Box::new(self.clone()), + op, + right: Box::new(other.clone()), + }), + } + } + + // Apply the filter to the provided field. + // + // This returns `Ok(None)` if the filter doesn't apply. It returns `Ok(x)` + // if the filter does apply, where `x` is the logical application of the + // filter to the field. `true` means "keep this field", which is analogous + // to the `Iterator::filter()` method's signature. + // + // If the filter does apply, but is incompatible or incomparable, return an + // error. + fn filter_field( + &self, + name: &str, + value: &FieldValue, + ) -> Result, Error> { + let result = match &self.expr { + FilterExpr::Simple(inner) => inner.filter_field(name, value), + FilterExpr::Compound(inner) => inner.filter_field(name, value), + }; + result.map(|maybe_keep| maybe_keep.map(|keep| self.negated ^ keep)) + } + + // Apply the filter to the provided points. + fn filter_points(&self, points: &Points) -> Result { + let to_keep = self.filter_points_inner(points)?; + points.filter(to_keep) + } + + // Inner implementation of filtering points. + // + // Returns an array of bools, where true indicates the point should be kept. + fn filter_points_inner(&self, points: &Points) -> Result, Error> { + match &self.expr { + FilterExpr::Simple(inner) => { + inner.filter_points(self.negated, points) + } + FilterExpr::Compound(inner) => { + inner.filter_points(self.negated, points) + } + } + } + + // Apply the filtering table operation. + pub(crate) fn apply(&self, tables: &[Table]) -> Result, Error> { + anyhow::ensure!( + !tables.is_empty(), + "Filtering operations require at least one table", + ); + let mut output_tables = Vec::with_capacity(tables.len()); + // Ensure that all the identifiers in this filter apply to the + // input timeseries. 
We can do this once at the beginning, because all + // the timeseries in a table have the same set of fields. + let Some(first_timeseries) = tables[0].iter().next() else { + // You give nothing, you get nothing. + return Ok(tables.to_vec()); + }; + let ident_names = self.ident_names(); + + // There are extra, implied names that depend on the data type of the + // timeseries itself, check those as well. + let extras = implicit_field_names(first_timeseries); + let not_valid = ident_names + .iter() + .filter(|&&name| { + !(first_timeseries.fields.contains_key(name) + || extras.contains(name)) + }) + .collect::>(); + anyhow::ensure!( + not_valid.is_empty(), + "The filter expression contains identifiers that are not \ + valid for its input timeseries. Invalid identifiers: {:?}, \ + timeseries fields: {:?}", + not_valid, + ident_names.union(&extras), + ); + + // Filter each input table in succession. + for table in tables.iter() { + let mut timeseries = Vec::with_capacity(table.len()); + 'timeseries: for input in table.iter() { + // If the filter restricts any of the fields, remove this + // timeseries altogether. + for (name, value) in input.fields.iter() { + if let Some(false) = self.filter_field(name, value)? { + continue 'timeseries; + } + } + + // Apply the filter to the data points as well. + let points = self.filter_points(&input.points)?; + + // Similar to above, if the filter removes all data points in + // the timeseries, let's remove the timeseries altogether. + if points.is_empty() { + continue; + } + timeseries.push(Timeseries { + fields: input.fields.clone(), + points, + alignment: input.alignment, + }) + } + output_tables.push(Table::from_timeseries( + table.name(), + timeseries.into_iter(), + )?); + } + Ok(output_tables) + } + + // Return the last referenced timestamp by this filter, if any. + // + // This is the maximum timestamp, before which any filtered point must lie. + // This is used to determine the query end time. + pub(crate) fn last_timestamp(&self) -> Option> { + match &self.expr { + FilterExpr::Simple(inner) => inner.last_timestamp(), + FilterExpr::Compound(inner) => inner.last_timestamp(), + } + } + + // Return the name of all identifiers listed in this filter. + fn ident_names(&self) -> BTreeSet<&str> { + match &self.expr { + FilterExpr::Simple(inner) => { + let mut out = BTreeSet::new(); + out.insert(inner.ident.as_str()); + out + } + FilterExpr::Compound(inner) => { + let mut all = inner.left.ident_names(); + all.extend(inner.right.ident_names()); + all + } + } + } + + fn is_xor(&self) -> bool { + self.is_op(LogicalOp::Xor) + } + + fn is_op(&self, expected_op: LogicalOp) -> bool { + let FilterExpr::Compound(CompoundFilter { op, .. }) = &self.expr else { + return false; + }; + op == &expected_op + } + + // If this is an XOR, rewrite it to a disjunction of conjunctions. + // + // If it is not, return a clone of self. + fn rewrite_xor_to_disjunction(&self) -> Self { + let self_ = self.clone(); + if !self.is_xor() { + return self_; + } + let Filter { + negated, + expr: FilterExpr::Compound(CompoundFilter { left, right, .. 
}), + } = self_ + else { + unreachable!(); + }; + let left_ = CompoundFilter { + left: left.clone(), + op: LogicalOp::And, + right: Box::new(right.negate()), + }; + let right_ = CompoundFilter { + left: Box::new(left.negate()), + op: LogicalOp::And, + right, + }; + let expr = CompoundFilter { + left: Box::new(left_.to_filter()), + op: LogicalOp::Or, + right: Box::new(right_.to_filter()), + }; + Filter { negated, expr: FilterExpr::Compound(expr) } + } + + fn is_simple(&self) -> bool { + matches!(self.expr, FilterExpr::Simple(_)) + } + + /// Return true if this filtering expression can be reordered around a + /// `limit` table operation. + /// + /// We attempt to push filtering expressions down to the database as much as + /// possible. This involves moving filters "through" an OxQL pipeline, so + /// that we can run them as early as possible, before other operations like + /// a `group_by`. + /// + /// In some cases, but not all, filters interact with limiting table + /// operations, which take the first or last k points from a timeseries. + /// Specifically, we can move a filter around a limit if: + /// + /// - The filter does not refer to timestamps at all + /// - The filter's comparison against timestamps restricts them in the same + /// "direction" as the limit operation. A timestamp filter which takes later + /// values, e.g., `timestamp > t0` can be moved around a `last k` operation; + /// a filter which takes earlier values, e.g., `timestamp < t0` can be moved + /// around a `first k` operation. + /// + /// All other situations return false. Consider a query with `filter + /// timestamp < t0` and `last k`. Those return different results depending + /// on which is run first: + /// + /// - Running the filter then the limit returns the last values before `t0`, + /// so the "end" of that chunk of time. + /// - Running the limit then filter returns the values in the last `k` of + /// the entire timeseries where the timestamp is before `t0`. That set can + /// be empty, if all the last `k` samples have a timestamp _after_ `t0`, + /// whereas the reverse is may well _not_ be empty. + pub(crate) fn can_reorder_around(&self, limit: &Limit) -> bool { + match &self.expr { + FilterExpr::Simple(SimpleFilter { ident, cmp, .. }) => { + if ident.as_str() != special_idents::TIMESTAMP { + return true; + } + let is_compatible = match limit.kind { + LimitKind::First => { + matches!(cmp, Comparison::Lt | Comparison::Le) + } + LimitKind::Last => { + matches!(cmp, Comparison::Gt | Comparison::Ge) + } + }; + self.negated ^ is_compatible + } + FilterExpr::Compound(CompoundFilter { left, right, .. }) => { + let left = left.can_reorder_around(limit); + let right = right.can_reorder_around(limit); + self.negated ^ (left && right) + } + } + } +} + +/// Return the names of the implicit fields / columns that a filter can apply +/// to, based on the metric types of the contained data points. +fn implicit_field_names( + first_timeseries: &Timeseries, +) -> BTreeSet<&'static str> { + let mut out = BTreeSet::new(); + + // Everything has a timestamp! + out.insert(special_idents::TIMESTAMP); + let type_info = first_timeseries + .points + .metric_types() + .zip(first_timeseries.points.data_types()); + for (metric_type, data_type) in type_info { + match (metric_type, data_type) { + // Scalar gauges. + ( + MetricType::Gauge, + DataType::Integer + | DataType::Boolean + | DataType::Double + | DataType::String, + ) => { + out.insert(special_idents::DATUM); + } + // Histogram gauges. 
+ ( + MetricType::Gauge, + DataType::IntegerDistribution | DataType::DoubleDistribution, + ) => { + out.insert(special_idents::BINS); + out.insert(special_idents::COUNTS); + } + // Scalars, either delta or cumulatives. + ( + MetricType::Delta | MetricType::Cumulative, + DataType::Integer | DataType::Double, + ) => { + out.insert(special_idents::DATUM); + out.insert(special_idents::START_TIME); + } + // Histograms, either delta or cumulative. + ( + MetricType::Delta | MetricType::Cumulative, + DataType::IntegerDistribution | DataType::DoubleDistribution, + ) => { + out.insert(special_idents::BINS); + out.insert(special_idents::COUNTS); + out.insert(special_idents::START_TIME); + } + // Impossible combinations + ( + MetricType::Delta | MetricType::Cumulative, + DataType::Boolean | DataType::String, + ) => unreachable!(), + } + } + out +} + +/// A filtering expression, used in the `filter` table operation. +#[derive(Clone, Debug, PartialEq)] +pub enum FilterExpr { + /// A single logical expression, e.g., `foo == "bar"`. + Simple(SimpleFilter), + /// Two logical expressions, e.g., `foo == "bar" || yes == false` + Compound(CompoundFilter), +} + +impl FilterExpr { + fn to_filter(&self) -> Filter { + Filter { negated: false, expr: self.clone() } + } + + fn simplify_to_dnf(&self, level: usize) -> Result { + match self { + FilterExpr::Simple(_) => Ok(self.to_filter()), + FilterExpr::Compound(CompoundFilter { left, op, right }) => { + // Apply recursively first. + let left = left.simplify_to_dnf_impl(level + 1)?; + let right = right.simplify_to_dnf_impl(level + 1)?; + + // This matches the rule: + // + // (x || y) && z -> (x && z) || (y && z) + if let ( + FilterExpr::Compound(CompoundFilter { + left: x, + op: LogicalOp::Or, + right: y, + }), + LogicalOp::And, + FilterExpr::Simple(z), + ) = (&left.expr, op, &right.expr) + { + let left_ = Filter { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: x.clone(), + op: LogicalOp::And, + right: Box::new(z.to_filter()), + }), + }; + let right_ = Filter { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: y.clone(), + op: LogicalOp::And, + right: Box::new(z.to_filter()), + }), + }; + return Ok(Filter { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: Box::new(left_), + op: LogicalOp::Or, + right: Box::new(right_), + }), + }); + } + + // This matches the rule: + // + // z && (x || y) -> (z && x) || (z && y) + if let ( + FilterExpr::Simple(z), + LogicalOp::And, + FilterExpr::Compound(CompoundFilter { + left: x, + op: LogicalOp::Or, + right: y, + }), + ) = (&left.expr, op, &right.expr) + { + let left_ = Filter { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: Box::new(z.to_filter()), + op: LogicalOp::And, + right: x.clone(), + }), + }; + let right_ = Filter { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: Box::new(z.to_filter()), + op: LogicalOp::And, + right: y.clone(), + }), + }; + return Ok(Filter { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: Box::new(left_), + op: LogicalOp::Or, + right: Box::new(right_), + }), + }); + } + + // Lastly, simplify an XOR to its logical equivalent, which is + // in DNF. 
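The two distribution rules above, together with the XOR rewrite, are ordinary propositional identities. As a quick sanity check, here is a self-contained sketch using plain booleans only (no OxQL AST types; the standalone `main` is purely illustrative) that exhaustively verifies both identities:

// Exhaustively check the identities used by the DNF simplification:
//   a ^ b            ==  (a && !b) || (!a && b)
//   (a || b) && z    ==  (a && z) || (b && z)
//   z && (a || b)    ==  (z && a) || (z && b)
fn main() {
    let bools = [false, true];
    for &a in &bools {
        for &b in &bools {
            // XOR rewritten as a disjunction of conjunctions.
            assert_eq!(a ^ b, (a && !b) || (!a && b));
            for &z in &bools {
                // AND distributes over OR, from either side.
                assert_eq!((a || b) && z, (a && z) || (b && z));
                assert_eq!(z && (a || b), (z && a) || (z && b));
            }
        }
    }
    println!("XOR rewrite and distribution identities hold");
}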
+ let out = Filter { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: Box::new(left), + op: *op, + right: Box::new(right), + }), + }; + Ok(out.rewrite_xor_to_disjunction()) + } + } + } +} + +impl fmt::Display for FilterExpr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + FilterExpr::Simple(inner) => write!(f, "{inner}"), + FilterExpr::Compound(inner) => write!(f, "{inner}"), + } + } +} + +/// Two filter expressions joined by a logical operator. +#[derive(Clone, Debug, PartialEq)] +pub struct CompoundFilter { + /// The left subexpression. + pub left: Box, + /// The logical operator joining the two expressions. + pub op: LogicalOp, + /// The right subexpression. + pub right: Box, +} + +impl fmt::Display for CompoundFilter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{} {} {}", self.left, self.op, self.right,) + } +} + +impl CompoundFilter { + fn to_filter(&self) -> Filter { + Filter { negated: false, expr: FilterExpr::Compound(self.clone()) } + } + + // Apply the filter to the provided field. + fn filter_field( + &self, + name: &str, + value: &FieldValue, + ) -> Result, Error> { + let left = self.left.filter_field(name, value)?; + let right = self.right.filter_field(name, value)?; + match (left, right) { + (None, None) => Ok(None), + (Some(x), None) | (None, Some(x)) => Ok(Some(x)), + (Some(left), Some(right)) => match self.op { + LogicalOp::And => Ok(Some(left && right)), + LogicalOp::Or => Ok(Some(left || right)), + LogicalOp::Xor => Ok(Some(left ^ right)), + }, + } + } + + // Apply the filter to the provided points. + fn filter_points( + &self, + negated: bool, + points: &Points, + ) -> Result, Error> { + let mut left = self.left.filter_points_inner(points)?; + let right = self.right.filter_points_inner(points)?; + match self.op { + LogicalOp::And => { + for i in 0..left.len() { + left[i] = negated ^ (left[i] & right[i]); + } + } + LogicalOp::Or => { + for i in 0..left.len() { + left[i] = negated ^ (left[i] | right[i]); + } + } + LogicalOp::Xor => { + for i in 0..left.len() { + left[i] = negated ^ (left[i] ^ right[i]); + } + } + } + Ok(left) + } + + fn last_timestamp(&self) -> Option> { + let left = self.left.last_timestamp(); + let right = self.right.last_timestamp(); + match (left, right) { + (None, None) => None, + (Some(single), None) | (None, Some(single)) => Some(single), + (Some(left), Some(right)) => Some(left.max(right)), + } + } +} + +/// A simple filter expression, comparing an identifier to a value. +#[derive(Clone, Debug, PartialEq)] +pub struct SimpleFilter { + /// The identifier being compared. + pub ident: Ident, + /// The comparison operator. + pub cmp: Comparison, + /// The value to compare the identifier against. + pub value: Literal, +} + +impl fmt::Display for SimpleFilter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{} {} {}", self.ident, self.cmp, self.value,) + } +} + +impl SimpleFilter { + fn to_filter(&self) -> Filter { + Filter { negated: false, expr: FilterExpr::Simple(self.clone()) } + } + + // Apply this filter to the provided field. + // + // If the field name does not match the identifier in `self`, return + // `Ok(None)`, since this filter does not apply to the provided field. + // + // If the name matches and the type of `self` is compatible, return `Ok(x)` + // where `x` is the logical application of the filter to the field. + // + // If the field matches the name, but the type is not compatible, return an + // error. 
+ fn filter_field( + &self, + name: &str, + value: &FieldValue, + ) -> Result, Error> { + // If the name does not match, this filter does not apply, and so we do not + // filter the field. + if self.ident.as_str() != name { + return Ok(None); + } + self.value.compare_field(value, self.cmp) + } + + pub(crate) fn value_type_is_compatible_with_field( + &self, + field_type: FieldType, + ) -> bool { + self.value.is_compatible_with_field(field_type) + } + + /// Return the expression as a string that can be applied safely in the + /// database. + pub(crate) fn as_db_safe_string(&self) -> String { + let expr = self.value.as_db_safe_string(); + let fn_name = self.cmp.as_db_function_name(); + format!("{}({}, {})", fn_name, self.ident, expr) + } + + // Returns an array of bools, where true indicates the point should be kept. + fn filter_points( + &self, + negated: bool, + points: &Points, + ) -> Result, Error> { + let ident = self.ident.as_str(); + if ident == "timestamp" { + self.filter_points_by_timestamp(negated, &points.timestamps) + } else if ident == "datum" { + anyhow::ensure!( + points.dimensionality() == 1, + "Filtering multidimensional values by datum is not yet supported" + ); + self.filter_points_by_datum(negated, points.values(0).unwrap()) + } else { + Ok(vec![!negated; points.len()]) + } + } + + fn filter_points_by_timestamp( + &self, + negated: bool, + timestamps: &[DateTime], + ) -> Result, Error> { + let Literal::Timestamp(timestamp) = &self.value else { + anyhow::bail!( + "Cannot compare non-timestamp filter against a timestamp" + ); + }; + match self.cmp { + Comparison::Eq => Ok(timestamps + .iter() + .map(|t| negated ^ (t == timestamp)) + .collect()), + Comparison::Ne => Ok(timestamps + .iter() + .map(|t| negated ^ (t != timestamp)) + .collect()), + Comparison::Gt => Ok(timestamps + .iter() + .map(|t| negated ^ (t > timestamp)) + .collect()), + Comparison::Ge => Ok(timestamps + .iter() + .map(|t| negated ^ (t >= timestamp)) + .collect()), + Comparison::Lt => Ok(timestamps + .iter() + .map(|t| negated ^ (t < timestamp)) + .collect()), + Comparison::Le => Ok(timestamps + .iter() + .map(|t| negated ^ (t <= timestamp)) + .collect()), + Comparison::Like => unreachable!(), + } + } + + fn filter_points_by_datum( + &self, + negated: bool, + values: &ValueArray, + ) -> Result, Error> { + match (&self.value, values) { + (Literal::Integer(int), ValueArray::Integer(ints)) => { + match self.cmp { + Comparison::Eq => Ok(ints + .iter() + .map(|maybe_int| { + maybe_int + .map(|i| negated ^ (i128::from(i) == *int)) + .unwrap_or(false) + }) + .collect()), + Comparison::Ne => Ok(ints + .iter() + .map(|maybe_int| { + maybe_int + .map(|i| negated ^ (i128::from(i) != *int)) + .unwrap_or(false) + }) + .collect()), + Comparison::Gt => Ok(ints + .iter() + .map(|maybe_int| { + maybe_int + .map(|i| negated ^ (i128::from(i) > *int)) + .unwrap_or(false) + }) + .collect()), + Comparison::Ge => Ok(ints + .iter() + .map(|maybe_int| { + maybe_int + .map(|i| negated ^ (i128::from(i) >= *int)) + .unwrap_or(false) + }) + .collect()), + Comparison::Lt => Ok(ints + .iter() + .map(|maybe_int| { + maybe_int + .map(|i| negated ^ (i128::from(i) < *int)) + .unwrap_or(false) + }) + .collect()), + Comparison::Le => Ok(ints + .iter() + .map(|maybe_int| { + maybe_int + .map(|i| negated ^ (i128::from(i) <= *int)) + .unwrap_or(false) + }) + .collect()), + Comparison::Like => unreachable!(), + } + } + (Literal::Double(double), ValueArray::Double(doubles)) => { + match self.cmp { + Comparison::Eq => Ok(doubles + .iter() + 
.map(|maybe_double| { + maybe_double + .map(|d| negated ^ (d == *double)) + .unwrap_or(false) + }) + .collect()), + Comparison::Ne => Ok(doubles + .iter() + .map(|maybe_double| { + maybe_double + .map(|d| negated ^ (d != *double)) + .unwrap_or(false) + }) + .collect()), + Comparison::Gt => Ok(doubles + .iter() + .map(|maybe_double| { + maybe_double + .map(|d| negated ^ (d > *double)) + .unwrap_or(false) + }) + .collect()), + Comparison::Ge => Ok(doubles + .iter() + .map(|maybe_double| { + maybe_double + .map(|d| negated ^ (d >= *double)) + .unwrap_or(false) + }) + .collect()), + Comparison::Lt => Ok(doubles + .iter() + .map(|maybe_double| { + maybe_double + .map(|d| negated ^ (d < *double)) + .unwrap_or(false) + }) + .collect()), + Comparison::Le => Ok(doubles + .iter() + .map(|maybe_double| { + maybe_double + .map(|d| negated ^ (d <= *double)) + .unwrap_or(false) + }) + .collect()), + Comparison::Like => unreachable!(), + } + } + (Literal::String(string), ValueArray::String(strings)) => { + let string = string.as_str(); + match self.cmp { + Comparison::Eq => Ok(strings + .iter() + .map(|maybe_string| { + maybe_string + .as_deref() + .map(|s| negated ^ (s == string)) + .unwrap_or(false) + }) + .collect()), + Comparison::Ne => Ok(strings + .iter() + .map(|maybe_string| { + maybe_string + .as_deref() + .map(|s| negated ^ (s != string)) + .unwrap_or(false) + }) + .collect()), + Comparison::Gt => Ok(strings + .iter() + .map(|maybe_string| { + maybe_string + .as_deref() + .map(|s| negated ^ (s > string)) + .unwrap_or(false) + }) + .collect()), + Comparison::Ge => Ok(strings + .iter() + .map(|maybe_string| { + maybe_string + .as_deref() + .map(|s| negated ^ (s >= string)) + .unwrap_or(false) + }) + .collect()), + Comparison::Lt => Ok(strings + .iter() + .map(|maybe_string| { + maybe_string + .as_deref() + .map(|s| negated ^ (s < string)) + .unwrap_or(false) + }) + .collect()), + Comparison::Le => Ok(strings + .iter() + .map(|maybe_string| { + maybe_string + .as_deref() + .map(|s| negated ^ (s <= string)) + .unwrap_or(false) + }) + .collect()), + Comparison::Like => { + let re = Regex::new(string)?; + Ok(strings + .iter() + .map(|maybe_string| { + maybe_string + .as_deref() + .map(|s| negated ^ re.is_match(s)) + .unwrap_or(false) + }) + .collect()) + } + } + } + (Literal::Boolean(boolean), ValueArray::Boolean(booleans)) => { + match self.cmp { + Comparison::Eq => Ok(booleans + .iter() + .map(|maybe_boolean| { + maybe_boolean + .map(|b| negated ^ (b == *boolean)) + .unwrap_or(false) + }) + .collect()), + Comparison::Ne => Ok(booleans + .iter() + .map(|maybe_boolean| { + maybe_boolean + .map(|b| negated ^ (b != *boolean)) + .unwrap_or(false) + }) + .collect()), + Comparison::Gt => Ok(booleans + .iter() + .map(|maybe_boolean| { + maybe_boolean + .map(|b| negated ^ (b & !(*boolean))) + .unwrap_or(false) + }) + .collect()), + Comparison::Ge => Ok(booleans + .iter() + .map(|maybe_boolean| { + maybe_boolean + .map(|b| negated ^ (b >= *boolean)) + .unwrap_or(false) + }) + .collect()), + Comparison::Lt => Ok(booleans + .iter() + .map(|maybe_boolean| { + maybe_boolean + .map(|b| negated ^ (!b & *boolean)) + .unwrap_or(false) + }) + .collect()), + Comparison::Le => Ok(booleans + .iter() + .map(|maybe_boolean| { + maybe_boolean + .map(|b| negated ^ (b <= *boolean)) + .unwrap_or(false) + }) + .collect()), + Comparison::Like => unreachable!(), + } + } + (_, _) => { + let lit_type = match &self.value { + Literal::Uuid(_) => "UUID", + Literal::Duration(_) => "duration", + Literal::Timestamp(_) => "timestamp", + 
Literal::IpAddr(_) => "IP address", + Literal::Integer(_) => "integer", + Literal::Double(_) => "double", + Literal::String(_) => "string", + Literal::Boolean(_) => "boolean", + }; + anyhow::bail!( + "Cannot compare {} literal against values of type {}", + lit_type, + values.data_type(), + ) + } + } + } + + fn last_timestamp(&self) -> Option> { + if self.ident.as_str() == "timestamp" + && matches!( + self.cmp, + Comparison::Lt | Comparison::Le | Comparison::Eq + ) + { + let Literal::Timestamp(t) = self.value else { + return None; + }; + Some(t) + } else { + None + } + } +} + +#[cfg(test)] +mod tests { + use crate::oxql::ast::grammar::query_parser; + use crate::oxql::ast::logical_op::LogicalOp; + use crate::oxql::point::DataType; + use crate::oxql::point::MetricType; + use crate::oxql::point::Points; + use crate::oxql::point::ValueArray; + use crate::oxql::point::Values; + use crate::oxql::Table; + use crate::oxql::Timeseries; + use chrono::Utc; + use oximeter::FieldValue; + use std::time::Duration; + use uuid::Uuid; + + #[test] + fn test_atom_filter_double_points() { + let start_times = None; + let timestamps = + vec![Utc::now(), Utc::now() + Duration::from_secs(1000)]; + let values = vec![Values { + values: ValueArray::Double(vec![Some(0.0), Some(2.0)]), + metric_type: MetricType::Gauge, + }]; + let points = Points { start_times, timestamps, values }; + + // This filter should remove the first point based on its timestamp. + let t = Utc::now() + Duration::from_secs(10); + let q = + format!("filter timestamp > @{}", t.format("%Y-%m-%dT%H:%M:%S")); + let filter = query_parser::filter(q.as_str()).unwrap(); + let out = filter.filter_points(&points).unwrap(); + assert!(out.len() == 1); + assert_eq!( + out.values(0).unwrap().as_double().unwrap()[0], + points.values(0).unwrap().as_double().unwrap()[1], + ); + + // And this one the second point based on the datum + let filter = query_parser::filter("filter datum < 1.0").unwrap(); + let out = filter.filter_points(&points).unwrap(); + assert!(out.len() == 1); + assert_eq!( + out.values(0).unwrap().as_double().unwrap()[0], + points.values(0).unwrap().as_double().unwrap()[0], + ); + } + + #[test] + fn test_atom_filter_points_wrong_type() { + let start_times = None; + let timestamps = + vec![Utc::now(), Utc::now() + Duration::from_secs(1000)]; + let values = vec![Values { + values: ValueArray::Double(vec![Some(0.0), Some(2.0)]), + metric_type: MetricType::Gauge, + }]; + let points = Points { start_times, timestamps, values }; + + let filter = + query_parser::filter("filter datum < \"something\"").unwrap(); + assert!(filter.filter_points(&points).is_err()); + } + + #[test] + fn test_all_ident_names() { + let f = query_parser::filter("filter timestamp > @now() && datum < 1") + .unwrap(); + assert_eq!( + f.ident_names(), + ["datum", "timestamp"].into_iter().collect() + ); + + let f = query_parser::filter( + "filter timestamp > @now() - 1m && timestamp < @now()", + ) + .unwrap(); + let idents = f.ident_names(); + assert_eq!(idents.len(), 1); + assert_eq!(idents.iter().next().unwrap(), &"timestamp"); + } + + #[test] + #[allow(clippy::impossible_comparisons)] + fn test_filter_field_logic() { + for op in [LogicalOp::And, LogicalOp::Or, LogicalOp::Xor] { + let s = format!("filter (x > 10) {op} (x < 0)"); + let filter = query_parser::filter(&s).unwrap(); + let cases = &[11, 10, 5, 0, -1]; + for &val in cases.iter() { + let pass = match op { + LogicalOp::And => (val > 10) && (val < 0), + LogicalOp::Or => (val > 10) || (val < 0), + LogicalOp::Xor => (val > 10) ^ 
(val < 0), + }; + let result = filter + .filter_field("x", &FieldValue::I32(val)) + .expect("Filter should be considered comparable") + .expect("Filter should apply to field of the same name"); + assert_eq!( + result, + pass, + "Filter '{}' should {} the value {}", + filter, + if pass { "pass" } else { "not pass" }, + val, + ); + } + + // This names a different field, so should not apply. + assert_eq!( + filter + .filter_field("y", &FieldValue::I32(11)) + .expect("Filter should be considered comparable"), + None, + "Filter should not apply, since it names a different field" + ); + + // These values should not be comparable at all, so we'll return an + // error. + let incomparable = &[ + FieldValue::String("foo".into()), + FieldValue::Uuid(Uuid::new_v4()), + FieldValue::IpAddr("127.0.0.1".parse().unwrap()), + FieldValue::Bool(false), + ]; + for na in incomparable.iter() { + filter + .filter_field("x", na) + .expect_err("These should not be comparable at all"); + } + } + } + + #[test] + fn test_simplify_to_dnf() { + let cases = &[ + // Simple cases that should not be changed + ("a == 0", "a == 0"), + ("!(a == 0)", "!(a == 0)"), + ("a == 0 || b == 1", "a == 0 || b == 1"), + ("a == 0 && b == 1", "a == 0 && b == 1"), + + // Rewrite of XOR + ("a == 0 ^ b == 1", "(a == 0 && !(b == 1)) || (!(a == 0) && (b == 1))"), + + // Simple applications of distribution rules. + // + // Distribute conjunction over disjunction. + ("a == 0 && (b == 1 || c == 2)", "(a == 0 && b == 1) || (a == 0 && c == 2)"), + ("a == 0 && (b == 1 || c == 2 || d == 3)", "(a == 0 && b == 1) || (a == 0 && c == 2) || (a == 0 && d == 3)"), + ("a == 0 && (b == 1 || c == 2 || d == 3 || e == 4)", "(a == 0 && b == 1) || (a == 0 && c == 2) || (a == 0 && d == 3) || (a == 0 && e == 4)"), + ]; + for (input, expected) in cases.iter() { + let parsed_input = query_parser::filter_expr(input).unwrap(); + let simplified = parsed_input.simplify_to_dnf().unwrap(); + let parsed_expected = query_parser::filter_expr(expected).unwrap(); + assert_eq!( + simplified, + parsed_expected, + "\ninput expression: {}\nparsed to: {}\nsimplifed to: {}\nexpected: {}\n", + input, + parsed_input, + simplified, + expected, + ); + } + } + + #[test] + fn test_dnf_conversion_fails_on_extremely_long_expressions() { + let atom = "a == 0"; + let or_chain = std::iter::repeat(atom) + .take(super::EXPR_COMPLEXITY_ITERATIVE_LIMIT + 1) + .collect::>() + .join(" || "); + let expr = format!("{atom} && ({or_chain})"); + let parsed = query_parser::filter_expr(&expr).unwrap(); + assert!( + parsed.simplify_to_dnf().is_err(), + "Should fail for extremely long logical expressions" + ); + } + + #[test] + fn test_dnf_conversion_fails_on_extremely_deep_expressions() { + let atom = "a == 0"; + let mut expr = atom.to_string(); + for _ in 0..super::EXPR_COMPLEXITY_RECURSIVE_LIMIT + 1 { + expr = format!("{atom} && ({expr})"); + } + let parsed = query_parser::filter_expr(&expr).unwrap(); + assert!( + parsed.simplify_to_dnf().is_err(), + "Should fail for extremely deep logical expressions" + ); + } + + #[test] + fn test_filter_empty_timeseries() { + let ts = Timeseries::new( + std::iter::once((String::from("foo"), FieldValue::U8(0))), + DataType::Double, + MetricType::Gauge, + ) + .unwrap(); + let table = Table::from_timeseries("foo", std::iter::once(ts)).unwrap(); + let filt = query_parser::filter_expr("timestamp > @now()").unwrap(); + assert!( + filt.apply(&[table]).is_ok(), + "It's not an error to filter an empty table" + ); + } +} diff --git a/oximeter/db/src/oxql/ast/table_ops/get.rs 
b/oximeter/db/src/oxql/ast/table_ops/get.rs new file mode 100644 index 0000000000..f0ef22c2f6 --- /dev/null +++ b/oximeter/db/src/oxql/ast/table_ops/get.rs @@ -0,0 +1,15 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! AST node for the `get` table operation. + +// Copyright 2024 Oxide Computer Company + +use oximeter::TimeseriesName; + +/// An AST node like: `get foo:bar` +#[derive(Clone, Debug, PartialEq)] +pub struct Get { + pub timeseries_name: TimeseriesName, +} diff --git a/oximeter/db/src/oxql/ast/table_ops/group_by.rs b/oximeter/db/src/oxql/ast/table_ops/group_by.rs new file mode 100644 index 0000000000..f40572d762 --- /dev/null +++ b/oximeter/db/src/oxql/ast/table_ops/group_by.rs @@ -0,0 +1,746 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! AST node for the `group_by` operation. + +// Copyright 2024 Oxide Computer Company + +use chrono::DateTime; +use chrono::Utc; + +use crate::oxql::ast::ident::Ident; +use crate::oxql::point::DataType; +use crate::oxql::point::MetricType; +use crate::oxql::point::ValueArray; +use crate::oxql::Error; +use crate::oxql::Table; +use crate::oxql::Timeseries; +use crate::TimeseriesKey; +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; + +/// A table operation for grouping data by fields, apply a reducer to the +/// remaining. +#[derive(Clone, Debug, PartialEq)] +pub struct GroupBy { + pub identifiers: Vec, + pub reducer: Reducer, +} + +impl GroupBy { + // Apply the group_by table operation. + pub(crate) fn apply(&self, tables: &[Table]) -> Result, Error> { + anyhow::ensure!( + tables.len() == 1, + "Group by operations require exactly one table", + ); + let table = &tables[0]; + anyhow::ensure!( + table.is_aligned(), + "Input tables to a `group_by` must be aligned" + ); + + match self.reducer { + Reducer::Mean => self.reduce_mean(table), + Reducer::Sum => self.reduce_sum(table), + } + } + + fn check_input_timeseries(input: &Timeseries) -> Result<(), Error> { + anyhow::ensure!(!input.points.is_empty(), "Timeseries cannot be empty"); + + // For now, we can only apply this to 1-D timeseries. + anyhow::ensure!( + input.points.dimensionality() == 1, + "Group-by with multi-dimensional timeseries is not yet supported" + ); + let data_type = input.points.data_types().next().unwrap(); + anyhow::ensure!( + data_type.is_numeric(), + "Only numeric data types can be grouped, not {}", + data_type, + ); + let metric_type = input.points.metric_types().next().unwrap(); + anyhow::ensure!( + !matches!(metric_type, MetricType::Cumulative), + "Cumulative metric types cannot be grouped", + ); + Ok(()) + } + + // Reduce points in each group by summing. + fn reduce_sum(&self, table: &Table) -> Result, Error> { + assert_eq!(self.reducer, Reducer::Sum); + let mut output_table = Table::new(table.name()); + let kept_fields: Vec<_> = + self.identifiers.iter().map(Ident::as_str).collect(); + + for input in table.iter() { + Self::check_input_timeseries(input)?; + + // Throw away the fields in this timeseries that are not in the + // group_by list. + let dropped = input.copy_with_fields(&kept_fields)?; + let key = dropped.key(); + + // Fetch the existing timeseries, if one exists. 
If one does _not_ exist, + // we'll insert it as is, without converting. That's because we're + // just summing, not averaging. + match output_table.get_mut(key) { + Some(existing) => { + // No casting is done here, we're simply adding T + + // T -> T. + let new_values = dropped.points.values(0).unwrap(); + let existing_values = existing.points.values(0).unwrap(); + match (new_values, existing_values) { + ( + ValueArray::Double(new_values), + ValueArray::Double(existing_values), + ) => { + let new_timestamps = &dropped.points.timestamps; + + // We will be merging the new data with the + // existing, but borrow-checking limits the degree + // to which we can easily do this on the `existing` + // entry in the output table. Instead, aggregate + // everything into a copy of the expected data. + let mut timestamps = + existing.points.timestamps.clone(); + let mut values = existing_values.clone(); + + // Merge in the new values, so long as they actually + // exist. That is, we can just skip missing points + // in this round, since they do not contribute to + // the reduced value. + for (new_timestamp, new_value) in new_timestamps + .iter() + .zip(new_values) + .filter_map(|(timestamp, value)| { + if let Some(val) = value { + Some((*timestamp, *val)) + } else { + None + } + }) + { + // We're really doing binary search, on both the + // sample count map and the data array. They + // both must exist, or both not, or we've done + // our accounting incorrectly. + let maybe_index = + timestamps.binary_search(&new_timestamp); + match maybe_index { + Err(insert_at) => { + // This is a new timestamp. Insert it + // into the output timeseries. + timestamps + .insert(insert_at, new_timestamp); + values + .insert(insert_at, Some(new_value)); + } + Ok(ix) => { + // This is an existing + // timestamp, so we only need to + // add the new value. If the value + // didn't exist before, replace it. + *values[ix].get_or_insert(0.0) += + new_value; + } + } + } + + // Replace the existing output timeseries's + // timestamps and data arrays. + std::mem::swap( + &mut existing.points.timestamps, + &mut timestamps, + ); + existing + .points + .values_mut(0) + .unwrap() + .swap(ValueArray::Double(values)); + } + ( + ValueArray::Integer(new_values), + ValueArray::Integer(existing_values), + ) => { + let new_timestamps = &dropped.points.timestamps; + + // We will be merging the new data with the + // existing, but borrow-checking limits the degree + // to which we can easily do this on the `existing` + // entry in the output table. Instead, aggregate + // everything into a copy of the expected data. + let mut timestamps = + existing.points.timestamps.clone(); + let mut values = existing_values.clone(); + + // Merge in the new values, so long as they actually + // exist. That is, we can just skip missing points + // in this round, since they do not contribute to + // the reduced value. + for (new_timestamp, new_value) in new_timestamps + .iter() + .zip(new_values) + .filter_map(|(timestamp, value)| { + if let Some(val) = value { + Some((*timestamp, *val)) + } else { + None + } + }) + { + // We're really doing binary search, on both the + // sample count map and the data array. They + // both must exist, or both not, or we've done + // our accounting incorrectly. + let maybe_index = + timestamps.binary_search(&new_timestamp); + match maybe_index { + Err(insert_at) => { + // This is a new timestamp. Insert it + // into the output timeseries. 
+ timestamps + .insert(insert_at, new_timestamp); + values + .insert(insert_at, Some(new_value)); + } + Ok(ix) => { + // This is an existing + // timestamp, so we only need to + // add the new value. If the value + // didn't exist before, replace it. + *values[ix].get_or_insert(0) += + new_value; + } + } + } + + // Replace the existing output timeseries's + // timestamps and data arrays. + std::mem::swap( + &mut existing.points.timestamps, + &mut timestamps, + ); + existing + .points + .values_mut(0) + .unwrap() + .swap(ValueArray::Integer(values)); + } + _ => unreachable!(), + } + } + None => output_table.insert(dropped)?, + } + } + Ok(vec![output_table]) + } + + // Reduce points in each group by averaging. + fn reduce_mean(&self, table: &Table) -> Result, Error> { + assert_eq!(self.reducer, Reducer::Mean); + let mut output_table = Table::new(table.name()); + let kept_fields: Vec<_> = + self.identifiers.iter().map(Ident::as_str).collect(); + + // Keep track of the number of values at each output timestamp, within + // each group. + // + // As we iterate through timeseries, we reduce in-group points, so long + // as they occur at the same timestamp. And while timeseries must all be + // aligned the same way, they need not actually have identical + // timestamps. So what we're producing on the output is data at the + // union of all the input timestamps. + // + // These arrays keeps the count of values at each time, and may be either + // expanded or have its values incremented. Note that they're all + // doubles because we will be reducing at the end by dividing the sum at + // each point by the counts. + let mut sample_counts_by_group: BTreeMap< + TimeseriesKey, + BTreeMap, f64>, + > = BTreeMap::new(); + + for input in table.iter() { + Self::check_input_timeseries(input)?; + + // Throw away the fields in this timeseries that are not in the + // group_by list. + let dropped = input.copy_with_fields(&kept_fields)?; + let key = dropped.key(); + + // Fetch the existing timeseries, if one exists. If one does _not_ exist, + // we'll insert the table with the data type converted to a double, + // since we're always averaging. + match output_table.get_mut(key) { + Some(existing) => { + // Cast the new points to doubles, since we'll be + // aggregating. + let new_points = + dropped.points.cast(&[DataType::Double])?; + let ValueArray::Double(new_values) = + new_points.values(0).unwrap() + else { + unreachable!(); + }; + let new_timestamps = &new_points.timestamps; + + // We will be merging the new data with the + // existing, but borrow-checking limits the degree + // to which we can easily do this on the `existing` + // entry in the output table. Instead, aggregate + // everything into a copy of the expected data. + let mut timestamps = existing.points.timestamps.clone(); + let mut values = existing + .points + .values(0) + .unwrap() + .as_double() + .unwrap() + .clone(); + + // Also fetch a reference to the existing counts by + // timestamp for this group. This should exist. + let counts = sample_counts_by_group.get_mut(&key).expect( + "Should already have some sample counts for this group", + ); + + // Merge in the new values, so long as they actually + // exist. That is, we can just skip missing points + // in this round, since they do not contribute to + // the reduced value. 
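The merge step described above can be illustrated in isolation. The sketch below uses plain i64 timestamps and f64 values instead of chrono DateTimes and ValueArray, and `merge_samples` is a hypothetical helper name, not part of this module; it shows the same binary-search-then-insert-or-accumulate pattern used by both reducers (shown here for the sum case only, without the per-timestamp sample counts that the mean reducer also maintains):

// Fold new (timestamp, value) samples into accumulated arrays kept in
// timestamp order. Missing accumulated values are treated as zero.
fn merge_samples(
    timestamps: &mut Vec<i64>,
    values: &mut Vec<Option<f64>>,
    new: &[(i64, f64)],
) {
    for &(t, v) in new {
        match timestamps.binary_search(&t) {
            // A timestamp we have not seen yet: insert it, keeping order.
            Err(insert_at) => {
                timestamps.insert(insert_at, t);
                values.insert(insert_at, Some(v));
            }
            // An existing timestamp: add into the running sum, replacing a
            // previously-missing value with zero first.
            Ok(ix) => *values[ix].get_or_insert(0.0) += v,
        }
    }
}

fn main() {
    let mut timestamps = vec![10, 20, 30];
    let mut values = vec![Some(1.0), None, Some(3.0)];
    merge_samples(&mut timestamps, &mut values, &[(15, 5.0), (20, 2.0), (30, 4.0)]);
    assert_eq!(timestamps, vec![10, 15, 20, 30]);
    assert_eq!(values, vec![Some(1.0), Some(5.0), Some(2.0), Some(7.0)]);
}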
+ for (new_timestamp, new_value) in new_timestamps + .iter() + .zip(new_values) + .filter_map(|(timestamp, value)| { + if let Some(val) = value { + Some((*timestamp, *val)) + } else { + None + } + }) + { + // We're really doing binary search, on both the + // sample count map and the data array. They + // both must exist, or both not, or we've done + // our accounting incorrectly. + let maybe_index = + timestamps.binary_search(&new_timestamp); + let count = counts.entry(new_timestamp); + match (count, maybe_index) { + (Entry::Vacant(entry), Err(insert_at)) => { + // This is a new timestamp. Insert it + // into the output timeseries, and count + // it. + timestamps.insert(insert_at, new_timestamp); + values.insert(insert_at, Some(new_value)); + entry.insert(1.0); + } + (Entry::Occupied(mut entry), Ok(ix)) => { + // This is an existing timestamp. _Add_ + // it into the output timeseries, and + // count it. Its timestamp already + // exists. If the value was previously None, + // replace it now. + *values[ix].get_or_insert(0.0) += new_value; + *entry.get_mut() += 1.0; + } + (_, _) => { + panic!( + "In-group counts and output \ + values must both exist or \ + both be missing" + ); + } + } + } + + // Replace the existing output timeseries's + // timestamps and data arrays. + std::mem::swap( + &mut existing.points.timestamps, + &mut timestamps, + ); + existing + .points + .values_mut(0) + .unwrap() + .swap(ValueArray::Double(values)); + } + None => { + // There were no previous points for this group. + // + // We'll cast to doubles, but _keep_ any missing samples + // (None) that were in there. Those will have a "count" of + // 0, so that we don't incorrectly over-divide in the case + // where there are both missing and non-missing samples. + let new_timeseries = dropped.cast(&[DataType::Double])?; + let values = new_timeseries + .points + .values(0) + .unwrap() + .as_double() + .unwrap(); + // Insert a count of 1.0 for each timestamp remaining, and + // _zero_ for any where the values are none. + let counts = new_timeseries + .points + .timestamps + .iter() + .zip(values) + .map(|(timestamp, maybe_value)| { + let count = f64::from(maybe_value.is_some()); + (*timestamp, count) + }) + .collect(); + let old = sample_counts_by_group.insert(key, counts); + assert!(old.is_none(), "Should not have counts entry for first timeseries in the group"); + output_table.insert(new_timeseries)?; + } + } + } + + // Since we're computing the mean, we need to divide each output value + // by the number of values that went into it. + for each in output_table.iter_mut() { + let counts = sample_counts_by_group + .get(&each.key()) + .expect("key should have been inserted earlier"); + let ValueArray::Double(values) = each.points.values_mut(0).unwrap() + else { + unreachable!(); + }; + for (val, count) in values.iter_mut().zip(counts.values()) { + if let Some(x) = val.as_mut() { + *x /= *count; + } + } + } + Ok(vec![output_table]) + } +} + +/// A reduction operation applied to unnamed columns during a group by. +#[derive(Clone, Copy, Debug, Default, PartialEq)] +pub enum Reducer { + #[default] + Mean, + Sum, +} + +#[cfg(test)] +mod tests { + use super::{GroupBy, Reducer}; + use crate::oxql::{ + ast::{ + ident::Ident, + table_ops::align::{Align, AlignmentMethod}, + }, + point::{DataType, MetricType, ValueArray}, + Table, Timeseries, + }; + use chrono::{DateTime, Utc}; + use oximeter::FieldValue; + use std::{collections::BTreeMap, time::Duration}; + + // Which timeseries the second data point is missing from. 
+ #[derive(Clone, Copy, Debug)] + enum MissingValue { + Neither, + First, + Both, + } + + #[derive(Clone, Copy, Debug)] + struct TestConfig { + missing_value: MissingValue, + overlapping_times: bool, + reducer: Reducer, + } + + #[derive(Clone, Debug)] + #[allow(dead_code)] + struct TestTable { + aligned_table: Table, + grouped_table: Table, + query_end: DateTime, + timestamps: Vec>, + } + + impl TestTable { + fn new(cfg: TestConfig) -> Self { + let query_end = Utc::now(); + let mut timestamps = vec![ + query_end - Duration::from_secs(2), + query_end - Duration::from_secs(1), + query_end, + ]; + + // Create the first timeseries. + // + // This has two fields, one of which we'll group by. There are three + // timepoints of double values. + let mut fields = BTreeMap::new(); + fields.insert("int".to_string(), FieldValue::U8(0)); + fields.insert( + "name".to_string(), + FieldValue::String("whodat".into()), + ); + let mut ts0 = Timeseries::new( + fields.into_iter(), + DataType::Double, + MetricType::Gauge, + ) + .unwrap(); + ts0.points.start_times = None; + ts0.points.timestamps.clone_from(×tamps); + *ts0.points.values_mut(0).unwrap() = ValueArray::Double(vec![ + Some(1.0), + if matches!( + cfg.missing_value, + MissingValue::First | MissingValue::Both + ) { + None + } else { + Some(2.0) + }, + Some(3.0), + ]); + + // Create the second timeseries. + // + // This is nearly the same, and shares the same field value for the + // "int" field. When we group, we should reduce these two timeseries + // together. + let mut fields = BTreeMap::new(); + fields.insert("int".to_string(), FieldValue::U8(0)); + fields.insert( + "name".to_string(), + FieldValue::String("whodis".into()), + ); + let mut ts1 = Timeseries::new( + fields.into_iter(), + DataType::Double, + MetricType::Gauge, + ) + .unwrap(); + ts1.points.start_times = None; + + // Non-overlapping in this test setup means that we just shift one + // value from this array backward in time by one additional second. + // So we should have timestamps like: + // + // ts0: [ _, t0, t1, t2 ] + // ts1: [ t0, _, t1, t2 ] + // + // When reducing, t0 is never changed, and t1-t2 are always reduced + // together, if the values are present. 
+ ts1.points.timestamps = if cfg.overlapping_times { + timestamps.clone() + } else { + let mut new_timestamps = timestamps.clone(); + new_timestamps[0] = new_timestamps[0] - Duration::from_secs(1); + timestamps.insert(0, new_timestamps[0]); + new_timestamps + }; + *ts1.points.values_mut(0).unwrap() = ValueArray::Double(vec![ + Some(2.0), + if matches!(cfg.missing_value, MissingValue::Both) { + None + } else { + Some(3.0) + }, + Some(4.0), + ]); + + let mut table = Table::new("foo"); + table.insert(ts0).unwrap(); + table.insert(ts1).unwrap(); + + // Align the actual table, based on the input, and apply the right + // group-by + let align = Align { + method: AlignmentMethod::MeanWithin, + period: Duration::from_secs(1), + }; + let aligned_tables = align.apply(&[table], &query_end).unwrap(); + let group_by = GroupBy { + identifiers: vec![Ident("int".into())], + reducer: cfg.reducer, + }; + let grouped_tables = group_by.apply(&aligned_tables).unwrap(); + assert_eq!( + grouped_tables.len(), + 1, + "Group by should produce exaclty 1 table" + ); + let grouped_table = grouped_tables.into_iter().next().unwrap(); + let aligned_table = aligned_tables.into_iter().next().unwrap(); + + let test = + Self { timestamps, aligned_table, grouped_table, query_end }; + + // These checks are all valid for grouping in general, independent + // of the exact missing values or reducer. + assert_eq!( + test.grouped_table.len(), + 1, + "Should have grouped both timeseries down to 1" + ); + let grouped_timeseries = test.grouped_table.iter().next().unwrap(); + assert_eq!( + grouped_timeseries.fields.len(), + 1, + "Should have only one grouped-by field" + ); + assert_eq!( + grouped_timeseries.fields.get("int").unwrap(), + &FieldValue::U8(0), + "Grouped-by field was not maintained correctly" + ); + let points = &grouped_timeseries.points; + assert_eq!(points.dimensionality(), 1, "Points should still be 1D"); + assert_eq!( + points.start_times, None, + "Points should not have start times" + ); + assert_eq!( + points.timestamps, test.timestamps, + "Points do not have correct timestamps" + ); + + test + } + } + + #[test] + fn test_group_by() { + const TEST_CASES: &[(TestConfig, &[Option])] = &[ + ( + TestConfig { + missing_value: MissingValue::Neither, + overlapping_times: true, + reducer: Reducer::Mean, + }, + // This is the most basic case, where we simply average all the + // values together. They exactly line up and none are missing. + &[Some(1.5), Some(2.5), Some(3.5)], + ), + ( + TestConfig { + missing_value: MissingValue::Neither, + overlapping_times: true, + reducer: Reducer::Sum, + }, + // This is the next-simplest case, where we simply sum all the + // values together. They exactly line up and none are missing. + &[Some(3.0), Some(5.0), Some(7.0)], + ), + ( + TestConfig { + missing_value: MissingValue::Neither, + overlapping_times: false, + reducer: Reducer::Mean, + }, + // In this case, the timestamps don't all overlap, though some + // of them do. In particular, the arrays are shifted by one + // timestamp relative to each other, so there are 2 extra + // values. The one value that does overlap is averaged, and the + // other two are unchanged. + &[Some(2.0), Some(1.0), Some(2.5), Some(3.5)], + ), + ( + TestConfig { + missing_value: MissingValue::Neither, + overlapping_times: false, + reducer: Reducer::Sum, + }, + // Here, we should have 4 output samples because the timestamps + // don't overlap. The second input timeseries has its first + // point shifted back by one second. 
That means the first two + // values are just from one array (no reduction), while the next + // two are reduced as usual. + &[Some(2.0), Some(1.0), Some(5.0), Some(7.0)], + ), + ( + TestConfig { + missing_value: MissingValue::First, + overlapping_times: true, + reducer: Reducer::Mean, + }, + // In this case, we have a missing value for the middle + // timestamp of the first input timeseries. That means we should + // still have 3 output samples, but the second point isn't an + // aggregation, it's just the input value, from the second + // timeseries. + &[Some(1.5), Some(3.0), Some(3.5)], + ), + ( + TestConfig { + missing_value: MissingValue::First, + overlapping_times: true, + reducer: Reducer::Sum, + }, + // Same as above, but we're summing, not averaging. + &[Some(3.0), Some(3.0), Some(7.0)], + ), + ( + TestConfig { + missing_value: MissingValue::First, + overlapping_times: false, + reducer: Reducer::Mean, + }, + // We need 4 output points again here, but we also have a + // missing value. So we'll take the first value from the second + // timeseries; the second from the first; the second from the + // second directly, since its corresponding point is missing in + // the first, and then the average of both in the last point. + &[Some(2.0), Some(1.0), Some(3.0), Some(3.5)], + ), + ( + TestConfig { + missing_value: MissingValue::First, + overlapping_times: false, + reducer: Reducer::Sum, + }, + // Same as above, but summing, instead of averaging. + &[Some(2.0), Some(1.0), Some(3.0), Some(7.0)], + ), + ( + TestConfig { + missing_value: MissingValue::Both, + overlapping_times: true, + reducer: Reducer::Mean, + }, + // In this case, the 2nd timepoint is missing from both + // timeseries. We should preserve that as a missing value in the + // output. + &[Some(1.5), None, Some(3.5)], + ), + ( + TestConfig { + missing_value: MissingValue::Both, + overlapping_times: true, + reducer: Reducer::Sum, + }, + // Same as above, but summing instead of averaging. + &[Some(3.0), None, Some(7.0)], + ), + ]; + for (test_config, expected_data) in TEST_CASES.iter() { + let test_table = TestTable::new(*test_config); + let grouped_timeseries = + test_table.grouped_table.iter().next().unwrap(); + let points = &grouped_timeseries.points; + let values = points.values(0).unwrap().as_double().unwrap(); + assert_eq!( + values, expected_data, + "Timeseries values were not grouped correctly, \ + test_config = {test_config:?}" + ); + } + } +} diff --git a/oximeter/db/src/oxql/ast/table_ops/join.rs b/oximeter/db/src/oxql/ast/table_ops/join.rs new file mode 100644 index 0000000000..3c150a4acf --- /dev/null +++ b/oximeter/db/src/oxql/ast/table_ops/join.rs @@ -0,0 +1,385 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! An AST node describing join table operations. + +// Copyright 2024 Oxide Computer Company + +use crate::oxql::point::MetricType; +use crate::oxql::point::Points; +use crate::oxql::point::Values; +use crate::oxql::Error; +use crate::oxql::Table; +use anyhow::Context; + +/// An AST node for a natural inner join. +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct Join; +impl Join { + // Apply the group_by table operation. 
+ pub(crate) fn apply(&self, tables: &[Table]) -> Result, Error> { + anyhow::ensure!( + tables.len() > 1, + "Join operations require more than one table", + ); + let mut tables = tables.iter().cloned().enumerate(); + let (_, mut out) = tables.next().unwrap(); + anyhow::ensure!( + out.is_aligned(), + "Input tables for a join operation must be aligned" + ); + let metric_types = out + .iter() + .next() + .context("Input tables for a join operation may not be empty")? + .points + .metric_types() + .collect::>(); + ensure_all_metric_types(metric_types.iter().copied())?; + let alignment = out.alignment(); + assert!(alignment.is_some()); + + for (i, next_table) in tables { + anyhow::ensure!( + next_table.alignment() == alignment, + "All tables to a join operator must have the same \ + alignment. Expected alignment: {:?}, found a table \ + aligned with: {:?}", + alignment.unwrap(), + next_table.alignment(), + ); + let name = next_table.name().to_string(); + for next_timeseries in next_table.into_iter() { + let new_types = + next_timeseries.points.metric_types().collect::>(); + ensure_all_metric_types(new_types.iter().copied())?; + anyhow::ensure!( + metric_types == new_types, + "Input tables do not all share the same metric types" + ); + + let key = next_timeseries.key(); + let Some(timeseries) = out.iter_mut().find(|t| t.key() == key) + else { + anyhow::bail!( + "Join failed, input table {} does not \ + contain a timeseries with key {}", + i, + key, + ); + }; + + // Joining the timeseries is done by stacking together the + // values that have the same timestamp. + // + // If two value arrays have different timestamps, which is + // possible if they're derived from two separately-aligned + // tables, then we need to correctly ensure that: + // + // 1. They have the same alignment, and + // 2. We merge the timepoints rather than simply creating a + // ragged array of points. + timeseries.points = inner_join_point_arrays( + ×eries.points, + &next_timeseries.points, + )?; + } + // We'll also update the name, to indicate the joined data. + out.name.push(','); + out.name.push_str(&name); + } + Ok(vec![out]) + } +} + +// Given two arrays of points, stack them together at matching timepoints. +// +// For time points in either which do not have a corresponding point in the +// other, the entire time point is elided. +fn inner_join_point_arrays( + left: &Points, + right: &Points, +) -> Result { + // Create an output array with roughly the right capacity, and double the + // number of dimensions. We're trying to stack output value arrays together + // along the dimension axis. + let data_types = + left.data_types().chain(right.data_types()).collect::>(); + let metric_types = + left.metric_types().chain(right.metric_types()).collect::>(); + let mut out = Points::with_capacity( + left.len().max(right.len()), + data_types.iter().copied(), + metric_types.iter().copied(), + )?; + + // Iterate through each array until one is exhausted. We're only inserting + // values from both arrays where the timestamps actually match, since this + // is an inner join. We may want to insert missing values where timestamps + // do not match on either side, when we support an outer join of some kind. 
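The timestamp-matching walk described in the comment above is a standard two-cursor merge over sorted arrays. A minimal standalone sketch, with plain (timestamp, value) tuples in place of Points and a hypothetical `inner_join` helper:

// Advance two cursors over timestamp-sorted arrays, emitting a row only
// where both sides share a timestamp; unmatched points are dropped.
fn inner_join(
    left: &[(i64, i64)],
    right: &[(i64, f64)],
) -> Vec<(i64, i64, f64)> {
    let (mut i, mut j) = (0, 0);
    let mut out = Vec::new();
    while i < left.len() && j < right.len() {
        let (lt, lv) = left[i];
        let (rt, rv) = right[j];
        if lt == rt {
            // Timestamps match: stack the values side by side.
            out.push((lt, lv, rv));
            i += 1;
            j += 1;
        } else if lt < rt {
            // Left is behind: no match for this point, skip it.
            i += 1;
        } else {
            j += 1;
        }
    }
    out
}

fn main() {
    let left = [(0, 1), (2, 2), (5, 3)];
    let right = [(0, 4.0), (3, 5.0), (5, 6.0)];
    assert_eq!(inner_join(&left, &right), vec![(0, 1, 4.0), (5, 3, 6.0)]);
}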
+ let n_left_dim = left.values.len(); + let mut left_ix = 0; + let mut right_ix = 0; + while left_ix < left.len() && right_ix < right.len() { + let left_timestamp = left.timestamps[left_ix]; + let right_timestamp = right.timestamps[right_ix]; + if left_timestamp == right_timestamp { + out.timestamps.push(left_timestamp); + push_concrete_values( + &mut out.values[..n_left_dim], + &left.values, + left_ix, + ); + push_concrete_values( + &mut out.values[n_left_dim..], + &right.values, + right_ix, + ); + left_ix += 1; + right_ix += 1; + } else if left_timestamp < right_timestamp { + left_ix += 1; + } else { + right_ix += 1; + } + } + Ok(out) +} + +// Push the `i`th value from each dimension of `from` onto `to`. +fn push_concrete_values(to: &mut [Values], from: &[Values], i: usize) { + assert_eq!(to.len(), from.len()); + for (output, input) in to.iter_mut().zip(from.iter()) { + let input_array = &input.values; + let output_array = &mut output.values; + assert_eq!(input_array.data_type(), output_array.data_type()); + if let Ok(ints) = input_array.as_integer() { + output_array.as_integer_mut().unwrap().push(ints[i]); + continue; + } + if let Ok(doubles) = input_array.as_double() { + output_array.as_double_mut().unwrap().push(doubles[i]); + continue; + } + if let Ok(bools) = input_array.as_boolean() { + output_array.as_boolean_mut().unwrap().push(bools[i]); + continue; + } + if let Ok(strings) = input_array.as_string() { + output_array.as_string_mut().unwrap().push(strings[i].clone()); + continue; + } + if let Ok(dists) = input_array.as_integer_distribution() { + output_array + .as_integer_distribution_mut() + .unwrap() + .push(dists[i].clone()); + continue; + } + if let Ok(dists) = input_array.as_double_distribution() { + output_array + .as_double_distribution_mut() + .unwrap() + .push(dists[i].clone()); + continue; + } + unreachable!(); + } +} + +// Return an error if any metric types are not suitable for joining. +fn ensure_all_metric_types( + mut metric_types: impl ExactSizeIterator, +) -> Result<(), Error> { + anyhow::ensure!( + metric_types + .all(|mt| matches!(mt, MetricType::Gauge | MetricType::Delta)), + "Join operation requires timeseries with gauge or \ + delta metric types", + ); + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::oxql::point::DataType; + use crate::oxql::point::Datum; + use crate::oxql::point::ValueArray; + use chrono::Utc; + use std::time::Duration; + + #[test] + fn test_push_concrete_values() { + let mut points = Points::with_capacity( + 2, + [DataType::Integer, DataType::Double].into_iter(), + [MetricType::Gauge, MetricType::Gauge].into_iter(), + ) + .unwrap(); + + // Push a concrete value for the integer dimension + let from_ints = vec![Values { + values: ValueArray::Integer(vec![Some(1)]), + metric_type: MetricType::Gauge, + }]; + push_concrete_values(&mut points.values[..1], &from_ints, 0); + + // And another for the double dimension. 
+ let from_doubles = vec![Values { + values: ValueArray::Double(vec![Some(2.0)]), + metric_type: MetricType::Gauge, + }]; + push_concrete_values(&mut points.values[1..], &from_doubles, 0); + + assert_eq!( + points.dimensionality(), + 2, + "Points should have 2 dimensions", + ); + let ints = points.values[0].values.as_integer().unwrap(); + assert_eq!( + ints.len(), + 1, + "Should have pushed one point in the first dimension" + ); + assert_eq!( + ints[0], + Some(1), + "Should have pushed 1 onto the first dimension" + ); + let doubles = points.values[1].values.as_double().unwrap(); + assert_eq!( + doubles.len(), + 1, + "Should have pushed one point in the second dimension" + ); + assert_eq!( + doubles[0], + Some(2.0), + "Should have pushed 2.0 onto the second dimension" + ); + } + + #[test] + fn test_join_point_arrays() { + let now = Utc::now(); + + // Create a set of integer points to join with. + // + // This will have two timestamps, one of which will match the points + // below that are merged in. + let int_points = Points { + start_times: None, + timestamps: vec![ + now - Duration::from_secs(3), + now - Duration::from_secs(2), + now, + ], + values: vec![Values { + values: ValueArray::Integer(vec![Some(1), Some(2), Some(3)]), + metric_type: MetricType::Gauge, + }], + }; + + // Create an additional set of double points. + // + // This also has two timepoints, one of which matches with the above, + // and one of which does not. + let double_points = Points { + start_times: None, + timestamps: vec![ + now - Duration::from_secs(3), + now - Duration::from_secs(1), + now, + ], + values: vec![Values { + values: ValueArray::Double(vec![ + Some(4.0), + Some(5.0), + Some(6.0), + ]), + metric_type: MetricType::Gauge, + }], + }; + + // Merge the arrays. + let merged = + inner_join_point_arrays(&int_points, &double_points).unwrap(); + + // Basic checks that we merged in the right values and have the right + // types and dimensions. + assert_eq!( + merged.dimensionality(), + 2, + "Should have appended the dimensions from each input array" + ); + assert_eq!(merged.len(), 2, "Should have merged two common points",); + assert_eq!( + merged.data_types().collect::>(), + &[DataType::Integer, DataType::Double], + "Should have combined the data types of the input arrays" + ); + assert_eq!( + merged.metric_types().collect::>(), + &[MetricType::Gauge, MetricType::Gauge], + "Should have combined the metric types of the input arrays" + ); + + // Check the actual values of the array. + let mut points = merged.iter_points(); + + // The first and last timepoint overlapped between the two arrays, so we + // should have both of them as concrete samples. + let pt = points.next().unwrap(); + assert_eq!(pt.start_time, None, "Gauges don't have a start time"); + assert_eq!( + *pt.timestamp, int_points.timestamps[0], + "Should have taken the first input timestamp from both arrays", + ); + assert_eq!( + *pt.timestamp, double_points.timestamps[0], + "Should have taken the first input timestamp from both arrays", + ); + let values = pt.values; + assert_eq!(values.len(), 2, "Should have 2 dimensions"); + assert_eq!( + &values[0], + &(Datum::Integer(Some(&1)), MetricType::Gauge), + "Should have pulled value from first integer array." + ); + assert_eq!( + &values[1], + &(Datum::Double(Some(&4.0)), MetricType::Gauge), + "Should have pulled value from second double array." 
+ ); + + // And the next point + let pt = points.next().unwrap(); + assert_eq!(pt.start_time, None, "Gauges don't have a start time"); + assert_eq!( + *pt.timestamp, int_points.timestamps[2], + "Should have taken the input timestamp from both arrays", + ); + assert_eq!( + *pt.timestamp, double_points.timestamps[2], + "Should have taken the input timestamp from both arrays", + ); + let values = pt.values; + assert_eq!(values.len(), 2, "Should have 2 dimensions"); + assert_eq!( + &values[0], + &(Datum::Integer(Some(&3)), MetricType::Gauge), + "Should have pulled value from first integer array." + ); + assert_eq!( + &values[1], + &(Datum::Double(Some(&6.0)), MetricType::Gauge), + "Should have pulled value from second double array." + ); + + // And there should be no other values. + assert!(points.next().is_none(), "There should be no more points"); + } +} diff --git a/oximeter/db/src/oxql/ast/table_ops/limit.rs b/oximeter/db/src/oxql/ast/table_ops/limit.rs new file mode 100644 index 0000000000..0205868f5c --- /dev/null +++ b/oximeter/db/src/oxql/ast/table_ops/limit.rs @@ -0,0 +1,263 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! An AST node apply limiting timeseries operations. + +// Copyright 2024 Oxide Computer Company + +use crate::oxql::point::Points; +use crate::oxql::point::ValueArray; +use crate::oxql::point::Values; +use crate::oxql::Error; +use crate::oxql::Table; +use crate::oxql::Timeseries; +use std::num::NonZeroUsize; + +/// The kind of limiting operation +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum LimitKind { + /// Limit the timeseries to the first points. + First, + /// Limit the timeseries to the last points. + Last, +} + +/// A table operation limiting a timeseries to a number of points. +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct Limit { + /// The kind of limit + pub kind: LimitKind, + /// The number of points the timeseries is limited to. + pub count: NonZeroUsize, +} +impl Limit { + /// Apply the limit operation to the input tables. + pub(crate) fn apply(&self, tables: &[Table]) -> Result, Error> { + if tables.is_empty() { + return Ok(vec![]); + } + + tables + .iter() + .map(|table| { + let timeseries = table.iter().map(|timeseries| { + let input_points = ×eries.points; + + // Compute the slice indices for this timeseries. + let (start, end) = match self.kind { + LimitKind::First => { + // The count in the limit operation should not be + // larger than the number of data points. + let end = input_points.len().min(self.count.get()); + (0, end) + } + LimitKind::Last => { + // When taking the last k points, we need to + // subtract the count from the end of the array, + // taking care that we don't panic if the count is + // larger than the number of data points. + let start = input_points + .len() + .saturating_sub(self.count.get()); + let end = input_points.len(); + (start, end) + } + }; + + // Slice the various data arrays. 
+ let start_times = input_points + .start_times + .as_ref() + .map(|s| s[start..end].to_vec()); + let timestamps = + input_points.timestamps[start..end].to_vec(); + let values = input_points + .values + .iter() + .map(|vals| { + let values = match &vals.values { + ValueArray::Integer(inner) => { + ValueArray::Integer( + inner[start..end].to_vec(), + ) + } + ValueArray::Double(inner) => { + ValueArray::Double( + inner[start..end].to_vec(), + ) + } + ValueArray::Boolean(inner) => { + ValueArray::Boolean( + inner[start..end].to_vec(), + ) + } + ValueArray::String(inner) => { + ValueArray::String( + inner[start..end].to_vec(), + ) + } + ValueArray::IntegerDistribution(inner) => { + ValueArray::IntegerDistribution( + inner[start..end].to_vec(), + ) + } + ValueArray::DoubleDistribution(inner) => { + ValueArray::DoubleDistribution( + inner[start..end].to_vec(), + ) + } + }; + Values { values, metric_type: vals.metric_type } + }) + .collect(); + let points = Points { start_times, timestamps, values }; + Timeseries { + fields: timeseries.fields.clone(), + points, + alignment: timeseries.alignment, + } + }); + Table::from_timeseries(table.name(), timeseries) + }) + .collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::oxql::point::{DataType, MetricType}; + use chrono::Utc; + use oximeter::FieldValue; + use std::{collections::BTreeMap, time::Duration}; + + fn test_tables() -> Vec
{ + let mut fields = BTreeMap::new(); + fields.insert("foo".to_string(), FieldValue::from("bar")); + fields.insert("bar".to_string(), FieldValue::from(1u8)); + + let now = Utc::now(); + let timestamps = vec![ + now - Duration::from_secs(4), + now - Duration::from_secs(3), + now - Duration::from_secs(2), + ]; + + let mut timeseries = Timeseries::new( + fields.clone().into_iter(), + DataType::Integer, + MetricType::Gauge, + ) + .unwrap(); + timeseries.points.timestamps.clone_from(×tamps); + timeseries.points.values[0].values.as_integer_mut().unwrap().extend([ + Some(1), + Some(2), + Some(3), + ]); + let table1 = + Table::from_timeseries("first", std::iter::once(timeseries)) + .unwrap(); + + let mut timeseries = Timeseries::new( + fields.clone().into_iter(), + DataType::Integer, + MetricType::Gauge, + ) + .unwrap(); + timeseries.points.timestamps.clone_from(×tamps); + timeseries.points.values[0].values.as_integer_mut().unwrap().extend([ + Some(4), + Some(5), + Some(6), + ]); + let table2 = + Table::from_timeseries("second", std::iter::once(timeseries)) + .unwrap(); + + vec![table1, table2] + } + + #[test] + fn test_first_k() { + test_limit_impl(LimitKind::First); + } + + #[test] + fn test_last_k() { + test_limit_impl(LimitKind::Last); + } + + fn test_limit_impl(kind: LimitKind) { + let (start, end) = match kind { + LimitKind::First => (0, 2), + LimitKind::Last => (1, 3), + }; + + // Create test data and apply limit operation. + let tables = test_tables(); + let limit = Limit { kind, count: 2.try_into().unwrap() }; + let limited = limit.apply(&tables).expect("This should be infallible"); + assert_eq!( + tables.len(), + limited.len(), + "Limiting should not change the number of tables" + ); + + // Should apply to all tables the same way. + for (table, limited_table) in tables.iter().zip(limited.iter()) { + assert_eq!( + table.name(), + limited_table.name(), + "Limited table whould have the same name" + ); + + // Compare all timeseries. + for (timeseries, limited_timeseries) in + table.iter().zip(limited_table.iter()) + { + // The fields and basic shape should not change. + assert_eq!( + timeseries.fields, limited_timeseries.fields, + "Limited table should have the same fields" + ); + assert_eq!( + timeseries.alignment, limited_timeseries.alignment, + "Limited timeseries should have the same alignment" + ); + assert_eq!( + timeseries.points.dimensionality(), + limited_timeseries.points.dimensionality(), + "Limited timeseries should have the same number of dimensions" + ); + + // Compare data points themselves. + // + // These depend on the limit operation. + let points = ×eries.points; + let limited_points = &limited_timeseries.points; + assert_eq!(points.start_times, limited_points.start_times); + assert_eq!( + points.timestamps[start..end], + limited_points.timestamps + ); + assert_eq!( + limited_points.values[0].values.as_integer().unwrap(), + &points.values[0].values.as_integer().unwrap()[start..end], + "Points should be limited to [{start}..{end}]", + ); + } + } + + // Check that limiting the table to more points than exist returns the + // whole table. 
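+        // `apply` clamps the slice indices: `first k` uses an end index of
+        // `len().min(k)` and `last k` a start index of `len().saturating_sub(k)`,
+        // so a count larger than the series returns every point unchanged.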
+ let limit = Limit { kind, count: 100.try_into().unwrap() }; + let limited = limit.apply(&tables).expect("This should be infallible"); + assert_eq!( + limited, + tables, + "Limiting tables to more than their length should return the same thing" + ); + } +} diff --git a/oximeter/db/src/oxql/ast/table_ops/mod.rs b/oximeter/db/src/oxql/ast/table_ops/mod.rs new file mode 100644 index 0000000000..46f5106a08 --- /dev/null +++ b/oximeter/db/src/oxql/ast/table_ops/mod.rs @@ -0,0 +1,80 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! AST nodes for table operations. + +// Copyright 2024 Oxide Computer Company + +pub mod align; +pub mod filter; +pub mod get; +pub mod group_by; +pub mod join; +pub mod limit; + +use self::align::Align; +use self::filter::Filter; +use self::group_by::GroupBy; +use self::join::Join; +use self::limit::Limit; +use crate::oxql::ast::Query; +use crate::oxql::Error; +use crate::oxql::Table; +use chrono::DateTime; +use chrono::Utc; +use oximeter::TimeseriesName; + +/// A basic table operation, the atoms of an OxQL query. +#[derive(Clone, Debug, PartialEq)] +pub enum BasicTableOp { + Get(TimeseriesName), + Filter(Filter), + GroupBy(GroupBy), + Join(Join), + Align(Align), + Limit(Limit), +} + +impl BasicTableOp { + pub(crate) fn apply( + &self, + tables: &[Table], + query_end: &DateTime, + ) -> Result, Error> { + match self { + BasicTableOp::Get(_) => panic!("Should not apply get table ops"), + BasicTableOp::Filter(f) => f.apply(tables), + BasicTableOp::GroupBy(g) => g.apply(tables), + BasicTableOp::Join(j) => j.apply(tables), + BasicTableOp::Align(a) => a.apply(tables, query_end), + BasicTableOp::Limit(l) => l.apply(tables), + } + } +} + +/// A grouped table operation is a subquery in OxQL. +#[derive(Clone, Debug, PartialEq)] +pub struct GroupedTableOp { + pub ops: Vec, +} + +/// Any kind of OxQL table operation. +#[derive(Clone, Debug, PartialEq)] +pub enum TableOp { + Basic(BasicTableOp), + Grouped(GroupedTableOp), +} + +impl TableOp { + pub(crate) fn apply( + &self, + tables: &[Table], + query_end: &DateTime, + ) -> Result, Error> { + let TableOp::Basic(basic) = self else { + panic!("Should not apply grouped table ops"); + }; + basic.apply(tables, query_end) + } +} diff --git a/oximeter/db/src/oxql/mod.rs b/oximeter/db/src/oxql/mod.rs new file mode 100644 index 0000000000..b93d75b859 --- /dev/null +++ b/oximeter/db/src/oxql/mod.rs @@ -0,0 +1,39 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! The Oximeter Query Language, OxQL. + +// Copyright 2024 Oxide Computer Company + +use peg::error::ParseError as PegError; +use peg::str::LineCol; + +pub mod ast; +pub mod point; +pub mod query; +pub mod table; + +pub use self::query::Query; +pub use self::table::Table; +pub use self::table::Timeseries; +pub use anyhow::Error; + +// Format a PEG parsing error into a nice anyhow error. 
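+// The message begins with "Error at <line>:<column>", shows up to 24
+// characters of the query on either side of the failing offset, and ends with
+// the parser's "Expected: ..." description of what would have been accepted.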
+fn fmt_parse_error(source: &str, err: PegError) -> Error { + use std::fmt::Write; + let mut out = + format!("Error at {}:{}", err.location.line, err.location.column); + const CONTEXT: usize = 24; + let start = err.location.offset.saturating_sub(CONTEXT); + let end = err.location.offset.saturating_add(CONTEXT).min(source.len()); + if let Some(context) = source.get(start..end) { + let prefix_len = out.len() + 2; + writeln!(out, ": .. {context} ..").unwrap(); + let left_pad = err.location.offset - start + 3 + prefix_len; + let right_pad = end - err.location.offset + 3 + prefix_len; + writeln!(out, "{:right_pad$}", ' ', ' ').unwrap(); + } + writeln!(out, "Expected: {}", err).unwrap(); + anyhow::anyhow!(out) +} diff --git a/oximeter/db/src/oxql/point.rs b/oximeter/db/src/oxql/point.rs new file mode 100644 index 0000000000..e12214aaf0 --- /dev/null +++ b/oximeter/db/src/oxql/point.rs @@ -0,0 +1,2040 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Definition of data points for OxQL. + +// Copyright 2024 Oxide Computer Company + +use super::Error; +use anyhow::Context; +use chrono::DateTime; +use chrono::Utc; +use num::ToPrimitive; +use oximeter::DatumType; +use oximeter::Measurement; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use std::fmt; + +/// The type of each individual data point's value in a timeseries. +#[derive( + Clone, Copy, Debug, Deserialize, Hash, JsonSchema, PartialEq, Serialize, +)] +#[serde(rename_all = "snake_case")] +pub enum DataType { + /// A 64-bit integer. + Integer, + /// A 64-bit float. + Double, + /// A boolean. + Boolean, + /// A string. + String, + /// A distribution, a sequence of integer bins and counts. + IntegerDistribution, + /// A distribution, a sequence of double bins and integer counts. + DoubleDistribution, +} + +impl DataType { + /// True if this is a numeric scalar type. + pub fn is_numeric(&self) -> bool { + matches!(self, DataType::Integer | DataType::Double) + } +} + +impl TryFrom for DataType { + type Error = Error; + + fn try_from(datum_type: DatumType) -> Result { + let data_type = match datum_type { + DatumType::Bool => DataType::Boolean, + DatumType::I8 + | DatumType::U8 + | DatumType::I16 + | DatumType::U16 + | DatumType::I32 + | DatumType::U32 + | DatumType::I64 + | DatumType::U64 + | DatumType::CumulativeI64 + | DatumType::CumulativeU64 => DataType::Integer, + DatumType::F32 + | DatumType::F64 + | DatumType::CumulativeF32 + | DatumType::CumulativeF64 => DataType::Double, + DatumType::String => DataType::String, + DatumType::HistogramI8 + | DatumType::HistogramU8 + | DatumType::HistogramI16 + | DatumType::HistogramU16 + | DatumType::HistogramI32 + | DatumType::HistogramU32 + | DatumType::HistogramI64 + | DatumType::HistogramU64 => DataType::IntegerDistribution, + DatumType::HistogramF32 | DatumType::HistogramF64 => { + DataType::DoubleDistribution + } + DatumType::Bytes => { + anyhow::bail!("Unsupported datum type: {}", datum_type) + } + }; + Ok(data_type) + } +} + +impl fmt::Display for DataType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// The type of the metric itself, indicating what its values represent. 
+#[derive( + Clone, Copy, Debug, Deserialize, Hash, JsonSchema, PartialEq, Serialize, +)] +#[serde(rename_all = "snake_case")] +pub enum MetricType { + /// The value represents an instantaneous measurement in time. + Gauge, + /// The value represents a difference between two points in time. + Delta, + /// The value represents an accumulation between two points in time. + Cumulative, +} + +impl fmt::Display for MetricType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +// A converted oximeter datum, used internally. +// +// This is used when computing deltas between cumulative measurements, and so +// only represents the possible cumulative types. +#[derive(Clone, Debug, PartialEq)] +enum CumulativeDatum { + Integer(i64), + Double(f64), + IntegerDistribution(Distribution), + DoubleDistribution(Distribution), +} + +impl CumulativeDatum { + // Construct a datum from a cumulative type, failing if the measurement is + // not cumulative. + fn from_cumulative(meas: &Measurement) -> Result { + let datum = match meas.datum() { + oximeter::Datum::CumulativeI64(val) => { + CumulativeDatum::Integer(val.value()) + } + oximeter::Datum::CumulativeU64(val) => { + let int = val + .value() + .try_into() + .context("Overflow converting u64 to i64")?; + CumulativeDatum::Integer(int) + } + oximeter::Datum::CumulativeF32(val) => { + CumulativeDatum::Double(val.value().into()) + } + oximeter::Datum::CumulativeF64(val) => { + CumulativeDatum::Double(val.value()) + } + oximeter::Datum::HistogramI8(hist) => hist.into(), + oximeter::Datum::HistogramU8(hist) => hist.into(), + oximeter::Datum::HistogramI16(hist) => hist.into(), + oximeter::Datum::HistogramU16(hist) => hist.into(), + oximeter::Datum::HistogramI32(hist) => hist.into(), + oximeter::Datum::HistogramU32(hist) => hist.into(), + oximeter::Datum::HistogramI64(hist) => hist.into(), + oximeter::Datum::HistogramU64(hist) => hist.try_into()?, + oximeter::Datum::HistogramF32(hist) => hist.into(), + oximeter::Datum::HistogramF64(hist) => hist.into(), + other => anyhow::bail!( + "Input datum of type {} is not cumulative", + other.datum_type(), + ), + }; + Ok(datum) + } +} + +/// A single list of values, for one dimension of a timeseries. +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +pub struct Values { + // The data values. + pub(super) values: ValueArray, + // The type of this metric. + pub(super) metric_type: MetricType, +} + +impl Values { + // Construct an empty array of values to hold the provided types. + fn with_capacity( + size: usize, + data_type: DataType, + metric_type: MetricType, + ) -> Self { + Self { values: ValueArray::with_capacity(size, data_type), metric_type } + } + + fn len(&self) -> usize { + self.values.len() + } +} + +/// Reference type describing a single point in a `Points` array. +/// +/// The `Points` type is column-major, in that the timestamps and each data +/// value (one for each dimension) are stored in separate arrays, of the same +/// length. This type holds references to the relevant items in each array that +/// constitutes a single point. +#[derive(Clone, Debug, PartialEq)] +pub struct Point<'a> { + /// The start time of this point, if any. + pub start_time: Option<&'a DateTime>, + /// The timestamp for this point. + pub timestamp: &'a DateTime, + /// One datum and its metric type, for each dimension in the point. + /// + /// The datum itself is optional, and will be `None` if the point is missing + /// a value at the corresponding point and dimension. 
+ pub values: Vec<(Datum<'a>, MetricType)>, +} + +impl<'a> fmt::Display for Point<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + const TIMESTAMP_FMT: &str = "%Y-%m-%d %H:%M:%S.%f"; + match &self.start_time { + Some(start_time) => write!( + f, + "[{}, {}]: ", + start_time.format(TIMESTAMP_FMT), + self.timestamp.format(TIMESTAMP_FMT) + )?, + None => write!(f, "{}: ", self.timestamp.format(TIMESTAMP_FMT))?, + } + let values = self + .values + .iter() + .map(|(datum, _)| datum.to_string()) + .collect::>() + .join(","); + write!(f, "[{}]", values) + } +} + +impl<'a> Point<'a> { + /// Return the dimensionality of this point. + pub fn dimensionality(&self) -> usize { + self.values.len() + } +} + +/// A reference to a single datum of a multidimensional value. +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum Datum<'a> { + Boolean(Option), + Integer(Option<&'a i64>), + Double(Option<&'a f64>), + String(Option<&'a str>), + IntegerDistribution(Option<&'a Distribution>), + DoubleDistribution(Option<&'a Distribution>), +} + +impl<'a> fmt::Display for Datum<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Datum::Boolean(Some(inner)) => write!(f, "{}", inner), + Datum::Integer(Some(inner)) => write!(f, "{}", inner), + Datum::Double(Some(inner)) => write!(f, "{}", inner), + Datum::String(Some(inner)) => write!(f, "{}", inner), + Datum::IntegerDistribution(Some(inner)) => write!(f, "{}", inner), + Datum::DoubleDistribution(Some(inner)) => write!(f, "{}", inner), + Datum::Boolean(None) + | Datum::Integer(None) + | Datum::Double(None) + | Datum::String(None) + | Datum::IntegerDistribution(None) + | Datum::DoubleDistribution(None) => { + write!(f, "-") + } + } + } +} + +/// Timepoints and values for one timeseries. +// +// Invariants: +// +// The start_time and timestamp arrays must be the same length, or start_times +// must be None. +// +// The length of timestamps (and possibly start_times) must be the same as the +// length of _each element_ of the `values` array. That is, there are as many +// timestamps as data values. +// +// The length of `values` is the number of dimensions, and is always at least 1. +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +pub struct Points { + // The start time points for cumulative or delta metrics. + pub(super) start_times: Option>>, + // The timestamp of each value. + pub(super) timestamps: Vec>, + // The array of data values, one for each dimension. + pub(super) values: Vec, +} + +impl Points { + /// Construct an empty array of points to hold data of the provided type. + pub fn empty(data_type: DataType, metric_type: MetricType) -> Self { + Self::with_capacity( + 0, + std::iter::once(data_type), + std::iter::once(metric_type), + ) + .unwrap() + } + + // Return a mutable reference to the value array of the specified dimension, if any. + pub(super) fn values_mut(&mut self, dim: usize) -> Option<&mut ValueArray> { + self.values.get_mut(dim).map(|val| &mut val.values) + } + + /// Return a reference to the value array of the specified dimension, if any. + pub fn values(&self, dim: usize) -> Option<&ValueArray> { + self.values.get(dim).map(|val| &val.values) + } + + /// Return the dimensionality of the data points, i.e., the number of values + /// at each timestamp. + pub fn dimensionality(&self) -> usize { + self.values.len() + } + + /// Return the number of points in self. 
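+    // This relies on the invariant that the timestamps and every value array
+    // have the same length, so the first dimension's length is representative.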
+ pub fn len(&self) -> usize { + self.values[0].len() + } + + /// Construct an empty array of points to hold size data points of the + /// provided types. + /// + /// The type information may have length > 1 to reserve space for + /// multi-dimensional values. + pub fn with_capacity( + size: usize, + data_types: D, + metric_types: M, + ) -> Result + where + D: ExactSizeIterator, + M: ExactSizeIterator, + { + anyhow::ensure!( + data_types.len() == metric_types.len(), + "Data and metric type iterators must have the same length", + ); + let timestamps = Vec::with_capacity(size); + let mut start_times = None; + let mut values = Vec::with_capacity(data_types.len()); + for (data_type, metric_type) in data_types.zip(metric_types) { + if matches!(metric_type, MetricType::Delta | MetricType::Cumulative) + && start_times.is_none() + { + start_times.replace(Vec::with_capacity(size)); + } + values.push(Values::with_capacity(size, data_type, metric_type)); + } + Ok(Self { start_times, timestamps, values }) + } + + /// Return the data types of self. + pub fn data_types(&self) -> impl ExactSizeIterator + '_ { + self.values.iter().map(|val| val.values.data_type()) + } + + /// Return the metric types of self. + pub fn metric_types( + &self, + ) -> impl ExactSizeIterator + '_ { + self.values.iter().map(|val| val.metric_type) + } + + /// Return the single metric type of all values in self, it they are all the + /// same. + pub fn metric_type(&self) -> Option { + let mut types = self.metric_types(); + let Some(first_type) = types.next() else { + unreachable!(); + }; + if types.all(|ty| ty == first_type) { + Some(first_type) + } else { + None + } + } + + /// Construct a list of gauge points from a list of gauge measurements. + /// + /// An error is returned if the provided input measurements are not gauges, + /// or do not all have the same datum type. + pub fn gauge_from_gauge( + measurements: &[Measurement], + ) -> Result { + let Some(first) = measurements.first() else { + anyhow::bail!( + "Cannot construct points from empty measurements array" + ); + }; + let datum_type = first.datum_type(); + anyhow::ensure!( + !datum_type.is_cumulative(), + "Measurements are not gauges" + ); + let data_type = DataType::try_from(datum_type)?; + let mut self_ = Self::with_capacity( + measurements.len(), + std::iter::once(data_type), + std::iter::once(MetricType::Gauge), + )?; + + // Since we're directly pushing gauges, each measurement is independent + // of the others. Simply translate types and push the data. + for measurement in measurements.iter() { + anyhow::ensure!( + measurement.datum_type() == datum_type, + "Measurements must all have the same datum type", + ); + self_ + .values_mut(0) + .unwrap() + .push_value_from_datum(measurement.datum())?; + self_.timestamps.push(measurement.timestamp()); + } + Ok(self_) + } + + /// Construct a list of delta points from a list of cumulative measurements. + /// + /// An error is returned if the provided measurements are not of the same + /// type or not cumulative. 
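+    //
+    // For example, cumulative samples with values 2 at `t1` and 10 at `t2`
+    // that share a start time `t0` become the deltas [2, 8]: the first delta
+    // spans [t0, t1] and the second spans [t1, t2].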
+ pub fn delta_from_cumulative( + measurements: &[Measurement], + ) -> Result { + let mut iter = measurements.iter(); + let Some(first) = iter.next() else { + anyhow::bail!( + "Cannot construct points from empty measurements array" + ); + }; + let datum_type = first.datum_type(); + anyhow::ensure!( + datum_type.is_cumulative(), + "Measurements are not cumulative", + ); + let data_type = DataType::try_from(datum_type)?; + let mut self_ = Self::with_capacity( + measurements.len(), + std::iter::once(data_type), + std::iter::once(MetricType::Delta), + )?; + + // Construct the first point, which directly uses the start / end time + // of the first measurement itself. + self_.values_mut(0).unwrap().push_value_from_datum(first.datum())?; + self_.start_times.as_mut().unwrap().push(first.start_time().unwrap()); + self_.timestamps.push(first.timestamp()); + + // We need to keep track of the last cumulative measurement that's not + // _missing_, to compute successive differences between neighboring + // points. Note that we only need the datum from the measurement, + // because even missing samples have valid timestamp information. So we + // can always generate the timestamp for each delta, even if the datum + // is missing. + let mut last_datum = if first.is_missing() { + None + } else { + // Safety: We're confirming above the measurement is cumulative, and + // in this block if the datum is missing. So we know this conversion + // should succeed. + Some(CumulativeDatum::from_cumulative(first).unwrap()) + }; + + // We also need to keep track of the start time of this "epoch", periods + // where the cumulative data has the same start time. If there are jumps + // forward in this, and thus gaps in the records, we need to update the + // start_time of the epoch and also the last datum. + let mut epoch_start_time = first.start_time().unwrap(); + + // Push the remaining values. + for measurement in iter { + anyhow::ensure!( + measurement.datum_type() == datum_type, + "Measurements must all have the same datum type" + ); + + // For the time ranges we must have either: + // + // 1. Either the start time of the _first_ and new points must be + // equal, with the timestamp of the new strictly later than the + // timestamp of the last, OR + // 2. Both the start time and timestamp of the new point must be + // strictly later than the timestamp (and thus start time) of the + // last point. In this case, we effectively have a _gap_ in the + // timeseries, and so we need to update `first_start_time` to + // reflect this new epoch. + let last_start_time = + *self_.start_times.as_ref().unwrap().last().unwrap(); + let last_timestamp = *self_.timestamps.last().unwrap(); + let new_start_time = measurement.start_time().unwrap(); + let new_timestamp = measurement.timestamp(); + + if epoch_start_time == new_start_time + && last_timestamp < new_timestamp + { + // Push the timestamps to reflect this interval, from the end of + // the last sample to the end of this one. + self_.start_times.as_mut().unwrap().push(last_timestamp); + self_.timestamps.push(new_timestamp); + + // The data value is the difference between the last non-missing + // datum and the new datum. + self_.values_mut(0).unwrap().push_diff_from_last_to_datum( + &last_datum, + measurement.datum(), + data_type, + )?; + } else if new_start_time > last_timestamp + && new_timestamp > last_timestamp + { + // Push the new start time directly, since it begins a new + // epoch. 
+ self_.start_times.as_mut().unwrap().push(new_start_time); + self_.timestamps.push(new_timestamp); + + // Update the epoch start time, and also simply push the datum + // directly. The difference with the previous is not meaningful, + // since we've begun a new epoch. + epoch_start_time = new_start_time; + self_ + .values_mut(0) + .unwrap() + .push_value_from_datum(measurement.datum())?; + } else { + // Print as useful a message as we can here. + anyhow::bail!( + "Cannot compute a delta, the timestamp of the next \ + sample has a new start time, or overlaps with the \ + last processed sample. \n \ + epoch start time = {epoch_start_time}\n \ + last timestamp = [{last_start_time}, {last_timestamp}]\n \ + new timestamp = [{new_start_time}, {new_timestamp}]" + ); + } + + // If the new datum is _not_ missing, we'll update the last one. + if !measurement.is_missing() { + last_datum.replace( + CumulativeDatum::from_cumulative(measurement).unwrap(), + ); + } + } + Ok(self_) + } + + /// Iterate over each point in self. + pub fn iter_points(&self) -> impl Iterator> + '_ { + (0..self.len()).map(|i| Point { + start_time: self.start_times.as_ref().map(|s| &s[i]), + timestamp: &self.timestamps[i], + values: self + .values + .iter() + .map(|val| (val.values.get(i), val.metric_type)) + .collect(), + }) + } + + // Filter points in self to those where `to_keep` is true. + pub(crate) fn filter(&self, to_keep: Vec) -> Result { + anyhow::ensure!( + to_keep.len() == self.len(), + "Filter array must be the same length as self", + ); + + // Compute the indices of values we're keeping. + let indices: Vec<_> = to_keep + .iter() + .enumerate() + .filter(|(_ix, to_keep)| **to_keep) + .map(|(ix, _)| ix) + .collect(); + let n_true = indices.len(); + let mut out = Self::with_capacity( + n_true, + self.data_types(), + self.metric_types(), + )?; + + // Push the compressed start times, if any. + if let Some(start_times) = self.start_times.as_ref() { + let Some(new_start_times) = out.start_times.as_mut() else { + unreachable!(); + }; + for ix in indices.iter().copied() { + new_start_times.push(start_times[ix]); + } + } + + // Push the compressed timestamps. + for ix in indices.iter().copied() { + out.timestamps.push(self.timestamps[ix]); + } + + // Push each dimension of the data values themselves. + for (new_values, existing_values) in + out.values.iter_mut().zip(self.values.iter()) + { + match (&mut new_values.values, &existing_values.values) { + (ValueArray::Integer(new), ValueArray::Integer(existing)) => { + for ix in indices.iter().copied() { + new.push(existing[ix]); + } + } + (ValueArray::Double(new), ValueArray::Double(existing)) => { + for ix in indices.iter().copied() { + new.push(existing[ix]); + } + } + (ValueArray::Boolean(new), ValueArray::Boolean(existing)) => { + for ix in indices.iter().copied() { + new.push(existing[ix]); + } + } + (ValueArray::String(new), ValueArray::String(existing)) => { + for ix in indices.iter().copied() { + new.push(existing[ix].clone()); + } + } + ( + ValueArray::IntegerDistribution(new), + ValueArray::IntegerDistribution(existing), + ) => { + for ix in indices.iter().copied() { + new.push(existing[ix].clone()); + } + } + ( + ValueArray::DoubleDistribution(new), + ValueArray::DoubleDistribution(existing), + ) => { + for ix in indices.iter().copied() { + new.push(existing[ix].clone()); + } + } + (_, _) => unreachable!(), + } + } + Ok(out) + } + + // Return a new set of points, with the values casted to the provided types. 
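+    //
+    // Scalars convert between one another (numeric conversion, string
+    // parsing, and zero/non-zero or empty/non-empty for booleans), while
+    // distributions only "cast" to their own type; any other combination is
+    // an error. The `types` slice must contain one entry per dimension.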
+ pub(crate) fn cast(&self, types: &[DataType]) -> Result { + anyhow::ensure!( + types.len() == self.dimensionality(), + "Cannot cast to {} types, the data has dimensionality {}", + types.len(), + self.dimensionality(), + ); + let start_times = self.start_times.clone(); + let timestamps = self.timestamps.clone(); + let mut new_values = Vec::with_capacity(self.dimensionality()); + for (new_type, existing_values) in types.iter().zip(self.values.iter()) + { + let values = match (new_type, &existing_values.values) { + // "Cast" from i64 -> i64 + (DataType::Integer, ValueArray::Integer(vals)) => { + ValueArray::Integer(vals.clone()) + } + + // Cast f64 -> i64 + (DataType::Integer, ValueArray::Double(doubles)) => { + let mut new = Vec::with_capacity(doubles.len()); + for maybe_double in doubles.iter().copied() { + if let Some(d) = maybe_double { + let as_int = d + .to_i64() + .context("Cannot cast double {d} to i64")?; + new.push(Some(as_int)); + } else { + new.push(None); + } + } + ValueArray::Integer(new) + } + + // Cast bool -> i64 + (DataType::Integer, ValueArray::Boolean(bools)) => { + ValueArray::Integer( + bools + .iter() + .copied() + .map(|b| b.map(i64::from)) + .collect(), + ) + } + + // Cast string -> i64, by parsing. + (DataType::Integer, ValueArray::String(strings)) => { + let mut new = Vec::with_capacity(strings.len()); + for maybe_str in strings.iter() { + if let Some(s) = maybe_str { + let as_int = s + .parse() + .context("Cannot cast string '{s}' to i64")?; + new.push(Some(as_int)); + } else { + new.push(None); + } + } + ValueArray::Integer(new) + } + + // Cast i64 -> f64 + (DataType::Double, ValueArray::Integer(ints)) => { + let mut new = Vec::with_capacity(ints.len()); + for maybe_int in ints.iter().copied() { + if let Some(int) = maybe_int { + let as_double = int.to_f64().context( + "Cannot cast integer {int} as double", + )?; + new.push(Some(as_double)); + } else { + new.push(None); + } + } + ValueArray::Double(new) + } + + // "Cast" f64 -> f64 + (DataType::Double, ValueArray::Double(vals)) => { + ValueArray::Double(vals.clone()) + } + + // Cast bool -> f64 + (DataType::Double, ValueArray::Boolean(bools)) => { + ValueArray::Double( + bools + .iter() + .copied() + .map(|b| b.map(f64::from)) + .collect(), + ) + } + + // Cast string -> f64, by parsing. + (DataType::Double, ValueArray::String(strings)) => { + let mut new = Vec::with_capacity(strings.len()); + for maybe_str in strings.iter() { + if let Some(s) = maybe_str { + let as_double = s + .parse() + .context("Cannot cast string '{s}' to f64")?; + new.push(Some(as_double)); + } else { + new.push(None); + } + } + ValueArray::Double(new) + } + + // Cast i64 -> bool + // + // Any non-zero value is considered truthy. + (DataType::Boolean, ValueArray::Integer(ints)) => { + let mut new = Vec::with_capacity(ints.len()); + for maybe_int in ints.iter().copied() { + match maybe_int { + Some(0) => new.push(Some(false)), + Some(_) => new.push(Some(true)), + None => new.push(None), + } + } + ValueArray::Boolean(new) + } + + // Cast f64 -> bool + // + // Any non-zero value is considered truthy. 
+ (DataType::Boolean, ValueArray::Double(doubles)) => { + let mut new = Vec::with_capacity(doubles.len()); + for maybe_double in doubles.iter().copied() { + match maybe_double { + Some(d) if d == 0.0 => new.push(Some(false)), + Some(_) => new.push(Some(true)), + None => new.push(None), + } + } + ValueArray::Boolean(new) + } + + // "Cast" bool -> bool + (DataType::Boolean, ValueArray::Boolean(vals)) => { + ValueArray::Boolean(vals.clone()) + } + + // Cast string -> bool. + // + // Any non-empty string is considered truthy + (DataType::Boolean, ValueArray::String(strings)) => { + let mut new = Vec::with_capacity(strings.len()); + for maybe_str in strings.iter() { + match maybe_str { + Some(s) if s.is_empty() => new.push(Some(false)), + Some(_) => new.push(Some(true)), + None => new.push(None), + } + } + ValueArray::Boolean(new) + } + + // Cast i64 -> string + (DataType::String, ValueArray::Integer(ints)) => { + ValueArray::String( + ints.iter().map(|x| x.map(|x| x.to_string())).collect(), + ) + } + + // Cast f64 -> string + (DataType::String, ValueArray::Double(doubles)) => { + ValueArray::String( + doubles + .iter() + .map(|x| x.map(|x| x.to_string())) + .collect(), + ) + } + + // Cast bool -> string + (DataType::String, ValueArray::Boolean(bools)) => { + ValueArray::String( + bools + .iter() + .map(|x| x.map(|x| x.to_string())) + .collect(), + ) + } + + // "Cast" string -> string + (DataType::String, ValueArray::String(vals)) => { + ValueArray::String(vals.clone()) + } + + // "Cast" distributions to the same type of distribution + ( + DataType::IntegerDistribution, + ValueArray::IntegerDistribution(vals), + ) => ValueArray::IntegerDistribution(vals.clone()), + ( + DataType::DoubleDistribution, + ValueArray::DoubleDistribution(vals), + ) => ValueArray::DoubleDistribution(vals.clone()), + + // All other casts are invalid + (_, vals) => anyhow::bail!( + "Cannot cast {} -> {}", + new_type, + vals.data_type(), + ), + }; + new_values.push(Values { + values, + metric_type: existing_values.metric_type, + }); + } + Ok(Self { start_times, timestamps, values: new_values }) + } + + /// Return true if self contains no data points. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +/// List of data values for one timeseries. +/// +/// Each element is an option, where `None` represents a missing sample. +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +#[serde(rename_all = "snake_case", tag = "type", content = "values")] +pub enum ValueArray { + Integer(Vec>), + Double(Vec>), + Boolean(Vec>), + String(Vec>), + IntegerDistribution(Vec>>), + DoubleDistribution(Vec>>), +} + +impl ValueArray { + // Create an empty array with capacity `size` of the provided data type. + fn with_capacity(size: usize, data_type: DataType) -> Self { + match data_type { + DataType::Integer => Self::Integer(Vec::with_capacity(size)), + DataType::Double => Self::Double(Vec::with_capacity(size)), + DataType::Boolean => Self::Boolean(Vec::with_capacity(size)), + DataType::String => Self::String(Vec::with_capacity(size)), + DataType::IntegerDistribution => { + Self::IntegerDistribution(Vec::with_capacity(size)) + } + DataType::DoubleDistribution => { + Self::DoubleDistribution(Vec::with_capacity(size)) + } + } + } + + // Return the data type in self. 
+ pub(super) fn data_type(&self) -> DataType { + match self { + ValueArray::Integer(_) => DataType::Integer, + ValueArray::Double(_) => DataType::Double, + ValueArray::Boolean(_) => DataType::Boolean, + ValueArray::String(_) => DataType::String, + ValueArray::IntegerDistribution(_) => DataType::IntegerDistribution, + ValueArray::DoubleDistribution(_) => DataType::DoubleDistribution, + } + } + + // Access the inner array of booleans, if possible. + pub(super) fn as_boolean_mut( + &mut self, + ) -> Result<&mut Vec>, Error> { + let ValueArray::Boolean(inner) = self else { + anyhow::bail!( + "Cannot access value array as boolean type, it has type {}", + self.data_type(), + ); + }; + Ok(inner) + } + + /// Access the values as an array of bools, if they have that type. + pub fn as_boolean(&self) -> Result<&Vec>, Error> { + let ValueArray::Boolean(inner) = self else { + anyhow::bail!( + "Cannot access value array as boolean type, it has type {}", + self.data_type(), + ); + }; + Ok(inner) + } + + /// Access the values as an array of integers, if they have that type. + pub fn as_integer(&self) -> Result<&Vec>, Error> { + let ValueArray::Integer(inner) = self else { + anyhow::bail!( + "Cannot access value array as integer type, it has type {}", + self.data_type(), + ); + }; + Ok(inner) + } + + // Access the inner array of integers, if possible. + pub(super) fn as_integer_mut( + &mut self, + ) -> Result<&mut Vec>, Error> { + let ValueArray::Integer(inner) = self else { + anyhow::bail!( + "Cannot access value array as integer type, it has type {}", + self.data_type(), + ); + }; + Ok(inner) + } + + /// Access the values as an array of doubles, if they have that type. + pub fn as_double(&self) -> Result<&Vec>, Error> { + let ValueArray::Double(inner) = self else { + anyhow::bail!( + "Cannot access value array as double type, it has type {}", + self.data_type(), + ); + }; + Ok(inner) + } + + // Access the inner array of doubles, if possible. + pub(super) fn as_double_mut( + &mut self, + ) -> Result<&mut Vec>, Error> { + let ValueArray::Double(inner) = self else { + anyhow::bail!( + "Cannot access value array as double type, it has type {}", + self.data_type(), + ); + }; + Ok(inner) + } + + /// Access the values as an array of strings, if they have that type. + pub fn as_string(&self) -> Result<&Vec>, Error> { + let ValueArray::String(inner) = self else { + anyhow::bail!( + "Cannot access value array as string type, it has type {}", + self.data_type(), + ); + }; + Ok(inner) + } + + // Access the inner array of strings, if possible. + pub(super) fn as_string_mut( + &mut self, + ) -> Result<&mut Vec>, Error> { + let ValueArray::String(inner) = self else { + anyhow::bail!( + "Cannot access value array as string type, it has type {}", + self.data_type(), + ); + }; + Ok(inner) + } + + /// Access the values as an array of integer distribution, if they have that + /// type. + pub fn as_integer_distribution( + &self, + ) -> Result<&Vec>>, Error> { + let ValueArray::IntegerDistribution(inner) = self else { + anyhow::bail!( + "Cannot access value array as integer \ + distribution type, it has type {}", + self.data_type(), + ); + }; + Ok(inner) + } + + // Access the inner array of integer distribution, if possible. 
+ pub(super) fn as_integer_distribution_mut( + &mut self, + ) -> Result<&mut Vec>>, Error> { + let ValueArray::IntegerDistribution(inner) = self else { + anyhow::bail!( + "Cannot access value array as integer \ + distribution type, it has type {}", + self.data_type(), + ); + }; + Ok(inner) + } + + /// Access the values as an array of double distribution, if they have that + /// type. + pub fn as_double_distribution( + &self, + ) -> Result<&Vec>>, Error> { + let ValueArray::DoubleDistribution(inner) = self else { + anyhow::bail!( + "Cannot access value array as double \ + distribution type, it has type {}", + self.data_type(), + ); + }; + Ok(inner) + } + + // Access the inner array of double distributions, if possible. + pub(super) fn as_double_distribution_mut( + &mut self, + ) -> Result<&mut Vec>>, Error> { + let ValueArray::DoubleDistribution(inner) = self else { + anyhow::bail!( + "Cannot access value array as double \ + distribution type, it has type {}", + self.data_type(), + ); + }; + Ok(inner) + } + + fn push_missing(&mut self, datum_type: DatumType) -> Result<(), Error> { + match datum_type { + DatumType::Bool => self.as_boolean_mut()?.push(None), + DatumType::I8 + | DatumType::U8 + | DatumType::I16 + | DatumType::U16 + | DatumType::I32 + | DatumType::U32 + | DatumType::I64 + | DatumType::U64 + | DatumType::CumulativeI64 + | DatumType::CumulativeU64 => self.as_integer_mut()?.push(None), + DatumType::F32 + | DatumType::F64 + | DatumType::CumulativeF32 + | DatumType::CumulativeF64 => self.as_double_mut()?.push(None), + DatumType::String => self.as_string_mut()?.push(None), + DatumType::Bytes => { + anyhow::bail!("Bytes data types are not yet supported") + } + DatumType::HistogramI8 + | DatumType::HistogramU8 + | DatumType::HistogramI16 + | DatumType::HistogramU16 + | DatumType::HistogramI32 + | DatumType::HistogramU32 + | DatumType::HistogramI64 + | DatumType::HistogramU64 => { + self.as_integer_distribution_mut()?.push(None) + } + DatumType::HistogramF32 | DatumType::HistogramF64 => { + self.as_double_distribution_mut()?.push(None) + } + } + Ok(()) + } + + // Push a value directly from a datum, without modification. 
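+    //
+    // Integer-like datum types are widened to `i64` (u64 values may overflow
+    // and return an error), floats are widened to `f64`, histograms become
+    // distributions, and a missing datum pushes `None` for its type.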
+ fn push_value_from_datum( + &mut self, + datum: &oximeter::Datum, + ) -> Result<(), Error> { + match datum { + oximeter::Datum::Bool(b) => self.as_boolean_mut()?.push(Some(*b)), + oximeter::Datum::I8(i) => { + self.as_integer_mut()?.push(Some(i64::from(*i))) + } + oximeter::Datum::U8(i) => { + self.as_integer_mut()?.push(Some(i64::from(*i))) + } + oximeter::Datum::I16(i) => { + self.as_integer_mut()?.push(Some(i64::from(*i))) + } + oximeter::Datum::U16(i) => { + self.as_integer_mut()?.push(Some(i64::from(*i))) + } + oximeter::Datum::I32(i) => { + self.as_integer_mut()?.push(Some(i64::from(*i))) + } + oximeter::Datum::U32(i) => { + self.as_integer_mut()?.push(Some(i64::from(*i))) + } + oximeter::Datum::I64(i) => self.as_integer_mut()?.push(Some(*i)), + oximeter::Datum::U64(i) => { + let i = + i.to_i64().context("Failed to convert u64 datum to i64")?; + self.as_integer_mut()?.push(Some(i)); + } + oximeter::Datum::F32(f) => { + self.as_double_mut()?.push(Some(f64::from(*f))) + } + oximeter::Datum::F64(f) => self.as_double_mut()?.push(Some(*f)), + oximeter::Datum::String(s) => { + self.as_string_mut()?.push(Some(s.clone())) + } + oximeter::Datum::Bytes(_) => { + anyhow::bail!("Bytes data types are not yet supported") + } + oximeter::Datum::CumulativeI64(c) => { + self.as_integer_mut()?.push(Some(c.value())) + } + oximeter::Datum::CumulativeU64(c) => { + let c = c + .value() + .to_i64() + .context("Failed to convert u64 datum to i64")?; + self.as_integer_mut()?.push(Some(c)); + } + oximeter::Datum::CumulativeF32(c) => { + self.as_double_mut()?.push(Some(f64::from(c.value()))) + } + oximeter::Datum::CumulativeF64(c) => { + self.as_double_mut()?.push(Some(c.value())) + } + oximeter::Datum::HistogramI8(h) => self + .as_integer_distribution_mut()? + .push(Some(Distribution::from(h))), + oximeter::Datum::HistogramU8(h) => self + .as_integer_distribution_mut()? + .push(Some(Distribution::from(h))), + oximeter::Datum::HistogramI16(h) => self + .as_integer_distribution_mut()? + .push(Some(Distribution::from(h))), + oximeter::Datum::HistogramU16(h) => self + .as_integer_distribution_mut()? + .push(Some(Distribution::from(h))), + oximeter::Datum::HistogramI32(h) => self + .as_integer_distribution_mut()? + .push(Some(Distribution::from(h))), + oximeter::Datum::HistogramU32(h) => self + .as_integer_distribution_mut()? + .push(Some(Distribution::from(h))), + oximeter::Datum::HistogramI64(h) => self + .as_integer_distribution_mut()? + .push(Some(Distribution::from(h))), + oximeter::Datum::HistogramU64(h) => self + .as_integer_distribution_mut()? + .push(Some(Distribution::try_from(h)?)), + oximeter::Datum::HistogramF32(h) => self + .as_double_distribution_mut()? + .push(Some(Distribution::from(h))), + oximeter::Datum::HistogramF64(h) => self + .as_double_distribution_mut()? + .push(Some(Distribution::from(h))), + oximeter::Datum::Missing(missing) => { + self.push_missing(missing.datum_type())? + } + } + Ok(()) + } + + // Push a delta from the last valid datum and a new one. + // + // This takes the last valid datum, if any, and a new one. It computes the + // delta between the the values of the datum, if possible, and pushes it + // onto the correct value array inside `self`. + // + // If both the last datum and new one exist (are not missing), the normal + // diff is pushed. If the last datum is missing, but the new one exists, + // then the new value is pushed directly. If the last datum exists but the + // new one does not, then a missing datum is pushed. 
If both are missing, + // then a missing one is pushed as well. + // + // In other words, the diff is always between the new datum and the last + // non-None value. If such a last value does not exist, the datum is + // inserted directly. + fn push_diff_from_last_to_datum( + &mut self, + last_datum: &Option, + new_datum: &oximeter::Datum, + data_type: DataType, + ) -> Result<(), Error> { + match (last_datum.as_ref(), new_datum.is_missing()) { + (None, true) | (Some(_), true) => { + // In this case, either both values are missing, or just the new + // one is. In either case, we cannot compute a new value, and + // need to insert None to represent the new missing datum. + match data_type { + DataType::Integer => self.as_integer_mut()?.push(None), + DataType::Double => self.as_double_mut()?.push(None), + DataType::Boolean => self.as_boolean_mut()?.push(None), + DataType::String => self.as_string_mut()?.push(None), + DataType::IntegerDistribution => { + self.as_integer_distribution_mut()?.push(None) + } + DataType::DoubleDistribution => { + self.as_double_distribution_mut()?.push(None) + } + } + } + (None, false) => { + // The last datum was missing, but the new one is not. We cannot + // compute the difference, since we have no previous point. + // However, we can still push some value by inserting the datum + // directly. + self.push_value_from_datum(new_datum)?; + } + (Some(last_datum), false) => { + // Both values exist, so we can compute the difference between + // them and insert that. + // + // Note that we're asserting both are the same _datum_ type, + // which is guaranteed by a check in the caller. + match (last_datum, new_datum) { + ( + CumulativeDatum::Integer(last), + oximeter::Datum::I8(new), + ) => { + let new = i64::from(*new); + self.as_integer_mut()?.push(Some(new - last)); + } + ( + CumulativeDatum::Integer(last), + oximeter::Datum::U8(new), + ) => { + let new = i64::from(*new); + self.as_integer_mut()?.push(Some(new - last)); + } + ( + CumulativeDatum::Integer(last), + oximeter::Datum::I16(new), + ) => { + let new = i64::from(*new); + self.as_integer_mut()?.push(Some(new - last)); + } + ( + CumulativeDatum::Integer(last), + oximeter::Datum::U16(new), + ) => { + let new = i64::from(*new); + self.as_integer_mut()?.push(Some(new - last)); + } + ( + CumulativeDatum::Integer(last), + oximeter::Datum::I32(new), + ) => { + let new = i64::from(*new); + self.as_integer_mut()?.push(Some(new - last)); + } + ( + CumulativeDatum::Integer(last), + oximeter::Datum::U32(new), + ) => { + let new = i64::from(*new); + self.as_integer_mut()?.push(Some(new - last)); + } + ( + CumulativeDatum::Integer(last), + oximeter::Datum::I64(new), + ) => { + let diff = new + .checked_sub(*last) + .context("Overflow computing deltas")?; + self.as_integer_mut()?.push(Some(diff)); + } + ( + CumulativeDatum::Integer(last), + oximeter::Datum::U64(new), + ) => { + let new = new + .to_i64() + .context("Failed to convert u64 datum to i64")?; + let diff = new + .checked_sub(*last) + .context("Overflow computing deltas")?; + self.as_integer_mut()?.push(Some(diff)); + } + ( + CumulativeDatum::Double(last), + oximeter::Datum::F32(new), + ) => { + self.as_double_mut()? 
+ .push(Some(f64::from(*new) - last)); + } + ( + CumulativeDatum::Double(last), + oximeter::Datum::F64(new), + ) => { + self.as_double_mut()?.push(Some(new - last)); + } + ( + CumulativeDatum::Integer(last), + oximeter::Datum::CumulativeI64(new), + ) => { + let new = new.value(); + let diff = new + .checked_sub(*last) + .context("Overflow computing deltas")?; + self.as_integer_mut()?.push(Some(diff)); + } + ( + CumulativeDatum::Integer(last), + oximeter::Datum::CumulativeU64(new), + ) => { + let new = new + .value() + .to_i64() + .context("Failed to convert u64 datum to i64")?; + let diff = new + .checked_sub(*last) + .context("Overflow computing deltas")?; + self.as_integer_mut()?.push(Some(diff)); + } + ( + CumulativeDatum::Double(last), + oximeter::Datum::CumulativeF32(new), + ) => { + self.as_double_mut()? + .push(Some(f64::from(new.value()) - last)); + } + ( + CumulativeDatum::Double(last), + oximeter::Datum::CumulativeF64(new), + ) => { + self.as_double_mut()?.push(Some(new.value() - last)); + } + ( + CumulativeDatum::IntegerDistribution(last), + oximeter::Datum::HistogramI8(new), + ) => { + let new = Distribution::from(new); + self.as_integer_distribution_mut()? + .push(Some(new.checked_sub(&last)?)); + } + ( + CumulativeDatum::IntegerDistribution(last), + oximeter::Datum::HistogramU8(new), + ) => { + let new = Distribution::from(new); + self.as_integer_distribution_mut()? + .push(Some(new.checked_sub(&last)?)); + } + ( + CumulativeDatum::IntegerDistribution(last), + oximeter::Datum::HistogramI16(new), + ) => { + let new = Distribution::from(new); + self.as_integer_distribution_mut()? + .push(Some(new.checked_sub(&last)?)); + } + ( + CumulativeDatum::IntegerDistribution(last), + oximeter::Datum::HistogramU16(new), + ) => { + let new = Distribution::from(new); + self.as_integer_distribution_mut()? + .push(Some(new.checked_sub(&last)?)); + } + ( + CumulativeDatum::IntegerDistribution(last), + oximeter::Datum::HistogramI32(new), + ) => { + let new = Distribution::from(new); + self.as_integer_distribution_mut()? + .push(Some(new.checked_sub(&last)?)); + } + ( + CumulativeDatum::IntegerDistribution(last), + oximeter::Datum::HistogramU32(new), + ) => { + let new = Distribution::from(new); + self.as_integer_distribution_mut()? + .push(Some(new.checked_sub(&last)?)); + } + ( + CumulativeDatum::IntegerDistribution(last), + oximeter::Datum::HistogramI64(new), + ) => { + let new = Distribution::from(new); + self.as_integer_distribution_mut()? + .push(Some(new.checked_sub(&last)?)); + } + ( + CumulativeDatum::IntegerDistribution(last), + oximeter::Datum::HistogramU64(new), + ) => { + let new = Distribution::try_from(new)?; + self.as_integer_distribution_mut()? + .push(Some(new.checked_sub(&last)?)); + } + ( + CumulativeDatum::DoubleDistribution(last), + oximeter::Datum::HistogramF32(new), + ) => { + let new = Distribution::from(new); + self.as_double_distribution_mut()? + .push(Some(new.checked_sub(&last)?)); + } + ( + CumulativeDatum::DoubleDistribution(last), + oximeter::Datum::HistogramF64(new), + ) => { + let new = Distribution::from(new); + self.as_double_distribution_mut()? + .push(Some(new.checked_sub(&last)?)); + } + (_, _) => unreachable!(), + } + } + } + Ok(()) + } + + // Return the number of samples in self. 
+ fn len(&self) -> usize { + match self { + ValueArray::Boolean(inner) => inner.len(), + ValueArray::Integer(inner) => inner.len(), + ValueArray::Double(inner) => inner.len(), + ValueArray::String(inner) => inner.len(), + ValueArray::IntegerDistribution(inner) => inner.len(), + ValueArray::DoubleDistribution(inner) => inner.len(), + } + } + + // Return a reference to the i-th value in the array. + // + // This panics if `i >= self.len()`. + fn get(&self, i: usize) -> Datum<'_> { + match self { + ValueArray::Boolean(inner) => Datum::Boolean(inner[i]), + ValueArray::Integer(inner) => { + Datum::Integer(inner.get(i).unwrap().as_ref()) + } + ValueArray::Double(inner) => { + Datum::Double(inner.get(i).unwrap().as_ref()) + } + ValueArray::String(inner) => { + Datum::String(inner.get(i).unwrap().as_deref()) + } + ValueArray::IntegerDistribution(inner) => { + Datum::IntegerDistribution(inner.get(i).unwrap().as_ref()) + } + ValueArray::DoubleDistribution(inner) => { + Datum::DoubleDistribution(inner.get(i).unwrap().as_ref()) + } + } + } + + // Swap the value in self with other, asserting they're the same type. + pub(crate) fn swap(&mut self, mut values: ValueArray) { + use std::mem::swap; + match (self, &mut values) { + (ValueArray::Integer(x), ValueArray::Integer(y)) => swap(x, y), + (ValueArray::Double(x), ValueArray::Double(y)) => swap(x, y), + (ValueArray::Boolean(x), ValueArray::Boolean(y)) => swap(x, y), + (ValueArray::String(x), ValueArray::String(y)) => swap(x, y), + ( + ValueArray::IntegerDistribution(x), + ValueArray::IntegerDistribution(y), + ) => swap(x, y), + ( + ValueArray::DoubleDistribution(x), + ValueArray::DoubleDistribution(y), + ) => swap(x, y), + (_, _) => panic!("Cannot swap values of different types"), + } + } +} + +mod private { + pub trait Sealed {} + impl Sealed for i64 {} + impl Sealed for f64 {} +} + +pub trait DistributionSupport: + fmt::Display + Clone + Copy + fmt::Debug + PartialEq + private::Sealed +{ +} +impl DistributionSupport for i64 {} +impl DistributionSupport for f64 {} + +/// A distribution is a sequence of bins and counts in those bins. +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +#[schemars(rename = "Distribution{T}")] +pub struct Distribution { + bins: Vec, + counts: Vec, +} + +impl fmt::Display for Distribution { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let elems = self + .bins + .iter() + .zip(self.counts.iter()) + .map(|(bin, count)| format!("{bin}: {count}")) + .collect::>() + .join(", "); + write!(f, "{}", elems) + } +} + +impl Distribution { + // Subtract two distributions, checking that they have the same bins. + fn checked_sub( + &self, + rhs: &Distribution, + ) -> Result, Error> { + anyhow::ensure!( + self.bins == rhs.bins, + "Cannot subtract distributions with different bins", + ); + let counts = self + .counts + .iter() + .zip(rhs.counts.iter().copied()) + .map(|(x, y)| x.checked_sub(y)) + .collect::>() + .context("Underflow subtracting distributions values")?; + Ok(Self { bins: self.bins.clone(), counts }) + } + + /// Return the slice of bins. + pub fn bins(&self) -> &[T] { + &self.bins + } + + /// Return the slice of counts. + pub fn counts(&self) -> &[u64] { + &self.counts + } + + /// Return an iterator over each bin and count. + pub fn iter(&self) -> impl ExactSizeIterator + '_ { + self.bins.iter().zip(self.counts.iter()) + } +} + +macro_rules! 
i64_dist_from { + ($t:ty) => { + impl From<&oximeter::histogram::Histogram<$t>> for Distribution { + fn from(hist: &oximeter::histogram::Histogram<$t>) -> Self { + let (bins, counts) = hist.to_arrays(); + Self { bins: bins.into_iter().map(i64::from).collect(), counts } + } + } + + impl From<&oximeter::histogram::Histogram<$t>> for CumulativeDatum { + fn from(hist: &oximeter::histogram::Histogram<$t>) -> Self { + CumulativeDatum::IntegerDistribution(hist.into()) + } + } + }; +} + +i64_dist_from!(i8); +i64_dist_from!(u8); +i64_dist_from!(i16); +i64_dist_from!(u16); +i64_dist_from!(i32); +i64_dist_from!(u32); +i64_dist_from!(i64); + +impl TryFrom<&oximeter::histogram::Histogram> for Distribution { + type Error = Error; + fn try_from( + hist: &oximeter::histogram::Histogram, + ) -> Result { + let (bins, counts) = hist.to_arrays(); + let bins = bins + .into_iter() + .map(i64::try_from) + .collect::>() + .context("Overflow converting u64 to i64")?; + Ok(Self { bins, counts }) + } +} + +impl TryFrom<&oximeter::histogram::Histogram> for CumulativeDatum { + type Error = Error; + fn try_from( + hist: &oximeter::histogram::Histogram, + ) -> Result { + hist.try_into().map(CumulativeDatum::IntegerDistribution) + } +} + +macro_rules! f64_dist_from { + ($t:ty) => { + impl From<&oximeter::histogram::Histogram<$t>> for Distribution { + fn from(hist: &oximeter::histogram::Histogram<$t>) -> Self { + let (bins, counts) = hist.to_arrays(); + Self { bins: bins.into_iter().map(f64::from).collect(), counts } + } + } + + impl From<&oximeter::histogram::Histogram<$t>> for CumulativeDatum { + fn from(hist: &oximeter::histogram::Histogram<$t>) -> Self { + CumulativeDatum::DoubleDistribution(hist.into()) + } + } + }; +} + +f64_dist_from!(f32); +f64_dist_from!(f64); + +#[cfg(test)] +mod tests { + use crate::oxql::point::{DataType, ValueArray}; + + use super::{Distribution, MetricType, Points, Values}; + use chrono::{DateTime, Utc}; + use oximeter::types::Cumulative; + use oximeter::Measurement; + use std::time::Duration; + + #[test] + fn test_point_delta_between() { + let mut datum = Cumulative::new(2i64); + let now = Utc::now(); + let meas0 = Measurement::new(now + Duration::from_secs(1), datum); + datum.set(10i64); + let meas1 = Measurement::new(now + Duration::from_secs(2), datum); + let measurements = vec![meas0.clone(), meas1.clone()]; + let points = Points::delta_from_cumulative(&measurements).unwrap(); + + assert_eq!(points.len(), 2); + assert_eq!( + points.values(0).unwrap().as_integer().unwrap(), + &[Some(2i64), Some(8)], + ); + assert_eq!( + Duration::from_secs(1), + (points.timestamps[1] - points.timestamps[0]).to_std().unwrap(), + ); + let expected = vec![now, meas0.timestamp()]; + let actual = points.start_times.as_ref().unwrap(); + assert_eq!(expected.len(), actual.len()); + for (x, y) in expected.into_iter().zip(actual.into_iter()) { + assert!((*y - x).num_nanoseconds().unwrap() <= 1); + } + } + + #[test] + fn test_point_delta_between_with_new_epoch() { + let datum = Cumulative::new(2i64); + let now = Utc::now(); + let meas0 = Measurement::new(now + Duration::from_secs(1), datum); + + // Create a new datum, with a completely new start time, representing a + // new epoch. 
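+        // `delta_from_cumulative` treats this as a gap: the new start time is
+        // later than the previous timestamp, so the raw value is recorded
+        // instead of a difference against the earlier epoch.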
+ let now = Utc::now() + Duration::from_secs(10); + let datum = Cumulative::with_start_time(now, 10i64); + let meas1 = Measurement::new(now + Duration::from_secs(2), datum); + let measurements = vec![meas0.clone(), meas1.clone()]; + let points = Points::delta_from_cumulative(&measurements).unwrap(); + + // The second point should not be referenced to the first, because + // they're in different epochs. + assert_eq!(points.len(), 2); + assert_eq!( + points.values(0).unwrap().as_integer().unwrap(), + &[Some(2i64), Some(10)], + ); + + // The start times should be the start times of the measurements + // themselves as well. Same for timestamps. + assert_eq!( + points.timestamps, + vec![meas0.timestamp(), meas1.timestamp()], + ); + assert_eq!( + points.start_times.as_ref().unwrap(), + &[meas0.start_time().unwrap(), meas1.start_time().unwrap()], + ); + } + + #[test] + fn test_point_delta_between_overlapping_time_ranges() { + // These data points start at `T` and `T + 100ms` respectively, and end + // at those times + 1s. That means their time ranges overlap, and so we + // can't compute a delta from them. + let start_time = Utc::now() - Duration::from_secs(1); + let datum1 = Cumulative::with_start_time(start_time, 1i64); + let datum2 = Cumulative::with_start_time( + start_time + Duration::from_millis(100), + 10i64, + ); + let meas1 = Measurement::new( + datum1.start_time() + Duration::from_secs(1), + datum1, + ); + let meas2 = Measurement::new( + datum2.start_time() + Duration::from_secs(1), + datum2, + ); + + assert!( + Points::delta_from_cumulative(&[meas1.clone(), meas2.clone()]) + .is_err(), + "Should not be able to compute a delta point \ + between two measuremenst with overlapping start \ + times: [{}, {}] and [{}, {}]", + meas1.start_time().unwrap(), + meas1.timestamp(), + meas2.start_time().unwrap(), + meas2.timestamp(), + ); + } + + fn timestamps(n: usize) -> Vec> { + let now = Utc::now(); + let mut out = Vec::with_capacity(n); + for i in 0..n { + out.push(now - Duration::from_secs(i as _)); + } + out.into_iter().rev().collect() + } + + #[test] + fn test_cast_points_from_bool() { + let points = Points { + start_times: None, + timestamps: timestamps(2), + values: vec![Values { + values: ValueArray::Boolean(vec![Some(false), Some(true)]), + metric_type: MetricType::Gauge, + }], + }; + + let as_same = points.cast(&[DataType::Boolean]).unwrap(); + let vals = as_same.values[0].values.as_boolean().unwrap(); + assert_eq!(vals, points.values[0].values.as_boolean().unwrap()); + + let as_int = points.cast(&[DataType::Integer]).unwrap(); + let vals = as_int.values[0].values.as_integer().unwrap(); + assert_eq!(vals, &vec![Some(0), Some(1)]); + + let as_double = points.cast(&[DataType::Double]).unwrap(); + let vals = as_double.values[0].values.as_double().unwrap(); + assert_eq!(vals, &vec![Some(0.0), Some(1.0)]); + + let as_string = points.cast(&[DataType::String]).unwrap(); + let vals = as_string.values[0].values.as_string().unwrap(); + assert_eq!( + vals, + &vec![Some("false".to_string()), Some("true".to_string())] + ); + + for ty in [DataType::IntegerDistribution, DataType::DoubleDistribution] + { + assert!( + points.cast(&[ty]).is_err(), + "Should not be able to cast bool array to distributions" + ); + } + assert!(points.cast(&[]).is_err(), "Should fail to cast with no types"); + assert!( + points.cast(&[DataType::Boolean, DataType::Boolean]).is_err(), + "Should fail to cast to the wrong number of types" + ); + } + + #[test] + fn test_cast_points_from_integer() { + let points = Points { + 
start_times: None, + timestamps: timestamps(2), + values: vec![Values { + values: ValueArray::Integer(vec![Some(0), Some(10)]), + metric_type: MetricType::Gauge, + }], + }; + + let as_same = points.cast(&[DataType::Integer]).unwrap(); + let vals = as_same.values[0].values.as_integer().unwrap(); + assert_eq!(vals, points.values[0].values.as_integer().unwrap()); + + let as_bools = points.cast(&[DataType::Boolean]).unwrap(); + let vals = as_bools.values[0].values.as_boolean().unwrap(); + assert_eq!(vals, &vec![Some(false), Some(true)]); + + let as_double = points.cast(&[DataType::Double]).unwrap(); + let vals = as_double.values[0].values.as_double().unwrap(); + assert_eq!(vals, &vec![Some(0.0), Some(10.0)]); + + let as_string = points.cast(&[DataType::String]).unwrap(); + let vals = as_string.values[0].values.as_string().unwrap(); + assert_eq!(vals, &vec![Some("0".to_string()), Some("10".to_string())]); + + for ty in [DataType::IntegerDistribution, DataType::DoubleDistribution] + { + assert!( + points.cast(&[ty]).is_err(), + "Should not be able to cast int array to distributions" + ); + } + assert!(points.cast(&[]).is_err(), "Should fail to cast with no types"); + assert!( + points.cast(&[DataType::Boolean, DataType::Boolean]).is_err(), + "Should fail to cast to the wrong number of types" + ); + } + + #[test] + fn test_cast_points_from_double() { + let points = Points { + start_times: None, + timestamps: timestamps(2), + values: vec![Values { + values: ValueArray::Double(vec![Some(0.0), Some(10.5)]), + metric_type: MetricType::Gauge, + }], + }; + + let as_same = points.cast(&[DataType::Double]).unwrap(); + let vals = as_same.values[0].values.as_double().unwrap(); + assert_eq!(vals, points.values[0].values.as_double().unwrap()); + + let as_bools = points.cast(&[DataType::Boolean]).unwrap(); + let vals = as_bools.values[0].values.as_boolean().unwrap(); + assert_eq!(vals, &vec![Some(false), Some(true)]); + + let as_ints = points.cast(&[DataType::Integer]).unwrap(); + let vals = as_ints.values[0].values.as_integer().unwrap(); + assert_eq!(vals, &vec![Some(0), Some(10)]); + + let as_string = points.cast(&[DataType::String]).unwrap(); + let vals = as_string.values[0].values.as_string().unwrap(); + assert_eq!( + vals, + &vec![Some("0".to_string()), Some("10.5".to_string())] + ); + + let points = Points { + start_times: None, + timestamps: timestamps(2), + values: vec![Values { + values: ValueArray::Double(vec![Some(0.0), Some(f64::MAX)]), + metric_type: MetricType::Gauge, + }], + }; + assert!( + points.cast(&[DataType::Integer]).is_err(), + "Should fail to cast out-of-range doubles to integer" + ); + + for ty in [DataType::IntegerDistribution, DataType::DoubleDistribution] + { + assert!( + points.cast(&[ty]).is_err(), + "Should not be able to cast double array to distributions" + ); + } + assert!(points.cast(&[]).is_err(), "Should fail to cast with no types"); + assert!( + points.cast(&[DataType::Boolean, DataType::Boolean]).is_err(), + "Should fail to cast to the wrong number of types" + ); + } + + #[test] + fn test_cast_points_from_string() { + fn make_points(strings: &[&str]) -> Points { + Points { + start_times: None, + timestamps: timestamps(strings.len()), + values: vec![Values { + values: ValueArray::String( + strings.iter().map(|&s| Some(s.into())).collect(), + ), + metric_type: MetricType::Gauge, + }], + } + } + + let points = make_points(&["some", "strings"]); + let as_same = points.cast(&[DataType::String]).unwrap(); + assert_eq!(as_same, points); + + // Any non-empty string is truthy, 
even "false". + let points = make_points(&["", "false", "true"]); + let as_bools = points.cast(&[DataType::Boolean]).unwrap(); + let vals = as_bools.values[0].values.as_boolean().unwrap(); + assert_eq!(vals, &vec![Some(false), Some(true), Some(true)]); + + // Conversion to integers happens by parsing. + let points = make_points(&["0", "1"]); + let as_ints = points.cast(&[DataType::Integer]).unwrap(); + let vals = as_ints.values[0].values.as_integer().unwrap(); + assert_eq!(vals, &vec![Some(0), Some(1)]); + for bad in ["1.0", "", "foo", "[]"] { + assert!( + make_points(&[bad]).cast(&[DataType::Integer]).is_err(), + "Should fail to cast non-int string '{}' to integers", + bad, + ); + } + + // Conversion to doubles happens by parsing. + let points = make_points(&["0", "1.1"]); + let as_doubles = points.cast(&[DataType::Double]).unwrap(); + let vals = as_doubles.values[0].values.as_double().unwrap(); + assert_eq!(vals, &vec![Some(0.0), Some(1.1)]); + for bad in ["", "foo", "[]"] { + assert!( + make_points(&[bad]).cast(&[DataType::Double]).is_err(), + "Should fail to cast non-double string '{}' to double", + bad, + ); + } + + // Checks for invalid casts + for ty in [DataType::IntegerDistribution, DataType::DoubleDistribution] + { + assert!( + points.cast(&[ty]).is_err(), + "Should not be able to cast double array to distributions" + ); + } + assert!(points.cast(&[]).is_err(), "Should fail to cast with no types"); + assert!( + points.cast(&[DataType::Boolean, DataType::Boolean]).is_err(), + "Should fail to cast to the wrong number of types" + ); + } + + #[test] + fn test_cast_points_from_int_distribution() { + // We can only "cast" to the same type here. + let points = Points { + start_times: None, + timestamps: timestamps(1), + values: vec![Values { + values: ValueArray::IntegerDistribution(vec![Some( + Distribution { bins: vec![0, 1, 2], counts: vec![0; 3] }, + )]), + metric_type: MetricType::Gauge, + }], + }; + let as_same = points.cast(&[DataType::IntegerDistribution]).unwrap(); + assert_eq!(points, as_same); + + for ty in [ + DataType::Boolean, + DataType::String, + DataType::Integer, + DataType::Double, + DataType::DoubleDistribution, + ] { + assert!( + points.cast(&[ty]).is_err(), + "Should not be able to cast distributions to anything other than itself" + ); + } + assert!(points.cast(&[]).is_err()); + assert!(points + .cast(&[ + DataType::IntegerDistribution, + DataType::IntegerDistribution + ]) + .is_err()); + } + + #[test] + fn test_cast_points_from_double_distribution() { + // We can only "cast" to the same type here. 
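Taken together, these cast tests fix the conversion matrix: the scalar types (boolean, integer, double, string) convert among themselves, a distribution only "casts" to its own type, and the requested type list must have one entry per value array. A crate-internal usage sketch, assuming `pts` is an integer gauge (the helper name is illustrative):

    fn cast_examples(pts: &crate::oxql::point::Points) {
        use crate::oxql::point::DataType;
        // Scalars convert among themselves.
        let _doubles = pts.cast(&[DataType::Double]).unwrap();
        let _strings = pts.cast(&[DataType::String]).unwrap();
        // Distributions are only reachable from distributions, and the
        // number of requested types must match the number of value arrays.
        assert!(pts.cast(&[DataType::IntegerDistribution]).is_err());
        assert!(pts.cast(&[]).is_err());
    }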
+ let points = Points { + start_times: None, + timestamps: timestamps(1), + values: vec![Values { + values: ValueArray::DoubleDistribution(vec![Some( + Distribution { + bins: vec![0.0, 1.0, 2.0], + counts: vec![0; 3], + }, + )]), + metric_type: MetricType::Gauge, + }], + }; + let as_same = points.cast(&[DataType::DoubleDistribution]).unwrap(); + assert_eq!(points, as_same); + + for ty in [ + DataType::Boolean, + DataType::String, + DataType::Integer, + DataType::Double, + DataType::IntegerDistribution, + ] { + assert!( + points.cast(&[ty]).is_err(), + "Should not be able to cast distributions to anything other than itself" + ); + } + assert!(points.cast(&[]).is_err()); + assert!(points + .cast(&[DataType::DoubleDistribution, DataType::DoubleDistribution]) + .is_err()); + } +} diff --git a/oximeter/db/src/oxql/query/mod.rs b/oximeter/db/src/oxql/query/mod.rs new file mode 100644 index 0000000000..1c4383d68d --- /dev/null +++ b/oximeter/db/src/oxql/query/mod.rs @@ -0,0 +1,1033 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! A single OxQL query. + +// Copyright 2024 Oxide Computer Company + +use super::ast::ident::Ident; +use super::ast::logical_op::LogicalOp; +use super::ast::table_ops::filter::CompoundFilter; +use super::ast::table_ops::filter::FilterExpr; +use super::ast::table_ops::group_by::GroupBy; +use super::ast::table_ops::limit::Limit; +use super::ast::table_ops::BasicTableOp; +use super::ast::table_ops::TableOp; +use super::ast::SplitQuery; +use crate::oxql::ast::grammar; +use crate::oxql::ast::table_ops::filter::Filter; +use crate::oxql::ast::Query as QueryNode; +use crate::oxql::fmt_parse_error; +use crate::oxql::Error; +use crate::TimeseriesName; +use chrono::DateTime; +use chrono::Utc; +use std::time::Duration; + +/// Special identifiers for column names or other widely-used values. +pub mod special_idents { + use oximeter::DatumType; + + pub const TIMESTAMP: &str = "timestamp"; + pub const START_TIME: &str = "start_time"; + pub const DATUM: &str = "datum"; + pub const BINS: &str = "bins"; + pub const COUNTS: &str = "counts"; + pub const DATETIME64: &str = "DateTime64"; + pub const ARRAYU64: &str = "Array[u64]"; + + pub fn array_type_name_from_histogram_type( + type_: DatumType, + ) -> Option { + if !type_.is_histogram() { + return None; + } + Some(format!( + "Array[{}]", + type_.to_string().strip_prefix("Histogram").unwrap().to_lowercase(), + )) + } +} + +/// A parsed OxQL query. +#[derive(Clone, Debug, PartialEq)] +pub struct Query { + pub(super) parsed: QueryNode, + pub(super) end_time: DateTime, +} + +impl Query { + /// Construct a query written in OxQL. + pub fn new(query: impl AsRef) -> Result { + let raw = query.as_ref().trim(); + const MAX_LEN: usize = 4096; + anyhow::ensure!( + raw.len() <= MAX_LEN, + "Queries must be <= {} characters", + MAX_LEN, + ); + let parsed = grammar::query_parser::query(raw) + .map_err(|e| fmt_parse_error(raw, e))?; + + // Fetch the latest query end time referred to in the parsed query, or + // use now if there isn't one. + let query_end_time = parsed.query_end_time().unwrap_or_else(Utc::now); + Ok(Self { parsed, end_time: query_end_time }) + } + + /// Return the end time of the query. + pub fn end_time(&self) -> &DateTime { + &self.end_time + } + + /// Return the next referenced timeseries name. 
+ /// + /// Queries always start with either a single `get` operation, which refers + /// to one timeseries; or a subquery, each component of which is a query. So + /// it is always true that there is exactly one next timeseries name, since + /// that comes from the current query, or the next subquery. + pub fn timeseries_name(&self) -> &TimeseriesName { + self.parsed.timeseries_name() + } + + /// Return the transformation table ops, i.e., everything after the initial + /// get operation or subquery. + pub fn transformations(&self) -> &[TableOp] { + self.parsed.transformations() + } + + /// Return predicates which can be pushed down into the database, if any. + /// + /// Query optimization is a large topic. There are few rules, and many + /// heuristics. However, one of those is extremely useful for our case: + /// predicate pushdown. This is where one moves predicates as close as + /// possible to the data, filtering out unused data as early as possible in + /// query processing. + /// + /// In our case, _currently_, we can implement this pretty easily. Filtering + /// operations can usually be coalesced into a single item. That means: + /// + /// - successive filtering operations are merged: `filter a | filter b -> + /// `filter (a) && (b)`. + /// - filtering operations are "pushed down", to just after the initial + /// `get` operation in the query. + /// + /// # Group by + /// + /// While filters can be combined and pushed down through many operations, + /// special care is taken for `group_by`. Specifically, the filter must only + /// name columns explicitly named in the `group_by`. If we pushed through + /// filters which named one of the columns _within_ the group (one not + /// named), then that would change the set of data in a group, and thus the + /// result. + /// + /// # Datum filters + /// + /// We currently only push down filters on the timestamps, and that is only + /// because we do _not_ support aggregations across time, only values. If + /// and when we do support that, then filters which reference time also + /// cannot be pushed down. + /// + /// # No predicates + /// + /// Note that this may return `None`, in the case where there are zero + /// predicates of any kind. + /// + /// # Limit operations + /// + /// OxQL table operations which limit data, such as `first k` or `last k`, + /// can also be pushed down into the database in certain cases. Since they + /// change the number of points, but not the timeseries, they cannot be + /// pushed through an `align` operation. But they _can_ be pushed through + /// grouping or other filters. + // + // Pushing filters through a group by. Consider the following data: + // + // a b timestamp datum + // 0 0 0 0 + // 0 0 1 1 + // 0 1 0 2 + // 0 1 1 3 + // 1 0 0 4 + // 1 0 1 5 + // 1 1 0 6 + // 1 1 1 7 + // + // So there are two groups for a and b columns each with two samples. + // + // Consider `get a:b | group_by [a] | filter a == 0`. + // + // After the group by, the result is: + // + // a timestamp datum + // 0 0 avg([0, 2]) -> 1 + // 0 1 avg([1, 3]) -> 2 + // 1 0 avg([4, 6]) -> 5 + // 1 1 avg([5, 7]) -> 6 + // + // Then after the filter, it becomes: + // + // a timestamp datum + // 0 0 avg([0, 2]) -> 1 + // 0 1 avg([1, 3]) -> 2 + // + // Now, let's do the filter first, as if we pushed that down. + // i.e., `get a:b | filter a == 0 | group_by [a]`. 
After the filter, we get: + // + // a b timestamp datum + // 0 0 0 0 + // 0 0 1 1 + // 0 1 0 2 + // 0 1 1 3 + // + // Then we apply the group by: + // + // a timestamp datum + // 0 0 avg([0, 2]) -> 1 + // 0 1 avg([1, 3]) -> 2 + // + // So we get the same result. Let's suppose we had a filter on the column + // `b` instead. Doing the group_by first, we get the exact same result as + // the first one above. Or we really get an error, because the resulting + // table does not have a `b` column. + // + // If instead we did the filter first, we'd get a different result. Starting + // from: + // + // a b timestamp datum + // 0 0 0 0 + // 0 0 1 1 + // 0 1 0 2 + // 0 1 1 3 + // 1 0 0 4 + // 1 0 1 5 + // 1 1 0 6 + // 1 1 1 7 + // + // Apply `filter b == 0`: + // + // + // a b timestamp datum + // 0 0 0 0 + // 0 0 1 1 + // 1 0 0 4 + // 1 0 1 5 + // + // Then apply group_by [a] + // + // a timestamp datum + // 0 0 avg([0, 1]) -> 0.5 + // 0 1 avg([4, 5]) -> 4.5 + // + // So we get something very different. + // + // What about filtering by timestamp? Starting from the raw data again: + // + // a b timestamp datum + // 0 0 0 0 + // 0 0 1 1 + // 0 1 0 2 + // 0 1 1 3 + // 1 0 0 4 + // 1 0 1 5 + // 1 1 0 6 + // 1 1 1 7 + // + // Let's add a `filter timestamp >= 1`. After the `group_by [a]`, we get: + // + // a timestamp datum + // 0 0 avg([0, 2]) -> 1 + // 0 1 avg([1, 3]) -> 2 + // 1 0 avg([4, 6]) -> 5 + // 1 1 avg([5, 7]) -> 6 + // + // Then after `filter timestamp >= 1`: + // + // a timestamp datum + // 0 1 avg([1, 3]) -> 2 + // 1 1 avg([5, 7]) -> 6 + // + // Now, filtering the timestamps first, after that we get: + // + // a b timestamp datum + // 0 0 1 1 + // 0 1 1 3 + // 1 0 1 5 + // 1 1 1 7 + // + // Then grouping: + // + // a timestamp datum + // 0 1 avg([1, 3]) -> 2 + // 1 1 avg([5, 7]) -> 6 + // + // So that also works fine. + pub(crate) fn coalesced_predicates( + &self, + outer: Option, + ) -> Option { + self.transformations().iter().rev().fold( + // We'll start from the predicates passed from the outer query. + outer, + |maybe_filter, next_tr| { + // Transformations only return basic ops, since all the + // subqueries must be at the prefix of the query. + let TableOp::Basic(op) = next_tr else { + unreachable!(); + }; + + match op { + BasicTableOp::GroupBy(GroupBy { identifiers, .. }) => { + // Only push through columns referred to in the group by + // itself, which replaces the current filter. + maybe_filter.as_ref().and_then(|current| { + restrict_filter_idents(current, identifiers) + }) + } + BasicTableOp::Filter(filter) => { + // Merge with any existing filter. + if let Some(left) = maybe_filter { + Some(left.merge(&filter, LogicalOp::And)) + } else { + Some(filter.clone()) + } + } + BasicTableOp::Limit(limit) => { + // A filter can be pushed through a limiting table + // operation in a few cases, see `can_reorder_around` + // for details. + maybe_filter.and_then(|filter| { + if filter.can_reorder_around(limit) { + Some(filter) + } else { + None + } + }) + } + _ => maybe_filter, + } + }, + ) + } + + /// Coalesce any limiting table operations, if possible. + pub(crate) fn coalesced_limits( + &self, + maybe_limit: Option, + ) -> Option { + self.transformations().iter().rev().fold( + maybe_limit, + |maybe_limit, next_tr| { + // Transformations only return basic ops, since all the + // subqueries must be at the prefix of the query. 
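A crate-internal usage sketch of the pushdown rules described above; both `coalesced_predicates` and `coalesced_limits` are pub(crate), and the query strings mirror the tests at the end of this file:

    fn pushdown_examples() {
        use crate::oxql::query::Query;
        // A filter naming a grouped column is pushed through the group_by.
        let q = Query::new("get a:b | group_by [a] | filter a == 0").unwrap();
        assert!(q.coalesced_predicates(None).is_some());
        // A filter naming a column outside the group is dropped instead.
        let q = Query::new("get a:b | group_by [a] | filter b == 0").unwrap();
        assert!(q.coalesced_predicates(None).is_none());
        // Same-kind limits merge to the smaller count: `last 10 | last 5`.
        let q = Query::new("get a:b | last 10 | last 5").unwrap();
        assert_eq!(q.coalesced_limits(None).unwrap().count.get(), 5);
    }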
+ let TableOp::Basic(op) = next_tr else { + unreachable!(); + }; + + match op { + BasicTableOp::Filter(filter) => { + // A limit can be pushed through a filter operation, in + // only a few cases, see `can_reorder_around` for + // details. + maybe_limit.and_then(|limit| { + if filter.can_reorder_around(&limit) { + Some(limit) + } else { + None + } + }) + } + BasicTableOp::Limit(limit) => { + // It is possible to "merge" limits if they're of the + // same kind. To do so, we simply take the one with the + // smaller count. For example + // + // ... | first 10 | first 5 + // + // is equivalent to just + // + // ... | first 5 + let new_limit = if let Some(current_limit) = maybe_limit + { + if limit.kind == current_limit.kind { + Limit { + kind: limit.kind, + count: limit.count.min(current_limit.count), + } + } else { + // If the limits are of different kinds, we replace + // the current one, i.e., drop it and start passing + // through the inner one. + *limit + } + } else { + // No outer limit at all, simply take this one. + *limit + }; + Some(new_limit) + } + _ => maybe_limit, + } + }, + ) + } + + pub(crate) fn split(&self) -> SplitQuery { + self.parsed.split(self.end_time) + } +} + +// Return a new filter containing only parts that refer to either: +// +// - a `timestamp` column +// - a column listed in `identifiers` +fn restrict_filter_idents( + current_filter: &Filter, + identifiers: &[Ident], +) -> Option { + match ¤t_filter.expr { + FilterExpr::Simple(inner) => { + let ident = inner.ident.as_str(); + if ident == "timestamp" + || identifiers.iter().map(Ident::as_str).any(|id| id == ident) + { + Some(current_filter.clone()) + } else { + None + } + } + FilterExpr::Compound(CompoundFilter { left, op, right }) => { + let maybe_left = restrict_filter_idents(left, identifiers); + let maybe_right = restrict_filter_idents(right, identifiers); + match (maybe_left, maybe_right) { + (Some(left), Some(right)) => Some(Filter { + negated: current_filter.negated, + expr: FilterExpr::Compound(CompoundFilter { + left: Box::new(left), + op: *op, + right: Box::new(right), + }), + }), + (Some(single), None) | (None, Some(single)) => Some(single), + (None, None) => None, + } + } + } +} + +/// Describes the time alignment for an OxQL query. +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct Alignment { + /// The end time of the query, which the temporal reference point. + pub end_time: DateTime, + /// The alignment period, the interval on which values are produced. 
+ pub period: Duration, +} + +#[cfg(test)] +mod tests { + use super::Filter; + use super::Ident; + use super::Query; + use crate::oxql::ast::cmp::Comparison; + use crate::oxql::ast::literal::Literal; + use crate::oxql::ast::logical_op::LogicalOp; + use crate::oxql::ast::table_ops::filter::CompoundFilter; + use crate::oxql::ast::table_ops::filter::FilterExpr; + use crate::oxql::ast::table_ops::filter::SimpleFilter; + use crate::oxql::ast::table_ops::join::Join; + use crate::oxql::ast::table_ops::limit::Limit; + use crate::oxql::ast::table_ops::limit::LimitKind; + use crate::oxql::ast::table_ops::BasicTableOp; + use crate::oxql::ast::table_ops::TableOp; + use crate::oxql::ast::SplitQuery; + use crate::oxql::query::restrict_filter_idents; + use chrono::NaiveDateTime; + use chrono::Utc; + use std::time::Duration; + + #[test] + fn test_restrict_filter_idents_single_atom() { + let ident = Ident("foo".into()); + let filter = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: ident.clone(), + cmp: Comparison::Eq, + value: Literal::Boolean(false), + }), + }; + assert_eq!( + restrict_filter_idents(&filter, &[ident.clone()]).unwrap(), + filter + ); + assert_eq!(restrict_filter_idents(&filter, &[]), None); + } + + #[test] + fn test_restrict_filter_idents_single_atom_with_timestamp() { + let filter = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("timestamp".into()), + cmp: Comparison::Eq, + value: Literal::Boolean(false), + }), + }; + assert_eq!(restrict_filter_idents(&filter, &[]).unwrap(), filter); + } + + #[test] + fn test_restrict_filter_idents_expr() { + let idents = [Ident("foo".into()), Ident("bar".into())]; + let left = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: idents[0].clone(), + cmp: Comparison::Eq, + value: Literal::Boolean(false), + }), + }; + let right = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: idents[1].clone(), + cmp: Comparison::Eq, + value: Literal::Boolean(false), + }), + }; + let filter = Filter { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: Box::new(left.clone()), + op: LogicalOp::And, + right: Box::new(right.clone()), + }), + }; + assert_eq!(restrict_filter_idents(&filter, &idents).unwrap(), filter); + + // This should remove the right filter. 
+ assert_eq!( + restrict_filter_idents(&filter, &idents[..1]).unwrap(), + left + ); + + // And both + assert_eq!(restrict_filter_idents(&filter, &[]), None); + } + + #[test] + fn test_split_query() { + let q = Query::new("get a:b").unwrap(); + let split = q.split(); + assert_eq!(split, SplitQuery::Flat(q)); + + let q = Query::new("get a:b | filter x == 0").unwrap(); + let split = q.split(); + assert_eq!(split, SplitQuery::Flat(q)); + + let q = Query::new("{ get a:b } | join").unwrap(); + let split = q.split(); + let mut inner = Query::new("get a:b").unwrap(); + inner.end_time = q.end_time; + assert_eq!( + split, + SplitQuery::Nested { + subqueries: vec![inner], + transformations: vec![TableOp::Basic(BasicTableOp::Join(Join))], + } + ); + + let q = Query::new("{ get a:b | filter x == 0 } | join").unwrap(); + let split = q.split(); + let mut inner = Query::new("get a:b | filter x == 0").unwrap(); + inner.end_time = q.end_time; + assert_eq!( + split, + SplitQuery::Nested { + subqueries: vec![inner], + transformations: vec![TableOp::Basic(BasicTableOp::Join(Join))], + } + ); + + let q = Query::new("{ get a:b ; get a:b } | join").unwrap(); + let split = q.split(); + let mut inner = Query::new("get a:b").unwrap(); + inner.end_time = q.end_time; + assert_eq!( + split, + SplitQuery::Nested { + subqueries: vec![inner; 2], + transformations: vec![TableOp::Basic(BasicTableOp::Join(Join))], + } + ); + + let q = Query::new("{ { get a:b ; get a:b } | join } | join").unwrap(); + let split = q.split(); + let mut subqueries = + vec![Query::new("{ get a:b; get a:b } | join").unwrap()]; + subqueries[0].end_time = q.end_time; + let expected = SplitQuery::Nested { + subqueries: subqueries.clone(), + transformations: vec![TableOp::Basic(BasicTableOp::Join(Join))], + }; + assert_eq!(split, expected); + let split = subqueries[0].split(); + let mut inner = Query::new("get a:b").unwrap(); + inner.end_time = q.end_time; + assert_eq!( + split, + SplitQuery::Nested { + subqueries: vec![inner; 2], + transformations: vec![TableOp::Basic(BasicTableOp::Join(Join))], + } + ); + } + + #[test] + fn test_coalesce_predicates() { + // Passed through group-by unchanged. + let q = Query::new("get a:b | group_by [a] | filter a == 0").unwrap(); + let preds = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("a".to_string()), + cmp: Comparison::Eq, + value: Literal::Integer(0), + }), + }; + assert_eq!(q.coalesced_predicates(None), Some(preds)); + + // Merge the first two, then pass through group by. + let q = Query::new( + "get a:b | group_by [a] | filter a == 0 | filter a == 0", + ) + .unwrap(); + let atom = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("a".to_string()), + cmp: Comparison::Eq, + value: Literal::Integer(0), + }), + }; + let preds = Filter { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: Box::new(atom.clone()), + op: LogicalOp::And, + right: Box::new(atom.clone()), + }), + }; + assert_eq!(q.coalesced_predicates(None), Some(preds)); + + // These are also merged, even though they're on different sides of the + // group by. 
+ let q = Query::new( + "get a:b | filter a == 0 | group_by [a] | filter a == 0", + ) + .unwrap(); + let atom = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("a".to_string()), + cmp: Comparison::Eq, + value: Literal::Integer(0), + }), + }; + let preds = Filter { + negated: false, + expr: FilterExpr::Compound(CompoundFilter { + left: Box::new(atom.clone()), + op: LogicalOp::And, + right: Box::new(atom.clone()), + }), + }; + assert_eq!(q.coalesced_predicates(None), Some(preds)); + + // Second filter is _not_ passed through, because it refers to columns + // not in the group by. We have only the first filter. + let q = Query::new( + "get a:b | filter a == 0 | group_by [a] | filter b == 0", + ) + .unwrap(); + let preds = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("a".to_string()), + cmp: Comparison::Eq, + value: Literal::Integer(0), + }), + }; + assert_eq!(q.coalesced_predicates(None), Some(preds)); + } + + #[test] + fn test_coalesce_predicates_into_subqueries() { + let q = "{ get a:b; get a:b } | join | filter foo == 'bar'"; + let query = Query::new(q).unwrap(); + let preds = query.coalesced_predicates(None).unwrap(); + let expected_predicate = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("foo".to_string()), + cmp: Comparison::Eq, + value: Literal::String("bar".into()), + }), + }; + assert_eq!(preds, expected_predicate); + + // Split the query, which should give us a list of two subqueries, + // followed by the join and filter. + let SplitQuery::Nested { subqueries, .. } = query.split() else { + panic!(); + }; + for subq in subqueries.iter() { + let inner = subq + .coalesced_predicates(Some(expected_predicate.clone())) + .unwrap(); + assert_eq!( + inner, expected_predicate, + "Predicates passed into an inner subquery should be preserved" + ); + } + } + + #[test] + fn test_coalesce_predicates_into_subqueries_with_group_by() { + let q = "{ get a:b | group_by [baz]; get a:b | group_by [foo] } | \ + join | filter foo == 'bar'"; + let query = Query::new(q).unwrap(); + let preds = query.coalesced_predicates(None).unwrap(); + let expected_predicate = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("foo".to_string()), + cmp: Comparison::Eq, + value: Literal::String("bar".into()), + }), + }; + assert_eq!(preds, expected_predicate); + + // Split the query, which should give us a list of two subqueries, + // followed by the join and filter. + let SplitQuery::Nested { subqueries, .. } = query.split() else { + panic!(); + }; + + // The first subquery groups by a field "baz", which isn't in the outer + // filter. It should have that outer predicate removed, and have no + // predicates at all. + let subq = &subqueries[0]; + assert!( + subq.coalesced_predicates(Some(expected_predicate.clone())) + .is_none(), + "Should not push an outer predicate into a subquery, when that \ + subquery includes a group_by that does not name a field in the \ + outer predicate" + ); + + // The second subquery should include the expected predicate, since the + // group_by includes the field named in the filter itself. 
+ let subq = &subqueries[1]; + let inner = subq + .coalesced_predicates(Some(expected_predicate.clone())) + .unwrap(); + assert_eq!( + inner, expected_predicate, + "Predicates passed into an inner subquery should be preserved, \ + when that inner subquery includes a group_by that names the \ + ident in the outer filter" + ); + } + + #[test] + fn test_coalesce_predicates_merged_into_subqueries() { + let q = "{ get a:b | filter baz == 0; get a:b | filter baz == 0 } \ + | join | filter foo == 'bar'"; + let query = Query::new(q).unwrap(); + let preds = query.coalesced_predicates(None).unwrap(); + let expected_predicate = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("foo".to_string()), + cmp: Comparison::Eq, + value: Literal::String("bar".into()), + }), + }; + assert_eq!(preds, expected_predicate); + let expected_inner_predicate = Filter { + negated: false, + expr: FilterExpr::Simple(SimpleFilter { + ident: Ident("baz".to_string()), + cmp: Comparison::Eq, + value: Literal::Integer(0), + }), + }; + + // Split the query, which should give us a list of two subqueries, + // followed by the join and filter. + let SplitQuery::Nested { subqueries, .. } = query.split() else { + panic!(); + }; + for subq in subqueries.iter() { + let inner = subq + .coalesced_predicates(Some(expected_predicate.clone())) + .unwrap(); + assert_eq!( + inner, + expected_predicate.merge(&expected_inner_predicate, LogicalOp::And), + "Predicates passed into an inner subquery should be preserved, \ + and merged with any subquery predicates", + ); + } + } + + #[test] + fn test_query_end_time() { + const MAX_DIFF: i64 = 1_000; + let q = Query::new("get a:b").unwrap(); + assert!( + (q.end_time - Utc::now()).num_nanoseconds().unwrap() < MAX_DIFF, + "Query which does not explicitly name an end time should \ + use now as the end time", + ); + + let q = Query::new("get a:b | filter timestamp > @now() - 1s").unwrap(); + assert!( + (q.end_time - Utc::now()).num_nanoseconds().unwrap() < MAX_DIFF, + "Query which does not explicitly name an end time should \ + use now as the end time", + ); + + let then = Utc::now() - Duration::from_secs(60); + let as_str = then.format("%Y-%m-%dT%H:%M:%S.%f"); + let q = Query::new(&format!("get a:b | filter timestamp < @{as_str}")) + .unwrap(); + assert_eq!( + q.end_time, then, + "Query with a less-than filter and a timestamp should \ + set the query end time" + ); + + let q = Query::new(&format!("get a:b | filter timestamp <= @{as_str}")) + .unwrap(); + assert_eq!( + q.end_time, then, + "Query with a less-than-or-equal filter and a timestamp should \ + set the query end time" + ); + + let q = Query::new(&format!("get a:b | filter timestamp > @{as_str}")) + .unwrap(); + assert!( + (q.end_time - Utc::now()).num_nanoseconds().unwrap() < MAX_DIFF, + "Query with a greater-than timestamp filter should not set an \ + explicit query end time, and so use now" + ); + + let q = Query::new("get a:b | filter timestamp > @now() - 1d").unwrap(); + assert!( + (q.end_time - Utc::now()).num_nanoseconds().unwrap() < MAX_DIFF, + "Query which does not explicitly name an end time should \ + use now as the end time", + ); + + let q = Query::new(&format!( + "get a:b | filter timestamp > @now() - 1d && timestamp < @{as_str}" + )) + .unwrap(); + assert_eq!( + q.end_time, + then, + "Query with a compound less-than-or-equal filter and a timestamp should \ + set the query end time" + ); + + let then = Utc::now() - Duration::from_secs(60); + let then_as_str = then.format("%Y-%m-%dT%H:%M:%S.%f"); + 
let even_earlier = then - Duration::from_secs(10); + let even_earlier_as_str = even_earlier.format("%Y-%m-%dT%H:%M:%S.%f"); + let q = Query::new(&format!( + "get a:b | filter timestamp < @{then_as_str} || timestamp < @{even_earlier_as_str}" + )) + .unwrap(); + assert_eq!( + q.end_time, + then, + "Query with two less-than timestamp filters should use the later timestamp" + ); + + let expected = NaiveDateTime::parse_from_str( + "2024-03-13T06:24:00", + "%Y-%m-%dT%H:%M:%S%.f", + ) + .unwrap() + .and_utc(); + let q = "{ \ + get physical_data_link:bytes_sent ; \ + get physical_data_link:bytes_received \ + } | filter timestamp > @2024-03-13T06:20:00 && timestamp < @2024-03-13T06:24:00"; + let query = Query::new(q).unwrap(); + assert_eq!(query.end_time, expected); + } + + #[test] + fn test_query_end_time_across_subqueries() { + let now = Utc::now(); + const FMT: &str = "%Y-%m-%dT%H:%M:%S.%f"; + let first = now - Duration::from_secs(1); + let second = now - Duration::from_secs_f64(1e-3); + let q = format!( + "{{ \ + get a:b | filter timestamp > @{}; \ + get a:b | filter timestamp > @{} \ + }}", + first.format(FMT), + second.format(FMT), + ); + let query = Query::new(q).unwrap(); + assert!( + query.end_time > second, + "This nested query should have used Utc::now() as the end time" + ); + let end_time = query.end_time; + let SplitQuery::Nested { subqueries, .. } = query.split() else { + unreachable!(); + }; + for subq in subqueries.iter() { + assert_eq!( + subq.end_time, end_time, + "All subqueries should have the same end time." + ); + } + } + + #[test] + fn test_coalesce_limits() { + let query = Query::new("get a:b | last 5").unwrap(); + let lim = query.coalesced_limits(None).expect("Should have a limit"); + assert_eq!( + lim.kind, + LimitKind::Last, + "This limit op has the wrong kind" + ); + assert_eq!(lim.count.get(), 5, "Limit has the wrong count"); + } + + #[test] + fn test_coalesce_limits_merge_same_kind_within_query() { + let qs = ["get a:b | last 10 | last 5", "get a:b | last 5 | last 10"]; + for q in qs { + let query = Query::new(q).unwrap(); + let lim = + query.coalesced_limits(None).expect("Should have a limit"); + assert_eq!( + lim.kind, + LimitKind::Last, + "This limit op has the wrong kind" + ); + assert_eq!( + lim.count.get(), + 5, + "Should have merged two limits of the same kind, \ + taking the one with the smaller count" + ); + } + } + + #[test] + fn test_coalesce_limits_do_not_merge_different_kinds_within_query() { + let qs = + ["get a:b | first 10 | last 10", "get a:b | last 10 | first 10"]; + let kinds = [LimitKind::First, LimitKind::Last]; + for (q, kind) in qs.iter().zip(kinds) { + let query = Query::new(q).unwrap(); + let lim = + query.coalesced_limits(None).expect("Should have a limit"); + assert_eq!(lim.kind, kind, "This limit op has the wrong kind"); + assert_eq!(lim.count.get(), 10); + } + } + + #[test] + fn test_coalesce_limits_rearrange_around_timestamp_filters() { + let qs = [ + "get a:b | filter timestamp < @now() | first 10", + "get a:b | filter timestamp > @now() | last 10", + ]; + let kinds = [LimitKind::First, LimitKind::Last]; + for (q, kind) in qs.iter().zip(kinds) { + let query = Query::new(q).unwrap(); + let lim = query.coalesced_limits(None).expect( + "This limit op should have been re-arranged around \ + a compatible timestamp filter", + ); + assert_eq!(lim.kind, kind, "This limit op has the wrong kind"); + assert_eq!(lim.count.get(), 10); + } + } + + #[test] + fn test_coalesce_limits_do_not_rearrange_around_incompatible_timestamp_filters( + ) { + let qs 
= [ + "get a:b | filter timestamp < @now() | last 10", + "get a:b | filter timestamp > @now() | first 10", + ]; + for q in qs { + let query = Query::new(q).unwrap(); + assert!( + query.coalesced_limits(None).is_none(), + "This limit op should have be merged around an \ + incompatible timestamp filter" + ); + } + } + + #[test] + fn test_coalesce_limits_merge_from_outer_query() { + let query = Query::new("get a:b | last 10").unwrap(); + let outer = + Limit { kind: LimitKind::Last, count: 5.try_into().unwrap() }; + let lim = query + .coalesced_limits(Some(outer)) + .expect("Should have a limit here"); + assert_eq!(lim.kind, LimitKind::Last, "Limit has the wrong kind"); + assert_eq!( + lim.count.get(), + 5, + "Did not pass through outer limit correctly" + ); + } + + #[test] + fn test_coalesce_limits_do_not_merge_different_kind_from_outer_query() { + let query = Query::new("get a:b | last 10").unwrap(); + let outer = + Limit { kind: LimitKind::First, count: 5.try_into().unwrap() }; + let lim = query + .coalesced_limits(Some(outer)) + .expect("Should have a limit here"); + assert_eq!(lim.kind, LimitKind::Last, "Limit has the wrong kind"); + assert_eq!( + lim.count.get(), + 10, + "Inner limit of different kind should ignore the outer one" + ); + } + + #[test] + fn test_coalesce_limits_do_not_coalesce_incompatible_kind_from_outer_query() + { + let query = Query::new("get a:b | filter timestamp > @now()").unwrap(); + let outer = + Limit { kind: LimitKind::First, count: 5.try_into().unwrap() }; + assert!( + query.coalesced_limits(Some(outer)).is_none(), + "Should not coalesce a limit from the outer query, when the \ + inner query contains an incompatible timestamp filter" + ); + } +} diff --git a/oximeter/db/src/oxql/table.rs b/oximeter/db/src/oxql/table.rs new file mode 100644 index 0000000000..2cd141d2fa --- /dev/null +++ b/oximeter/db/src/oxql/table.rs @@ -0,0 +1,293 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Definitions of timeseries and groups of them, a [`Table`]. + +// Copyright 2024 Oxide Computer Company + +use super::point::DataType; +use super::point::MetricType; +use super::point::Points; +use super::query::Alignment; +use super::Error; +use crate::TimeseriesKey; +use highway::HighwayHasher; +use oximeter::FieldValue; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; +use std::hash::Hash; +use std::hash::Hasher; + +/// A timeseries contains a timestamped set of values from one source. +/// +/// This includes the typed key-value pairs that uniquely identify it, and the +/// set of timestamps and data values from it. +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +pub struct Timeseries { + pub fields: BTreeMap, + pub points: Points, + #[serde(skip)] + pub(crate) alignment: Option, +} + +impl Timeseries { + /// Construct a new timeseries, from its fields. + /// + /// It holds no points or type information. That will be enforced by the + /// points type as they are added. 
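A construction sketch for this constructor, assuming crate-internal access; the field names and values are invented for illustration, and the error type is the crate's anyhow-based `Error`:

    fn example_timeseries() -> anyhow::Result<crate::oxql::table::Timeseries> {
        use crate::oxql::point::{DataType, MetricType};
        use crate::oxql::table::Timeseries;
        use oximeter::FieldValue;
        let fields = [
            (String::from("project_id"), FieldValue::from("some-project")),
            (String::from("cpu_id"), FieldValue::I64(0)),
        ];
        // Fields must be non-empty; the data and metric types constrain the
        // points that may be added later.
        Timeseries::new(fields.into_iter(), DataType::Double, MetricType::Gauge)
    }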
+ pub fn new( + fields: impl Iterator, + data_type: DataType, + metric_type: MetricType, + ) -> Result { + let fields: BTreeMap<_, _> = fields.collect(); + anyhow::ensure!(!fields.is_empty(), "Fields cannot be empty"); + Ok(Self { + fields, + points: Points::empty(data_type, metric_type), + alignment: None, + }) + } + + pub fn key(&self) -> TimeseriesKey { + // NOTE: The key here is _not_ stable, like the one used in the database + // itself to identify timeseries. That's OK, however, because we do not + // serialize this value anywhere -- it's used entirely for the lifetime + // of one query, and then thrown away, and only needs to be consistent + // for that long. + let mut hasher = HighwayHasher::default(); + for (name, value) in self.fields.iter() { + name.hash(&mut hasher); + value.hash(&mut hasher); + } + hasher.finish() + } + + /// Return a copy of the timeseries, keeping only the provided fields. + /// + /// An error is returned if the timeseries does not contain those fields. + pub(crate) fn copy_with_fields( + &self, + kept_fields: &[&str], + ) -> Result { + let mut fields = BTreeMap::new(); + for field in kept_fields { + let Some(f) = self.fields.get(*field) else { + anyhow::bail!("Timeseries does not contain field '{}'", field); + }; + fields.insert(field.to_string(), f.clone()); + } + Ok(Self { + fields, + points: self.points.clone(), + alignment: self.alignment, + }) + } + + // Return `true` if the schema in `other` matches that of `self`. + fn matches_schema(&self, other: &Timeseries) -> bool { + if self.fields.len() != other.fields.len() { + return false; + } + for (f0, f1) in self.fields.iter().zip(other.fields.iter()) { + // Check the field names. + if f0.0 != f1.0 { + return false; + } + // And types. + if f0.1.field_type() != f1.1.field_type() { + return false; + } + } + + // And the type info is the same as well. + if !self + .points + .data_types() + .zip(other.points.data_types()) + .all(|(x, y)| x == y) + { + return false; + } + self.points + .metric_types() + .zip(other.points.metric_types()) + .all(|(x, y)| x == y) + } + + /// Return a new timeseries, with the points cast to the provided list of + /// data types. + /// + /// This returns an error if the points cannot be so cast, or the + /// dimensionality of the types requested differs from the dimensionality of + /// the points themselves. + pub(crate) fn cast(&self, types: &[DataType]) -> Result { + let fields = self.fields.clone(); + Ok(Self { + fields, + points: self.points.cast(types)?, + alignment: self.alignment, + }) + } +} + +/// A table represents one or more timeseries with the same schema. +/// +/// A table is the result of an OxQL query. It contains a name, usually the name +/// of the timeseries schema from which the data is derived, and any number of +/// timeseries, which contain the actual data. +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +pub struct Table { + // The name of the table. + // + // This starts as the name of the timeseries schema the data is derived + // from, but can be modified as operations are done. + pub(super) name: String, + // The set of timeseries in the table, ordered by key. + timeseries: BTreeMap, +} + +impl Table { + /// Create a new table, with no timeseries. + pub fn new(name: impl AsRef) -> Self { + Self { name: name.as_ref().to_string(), timeseries: BTreeMap::new() } + } + + /// Create a table from a set of timeseries. 
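A sketch of assembling a `Table` from timeseries that share a schema, assuming crate-internal access; the table name and helper are illustrative:

    fn example_table(
        a: crate::oxql::table::Timeseries,
        b: crate::oxql::table::Timeseries,
    ) -> anyhow::Result<crate::oxql::table::Table> {
        use crate::oxql::table::Table;
        let mut table = Table::from_timeseries("a:b", [a].into_iter())?;
        // Insertion fails if the schema differs or the key (derived from the
        // field values) already exists; `replace` overwrites instead.
        table.insert(b)?;
        Ok(table)
    }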
+ pub fn from_timeseries( + name: impl AsRef, + t: impl Iterator, + ) -> Result { + let mut out = Self::new(name); + for each in t { + out.insert(each)?; + } + Ok(out) + } + + /// Return the name of the table. + pub fn name(&self) -> &str { + self.name.as_str() + } + + /// Return the number of timeseries in this table. + pub fn n_timeseries(&self) -> usize { + self.timeseries.len() + } + + /// Return the list of timeseries in this table, ordered by key. + pub fn timeseries(&self) -> impl ExactSizeIterator { + self.timeseries.values() + } + + // Check that the schema of `other` matches `self`. + // + // That means the fields have the same names and types, and the timeseries + // have the same type info. + fn matches_schema(&self, other: &Timeseries) -> bool { + if let Some((_, first)) = self.timeseries.first_key_value() { + first.matches_schema(other) + } else { + // Table is empty. + true + } + } + + /// Get a timeseries matching the provided key, if any. + pub fn get_mut(&mut self, key: TimeseriesKey) -> Option<&mut Timeseries> { + self.timeseries.get_mut(&key) + } + + /// Insert a new timeseries into the table. + /// + /// If the timeseries already exists, an error is returned. Use + /// [`Table::replace()`] to replace an existing timeseries. + /// + /// It is an error if the timeseries does not have the same schema as the + /// others in the table (if any). + pub fn insert(&mut self, timeseries: Timeseries) -> Result<(), Error> { + anyhow::ensure!( + self.matches_schema(×eries), + "Timeseries in a table must have the same schema", + ); + let key = timeseries.key(); + let Entry::Vacant(e) = self.timeseries.entry(key) else { + return Err(anyhow::anyhow!( + "Timeseries with key {} already exists", + key, + )); + }; + e.insert(timeseries); + Ok(()) + } + + /// Replace a timeseries in the table. + pub fn replace(&mut self, timeseries: Timeseries) { + let key = timeseries.key(); + let _ = self.timeseries.insert(key, timeseries); + } + + /// Add multiple timeseries to the table. + /// + /// An error is returned if any timeseries already exist. + pub fn extend( + &mut self, + timeseries: impl Iterator, + ) -> Result<(), Error> { + for t in timeseries { + self.insert(t)?; + } + Ok(()) + } + + /// Return the number of timeseries in the table. + pub fn len(&self) -> usize { + self.timeseries.len() + } + + /// Return a mutable iterator over timeseries in the table. + pub fn iter_mut(&mut self) -> impl Iterator { + self.timeseries.values_mut() + } + + /// Return an iterator over timeseries in the table. + pub fn iter(&self) -> impl Iterator { + self.timeseries.values() + } + + /// Consume the table and return an iterator over its timeseries. + pub fn into_iter(self) -> impl Iterator { + self.timeseries.into_values() + } + + /// Return `true` if all the timeseries in this table are aligned, with the + /// same alignment information. + /// + /// If there are no timeseries, `false` is returned. + pub fn is_aligned(&self) -> bool { + let mut timeseries = self.timeseries.values(); + let Some(t) = timeseries.next() else { + return false; + }; + let Some(alignment) = t.alignment else { + return false; + }; + timeseries.all(|t| t.alignment == Some(alignment)) + } + + /// Return the alignment of this table, if all timeseries are aligned with + /// the same alignment. 
+ pub fn alignment(&self) -> Option { + if self.is_aligned() { + Some( + self.timeseries.first_key_value().unwrap().1.alignment.unwrap(), + ) + } else { + None + } + } +} diff --git a/oximeter/db/src/query.rs b/oximeter/db/src/query.rs index 9212769573..e14dfbbc55 100644 --- a/oximeter/db/src/query.rs +++ b/oximeter/db/src/query.rs @@ -576,33 +576,32 @@ impl SelectQuery { match self.field_selectors.len() { 0 => None, n => { - // Select timeseries key for first column, plus field name and field value for - // all columns. - const SELECTED_COLUMNS: &[&str] = - &["field_name", "field_value"]; + // Select timeseries key for first column, plus the field value + // for all columns, aliased to the field name. const JOIN_COLUMNS: &[&str] = &["timeseries_name", "timeseries_key"]; - let mut top_level_columns = - Vec::with_capacity(1 + SELECTED_COLUMNS.len() * n); + let mut top_level_columns = Vec::with_capacity(2 + n); top_level_columns.push(String::from( "filter0.timeseries_key as timeseries_key", )); let mut from_statements = String::new(); - for (i, subquery) in self + for (i, (field_name, subquery)) in self .field_selectors - .values() - .map(|sel| { - sel.as_query(&self.timeseries_schema.timeseries_name) + .iter() + .map(|(field_schema, selector)| { + ( + &field_schema.name, + selector.as_query( + &self.timeseries_schema.timeseries_name, + ), + ) }) .enumerate() { - for column in SELECTED_COLUMNS { - top_level_columns.push(format!( - "filter{i}.{column}", - i = i, - column = column - )); - } + top_level_columns.push(format!( + "filter{}.field_value AS {}", + i, field_name, + )); if i == 0 { from_statements.push_str(&format!( @@ -1028,8 +1027,8 @@ mod tests { concat!( "SELECT ", "filter0.timeseries_key as timeseries_key, ", - "filter0.field_name, filter0.field_value, ", - "filter1.field_name, filter1.field_value ", + "filter0.field_value AS f0, ", + "filter1.field_value AS f1 ", "FROM (", "SELECT * FROM oximeter.fields_i64 ", "WHERE timeseries_name = 'foo:bar' ", @@ -1095,8 +1094,8 @@ mod tests { concat!( "SELECT ", "filter0.timeseries_key as timeseries_key, ", - "filter0.field_name, filter0.field_value, ", - "filter1.field_name, filter1.field_value ", + "filter0.field_value AS f0, ", + "filter1.field_value AS f1 ", "FROM (", "SELECT * FROM oximeter.fields_i64 ", "WHERE timeseries_name = 'foo:bar' AND field_name = 'f0' AND field_value = 0", @@ -1152,8 +1151,8 @@ mod tests { query.field_query().unwrap(), concat!( "SELECT filter0.timeseries_key as timeseries_key, ", - "filter0.field_name, filter0.field_value, ", - "filter1.field_name, filter1.field_value ", + "filter0.field_value AS f0, ", + "filter1.field_value AS f1 ", "FROM (", "SELECT * FROM oximeter.fields_i64 ", "WHERE timeseries_name = 'foo:bar' AND field_name = 'f0' AND field_value = 0", diff --git a/oximeter/db/src/sql/mod.rs b/oximeter/db/src/sql/mod.rs index 5d9685d19f..f3082dcaa5 100644 --- a/oximeter/db/src/sql/mod.rs +++ b/oximeter/db/src/sql/mod.rs @@ -32,6 +32,7 @@ use crate::query::measurement_table_name; use crate::DatumType; use crate::Error as OxdbError; use crate::FieldType; +use crate::QuerySummary; use crate::TimeseriesName; use crate::TimeseriesSchema; use indexmap::IndexSet; @@ -131,6 +132,31 @@ macro_rules! unsupported { }; } +/// A tabular result from a SQL query against a timeseries. +#[derive(Clone, Debug, Default, serde::Serialize)] +pub struct Table { + /// The name of each column in the result set. + pub column_names: Vec, + /// The rows of the result set, one per column. 
+ pub rows: Vec>, +} + +/// The full result of running a SQL query against a timeseries. +#[derive(Clone, Debug)] +pub struct QueryResult { + /// The query as written by the client. + pub original_query: String, + /// The rewritten query, run against the JOINed representation of the + /// timeseries. + /// + /// This is the query that is actually run in the database itself. + pub rewritten_query: String, + /// Summary of the resource usage of the query. + pub summary: QuerySummary, + /// The result of the query, with column names and rows. + pub table: Table, +} + /// A helper type to preprocess any ClickHouse-specific SQL, and present a /// known-safe version of it to the main `sqlparser` code. /// @@ -562,10 +588,11 @@ impl RestrictedQuery { having: None, named_window: vec![], qualify: None, + value_table_mode: None, }; let mut query = Self::select_to_query(top_level_select); query.order_by = order_by; - Cte { alias, query, from: None } + Cte { alias, query, from: None, materialized: None } } // Create a SQL parser `Ident` with a the given name. @@ -690,6 +717,7 @@ impl RestrictedQuery { having: None, named_window: vec![], qualify: None, + value_table_mode: None, } } @@ -760,6 +788,7 @@ impl RestrictedQuery { having: None, named_window: vec![], qualify: None, + value_table_mode: None, } } diff --git a/oximeter/instruments/Cargo.toml b/oximeter/instruments/Cargo.toml index c49d3b976e..a04e26fdaa 100644 --- a/oximeter/instruments/Cargo.toml +++ b/oximeter/instruments/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] cfg-if = { workspace = true, optional = true } chrono = { workspace = true, optional = true } diff --git a/oximeter/oximeter-macro-impl/Cargo.toml b/oximeter/oximeter-macro-impl/Cargo.toml index df9ed547ed..d0a8c5f566 100644 --- a/oximeter/oximeter-macro-impl/Cargo.toml +++ b/oximeter/oximeter-macro-impl/Cargo.toml @@ -8,6 +8,9 @@ license = "MPL-2.0" [lib] proc-macro = true +[lints] +workspace = true + [dependencies] proc-macro2.workspace = true quote.workspace = true diff --git a/oximeter/oximeter/Cargo.toml b/oximeter/oximeter/Cargo.toml index b545c697de..2445e0483a 100644 --- a/oximeter/oximeter/Cargo.toml +++ b/oximeter/oximeter/Cargo.toml @@ -5,6 +5,9 @@ authors = ["Benjamin Naecker "] edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] bytes = { workspace = true, features = [ "serde" ] } chrono.workspace = true diff --git a/oximeter/oximeter/src/histogram.rs b/oximeter/oximeter/src/histogram.rs index aaf9297ca4..82b9916153 100644 --- a/oximeter/oximeter/src/histogram.rs +++ b/oximeter/oximeter/src/histogram.rs @@ -513,6 +513,11 @@ where self.bins.iter() } + /// Get the bin at the given index. + pub fn get(&self, index: usize) -> Option<&Bin> { + self.bins.get(index) + } + /// Generate paired arrays with the left bin edges and the counts, for each bin. /// /// The returned edges are always left-inclusive, by construction of the histogram. 
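A usage sketch for the new accessor: `get` provides random access to a bin without walking the iterator, which is what the updated tests below switch to. As in those tests, a single [0, 1) bin is padded with implicit underflow and overflow bins:

    fn main() {
        use oximeter::histogram::Histogram;
        let mut hist = Histogram::with_bins(&[(0i64..1).into()]).unwrap();
        hist.sample(0).unwrap();
        // Bins are (-inf, 0), [0, 1), and [1, +inf), so indices 0..=2 exist.
        assert_eq!(hist.get(1).unwrap().count, 1);
        assert!(hist.get(3).is_none());
    }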
@@ -993,9 +998,9 @@ mod tests { let mut hist = Histogram::with_bins(&[(0..1).into()]).unwrap(); assert!(hist.sample(i64::MIN).is_ok()); assert!(hist.sample(i64::MAX).is_ok()); - assert_eq!(hist.iter().nth(0).unwrap().count, 1); - assert_eq!(hist.iter().nth(1).unwrap().count, 0); - assert_eq!(hist.iter().nth(2).unwrap().count, 1); + assert_eq!(hist.get(0).unwrap().count, 1); + assert_eq!(hist.get(1).unwrap().count, 0); + assert_eq!(hist.get(2).unwrap().count, 1); let mut hist = Histogram::with_bins(&[(0.0..1.0).into()]).unwrap(); assert!(hist.sample(f64::MIN).is_ok()); diff --git a/oximeter/oximeter/src/types.rs b/oximeter/oximeter/src/types.rs index eff5c399e3..3e6ffc5442 100644 --- a/oximeter/oximeter/src/types.rs +++ b/oximeter/oximeter/src/types.rs @@ -17,6 +17,7 @@ use num::traits::Zero; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; +use std::borrow::Cow; use std::boxed::Box; use std::collections::BTreeMap; use std::fmt; @@ -77,6 +78,8 @@ macro_rules! impl_field_type_from { } impl_field_type_from! { String, FieldType::String } +impl_field_type_from! { &'static str, FieldType::String } +impl_field_type_from! { Cow<'static, str>, FieldType::String } impl_field_type_from! { i8, FieldType::I8 } impl_field_type_from! { u8, FieldType::U8 } impl_field_type_from! { i16, FieldType::I16 } @@ -103,7 +106,7 @@ impl_field_type_from! { bool, FieldType::Bool } )] #[serde(tag = "type", content = "value", rename_all = "snake_case")] pub enum FieldValue { - String(String), + String(Cow<'static, str>), I8(i8), U8(u8), I16(i16), @@ -147,7 +150,9 @@ impl FieldValue { typ: field_type.to_string(), }; match field_type { - FieldType::String => Ok(FieldValue::String(s.to_string())), + FieldType::String => { + Ok(FieldValue::String(Cow::Owned(s.to_string()))) + } FieldType::I8 => { Ok(FieldValue::I8(s.parse().map_err(|_| make_err())?)) } @@ -222,14 +227,20 @@ impl_field_value_from! { i32, FieldValue::I32 } impl_field_value_from! { u32, FieldValue::U32 } impl_field_value_from! { i64, FieldValue::I64 } impl_field_value_from! { u64, FieldValue::U64 } -impl_field_value_from! { String, FieldValue::String } +impl_field_value_from! { Cow<'static, str>, FieldValue::String } impl_field_value_from! { IpAddr, FieldValue::IpAddr } impl_field_value_from! { Uuid, FieldValue::Uuid } impl_field_value_from! { bool, FieldValue::Bool } impl From<&str> for FieldValue { fn from(value: &str) -> Self { - FieldValue::String(String::from(value)) + FieldValue::String(Cow::Owned(String::from(value))) + } +} + +impl From for FieldValue { + fn from(value: String) -> Self { + FieldValue::String(Cow::Owned(value)) } } @@ -311,7 +322,7 @@ pub enum DatumType { impl DatumType { /// Return `true` if this datum type is cumulative, and `false` otherwise. - pub fn is_cumulative(&self) -> bool { + pub const fn is_cumulative(&self) -> bool { matches!( self, DatumType::CumulativeI64 @@ -331,9 +342,26 @@ impl DatumType { ) } + /// Return `true` if this datum type is a scalar, and `false` otherwise. + pub const fn is_scalar(&self) -> bool { + !self.is_histogram() + } + /// Return `true` if this datum type is a histogram, and `false` otherwise. 
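Quick checks of the datum-type helpers, as a sketch: `is_scalar` is simply the negation of `is_histogram`, which now covers every histogram variant:

    fn datum_type_helpers() {
        use oximeter::DatumType;
        assert!(DatumType::HistogramU8.is_histogram());
        assert!(DatumType::CumulativeI64.is_scalar());
        assert!(DatumType::CumulativeI64.is_cumulative());
    }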
pub const fn is_histogram(&self) -> bool { - matches!(self, DatumType::HistogramF64 | DatumType::HistogramI64) + matches!( + self, + DatumType::HistogramI8 + | DatumType::HistogramU8 + | DatumType::HistogramI16 + | DatumType::HistogramU16 + | DatumType::HistogramI32 + | DatumType::HistogramU32 + | DatumType::HistogramI64 + | DatumType::HistogramU64 + | DatumType::HistogramF32 + | DatumType::HistogramF64 + ) } } @@ -450,6 +478,11 @@ impl Datum { Datum::Missing(ref inner) => inner.start_time(), } } + + /// Return true if this datum is missing. + pub fn is_missing(&self) -> bool { + matches!(self, Datum::Missing(_)) + } } // Helper macro to generate `From` and `From<&T>` for the datum types. @@ -580,7 +613,7 @@ impl Measurement { /// Return true if this measurement represents a missing datum. pub fn is_missing(&self) -> bool { - matches!(self.datum, Datum::Missing(_)) + self.datum.is_missing() } /// Return the datum for this measurement diff --git a/oximeter/producer/Cargo.toml b/oximeter/producer/Cargo.toml index 79f6c754f7..dfac555a49 100644 --- a/oximeter/producer/Cargo.toml +++ b/oximeter/producer/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" description = "Crate for producing metric data to be collected by the Oxide control plane" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] chrono.workspace = true dropshot.workspace = true @@ -19,7 +22,11 @@ tokio.workspace = true thiserror.workspace = true uuid.workspace = true omicron-workspace-hack.workspace = true +internal-dns.workspace = true [dev-dependencies] anyhow.workspace = true clap.workspace = true +omicron-test-utils.workspace = true +serde_json.workspace = true +slog-term.workspace = true diff --git a/oximeter/producer/examples/producer.rs b/oximeter/producer/examples/producer.rs index 8dbe0b6ad9..87748dd12d 100644 --- a/oximeter/producer/examples/producer.rs +++ b/oximeter/producer/examples/producer.rs @@ -10,10 +10,8 @@ use anyhow::Context; use chrono::DateTime; use chrono::Utc; use clap::Parser; -use dropshot::ConfigDropshot; use dropshot::ConfigLogging; use dropshot::ConfigLoggingLevel; -use dropshot::HandlerTaskMode; use omicron_common::api::internal::nexus::ProducerEndpoint; use omicron_common::api::internal::nexus::ProducerKind; use oximeter::types::Cumulative; @@ -112,11 +110,6 @@ impl Producer for CpuBusyProducer { #[tokio::main] async fn main() -> anyhow::Result<()> { let args = Args::parse(); - let dropshot = ConfigDropshot { - bind_address: args.address, - request_body_max_bytes: 2048, - default_handler_task_mode: HandlerTaskMode::Detached, - }; let log = LogConfig::Config(ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Debug, }); @@ -127,13 +120,15 @@ async fn main() -> anyhow::Result<()> { id: registry.producer_id(), kind: ProducerKind::Service, address: args.address, - base_route: "/collect".to_string(), interval: Duration::from_secs(10), }; - let config = - Config { server_info, registration_address: args.nexus, dropshot, log }; + let config = Config { + server_info, + registration_address: Some(args.nexus), + request_body_max_bytes: 2048, + log, + }; let server = Server::with_registry(registry, &config) - .await .context("failed to create producer")?; server.serve_forever().await.context("server failed") } diff --git a/oximeter/producer/src/lib.rs b/oximeter/producer/src/lib.rs index 3fecaadf4f..6bf8954ae0 100644 --- a/oximeter/producer/src/lib.rs +++ b/oximeter/producer/src/lib.rs @@ -4,7 +4,7 @@ //! Types for serving produced metric data to an Oximeter collector server. 
-// Copyright 2021 Oxide Computer Company +// Copyright 2024 Oxide Computer Company use dropshot::endpoint; use dropshot::ApiDescription; @@ -16,7 +16,13 @@ use dropshot::HttpServer; use dropshot::HttpServerStarter; use dropshot::Path; use dropshot::RequestContext; +use internal_dns::resolver::ResolveError; +use internal_dns::resolver::Resolver; +use internal_dns::ServiceName; +use nexus_client::types::ProducerEndpoint as ApiProducerEndpoint; use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::backoff; +use omicron_common::backoff::BackoffError; use omicron_common::FileKv; use oximeter::types::ProducerRegistry; use oximeter::types::ProducerResults; @@ -27,9 +33,12 @@ use slog::debug; use slog::error; use slog::info; use slog::o; +use slog::warn; use slog::Drain; use slog::Logger; +use std::net::IpAddr; use std::net::SocketAddr; +use std::time::Duration; use thiserror::Error; use uuid::Uuid; @@ -38,11 +47,20 @@ pub enum Error { #[error("Error running producer HTTP server: {0}")] Server(String), - #[error("Error registering as metric producer: {msg}")] - RegistrationError { retryable: bool, msg: String }, - #[error("Producer registry and config UUIDs do not match")] UuidMismatch, + + #[error( + "The producer must listen on an IPv6 address \ + to resolve Nexus using DNS" + )] + Ipv6AddressRequiredForResolution, + + #[error("Error resolving Nexus using DNS")] + Resolution(#[source] ResolveError), + + #[error("Invalid port number provided for Nexus registration address")] + InvalidRegistrationPort, } /// Either configuration for building a logger, or an actual logger already @@ -64,56 +82,65 @@ pub struct Config { /// The information for contacting this server, and collecting its metrics. pub server_info: ProducerEndpoint, /// The address at which we attempt to register as a producer. - pub registration_address: SocketAddr, - /// Configuration for starting the Dropshot server used to produce metrics. - pub dropshot: ConfigDropshot, + /// + /// If the address is not provided, the address of Nexus will be resolved + /// using internal DNS, based on the local address of the server being + /// configured. + pub registration_address: Option<SocketAddr>, + /// The maximum size of Dropshot requests. + pub request_body_max_bytes: usize, /// The logging configuration or actual logger used to emit logs. pub log: LogConfig, } /// A Dropshot server used to expose metrics to be collected over the network. -/// -/// This is a "batteries-included" HTTP server, meant to be used in applications that don't -/// otherwise run a server. The standalone functions [`register`] and [`collect`] can be used as -/// part of an existing Dropshot server's API. pub struct Server { registry: ProducerRegistry, + registration_task: tokio::task::JoinHandle<()>, server: HttpServer<ProducerRegistry>, } impl Server { /// Start a new metric server, registering it with the chosen endpoint, and listening for /// requests on the associated address and route. - pub async fn start(config: &Config) -> Result<Self, Error> { + /// + /// Note that the producer server is registered with Nexus in a background + /// task. That task also periodically re-registers with Nexus to ensure that + /// data continues to be collected. + pub fn start(config: &Config) -> Result<Self, Error> { Self::with_registry( ProducerRegistry::with_id(config.server_info.id), &config, ) - .await } /// Create a new metric producer server, with an existing registry. - pub async fn with_registry( + /// + /// Note that the producer server is registered with Nexus in a background + /// task.
+ pub fn with_registry( registry: ProducerRegistry, config: &Config, ) -> Result<Self, Error> { Self::new_impl( registry, config.server_info.clone(), - &config.registration_address, - &config.dropshot, + config.registration_address.as_ref(), + config.request_body_max_bytes, &config.log, ) - .await } /// Serve requests for metrics. pub async fn serve_forever(self) -> Result<(), Error> { - self.server.await.map_err(Error::Server) + let res = self.server.await.map_err(Error::Server); + self.registration_task.abort(); + res } /// Close the server pub async fn close(self) -> Result<(), Error> { + self.registration_task.abort(); self.server.close().await.map_err(Error::Server) } @@ -122,7 +149,7 @@ impl Server { /// The registry is thread-safe and clonable, so the returned reference can be used throughout /// an application to register types implementing the [`Producer`](oximeter::traits::Producer) /// trait. The samples generated by the registered producers will be included in response to a - /// request on the collection endpoint. + /// request on the collection endpoint. pub fn registry(&self) -> &ProducerRegistry { &self.registry } @@ -172,18 +199,25 @@ impl Server { } // Create a new server registering with Nexus. - async fn new_impl( + fn new_impl( registry: ProducerRegistry, mut server_info: ProducerEndpoint, - registration_address: &SocketAddr, - dropshot: &ConfigDropshot, + registration_address: Option<&SocketAddr>, + request_body_max_bytes: usize, log: &LogConfig, ) -> Result<Self, Error> { if registry.producer_id() != server_info.id { return Err(Error::UuidMismatch); } + + // Build the logger / server. let log = Self::build_logger(log)?; - let server = Self::build_dropshot_server(&log, &registry, dropshot)?; + let dropshot = ConfigDropshot { + bind_address: server_info.address, + request_body_max_bytes, + default_handler_task_mode: dropshot::HandlerTaskMode::Detached, + }; + let server = Self::build_dropshot_server(&log, &registry, &dropshot)?; // Update the producer endpoint address with the actual server's // address, to handle cases where client listens on any available @@ -198,79 +232,135 @@ impl Server { server_info.address = server.local_addr(); } - debug!(log, "registering metric server as a producer"); - register(*registration_address, &log, &server_info).await?; + // Create a resolver if needed, or use Nexus's address directly. + let find_nexus = match registration_address { + Some(addr) => { + if addr.port() == 0 { + return Err(Error::InvalidRegistrationPort); + } + debug!( + log, + "Nexus IP provided explicitly, will use it"; + "addr" => %addr, + ); + FindNexus::ByAddr(*addr) + } + None => { + // Ensure that we've been provided with an IPv6 address if we're + // using DNS to resolve Nexus. That's required because we need + // to use the /48 to find our DNS server itself. + let IpAddr::V6(our_addr) = server_info.address.ip() else { + return Err(Error::Ipv6AddressRequiredForResolution); + }; + debug!( + log, + "Nexus IP not provided, will use DNS to resolve it" + ); + Resolver::new_from_ip( + log.new(o!("component" => "internal-dns-resolver")), + our_addr, + ) + .map_err(Error::Resolution) + .map(FindNexus::WithResolver)? + } + }; + + // Spawn the task that will register with Nexus in the background.
+ debug!(log, "starting producer registration task"); + let info = ApiProducerEndpoint::from(&server_info); + let registration_task = tokio::task::spawn(registration_task( + find_nexus, + log.new(o!("component" => "producer-registration-task")), + info, + )); info!( log, "starting oximeter metric producer server"; - "route" => server_info.collection_route(), "producer_id" => ?registry.producer_id(), "address" => server.local_addr(), "interval" => ?server_info.interval, ); - Ok(Self { registry, server }) + Ok(Self { registry, registration_task, server }) + } +} + +/// Helper passed to the renewal task, used to determine whether / how to find +/// Nexus periodically. +enum FindNexus { + /// An explicit address was provided at creation time, just use it. + ByAddr(SocketAddr), + /// An address was not provided, we'll resolve it on each attempt to renew + /// the lease. + WithResolver(Resolver), +} + +/// The rate at which we renew, as a fraction of the renewal interval. +// +// E.g., a value of 4 means wait no more than 1/4 the period before renewing the +// lease. Be aware that renewal occurs with backoff, so it may be useful to +// register quite aggressively to avoid Nexus pruning the producer too early. +const RENEWAL_RATE: u32 = 4; + +/// A backround task that periodically renews this producer's lease with Nexus. +async fn registration_task( + find_nexus: FindNexus, + log: Logger, + endpoint: ApiProducerEndpoint, +) { + loop { + debug!( + log, + "registering / renewing oximeter producer lease with Nexus" + ); + let address = match &find_nexus { + FindNexus::ByAddr(addr) => *addr, + FindNexus::WithResolver(resolver) => { + resolve_nexus_with_backoff(&log, resolver).await + } + }; + debug!(log, "using nexus address for registration"; "addr" => ?address); + let lease_duration = + register_with_backoff(address, &log, &endpoint).await; + debug!(log, "registered with nexus successfully"); + + // Wait for a reasonable fraction of the renewal period, and then hit + // 'em again. + let wait = + lease_duration.checked_div(RENEWAL_RATE).unwrap_or(lease_duration); + debug!( + log, + "pausing until time to renew lease"; + "lease_duration" => ?lease_duration, + "wait_period" => ?wait, + ); + tokio::time::sleep(wait).await; } } // Register API endpoints of the `Server`. fn metric_server_api() -> ApiDescription { let mut api = ApiDescription::new(); - api.register(collect_endpoint) - .expect("Failed to register handler for collect_endpoint"); + api.register(collect).expect("Failed to register handler for collect"); api } #[derive(Clone, Copy, Debug, Deserialize, JsonSchema, Serialize)] pub struct ProducerIdPathParams { + /// The ID of the producer to be polled. pub producer_id: Uuid, } -// Implementation of the actual collection routine used by the `Server`. +/// Collect metric data from this producer. #[endpoint { method = GET, - path = "/collect/{producer_id}", + path = "/{producer_id}", }] -async fn collect_endpoint( +async fn collect( request_context: RequestContext, path_params: Path, ) -> Result, HttpError> { let registry = request_context.context(); let producer_id = path_params.into_inner().producer_id; - collect(registry, producer_id).await -} - -// TODO this seems misplaced. -/// Register a metric server to be polled for metric data. -/// -/// This function is used to provide consumers the flexibility to define their own Dropshot -/// servers, rather than using the `Server` provided by this crate (which starts a _new_ server). 
-pub async fn register( - address: SocketAddr, - log: &slog::Logger, - server_info: &omicron_common::api::internal::nexus::ProducerEndpoint, -) -> Result<(), Error> { - let client = - nexus_client::Client::new(&format!("http://{}", address), log.clone()); - client.cpapi_producers_post(&server_info.into()).await.map(|_| ()).map_err( - |err| { - let retryable = match &err { - nexus_client::Error::CommunicationError(..) => true, - nexus_client::Error::ErrorResponse(resp) => { - resp.status().is_server_error() - } - _ => false, - }; - let msg = err.to_string(); - Error::RegistrationError { retryable, msg } - }, - ) -} - -/// Handle a request to pull available metric data from a [`ProducerRegistry`]. -pub async fn collect( - registry: &ProducerRegistry, - producer_id: Uuid, -) -> Result<HttpResponseOk<ProducerResults>, HttpError> { if producer_id == registry.producer_id() { Ok(HttpResponseOk(registry.collect())) } else { @@ -284,3 +374,198 @@ pub async fn collect( )) } } + +/// Resolve Nexus's address using the provided resolver. +async fn resolve_nexus_with_backoff( + log: &Logger, + resolver: &Resolver, +) -> SocketAddr { + let log_failure = |error, delay| { + warn!( + log, + "failed to lookup Nexus IP, will retry"; + "delay" => ?delay, + "error" => ?error, + ); + }; + let do_lookup = || async { + resolver + .lookup_socket_v6(ServiceName::Nexus) + .await + .map_err(|e| BackoffError::transient(e.to_string())) + .map(Into::into) + }; + backoff::retry_notify( + backoff::retry_policy_internal_service(), + do_lookup, + log_failure, + ) + .await + .expect("Expected infinite retry loop resolving Nexus address") +} + +/// Register as a metric producer with Nexus, retrying endlessly with backoff. +/// +/// This returns the lease renewal period that we're required to re-register +/// within. +async fn register_with_backoff( + addr: SocketAddr, + log: &Logger, + endpoint: &ApiProducerEndpoint, +) -> Duration { + let log_failure = |error, delay| { + warn!( + log, + "failed to register as a producer with Nexus, will retry"; + "delay" => ?delay, + "error" => ?error, + ); + }; + // For the purposes of oximeter registration, all errors are retryable. The + // main reason for this is that there's just not much better we can do. + // Panicking seems bad, but stopping the retry loop is also not great + // without a way to kick it to start trying again. We may want to add + // better reporting, such as a counter or way to fetch the last registration + // result.
+ let do_register = || async { + let client = + nexus_client::Client::new(&format!("http://{}", addr), log.clone()); + client + .cpapi_producers_post(&endpoint.into()) + .await + .map(|response| response.into_inner().lease_duration.into()) + .map_err(|e| BackoffError::transient(e.to_string())) + }; + backoff::retry_notify( + backoff::retry_policy_internal_service(), + do_register, + log_failure, + ) + .await + .expect("Expected infinite retry loop registering as a producer with") +} + +#[cfg(test)] +mod tests { + use super::Config; + use super::LogConfig; + use super::ProducerEndpoint; + use super::Server; + use dropshot::endpoint; + use dropshot::ApiDescription; + use dropshot::ConfigDropshot; + use dropshot::HttpError; + use dropshot::HttpResponseCreated; + use dropshot::HttpServer; + use dropshot::HttpServerStarter; + use dropshot::RequestContext; + use omicron_common::api::internal::nexus::ProducerKind; + use omicron_common::api::internal::nexus::ProducerRegistrationResponse; + use omicron_test_utils::dev::poll::{wait_for_condition, CondCheckError}; + use slog::Drain; + use slog::Logger; + use std::sync::atomic::AtomicU32; + use std::sync::atomic::Ordering; + use std::sync::Arc; + use std::time::Duration; + use uuid::Uuid; + + fn test_logger() -> Logger { + let dec = + slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter); + let drain = slog_term::FullFormat::new(dec).build().fuse(); + let log = + Logger::root(drain, slog::o!("component" => "fake-cleanup-task")); + log + } + + // Re-registration interval for tests. + const INTERVAL: Duration = Duration::from_secs(1); + + type Context = Arc<AtomicU32>; + + // Mock endpoint for the test Nexus server. + #[endpoint { + method = POST, + path = "/metrics/producers", + }] + async fn register_producer( + rqctx: RequestContext<Context>, + ) -> Result<HttpResponseCreated<ProducerRegistrationResponse>, HttpError> + { + rqctx.context().fetch_add(1, Ordering::SeqCst); + Ok(HttpResponseCreated(ProducerRegistrationResponse { + lease_duration: INTERVAL, + })) + } + + // Start a Dropshot server mocking the Nexus registration endpoint. + fn spawn_fake_nexus_server(log: &Logger) -> HttpServer<Context> { + let mut api = ApiDescription::new(); + api.register(register_producer).expect("Expected to register endpoint"); + HttpServerStarter::new( + &ConfigDropshot { + bind_address: "[::1]:0".parse().unwrap(), + request_body_max_bytes: 2048, + ..Default::default() + }, + api, + Arc::new(AtomicU32::new(0)), + log, + ) + .expect("Expected to start Dropshot server") + .start() + } + + #[tokio::test] + async fn test_producer_registration_task() { + let log = test_logger(); + let fake_nexus = spawn_fake_nexus_server(&log); + slog::info!( + log, + "fake nexus test server listening"; + "address" => ?fake_nexus.local_addr(), + ); + + let address = "[::1]:0".parse().unwrap(); + let config = Config { + server_info: ProducerEndpoint { + id: Uuid::new_v4(), + kind: ProducerKind::Service, + address, + interval: Duration::from_secs(10), + }, + registration_address: Some(fake_nexus.local_addr()), + request_body_max_bytes: 1024, + log: LogConfig::Logger(log), + }; + + // Ideally, we would check pretty carefully that there are exactly N + // registrations after N renewal periods. That's brittle, especially on + // a loaded system. Instead, we'll wait until we've received the + // expected number of registration requests.
+ let _server = Server::start(&config).unwrap(); + const N_REQUESTS: u32 = 10; + const POLL_INTERVAL: Duration = Duration::from_millis(100); + + // The poll interval is 1s (see `INTERVAL`), and the producer attempts + // to register every 1/4 interval, so this should be quite sufficient + // for even heavily-loaded tests. + const POLL_DURATION: Duration = Duration::from_secs(30); + wait_for_condition( + || async { + if fake_nexus.app_private().load(Ordering::SeqCst) >= N_REQUESTS + { + Ok(()) + } else { + Err(CondCheckError::<()>::NotYet) + } + }, + &POLL_INTERVAL, + &POLL_DURATION, + ) + .await + .expect("Expected all registration requests to be made within timeout"); + fake_nexus.close().await.expect("Expected to close server"); + } +} diff --git a/package-manifest.toml b/package-manifest.toml index e3198cef8b..7f80dacf7c 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -100,7 +100,7 @@ only_for_targets.image = "standard" source.type = "composite" source.packages = [ "omicron-nexus.tar.gz", - "zone-network-setup.tar.gz", + "zone-setup.tar.gz", "zone-network-install.tar.gz", "opte-interface-setup.tar.gz", ] @@ -130,11 +130,7 @@ output.intermediate_only = true service_name = "oximeter" only_for_targets.image = "standard" source.type = "composite" -source.packages = [ - "oximeter-collector.tar.gz", - "zone-network-setup.tar.gz", - "zone-network-install.tar.gz", -] +source.packages = [ "oximeter-collector.tar.gz", "zone-setup.tar.gz", "zone-network-install.tar.gz" ] output.type = "zone" [package.oximeter-collector] @@ -157,8 +153,8 @@ source.type = "composite" source.packages = [ "clickhouse_svc.tar.gz", "internal-dns-cli.tar.gz", - "zone-network-setup.tar.gz", - "zone-network-install.tar.gz", + "zone-setup.tar.gz", + "zone-network-install.tar.gz" ] output.type = "zone" @@ -183,8 +179,8 @@ source.type = "composite" source.packages = [ "clickhouse_keeper_svc.tar.gz", "internal-dns-cli.tar.gz", - "zone-network-setup.tar.gz", - "zone-network-install.tar.gz", + "zone-setup.tar.gz", + "zone-network-install.tar.gz" ] output.type = "zone" @@ -209,8 +205,8 @@ source.type = "composite" source.packages = [ "cockroachdb-service.tar.gz", "internal-dns-cli.tar.gz", - "zone-network-setup.tar.gz", - "zone-network-install.tar.gz", + "zone-setup.tar.gz", + "zone-network-install.tar.gz" ] output.type = "zone" @@ -245,8 +241,8 @@ source.type = "composite" source.packages = [ "dns-server.tar.gz", "internal-dns-customizations.tar.gz", - "zone-network-setup.tar.gz", - "zone-network-install.tar.gz", + "zone-setup.tar.gz", + "zone-network-install.tar.gz" ] output.type = "zone" @@ -257,7 +253,7 @@ source.type = "composite" source.packages = [ "dns-server.tar.gz", "external-dns-customizations.tar.gz", - "zone-network-setup.tar.gz", + "zone-setup.tar.gz", "zone-network-install.tar.gz", "opte-interface-setup.tar.gz", ] @@ -298,10 +294,11 @@ service_name = "ntp" only_for_targets.image = "standard" source.type = "composite" source.packages = [ + "chrony-setup.tar.gz", "ntp-svc.tar.gz", "opte-interface-setup.tar.gz", - "zone-network-setup.tar.gz", - "zone-network-install.tar.gz", + "zone-setup.tar.gz", + "zone-network-install.tar.gz" ] output.type = "zone" @@ -311,8 +308,17 @@ only_for_targets.image = "standard" source.type = "local" source.paths = [ { from = "smf/ntp/manifest", to = "/var/svc/manifest/site/ntp" }, - { from = "smf/ntp/method", to = "/var/svc/method" }, - { from = "smf/ntp/etc", to = "/etc" }, +] +output.intermediate_only = true +output.type = "zone" + +[package.chrony-setup] +service_name 
= "chrony-setup" +only_for_targets.image = "standard" +source.type = "local" +source.paths = [ + { from = "smf/chrony-setup/manifest.xml", to = "/var/svc/manifest/site/chrony-setup/manifest.xml" }, + { from = "smf/chrony-setup/etc", to = "/etc" }, ] output.intermediate_only = true output.type = "zone" @@ -457,11 +463,7 @@ output.intermediate_only = true service_name = "crucible" only_for_targets.image = "standard" source.type = "composite" -source.packages = [ - "crucible.tar.gz", - "zone-network-setup.tar.gz", - "zone-network-install.tar.gz", -] +source.packages = [ "crucible.tar.gz", "zone-setup.tar.gz", "zone-network-install.tar.gz" ] output.type = "zone" @@ -469,11 +471,7 @@ output.type = "zone" service_name = "crucible_pantry" only_for_targets.image = "standard" source.type = "composite" -source.packages = [ - "crucible-pantry.tar.gz", - "zone-network-setup.tar.gz", - "zone-network-install.tar.gz", -] +source.packages = [ "crucible-pantry.tar.gz", "zone-setup.tar.gz", "zone-network-install.tar.gz" ] output.type = "zone" # Packages not built within Omicron, but which must be imported. @@ -492,10 +490,10 @@ only_for_targets.image = "standard" # 3. Use source.type = "manual" instead of "prebuilt" source.type = "prebuilt" source.repo = "crucible" -source.commit = "16f16478f4af1502b25ddcd79d307b3f116f13f6" +source.commit = "8c6d485110ecfae5409575246b986a145c386dc4" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible.sha256.txt -source.sha256 = "ce186a1a1243ea618755ae341844795cff0ce2c1415c6bd770360b3330dc664b" +source.sha256 = "a974c976babbbbe4d126fe324e28093b4f69b689e1cf607ce38323befcfa494e" output.type = "zone" output.intermediate_only = true @@ -504,10 +502,10 @@ service_name = "crucible_pantry_prebuilt" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "crucible" -source.commit = "16f16478f4af1502b25ddcd79d307b3f116f13f6" +source.commit = "8c6d485110ecfae5409575246b986a145c386dc4" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/crucible/image//crucible-pantry.sha256.txt -source.sha256 = "8920622255fa0317ce312d0127c94b8fef647a85be4c8abaf861be560fe43194" +source.sha256 = "34418c60ecccade796e604997a11b1fa7f01c364996fa4b57131466e910700a8" output.type = "zone" output.intermediate_only = true @@ -519,10 +517,10 @@ service_name = "propolis-server" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "fdf0585c6a227a7cfbee4a61a36938c3d77e4712" +source.commit = "6d7ed9a033babc054db9eff5b59dee978d2b0d76" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "f07720e9041907f9285432251c82c0c7502bf1be9dd4df1ba6abe8f7462c2e9e" +source.sha256 = "f8f41b47bc00811fefe2ba75e0f6f8ab77765776c04021e0b31f09c3b21108a9" output.type = "zone" [package.mg-ddm-gz] @@ -535,10 +533,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). 
-source.commit = "de065a84831e66c829603d9a098e237e8f5faaa1" +source.commit = "23b0cf439f9f62b9a4933e55cc72bcaddc9596cd" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//maghemite.sha256.txt -source.sha256 = "8a7525f8329c5178ebf07cecc623a017806b81d5d1ca55cf76b88e737ae57dec" +source.sha256 = "1ea0e73e149a68bf91b5ce2e0db2a8a1af50dcdbbf381b672aa9ac7e36a3a181" output.type = "tarball" [package.mg-ddm] @@ -551,10 +549,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "de065a84831e66c829603d9a098e237e8f5faaa1" +source.commit = "23b0cf439f9f62b9a4933e55cc72bcaddc9596cd" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "0cc9cbef39103d3e651334574ebdd0e6ef71670cbe6a720d22b1efb005b5a71c" +source.sha256 = "3334b0a9d5956e3117a6b493b9a5a31220391fab1ecbfb3a4bd8e94d7030771a" output.type = "zone" output.intermediate_only = true @@ -566,10 +564,10 @@ source.repo = "maghemite" # `tools/maghemite_openapi_version`. Failing to do so will cause a failure when # building `ddm-admin-client` (which will instruct you to update # `tools/maghemite_openapi_version`). -source.commit = "de065a84831e66c829603d9a098e237e8f5faaa1" +source.commit = "23b0cf439f9f62b9a4933e55cc72bcaddc9596cd" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mg-ddm.sha256.txt -source.sha256 = "4256d320b1ec310d56679eca5f65c9149072fe647f66021fd0cce1411fc39e0c" +source.sha256 = "e0907de39ca9f8ab45d40d361a1dbeed4bd8e9b157f8d3d8fe0a4bc259d933bd" output.type = "zone" output.intermediate_only = true @@ -577,11 +575,31 @@ output.intermediate_only = true service_name = "lldp" source.type = "prebuilt" source.repo = "lldp" -source.commit = "daab8580d6994c29f3c45ffa5a76b1de765b30da" -source.sha256 = "15a9068fdd4521a77f7d32c80d1e6d3a6a4ad15accab77641ed8f14175ae0db6" +source.commit = "30e5d89fae9190c69258ca77d5d5a1acec064742" +source.sha256 = "f58bfd1b77748544b5b1a99a07e52bab8dc5673b9bd3a745ebbfdd614d492328" output.type = "zone" output.intermediate_only = true +[package.pumpkind] +service_name = "pumpkind" +source.type = "prebuilt" +source.repo = "pumpkind" +source.commit = "3fe9c306590fb2f28f54ace7fd18b3c126323683" +source.sha256 = "97eff3265bd6d2aee1b543d621187a11f6bf84bcfe0752c456ab33e312900125" +output.type = "zone" +output.intermediate_only = true +only_for_targets.image = "standard" +only_for_targets.switch = "asic" + +[package.pumpkind-gz] +service_name = "pumpkind" +source.type = "prebuilt" +source.repo = "pumpkind" +source.commit = "3fe9c306590fb2f28f54ace7fd18b3c126323683" +source.sha256 = "21ba6926761ef2365046b5d90d725cea2ba5ce9d5059700eeb48406c1950fbc0" +output.type = "tarball" +only_for_targets.image = "standard" + [package.dendrite-stub] service_name = "dendrite" only_for_targets.switch = "stub" @@ -595,8 +613,8 @@ only_for_targets.image = "standard" # 2. 
Copy dendrite.tar.gz from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "c2a9f29f70b1e05d891c713997577be53826e1bb" -source.sha256 = "1405185cc9645b3a6f1bf82d2ffd89f17505e4e625795cd0beb58f043fa7fd8a" +source.commit = "6334bf74fa21790c15f1c4e494ea2ec0edd1c83c" +source.sha256 = "5929f9abf0daf4bbf17d835e5d69fc842b9617b312fb5644fa99daf785203700" output.type = "zone" output.intermediate_only = true @@ -620,8 +638,8 @@ only_for_targets.image = "standard" # 2. Copy the output zone image from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "c2a9f29f70b1e05d891c713997577be53826e1bb" -source.sha256 = "2124ffd76bb80bcb7063862a1516da3d805b1c062fe2339b95e4656355a55fd9" +source.commit = "6334bf74fa21790c15f1c4e494ea2ec0edd1c83c" +source.sha256 = "0294a1911212c4764d1034b5e0ca00cc9dfc51df482a9f6e5547b191b4481ad8" output.type = "zone" output.intermediate_only = true @@ -638,8 +656,8 @@ only_for_targets.image = "standard" # 2. Copy dendrite.tar.gz from dendrite/out to omicron/out/dendrite-softnpu.tar.gz source.type = "prebuilt" source.repo = "dendrite" -source.commit = "c2a9f29f70b1e05d891c713997577be53826e1bb" -source.sha256 = "3e89ff18a1209b82caafce91db42dec9f9f8d0fcaacbb1a8cfe7d1c0b0966700" +source.commit = "6334bf74fa21790c15f1c4e494ea2ec0edd1c83c" +source.sha256 = "1a188da01dccf565058145b43573a549a2eb4d71fe8800170152b823af27a010" output.type = "zone" output.intermediate_only = true @@ -678,6 +696,7 @@ source.packages = [ "omicron-gateway-asic.tar.gz", "dendrite-asic.tar.gz", "lldp.tar.gz", + "pumpkind.tar.gz", "wicketd.tar.gz", "wicket.tar.gz", "mg-ddm.tar.gz", @@ -746,11 +765,11 @@ source.paths = [ output.type = "zone" output.intermediate_only = true -[package.zone-network-setup] -service_name = "zone-network-cli" +[package.zone-setup] +service_name = "zone-setup-cli" only_for_targets.image = "standard" source.type = "local" -source.rust.binary_names = ["zone-networking"] +source.rust.binary_names = ["zone-setup"] source.rust.release = true output.type = "zone" output.intermediate_only = true diff --git a/package/Cargo.toml b/package/Cargo.toml index 8067473aa0..4632e66731 100644 --- a/package/Cargo.toml +++ b/package/Cargo.toml @@ -5,6 +5,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true camino.workspace = true diff --git a/package/src/bin/omicron-package.rs b/package/src/bin/omicron-package.rs index 3b8bd24918..09fa7ab178 100644 --- a/package/src/bin/omicron-package.rs +++ b/package/src/bin/omicron-package.rs @@ -199,6 +199,25 @@ async fn do_dot(config: &Config) -> Result<()> { Ok(()) } +async fn do_list_outputs( + config: &Config, + output_directory: &Utf8Path, + intermediate: bool, +) -> Result<()> { + for (name, package) in + config.package_config.packages_to_build(&config.target).0 + { + if !intermediate + && package.output + == (PackageOutput::Zone { intermediate_only: true }) + { + continue; + } + println!("{}", package.get_output_path(name, output_directory)); + } + Ok(()) +} + // The name reserved for the currently-in-use build target. 
const ACTIVE: &str = "active"; @@ -919,7 +938,7 @@ async fn main() -> Result<()> { tokio::fs::create_dir_all(&args.artifact_dir).await?; let logpath = args.artifact_dir.join("LOG"); let logfile = std::io::LineWriter::new(open_options.open(&logpath)?); - println!("Logging to: {}", std::fs::canonicalize(logpath)?.display()); + eprintln!("Logging to: {}", std::fs::canonicalize(logpath)?.display()); let drain = slog_bunyan::new(logfile).build().fuse(); let drain = slog_async::Async::new(drain).build().fuse(); @@ -981,6 +1000,10 @@ async fn main() -> Result<()> { SubCommand::Build(BuildCommand::Dot) => { do_dot(&get_config()?).await?; } + SubCommand::Build(BuildCommand::ListOutputs { intermediate }) => { + do_list_outputs(&get_config()?, &args.artifact_dir, *intermediate) + .await?; + } SubCommand::Build(BuildCommand::Package { disable_cache }) => { do_package(&get_config()?, &args.artifact_dir, *disable_cache) .await?; diff --git a/package/src/dot.rs b/package/src/dot.rs index 3307d100ba..141adcf368 100644 --- a/package/src/dot.rs +++ b/package/src/dot.rs @@ -196,7 +196,7 @@ pub fn do_dot( // Similarly, regardless of the type of local package, create // a node showing any local paths that get included in the // package. - if paths.len() > 0 { + if !paths.is_empty() { let paths = paths .iter() .map(|mapping| { diff --git a/package/src/lib.rs b/package/src/lib.rs index bba1a3a0cd..2b99cfbe07 100644 --- a/package/src/lib.rs +++ b/package/src/lib.rs @@ -90,6 +90,11 @@ pub enum BuildCommand { }, /// Make a `dot` graph to visualize the package tree Dot, + /// List the output packages for the current target + ListOutputs { + #[clap(long)] + intermediate: bool, + }, /// Builds the packages specified in a manifest, and places them into an /// 'out' directory. Package { diff --git a/package/src/target.rs b/package/src/target.rs index d5d5e92c46..589dba7870 100644 --- a/package/src/target.rs +++ b/package/src/target.rs @@ -32,7 +32,7 @@ pub enum Machine { /// Use sled agent configuration for a device emulating a Gimlet /// /// Note that this configuration can actually work on real gimlets, - /// it just relies on the "./tools/create_virtual_hardware.sh" script. + /// it just relies on "cargo xtask virtual-hardware create". 
NonGimlet, } diff --git a/passwords/Cargo.toml b/passwords/Cargo.toml index 4f3922a7a5..eda3a020dc 100644 --- a/passwords/Cargo.toml +++ b/passwords/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] argon2 = { version = "0.5.3", features = ["alloc", "password-hash", "rand", "std"] } rand.workspace = true @@ -14,7 +17,7 @@ serde_with.workspace = true omicron-workspace-hack.workspace = true [dev-dependencies] -argon2alt = { package = "rust-argon2", version = "1.0" } +argon2alt = { package = "rust-argon2", version = "2.1.0" } criterion.workspace = true [[bench]] diff --git a/passwords/src/lib.rs b/passwords/src/lib.rs index ca2292420c..c7e9f1a118 100644 --- a/passwords/src/lib.rs +++ b/passwords/src/lib.rs @@ -563,7 +563,6 @@ mod test { mem_cost: ARGON2_COST_M_KIB, time_cost: ARGON2_COST_T, lanes: ARGON2_COST_P, - thread_mode: argon2alt::ThreadMode::Sequential, secret: &[], ad: &[], hash_length: 32, diff --git a/rpaths/Cargo.toml b/rpaths/Cargo.toml index 45e6c9b925..5e54e9e87b 100644 --- a/rpaths/Cargo.toml +++ b/rpaths/Cargo.toml @@ -4,5 +4,8 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] omicron-workspace-hack.workspace = true diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 2e3f4c137b..7c513cfbad 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -4,5 +4,5 @@ # # We choose a specific toolchain (rather than "stable") for repeatability. The # intent is to keep this up-to-date with recently-released stable Rust. -channel = "1.76.0" +channel = "1.78.0" profile = "default" diff --git a/schema/all-zone-requests.json b/schema/all-zone-requests.json index e37fbfde59..fde6ee18a4 100644 --- a/schema/all-zone-requests.json +++ b/schema/all-zone-requests.json @@ -173,16 +173,26 @@ } ] } - ] + ], + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::IpNet", + "version": "0.1.0" + } }, "Ipv4Net": { "title": "An IPv4 subnet", - "description": "An IPv4 subnet, including prefix and subnet mask", + "description": "An IPv4 subnet, including prefix and prefix length", "examples": [ "192.168.1.0/24" ], "type": "string", - "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + } }, "Ipv6Net": { "title": "An IPv6 subnet", @@ -191,7 +201,12 @@ "fd12:3456::/64" ], "type": "string", - "pattern": "^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" + "pattern": 
"^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + } }, "MacAddr": { "title": "A MAC address", @@ -668,7 +683,6 @@ } }, "dataset": { - "default": null, "anyOf": [ { "$ref": "#/definitions/DatasetRequest" diff --git a/schema/all-zones-requests.json b/schema/all-zones-requests.json index 0ac9e760a8..526e41376f 100644 --- a/schema/all-zones-requests.json +++ b/schema/all-zones-requests.json @@ -57,16 +57,26 @@ } ] } - ] + ], + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::IpNet", + "version": "0.1.0" + } }, "Ipv4Net": { "title": "An IPv4 subnet", - "description": "An IPv4 subnet, including prefix and subnet mask", + "description": "An IPv4 subnet, including prefix and prefix length", "examples": [ "192.168.1.0/24" ], "type": "string", - "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$" + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + } }, "Ipv6Net": { "title": "An IPv6 subnet", @@ -75,7 +85,12 @@ "fd12:3456::/64" ], "type": "string", - "pattern": "^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + } }, "MacAddr": { "title": "A MAC address", diff --git a/schema/crdb/add-allowed-source-ips/up.sql b/schema/crdb/add-allowed-source-ips/up.sql new file mode 100644 index 0000000000..f1f84e9a2c --- /dev/null +++ b/schema/crdb/add-allowed-source-ips/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE IF NOT EXISTS omicron.public.allow_list ( + id UUID PRIMARY KEY, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + allowed_ips INET[] CHECK (array_length(allowed_ips, 1) > 0) +); diff --git a/schema/crdb/add-lookup-disk-by-volume-id-index/up.sql 
b/schema/crdb/add-lookup-disk-by-volume-id-index/up.sql new file mode 100644 index 0000000000..2f129f334c --- /dev/null +++ b/schema/crdb/add-lookup-disk-by-volume-id-index/up.sql @@ -0,0 +1,4 @@ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_disk_by_volume_id ON omicron.public.disk ( + volume_id +) WHERE + time_deleted IS NULL; diff --git a/schema/crdb/add-lookup-sled-by-policy-and-state-index/up.sql b/schema/crdb/add-lookup-sled-by-policy-and-state-index/up.sql new file mode 100644 index 0000000000..adfd6ff454 --- /dev/null +++ b/schema/crdb/add-lookup-sled-by-policy-and-state-index/up.sql @@ -0,0 +1,5 @@ +/* Add an index which lets us look up sleds based on policy and state */ +CREATE INDEX IF NOT EXISTS lookup_sled_by_policy_and_state ON omicron.public.sled ( + sled_policy, + sled_state +); diff --git a/schema/crdb/add-lookup-vmm-by-sled-id-index/up.sql b/schema/crdb/add-lookup-vmm-by-sled-id-index/up.sql new file mode 100644 index 0000000000..7f9262e4fe --- /dev/null +++ b/schema/crdb/add-lookup-vmm-by-sled-id-index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX IF NOT EXISTS lookup_vmms_by_sled_id ON omicron.public.vmm ( + sled_id +) WHERE time_deleted IS NULL; diff --git a/schema/crdb/add-metrics-producers-time-modified-index/up.sql b/schema/crdb/add-metrics-producers-time-modified-index/up.sql new file mode 100644 index 0000000000..35136ca759 --- /dev/null +++ b/schema/crdb/add-metrics-producers-time-modified-index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX IF NOT EXISTS lookup_producer_by_time_modified ON omicron.public.metric_producer ( + time_modified +); diff --git a/schema/crdb/add-view-for-v2p-mappings/up01.sql b/schema/crdb/add-view-for-v2p-mappings/up01.sql new file mode 100644 index 0000000000..96d5723c00 --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up01.sql @@ -0,0 +1,41 @@ +CREATE VIEW IF NOT EXISTS omicron.public.v2p_mapping_view +AS +WITH VmV2pMappings AS ( + SELECT + n.id as nic_id, + s.id as sled_id, + s.ip as sled_ip, + v.vni, + n.mac, + n.ip + FROM omicron.public.network_interface n + JOIN omicron.public.vpc_subnet vs ON vs.id = n.subnet_id + JOIN omicron.public.vpc v ON v.id = n.vpc_id + JOIN omicron.public.vmm vmm ON n.parent_id = vmm.instance_id + JOIN omicron.public.sled s ON vmm.sled_id = s.id + WHERE n.time_deleted IS NULL + AND n.kind = 'instance' + AND s.sled_policy = 'in_service' + AND s.sled_state = 'active' +), +ProbeV2pMapping AS ( + SELECT + n.id as nic_id, + s.id as sled_id, + s.ip as sled_ip, + v.vni, + n.mac, + n.ip + FROM omicron.public.network_interface n + JOIN omicron.public.vpc_subnet vs ON vs.id = n.subnet_id + JOIN omicron.public.vpc v ON v.id = n.vpc_id + JOIN omicron.public.probe p ON n.parent_id = p.id + JOIN omicron.public.sled s ON p.sled = s.id + WHERE n.time_deleted IS NULL + AND n.kind = 'probe' + AND s.sled_policy = 'in_service' + AND s.sled_state = 'active' +) +SELECT nic_id, sled_id, sled_ip, vni, mac, ip FROM VmV2pMappings +UNION +SELECT nic_id, sled_id, sled_ip, vni, mac, ip FROM ProbeV2pMapping; diff --git a/schema/crdb/add-view-for-v2p-mappings/up02.sql b/schema/crdb/add-view-for-v2p-mappings/up02.sql new file mode 100644 index 0000000000..5ab1075fbe --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up02.sql @@ -0,0 +1,3 @@ +CREATE INDEX IF NOT EXISTS network_interface_by_parent +ON omicron.public.network_interface (parent_id) +STORING (name, kind, vpc_id, subnet_id, mac, ip, slot); diff --git a/schema/crdb/add-view-for-v2p-mappings/up03.sql b/schema/crdb/add-view-for-v2p-mappings/up03.sql new file mode 100644 index 
0000000000..86cef026a1 --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up03.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS sled_by_policy_and_state +ON omicron.public.sled (sled_policy, sled_state, id) STORING (ip); diff --git a/schema/crdb/add-view-for-v2p-mappings/up04.sql b/schema/crdb/add-view-for-v2p-mappings/up04.sql new file mode 100644 index 0000000000..809146b809 --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up04.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS active_vmm +on omicron.public.vmm (time_deleted, sled_id, instance_id); diff --git a/schema/crdb/add-view-for-v2p-mappings/up05.sql b/schema/crdb/add-view-for-v2p-mappings/up05.sql new file mode 100644 index 0000000000..cdabdc6a96 --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up05.sql @@ -0,0 +1,4 @@ +CREATE INDEX IF NOT EXISTS v2p_mapping_details +ON omicron.public.network_interface ( + time_deleted, kind, subnet_id, vpc_id, parent_id +) STORING (mac, ip); diff --git a/schema/crdb/add-view-for-v2p-mappings/up06.sql b/schema/crdb/add-view-for-v2p-mappings/up06.sql new file mode 100644 index 0000000000..afd10ed13f --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up06.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS sled_by_policy +ON omicron.public.sled (sled_policy) STORING (ip, sled_state); diff --git a/schema/crdb/add-view-for-v2p-mappings/up07.sql b/schema/crdb/add-view-for-v2p-mappings/up07.sql new file mode 100644 index 0000000000..defe411f96 --- /dev/null +++ b/schema/crdb/add-view-for-v2p-mappings/up07.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS vmm_by_instance_id +ON omicron.public.vmm (instance_id) STORING (sled_id); diff --git a/schema/crdb/allocate-subnet-decommissioned-sleds/up1.sql b/schema/crdb/allocate-subnet-decommissioned-sleds/up1.sql new file mode 100644 index 0000000000..adffd4a2cf --- /dev/null +++ b/schema/crdb/allocate-subnet-decommissioned-sleds/up1.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.sled_underlay_subnet_allocation + ALTER PRIMARY KEY USING COLUMNS (hw_baseboard_id, sled_id); diff --git a/schema/crdb/allocate-subnet-decommissioned-sleds/up2.sql b/schema/crdb/allocate-subnet-decommissioned-sleds/up2.sql new file mode 100644 index 0000000000..ba67d093f4 --- /dev/null +++ b/schema/crdb/allocate-subnet-decommissioned-sleds/up2.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS sled_underlay_subnet_allocation_hw_baseboard_id_key CASCADE; diff --git a/schema/crdb/allocate-subnet-decommissioned-sleds/up3.sql b/schema/crdb/allocate-subnet-decommissioned-sleds/up3.sql new file mode 100644 index 0000000000..f96b3312c9 --- /dev/null +++ b/schema/crdb/allocate-subnet-decommissioned-sleds/up3.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX IF NOT EXISTS commissioned_sled_uniqueness + ON omicron.public.sled (serial_number, part_number) + WHERE sled_state != 'decommissioned'; diff --git a/schema/crdb/allocate-subnet-decommissioned-sleds/up4.sql b/schema/crdb/allocate-subnet-decommissioned-sleds/up4.sql new file mode 100644 index 0000000000..9489a61c2a --- /dev/null +++ b/schema/crdb/allocate-subnet-decommissioned-sleds/up4.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS serial_part_revision_unique CASCADE; diff --git a/schema/crdb/bgp-oxpop-features/up01.sql b/schema/crdb/bgp-oxpop-features/up01.sql new file mode 100644 index 0000000000..a92c755947 --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up01.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.switch_port_settings_bgp_peer_config ADD COLUMN IF NOT EXISTS remote_asn INT8; diff --git a/schema/crdb/bgp-oxpop-features/up02.sql 
b/schema/crdb/bgp-oxpop-features/up02.sql new file mode 100644 index 0000000000..c7107d4316 --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up02.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.switch_port_settings_bgp_peer_config ADD COLUMN IF NOT EXISTS min_ttl INT2; diff --git a/schema/crdb/bgp-oxpop-features/up03.sql b/schema/crdb/bgp-oxpop-features/up03.sql new file mode 100644 index 0000000000..9de6526e6f --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up03.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.switch_port_settings_bgp_peer_config ADD COLUMN IF NOT EXISTS md5_auth_key TEXT; diff --git a/schema/crdb/bgp-oxpop-features/up04.sql b/schema/crdb/bgp-oxpop-features/up04.sql new file mode 100644 index 0000000000..722b75ca55 --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up04.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.switch_port_settings_bgp_peer_config ADD COLUMN IF NOT EXISTS multi_exit_discriminator INT8; diff --git a/schema/crdb/bgp-oxpop-features/up05.sql b/schema/crdb/bgp-oxpop-features/up05.sql new file mode 100644 index 0000000000..a2f5cb607d --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up05.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.switch_port_settings_bgp_peer_config ADD COLUMN IF NOT EXISTS local_pref INT8; diff --git a/schema/crdb/bgp-oxpop-features/up06.sql b/schema/crdb/bgp-oxpop-features/up06.sql new file mode 100644 index 0000000000..68161246b5 --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up06.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.switch_port_settings_bgp_peer_config ADD COLUMN IF NOT EXISTS enforce_first_as BOOLEAN; diff --git a/schema/crdb/bgp-oxpop-features/up07.sql b/schema/crdb/bgp-oxpop-features/up07.sql new file mode 100644 index 0000000000..5555cb953c --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up07.sql @@ -0,0 +1,8 @@ +CREATE TABLE IF NOT EXISTS omicron.public.switch_port_settings_bgp_peer_config_communities ( + port_settings_id UUID NOT NULL, + interface_name TEXT NOT NULL, + addr INET NOT NULL, + community INT8 NOT NULL, + + PRIMARY KEY (port_settings_id, interface_name, addr, community) +); diff --git a/schema/crdb/bgp-oxpop-features/up08.sql b/schema/crdb/bgp-oxpop-features/up08.sql new file mode 100644 index 0000000000..36fea6f35a --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up08.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.bgp_config ADD COLUMN IF NOT EXISTS shaper TEXT; diff --git a/schema/crdb/bgp-oxpop-features/up09.sql b/schema/crdb/bgp-oxpop-features/up09.sql new file mode 100644 index 0000000000..dd7e2bc218 --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up09.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.bgp_config ADD COLUMN IF NOT EXISTS checker TEXT; diff --git a/schema/crdb/bgp-oxpop-features/up10.sql b/schema/crdb/bgp-oxpop-features/up10.sql new file mode 100644 index 0000000000..c4ef11bc05 --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up10.sql @@ -0,0 +1 @@ +DROP VIEW IF EXISTS omicron.public.bgp_peer_view; diff --git a/schema/crdb/bgp-oxpop-features/up11.sql b/schema/crdb/bgp-oxpop-features/up11.sql new file mode 100644 index 0000000000..d47f3cff0f --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up11.sql @@ -0,0 +1,8 @@ +CREATE TABLE IF NOT EXISTS omicron.public.switch_port_settings_bgp_peer_config_allow_import ( + port_settings_id UUID NOT NULL, + interface_name TEXT NOT NULL, + addr INET NOT NULL, + prefix INET NOT NULL, + + PRIMARY KEY (port_settings_id, interface_name, addr, prefix) +); diff --git a/schema/crdb/bgp-oxpop-features/up12.sql 
b/schema/crdb/bgp-oxpop-features/up12.sql new file mode 100644 index 0000000000..60f35e7dc2 --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up12.sql @@ -0,0 +1,8 @@ +CREATE TABLE IF NOT EXISTS omicron.public.switch_port_settings_bgp_peer_config_allow_export ( + port_settings_id UUID NOT NULL, + interface_name TEXT NOT NULL, + addr INET NOT NULL, + prefix INET NOT NULL, + + PRIMARY KEY (port_settings_id, interface_name, addr, prefix) +); diff --git a/schema/crdb/bgp-oxpop-features/up13.sql b/schema/crdb/bgp-oxpop-features/up13.sql new file mode 100644 index 0000000000..4377529852 --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up13.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.switch_port_settings_bgp_peer_config ADD COLUMN IF NOT EXISTS allow_import_list_active BOOLEAN NOT NULL DEFAULT false; diff --git a/schema/crdb/bgp-oxpop-features/up14.sql b/schema/crdb/bgp-oxpop-features/up14.sql new file mode 100644 index 0000000000..c5aaa29fbc --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up14.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.switch_port_settings_bgp_peer_config ADD COLUMN IF NOT EXISTS allow_export_list_active BOOLEAN NOT NULL DEFAULT false; diff --git a/schema/crdb/bgp-oxpop-features/up15.sql b/schema/crdb/bgp-oxpop-features/up15.sql new file mode 100644 index 0000000000..556e26762e --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up15.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.switch_port_settings_bgp_peer_config ADD COLUMN IF NOT EXISTS vlan_id INT4; diff --git a/schema/crdb/bgp-oxpop-features/up16.sql b/schema/crdb/bgp-oxpop-features/up16.sql new file mode 100644 index 0000000000..c6655518bf --- /dev/null +++ b/schema/crdb/bgp-oxpop-features/up16.sql @@ -0,0 +1,23 @@ +CREATE VIEW IF NOT EXISTS omicron.public.bgp_peer_view +AS +SELECT + sp.switch_location, + sp.port_name, + bpc.addr, + bpc.hold_time, + bpc.idle_hold_time, + bpc.delay_open, + bpc.connect_retry, + bpc.keepalive, + bpc.remote_asn, + bpc.min_ttl, + bpc.md5_auth_key, + bpc.multi_exit_discriminator, + bpc.local_pref, + bpc.enforce_first_as, + bpc.vlan_id, + bc.asn +FROM omicron.public.switch_port sp +JOIN omicron.public.switch_port_settings_bgp_peer_config bpc +ON sp.port_settings_id = bpc.port_settings_id +JOIN omicron.public.bgp_config bc ON bc.id = bpc.bgp_config_id; diff --git a/schema/crdb/blueprint-add-external-ip-id/up1.sql b/schema/crdb/blueprint-add-external-ip-id/up1.sql new file mode 100644 index 0000000000..05cf7bfe81 --- /dev/null +++ b/schema/crdb/blueprint-add-external-ip-id/up1.sql @@ -0,0 +1 @@ +ALTER TABLE bp_omicron_zone ADD COLUMN IF NOT EXISTS external_ip_id UUID; diff --git a/schema/crdb/blueprint-add-external-ip-id/up2.sql b/schema/crdb/blueprint-add-external-ip-id/up2.sql new file mode 100644 index 0000000000..00063a6f92 --- /dev/null +++ b/schema/crdb/blueprint-add-external-ip-id/up2.sql @@ -0,0 +1,22 @@ +set local disallow_full_table_scans = off; + +-- Fill in the external IP IDs for all past blueprints. +-- +-- This query makes some assumptions that are true at the time of its writing +-- for systems where this migration will run, but may not be true in the future +-- or for other systems: +-- +-- 1. We've never deleted an Omicron zone external IP. (This will be untrue +-- _soon_, as the driver for this migration is to do exactly that.) +-- 2. Only the three zone types listed below have external IPs. +-- 3. Every blueprint zone of one of those three types has exactly one external +-- IP. +-- 4. We do not have any blueprints that have not yet been realized. 
(If we did, +-- those zones would not have corresponding external IPs. We'd leave the IPs +-- as NULL, which would prevent them from being loaded from the db.) +UPDATE bp_omicron_zone SET external_ip_id = ( + SELECT external_ip.id FROM external_ip + WHERE external_ip.parent_id = bp_omicron_zone.id + AND time_deleted IS NULL +) +WHERE zone_type IN ('nexus','external_dns','boundary_ntp'); diff --git a/schema/crdb/blueprint-add-sled-state/up1.sql b/schema/crdb/blueprint-add-sled-state/up1.sql new file mode 100644 index 0000000000..855bc95ab4 --- /dev/null +++ b/schema/crdb/blueprint-add-sled-state/up1.sql @@ -0,0 +1,6 @@ +CREATE TABLE IF NOT EXISTS omicron.public.bp_sled_state ( + blueprint_id UUID NOT NULL, + sled_id UUID NOT NULL, + sled_state omicron.public.sled_state NOT NULL, + PRIMARY KEY (blueprint_id, sled_id) +); diff --git a/schema/crdb/blueprint-add-sled-state/up2.sql b/schema/crdb/blueprint-add-sled-state/up2.sql new file mode 100644 index 0000000000..238870021f --- /dev/null +++ b/schema/crdb/blueprint-add-sled-state/up2.sql @@ -0,0 +1,14 @@ +set local disallow_full_table_scans = off; + +-- At this point in history, all sleds are considered active and this table only +-- exists to support transitioning active-but-expunged sleds to +-- 'decommissioned'. We'll fill in this table for all historical blueprints by +-- inserting rows for every sled for which a given blueprint had a zone config +-- with the state set to 'active'. +INSERT INTO bp_sled_state ( + SELECT DISTINCT + blueprint_id, + sled_id, + 'active'::sled_state + FROM bp_sled_omicron_zones +); diff --git a/schema/crdb/blueprint-crdb-preserve-downgrade/up1.sql b/schema/crdb/blueprint-crdb-preserve-downgrade/up1.sql new file mode 100644 index 0000000000..6555cd9cd2 --- /dev/null +++ b/schema/crdb/blueprint-crdb-preserve-downgrade/up1.sql @@ -0,0 +1,3 @@ +ALTER TABLE omicron.public.blueprint + ADD COLUMN IF NOT EXISTS cockroachdb_fingerprint TEXT NOT NULL DEFAULT '', + ADD COLUMN IF NOT EXISTS cockroachdb_setting_preserve_downgrade TEXT; diff --git a/schema/crdb/blueprint-crdb-preserve-downgrade/up2.sql b/schema/crdb/blueprint-crdb-preserve-downgrade/up2.sql new file mode 100644 index 0000000000..0388528071 --- /dev/null +++ b/schema/crdb/blueprint-crdb-preserve-downgrade/up2.sql @@ -0,0 +1,2 @@ +ALTER TABLE omicron.public.blueprint + ALTER COLUMN cockroachdb_fingerprint DROP DEFAULT; diff --git a/schema/crdb/blueprint-disposition-column/up1.sql b/schema/crdb/blueprint-disposition-column/up1.sql new file mode 100644 index 0000000000..6426d80142 --- /dev/null +++ b/schema/crdb/blueprint-disposition-column/up1.sql @@ -0,0 +1,6 @@ +-- Add the disposition enum. +CREATE TYPE IF NOT EXISTS omicron.public.bp_zone_disposition AS ENUM ( + 'in_service', + 'quiesced', + 'expunged' +); diff --git a/schema/crdb/blueprint-disposition-column/up2.sql b/schema/crdb/blueprint-disposition-column/up2.sql new file mode 100644 index 0000000000..cf5ae38a4d --- /dev/null +++ b/schema/crdb/blueprint-disposition-column/up2.sql @@ -0,0 +1,9 @@ +-- Add the disposition column to the bp_omicron_zone table. +ALTER TABLE omicron.public.bp_omicron_zone + ADD COLUMN IF NOT EXISTS disposition omicron.public.bp_zone_disposition + NOT NULL + -- The only currently-representable zones are in-service and quiesced + -- (represented by bp_omicron_zones_not_in_service, which we're going to + -- drop in the next statement). We don't actually have any quiesced zones + -- yet, so it's fine to just do this. 
+ DEFAULT 'in_service'; diff --git a/schema/crdb/blueprint-disposition-column/up3.sql b/schema/crdb/blueprint-disposition-column/up3.sql new file mode 100644 index 0000000000..8848a5f2e6 --- /dev/null +++ b/schema/crdb/blueprint-disposition-column/up3.sql @@ -0,0 +1,2 @@ +-- Drop the not-in-service table. +DROP TABLE IF EXISTS omicron.public.bp_omicron_zones_not_in_service; diff --git a/schema/crdb/blueprint-disposition-column/up4.sql b/schema/crdb/blueprint-disposition-column/up4.sql new file mode 100644 index 0000000000..e3b2cd9948 --- /dev/null +++ b/schema/crdb/blueprint-disposition-column/up4.sql @@ -0,0 +1,3 @@ +-- Drop the default for the disposition now that in_service is set. +ALTER TABLE omicron.public.bp_omicron_zone + ALTER COLUMN disposition DROP DEFAULT; diff --git a/schema/crdb/blueprint-physical-disk/up1.sql b/schema/crdb/blueprint-physical-disk/up1.sql new file mode 100644 index 0000000000..faf3b49e86 --- /dev/null +++ b/schema/crdb/blueprint-physical-disk/up1.sql @@ -0,0 +1,10 @@ +-- description of a collection of omicron physical disks stored in a blueprint. +CREATE TABLE IF NOT EXISTS omicron.public.bp_sled_omicron_physical_disks ( + -- foreign key into `blueprint` table + blueprint_id UUID NOT NULL, + + sled_id UUID NOT NULL, + generation INT8 NOT NULL, + PRIMARY KEY (blueprint_id, sled_id) +); + diff --git a/schema/crdb/blueprint-physical-disk/up2.sql b/schema/crdb/blueprint-physical-disk/up2.sql new file mode 100644 index 0000000000..734bbd0ccb --- /dev/null +++ b/schema/crdb/blueprint-physical-disk/up2.sql @@ -0,0 +1,20 @@ +-- description of omicron physical disks specified in a blueprint. +CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_physical_disk ( + -- foreign key into the `blueprint` table + blueprint_id UUID NOT NULL, + + -- unique id for this sled (should be foreign keys into `sled` table, though + -- it's conceivable a blueprint could refer to a sled that no longer exists, + -- particularly if the blueprint is older than the current target) + sled_id UUID NOT NULL, + + vendor TEXT NOT NULL, + serial TEXT NOT NULL, + model TEXT NOT NULL, + + id UUID NOT NULL, + pool_id UUID NOT NULL, + + PRIMARY KEY (blueprint_id, id) +); + diff --git a/schema/crdb/blueprint-physical-disk/up3.sql b/schema/crdb/blueprint-physical-disk/up3.sql new file mode 100644 index 0000000000..48a5c182ac --- /dev/null +++ b/schema/crdb/blueprint-physical-disk/up3.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS vendor_serial_model_unique CASCADE; diff --git a/schema/crdb/blueprint-physical-disk/up4.sql b/schema/crdb/blueprint-physical-disk/up4.sql new file mode 100644 index 0000000000..0224215789 --- /dev/null +++ b/schema/crdb/blueprint-physical-disk/up4.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX IF NOT EXISTS vendor_serial_model_unique on omicron.public.physical_disk ( + vendor, serial, model +) WHERE time_deleted IS NULL AND disk_state != 'decommissioned'; diff --git a/schema/crdb/blueprint-physical-disk/up5.sql b/schema/crdb/blueprint-physical-disk/up5.sql new file mode 100644 index 0000000000..eed24a7806 --- /dev/null +++ b/schema/crdb/blueprint-physical-disk/up5.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS lookup_physical_disk_by_sled; diff --git a/schema/crdb/blueprint-physical-disk/up6.sql b/schema/crdb/blueprint-physical-disk/up6.sql new file mode 100644 index 0000000000..ec69b95a67 --- /dev/null +++ b/schema/crdb/blueprint-physical-disk/up6.sql @@ -0,0 +1,4 @@ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_physical_disk_by_sled ON omicron.public.physical_disk ( + sled_id, + id +); diff --git 
a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index ec6a7c192f..cf4ac4b20b 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -148,46 +148,46 @@ CREATE TABLE IF NOT EXISTS omicron.public.sled ( sled_state omicron.public.sled_state NOT NULL, /* Generation number owned and incremented by the sled-agent */ - sled_agent_gen INT8 NOT NULL DEFAULT 1, - - -- This constraint should be upheld, even for deleted disks - -- in the fleet. - CONSTRAINT serial_part_revision_unique UNIQUE ( - serial_number, part_number, revision - ) + sled_agent_gen INT8 NOT NULL DEFAULT 1 ); +-- Add an index that ensures a given physical sled (identified by serial and +-- part number) can only be a commissioned member of the control plane once. +-- +-- TODO Should `sled` reference `hw_baseboard_id` instead of having its own +-- serial/part columns? +CREATE UNIQUE INDEX IF NOT EXISTS commissioned_sled_uniqueness + ON omicron.public.sled (serial_number, part_number) + WHERE sled_state != 'decommissioned'; + /* Add an index which lets us look up sleds on a rack */ CREATE UNIQUE INDEX IF NOT EXISTS lookup_sled_by_rack ON omicron.public.sled ( rack_id, id ) WHERE time_deleted IS NULL; +/* Add an index which lets us look up sleds based on policy and state */ +CREATE INDEX IF NOT EXISTS lookup_sled_by_policy_and_state ON omicron.public.sled ( + sled_policy, + sled_state +); + CREATE TYPE IF NOT EXISTS omicron.public.sled_resource_kind AS ENUM ( - -- omicron.public.dataset - 'dataset', - -- omicron.public.service - 'service', -- omicron.public.instance - 'instance', - -- omicron.public.sled - -- - -- reserved as an approximation of sled internal usage, such as "by the OS - -- and all unaccounted services". - 'reserved' + 'instance' + -- We expect to add other resource kinds here in the future; e.g., to track + -- resources used by control plane services. For now, we only track + -- instances. 
); -- Accounting for programs using resources on a sled CREATE TABLE IF NOT EXISTS omicron.public.sled_resource ( - -- Should match the UUID of the corresponding service + -- Should match the UUID of the corresponding resource id UUID PRIMARY KEY, -- The sled where resources are being consumed sled_id UUID NOT NULL, - -- Identifies the type of the resource - kind omicron.public.sled_resource_kind NOT NULL, - -- The maximum number of hardware threads usable by this resource hardware_threads INT8 NOT NULL, @@ -195,7 +195,10 @@ CREATE TABLE IF NOT EXISTS omicron.public.sled_resource ( rss_ram INT8 NOT NULL, -- The maximum amount of Reservoir RAM provisioned to this resource - reservoir_ram INT8 NOT NULL + reservoir_ram INT8 NOT NULL, + + -- Identifies the type of the resource + kind omicron.public.sled_resource_kind NOT NULL ); -- Allow looking up all resources which reside on a sled @@ -222,7 +225,7 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_resource_by_sled ON omicron.public.sled CREATE TABLE IF NOT EXISTS omicron.public.sled_underlay_subnet_allocation ( -- The physical identity of the sled -- (foreign key into `hw_baseboard_id` table) - hw_baseboard_id UUID PRIMARY KEY, + hw_baseboard_id UUID, -- The rack to which a sled is being added -- (foreign key into `rack` table) @@ -240,7 +243,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.sled_underlay_subnet_allocation ( -- The octet that extends a /56 rack subnet to a /64 sled subnet -- -- Always between 33 and 255 inclusive - subnet_octet INT2 NOT NULL UNIQUE CHECK (subnet_octet BETWEEN 33 AND 255) + subnet_octet INT2 NOT NULL UNIQUE CHECK (subnet_octet BETWEEN 33 AND 255), + + PRIMARY KEY (hw_baseboard_id, sled_id) ); -- Add an index which allows pagination by {rack_id, sled_id} pairs. @@ -296,39 +301,43 @@ CREATE TYPE IF NOT EXISTS omicron.public.service_kind AS ENUM ( 'mgd' ); -CREATE TABLE IF NOT EXISTS omicron.public.service ( - /* Identity metadata (asset) */ - id UUID PRIMARY KEY, - time_created TIMESTAMPTZ NOT NULL, - time_modified TIMESTAMPTZ NOT NULL, - - /* FK into the Sled table */ - sled_id UUID NOT NULL, - /* For services in illumos zones, the zone's unique id (for debugging) */ - zone_id UUID, - /* The IP address of the service. */ - ip INET NOT NULL, - /* The UDP or TCP port on which the service listens. */ - port INT4 CHECK (port BETWEEN 0 AND 65535) NOT NULL, - /* Indicates the type of service. */ - kind omicron.public.service_kind NOT NULL +CREATE TYPE IF NOT EXISTS omicron.public.physical_disk_kind AS ENUM ( + 'm2', + 'u2' ); -/* Add an index which lets us look up the services on a sled */ -CREATE UNIQUE INDEX IF NOT EXISTS lookup_service_by_sled ON omicron.public.service ( - sled_id, - id +-- The disposition for a particular physical disk. +-- This is updated by the operator, either explicitly through an operator API, +-- or implicitly when altering sled policy. +CREATE TYPE IF NOT EXISTS omicron.public.physical_disk_policy AS ENUM ( + -- The disk is in service, and new resources can be provisioned onto it. + 'in_service', + -- The disk has been, or will be, removed from the rack, and it should be + -- assumed that any resources currently on it are now permanently missing. + 'expunged' ); -/* Look up (and paginate) services of a given kind. */ -CREATE UNIQUE INDEX IF NOT EXISTS lookup_service_by_kind ON omicron.public.service ( - kind, - id -); +-- The actual state of a physical disk. This is updated exclusively by Nexus. +-- +-- Nexus's goal is to match the physical disk's state with the +-- operator-indicated policy. 
For example, if the policy is "expunged" and the +-- state is "active", Nexus will assume that the physical disk is gone. Based +-- on that, Nexus will reallocate resources currently on the expunged disk +-- elsewhere, etc. Once the expunged disk no longer has any resources attached +-- to it, Nexus will mark it as decommissioned. +CREATE TYPE IF NOT EXISTS omicron.public.physical_disk_state AS ENUM ( + -- The disk has resources of any kind allocated on it, or, is available for + -- new resources. + -- + -- The disk can be in this state and have a different policy, e.g. + -- "expunged". + 'active', -CREATE TYPE IF NOT EXISTS omicron.public.physical_disk_kind AS ENUM ( - 'm2', - 'u2' + -- The disk no longer has resources allocated on it, now or in the future. + -- + -- This is a terminal state. This state is only valid if the policy is + -- 'expunged'. + 'decommissioned' ); -- A physical disk which exists inside the rack. @@ -348,13 +357,16 @@ CREATE TABLE IF NOT EXISTS omicron.public.physical_disk ( -- FK into the Sled table sled_id UUID NOT NULL, - -- This constraint should be upheld, even for deleted disks - -- in the fleet. - CONSTRAINT vendor_serial_model_unique UNIQUE ( - vendor, serial, model - ) + disk_policy omicron.public.physical_disk_policy NOT NULL, + disk_state omicron.public.physical_disk_state NOT NULL ); +-- This constraint only needs to be upheld for disks that are not deleted +-- nor decommissioned. +CREATE UNIQUE INDEX IF NOT EXISTS vendor_serial_model_unique on omicron.public.physical_disk ( + vendor, serial, model +) WHERE time_deleted IS NULL AND disk_state != 'decommissioned'; + CREATE UNIQUE INDEX IF NOT EXISTS lookup_physical_disk_by_variant ON omicron.public.physical_disk ( variant, id @@ -364,7 +376,7 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_physical_disk_by_variant ON omicron.pub CREATE UNIQUE INDEX IF NOT EXISTS lookup_physical_disk_by_sled ON omicron.public.physical_disk ( sled_id, id -) WHERE time_deleted IS NULL; +); -- x509 certificates which may be used by services CREATE TABLE IF NOT EXISTS omicron.public.certificate ( @@ -1115,6 +1127,11 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_deleted_disk ON omicron.public.disk ( ) WHERE time_deleted IS NOT NULL; +CREATE UNIQUE INDEX IF NOT EXISTS lookup_disk_by_volume_id ON omicron.public.disk ( + volume_id +) WHERE + time_deleted IS NULL; + CREATE TABLE IF NOT EXISTS omicron.public.image ( /* Identity metadata (resource) */ id UUID PRIMARY KEY, @@ -1258,7 +1275,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.oximeter ( CREATE TYPE IF NOT EXISTS omicron.public.producer_kind AS ENUM ( -- A sled agent for an entry in the sled table. 'sled_agent', - -- A service in the omicron.public.service table + -- A service in a blueprint (typically the current target blueprint, but it + -- may reference a prior blueprint if the service is in the process of being + -- removed). 'service', -- A Propolis VMM for an instance in the omicron.public.instance table 'instance' @@ -1275,8 +1294,6 @@ CREATE TABLE IF NOT EXISTS omicron.public.metric_producer ( ip INET NOT NULL, port INT4 CHECK (port BETWEEN 0 AND 65535) NOT NULL, interval FLOAT NOT NULL, - /* TODO: Is this length appropriate? */ - base_route STRING(512) NOT NULL, /* Oximeter collector instance to which this metric producer is assigned. */ oximeter_id UUID NOT NULL ); @@ -1286,6 +1303,10 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_producer_by_oximeter ON omicron.public. 
id ); +CREATE INDEX IF NOT EXISTS lookup_producer_by_time_modified ON omicron.public.metric_producer ( + time_modified +); + /* * VPCs and networking primitives */ @@ -2614,11 +2635,47 @@ CREATE TABLE IF NOT EXISTS omicron.public.switch_port_settings_bgp_peer_config ( delay_open INT8, connect_retry INT8, keepalive INT8, + remote_asn INT8, + min_ttl INT2, + md5_auth_key TEXT, + multi_exit_discriminator INT8, + local_pref INT8, + enforce_first_as BOOLEAN NOT NULL DEFAULT false, + allow_import_list_active BOOLEAN NOT NULL DEFAULT false, + allow_export_list_active BOOLEAN NOT NULL DEFAULT false, + vlan_id INT4, /* TODO https://github.com/oxidecomputer/omicron/issues/3013 */ PRIMARY KEY (port_settings_id, interface_name, addr) ); +CREATE TABLE IF NOT EXISTS omicron.public.switch_port_settings_bgp_peer_config_communities ( + port_settings_id UUID NOT NULL, + interface_name TEXT NOT NULL, + addr INET NOT NULL, + community INT8 NOT NULL, + + PRIMARY KEY (port_settings_id, interface_name, addr, community) +); + +CREATE TABLE IF NOT EXISTS omicron.public.switch_port_settings_bgp_peer_config_allow_import ( + port_settings_id UUID NOT NULL, + interface_name TEXT NOT NULL, + addr INET NOT NULL, + prefix INET NOT NULL, + + PRIMARY KEY (port_settings_id, interface_name, addr, prefix) +); + +CREATE TABLE IF NOT EXISTS omicron.public.switch_port_settings_bgp_peer_config_allow_export ( + port_settings_id UUID NOT NULL, + interface_name TEXT NOT NULL, + addr INET NOT NULL, + prefix INET NOT NULL, + + PRIMARY KEY (port_settings_id, interface_name, addr, prefix) +); + CREATE TABLE IF NOT EXISTS omicron.public.bgp_config ( id UUID PRIMARY KEY, name STRING(63) NOT NULL, @@ -2628,7 +2685,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.bgp_config ( time_deleted TIMESTAMPTZ, asn INT8 NOT NULL, vrf TEXT, - bgp_announce_set_id UUID NOT NULL + bgp_announce_set_id UUID NOT NULL, + shaper TEXT, + checker TEXT ); CREATE UNIQUE INDEX IF NOT EXISTS lookup_bgp_config_by_name ON omicron.public.bgp_config ( @@ -3150,18 +3209,14 @@ CREATE TABLE IF NOT EXISTS omicron.public.inv_omicron_zone_nic ( * `bp_sled_omicron_zones`, `bp_omicron_zone`, and `bp_omicron_zone_nic` are * nearly identical to their `inv_*` counterparts, and record the * `OmicronZonesConfig` for each sled. - * - * `bp_omicron_zones_not_in_service` stores a list of Omicron zones (present in - * `bp_omicron_zone`) that are NOT in service; e.g., should not appear in - * internal DNS. Nexus's in-memory `Blueprint` representation stores the set of - * zones that ARE in service. We invert that logic at this layer because we - * expect most blueprints to have a relatively large number of omicron zones, - * almost all of which will be in service. This is a minor and perhaps - * unnecessary optimization at the database layer, but it's also relatively - * simple and hidden by the relevant read and insert queries in - * `nexus-db-queries`. 
*/ +CREATE TYPE IF NOT EXISTS omicron.public.bp_zone_disposition AS ENUM ( + 'in_service', + 'quiesced', + 'expunged' +); + -- list of all blueprints CREATE TABLE IF NOT EXISTS omicron.public.blueprint ( id UUID PRIMARY KEY, @@ -3183,7 +3238,20 @@ CREATE TABLE IF NOT EXISTS omicron.public.blueprint ( -- identifies the latest internal DNS version when blueprint planning began internal_dns_version INT8 NOT NULL, -- identifies the latest external DNS version when blueprint planning began - external_dns_version INT8 NOT NULL + external_dns_version INT8 NOT NULL, + -- identifies the CockroachDB state fingerprint when blueprint planning began + cockroachdb_fingerprint TEXT NOT NULL, + + -- CockroachDB settings managed by blueprints. + -- + -- We use NULL in these columns to reflect that blueprint execution should + -- not modify the option; we're able to do this because CockroachDB settings + -- require the value to be the correct type and not NULL. There is no value + -- that represents "please reset this setting to the default value"; that is + -- represented by the presence of the default value in that field. + -- + -- `cluster.preserve_downgrade_option` + cockroachdb_setting_preserve_downgrade TEXT ); -- table describing both the current and historical target blueprints of the @@ -3210,6 +3278,46 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_target ( time_made_target TIMESTAMPTZ NOT NULL ); +-- state of a sled in a blueprint +CREATE TABLE IF NOT EXISTS omicron.public.bp_sled_state ( + -- foreign key into `blueprint` table + blueprint_id UUID NOT NULL, + + sled_id UUID NOT NULL, + sled_state omicron.public.sled_state NOT NULL, + PRIMARY KEY (blueprint_id, sled_id) +); + +-- description of a collection of omicron physical disks stored in a blueprint. +CREATE TABLE IF NOT EXISTS omicron.public.bp_sled_omicron_physical_disks ( + -- foreign key into `blueprint` table + blueprint_id UUID NOT NULL, + + sled_id UUID NOT NULL, + generation INT8 NOT NULL, + PRIMARY KEY (blueprint_id, sled_id) +); + +-- description of omicron physical disks specified in a blueprint. +CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_physical_disk ( + -- foreign key into the `blueprint` table + blueprint_id UUID NOT NULL, + + -- unique id for this sled (should be foreign keys into `sled` table, though + -- it's conceivable a blueprint could refer to a sled that no longer exists, + -- particularly if the blueprint is older than the current target) + sled_id UUID NOT NULL, + + vendor TEXT NOT NULL, + serial TEXT NOT NULL, + model TEXT NOT NULL, + + id UUID NOT NULL, + pool_id UUID NOT NULL, + + PRIMARY KEY (blueprint_id, id) +); + -- see inv_sled_omicron_zones, which is identical except it references a -- collection whereas this table references a blueprint CREATE TABLE IF NOT EXISTS omicron.public.bp_sled_omicron_zones ( @@ -3291,6 +3399,18 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_zone ( snat_last_port INT4 CHECK (snat_last_port IS NULL OR snat_last_port BETWEEN 0 AND 65535), + -- Zone disposition + disposition omicron.public.bp_zone_disposition NOT NULL, + + -- For some zones, either primary_service_ip or second_service_ip (but not + -- both!) is an external IP address. For such zones, this is the ID of that + -- external IP. In general this is a foreign key into + -- omicron.public.external_ip, though the row may not exist: if this + -- blueprint is old, it's possible the IP has been deleted, and if this + -- blueprint has not yet been realized, it's possible the IP hasn't been + -- created yet. 
+ external_ip_id UUID, + PRIMARY KEY (blueprint_id, id) ); @@ -3308,20 +3428,6 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_zone_nic ( PRIMARY KEY (blueprint_id, id) ); --- list of omicron zones that are considered NOT in-service for a blueprint --- --- In Rust code, we generally want to deal with "zones in service", which means --- they should appear in DNS. However, almost all zones in almost all blueprints --- will be in service, so we can induce considerably less database work by --- storing the zones _not_ in service. Our DB wrapper layer handles this --- inversion, so the rest of our Rust code can ignore it. -CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_zones_not_in_service ( - blueprint_id UUID NOT NULL, - bp_omicron_zone_id UUID NOT NULL, - - PRIMARY KEY (blueprint_id, bp_omicron_zone_id) -); - /*******************************************************************/ /* @@ -3346,6 +3452,10 @@ CREATE TABLE IF NOT EXISTS omicron.public.vmm ( propolis_port INT4 NOT NULL CHECK (propolis_port BETWEEN 0 AND 65535) DEFAULT 12400 ); +CREATE INDEX IF NOT EXISTS lookup_vmms_by_sled_id ON omicron.public.vmm ( + sled_id +) WHERE time_deleted IS NULL; + /* * A special view of an instance provided to operators for insights into what's * running on a sled. @@ -3684,6 +3794,13 @@ SELECT bpc.delay_open, bpc.connect_retry, bpc.keepalive, + bpc.remote_asn, + bpc.min_ttl, + bpc.md5_auth_key, + bpc.multi_exit_discriminator, + bpc.local_pref, + bpc.enforce_first_as, + bpc.vlan_id, bc.asn FROM omicron.public.switch_port sp JOIN omicron.public.switch_port_settings_bgp_peer_config bpc @@ -3695,6 +3812,145 @@ ON omicron.public.switch_port (port_settings_id, port_name) STORING (switch_loca CREATE INDEX IF NOT EXISTS switch_port_name ON omicron.public.switch_port (port_name); +COMMIT; +BEGIN; + +-- view for v2p mapping rpw +CREATE VIEW IF NOT EXISTS omicron.public.v2p_mapping_view +AS +WITH VmV2pMappings AS ( + SELECT + n.id as nic_id, + s.id as sled_id, + s.ip as sled_ip, + v.vni, + n.mac, + n.ip + FROM omicron.public.network_interface n + JOIN omicron.public.vpc_subnet vs ON vs.id = n.subnet_id + JOIN omicron.public.vpc v ON v.id = n.vpc_id + JOIN omicron.public.vmm vmm ON n.parent_id = vmm.instance_id + JOIN omicron.public.sled s ON vmm.sled_id = s.id + WHERE n.time_deleted IS NULL + AND n.kind = 'instance' + AND s.sled_policy = 'in_service' + AND s.sled_state = 'active' +), +ProbeV2pMapping AS ( + SELECT + n.id as nic_id, + s.id as sled_id, + s.ip as sled_ip, + v.vni, + n.mac, + n.ip + FROM omicron.public.network_interface n + JOIN omicron.public.vpc_subnet vs ON vs.id = n.subnet_id + JOIN omicron.public.vpc v ON v.id = n.vpc_id + JOIN omicron.public.probe p ON n.parent_id = p.id + JOIN omicron.public.sled s ON p.sled = s.id + WHERE n.time_deleted IS NULL + AND n.kind = 'probe' + AND s.sled_policy = 'in_service' + AND s.sled_state = 'active' +) +SELECT nic_id, sled_id, sled_ip, vni, mac, ip FROM VmV2pMappings +UNION +SELECT nic_id, sled_id, sled_ip, vni, mac, ip FROM ProbeV2pMapping; + +CREATE INDEX IF NOT EXISTS network_interface_by_parent +ON omicron.public.network_interface (parent_id) +STORING (name, kind, vpc_id, subnet_id, mac, ip, slot); + +CREATE INDEX IF NOT EXISTS sled_by_policy_and_state +ON omicron.public.sled (sled_policy, sled_state, id) STORING (ip); + +CREATE INDEX IF NOT EXISTS active_vmm +ON omicron.public.vmm (time_deleted, sled_id, instance_id); + +CREATE INDEX IF NOT EXISTS v2p_mapping_details +ON omicron.public.network_interface ( + time_deleted, kind, subnet_id, 
vpc_id, parent_id +) STORING (mac, ip); + +CREATE INDEX IF NOT EXISTS sled_by_policy +ON omicron.public.sled (sled_policy) STORING (ip, sled_state); + +CREATE INDEX IF NOT EXISTS vmm_by_instance_id +ON omicron.public.vmm (instance_id) STORING (sled_id); + +CREATE TYPE IF NOT EXISTS omicron.public.region_replacement_state AS ENUM ( + 'requested', + 'allocating', + 'running', + 'driving', + 'replacement_done', + 'completing', + 'complete' +); + +CREATE TABLE IF NOT EXISTS omicron.public.region_replacement ( + /* unique ID for this region replacement */ + id UUID PRIMARY KEY, + + request_time TIMESTAMPTZ NOT NULL, + + old_region_id UUID NOT NULL, + + volume_id UUID NOT NULL, + + old_region_volume_id UUID, + + new_region_id UUID, + + replacement_state omicron.public.region_replacement_state NOT NULL, + + operating_saga_id UUID +); + +CREATE INDEX IF NOT EXISTS lookup_region_replacement_by_state on omicron.public.region_replacement (replacement_state); + +CREATE TABLE IF NOT EXISTS omicron.public.volume_repair ( + volume_id UUID PRIMARY KEY, + repair_id UUID NOT NULL +); + +CREATE INDEX IF NOT EXISTS lookup_volume_repair_by_repair_id on omicron.public.volume_repair ( + repair_id +); + +CREATE TYPE IF NOT EXISTS omicron.public.region_replacement_step_type AS ENUM ( + 'propolis', + 'pantry' +); + +CREATE TABLE IF NOT EXISTS omicron.public.region_replacement_step ( + replacement_id UUID NOT NULL, + + step_time TIMESTAMPTZ NOT NULL, + + step_type omicron.public.region_replacement_step_type NOT NULL, + + step_associated_instance_id UUID, + step_associated_vmm_id UUID, + + step_associated_pantry_ip INET, + step_associated_pantry_port INT4 CHECK (step_associated_pantry_port BETWEEN 0 AND 65535), + step_associated_pantry_job_id UUID, + + PRIMARY KEY (replacement_id, step_time, step_type) +); + +CREATE INDEX IF NOT EXISTS step_time_order on omicron.public.region_replacement_step (step_time); + +CREATE INDEX IF NOT EXISTS search_for_repair_notifications ON omicron.public.upstairs_repair_notification (region_id, notification_type); + +CREATE INDEX IF NOT EXISTS lookup_any_disk_by_volume_id ON omicron.public.disk ( + volume_id +); + +CREATE INDEX IF NOT EXISTS lookup_snapshot_by_destination_volume_id ON omicron.public.snapshot ( destination_volume_id ); + /* * Metadata for the schema itself. This version number isn't great, as there's * nothing to ensure it gets bumped when it should be, but it's a start. @@ -3718,6 +3974,32 @@ CREATE TABLE IF NOT EXISTS omicron.public.db_metadata ( CHECK (singleton = true) ); +-- An allowlist of IP addresses that can make requests to user-facing services. +CREATE TABLE IF NOT EXISTS omicron.public.allow_list ( + id UUID PRIMARY KEY, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + -- A nullable list of allowed source IPs. + -- + -- NULL is used to indicate _any_ source IP is allowed. A _non-empty_ list + -- represents an explicit allow list of IPs or IP subnets. Note that the + -- list itself may never be empty. + allowed_ips INET[] CHECK (array_length(allowed_ips, 1) > 0) +); + +-- Insert default allowlist, allowing all traffic. +-- See `schema/crdb/insert-default-allowlist/up.sql` for details. +INSERT INTO omicron.public.allow_list (id, time_created, time_modified, allowed_ips) +VALUES ( + '001de000-a110-4000-8000-000000000000', + NOW(), + NOW(), + NULL +) +ON CONFLICT (id) +DO NOTHING; + + /* * Keep this at the end of file so that the database does not contain a version * until it is fully populated. 
@@ -3729,7 +4011,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '47.0.0', NULL) + (TRUE, NOW(), NOW(), '66.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/drop-service-table/up1.sql b/schema/crdb/drop-service-table/up1.sql new file mode 100644 index 0000000000..dfb402ba4d --- /dev/null +++ b/schema/crdb/drop-service-table/up1.sql @@ -0,0 +1,17 @@ +-- Ensure there are no `sled_resource` rows with a `kind` other than 'instance' + +-- This is a full table scan, but the sled_resource table does not track +-- historical, deleted resources, so it is at most the size of the number of +-- currently-running instances (which should be zero during a schema update). +SET + LOCAL disallow_full_table_scans = OFF; + +WITH count_non_instance_resources AS ( + SELECT COUNT(*) AS num + FROM omicron.public.sled_resource + WHERE kind != 'instance' +) +SELECT CAST( + IF(num = 0, 'true', 'sled_resource contains non-instance rows') + AS bool +) FROM count_non_instance_resources; diff --git a/schema/crdb/drop-service-table/up2.sql b/schema/crdb/drop-service-table/up2.sql new file mode 100644 index 0000000000..3d723a4876 --- /dev/null +++ b/schema/crdb/drop-service-table/up2.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS omicron.public.service; diff --git a/schema/crdb/drop-service-table/up3.sql b/schema/crdb/drop-service-table/up3.sql new file mode 100644 index 0000000000..8b546821a7 --- /dev/null +++ b/schema/crdb/drop-service-table/up3.sql @@ -0,0 +1,3 @@ +-- We are dropping `kind` so that we can drop the `sled_resource_kind` type; we'll +-- then recreate it (with some variants removed) and add this column back. +ALTER TABLE omicron.public.sled_resource DROP COLUMN IF EXISTS kind; diff --git a/schema/crdb/drop-service-table/up4.sql b/schema/crdb/drop-service-table/up4.sql new file mode 100644 index 0000000000..bbf5a605a4 --- /dev/null +++ b/schema/crdb/drop-service-table/up4.sql @@ -0,0 +1 @@ +DROP TYPE IF EXISTS omicron.public.sled_resource_kind; diff --git a/schema/crdb/drop-service-table/up5.sql b/schema/crdb/drop-service-table/up5.sql new file mode 100644 index 0000000000..9903bb28df --- /dev/null +++ b/schema/crdb/drop-service-table/up5.sql @@ -0,0 +1,3 @@ +CREATE TYPE IF NOT EXISTS omicron.public.sled_resource_kind AS ENUM ( + 'instance' +); diff --git a/schema/crdb/drop-service-table/up6.sql b/schema/crdb/drop-service-table/up6.sql new file mode 100644 index 0000000000..28241f96da --- /dev/null +++ b/schema/crdb/drop-service-table/up6.sql @@ -0,0 +1,5 @@ +ALTER TABLE omicron.public.sled_resource + ADD COLUMN IF NOT EXISTS + kind omicron.public.sled_resource_kind + NOT NULL + DEFAULT 'instance'; diff --git a/schema/crdb/drop-service-table/up7.sql b/schema/crdb/drop-service-table/up7.sql new file mode 100644 index 0000000000..1eeea65813 --- /dev/null +++ b/schema/crdb/drop-service-table/up7.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.sled_resource ALTER COLUMN kind DROP DEFAULT; diff --git a/schema/crdb/enforce-first-as-default/up01.sql b/schema/crdb/enforce-first-as-default/up01.sql new file mode 100644 index 0000000000..c1952a2df6 --- /dev/null +++ b/schema/crdb/enforce-first-as-default/up01.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.switch_port_settings_bgp_peer_config ALTER COLUMN enforce_first_as SET DEFAULT false; diff --git a/schema/crdb/enforce-first-as-default/up02.sql b/schema/crdb/enforce-first-as-default/up02.sql new file mode 100644 index 0000000000..ff6e96ba0f --- /dev/null +++ b/schema/crdb/enforce-first-as-default/up02.sql @@ 
-0,0 +1,5 @@ +set local disallow_full_table_scans = off; + +UPDATE omicron.public.switch_port_settings_bgp_peer_config + SET enforce_first_as = false + WHERE enforce_first_as IS NULL; diff --git a/schema/crdb/enforce-first-as-default/up03.sql b/schema/crdb/enforce-first-as-default/up03.sql new file mode 100644 index 0000000000..2ccd0220df --- /dev/null +++ b/schema/crdb/enforce-first-as-default/up03.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.switch_port_settings_bgp_peer_config ALTER COLUMN enforce_first_as SET NOT NULL; diff --git a/schema/crdb/insert-default-allowlist/up.sql b/schema/crdb/insert-default-allowlist/up.sql new file mode 100644 index 0000000000..0b216af95c --- /dev/null +++ b/schema/crdb/insert-default-allowlist/up.sql @@ -0,0 +1,23 @@ +-- This is a one-time insertion of the default allowlist for user-facing +-- services on existing racks. +-- +-- During RSS, this row is populated by the bootstrap agent. Nexus awaits its +-- presence before launching its external server, to ensure the list is active +-- from the first request Nexus serves. +-- +-- However, on existing racks, this row doesn't exist, and RSS also doesn't run. +-- Thus Nexus waits forever. Insert the default now, ignoring any conflict with +-- an existing row. +INSERT INTO omicron.public.allow_list (id, time_created, time_modified, allowed_ips) +VALUES ( + -- Hardcoded ID, see nexus/db-queries/src/db/fixed_data/allow_list.rs. + '001de000-a110-4000-8000-000000000000', + NOW(), + NOW(), + -- No allowlist at all, meaning allow any external traffic. + NULL +) +-- If the row already exists, RSS has already run and the bootstrap agent has +-- inserted this record. Do not overwrite it. +ON CONFLICT (id) +DO NOTHING; diff --git a/schema/crdb/physical-disk-state-and-policy/up01.sql b/schema/crdb/physical-disk-state-and-policy/up01.sql new file mode 100644 index 0000000000..5589f3a6ee --- /dev/null +++ b/schema/crdb/physical-disk-state-and-policy/up01.sql @@ -0,0 +1,4 @@ +CREATE TYPE IF NOT EXISTS omicron.public.physical_disk_policy AS ENUM ( + 'in_service', + 'expunged' +); diff --git a/schema/crdb/physical-disk-state-and-policy/up02.sql b/schema/crdb/physical-disk-state-and-policy/up02.sql new file mode 100644 index 0000000000..fbe5ba6e51 --- /dev/null +++ b/schema/crdb/physical-disk-state-and-policy/up02.sql @@ -0,0 +1,4 @@ +CREATE TYPE IF NOT EXISTS omicron.public.physical_disk_state AS ENUM ( + 'active', + 'decommissioned' +); diff --git a/schema/crdb/physical-disk-state-and-policy/up03.sql b/schema/crdb/physical-disk-state-and-policy/up03.sql new file mode 100644 index 0000000000..d3dcd714bd --- /dev/null +++ b/schema/crdb/physical-disk-state-and-policy/up03.sql @@ -0,0 +1,5 @@ +ALTER TABLE omicron.public.physical_disk + ADD COLUMN IF NOT EXISTS disk_policy omicron.public.physical_disk_policy + NOT NULL DEFAULT 'in_service', + ADD COLUMN IF NOT EXISTS disk_state omicron.public.physical_disk_state + NOT NULL DEFAULT 'active'; diff --git a/schema/crdb/physical-disk-state-and-policy/up04.sql b/schema/crdb/physical-disk-state-and-policy/up04.sql new file mode 100644 index 0000000000..a455c59dc3 --- /dev/null +++ b/schema/crdb/physical-disk-state-and-policy/up04.sql @@ -0,0 +1,3 @@ +ALTER TABLE omicron.public.physical_disk + ALTER COLUMN disk_policy DROP DEFAULT, + ALTER COLUMN disk_state DROP DEFAULT; diff --git a/schema/crdb/region-replacement/up01.sql b/schema/crdb/region-replacement/up01.sql new file mode 100644 index 0000000000..e13ec3c983 --- /dev/null +++ b/schema/crdb/region-replacement/up01.sql @@ -0,0 +1,9 
@@ +CREATE TYPE IF NOT EXISTS omicron.public.region_replacement_state AS ENUM ( + 'requested', + 'allocating', + 'running', + 'driving', + 'replacement_done', + 'completing', + 'complete' +); diff --git a/schema/crdb/region-replacement/up02.sql b/schema/crdb/region-replacement/up02.sql new file mode 100644 index 0000000000..46e5de96ba --- /dev/null +++ b/schema/crdb/region-replacement/up02.sql @@ -0,0 +1,18 @@ +CREATE TABLE IF NOT EXISTS omicron.public.region_replacement ( + /* unique ID for this region replacement */ + id UUID PRIMARY KEY, + + request_time TIMESTAMPTZ NOT NULL, + + old_region_id UUID NOT NULL, + + volume_id UUID NOT NULL, + + old_region_volume_id UUID, + + new_region_id UUID, + + replacement_state omicron.public.region_replacement_state NOT NULL, + + operating_saga_id UUID +); diff --git a/schema/crdb/region-replacement/up03.sql b/schema/crdb/region-replacement/up03.sql new file mode 100644 index 0000000000..51a9db9379 --- /dev/null +++ b/schema/crdb/region-replacement/up03.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS lookup_region_replacement_by_state on omicron.public.region_replacement (replacement_state); diff --git a/schema/crdb/region-replacement/up04.sql b/schema/crdb/region-replacement/up04.sql new file mode 100644 index 0000000000..7a95f48983 --- /dev/null +++ b/schema/crdb/region-replacement/up04.sql @@ -0,0 +1,4 @@ +CREATE TABLE IF NOT EXISTS omicron.public.volume_repair ( + volume_id UUID PRIMARY KEY, + repair_id UUID NOT NULL +); diff --git a/schema/crdb/region-replacement/up05.sql b/schema/crdb/region-replacement/up05.sql new file mode 100644 index 0000000000..b436dd865d --- /dev/null +++ b/schema/crdb/region-replacement/up05.sql @@ -0,0 +1,3 @@ +CREATE INDEX IF NOT EXISTS lookup_volume_repair_by_repair_id on omicron.public.volume_repair ( + repair_id +); diff --git a/schema/crdb/region-replacement/up06.sql b/schema/crdb/region-replacement/up06.sql new file mode 100644 index 0000000000..b02377cc59 --- /dev/null +++ b/schema/crdb/region-replacement/up06.sql @@ -0,0 +1,4 @@ +CREATE TYPE IF NOT EXISTS omicron.public.region_replacement_step_type AS ENUM ( + 'propolis', + 'pantry' +); diff --git a/schema/crdb/region-replacement/up07.sql b/schema/crdb/region-replacement/up07.sql new file mode 100644 index 0000000000..675b637bf3 --- /dev/null +++ b/schema/crdb/region-replacement/up07.sql @@ -0,0 +1,16 @@ +CREATE TABLE IF NOT EXISTS omicron.public.region_replacement_step ( + replacement_id UUID NOT NULL, + + step_time TIMESTAMPTZ NOT NULL, + + step_type omicron.public.region_replacement_step_type NOT NULL, + + step_associated_instance_id UUID, + step_associated_vmm_id UUID, + + step_associated_pantry_ip INET, + step_associated_pantry_port INT4 CHECK (step_associated_pantry_port BETWEEN 0 AND 65535), + step_associated_pantry_job_id UUID, + + PRIMARY KEY (replacement_id, step_time, step_type) +); diff --git a/schema/crdb/region-replacement/up08.sql b/schema/crdb/region-replacement/up08.sql new file mode 100644 index 0000000000..a5ecac8216 --- /dev/null +++ b/schema/crdb/region-replacement/up08.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS step_time_order on omicron.public.region_replacement_step (step_time); diff --git a/schema/crdb/region-replacement/up09.sql b/schema/crdb/region-replacement/up09.sql new file mode 100644 index 0000000000..f5cc7bb682 --- /dev/null +++ b/schema/crdb/region-replacement/up09.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS search_for_repair_notifications ON omicron.public.upstairs_repair_notification (region_id, notification_type); diff 
--git a/schema/crdb/region-replacement/up10.sql b/schema/crdb/region-replacement/up10.sql new file mode 100644 index 0000000000..eccfad8a25 --- /dev/null +++ b/schema/crdb/region-replacement/up10.sql @@ -0,0 +1,3 @@ +CREATE INDEX IF NOT EXISTS lookup_any_disk_by_volume_id ON omicron.public.disk ( + volume_id +); diff --git a/schema/crdb/region-replacement/up11.sql b/schema/crdb/region-replacement/up11.sql new file mode 100644 index 0000000000..5984bba752 --- /dev/null +++ b/schema/crdb/region-replacement/up11.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS lookup_snapshot_by_destination_volume_id ON omicron.public.snapshot ( destination_volume_id ); diff --git a/schema/crdb/remove-producer-base-route-column/up.sql b/schema/crdb/remove-producer-base-route-column/up.sql new file mode 100644 index 0000000000..90798f031a --- /dev/null +++ b/schema/crdb/remove-producer-base-route-column/up.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.metric_producer DROP COLUMN IF EXISTS base_route; diff --git a/schema/deployment-config.json b/schema/deployment-config.json index be6018a6ac..7b737c52b2 100644 --- a/schema/deployment-config.json +++ b/schema/deployment-config.json @@ -132,10 +132,15 @@ "fd12:3456::/64" ], "type": "string", - "pattern": "^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + } }, "Ipv6Subnet": { - "description": "Wraps an [`Ipv6Network`] with a compile-time prefix length.", + "description": "Wraps an [`Ipv6Net`] with a compile-time prefix length.", "type": "object", "required": [ "net" diff --git a/schema/omicron-physical-disks.json b/schema/omicron-physical-disks.json new file mode 100644 index 0000000000..60c32d98ff --- /dev/null +++ b/schema/omicron-physical-disks.json @@ -0,0 +1,77 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "OmicronPhysicalDisksConfig", + "type": "object", + "required": [ + "disks", + "generation" + ], + "properties": { + "disks": { + "type": "array", + "items": { + "$ref": "#/definitions/OmicronPhysicalDiskConfig" + } + }, + "generation": { + "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). 
It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.", + "allOf": [ + { + "$ref": "#/definitions/Generation" + } + ] + } + }, + "definitions": { + "DiskIdentity": { + "description": "Uniquely identifies a disk.", + "type": "object", + "required": [ + "model", + "serial", + "vendor" + ], + "properties": { + "model": { + "type": "string" + }, + "serial": { + "type": "string" + }, + "vendor": { + "type": "string" + } + } + }, + "Generation": { + "description": "Generation numbers stored in the database, used for optimistic concurrency control", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "OmicronPhysicalDiskConfig": { + "type": "object", + "required": [ + "id", + "identity", + "pool_id" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "identity": { + "$ref": "#/definitions/DiskIdentity" + }, + "pool_id": { + "$ref": "#/definitions/TypedUuidForZpoolKind" + } + } + }, + "TypedUuidForZpoolKind": { + "type": "string", + "format": "uuid" + } + } +} \ No newline at end of file diff --git a/schema/rss-service-plan-v3.json b/schema/rss-service-plan-v3.json new file mode 100644 index 0000000000..d1540ca351 --- /dev/null +++ b/schema/rss-service-plan-v3.json @@ -0,0 +1,866 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Plan", + "type": "object", + "required": [ + "dns_config", + "services" + ], + "properties": { + "dns_config": { + "$ref": "#/definitions/DnsConfigParams" + }, + "services": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/SledConfig" + } + } + }, + "definitions": { + "DiskIdentity": { + "description": "Uniquely identifies a disk.", + "type": "object", + "required": [ + "model", + "serial", + "vendor" + ], + "properties": { + "model": { + "type": "string" + }, + "serial": { + "type": "string" + }, + "vendor": { + "type": "string" + } + } + }, + "DnsConfigParams": { + "description": "DnsConfigParams\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"generation\", \"time_created\", \"zones\" ], \"properties\": { \"generation\": { \"type\": \"integer\", \"format\": \"uint64\", \"minimum\": 0.0 }, \"time_created\": { \"type\": \"string\", \"format\": \"date-time\" }, \"zones\": { \"type\": \"array\", \"items\": { \"$ref\": \"#/components/schemas/DnsConfigZone\" } } } } ```
", + "type": "object", + "required": [ + "generation", + "time_created", + "zones" + ], + "properties": { + "generation": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "zones": { + "type": "array", + "items": { + "$ref": "#/definitions/DnsConfigZone" + } + } + } + }, + "DnsConfigZone": { + "description": "DnsConfigZone\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"records\", \"zone_name\" ], \"properties\": { \"records\": { \"type\": \"object\", \"additionalProperties\": { \"type\": \"array\", \"items\": { \"$ref\": \"#/components/schemas/DnsRecord\" } } }, \"zone_name\": { \"type\": \"string\" } } } ```
", + "type": "object", + "required": [ + "records", + "zone_name" + ], + "properties": { + "records": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/definitions/DnsRecord" + } + } + }, + "zone_name": { + "type": "string" + } + } + }, + "DnsRecord": { + "description": "DnsRecord\n\n
JSON schema\n\n```json { \"oneOf\": [ { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"type\": \"string\", \"format\": \"ipv4\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"A\" ] } } }, { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"type\": \"string\", \"format\": \"ipv6\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"AAAA\" ] } } }, { \"type\": \"object\", \"required\": [ \"data\", \"type\" ], \"properties\": { \"data\": { \"$ref\": \"#/components/schemas/Srv\" }, \"type\": { \"type\": \"string\", \"enum\": [ \"SRV\" ] } } } ] } ```
", + "oneOf": [ + { + "type": "object", + "required": [ + "data", + "type" + ], + "properties": { + "data": { + "type": "string", + "format": "ipv4" + }, + "type": { + "type": "string", + "enum": [ + "A" + ] + } + } + }, + { + "type": "object", + "required": [ + "data", + "type" + ], + "properties": { + "data": { + "type": "string", + "format": "ipv6" + }, + "type": { + "type": "string", + "enum": [ + "AAAA" + ] + } + } + }, + { + "type": "object", + "required": [ + "data", + "type" + ], + "properties": { + "data": { + "$ref": "#/definitions/Srv" + }, + "type": { + "type": "string", + "enum": [ + "SRV" + ] + } + } + } + ] + }, + "Generation": { + "description": "Generation numbers stored in the database, used for optimistic concurrency control", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "IpNet": { + "oneOf": [ + { + "title": "v4", + "allOf": [ + { + "$ref": "#/definitions/Ipv4Net" + } + ] + }, + { + "title": "v6", + "allOf": [ + { + "$ref": "#/definitions/Ipv6Net" + } + ] + } + ], + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::IpNet", + "version": "0.1.0" + } + }, + "Ipv4Net": { + "title": "An IPv4 subnet", + "description": "An IPv4 subnet, including prefix and prefix length", + "examples": [ + "192.168.1.0/24" + ], + "type": "string", + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + } + }, + "Ipv6Net": { + "title": "An IPv6 subnet", + "description": "An IPv6 subnet, including prefix and subnet mask", + "examples": [ + "fd12:3456::/64" + ], + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + } + }, + "MacAddr": { + "title": "A MAC address", + "description": "A Media Access Control address, in EUI-48 format", + "examples": [ + "ff:ff:ff:ff:ff:ff" + ], + "type": "string", + "maxLength": 17, + "minLength": 5, + "pattern": "^([0-9a-fA-F]{0,2}:){5}[0-9a-fA-F]{0,2}$" + }, + "Name": { + "title": "A name unique within the parent collection", + "description": "Names must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. 
Names cannot be a UUID though they may contain a UUID.", + "type": "string", + "maxLength": 63, + "minLength": 1, + "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$" + }, + "NetworkInterface": { + "description": "Information required to construct a virtual network interface", + "type": "object", + "required": [ + "id", + "ip", + "kind", + "mac", + "name", + "primary", + "slot", + "subnet", + "vni" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "ip": { + "type": "string", + "format": "ip" + }, + "kind": { + "$ref": "#/definitions/NetworkInterfaceKind" + }, + "mac": { + "$ref": "#/definitions/MacAddr" + }, + "name": { + "$ref": "#/definitions/Name" + }, + "primary": { + "type": "boolean" + }, + "slot": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "subnet": { + "$ref": "#/definitions/IpNet" + }, + "vni": { + "$ref": "#/definitions/Vni" + } + } + }, + "NetworkInterfaceKind": { + "description": "The type of network interface", + "oneOf": [ + { + "description": "A vNIC attached to a guest instance", + "type": "object", + "required": [ + "id", + "type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "instance" + ] + } + } + }, + { + "description": "A vNIC associated with an internal service", + "type": "object", + "required": [ + "id", + "type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "service" + ] + } + } + }, + { + "description": "A vNIC associated with a probe", + "type": "object", + "required": [ + "id", + "type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "probe" + ] + } + } + } + ] + }, + "OmicronPhysicalDiskConfig": { + "type": "object", + "required": [ + "id", + "identity", + "pool_id" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "identity": { + "$ref": "#/definitions/DiskIdentity" + }, + "pool_id": { + "$ref": "#/definitions/TypedUuidForZpoolKind" + } + } + }, + "OmicronPhysicalDisksConfig": { + "type": "object", + "required": [ + "disks", + "generation" + ], + "properties": { + "disks": { + "type": "array", + "items": { + "$ref": "#/definitions/OmicronPhysicalDiskConfig" + } + }, + "generation": { + "description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). 
It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.", + "allOf": [ + { + "$ref": "#/definitions/Generation" + } + ] + } + } + }, + "OmicronZoneConfig": { + "description": "Describes one Omicron-managed zone running on a sled", + "type": "object", + "required": [ + "id", + "underlay_address", + "zone_type" + ], + "properties": { + "id": { + "type": "string", + "format": "uuid" + }, + "underlay_address": { + "type": "string", + "format": "ipv6" + }, + "zone_type": { + "$ref": "#/definitions/OmicronZoneType" + } + } + }, + "OmicronZoneDataset": { + "description": "Describes a persistent ZFS dataset associated with an Omicron zone", + "type": "object", + "required": [ + "pool_name" + ], + "properties": { + "pool_name": { + "$ref": "#/definitions/ZpoolName" + } + } + }, + "OmicronZoneType": { + "description": "Describes what kind of zone this is (i.e., what component is running in it) as well as any type-specific configuration", + "oneOf": [ + { + "type": "object", + "required": [ + "address", + "dns_servers", + "nic", + "ntp_servers", + "snat_cfg", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "type": [ + "string", + "null" + ] + }, + "nic": { + "description": "The service vNIC providing outbound connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/definitions/NetworkInterface" + } + ] + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "snat_cfg": { + "description": "The SNAT configuration for outbound connections.", + "allOf": [ + { + "$ref": "#/definitions/SourceNatConfig" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "boundary_ntp" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "clickhouse_keeper" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "cockroach_db" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dataset", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "type": { + "type": "string", + "enum": [ + "crucible" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "crucible_pantry" + ] + } + } + }, + { + "type": "object", + "required": [ + "dataset", + "dns_address", + "http_address", + "nic", + "type" + ], + "properties": { + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "dns_address": { + "description": "The address at which the external DNS server is reachable.", + "type": "string" + }, + "http_address": { + "description": "The 
address at which the external DNS server API is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/definitions/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "external_dns" + ] + } + } + }, + { + "type": "object", + "required": [ + "dataset", + "dns_address", + "gz_address", + "gz_address_index", + "http_address", + "type" + ], + "properties": { + "dataset": { + "$ref": "#/definitions/OmicronZoneDataset" + }, + "dns_address": { + "type": "string" + }, + "gz_address": { + "description": "The addresses in the global zone which should be created\n\nFor the DNS service, which exists outside the sleds's typical subnet - adding an address in the GZ is necessary to allow inter-zone traffic routing.", + "type": "string", + "format": "ipv6" + }, + "gz_address_index": { + "description": "The address is also identified with an auxiliary bit of information to ensure that the created global zone address can have a unique name.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "http_address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "internal_dns" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "dns_servers", + "ntp_servers", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "dns_servers": { + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "domain": { + "type": [ + "string", + "null" + ] + }, + "ntp_servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "type": { + "type": "string", + "enum": [ + "internal_ntp" + ] + } + } + }, + { + "type": "object", + "required": [ + "external_dns_servers", + "external_ip", + "external_tls", + "internal_address", + "nic", + "type" + ], + "properties": { + "external_dns_servers": { + "description": "External DNS servers Nexus can use to resolve external hosts.", + "type": "array", + "items": { + "type": "string", + "format": "ip" + } + }, + "external_ip": { + "description": "The address at which the external nexus server is reachable.", + "type": "string", + "format": "ip" + }, + "external_tls": { + "description": "Whether Nexus's external endpoint should use TLS", + "type": "boolean" + }, + "internal_address": { + "description": "The address at which the internal nexus server is reachable.", + "type": "string" + }, + "nic": { + "description": "The service vNIC providing external connectivity using OPTE.", + "allOf": [ + { + "$ref": "#/definitions/NetworkInterface" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "nexus" + ] + } + } + }, + { + "type": "object", + "required": [ + "address", + "type" + ], + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "oximeter" + ] + } + } + } + ] + }, + "SledConfig": { + "type": "object", + "required": [ + "disks", + "zones" + ], + "properties": { + "disks": { + "description": "Control plane disks configured for this sled", + "allOf": [ + { + "$ref": "#/definitions/OmicronPhysicalDisksConfig" + } + ] + }, + "zones": { + "description": "zones configured for this sled", + "type": "array", + "items": { + "$ref": "#/definitions/OmicronZoneConfig" + } + } + } + }, + "SourceNatConfig": { + "description": "An IP address and port range used for source NAT, i.e., making outbound network connections from guests or services.", + "type": "object", + "required": [ + "first_port", + "ip", + "last_port" + 
], + "properties": { + "first_port": { + "description": "The first port used for source NAT, inclusive.", + "type": "integer", + "format": "uint16", + "minimum": 0.0 + }, + "ip": { + "description": "The external address provided to the instance or service.", + "type": "string", + "format": "ip" + }, + "last_port": { + "description": "The last port used for source NAT, also inclusive.", + "type": "integer", + "format": "uint16", + "minimum": 0.0 + } + } + }, + "Srv": { + "description": "Srv\n\n
JSON schema\n\n```json { \"type\": \"object\", \"required\": [ \"port\", \"prio\", \"target\", \"weight\" ], \"properties\": { \"port\": { \"type\": \"integer\", \"format\": \"uint16\", \"minimum\": 0.0 }, \"prio\": { \"type\": \"integer\", \"format\": \"uint16\", \"minimum\": 0.0 }, \"target\": { \"type\": \"string\" }, \"weight\": { \"type\": \"integer\", \"format\": \"uint16\", \"minimum\": 0.0 } } } ```
", + "type": "object", + "required": [ + "port", + "prio", + "target", + "weight" + ], + "properties": { + "port": { + "type": "integer", + "format": "uint16", + "minimum": 0.0 + }, + "prio": { + "type": "integer", + "format": "uint16", + "minimum": 0.0 + }, + "target": { + "type": "string" + }, + "weight": { + "type": "integer", + "format": "uint16", + "minimum": 0.0 + } + } + }, + "TypedUuidForZpoolKind": { + "type": "string", + "format": "uuid" + }, + "Vni": { + "description": "A Geneve Virtual Network Identifier", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "ZpoolName": { + "title": "The name of a Zpool", + "description": "Zpool names are of the format ox{i,p}_. They are either Internal or External, and should be unique", + "type": "string", + "pattern": "^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + } + } +} \ No newline at end of file diff --git a/schema/rss-sled-plan.json b/schema/rss-sled-plan.json index 14ddc848be..5971235634 100644 --- a/schema/rss-sled-plan.json +++ b/schema/rss-sled-plan.json @@ -23,6 +23,48 @@ } }, "definitions": { + "AllowedSourceIps": { + "description": "Description of source IPs allowed to reach rack services.", + "oneOf": [ + { + "description": "Allow traffic from any external IP address.", + "type": "object", + "required": [ + "allow" + ], + "properties": { + "allow": { + "type": "string", + "enum": [ + "any" + ] + } + } + }, + { + "description": "Restrict access to a specific set of source IP addresses or subnets.\n\nAll others are prevented from reaching rack services.", + "type": "object", + "required": [ + "allow", + "ips" + ], + "properties": { + "allow": { + "type": "string", + "enum": [ + "list" + ] + }, + "ips": { + "type": "array", + "items": { + "$ref": "#/definitions/IpNet" + } + } + } + } + ] + }, "Baseboard": { "description": "Describes properties that should uniquely identify a Gimlet.", "oneOf": [ @@ -151,12 +193,26 @@ "format": "uint32", "minimum": 0.0 }, + "checker": { + "description": "Checker to apply to incoming messages.", + "type": [ + "string", + "null" + ] + }, "originate": { "description": "The set of prefixes for the BGP router to originate.", "type": "array", "items": { - "$ref": "#/definitions/Ipv4Network" + "$ref": "#/definitions/Ipv4Net" } + }, + "shaper": { + "description": "Shaper to apply to outgoing messages.", + "type": [ + "string", + "null" + ] } } }, @@ -173,12 +229,44 @@ "type": "string", "format": "ipv4" }, + "allowed_export": { + "description": "Define export policy for a peer.", + "default": { + "type": "no_filtering" + }, + "allOf": [ + { + "$ref": "#/definitions/ImportExportPolicy" + } + ] + }, + "allowed_import": { + "description": "Define import policy for a peer.", + "default": { + "type": "no_filtering" + }, + "allOf": [ + { + "$ref": "#/definitions/ImportExportPolicy" + } + ] + }, "asn": { - "description": "The autonomous sysetm number of the router the peer belongs to.", + "description": "The autonomous system number of the router the peer belongs to.", "type": "integer", "format": "uint32", "minimum": 0.0 }, + "communities": { + "description": "Include the provided communities in updates sent to the peer.", + "default": [], + "type": "array", + "items": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, "connect_retry": { "description": "The interval in seconds between peer connection retry attempts.", "type": [ @@ -197,6 +285,11 @@ "format": "uint64", "minimum": 0.0 }, + "enforce_first_as": { + "description": "Enforce that the 
first AS in paths received from this peer is the peer's AS.", + "default": false, + "type": "boolean" + }, "hold_time": { "description": "How long to keep a session alive without a keepalive in seconds. Defaults to 6.", "type": [ @@ -224,9 +317,61 @@ "format": "uint64", "minimum": 0.0 }, + "local_pref": { + "description": "Apply a local preference to routes received from this peer.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "md5_auth_key": { + "description": "Use the given key for TCP-MD5 authentication with the peer.", + "type": [ + "string", + "null" + ] + }, + "min_ttl": { + "description": "Require messages from a peer have a minimum IP time to live field.", + "type": [ + "integer", + "null" + ], + "format": "uint8", + "minimum": 0.0 + }, + "multi_exit_discriminator": { + "description": "Apply the provided multi-exit discriminator (MED) updates sent to the peer.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, "port": { "description": "Switch port the peer is reachable on.", "type": "string" + }, + "remote_asn": { + "description": "Require that a peer has a specified ASN.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "vlan_id": { + "description": "Associate a VLAN ID with a BGP peer session.", + "type": [ + "integer", + "null" + ], + "format": "uint16", + "minimum": 0.0 } } }, @@ -289,13 +434,54 @@ } } }, - "IpNetwork": { + "ImportExportPolicy": { + "description": "Define policy relating to the import and export of prefixes from a BGP peer.", + "oneOf": [ + { + "description": "Do not perform any filtering.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "no_filtering" + ] + } + } + }, + { + "type": "object", + "required": [ + "type", + "value" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "allow" + ] + }, + "value": { + "type": "array", + "items": { + "$ref": "#/definitions/IpNet" + } + } + } + } + ] + }, + "IpNet": { "oneOf": [ { "title": "v4", "allOf": [ { - "$ref": "#/definitions/Ipv4Network" + "$ref": "#/definitions/Ipv4Net" } ] }, @@ -303,12 +489,16 @@ "title": "v6", "allOf": [ { - "$ref": "#/definitions/Ipv6Network" + "$ref": "#/definitions/Ipv6Net" } ] } ], - "x-rust-type": "ipnetwork::IpNetwork" + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::IpNet", + "version": "0.1.0" + } }, "IpRange": { "oneOf": [ @@ -330,10 +520,19 @@ } ] }, - "Ipv4Network": { + "Ipv4Net": { + "title": "An IPv4 subnet", + "description": "An IPv4 subnet, including prefix and prefix length", + "examples": [ + "192.168.1.0/24" + ], "type": "string", - "pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\/(3[0-2]|[0-2]?[0-9])$", - "x-rust-type": "ipnetwork::Ipv4Network" + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|1[0-9]|2[0-9]|3[0-2])$", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv4Net", + "version": "0.1.0" + } }, "Ipv4Range": { "description": "A non-decreasing IPv4 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", @@ -360,12 +559,12 @@ "fd12:3456::/64" ], "type": "string", - "pattern": "^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" - }, - "Ipv6Network": { - "type": "string", - 
"pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\")[/](12[0-8]|1[0-1][0-9]|[0-9]?[0-9])$", - "x-rust-type": "ipnetwork::Ipv6Network" + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + } }, "Ipv6Range": { "description": "A non-decreasing IPv6 address range, inclusive of both ends.\n\nThe first address must be less than or equal to the last address.", @@ -386,7 +585,7 @@ } }, "Ipv6Subnet": { - "description": "Wraps an [`Ipv6Network`] with a compile-time prefix length.", + "description": "Wraps an [`Ipv6Net`] with a compile-time prefix length.", "type": "object", "required": [ "net" @@ -426,7 +625,7 @@ "description": "This port's addresses.", "type": "array", "items": { - "$ref": "#/definitions/IpNetwork" + "$ref": "#/definitions/IpNet" } }, "autoneg": { @@ -517,6 +716,17 @@ "recovery_silo" ], "properties": { + "allowed_source_ips": { + "description": "IPs or subnets allowed to make requests to user-facing services", + "default": { + "allow": "any" + }, + "allOf": [ + { + "$ref": "#/definitions/AllowedSourceIps" + } + ] + }, "bootstrap_discovery": { "description": "Describes how bootstrap addresses should be collected during RSS.", "allOf": [ @@ -638,7 +848,7 @@ } }, "rack_subnet": { - "$ref": "#/definitions/Ipv6Network" + "$ref": "#/definitions/Ipv6Net" } } }, @@ -673,7 +883,7 @@ "description": "The destination of the route.", "allOf": [ { - "$ref": "#/definitions/IpNetwork" + "$ref": "#/definitions/IpNet" } ] }, @@ -681,6 +891,15 @@ "description": "The nexthop/gateway address.", "type": "string", "format": "ip" + }, + "vlan_id": { + "description": "The VLAN id associated with this route.", + "type": [ + "integer", + "null" + ], + "format": "uint16", + "minimum": 0.0 } } }, diff --git a/schema/start-sled-agent-request.json b/schema/start-sled-agent-request.json index b03058d106..98dfcea61c 100644 --- a/schema/start-sled-agent-request.json +++ b/schema/start-sled-agent-request.json @@ -32,10 +32,15 @@ "fd12:3456::/64" ], "type": "string", - "pattern": "^([fF][dD])[0-9a-fA-F]{2}:(([0-9a-fA-F]{1,4}:){6}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,6}:)([0-9a-fA-F]{1,4})?\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$" + "pattern": 
"^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$", + "x-rust-type": { + "crate": "oxnet", + "path": "oxnet::Ipv6Net", + "version": "0.1.0" + } }, "Ipv6Subnet": { - "description": "Wraps an [`Ipv6Network`] with a compile-time prefix length.", + "description": "Wraps an [`Ipv6Net`] with a compile-time prefix length.", "type": "object", "required": [ "net" diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index c941ee2625..167ac987ca 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -5,6 +5,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true async-trait.workspace = true @@ -48,10 +51,12 @@ nexus-config.workspace = true nexus-types.workspace = true omicron-common.workspace = true omicron-ddm-admin-client.workspace = true +omicron-uuid-kinds.workspace = true once_cell.workspace = true oximeter.workspace = true oximeter-instruments.workspace = true oximeter-producer.workspace = true +oxnet.workspace = true propolis-client.workspace = true propolis-mock-server.workspace = true # Only used by the simulated sled agent rand = { workspace = true, features = ["getrandom"] } @@ -106,7 +111,7 @@ tempfile.workspace = true tokio-stream.workspace = true tokio-util.workspace = true -illumos-utils = { workspace = true, features = ["testing", "tmp_keypath"] } +illumos-utils = { workspace = true, features = ["testing"] } sled-storage = { workspace = true, features = ["testing"] } # diff --git a/sled-agent/src/bin/zone-bundle.rs b/sled-agent/src/bin/zone-bundle.rs index d49e22d80a..82433edaf5 100644 --- a/sled-agent/src/bin/zone-bundle.rs +++ b/sled-agent/src/bin/zone-bundle.rs @@ -449,11 +449,7 @@ async fn main() -> anyhow::Result<()> { .await .context("failed to get zone bundle")? .into_inner(); - let mut f = tokio::fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&output) + let mut f = tokio::fs::File::create(&output) .await .context("failed to open output file")?; let mut stream = bundle.into_inner(); @@ -654,11 +650,7 @@ async fn main() -> anyhow::Result<()> { } // Open megabundle output file. - let f = tokio::fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&output) + let f = tokio::fs::File::create(&output) .await .context("failed to open output file")? 
.into_std() diff --git a/sled-agent/src/bootstrap/bootstore_setup.rs b/sled-agent/src/bootstrap/bootstore_setup.rs index e5079b978e..ee9a321474 100644 --- a/sled-agent/src/bootstrap/bootstore_setup.rs +++ b/sled-agent/src/bootstrap/bootstore_setup.rs @@ -15,7 +15,7 @@ use omicron_ddm_admin_client::Client as DdmAdminClient; use sled_hardware_types::underlay::BootstrapInterface; use sled_hardware_types::Baseboard; use sled_storage::dataset::CLUSTER_DATASET; -use sled_storage::resources::StorageResources; +use sled_storage::resources::AllDisks; use slog::Logger; use std::collections::BTreeSet; use std::net::Ipv6Addr; @@ -26,7 +26,7 @@ const BOOTSTORE_FSM_STATE_FILE: &str = "bootstore-fsm-state.json"; const BOOTSTORE_NETWORK_CONFIG_FILE: &str = "bootstore-network-config.json"; pub fn new_bootstore_config( - storage_resources: &StorageResources, + all_disks: &AllDisks, baseboard: Baseboard, global_zone_bootstrap_ip: Ipv6Addr, ) -> Result { @@ -37,17 +37,17 @@ pub fn new_bootstore_config( learn_timeout: Duration::from_secs(5), rack_init_timeout: Duration::from_secs(300), rack_secret_request_timeout: Duration::from_secs(5), - fsm_state_ledger_paths: bootstore_fsm_state_paths(&storage_resources)?, + fsm_state_ledger_paths: bootstore_fsm_state_paths(&all_disks)?, network_config_ledger_paths: bootstore_network_config_paths( - &storage_resources, + &all_disks, )?, }) } fn bootstore_fsm_state_paths( - storage: &StorageResources, + all_disks: &AllDisks, ) -> Result, StartError> { - let paths: Vec<_> = storage + let paths: Vec<_> = all_disks .all_m2_mountpoints(CLUSTER_DATASET) .into_iter() .map(|p| p.join(BOOTSTORE_FSM_STATE_FILE)) @@ -60,9 +60,9 @@ fn bootstore_fsm_state_paths( } fn bootstore_network_config_paths( - storage: &StorageResources, + all_disks: &AllDisks, ) -> Result, StartError> { - let paths: Vec<_> = storage + let paths: Vec<_> = all_disks .all_m2_mountpoints(CLUSTER_DATASET) .into_iter() .map(|p| p.join(BOOTSTORE_NETWORK_CONFIG_FILE)) diff --git a/sled-agent/src/bootstrap/early_networking.rs b/sled-agent/src/bootstrap/early_networking.rs index 57e28d530e..bd12bb745a 100644 --- a/sled-agent/src/bootstrap/early_networking.rs +++ b/sled-agent/src/bootstrap/early_networking.rs @@ -14,15 +14,15 @@ use futures::future; use gateway_client::Client as MgsClient; use internal_dns::resolver::{ResolveError, Resolver as DnsResolver}; use internal_dns::ServiceName; -use ipnetwork::Ipv6Network; use mg_admin_client::types::{ AddStaticRoute4Request, ApplyRequest, BfdPeerConfig, BgpPeerConfig, - Prefix4, StaticRoute4, StaticRoute4List, + CheckerSource, ImportExportPolicy as MgImportExportPolicy, Prefix, Prefix4, + Prefix6, ShaperSource, StaticRoute4, StaticRoute4List, }; use mg_admin_client::Client as MgdClient; use omicron_common::address::DENDRITE_PORT; use omicron_common::address::{MGD_PORT, MGS_PORT}; -use omicron_common::api::external::BfdMode; +use omicron_common::api::external::{BfdMode, ImportExportPolicy}; use omicron_common::api::internal::shared::{ BgpConfig, PortConfigV1, PortFec, PortSpeed, RackNetworkConfig, RackNetworkConfigV1, SwitchLocation, UplinkConfig, @@ -33,6 +33,7 @@ use omicron_common::backoff::{ }; use omicron_common::OMICRON_DPD_TAG; use omicron_ddm_admin_client::DdmError; +use oxnet::{IpNet, Ipv6Net}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use slog::Logger; @@ -497,6 +498,58 @@ impl<'a> EarlyNetworkSetup<'a> { keepalive: peer.keepalive.unwrap_or(2), resolution: BGP_SESSION_RESOLUTION, passive: false, + remote_asn: peer.remote_asn, + min_ttl: 
peer.min_ttl, + md5_auth_key: peer.md5_auth_key.clone(), + multi_exit_discriminator: peer.multi_exit_discriminator, + communities: peer.communities.clone(), + local_pref: peer.local_pref, + enforce_first_as: peer.enforce_first_as, + allow_export: match &peer.allowed_export { + ImportExportPolicy::NoFiltering => { + MgImportExportPolicy::NoFiltering + } + ImportExportPolicy::Allow(list) => { + MgImportExportPolicy::Allow( + list.clone() + .iter() + .map(|x| match x { + IpNet::V4(p) => Prefix::V4(Prefix4 { + length: p.width(), + value: p.addr(), + }), + IpNet::V6(p) => Prefix::V6(Prefix6 { + length: p.width(), + value: p.addr(), + }), + }) + .collect(), + ) + } + }, + allow_import: match &peer.allowed_import { + ImportExportPolicy::NoFiltering => { + MgImportExportPolicy::NoFiltering + } + ImportExportPolicy::Allow(list) => { + MgImportExportPolicy::Allow( + list.clone() + .iter() + .map(|x| match x { + IpNet::V4(p) => Prefix::V4(Prefix4 { + length: p.width(), + value: p.addr(), + }), + IpNet::V6(p) => Prefix::V6(Prefix6 { + length: p.width(), + value: p.addr(), + }), + }) + .collect(), + ) + } + }, + vlan_id: peer.vlan_id, }; match bgp_peer_configs.get_mut(&port.port) { Some(peers) => { @@ -514,10 +567,18 @@ impl<'a> EarlyNetworkSetup<'a> { mgd.bgp_apply(&ApplyRequest { asn: config.asn, peers: bgp_peer_configs, + shaper: config.shaper.as_ref().map(|x| ShaperSource { + code: x.clone(), + asn: config.asn, + }), + checker: config.checker.as_ref().map(|x| CheckerSource { + code: x.clone(), + asn: config.asn, + }), originate: config .originate .iter() - .map(|x| Prefix4 { length: x.prefix(), value: x.ip() }) + .map(|x| Prefix4 { length: x.width(), value: x.addr() }) .collect(), }) .await @@ -539,13 +600,14 @@ impl<'a> EarlyNetworkSetup<'a> { IpAddr::V4(v4) => v4, IpAddr::V6(_) => continue, }; - let prefix = match r.destination.ip() { + let prefix = match r.destination.addr() { IpAddr::V4(v4) => { - Prefix4 { value: v4, length: r.destination.prefix() } + Prefix4 { value: v4, length: r.destination.width() } } IpAddr::V6(_) => continue, }; - let sr = StaticRoute4 { nexthop, prefix }; + let vlan_id = r.vlan_id; + let sr = StaticRoute4 { nexthop, prefix, vlan_id }; rq.routes.list.push(sr); } } @@ -595,7 +657,7 @@ impl<'a> EarlyNetworkSetup<'a> { // TODO We're discarding the `uplink_cidr.prefix()` here and only using // the IP address; at some point we probably need to give the full CIDR // to dendrite? 
- addrs.push(a.ip()); + addrs.push(a.addr()); } let link_settings = LinkSettings { @@ -823,7 +885,7 @@ impl RackNetworkConfigV0 { v0: RackNetworkConfigV0, ) -> RackNetworkConfigV1 { RackNetworkConfigV1 { - rack_subnet: Ipv6Network::new(rack_subnet, 56).unwrap(), + rack_subnet: Ipv6Net::new(rack_subnet, 56).unwrap(), infra_ip_first: v0.infra_ip_first, infra_ip_last: v0.infra_ip_last, ports: v0 @@ -910,13 +972,14 @@ mod tests { body: EarlyNetworkConfigBody { ntp_servers: v0.ntp_servers.clone(), rack_network_config: Some(RackNetworkConfigV1 { - rack_subnet: Ipv6Network::new(v0.rack_subnet, 56).unwrap(), + rack_subnet: Ipv6Net::new(v0.rack_subnet, 56).unwrap(), infra_ip_first: v0_rack_network_config.infra_ip_first, infra_ip_last: v0_rack_network_config.infra_ip_last, ports: vec![PortConfigV1 { routes: vec![RouteConfig { destination: "0.0.0.0/0".parse().unwrap(), nexthop: uplink.gateway_ip.into(), + vlan_id: None, }], addresses: vec![uplink.uplink_cidr.into()], switch: uplink.switch, diff --git a/sled-agent/src/bootstrap/mod.rs b/sled-agent/src/bootstrap/mod.rs index 590e13c891..acda0b456f 100644 --- a/sled-agent/src/bootstrap/mod.rs +++ b/sled-agent/src/bootstrap/mod.rs @@ -12,6 +12,7 @@ mod http_entrypoints; mod maghemite; pub(crate) mod params; mod pre_server; +mod pumpkind; mod rack_ops; pub(crate) mod rss_handle; pub mod secret_retriever; diff --git a/sled-agent/src/bootstrap/params.rs b/sled-agent/src/bootstrap/params.rs index c901fb31ac..e458900c53 100644 --- a/sled-agent/src/bootstrap/params.rs +++ b/sled-agent/src/bootstrap/params.rs @@ -7,6 +7,7 @@ use anyhow::{bail, Result}; use async_trait::async_trait; use omicron_common::address::{self, Ipv6Subnet, SLED_PREFIX}; +use omicron_common::api::external::AllowedSourceIps; use omicron_common::api::internal::shared::RackNetworkConfig; use omicron_common::ledger::Ledgerable; use schemars::JsonSchema; @@ -41,6 +42,8 @@ struct UnvalidatedRackInitializeRequest { external_certificates: Vec, recovery_silo: RecoverySiloConfig, rack_network_config: RackNetworkConfig, + #[serde(default = "default_allowed_source_ips")] + allowed_source_ips: AllowedSourceIps, } /// Configuration for the "rack setup service". @@ -87,6 +90,17 @@ pub struct RackInitializeRequest { /// Initial rack network configuration pub rack_network_config: RackNetworkConfig, + + /// IPs or subnets allowed to make requests to user-facing services + #[serde(default = "default_allowed_source_ips")] + pub allowed_source_ips: AllowedSourceIps, +} + +/// This field was added after several racks were already deployed. RSS plans +/// for those racks should default to allowing any source IP, since that is +/// effectively what they did. +const fn default_allowed_source_ips() -> AllowedSourceIps { + AllowedSourceIps::Any } // This custom debug implementation hides the private keys. 
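The `allowed_source_ips` field added to `RackInitializeRequest` above is gated by `#[serde(default = "default_allowed_source_ips")]`, so RSS plans serialized before the field existed still deserialize and fall back to `AllowedSourceIps::Any`. A minimal, self-contained sketch of that serde field-default pattern; the `Plan`/`Allowed` names and the JSON inputs are illustrative placeholders, not omicron's actual types:

```rust
use serde::Deserialize;

// Illustrative stand-in for AllowedSourceIps; not the real omicron type.
#[derive(Debug, PartialEq, Deserialize)]
#[serde(tag = "allow", rename_all = "lowercase")]
enum Allowed {
    Any,
    List { ips: Vec<String> },
}

// Used when an older plan omits `allowed_source_ips` entirely.
const fn default_allowed() -> Allowed {
    Allowed::Any
}

#[derive(Debug, Deserialize)]
struct Plan {
    rack_subnet: String,
    #[serde(default = "default_allowed")]
    allowed_source_ips: Allowed,
}

fn main() {
    // A plan written before the field existed still parses, defaulting to Any.
    let old: Plan =
        serde_json::from_str(r#"{ "rack_subnet": "fd00:1122:3344::/56" }"#)
            .unwrap();
    assert_eq!(old.allowed_source_ips, Allowed::Any);

    // Newer plans can restrict the allowed source IPs explicitly.
    let new: Plan = serde_json::from_str(
        r#"{
            "rack_subnet": "fd00:1122:3344::/56",
            "allowed_source_ips": { "allow": "list", "ips": ["192.0.2.0/24"] }
        }"#,
    )
    .unwrap();
    assert_eq!(
        new.allowed_source_ips,
        Allowed::List { ips: vec!["192.0.2.0/24".into()] }
    );
    assert_eq!(old.rack_subnet, new.rack_subnet);
}
```

Using a named default function rather than `Default::default()` keeps the fallback explicit, and it can stay a `const fn`, which is why the change above documents the default as the behavior already-deployed racks effectively had.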
@@ -105,6 +119,7 @@ impl std::fmt::Debug for RackInitializeRequest { external_certificates: _, recovery_silo, rack_network_config, + allowed_source_ips, } = &self; f.debug_struct("RackInitializeRequest") @@ -121,6 +136,7 @@ impl std::fmt::Debug for RackInitializeRequest { .field("external_certificates", &"") .field("recovery_silo", recovery_silo) .field("rack_network_config", rack_network_config) + .field("allowed_source_ips", allowed_source_ips) .finish() } } @@ -161,6 +177,7 @@ impl TryFrom for RackInitializeRequest { external_certificates: value.external_certificates, recovery_silo: value.recovery_silo, rack_network_config: value.rack_network_config, + allowed_source_ips: value.allowed_source_ips, }) } } @@ -367,6 +384,7 @@ mod tests { use super::*; use camino::Utf8PathBuf; + use oxnet::Ipv6Net; #[test] fn parse_rack_initialization() { @@ -488,13 +506,14 @@ mod tests { user_password_hash: "$argon2id$v=19$m=98304,t=13,p=1$RUlWc0ZxaHo0WFdrN0N6ZQ$S8p52j85GPvMhR/ek3GL0el/oProgTwWpHJZ8lsQQoY".parse().unwrap(), }, rack_network_config: RackNetworkConfig { - rack_subnet: Ipv6Addr::LOCALHOST.into(), + rack_subnet: Ipv6Net::host_net(Ipv6Addr::LOCALHOST), infra_ip_first: Ipv4Addr::LOCALHOST, infra_ip_last: Ipv4Addr::LOCALHOST, ports: Vec::new(), bgp: Vec::new(), bfd: Vec::new(), }, + allowed_source_ips: AllowedSourceIps::Any, }; // Valid configs: all external DNS IPs are contained in the IP pool diff --git a/sled-agent/src/bootstrap/pre_server.rs b/sled-agent/src/bootstrap/pre_server.rs index 38bedf921c..5b89506242 100644 --- a/sled-agent/src/bootstrap/pre_server.rs +++ b/sled-agent/src/bootstrap/pre_server.rs @@ -11,6 +11,7 @@ #![allow(clippy::result_large_err)] use super::maghemite; +use super::pumpkind; use super::server::StartError; use crate::config::Config; use crate::config::SidecarRevision; @@ -20,7 +21,6 @@ use crate::long_running_tasks::{ use crate::services::ServiceManager; use crate::services::TimeSyncConfig; use crate::sled_agent::SledAgent; -use crate::storage_monitor::UnderlayAccess; use camino::Utf8PathBuf; use cancel_safe_futures::TryStreamExt; use futures::stream; @@ -54,7 +54,6 @@ pub(super) struct BootstrapAgentStartup { pub(super) service_manager: ServiceManager, pub(super) long_running_task_handles: LongRunningTaskHandles, pub(super) sled_agent_started_tx: oneshot::Sender, - pub(super) underlay_available_tx: oneshot::Sender, } impl BootstrapAgentStartup { @@ -77,6 +76,7 @@ impl BootstrapAgentStartup { let (config, log, ddm_admin_localhost_client, startup_networking) = tokio::task::spawn_blocking(move || { enable_mg_ddm(&config, &log)?; + pumpkind::enable_pumpkind_service(&log)?; ensure_zfs_key_directory_exists(&log)?; let startup_networking = BootstrapNetworking::setup(&config)?; @@ -126,7 +126,6 @@ impl BootstrapAgentStartup { long_running_task_handles, sled_agent_started_tx, service_manager_ready_tx, - underlay_available_tx, ) = spawn_all_longrunning_tasks( &base_log, sled_mode, @@ -172,7 +171,6 @@ impl BootstrapAgentStartup { service_manager, long_running_task_handles, sled_agent_started_tx, - underlay_available_tx, }) } } diff --git a/sled-agent/src/bootstrap/pumpkind.rs b/sled-agent/src/bootstrap/pumpkind.rs new file mode 100644 index 0000000000..63be799519 --- /dev/null +++ b/sled-agent/src/bootstrap/pumpkind.rs @@ -0,0 +1,41 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Starting the pumpkind service. 
+ +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum Error { + #[error("Error configuring service: {0}")] + Config(#[from] smf::ConfigError), + + #[error("Error administering service: {0}")] + Adm(#[from] smf::AdmError), +} + +#[cfg(feature = "switch-asic")] +pub(super) fn enable_pumpkind_service(log: &slog::Logger) -> Result<(), Error> { + const SERVICE_FMRI: &str = "svc:/oxide/pumpkind"; + const MANIFEST_PATH: &str = + "/opt/oxide/pumpkind/lib/svc/manifest/system/pumpkind.xml"; + + info!(log, "Importing pumpkind service"; "path" => MANIFEST_PATH); + smf::Config::import().run(MANIFEST_PATH)?; + + info!(log, "Enabling pumpkind service"); + smf::Adm::new() + .enable() + .temporary() + .run(smf::AdmSelection::ByPattern(&[SERVICE_FMRI]))?; + + Ok(()) +} + +#[cfg(not(feature = "switch-asic"))] +pub(super) fn enable_pumpkind_service( + _log: &slog::Logger, +) -> Result<(), Error> { + Ok(()) +} diff --git a/sled-agent/src/bootstrap/rss_handle.rs b/sled-agent/src/bootstrap/rss_handle.rs index 5d9c01e7f2..9baf0e7ef3 100644 --- a/sled-agent/src/bootstrap/rss_handle.rs +++ b/sled-agent/src/bootstrap/rss_handle.rs @@ -299,11 +299,3 @@ impl BootstrapAgentHandleReceiver { tx.send(Ok(())).unwrap(); } } - -struct AbortOnDrop(JoinHandle); - -impl Drop for AbortOnDrop { - fn drop(&mut self) { - self.0.abort(); - } -} diff --git a/sled-agent/src/bootstrap/server.rs b/sled-agent/src/bootstrap/server.rs index bca3350696..369437d3aa 100644 --- a/sled-agent/src/bootstrap/server.rs +++ b/sled-agent/src/bootstrap/server.rs @@ -17,6 +17,7 @@ use crate::bootstrap::http_entrypoints::api as http_api; use crate::bootstrap::http_entrypoints::BootstrapServerContext; use crate::bootstrap::maghemite; use crate::bootstrap::pre_server::BootstrapAgentStartup; +use crate::bootstrap::pumpkind; use crate::bootstrap::rack_ops::RssAccess; use crate::bootstrap::secret_retriever::LrtqOrHardcodedSecretRetriever; use crate::bootstrap::sprockets_server::SprocketsServer; @@ -26,7 +27,6 @@ use crate::long_running_tasks::LongRunningTaskHandles; use crate::server::Server as SledAgentServer; use crate::services::ServiceManager; use crate::sled_agent::SledAgent; -use crate::storage_monitor::UnderlayAccess; use bootstore::schemes::v0 as bootstore; use camino::Utf8PathBuf; use cancel_safe_futures::TryStreamExt; @@ -75,6 +75,9 @@ pub enum StartError { #[error("Failed to enable mg-ddm")] EnableMgDdm(#[from] maghemite::Error), + #[error("Failed to enable pumpkind")] + EnablePumpkind(#[from] pumpkind::Error), + #[error("Failed to create zfs key directory {dir:?}")] CreateZfsKeyDirectory { dir: &'static str, @@ -179,7 +182,6 @@ impl Server { service_manager, long_running_task_handles, sled_agent_started_tx, - underlay_available_tx, } = BootstrapAgentStartup::run(config).await?; // Do we have a StartSledAgentRequest stored in the ledger? @@ -242,7 +244,6 @@ impl Server { &config, start_sled_agent_request, long_running_task_handles.clone(), - underlay_available_tx, service_manager.clone(), &ddm_admin_localhost_client, &base_log, @@ -264,10 +265,7 @@ impl Server { sled_agent.load_services().await; SledAgentState::ServerStarted(sled_agent_server) } else { - SledAgentState::Bootstrapping( - Some(sled_agent_started_tx), - Some(underlay_available_tx), - ) + SledAgentState::Bootstrapping(Some(sled_agent_started_tx)) }; // Spawn our inner task that handles any future hardware updates and any @@ -310,10 +308,7 @@ impl Server { // bootstrap server). enum SledAgentState { // We're still in the bootstrapping phase, waiting for a sled-agent request. 
- Bootstrapping( - Option>, - Option>, - ), + Bootstrapping(Option>), // ... or the sled agent server is running. ServerStarted(SledAgentServer), } @@ -357,7 +352,6 @@ async fn start_sled_agent( config: &SledConfig, request: StartSledAgentRequest, long_running_task_handles: LongRunningTaskHandles, - underlay_available_tx: oneshot::Sender, service_manager: ServiceManager, ddmd_client: &DdmAdminClient, base_log: &Logger, @@ -412,7 +406,7 @@ async fn start_sled_agent( ddmd_client.advertise_prefix(request.body.subnet); let az_prefix = - Ipv6Subnet::::new(request.body.subnet.net().network()); + Ipv6Subnet::::new(request.body.subnet.net().addr()); let addr = request.body.subnet.net().iter().nth(1).unwrap(); let dns_servers = Resolver::servers_from_subnet(az_prefix); ddmd_client.enable_stats( @@ -429,7 +423,6 @@ async fn start_sled_agent( request.clone(), long_running_task_handles.clone(), service_manager, - underlay_available_tx, ) .await .map_err(SledAgentServerStartError::FailedStartingServer)?; @@ -495,7 +488,7 @@ impl From for SledAgentServerStartError { async fn sled_config_paths( storage: &StorageHandle, ) -> Result, MissingM2Paths> { - let resources = storage.get_latest_resources().await; + let resources = storage.get_latest_disks().await; let paths: Vec<_> = resources .all_m2_mountpoints(CONFIG_DATASET) .into_iter() @@ -573,10 +566,7 @@ impl Inner { log: &Logger, ) { match &mut self.state { - SledAgentState::Bootstrapping( - sled_agent_started_tx, - underlay_available_tx, - ) => { + SledAgentState::Bootstrapping(sled_agent_started_tx) => { let request_id = request.body.id; // Extract from options to satisfy the borrow checker. @@ -587,14 +577,11 @@ impl Inner { // See https://github.com/oxidecomputer/omicron/issues/4494 let sled_agent_started_tx = sled_agent_started_tx.take().unwrap(); - let underlay_available_tx = - underlay_available_tx.take().unwrap(); let response = match start_sled_agent( &self.config, request, self.long_running_task_handles.clone(), - underlay_available_tx, self.service_manager.clone(), &self.ddm_admin_localhost_client, &self.base_log, @@ -664,7 +651,7 @@ impl Inner { let config_dirs = self .long_running_task_handles .storage_manager - .get_latest_resources() + .get_latest_disks() .await .all_m2_mountpoints(CONFIG_DATASET) .into_iter(); diff --git a/sled-agent/src/config.rs b/sled-agent/src/config.rs index 058f343e2a..c4ce421497 100644 --- a/sled-agent/src/config.rs +++ b/sled-agent/src/config.rs @@ -12,10 +12,10 @@ use illumos_utils::dladm::Dladm; use illumos_utils::dladm::FindPhysicalLinkError; use illumos_utils::dladm::PhysicalLink; use illumos_utils::dladm::CHELSIO_LINK_PREFIX; -use illumos_utils::zpool::ZpoolName; use omicron_common::vlan::VlanID; use serde::Deserialize; use sled_hardware::is_gimlet; +use sled_hardware::UnparsedDisk; #[derive(Clone, Debug, Deserialize)] #[serde(rename_all = "lowercase")] @@ -65,8 +65,12 @@ pub struct Config { pub swap_device_size_gb: Option, /// Optional VLAN ID to be used for tagging guest VNICs. pub vlan: Option, - /// Optional list of zpools to be used as "discovered disks". - pub zpools: Option>, + /// Optional list of virtual devices to be used as "discovered disks". + pub vdevs: Option>, + /// Optional list of real devices to be injected as observed disks during + /// device polling. 
+ #[serde(default)] + pub nongimlet_observed_disks: Option>, /// Optionally skip waiting for time synchronization pub skip_timesync: Option, diff --git a/sled-agent/src/dump_setup.rs b/sled-agent/src/dump_setup.rs index bdbc008ccb..02d3d41dd7 100644 --- a/sled-agent/src/dump_setup.rs +++ b/sled-agent/src/dump_setup.rs @@ -82,6 +82,7 @@ //! rotated log files having the same modified time to the second), the //! number is incremented by 1 until no conflict remains. +use async_trait::async_trait; use camino::Utf8PathBuf; use derive_more::{AsRef, From}; use illumos_utils::coreadm::{CoreAdm, CoreFileOption}; @@ -89,17 +90,16 @@ use illumos_utils::dumpadm::{DumpAdm, DumpContentType}; use illumos_utils::zone::ZONE_PREFIX; use illumos_utils::zpool::{ZpoolHealth, ZpoolName}; use illumos_utils::ExecutionError; -use omicron_common::disk::DiskIdentity; use sled_hardware::DiskVariant; +use sled_storage::config::MountConfig; use sled_storage::dataset::{CRASH_DATASET, DUMP_DATASET}; use sled_storage::disk::Disk; -use sled_storage::pool::Pool; use slog::Logger; -use std::collections::{BTreeMap, HashSet}; +use std::collections::HashSet; use std::ffi::OsString; use std::path::{Path, PathBuf}; -use std::sync::{Arc, Weak}; use std::time::{Duration, SystemTime, SystemTimeError, UNIX_EPOCH}; +use tokio::sync::mpsc::Receiver; use zone::{Zone, ZoneError}; const ZFS_PROP_USED: &str = "used"; @@ -118,38 +118,66 @@ struct DebugDataset(Utf8PathBuf); #[derive(AsRef, Clone, Debug, Eq, From, Hash, Ord, PartialEq, PartialOrd)] struct CoreDataset(Utf8PathBuf); -#[derive(AsRef, Clone, From)] -pub(super) struct CoreZpool(pub ZpoolName); -#[derive(AsRef, Clone, From)] -pub(super) struct DebugZpool(pub ZpoolName); +#[derive(AsRef, Clone, Debug, From)] +pub(super) struct CoreZpool { + mount_config: MountConfig, + name: ZpoolName, +} + +#[derive(AsRef, Clone, Debug, From)] +pub(super) struct DebugZpool { + mount_config: MountConfig, + name: ZpoolName, +} impl GetMountpoint for DebugZpool { type NewType = DebugDataset; const MOUNTPOINT: &'static str = DUMP_DATASET; + fn mount_config(&self) -> &MountConfig { + &self.mount_config + } } impl GetMountpoint for CoreZpool { type NewType = CoreDataset; const MOUNTPOINT: &'static str = CRASH_DATASET; + fn mount_config(&self) -> &MountConfig { + &self.mount_config + } } // only want to access these directories after they're mounted! trait GetMountpoint: AsRef { type NewType: From; const MOUNTPOINT: &'static str; + + fn mount_config(&self) -> &MountConfig; + fn mountpoint( &self, invoker: &dyn ZfsInvoker, ) -> Result, ZfsGetError> { if invoker.zfs_get_prop(&self.as_ref().to_string(), "mounted")? 
== "yes" { - Ok(Some(Self::NewType::from( - invoker.mountpoint(self.as_ref(), Self::MOUNTPOINT), - ))) + Ok(Some(Self::NewType::from(invoker.mountpoint( + self.mount_config(), + self.as_ref(), + Self::MOUNTPOINT, + )))) } else { Ok(None) } } } + +#[derive(Debug)] +enum DumpSetupCmd { + UpdateDumpdevSetup { + dump_slices: Vec, + debug_datasets: Vec, + core_datasets: Vec, + }, +} + struct DumpSetupWorker { core_dataset_names: Vec, debug_dataset_names: Vec, @@ -165,43 +193,45 @@ struct DumpSetupWorker { savecored_slices: HashSet, log: Logger, + rx: Receiver, coredumpadm_invoker: Box, zfs_invoker: Box, zone_invoker: Box, } pub struct DumpSetup { - worker: Arc>, - _poller: std::thread::JoinHandle<()>, + tx: tokio::sync::mpsc::Sender, + mount_config: MountConfig, + _poller: tokio::task::JoinHandle<()>, log: Logger, } impl DumpSetup { - pub fn new(log: &Logger) -> Self { - let worker = Arc::new(std::sync::Mutex::new(DumpSetupWorker::new( + pub fn new(log: &Logger, mount_config: MountConfig) -> Self { + let (tx, rx) = tokio::sync::mpsc::channel(16); + let worker = DumpSetupWorker::new( Box::new(RealCoreDumpAdm {}), Box::new(RealZfs {}), Box::new(RealZone {}), log.new(o!("component" => "DumpSetup-worker")), - ))); - let worker_weak = Arc::downgrade(&worker); - let log_poll = log.new(o!("component" => "DumpSetup-archival")); - let _poller = std::thread::spawn(move || { - Self::poll_file_archival(worker_weak, log_poll) - }); + rx, + ); + let _poller = + tokio::spawn(async move { worker.poll_file_archival().await }); let log = log.new(o!("component" => "DumpSetup")); - Self { worker, _poller, log } + Self { tx, mount_config, _poller, log } } pub(crate) async fn update_dumpdev_setup( &self, - disks: &BTreeMap, + disks: impl Iterator, ) { let log = &self.log; let mut m2_dump_slices = Vec::new(); let mut u2_debug_datasets = Vec::new(); let mut m2_core_datasets = Vec::new(); - for (_id, (disk, _)) in disks.iter() { + let mount_config = self.mount_config.clone(); + for disk in disks { if disk.is_synthetic() { // We only setup dump devices on real disks continue; @@ -222,8 +252,10 @@ impl DumpSetup { illumos_utils::zpool::Zpool::get_info(&name.to_string()) { if info.health() == ZpoolHealth::Online { - m2_core_datasets - .push(CoreZpool::from(name.clone())); + m2_core_datasets.push(CoreZpool { + mount_config: mount_config.clone(), + name: name.clone(), + }); } else { warn!(log, "Zpool {name:?} not online, won't attempt to save process core dumps there"); } @@ -235,8 +267,10 @@ impl DumpSetup { illumos_utils::zpool::Zpool::get_info(&name.to_string()) { if info.health() == ZpoolHealth::Online { - u2_debug_datasets - .push(DebugZpool::from(name.clone())); + u2_debug_datasets.push(DebugZpool { + mount_config: mount_config.clone(), + name: name.clone(), + }); } else { warn!(log, "Zpool {name:?} not online, won't attempt to save kernel core dumps there"); } @@ -245,55 +279,16 @@ impl DumpSetup { } } - let savecore_lock = self.worker.clone(); - let log_tmp = log.new(o!("component" => "DumpSetup-mutex")); - tokio::task::spawn_blocking(move || match savecore_lock.lock() { - Ok(mut guard) => { - guard.update_disk_loadout( - m2_dump_slices, - u2_debug_datasets, - m2_core_datasets, - ); - } - Err(err) => { - error!(log_tmp, "DumpSetup mutex poisoned: {err:?}"); - } - }); - } - - fn poll_file_archival( - worker: Weak>, - log: Logger, - ) { - info!(log, "DumpSetup poll loop started."); - loop { - if let Some(mutex) = worker.upgrade() { - match mutex.lock() { - Ok(mut guard) => { - guard.reevaluate_choices(); - if let 
Err(err) = guard.archive_files() { - error!( - log, - "Failed to archive debug/dump files: {err:?}" - ); - } - } - Err(err) => { - error!( - log, - "DumpSetup mutex poisoned in poll thread: {err:?}" - ); - break; - } - } - } else { - info!( - log, - "DumpSetup weak pointer dropped, leaving poll loop." - ); - break; - } - std::thread::sleep(ARCHIVAL_INTERVAL); + if let Err(err) = self + .tx + .send(DumpSetupCmd::UpdateDumpdevSetup { + dump_slices: m2_dump_slices, + debug_datasets: u2_debug_datasets, + core_datasets: m2_core_datasets, + }) + .await + { + error!(log, "DumpSetup channel closed: {:?}", err.0); } } } @@ -308,9 +303,10 @@ enum ZfsGetError { Parse(#[from] std::num::ParseIntError), } +#[async_trait] trait CoreDumpAdmInvoker { fn coreadm(&self, core_dir: &Utf8PathBuf) -> Result<(), ExecutionError>; - fn dumpadm( + async fn dumpadm( &self, dump_slice: &Utf8PathBuf, savecore_dir: Option<&Utf8PathBuf>, @@ -349,19 +345,22 @@ trait ZfsInvoker { fn mountpoint( &self, + mount_config: &MountConfig, zpool: &ZpoolName, mountpoint: &'static str, ) -> Utf8PathBuf; } +#[async_trait] trait ZoneInvoker { - fn get_zones(&self) -> Result, ArchiveLogsError>; + async fn get_zones(&self) -> Result, ArchiveLogsError>; } struct RealCoreDumpAdm {} struct RealZfs {} struct RealZone {} +#[async_trait] impl CoreDumpAdmInvoker for RealCoreDumpAdm { fn coreadm(&self, core_dir: &Utf8PathBuf) -> Result<(), ExecutionError> { let mut cmd = CoreAdm::new(); @@ -390,7 +389,7 @@ impl CoreDumpAdmInvoker for RealCoreDumpAdm { // function also invokes `savecore(8)` to save it into that directory. // On success, returns Ok(Some(stdout)) if `savecore(8)` was invoked, or // Ok(None) if it wasn't. - fn dumpadm( + async fn dumpadm( &self, dump_slice: &Utf8PathBuf, savecore_dir: Option<&Utf8PathBuf>, @@ -403,7 +402,7 @@ impl CoreDumpAdmInvoker for RealCoreDumpAdm { // which is in the ramdisk pool), because dumpadm refuses to do what // we ask otherwise. let tmp_crash = "/tmp/crash"; - std::fs::create_dir_all(tmp_crash).map_err(|err| { + tokio::fs::create_dir_all(tmp_crash).await.map_err(|err| { ExecutionError::ExecutionStart { command: format!("mkdir {tmp_crash:?}"), err, @@ -433,7 +432,7 @@ impl CoreDumpAdmInvoker for RealCoreDumpAdm { if savecore_dir.is_some() { // and does the dump slice have one to save off if let Ok(true) = - illumos_utils::dumpadm::dump_flag_is_valid(dump_slice) + illumos_utils::dumpadm::dump_flag_is_valid(dump_slice).await { return illumos_utils::dumpadm::SaveCore.execute(); } @@ -458,16 +457,19 @@ impl ZfsInvoker for RealZfs { fn mountpoint( &self, + mount_config: &MountConfig, zpool: &ZpoolName, mountpoint: &'static str, ) -> Utf8PathBuf { - zpool.dataset_mountpoint(mountpoint) + zpool.dataset_mountpoint(&mount_config.root, mountpoint) } } +#[async_trait] impl ZoneInvoker for RealZone { - fn get_zones(&self) -> Result, ArchiveLogsError> { - Ok(zone::Adm::list_blocking()? + async fn get_zones(&self) -> Result, ArchiveLogsError> { + Ok(zone::Adm::list() + .await? 
.into_iter() .filter(|z| z.global() || z.name().starts_with(ZONE_PREFIX)) .collect::>()) @@ -480,6 +482,7 @@ impl DumpSetupWorker { zfs_invoker: Box, zone_invoker: Box, log: Logger, + rx: Receiver, ) -> Self { Self { core_dataset_names: vec![], @@ -492,24 +495,67 @@ impl DumpSetupWorker { known_core_dirs: vec![], savecored_slices: Default::default(), log, + rx, coredumpadm_invoker, zfs_invoker, zone_invoker, } } + async fn poll_file_archival(mut self) { + info!(self.log, "DumpSetup poll loop started."); + loop { + match tokio::time::timeout(ARCHIVAL_INTERVAL, self.rx.recv()).await + { + Ok(Some(DumpSetupCmd::UpdateDumpdevSetup { + dump_slices, + debug_datasets, + core_datasets, + })) => { + self.update_disk_loadout( + dump_slices, + debug_datasets, + core_datasets, + ); + } + Ok(None) => { + warn!( + self.log, + "Control channel closed, no more dump archival!" + ); + break; + } + Err(_elapsed) => { + // no new disks, just pump cores/logs with what we've got + } + } + // regardless of whether we updated disks, + // at least every ARCHIVAL_INTERVAL, + // figure out if we should change our target volumes... + self.reevaluate_choices().await; + // and then do the actual archiving. + if let Err(err) = self.archive_files().await { + error!(self.log, "Failed to archive debug/dump files: {err:?}"); + } + } + } + fn update_disk_loadout( &mut self, dump_slices: Vec, debug_datasets: Vec, core_datasets: Vec, ) { + info!( + self.log, + "Updated view of disks"; + "core_datasets" => %core_datasets.len(), + "debug_datasets" => %debug_datasets.len(), + "dump_slices" => %dump_slices.len(), + ); self.core_dataset_names = core_datasets; self.debug_dataset_names = debug_datasets; - self.known_dump_slices = dump_slices; - - self.reevaluate_choices(); } // only allow mounted zfs datasets into 'known_*_dirs', @@ -529,7 +575,7 @@ impl DumpSetupWorker { .collect(); } - fn reevaluate_choices(&mut self) { + async fn reevaluate_choices(&mut self) { self.update_mounted_dirs(); self.known_dump_slices.sort(); @@ -584,7 +630,7 @@ impl DumpSetupWorker { self.chosen_debug_dir = None; } else { warn!(self.log, "All candidate debug/dump dirs are over usage threshold, removing older archived files"); - if let Err(err) = self.cleanup() { + if let Err(err) = self.cleanup().await { error!(self.log, "Couldn't clean up any debug/dump dirs, may hit dataset quota in {x:?}: {err:?}"); } else { self.chosen_debug_dir = None; @@ -640,7 +686,9 @@ impl DumpSetupWorker { // Let's try to see if it appears to have a kernel dump already match illumos_utils::dumpadm::dump_flag_is_valid( dump_slice.as_ref(), - ) { + ) + .await + { Ok(true) => { debug!(self.log, "Dump slice {dump_slice:?} appears to have a valid header; will attempt to savecore"); } @@ -651,7 +699,9 @@ impl DumpSetupWorker { debug!(self.log, "Dump slice {dump_slice:?} appears to be unused: {err:?}"); } } - if let Ok(saved) = self.dumpadm_and_savecore(&dump_slice) { + if let Ok(saved) = + self.dumpadm_and_savecore(&dump_slice).await + { if let Some(out) = saved { info!(self.log, "Previous dump on slice {dump_slice:?} saved, configured slice as target for new dumps. {out:?}"); } @@ -666,13 +716,16 @@ impl DumpSetupWorker { for dump_slice in &self.known_dump_slices { match illumos_utils::dumpadm::dump_flag_is_valid( dump_slice.as_ref(), - ) { + ) + .await + { Ok(false) => { // Have dumpadm write the config for crash dumps to be // on this slice, at least, until a U.2 comes along. 
match self .coredumpadm_invoker .dumpadm(dump_slice.as_ref(), None) + .await { Ok(_) => { info!(self.log, "Using dump device {dump_slice:?} with no savecore destination (no U.2 debug zvol yet)"); @@ -706,7 +759,7 @@ impl DumpSetupWorker { changed_slice = true; // temporarily changes the system's dump slice so savecore(8) // can update the header in the slice when it finishes... - match self.dumpadm_and_savecore(&dump_slice) { + match self.dumpadm_and_savecore(&dump_slice).await { Ok(saved) => { if let Some(stdout) = &saved { info!( @@ -734,6 +787,7 @@ impl DumpSetupWorker { if let Err(err) = self .coredumpadm_invoker .dumpadm(dump_slice.as_ref(), None) + .await { error!(self.log, "Could not restore dump slice to {dump_slice:?}: {err:?}"); } @@ -742,7 +796,7 @@ impl DumpSetupWorker { } } - fn archive_files(&self) -> std::io::Result<()> { + async fn archive_files(&self) -> tokio::io::Result<()> { if let Some(debug_dir) = &self.chosen_debug_dir { if self.known_core_dirs.is_empty() { info!(self.log, "No core dump locations yet known."); @@ -755,6 +809,7 @@ impl DumpSetupWorker { if let Err(err) = Self::copy_sync_and_remove(&entry.path(), &dest) + .await { error!( self.log, @@ -779,7 +834,7 @@ impl DumpSetupWorker { ); } - if let Err(err) = self.archive_logs() { + if let Err(err) = self.archive_logs().await { if !matches!(err, ArchiveLogsError::NoDebugDirYet) { error!( self.log, @@ -791,32 +846,32 @@ impl DumpSetupWorker { Ok(()) } - fn copy_sync_and_remove( + async fn copy_sync_and_remove( source: impl AsRef, dest: impl AsRef, - ) -> std::io::Result<()> { + ) -> tokio::io::Result<()> { let source = source.as_ref(); let dest = dest.as_ref(); - let mut dest_f = std::fs::File::create(&dest)?; - let mut src_f = std::fs::File::open(&source)?; + let mut dest_f = tokio::fs::File::create(&dest).await?; + let mut src_f = tokio::fs::File::open(&source).await?; - std::io::copy(&mut src_f, &mut dest_f)?; + tokio::io::copy(&mut src_f, &mut dest_f).await?; - dest_f.sync_all()?; + dest_f.sync_all().await?; drop(src_f); drop(dest_f); - std::fs::remove_file(source)?; + tokio::fs::remove_file(source).await?; Ok(()) } - fn archive_logs(&self) -> Result<(), ArchiveLogsError> { + async fn archive_logs(&self) -> Result<(), ArchiveLogsError> { let debug_dir = self .chosen_debug_dir .as_ref() .ok_or(ArchiveLogsError::NoDebugDirYet)?; - let oxz_zones = self.zone_invoker.get_zones()?; + let oxz_zones = self.zone_invoker.get_zones().await?; for zone in oxz_zones { let logdir = if zone.global() { PathBuf::from("/var/svc/log") @@ -824,12 +879,12 @@ impl DumpSetupWorker { zone.path().join("root/var/svc/log") }; let zone_name = zone.name(); - self.archive_logs_inner(debug_dir, logdir, zone_name)?; + self.archive_logs_inner(debug_dir, logdir, zone_name).await?; } Ok(()) } - fn archive_logs_inner( + async fn archive_logs_inner( &self, debug_dir: &DebugDataset, logdir: PathBuf, @@ -848,7 +903,7 @@ impl DumpSetupWorker { } let dest_dir = debug_dir.as_ref().join(zone_name).into_std_path_buf(); if !rotated_log_files.is_empty() { - std::fs::create_dir_all(&dest_dir)?; + tokio::fs::create_dir_all(&dest_dir).await?; let count = rotated_log_files.len(); info!( self.log, @@ -878,7 +933,7 @@ impl DumpSetupWorker { break; } } - if let Err(err) = Self::copy_sync_and_remove(&entry, dest) { + if let Err(err) = Self::copy_sync_and_remove(&entry, dest).await { warn!(self.log, "Failed to archive {entry:?}: {err:?}"); } } @@ -894,7 +949,7 @@ impl DumpSetupWorker { // for savecore to behave the way we want (i.e. 
clear the flag // after succeeding), we could hypothetically miss a dump if // the kernel crashes again while savecore is still running. - fn dumpadm_and_savecore( + async fn dumpadm_and_savecore( &mut self, dump_slice: &DumpSlicePath, ) -> Result, ExecutionError> { @@ -906,6 +961,7 @@ impl DumpSetupWorker { match self .coredumpadm_invoker .dumpadm(dump_slice.as_ref(), Some(&savecore_dir)) + .await { Ok(saved) => { self.savecored_slices.insert(dump_slice.clone()); @@ -915,10 +971,10 @@ impl DumpSetupWorker { } } - fn cleanup(&self) -> Result<(), CleanupError> { + async fn cleanup(&self) -> Result<(), CleanupError> { let mut dir_info = Vec::new(); for dir in &self.known_debug_dirs { - match self.scope_dir_for_cleanup(dir) { + match self.scope_dir_for_cleanup(dir).await { Ok(info) => { dir_info.push((info, dir)); } @@ -942,7 +998,7 @@ impl DumpSetupWorker { // the i/o error *may* be an issue with the underlying disk, so // we continue to the dataset with the next-oldest average age // of files-to-delete in the sorted list. - if let Err(err) = std::fs::remove_file(&path) { + if let Err(err) = tokio::fs::remove_file(&path).await { error!(self.log, "Couldn't delete {path:?} from debug dataset, skipping {dir:?}. {err:?}"); continue 'outer; } @@ -955,7 +1011,7 @@ impl DumpSetupWorker { Ok(()) } - fn scope_dir_for_cleanup( + async fn scope_dir_for_cleanup( &self, debug_dir: &DebugDataset, ) -> Result { @@ -974,7 +1030,7 @@ impl DumpSetupWorker { for path in glob::glob(debug_dir.as_ref().join("**/*").as_str())?.flatten() { - let meta = std::fs::metadata(&path)?; + let meta = tokio::fs::metadata(&path).await?; // we need this to be a Duration rather than SystemTime so we can // do math to it later. let time = meta.modified()?.duration_since(UNIX_EPOCH)?; @@ -1008,7 +1064,7 @@ impl DumpSetupWorker { #[derive(thiserror::Error, Debug)] pub enum ArchiveLogsError { #[error("I/O error: {0}")] - IoError(#[from] std::io::Error), + IoError(#[from] tokio::io::Error), #[error("Error calling zoneadm: {0}")] Zoneadm(#[from] ZoneError), #[error("Non-UTF8 zone path for zone {0}")] @@ -1028,7 +1084,7 @@ enum CleanupError { #[error("Failed to query ZFS properties: {0}")] ZfsError(#[from] ZfsGetError), #[error("I/O error: {0}")] - IoError(#[from] std::io::Error), + IoError(#[from] tokio::io::Error), #[error("Glob pattern invalid: {0}")] Glob(#[from] glob::PatternError), #[error("A file's observed modified time was before the Unix epoch: {0}")] @@ -1050,9 +1106,9 @@ mod tests { }; use sled_storage::dataset::{CRASH_DATASET, DUMP_DATASET}; use std::collections::HashMap; - use std::io::Write; use std::str::FromStr; use tempfile::TempDir; + use tokio::io::AsyncWriteExt; impl Clone for ZfsGetError { fn clone(&self) -> Self { @@ -1078,6 +1134,7 @@ mod tests { pub zones: Vec, } + #[async_trait] impl CoreDumpAdmInvoker for FakeCoreDumpAdm { fn coreadm( &self, @@ -1086,7 +1143,7 @@ mod tests { Ok(()) } - fn dumpadm( + async fn dumpadm( &self, _dump_slice: &Utf8PathBuf, _savecore_dir: Option<&Utf8PathBuf>, @@ -1120,6 +1177,7 @@ mod tests { fn mountpoint( &self, + _mount_config: &MountConfig, zpool: &ZpoolName, mountpoint: &'static str, ) -> Utf8PathBuf { @@ -1142,14 +1200,15 @@ mod tests { .join(mountpoint) } } + #[async_trait] impl ZoneInvoker for FakeZone { - fn get_zones(&self) -> Result, ArchiveLogsError> { + async fn get_zones(&self) -> Result, ArchiveLogsError> { Ok(self.zones.clone()) } } - #[test] - fn test_does_not_configure_coreadm_when_no_crash_dataset_mounted() { + #[tokio::test] + async fn 
test_does_not_configure_coreadm_when_no_crash_dataset_mounted() { let logctx = omicron_test_utils::dev::test_setup_log( "test_does_not_configure_coreadm_when_no_crash_dataset_mounted", ); @@ -1167,22 +1226,27 @@ mod tests { }), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); // nothing when no disks worker.update_disk_loadout(vec![], vec![], vec![]); + worker.reevaluate_choices().await; assert_eq!(worker.chosen_core_dir, None); // nothing when only a disk that's not ready - let non_mounted_zpool = - CoreZpool(ZpoolName::from_str(NOT_MOUNTED_INTERNAL).unwrap()); + let non_mounted_zpool = CoreZpool { + mount_config: MountConfig::default(), + name: ZpoolName::from_str(NOT_MOUNTED_INTERNAL).unwrap(), + }; worker.update_disk_loadout(vec![], vec![], vec![non_mounted_zpool]); + worker.reevaluate_choices().await; assert_eq!(worker.chosen_core_dir, None); logctx.cleanup_successful(); } - #[test] - fn test_configures_coreadm_only_when_crash_dataset_mounted() { + #[tokio::test] + async fn test_configures_coreadm_only_when_crash_dataset_mounted() { let logctx = omicron_test_utils::dev::test_setup_log( "test_configures_coreadm_only_when_crash_dataset_mounted", ); @@ -1191,11 +1255,18 @@ mod tests { const MOUNTED_INTERNAL: &str = "oxi_474e554e-6174-616c-6965-4e677579656e"; const ERROR_INTERNAL: &str = "oxi_4861636b-2054-6865-2050-6c616e657421"; - let mounted_zpool = - CoreZpool(ZpoolName::from_str(MOUNTED_INTERNAL).unwrap()); - let non_mounted_zpool = - CoreZpool(ZpoolName::from_str(NOT_MOUNTED_INTERNAL).unwrap()); - let err_zpool = CoreZpool(ZpoolName::from_str(ERROR_INTERNAL).unwrap()); + let mounted_zpool = CoreZpool { + mount_config: MountConfig::default(), + name: ZpoolName::from_str(MOUNTED_INTERNAL).unwrap(), + }; + let non_mounted_zpool = CoreZpool { + mount_config: MountConfig::default(), + name: ZpoolName::from_str(NOT_MOUNTED_INTERNAL).unwrap(), + }; + let err_zpool = CoreZpool { + mount_config: MountConfig::default(), + name: ZpoolName::from_str(ERROR_INTERNAL).unwrap(), + }; const ZPOOL_MNT: &str = "/path/to/internal/zpool"; let mut worker = DumpSetupWorker::new( Box::::default(), @@ -1231,6 +1302,7 @@ mod tests { }), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); // something when there's one that's ready! 
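The dump-setup rework above replaces the `std::thread` poller that locked a shared `Mutex<DumpSetupWorker>` with a tokio task that owns the worker state outright and is fed through an mpsc channel, using a receive-with-timeout so re-evaluation and archival still run at least once per interval even when no new disk view arrives. A rough sketch of that loop shape, assuming only the tokio runtime; `Cmd`, `Worker`, and the 300-second interval are placeholders rather than the real `DumpSetupCmd`/`ARCHIVAL_INTERVAL`:

```rust
use std::time::Duration;
use tokio::sync::mpsc;
use tokio::time::timeout;

// Placeholder command; the real worker carries dump slices and datasets.
#[derive(Debug)]
enum Cmd {
    UpdateDisks(Vec<String>),
}

struct Worker {
    rx: mpsc::Receiver<Cmd>,
    disks: Vec<String>,
}

impl Worker {
    async fn run(mut self) {
        // Illustrative interval standing in for ARCHIVAL_INTERVAL.
        const INTERVAL: Duration = Duration::from_secs(300);
        loop {
            match timeout(INTERVAL, self.rx.recv()).await {
                // A new view of the disks arrived: record it.
                Ok(Some(Cmd::UpdateDisks(disks))) => self.disks = disks,
                // All senders dropped: shut the task down.
                Ok(None) => break,
                // No command within the interval: fall through to periodic work.
                Err(_elapsed) => {}
            }
            // Whether or not anything changed, re-evaluate targets and archive
            // pending files at least once per INTERVAL.
            self.reevaluate_and_archive().await;
        }
    }

    async fn reevaluate_and_archive(&mut self) {
        println!("evaluating {} disks", self.disks.len());
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(16);
    let handle = tokio::spawn(Worker { rx, disks: Vec::new() }.run());
    tx.send(Cmd::UpdateDisks(vec!["oxi_example".into()])).await.unwrap();
    drop(tx); // closing the channel lets the worker exit
    handle.await.unwrap();
}
```

Because the task owns its state, there is no poisoned-mutex path left to handle; dropping the sender is what ends the worker, which is how the tests above drive it deterministically.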
@@ -1239,8 +1311,9 @@ mod tests { vec![], vec![non_mounted_zpool.clone(), mounted_zpool], ); + worker.reevaluate_choices().await; assert_eq!( - worker.chosen_core_dir.as_ref().unwrap().0, + worker.chosen_core_dir.as_ref().expect("core dir wasn't chosen").0, Utf8PathBuf::from(ZPOOL_MNT).join(CRASH_DATASET) ); @@ -1250,34 +1323,35 @@ mod tests { vec![], vec![non_mounted_zpool, err_zpool], ); + worker.reevaluate_choices().await; assert_eq!(worker.chosen_core_dir, None); logctx.cleanup_successful(); } // we make these so illumos_utils::dumpadm::dump_flag_is_valid returns what we want - fn populate_tempdir_with_fake_dumps( + async fn populate_tempdir_with_fake_dumps( tempdir: &TempDir, ) -> (DumpSlicePath, DumpSlicePath) { let occupied = DumpSlicePath( Utf8PathBuf::from_path_buf(tempdir.path().join("occupied.bin")) .unwrap(), ); - let mut f = std::fs::File::create(occupied.as_ref()).unwrap(); - f.write_all(&[0u8; DUMP_OFFSET as usize]).unwrap(); - f.write_all(&DUMP_MAGIC.to_le_bytes()).unwrap(); - f.write_all(&DUMP_VERSION.to_le_bytes()).unwrap(); - f.write_all(&DF_VALID.to_le_bytes()).unwrap(); + let mut f = tokio::fs::File::create(occupied.as_ref()).await.unwrap(); + f.write_all(&[0u8; DUMP_OFFSET as usize]).await.unwrap(); + f.write_all(&DUMP_MAGIC.to_le_bytes()).await.unwrap(); + f.write_all(&DUMP_VERSION.to_le_bytes()).await.unwrap(); + f.write_all(&DF_VALID.to_le_bytes()).await.unwrap(); drop(f); let vacant = DumpSlicePath( Utf8PathBuf::from_path_buf(tempdir.path().join("vacant.bin")) .unwrap(), ); - let mut f = std::fs::File::create(vacant.as_ref()).unwrap(); - f.write_all(&[0u8; DUMP_OFFSET as usize]).unwrap(); - f.write_all(&DUMP_MAGIC.to_le_bytes()).unwrap(); - f.write_all(&DUMP_VERSION.to_le_bytes()).unwrap(); - f.write_all(&0u32.to_le_bytes()).unwrap(); + let mut f = tokio::fs::File::create(vacant.as_ref()).await.unwrap(); + f.write_all(&[0u8; DUMP_OFFSET as usize]).await.unwrap(); + f.write_all(&DUMP_MAGIC.to_le_bytes()).await.unwrap(); + f.write_all(&DUMP_VERSION.to_le_bytes()).await.unwrap(); + f.write_all(&0u32.to_le_bytes()).await.unwrap(); drop(f); (occupied, vacant) @@ -1285,8 +1359,8 @@ mod tests { // if we only have two filled dump slices and nowhere to evacuate them, // don't configure a dump slice at all. - #[test] - fn test_savecore_and_dumpadm_not_called_when_occupied_and_no_dir() { + #[tokio::test] + async fn test_savecore_and_dumpadm_not_called_when_occupied_and_no_dir() { let logctx = omicron_test_utils::dev::test_setup_log( "test_savecore_and_dumpadm_not_called_when_occupied_and_no_dir", ); @@ -1295,15 +1369,17 @@ mod tests { Box::::default(), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); let tempdir = TempDir::new().unwrap(); - let (occupied, _) = populate_tempdir_with_fake_dumps(&tempdir); + let (occupied, _) = populate_tempdir_with_fake_dumps(&tempdir).await; worker.update_disk_loadout( vec![occupied.clone(), occupied], vec![], vec![], ); + worker.reevaluate_choices().await; assert!(worker.chosen_dump_slice.is_none()); logctx.cleanup_successful(); } @@ -1311,8 +1387,8 @@ mod tests { // if we have one dump slice that's free and one that's full, // and nowhere to savecore the full one, // we should always call dumpadm with the free one. 
- #[test] - fn test_dumpadm_called_when_vacant_slice_but_no_dir() { + #[tokio::test] + async fn test_dumpadm_called_when_vacant_slice_but_no_dir() { let logctx = omicron_test_utils::dev::test_setup_log( "test_dumpadm_called_when_vacant_slice_but_no_dir", ); @@ -1321,14 +1397,17 @@ mod tests { Box::::default(), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); let tempdir = TempDir::new().unwrap(); - let (occupied, vacant) = populate_tempdir_with_fake_dumps(&tempdir); + let (occupied, vacant) = + populate_tempdir_with_fake_dumps(&tempdir).await; worker.update_disk_loadout( vec![occupied, vacant.clone()], vec![], vec![], ); + worker.reevaluate_choices().await; assert_eq!(worker.chosen_dump_slice.as_ref(), Some(&vacant)); logctx.cleanup_successful(); } @@ -1336,8 +1415,8 @@ mod tests { // if we have two occupied dump slices, // but we also have somewhere to unload them, // call dumpadm and savecore. - #[test] - fn test_savecore_and_dumpadm_invoked_when_slices_occupied_and_dir_is_available( + #[tokio::test] + async fn test_savecore_and_dumpadm_invoked_when_slices_occupied_and_dir_is_available( ) { let logctx = omicron_test_utils::dev::test_setup_log("test_savecore_and_dumpadm_invoked_when_slices_occupied_and_dir_is_available"); const MOUNTED_EXTERNAL: &str = @@ -1360,17 +1439,22 @@ mod tests { }), Box::::default(), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); let tempdir = TempDir::new().unwrap(); - let (occupied, _) = populate_tempdir_with_fake_dumps(&tempdir); + let (occupied, _) = populate_tempdir_with_fake_dumps(&tempdir).await; - let mounted_zpool = - DebugZpool(ZpoolName::from_str(MOUNTED_EXTERNAL).unwrap()); + let mounted_zpool = DebugZpool { + mount_config: MountConfig::default(), + name: ZpoolName::from_str(MOUNTED_EXTERNAL).unwrap(), + }; worker.update_disk_loadout( vec![occupied.clone()], vec![mounted_zpool], vec![], ); + worker.reevaluate_choices().await; + assert_eq!(worker.chosen_dump_slice.as_ref(), Some(&occupied)); assert_eq!( worker.chosen_debug_dir.unwrap().0, @@ -1379,8 +1463,8 @@ mod tests { logctx.cleanup_successful(); } - #[test] - fn test_archives_rotated_logs_and_cores() { + #[tokio::test] + async fn test_archives_rotated_logs_and_cores() { let logctx = omicron_test_utils::dev::test_setup_log( "test_archives_rotated_logs_and_cores", ); @@ -1428,36 +1512,44 @@ mod tests { }), Box::new(FakeZone { zones: vec![zone.clone()] }), logctx.log.clone(), + tokio::sync::mpsc::channel(1).1, ); - std::fs::create_dir_all(&core_dir).unwrap(); - std::fs::create_dir_all(&debug_dir).unwrap(); - std::fs::create_dir_all(&zone_logs).unwrap(); + tokio::fs::create_dir_all(&core_dir).await.unwrap(); + tokio::fs::create_dir_all(&debug_dir).await.unwrap(); + tokio::fs::create_dir_all(&zone_logs).await.unwrap(); const LOG_NAME: &'static str = "foo.log.0"; - writeln!( - std::fs::File::create(zone_logs.join(LOG_NAME)).unwrap(), - "hello" - ) - .unwrap(); + tokio::fs::File::create(zone_logs.join(LOG_NAME)) + .await + .expect("creating fake log") + .write_all(b"hello") + .await + .expect("writing fake log"); const CORE_NAME: &str = "core.myzone.myexe.123.1690540950"; - writeln!( - std::fs::File::create(core_dir.join(CORE_NAME)).unwrap(), - "crunch" - ) - .unwrap(); - - let mounted_core_zpool = - CoreZpool(ZpoolName::from_str(MOUNTED_INTERNAL).unwrap()); - let mounted_debug_zpool = - DebugZpool(ZpoolName::from_str(MOUNTED_EXTERNAL).unwrap()); + tokio::fs::File::create(core_dir.join(CORE_NAME)) + .await + .expect("creating fake core") + .write_all(b"crunch") + 
.await + .expect("writing fake core"); + + let mounted_core_zpool = CoreZpool { + mount_config: MountConfig::default(), + name: ZpoolName::from_str(MOUNTED_INTERNAL).unwrap(), + }; + let mounted_debug_zpool = DebugZpool { + mount_config: MountConfig::default(), + name: ZpoolName::from_str(MOUNTED_EXTERNAL).unwrap(), + }; worker.update_disk_loadout( vec![], vec![mounted_debug_zpool], vec![mounted_core_zpool], ); - worker.archive_files().unwrap(); + worker.reevaluate_choices().await; + worker.archive_files().await.unwrap(); // it'll be renamed to use an epoch timestamp instead of .0 let log_glob = diff --git a/sled-agent/src/fakes/nexus.rs b/sled-agent/src/fakes/nexus.rs index de37b77bcd..246ef07b60 100644 --- a/sled-agent/src/fakes/nexus.rs +++ b/sled-agent/src/fakes/nexus.rs @@ -18,6 +18,7 @@ use omicron_common::api::external::Error; use omicron_common::api::internal::nexus::{ SledInstanceState, UpdateArtifactId, }; +use omicron_uuid_kinds::OmicronZoneUuid; use schemars::JsonSchema; use serde::Deserialize; use uuid::Uuid; @@ -178,7 +179,7 @@ pub async fn start_dns_server( }; let nexus_zone = dns_config_builder - .host_zone(uuid::Uuid::new_v4(), *nexus_addr.ip()) + .host_zone(OmicronZoneUuid::new_v4(), *nexus_addr.ip()) .expect("failed to set up DNS"); dns_config_builder .service_backend_zone( @@ -187,7 +188,8 @@ pub async fn start_dns_server( nexus_addr.port(), ) .expect("failed to set up DNS"); - let dns_config = dns_config_builder.build(); + let dns_config = + dns_config_builder.build_full_config_for_initial_generation(); dns.initialize_with_config(log, &dns_config).await.unwrap(); dns } diff --git a/sled-agent/src/hardware_monitor.rs b/sled-agent/src/hardware_monitor.rs index cbd3134cf0..6dbca89d74 100644 --- a/sled-agent/src/hardware_monitor.rs +++ b/sled-agent/src/hardware_monitor.rs @@ -177,10 +177,27 @@ impl HardwareMonitor { } } HardwareUpdate::DiskAdded(disk) => { - self.storage_manager.upsert_disk(disk.into()).await; + // We notify the storage manager of the hardware, but do not need to + // wait for the result to be fully processed. + // + // Here and below, we're "dropping a future" rather than + // awaiting it. That's intentional - the hardware monitor + // doesn't care when this work is finished, just when it's + // enqueued. + #[allow(clippy::let_underscore_future)] + let _ = self + .storage_manager + .detected_raw_disk(disk.into()) + .await; } HardwareUpdate::DiskRemoved(disk) => { - self.storage_manager.delete_disk(disk.into()).await; + // We notify the storage manager of the hardware, but do not need to + // wait for the result to be fully processed. + #[allow(clippy::let_underscore_future)] + let _ = self + .storage_manager + .detected_raw_disk_removal(disk.into()) + .await; } }, Err(broadcast::error::RecvError::Lagged(count)) => { @@ -241,17 +258,24 @@ impl HardwareMonitor { } else { None }; + info!( self.log, "Checking current full hardware snapshot"; "underlay_network_info" => ?underlay_network, + "disks" => ?self.hardware_manager.disks(), ); + if self.hardware_manager.is_scrimlet_driver_loaded() { self.activate_switch().await; } else { self.deactivate_switch().await; } - self.storage_manager + // We notify the storage manager of the hardware, but do not need to + // wait for the result to be fully processed. 
+ #[allow(clippy::let_underscore_future)] + let _ = self + .storage_manager .ensure_using_exactly_these_disks( self.hardware_manager.disks().into_iter().map(RawDisk::from), ) diff --git a/sled-agent/src/http_entrypoints.rs b/sled-agent/src/http_entrypoints.rs index bf1102d897..c5cd88619f 100644 --- a/sled-agent/src/http_entrypoints.rs +++ b/sled-agent/src/http_entrypoints.rs @@ -11,8 +11,8 @@ use crate::params::{ BootstoreStatus, CleanupContextUpdate, DiskEnsureBody, InstanceEnsureBody, InstanceExternalIpBody, InstancePutMigrationIdsBody, InstancePutStateBody, InstancePutStateResponse, InstanceUnregisterResponse, Inventory, - OmicronZonesConfig, SledRole, TimeSync, VpcFirewallRulesEnsureBody, - ZoneBundleId, ZoneBundleMetadata, Zpool, + OmicronPhysicalDisksConfig, OmicronZonesConfig, SledRole, TimeSync, + VpcFirewallRulesEnsureBody, ZoneBundleId, ZoneBundleMetadata, Zpool, }; use crate::sled_agent::Error as SledAgentError; use crate::zone_bundle; @@ -25,21 +25,17 @@ use dropshot::{ HttpResponseUpdatedNoContent, Path, Query, RequestContext, StreamingBody, TypedBody, }; -use illumos_utils::opte::params::{ - DeleteVirtualNetworkInterfaceHost, SetVirtualNetworkInterfaceHost, -}; +use illumos_utils::opte::params::VirtualNetworkInterfaceHost; use installinator_common::M2Slot; use omicron_common::api::external::Error; use omicron_common::api::internal::nexus::{ DiskRuntimeState, SledInstanceState, UpdateArtifactId, }; use omicron_common::api::internal::shared::SwitchPorts; -use oximeter::types::ProducerResults; -use oximeter_producer::collect; -use oximeter_producer::ProducerIdPathParams; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use sled_hardware::DiskVariant; +use sled_storage::resources::DisksManagementResult; use std::collections::BTreeMap; use uuid::Uuid; @@ -53,6 +49,7 @@ pub fn api() -> SledApiDescription { api.register(instance_issue_disk_snapshot_request)?; api.register(instance_put_migration_ids)?; api.register(instance_put_state)?; + api.register(instance_get_state)?; api.register(instance_put_external_ip)?; api.register(instance_delete_external_ip)?; api.register(instance_register)?; @@ -60,6 +57,8 @@ pub fn api() -> SledApiDescription { api.register(omicron_zones_get)?; api.register(omicron_zones_put)?; api.register(zones_list)?; + api.register(omicron_physical_disks_get)?; + api.register(omicron_physical_disks_put)?; api.register(zone_bundle_list)?; api.register(zone_bundle_list_all)?; api.register(zone_bundle_create)?; @@ -70,6 +69,7 @@ pub fn api() -> SledApiDescription { api.register(zone_bundle_cleanup_context_update)?; api.register(zone_bundle_cleanup)?; api.register(sled_role_get)?; + api.register(list_v2p)?; api.register(set_v2p)?; api.register(del_v2p)?; api.register(timesync_get)?; @@ -80,7 +80,6 @@ pub fn api() -> SledApiDescription { api.register(read_network_bootstore_config_cache)?; api.register(write_network_bootstore_config)?; api.register(sled_add)?; - api.register(metrics_collect)?; api.register(host_os_write_start)?; api.register(host_os_write_status_get)?; api.register(host_os_write_status_delete)?; @@ -338,6 +337,31 @@ async fn omicron_zones_get( Ok(HttpResponseOk(sa.omicron_zones_list().await?)) } +#[endpoint { + method = PUT, + path = "/omicron-physical-disks", +}] +async fn omicron_physical_disks_put( + rqctx: RequestContext, + body: TypedBody, +) -> Result, HttpError> { + let sa = rqctx.context(); + let body_args = body.into_inner(); + let result = sa.omicron_physical_disks_ensure(body_args).await?; + Ok(HttpResponseOk(result)) +} + 
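The hardware-monitor hunk above explains the pattern used for these storage notifications: the outer `.await` only waits for the request to be enqueued, and the future it returns (which would resolve when the work is fully processed) is intentionally dropped, since the monitor does not care when that happens. Below is a minimal, self-contained sketch of that shape. The `Handle`/`detected_raw_disk` names are stand-ins for illustration, not the real `sled_storage` types; only tokio is assumed.

use tokio::sync::{mpsc, oneshot};

// Stand-in for the storage-manager handle: sending on the mpsc channel is the
// "enqueue" step; the returned oneshot receiver resolves once the worker has
// fully processed the request.
struct Handle {
    tx: mpsc::Sender<(String, oneshot::Sender<()>)>,
}

impl Handle {
    async fn detected_raw_disk(&self, disk: String) -> oneshot::Receiver<()> {
        let (done_tx, done_rx) = oneshot::channel();
        self.tx.send((disk, done_tx)).await.expect("worker is running");
        done_rx
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<(String, oneshot::Sender<()>)>(8);
    // Worker task: processes each request, then signals completion.
    tokio::spawn(async move {
        while let Some((disk, done)) = rx.recv().await {
            println!("managing disk {disk}");
            let _ = done.send(());
        }
    });

    let handle = Handle { tx };
    // The caller only needs the update enqueued, so the completion future
    // (the oneshot receiver) is dropped instead of awaited; the allow mirrors
    // the one added in the diff, since binding a future to `_` otherwise
    // trips that clippy lint.
    #[allow(clippy::let_underscore_future)]
    let _ = handle.detected_raw_disk("u2_under_test".to_string()).await;

    // Give the worker a chance to run before the sketch exits.
    tokio::task::yield_now().await;
}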
+#[endpoint { + method = GET, + path = "/omicron-physical-disks", +}] +async fn omicron_physical_disks_get( + rqctx: RequestContext, +) -> Result, HttpError> { + let sa = rqctx.context(); + Ok(HttpResponseOk(sa.omicron_physical_disks_list().await?)) +} + #[endpoint { method = PUT, path = "/omicron-zones", @@ -449,6 +473,19 @@ async fn instance_put_state( )) } +#[endpoint { + method = GET, + path = "/instances/{instance_id}/state", +}] +async fn instance_get_state( + rqctx: RequestContext, + path_params: Path, +) -> Result, HttpError> { + let sa = rqctx.context(); + let instance_id = path_params.into_inner().instance_id; + Ok(HttpResponseOk(sa.instance_get_state(instance_id).await?)) +} + #[endpoint { method = PUT, path = "/instances/{instance_id}/migration-ids", @@ -614,24 +651,16 @@ async fn vpc_firewall_rules_put( Ok(HttpResponseUpdatedNoContent()) } -/// Path parameters for V2P mapping related requests (sled agent API) -#[allow(dead_code)] -#[derive(Deserialize, JsonSchema)] -struct V2pPathParam { - interface_id: Uuid, -} - /// Create a mapping from a virtual NIC to a physical host // Keep interface_id to maintain parity with the simulated sled agent, which // requires interface_id on the path. #[endpoint { method = PUT, - path = "/v2p/{interface_id}", + path = "/v2p/", }] async fn set_v2p( rqctx: RequestContext, - _path_params: Path, - body: TypedBody, + body: TypedBody, ) -> Result { let sa = rqctx.context(); let body_args = body.into_inner(); @@ -646,12 +675,11 @@ async fn set_v2p( // requires interface_id on the path. #[endpoint { method = DELETE, - path = "/v2p/{interface_id}", + path = "/v2p/", }] async fn del_v2p( rqctx: RequestContext, - _path_params: Path, - body: TypedBody, + body: TypedBody, ) -> Result { let sa = rqctx.context(); let body_args = body.into_inner(); @@ -661,6 +689,22 @@ async fn del_v2p( Ok(HttpResponseUpdatedNoContent()) } +/// List v2p mappings present on sled +// Used by nexus background task +#[endpoint { + method = GET, + path = "/v2p/", +}] +async fn list_v2p( + rqctx: RequestContext, +) -> Result>, HttpError> { + let sa = rqctx.context(); + + let vnics = sa.list_virtual_nics().await.map_err(Error::from)?; + + Ok(HttpResponseOk(vnics)) +} + #[endpoint { method = GET, path = "/timesync", @@ -786,20 +830,6 @@ async fn sled_add( Ok(HttpResponseUpdatedNoContent()) } -/// Collect oximeter samples from the sled agent. -#[endpoint { - method = GET, - path = "/metrics/collect/{producer_id}", -}] -async fn metrics_collect( - request_context: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let sa = request_context.context(); - let producer_id = path_params.into_inner().producer_id; - collect(&sa.metrics_registry(), producer_id).await -} - #[derive(Clone, Copy, Debug, Deserialize, JsonSchema, Serialize)] pub struct BootDiskPathParams { pub boot_disk: M2Slot, @@ -839,8 +869,8 @@ async fn host_os_write_start( // Find our corresponding disk. let maybe_disk_path = - sa.storage().get_latest_resources().await.disks().values().find_map( - |(disk, _pool)| { + sa.storage().get_latest_disks().await.iter_managed().find_map( + |(_identity, disk)| { // Synthetic disks panic if asked for their `slot()`, so filter // them out first; additionally, filter out any non-M2 disks. 
if disk.is_synthetic() || disk.variant() != DiskVariant::M2 { diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs index 7b76466964..271eceb556 100644 --- a/sled-agent/src/instance.rs +++ b/sled-agent/src/instance.rs @@ -405,7 +405,7 @@ impl InstanceRunner { .map_err(|_| Error::FailedSendClientClosed) }, Some(CurrentState{ tx }) => { - tx.send(self.current_state().await) + tx.send(self.current_state()) .map_err(|_| Error::FailedSendClientClosed) }, Some(PutState{ state, tx }) => { @@ -1176,7 +1176,7 @@ impl InstanceRunner { } } - async fn current_state(&self) -> SledInstanceState { + fn current_state(&self) -> SledInstanceState { self.state.sled_instance_state() } @@ -1340,7 +1340,7 @@ impl InstanceRunner { let mut rng = rand::rngs::StdRng::from_entropy(); let root = self .storage - .get_latest_resources() + .get_latest_disks() .await .all_u2_mountpoints(ZONE_DATASET) .choose(&mut rng) @@ -1520,17 +1520,15 @@ impl InstanceRunner { } } -#[cfg(test)] +#[cfg(all(test, target_os = "illumos"))] mod tests { use super::*; use crate::fakes::nexus::{FakeNexusServer, ServerContext}; - use crate::nexus::NexusClient; use crate::vmm_reservoir::VmmReservoirManagerHandle; use crate::zone_bundle::CleanupContext; use camino_tempfile::Utf8TempDir; - use dns_server::dns_server::ServerHandle as DnsServerHandle; - use dropshot::test_util::LogContext; - use dropshot::{HandlerTaskMode, HttpServer}; + use dns_server::TransientServer; + use dropshot::HttpServer; use illumos_utils::dladm::MockDladm; use illumos_utils::dladm::__mock_MockDladm::__create_vnic::Context as MockDladmCreateVnicContext; use illumos_utils::dladm::__mock_MockDladm::__delete_vnic::Context as MockDladmDeleteVnicContext; @@ -1539,15 +1537,13 @@ mod tests { use illumos_utils::zone::MockZones; use illumos_utils::zone::__mock_MockZones::__boot::Context as MockZonesBootContext; use illumos_utils::zone::__mock_MockZones::__id::Context as MockZonesIdContext; - use illumos_utils::zpool::ZpoolName; use internal_dns::resolver::Resolver; - use internal_dns::ServiceName; use omicron_common::api::external::{ ByteCount, Generation, Hostname, InstanceCpuCount, InstanceState, }; use omicron_common::api::internal::nexus::InstanceProperties; - use sled_storage::disk::{RawDisk, SyntheticDisk}; - use sled_storage::manager::FakeStorageManager; + use omicron_common::FileKv; + use sled_storage::manager_test_harness::StorageManagerTestHarness; use std::net::Ipv6Addr; use std::str::FromStr; use tokio::sync::watch::Receiver; @@ -1584,26 +1580,42 @@ mod tests { } struct FakeNexusParts { - nexus_client: NexusClient, - nexus_server: HttpServer, + nexus_client: NexusClientWithResolver, + _nexus_server: HttpServer, state_rx: Receiver, + _dns_server: TransientServer, } impl FakeNexusParts { - fn new(logctx: &LogContext) -> Self { + async fn new(log: &Logger) -> Self { let (state_tx, state_rx) = tokio::sync::watch::channel(ReceivedInstanceState::None); - let nexus_server = crate::fakes::nexus::start_test_server( - logctx.log.new(o!("component" => "FakeNexusServer")), + let _nexus_server = crate::fakes::nexus::start_test_server( + log.new(o!("component" => "FakeNexusServer")), Box::new(NexusServer { observed_runtime_state: state_tx }), ); - let nexus_client = NexusClient::new( - &format!("http://{}", nexus_server.local_addr()), - logctx.log.new(o!("component" => "NexusClient")), + + let _dns_server = + crate::fakes::nexus::start_dns_server(&log, &_nexus_server) + .await; + + let resolver = Arc::new( + Resolver::new_from_addrs( + log.clone(), + 
&[_dns_server.dns_server.local_address()], + ) + .unwrap(), ); - Self { nexus_client, nexus_server, state_rx } + let nexus_client = + NexusClientWithResolver::new_from_resolver_with_port( + &log, + resolver, + _nexus_server.local_addr().port(), + ); + + Self { nexus_client, _nexus_server, state_rx, _dns_server } } } @@ -1639,65 +1651,6 @@ mod tests { (boot_ctx, wait_ctx, zone_id_ctx) } - async fn dns_server( - logctx: &LogContext, - nexus_server: &HttpServer, - ) -> (DnsServerHandle, Arc, Utf8TempDir) { - let storage_path = - Utf8TempDir::new().expect("Failed to create temporary directory"); - let config_store = dns_server::storage::Config { - keep_old_generations: 3, - storage_path: storage_path.path().to_owned(), - }; - - let (dns_server, dns_dropshot) = dns_server::start_servers( - logctx.log.new(o!("component" => "DnsServer")), - dns_server::storage::Store::new( - logctx.log.new(o!("component" => "DnsStore")), - &config_store, - ) - .unwrap(), - &dns_server::dns_server::Config { - bind_address: "[::1]:0".parse().unwrap(), - }, - &dropshot::ConfigDropshot { - bind_address: "[::1]:0".parse().unwrap(), - request_body_max_bytes: 8 * 1024, - default_handler_task_mode: HandlerTaskMode::Detached, - }, - ) - .await - .expect("starting DNS server"); - - let dns_dropshot_client = dns_service_client::Client::new( - &format!("http://{}", dns_dropshot.local_addr()), - logctx.log.new(o!("component" => "DnsDropshotClient")), - ); - let mut dns_config = internal_dns::DnsConfigBuilder::new(); - let IpAddr::V6(nexus_ip_addr) = nexus_server.local_addr().ip() else { - panic!("IPv6 address required for nexus_server") - }; - let zone = dns_config.host_zone(Uuid::new_v4(), nexus_ip_addr).unwrap(); - dns_config - .service_backend_zone( - ServiceName::Nexus, - &zone, - nexus_server.local_addr().port(), - ) - .unwrap(); - let dns_config = dns_config.build(); - dns_dropshot_client.dns_config_put(&dns_config).await.unwrap(); - - let resolver = Arc::new( - Resolver::new_from_addrs( - logctx.log.new(o!("component" => "Resolver")), - &[dns_server.local_address()], - ) - .unwrap(), - ); - (dns_server, resolver, storage_path) - } - // note the "mock" here is different from the vnic/zone contexts above. // this is actually running code for a dropshot server from propolis. 
// (might we want a locally-defined fake whose behavior we can control @@ -1736,19 +1689,22 @@ mod tests { (srv, client) } - // make a FakeStorageManager with a "U2" upserted - async fn fake_storage_manager_with_u2() -> StorageHandle { - let (storage_manager, storage_handle) = FakeStorageManager::new(); - tokio::spawn(storage_manager.run()); - let external_zpool_name = ZpoolName::new_external(Uuid::new_v4()); - let external_disk: RawDisk = - SyntheticDisk::new(external_zpool_name, 0).into(); - storage_handle.upsert_disk(external_disk).await; - storage_handle + async fn setup_storage_manager(log: &Logger) -> StorageManagerTestHarness { + let mut harness = StorageManagerTestHarness::new(log).await; + let raw_disks = + harness.add_vdevs(&["u2_under_test.vdev", "m2_helping.vdev"]).await; + harness.handle().key_manager_ready().await; + let config = harness.make_config(1, &raw_disks); + let _ = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await + .expect("Ensuring disks should work after key manager is ready"); + harness } async fn instance_struct( - logctx: &LogContext, + log: &Logger, propolis_addr: SocketAddr, nexus_client_with_resolver: NexusClientWithResolver, storage_handle: StorageHandle, @@ -1763,7 +1719,7 @@ mod tests { fake_instance_initial_state(propolis_id, propolis_addr); let services = fake_instance_manager_services( - logctx, + log, storage_handle, nexus_client_with_resolver, temp_dir, @@ -1775,7 +1731,7 @@ mod tests { }; Instance::new( - logctx.log.new(o!("component" => "Instance")), + log.new(o!("component" => "Instance")), id, propolis_id, ticket, @@ -1797,11 +1753,12 @@ mod tests { hostname: Hostname::from_str("bert").unwrap(), }, nics: vec![], - source_nat: SourceNatConfig { - ip: IpAddr::V6(Ipv6Addr::UNSPECIFIED), - first_port: 0, - last_port: 0, - }, + source_nat: SourceNatConfig::new( + IpAddr::V6(Ipv6Addr::UNSPECIFIED), + 0, + 16383, + ) + .unwrap(), ephemeral_ip: None, floating_ips: vec![], firewall_rules: vec![], @@ -1833,7 +1790,7 @@ mod tests { } fn fake_instance_manager_services( - logctx: &LogContext, + log: &Logger, storage_handle: StorageHandle, nexus_client_with_resolver: NexusClientWithResolver, temp_dir: &String, @@ -1841,13 +1798,13 @@ mod tests { let vnic_allocator = VnicAllocator::new("Foo", Etherstub("mystub".to_string())); let port_manager = PortManager::new( - logctx.log.new(o!("component" => "PortManager")), + log.new(o!("component" => "PortManager")), Ipv6Addr::new(0xfd00, 0x1de, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01), ); let cleanup_context = CleanupContext::default(); let zone_bundler = ZoneBundler::new( - logctx.log.new(o!("component" => "ZoneBundler")), + log.new(o!("component" => "ZoneBundler")), storage_handle.clone(), cleanup_context, ); @@ -1867,27 +1824,24 @@ mod tests { let logctx = omicron_test_utils::dev::test_setup_log( "test_instance_create_events_normal", ); + let log = logctx.log.new(o!(FileKv)); - let (propolis_server, _propolis_client) = - propolis_mock_server(&logctx.log); + let (propolis_server, _propolis_client) = propolis_mock_server(&log); let propolis_addr = propolis_server.local_addr(); // automock'd things used during this test let _mock_vnic_contexts = mock_vnic_contexts(); let _mock_zone_contexts = mock_zone_contexts(); - let FakeNexusParts { nexus_client, nexus_server, mut state_rx } = - FakeNexusParts::new(&logctx); - - let (_dns_server, resolver, _dns_config_dir) = - timeout(TIMEOUT_DURATION, dns_server(&logctx, &nexus_server)) - .await - .expect("timed out making DNS server and Resolver"); - - let 
nexus_client_with_resolver = - NexusClientWithResolver::new_with_client(nexus_client, resolver); + let FakeNexusParts { + nexus_client, + mut state_rx, + _dns_server, + _nexus_server, + } = FakeNexusParts::new(&log).await; - let storage_handle = fake_storage_manager_with_u2().await; + let mut storage_harness = setup_storage_manager(&log).await; + let storage_handle = storage_harness.handle().clone(); let temp_guard = Utf8TempDir::new().unwrap(); let temp_dir = temp_guard.path().to_string(); @@ -1895,9 +1849,9 @@ mod tests { let inst = timeout( TIMEOUT_DURATION, instance_struct( - &logctx, + &log, propolis_addr, - nexus_client_with_resolver, + nexus_client, storage_handle, &temp_dir, ), @@ -1935,6 +1889,7 @@ mod tests { .expect("timed out waiting for InstanceState::Running in FakeNexus") .expect("failed to receive FakeNexus' InstanceState"); + storage_harness.cleanup().await; logctx.cleanup_successful(); } @@ -1944,23 +1899,21 @@ mod tests { let logctx = omicron_test_utils::dev::test_setup_log( "test_instance_create_timeout_while_starting_propolis", ); + let log = logctx.log.new(o!(FileKv)); // automock'd things used during this test let _mock_vnic_contexts = mock_vnic_contexts(); let _mock_zone_contexts = mock_zone_contexts(); - let FakeNexusParts { nexus_client, nexus_server, state_rx } = - FakeNexusParts::new(&logctx); - - let (_dns_server, resolver, _dns_config_dir) = - timeout(TIMEOUT_DURATION, dns_server(&logctx, &nexus_server)) - .await - .expect("timed out making DNS server and Resolver"); - - let nexus_client_with_resolver = - NexusClientWithResolver::new_with_client(nexus_client, resolver); + let FakeNexusParts { + nexus_client, + state_rx, + _dns_server, + _nexus_server, + } = FakeNexusParts::new(&log).await; - let storage_handle = fake_storage_manager_with_u2().await; + let mut storage_harness = setup_storage_manager(&logctx.log).await; + let storage_handle = storage_harness.handle().clone(); let temp_guard = Utf8TempDir::new().unwrap(); let temp_dir = temp_guard.path().to_string(); @@ -1968,10 +1921,10 @@ mod tests { let inst = timeout( TIMEOUT_DURATION, instance_struct( - &logctx, + &log, // we want to test propolis not ever coming up SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 1, 0, 0)), - nexus_client_with_resolver, + nexus_client, storage_handle, &temp_dir, ), @@ -2007,6 +1960,7 @@ mod tests { panic!("Nexus's InstanceState should never have reached running if zone creation timed out"); } + storage_harness.cleanup().await; logctx.cleanup_successful(); } @@ -2015,6 +1969,7 @@ mod tests { let logctx = omicron_test_utils::dev::test_setup_log( "test_instance_create_timeout_while_creating_zone", ); + let log = logctx.log.new(o!(FileKv)); // automock'd things used during this test let _mock_vnic_contexts = mock_vnic_contexts(); @@ -2032,18 +1987,15 @@ mod tests { let zone_id_ctx = MockZones::id_context(); zone_id_ctx.expect().times(..).returning(|_| Ok(Some(1))); - let FakeNexusParts { nexus_client, nexus_server, state_rx } = - FakeNexusParts::new(&logctx); - - let (_dns_server, resolver, _dns_config_dir) = - timeout(TIMEOUT_DURATION, dns_server(&logctx, &nexus_server)) - .await - .expect("timed out making DNS server and Resolver"); - - let nexus_client_with_resolver = - NexusClientWithResolver::new_with_client(nexus_client, resolver); + let FakeNexusParts { + nexus_client, + state_rx, + _dns_server, + _nexus_server, + } = FakeNexusParts::new(&log).await; - let storage_handle = fake_storage_manager_with_u2().await; + let mut storage_harness = 
setup_storage_manager(&logctx.log).await; + let storage_handle = storage_harness.handle().clone(); let temp_guard = Utf8TempDir::new().unwrap(); let temp_dir = temp_guard.path().to_string(); @@ -2051,10 +2003,10 @@ mod tests { let inst = timeout( TIMEOUT_DURATION, instance_struct( - &logctx, + &log, // isn't running because the "zone" never "boots" SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 1, 0, 0)), - nexus_client_with_resolver, + nexus_client, storage_handle, &temp_dir, ), @@ -2090,6 +2042,7 @@ mod tests { panic!("Nexus's InstanceState should never have reached running if zone creation timed out"); } + storage_harness.cleanup().await; logctx.cleanup_successful(); } @@ -2098,23 +2051,21 @@ mod tests { let logctx = omicron_test_utils::dev::test_setup_log( "test_instance_manager_creation", ); + let log = logctx.log.new(o!(FileKv)); // automock'd things used during this test let _mock_vnic_contexts = mock_vnic_contexts(); let _mock_zone_contexts = mock_zone_contexts(); - let storage_handle = fake_storage_manager_with_u2().await; - - let FakeNexusParts { nexus_client, nexus_server, mut state_rx } = - FakeNexusParts::new(&logctx); - - let (_dns_server, resolver, _dns_config_dir) = - timeout(TIMEOUT_DURATION, dns_server(&logctx, &nexus_server)) - .await - .expect("timed out making DNS server and Resolver"); + let mut storage_harness = setup_storage_manager(&logctx.log).await; + let storage_handle = storage_harness.handle().clone(); - let nexus_client_with_resolver = - NexusClientWithResolver::new_with_client(nexus_client, resolver); + let FakeNexusParts { + nexus_client, + mut state_rx, + _dns_server, + _nexus_server, + } = FakeNexusParts::new(&log).await; let temp_guard = Utf8TempDir::new().unwrap(); let temp_dir = temp_guard.path().to_string(); @@ -2127,9 +2078,9 @@ mod tests { zone_bundler, zone_builder_factory, } = fake_instance_manager_services( - &logctx, + &log, storage_handle, - nexus_client_with_resolver, + nexus_client, &temp_dir, ); @@ -2196,6 +2147,7 @@ mod tests { .expect("timed out waiting for InstanceState::Running in FakeNexus") .expect("failed to receive FakeNexus' InstanceState"); + storage_harness.cleanup().await; logctx.cleanup_successful(); } } diff --git a/sled-agent/src/instance_manager.rs b/sled-agent/src/instance_manager.rs index 2c9780b3ce..ee1425f0d7 100644 --- a/sled-agent/src/instance_manager.rs +++ b/sled-agent/src/instance_manager.rs @@ -310,6 +310,19 @@ impl InstanceManager { pub fn reservoir_size(&self) -> ByteCount { self.inner.vmm_reservoir_manager.reservoir_size() } + + pub async fn get_instance_state( + &self, + instance_id: Uuid, + ) -> Result { + let (tx, rx) = oneshot::channel(); + self.inner + .tx + .send(InstanceManagerRequest::GetState { instance_id, tx }) + .await + .map_err(|_| Error::FailedSendInstanceManagerClosed)?; + rx.await? + } } // Most requests that can be sent to the "InstanceManagerRunner" task. 
@@ -365,6 +378,10 @@ enum InstanceManagerRequest { ip: InstanceExternalIpBody, tx: oneshot::Sender>, }, + GetState { + instance_id: Uuid, + tx: oneshot::Sender>, + }, } // Requests that the instance manager stop processing information about a @@ -467,6 +484,14 @@ impl InstanceManagerRunner { Some(InstanceDeleteExternalIp { instance_id, ip, tx }) => { self.delete_external_ip(tx, instance_id, &ip).await }, + Some(GetState { instance_id, tx }) => { + // TODO(eliza): it could potentially be nice to + // refactor this to use `tokio::sync::watch`, rather + // than having to force `GetState` requests to + // serialize with the requests that actually update + // the state... + self.get_instance_state(tx, instance_id).await + }, None => { warn!(self.log, "InstanceManager's request channel closed; shutting down"); break; @@ -732,6 +757,22 @@ impl InstanceManagerRunner { instance.delete_external_ip(tx, ip).await?; Ok(()) } + + async fn get_instance_state( + &self, + tx: oneshot::Sender>, + instance_id: Uuid, + ) -> Result<(), Error> { + let Some(instance) = self.get_instance(instance_id) else { + return tx + .send(Err(Error::NoSuchInstance(instance_id))) + .map_err(|_| Error::FailedSendClientClosed); + }; + + let state = instance.current_state().await?; + tx.send(Ok(state)).map_err(|_| Error::FailedSendClientClosed)?; + Ok(()) + } } /// Represents membership of an instance in the [`InstanceManager`]. @@ -750,7 +791,7 @@ impl InstanceTicket { InstanceTicket { id, terminate_tx: Some(terminate_tx) } } - #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] pub(crate) fn new_without_manager_for_test(id: Uuid) -> Self { Self { id, terminate_tx: None } } diff --git a/sled-agent/src/lib.rs b/sled-agent/src/lib.rs index a4686bdb88..989f011ed8 100644 --- a/sled-agent/src/lib.rs +++ b/sled-agent/src/lib.rs @@ -7,8 +7,6 @@ // We only use rustdoc for internal documentation, including private items, so // it's expected that we'll have links to private items in the docs. #![allow(rustdoc::private_intra_doc_links)] -// Clippy's style lints are useful, but not worth running automatically. -#![allow(clippy::style)] // Module for executing the simulated sled agent. 
pub mod sim; diff --git a/sled-agent/src/long_running_tasks.rs b/sled-agent/src/long_running_tasks.rs index 3b29bdda60..faea94f552 100644 --- a/sled-agent/src/long_running_tasks.rs +++ b/sled-agent/src/long_running_tasks.rs @@ -20,12 +20,13 @@ use crate::config::Config; use crate::hardware_monitor::HardwareMonitor; use crate::services::ServiceManager; use crate::sled_agent::SledAgent; -use crate::storage_monitor::{StorageMonitor, UnderlayAccess}; +use crate::storage_monitor::StorageMonitor; use crate::zone_bundle::{CleanupContext, ZoneBundler}; use bootstore::schemes::v0 as bootstore; use key_manager::{KeyManager, StorageKeyRequester}; -use sled_hardware::{HardwareManager, SledMode}; -use sled_storage::disk::SyntheticDisk; +use sled_hardware::{HardwareManager, SledMode, UnparsedDisk}; +use sled_storage::config::MountConfig; +use sled_storage::disk::RawSyntheticDisk; use sled_storage::manager::{StorageHandle, StorageManager}; use slog::{info, Logger}; use std::net::Ipv6Addr; @@ -65,23 +66,25 @@ pub async fn spawn_all_longrunning_tasks( LongRunningTaskHandles, oneshot::Sender, oneshot::Sender, - oneshot::Sender, ) { let storage_key_requester = spawn_key_manager(log); let mut storage_manager = spawn_storage_manager(log, storage_key_requester.clone()); - let underlay_available_tx = - spawn_storage_monitor(log, storage_manager.clone()); + spawn_storage_monitor(log, storage_manager.clone()); - let hardware_manager = spawn_hardware_manager(log, sled_mode).await; + let nongimlet_observed_disks = + config.nongimlet_observed_disks.clone().unwrap_or(vec![]); + + let hardware_manager = + spawn_hardware_manager(log, sled_mode, nongimlet_observed_disks).await; // Start monitoring for hardware changes let (sled_agent_started_tx, service_manager_ready_tx) = spawn_hardware_monitor(log, &hardware_manager, &storage_manager); // Add some synthetic disks if necessary. - upsert_synthetic_zpools_if_needed(&log, &storage_manager, &config).await; + upsert_synthetic_disks_if_needed(&log, &storage_manager, &config).await; // Wait for the boot disk so that we can work with any ledgers, // such as those needed by the bootstore and sled-agent @@ -109,7 +112,6 @@ pub async fn spawn_all_longrunning_tasks( }, sled_agent_started_tx, service_manager_ready_tx, - underlay_available_tx, ) } @@ -127,29 +129,27 @@ fn spawn_storage_manager( key_requester: StorageKeyRequester, ) -> StorageHandle { info!(log, "Starting StorageManager"); - let (manager, handle) = StorageManager::new(log, key_requester); + let (manager, handle) = + StorageManager::new(log, MountConfig::default(), key_requester); tokio::spawn(async move { manager.run().await; }); handle } -fn spawn_storage_monitor( - log: &Logger, - storage_handle: StorageHandle, -) -> oneshot::Sender { +fn spawn_storage_monitor(log: &Logger, storage_handle: StorageHandle) { info!(log, "Starting StorageMonitor"); - let (storage_monitor, underlay_available_tx) = - StorageMonitor::new(log, storage_handle); + let storage_monitor = + StorageMonitor::new(log, MountConfig::default(), storage_handle); tokio::spawn(async move { storage_monitor.run().await; }); - underlay_available_tx } async fn spawn_hardware_manager( log: &Logger, sled_mode: SledMode, + nongimlet_observed_disks: Vec, ) -> HardwareManager { // The `HardwareManager` does not use the the "task/handle" pattern // and spawns its worker task inside `HardwareManager::new`. 
Instead of returning @@ -159,10 +159,10 @@ async fn spawn_hardware_manager( // // There are pros and cons to both methods, but the reason to mention it here is that // the handle in this case is the `HardwareManager` itself. - info!(log, "Starting HardwareManager"; "sled_mode" => ?sled_mode); + info!(log, "Starting HardwareManager"; "sled_mode" => ?sled_mode, "nongimlet_observed_disks" => ?nongimlet_observed_disks); let log = log.clone(); tokio::task::spawn_blocking(move || { - HardwareManager::new(&log, sled_mode).unwrap() + HardwareManager::new(&log, sled_mode, nongimlet_observed_disks).unwrap() }) .await .unwrap() @@ -188,9 +188,9 @@ async fn spawn_bootstore_tasks( hardware_manager: &HardwareManager, global_zone_bootstrap_ip: Ipv6Addr, ) -> bootstore::NodeHandle { - let storage_resources = storage_handle.get_latest_resources().await; + let iter_all = storage_handle.get_latest_disks().await; let config = new_bootstore_config( - &storage_resources, + &iter_all, hardware_manager.baseboard(), global_zone_bootstrap_ip, ) @@ -222,21 +222,22 @@ fn spawn_zone_bundler_tasks( ZoneBundler::new(log, storage_handle.clone(), CleanupContext::default()) } -async fn upsert_synthetic_zpools_if_needed( +async fn upsert_synthetic_disks_if_needed( log: &Logger, storage_manager: &StorageHandle, config: &Config, ) { - if let Some(pools) = &config.zpools { - for (i, pool) in pools.iter().enumerate() { + if let Some(vdevs) = &config.vdevs { + for (i, vdev) in vdevs.iter().enumerate() { info!( log, - "Upserting synthetic zpool to Storage Manager: {}", - pool.to_string() + "Upserting synthetic device to Storage Manager"; + "vdev" => vdev.to_string(), ); - let disk = - SyntheticDisk::new(pool.clone(), i.try_into().unwrap()).into(); - storage_manager.upsert_disk(disk).await; + let disk = RawSyntheticDisk::load(vdev, i.try_into().unwrap()) + .expect("Failed to parse synthetic disk") + .into(); + storage_manager.detected_raw_disk(disk).await.await.unwrap(); } } } diff --git a/sled-agent/src/metrics.rs b/sled-agent/src/metrics.rs index c41037fead..62eaaf6154 100644 --- a/sled-agent/src/metrics.rs +++ b/sled-agent/src/metrics.rs @@ -4,10 +4,16 @@ //! Metrics produced by the sled-agent for collection by oximeter. +use omicron_common::api::internal::nexus::ProducerEndpoint; +use omicron_common::api::internal::nexus::ProducerKind; use oximeter::types::MetricsError; use oximeter::types::ProducerRegistry; +use oximeter_producer::LogConfig; +use oximeter_producer::Server as ProducerServer; use sled_hardware_types::Baseboard; use slog::Logger; +use std::net::Ipv6Addr; +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; use uuid::Uuid; @@ -32,6 +38,9 @@ pub(crate) const METRIC_COLLECTION_INTERVAL: Duration = Duration::from_secs(30); /// The interval on which we sample link metrics. pub(crate) const LINK_SAMPLE_INTERVAL: Duration = Duration::from_secs(10); +/// The maximum Dropshot request size for the metrics server. +const METRIC_REQUEST_MAX_SIZE: usize = 10 * 1024 * 1024; + /// An error during sled-agent metric production. #[derive(Debug, thiserror::Error)] pub enum Error { @@ -54,6 +63,9 @@ pub enum Error { #[error("Missing NULL byte in hostname")] HostnameMissingNull, + + #[error("Failed to start metric producer server")] + ProducerServer(#[source] oximeter_producer::Error), } // Basic metadata about the sled agent used when publishing metrics. @@ -73,7 +85,7 @@ struct SledIdentifiers { // which are essentially an `Arc>`. 
It would be nice to avoid that // pattern, but until we have more statistics, it's not clear whether that's // worth it right now. -#[derive(Clone, Debug)] +#[derive(Clone)] // NOTE: The ID fields aren't used on non-illumos systems, rather than changing // the name of fields that are not yet used. #[cfg_attr(not(target_os = "illumos"), allow(dead_code))] @@ -93,7 +105,7 @@ pub struct MetricsManager { // real key. #[cfg(target_os = "illumos")] tracked_links: Arc>>, - registry: ProducerRegistry, + producer_server: Arc, } impl MetricsManager { @@ -105,14 +117,17 @@ impl MetricsManager { sled_id: Uuid, rack_id: Uuid, baseboard: Baseboard, + sled_address: Ipv6Addr, log: Logger, ) -> Result { - let registry = ProducerRegistry::with_id(sled_id); + let producer_server = + start_producer_server(&log, sled_id, sled_address)?; cfg_if::cfg_if! { if #[cfg(target_os = "illumos")] { let kstat_sampler = KstatSampler::new(&log).map_err(Error::Kstat)?; - registry + producer_server + .registry() .register_producer(kstat_sampler.clone()) .map_err(Error::Registry)?; let tracked_links = Arc::new(Mutex::new(BTreeMap::new())); @@ -125,16 +140,44 @@ impl MetricsManager { kstat_sampler, #[cfg(target_os = "illumos")] tracked_links, - registry, + producer_server, }) } /// Return a reference to the contained producer registry. pub fn registry(&self) -> &ProducerRegistry { - &self.registry + self.producer_server.registry() } } +/// Start a metric producer server. +fn start_producer_server( + log: &Logger, + sled_id: Uuid, + sled_address: Ipv6Addr, +) -> Result, Error> { + let log = log.new(slog::o!("component" => "producer-server")); + let registry = ProducerRegistry::with_id(sled_id); + + // Listen on any available socket, using our underlay address. + let address = SocketAddr::new(sled_address.into(), 0); + + // Resolve Nexus via DNS. + let registration_address = None; + let config = oximeter_producer::Config { + server_info: ProducerEndpoint { + id: registry.producer_id(), + kind: ProducerKind::SledAgent, + address, + interval: METRIC_COLLECTION_INTERVAL, + }, + registration_address, + request_body_max_bytes: METRIC_REQUEST_MAX_SIZE, + log: LogConfig::Logger(log), + }; + ProducerServer::start(&config).map(Arc::new).map_err(Error::ProducerServer) +} + #[cfg(target_os = "illumos")] impl MetricsManager { /// Track metrics for a physical datalink. diff --git a/sled-agent/src/nexus.rs b/sled-agent/src/nexus.rs index 3f24c6a806..12fcc05ce3 100644 --- a/sled-agent/src/nexus.rs +++ b/sled-agent/src/nexus.rs @@ -60,16 +60,6 @@ impl NexusClientWithResolver { } } - // for when we have a NexusClient constructed from a FakeNexusServer - // (no need to expose this function outside of tests) - #[cfg(test)] - pub(crate) fn new_with_client( - client: NexusClient, - resolver: Arc, - ) -> Self { - Self { client, resolver } - } - /// Access the progenitor-based Nexus Client. 
pub fn client(&self) -> &NexusClient { &self.client diff --git a/sled-agent/src/params.rs b/sled-agent/src/params.rs index c9e0211690..2ed7ca5528 100644 --- a/sled-agent/src/params.rs +++ b/sled-agent/src/params.rs @@ -19,6 +19,7 @@ use omicron_common::api::internal::nexus::{ use omicron_common::api::internal::shared::{ NetworkInterface, SourceNatConfig, }; +use omicron_uuid_kinds::ZpoolUuid; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; pub use sled_hardware::DendriteAsic; @@ -251,7 +252,7 @@ impl From for DiskType { #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] pub struct Zpool { - pub id: Uuid, + pub id: ZpoolUuid, pub disk_type: DiskType, } @@ -294,6 +295,11 @@ impl std::fmt::Display for ZoneType { } } +pub type OmicronPhysicalDiskConfig = + sled_storage::disk::OmicronPhysicalDiskConfig; +pub type OmicronPhysicalDisksConfig = + sled_storage::disk::OmicronPhysicalDisksConfig; + /// Describes the set of Omicron-managed zones running on a sled #[derive( Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, @@ -370,152 +376,6 @@ impl OmicronZoneConfig { Some(self.id), ) } - - /// Returns the structure that describes this zone to Nexus during rack - /// initialization - pub fn to_nexus_service_req( - &self, - sled_id: Uuid, - ) -> nexus_client::types::ServicePutRequest { - use nexus_client::types as NexusTypes; - - let service_id = self.id; - let zone_id = Some(self.id); - match &self.zone_type { - OmicronZoneType::Nexus { - external_ip, - internal_address, - nic, - .. - } => NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: internal_address.to_string(), - kind: NexusTypes::ServiceKind::Nexus { - external_address: *external_ip, - nic: NexusTypes::ServiceNic { - id: nic.id, - name: nic.name.clone(), - ip: nic.ip, - mac: nic.mac, - slot: nic.slot, - }, - }, - }, - OmicronZoneType::ExternalDns { - http_address, - dns_address, - nic, - .. - } => NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: http_address.to_string(), - kind: NexusTypes::ServiceKind::ExternalDns { - external_address: dns_address.ip(), - nic: NexusTypes::ServiceNic { - id: nic.id, - name: nic.name.clone(), - ip: nic.ip, - mac: nic.mac, - slot: nic.slot, - }, - }, - }, - OmicronZoneType::InternalDns { http_address, .. } => { - NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: http_address.to_string(), - kind: NexusTypes::ServiceKind::InternalDns, - } - } - OmicronZoneType::Oximeter { address } => { - NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::Oximeter, - } - } - OmicronZoneType::CruciblePantry { address } => { - NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::CruciblePantry, - } - } - OmicronZoneType::BoundaryNtp { address, snat_cfg, nic, .. } => { - NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::BoundaryNtp { - snat: snat_cfg.into(), - nic: NexusTypes::ServiceNic { - id: nic.id, - name: nic.name.clone(), - ip: nic.ip, - mac: nic.mac, - slot: nic.slot, - }, - }, - } - } - OmicronZoneType::InternalNtp { address, .. } => { - NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::InternalNtp, - } - } - OmicronZoneType::Clickhouse { address, .. 
} => { - NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::Clickhouse, - } - } - OmicronZoneType::ClickhouseKeeper { address, .. } => { - NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::ClickhouseKeeper, - } - } - OmicronZoneType::Crucible { address, .. } => { - NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::Crucible, - } - } - OmicronZoneType::CockroachDb { address, .. } => { - NexusTypes::ServicePutRequest { - service_id, - zone_id, - sled_id, - address: address.to_string(), - kind: NexusTypes::ServiceKind::Cockroach, - } - } - } - } } /// Describes a persistent ZFS dataset associated with an Omicron zone @@ -529,7 +389,7 @@ pub struct OmicronZoneDataset { impl From for sled_agent_client::types::OmicronZoneDataset { fn from(local: OmicronZoneDataset) -> Self { Self { - pool_name: sled_agent_client::types::ZpoolName::from_str( + pool_name: omicron_common::zpool_name::ZpoolName::from_str( &local.pool_name.to_string(), ) .unwrap(), @@ -750,7 +610,7 @@ impl From for sled_agent_client::types::OmicronZoneType { domain, ntp_servers, snat_cfg, - nic: nic, + nic, }, OmicronZoneType::Clickhouse { address, dataset } => { Other::Clickhouse { @@ -786,7 +646,7 @@ impl From for sled_agent_client::types::OmicronZoneType { dataset: dataset.into(), http_address: http_address.to_string(), dns_address: dns_address.to_string(), - nic: nic, + nic, }, OmicronZoneType::InternalDns { dataset, @@ -823,7 +683,7 @@ impl From for sled_agent_client::types::OmicronZoneType { external_ip, external_tls, internal_address: internal_address.to_string(), - nic: nic, + nic, }, OmicronZoneType::Oximeter { address } => { Other::Oximeter { address: address.to_string() } @@ -891,7 +751,7 @@ pub struct InventoryDisk { /// Identifies information about zpools managed by the control plane #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] pub struct InventoryZpool { - pub id: Uuid, + pub id: ZpoolUuid, pub total_size: ByteCount, } diff --git a/sled-agent/src/probe_manager.rs b/sled-agent/src/probe_manager.rs index 8481dc4b79..16559039a2 100644 --- a/sled-agent/src/probe_manager.rs +++ b/sled-agent/src/probe_manager.rs @@ -206,7 +206,7 @@ impl ProbeManagerInner { let mut rng = rand::rngs::StdRng::from_entropy(); let root = self .storage - .get_latest_resources() + .get_latest_disks() .await .all_u2_mountpoints(ZONE_DATASET) .choose(&mut rng) diff --git a/sled-agent/src/rack_setup/config.rs b/sled-agent/src/rack_setup/config.rs index fba753657e..e52ed14304 100644 --- a/sled-agent/src/rack_setup/config.rs +++ b/sled-agent/src/rack_setup/config.rs @@ -70,13 +70,15 @@ impl SetupServiceConfig { } pub fn az_subnet(&self) -> Ipv6Subnet { - Ipv6Subnet::::new(self.rack_network_config.rack_subnet.ip()) + Ipv6Subnet::::new( + self.rack_network_config.rack_subnet.addr(), + ) } /// Returns the subnet for our rack. 
pub fn rack_subnet(&self) -> Ipv6Subnet { Ipv6Subnet::::new( - self.rack_network_config.rack_subnet.ip(), + self.rack_network_config.rack_subnet.addr(), ) } @@ -94,7 +96,9 @@ mod test { use anyhow::Context; use camino::Utf8PathBuf; use omicron_common::address::IpRange; + use omicron_common::api::internal::shared::AllowedSourceIps; use omicron_common::api::internal::shared::RackNetworkConfig; + use oxnet::Ipv6Net; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; #[test] @@ -122,13 +126,18 @@ mod test { .unwrap(), }, rack_network_config: RackNetworkConfig { - rack_subnet: "fd00:1122:3344:0100::".parse().unwrap(), + rack_subnet: Ipv6Net::new( + "fd00:1122:3344:0100::".parse().unwrap(), + RACK_PREFIX, + ) + .unwrap(), infra_ip_first: Ipv4Addr::LOCALHOST, infra_ip_last: Ipv4Addr::LOCALHOST, ports: Vec::new(), bgp: Vec::new(), bfd: Vec::new(), }, + allowed_source_ips: AllowedSourceIps::Any, }; assert_eq!( @@ -231,7 +240,7 @@ mod test { let read_cfg = SetupServiceConfig::from_file(&cfg_path) .expect("failed to read generated config with certificate"); assert_eq!(read_cfg.external_certificates.len(), 1); - let cert = read_cfg.external_certificates.iter().next().unwrap(); + let cert = read_cfg.external_certificates.first().unwrap(); let _ = rcgen::KeyPair::from_pem(&cert.key) .expect("generated PEM did not parse as KeyPair"); } diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index dd6936132b..b48e4f18b8 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -5,12 +5,15 @@ //! Plan generation for "where should services be initialized". use crate::bootstrap::params::StartSledAgentRequest; -use crate::params::{OmicronZoneConfig, OmicronZoneDataset, OmicronZoneType}; +use crate::params::{ + OmicronPhysicalDiskConfig, OmicronPhysicalDisksConfig, OmicronZoneConfig, + OmicronZoneDataset, OmicronZoneType, +}; use crate::rack_setup::config::SetupServiceConfig as Config; use camino::Utf8PathBuf; use dns_service_client::types::DnsConfigParams; use illumos_utils::zpool::ZpoolName; -use internal_dns::config::{Host, ZoneVariant}; +use internal_dns::config::{Host, Zone}; use internal_dns::ServiceName; use omicron_common::address::{ get_sled_address, get_switch_zone_address, Ipv6Subnet, ReservedRackSubnet, @@ -18,14 +21,16 @@ use omicron_common::address::{ MGD_PORT, MGS_PORT, NEXUS_REDUNDANCY, NTP_PORT, NUM_SOURCE_NAT_PORTS, RSS_RESERVED_ADDRESSES, SLED_PREFIX, }; -use omicron_common::api::external::{MacAddr, Vni}; +use omicron_common::api::external::{Generation, MacAddr, Vni}; use omicron_common::api::internal::shared::{ NetworkInterface, NetworkInterfaceKind, SourceNatConfig, + SourceNatConfigError, }; use omicron_common::backoff::{ retry_notify_ext, retry_policy_internal_service_aggressive, BackoffError, }; use omicron_common::ledger::{self, Ledger, Ledgerable}; +use omicron_uuid_kinds::{GenericUuid, OmicronZoneUuid, SledUuid, ZpoolUuid}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use sled_agent_client::{ @@ -59,7 +64,7 @@ const CLICKHOUSE_COUNT: usize = 1; const CLICKHOUSE_KEEPER_COUNT: usize = 0; // TODO(https://github.com/oxidecomputer/omicron/issues/732): Remove. // when Nexus provisions Crucible. -const MINIMUM_U2_ZPOOL_COUNT: usize = 3; +const MINIMUM_U2_COUNT: usize = 3; // TODO(https://github.com/oxidecomputer/omicron/issues/732): Remove. // when Nexus provisions the Pantry. 
const PANTRY_COUNT: usize = 3; @@ -94,10 +99,16 @@ pub enum PlanError { #[error("Found only v1 service plan")] FoundV1, + + #[error("Found only v2 service plan")] + FoundV2, } #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema)] pub struct SledConfig { + /// Control plane disks configured for this sled + pub disks: OmicronPhysicalDisksConfig, + /// zones configured for this sled pub zones: Vec, } @@ -115,7 +126,8 @@ impl Ledgerable for Plan { fn generation_bump(&mut self) {} } const RSS_SERVICE_PLAN_V1_FILENAME: &str = "rss-service-plan.json"; -const RSS_SERVICE_PLAN_FILENAME: &str = "rss-service-plan-v2.json"; +const RSS_SERVICE_PLAN_V2_FILENAME: &str = "rss-service-plan-v2.json"; +const RSS_SERVICE_PLAN_FILENAME: &str = "rss-service-plan-v3.json"; impl Plan { pub async fn load( @@ -123,7 +135,7 @@ impl Plan { storage_manager: &StorageHandle, ) -> Result, PlanError> { let paths: Vec = storage_manager - .get_latest_resources() + .get_latest_disks() .await .all_m2_mountpoints(CONFIG_DATASET) .into_iter() @@ -167,6 +179,14 @@ impl Plan { // support a condition that we do not believe can ever happen in any // system. Err(PlanError::FoundV1) + } else if Self::has_v2(storage_manager).await.map_err(|err| { + // Same as the comment above, but for version 2. + PlanError::Io { + message: String::from("looking for v2 RSS plan"), + err, + } + })? { + Err(PlanError::FoundV2) } else { Ok(None) } @@ -176,7 +196,7 @@ impl Plan { storage_manager: &StorageHandle, ) -> Result { let paths = storage_manager - .get_latest_resources() + .get_latest_disks() .await .all_m2_mountpoints(CONFIG_DATASET) .into_iter() @@ -191,6 +211,25 @@ impl Plan { Ok(false) } + async fn has_v2( + storage_manager: &StorageHandle, + ) -> Result { + let paths = storage_manager + .get_latest_disks() + .await + .all_m2_mountpoints(CONFIG_DATASET) + .into_iter() + .map(|p| p.join(RSS_SERVICE_PLAN_V2_FILENAME)); + + for p in paths { + if p.try_exists()? { + return Ok(true); + } + } + + Ok(false) + } + async fn is_sled_scrimlet( log: &Logger, address: SocketAddrV6, @@ -214,11 +253,10 @@ impl Plan { } } - // Gets zpool UUIDs from U.2 devices on the sled. 
- async fn get_u2_zpools_from_sled( + async fn get_inventory( log: &Logger, address: SocketAddrV6, - ) -> Result, PlanError> { + ) -> Result { let dur = std::time::Duration::from_secs(60); let client = reqwest::ClientBuilder::new() .connect_timeout(dur) @@ -231,52 +269,47 @@ impl Plan { log.new(o!("SledAgentClient" => address.to_string())), ); - let get_u2_zpools = || async { - let zpools: Vec = client - .zpools_get() + let get_inventory = || async { + let inventory = client + .inventory() .await - .map(|response| { - response - .into_inner() - .into_iter() - .filter_map(|zpool| match zpool.disk_type { - SledAgentTypes::DiskType::U2 => { - Some(ZpoolName::new_external(zpool.id)) - } - SledAgentTypes::DiskType::M2 => None, - }) - .collect() - }) + .map(|response| response.into_inner()) .map_err(|err| { BackoffError::transient(PlanError::SledApi(err)) })?; - if zpools.len() < MINIMUM_U2_ZPOOL_COUNT { + if inventory + .disks + .iter() + .filter(|disk| { + matches!(disk.variant, SledAgentTypes::DiskVariant::U2) + }) + .count() + < MINIMUM_U2_COUNT + { return Err(BackoffError::transient( - PlanError::SledInitialization( - "Awaiting zpools".to_string(), - ), + PlanError::SledInitialization("Awaiting disks".to_string()), )); } - Ok(zpools) + Ok(inventory) }; - let log_failure = |error, call_count, total_duration| { + let log_failure = |error: PlanError, call_count, total_duration| { if call_count == 0 { - info!(log, "failed to get zpools from {address}"; "error" => ?error); + info!(log, "failed to get inventory from {address}"; "error" => ?error); } else if total_duration > std::time::Duration::from_secs(20) { - warn!(log, "failed to get zpools from {address}"; "error" => ?error, "total duration" => ?total_duration); + warn!(log, "failed to get inventory from {address}"; "error" => ?error, "total duration" => ?total_duration); } }; - let u2_zpools = retry_notify_ext( + let inventory = retry_notify_ext( retry_policy_internal_service_aggressive(), - get_u2_zpools, + get_inventory, log_failure, ) .await?; - Ok(u2_zpools) + Ok(inventory) } pub fn create_transient( @@ -307,6 +340,37 @@ impl Plan { .unwrap(); } + // Set up storage early, as it'll be necessary for placement of + // many subsequent services. + // + // Our policy at RSS time is currently "adopt all the U.2 disks we can see". + for sled_info in sled_info.iter_mut() { + let disks = sled_info + .inventory + .disks + .iter() + .filter(|disk| { + matches!(disk.variant, SledAgentTypes::DiskVariant::U2) + }) + .map(|disk| OmicronPhysicalDiskConfig { + identity: disk.identity.clone(), + id: Uuid::new_v4(), + pool_id: ZpoolUuid::new_v4(), + }) + .collect(); + sled_info.request.disks = OmicronPhysicalDisksConfig { + generation: Generation::new(), + disks, + }; + sled_info.u2_zpools = sled_info + .request + .disks + .disks + .iter() + .map(|disk| ZpoolName::new_external(disk.pool_id)) + .collect(); + } + // We'll stripe most services across all available Sleds, round-robin // style. In development and CI, this might only be one Sled. 
We'll // only report `NotEnoughSleds` below if there are zero Sleds or if we @@ -320,11 +384,11 @@ impl Plan { &reserved_rack_subnet.get_dns_subnets()[0..DNS_REDUNDANCY]; let rack_dns_servers = dns_subnets .into_iter() - .map(|dns_subnet| dns_subnet.dns_address().ip().into()) + .map(|dns_subnet| dns_subnet.dns_address().into()) .collect::>(); for i in 0..dns_subnets.len() { let dns_subnet = &dns_subnets[i]; - let ip = dns_subnet.dns_address().ip(); + let ip = dns_subnet.dns_address(); let sled = { let which_sled = sled_allocator.next().ok_or(PlanError::NotEnoughSleds)?; @@ -333,7 +397,7 @@ impl Plan { let http_address = SocketAddrV6::new(ip, DNS_HTTP_PORT, 0, 0); let dns_address = SocketAddrV6::new(ip, DNS_PORT, 0, 0); - let id = Uuid::new_v4(); + let id = OmicronZoneUuid::new_v4(); dns_builder .host_zone_with_one_backend( id, @@ -346,7 +410,8 @@ impl Plan { sled.alloc_from_u2_zpool(DatasetKind::InternalDns)?; sled.request.zones.push(OmicronZoneConfig { - id, + // TODO-cleanup use TypedUuid everywhere + id: id.into_untyped_uuid(), underlay_address: ip, zone_type: OmicronZoneType::InternalDns { dataset: OmicronZoneDataset { @@ -354,7 +419,7 @@ impl Plan { }, http_address, dns_address, - gz_address: dns_subnet.gz_address().ip(), + gz_address: dns_subnet.gz_address(), gz_address_index: i.try_into().expect("Giant indices?"), }, }); @@ -367,7 +432,7 @@ impl Plan { sled_allocator.next().ok_or(PlanError::NotEnoughSleds)?; &mut sled_info[which_sled] }; - let id = Uuid::new_v4(); + let id = OmicronZoneUuid::new_v4(); let ip = sled.addr_alloc.next().expect("Not enough addrs"); let port = omicron_common::address::COCKROACH_PORT; let address = SocketAddrV6::new(ip, port, 0, 0); @@ -382,7 +447,8 @@ impl Plan { let dataset_name = sled.alloc_from_u2_zpool(DatasetKind::CockroachDb)?; sled.request.zones.push(OmicronZoneConfig { - id, + // TODO-cleanup use TypedUuid everywhere + id: id.into_untyped_uuid(), underlay_address: ip, zone_type: OmicronZoneType::CockroachDb { dataset: OmicronZoneDataset { @@ -398,7 +464,7 @@ impl Plan { // server IP addresses given to us at RSS-time. 
// TODO(https://github.com/oxidecomputer/omicron/issues/732): Remove loop { - let id = Uuid::new_v4(); + let id = OmicronZoneUuid::new_v4(); let Some((nic, external_ip)) = svc_port_builder.next_dns(id) else { break; }; @@ -425,7 +491,8 @@ impl Plan { let dataset_name = sled.alloc_from_u2_zpool(dataset_kind)?; sled.request.zones.push(OmicronZoneConfig { - id, + // TODO-cleanup use TypedUuid everywhere + id: id.into_untyped_uuid(), underlay_address: *http_address.ip(), zone_type: OmicronZoneType::ExternalDns { dataset: OmicronZoneDataset { @@ -445,7 +512,7 @@ impl Plan { sled_allocator.next().ok_or(PlanError::NotEnoughSleds)?; &mut sled_info[which_sled] }; - let id = Uuid::new_v4(); + let id = OmicronZoneUuid::new_v4(); let address = sled.addr_alloc.next().expect("Not enough addrs"); dns_builder .host_zone_with_one_backend( @@ -457,7 +524,8 @@ impl Plan { .unwrap(); let (nic, external_ip) = svc_port_builder.next_nexus(id)?; sled.request.zones.push(OmicronZoneConfig { - id, + // TODO-cleanup use TypedUuid everywhere + id: id.into_untyped_uuid(), underlay_address: address, zone_type: OmicronZoneType::Nexus { internal_address: SocketAddrV6::new( @@ -488,7 +556,7 @@ impl Plan { sled_allocator.next().ok_or(PlanError::NotEnoughSleds)?; &mut sled_info[which_sled] }; - let id = Uuid::new_v4(); + let id = OmicronZoneUuid::new_v4(); let address = sled.addr_alloc.next().expect("Not enough addrs"); dns_builder .host_zone_with_one_backend( @@ -499,7 +567,8 @@ impl Plan { ) .unwrap(); sled.request.zones.push(OmicronZoneConfig { - id, + // TODO-cleanup use TypedUuid everywhere + id: id.into_untyped_uuid(), underlay_address: address, zone_type: OmicronZoneType::Oximeter { address: SocketAddrV6::new( @@ -520,7 +589,7 @@ impl Plan { sled_allocator.next().ok_or(PlanError::NotEnoughSleds)?; &mut sled_info[which_sled] }; - let id = Uuid::new_v4(); + let id = OmicronZoneUuid::new_v4(); let ip = sled.addr_alloc.next().expect("Not enough addrs"); let port = omicron_common::address::CLICKHOUSE_PORT; let address = SocketAddrV6::new(ip, port, 0, 0); @@ -535,7 +604,8 @@ impl Plan { let dataset_name = sled.alloc_from_u2_zpool(DatasetKind::Clickhouse)?; sled.request.zones.push(OmicronZoneConfig { - id, + // TODO-cleanup use TypedUuid everywhere + id: id.into_untyped_uuid(), underlay_address: ip, zone_type: OmicronZoneType::Clickhouse { address, @@ -556,7 +626,7 @@ impl Plan { sled_allocator.next().ok_or(PlanError::NotEnoughSleds)?; &mut sled_info[which_sled] }; - let id = Uuid::new_v4(); + let id = OmicronZoneUuid::new_v4(); let ip = sled.addr_alloc.next().expect("Not enough addrs"); let port = omicron_common::address::CLICKHOUSE_KEEPER_PORT; let address = SocketAddrV6::new(ip, port, 0, 0); @@ -571,7 +641,8 @@ impl Plan { let dataset_name = sled.alloc_from_u2_zpool(DatasetKind::ClickhouseKeeper)?; sled.request.zones.push(OmicronZoneConfig { - id, + // TODO-cleanup use TypedUuid everywhere + id: id.into_untyped_uuid(), underlay_address: ip, zone_type: OmicronZoneType::ClickhouseKeeper { address, @@ -592,7 +663,7 @@ impl Plan { }; let address = sled.addr_alloc.next().expect("Not enough addrs"); let port = omicron_common::address::CRUCIBLE_PANTRY_PORT; - let id = Uuid::new_v4(); + let id = OmicronZoneUuid::new_v4(); dns_builder .host_zone_with_one_backend( id, @@ -602,7 +673,8 @@ impl Plan { ) .unwrap(); sled.request.zones.push(OmicronZoneConfig { - id, + // TODO-cleanup use TypedUuid everywhere + id: id.into_untyped_uuid(), underlay_address: address, zone_type: OmicronZoneType::CruciblePantry { address: 
SocketAddrV6::new(address, port, 0, 0), @@ -617,7 +689,7 @@ impl Plan { let ip = sled.addr_alloc.next().expect("Not enough addrs"); let port = omicron_common::address::CRUCIBLE_PORT; let address = SocketAddrV6::new(ip, port, 0, 0); - let id = Uuid::new_v4(); + let id = OmicronZoneUuid::new_v4(); dns_builder .host_zone_with_one_backend( id, @@ -628,7 +700,8 @@ impl Plan { .unwrap(); sled.request.zones.push(OmicronZoneConfig { - id, + // TODO-cleanup use TypedUuid everywhere + id: id.into_untyped_uuid(), underlay_address: ip, zone_type: OmicronZoneType::Crucible { address, @@ -643,13 +716,13 @@ impl Plan { // network. let mut boundary_ntp_servers = vec![]; for (idx, sled) in sled_info.iter_mut().enumerate() { - let id = Uuid::new_v4(); + let id = OmicronZoneUuid::new_v4(); let address = sled.addr_alloc.next().expect("Not enough addrs"); let ntp_address = SocketAddrV6::new(address, NTP_PORT, 0, 0); let (zone_type, svcname) = if idx < BOUNDARY_NTP_COUNT { boundary_ntp_servers - .push(Host::for_zone(id, ZoneVariant::Other).fqdn()); + .push(Host::for_zone(Zone::Other(id)).fqdn()); let (nic, snat_cfg) = svc_port_builder.next_snat(id)?; ( OmicronZoneType::BoundaryNtp { @@ -679,7 +752,8 @@ impl Plan { .unwrap(); sled.request.zones.push(OmicronZoneConfig { - id, + // TODO-cleanup use TypedUuid everywhere + id: id.into_untyped_uuid(), underlay_address: address, zone_type, }); @@ -690,7 +764,7 @@ impl Plan { .map(|sled_info| (sled_info.sled_address, sled_info.request)) .collect(); - let dns_config = dns_builder.build(); + let dns_config = dns_builder.build_full_config_for_initial_generation(); Ok(Self { services, dns_config }) } @@ -708,16 +782,16 @@ impl Plan { |sled_request| async { let subnet = sled_request.body.subnet; let sled_address = get_sled_address(subnet); - let u2_zpools = - Self::get_u2_zpools_from_sled(log, sled_address) - .await?; + let inventory = + Self::get_inventory(log, sled_address).await?; let is_scrimlet = Self::is_sled_scrimlet(log, sled_address).await?; Ok(SledInfo::new( - sled_request.body.id, + // TODO-cleanup use TypedUuid everywhere + SledUuid::from_untyped_uuid(sled_request.body.id), subnet, sled_address, - u2_zpools, + inventory, is_scrimlet, )) }, @@ -730,7 +804,7 @@ impl Plan { // Once we've constructed a plan, write it down to durable storage. 
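Each zone placed in the loops above draws its underlay address from `sled.addr_alloc.next()`. Below is a rough sketch of one plausible shape for such a bump allocator over a sled's /64 subnet, assuming a small reserved range at the bottom of the subnet; the real `AddressBumpAllocator` may differ in its reserved count and overflow handling.

use std::net::Ipv6Addr;

// Hands out consecutive interface IDs within a sled subnet, starting above a
// reserved range (illustrative: ::0 is the network, ::1 the sled agent).
struct AddrAllocator {
    subnet: Ipv6Addr, // network address of the sled's /64
    next: u16,
}

impl AddrAllocator {
    fn new(subnet: Ipv6Addr) -> Self {
        Self { subnet, next: 2 }
    }

    fn next(&mut self) -> Option<Ipv6Addr> {
        let value = self.next;
        // Stop handing out addresses instead of wrapping around.
        self.next = self.next.checked_add(1)?;
        let mut segments = self.subnet.segments();
        segments[7] = value;
        Some(Ipv6Addr::from(segments))
    }
}

fn main() {
    let mut alloc =
        AddrAllocator::new("fd00:1122:3344:101::".parse().unwrap());
    // Successive zones on this sled get ::2, ::3, ...
    println!("{:?}", alloc.next());
    println!("{:?}", alloc.next());
}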
let paths: Vec = storage_manager - .get_latest_resources() + .get_latest_disks() .await .all_m2_mountpoints(CONFIG_DATASET) .into_iter() @@ -766,12 +840,14 @@ impl AddressBumpAllocator { /// Wraps up the information used to allocate components to a Sled pub struct SledInfo { /// unique id for the sled agent - pub sled_id: Uuid, + pub sled_id: SledUuid, /// the sled's unique IPv6 subnet subnet: Ipv6Subnet, /// the address of the Sled Agent on the sled's subnet pub sled_address: SocketAddrV6, - /// the list of zpools on the Sled + /// the inventory returned by the Sled + inventory: SledAgentTypes::Inventory, + /// The Zpools available for usage by services u2_zpools: Vec, /// spreads components across a Sled's zpools u2_zpool_allocators: @@ -786,17 +862,18 @@ pub struct SledInfo { impl SledInfo { pub fn new( - sled_id: Uuid, + sled_id: SledUuid, subnet: Ipv6Subnet, sled_address: SocketAddrV6, - u2_zpools: Vec, + inventory: SledAgentTypes::Inventory, is_scrimlet: bool, ) -> SledInfo { SledInfo { sled_id, subnet, sled_address, - u2_zpools, + inventory, + u2_zpools: vec![], u2_zpool_allocators: HashMap::new(), is_scrimlet, addr_alloc: AddressBumpAllocator::new(subnet), @@ -884,39 +961,29 @@ impl ServicePortBuilder { let dns_v4_ips = Box::new( DNS_OPTE_IPV4_SUBNET - .0 - .iter() + .addr_iter() .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), ); let dns_v6_ips = Box::new( - DNS_OPTE_IPV6_SUBNET - .0 - .iter() - .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), + DNS_OPTE_IPV6_SUBNET.iter().skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), ); let nexus_v4_ips = Box::new( NEXUS_OPTE_IPV4_SUBNET - .0 - .iter() + .addr_iter() .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), ); let nexus_v6_ips = Box::new( NEXUS_OPTE_IPV6_SUBNET - .0 .iter() .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), ); let ntp_v4_ips = Box::new( NTP_OPTE_IPV4_SUBNET - .0 - .iter() + .addr_iter() .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), ); let ntp_v6_ips = Box::new( - NTP_OPTE_IPV6_SUBNET - .0 - .iter() - .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), + NTP_OPTE_IPV6_SUBNET.iter().skip(NUM_INITIAL_RESERVED_IP_ADDRESSES), ); Self { internal_services_ip_pool, @@ -945,7 +1012,10 @@ impl ServicePortBuilder { mac } - fn next_dns(&mut self, svc_id: Uuid) -> Option<(NetworkInterface, IpAddr)> { + fn next_dns( + &mut self, + svc_id: OmicronZoneUuid, + ) -> Option<(NetworkInterface, IpAddr)> { use omicron_common::address::{ DNS_OPTE_IPV4_SUBNET, DNS_OPTE_IPV6_SUBNET, }; @@ -964,7 +1034,10 @@ impl ServicePortBuilder { let nic = NetworkInterface { id: Uuid::new_v4(), - kind: NetworkInterfaceKind::Service { id: svc_id }, + kind: NetworkInterfaceKind::Service { + // TODO-cleanup use TypedUuid everywhere + id: svc_id.into_untyped_uuid(), + }, name: format!("external-dns-{svc_id}").parse().unwrap(), ip, mac: self.random_mac(), @@ -979,7 +1052,7 @@ impl ServicePortBuilder { fn next_nexus( &mut self, - svc_id: Uuid, + svc_id: OmicronZoneUuid, ) -> Result<(NetworkInterface, IpAddr), PlanError> { use omicron_common::address::{ NEXUS_OPTE_IPV4_SUBNET, NEXUS_OPTE_IPV6_SUBNET, @@ -1001,7 +1074,10 @@ impl ServicePortBuilder { let nic = NetworkInterface { id: Uuid::new_v4(), - kind: NetworkInterfaceKind::Service { id: svc_id }, + kind: NetworkInterfaceKind::Service { + // TODO-cleanup use TypedUuid everywhere + id: svc_id.into_untyped_uuid(), + }, name: format!("nexus-{svc_id}").parse().unwrap(), ip, mac: self.random_mac(), @@ -1016,7 +1092,7 @@ impl ServicePortBuilder { fn next_snat( &mut self, - svc_id: Uuid, + svc_id: OmicronZoneUuid, ) -> Result<(NetworkInterface, SourceNatConfig), 
PlanError> { use omicron_common::address::{ NTP_OPTE_IPV4_SUBNET, NTP_OPTE_IPV6_SUBNET, @@ -1033,7 +1109,14 @@ impl ServicePortBuilder { self.next_snat_ip = None; } - let snat_cfg = SourceNatConfig { ip: snat_ip, first_port, last_port }; + let snat_cfg = + match SourceNatConfig::new(snat_ip, first_port, last_port) { + Ok(cfg) => cfg, + // We know our port pair is aligned, making this unreachable. + Err(err @ SourceNatConfigError::UnalignedPortPair { .. }) => { + unreachable!("{err}"); + } + }; let (ip, subnet) = match snat_ip { IpAddr::V4(_) => ( @@ -1048,7 +1131,10 @@ impl ServicePortBuilder { let nic = NetworkInterface { id: Uuid::new_v4(), - kind: NetworkInterfaceKind::Service { id: svc_id }, + kind: NetworkInterfaceKind::Service { + // TODO-cleanup use TypedUuid everywhere + id: svc_id.into_untyped_uuid(), + }, name: format!("ntp-{svc_id}").parse().unwrap(), ip, mac: self.random_mac(), @@ -1068,7 +1154,9 @@ mod tests { use crate::bootstrap::params::BootstrapAddressDiscovery; use crate::bootstrap::params::RecoverySiloConfig; use omicron_common::address::IpRange; + use omicron_common::api::internal::shared::AllowedSourceIps; use omicron_common::api::internal::shared::RackNetworkConfig; + use oxnet::Ipv6Net; const EXPECTED_RESERVED_ADDRESSES: u16 = 2; const EXPECTED_USABLE_ADDRESSES: u16 = @@ -1164,20 +1252,23 @@ mod tests { user_password_hash: "$argon2id$v=19$m=98304,t=13,p=1$RUlWc0ZxaHo0WFdrN0N6ZQ$S8p52j85GPvMhR/ek3GL0el/oProgTwWpHJZ8lsQQoY".parse().unwrap(), }, rack_network_config: RackNetworkConfig { - rack_subnet: Ipv6Addr::LOCALHOST.into(), + rack_subnet: Ipv6Net::host_net(Ipv6Addr::LOCALHOST), infra_ip_first: Ipv4Addr::LOCALHOST, infra_ip_last: Ipv4Addr::LOCALHOST, ports: Vec::new(), bgp: Vec::new(), bfd: Vec::new(), }, + allowed_source_ips: AllowedSourceIps::Any, }; let mut svp = ServicePortBuilder::new(&config); // We should only get back the 5 DNS IPs we specified. let mut svp_dns_ips = Vec::new(); - while let Some((_interface, ip)) = svp.next_dns(Uuid::new_v4()) { + while let Some((_interface, ip)) = + svp.next_dns(OmicronZoneUuid::new_v4()) + { svp_dns_ips.push(ip.to_string()); } assert_eq!(svp_dns_ips, dns_ips); @@ -1207,10 +1298,10 @@ mod tests { } #[test] - fn test_rss_service_plan_v2_schema() { + fn test_rss_service_plan_v3_schema() { let schema = schemars::schema_for!(Plan); expectorate::assert_contents( - "../schema/rss-service-plan-v2.json", + "../schema/rss-service-plan-v3.json", &serde_json::to_string_pretty(&schema).unwrap(), ); } diff --git a/sled-agent/src/rack_setup/plan/sled.rs b/sled-agent/src/rack_setup/plan/sled.rs index efdd86d2f9..a3fd57369a 100644 --- a/sled-agent/src/rack_setup/plan/sled.rs +++ b/sled-agent/src/rack_setup/plan/sled.rs @@ -59,7 +59,7 @@ impl Plan { storage: &StorageHandle, ) -> Result, PlanError> { let paths: Vec = storage - .get_latest_resources() + .get_latest_disks() .await .all_m2_mountpoints(CONFIG_DATASET) .into_iter() @@ -126,7 +126,7 @@ impl Plan { // Once we've constructed a plan, write it down to durable storage. let paths: Vec = storage_manager - .get_latest_resources() + .get_latest_disks() .await .all_m2_mountpoints(CONFIG_DATASET) .into_iter() diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index 587625fe7b..1d8b3e7ad3 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -17,7 +17,7 @@ //! state files that get generated as RSS executes: //! //! - /pool/int/UUID/config/rss-sled-plan.json (Sled Plan) -//! 
- /pool/int/UUID/config/rss-service-plan-v2.json (Service Plan) +//! - /pool/int/UUID/config/rss-service-plan-v3.json (Service Plan) //! - /pool/int/UUID/config/rss-plan-completed.marker (Plan Execution Complete) //! //! These phases are described below. As each phase completes, a corresponding @@ -92,9 +92,11 @@ use nexus_client::{ types as NexusTypes, Client as NexusClient, Error as NexusError, }; use nexus_types::deployment::{ - Blueprint, BlueprintZoneConfig, BlueprintZoneDisposition, - BlueprintZonesConfig, + Blueprint, BlueprintPhysicalDisksConfig, BlueprintZoneConfig, + BlueprintZoneDisposition, BlueprintZonesConfig, + CockroachDbPreserveDowngrade, InvalidOmicronZoneType, }; +use nexus_types::external_api::views::SledState; use omicron_common::address::get_sled_address; use omicron_common::api::external::Generation; use omicron_common::api::internal::shared::ExternalPortDiscovery; @@ -103,6 +105,8 @@ use omicron_common::backoff::{ }; use omicron_common::ledger::{self, Ledger, Ledgerable}; use omicron_ddm_admin_client::{Client as DdmAdminClient, DdmError}; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::{ExternalIpUuid, GenericUuid}; use serde::{Deserialize, Serialize}; use sled_agent_client::{ types as SledAgentTypes, Client as SledAgentClient, Error as SledAgentError, @@ -115,6 +119,7 @@ use std::collections::{btree_map, BTreeMap, BTreeSet}; use std::collections::{HashMap, HashSet}; use std::iter; use std::net::{Ipv6Addr, SocketAddrV6}; +use std::time::Duration; use thiserror::Error; use uuid::Uuid; @@ -276,6 +281,125 @@ impl ServiceInner { ServiceInner { log } } + // Ensures that all storage for a particular generation is configured. + // + // This will either return: + // - Ok if the requests are all successful (where "successful" also + // includes any of the sleds having a storage configuration more recent than + // what we've requested), or + // - An error from attempting to configure storage on the underlying sleds + async fn ensure_storage_config_at_least( + &self, + plan: &ServicePlan, + ) -> Result<(), SetupServiceError> { + cancel_safe_futures::future::join_all_then_try( + plan.services.iter().map(|(sled_address, config)| async move { + self.initialize_storage_on_sled( + *sled_address, + SledAgentTypes::OmicronPhysicalDisksConfig { + generation: config.disks.generation, + disks: config + .disks + .disks + .iter() + .map(|disk| { + SledAgentTypes::OmicronPhysicalDiskConfig { + identity: disk.identity.clone(), + id: disk.id, + pool_id: disk.pool_id, + } + }) + .collect(), + }, + ) + .await + }), + ) + .await?; + Ok(()) + } + + /// Requests that the specified sled configure storage as described + /// by `storage_config`. + /// + /// This function succeeds if either the configuration is supplied, or if + /// the configuration on the target sled is newer than what we're supplying. + // This function shares a lot of implementation details with + // [Self::initialize_zones_on_sled]. Although it has a different meaning, + // the usage (and expectations around generation numbers) are similar. 
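The new `ensure_storage_config_at_least` pushes a generation-numbered `OmicronPhysicalDisksConfig` to every sled, and the doc comment above spells out the contract: a sled that is already at a newer generation is treated as success. The sketch below shows just that generation rule; the types and the bare u64 generation are simplified stand-ins, not the sled agent's actual structures.

// Accept a config whose generation is at least as new as what the sled
// holds; report an older request as stale (the real API surfaces this as
// HTTP 409, which RSS then treats as success).
#[derive(Clone, Debug, PartialEq)]
struct DisksConfig {
    generation: u64,
    disks: Vec<String>,
}

#[derive(Debug)]
enum EnsureOutcome {
    Applied,
    StaleRequest,
}

fn ensure_disks(
    current: &mut DisksConfig,
    incoming: DisksConfig,
) -> Result<EnsureOutcome, String> {
    if incoming.generation < current.generation {
        return Ok(EnsureOutcome::StaleRequest);
    }
    if incoming.generation == current.generation && incoming != *current {
        return Err("same generation but different contents".to_string());
    }
    *current = incoming;
    Ok(EnsureOutcome::Applied)
}

fn main() {
    let mut current = DisksConfig { generation: 2, disks: vec!["d1".into()] };
    // A request at an older generation is reported as stale, not applied.
    let outcome = ensure_disks(
        &mut current,
        DisksConfig { generation: 1, disks: vec![] },
    );
    println!("{outcome:?}; sled still at generation {}", current.generation);
}

Because the rule is "at least this generation", repeated or racing RSS attempts converge on the same result instead of fighting over the sled's state.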
+ async fn initialize_storage_on_sled( + &self, + sled_address: SocketAddrV6, + storage_config: SledAgentTypes::OmicronPhysicalDisksConfig, + ) -> Result<(), SetupServiceError> { + let dur = std::time::Duration::from_secs(60); + let client = reqwest::ClientBuilder::new() + .connect_timeout(dur) + .build() + .map_err(SetupServiceError::HttpClient)?; + let log = self.log.new(o!("sled_address" => sled_address.to_string())); + let client = SledAgentClient::new_with_client( + &format!("http://{}", sled_address), + client, + log.clone(), + ); + + let storage_put = || async { + info!( + log, + "attempting to set up sled's storage: {:?}", storage_config, + ); + let result = client + .omicron_physical_disks_put(&storage_config.clone()) + .await; + let Err(error) = result else { + return Ok::< + (), + BackoffError>, + >(()); + }; + + if let sled_agent_client::Error::ErrorResponse(response) = &error { + if response.status() == http::StatusCode::CONFLICT { + warn!( + log, + "ignoring attempt to initialize storage because \ + the server seems to be newer"; + "attempted_generation" => i64::from(&storage_config.generation), + "req_id" => &response.request_id, + "server_message" => &response.message, + ); + + // If we attempt to initialize storage at generation X, and + // the server refuses because it's at some generation newer + // than X, then we treat that as success. See the doc + // comment on this function. + return Ok(()); + } + } + + // TODO Many other codes here should not be retried. See + // omicron#4578. + return Err(BackoffError::transient(error)); + }; + let log_failure = |error, delay| { + warn!( + log, + "failed to initialize Omicron storage"; + "error" => #%error, + "retry_after" => ?delay, + ); + }; + retry_notify( + retry_policy_internal_service_aggressive(), + storage_put, + log_failure, + ) + .await?; + + Ok(()) + } + /// Requests that the specified sled configure zones as described by /// `zones_config` /// @@ -345,7 +469,7 @@ impl ServiceInner { warn!( log, "failed to initialize Omicron zones"; - "error" => ?error, + "error" => #%error, "retry_after" => ?delay, ); }; @@ -409,7 +533,7 @@ impl ServiceInner { } }) .collect(); - if dns_addrs.len() > 0 { + if !dns_addrs.is_empty() { Some(dns_addrs) } else { None @@ -556,49 +680,42 @@ impl ServiceInner { ) -> Result<(), SetupServiceError> { info!(self.log, "Handing off control to Nexus"); - // Build a Blueprint describing our service plan. This should never - // fail, unless we've set up an invalid plan. - let blueprint = - build_initial_blueprint_from_plan(sled_plan, service_plan) + // Remap our plan into an easier-to-use type... + let sled_configs_by_id = + build_sled_configs_by_id(sled_plan, service_plan) .map_err(SetupServiceError::ConvertPlanToBlueprint)?; + // ... and use that to derive the initial blueprint from our plan. + let blueprint = build_initial_blueprint_from_plan( + &sled_configs_by_id, + service_plan, + ) + .map_err(SetupServiceError::ConvertPlanToBlueprint)?; info!(self.log, "Nexus address: {}", nexus_address.to_string()); - let nexus_client = NexusClient::new( + const CLIENT_TIMEOUT: Duration = Duration::from_secs(60); + let client = reqwest::Client::builder() + .connect_timeout(CLIENT_TIMEOUT) + .timeout(CLIENT_TIMEOUT) + .build() + .map_err(SetupServiceError::HttpClient)?; + + let nexus_client = NexusClient::new_with_client( &format!("http://{}", nexus_address), + client, self.log.new(o!("component" => "NexusClient")), ); - // Ensure we can quickly look up "Sled Agent Address" -> "UUID of sled". 
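The storage PUT above is wrapped in `retry_notify` with an aggressive internal-service policy: transient errors are retried with a delay and a warning, while the CONFLICT case is converted into success before the retry machinery ever sees it. The sketch below shows only the control flow of that pattern using plain std, not the omicron_common::backoff API; names and delays are illustrative.

use std::time::Duration;

// Retry an operation with bounded, doubling delays until it succeeds or a
// failure is classified as permanent.
enum RetryError<E> {
    Transient(E),
    Permanent(E),
}

fn retry_with_backoff<T, E, F>(mut op: F) -> Result<T, E>
where
    F: FnMut() -> Result<T, RetryError<E>>,
    E: std::fmt::Debug,
{
    let mut delay = Duration::from_millis(100);
    let max_delay = Duration::from_secs(10);
    loop {
        match op() {
            Ok(value) => return Ok(value),
            Err(RetryError::Permanent(err)) => return Err(err),
            Err(RetryError::Transient(err)) => {
                eprintln!("transient failure: {err:?}; retrying in {delay:?}");
                std::thread::sleep(delay);
                delay = (delay * 2).min(max_delay);
            }
        }
    }
}

fn main() {
    let mut attempts = 0;
    let result: Result<&str, &str> = retry_with_backoff(|| {
        attempts += 1;
        if attempts < 3 {
            Err(RetryError::Transient("sled agent not up yet"))
        } else {
            Ok("storage configured")
        }
    });
    println!("{result:?} after {attempts} attempts");
}

The TODO referencing omicron#4578 in the hunk above is about exactly this classification step: deciding which HTTP errors deserve the Transient treatment and which should be permanent.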
- // - // We need the ID when passing info to Nexus. - let mut id_map = HashMap::new(); - for (_, sled_request) in sled_plan.sleds.iter() { - id_map.insert( - get_sled_address(sled_request.body.subnet), - sled_request.body.id, - ); - } - - // Convert all the information we have about services and datasets into - // a format which can be processed by Nexus. - let mut services: Vec = vec![]; + // Convert all the information we have about datasets into a format + // which can be processed by Nexus. let mut datasets: Vec = vec![]; - for (addr, sled_config) in service_plan.services.iter() { - let sled_id = *id_map - .get(addr) - .expect("Sled address in service plan, but not sled plan"); - - for zone in &sled_config.zones { - services.push(zone.to_nexus_service_req(sled_id)); - } - + for sled_config in service_plan.services.values() { for zone in &sled_config.zones { if let Some((dataset_name, dataset_address)) = zone.dataset_name_and_address() { datasets.push(NexusTypes::DatasetCreateRequest { - zpool_id: dataset_name.pool().id(), + zpool_id: dataset_name.pool().id().into_untyped_uuid(), dataset_id: zone.id, request: NexusTypes::DatasetPutRequest { address: dataset_address.to_string(), @@ -632,9 +749,10 @@ impl ServiceInner { .map(|r| NexusTypes::RouteConfig { destination: r.destination, nexthop: r.nexthop, + vlan_id: r.vlan_id, }) .collect(), - addresses: config.addresses.clone(), + addresses: config.addresses.iter().cloned().map(Into::into).collect(), switch: config.switch.into(), uplink_port_speed: config.uplink_port_speed.into(), uplink_port_fec: config.uplink_port_fec.into(), @@ -651,6 +769,16 @@ impl ServiceInner { delay_open: b.delay_open, idle_hold_time: b.idle_hold_time, keepalive: b.keepalive, + remote_asn: b.remote_asn, + min_ttl: b.min_ttl, + md5_auth_key: b.md5_auth_key.clone(), + multi_exit_discriminator: b.multi_exit_discriminator, + local_pref: b.local_pref, + enforce_first_as: b.enforce_first_as, + communities: b.communities.clone(), + allowed_export: b.allowed_export.clone(), + allowed_import: b.allowed_import.clone(), + vlan_id: b.vlan_id, }) .collect(), }) @@ -660,7 +788,9 @@ impl ServiceInner { .iter() .map(|config| NexusTypes::BgpConfig { asn: config.asn, - originate: config.originate.clone(), + originate: config.originate.iter().cloned().map(Into::into).collect(), + shaper: config.shaper.clone(), + checker: config.checker.clone(), }) .collect(), bfd: config @@ -687,9 +817,48 @@ impl ServiceInner { info!(self.log, "rack_network_config: {:#?}", rack_network_config); + let physical_disks: Vec<_> = sled_configs_by_id + .iter() + .flat_map(|(sled_id, config)| { + config.disks.disks.iter().map(|config| { + NexusTypes::PhysicalDiskPutRequest { + id: config.id, + vendor: config.identity.vendor.clone(), + serial: config.identity.serial.clone(), + model: config.identity.model.clone(), + variant: NexusTypes::PhysicalDiskKind::U2, + sled_id: sled_id.into_untyped_uuid(), + } + }) + }) + .collect(); + + let zpools = sled_configs_by_id + .iter() + .flat_map(|(sled_id, config)| { + config.disks.disks.iter().map(|config| { + NexusTypes::ZpoolPutRequest { + id: config.pool_id.into_untyped_uuid(), + physical_disk_id: config.id, + sled_id: sled_id.into_untyped_uuid(), + } + }) + }) + .collect(); + + // Convert the IP allowlist into the Nexus types. + // + // This is really infallible. We have a list of IpNet's here, which + // we're converting to Nexus client types through their string + // representation. 
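Further down in this handoff path, the per-sled plan gets flattened into the flat `physical_disks` and `zpools` lists that `RackInitializationRequest` wants, with each row carrying its sled ID. The sketch below shows that flat_map shape on simplified stand-in types; the real conversion targets the Nexus client's `PhysicalDiskPutRequest` and `ZpoolPutRequest`.

use std::collections::BTreeMap;

#[derive(Clone, Debug)]
struct DiskConfig {
    serial: String,
    pool_id: u32,
}

#[derive(Debug)]
struct DiskPutRequest {
    serial: String,
    sled_id: u32,
}

#[derive(Debug)]
struct ZpoolPutRequest {
    pool_id: u32,
    sled_id: u32,
}

fn main() {
    let mut by_sled: BTreeMap<u32, Vec<DiskConfig>> = BTreeMap::new();
    by_sled.insert(1, vec![DiskConfig { serial: "d-a".into(), pool_id: 10 }]);
    by_sled.insert(2, vec![DiskConfig { serial: "d-b".into(), pool_id: 20 }]);

    // One pass per output collection, mirroring the physical_disks / zpools
    // construction in the hunks nearby.
    let disks: Vec<DiskPutRequest> = by_sled
        .iter()
        .flat_map(|(sled_id, disks)| {
            disks.iter().map(|d| DiskPutRequest {
                serial: d.serial.clone(),
                sled_id: *sled_id,
            })
        })
        .collect();
    let zpools: Vec<ZpoolPutRequest> = by_sled
        .iter()
        .flat_map(|(sled_id, disks)| {
            disks.iter().map(|d| ZpoolPutRequest {
                pool_id: d.pool_id,
                sled_id: *sled_id,
            })
        })
        .collect();

    println!("{disks:?}");
    println!("{zpools:?}");
}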
+ let allowed_source_ips = + NexusTypes::AllowedSourceIps::try_from(&config.allowed_source_ips) + .expect("Expected valid Nexus IP networks"); + let request = NexusTypes::RackInitializationRequest { blueprint, - services, + physical_disks, + zpools, datasets, internal_services_ip_pool_ranges, certs: config.external_certificates.clone(), @@ -698,6 +867,7 @@ impl ServiceInner { recovery_silo: config.recovery_silo.clone(), rack_network_config, external_port_count: port_discovery_mode.into(), + allowed_source_ips, }; let notify_nexus = || async { @@ -789,7 +959,7 @@ impl ServiceInner { warn!( self.log, "Failed to initialize CockroachDB"; - "error" => ?error, + "error" => #%error, "retry_after" => ?delay ); }; @@ -839,7 +1009,7 @@ impl ServiceInner { )?; let marker_paths: Vec = storage_manager - .get_latest_resources() + .get_latest_disks() .await .all_m2_mountpoints(CONFIG_DATASET) .into_iter() @@ -1004,6 +1174,10 @@ impl ServiceInner { .await? }; + // Before we can ask for any services, we need to ensure that storage is + // operational. + self.ensure_storage_config_at_least(&service_plan).await?; + // Set up internal DNS services first and write the initial // DNS configuration to the internal DNS servers. let v1generator = OmicronZonesConfigGenerator::initial_version( @@ -1121,18 +1295,18 @@ impl DeployStepVersion { const V5_EVERYTHING: Generation = Self::V4_COCKROACHDB.next(); } -fn build_initial_blueprint_from_plan( +// Build a map of sled ID to `SledConfig` based on the two plan types we +// generate. This is a bit of a code smell (why doesn't the plan generate this +// on its own if we need it?); we should be able to get rid of it when +// we get to https://github.com/oxidecomputer/omicron/issues/5272. +fn build_sled_configs_by_id( sled_plan: &SledPlan, service_plan: &ServicePlan, -) -> anyhow::Result { - let internal_dns_version = - Generation::try_from(service_plan.dns_config.generation) - .context("invalid internal dns version")?; - +) -> anyhow::Result> { let mut sled_configs = BTreeMap::new(); for sled_request in sled_plan.sleds.values() { let sled_addr = get_sled_address(sled_request.body.subnet); - let sled_id = sled_request.body.id; + let sled_id = SledUuid::from_untyped_uuid(sled_request.body.id); let entry = match sled_configs.entry(sled_id) { btree_map::Entry::Vacant(entry) => entry, btree_map::Entry::Occupied(_) => { @@ -1151,18 +1325,83 @@ fn build_initial_blueprint_from_plan( entry.insert(sled_config.clone()); } - Ok(build_initial_blueprint_from_sled_configs( - sled_configs, + if sled_configs.len() != service_plan.services.len() { + bail!( + "error mapping service plan to sled IDs; converted {} sled \ + addresses into {} sled configs", + service_plan.services.len(), + sled_configs.len(), + ); + } + + Ok(sled_configs) +} + +// Build an initial blueprint +fn build_initial_blueprint_from_plan( + sled_configs_by_id: &BTreeMap, + service_plan: &ServicePlan, +) -> anyhow::Result { + let internal_dns_version = + Generation::try_from(service_plan.dns_config.generation) + .context("invalid internal dns version")?; + + let blueprint = build_initial_blueprint_from_sled_configs( + sled_configs_by_id, internal_dns_version, - )) + )?; + + Ok(blueprint) } pub(crate) fn build_initial_blueprint_from_sled_configs( - sled_configs: BTreeMap, + sled_configs_by_id: &BTreeMap, internal_dns_version: Generation, -) -> Blueprint { +) -> Result { + // Helper to convert an `OmicronZoneConfig` into a `BlueprintZoneConfig`. + // This is separate primarily so rustfmt doesn't lose its mind. 
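The new `build_sled_configs_by_id` helper above uses the BTreeMap entry API so a duplicate sled ID in the plan is a hard error rather than a silent overwrite, and then double-checks that the number of configs matches the number of sled addresses in the service plan. A minimal sketch of that pattern on stand-in types:

use std::collections::{btree_map, BTreeMap};

// Insert through the entry API so a repeated key is reported instead of
// overwriting the previous value.
fn insert_unique(
    map: &mut BTreeMap<u32, String>,
    sled_id: u32,
    config: String,
) -> Result<(), String> {
    match map.entry(sled_id) {
        btree_map::Entry::Vacant(entry) => {
            entry.insert(config);
            Ok(())
        }
        btree_map::Entry::Occupied(_) => {
            Err(format!("duplicate sled ID in plan: {sled_id}"))
        }
    }
}

fn main() {
    let mut configs = BTreeMap::new();
    assert!(insert_unique(&mut configs, 1, "sled-1 config".into()).is_ok());
    assert!(insert_unique(&mut configs, 1, "conflicting config".into()).is_err());
    // A final cardinality check (as done against service_plan.services above)
    // catches sleds that appear in one plan but not the other.
    assert_eq!(configs.len(), 1);
}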
+ let to_bp_zone_config = |z: &crate::params::OmicronZoneConfig| { + // All initial zones are in-service. + let disposition = BlueprintZoneDisposition::InService; + BlueprintZoneConfig::from_omicron_zone_config( + z.clone().into(), + disposition, + // This is pretty weird: IP IDs don't exist yet, so it's fine for us + // to make them up (Nexus will record them as a part of the + // handoff). We could pass `None` here for some zone types, but it's + // a little simpler to just always pass a new ID, which will only be + // used if the zone type has an external IP. + // + // This should all go away once RSS starts using blueprints more + // directly (instead of this conversion after the fact): + // https://github.com/oxidecomputer/omicron/issues/5272 + Some(ExternalIpUuid::new_v4()), + ) + }; + + let mut blueprint_disks = BTreeMap::new(); + for (sled_id, sled_config) in sled_configs_by_id { + blueprint_disks.insert( + *sled_id, + BlueprintPhysicalDisksConfig { + generation: sled_config.disks.generation, + disks: sled_config + .disks + .disks + .iter() + .map(|d| SledAgentTypes::OmicronPhysicalDiskConfig { + identity: d.identity.clone(), + id: d.id, + pool_id: d.pool_id, + }) + .collect(), + }, + ); + } + let mut blueprint_zones = BTreeMap::new(); - for (sled_id, sled_config) in sled_configs { + let mut sled_state = BTreeMap::new(); + for (sled_id, sled_config) in sled_configs_by_id { let zones_config = BlueprintZonesConfig { // This is a bit of a hack. We only construct a blueprint after // completing RSS, so we need to know the final generation value @@ -1177,31 +1416,34 @@ pub(crate) fn build_initial_blueprint_from_sled_configs( generation: DeployStepVersion::V5_EVERYTHING, zones: sled_config .zones - .into_iter() - .map(|z| BlueprintZoneConfig { - config: z.into(), - // All initial zones are in-service. - disposition: BlueprintZoneDisposition::InService, - }) - .collect(), + .iter() + .map(to_bp_zone_config) + .collect::>()?, }; - blueprint_zones.insert(sled_id, zones_config); + blueprint_zones.insert(*sled_id, zones_config); + sled_state.insert(*sled_id, SledState::Active); } - Blueprint { + Ok(Blueprint { id: Uuid::new_v4(), blueprint_zones, + blueprint_disks, + sled_state, parent_blueprint_id: None, internal_dns_version, // We don't configure external DNS during RSS, so set it to an initial // generation of 1. Nexus will bump this up when it updates external DNS // (including creating the recovery silo). external_dns_version: Generation::new(), + // Nexus will fill in the CockroachDB values during initialization. 
+ cockroachdb_fingerprint: String::new(), + cockroachdb_setting_preserve_downgrade: + CockroachDbPreserveDowngrade::DoNotModify, time_created: Utc::now(), creator: "RSS".to_string(), comment: "initial blueprint from rack setup".to_string(), - } + }) } /// Facilitates creating a sequence of OmicronZonesConfig objects for each sled @@ -1301,57 +1543,65 @@ mod test { params::OmicronZoneType, rack_setup::plan::service::{Plan as ServicePlan, SledInfo}, }; - use illumos_utils::zpool::ZpoolName; - use omicron_common::{address::Ipv6Subnet, api::external::Generation}; + use omicron_common::{ + address::{get_sled_address, Ipv6Subnet, SLED_PREFIX}, + api::external::{ByteCount, Generation}, + disk::DiskIdentity, + }; + use omicron_uuid_kinds::{GenericUuid, SledUuid}; + use sled_agent_client::types as SledAgentTypes; + + fn make_sled_info( + sled_id: SledUuid, + subnet: Ipv6Subnet, + u2_count: usize, + ) -> SledInfo { + let sled_agent_address = get_sled_address(subnet); + SledInfo::new( + sled_id, + subnet, + sled_agent_address, + SledAgentTypes::Inventory { + sled_id: sled_id.into_untyped_uuid(), + sled_agent_address: sled_agent_address.to_string(), + sled_role: SledAgentTypes::SledRole::Scrimlet, + baseboard: SledAgentTypes::Baseboard::Unknown, + usable_hardware_threads: 32, + usable_physical_ram: ByteCount::from_gibibytes_u32(16), + reservoir_size: ByteCount::from_gibibytes_u32(0), + disks: (0..u2_count) + .map(|i| SledAgentTypes::InventoryDisk { + identity: DiskIdentity { + vendor: "test-manufacturer".to_string(), + serial: format!("test-{sled_id}-#{i}"), + model: "v1".to_string(), + }, + variant: SledAgentTypes::DiskVariant::U2, + slot: i.try_into().unwrap(), + }) + .collect(), + zpools: vec![], + }, + true, + ) + } fn make_test_service_plan() -> ServicePlan { let rss_config = crate::bootstrap::params::test_config(); let fake_sleds = vec![ - SledInfo::new( - "d4ba4bbe-8542-4907-bc8f-48df53eb5089".parse().unwrap(), - Ipv6Subnet::new("fd00:1122:3344:101::1".parse().unwrap()), - "[fd00:1122:3344:101::1]:80".parse().unwrap(), - vec![ - ZpoolName::new_internal( - "c5885278-0ae2-4f1e-9223-07f2ada818e1".parse().unwrap(), - ), - ZpoolName::new_internal( - "57465977-8275-43aa-a320-b6cd5cb20ca6".parse().unwrap(), - ), - ZpoolName::new_external( - "886f9fe7-bf70-4ddd-ae92-764dc3ed14ab".parse().unwrap(), - ), - ZpoolName::new_external( - "4c9061b1-345b-4985-8cbd-a2a899f15b68".parse().unwrap(), - ), - ZpoolName::new_external( - "b2bd488e-b187-42a0-b157-9ab0f70d91a8".parse().unwrap(), - ), - ], - true, + make_sled_info( + SledUuid::new_v4(), + Ipv6Subnet::::new( + "fd00:1122:3344:101::1".parse().unwrap(), + ), + 5, ), - SledInfo::new( - "b4359dea-665d-41ca-a681-f55912f2d5d0".parse().unwrap(), - Ipv6Subnet::new("fd00:1122:3344:102::1".parse().unwrap()), - "[fd00:1122:3344:102::1]:80".parse().unwrap(), - vec![ - ZpoolName::new_internal( - "34d6b5e5-a09f-4e96-a599-fa306ce6d983".parse().unwrap(), - ), - ZpoolName::new_internal( - "e9b8d1ea-da29-4b61-a493-c0ed319098da".parse().unwrap(), - ), - ZpoolName::new_external( - "37f8e903-2adb-4613-b78c-198122c289f0".parse().unwrap(), - ), - ZpoolName::new_external( - "b50f787c-97b3-4b91-a5bd-99d11fc86fb8".parse().unwrap(), - ), - ZpoolName::new_external( - "809e50c8-930e-413a-950c-69a540b688e2".parse().unwrap(), - ), - ], - true, + make_sled_info( + SledUuid::new_v4(), + Ipv6Subnet::::new( + "fd00:1122:3344:102::1".parse().unwrap(), + ), + 5, ), ]; let service_plan = diff --git a/sled-agent/src/server.rs b/sled-agent/src/server.rs index b93ad0721c..f702e4c67d 100644 --- 
a/sled-agent/src/server.rs +++ b/sled-agent/src/server.rs @@ -11,12 +11,10 @@ use crate::bootstrap::params::StartSledAgentRequest; use crate::long_running_tasks::LongRunningTaskHandles; use crate::nexus::NexusClientWithResolver; use crate::services::ServiceManager; -use crate::storage_monitor::UnderlayAccess; use internal_dns::resolver::Resolver; use slog::Logger; use std::net::SocketAddr; use std::sync::Arc; -use tokio::sync::oneshot; use uuid::Uuid; /// Packages up a [`SledAgent`], running the sled agent API under a Dropshot @@ -42,7 +40,6 @@ impl Server { request: StartSledAgentRequest, long_running_tasks_handles: LongRunningTaskHandles, services: ServiceManager, - underlay_available_tx: oneshot::Sender, ) -> Result { info!(log, "setting up sled agent server"); @@ -65,7 +62,6 @@ impl Server { request, services, long_running_tasks_handles, - underlay_available_tx, ) .await .map_err(|e| e.to_string())?; diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index c0dbaebcc2..ff10d4aed7 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -94,6 +94,7 @@ use sled_hardware::underlay; use sled_hardware::SledMode; use sled_hardware_types::underlay::BOOTSTRAP_PREFIX; use sled_hardware_types::Baseboard; +use sled_storage::config::MountConfig; use sled_storage::dataset::{ DatasetKind, DatasetName, CONFIG_DATASET, INSTALL_DATASET, ZONE_DATASET, }; @@ -332,6 +333,14 @@ impl From for omicron_common::api::external::Error { } } +/// Result of [ServiceManager::load_services] +pub enum LoadServicesResult { + /// We didn't load anything, there wasn't anything to load + NoServicesToLoad, + /// We successfully loaded the zones from our ledger. + ServicesLoaded, +} + fn display_zone_init_errors(errors: &[(String, Box)]) -> String { if errors.len() == 1 { return format!( @@ -472,6 +481,7 @@ enum SwitchService { Wicketd { baseboard: Baseboard }, Dendrite { asic: DendriteAsic }, Lldpd { baseboard: Baseboard }, + Pumpkind { asic: DendriteAsic }, Tfport { pkt_source: String, asic: DendriteAsic }, Uplink, MgDdm { mode: String }, @@ -486,6 +496,7 @@ impl crate::smf_helper::Service for SwitchService { SwitchService::Wicketd { .. } => "wicketd", SwitchService::Dendrite { .. } => "dendrite", SwitchService::Lldpd { .. } => "lldpd", + SwitchService::Pumpkind { .. } => "pumpkind", SwitchService::Tfport { .. } => "tfport", SwitchService::Uplink { .. } => "uplink", SwitchService::MgDdm { .. } => "mg-ddm", @@ -661,7 +672,7 @@ pub(crate) enum TimeSyncConfig { // Skips timesync unconditionally. Skip, // Fails timesync unconditionally. 
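The new `LoadServicesResult` gives `load_services` a way to say whether it actually found a ledger, instead of returning `()`. The sketch below shows the calling pattern that enables; the function body is a stand-in, not the real ServiceManager logic.

enum LoadServicesResult {
    NoServicesToLoad,
    ServicesLoaded,
}

fn load_services(ledger_present: bool) -> Result<LoadServicesResult, String> {
    if !ledger_present {
        return Ok(LoadServicesResult::NoServicesToLoad);
    }
    // ... start the ledgered zones here ...
    Ok(LoadServicesResult::ServicesLoaded)
}

fn main() -> Result<(), String> {
    match load_services(false)? {
        LoadServicesResult::NoServicesToLoad => {
            println!("no ledger yet; waiting for RSS or Nexus to provide zones");
        }
        LoadServicesResult::ServicesLoaded => {
            println!("zones restored from ledger");
        }
    }
    Ok(())
}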
- #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] Fail, } @@ -734,12 +745,12 @@ impl ServiceManager { } } - #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] fn override_ledger_directory(&self, path: Utf8PathBuf) { self.inner.ledger_directory_override.set(path).unwrap(); } - #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] fn override_image_directory(&self, path: Utf8PathBuf) { self.inner.image_directory_override.set(path).unwrap(); } @@ -752,7 +763,7 @@ impl ServiceManager { if let Some(dir) = self.inner.ledger_directory_override.get() { return vec![dir.join(SERVICES_LEDGER_FILENAME)]; } - let resources = self.inner.storage.get_latest_resources().await; + let resources = self.inner.storage.get_latest_disks().await; resources .all_m2_mountpoints(CONFIG_DATASET) .into_iter() @@ -764,7 +775,7 @@ impl ServiceManager { if let Some(dir) = self.inner.ledger_directory_override.get() { return vec![dir.join(ZONES_LEDGER_FILENAME)]; } - let resources = self.inner.storage.get_latest_resources().await; + let resources = self.inner.storage.get_latest_disks().await; resources .all_m2_mountpoints(CONFIG_DATASET) .into_iter() @@ -935,7 +946,7 @@ impl ServiceManager { // - If we know that disks are missing, we could wait for them // - We could permanently fail if we are able to distinguish other errors // more clearly. - pub async fn load_services(&self) -> Result<(), Error> { + pub async fn load_services(&self) -> Result { let log = &self.inner.log; let mut existing_zones = self.inner.zones.lock().await; let Some(mut ledger) = @@ -947,7 +958,7 @@ impl ServiceManager { "Loading Omicron zones - \ no zones nor old-format services found" ); - return Ok(()); + return Ok(LoadServicesResult::NoServicesToLoad); }; let zones_config = ledger.data_mut(); @@ -965,7 +976,7 @@ impl ServiceManager { None, ) .await?; - Ok(()) + Ok(LoadServicesResult::ServicesLoaded) } /// Sets up "Sled Agent" information, including underlay info. @@ -1265,7 +1276,10 @@ impl ServiceManager { // XXX: need to revisit iff. any services get more than one // address. let (target_ip, first_port, last_port) = match snat { - Some(s) => (s.ip, s.first_port, s.last_port), + Some(s) => { + let (first_port, last_port) = s.port_range_raw(); + (s.ip, first_port, last_port) + } None => (floating_ips[0], 0, u16::MAX), }; @@ -1508,11 +1522,12 @@ impl ServiceManager { // If the boot disk exists, look for the image in the "install" dataset // there too. 
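The ledger-path helpers above return one candidate path per M.2 config-dataset mountpoint, so the services and zones ledgers survive the loss of a single boot device. A minimal sketch of that derivation; the mountpoints and filename here are illustrative, not the actual CONFIG_DATASET layout or ledger filenames.

use std::path::PathBuf;

fn ledger_paths(m2_config_mountpoints: &[PathBuf], filename: &str) -> Vec<PathBuf> {
    m2_config_mountpoints.iter().map(|p| p.join(filename)).collect()
}

fn main() {
    let mountpoints = [
        PathBuf::from("/pool/int/aaaa/config"),
        PathBuf::from("/pool/int/bbbb/config"),
    ];
    for path in ledger_paths(&mountpoints, "omicron-zones.json") {
        // A ledger write goes to every copy; a read takes the newest valid one.
        println!("{}", path.display());
    }
}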
- if let Some((_, boot_zpool)) = - self.inner.storage.get_latest_resources().await.boot_disk() - { - zone_image_paths - .push(boot_zpool.dataset_mountpoint(INSTALL_DATASET)); + let all_disks = self.inner.storage.get_latest_disks().await; + if let Some((_, boot_zpool)) = all_disks.boot_disk() { + zone_image_paths.push(boot_zpool.dataset_mountpoint( + &all_disks.mount_config.root, + INSTALL_DATASET, + )); } let zone_type_str = match &request { @@ -1993,7 +2008,7 @@ impl ServiceManager { Self::dns_install(info, Some(dns_servers.to_vec()), domain) .await?; - let mut ntp_config = PropertyGroupBuilder::new("config") + let mut chrony_config = PropertyGroupBuilder::new("config") .add_property("allow", "astring", &rack_net) .add_property( "boundary", @@ -2002,7 +2017,7 @@ impl ServiceManager { ); for s in ntp_servers { - ntp_config = ntp_config.add_property( + chrony_config = chrony_config.add_property( "server", "astring", &s.to_string(), @@ -2017,13 +2032,17 @@ impl ServiceManager { } let ntp_service = ServiceBuilder::new("oxide/ntp") - .add_instance( + .add_instance(ServiceInstanceBuilder::new("default")); + + let chrony_setup_service = + ServiceBuilder::new("oxide/chrony-setup").add_instance( ServiceInstanceBuilder::new("default") - .add_property_group(ntp_config), + .add_property_group(chrony_config), ); let mut profile = ProfileBuilder::new("omicron") .add_service(nw_setup_service) + .add_service(chrony_setup_service) .add_service(disabled_ssh_service) .add_service(dns_install_service) .add_service(dns_client_service) @@ -2524,7 +2543,7 @@ impl ServiceManager { ); smfh.setprop( "config/rack-subnet", - &rack_subnet.net().ip().to_string(), + &rack_subnet.net().addr().to_string(), )?; } @@ -2692,7 +2711,7 @@ impl ServiceManager { // network address, without the mask. smfh.setprop( format!("config/techport{i}_prefix"), - prefix.net().network().to_string(), + prefix.net().addr(), )?; } smfh.setprop("config/pkt_source", pkt_source)?; @@ -2747,6 +2766,18 @@ impl ServiceManager { } smfh.refresh()?; } + SwitchService::Pumpkind { asic } => { + // The pumpkin daemon is only needed when running on + // with real sidecar. 
+ if asic == &DendriteAsic::TofinoAsic { + info!( + self.inner.log, + "Setting up pumpkind service" + ); + smfh.setprop("config/mode", "switch")?; + smfh.refresh()?; + } + } SwitchService::Uplink => { // Nothing to do here - this service is special and // configured in @@ -2755,16 +2786,13 @@ impl ServiceManager { SwitchService::Mgd => { info!(self.inner.log, "Setting up mgd service"); smfh.delpropvalue("config/dns_servers", "*")?; - let info = self - .inner - .sled_info - .get() - .ok_or(Error::SledAgentNotReady)?; - smfh.setprop("config/rack_uuid", info.rack_id)?; - smfh.setprop( - "config/sled_uuid", - info.config.sled_id, - )?; + if let Some(info) = self.inner.sled_info.get() { + smfh.setprop("config/rack_uuid", info.rack_id)?; + smfh.setprop( + "config/sled_uuid", + info.config.sled_id, + )?; + } for address in &request.zone.addresses { if *address != Ipv6Addr::LOCALHOST { let az_prefix = @@ -2785,16 +2813,13 @@ impl ServiceManager { SwitchService::MgDdm { mode } => { info!(self.inner.log, "Setting up mg-ddm service"); smfh.setprop("config/mode", &mode)?; - let info = self - .inner - .sled_info - .get() - .ok_or(Error::SledAgentNotReady)?; - smfh.setprop("config/rack_uuid", info.rack_id)?; - smfh.setprop( - "config/sled_uuid", - info.config.sled_id, - )?; + if let Some(info) = self.inner.sled_info.get() { + smfh.setprop("config/rack_uuid", info.rack_id)?; + smfh.setprop( + "config/sled_uuid", + info.config.sled_id, + )?; + } smfh.delpropvalue("config/dns_servers", "*")?; for address in &request.zone.addresses { if *address != Ipv6Addr::LOCALHOST { @@ -2912,6 +2937,7 @@ impl ServiceManager { // storage configuration against the reality of the current sled. async fn start_omicron_zone( &self, + mount_config: &MountConfig, zone: &OmicronZoneConfig, time_is_synchronized: bool, all_u2_pools: &Vec, @@ -2930,7 +2956,11 @@ impl ServiceManager { // Ensure that this zone's storage is ready. let root = self - .validate_storage_and_pick_mountpoint(&zone, &all_u2_pools) + .validate_storage_and_pick_mountpoint( + mount_config, + &zone, + &all_u2_pools, + ) .await?; let config = OmicronZoneConfigLocal { zone: zone.clone(), root }; @@ -2959,6 +2989,7 @@ impl ServiceManager { // to start. async fn start_omicron_zones( &self, + mount_config: &MountConfig, requests: impl Iterator + Clone, time_is_synchronized: bool, all_u2_pools: &Vec, @@ -2975,6 +3006,7 @@ impl ServiceManager { let futures = requests.map(|zone| async move { self.start_omicron_zone( + mount_config, &zone, time_is_synchronized, all_u2_pools, @@ -3198,7 +3230,8 @@ impl ServiceManager { } // Collect information that's necessary to start new zones - let storage = self.inner.storage.get_latest_resources().await; + let storage = self.inner.storage.get_latest_disks().await; + let mount_config = &storage.mount_config; let all_u2_pools = storage.all_u2_zpools(); let time_is_synchronized = match self.timesync_get_locked(&existing_zones).await { @@ -3211,6 +3244,7 @@ impl ServiceManager { // Concurrently boot all new zones let StartZonesResult { new_zones, errors } = self .start_omicron_zones( + mount_config, zones_to_be_added, time_is_synchronized, &all_u2_pools, @@ -3311,6 +3345,7 @@ impl ServiceManager { // is valid. 
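The new `mount_config` parameter threads the storage mount root into every place a dataset mountpoint gets computed, and `validate_storage_and_pick_mountpoint` (just below) falls back to a randomly chosen U.2 pool when a zone isn't pinned to a particular dataset. A rough sketch of both pieces, assuming the rand crate as the real code does; the path layout and dataset name are illustrative, not the exact sled-storage scheme.

use rand::seq::SliceRandom;
use std::path::{Path, PathBuf};

// Illustrative: <mount root>/<pool>/<dataset> as the zone root location.
fn dataset_mountpoint(mount_root: &Path, pool: &str, dataset: &str) -> PathBuf {
    mount_root.join(pool).join(dataset)
}

fn main() {
    // Tests can point the mount root at a temporary directory instead of "/".
    let mount_root = PathBuf::from("/");
    let all_u2_pools = vec!["oxp_1111".to_string(), "oxp_2222".to_string()];

    let pool = all_u2_pools
        .choose(&mut rand::thread_rng())
        .expect("at least one U.2 pool");
    let root = dataset_mountpoint(&mount_root, pool, "zone");
    println!("zone root candidate: {}", root.display());
}

Making the mount root explicit is what lets the new StorageManagerTestHarness-based tests run against temporary directories instead of the real pool layout.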
async fn validate_storage_and_pick_mountpoint( &self, + mount_config: &MountConfig, zone: &OmicronZoneConfig, all_u2_pools: &Vec, ) -> Result { @@ -3369,14 +3404,16 @@ impl ServiceManager { device: format!("zpool: {data_pool}"), }); } - data_pool.dataset_mountpoint(ZONE_DATASET) + data_pool.dataset_mountpoint(&mount_config.root, ZONE_DATASET) } else { // If the zone it not coupled to other datsets, we pick one // arbitrarily. let mut rng = rand::thread_rng(); all_u2_pools .choose(&mut rng) - .map(|pool| pool.dataset_mountpoint(ZONE_DATASET)) + .map(|pool| { + pool.dataset_mountpoint(&mount_config.root, ZONE_DATASET) + }) .ok_or_else(|| Error::U2NotFound)? .clone() }; @@ -3483,7 +3520,7 @@ impl ServiceManager { let skip_timesync = match &self.inner.time_sync_config { TimeSyncConfig::Normal => false, TimeSyncConfig::Skip => true, - #[cfg(test)] + #[cfg(all(test, target_os = "illumos"))] TimeSyncConfig::Fail => { info!(self.inner.log, "Configured to fail timesync checks"); return Err(Error::TimeNotSynchronized); @@ -3597,6 +3634,7 @@ impl ServiceManager { SwitchService::Dendrite { asic: DendriteAsic::TofinoAsic }, SwitchService::Lldpd { baseboard: baseboard.clone() }, SwitchService::ManagementGatewayService, + SwitchService::Pumpkind { asic: DendriteAsic::TofinoAsic }, SwitchService::Tfport { pkt_source: "tfpkt0".to_string(), asic: DendriteAsic::TofinoAsic, @@ -3957,12 +3995,12 @@ impl ServiceManager { info!( self.inner.log, "configuring wicketd"; - "rack_subnet" => %rack_subnet.net().ip(), + "rack_subnet" => %rack_subnet.net().addr(), ); smfh.setprop( "config/rack-subnet", - &rack_subnet.net().ip().to_string(), + &rack_subnet.net().addr().to_string(), )?; smfh.refresh()?; @@ -3989,16 +4027,74 @@ impl ServiceManager { // the tfport service shouldn't need to be // restarted. } + SwitchService::Pumpkind { .. } => { + // Unless we want to plumb through the "only log + // errors, don't react" option, there are no user + // serviceable parts for this daemon. + } SwitchService::Uplink { .. 
} => { // Only configured in // `ensure_switch_zone_uplinks_configured` } + SwitchService::SpSim => { + // nothing to configure + } + SwitchService::Mgd => { + info!(self.inner.log, "configuring mgd service"); + smfh.delpropvalue("config/dns_servers", "*")?; + if let Some(info) = self.inner.sled_info.get() { + smfh.setprop("config/rack_uuid", info.rack_id)?; + smfh.setprop( + "config/sled_uuid", + info.config.sled_id, + )?; + } + for address in &request.addresses { + if *address != Ipv6Addr::LOCALHOST { + let az_prefix = + Ipv6Subnet::::new(*address); + for addr in + Resolver::servers_from_subnet(az_prefix) + { + smfh.addpropvalue( + "config/dns_servers", + &format!("{addr}"), + )?; + } + break; + } + } + smfh.refresh()?; + } SwitchService::MgDdm { mode } => { + info!(self.inner.log, "configuring mg-ddm service"); smfh.delpropvalue("config/mode", "*")?; smfh.addpropvalue("config/mode", &mode)?; + if let Some(info) = self.inner.sled_info.get() { + smfh.setprop("config/rack_uuid", info.rack_id)?; + smfh.setprop( + "config/sled_uuid", + info.config.sled_id, + )?; + } + smfh.delpropvalue("config/dns_servers", "*")?; + for address in &request.addresses { + if *address != Ipv6Addr::LOCALHOST { + let az_prefix = + Ipv6Subnet::::new(*address); + for addr in + Resolver::servers_from_subnet(az_prefix) + { + smfh.addpropvalue( + "config/dns_servers", + &format!("{addr}"), + )?; + } + break; + } + } smfh.refresh()?; } - _ => (), } } } @@ -4081,10 +4177,9 @@ impl ServiceManager { } } -#[cfg(test)] +#[cfg(all(test, target_os = "illumos"))] mod test { use super::*; - use illumos_utils::zpool::ZpoolName; use illumos_utils::{ dladm::{ Etherstub, MockDladm, BOOTSTRAP_ETHERSTUB_NAME, @@ -4093,9 +4188,8 @@ mod test { svc, zone::MockZones, }; - use sled_storage::disk::{RawDisk, SyntheticDisk}; - use sled_storage::manager::{FakeStorageManager, StorageHandle}; + use sled_storage::manager_test_harness::StorageManagerTestHarness; use std::net::{Ipv6Addr, SocketAddrV6}; use std::os::unix::process::ExitStatusExt; use uuid::Uuid; @@ -4319,18 +4413,21 @@ mod test { ) -> Result<(), Error> { let zone_prefix = format!("oxz_{}", zone_type.zone_type_str()); let _expectations = expect_new_service(&zone_prefix); - mgr.ensure_all_omicron_zones_persistent( - OmicronZonesConfig { - generation, - zones: vec![OmicronZoneConfig { - id, - underlay_address: Ipv6Addr::LOCALHOST, - zone_type, - }], - }, - Some(&tmp_dir), - ) - .await + let r = mgr + .ensure_all_omicron_zones_persistent( + OmicronZonesConfig { + generation, + zones: vec![OmicronZoneConfig { + id, + underlay_address: Ipv6Addr::LOCALHOST, + zone_type, + }], + }, + Some(&tmp_dir), + ) + .await; + illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); + r } // Prepare to call "ensure" for a service which already exists. 
We should @@ -4413,31 +4510,25 @@ mod test { } } - async fn setup_storage() -> StorageHandle { - let (manager, handle) = FakeStorageManager::new(); - - // Spawn the storage manager as done by sled-agent - tokio::spawn(async move { - manager.run().await; - }); - - let internal_zpool_name = ZpoolName::new_internal(Uuid::new_v4()); - let internal_disk: RawDisk = - SyntheticDisk::new(internal_zpool_name, 0).into(); - handle.upsert_disk(internal_disk).await; - let external_zpool_name = ZpoolName::new_external(Uuid::new_v4()); - let external_disk: RawDisk = - SyntheticDisk::new(external_zpool_name, 1).into(); - handle.upsert_disk(external_disk).await; - - handle + async fn setup_storage(log: &Logger) -> StorageManagerTestHarness { + let mut harness = StorageManagerTestHarness::new(&log).await; + let raw_disks = + harness.add_vdevs(&["u2_test.vdev", "m2_test.vdev"]).await; + harness.handle().key_manager_ready().await; + let config = harness.make_config(1, &raw_disks); + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await + .expect("Failed to ensure disks"); + assert!(!result.has_error(), "{:?}", result); + harness } - #[derive(Clone)] struct LedgerTestHelper<'a> { log: slog::Logger, ddmd_client: DdmAdminClient, - storage_handle: StorageHandle, + storage_test_harness: StorageManagerTestHarness, zone_bundler: ZoneBundler, test_config: &'a TestConfig, } @@ -4448,41 +4539,45 @@ mod test { test_config: &'a TestConfig, ) -> LedgerTestHelper { let ddmd_client = DdmAdminClient::localhost(&log).unwrap(); - let storage_handle = setup_storage().await; + let storage_test_harness = setup_storage(&log).await; let zone_bundler = ZoneBundler::new( log.clone(), - storage_handle.clone(), + storage_test_harness.handle().clone(), Default::default(), ); LedgerTestHelper { log, ddmd_client, - storage_handle, + storage_test_harness, zone_bundler, test_config, } } - fn new_service_manager(self) -> ServiceManager { + async fn cleanup(&mut self) { + self.storage_test_harness.cleanup().await; + } + + fn new_service_manager(&self) -> ServiceManager { self.new_service_manager_with_timesync(TimeSyncConfig::Skip) } fn new_service_manager_with_timesync( - self, + &self, time_sync_config: TimeSyncConfig, ) -> ServiceManager { let log = &self.log; let mgr = ServiceManager::new( log, - self.ddmd_client, + self.ddmd_client.clone(), make_bootstrap_networking_config(), SledMode::Auto, time_sync_config, SidecarRevision::Physical("rev-test".to_string()), vec![], - self.storage_handle, - self.zone_bundler, + self.storage_test_harness.handle().clone(), + self.zone_bundler.clone(), ); self.test_config.override_paths(&mgr); mgr @@ -4516,7 +4611,7 @@ mod test { let logctx = omicron_test_utils::dev::test_setup_log("test_ensure_service"); let test_config = TestConfig::new().await; - let helper = + let mut helper = LedgerTestHelper::new(logctx.log.clone(), &test_config).await; let mgr = helper.new_service_manager(); LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); @@ -4545,6 +4640,7 @@ mod test { drop_service_manager(mgr); + helper.cleanup().await; logctx.cleanup_successful(); } @@ -4554,7 +4650,7 @@ mod test { "test_ensure_service_before_timesync", ); let test_config = TestConfig::new().await; - let helper = + let mut helper = LedgerTestHelper::new(logctx.log.clone(), &test_config).await; let mgr = @@ -4619,6 +4715,7 @@ mod test { .unwrap(); drop_service_manager(mgr); + helper.cleanup().await; logctx.cleanup_successful(); } @@ -4628,7 +4725,7 @@ mod test { 
"test_ensure_service_which_already_exists", ); let test_config = TestConfig::new().await; - let helper = + let mut helper = LedgerTestHelper::new(logctx.log.clone(), &test_config).await; let mgr = helper.new_service_manager(); LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); @@ -4647,6 +4744,7 @@ mod test { drop_service_manager(mgr); + helper.cleanup().await; logctx.cleanup_successful(); } @@ -4656,12 +4754,12 @@ mod test { "test_services_are_recreated_on_reboot", ); let test_config = TestConfig::new().await; - let helper = + let mut helper = LedgerTestHelper::new(logctx.log.clone(), &test_config).await; // First, spin up a ServiceManager, create a new zone, and then tear // down the ServiceManager. - let mgr = helper.clone().new_service_manager(); + let mgr = helper.new_service_manager(); LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); let v2 = Generation::new().next(); @@ -4680,6 +4778,7 @@ mod test { let _expectations = expect_new_service(EXPECTED_ZONE_NAME_PREFIX); let mgr = helper.new_service_manager(); LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); + illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); let found = mgr.omicron_zones_list().await.expect("failed to list zones"); @@ -4689,6 +4788,7 @@ mod test { drop_service_manager(mgr); + helper.cleanup().await; logctx.cleanup_successful(); } @@ -4698,12 +4798,12 @@ mod test { "test_services_do_not_persist_without_config", ); let test_config = TestConfig::new().await; - let helper = + let mut helper = LedgerTestHelper::new(logctx.log.clone(), &test_config).await; // First, spin up a ServiceManager, create a new zone, and then tear // down the ServiceManager. - let mgr = helper.clone().new_service_manager(); + let mgr = helper.new_service_manager(); LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); let v1 = Generation::new(); @@ -4736,6 +4836,7 @@ mod test { drop_service_manager(mgr); + helper.cleanup().await; logctx.cleanup_successful(); } @@ -4745,7 +4846,7 @@ mod test { let logctx = omicron_test_utils::dev::test_setup_log("test_bad_generations"); let test_config = TestConfig::new().await; - let helper = + let mut helper = LedgerTestHelper::new(logctx.log.clone(), &test_config).await; let mgr = helper.new_service_manager(); LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); @@ -4853,6 +4954,8 @@ mod test { drop_service_manager(mgr); + illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); + helper.cleanup().await; logctx.cleanup_successful(); } @@ -4874,9 +4977,9 @@ mod test { .expect("failed to copy example old-format services ledger into place"); // Now start the service manager. - let helper = + let mut helper = LedgerTestHelper::new(logctx.log.clone(), &test_config).await; - let mgr = helper.clone().new_service_manager(); + let mgr = helper.new_service_manager(); LedgerTestHelper::sled_agent_started(&logctx.log, &test_config, &mgr); // Trigger the migration code. (Yes, it's hokey that we create this @@ -4917,6 +5020,7 @@ mod test { assert_eq!(found, expected_config); drop_service_manager(mgr); + helper.cleanup().await; logctx.cleanup_successful(); } @@ -4926,7 +5030,7 @@ mod test { "test_old_ledger_migration_bad", ); let test_config = TestConfig::new().await; - let helper = + let mut helper = LedgerTestHelper::new(logctx.log.clone(), &test_config).await; // Before we start things, stuff a broken ledger into place. 
For this @@ -4954,6 +5058,7 @@ mod test { format!("{:#}", error) ); + helper.cleanup().await; logctx.cleanup_successful(); } @@ -4961,9 +5066,9 @@ mod test { fn test_bootstrap_addr_to_techport_prefixes() { let ba: Ipv6Addr = "fdb0:1122:3344:5566::".parse().unwrap(); let prefixes = ServiceManager::bootstrap_addr_to_techport_prefixes(&ba); - assert!(prefixes.iter().all(|p| p.net().prefix() == 64)); - let prefix0 = prefixes[0].net().network(); - let prefix1 = prefixes[1].net().network(); + assert!(prefixes.iter().all(|p| p.net().width() == 64)); + let prefix0 = prefixes[0].net().prefix(); + let prefix1 = prefixes[1].net().prefix(); assert_eq!(prefix0.segments()[1..], ba.segments()[1..]); assert_eq!(prefix1.segments()[1..], ba.segments()[1..]); assert_eq!(prefix0.segments()[0], 0xfdb1); diff --git a/sled-agent/src/sim/disk.rs b/sled-agent/src/sim/disk.rs index fc388f6ce2..284e424ebf 100644 --- a/sled-agent/src/sim/disk.rs +++ b/sled-agent/src/sim/disk.rs @@ -8,7 +8,6 @@ use crate::nexus::NexusClient; use crate::params::DiskStateRequested; use crate::sim::simulatable::Simulatable; use async_trait::async_trait; -use dropshot::ConfigDropshot; use dropshot::ConfigLogging; use dropshot::ConfigLoggingLevel; use omicron_common::api::external::DiskState; @@ -146,7 +145,7 @@ mod producers { /// See `Simulatable` for how this works. pub struct SimDisk { state: DiskStates, - producer: Option, + producer: Option, } // "producer" doesn't implement Debug, so we can't derive it on SimDisk. @@ -171,23 +170,18 @@ impl SimDisk { id, kind: ProducerKind::SledAgent, address: producer_address, - base_route: "/collect".to_string(), interval: Duration::from_millis(200), }; let config = oximeter_producer::Config { server_info, - registration_address: nexus_address, - dropshot: ConfigDropshot { - bind_address: producer_address, - ..Default::default() - }, + registration_address: Some(nexus_address), + request_body_max_bytes: 2048, log: LogConfig::Config(ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Error, }), }; let server = - ProducerServer::start(&config).await.map_err(|e| e.to_string())?; - + ProducerServer::start(&config).map_err(|e| e.to_string())?; let producer = producers::DiskProducer::new(id); server .registry() diff --git a/sled-agent/src/sim/http_entrypoints.rs b/sled-agent/src/sim/http_entrypoints.rs index c3c92eb6fe..ae1318a8b1 100644 --- a/sled-agent/src/sim/http_entrypoints.rs +++ b/sled-agent/src/sim/http_entrypoints.rs @@ -4,14 +4,13 @@ //! 
HTTP entrypoint functions for the sled agent's exposed API -use crate::bootstrap::early_networking::{ - EarlyNetworkConfig, EarlyNetworkConfigBody, -}; +use crate::bootstrap::early_networking::EarlyNetworkConfig; +use crate::bootstrap::params::AddSledRequest; use crate::params::{ DiskEnsureBody, InstanceEnsureBody, InstanceExternalIpBody, InstancePutMigrationIdsBody, InstancePutStateBody, InstancePutStateResponse, InstanceUnregisterResponse, Inventory, - OmicronZonesConfig, VpcFirewallRulesEnsureBody, + OmicronPhysicalDisksConfig, OmicronZonesConfig, VpcFirewallRulesEnsureBody, }; use dropshot::endpoint; use dropshot::ApiDescription; @@ -21,17 +20,14 @@ use dropshot::HttpResponseUpdatedNoContent; use dropshot::Path; use dropshot::RequestContext; use dropshot::TypedBody; -use illumos_utils::opte::params::DeleteVirtualNetworkInterfaceHost; -use illumos_utils::opte::params::SetVirtualNetworkInterfaceHost; -use ipnetwork::Ipv6Network; +use illumos_utils::opte::params::VirtualNetworkInterfaceHost; use omicron_common::api::internal::nexus::DiskRuntimeState; use omicron_common::api::internal::nexus::SledInstanceState; use omicron_common::api::internal::nexus::UpdateArtifactId; -use omicron_common::api::internal::shared::RackNetworkConfig; use omicron_common::api::internal::shared::SwitchPorts; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use std::net::{Ipv4Addr, Ipv6Addr}; +use sled_storage::resources::DisksManagementResult; use std::sync::Arc; use uuid::Uuid; @@ -44,6 +40,7 @@ pub fn api() -> SledApiDescription { fn register_endpoints(api: &mut SledApiDescription) -> Result<(), String> { api.register(instance_put_migration_ids)?; api.register(instance_put_state)?; + api.register(instance_get_state)?; api.register(instance_register)?; api.register(instance_unregister)?; api.register(instance_put_external_ip)?; @@ -56,12 +53,16 @@ pub fn api() -> SledApiDescription { api.register(vpc_firewall_rules_put)?; api.register(set_v2p)?; api.register(del_v2p)?; + api.register(list_v2p)?; api.register(uplink_ensure)?; api.register(read_network_bootstore_config)?; api.register(write_network_bootstore_config)?; api.register(inventory)?; + api.register(omicron_physical_disks_get)?; + api.register(omicron_physical_disks_put)?; api.register(omicron_zones_get)?; api.register(omicron_zones_put)?; + api.register(sled_add)?; Ok(()) } @@ -134,6 +135,19 @@ async fn instance_put_state( )) } +#[endpoint { + method = GET, + path = "/instances/{instance_id}/state", +}] +async fn instance_get_state( + rqctx: RequestContext>, + path_params: Path, +) -> Result, HttpError> { + let sa = rqctx.context(); + let instance_id = path_params.into_inner().instance_id; + Ok(HttpResponseOk(sa.instance_get_state(instance_id).await?)) +} + #[endpoint { method = PUT, path = "/instances/{instance_id}/migration-ids", @@ -329,27 +343,19 @@ async fn vpc_firewall_rules_put( Ok(HttpResponseUpdatedNoContent()) } -/// Path parameters for V2P mapping related requests (sled agent API) -#[derive(Deserialize, JsonSchema)] -struct V2pPathParam { - interface_id: Uuid, -} - /// Create a mapping from a virtual NIC to a physical host #[endpoint { method = PUT, - path = "/v2p/{interface_id}", + path = "/v2p/", }] async fn set_v2p( rqctx: RequestContext>, - path_params: Path, - body: TypedBody, + body: TypedBody, ) -> Result { let sa = rqctx.context(); - let interface_id = path_params.into_inner().interface_id; let body_args = body.into_inner(); - sa.set_virtual_nic_host(interface_id, &body_args) + sa.set_virtual_nic_host(&body_args) 
.await .map_err(|e| HttpError::for_internal_error(e.to_string()))?; @@ -359,24 +365,37 @@ async fn set_v2p( /// Delete a mapping from a virtual NIC to a physical host #[endpoint { method = DELETE, - path = "/v2p/{interface_id}", + path = "/v2p/", }] async fn del_v2p( rqctx: RequestContext>, - path_params: Path, - body: TypedBody, + body: TypedBody, ) -> Result { let sa = rqctx.context(); - let interface_id = path_params.into_inner().interface_id; let body_args = body.into_inner(); - sa.unset_virtual_nic_host(interface_id, &body_args) + sa.unset_virtual_nic_host(&body_args) .await .map_err(|e| HttpError::for_internal_error(e.to_string()))?; Ok(HttpResponseUpdatedNoContent()) } +/// List v2p mappings present on sled +#[endpoint { + method = GET, + path = "/v2p/", +}] +async fn list_v2p( + rqctx: RequestContext>, +) -> Result>, HttpError> { + let sa = rqctx.context(); + + let vnics = sa.list_virtual_nics().await.map_err(HttpError::from)?; + + Ok(HttpResponseOk(vnics)) +} + #[endpoint { method = POST, path = "/switch-ports", @@ -393,24 +412,9 @@ async fn uplink_ensure( path = "/network-bootstore-config", }] async fn read_network_bootstore_config( - _rqctx: RequestContext>, + rqctx: RequestContext>, ) -> Result, HttpError> { - let config = EarlyNetworkConfig { - generation: 0, - schema_version: 1, - body: EarlyNetworkConfigBody { - ntp_servers: Vec::new(), - rack_network_config: Some(RackNetworkConfig { - rack_subnet: Ipv6Network::new(Ipv6Addr::UNSPECIFIED, 56) - .unwrap(), - infra_ip_first: Ipv4Addr::UNSPECIFIED, - infra_ip_last: Ipv4Addr::UNSPECIFIED, - ports: Vec::new(), - bgp: Vec::new(), - bfd: Vec::new(), - }), - }, - }; + let config = rqctx.context().bootstore_network_config.lock().await.clone(); Ok(HttpResponseOk(config)) } @@ -419,9 +423,11 @@ async fn read_network_bootstore_config( path = "/network-bootstore-config", }] async fn write_network_bootstore_config( - _rqctx: RequestContext>, - _body: TypedBody, + rqctx: RequestContext>, + body: TypedBody, ) -> Result { + let mut config = rqctx.context().bootstore_network_config.lock().await; + *config = body.into_inner(); Ok(HttpResponseUpdatedNoContent()) } @@ -441,6 +447,31 @@ async fn inventory( )) } +#[endpoint { + method = PUT, + path = "/omicron-physical-disks", +}] +async fn omicron_physical_disks_put( + rqctx: RequestContext>, + body: TypedBody, +) -> Result, HttpError> { + let sa = rqctx.context(); + let body_args = body.into_inner(); + let result = sa.omicron_physical_disks_ensure(body_args).await?; + Ok(HttpResponseOk(result)) +} + +#[endpoint { + method = GET, + path = "/omicron-physical-disks", +}] +async fn omicron_physical_disks_get( + rqctx: RequestContext>, +) -> Result, HttpError> { + let sa = rqctx.context(); + Ok(HttpResponseOk(sa.omicron_physical_disks_list().await?)) +} + #[endpoint { method = GET, path = "/omicron-zones", @@ -465,3 +496,14 @@ async fn omicron_zones_put( sa.omicron_zones_ensure(body_args).await; Ok(HttpResponseUpdatedNoContent()) } + +#[endpoint { + method = PUT, + path = "/sleds" +}] +async fn sled_add( + _rqctx: RequestContext>, + _body: TypedBody, +) -> Result { + Ok(HttpResponseUpdatedNoContent()) +} diff --git a/sled-agent/src/sim/http_entrypoints_pantry.rs b/sled-agent/src/sim/http_entrypoints_pantry.rs index 49368f363a..13882deabc 100644 --- a/sled-agent/src/sim/http_entrypoints_pantry.rs +++ b/sled-agent/src/sim/http_entrypoints_pantry.rs @@ -261,7 +261,7 @@ async fn scrub( Ok(HttpResponseOk(ScrubResponse { job_id })) } -/// Flush and close a volume, removing it from the Pantry +/// Deactivate 
a volume, removing it from the Pantry #[endpoint { method = DELETE, path = "/crucible/pantry/0/volume/{id}", diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index fea6b738a6..ae7f40f5f3 100644 --- a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -31,7 +31,12 @@ use omicron_common::api::external::Vni; use omicron_common::backoff::{ retry_notify, retry_policy_internal_service_aggressive, BackoffError, }; +use omicron_common::disk::DiskIdentity; use omicron_common::FileKv; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::SledUuid; +use omicron_uuid_kinds::ZpoolUuid; +use oxnet::Ipv6Net; use slog::{info, Drain, Logger}; use std::collections::BTreeMap; use std::collections::HashMap; @@ -163,27 +168,31 @@ impl Server { // Crucible dataset for each. This emulates the setup we expect to have // on the physical rack. for zpool in &config.storage.zpools { - let zpool_id = Uuid::new_v4(); + let physical_disk_id = Uuid::new_v4(); + let zpool_id = ZpoolUuid::new_v4(); let vendor = "synthetic-vendor".to_string(); let serial = format!("synthetic-serial-{zpool_id}"); let model = "synthetic-model".to_string(); sled_agent .create_external_physical_disk( - vendor.clone(), - serial.clone(), - model.clone(), + physical_disk_id, + DiskIdentity { + vendor: vendor.clone(), + serial: serial.clone(), + model: model.clone(), + }, ) .await; sled_agent - .create_zpool(zpool_id, vendor, serial, model, zpool.size) + .create_zpool(zpool_id, physical_disk_id, zpool.size) .await; let dataset_id = Uuid::new_v4(); let address = sled_agent.create_crucible_dataset(zpool_id, dataset_id).await; datasets.push(NexusTypes::DatasetCreateRequest { - zpool_id, + zpool_id: zpool_id.into_untyped_uuid(), dataset_id, request: NexusTypes::DatasetPutRequest { address: address.to_string(), @@ -341,7 +350,8 @@ pub async fn run_standalone_server( .expect("failed to set up DNS"); // Initialize the internal DNS entries - let dns_config = dns_config_builder.build(); + let dns_config = + dns_config_builder.build_full_config_for_initial_generation(); dns.initialize_with_config(&log, &dns_config).await?; let internal_dns_version = Generation::try_from(dns_config.generation) .expect("invalid internal dns version"); @@ -357,7 +367,7 @@ pub async fn run_standalone_server( underlay_address: *http_bound.ip(), zone_type: OmicronZoneType::InternalDns { dataset: OmicronZoneDataset { - pool_name: ZpoolName::new_external(Uuid::new_v4()), + pool_name: ZpoolName::new_external(ZpoolUuid::new_v4()), }, http_address: http_bound, dns_address: match dns.dns_server.local_address() { @@ -392,7 +402,7 @@ pub async fn run_standalone_server( kind: NetworkInterfaceKind::Service { id }, name: "nexus".parse().unwrap(), ip: NEXUS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) .unwrap() .into(), mac: macs.next().unwrap(), @@ -426,7 +436,7 @@ pub async fn run_standalone_server( underlay_address: ip, zone_type: OmicronZoneType::ExternalDns { dataset: OmicronZoneDataset { - pool_name: ZpoolName::new_external(Uuid::new_v4()), + pool_name: ZpoolName::new_external(ZpoolUuid::new_v4()), }, http_address: external_dns_internal_addr, dns_address: SocketAddr::V6(external_dns_internal_addr), @@ -435,7 +445,7 @@ pub async fn run_standalone_server( kind: NetworkInterfaceKind::Service { id }, name: "external-dns".parse().unwrap(), ip: DNS_OPTE_IPV4_SUBNET - .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES as u32 + 1) + .nth(NUM_INITIAL_RESERVED_IP_ADDRESSES + 1) .unwrap() 
.into(), mac: macs.next().unwrap(), @@ -469,12 +479,15 @@ pub async fn run_standalone_server( }; let mut datasets = vec![]; - for zpool_id in server.sled_agent.get_zpools().await { + let physical_disks = server.sled_agent.get_all_physical_disks().await; + let zpools = server.sled_agent.get_zpools().await; + for zpool in &zpools { + let zpool_id = ZpoolUuid::from_untyped_uuid(zpool.id); for (dataset_id, address) in server.sled_agent.get_datasets(zpool_id).await { datasets.push(NexusTypes::DatasetCreateRequest { - zpool_id, + zpool_id: zpool.id, dataset_id, request: NexusTypes::DatasetPutRequest { address: address.to_string(), @@ -489,17 +502,21 @@ pub async fn run_standalone_server( None => vec![], }; - let services = - zones.iter().map(|z| z.to_nexus_service_req(config.id)).collect(); + let disks = server.sled_agent.omicron_physical_disks_list().await?; let mut sled_configs = BTreeMap::new(); - sled_configs.insert(config.id, SledConfig { zones }); + sled_configs.insert( + SledUuid::from_untyped_uuid(config.id), + SledConfig { disks, zones }, + ); let rack_init_request = NexusTypes::RackInitializationRequest { blueprint: build_initial_blueprint_from_sled_configs( - sled_configs, + &sled_configs, internal_dns_version, - ), - services, + ) + .expect("failed to construct initial blueprint"), + physical_disks, + zpools, datasets, internal_services_ip_pool_ranges, certs, @@ -511,13 +528,14 @@ pub async fn run_standalone_server( HashMap::new(), ), rack_network_config: NexusTypes::RackNetworkConfigV1 { - rack_subnet: Ipv6Addr::LOCALHOST.into(), + rack_subnet: Ipv6Net::host_net(Ipv6Addr::LOCALHOST), infra_ip_first: Ipv4Addr::LOCALHOST, infra_ip_last: Ipv4Addr::LOCALHOST, ports: Vec::new(), bgp: Vec::new(), bfd: Vec::new(), }, + allowed_source_ips: NexusTypes::AllowedSourceIps::Any, }; handoff_to_nexus(&log, &config, &rack_init_request).await?; diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs index 1edde622a1..742639350a 100644 --- a/sled-agent/src/sim/sled_agent.rs +++ b/sled-agent/src/sim/sled_agent.rs @@ -10,22 +10,23 @@ use super::disk::SimDisk; use super::instance::SimInstance; use super::storage::CrucibleData; use super::storage::Storage; +use crate::bootstrap::early_networking::{ + EarlyNetworkConfig, EarlyNetworkConfigBody, +}; use crate::nexus::NexusClient; use crate::params::{ DiskStateRequested, InstanceExternalIpBody, InstanceHardware, InstanceMetadata, InstanceMigrationSourceParams, InstancePutStateResponse, InstanceStateRequested, InstanceUnregisterResponse, Inventory, - OmicronZonesConfig, SledRole, + OmicronPhysicalDisksConfig, OmicronZonesConfig, SledRole, }; use crate::sim::simulatable::Simulatable; use crate::updates::UpdateManager; use anyhow::bail; use anyhow::Context; -use dropshot::HttpServer; +use dropshot::{HttpError, HttpServer}; use futures::lock::Mutex; -use illumos_utils::opte::params::{ - DeleteVirtualNetworkInterfaceHost, SetVirtualNetworkInterfaceHost, -}; +use illumos_utils::opte::params::VirtualNetworkInterfaceHost; use omicron_common::api::external::{ ByteCount, DiskState, Error, Generation, ResourceType, }; @@ -35,13 +36,18 @@ use omicron_common::api::internal::nexus::{ use omicron_common::api::internal::nexus::{ InstanceRuntimeState, VmmRuntimeState, }; +use omicron_common::api::internal::shared::RackNetworkConfig; +use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::ZpoolUuid; +use oxnet::Ipv6Net; use propolis_client::{ types::VolumeConstructionRequest, Client as PropolisClient, }; use propolis_mock_server::Context as 
PropolisContext; +use sled_storage::resources::DisksManagementResult; use slog::Logger; use std::collections::{HashMap, HashSet}; -use std::net::{IpAddr, Ipv6Addr, SocketAddr}; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; @@ -66,7 +72,7 @@ pub struct SledAgent { nexus_address: SocketAddr, pub nexus_client: Arc, disk_id_to_region_ids: Mutex>>, - pub v2p_mappings: Mutex>>, + pub v2p_mappings: Mutex>, mock_propolis: Mutex>, PropolisClient)>>, /// lists of external IPs assigned to instances @@ -74,6 +80,7 @@ pub struct SledAgent { config: Config, fake_zones: Mutex, instance_ensure_state_error: Mutex>, + pub bootstore_network_config: Mutex, pub log: Logger, } @@ -141,6 +148,23 @@ impl SledAgent { let disk_log = log.new(o!("kind" => "disks")); let storage_log = log.new(o!("kind" => "storage")); + let bootstore_network_config = Mutex::new(EarlyNetworkConfig { + generation: 0, + schema_version: 1, + body: EarlyNetworkConfigBody { + ntp_servers: Vec::new(), + rack_network_config: Some(RackNetworkConfig { + rack_subnet: Ipv6Net::new(Ipv6Addr::UNSPECIFIED, 56) + .unwrap(), + infra_ip_first: Ipv4Addr::UNSPECIFIED, + infra_ip_last: Ipv4Addr::UNSPECIFIED, + ports: Vec::new(), + bgp: Vec::new(), + bfd: Vec::new(), + }), + }, + }); + Arc::new(SledAgent { id, ip: config.dropshot.bind_address.ip(), @@ -156,7 +180,6 @@ impl SledAgent { )), storage: Mutex::new(Storage::new( id, - Arc::clone(&nexus_client), config.storage.ip, storage_log, )), @@ -164,7 +187,7 @@ impl SledAgent { nexus_address, nexus_client, disk_id_to_region_ids: Mutex::new(HashMap::new()), - v2p_mappings: Mutex::new(HashMap::new()), + v2p_mappings: Mutex::new(HashSet::new()), external_ips: Mutex::new(HashMap::new()), mock_propolis: Mutex::new(None), config: config.clone(), @@ -174,6 +197,7 @@ impl SledAgent { }), instance_ensure_state_error: Mutex::new(None), log, + bootstore_network_config, }) } @@ -451,6 +475,22 @@ impl SledAgent { Ok(InstancePutStateResponse { updated_runtime: Some(new_state) }) } + pub async fn instance_get_state( + &self, + instance_id: Uuid, + ) -> Result { + let instance = self + .instances + .sim_get_cloned_object(&instance_id) + .await + .map_err(|_| { + crate::sled_agent::Error::Instance( + crate::instance_manager::Error::NoSuchInstance(instance_id), + ) + })?; + Ok(instance.current()) + } + pub async fn set_instance_ensure_state_error(&self, error: Option) { *self.instance_ensure_state_error.lock().await = error; } @@ -521,25 +561,32 @@ impl SledAgent { /// Adds a Physical Disk to the simulated sled agent. pub async fn create_external_physical_disk( &self, - vendor: String, - serial: String, - model: String, + id: Uuid, + identity: DiskIdentity, ) { let variant = sled_hardware::DiskVariant::U2; self.storage .lock() .await - .insert_physical_disk(vendor, serial, model, variant) + .insert_physical_disk(id, identity, variant) .await; } - pub async fn get_zpools(&self) -> Vec { + pub async fn get_all_physical_disks( + &self, + ) -> Vec { + self.storage.lock().await.get_all_physical_disks() + } + + pub async fn get_zpools( + &self, + ) -> Vec { self.storage.lock().await.get_all_zpools() } pub async fn get_datasets( &self, - zpool_id: Uuid, + zpool_id: ZpoolUuid, ) -> Vec<(Uuid, SocketAddr)> { self.storage.lock().await.get_all_datasets(zpool_id) } @@ -547,23 +594,21 @@ impl SledAgent { /// Adds a Zpool to the simulated sled agent. 
pub async fn create_zpool( &self, - id: Uuid, - vendor: String, - serial: String, - model: String, + id: ZpoolUuid, + physical_disk_id: Uuid, size: u64, ) { self.storage .lock() .await - .insert_zpool(id, vendor, serial, model, size) + .insert_zpool(id, physical_disk_id, size) .await; } /// Adds a Crucible Dataset within a zpool. pub async fn create_crucible_dataset( &self, - zpool_id: Uuid, + zpool_id: ZpoolUuid, dataset_id: Uuid, ) -> SocketAddr { self.storage.lock().await.insert_dataset(zpool_id, dataset_id).await @@ -572,7 +617,7 @@ impl SledAgent { /// Returns a crucible dataset within a particular zpool. pub async fn get_crucible_dataset( &self, - zpool_id: Uuid, + zpool_id: ZpoolUuid, dataset_id: Uuid, ) -> Arc { self.storage.lock().await.get_dataset(zpool_id, dataset_id).await @@ -625,36 +670,29 @@ impl SledAgent { pub async fn set_virtual_nic_host( &self, - interface_id: Uuid, - mapping: &SetVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { let mut v2p_mappings = self.v2p_mappings.lock().await; - let vec = v2p_mappings.entry(interface_id).or_default(); - vec.push(mapping.clone()); + v2p_mappings.insert(mapping.clone()); Ok(()) } pub async fn unset_virtual_nic_host( &self, - interface_id: Uuid, - mapping: &DeleteVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { let mut v2p_mappings = self.v2p_mappings.lock().await; - let vec = v2p_mappings.entry(interface_id).or_default(); - vec.retain(|x| { - x.virtual_ip != mapping.virtual_ip || x.vni != mapping.vni - }); - - // If the last entry was removed, remove the entire interface ID so that - // tests don't have to distinguish never-created entries from - // previously-extant-but-now-empty entries. - if vec.is_empty() { - v2p_mappings.remove(&interface_id); - } - + v2p_mappings.remove(mapping); Ok(()) } + pub async fn list_virtual_nics( + &self, + ) -> Result, Error> { + let v2p_mappings = self.v2p_mappings.lock().await; + Ok(Vec::from_iter(v2p_mappings.clone())) + } + pub async fn instance_put_external_ip( &self, instance_id: Uuid, @@ -780,9 +818,9 @@ impl SledAgent { .context("reservoir_size")?, disks: storage .physical_disks() - .iter() - .map(|(identity, info)| crate::params::InventoryDisk { - identity: identity.clone(), + .values() + .map(|info| crate::params::InventoryDisk { + identity: info.identity.clone(), variant: info.variant, slot: info.slot, }) @@ -800,6 +838,19 @@ impl SledAgent { }) } + pub async fn omicron_physical_disks_list( + &self, + ) -> Result { + self.storage.lock().await.omicron_physical_disks_list().await + } + + pub async fn omicron_physical_disks_ensure( + &self, + config: OmicronPhysicalDisksConfig, + ) -> Result { + self.storage.lock().await.omicron_physical_disks_ensure(config).await + } + pub async fn omicron_zones_list(&self) -> OmicronZonesConfig { self.fake_zones.lock().await.clone() } diff --git a/sled-agent/src/sim/storage.rs b/sled-agent/src/sim/storage.rs index 8fb362c5b7..6a688f6101 100644 --- a/sled-agent/src/sim/storage.rs +++ b/sled-agent/src/sim/storage.rs @@ -8,7 +8,7 @@ //! than the representation of "virtual disks" which would be presented //! through Nexus' external API. 
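// ---- Editor's illustrative sketch (not part of the upstream change) ----
// The simulated storage below gates `omicron_physical_disks_ensure` on a
// generation number; as I read it, the intent is to reject an incoming config
// whose generation is older than the stored one and otherwise record it,
// reporting per-disk status. A minimal, self-contained sketch of that pattern,
// using hypothetical stand-in types (`DiskConfig`, `DisksConfig`, `SimStore`)
// rather than the real `OmicronPhysicalDisksConfig`:

#[derive(Clone)]
pub struct DiskConfig {
    pub serial: String,
}

#[derive(Clone)]
pub struct DisksConfig {
    pub generation: u64,
    pub disks: Vec<DiskConfig>,
}

#[derive(Default)]
pub struct SimStore {
    config: Option<DisksConfig>,
}

impl SimStore {
    /// Accept `incoming` only if its generation is at least as new as the
    /// stored one; otherwise reject it as stale. Returns the serials of the
    /// disks now considered managed.
    pub fn ensure(&mut self, incoming: DisksConfig) -> Result<Vec<String>, String> {
        if let Some(stored) = &self.config {
            if incoming.generation < stored.generation {
                return Err("generation number too old".to_string());
            }
        }
        let managed = incoming.disks.iter().map(|d| d.serial.clone()).collect();
        self.config = Some(incoming);
        Ok(managed)
    }
}
// ---- end sketch ----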
-use crate::nexus::NexusClient; +use crate::params::OmicronPhysicalDisksConfig; use crate::sim::http_entrypoints_pantry::ExpectedDigest; use crate::sim::SledAgent; use anyhow::{self, bail, Result}; @@ -19,12 +19,14 @@ use crucible_agent_client::types::{ use dropshot::HandlerTaskMode; use dropshot::HttpError; use futures::lock::Mutex; -use nexus_client::types::{ - ByteCount, PhysicalDiskKind, PhysicalDiskPutRequest, ZpoolPutRequest, -}; use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::ZpoolUuid; use propolis_client::types::VolumeConstructionRequest; use sled_hardware::DiskVariant; +use sled_storage::resources::DiskManagementStatus; +use sled_storage::resources::DisksManagementResult; use slog::Logger; use std::collections::HashMap; use std::collections::HashSet; @@ -95,6 +97,8 @@ impl CrucibleDataInner { cert_pem: None, key_pem: None, root_pem: None, + source: None, + read_only: false, }; let old = self.regions.insert(id, region.clone()); @@ -474,18 +478,21 @@ impl CrucibleServer { } pub(crate) struct PhysicalDisk { + pub(crate) identity: DiskIdentity, pub(crate) variant: DiskVariant, pub(crate) slot: i64, } pub(crate) struct Zpool { - datasets: HashMap, + id: ZpoolUuid, + physical_disk_id: Uuid, total_size: u64, + datasets: HashMap, } impl Zpool { - fn new(total_size: u64) -> Self { - Zpool { datasets: HashMap::new(), total_size } + fn new(id: ZpoolUuid, physical_disk_id: Uuid, total_size: u64) -> Self { + Zpool { id, physical_disk_id, total_size, datasets: HashMap::new() } } fn insert_dataset( @@ -541,26 +548,21 @@ impl Zpool { /// Simulated representation of all storage on a sled. pub struct Storage { sled_id: Uuid, - nexus_client: Arc, log: Logger, - physical_disks: HashMap, + config: Option, + physical_disks: HashMap, next_disk_slot: i64, - zpools: HashMap, + zpools: HashMap, crucible_ip: IpAddr, next_crucible_port: u16, } impl Storage { - pub fn new( - sled_id: Uuid, - nexus_client: Arc, - crucible_ip: IpAddr, - log: Logger, - ) -> Self { + pub fn new(sled_id: Uuid, crucible_ip: IpAddr, log: Logger) -> Self { Self { sled_id, - nexus_client, log, + config: None, physical_disks: HashMap::new(), next_disk_slot: 0, zpools: HashMap::new(), @@ -570,78 +572,80 @@ impl Storage { } /// Returns an immutable reference to all (currently known) physical disks - pub fn physical_disks(&self) -> &HashMap { + pub fn physical_disks(&self) -> &HashMap { &self.physical_disks } + pub async fn omicron_physical_disks_list( + &mut self, + ) -> Result { + let Some(config) = self.config.as_ref() else { + return Err(HttpError::for_not_found( + None, + "No control plane disks".into(), + )); + }; + Ok(config.clone()) + } + + pub async fn omicron_physical_disks_ensure( + &mut self, + config: OmicronPhysicalDisksConfig, + ) -> Result { + if let Some(stored_config) = self.config.as_ref() { + if stored_config.generation < config.generation { + return Err(HttpError::for_client_error( + None, + http::StatusCode::BAD_REQUEST, + "Generation number too old".to_string(), + )); + } + } + self.config.replace(config.clone()); + + Ok(DisksManagementResult { + status: config + .disks + .into_iter() + .map(|config| DiskManagementStatus { + identity: config.identity, + err: None, + }) + .collect(), + }) + } + pub async fn insert_physical_disk( &mut self, - vendor: String, - serial: String, - model: String, + id: Uuid, + identity: DiskIdentity, variant: DiskVariant, ) { - let identifier = DiskIdentity { - vendor: vendor.clone(), - 
serial: serial.clone(), - model: model.clone(), - }; let slot = self.next_disk_slot; self.next_disk_slot += 1; - self.physical_disks.insert(identifier, PhysicalDisk { variant, slot }); - - let variant = match variant { - DiskVariant::U2 => PhysicalDiskKind::U2, - DiskVariant::M2 => PhysicalDiskKind::M2, - }; - - // Notify Nexus - let request = PhysicalDiskPutRequest { - vendor, - serial, - model, - variant, - sled_id: self.sled_id, - }; - self.nexus_client - .physical_disk_put(&request) - .await - .expect("Failed to notify Nexus about new Physical Disk"); + self.physical_disks + .insert(id, PhysicalDisk { identity, variant, slot }); } - /// Adds a Zpool to the sled's simulated storage and notifies Nexus. + /// Adds a Zpool to the sled's simulated storage. pub async fn insert_zpool( &mut self, - zpool_id: Uuid, - disk_vendor: String, - disk_serial: String, - disk_model: String, + zpool_id: ZpoolUuid, + disk_id: Uuid, size: u64, ) { // Update our local data - self.zpools.insert(zpool_id, Zpool::new(size)); - - // Notify Nexus - let request = ZpoolPutRequest { - size: ByteCount(size), - disk_vendor, - disk_serial, - disk_model, - }; - self.nexus_client - .zpool_put(&self.sled_id, &zpool_id, &request) - .await - .expect("Failed to notify Nexus about new Zpool"); + self.zpools.insert(zpool_id, Zpool::new(zpool_id, disk_id, size)); } /// Returns an immutable reference to all zpools - pub fn zpools(&self) -> &HashMap { + pub fn zpools(&self) -> &HashMap { &self.zpools } /// Adds a Dataset to the sled's simulated storage. pub async fn insert_dataset( &mut self, - zpool_id: Uuid, + zpool_id: ZpoolUuid, dataset_id: Uuid, ) -> SocketAddr { // Update our local data @@ -661,11 +665,48 @@ impl Storage { dataset.address() } - pub fn get_all_zpools(&self) -> Vec { - self.zpools.keys().cloned().collect() + pub fn get_all_physical_disks( + &self, + ) -> Vec { + self.physical_disks + .iter() + .map(|(id, disk)| { + let variant = match disk.variant { + DiskVariant::U2 => { + nexus_client::types::PhysicalDiskKind::U2 + } + DiskVariant::M2 => { + nexus_client::types::PhysicalDiskKind::M2 + } + }; + + nexus_client::types::PhysicalDiskPutRequest { + id: *id, + vendor: disk.identity.vendor.clone(), + serial: disk.identity.serial.clone(), + model: disk.identity.model.clone(), + variant, + sled_id: self.sled_id, + } + }) + .collect() + } + + pub fn get_all_zpools(&self) -> Vec { + self.zpools + .values() + .map(|pool| nexus_client::types::ZpoolPutRequest { + id: pool.id.into_untyped_uuid(), + sled_id: self.sled_id, + physical_disk_id: pool.physical_disk_id, + }) + .collect() } - pub fn get_all_datasets(&self, zpool_id: Uuid) -> Vec<(Uuid, SocketAddr)> { + pub fn get_all_datasets( + &self, + zpool_id: ZpoolUuid, + ) -> Vec<(Uuid, SocketAddr)> { let zpool = self.zpools.get(&zpool_id).expect("Zpool does not exist"); zpool @@ -677,7 +718,7 @@ impl Storage { pub async fn get_dataset( &self, - zpool_id: Uuid, + zpool_id: ZpoolUuid, dataset_id: Uuid, ) -> Arc { self.zpools @@ -720,7 +761,7 @@ impl Storage { /// Simulated crucible pantry pub struct Pantry { - pub id: Uuid, + pub id: OmicronZoneUuid, vcrs: Mutex>, // Please rewind! sled_agent: Arc, jobs: Mutex>, @@ -729,7 +770,7 @@ pub struct Pantry { impl Pantry { pub fn new(sled_agent: Arc) -> Self { Self { - id: Uuid::new_v4(), + id: OmicronZoneUuid::new_v4(), vcrs: Mutex::new(HashMap::default()), sled_agent, jobs: Mutex::new(HashSet::default()), @@ -843,7 +884,9 @@ impl Pantry { .. 
} => ( block_size, - block_size * blocks_per_extent * (extent_count as u64), + block_size + * blocks_per_extent + * u64::from(extent_count), ), _ => { diff --git a/sled-agent/src/sled_agent.rs b/sled-agent/src/sled_agent.rs index cbda32bbe1..670d486686 100644 --- a/sled-agent/src/sled_agent.rs +++ b/sled-agent/src/sled_agent.rs @@ -22,12 +22,11 @@ use crate::params::{ DiskStateRequested, InstanceExternalIpBody, InstanceHardware, InstanceMetadata, InstanceMigrationSourceParams, InstancePutStateResponse, InstanceStateRequested, InstanceUnregisterResponse, Inventory, - OmicronZonesConfig, SledRole, TimeSync, VpcFirewallRule, - ZoneBundleMetadata, Zpool, + OmicronPhysicalDisksConfig, OmicronZonesConfig, SledRole, TimeSync, + VpcFirewallRule, ZoneBundleMetadata, Zpool, }; use crate::probe_manager::ProbeManager; use crate::services::{self, ServiceManager}; -use crate::storage_monitor::UnderlayAccess; use crate::updates::{ConfigUpdates, UpdateManager}; use crate::vmm_reservoir::{ReservoirMode, VmmReservoirManager}; use crate::zone_bundle; @@ -38,9 +37,7 @@ use derive_more::From; use dropshot::HttpError; use futures::stream::FuturesUnordered; use futures::StreamExt; -use illumos_utils::opte::params::{ - DeleteVirtualNetworkInterfaceHost, SetVirtualNetworkInterfaceHost, -}; +use illumos_utils::opte::params::VirtualNetworkInterfaceHost; use illumos_utils::opte::PortManager; use illumos_utils::zone::PROPOLIS_ZONE_PREFIX; use illumos_utils::zone::ZONE_PREFIX; @@ -48,8 +45,6 @@ use omicron_common::address::{ get_sled_address, get_switch_zone_address, Ipv6Subnet, SLED_PREFIX, }; use omicron_common::api::external::{ByteCount, ByteCountRangeError, Vni}; -use omicron_common::api::internal::nexus::ProducerEndpoint; -use omicron_common::api::internal::nexus::ProducerKind; use omicron_common::api::internal::nexus::{ SledInstanceState, VmmRuntimeState, }; @@ -61,8 +56,7 @@ use omicron_common::api::{ internal::nexus::UpdateArtifactId, }; use omicron_common::backoff::{ - retry_notify, retry_policy_internal_service, - retry_policy_internal_service_aggressive, BackoffError, + retry_notify, retry_policy_internal_service_aggressive, BackoffError, }; use omicron_ddm_admin_client::Client as DdmAdminClient; use oximeter::types::ProducerRegistry; @@ -70,11 +64,11 @@ use sled_hardware::{underlay, HardwareManager}; use sled_hardware_types::underlay::BootstrapInterface; use sled_hardware_types::Baseboard; use sled_storage::manager::StorageHandle; +use sled_storage::resources::DisksManagementResult; use slog::Logger; use std::collections::BTreeMap; use std::net::{Ipv6Addr, SocketAddr, SocketAddrV6}; use std::sync::Arc; -use tokio::sync::oneshot; use uuid::Uuid; use illumos_utils::running_zone::ZoneBuilderFactory; @@ -161,8 +155,9 @@ pub enum Error { impl From for omicron_common::api::external::Error { fn from(err: Error) -> Self { match err { - // Service errors can convert themselves into the external error + // Some errors can convert themselves into the external error Error::Services(err) => err.into(), + Error::Storage(err) => err.into(), _ => omicron_common::api::external::Error::InternalError { internal_message: err.to_string(), }, @@ -174,16 +169,15 @@ impl From for omicron_common::api::external::Error { impl From for dropshot::HttpError { fn from(err: Error) -> Self { match err { - Error::Instance(instance_manager_error) => { - match instance_manager_error { - crate::instance_manager::Error::Instance( - instance_error, - ) => match instance_error { - crate::instance::Error::Propolis(propolis_error) => { - // Work 
around dropshot#693: HttpError::for_status - // only accepts client errors and asserts on server - // errors, so convert server errors by hand. - match propolis_error.status() { + Error::Instance(crate::instance_manager::Error::Instance( + instance_error, + )) => { + match instance_error { + crate::instance::Error::Propolis(propolis_error) => { + // Work around dropshot#693: HttpError::for_status + // only accepts client errors and asserts on server + // errors, so convert server errors by hand. + match propolis_error.status() { None => HttpError::for_internal_error( propolis_error.to_string(), ), @@ -199,18 +193,22 @@ impl From for dropshot::HttpError { HttpError::for_internal_error(propolis_error.to_string()), } } - } - crate::instance::Error::Transition(omicron_error) => { - // Preserve the status associated with the wrapped - // Omicron error so that Nexus will see it in the - // Progenitor client error it gets back. - HttpError::from(omicron_error) - } - e => HttpError::for_internal_error(e.to_string()), - }, + } + crate::instance::Error::Transition(omicron_error) => { + // Preserve the status associated with the wrapped + // Omicron error so that Nexus will see it in the + // Progenitor client error it gets back. + HttpError::from(omicron_error) + } e => HttpError::for_internal_error(e.to_string()), } } + Error::Instance( + e @ crate::instance_manager::Error::NoSuchInstance(_), + ) => HttpError::for_not_found( + Some("NO_SUCH_INSTANCE".to_string()), + e.to_string(), + ), Error::ZoneBundle(ref inner) => match inner { BundleError::NoStorage | BundleError::Unavailable { .. } => { HttpError::for_unavail(None, inner.to_string()) @@ -342,7 +340,6 @@ impl SledAgent { request: StartSledAgentRequest, services: ServiceManager, long_running_task_handles: LongRunningTaskHandles, - underlay_available_tx: oneshot::Sender, ) -> Result { // Pass the "parent_log" to all subcomponents that want to set their own // "component" value. @@ -357,7 +354,7 @@ impl SledAgent { let storage_manager = &long_running_task_handles.storage_manager; let boot_disk = storage_manager - .get_latest_resources() + .get_latest_disks() .await .boot_disk() .ok_or_else(|| Error::BootDiskNotFound)?; @@ -418,6 +415,7 @@ impl SledAgent { request.body.id, request.body.rack_id, long_running_task_handles.hardware_manager.baseboard(), + *sled_address.ip(), log.new(o!("component" => "MetricsManager")), )?; @@ -440,37 +438,12 @@ impl SledAgent { } } - // Spawn a task in the background to register our metric producer with - // Nexus. This should not block progress here. - let endpoint = ProducerEndpoint { - id: request.body.id, - kind: ProducerKind::SledAgent, - address: sled_address.into(), - base_route: String::from("/metrics/collect"), - interval: crate::metrics::METRIC_COLLECTION_INTERVAL, - }; - tokio::task::spawn(register_metric_producer_with_nexus( - log.clone(), - nexus_client.clone(), - endpoint, - )); - // Create the PortManager to manage all the OPTE ports on the sled. let port_manager = PortManager::new( parent_log.new(o!("component" => "PortManager")), *sled_address.ip(), ); - // Inform the `StorageMonitor` that the underlay is available so that - // it can try to contact nexus. - underlay_available_tx - .send(UnderlayAccess { - nexus_client: nexus_client.clone(), - sled_id: request.body.id, - }) - .map_err(|_| ()) - .expect("Failed to send to StorageMonitor"); - // Configure the VMM reservoir as either a percentage of DRAM or as an // exact size in MiB. 
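// ---- Editor's illustrative sketch (not part of the upstream change) ----
// The reservoir configuration below supports sizing the VMM reservoir as a
// percentage of physical DRAM; the updated arithmetic later in this diff uses
// `f64::from(percent)` rather than an `as` cast before flooring. A small
// standalone sketch of that calculation; the helper name and the 1..=99 range
// check are assumptions for illustration, not the actual `ReservoirMode` API:

/// Reservoir size in bytes for `percent` of physical RAM, or `None` if the
/// percentage is outside the assumed valid range.
fn reservoir_size_from_percent(physical_ram_bytes: u64, percent: u8) -> Option<u64> {
    if percent == 0 || percent > 99 {
        return None;
    }
    // Floor so the reservoir never exceeds the requested fraction of RAM.
    Some((physical_ram_bytes as f64 * (f64::from(percent) / 100.0)).floor() as u64)
}
// ---- end sketch ----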
let reservoir_mode = ReservoirMode::from_config( @@ -625,13 +598,25 @@ impl SledAgent { retry_policy_internal_service_aggressive(), || async { // Load as many services as we can, and don't exit immediately - // upon failure... + // upon failure. let load_services_result = self.inner.services.load_services().await.map_err(|err| { BackoffError::transient(Error::from(err)) }); - // ... and request firewall rule updates for as many services as + // If there wasn't any work to do, we're done immediately. + if matches!( + load_services_result, + Ok(services::LoadServicesResult::NoServicesToLoad) + ) { + info!( + self.log, + "load_services exiting early; no services to be loaded" + ); + return Ok(()); + } + + // Otherwise, request firewall rule updates for as many services as // we can. Note that we still make this request even if we only // partially load some services. let firewall_result = self @@ -802,6 +787,28 @@ impl SledAgent { self.inner.zone_bundler.cleanup().await.map_err(Error::from) } + /// Requests the set of physical disks currently managed by the Sled Agent. + /// + /// This should be contrasted by the set of disks in the inventory, which + /// may contain a slightly different set, if certain disks are not expected + /// to be in-use by the broader control plane. + pub async fn omicron_physical_disks_list( + &self, + ) -> Result { + Ok(self.storage().omicron_physical_disks_list().await?) + } + + /// Ensures that the specific set of Omicron Physical Disks are running + /// on this sled, and that no other disks are being used by the control + /// plane (with the exception of M.2s, which are always automatically + /// in-use). + pub async fn omicron_physical_disks_ensure( + &self, + config: OmicronPhysicalDisksConfig, + ) -> Result { + Ok(self.storage().omicron_physical_disks_ensure(config).await?) + } + /// List the Omicron zone configuration that's currently running pub async fn omicron_zones_list( &self, @@ -849,7 +856,7 @@ impl SledAgent { pub async fn zpools_get(&self) -> Vec { self.inner .storage - .get_latest_resources() + .get_latest_disks() .await .get_all_zpools() .into_iter() @@ -976,6 +983,18 @@ impl SledAgent { .map_err(|e| Error::Instance(e)) } + /// Returns the state of the instance with the provided ID. + pub async fn instance_get_state( + &self, + instance_id: Uuid, + ) -> Result { + self.inner + .instances + .get_instance_state(instance_id) + .await + .map_err(|e| Error::Instance(e)) + } + /// Idempotently ensures that the given virtual disk is attached (or not) as /// specified. 
/// @@ -1030,9 +1049,15 @@ impl SledAgent { .map_err(Error::from) } + pub async fn list_virtual_nics( + &self, + ) -> Result, Error> { + self.inner.port_manager.list_virtual_nics().map_err(Error::from) + } + pub async fn set_virtual_nic_host( &self, - mapping: &SetVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { self.inner .port_manager @@ -1042,7 +1067,7 @@ impl SledAgent { pub async fn unset_virtual_nic_host( &self, - mapping: &DeleteVirtualNetworkInterfaceHost, + mapping: &VirtualNetworkInterfaceHost, ) -> Result<(), Error> { self.inner .port_manager @@ -1105,17 +1130,33 @@ impl SledAgent { let mut disks = vec![]; let mut zpools = vec![]; - for (identity, (disk, pool)) in - self.storage().get_latest_resources().await.disks().iter() - { + let all_disks = self.storage().get_latest_disks().await; + for (identity, variant, slot) in all_disks.iter_all() { disks.push(crate::params::InventoryDisk { identity: identity.clone(), - variant: disk.variant(), - slot: disk.slot(), + variant, + slot, }); + } + for zpool in all_disks.all_u2_zpools() { + let info = + match illumos_utils::zpool::Zpool::get_info(&zpool.to_string()) + { + Ok(info) => info, + Err(err) => { + warn!( + self.log, + "Failed to access zpool info"; + "zpool" => %zpool, + "err" => %err + ); + continue; + } + }; + zpools.push(crate::params::InventoryZpool { - id: pool.name.id(), - total_size: ByteCount::try_from(pool.info.size())?, + id: zpool.id(), + total_size: ByteCount::try_from(info.size())?, }); } @@ -1133,33 +1174,6 @@ impl SledAgent { } } -async fn register_metric_producer_with_nexus( - log: Logger, - client: NexusClientWithResolver, - endpoint: ProducerEndpoint, -) { - let endpoint = nexus_client::types::ProducerEndpoint::from(&endpoint); - let register_with_nexus = || async { - client.client().cpapi_producers_post(&endpoint).await.map_err(|e| { - BackoffError::transient(format!("Metric registration error: {e}")) - }) - }; - retry_notify( - retry_policy_internal_service(), - register_with_nexus, - |error, delay| { - warn!( - log, - "failed to register as a metric producer with Nexus"; - "error" => ?error, - "retry_after" => ?delay, - ); - }, - ) - .await - .expect("Expected an infinite retry loop registering with Nexus"); -} - #[derive(From, thiserror::Error, Debug)] pub enum AddSledError { #[error("Failed to learn bootstrap ip for {sled_id}")] diff --git a/sled-agent/src/storage_monitor.rs b/sled-agent/src/storage_monitor.rs index 0c9b287396..8cb63e31f8 100644 --- a/sled-agent/src/storage_monitor.rs +++ b/sled-agent/src/storage_monitor.rs @@ -3,67 +3,19 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. //! A task that listens for storage events from [`sled_storage::manager::StorageManager`] -//! and dispatches them to other parst of the bootstrap agent and sled agent +//! and dispatches them to other parts of the bootstrap agent and sled agent //! code. 
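// ---- Editor's illustrative sketch (not part of the upstream change) ----
// After this rewrite the storage monitor is a single receive loop: wait for a
// disk-set update, then hand it to the dump-device setup code. A minimal
// sketch of that dispatch pattern over a tokio `watch` channel; `DiskUpdate`
// and `handle_update` are hypothetical stand-ins for the real `AllDisks` and
// `DumpSetup::update_dumpdev_setup`:

use tokio::sync::watch;

#[derive(Clone, Debug)]
struct DiskUpdate {
    managed_disks: Vec<String>,
}

async fn run_monitor(mut rx: watch::Receiver<DiskUpdate>) {
    // Loop until the sending side is dropped; each published value is handled
    // exactly once.
    while rx.changed().await.is_ok() {
        let update = rx.borrow_and_update().clone();
        handle_update(&update).await;
    }
}

async fn handle_update(update: &DiskUpdate) {
    // Stand-in for dispatching the new disk set to dump-device setup.
    println!("disks changed: {:?}", update.managed_disks);
}
// ---- end sketch ----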
use crate::dump_setup::DumpSetup; -use crate::nexus::{ConvertInto, NexusClientWithResolver}; -use derive_more::From; -use futures::stream::FuturesOrdered; -use futures::FutureExt; -use futures::StreamExt; -use nexus_client::types::PhysicalDiskDeleteRequest; -use nexus_client::types::PhysicalDiskPutRequest; -use nexus_client::types::ZpoolPutRequest; -use omicron_common::api::external::ByteCount; -use omicron_common::backoff; -use omicron_common::disk::DiskIdentity; +use sled_storage::config::MountConfig; use sled_storage::manager::StorageHandle; -use sled_storage::pool::Pool; -use sled_storage::resources::StorageResources; +use sled_storage::resources::AllDisks; use slog::Logger; -use std::fmt::Debug; -use std::pin::Pin; -use tokio::sync::oneshot; -use uuid::Uuid; - -#[derive(From, Clone, Debug)] -enum NexusDiskRequest { - Put(PhysicalDiskPutRequest), - Delete(PhysicalDiskDeleteRequest), -} - -/// Describes the access to the underlay used by the StorageManager. -#[derive(Clone)] -pub struct UnderlayAccess { - pub nexus_client: NexusClientWithResolver, - pub sled_id: Uuid, -} - -impl Debug for UnderlayAccess { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("UnderlayAccess") - .field("sled_id", &self.sled_id) - .finish() - } -} pub struct StorageMonitor { log: Logger, storage_manager: StorageHandle, - // Receive a onetime notification that the underlay is available - underlay_available_rx: oneshot::Receiver, - - // A cached copy of the `StorageResources` from the last update - storage_resources: StorageResources, - - // Ability to access the underlay network - underlay: Option, - - // A queue for sending nexus notifications in order - nexus_notifications: FuturesOrdered, - // Invokes dumpadm(8) and savecore(8) when new disks are encountered dump_setup: DumpSetup, } @@ -71,24 +23,12 @@ pub struct StorageMonitor { impl StorageMonitor { pub fn new( log: &Logger, + mount_config: MountConfig, storage_manager: StorageHandle, - ) -> (StorageMonitor, oneshot::Sender) { - let (underlay_available_tx, underlay_available_rx) = oneshot::channel(); - let storage_resources = StorageResources::default(); - let dump_setup = DumpSetup::new(&log); + ) -> StorageMonitor { + let dump_setup = DumpSetup::new(&log, mount_config); let log = log.new(o!("component" => "StorageMonitor")); - ( - StorageMonitor { - log, - storage_manager, - underlay_available_rx, - storage_resources, - underlay: None, - nexus_notifications: FuturesOrdered::new(), - dump_setup, - }, - underlay_available_tx, - ) + StorageMonitor { log, storage_manager, dump_setup } } /// Run the main receive loop of the `StorageMonitor` @@ -97,277 +37,23 @@ impl StorageMonitor { pub async fn run(mut self) { loop { tokio::select! 
{ - res = self.nexus_notifications.next(), - if !self.nexus_notifications.is_empty() => - { - match res { - Some(Ok(s)) => { - info!(self.log, "Nexus notification complete: {s}"); - } - e => error!(self.log, "Nexus notification error: {e:?}") - } - } - resources = self.storage_manager.wait_for_changes() => { + disks = self.storage_manager.wait_for_changes() => { info!( self.log, "Received storage manager update"; - "resources" => ?resources + "disks" => ?disks ); - self.handle_resource_update(resources).await; + self.handle_resource_update(disks).await; } - Ok(underlay) = &mut self.underlay_available_rx, - if self.underlay.is_none() => - { - let sled_id = underlay.sled_id; - info!( - self.log, - "Underlay Available"; "sled_id" => %sled_id - ); - self.underlay = Some(underlay); - self.notify_nexus_about_existing_resources(sled_id).await; - } - } - } - } - - /// When the underlay becomes available, we need to notify nexus about any - /// discovered disks and pools, since we don't attempt to notify until there - /// is an underlay available. - async fn notify_nexus_about_existing_resources(&mut self, sled_id: Uuid) { - let current = StorageResources::default(); - let updated = &self.storage_resources; - let nexus_updates = - compute_resource_diffs(&self.log, &sled_id, ¤t, updated); - for put in nexus_updates.disk_puts { - self.physical_disk_notify(put.into()).await; - } - for (pool, put) in nexus_updates.zpool_puts { - self.add_zpool_notify(pool, put).await; - } - } - - async fn handle_resource_update( - &mut self, - updated_resources: StorageResources, - ) { - // If the underlay isn't available, we only record the changes. Nexus - // isn't yet reachable to notify. - if self.underlay.is_some() { - let nexus_updates = compute_resource_diffs( - &self.log, - &self.underlay.as_ref().unwrap().sled_id, - &self.storage_resources, - &updated_resources, - ); - - for put in nexus_updates.disk_puts { - self.physical_disk_notify(put.into()).await; - } - for del in nexus_updates.disk_deletes { - self.physical_disk_notify(del.into()).await; - } - for (pool, put) in nexus_updates.zpool_puts { - self.add_zpool_notify(pool, put).await; } } - self.dump_setup.update_dumpdev_setup(updated_resources.disks()).await; - - // Save the updated `StorageResources` - self.storage_resources = updated_resources; - } - - // Adds a "notification to nexus" to `self.nexus_notifications`, informing it - // about the addition/removal of a physical disk to this sled. - async fn physical_disk_notify(&mut self, disk: NexusDiskRequest) { - let underlay = self.underlay.as_ref().unwrap().clone(); - let disk2 = disk.clone(); - let notify_nexus = move || { - let underlay = underlay.clone(); - let disk = disk.clone(); - async move { - let nexus_client = underlay.nexus_client.client().clone(); - - match &disk { - NexusDiskRequest::Put(request) => { - nexus_client - .physical_disk_put(&request) - .await - .map_err(|e| { - backoff::BackoffError::transient(e.to_string()) - })?; - } - NexusDiskRequest::Delete(request) => { - nexus_client - .physical_disk_delete(&request) - .await - .map_err(|e| { - backoff::BackoffError::transient(e.to_string()) - })?; - } - } - let msg = format!("{:?}", disk); - Ok(msg) - } - }; - - let log = self.log.clone(); - // This notification is often invoked before Nexus has started - // running, so avoid flagging any errors as concerning until some - // time has passed. 
- let log_post_failure = move |err, call_count, total_duration| { - if call_count == 0 { - info!(log, "failed to notify nexus about {disk2:?}"; - "err" => ?err - ); - } else if total_duration > std::time::Duration::from_secs(30) { - warn!(log, "failed to notify nexus about {disk2:?}"; - "err" => ?err, - "total duration" => ?total_duration); - } - }; - self.nexus_notifications.push_back( - backoff::retry_notify_ext( - backoff::retry_policy_internal_service_aggressive(), - notify_nexus, - log_post_failure, - ) - .boxed(), - ); } - // Adds a "notification to nexus" to `nexus_notifications`, - // informing it about the addition of `pool_id` to this sled. - async fn add_zpool_notify( - &mut self, - pool: Pool, - zpool_request: ZpoolPutRequest, - ) { - let pool_id = pool.name.id(); - let underlay = self.underlay.as_ref().unwrap().clone(); - - let notify_nexus = move || { - let underlay = underlay.clone(); - let zpool_request = zpool_request.clone(); - async move { - let sled_id = underlay.sled_id; - let nexus_client = underlay.nexus_client.client().clone(); - nexus_client - .zpool_put(&sled_id, &pool_id, &zpool_request) - .await - .map_err(|e| { - backoff::BackoffError::transient(e.to_string()) - })?; - let msg = format!("{:?}", zpool_request); - Ok(msg) - } - }; - - let log = self.log.clone(); - let name = pool.name.clone(); - let disk = pool.parent.clone(); - let log_post_failure = move |err, call_count, total_duration| { - if call_count == 0 { - info!(log, "failed to notify nexus about a new pool {name} on disk {disk:?}"; - "err" => ?err); - } else if total_duration > std::time::Duration::from_secs(30) { - warn!(log, "failed to notify nexus about a new pool {name} on disk {disk:?}"; - "err" => ?err, - "total duration" => ?total_duration); - } - }; - self.nexus_notifications.push_back( - backoff::retry_notify_ext( - backoff::retry_policy_internal_service_aggressive(), - notify_nexus, - log_post_failure, + async fn handle_resource_update(&mut self, updated_disks: AllDisks) { + self.dump_setup + .update_dumpdev_setup( + updated_disks.iter_managed().map(|(_id, disk)| disk), ) - .boxed(), - ); + .await; } } - -// The type of a future which is used to send a notification to Nexus. 
-type NotifyFut = - Pin> + Send>>; - -struct NexusUpdates { - disk_puts: Vec, - disk_deletes: Vec, - zpool_puts: Vec<(Pool, ZpoolPutRequest)>, -} - -fn compute_resource_diffs( - log: &Logger, - sled_id: &Uuid, - current: &StorageResources, - updated: &StorageResources, -) -> NexusUpdates { - let mut disk_puts = vec![]; - let mut disk_deletes = vec![]; - let mut zpool_puts = vec![]; - - let mut put_pool = |disk_id: &DiskIdentity, updated_pool: &Pool| { - match ByteCount::try_from(updated_pool.info.size()) { - Ok(size) => zpool_puts.push(( - updated_pool.clone(), - ZpoolPutRequest { - size: size.into(), - disk_model: disk_id.model.clone(), - disk_serial: disk_id.serial.clone(), - disk_vendor: disk_id.vendor.clone(), - }, - )), - Err(err) => { - error!( - log, - "Error parsing pool size"; - "name" => updated_pool.name.to_string(), - "err" => ?err); - } - } - }; - - // Diff the existing resources with the update to see what has changed - // This loop finds disks and pools that were modified or deleted - for (disk_id, (disk, pool)) in current.disks().iter() { - match updated.disks().get(disk_id) { - Some((updated_disk, updated_pool)) => { - if disk != updated_disk { - disk_puts.push(PhysicalDiskPutRequest { - sled_id: *sled_id, - model: disk_id.model.clone(), - serial: disk_id.serial.clone(), - vendor: disk_id.vendor.clone(), - variant: updated_disk.variant().convert(), - }); - } - if pool != updated_pool { - put_pool(disk_id, updated_pool); - } - } - None => disk_deletes.push(PhysicalDiskDeleteRequest { - model: disk_id.model.clone(), - serial: disk_id.serial.clone(), - vendor: disk_id.vendor.clone(), - sled_id: *sled_id, - }), - } - } - - // Diff the existing resources with the update to see what has changed - // This loop finds new disks and pools - for (disk_id, (updated_disk, updated_pool)) in updated.disks().iter() { - if !current.disks().contains_key(disk_id) { - disk_puts.push(PhysicalDiskPutRequest { - sled_id: *sled_id, - model: disk_id.model.clone(), - serial: disk_id.serial.clone(), - vendor: disk_id.vendor.clone(), - variant: updated_disk.variant().convert(), - }); - put_pool(disk_id, updated_pool); - } - } - - NexusUpdates { disk_puts, disk_deletes, zpool_puts } -} diff --git a/sled-agent/src/swap_device.rs b/sled-agent/src/swap_device.rs index 6a00b42672..7dadf62434 100644 --- a/sled-agent/src/swap_device.rs +++ b/sled-agent/src/swap_device.rs @@ -62,7 +62,7 @@ pub(crate) fn ensure_swap_device( assert!(size_gb > 0); let devs = swapctl::list_swap_devices()?; - if devs.len() > 0 { + if !devs.is_empty() { if devs.len() > 1 { // This should really never happen unless we've made a mistake, but it's // probably fine to have more than one swap device. Thus, don't panic @@ -450,7 +450,7 @@ mod swapctl { let path = String::from_utf8_lossy(p.to_bytes()).to_string(); devices.push(SwapDevice { - path: path, + path, start: e.ste_start as u64, length: e.ste_length as u64, total_pages: e.ste_pages as u64, @@ -473,8 +473,8 @@ mod swapctl { SwapDeviceError::AddDevice { msg: format!("could not convert path to CString: {}", e,), path: path_cp.clone(), - start: start, - length: length, + start, + length, } })?; @@ -490,8 +490,8 @@ mod swapctl { SwapDeviceError::AddDevice { msg: e.to_string(), path: path_cp, - start: start, - length: length, + start, + length, } })? 
}; diff --git a/sled-agent/src/vmm_reservoir.rs b/sled-agent/src/vmm_reservoir.rs index b16286f5f5..0fa7bc14af 100644 --- a/sled-agent/src/vmm_reservoir.rs +++ b/sled-agent/src/vmm_reservoir.rs @@ -120,7 +120,8 @@ impl VmmReservoirManagerHandle { rx.await.map_err(|_| Error::ReplySenderDropped)? } - #[cfg(test)] + /// TODO: We should be able run to tests in VMs that can use the real VmmReservoir + #[cfg(all(test, target_os = "illumos"))] pub fn stub_for_test() -> Self { let (tx, _) = flume::bounded(1); let (size_updated_tx, _) = broadcast::channel(1); @@ -239,7 +240,8 @@ impl VmmReservoirManager { percent ))); }; - (hardware_physical_ram_bytes as f64 * (percent as f64 / 100.0)) + (hardware_physical_ram_bytes as f64 + * (f64::from(percent) / 100.0)) .floor() as u64 } }; diff --git a/sled-agent/src/zone_bundle.rs b/sled-agent/src/zone_bundle.rs index 7b0d9b8071..16147e5957 100644 --- a/sled-agent/src/zone_bundle.rs +++ b/sled-agent/src/zone_bundle.rs @@ -255,7 +255,7 @@ impl Inner { // that can exist but do not, i.e., those whose parent datasets already // exist; and returns those. async fn bundle_directories(&self) -> Vec { - let resources = self.storage_handle.get_latest_resources().await; + let resources = self.storage_handle.get_latest_disks().await; let expected = resources.all_zone_bundle_directories(); let mut out = Vec::with_capacity(expected.len()); for each in expected.into_iter() { @@ -263,6 +263,7 @@ impl Inner { out.push(each); } } + out.sort(); out } } @@ -427,7 +428,7 @@ impl ZoneBundler { ) -> Result { let inner = self.inner.lock().await; let storage_dirs = inner.bundle_directories().await; - let resources = inner.storage_handle.get_latest_resources().await; + let resources = inner.storage_handle.get_latest_disks().await; let extra_log_dirs = resources .all_u2_mountpoints(U2_DEBUG_DATASET) .into_iter() @@ -986,13 +987,7 @@ async fn create( let zone_metadata = ZoneBundleMetadata::new(zone.name(), context.cause); let filename = format!("{}.tar.gz", zone_metadata.id.bundle_id); let full_path = zone_bundle_dirs[0].join(&filename); - let file = match tokio::fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&full_path) - .await - { + let file = match tokio::fs::File::create(&full_path).await { Ok(f) => f.into_std().await, Err(e) => { error!( @@ -2168,26 +2163,22 @@ mod illumos_tests { use super::StorageLimit; use super::Utf8Path; use super::Utf8PathBuf; - use super::Uuid; use super::ZoneBundleCause; use super::ZoneBundleId; use super::ZoneBundleInfo; use super::ZoneBundleMetadata; use super::ZoneBundler; - use super::ZFS; use anyhow::Context; use chrono::DateTime; use chrono::TimeZone; use chrono::Timelike; use chrono::Utc; - use illumos_utils::zpool::ZpoolName; use rand::RngCore; - use sled_storage::disk::RawDisk; - use sled_storage::disk::SyntheticDisk; - use sled_storage::manager::{FakeStorageManager, StorageHandle}; + use sled_storage::manager_test_harness::StorageManagerTestHarness; use slog::Drain; use slog::Logger; - use tokio::process::Command; + use std::sync::Arc; + use tokio::sync::Mutex; /// An iterator that returns the date of consecutive days beginning with 1st /// January 2020. The time portion of each returned date will be fixed at @@ -2239,77 +2230,58 @@ mod illumos_tests { assert!(zfs_quota(&path).await.is_err()); } - struct CleanupTestContext { + struct CleanupTestContextInner { resource_wrapper: ResourceWrapper, context: CleanupContext, bundler: ZoneBundler, } + // Practically, we only expect one thread to "own" this context at a time. 
+ // However, with the "run_test_with_zfs_dataset", it's hard to pass an + // async function as a parameter ("test") that acts on a mutable reference + // without some fancy HRTB shenanigans. + // + // Reader: If you think you can pass a "&mut CleanupTestContextInner" + // there instead of an "Arc>", I welcome you to try! + #[derive(Clone)] + struct CleanupTestContext { + ctx: Arc>, + } + // A wrapper around `StorageResources`, that automatically creates dummy // directories in the provided test locations and removes them on drop. // - // I'd much prefer this to be done in $TEMPDIR. However, `StorageResources` - // is difficult to mock out or modify in such a way that the underlying - // dataset locations can be controlled. - // - // This creates completely BS disks, and fake names for the zpools on them. - // Those pools are _supposed_ to live at directories like: - // - // `/pool/int/` - // // They don't exist when you just do `StorageResources::new_for_test()`. // This type creates the datasets at the expected mountpoints, backed by the // ramdisk, and removes them on drop. This is basically a tempdir-like // system, that creates the directories implied by the `StorageResources` // expected disk structure. struct ResourceWrapper { - storage_handle: StorageHandle, + storage_test_harness: StorageManagerTestHarness, dirs: Vec, } - async fn setup_storage() -> StorageHandle { - let (manager, handle) = FakeStorageManager::new(); + async fn setup_storage(log: &Logger) -> StorageManagerTestHarness { + let mut harness = StorageManagerTestHarness::new(&log).await; - // Spawn the storage manager as done by sled-agent - tokio::spawn(async move { - manager.run().await; - }); - - // These must be internal zpools - for i in 0..2 { - let internal_zpool_name = ZpoolName::new_internal(Uuid::new_v4()); - let internal_disk: RawDisk = - SyntheticDisk::new(internal_zpool_name.clone(), i).into(); - handle.upsert_disk(internal_disk).await; - } - handle + harness.handle().key_manager_ready().await; + let _raw_disks = + harness.add_vdevs(&["m2_left.vdev", "m2_right.vdev"]).await; + harness } impl ResourceWrapper { - // Create new storage resources, and mount fake datasets at the required + // Create new storage resources, and mount datasets at the required // locations. - async fn new() -> Self { + async fn new(log: &Logger) -> Self { // Spawn the storage related tasks required for testing and insert // synthetic disks. 
- let storage_handle = setup_storage().await; - let resources = storage_handle.get_latest_resources().await; - let dirs = resources.all_zone_bundle_directories(); - for d in dirs.iter() { - let id = - d.components().nth(3).unwrap().as_str().parse().unwrap(); - create_test_dataset(&id, d).await.unwrap(); - } - Self { storage_handle, dirs } - } - } - - impl Drop for ResourceWrapper { - fn drop(&mut self) { - for d in self.dirs.iter() { - let id = - d.components().nth(3).unwrap().as_str().parse().unwrap(); - remove_test_dataset(&id).unwrap(); - } + let storage_test_harness = setup_storage(log).await; + let resources = + storage_test_harness.handle().get_latest_disks().await; + let mut dirs = resources.all_zone_bundle_directories(); + dirs.sort(); + Self { storage_test_harness, dirs } } } @@ -2325,25 +2297,34 @@ mod illumos_tests { async fn setup_fake_cleanup_task() -> anyhow::Result { let log = test_logger(); let context = CleanupContext::default(); - let resource_wrapper = ResourceWrapper::new().await; + let resource_wrapper = ResourceWrapper::new(&log).await; let bundler = ZoneBundler::new( log, - resource_wrapper.storage_handle.clone(), + resource_wrapper.storage_test_harness.handle().clone(), context, ); - Ok(CleanupTestContext { resource_wrapper, context, bundler }) + Ok(CleanupTestContext { + ctx: Arc::new(Mutex::new(CleanupTestContextInner { + resource_wrapper, + context, + bundler, + })), + }) } #[tokio::test] async fn test_context() { - let ctx = setup_fake_cleanup_task().await.unwrap(); + let context = setup_fake_cleanup_task().await.unwrap(); + let mut ctx = context.ctx.lock().await; let context = ctx.bundler.cleanup_context().await; assert_eq!(context, ctx.context, "received incorrect context"); + ctx.resource_wrapper.storage_test_harness.cleanup().await; } #[tokio::test] async fn test_update_context() { - let ctx = setup_fake_cleanup_task().await.unwrap(); + let context = setup_fake_cleanup_task().await.unwrap(); + let mut ctx = context.ctx.lock().await; let new_context = CleanupContext { period: CleanupPeriod::new(ctx.context.period.as_duration() / 2) .unwrap(), @@ -2363,6 +2344,7 @@ mod illumos_tests { .expect("failed to set context"); let context = ctx.bundler.cleanup_context().await; assert_eq!(context, new_context, "failed to update context"); + ctx.resource_wrapper.storage_test_harness.cleanup().await; } // Quota applied to test datasets. @@ -2374,59 +2356,7 @@ mod illumos_tests { // i.e., the "ashift" value. An empty dataset is unlikely to contain more // than one megabyte of overhead, so use that as a conservative test size to // avoid issues. - const TEST_QUOTA: u64 = 1024 * 1024; - - async fn create_test_dataset( - id: &Uuid, - mountpoint: &Utf8PathBuf, - ) -> anyhow::Result<()> { - let output = Command::new("/usr/bin/pfexec") - .arg(ZFS) - .arg("create") - .arg("-o") - .arg(format!("quota={TEST_QUOTA}")) - .arg("-o") - .arg(format!("mountpoint={mountpoint}")) - .arg(format!("rpool/{id}")) - .output() - .await - .context("failed to spawn zfs create operation")?; - anyhow::ensure!( - output.status.success(), - "zfs create operation failed: {}", - String::from_utf8_lossy(&output.stderr), - ); - - // Make the path operable by the test code. 
- let output = Command::new("/usr/bin/pfexec") - .arg("chmod") - .arg("a+rw") - .arg(&mountpoint) - .output() - .await - .context("failed to spawn chmod operation")?; - anyhow::ensure!( - output.status.success(), - "chmod-ing the dataset failed: {}", - String::from_utf8_lossy(&output.stderr), - ); - Ok(()) - } - - fn remove_test_dataset(id: &Uuid) -> anyhow::Result<()> { - let output = std::process::Command::new("/usr/bin/pfexec") - .arg(ZFS) - .arg("destroy") - .arg(format!("rpool/{id}")) - .output() - .context("failed to spawn zfs destroy operation")?; - anyhow::ensure!( - output.status.success(), - "zfs destroy operation failed: {}", - String::from_utf8_lossy(&output.stderr), - ); - Ok(()) - } + const TEST_QUOTA: usize = sled_storage::dataset::DEBUG_DATASET_QUOTA; async fn run_test_with_zfs_dataset(test: T) where @@ -2436,7 +2366,14 @@ mod illumos_tests { let context = setup_fake_cleanup_task() .await .expect("failed to create cleanup task"); - let result = test(context).await; + let result = test(context.clone()).await; + + let mut ctx = context.ctx.lock().await; + info!( + &ctx.bundler.log, + "Test completed, performing cleanup before emitting result" + ); + ctx.resource_wrapper.storage_test_harness.cleanup().await; result.expect("test failed!"); } @@ -2448,6 +2385,7 @@ mod illumos_tests { async fn test_utilization_body( ctx: CleanupTestContext, ) -> anyhow::Result<()> { + let ctx = ctx.ctx.lock().await; let utilization = ctx.bundler.utilization().await?; let paths = utilization.keys().cloned().collect::>(); @@ -2462,8 +2400,22 @@ mod illumos_tests { .values() .next() .context("no utilization information?")?; + + // If this needs to change, go modify the "add_vdevs" call in + // "setup_storage". + assert!( + TEST_QUOTA + < StorageManagerTestHarness::DEFAULT_VDEV_SIZE + .try_into() + .unwrap(), + "Quota larger than underlying device (quota: {}, device size: {})", + TEST_QUOTA, + StorageManagerTestHarness::DEFAULT_VDEV_SIZE, + ); + anyhow::ensure!( - bundle_utilization.dataset_quota == TEST_QUOTA, + bundle_utilization.dataset_quota + == u64::try_from(TEST_QUOTA).unwrap(), "computed incorrect dataset quota" ); @@ -2489,9 +2441,13 @@ mod illumos_tests { DaysOfOurBundles::new().next().unwrap(), ZoneBundleCause::ExplicitRequest, ) - .await?; + .await + .context("Failed to insert_fake_bundle")?; - let new_utilization = ctx.bundler.utilization().await?; + let new_utilization = + ctx.bundler.utilization().await.context( + "Failed to get utilization after inserting fake bundle", + )?; anyhow::ensure!( paths == new_utilization.keys().cloned().collect::>(), "paths should not change" @@ -2545,6 +2501,7 @@ mod illumos_tests { } async fn test_cleanup_body(ctx: CleanupTestContext) -> anyhow::Result<()> { + let ctx = ctx.ctx.lock().await; // Let's add a bunch of fake bundles, until we should be over the // storage limit. 
These will all be explicit requests, so the priority // should be decided based on time, i.e., the ones first added should be @@ -2560,16 +2517,18 @@ mod illumos_tests { let mut days = DaysOfOurBundles::new(); let mut info = Vec::new(); let mut utilization = ctx.bundler.utilization().await?; + let bundle_dir = &ctx.resource_wrapper.dirs[0]; loop { let us = utilization - .values() - .next() + .get(bundle_dir) .context("no utilization information")?; + if us.bytes_used > us.bytes_available { break; } + let it = insert_fake_bundle( - &ctx.resource_wrapper.dirs[0], + bundle_dir, days.next().unwrap(), ZoneBundleCause::ExplicitRequest, ) @@ -2582,15 +2541,8 @@ mod illumos_tests { let counts = ctx.bundler.cleanup().await.context("failed to run cleanup")?; - // We should have cleaned up items in the same paths that we have in the - // context. - anyhow::ensure!( - counts.keys().zip(ctx.resource_wrapper.dirs.iter()).all(|(a, b)| a == b), - "cleaned-up directories do not match the context's storage directories", - ); - // We should have cleaned up the first-inserted bundle. - let count = counts.values().next().context("no cleanup counts")?; + let count = counts.get(bundle_dir).context("no cleanup counts")?; anyhow::ensure!(count.bundles == 1, "expected to cleanup one bundle"); anyhow::ensure!( count.bytes == info[0].bytes, @@ -2621,6 +2573,7 @@ mod illumos_tests { async fn test_list_with_filter_body( ctx: CleanupTestContext, ) -> anyhow::Result<()> { + let ctx = ctx.ctx.lock().await; let mut days = DaysOfOurBundles::new(); let mut info = Vec::new(); const N_BUNDLES: usize = 3; @@ -2717,11 +2670,7 @@ mod illumos_tests { let path = zone_dir.join(format!("{}.tar.gz", metadata.id.bundle_id)); // Create a tarball at the path with this fake metadata. - let file = tokio::fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(&path) + let file = tokio::fs::File::create(&path) .await .context("failed to open zone bundle path")? 
.into_std() diff --git a/sled-agent/tests/integration_tests/early_network.rs b/sled-agent/tests/integration_tests/early_network.rs index b15694c7f1..4421e76e8b 100644 --- a/sled-agent/tests/integration_tests/early_network.rs +++ b/sled-agent/tests/integration_tests/early_network.rs @@ -8,7 +8,7 @@ use std::net::Ipv4Addr; use bootstore::schemes::v0 as bootstore; use omicron_common::api::{ - external::SwitchLocation, + external::{ImportExportPolicy, SwitchLocation}, internal::shared::{ BgpConfig, BgpPeerConfig, PortConfigV1, PortFec, PortSpeed, RackNetworkConfig, RouteConfig, @@ -124,6 +124,7 @@ fn current_config_example() -> (&'static str, EarlyNetworkConfig) { routes: vec![RouteConfig { destination: "10.1.9.32/16".parse().unwrap(), nexthop: "10.1.9.32".parse().unwrap(), + vlan_id: None, }], addresses: vec!["2001:db8::/96".parse().unwrap()], switch: SwitchLocation::Switch0, @@ -139,12 +140,24 @@ fn current_config_example() -> (&'static str, EarlyNetworkConfig) { delay_open: None, connect_retry: Some(30), keepalive: Some(10), + remote_asn: None, + min_ttl: None, + md5_auth_key: None, + multi_exit_discriminator: None, + communities: Vec::new(), + local_pref: None, + enforce_first_as: false, + allowed_export: ImportExportPolicy::NoFiltering, + allowed_import: ImportExportPolicy::NoFiltering, + vlan_id: None, }], autoneg: true, }], bgp: vec![BgpConfig { asn: 20000, originate: vec!["192.168.0.0/24".parse().unwrap()], + shaper: None, + checker: None, }], bfd: vec![], }), diff --git a/sled-agent/tests/output/new-rss-sled-plans/madrid-rss-sled-plan.json b/sled-agent/tests/output/new-rss-sled-plans/madrid-rss-sled-plan.json index 5462c77d25..108914a26f 100644 --- a/sled-agent/tests/output/new-rss-sled-plans/madrid-rss-sled-plan.json +++ b/sled-agent/tests/output/new-rss-sled-plans/madrid-rss-sled-plan.json @@ -127,7 +127,8 @@ "routes": [ { "destination": "0.0.0.0/0", - "nexthop": "172.20.15.33" + "nexthop": "172.20.15.33", + "vlan_id": null } ], "addresses": [ @@ -144,7 +145,8 @@ "routes": [ { "destination": "0.0.0.0/0", - "nexthop": "172.20.15.33" + "nexthop": "172.20.15.33", + "vlan_id": null } ], "addresses": [ @@ -160,6 +162,9 @@ ], "bgp": [], "bfd": [] + }, + "allowed_source_ips": { + "allow": "any" } } } \ No newline at end of file diff --git a/sled-hardware/Cargo.toml b/sled-hardware/Cargo.toml index 1c914e2897..df2eb36071 100644 --- a/sled-hardware/Cargo.toml +++ b/sled-hardware/Cargo.toml @@ -5,6 +5,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] anyhow.workspace = true camino.workspace = true @@ -15,6 +18,7 @@ illumos-utils.workspace = true libc.workspace = true macaddr.workspace = true omicron-common.workspace = true +omicron-uuid-kinds.workspace = true rand.workspace = true schemars.workspace = true serde.workspace = true diff --git a/sled-hardware/src/disk.rs b/sled-hardware/src/disk.rs index a649b205e1..d48dd88c3d 100644 --- a/sled-hardware/src/disk.rs +++ b/sled-hardware/src/disk.rs @@ -5,14 +5,13 @@ use camino::{Utf8Path, Utf8PathBuf}; use illumos_utils::fstyp::Fstyp; use illumos_utils::zpool::Zpool; -use illumos_utils::zpool::ZpoolKind; -use illumos_utils::zpool::ZpoolName; use omicron_common::disk::DiskIdentity; +use omicron_common::zpool_name::{ZpoolKind, ZpoolName}; +use omicron_uuid_kinds::ZpoolUuid; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use slog::Logger; use slog::{info, warn}; -use uuid::Uuid; cfg_if::cfg_if! 
{ if #[cfg(target_os = "illumos")] { @@ -32,6 +31,14 @@ pub enum PooledDiskError { BadPartitionLayout { path: Utf8PathBuf, why: String }, #[error("Requested partition {partition:?} not found on device {path}")] NotFound { path: Utf8PathBuf, partition: Partition }, + #[error("Zpool UUID required to format this disk")] + MissingZpoolUuid, + #[error("Observed Zpool with unexpected UUID (saw: {observed}, expected: {expected})")] + UnexpectedUuid { expected: ZpoolUuid, observed: ZpoolUuid }, + #[error("Unexpected disk variant")] + UnexpectedVariant, + #[error("Zpool does not exist")] + ZpoolDoesNotExist, #[error(transparent)] ZpoolCreate(#[from] illumos_utils::zpool::CreateError), #[error("Cannot import zpool: {0}")] @@ -58,7 +65,9 @@ pub enum Partition { ZfsPool, } -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive( + Debug, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, Deserialize, Serialize, +)] pub struct DiskPaths { // Full path to the disk under "/devices". // Should NOT end with a ":partition_letter". @@ -69,7 +78,11 @@ pub struct DiskPaths { impl DiskPaths { // Returns the "illumos letter-indexed path" for a device. - fn partition_path(&self, index: usize, raw: bool) -> Option { + pub fn partition_path( + &self, + index: usize, + raw: bool, + ) -> Option { let index = u8::try_from(index).ok()?; let path = &self.devfs_path; @@ -125,7 +138,9 @@ impl DiskPaths { /// This exists as a distinct entity from `Disk` in `sled-storage` because it /// may be desirable to monitor for hardware in one context, and conform disks /// to partition layouts in a different context. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive( + Debug, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, Deserialize, Serialize, +)] pub struct UnparsedDisk { paths: DiskPaths, slot: i64, @@ -135,7 +150,6 @@ pub struct UnparsedDisk { } impl UnparsedDisk { - #[allow(dead_code)] pub fn new( devfs_path: Utf8PathBuf, dev_path: Option, @@ -153,6 +167,10 @@ impl UnparsedDisk { } } + pub fn paths(&self) -> &DiskPaths { + &self.paths + } + pub fn devfs_path(&self) -> &Utf8PathBuf { &self.paths.devfs_path } @@ -168,6 +186,10 @@ impl UnparsedDisk { pub fn is_boot_disk(&self) -> bool { self.is_boot_disk } + + pub fn slot(&self) -> i64 { + self.slot + } } /// A physical disk that is partitioned to contain exactly one zpool @@ -197,14 +219,15 @@ impl PooledDisk { pub fn new( log: &Logger, unparsed_disk: UnparsedDisk, + zpool_id: Option, ) -> Result { let paths = &unparsed_disk.paths; let variant = unparsed_disk.variant; - let identity = unparsed_disk.identity(); + let identity = &unparsed_disk.identity; // Ensure the GPT has the right format. This does not necessarily // mean that the partitions are populated with the data we need. let partitions = - ensure_partition_layout(&log, &paths, variant, identity)?; + ensure_partition_layout(&log, &paths, variant, identity, zpool_id)?; // Find the path to the zpool which exists on this disk. 
// @@ -216,9 +239,10 @@ impl PooledDisk { false, )?; - let zpool_name = Self::ensure_zpool_exists(log, variant, &zpool_path)?; - Self::ensure_zpool_imported(log, &zpool_name)?; - Self::ensure_zpool_failmode_is_continue(log, &zpool_name)?; + let zpool_name = + ensure_zpool_exists(log, variant, &zpool_path, zpool_id)?; + ensure_zpool_imported(log, &zpool_name)?; + ensure_zpool_failmode_is_continue(log, &zpool_name)?; Ok(Self { paths: unparsed_disk.paths, @@ -230,83 +254,130 @@ impl PooledDisk { zpool_name, }) } +} - fn ensure_zpool_exists( - log: &Logger, - variant: DiskVariant, - zpool_path: &Utf8Path, - ) -> Result { - let zpool_name = match Fstyp::get_zpool(&zpool_path) { - Ok(zpool_name) => zpool_name, - Err(_) => { - // What happened here? - // - We saw that a GPT exists for this Disk (or we didn't, and - // made our own). - // - However, this particular partition does not appear to have - // a zpool. - // - // This can happen in situations where "zpool create" - // initialized a zpool, and "zpool destroy" removes the zpool - // but still leaves the partition table untouched. - // - // To remedy: Let's enforce that the partition exists. - info!( - log, - "GPT exists without Zpool: formatting zpool at {}", - zpool_path, - ); - // If a zpool does not already exist, create one. - let zpool_name = match variant { - DiskVariant::M2 => ZpoolName::new_internal(Uuid::new_v4()), - DiskVariant::U2 => ZpoolName::new_external(Uuid::new_v4()), - }; - Zpool::create(&zpool_name, &zpool_path)?; - zpool_name +/// Checks if the zpool exists, but makes no modifications, +/// and does not attempt to import the zpool. +pub fn check_if_zpool_exists( + zpool_path: &Utf8Path, +) -> Result { + let zpool_name = match Fstyp::get_zpool(&zpool_path) { + Ok(zpool_name) => zpool_name, + Err(_) => return Err(PooledDiskError::ZpoolDoesNotExist), + }; + Ok(zpool_name) +} + +pub fn ensure_zpool_exists( + log: &Logger, + variant: DiskVariant, + zpool_path: &Utf8Path, + zpool_id: Option, +) -> Result { + let zpool_name = match Fstyp::get_zpool(&zpool_path) { + Ok(zpool_name) => { + if let Some(expected) = zpool_id { + info!(log, "Checking that UUID in storage matches request"; "expected" => ?expected); + let observed = zpool_name.id(); + if expected != observed { + warn!(log, "Zpool UUID mismatch"; "expected" => ?expected, "observed" => ?observed); + return Err(PooledDiskError::UnexpectedUuid { + expected, + observed, + }); + } } - }; - Zpool::import(&zpool_name).map_err(|e| { - warn!(log, "Failed to import zpool {zpool_name}: {e}"); - PooledDiskError::ZpoolImport(e) - })?; + zpool_name + } + Err(_) => { + // What happened here? + // - We saw that a GPT exists for this Disk (or we didn't, and + // made our own). + // - However, this particular partition does not appear to have + // a zpool. + // + // This can happen in situations where "zpool create" + // initialized a zpool, and "zpool destroy" removes the zpool + // but still leaves the partition table untouched. + // + // To remedy: Let's enforce that the partition exists. + info!( + log, + "GPT exists without Zpool: formatting zpool at {}", zpool_path, + ); + let id = match zpool_id { + Some(id) => { + info!(log, "Formatting zpool with requested ID"; "id" => ?id); + id + } + None => { + let id = ZpoolUuid::new_v4(); + info!(log, "Formatting zpool with generated ID"; "id" => ?id); + id + } + }; + + // If a zpool does not already exist, create one. 
+ let zpool_name = match variant { + DiskVariant::M2 => ZpoolName::new_internal(id), + DiskVariant::U2 => ZpoolName::new_external(id), + }; + Zpool::create(&zpool_name, &zpool_path)?; + zpool_name + } + }; + Zpool::import(&zpool_name).map_err(|e| { + warn!(log, "Failed to import zpool {zpool_name}: {e}"); + PooledDiskError::ZpoolImport(e) + })?; - Ok(zpool_name) - } + Ok(zpool_name) +} - fn ensure_zpool_imported( - log: &Logger, - zpool_name: &ZpoolName, - ) -> Result<(), PooledDiskError> { - Zpool::import(&zpool_name).map_err(|e| { - warn!(log, "Failed to import zpool {zpool_name}: {e}"); - PooledDiskError::ZpoolImport(e) - })?; - Ok(()) - } +pub fn ensure_zpool_imported( + log: &Logger, + zpool_name: &ZpoolName, +) -> Result<(), PooledDiskError> { + Zpool::import(&zpool_name).map_err(|e| { + warn!(log, "Failed to import zpool {zpool_name}: {e}"); + PooledDiskError::ZpoolImport(e) + })?; + Ok(()) +} - fn ensure_zpool_failmode_is_continue( - log: &Logger, - zpool_name: &ZpoolName, - ) -> Result<(), PooledDiskError> { - // Ensure failmode is set to `continue`. See - // https://github.com/oxidecomputer/omicron/issues/2766 for details. The - // short version is, each pool is only backed by one vdev. There is no - // recovery if one starts breaking, so if connectivity to one dies it's - // actively harmful to try to wait for it to come back; we'll be waiting - // forever and get stuck. We'd rather get the errors so we can deal with - // them ourselves. - Zpool::set_failmode_continue(&zpool_name).map_err(|e| { - warn!( - log, - "Failed to set failmode=continue on zpool {zpool_name}: {e}" - ); - PooledDiskError::ZpoolImport(e) - })?; - Ok(()) - } +pub fn ensure_zpool_failmode_is_continue( + log: &Logger, + zpool_name: &ZpoolName, +) -> Result<(), PooledDiskError> { + // Ensure failmode is set to `continue`. See + // https://github.com/oxidecomputer/omicron/issues/2766 for details. The + // short version is, each pool is only backed by one vdev. There is no + // recovery if one starts breaking, so if connectivity to one dies it's + // actively harmful to try to wait for it to come back; we'll be waiting + // forever and get stuck. We'd rather get the errors so we can deal with + // them ourselves. + Zpool::set_failmode_continue(&zpool_name).map_err(|e| { + warn!( + log, + "Failed to set failmode=continue on zpool {zpool_name}: {e}" + ); + PooledDiskError::ZpoolImport(e) + })?; + Ok(()) } #[derive( - Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema, + Debug, + Clone, + Copy, + PartialEq, + Eq, + Hash, + Serialize, + Deserialize, + JsonSchema, + Ord, + PartialOrd, )] pub enum DiskVariant { U2, diff --git a/sled-hardware/src/illumos/mod.rs b/sled-hardware/src/illumos/mod.rs index 7dd6f9e20d..e9a47de29e 100644 --- a/sled-hardware/src/illumos/mod.rs +++ b/sled-hardware/src/illumos/mod.rs @@ -263,7 +263,7 @@ impl HardwareView { updates.push(DiskAdded(disk.clone())); } - self.disks = polled_hw.disks.clone(); + self.disks.clone_from(&polled_hw.disks); } } @@ -509,6 +509,7 @@ fn poll_blkdev_node( fn poll_device_tree( log: &Logger, inner: &Arc>, + nongimlet_observed_disks: &[UnparsedDisk], tx: &broadcast::Sender, ) -> Result<(), Error> { // Construct a view of hardware by walking the device tree. @@ -517,28 +518,36 @@ fn poll_device_tree( Err(e) => { if let Error::NotAGimlet(root_node) = &e { + let mut inner = inner.lock().unwrap(); + if root_node.as_str() == "i86pc" { // If on i86pc, generate some baseboard information before // returning this error. 
Each sled agent has to be uniquely // identified for multiple non-gimlets to work. - { - let mut inner = inner.lock().unwrap(); - - if inner.baseboard.is_none() { - let pc_baseboard = Baseboard::new_pc( - gethostname().into_string().unwrap_or_else( - |_| Uuid::new_v4().simple().to_string(), - ), - root_node.clone(), - ); - - info!( - log, - "Generated i86pc baseboard {:?}", pc_baseboard - ); - - inner.baseboard = Some(pc_baseboard); - } + if inner.baseboard.is_none() { + let pc_baseboard = Baseboard::new_pc( + gethostname().into_string().unwrap_or_else(|_| { + Uuid::new_v4().simple().to_string() + }), + root_node.clone(), + ); + + info!( + log, + "Generated i86pc baseboard {:?}", pc_baseboard + ); + + inner.baseboard = Some(pc_baseboard); + } + } + + // For platforms that don't support the HardwareSnapshot + // functionality, sled-agent can be supplied a fixed list of + // UnparsedDisks. Add those to the HardwareSnapshot here if they + // are missing (which they will be for non-gimlets). + for observed_disk in nongimlet_observed_disks { + if !inner.disks.contains(observed_disk) { + inner.disks.insert(observed_disk.clone()); } } } @@ -572,10 +581,11 @@ fn poll_device_tree( async fn hardware_tracking_task( log: Logger, inner: Arc>, + nongimlet_observed_disks: Vec, tx: broadcast::Sender, ) { loop { - match poll_device_tree(&log, &inner, &tx) { + match poll_device_tree(&log, &inner, &nongimlet_observed_disks, &tx) { // We've already warned about `NotAGimlet` by this point, // so let's not spam the logs. Ok(_) | Err(Error::NotAGimlet(_)) => (), @@ -604,7 +614,13 @@ impl HardwareManager { /// /// Arguments: /// - `sled_mode`: The sled's mode of operation (auto detect or force gimlet/scrimlet). - pub fn new(log: &Logger, sled_mode: SledMode) -> Result { + /// - `nongimlet_observed_disks`: For non-gimlets, inject these disks into + /// HardwareSnapshot objects. + pub fn new( + log: &Logger, + sled_mode: SledMode, + nongimlet_observed_disks: Vec, + ) -> Result { let log = log.new(o!("component" => "HardwareManager")); info!(log, "Creating HardwareManager"); @@ -650,7 +666,7 @@ impl HardwareManager { // This mitigates issues where the Sled Agent could try to propagate // an "empty" view of hardware to other consumers before the first // query. - match poll_device_tree(&log, &inner, &tx) { + match poll_device_tree(&log, &inner, &nongimlet_observed_disks, &tx) { Ok(_) => (), // Allow non-gimlet devices to proceed with a "null" view of // hardware, otherwise they won't be able to start. 
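[Note] The `nongimlet_observed_disks` mechanism above reduces to a small merge step: disks supplied at startup are added to the polled snapshot whenever the device-tree walk cannot report them. A simplified sketch with stand-in types, mirroring the loop in the diff:

```rust
use std::collections::HashSet;

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct UnparsedDisk(String);

// Any statically-supplied disk missing from the polled view is inserted.
fn inject_observed_disks(
    snapshot: &mut HashSet<UnparsedDisk>,
    nongimlet_observed_disks: &[UnparsedDisk],
) {
    for disk in nongimlet_observed_disks {
        if !snapshot.contains(disk) {
            snapshot.insert(disk.clone());
        }
    }
}

fn main() {
    let mut snapshot = HashSet::new();
    let observed = vec![UnparsedDisk("u2_example.vdev".to_string())];
    inject_observed_disks(&mut snapshot, &observed);
    assert_eq!(snapshot.len(), 1);
}
```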
@@ -666,7 +682,8 @@ impl HardwareManager { let inner2 = inner.clone(); let tx2 = tx.clone(); tokio::task::spawn(async move { - hardware_tracking_task(log2, inner2, tx2).await + hardware_tracking_task(log2, inner2, nongimlet_observed_disks, tx2) + .await }); Ok(Self { log, inner, tx }) diff --git a/sled-hardware/src/illumos/partitions.rs b/sled-hardware/src/illumos/partitions.rs index 3b8e0af2ee..0308e842c0 100644 --- a/sled-hardware/src/illumos/partitions.rs +++ b/sled-hardware/src/illumos/partitions.rs @@ -12,9 +12,9 @@ use crate::{DiskPaths, DiskVariant, Partition, PooledDiskError}; use camino::Utf8Path; use illumos_utils::zpool::ZpoolName; use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::ZpoolUuid; use slog::info; use slog::Logger; -use uuid::Uuid; #[cfg(test)] use illumos_utils::zpool::MockZpool as Zpool; @@ -148,9 +148,10 @@ pub fn ensure_partition_layout( paths: &DiskPaths, variant: DiskVariant, identity: &DiskIdentity, + zpool_id: Option, ) -> Result, PooledDiskError> { internal_ensure_partition_layout::( - log, paths, variant, identity, + log, paths, variant, identity, zpool_id, ) } @@ -161,23 +162,26 @@ fn internal_ensure_partition_layout( paths: &DiskPaths, variant: DiskVariant, identity: &DiskIdentity, + zpool_id: Option, ) -> Result, PooledDiskError> { // Open the "Whole Disk" as a raw device to be parsed by the // libefi-illumos library. This lets us peek at the GPT before // making too many assumptions about it. let raw = true; let path = paths.whole_disk(raw); + let devfs_path_str = paths.devfs_path.as_str().to_string(); + let log = log.new(slog::o!("path" => devfs_path_str)); let gpt = match GPT::read(&path) { Ok(gpt) => { // This should be the common steady-state case - info!(log, "Disk at {} already has a GPT", paths.devfs_path); + info!(log, "Disk already has a GPT"); gpt } Err(libefi_illumos::Error::LabelNotFound) => { // Fresh U.2 disks are an example of devices where "we don't expect // a GPT to exist". - info!(log, "Disk at {} does not have a GPT", paths.devfs_path); + info!(log, "Disk does not have a GPT"); // For ZFS-implementation-specific reasons, Zpool create can only // act on devices under the "/dev" hierarchy, rather than the device @@ -193,12 +197,19 @@ fn internal_ensure_partition_layout( DiskVariant::U2 => { // First we need to check that this disk is of the proper // size and correct logical block address formatting. - ensure_size_and_formatting(log, identity)?; + ensure_size_and_formatting(&log, identity)?; + + info!( + log, + "Formatting zpool on disk"; + "uuid" => ?zpool_id, + ); + let Some(zpool_id) = zpool_id else { + return Err(PooledDiskError::MissingZpoolUuid); + }; - // If we were successful we can create a zpool on this disk. - info!(log, "Formatting zpool on disk {}", paths.devfs_path); // If a zpool does not already exist, create one. - let zpool_name = ZpoolName::new_external(Uuid::new_v4()); + let zpool_name = ZpoolName::new_external(zpool_id); Zpool::create(&zpool_name, dev_path)?; return Ok(vec![Partition::ZfsPool]); } @@ -385,6 +396,7 @@ mod test { &DiskPaths { devfs_path, dev_path: None }, DiskVariant::U2, &mock_disk_identity(), + None, ); match result { Err(PooledDiskError::CannotFormatMissingDevPath { .. 
}) => {} @@ -419,6 +431,7 @@ mod test { }, DiskVariant::U2, &mock_disk_identity(), + Some(ZpoolUuid::new_v4()), ) .expect("Should have succeeded partitioning disk"); @@ -444,6 +457,7 @@ mod test { }, DiskVariant::M2, &mock_disk_identity(), + None, ) .is_err()); @@ -482,6 +496,7 @@ mod test { }, DiskVariant::U2, &mock_disk_identity(), + None, ) .expect("Should be able to parse disk"); @@ -525,6 +540,7 @@ mod test { }, DiskVariant::M2, &mock_disk_identity(), + None, ) .expect("Should be able to parse disk"); @@ -565,6 +581,7 @@ mod test { }, DiskVariant::M2, &mock_disk_identity(), + None, ) .expect_err("Should have failed parsing empty GPT"), PooledDiskError::BadPartitionLayout { .. } @@ -591,6 +608,7 @@ mod test { }, DiskVariant::U2, &mock_disk_identity(), + None, ) .expect_err("Should have failed parsing empty GPT"), PooledDiskError::BadPartitionLayout { .. } diff --git a/sled-hardware/src/non_illumos/mod.rs b/sled-hardware/src/non_illumos/mod.rs index e990567b7c..3516962577 100644 --- a/sled-hardware/src/non_illumos/mod.rs +++ b/sled-hardware/src/non_illumos/mod.rs @@ -7,6 +7,7 @@ use crate::disk::{ }; use crate::SledMode; use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::ZpoolUuid; use sled_hardware_types::Baseboard; use slog::Logger; use std::collections::HashSet; @@ -30,7 +31,11 @@ pub enum NvmeFormattingError { pub struct HardwareManager {} impl HardwareManager { - pub fn new(_log: &Logger, _sled_mode: SledMode) -> Result { + pub fn new( + _log: &Logger, + _sled_mode: SledMode, + _nongimlet_observed_disks: Vec, + ) -> Result { unimplemented!("Accessing hardware unsupported on non-illumos"); } @@ -68,6 +73,7 @@ pub fn ensure_partition_layout( _paths: &DiskPaths, _variant: DiskVariant, _identity: &DiskIdentity, + _zpool_id: Option, ) -> Result, PooledDiskError> { unimplemented!("Accessing hardware unsupported on non-illumos"); } diff --git a/sled-hardware/types/Cargo.toml b/sled-hardware/types/Cargo.toml index d6eefa49ac..3b73728ca5 100644 --- a/sled-hardware/types/Cargo.toml +++ b/sled-hardware/types/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] illumos-utils.workspace = true omicron-common.workspace = true diff --git a/sled-hardware/types/src/underlay.rs b/sled-hardware/types/src/underlay.rs index bbeb43bd4d..ca380c08c2 100644 --- a/sled-hardware/types/src/underlay.rs +++ b/sled-hardware/types/src/underlay.rs @@ -45,9 +45,9 @@ fn mac_to_bootstrap_ip(mac: MacAddr, interface_id: u64) -> Ipv6Addr { Ipv6Addr::new( BOOTSTRAP_PREFIX, - ((mac_bytes[0] as u16) << 8) | mac_bytes[1] as u16, - ((mac_bytes[2] as u16) << 8) | mac_bytes[3] as u16, - ((mac_bytes[4] as u16) << 8) | mac_bytes[5] as u16, + (u16::from(mac_bytes[0]) << 8) | u16::from(mac_bytes[1]), + (u16::from(mac_bytes[2]) << 8) | u16::from(mac_bytes[3]), + (u16::from(mac_bytes[4]) << 8) | u16::from(mac_bytes[5]), (interface_id >> 48 & 0xffff).try_into().unwrap(), (interface_id >> 32 & 0xffff).try_into().unwrap(), (interface_id >> 16 & 0xffff).try_into().unwrap(), diff --git a/sled-storage/Cargo.toml b/sled-storage/Cargo.toml index cb3a790631..2439c52aa7 100644 --- a/sled-storage/Cargo.toml +++ b/sled-storage/Cargo.toml @@ -3,15 +3,23 @@ name = "sled-storage" version = "0.1.0" edition = "2021" +[lints] +workspace = true + [dependencies] +anyhow.workspace = true async-trait.workspace = true camino.workspace = true +camino-tempfile.workspace = true cfg-if.workspace = true +debug-ignore.workspace = true derive_more.workspace = true 
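[Note] The `underlay.rs` hunk above replaces `as` casts with `u16::from` when packing MAC bytes into IPv6 groups. A standalone sketch of that packing; the prefix constant here is a placeholder, not necessarily the real `BOOTSTRAP_PREFIX` value:

```rust
use std::net::Ipv6Addr;

fn mac_to_bootstrap_ip(mac_bytes: [u8; 6], interface_id: u64) -> Ipv6Addr {
    // Placeholder prefix for this sketch only.
    const PREFIX: u16 = 0xfdb0;
    Ipv6Addr::new(
        PREFIX,
        (u16::from(mac_bytes[0]) << 8) | u16::from(mac_bytes[1]),
        (u16::from(mac_bytes[2]) << 8) | u16::from(mac_bytes[3]),
        (u16::from(mac_bytes[4]) << 8) | u16::from(mac_bytes[5]),
        ((interface_id >> 48) & 0xffff).try_into().unwrap(),
        ((interface_id >> 32) & 0xffff).try_into().unwrap(),
        ((interface_id >> 16) & 0xffff).try_into().unwrap(),
        (interface_id & 0xffff).try_into().unwrap(),
    )
}

fn main() {
    let ip = mac_to_bootstrap_ip([0xa8, 0x40, 0x25, 0x01, 0x02, 0x03], 1);
    println!("{ip}");
}
```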
glob.workspace = true +futures.workspace = true illumos-utils.workspace = true key-manager.workspace = true omicron-common.workspace = true +omicron-uuid-kinds.workspace = true rand.workspace = true schemars = { workspace = true, features = [ "chrono", "uuid1" ] } serde.workspace = true @@ -24,9 +32,9 @@ uuid.workspace = true omicron-workspace-hack.workspace = true [dev-dependencies] -illumos-utils = { workspace = true, features = ["tmp_keypath", "testing"] } +expectorate.workspace = true +illumos-utils = { workspace = true, features = ["testing"] } omicron-test-utils.workspace = true -camino-tempfile.workspace = true [features] # Quotas and the like can be shrunk via this feature diff --git a/sled-storage/src/config.rs b/sled-storage/src/config.rs new file mode 100644 index 0000000000..a3baf220b2 --- /dev/null +++ b/sled-storage/src/config.rs @@ -0,0 +1,39 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Describes a handful of configuration options that can be +//! used to tweak behavior under test. + +use camino::Utf8PathBuf; + +/// Options to alter the mount path of datasets. +/// +/// By default, datasets within a pool are mounted under "/pool/ext/..." and +/// "/pool/int/...". For more context, see: +/// [illumos_utils::zpool::ZpoolName::dataset_mountpoint]. +/// +/// However, under test, it can be desirable to have a root filesystem +/// which is isolated from other tests, and which doesn't need to exist under +/// the root filesystem. [MountConfig] provides options to tweak which path is +/// used to set up and access these datasets. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct MountConfig { + /// The root path under which datasets are located. + pub root: Utf8PathBuf, + + /// The path where synthetic disks are stored, + /// if their paths are not absolute. + pub synthetic_disk_root: Utf8PathBuf, +} + +impl Default for MountConfig { + fn default() -> Self { + Self { + root: Utf8PathBuf::from( + illumos_utils::zpool::ZPOOL_MOUNTPOINT_ROOT, + ), + synthetic_disk_root: Utf8PathBuf::from("/var/tmp"), + } + } +} diff --git a/sled-storage/src/dataset.rs b/sled-storage/src/dataset.rs index 41b77ea38b..7846826ee8 100644 --- a/sled-storage/src/dataset.rs +++ b/sled-storage/src/dataset.rs @@ -4,6 +4,7 @@ //! ZFS dataset related functionality +use crate::config::MountConfig; use crate::keyfile::KeyFile; use camino::Utf8PathBuf; use cfg_if::cfg_if; @@ -33,7 +34,7 @@ pub const M2_BACKING_DATASET: &'static str = "backing"; cfg_if! { if #[cfg(any(test, feature = "testing"))] { // Tuned for zone_bundle tests - pub const DEBUG_DATASET_QUOTA: usize = 100 * (1 << 10); + pub const DEBUG_DATASET_QUOTA: usize = 1 << 20; } else { // TODO-correctness: This value of 100GiB is a pretty wild guess, and should be // tuned as needed. @@ -279,10 +280,12 @@ pub enum DatasetError { /// `None` is for the M.2s touched by the Installinator. 
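[Note] A hedged usage sketch for the new `MountConfig` added in `sled-storage/src/config.rs`: a test can point both roots at a throwaway directory so datasets and synthetic vdevs stay isolated from the real `/pool` hierarchy. The helper name is invented and the `camino-tempfile` calls are assumed from its tempfile-like API:

```rust
use camino_tempfile::Utf8TempDir;
use sled_storage::config::MountConfig;

// Hypothetical helper: everything lives under one per-test directory.
fn test_mount_config(dir: &Utf8TempDir) -> MountConfig {
    MountConfig {
        root: dir.path().to_path_buf(),
        synthetic_disk_root: dir.path().join("vdevs"),
    }
}

fn main() -> std::io::Result<()> {
    let dir = Utf8TempDir::new()?;
    let cfg = test_mount_config(&dir);
    println!("dataset root: {}", cfg.root);
    Ok(())
}
```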
pub(crate) async fn ensure_zpool_has_datasets( log: &Logger, + mount_config: &MountConfig, zpool_name: &ZpoolName, disk_identity: &DiskIdentity, key_requester: Option<&StorageKeyRequester>, ) -> Result<(), DatasetError> { + info!(log, "Ensuring zpool has datasets"; "zpool" => ?zpool_name, "disk_identity" => ?disk_identity); let (root, datasets) = match zpool_name.kind().into() { DiskVariant::M2 => (None, M2_EXPECTED_DATASETS.iter()), DiskVariant::U2 => (Some(CRYPT_DATASET), U2_EXPECTED_DATASETS.iter()), @@ -297,8 +300,10 @@ pub(crate) async fn ensure_zpool_has_datasets( let Some(key_requester) = key_requester else { return Err(DatasetError::MissingStorageKeyRequester); }; - let mountpoint = zpool_name.dataset_mountpoint(dataset); - let keypath: Keypath = disk_identity.into(); + let mountpoint = + zpool_name.dataset_mountpoint(&mount_config.root, dataset); + let keypath: Keypath = + illumos_utils::zfs::Keypath::new(disk_identity, &mount_config.root); let epoch = if let Ok(epoch_str) = Zfs::get_oxide_value(dataset, "epoch") @@ -324,15 +329,15 @@ pub(crate) async fn ensure_zpool_has_datasets( // other reason, but the dataset actually existed, we will // try to create the dataset below and that will fail. So // there is no harm in just loading the latest secret here. - info!(log, "Loading latest secret"; "disk_id"=>#?disk_identity); + info!(log, "Loading latest secret"; "disk_id"=>?disk_identity); let epoch = key_requester.load_latest_secret().await?; - info!(log, "Loaded latest secret"; "epoch"=>%epoch, "disk_id"=>#?disk_identity); + info!(log, "Loaded latest secret"; "epoch"=>%epoch, "disk_id"=>?disk_identity); epoch }; - info!(log, "Retrieving key"; "epoch"=>%epoch, "disk_id"=>#?disk_identity); + info!(log, "Retrieving key"; "epoch"=>%epoch, "disk_id"=>?disk_identity); let key = key_requester.get_key(epoch, disk_identity.clone()).await?; - info!(log, "Got key"; "epoch"=>%epoch, "disk_id"=>#?disk_identity); + info!(log, "Got key"; "epoch"=>%epoch, "disk_id"=>?disk_identity); let mut keyfile = KeyFile::create(keypath.clone(), key.expose_secret(), log) @@ -366,7 +371,8 @@ pub(crate) async fn ensure_zpool_has_datasets( }; for dataset in datasets.into_iter() { - let mountpoint = zpool_name.dataset_mountpoint(dataset.name); + let mountpoint = + zpool_name.dataset_mountpoint(&mount_config.root, dataset.name); let name = &format!("{}/{}", zpool_name, dataset.name); // Use a value that's alive for the duration of this sled agent @@ -788,11 +794,11 @@ async fn finalize_encryption_migration( #[cfg(test)] mod test { use super::*; - use uuid::Uuid; + use omicron_uuid_kinds::ZpoolUuid; #[test] fn serialize_dataset_name() { - let pool = ZpoolName::new_internal(Uuid::new_v4()); + let pool = ZpoolName::new_internal(ZpoolUuid::new_v4()); let kind = DatasetKind::Crucible; let name = DatasetName::new(pool, kind); serde_json::to_string(&name).unwrap(); diff --git a/sled-storage/src/disk.rs b/sled-storage/src/disk.rs index 705b38718a..608d3678da 100644 --- a/sled-storage/src/disk.rs +++ b/sled-storage/src/disk.rs @@ -4,19 +4,82 @@ //! 
Disk related types +use anyhow::bail; use camino::{Utf8Path, Utf8PathBuf}; use derive_more::From; -use illumos_utils::zpool::{Zpool, ZpoolKind, ZpoolName}; use key_manager::StorageKeyRequester; +use omicron_common::api::external::Generation; use omicron_common::disk::DiskIdentity; +use omicron_common::ledger::Ledgerable; +use omicron_common::zpool_name::{ZpoolKind, ZpoolName}; +use omicron_uuid_kinds::ZpoolUuid; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; use sled_hardware::{ DiskVariant, Partition, PooledDisk, PooledDiskError, UnparsedDisk, }; -use slog::Logger; -use std::fs::File; +use slog::{info, Logger}; +use uuid::Uuid; +use crate::config::MountConfig; use crate::dataset; +#[derive( + Clone, + Debug, + Deserialize, + Serialize, + JsonSchema, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, +)] +pub struct OmicronPhysicalDiskConfig { + pub identity: DiskIdentity, + pub id: Uuid, + pub pool_id: ZpoolUuid, +} + +#[derive( + Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, Eq, Hash, +)] +pub struct OmicronPhysicalDisksConfig { + /// generation number of this configuration + /// + /// This generation number is owned by the control plane (i.e., RSS or + /// Nexus, depending on whether RSS-to-Nexus handoff has happened). It + /// should not be bumped within Sled Agent. + /// + /// Sled Agent rejects attempts to set the configuration to a generation + /// older than the one it's currently running. + pub generation: Generation, + + pub disks: Vec, +} + +impl Default for OmicronPhysicalDisksConfig { + fn default() -> Self { + Self { generation: Generation::new(), disks: vec![] } + } +} + +impl Ledgerable for OmicronPhysicalDisksConfig { + fn is_newer_than(&self, other: &OmicronPhysicalDisksConfig) -> bool { + self.generation > other.generation + } + + // No need to do this, the generation number is provided externally. + fn generation_bump(&mut self) {} +} + +impl OmicronPhysicalDisksConfig { + pub fn new() -> Self { + Self { generation: Generation::new(), disks: vec![] } + } +} + #[derive(Debug, thiserror::Error)] pub enum DiskError { #[error(transparent)] @@ -25,13 +88,11 @@ pub enum DiskError { PooledDisk(#[from] sled_hardware::PooledDiskError), } -// A synthetic disk that acts as one "found" by the hardware and that is backed -// by a zpool +/// A synthetic disk which has been formatted with a zpool. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct SyntheticDisk { - pub identity: DiskIdentity, - pub zpool_name: ZpoolName, - pub slot: i64, + raw: RawSyntheticDisk, + zpool_name: ZpoolName, } // By adding slots at an "offset", this acts as a barrier against synthetic @@ -43,45 +104,111 @@ pub struct SyntheticDisk { const SYNTHETIC_SLOT_OFFSET: i64 = 1024; impl SyntheticDisk { - // Create a zpool and import it for the synthetic disk - // Zpools willl be set to the min size of 64Mib - pub fn create_zpool( - dir: &Utf8Path, - zpool_name: &ZpoolName, + // "Manages" a SyntheticDisk by ensuring that it has a Zpool and importing + // it. If the zpool already exists, it is imported, but not re-created. 
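[Note] The generation contract documented on `OmicronPhysicalDisksConfig` (and enforced via `Ledgerable::is_newer_than`) boils down to an ordering check: a requested configuration older than the ledgered one is rejected. A reduced sketch with a stand-in `Generation` type; the real acceptance rules may be stricter than shown here:

```rust
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Generation(u64);

#[derive(Debug)]
enum Error {
    // Field names mirror the `PhysicalDiskConfigurationOutdated` variant
    // added in error.rs; the type itself is a simplified stand-in.
    PhysicalDiskConfigurationOutdated { requested: Generation, current: Generation },
}

fn check_config_generation(
    current: Generation,
    requested: Generation,
) -> Result<(), Error> {
    if requested < current {
        return Err(Error::PhysicalDiskConfigurationOutdated { requested, current });
    }
    Ok(())
}

fn main() {
    assert!(check_config_generation(Generation(3), Generation(4)).is_ok());
    assert!(check_config_generation(Generation(3), Generation(2)).is_err());
}
```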
+ pub fn new( + log: &Logger, + mount_config: &MountConfig, + raw: RawSyntheticDisk, + zpool_id: Option, + ) -> Self { + let path = if raw.path.is_absolute() { + raw.path.clone() + } else { + mount_config.synthetic_disk_root.join(&raw.path) + }; + + info!( + log, + "Invoking SyntheticDisk::new"; + "identity" => ?raw.identity, + "path" => %path, + ); + + let zpool_name = sled_hardware::disk::ensure_zpool_exists( + log, + raw.variant, + &path, + zpool_id, + ) + .unwrap(); + sled_hardware::disk::ensure_zpool_imported(log, &zpool_name).unwrap(); + sled_hardware::disk::ensure_zpool_failmode_is_continue( + log, + &zpool_name, + ) + .unwrap(); + + Self { raw, zpool_name } + } +} + +// A synthetic disk that acts as one "found" by the hardware and that is backed +// by a vdev. +#[derive(Debug, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] +pub struct RawSyntheticDisk { + pub path: Utf8PathBuf, + pub identity: DiskIdentity, + pub variant: DiskVariant, + pub slot: i64, +} + +impl RawSyntheticDisk { + /// Creates the file with a specified length, and also parses it as + /// a [RawSyntheticDisk]. + pub fn new_with_length>( + vdev: P, + length: u64, slot: i64, - ) -> SyntheticDisk { - // 64 MiB (min size of zpool) - const DISK_SIZE: u64 = 64 * 1024 * 1024; - let path = dir.join(zpool_name.to_string()); - let file = File::create(&path).unwrap(); - file.set_len(DISK_SIZE).unwrap(); - drop(file); - Zpool::create(zpool_name, &path).unwrap(); - Zpool::import(zpool_name).unwrap(); - Zpool::set_failmode_continue(zpool_name).unwrap(); - Self::new(zpool_name.clone(), slot) + ) -> Result { + let file = std::fs::File::create(vdev.as_ref())?; + file.set_len(length)?; + Self::load(vdev, slot) } - pub fn new(zpool_name: ZpoolName, slot: i64) -> SyntheticDisk { - let id = zpool_name.id(); + /// Treats a file at path `vdev` as a synthetic disk. The file + /// should already exist, and have the desired length. + pub fn load>( + vdev: P, + slot: i64, + ) -> Result { + let path = vdev.as_ref(); + let Some(file) = path.file_name() else { + bail!("Missing file name for synthetic disk"); + }; + + let Some(file) = file.strip_suffix(".vdev") else { + bail!("Missing '.vdev' suffix for synthetic disk"); + }; + + let (serial, variant) = if let Some(serial) = file.strip_prefix("m2_") { + (serial, DiskVariant::M2) + } else if let Some(serial) = file.strip_prefix("u2_") { + (serial, DiskVariant::U2) + } else { + bail!("Unknown file prefix: {file}. Try one of {{m2_,u2_}}"); + }; + let identity = DiskIdentity { vendor: "synthetic-vendor".to_string(), - serial: format!("synthetic-serial-{id}"), - model: "synthetic-model".to_string(), + serial: format!("synthetic-serial-{serial}"), + model: format!("synthetic-model-{variant:?}"), }; - SyntheticDisk { + + Ok(Self { + path: path.into(), identity, - zpool_name, + variant, slot: slot + SYNTHETIC_SLOT_OFFSET, - } + }) } } // An [`UnparsedDisk`] disk learned about from the hardware or a wrapped zpool -#[derive(Debug, Clone, PartialEq, Eq, Hash, From)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, From)] pub enum RawDisk { Real(UnparsedDisk), - Synthetic(SyntheticDisk), + Synthetic(RawSyntheticDisk), } impl RawDisk { @@ -90,7 +217,7 @@ impl RawDisk { Self::Real(disk) => disk.is_boot_disk(), Self::Synthetic(disk) => { // Just label any M.2 the boot disk. 
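[Note] `RawSyntheticDisk::load` above introduces a filename convention for synthetic vdevs: `m2_<serial>.vdev` or `u2_<serial>.vdev`. This standalone parser mirrors that logic for illustration only; it is not the harness code:

```rust
#[derive(Debug, PartialEq)]
enum Variant {
    M2,
    U2,
}

// Returns the serial portion and the disk variant implied by the file name,
// or None if the name does not follow the "<m2|u2>_<serial>.vdev" pattern.
fn parse_vdev_name(file: &str) -> Option<(String, Variant)> {
    let file = file.strip_suffix(".vdev")?;
    if let Some(serial) = file.strip_prefix("m2_") {
        Some((serial.to_string(), Variant::M2))
    } else if let Some(serial) = file.strip_prefix("u2_") {
        Some((serial.to_string(), Variant::U2))
    } else {
        None
    }
}

fn main() {
    assert_eq!(
        parse_vdev_name("m2_left.vdev"),
        Some(("left".to_string(), Variant::M2))
    );
    assert!(parse_vdev_name("nvme0.vdev").is_none());
}
```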
- disk.zpool_name.kind() == ZpoolKind::Internal + disk.variant == DiskVariant::M2 } } } @@ -105,18 +232,7 @@ impl RawDisk { pub fn variant(&self) -> DiskVariant { match self { Self::Real(disk) => disk.variant(), - Self::Synthetic(disk) => match disk.zpool_name.kind() { - ZpoolKind::External => DiskVariant::U2, - ZpoolKind::Internal => DiskVariant::M2, - }, - } - } - - #[cfg(test)] - pub fn zpool_name(&self) -> &ZpoolName { - match self { - Self::Real(_) => unreachable!(), - Self::Synthetic(disk) => &disk.zpool_name, + Self::Synthetic(disk) => disk.variant, } } @@ -131,12 +247,37 @@ impl RawDisk { !self.is_synthetic() } + pub fn u2_zpool_path(&self) -> Result { + if !matches!(self.variant(), DiskVariant::U2) { + return Err(PooledDiskError::UnexpectedVariant); + } + match self { + Self::Real(disk) => { + let paths = disk.paths(); + // This is hard-coded to be "0", but that's because we aren't + // really parsing the whole partition table before considering + // where this would be see. + paths + .partition_path(0, false) + .ok_or_else(|| PooledDiskError::ZpoolDoesNotExist) + } + Self::Synthetic(raw) => Ok(raw.path.clone()), + } + } + pub fn devfs_path(&self) -> &Utf8PathBuf { match self { Self::Real(disk) => disk.devfs_path(), Self::Synthetic(_) => unreachable!(), } } + + pub fn slot(&self) -> i64 { + match self { + Self::Real(disk) => disk.slot(), + Self::Synthetic(disk) => disk.slot, + } + } } /// A physical [`PooledDisk`] or a [`SyntheticDisk`] that contains or is backed @@ -151,15 +292,23 @@ pub enum Disk { impl Disk { pub async fn new( log: &Logger, + mount_config: &MountConfig, raw_disk: RawDisk, + pool_id: Option, key_requester: Option<&StorageKeyRequester>, ) -> Result { - let disk = match raw_disk { - RawDisk::Real(disk) => PooledDisk::new(log, disk)?.into(), - RawDisk::Synthetic(disk) => Disk::Synthetic(disk), + let disk: Disk = match raw_disk { + RawDisk::Real(disk) => PooledDisk::new(log, disk, pool_id)?.into(), + RawDisk::Synthetic(disk) => Disk::Synthetic(SyntheticDisk::new( + log, + mount_config, + disk, + pool_id, + )), }; dataset::ensure_zpool_has_datasets( log, + mount_config, disk.zpool_name(), disk.identity(), key_requester, @@ -194,7 +343,7 @@ impl Disk { Self::Real(disk) => disk.is_boot_disk, Self::Synthetic(disk) => { // Just label any M.2 the boot disk. 
- disk.zpool_name.kind() == ZpoolKind::Internal + disk.raw.variant == DiskVariant::M2 } } } @@ -202,7 +351,7 @@ impl Disk { pub fn identity(&self) -> &DiskIdentity { match self { Self::Real(disk) => &disk.identity, - Self::Synthetic(disk) => &disk.identity, + Self::Synthetic(disk) => &disk.raw.identity, } } @@ -261,7 +410,25 @@ impl Disk { pub fn slot(&self) -> i64 { match self { Self::Real(disk) => disk.slot, - Self::Synthetic(disk) => disk.slot, + Self::Synthetic(disk) => disk.raw.slot, + } + } +} + +impl From for RawDisk { + fn from(disk: Disk) -> RawDisk { + match disk { + Disk::Real(pooled_disk) => RawDisk::Real(UnparsedDisk::new( + pooled_disk.paths.devfs_path, + pooled_disk.paths.dev_path, + pooled_disk.slot, + pooled_disk.variant, + pooled_disk.identity, + pooled_disk.is_boot_disk, + )), + Disk::Synthetic(synthetic_disk) => { + RawDisk::Synthetic(synthetic_disk.raw) + } } } } diff --git a/sled-storage/src/error.rs b/sled-storage/src/error.rs index b9f97ee428..4c5582fd79 100644 --- a/sled-storage/src/error.rs +++ b/sled-storage/src/error.rs @@ -8,6 +8,7 @@ use crate::dataset::{DatasetError, DatasetName}; use crate::disk::DiskError; use camino::Utf8PathBuf; use omicron_common::api::external::ByteCountRangeError; +use omicron_common::api::external::Generation; use uuid::Uuid; #[derive(thiserror::Error, Debug)] @@ -49,9 +50,6 @@ pub enum Error { #[error(transparent)] ZoneInstall(#[from] illumos_utils::running_zone::InstallZoneError), - #[error("No U.2 Zpools found")] - NoU2Zpool, - #[error("Failed to parse UUID from {path}: {err}")] ParseUuid { path: Utf8PathBuf, @@ -76,6 +74,50 @@ pub enum Error { err: uuid::Error, }, + #[error("Not ready to manage U.2s (key manager is not ready)")] + KeyManagerNotReady, + + #[error("Physical disk configuration out-of-date (asked for {requested}, but latest is {current})")] + PhysicalDiskConfigurationOutdated { + requested: Generation, + current: Generation, + }, + + #[error("Failed to update ledger in internal storage")] + Ledger(#[from] omicron_common::ledger::Error), + + #[error("No ledger found on internal storage")] + LedgerNotFound, + #[error("Zpool Not Found: {0}")] ZpoolNotFound(String), } + +impl From for omicron_common::api::external::Error { + fn from(err: Error) -> Self { + use omicron_common::api::external::Error as ExternalError; + use omicron_common::api::external::LookupType; + use omicron_common::api::external::ResourceType; + + match err { + Error::LedgerNotFound => ExternalError::ObjectNotFound { + type_name: ResourceType::SledLedger, + lookup_type: LookupType::ByOther( + "Could not find record on M.2s".to_string(), + ), + }, + Error::ZpoolNotFound(name) => ExternalError::ObjectNotFound { + type_name: ResourceType::Zpool, + lookup_type: LookupType::ByName(name), + }, + Error::KeyManagerNotReady => ExternalError::ServiceUnavailable { + internal_message: + "Not ready to manage disks, try again after trust quorum" + .to_string(), + }, + _ => omicron_common::api::external::Error::InternalError { + internal_message: err.to_string(), + }, + } + } +} diff --git a/sled-storage/src/keyfile.rs b/sled-storage/src/keyfile.rs index 48e5d9a528..190dfb9c26 100644 --- a/sled-storage/src/keyfile.rs +++ b/sled-storage/src/keyfile.rs @@ -27,14 +27,11 @@ impl KeyFile { key: &[u8; 32], log: &Logger, ) -> std::io::Result { + info!(log, "About to create keyfile"; "path" => ?path); // We want to overwrite any existing contents. 
- let mut file = tokio::fs::OpenOptions::new() - .create(true) - .write(true) - .open(&path.0) - .await?; + let mut file = tokio::fs::File::create(&path.0).await?; file.write_all(key).await?; - info!(log, "Created keyfile {}", path); + info!(log, "Created keyfile"; "path" => ?path); Ok(KeyFile { path, file, diff --git a/sled-storage/src/lib.rs b/sled-storage/src/lib.rs index d4b64c55a5..681f003b52 100644 --- a/sled-storage/src/lib.rs +++ b/sled-storage/src/lib.rs @@ -8,10 +8,13 @@ //! hardware partitions from the `sled-hardware` crate. It utilizes the //! `illumos-utils` crate to actually perform ZFS related OS calls. +pub mod config; pub mod dataset; pub mod disk; pub mod error; pub(crate) mod keyfile; pub mod manager; +#[cfg(any(feature = "testing", test))] +pub mod manager_test_harness; pub mod pool; pub mod resources; diff --git a/sled-storage/src/manager.rs b/sled-storage/src/manager.rs index bb749cc366..4f45f1771e 100644 --- a/sled-storage/src/manager.rs +++ b/sled-storage/src/manager.rs @@ -6,17 +6,24 @@ use std::collections::HashSet; -use crate::dataset::{DatasetError, DatasetName}; -use crate::disk::{Disk, DiskError, RawDisk}; +use crate::config::MountConfig; +use crate::dataset::{DatasetName, CONFIG_DATASET}; +use crate::disk::{ + OmicronPhysicalDiskConfig, OmicronPhysicalDisksConfig, RawDisk, +}; use crate::error::Error; -use crate::resources::{AddDiskResult, StorageResources}; +use crate::resources::{AllDisks, DisksManagementResult, StorageResources}; use camino::Utf8PathBuf; +use debug_ignore::DebugIgnore; +use futures::future::FutureExt; use illumos_utils::zfs::{Mountpoint, Zfs}; use illumos_utils::zpool::ZpoolName; use key_manager::StorageKeyRequester; use omicron_common::disk::DiskIdentity; +use omicron_common::ledger::Ledger; use sled_hardware::DiskVariant; -use slog::{error, info, o, warn, Logger}; +use slog::{info, o, warn, Logger}; +use std::future::Future; use tokio::sync::{mpsc, oneshot, watch}; use tokio::time::{interval, Duration, MissedTickBehavior}; use uuid::Uuid; @@ -48,80 +55,199 @@ use uuid::Uuid; // large messages. // // Here we start relatively small so that we can evaluate our choice over time. -const QUEUE_SIZE: usize = 256; +pub(crate) const QUEUE_SIZE: usize = 256; + +const SYNCHRONIZE_INTERVAL: Duration = Duration::from_secs(10); + +// The filename of the ledger storing physical disk info +const DISKS_LEDGER_FILENAME: &str = "omicron-physical-disks.json"; #[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum StorageManagerState { +enum StorageManagerState { + // We know that any attempts to manage disks will fail, as the key manager + // is not ready yet. WaitingForKeyManager, - QueueingDisks, - Normal, + + // This state is used to indicate that the set of "control plane" physical + // disks and the set of "observed" disks may be out-of-sync. + // + // This can happen when: + // - The sled boots, and the ledger of "control plane disks" is initially + // loaded. + // - A U.2 is added to the disk after initial boot. + // + // In both of these cases, if trust quorum hasn't been established, it's + // possible that the request to [Self::manage_disks] will need to retry. + SynchronizationNeeded, + + // This state indicates the key manager is ready, and the storage manager + // believes that the set of control plane disks is in-sync with the set of + // observed disks. 
+ Synchronized, } #[derive(Debug)] -struct NewFilesystemRequest { +pub(crate) struct NewFilesystemRequest { dataset_id: Uuid, dataset_name: DatasetName, - responder: oneshot::Sender>, + responder: DebugIgnore>>, } #[derive(Debug)] -enum StorageRequest { - AddDisk(RawDisk), - RemoveDisk(RawDisk), - DisksChanged(HashSet), +pub(crate) enum StorageRequest { + // Requests to manage which devices the sled considers active. + // These are manipulated by hardware management. + DetectedRawDisk { + raw_disk: RawDisk, + tx: DebugIgnore>>, + }, + DetectedRawDiskRemoval { + raw_disk: RawDisk, + tx: DebugIgnore>>, + }, + DetectedRawDisksChanged { + raw_disks: HashSet, + tx: DebugIgnore>>, + }, + + // Requests to explicitly manage or stop managing a set of devices + OmicronPhysicalDisksEnsure { + config: OmicronPhysicalDisksConfig, + tx: DebugIgnore>>, + }, + + // Reads the last set of physical disks that were successfully ensured. + OmicronPhysicalDisksList { + tx: DebugIgnore< + oneshot::Sender>, + >, + }, + + // Requests the creation of a new dataset within a managed disk. NewFilesystem(NewFilesystemRequest), + KeyManagerReady, + /// This will always grab the latest state after any new updates, as it /// serializes through the `StorageManager` task after all prior requests. /// This serialization is particularly useful for tests. - GetLatestResources(oneshot::Sender), - - /// Get the internal task state of the manager - GetManagerState(oneshot::Sender), -} - -/// Data managed internally to the StorageManagerTask that can be useful -/// to clients for debugging purposes, and that isn't exposed in other ways. -#[derive(Debug, Clone)] -pub struct StorageManagerData { - pub state: StorageManagerState, - pub queued_u2_drives: HashSet, + GetLatestResources(DebugIgnore>), } /// A mechanism for interacting with the [`StorageManager`] #[derive(Clone)] pub struct StorageHandle { tx: mpsc::Sender, - resource_updates: watch::Receiver, + disk_updates: watch::Receiver, } impl StorageHandle { + pub(crate) fn new( + tx: mpsc::Sender, + disk_updates: watch::Receiver, + ) -> Self { + Self { tx, disk_updates } + } + /// Adds a disk and associated zpool to the storage manager. - pub async fn upsert_disk(&self, disk: RawDisk) { - self.tx.send(StorageRequest::AddDisk(disk)).await.unwrap(); + /// + /// Returns a future which completes once the notification has been + /// processed. Awaiting this future is optional. + pub async fn detected_raw_disk( + &self, + raw_disk: RawDisk, + ) -> impl Future> { + let (tx, rx) = oneshot::channel(); + self.tx + .send(StorageRequest::DetectedRawDisk { raw_disk, tx: tx.into() }) + .await + .unwrap(); + + rx.map(|result| result.unwrap()) } /// Removes a disk, if it's tracked by the storage manager, as well /// as any associated zpools. - pub async fn delete_disk(&self, disk: RawDisk) { - self.tx.send(StorageRequest::RemoveDisk(disk)).await.unwrap(); + /// + /// Returns a future which completes once the notification has been + /// processed. Awaiting this future is optional. + pub async fn detected_raw_disk_removal( + &self, + raw_disk: RawDisk, + ) -> impl Future> { + let (tx, rx) = oneshot::channel(); + self.tx + .send(StorageRequest::DetectedRawDiskRemoval { + raw_disk, + tx: tx.into(), + }) + .await + .unwrap(); + + rx.map(|result| result.unwrap()) } /// Ensures that the storage manager tracks exactly the provided disks. /// - /// This acts similar to a batch [Self::upsert_disk] for all new disks, and - /// [Self::delete_disk] for all removed disks. 
+ /// This acts similar to a batch [Self::detected_raw_disk] for all new disks, and + /// [Self::detected_raw_disk_removal] for all removed disks. /// /// If errors occur, an arbitrary "one" of them will be returned, but a /// best-effort attempt to add all disks will still be attempted. - pub async fn ensure_using_exactly_these_disks(&self, raw_disks: I) + /// + /// Returns a future which completes once the notification has been + /// processed. Awaiting this future is optional. + pub async fn ensure_using_exactly_these_disks( + &self, + raw_disks: I, + ) -> impl Future> where I: IntoIterator, { + let (tx, rx) = oneshot::channel(); self.tx - .send(StorageRequest::DisksChanged(raw_disks.into_iter().collect())) + .send(StorageRequest::DetectedRawDisksChanged { + raw_disks: raw_disks.into_iter().collect(), + tx: tx.into(), + }) .await .unwrap(); + rx.map(|result| result.unwrap()) + } + + pub async fn omicron_physical_disks_ensure( + &self, + config: OmicronPhysicalDisksConfig, + ) -> Result { + let (tx, rx) = oneshot::channel(); + self.tx + .send(StorageRequest::OmicronPhysicalDisksEnsure { + config, + tx: tx.into(), + }) + .await + .unwrap(); + + rx.await.unwrap() + } + + /// Reads the last value written to storage by + /// [Self::omicron_physical_disks_ensure]. + /// + /// This should be contrasted with both inventory and the result + /// of [Self::get_latest_disks] -- since this function focuses on + /// "Control Plane disks", it may return information about disks + /// that are no longer detected within the hardware of this sled. + pub async fn omicron_physical_disks_list( + &self, + ) -> Result { + let (tx, rx) = oneshot::channel(); + self.tx + .send(StorageRequest::OmicronPhysicalDisksList { tx: tx.into() }) + .await + .unwrap(); + + rx.await.unwrap() } /// Notify the [`StorageManager`] that the [`key_manager::KeyManager`] @@ -139,36 +265,35 @@ impl StorageHandle { /// Wait for a boot disk to be initialized pub async fn wait_for_boot_disk(&mut self) -> (DiskIdentity, ZpoolName) { + // We create a distinct receiver to avoid colliding with + // the receiver used by [Self::wait_for_changes]. + let mut receiver = self.disk_updates.clone(); loop { - let resources = self.resource_updates.borrow_and_update(); + let resources = receiver.borrow_and_update(); if let Some((disk_id, zpool_name)) = resources.boot_disk() { return (disk_id, zpool_name); } drop(resources); // We panic if the sender is dropped, as this means // the StorageManager has gone away, which it should not do. - self.resource_updates.changed().await.unwrap(); + receiver.changed().await.unwrap(); } } /// Wait for any storage resource changes - pub async fn wait_for_changes(&mut self) -> StorageResources { - self.resource_updates.changed().await.unwrap(); - self.resource_updates.borrow_and_update().clone() + pub async fn wait_for_changes(&mut self) -> AllDisks { + self.disk_updates.changed().await.unwrap(); + self.disk_updates.borrow_and_update().clone() } - /// Retrieve the latest value of `StorageResources` from the + /// Retrieve the latest value of `AllDisks` from the /// `StorageManager` task. 
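[Note] The reworked `StorageHandle` methods above share one shape: each request carries a `oneshot` sender, and the handle either awaits the receiver or hands it back as a future so that awaiting the acknowledgement is optional. A generic sketch of that actor-handle shape with illustrative types (not omicron's):

```rust
use futures::FutureExt;
use std::future::Future;
use tokio::sync::{mpsc, oneshot};

enum Request {
    Detected { name: String, tx: oneshot::Sender<Result<(), String>> },
}

#[derive(Clone)]
struct Handle {
    tx: mpsc::Sender<Request>,
}

impl Handle {
    // "Returns a future which completes once the notification has been
    // processed. Awaiting this future is optional."
    async fn detected(&self, name: &str) -> impl Future<Output = Result<(), String>> {
        let (tx, rx) = oneshot::channel();
        self.tx
            .send(Request::Detected { name: name.to_string(), tx })
            .await
            .unwrap();
        rx.map(|result| result.unwrap())
    }
}

async fn run(mut rx: mpsc::Receiver<Request>) {
    while let Some(req) = rx.recv().await {
        match req {
            Request::Detected { name, tx } => {
                println!("registered {name}");
                let _ = tx.send(Ok(()));
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(16);
    tokio::spawn(run(rx));
    let handle = Handle { tx };
    // The caller may drop the returned future, or await it for confirmation.
    handle.detected("u2_example").await.await.unwrap();
}
```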
- pub async fn get_latest_resources(&self) -> StorageResources { - let (tx, rx) = oneshot::channel(); - self.tx.send(StorageRequest::GetLatestResources(tx)).await.unwrap(); - rx.await.unwrap() - } - - /// Return internal data useful for debugging and testing - pub async fn get_manager_state(&self) -> StorageManagerData { + pub async fn get_latest_disks(&self) -> AllDisks { let (tx, rx) = oneshot::channel(); - self.tx.send(StorageRequest::GetManagerState(tx)).await.unwrap(); + self.tx + .send(StorageRequest::GetLatestResources(tx.into())) + .await + .unwrap(); rx.await.unwrap() } @@ -178,112 +303,42 @@ impl StorageHandle { dataset_name: DatasetName, ) -> Result<(), Error> { let (tx, rx) = oneshot::channel(); - let request = - NewFilesystemRequest { dataset_id, dataset_name, responder: tx }; + let request = NewFilesystemRequest { + dataset_id, + dataset_name, + responder: tx.into(), + }; self.tx.send(StorageRequest::NewFilesystem(request)).await.unwrap(); rx.await.unwrap() } } - -// Some sled-agent tests cannot currently use the real StorageManager -// and want to fake the entire behavior, but still have access to the -// `StorageResources`. We allow this via use of the `FakeStorageManager` -// that will respond to real storage requests from a real `StorageHandle`. -#[cfg(feature = "testing")] -pub struct FakeStorageManager { - rx: mpsc::Receiver, - resources: StorageResources, - resource_updates: watch::Sender, -} - -#[cfg(feature = "testing")] -impl FakeStorageManager { - pub fn new() -> (Self, StorageHandle) { - let (tx, rx) = mpsc::channel(QUEUE_SIZE); - let resources = StorageResources::default(); - let (update_tx, update_rx) = watch::channel(resources.clone()); - ( - Self { rx, resources, resource_updates: update_tx }, - StorageHandle { tx, resource_updates: update_rx }, - ) - } - - /// Run the main receive loop of the `FakeStorageManager` - /// - /// This should be spawned into a tokio task - pub async fn run(mut self) { - loop { - match self.rx.recv().await { - Some(StorageRequest::AddDisk(raw_disk)) => { - if self.add_disk(raw_disk).disk_inserted() { - self.resource_updates - .send_replace(self.resources.clone()); - } - } - Some(StorageRequest::GetLatestResources(tx)) => { - let _ = tx.send(self.resources.clone()); - } - Some(_) => { - unreachable!(); - } - None => break, - } - } - } - - // Add a disk to `StorageResources` if it is new and return true if so - fn add_disk(&mut self, raw_disk: RawDisk) -> AddDiskResult { - let disk = match raw_disk { - RawDisk::Real(_) => { - panic!( - "Only synthetic disks can be used with `FakeStorageManager`" - ); - } - RawDisk::Synthetic(synthetic_disk) => { - Disk::Synthetic(synthetic_disk) - } - }; - self.resources.insert_fake_disk(disk) - } -} - /// The storage manager responsible for the state of the storage /// on a sled. The storage manager runs in its own task and is interacted /// with via the [`StorageHandle`]. 
pub struct StorageManager { log: Logger, state: StorageManagerState, - // Used to find the capacity of the channel for tracking purposes - tx: mpsc::Sender, rx: mpsc::Receiver, resources: StorageResources, - queued_u2_drives: HashSet, - key_requester: StorageKeyRequester, - resource_updates: watch::Sender, - last_logged_capacity: usize, } impl StorageManager { pub fn new( log: &Logger, + mount_config: MountConfig, key_requester: StorageKeyRequester, ) -> (StorageManager, StorageHandle) { let (tx, rx) = mpsc::channel(QUEUE_SIZE); - let resources = StorageResources::default(); - let (update_tx, update_rx) = watch::channel(resources.clone()); + let resources = StorageResources::new(log, mount_config, key_requester); + let disk_updates = resources.watch_disks(); ( StorageManager { log: log.new(o!("component" => "StorageManager")), state: StorageManagerState::WaitingForKeyManager, - tx: tx.clone(), rx, resources, - queued_u2_drives: HashSet::new(), - key_requester, - resource_updates: update_tx, - last_logged_capacity: QUEUE_SIZE, }, - StorageHandle { tx, resource_updates: update_rx }, + StorageHandle::new(tx, disk_updates), ) } @@ -291,22 +346,29 @@ impl StorageManager { /// /// This should be spawned into a tokio task pub async fn run(mut self) { + let mut interval = interval(SYNCHRONIZE_INTERVAL); + interval.set_missed_tick_behavior(MissedTickBehavior::Delay); + tokio::pin!(interval); + loop { - const QUEUED_DISK_RETRY_TIMEOUT: Duration = Duration::from_secs(10); - let mut interval = interval(QUEUED_DISK_RETRY_TIMEOUT); - interval.set_missed_tick_behavior(MissedTickBehavior::Delay); tokio::select! { - res = self.step() => { - if let Err(e) = res { + Some(req) = self.rx.recv() => { + // It's critical that we don't "step" directly in the select + // branch, as that could cancel an ongoing request if it + // fires while a request is being processed. + // + // Instead, if we receive any request, we stop + // "select!"-ing and fully process the request before + // continuing. + if let Err(e) = self.step(req).await { warn!(self.log, "{e}"); } } _ = interval.tick(), - if self.state == StorageManagerState::QueueingDisks => + if self.state == StorageManagerState::SynchronizationNeeded => { - if self.add_queued_disks().await { - let _ = self.resource_updates.send_replace(self.resources.clone()); - } + info!(self.log, "automatically managing disks"); + self.manage_disks().await; } } } @@ -315,191 +377,387 @@ impl StorageManager { /// Process the next event /// /// This is useful for testing/debugging - pub async fn step(&mut self) -> Result<(), Error> { - const CAPACITY_LOG_THRESHOLD: usize = 10; - // We check the capacity and log it every time it changes by at least 10 - // entries in either direction. 
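Editor's note: the rewritten run loop above keeps only cancellation-safe futures (the queue's recv and the interval tick) inside select!, and performs the actual work in the arm body once the race has been decided, so a tick can never abandon a half-processed request. A stripped-down sketch of the same structure, with placeholder Request, handle, and resync names:

    use std::time::Duration;
    use tokio::sync::mpsc;
    use tokio::time::{interval, MissedTickBehavior};

    enum Request {
        DoWork,
    }

    async fn run(mut rx: mpsc::Receiver<Request>) {
        let mut ticker = interval(Duration::from_secs(10));
        ticker.set_missed_tick_behavior(MissedTickBehavior::Delay);

        loop {
            tokio::select! {
                // `recv` is cancellation-safe: if the tick arm wins the race,
                // no queued message is lost.
                Some(req) = rx.recv() => {
                    // The arm body is not raced against the other arms; once
                    // here, the request is processed to completion.
                    handle(req).await;
                }
                _ = ticker.tick() => {
                    resync().await;
                }
            }
        }
    }

    async fn handle(_req: Request) { /* long-running request processing */ }
    async fn resync() { /* periodic background synchronization pass */ }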
- let current = self.tx.capacity(); - if self.last_logged_capacity.saturating_sub(current) - >= CAPACITY_LOG_THRESHOLD - { - info!( - self.log, - "Channel capacity decreased"; - "previous" => ?self.last_logged_capacity, - "current" => ?current - ); - self.last_logged_capacity = current; - } else if current.saturating_sub(self.last_logged_capacity) - >= CAPACITY_LOG_THRESHOLD - { - info!( - self.log, - "Channel capacity increased"; - "previous" => ?self.last_logged_capacity, - "current" => ?current - ); - self.last_logged_capacity = current; - } - // The sending side never disappears because we hold a copy - let req = self.rx.recv().await.unwrap(); + async fn step(&mut self, req: StorageRequest) -> Result<(), Error> { info!(self.log, "Received {:?}", req); - let should_send_updates = match req { - StorageRequest::AddDisk(raw_disk) => { - self.add_disk(raw_disk).await?.disk_inserted() + + match req { + StorageRequest::DetectedRawDisk { raw_disk, tx } => { + let result = self.detected_raw_disk(raw_disk).await; + if let Err(ref err) = &result { + warn!(self.log, "Failed to add raw disk"; "err" => ?err); + } + let _ = tx.0.send(result); + } + StorageRequest::DetectedRawDiskRemoval { raw_disk, tx } => { + self.detected_raw_disk_removal(raw_disk); + let _ = tx.0.send(Ok(())); + } + StorageRequest::DetectedRawDisksChanged { raw_disks, tx } => { + self.ensure_using_exactly_these_disks(raw_disks).await; + let _ = tx.0.send(Ok(())); + } + StorageRequest::OmicronPhysicalDisksEnsure { config, tx } => { + let _ = + tx.0.send(self.omicron_physical_disks_ensure(config).await); } - StorageRequest::RemoveDisk(raw_disk) => self.remove_disk(raw_disk), - StorageRequest::DisksChanged(raw_disks) => { - self.ensure_using_exactly_these_disks(raw_disks).await + StorageRequest::OmicronPhysicalDisksList { tx } => { + let _ = tx.0.send(self.omicron_physical_disks_list().await); } StorageRequest::NewFilesystem(request) => { let result = self.add_dataset(&request).await; - if result.is_err() { - warn!(self.log, "{result:?}"); + if let Err(ref err) = &result { + warn!(self.log, "Failed to add dataset"; "err" => ?err); } - let _ = request.responder.send(result); - false + let _ = request.responder.0.send(result); } StorageRequest::KeyManagerReady => { - self.state = StorageManagerState::Normal; - self.add_queued_disks().await + self.key_manager_ready().await?; } StorageRequest::GetLatestResources(tx) => { - let _ = tx.send(self.resources.clone()); - false - } - StorageRequest::GetManagerState(tx) => { - let _ = tx.send(StorageManagerData { - state: self.state, - queued_u2_drives: self.queued_u2_drives.clone(), - }); - false + let _ = tx.0.send(self.resources.disks().clone()); } }; - if should_send_updates { - let _ = self.resource_updates.send_replace(self.resources.clone()); - } - Ok(()) } - // Loop through all queued disks inserting them into [`StorageResources`] - // unless we hit a transient error. If we hit a transient error, we return - // and wait for the next retry window to re-call this method. If we hit a - // permanent error we log it, but we continue inserting queued disks. - // - // Return true if updates should be sent to watchers, false otherwise - async fn add_queued_disks(&mut self) -> bool { + async fn manage_disks(&mut self) { + let result = self.resources.synchronize_disk_management().await; + + if result.has_retryable_error() { + // This is logged as "info", not "warn", as it can happen before + // trust quorum has been established. 
+ info!( + self.log, + "Failed to synchronize disks, but will retry"; + "result" => ?result, + ); + return; + } + + self.state = StorageManagerState::Synchronized; + + if result.has_error() { + warn!( + self.log, + "Failed to synchronize disks due to permanant error"; + "result" => #?result, + ); + return; + } + info!( self.log, - "Attempting to add queued disks"; - "num_disks" => %self.queued_u2_drives.len() + "Successfully synchronized disks without error"; + "result" => ?result, ); - self.state = StorageManagerState::Normal; - - let mut send_updates = false; - - // Disks that should be requeued. - let queued = self.queued_u2_drives.clone(); - let mut to_dequeue = HashSet::new(); - for disk in queued.iter() { - if self.state == StorageManagerState::QueueingDisks { - // We hit a transient error in a prior iteration. - break; - } else { - match self.add_u2_disk(disk.clone()).await { - Err(_) => { - // This is an unrecoverable error, so we don't queue the - // disk again. - to_dequeue.insert(disk); - } - Ok(AddDiskResult::DiskInserted) => { - send_updates = true; - to_dequeue.insert(disk); - } - Ok(AddDiskResult::DiskAlreadyInserted) => { - to_dequeue.insert(disk); - } - Ok(AddDiskResult::DiskQueued) => (), + } + + async fn all_omicron_disk_ledgers(&self) -> Vec { + self.resources + .disks() + .all_m2_mountpoints(CONFIG_DATASET) + .into_iter() + .map(|p| p.join(DISKS_LEDGER_FILENAME)) + .collect() + } + + // Manages a newly detected disk that has been attached to this sled. + // + // For U.2s: we update our inventory. + // For M.2s: we do the same, but also begin "managing" the disk so + // it can automatically be in-use. + async fn detected_raw_disk( + &mut self, + raw_disk: RawDisk, + ) -> Result<(), Error> { + // In other words, the decision of "should we use this U.2" requires + // coordination with the control plane at large. + let needs_synchronization = + matches!(raw_disk.variant(), DiskVariant::U2); + self.resources.insert_disk(raw_disk).await?; + + if needs_synchronization { + match self.state { + // We'll synchronize once the key manager comes up. + StorageManagerState::WaitingForKeyManager => (), + // In these cases, we'd benefit from another call + // to "manage_disks" from StorageManager task runner. + StorageManagerState::SynchronizationNeeded + | StorageManagerState::Synchronized => { + self.state = StorageManagerState::SynchronizationNeeded; + + // TODO(https://github.com/oxidecomputer/omicron/issues/5328): + // We can remove this call once we've migrated everyone to a + // world that uses the ledger -- normally we'd only need to + // load the storage config once, when we know that the key + // manager is ready, but without a ledger, we may need to + // retry auto-management when any new U.2 appears. + self.load_storage_config().await?; } } } - // Dequeue any inserted disks - self.queued_u2_drives.retain(|k| !to_dequeue.contains(k)); - send_updates + + Ok(()) } - // Add a disk to `StorageResources` if it is new, - // updated, or its pool has been updated as determined by - // [`$crate::resources::StorageResources::insert_disk`] and we decide not to - // queue the disk for later addition. 
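Editor's note: taken together, the state handling in this hunk amounts to a small state machine over StorageManagerState. The sketch below is a hedged summary of the transitions as the diff appears to implement them; the Event enum and the next function are paraphrases for illustration, not code from the change.

    #[derive(Clone, Copy, PartialEq, Eq)]
    enum State {
        WaitingForKeyManager,
        SynchronizationNeeded,
        Synchronized,
    }

    enum Event {
        KeyManagerReady,        // key_manager_ready -> load_storage_config
        NewU2Detected,          // a U.2 appears after boot
        SyncSucceeded,          // manage_disks saw no retryable errors
        SyncHitRetryableError,  // e.g. keys not yet available
    }

    fn next(state: State, event: Event) -> State {
        use {Event::*, State::*};
        match (state, event) {
            // The key manager coming up (re)loads the ledger and forces a pass.
            (_, KeyManagerReady) => SynchronizationNeeded,
            // Without keys, new disks are only inventoried.
            (WaitingForKeyManager, _) => WaitingForKeyManager,
            // Any newly detected U.2 forces another background pass.
            (_, NewU2Detected) => SynchronizationNeeded,
            // A pass with only permanent errors still counts as synchronized;
            // retryable errors keep the periodic timer armed.
            (_, SyncSucceeded) => Synchronized,
            (_, SyncHitRetryableError) => SynchronizationNeeded,
        }
    }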
- async fn add_disk( - &mut self, - raw_disk: RawDisk, - ) -> Result { - match raw_disk.variant() { - DiskVariant::U2 => self.add_u2_disk(raw_disk).await, - DiskVariant::M2 => self.add_m2_disk(raw_disk).await, + async fn load_ledger(&self) -> Option> { + let ledger_paths = self.all_omicron_disk_ledgers().await; + let log = self.log.new(o!("request" => "load_ledger")); + let maybe_ledger = Ledger::::new( + &log, + ledger_paths.clone(), + ) + .await; + + match maybe_ledger { + Some(ledger) => { + info!(self.log, "Ledger of physical disks exists"); + return Some(ledger); + } + None => { + info!(self.log, "No ledger of physical disks exists"); + return None; + } } } - // Add a U.2 disk to [`StorageResources`] or queue it to be added later - async fn add_u2_disk( + async fn key_manager_ready(&mut self) -> Result<(), Error> { + self.load_storage_config().await + } + + async fn load_storage_config(&mut self) -> Result<(), Error> { + info!(self.log, "Loading storage config"); + // Set the state to "synchronization needed", to force us to try to + // asynchronously ensure that disks are ready. + self.state = StorageManagerState::SynchronizationNeeded; + + // Now that we're actually able to unpack U.2s, attempt to load the + // set of disks which we previously stored in the ledger, if one + // existed. + let ledger = self.load_ledger().await; + if let Some(ledger) = ledger { + info!(self.log, "Setting StorageResources state to match ledger"); + + // Identify which disks should be managed by the control + // plane, and adopt all requested disks into the control plane + // in a background task (see: [Self::manage_disks]). + self.resources.set_config(&ledger.data().disks); + } else { + info!(self.log, "KeyManager ready, but no ledger detected"); + let mut synthetic_config = + self.resources.get_config().values().cloned().collect(); + // TODO(https://github.com/oxidecomputer/omicron/issues/5328): Once + // we are confident that we have migrated to a world where this + // ledger is universally used, we should remove the following + // kludge. The sled agent should not need to "self-manage" anything! + let changed = self + .self_manage_disks_with_zpools(&mut synthetic_config) + .await?; + if !changed { + info!(self.log, "No disks to be automatically managed"); + return Ok(()); + } + info!(self.log, "auto-managed disks"; "count" => synthetic_config.len()); + self.resources.set_config(&synthetic_config); + } + + Ok(()) + } + + // NOTE: What follows is an exceptional case: one where we have + // no record of "Control Plane Physical Disks", but we have zpools + // on our U.2s, and we want to use them regardless. + // + // THIS WOULD NORMALLY BE INCORRECT BEHAVIOR. In the future, these + // zpools will not be "automatically imported", and instead, we'll + // let Nexus decide whether or not to reformat the disks. + // + // However, because we are transitioning from "the set of disks / + // zpools is implicit" to a world where that set is explicit, this + // is a necessary transitional tool. + // + // Returns "true" if the synthetic_config has changed. 
+ async fn self_manage_disks_with_zpools( &mut self, - raw_disk: RawDisk, - ) -> Result { - if self.state != StorageManagerState::Normal { - self.queued_u2_drives.insert(raw_disk); - return Ok(AddDiskResult::DiskQueued); + synthetic_config: &mut Vec, + ) -> Result { + let mut changed = false; + for (identity, disk) in self.resources.disks().values.iter() { + match disk { + crate::resources::ManagedDisk::Unmanaged(raw) => { + let zpool_path = match raw.u2_zpool_path() { + Ok(zpool_path) => zpool_path, + Err(err) => { + info!(self.log, "Cannot find zpool path"; "identity" => ?identity, "err" => ?err); + continue; + } + }; + + let zpool_name = + match sled_hardware::disk::check_if_zpool_exists( + &zpool_path, + ) { + Ok(zpool_name) => zpool_name, + Err(err) => { + info!(self.log, "Zpool does not exist"; "identity" => ?identity, "err" => ?err); + continue; + } + }; + + info!(self.log, "Found existing zpool on device without ledger"; + "identity" => ?identity, + "zpool" => ?zpool_name); + + // We found an unmanaged disk with a zpool, even though + // we have no prior record of a ledger of control-plane + // disks. + synthetic_config.push( + // These disks don't have a control-plane UUID -- + // report "nil" until they're overwritten with real + // values. + OmicronPhysicalDiskConfig { + identity: identity.clone(), + id: Uuid::nil(), + pool_id: zpool_name.id(), + }, + ); + changed = true; + } + _ => continue, + } } + Ok(changed) + } - match Disk::new(&self.log, raw_disk.clone(), Some(&self.key_requester)) - .await - { - Ok(disk) => self.resources.insert_disk(disk), - Err(err @ DiskError::Dataset(DatasetError::KeyManager(_))) => { - warn!( - self.log, - "Transient error: {err}: queuing disk"; - "disk_id" => ?raw_disk.identity() + // Makes an U.2 disk managed by the control plane within [`StorageResources`]. + async fn omicron_physical_disks_ensure( + &mut self, + mut config: OmicronPhysicalDisksConfig, + ) -> Result { + let log = + self.log.new(o!("request" => "omicron_physical_disks_ensure")); + + // Ensure that the set of disks arrives in a consistent order. + config + .disks + .sort_by(|a, b| a.identity.partial_cmp(&b.identity).unwrap()); + + // We rely on the schema being stable across reboots -- observe + // "test_omicron_physical_disks_schema" below for that property + // guarantee. + let ledger_paths = self.all_omicron_disk_ledgers().await; + let maybe_ledger = Ledger::::new( + &log, + ledger_paths.clone(), + ) + .await; + + let mut ledger = match maybe_ledger { + Some(ledger) => { + info!( + log, + "Comparing 'requested disks' to ledger on internal storage" ); - self.queued_u2_drives.insert(raw_disk); - self.state = StorageManagerState::QueueingDisks; - Ok(AddDiskResult::DiskQueued) + let ledger_data = ledger.data(); + if config.generation < ledger_data.generation { + warn!( + log, + "Request looks out-of-date compared to prior request" + ); + return Err(Error::PhysicalDiskConfigurationOutdated { + requested: config.generation, + current: ledger_data.generation, + }); + } + + // TODO: If the generation is equal, check that the values are + // also equal. 
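Editor's note: the generation comparison above is what protects the ledger from stale callers: a request older than the ledgered generation is rejected, anything at or beyond it is accepted and persisted. A minimal sketch of that guard with stand-in Config and ConfigError types (the real code uses OmicronPhysicalDisksConfig, Ledger, and Error::PhysicalDiskConfigurationOutdated):

    #[derive(Clone, PartialEq)]
    struct Config {
        generation: u64,
        disks: Vec<String>, // stand-in for OmicronPhysicalDiskConfig entries
    }

    #[derive(Debug)]
    enum ConfigError {
        Outdated { requested: u64, current: u64 },
    }

    fn apply(ledgered: &mut Config, request: Config) -> Result<(), ConfigError> {
        if request.generation < ledgered.generation {
            // Refuse to roll back to an older view of the control-plane disks.
            return Err(ConfigError::Outdated {
                requested: request.generation,
                current: ledgered.generation,
            });
        }
        // As the TODO above notes, equal generations should eventually also be
        // checked for identical contents before being accepted.
        if *ledgered != request {
            *ledgered = request; // the real code follows this with ledger.commit()
        }
        Ok(())
    }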
+ + info!(log, "Request looks newer than prior requests"); + ledger } - Err(err) => { - error!( - self.log, - "Persistent error:not queueing disk"; - "err" => ?err, - "disk_id" => ?raw_disk.identity() - ); - Err(err.into()) + None => { + info!(log, "No previously-stored 'requested disks', creating new ledger"); + Ledger::::new_with( + &log, + ledger_paths.clone(), + OmicronPhysicalDisksConfig::new(), + ) } + }; + + let result = + self.omicron_physical_disks_ensure_internal(&log, &config).await?; + + let ledger_data = ledger.data_mut(); + if *ledger_data == config { + return Ok(result); } + *ledger_data = config; + ledger.commit().await?; + + Ok(result) } - // Add a U.2 disk to [`StorageResources`] if new and return `Ok(true)` if so - // + // Updates [StorageResources] to manage the disks requested by `config`, if + // those disks exist. // - // We never queue M.2 drives, as they don't rely on [`KeyManager`] based - // encryption - async fn add_m2_disk( + // Makes no attempts to manipulate the ledger storage. + async fn omicron_physical_disks_ensure_internal( &mut self, - raw_disk: RawDisk, - ) -> Result { - let disk = - Disk::new(&self.log, raw_disk.clone(), Some(&self.key_requester)) - .await?; - self.resources.insert_disk(disk) + log: &Logger, + config: &OmicronPhysicalDisksConfig, + ) -> Result { + if self.state == StorageManagerState::WaitingForKeyManager { + warn!( + log, + "Not ready to manage storage yet (waiting for the key manager)" + ); + return Err(Error::KeyManagerNotReady); + } + + // Identify which disks should be managed by the control + // plane, and adopt all requested disks into the control plane. + self.resources.set_config(&config.disks); + + // Actually try to "manage" those disks, which may involve formatting + // zpools and conforming partitions to those expected by the control + // plane. + Ok(self.resources.synchronize_disk_management().await) + } + + async fn omicron_physical_disks_list( + &mut self, + ) -> Result { + let log = self.log.new(o!("request" => "omicron_physical_disks_list")); + + // TODO(https://github.com/oxidecomputer/omicron/issues/5328): This + // could just use "resources.get_config", but that'll be more feasible + // once we don't have to cons up a fake "Generation" number. + + let ledger_paths = self.all_omicron_disk_ledgers().await; + let maybe_ledger = Ledger::::new( + &log, + ledger_paths.clone(), + ) + .await; + + match maybe_ledger { + Some(ledger) => { + info!(log, "Found ledger on internal storage"); + return Ok(ledger.data().clone()); + } + None => { + info!(log, "No ledger detected on internal storage"); + return Err(Error::LedgerNotFound); + } + } } // Delete a real disk and return `true` if the disk was actually removed - fn remove_disk(&mut self, raw_disk: RawDisk) -> bool { - // If the disk is a U.2, we want to first delete it from any queued disks - let _ = self.queued_u2_drives.remove(&raw_disk); - self.resources.remove_disk(raw_disk.identity()) + fn detected_raw_disk_removal(&mut self, raw_disk: RawDisk) { + self.resources.remove_disk(raw_disk.identity()); } // Find all disks to remove that are not in raw_disks and remove them. Then @@ -509,13 +767,7 @@ impl StorageManager { async fn ensure_using_exactly_these_disks( &mut self, raw_disks: HashSet, - ) -> bool { - let mut should_update = false; - - // Clear out any queued U.2 disks that are real. - // We keep synthetic disks, as they are only added once. 
- self.queued_u2_drives.retain(|d| d.is_synthetic()); - + ) { let all_ids: HashSet<_> = raw_disks.iter().map(|d| d.identity()).collect(); @@ -523,8 +775,8 @@ impl StorageManager { let to_remove: Vec = self .resources .disks() - .keys() - .filter_map(|id| { + .iter_all() + .filter_map(|(id, _variant, _slot)| { if !all_ids.contains(id) { Some(id.clone()) } else { @@ -534,27 +786,19 @@ impl StorageManager { .collect(); for id in to_remove { - if self.resources.remove_disk(&id) { - should_update = true; - } + self.resources.remove_disk(&id); } for raw_disk in raw_disks { let disk_id = raw_disk.identity().clone(); - match self.add_disk(raw_disk).await { - Ok(AddDiskResult::DiskInserted) => should_update = true, - Ok(_) => (), - Err(err) => { - warn!( - self.log, - "Failed to add disk to storage resources: {err}"; - "disk_id" => ?disk_id - ); - } + if let Err(err) = self.detected_raw_disk(raw_disk).await { + warn!( + self.log, + "Failed to add disk to storage resources: {err}"; + "disk_id" => ?disk_id + ); } } - - should_update } // Attempts to add a dataset within a zpool, according to `request`. @@ -562,15 +806,15 @@ impl StorageManager { &mut self, request: &NewFilesystemRequest, ) -> Result<(), Error> { - info!(self.log, "add_dataset: {:?}", request); + info!(self.log, "add_dataset"; "request" => ?request); if !self .resources .disks() - .values() - .any(|(_, pool)| &pool.name == request.dataset_name.pool()) + .iter_managed() + .any(|(_, disk)| disk.zpool_name() == request.dataset_name.pool()) { return Err(Error::ZpoolNotFound(format!( - "{}, looked up while trying to add dataset", + "{}", request.dataset_name.pool(), ))); } @@ -617,271 +861,314 @@ impl StorageManager { #[cfg(all(test, target_os = "illumos"))] mod tests { use crate::dataset::DatasetKind; - use crate::disk::SyntheticDisk; + use crate::disk::RawSyntheticDisk; + use crate::manager_test_harness::StorageManagerTestHarness; + use crate::resources::DiskManagementError; use super::*; - use async_trait::async_trait; - use camino_tempfile::tempdir; - use illumos_utils::zpool::Zpool; - use key_manager::{ - KeyManager, SecretRetriever, SecretRetrieverError, SecretState, - VersionedIkm, - }; + use camino_tempfile::tempdir_in; + use omicron_common::api::external::Generation; + use omicron_common::ledger; use omicron_test_utils::dev::test_setup_log; - use std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }; + use omicron_uuid_kinds::ZpoolUuid; + use std::sync::atomic::Ordering; use uuid::Uuid; - /// A [`key-manager::SecretRetriever`] that only returns hardcoded IKM for - /// epoch 0 - #[derive(Debug, Default)] - struct HardcodedSecretRetriever { - inject_error: Arc, - } + // A helper struct to advance time. 
+ struct TimeTravel {} - #[async_trait] - impl SecretRetriever for HardcodedSecretRetriever { - async fn get_latest( - &self, - ) -> Result { - if self.inject_error.load(Ordering::SeqCst) { - return Err(SecretRetrieverError::Bootstore( - "Timeout".to_string(), - )); - } - - let epoch = 0; - let salt = [0u8; 32]; - let secret = [0x1d; 32]; - - Ok(VersionedIkm::new(epoch, salt, &secret)) + impl TimeTravel { + pub fn new() -> Self { + tokio::time::pause(); + Self {} } - /// We don't plan to do any key rotation before trust quorum is ready - async fn get( - &self, - epoch: u64, - ) -> Result { - if self.inject_error.load(Ordering::SeqCst) { - return Err(SecretRetrieverError::Bootstore( - "Timeout".to_string(), - )); - } - if epoch != 0 { - return Err(SecretRetrieverError::NoSuchEpoch(epoch)); - } - Ok(SecretState::Current(self.get_latest().await?)) + pub async fn enough_to_start_synchronization(&self) { + tokio::time::advance(SYNCHRONIZE_INTERVAL).await; } } #[tokio::test] - async fn add_u2_disk_while_not_in_normal_stage_and_ensure_it_gets_queued() { + async fn add_control_plane_disks_requires_keymanager() { illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); - let logctx = test_setup_log( - "add_u2_disk_while_not_in_normal_stage_and_ensure_it_gets_queued", - ); - let (mut _key_manager, key_requester) = - KeyManager::new(&logctx.log, HardcodedSecretRetriever::default()); - let (mut manager, _) = StorageManager::new(&logctx.log, key_requester); - let zpool_name = ZpoolName::new_external(Uuid::new_v4()); - let raw_disk: RawDisk = SyntheticDisk::new(zpool_name, 0).into(); - assert_eq!(StorageManagerState::WaitingForKeyManager, manager.state); - manager.add_u2_disk(raw_disk.clone()).await.unwrap(); - assert!(manager.resources.all_u2_zpools().is_empty()); - assert_eq!(manager.queued_u2_drives, HashSet::from([raw_disk.clone()])); - - // Check other non-normal stages and ensure disk gets queued - manager.queued_u2_drives.clear(); - manager.state = StorageManagerState::QueueingDisks; - manager.add_u2_disk(raw_disk.clone()).await.unwrap(); - assert!(manager.resources.all_u2_zpools().is_empty()); - assert_eq!(manager.queued_u2_drives, HashSet::from([raw_disk])); + let logctx = + test_setup_log("add_control_plane_disks_requires_keymanager"); + + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; + let raw_disks = + harness.add_vdevs(&["u2_under_test.vdev", "m2_helping.vdev"]).await; + + // These disks should exist, but only the M.2 should have a zpool. + let all_disks = harness.handle().get_latest_disks().await; + assert_eq!(2, all_disks.iter_all().collect::>().len()); + assert_eq!(0, all_disks.all_u2_zpools().len()); + assert_eq!(1, all_disks.all_m2_zpools().len()); + + // If we try to "act like nexus" and request a control-plane disk, we'll + // see a failure because the key manager isn't ready. + let config = harness.make_config(1, &raw_disks); + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await; + assert!(matches!(result, Err(Error::KeyManagerNotReady))); + + // If we make the key manager ready and try again, it'll work. + harness.handle().key_manager_ready().await; + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await + .expect("Ensuring disks should work after key manager is ready"); + assert!(!result.has_error(), "{:?}", result); + + // If we look at the disks again, we'll now see one U.2 zpool. 
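Editor's note: the TimeTravel helper defined above leans on Tokio's paused test clock (available with the test-util feature): pause() freezes time and advance() jumps it forward, so SYNCHRONIZE_INTERVAL can elapse instantly and deterministically inside a test. A standalone sketch of the mechanism, with an arbitrary interval length:

    use std::time::Duration;

    #[tokio::test]
    async fn timers_fire_without_real_waiting() {
        // Freeze the runtime clock; timers only move when we advance() it.
        tokio::time::pause();

        let mut ticker = tokio::time::interval(Duration::from_secs(60));
        ticker.tick().await; // the first tick completes immediately

        // Jump the clock forward; the next tick is now due and returns at once.
        tokio::time::advance(Duration::from_secs(60)).await;
        ticker.tick().await;
    }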
+ let all_disks = harness.handle().get_latest_disks().await; + assert_eq!(2, all_disks.iter_all().collect::>().len()); + assert_eq!(1, all_disks.all_u2_zpools().len()); + assert_eq!(1, all_disks.all_m2_zpools().len()); + + harness.cleanup().await; logctx.cleanup_successful(); } #[tokio::test] - async fn ensure_u2_gets_added_to_resources() { - illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); - let logctx = test_setup_log("ensure_u2_gets_added_to_resources"); - let (mut key_manager, key_requester) = - KeyManager::new(&logctx.log, HardcodedSecretRetriever::default()); - let (mut manager, _) = StorageManager::new(&logctx.log, key_requester); - let zpool_name = ZpoolName::new_external(Uuid::new_v4()); - let dir = tempdir().unwrap(); - let disk = - SyntheticDisk::create_zpool(dir.path(), &zpool_name, 0).into(); - - // Spawn the key_manager so that it will respond to requests for encryption keys - tokio::spawn(async move { key_manager.run().await }); - - // Set the stage to pretend we've progressed enough to have a key_manager available. - manager.state = StorageManagerState::Normal; - manager.add_u2_disk(disk).await.unwrap(); - assert_eq!(manager.resources.all_u2_zpools().len(), 1); - Zpool::destroy(&zpool_name).unwrap(); + async fn ledger_writes_require_at_least_one_m2() { + let logctx = test_setup_log("ledger_writes_require_at_least_one_m2"); + + // Create a single U.2 under test, with a ready-to-go key manager. + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; + let raw_disks = harness.add_vdevs(&["u2_under_test.vdev"]).await; + harness.handle().key_manager_ready().await; + let config = harness.make_config(1, &raw_disks); + + // Attempting to adopt this U.2 fails (we don't have anywhere to put the + // ledger). + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await; + assert!( + matches!( + result, + Err(Error::Ledger(ledger::Error::FailedToWrite { .. })) + ), + "Saw unexpected result: {:?}", + result + ); + + // Add an M.2 which can store the ledger. 
+ let _raw_disks = + harness.add_vdevs(&["m2_finally_showed_up.vdev"]).await; + harness.handle_mut().wait_for_boot_disk().await; + + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await + .expect("After adding an M.2, the ledger write should have worked"); + assert!(!result.has_error(), "{:?}", result); + + // Wait for the add disk notification + let tt = TimeTravel::new(); + tt.enough_to_start_synchronization().await; + let all_disks = harness.handle_mut().wait_for_changes().await; + assert_eq!(all_disks.all_u2_zpools().len(), 1); + assert_eq!(all_disks.all_m2_zpools().len(), 1); + + harness.cleanup().await; logctx.cleanup_successful(); } #[tokio::test] - async fn wait_for_bootdisk() { + async fn add_raw_u2_does_not_create_zpool() { illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); - let logctx = test_setup_log("wait_for_bootdisk"); - let (mut key_manager, key_requester) = - KeyManager::new(&logctx.log, HardcodedSecretRetriever::default()); - let (manager, mut handle) = - StorageManager::new(&logctx.log, key_requester); - // Spawn the key_manager so that it will respond to requests for encryption keys - tokio::spawn(async move { key_manager.run().await }); - - // Spawn the storage manager as done by sled-agent - tokio::spawn(async move { - manager.run().await; - }); - - // Create a synthetic internal disk - let zpool_name = ZpoolName::new_internal(Uuid::new_v4()); - let dir = tempdir().unwrap(); - let disk = - SyntheticDisk::create_zpool(dir.path(), &zpool_name, 0).into(); - - handle.upsert_disk(disk).await; - handle.wait_for_boot_disk().await; - Zpool::destroy(&zpool_name).unwrap(); + let logctx = test_setup_log("add_raw_u2_does_not_create_zpool"); + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; + harness.handle().key_manager_ready().await; + + // Add a representative scenario for a small sled: a U.2 and M.2. + let _raw_disks = + harness.add_vdevs(&["u2_under_test.vdev", "m2_helping.vdev"]).await; + + // This disks should exist, but only the M.2 should have a zpool. + let all_disks = harness.handle().get_latest_disks().await; + assert_eq!(2, all_disks.iter_all().collect::>().len()); + assert_eq!(0, all_disks.all_u2_zpools().len()); + assert_eq!(1, all_disks.all_m2_zpools().len()); + + harness.cleanup().await; logctx.cleanup_successful(); } #[tokio::test] - async fn queued_disks_get_added_as_resources() { + async fn wait_for_boot_disk() { illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); - let logctx = test_setup_log("queued_disks_get_added_as_resources"); - let (mut key_manager, key_requester) = - KeyManager::new(&logctx.log, HardcodedSecretRetriever::default()); - let (manager, handle) = StorageManager::new(&logctx.log, key_requester); + let logctx = test_setup_log("wait_for_boot_disk"); + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; + let _raw_disks = harness.add_vdevs(&["u2_under_test.vdev"]).await; + + // When we wait for changes, we can see the U.2 being added, but no boot + // disk. + let all_disks = harness.handle_mut().wait_for_changes().await; + assert_eq!(1, all_disks.iter_all().collect::>().len()); + assert!(all_disks.boot_disk().is_none()); + + // Waiting for the boot disk should time out. 
+ assert!(tokio::time::timeout( + tokio::time::Duration::from_millis(10), + harness.handle_mut().wait_for_boot_disk(), + ) + .await + .is_err()); - // Spawn the key_manager so that it will respond to requests for encryption keys - tokio::spawn(async move { key_manager.run().await }); + // Now we add a boot disk. + let boot_disk = harness.add_vdevs(&["m2_under_test.vdev"]).await; - // Spawn the storage manager as done by sled-agent - tokio::spawn(async move { - manager.run().await; - }); + // It shows up through the general "wait for changes" API. + let all_disks = harness.handle_mut().wait_for_changes().await; + assert_eq!(2, all_disks.iter_all().collect::>().len()); + assert!(all_disks.boot_disk().is_some()); - // Queue up a disks, as we haven't told the `StorageManager` that - // the `KeyManager` is ready yet. - let zpool_name = ZpoolName::new_external(Uuid::new_v4()); - let dir = tempdir().unwrap(); - let disk = - SyntheticDisk::create_zpool(dir.path(), &zpool_name, 0).into(); - handle.upsert_disk(disk).await; - let resources = handle.get_latest_resources().await; - assert!(resources.all_u2_zpools().is_empty()); - - // Now inform the storage manager that the key manager is ready - // The queued disk should be successfully added - handle.key_manager_ready().await; - let resources = handle.get_latest_resources().await; - assert_eq!(resources.all_u2_zpools().len(), 1); - Zpool::destroy(&zpool_name).unwrap(); + // We can wait for, and see, the boot disk. + let (id, _) = harness.handle_mut().wait_for_boot_disk().await; + assert_eq!(&id, boot_disk[0].identity()); + + // We can keep calling this function without blocking. + let (id, _) = harness.handle_mut().wait_for_boot_disk().await; + assert_eq!(&id, boot_disk[0].identity()); + + harness.cleanup().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn disks_automatically_managed_after_key_manager_ready() { + illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); + let logctx = test_setup_log( + "disks_automatically_managed_after_key_manager_ready", + ); + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; + + // Boot normally, add an M.2 and a U.2, and let them + // create pools. + let raw_disks = + harness.add_vdevs(&["u2_under_test.vdev", "m2_helping.vdev"]).await; + harness.handle().key_manager_ready().await; + let config = harness.make_config(1, &raw_disks); + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await + .unwrap(); + assert!(!result.has_error(), "{:?}", result); + + // Both pools exist + let all_disks = harness.handle().get_latest_disks().await; + assert_eq!(2, all_disks.iter_all().collect::>().len()); + assert_eq!(1, all_disks.all_u2_zpools().len()); + assert_eq!(1, all_disks.all_m2_zpools().len()); + + // "reboot" the storage manager, and let it see the disks before + // the key manager is ready. + let mut harness = harness.reboot(&logctx.log).await; + + // Both disks exist, but the U.2's pool is not yet accessible. + let all_disks = harness.handle_mut().wait_for_changes().await; + assert_eq!(2, all_disks.iter_all().collect::>().len()); + assert_eq!(0, all_disks.all_u2_zpools().len()); + assert_eq!(1, all_disks.all_m2_zpools().len()); + + // Mark the key manaager ready. This should eventually lead to the + // U.2 being managed, since it exists in the M.2 ledger. 
+ harness.handle().key_manager_ready().await; + let all_disks = harness.handle_mut().wait_for_changes().await; + assert_eq!(1, all_disks.all_u2_zpools().len()); + + harness.cleanup().await; logctx.cleanup_successful(); } - /// For this test, we are going to step through the msg recv loop directly - /// without running the `StorageManager` in a tokio task. - /// This allows us to control timing precisely. #[tokio::test] async fn queued_disks_get_requeued_on_secret_retriever_error() { illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); let logctx = test_setup_log( "queued_disks_get_requeued_on_secret_retriever_error", ); - let inject_error = Arc::new(AtomicBool::new(false)); - let (mut key_manager, key_requester) = KeyManager::new( - &logctx.log, - HardcodedSecretRetriever { inject_error: inject_error.clone() }, - ); - let (mut manager, handle) = - StorageManager::new(&logctx.log, key_requester); - - // Spawn the key_manager so that it will respond to requests for encryption keys - tokio::spawn(async move { key_manager.run().await }); + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; // Queue up a disks, as we haven't told the `StorageManager` that // the `KeyManager` is ready yet. - let zpool_name = ZpoolName::new_external(Uuid::new_v4()); - let dir = tempdir().unwrap(); - let disk = - SyntheticDisk::create_zpool(dir.path(), &zpool_name, 0).into(); - handle.upsert_disk(disk).await; - manager.step().await.unwrap(); - - // We can't wait for a reply through the handle as the storage manager task - // isn't actually running. We just check the resources directly. - assert!(manager.resources.all_u2_zpools().is_empty()); - - // Let's inject an error to the `SecretRetriever` to simulate a trust - // quorum timeout - inject_error.store(true, Ordering::SeqCst); - - // Now inform the storage manager that the key manager is ready - // The queued disk should not be added due to the error - handle.key_manager_ready().await; - manager.step().await.unwrap(); - assert!(manager.resources.all_u2_zpools().is_empty()); - - // Manually simulating a timer tick to add queued disks should also - // still hit the error - manager.add_queued_disks().await; - assert!(manager.resources.all_u2_zpools().is_empty()); - - // Clearing the injected error will cause the disk to get added - inject_error.store(false, Ordering::SeqCst); - manager.add_queued_disks().await; - assert_eq!(1, manager.resources.all_u2_zpools().len()); - - Zpool::destroy(&zpool_name).unwrap(); + let raw_disks = + harness.add_vdevs(&["u2_under_test.vdev", "m2_helping.vdev"]).await; + let config = harness.make_config(1, &raw_disks); + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await; + assert!(matches!(result, Err(Error::KeyManagerNotReady))); + + // As usual, the U.2 isn't ready yet. + let all_disks = harness.handle().get_latest_disks().await; + assert_eq!(2, all_disks.iter_all().collect::>().len()); + assert_eq!(0, all_disks.all_u2_zpools().len()); + + // Mark the key manager ready, but throwing errors. 
+ harness.key_manager_error_injector().store(true, Ordering::SeqCst); + harness.handle().key_manager_ready().await; + + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await + .unwrap(); + assert!(result.has_error()); + assert!(matches!( + result.status[0].err.as_ref(), + Some(DiskManagementError::KeyManager(_)) + )); + let all_disks = harness.handle().get_latest_disks().await; + assert_eq!(0, all_disks.all_u2_zpools().len()); + + // After toggling KeyManager errors off, the U.2 can be successfully added. + harness.key_manager_error_injector().store(false, Ordering::SeqCst); + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await + .expect("Ensuring control plane disks should have worked"); + assert!(!result.has_error(), "{:?}", result); + let all_disks = harness.handle().get_latest_disks().await; + assert_eq!(1, all_disks.all_u2_zpools().len()); + + harness.cleanup().await; logctx.cleanup_successful(); } #[tokio::test] - async fn delete_disk_triggers_notification() { + async fn detected_raw_disk_removal_triggers_notification() { illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); - let logctx = test_setup_log("delete_disk_triggers_notification"); - let (mut key_manager, key_requester) = - KeyManager::new(&logctx.log, HardcodedSecretRetriever::default()); - let (manager, mut handle) = - StorageManager::new(&logctx.log, key_requester); - - // Spawn the key_manager so that it will respond to requests for encryption keys - tokio::spawn(async move { key_manager.run().await }); - - // Spawn the storage manager as done by sled-agent - tokio::spawn(async move { - manager.run().await; - }); - - // Inform the storage manager that the key manager is ready, so disks - // don't get queued - handle.key_manager_ready().await; - - // Create and add a disk - let zpool_name = ZpoolName::new_external(Uuid::new_v4()); - let dir = tempdir().unwrap(); - let disk: RawDisk = - SyntheticDisk::create_zpool(dir.path(), &zpool_name, 0).into(); - handle.upsert_disk(disk.clone()).await; + let logctx = + test_setup_log("detected_raw_disk_removal_triggers_notification"); + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; + harness.handle().key_manager_ready().await; + let mut raw_disks = harness.add_vdevs(&["u2_under_test.vdev"]).await; - // Wait for the add disk notification - let resources = handle.wait_for_changes().await; - assert_eq!(resources.all_u2_zpools().len(), 1); + // Access the add disk notification + let all_disks = harness.handle_mut().wait_for_changes().await; + assert_eq!(1, all_disks.iter_all().collect::>().len()); // Delete the disk and wait for a notification - handle.delete_disk(disk).await; - let resources = handle.wait_for_changes().await; - assert!(resources.all_u2_zpools().is_empty()); + harness + .handle() + .detected_raw_disk_removal(raw_disks.remove(0)) + .await + .await + .unwrap(); + let all_disks = harness.handle_mut().wait_for_changes().await; + assert_eq!(0, all_disks.iter_all().collect::>().len()); - Zpool::destroy(&zpool_name).unwrap(); + harness.cleanup().await; logctx.cleanup_successful(); } @@ -889,122 +1176,81 @@ mod tests { async fn ensure_using_exactly_these_disks() { illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); let logctx = test_setup_log("ensure_using_exactly_these_disks"); - let (mut key_manager, key_requester) = - KeyManager::new(&logctx.log, HardcodedSecretRetriever::default()); - let (manager, mut handle) = - StorageManager::new(&logctx.log, key_requester); - - // 
Spawn the key_manager so that it will respond to requests for encryption keys - tokio::spawn(async move { key_manager.run().await }); - - // Spawn the storage manager as done by sled-agent - tokio::spawn(async move { - manager.run().await; - }); - - // Create a bunch of file backed external disks with zpools - let dir = tempdir().unwrap(); - let zpools: Vec = - (0..10).map(|_| ZpoolName::new_external(Uuid::new_v4())).collect(); - let disks: Vec = zpools - .iter() - .enumerate() - .map(|(slot, zpool_name)| { - SyntheticDisk::create_zpool( - dir.path(), - zpool_name, - slot.try_into().unwrap(), - ) - .into() + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; + + // Create a bunch of file backed external disks + let vdev_dir = tempdir_in("/var/tmp").unwrap(); + let disks: Vec = (0..10) + .map(|serial| { + let vdev_path = + vdev_dir.path().join(format!("u2_{serial}.vdev")); + RawSyntheticDisk::new_with_length(&vdev_path, 1 << 20, serial) + .unwrap() + .into() }) .collect(); - // Add the first 3 disks, and ensure they get queued, as we haven't - // marked our key manager ready yet - handle + // Observe the first three disks + harness + .handle() .ensure_using_exactly_these_disks(disks.iter().take(3).cloned()) - .await; - let state = handle.get_manager_state().await; - assert_eq!(state.queued_u2_drives.len(), 3); - assert_eq!(state.state, StorageManagerState::WaitingForKeyManager); - assert!(handle.get_latest_resources().await.all_u2_zpools().is_empty()); - - // Mark the key manager ready and wait for the storage update - handle.key_manager_ready().await; - let resources = handle.wait_for_changes().await; - let expected: HashSet<_> = - disks.iter().take(3).map(|d| d.identity()).collect(); - let actual: HashSet<_> = resources.disks().keys().collect(); - assert_eq!(expected, actual); + .await + .await + .unwrap(); - // Add first three disks after the initial one. The returned resources + let all_disks = harness.handle().get_latest_disks().await; + assert_eq!(3, all_disks.iter_all().collect::>().len()); + + // Add first three disks after the initial one. The returned disks // should not contain the first disk. - handle + harness + .handle() .ensure_using_exactly_these_disks( disks.iter().skip(1).take(3).cloned(), ) - .await; - let resources = handle.wait_for_changes().await; + .await + .await + .unwrap(); + + let all_disks = harness.handle_mut().wait_for_changes().await; + assert_eq!(3, all_disks.iter_all().collect::>().len()); + let expected: HashSet<_> = disks.iter().skip(1).take(3).map(|d| d.identity()).collect(); - let actual: HashSet<_> = resources.disks().keys().collect(); + let actual: HashSet<_> = all_disks.values.keys().collect(); assert_eq!(expected, actual); // Ensure the same set of disks and make sure no change occurs - // Note that we directly request the resources this time so we aren't + // Note that we directly request the disks this time so we aren't // waiting forever for a change notification. 
- handle + harness + .handle() .ensure_using_exactly_these_disks( disks.iter().skip(1).take(3).cloned(), ) - .await; - let resources2 = handle.get_latest_resources().await; - assert_eq!(resources, resources2); + .await + .await + .unwrap(); + let all_disks2 = harness.handle().get_latest_disks().await; + assert_eq!(all_disks.values, all_disks2.values); // Add a disjoint set of disks and see that only they come through - handle + harness + .handle() .ensure_using_exactly_these_disks( disks.iter().skip(4).take(5).cloned(), ) - .await; - let resources = handle.wait_for_changes().await; + .await + .await + .unwrap(); + + let all_disks = harness.handle().get_latest_disks().await; let expected: HashSet<_> = disks.iter().skip(4).take(5).map(|d| d.identity()).collect(); - let actual: HashSet<_> = resources.disks().keys().collect(); + let actual: HashSet<_> = all_disks.values.keys().collect(); assert_eq!(expected, actual); - // Finally, change the zpool backing of the 5th disk to be that of the 10th - // and ensure that disk changes. Note that we don't change the identity - // of the 5th disk. - let mut modified_disk = disks[4].clone(); - if let RawDisk::Synthetic(disk) = &mut modified_disk { - disk.zpool_name = disks[9].zpool_name().clone(); - } else { - panic!(); - } - let mut expected: HashSet<_> = - disks.iter().skip(5).take(4).cloned().collect(); - expected.insert(modified_disk); - - handle - .ensure_using_exactly_these_disks(expected.clone().into_iter()) - .await; - let resources = handle.wait_for_changes().await; - - // Ensure the one modified disk changed as we expected - assert_eq!(5, resources.disks().len()); - for raw_disk in expected { - let (disk, pool) = - resources.disks().get(raw_disk.identity()).unwrap(); - assert_eq!(disk.zpool_name(), raw_disk.zpool_name()); - assert_eq!(&pool.name, disk.zpool_name()); - assert_eq!(raw_disk.identity(), &pool.parent); - } - - // Cleanup - for zpool in zpools { - Zpool::destroy(&zpool).unwrap(); - } + harness.cleanup().await; logctx.cleanup_successful(); } @@ -1012,34 +1258,194 @@ mod tests { async fn upsert_filesystem() { illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); let logctx = test_setup_log("upsert_filesystem"); - let (mut key_manager, key_requester) = - KeyManager::new(&logctx.log, HardcodedSecretRetriever::default()); - let (manager, handle) = StorageManager::new(&logctx.log, key_requester); + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; + + // Test setup: Add a U.2 and M.2, adopt them into the "control plane" + // for usage. 
+ harness.handle().key_manager_ready().await; + let raw_disks = + harness.add_vdevs(&["u2_under_test.vdev", "m2_helping.vdev"]).await; + let config = harness.make_config(1, &raw_disks); + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await + .expect("Ensuring disks should work after key manager is ready"); + assert!(!result.has_error(), "{:?}", result); - // Spawn the key_manager so that it will respond to requests for encryption keys - tokio::spawn(async move { key_manager.run().await }); + // Create a filesystem on the newly formatted U.2 + let dataset_id = Uuid::new_v4(); + let zpool_name = ZpoolName::new_external(config.disks[0].pool_id); + let dataset_name = + DatasetName::new(zpool_name.clone(), DatasetKind::Crucible); + harness + .handle() + .upsert_filesystem(dataset_id, dataset_name) + .await + .unwrap(); - // Spawn the storage manager as done by sled-agent - tokio::spawn(async move { - manager.run().await; - }); + harness.cleanup().await; + logctx.cleanup_successful(); + } - handle.key_manager_ready().await; + #[tokio::test] + async fn ledgerless_to_ledgered_migration() { + illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); + let logctx = test_setup_log("ledgerless_to_ledgered_migration"); + let mut harness = StorageManagerTestHarness::new(&logctx.log).await; + + // Test setup: Create two U.2s and an M.2 + let raw_disks = harness + .add_vdevs(&[ + "u2_under_test.vdev", + "u2_that_shows_up_late.vdev", + "m2_helping.vdev", + ]) + .await; - // Create and add a disk - let zpool_name = ZpoolName::new_external(Uuid::new_v4()); - let dir = tempdir().unwrap(); - let disk: RawDisk = - SyntheticDisk::create_zpool(dir.path(), &zpool_name, 0).into(); - handle.upsert_disk(disk.clone()).await; + // First, we format the U.2s to have a zpool. This should work, even + // without looping in the StorageManager. + let first_u2 = &raw_disks[0]; + let first_pool_id = ZpoolUuid::new_v4(); + let _disk = crate::disk::Disk::new( + &logctx.log, + &harness.mount_config(), + first_u2.clone(), + Some(first_pool_id), + Some(harness.key_requester()), + ) + .await + .expect("Failed to format U.2"); - // Create a filesystem - let dataset_id = Uuid::new_v4(); - let dataset_name = - DatasetName::new(zpool_name.clone(), DatasetKind::Crucible); - handle.upsert_filesystem(dataset_id, dataset_name).await.unwrap(); + let second_u2 = &raw_disks[1]; + let second_pool_id = ZpoolUuid::new_v4(); + let _disk = crate::disk::Disk::new( + &logctx.log, + &harness.mount_config(), + second_u2.clone(), + Some(second_pool_id), + Some(harness.key_requester()), + ) + .await + .expect("Failed to format U.2"); + + // Because we did that formatting "behind the back" of the + // StorageManager, we should see no evidence of the U.2 being managed. + // + // This currently matches the format of "existing systems, which were + // initialized before the storage ledger was created". + + // We should still see no ledger. + let result = harness.handle().omicron_physical_disks_list().await; + assert!(matches!(result, Err(Error::LedgerNotFound)), "{:?}", result); + + // We should also not see any managed U.2s. + let disks = harness.handle().get_latest_disks().await; + assert!(disks.all_u2_zpools().is_empty()); + + // Leave one of the U.2s attached, but "remove" the other one. + harness.remove_vdev(second_u2).await; + + // When the system activates, we should see a single Zpool, and + // "auto-manage" it. 
+ harness.handle().key_manager_ready().await; + + // It might take a moment for synchronization to be handled by the + // background task, but we'll eventually see the U.2 zpool. + // + // This is the equivalent of us "loading a zpool, even though + // it was not backed by a ledger". + let tt = TimeTravel::new(); + tt.enough_to_start_synchronization().await; + while harness + .handle_mut() + .wait_for_changes() + .await + .all_u2_zpools() + .is_empty() + { + info!(&logctx.log, "Waiting for U.2 to automatically show up"); + } + let u2s = harness.handle().get_latest_disks().await.all_u2_zpools(); + assert_eq!(u2s.len(), 1, "{:?}", u2s); + + // If we attach the second U.2 -- the equivalent of it appearing after + // the key manager is ready -- it'll also be included in the set of + // auto-maanged U.2s. + harness.add_vdev_as(second_u2.clone()).await; + tt.enough_to_start_synchronization().await; + while harness + .handle_mut() + .wait_for_changes() + .await + .all_u2_zpools() + .len() + == 1 + { + info!(&logctx.log, "Waiting for U.2 to automatically show up"); + } + let u2s = harness.handle().get_latest_disks().await.all_u2_zpools(); + assert_eq!(u2s.len(), 2, "{:?}", u2s); + + // This is the equivalent of the "/omicron-physical-disks GET" API, + // which Nexus might use to contact this sled. + // + // This means that we'll bootstrap the sled successfully, but report a + // 404 if nexus asks us for the latest configuration. + let result = harness.handle().omicron_physical_disks_list().await; + assert!(matches!(result, Err(Error::LedgerNotFound),), "{:?}", result); + + // At this point, Nexus may want to explicitly tell sled agent which + // disks it should use. This is the equivalent of invoking + // "/omicron-physical-disks PUT". + let mut disks = vec![ + OmicronPhysicalDiskConfig { + identity: first_u2.identity().clone(), + id: Uuid::new_v4(), + pool_id: first_pool_id, + }, + OmicronPhysicalDiskConfig { + identity: second_u2.identity().clone(), + id: Uuid::new_v4(), + pool_id: second_pool_id, + }, + ]; + // Sort the disks to ensure the "output" matches the "input" when we + // query later. + disks.sort_by(|a, b| a.identity.partial_cmp(&b.identity).unwrap()); + let config = + OmicronPhysicalDisksConfig { generation: Generation::new(), disks }; + let result = harness + .handle() + .omicron_physical_disks_ensure(config.clone()) + .await + .expect("Failed to ensure disks with 'new' Config"); + assert!(!result.has_error(), "{:?}", result); + + let observed_config = harness + .handle() + .omicron_physical_disks_list() + .await + .expect("Failed to retreive config after ensuring it"); + assert_eq!(observed_config, config); + + let u2s = harness.handle().get_latest_disks().await.all_u2_zpools(); + assert_eq!(u2s.len(), 2, "{:?}", u2s); - Zpool::destroy(&zpool_name).unwrap(); + harness.cleanup().await; logctx.cleanup_successful(); } } + +#[cfg(test)] +mod test { + use super::*; + #[test] + fn test_omicron_physical_disks_schema() { + let schema = schemars::schema_for!(OmicronPhysicalDisksConfig); + expectorate::assert_contents( + "../schema/omicron-physical-disks.json", + &serde_json::to_string_pretty(&schema).unwrap(), + ); + } +} diff --git a/sled-storage/src/manager_test_harness.rs b/sled-storage/src/manager_test_harness.rs new file mode 100644 index 0000000000..a2180a95b5 --- /dev/null +++ b/sled-storage/src/manager_test_harness.rs @@ -0,0 +1,394 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Utilities for creating a StorageManager under test. + +use crate::config::MountConfig; +use crate::disk::{OmicronPhysicalDisksConfig, RawDisk}; +use crate::manager::{StorageHandle, StorageManager}; +use camino::Utf8PathBuf; +use key_manager::StorageKeyRequester; +use omicron_uuid_kinds::ZpoolUuid; +use slog::{info, Logger}; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; +use uuid::Uuid; + +/// A [`key-manager::SecretRetriever`] that only returns hardcoded IKM for +/// epoch 0 +#[derive(Debug, Default)] +struct HardcodedSecretRetriever { + inject_error: Arc, +} + +#[async_trait::async_trait] +impl key_manager::SecretRetriever for HardcodedSecretRetriever { + async fn get_latest( + &self, + ) -> Result + { + if self.inject_error.load(Ordering::SeqCst) { + return Err(key_manager::SecretRetrieverError::Bootstore( + "Timeout".to_string(), + )); + } + + let epoch = 0; + let salt = [0u8; 32]; + let secret = [0x1d; 32]; + + Ok(key_manager::VersionedIkm::new(epoch, salt, &secret)) + } + + /// We don't plan to do any key rotation before trust quorum is ready + async fn get( + &self, + epoch: u64, + ) -> Result + { + if self.inject_error.load(Ordering::SeqCst) { + return Err(key_manager::SecretRetrieverError::Bootstore( + "Timeout".to_string(), + )); + } + if epoch != 0 { + return Err(key_manager::SecretRetrieverError::NoSuchEpoch(epoch)); + } + Ok(key_manager::SecretState::Current(self.get_latest().await?)) + } +} + +/// Helper utility for tests that want to use a StorageManager. +/// +/// Attempts to make it easy to create a set of vdev-based M.2 and U.2 +/// devices, which can be formatted with arbitrary zpools. +pub struct StorageManagerTestHarness { + handle: StorageHandle, + vdev_dir: Option, + vdevs: std::collections::BTreeSet, + next_slot: i64, + #[allow(unused)] + key_requester: StorageKeyRequester, + key_manager_error_injector: Arc, + key_manager_task: tokio::task::JoinHandle<()>, + storage_manager_task: tokio::task::JoinHandle<()>, +} + +impl Drop for StorageManagerTestHarness { + fn drop(&mut self) { + if let Some(vdev_dir) = self.vdev_dir.take() { + eprintln!( + "WARNING: StorageManagerTestHarness called without 'cleanup()'.\n\ + We may have leaked zpools, and not correctly deleted {}", + vdev_dir.path() + ); + + let pools = [ + ( + omicron_common::zpool_name::ZPOOL_INTERNAL_PREFIX, + vdev_dir.path().join("pool/int"), + ), + ( + omicron_common::zpool_name::ZPOOL_EXTERNAL_PREFIX, + vdev_dir.path().join("pool/ext"), + ), + ]; + + eprintln!( + "The following commands may need to be run to clean up state:" + ); + eprintln!("---"); + for (prefix, pool) in pools { + let Ok(entries) = pool.read_dir_utf8() else { + continue; + }; + for entry in entries.flatten() { + eprintln!( + " pfexec zpool destroy {prefix}{} ", + entry.file_name() + ); + } + } + eprintln!(" pfexec rm -rf {}", vdev_dir.path()); + eprintln!("---"); + + panic!("Dropped without cleanup. See stderr for cleanup advice"); + } + } +} + +impl StorageManagerTestHarness { + /// Creates a new StorageManagerTestHarness with no associated disks. 
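Editor's note: the Drop impl above acts as a guard. Zpools and mounted datasets outlive the test process, so the harness refuses to be dropped silently; taking the Option in cleanup() is what disarms the guard. A tiny sketch of the same pattern, with an illustrative ExternalState type:

    struct ExternalState {
        // `Some` while the external resource (here, a temp dir backing zpools)
        // still needs explicit teardown; `None` once cleanup() has run.
        dir: Option<camino_tempfile::Utf8TempDir>,
    }

    impl ExternalState {
        async fn cleanup(&mut self) {
            let Some(dir) = self.dir.take() else { return };
            // ... destroy zpools, fix permissions, etc. ...
            dir.close().expect("failed to remove temporary directory");
        }
    }

    impl Drop for ExternalState {
        fn drop(&mut self) {
            if self.dir.is_some() {
                // A loud failure beats silently leaking pools on the test machine.
                panic!("dropped without cleanup(); zpools may have been leaked");
            }
        }
    }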
+ pub async fn new(log: &Logger) -> Self { + illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); + let tmp = camino_tempfile::tempdir_in("/var/tmp") + .expect("Failed to make temporary directory"); + info!(log, "Using tmp: {}", tmp.path()); + Self::new_with_tmp_dir(log, tmp).await + } + + async fn new_with_tmp_dir( + log: &Logger, + tmp: camino_tempfile::Utf8TempDir, + ) -> Self { + let mount_config = + MountConfig { root: tmp.path().into(), ..Default::default() }; + + let key_manager_error_injector = Arc::new(AtomicBool::new(false)); + let (mut key_manager, key_requester) = key_manager::KeyManager::new( + &log, + HardcodedSecretRetriever { + inject_error: key_manager_error_injector.clone(), + }, + ); + let (manager, handle) = + StorageManager::new(&log, mount_config, key_requester.clone()); + + // Spawn the key_manager so that it will respond to requests for encryption keys + let key_manager_task = + tokio::spawn(async move { key_manager.run().await }); + + // Spawn the storage manager as done by sled-agent + let storage_manager_task = tokio::spawn(async move { + manager.run().await; + }); + + Self { + handle, + vdev_dir: Some(tmp), + vdevs: std::collections::BTreeSet::new(), + next_slot: 0, + key_requester, + key_manager_error_injector, + key_manager_task, + storage_manager_task, + } + } + + /// Emulate a system rebooting. + /// + /// - Stops the currently running tasks and restarts them + /// - Re-inserts all vdevs previously created by [Self::add_vdevs]. + pub async fn reboot(mut self, log: &Logger) -> Self { + // Abort ongoing tasks, in lieu of a cleaner shutdown mechanism. + self.key_manager_task.abort(); + self.storage_manager_task.abort(); + + // Deconstruct the test harness + let vdev_dir = + std::mem::take(&mut self.vdev_dir).expect("Already terminated"); + let vdevs = std::mem::take(&mut self.vdevs); + + // Re-create all the state we created during the constructor, but + // leave the temporary directory as it was "before reboot". + let mut slef = Self::new_with_tmp_dir(log, vdev_dir).await; + slef.next_slot = self.next_slot; + + // Notify ourselves of the new disks, just as the hardware would. + // + // NOTE: Technically, if these disks have pools, they're still imported. + // However, the SledManager doesn't know about them, and wouldn't + // assume they're being managed right now. + for raw_disk in vdevs { + slef.handle + .detected_raw_disk(raw_disk.clone()) + .await // Notify StorageManager + .await // Wait for it to finish processing + .unwrap(); + slef.vdevs.insert(raw_disk.clone()); + } + + slef + } + + #[allow(unused)] + pub(crate) fn mount_config(&self) -> MountConfig { + MountConfig { + root: self + .vdev_dir + .as_ref() + .expect("Harness destroyed?") + .path() + .into(), + ..Default::default() + } + } + + #[allow(unused)] + pub(crate) fn key_requester(&self) -> &StorageKeyRequester { + &self.key_requester + } + + pub const DEFAULT_VDEV_SIZE: u64 = 64 * (1 << 20); + + /// Adds raw devices to the [crate::manager::StorageManager], as if they were detected via + /// hardware. Can be called several times. + /// + /// Each device is [Self::DEFAULT_VDEV_SIZE] in size. + /// Use [Self::add_vdevs_with_size] if you need more control + /// over device sizes. 
+ pub async fn add_vdevs + ?Sized>( + &mut self, + vdevs: &[&P], + ) -> Vec { + self.add_vdevs_with_size( + &vdevs + .iter() + .map(|vdev| (vdev, Self::DEFAULT_VDEV_SIZE)) + .collect::>(), + ) + .await + } + + pub async fn add_vdevs_with_size + ?Sized>( + &mut self, + vdevs: &[(&P, u64)], + ) -> Vec { + let vdev_dir = self + .vdev_dir + .as_ref() + .expect("Cannot add vdevs, test harness terminated"); + let mut added = vec![]; + for (vdev, size) in vdevs + .iter() + .map(|(vdev, size)| (Utf8PathBuf::from(vdev.as_ref()), size)) + { + assert!(vdev.is_relative()); + let vdev_path = vdev_dir.path().join(&vdev); + let raw_disk: RawDisk = + crate::disk::RawSyntheticDisk::new_with_length( + &vdev_path, + *size, + self.next_slot, + ) + .unwrap_or_else(|err| { + panic!( + "Failed to create synthetic disk for {vdev}: {err:?}" + ) + }) + .into(); + self.next_slot += 1; + self.handle + .detected_raw_disk(raw_disk.clone()) + .await // Notify StorageManager + .await // Wait for it to finish processing + .unwrap(); + + self.vdevs.insert(raw_disk.clone()); + added.push(raw_disk); + } + added + } + + // Removes a vdev from the set of "tracked" devices. + // + // This is equivalent to having the hardware monitor unplug a device. + // + // If this device has an associated zpool, it must be either re-attached + // to the harness or manually destroyed before the test completes. + // Otherwise, removing the temporary directory containing that zpool + // will likely fail with a "device busy" error. + pub async fn remove_vdev(&mut self, raw: &RawDisk) { + assert!(self.vdevs.remove(&raw), "Vdev does not exist"); + self.handle + .detected_raw_disk_removal(raw.clone()) + .await + .await + .expect("Failed to remove vdev"); + } + + // Adds a vdev to the set of "tracked" devices. + pub async fn add_vdev_as(&mut self, raw_disk: RawDisk) { + self.handle + .detected_raw_disk(raw_disk.clone()) + .await // Notify StorageManager + .await // Wait for it to finish processing + .unwrap(); + self.vdevs.insert(raw_disk.clone()); + } + + pub fn make_config( + &self, + generation: u32, + disks: &[RawDisk], + ) -> OmicronPhysicalDisksConfig { + let disks = disks + .into_iter() + .map(|raw| { + let identity = raw.identity(); + + crate::disk::OmicronPhysicalDiskConfig { + identity: identity.clone(), + id: Uuid::new_v4(), + pool_id: ZpoolUuid::new_v4(), + } + }) + .collect(); + + OmicronPhysicalDisksConfig { + generation: omicron_common::api::external::Generation::from( + generation, + ), + disks, + } + } + + /// Returns the underlying [crate::manager::StorageHandle]. + pub fn handle_mut(&mut self) -> &mut StorageHandle { + &mut self.handle + } + + /// Returns the underlying [crate::manager::StorageHandle]. + pub fn handle(&self) -> &StorageHandle { + &self.handle + } + + /// Set to "true" to throw errors, "false" to not inject errors. 
+ pub fn key_manager_error_injector(&self) -> &Arc { + &self.key_manager_error_injector + } + + /// Cleanly terminates the test harness + pub async fn cleanup(&mut self) { + let Some(vdev_dir) = self.vdev_dir.take() else { + // Already terminated + return; + }; + + eprintln!("Terminating StorageManagerTestHarness"); + let disks = self.handle().get_latest_disks().await; + let pools = disks.get_all_zpools(); + for (pool, _) in pools { + eprintln!("Destroying pool: {pool:?}"); + if let Err(e) = illumos_utils::zpool::Zpool::destroy(&pool) { + eprintln!("Failed to destroy {pool:?}: {e:?}"); + } + } + + self.key_manager_task.abort(); + self.storage_manager_task.abort(); + + // Make sure that we're actually able to delete everything within the + // temporary directory. + // + // This is necessary because the act of mounting datasets within this + // directory may have created directories owned by root, and the test + // process may not have been started as root. + // + // Since we're about to delete all these files anyway, make them + // accessible to everyone before destroying them. + let mut command = std::process::Command::new("/usr/bin/pfexec"); + let mount = vdev_dir.path(); + let cmd = command.args(["chmod", "-R", "a+rw", mount.as_str()]); + cmd.output().expect( + "Failed to change ownership of the temporary directory we're trying to delete" + ); + + // Actually delete everything, and check the result to fail loud if + // something goes wrong. + vdev_dir.close().expect("Failed to clean up temporary directory"); + } +} diff --git a/sled-storage/src/resources.rs b/sled-storage/src/resources.rs index c1f460dc92..b44c8e5b53 100644 --- a/sled-storage/src/resources.rs +++ b/sled-storage/src/resources.rs @@ -4,17 +4,23 @@ //! Discovered and usable disks and zpools -use crate::dataset::M2_DEBUG_DATASET; -use crate::disk::Disk; +use crate::config::MountConfig; +use crate::dataset::{DatasetError, M2_DEBUG_DATASET}; +use crate::disk::{Disk, DiskError, OmicronPhysicalDiskConfig, RawDisk}; use crate::error::Error; -use crate::pool::Pool; use camino::Utf8PathBuf; use cfg_if::cfg_if; use illumos_utils::zpool::ZpoolName; +use key_manager::StorageKeyRequester; use omicron_common::disk::DiskIdentity; +use omicron_uuid_kinds::ZpoolUuid; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; use sled_hardware::DiskVariant; +use slog::{info, o, warn, Logger}; use std::collections::BTreeMap; use std::sync::Arc; +use tokio::sync::watch; // The directory within the debug dataset in which bundles are created. const BUNDLE_DIRECTORY: &str = "bundle"; @@ -22,129 +28,131 @@ const BUNDLE_DIRECTORY: &str = "bundle"; // The directory for zone bundles. const ZONE_BUNDLE_DIRECTORY: &str = "zone"; -pub enum AddDiskResult { - DiskInserted, - DiskAlreadyInserted, - DiskQueued, +#[derive(Debug, thiserror::Error, JsonSchema, Serialize, Deserialize)] +#[serde(rename_all = "snake_case", tag = "type", content = "value")] +pub enum DiskManagementError { + #[error("Disk requested by control plane, but not found on device")] + NotFound, + + #[error("Expected zpool UUID of {expected}, but saw {observed}")] + ZpoolUuidMismatch { expected: ZpoolUuid, observed: ZpoolUuid }, + + #[error("Failed to access keys necessary to unlock storage. 
This error may be transient.")] + KeyManager(String), + + #[error("Other error starting disk management: {0}")] + Other(String), } -impl AddDiskResult { - pub fn disk_inserted(&self) -> bool { +impl DiskManagementError { + fn retryable(&self) -> bool { match self { - AddDiskResult::DiskInserted => true, + DiskManagementError::KeyManager(_) => true, _ => false, } } } -/// Storage related resources: disks and zpools -/// -/// This state is internal to the [`crate::manager::StorageManager`] task. Clones -/// of this state can be retrieved by requests to the `StorageManager` task -/// from the [`crate::manager::StorageHandle`]. This state is not `Sync`, and -/// as such does not require any mutexes. However, we do expect to share it -/// relatively frequently, and we want copies of it to be as cheaply made -/// as possible. So any large state is stored inside `Arc`s. On the other -/// hand, we expect infrequent updates to this state, and as such, we use -/// [`std::sync::Arc::make_mut`] to implement clone on write functionality -/// inside the `StorageManager` task if there are any outstanding copies. -/// Therefore, we only pay the cost to update infrequently, and no locks are -/// required by callers when operating on cloned data. The only contention here -/// is for the reference counters of the internal Arcs when `StorageResources` -/// gets cloned or dropped. -#[derive(Debug, Clone, Default, PartialEq, Eq)] -pub struct StorageResources { - // All disks, real and synthetic, being managed by this sled - disks: Arc>, +/// Identifies how a single disk management operation may have succeeded or +/// failed. +#[derive(Debug, JsonSchema, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub struct DiskManagementStatus { + pub identity: DiskIdentity, + pub err: Option, } -impl StorageResources { - /// Return a reference to the current snapshot of disks - pub fn disks(&self) -> &BTreeMap { - &self.disks - } +/// The result from attempting to manage underlying disks. +/// +/// This is more complex than a simple "Error" type because it's possible +/// for some disks to be initialized correctly, while others can fail. +/// +/// This structure provides a mechanism for callers to learn about partial +/// failures, and handle them appropriately on a per-disk basis. +#[derive(Default, Debug, JsonSchema, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[must_use = "this `DiskManagementResult` may contain errors, which should be handled"] +pub struct DisksManagementResult { + pub status: Vec, +} - /// Insert a disk and its zpool - /// - /// If the disk passed in is new or modified, or its pool size or pool - /// name changed, then insert the changed values and return `DiskInserted`. - /// Otherwise, do not insert anything and return `DiskAlreadyInserted`. - /// For instance, if only the pool health changes, because it is not one - /// of the checked values, we will not insert the update and will return - /// `DiskAlreadyInserted`. 
- pub(crate) fn insert_disk( - &mut self, - disk: Disk, - ) -> Result { - let disk_id = disk.identity().clone(); - let zpool_name = disk.zpool_name().clone(); - let zpool = Pool::new(zpool_name, disk_id.clone())?; - if let Some((stored_disk, stored_pool)) = self.disks.get(&disk_id) { - if stored_disk == &disk - && stored_pool.info.size() == zpool.info.size() - && stored_pool.name == zpool.name - { - return Ok(AddDiskResult::DiskAlreadyInserted); +impl DisksManagementResult { + pub fn has_error(&self) -> bool { + for status in &self.status { + if status.err.is_some() { + return true; } } - // Either the disk or zpool changed - Arc::make_mut(&mut self.disks).insert(disk_id, (disk, zpool)); - Ok(AddDiskResult::DiskInserted) - } - - /// Insert a disk while creating a fake pool - /// This is a workaround for current mock based testing strategies - /// in the sled-agent. - #[cfg(feature = "testing")] - pub fn insert_fake_disk(&mut self, disk: Disk) -> AddDiskResult { - let disk_id = disk.identity().clone(); - let zpool_name = disk.zpool_name().clone(); - let zpool = Pool::new_with_fake_info(zpool_name, disk_id.clone()); - if self.disks.contains_key(&disk_id) { - return AddDiskResult::DiskAlreadyInserted; - } - // Either the disk or zpool changed - Arc::make_mut(&mut self.disks).insert(disk_id, (disk, zpool)); - AddDiskResult::DiskInserted + false } - /// Delete a disk and its zpool - /// - /// Return true, if data was changed, false otherwise - /// - /// Note: We never allow removal of synthetic disks in production as they - /// are only added once. - pub(crate) fn remove_disk(&mut self, id: &DiskIdentity) -> bool { - let Some((disk, _)) = self.disks.get(id) else { - return false; - }; - - cfg_if! { - if #[cfg(test)] { - // For testing purposes, we allow synthetic disks to be deleted. - // Silence an unused variable warning. - _ = disk; - } else { - // In production, we disallow removal of synthetic disks as they - // are only added once. - if disk.is_synthetic() { - return false; + pub fn has_retryable_error(&self) -> bool { + for status in &self.status { + if let Some(err) = &status.err { + if err.retryable() { + return true; } } } - - // Safe to unwrap as we just checked the key existed above - Arc::make_mut(&mut self.disks).remove(id).unwrap(); - true + false } +} + +// The Sled Agent is responsible for both observing disks and managing them at +// the request of the broader control plane. This enum encompasses that duality, +// by representing all disks that can exist, managed or not. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ManagedDisk { + // A disk explicitly managed by the control plane. + // + // This includes U.2s which Nexus has told us to format and use. + ExplicitlyManaged(Disk), + + // A disk implicitly managed by the control plane. + // + // This includes M.2s which the sled agent auto-detects and uses. + ImplicitlyManaged(Disk), + + // A disk which has been observed by the sled, but which is not yet being + // managed by the control plane. + // + // This disk should be treated as "read-only" until we're explicitly told to + // use it. + Unmanaged(RawDisk), +} + +/// The disks, keyed by their identity, managed by the sled agent. +/// +/// This state is owned by [`crate::manager::StorageManager`], through +/// [`crate::resources::StorageResources`]. Clones of this state can be +/// retrieved by requests to the `StorageManager` task from the +/// [`crate::manager::StorageHandle`]. This state is not `Sync`, and as such +/// does not require any mutexes. 
However, we do expect to share it relatively +/// frequently, and we want copies of it to be as cheaply made as possible. So +/// any large state is stored inside `Arc`s. On the other hand, we expect +/// infrequent updates to this state, and as such, we use +/// [`std::sync::Arc::make_mut`] to implement clone on write functionality +/// inside the `StorageManager` task if there are any outstanding copies. +/// Therefore, we only pay the cost to update infrequently, and no locks are +/// required by callers when operating on cloned data. The only contention here +/// is for the reference counters of the internal Arcs when `AllDisks` +/// gets cloned or dropped. +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct AllDisks { + pub values: Arc>, + pub mount_config: MountConfig, +} +impl AllDisks { /// Returns the identity of the boot disk. /// /// If this returns `None`, we have not processed the boot disk yet. pub fn boot_disk(&self) -> Option<(DiskIdentity, ZpoolName)> { - for (id, (disk, _)) in self.disks.iter() { - if disk.is_boot_disk() { - return Some((id.clone(), disk.zpool_name().clone())); + for (id, disk) in self.values.iter() { + if let ManagedDisk::ImplicitlyManaged(disk) = disk { + if disk.is_boot_disk() { + return Some((id.clone(), disk.zpool_name().clone())); + } } } None @@ -164,7 +172,9 @@ impl StorageResources { pub fn all_m2_mountpoints(&self, dataset: &str) -> Vec { self.all_m2_zpools() .iter() - .map(|zpool| zpool.dataset_mountpoint(dataset)) + .map(|zpool| { + zpool.dataset_mountpoint(&self.mount_config.root, dataset) + }) .collect() } @@ -172,26 +182,41 @@ impl StorageResources { pub fn all_u2_mountpoints(&self, dataset: &str) -> Vec { self.all_u2_zpools() .iter() - .map(|zpool| zpool.dataset_mountpoint(dataset)) + .map(|zpool| { + zpool.dataset_mountpoint(&self.mount_config.root, dataset) + }) .collect() } + /// Returns all zpools managed by the control plane pub fn get_all_zpools(&self) -> Vec<(ZpoolName, DiskVariant)> { - self.disks + self.values .values() - .map(|(disk, _)| (disk.zpool_name().clone(), disk.variant())) + .filter_map(|disk| match disk { + ManagedDisk::ExplicitlyManaged(disk) + | ManagedDisk::ImplicitlyManaged(disk) => { + Some((disk.zpool_name().clone(), disk.variant())) + } + ManagedDisk::Unmanaged(_) => None, + }) .collect() } - // Returns all zpools of a particular variant + // Returns all zpools of a particular variant. + // + // Only returns zpools from disks actively being managed. fn all_zpools(&self, variant: DiskVariant) -> Vec { - self.disks + self.values .values() - .filter_map(|(disk, _)| { - if disk.variant() == variant { - return Some(disk.zpool_name().clone()); + .filter_map(|disk| match disk { + ManagedDisk::ExplicitlyManaged(disk) + | ManagedDisk::ImplicitlyManaged(disk) => { + if disk.variant() == variant { + return Some(disk.zpool_name().clone()); + } + None } - None + ManagedDisk::Unmanaged(_) => None, }) .collect() } @@ -203,4 +228,336 @@ impl StorageResources { .map(|p| p.join(BUNDLE_DIRECTORY).join(ZONE_BUNDLE_DIRECTORY)) .collect() } + + /// Returns an iterator over all managed disks. + pub fn iter_managed(&self) -> impl Iterator { + self.values.iter().filter_map(|(identity, disk)| match disk { + ManagedDisk::ExplicitlyManaged(disk) => Some((identity, disk)), + ManagedDisk::ImplicitlyManaged(disk) => Some((identity, disk)), + _ => None, + }) + } + + /// Returns an iterator over all disks, managed or not. 
+ pub fn iter_all( + &self, + ) -> impl Iterator { + self.values.iter().map(|(identity, disk)| match disk { + ManagedDisk::ExplicitlyManaged(disk) => { + (identity, disk.variant(), disk.slot()) + } + ManagedDisk::ImplicitlyManaged(disk) => { + (identity, disk.variant(), disk.slot()) + } + ManagedDisk::Unmanaged(raw) => { + (identity, raw.variant(), raw.slot()) + } + }) + } +} + +/// The intersection of "physical disks noticed by hardware" and "physical +/// disks requested by the control plane". +#[derive(Debug)] +pub struct StorageResources { + log: Logger, + + key_requester: StorageKeyRequester, + + // All disks, real and synthetic, that exist within this sled + disks: AllDisks, + + // The last set of disks the control plane explicitly told us to manage. + // + // Only includes external storage (U.2s). + control_plane_disks: BTreeMap, + + // Many clients are interested when changes in the set of [AllDisks] + // might occur. This watch channel is updated once these disks get updated. + disk_updates: watch::Sender, +} + +impl StorageResources { + pub fn new( + log: &Logger, + mount_config: MountConfig, + key_requester: StorageKeyRequester, + ) -> Self { + let disks = + AllDisks { values: Arc::new(BTreeMap::new()), mount_config }; + Self { + log: log.new(o!("component" => "StorageResources")), + key_requester, + disks: disks.clone(), + control_plane_disks: BTreeMap::new(), + disk_updates: watch::Sender::new(disks), + } + } + + /// Monitors the set of disks for any updates + pub fn watch_disks(&self) -> watch::Receiver { + self.disk_updates.subscribe() + } + + /// Gets the set of all disks + pub fn disks(&self) -> &AllDisks { + &self.disks + } + + /// Sets the "control plane disk" state, as last requested by Nexus. + /// + /// Does not attempt to manage any of the physical disks previously + /// observed. To synchronize the "set of requested disks" with the "set of + /// observed disks", call [Self::synchronize_disk_management]. + pub fn set_config(&mut self, config: &Vec) { + self.control_plane_disks = config + .iter() + .map(|disk| (disk.identity.clone(), disk.clone())) + .collect(); + } + + pub fn get_config( + &self, + ) -> &BTreeMap { + &self.control_plane_disks + } + + /// Attempts to "manage" all the U.2 disks requested by the control plane. + /// + /// If any requested physical disks have not been observed by the hardware + /// monitor, they are ignored. + /// If the hardware monitor has observed disks that are not requested, they + /// are ignored. + /// + /// Attempts to manage all disks possible, and returns an error on partial + /// failure, indicating "which disks have failed to be synchronized". + pub async fn synchronize_disk_management( + &mut self, + ) -> DisksManagementResult { + let mut updated = false; + let disks = Arc::make_mut(&mut self.disks.values); + info!(self.log, "Synchronizing disk managment"); + + // "Unmanage" all disks no longer requested by the control plane. + // + // This updates the reported sets of "managed" disks, and performs no + // other modifications to the underlying storage. + for (identity, managed_disk) in &mut *disks { + match managed_disk { + // This leaves the presence of the disk still in "Self", but + // downgrades the disk to an unmanaged status. + ManagedDisk::ExplicitlyManaged(disk) => { + if !self.control_plane_disks.contains_key(identity) { + *managed_disk = + ManagedDisk::Unmanaged(RawDisk::from(disk.clone())); + updated = true; + } + } + _ => (), + } + } + + // "Manage" all disks that the control plane wants. 
+ // + // If the disk can be successfully managed, and it's new, it will be + // formatted with a zpool identified by the Nexus-specified + // configuration. + let mut result = DisksManagementResult::default(); + for (identity, config) in &self.control_plane_disks { + let Some(managed_disk) = disks.get_mut(identity) else { + warn!( + self.log, + "Control plane disk requested, but not detected within sled"; + "disk_identity" => ?identity + ); + result.status.push(DiskManagementStatus { + identity: identity.clone(), + err: Some(DiskManagementError::NotFound), + }); + continue; + }; + info!(self.log, "Managing disk"; "disk_identity" => ?identity); + match managed_disk { + // Disk is currently unmanaged. Try to adopt the disk, which may + // involve formatting it, and emplacing the zpool. + ManagedDisk::Unmanaged(raw_disk) => { + match Self::begin_disk_management( + &self.log, + &self.disks.mount_config, + raw_disk, + config, + Some(&self.key_requester), + ) + .await + { + Ok(disk) => { + info!(self.log, "Disk management started successfully"; "disk_identity" => ?identity); + *managed_disk = disk; + updated = true; + } + Err(err) => { + warn!(self.log, "Cannot parse disk"; "err" => ?err); + result.status.push(DiskManagementStatus { + identity: identity.clone(), + err: Some(err), + }); + continue; + } + } + } + // Disk is already managed. Check that the configuration + // matches what we expect. + ManagedDisk::ExplicitlyManaged(disk) => { + let expected = config.pool_id; + let observed = disk.zpool_name().id(); + if expected != observed { + warn!( + self.log, + "Observed an unexpected zpool uuid"; + "expected" => ?expected, "observed" => ?observed + ); + result.status.push(DiskManagementStatus { + identity: identity.clone(), + err: Some(DiskManagementError::ZpoolUuidMismatch { + expected, + observed, + }), + }); + continue; + } + info!(self.log, "Disk already managed successfully"; "disk_identity" => ?identity); + } + // Skip disks that are managed implicitly + ManagedDisk::ImplicitlyManaged(_) => continue, + } + + result.status.push(DiskManagementStatus { + identity: identity.clone(), + err: None, + }); + } + + if updated { + self.disk_updates.send_replace(self.disks.clone()); + } + + return result; + } + + // Helper function to help transition an "unmanaged" disk to a "managed" + // disk. + async fn begin_disk_management( + log: &Logger, + mount_config: &MountConfig, + raw_disk: &RawDisk, + config: &OmicronPhysicalDiskConfig, + key_requester: Option<&StorageKeyRequester>, + ) -> Result { + info!(log, "Invoking Disk::new on an unmanaged disk"); + let disk = Disk::new( + &log, + mount_config, + raw_disk.clone(), + Some(config.pool_id), + key_requester, + ) + .await + .map_err(|err| { + warn!(log, "Disk::new failed"; "err" => ?err); + match err { + // We pick this error out and identify it separately because + // it may be transient, and should sometimes be handled with + // a retry. + DiskError::Dataset(DatasetError::KeyManager(_)) => { + DiskManagementError::KeyManager(err.to_string()) + } + err => DiskManagementError::Other(err.to_string()), + } + })?; + info!(log, "Disk::new completed successfully"; "disk_identity" => ?raw_disk.identity()); + Ok(ManagedDisk::ExplicitlyManaged(disk)) + } + + /// Tracks a new disk. + /// + /// For U.2s: Does not automatically attempt to manage disks -- for this, + /// the caller will need to also invoke + /// [`Self::synchronize_disk_management`]. 
+ /// + /// For M.2s: As no additional control plane guidance is necessary to adopt + /// M.2s, these are automatically managed. + pub(crate) async fn insert_disk( + &mut self, + disk: RawDisk, + ) -> Result<(), Error> { + let disk_identity = disk.identity().clone(); + info!(self.log, "Inserting disk"; "identity" => ?disk_identity); + if self.disks.values.contains_key(&disk_identity) { + info!(self.log, "Disk already exists"; "identity" => ?disk_identity); + return Ok(()); + } + + let disks = Arc::make_mut(&mut self.disks.values); + match disk.variant() { + DiskVariant::U2 => { + disks.insert(disk_identity, ManagedDisk::Unmanaged(disk)); + } + DiskVariant::M2 => { + let managed_disk = Disk::new( + &self.log, + &self.disks.mount_config, + disk, + None, + Some(&self.key_requester), + ) + .await?; + disks.insert( + disk_identity, + ManagedDisk::ImplicitlyManaged(managed_disk), + ); + } + } + self.disk_updates.send_replace(self.disks.clone()); + + Ok(()) + } + + /// Delete a disk and its zpool + /// + /// Return true, if data was changed, false otherwise + /// + /// Note: We never allow removal of synthetic disks in production as they + /// are only added once. + pub(crate) fn remove_disk(&mut self, id: &DiskIdentity) { + info!(self.log, "Removing disk"; "identity" => ?id); + let Some(entry) = self.disks.values.get(id) else { + info!(self.log, "Disk not found by id, exiting"; "identity" => ?id); + return; + }; + + let synthetic = match entry { + ManagedDisk::ExplicitlyManaged(disk) + | ManagedDisk::ImplicitlyManaged(disk) => disk.is_synthetic(), + ManagedDisk::Unmanaged(raw) => raw.is_synthetic(), + }; + + cfg_if! { + if #[cfg(test)] { + // For testing purposes, we allow synthetic disks to be deleted. + // Silence an unused variable warning. + _ = synthetic; + } else { + // In production, we disallow removal of synthetic disks as they + // are only added once. + if synthetic { + info!(self.log, "Not removing synthetic disk"; "identity" => ?id); + return; + } + } + } + + // Safe to unwrap as we just checked the key existed above + Arc::make_mut(&mut self.disks.values).remove(id).unwrap(); + self.disk_updates.send_replace(self.disks.clone()); + } } diff --git a/smf/ntp/etc/logadm.d/chrony.logadm.conf b/smf/chrony-setup/etc/logadm.d/chrony.logadm.conf similarity index 100% rename from smf/ntp/etc/logadm.d/chrony.logadm.conf rename to smf/chrony-setup/etc/logadm.d/chrony.logadm.conf diff --git a/smf/chrony-setup/manifest.xml b/smf/chrony-setup/manifest.xml new file mode 100644 index 0000000000..f31f13a2ea --- /dev/null +++ b/smf/chrony-setup/manifest.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/smf/nexus/multi-sled/config-partial.toml b/smf/nexus/multi-sled/config-partial.toml index 553cdb0aef..3827cbb38c 100644 --- a/smf/nexus/multi-sled/config-partial.toml +++ b/smf/nexus/multi-sled/config-partial.toml @@ -33,6 +33,7 @@ dns_external.period_secs_config = 60 dns_external.period_secs_servers = 60 dns_external.period_secs_propagation = 60 dns_external.max_concurrent_server_updates = 5 +metrics_producer_gc.period_secs = 60 # How frequently we check the list of stored TLS certificates. 
This is # approximately an upper bound on how soon after updating the list of # certificates it will take _other_ Nexus instances to notice and stop serving @@ -48,11 +49,16 @@ inventory.nkeep = 3 # Disable inventory collection altogether (for emergencies) inventory.disable = false phantom_disks.period_secs = 30 +physical_disk_adoption.period_secs = 30 blueprints.period_secs_load = 10 blueprints.period_secs_execute = 60 sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 +service_firewall_propagation.period_secs = 300 +v2p_mapping_propagation.period_secs = 30 +instance_watcher.period_secs = 30 +abandoned_vmm_reaper.period_secs = 60 [default_region_allocation_strategy] # by default, allocate across 3 distinct sleds diff --git a/smf/nexus/single-sled/config-partial.toml b/smf/nexus/single-sled/config-partial.toml index 9f7cb959d3..ee04f88e59 100644 --- a/smf/nexus/single-sled/config-partial.toml +++ b/smf/nexus/single-sled/config-partial.toml @@ -33,6 +33,7 @@ dns_external.period_secs_config = 60 dns_external.period_secs_servers = 60 dns_external.period_secs_propagation = 60 dns_external.max_concurrent_server_updates = 5 +metrics_producer_gc.period_secs = 60 # How frequently we check the list of stored TLS certificates. This is # approximately an upper bound on how soon after updating the list of # certificates it will take _other_ Nexus instances to notice and stop serving @@ -48,11 +49,16 @@ inventory.nkeep = 3 # Disable inventory collection altogether (for emergencies) inventory.disable = false phantom_disks.period_secs = 30 +physical_disk_adoption.period_secs = 30 blueprints.period_secs_load = 10 blueprints.period_secs_execute = 60 sync_service_zone_nat.period_secs = 30 switch_port_settings_manager.period_secs = 30 region_replacement.period_secs = 30 +service_firewall_propagation.period_secs = 300 +v2p_mapping_propagation.period_secs = 30 +instance_watcher.period_secs = 30 +abandoned_vmm_reaper.period_secs = 60 [default_region_allocation_strategy] # by default, allocate without requirement for distinct sleds. diff --git a/smf/ntp/etc/inet/chrony.conf.boundary b/smf/ntp/etc/inet/chrony.conf.boundary deleted file mode 100644 index d13bc9c815..0000000000 --- a/smf/ntp/etc/inet/chrony.conf.boundary +++ /dev/null @@ -1,32 +0,0 @@ -# -# Configuration file for a boundary NTP server - one which communicates with -# NTP servers outside the rack. -# - -pool @SERVER@ iburst maxdelay 0.1 maxsources 16 - -driftfile /var/lib/chrony/drift -ntsdumpdir /var/lib/chrony -dumpdir /var/lib/chrony -pidfile /var/run/chrony/chronyd.pid -logdir /var/log/chrony - -log measurements statistics tracking - -allow fe80::/10 -allow @ALLOW@ - -# Enable local reference mode, which keeps us operating as an NTP server that -# appears synchronised even if there are currently no active upstreams. When -# in this mode, we report as stratum 10 to clients. -local stratum 10 - -# makestep -# We allow chrony to step the system clock during the first three time updates -# if we are more than 0.1 seconds out. -makestep 0.1 3 - -# When a leap second occurs we slew the clock over approximately 37 seconds. 
-leapsecmode slew -maxslewrate 2708.333 - diff --git a/smf/ntp/etc/inet/chrony.conf.internal b/smf/ntp/etc/inet/chrony.conf.internal deleted file mode 100644 index 9e9ff3ddea..0000000000 --- a/smf/ntp/etc/inet/chrony.conf.internal +++ /dev/null @@ -1,31 +0,0 @@ -# -# Configuration file for an internal NTP server - one which communicates with -# boundary NTP servers within the rack. -# - -server @SERVER@ iburst minpoll 0 maxpoll 4 - -driftfile /var/lib/chrony/drift -ntsdumpdir /var/lib/chrony -dumpdir /var/lib/chrony -pidfile /var/run/chrony/chronyd.pid -logdir /var/log/chrony - -log measurements statistics tracking - -# makestep -# We allow chrony to step the system clock if we are more than a day out, -# regardless of how many clock updates have occurred since boot. -# The boundary NTP servers are configured with local reference mode, which -# means that if they start up without external connectivity, they will appear -# as authoritative servers even if they are advertising January 1987 -# (which is the default system clock on a gimlet after boot). -# This configuration allows a one-off adjustment once RSS begins and the -# boundary servers are synchronised, after which the clock will advance -# monotonically forwards. -makestep 86400 -1 - -# When a leap second occurs we slew the clock over approximately 37 seconds. -leapsecmode slew -maxslewrate 2708.333 - diff --git a/smf/ntp/manifest/manifest.xml b/smf/ntp/manifest/manifest.xml index 7783bbe76c..df427a16a5 100644 --- a/smf/ntp/manifest/manifest.xml +++ b/smf/ntp/manifest/manifest.xml @@ -39,6 +39,11 @@ + + + + @@ -57,7 +62,9 @@ The service also always starts the binary with ASLR enabled, regardless of whether it was linked with -zaslr --> - - - - - - - - - - -
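
For orientation, the following is a minimal sketch of how a sled-storage test might drive the new StorageManagerTestHarness introduced in this change. It only uses the APIs shown above (new, add_vdevs, make_config, cleanup); the harness module path and the logger construction are assumptions, and the StorageHandle call that would actually hand the generated config to the StorageManager is not part of this diff, so it is only referenced in a comment.

    // Sketch only: module path and logger setup are assumptions, not taken
    // from this diff.
    use crate::manager_test_harness::StorageManagerTestHarness; // path assumed
    use slog::{o, Logger};

    #[tokio::test]
    async fn storage_harness_smoke_test() {
        // A throwaway logger; real tests would use their usual test-log setup.
        let log = Logger::root(slog::Discard, o!());

        // Two synthetic U.2 vdevs, each DEFAULT_VDEV_SIZE (64 MiB), backed by
        // files under a temporary directory in /var/tmp.
        let mut harness = StorageManagerTestHarness::new(&log).await;
        let raw_disks = harness.add_vdevs(&["u2_0.vdev", "u2_1.vdev"]).await;

        // Build the control-plane view of those disks: generation 1, with a
        // freshly generated pool UUID per disk.
        let config = harness.make_config(1, &raw_disks);
        assert_eq!(config.disks.len(), raw_disks.len());

        // A real test would now pass `config` to the StorageManager through
        // `harness.handle()` and inspect the returned DisksManagementResult
        // (e.g. assert `!result.has_error()`); that call is not shown in this
        // diff, so it is omitted here.

        // Always terminate explicitly: dropping the harness without cleanup()
        // panics and prints `zpool destroy` advice rather than leaking pools.
        harness.cleanup().await;
    }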
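
The doc comment on AllDisks describes a clone-on-write scheme: snapshots are cheap to clone because the large state sits behind an Arc, and the single writer mutates through Arc::make_mut so outstanding readers keep their old snapshot. A small self-contained illustration of that pattern, using placeholder types rather than the crate's own:

    use std::collections::BTreeMap;
    use std::sync::Arc;

    /// A cheaply clonable snapshot, mirroring the shape of `AllDisks`.
    #[derive(Clone)]
    struct Snapshot {
        values: Arc<BTreeMap<String, u64>>,
    }

    fn main() {
        let mut current = Snapshot { values: Arc::new(BTreeMap::new()) };

        // A reader grabs a clone; only the Arc refcount is bumped.
        let reader_view = current.clone();

        // The writer mutates via make_mut. Because `reader_view` still holds
        // a reference, the map is deep-copied exactly once here; with no
        // outstanding clones, make_mut mutates in place with no copy at all.
        Arc::make_mut(&mut current.values).insert("disk-0".to_string(), 1);

        assert!(reader_view.values.is_empty()); // old snapshot is unchanged
        assert_eq!(current.values.len(), 1);    // writer sees the update
    }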
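
StorageResources publishes each new AllDisks snapshot through a tokio watch channel (disk_updates.send_replace(...)), and interested clients subscribe via watch_disks(). A stand-alone sketch of that notification pattern, with a hypothetical payload standing in for AllDisks:

    use tokio::sync::watch;

    #[tokio::main]
    async fn main() {
        // The writer owns the Sender, as StorageResources owns `disk_updates`.
        let (tx, mut rx) = watch::channel(0u64); // u64 stands in for AllDisks

        // A client task waits for changes, like a consumer of watch_disks().
        let waiter = tokio::spawn(async move {
            rx.changed().await.expect("sender dropped");
            *rx.borrow_and_update()
        });

        // When the disk set changes, the writer replaces the snapshot; every
        // subscriber is woken and observes the latest value.
        tx.send_replace(1);

        assert_eq!(waiter.await.unwrap(), 1);
    }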