diff --git a/.config/hakari.toml b/.config/hakari.toml new file mode 100644 index 0000000000..9562f92300 --- /dev/null +++ b/.config/hakari.toml @@ -0,0 +1,29 @@ +# This file contains settings for `cargo hakari`. +# See https://docs.rs/cargo-hakari/latest/cargo_hakari/config for a full list of options. + +hakari-package = "omicron-workspace-hack" + +# Format for `workspace-hack = ...` lines in other Cargo.tomls. Requires cargo-hakari 0.9.8 or above. +dep-format-version = "4" + +# Setting workspace.resolver = "2" in the root Cargo.toml is HIGHLY recommended. +# Hakari works much better with the new feature resolver. +# For more about the new feature resolver, see: +# https://blog.rust-lang.org/2021/03/25/Rust-1.51.0.html#cargos-new-feature-resolver +resolver = "2" + +# Add triples corresponding to platforms commonly used by developers here. +# https://doc.rust-lang.org/rustc/platform-support.html +platforms = [ + "x86_64-unknown-linux-gnu", + "x86_64-apple-darwin", + "aarch64-apple-darwin", + "x86_64-unknown-illumos", + # "x86_64-pc-windows-msvc", +] + +[traversal-excludes] +workspace-members = ["xtask"] + +# Write out exact versions rather than a semver range. (Defaults to false.) +# exact-versions = true diff --git a/.config/nextest.toml b/.config/nextest.toml index 32890f66a8..b2a8b360bb 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -3,7 +3,16 @@ # # The required version should be bumped up if we need new features, performance # improvements or bugfixes that are present in newer versions of nextest. -nextest-version = { required = "0.9.55", recommended = "0.9.57" } +nextest-version = { required = "0.9.59", recommended = "0.9.59" } + +experimental = ["setup-scripts"] + +[[profile.default.scripts]] +filter = 'rdeps(nexus-test-utils)' +setup = 'crdb-seed' [profile.ci] fail-fast = false + +[script.crdb-seed] +command = 'cargo run -p crdb-seed' diff --git a/.github/buildomat/build-and-test.sh b/.github/buildomat/build-and-test.sh index 4245c7f458..8791ea2fa6 100755 --- a/.github/buildomat/build-and-test.sh +++ b/.github/buildomat/build-and-test.sh @@ -7,7 +7,7 @@ set -o xtrace # NOTE: This version should be in sync with the recommended version in # .config/nextest.toml. (Maybe build an automated way to pull the recommended # version in the future.) -NEXTEST_VERSION='0.9.57' +NEXTEST_VERSION='0.9.59' cargo --version rustc --version @@ -66,6 +66,11 @@ ptime -m timeout 2h cargo nextest run --profile ci --locked --verbose banner doctest ptime -m timeout 1h cargo test --doc --locked --verbose --no-fail-fast +# We expect the seed CRDB to be placed here, so we explicitly remove it so the +# rmdir check below doesn't get triggered. Nextest doesn't have support for +# teardown scripts so this is the best we've got. +rm -rf "$TEST_TMPDIR/crdb-base" + # # Make sure that we have left nothing around in $TEST_TMPDIR. The easiest way # to check is to try to remove it with `rmdir`. 
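The `crdb-seed` setup script registered above hands the seed location to tests through nextest's `NEXTEST_ENV` file (the script, added later in this diff, writes the `CRDB_SEED_DIR` variable into that file). As a rough sketch, assuming a hypothetical helper that is not part of this change, a test utility could pick that value up as follows, falling back to seeding from scratch when the variable is unset (for example, when tests are run outside of nextest):

```rust
// Hypothetical helper, not part of this diff: read the seed directory
// exported by the `crdb-seed` nextest setup script via NEXTEST_ENV.
use std::path::PathBuf;

fn crdb_seed_dir() -> Option<PathBuf> {
    // Unset when tests run outside of nextest; callers would then build
    // a fresh seed instead of reusing one.
    std::env::var_os("CRDB_SEED_DIR").map(PathBuf::from)
}
```

Because the script names each seed directory after a digest of `dbinit.sql` and the pinned CockroachDB version, a stale seed is never matched by name and a fresh one is built instead.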
diff --git a/.github/buildomat/jobs/build-and-test-helios.sh b/.github/buildomat/jobs/build-and-test-helios.sh index c1a9bee581..f9722a2b92 100755 --- a/.github/buildomat/jobs/build-and-test-helios.sh +++ b/.github/buildomat/jobs/build-and-test-helios.sh @@ -3,7 +3,7 @@ #: name = "build-and-test (helios)" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.0" +#: rust_toolchain = "1.72.1" #: output_rules = [ #: "/var/tmp/omicron_tmp/*", #: "!/var/tmp/omicron_tmp/crdb-base*", diff --git a/.github/buildomat/jobs/build-and-test-linux.sh b/.github/buildomat/jobs/build-and-test-linux.sh index 50bdb22e56..f33d1a8cfa 100755 --- a/.github/buildomat/jobs/build-and-test-linux.sh +++ b/.github/buildomat/jobs/build-and-test-linux.sh @@ -3,7 +3,7 @@ #: name = "build-and-test (ubuntu-20.04)" #: variety = "basic" #: target = "ubuntu-20.04" -#: rust_toolchain = "1.72.0" +#: rust_toolchain = "1.72.1" #: output_rules = [ #: "/var/tmp/omicron_tmp/*", #: "!/var/tmp/omicron_tmp/crdb-base*", diff --git a/.github/buildomat/jobs/ci-tools.sh b/.github/buildomat/jobs/ci-tools.sh index b5d1d26f1a..702561a951 100755 --- a/.github/buildomat/jobs/ci-tools.sh +++ b/.github/buildomat/jobs/ci-tools.sh @@ -3,7 +3,7 @@ #: name = "helios / CI tools" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.0" +#: rust_toolchain = "1.72.1" #: output_rules = [ #: "=/work/end-to-end-tests/*.gz", #: "=/work/caboose-util.gz", diff --git a/.github/buildomat/jobs/clippy.sh b/.github/buildomat/jobs/clippy.sh index 184b4ccbe6..dba1021919 100755 --- a/.github/buildomat/jobs/clippy.sh +++ b/.github/buildomat/jobs/clippy.sh @@ -3,7 +3,7 @@ #: name = "clippy (helios)" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.0" +#: rust_toolchain = "1.72.1" #: output_rules = [] # Run clippy on illumos (not just other systems) because a bunch of our code diff --git a/.github/buildomat/jobs/host-image.sh b/.github/buildomat/jobs/host-image.sh index 0e2191ebc8..ba0b4e1ac3 100755 --- a/.github/buildomat/jobs/host-image.sh +++ b/.github/buildomat/jobs/host-image.sh @@ -3,7 +3,7 @@ #: name = "helios / build OS image" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.0" +#: rust_toolchain = "1.72.1" #: output_rules = [ #: "=/work/helios/image/output/os.tar.gz", #: ] diff --git a/.github/buildomat/jobs/package.sh b/.github/buildomat/jobs/package.sh index eedb3b1723..fe5d6a9b7f 100755 --- a/.github/buildomat/jobs/package.sh +++ b/.github/buildomat/jobs/package.sh @@ -3,7 +3,7 @@ #: name = "helios / package" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.0" +#: rust_toolchain = "1.72.1" #: output_rules = [ #: "=/work/version.txt", #: "=/work/package.tar.gz", diff --git a/.github/buildomat/jobs/trampoline-image.sh b/.github/buildomat/jobs/trampoline-image.sh index 794ebd96c1..6014d7dca0 100755 --- a/.github/buildomat/jobs/trampoline-image.sh +++ b/.github/buildomat/jobs/trampoline-image.sh @@ -3,7 +3,7 @@ #: name = "helios / build trampoline OS image" #: variety = "basic" #: target = "helios-2.0" -#: rust_toolchain = "1.72.0" +#: rust_toolchain = "1.72.1" #: output_rules = [ #: "=/work/helios/image/output/os.tar.gz", #: ] diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml new file mode 100644 index 0000000000..d79196d318 --- /dev/null +++ b/.github/workflows/hakari.yml @@ -0,0 +1,37 @@ +# This workflow file serves as an example for cargo-hakari CI integration. 
+ +on: + push: + branches: + - main + pull_request: + branches: + - main + +name: cargo hakari + +jobs: + workspace-hack-check: + name: Check workspace-hack + runs-on: ubuntu-latest + env: + RUSTFLAGS: -D warnings + steps: + - uses: actions/checkout@v4 + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + - name: Install cargo-hakari + uses: taiki-e/install-action@v2 + with: + tool: cargo-hakari + - name: Check workspace-hack Cargo.toml is up-to-date + uses: actions-rs/cargo@v1 + with: + command: hakari + args: generate --diff + - name: Check all crates depend on workspace-hack + uses: actions-rs/cargo@v1 + with: + command: hakari + args: manage-deps --dry-run diff --git a/Cargo.lock b/Cargo.lock index bcd07154e8..b7296ea184 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -184,6 +184,7 @@ dependencies = [ name = "api_identity" version = "0.1.0" dependencies = [ + "omicron-workspace-hack", "proc-macro2", "quote", "syn 2.0.32", @@ -286,7 +287,7 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-bb8-diesel" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/async-bb8-diesel?rev=be3d9bce50051d8c0e0c06078e8066cc27db3001#be3d9bce50051d8c0e0c06078e8066cc27db3001" +source = "git+https://github.com/oxidecomputer/async-bb8-diesel?rev=da04c087f835a51e0441addb19c5ef4986e1fcf2#da04c087f835a51e0441addb19c5ef4986e1fcf2" dependencies = [ "async-trait", "bb8", @@ -370,6 +371,7 @@ name = "authz-macros" version = "0.1.0" dependencies = [ "heck 0.4.1", + "omicron-workspace-hack", "proc-macro2", "quote", "serde", @@ -658,6 +660,7 @@ dependencies = [ "omicron-common 0.1.0", "omicron-rpaths", "omicron-test-utils", + "omicron-workspace-hack", "pq-sys", "proptest", "rand 0.8.5", @@ -684,6 +687,7 @@ dependencies = [ "chrono", "ipnetwork", "omicron-common 0.1.0", + "omicron-workspace-hack", "progenitor", "regress", "reqwest", @@ -789,6 +793,7 @@ version = "0.1.0" dependencies = [ "anyhow", "hubtools", + "omicron-workspace-hack", ] [[package]] @@ -1236,6 +1241,21 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "crdb-seed" +version = "0.1.0" +dependencies = [ + "camino", + "camino-tempfile", + "dropshot", + "hex", + "omicron-test-utils", + "omicron-workspace-hack", + "ring", + "slog", + "tokio", +] + [[package]] name = "criterion" version = "0.5.1" @@ -1712,6 +1732,7 @@ name = "db-macros" version = "0.1.0" dependencies = [ "heck 0.4.1", + "omicron-workspace-hack", "proc-macro2", "quote", "rustfmt-wrapper", @@ -1727,6 +1748,7 @@ dependencies = [ "anyhow", "either", "omicron-common 0.1.0", + "omicron-workspace-hack", "omicron-zone-package", "progenitor", "progenitor-client", @@ -1995,6 +2017,7 @@ dependencies = [ "expectorate", "http", "omicron-test-utils", + "omicron-workspace-hack", "openapi-lint", "openapiv3", "pretty-hex 0.3.0", @@ -2025,6 +2048,7 @@ version = "0.1.0" dependencies = [ "chrono", "http", + "omicron-workspace-hack", "progenitor", "reqwest", "schemars", @@ -2081,6 +2105,7 @@ dependencies = [ "futures", "http", "ipnetwork", + "omicron-workspace-hack", "omicron-zone-package", "progenitor", "progenitor-client", @@ -2266,6 +2291,7 @@ dependencies = [ "http", "omicron-sled-agent", "omicron-test-utils", + "omicron-workspace-hack", "oxide-client", "rand 0.8.5", "reqwest", @@ -2706,6 +2732,7 @@ dependencies = [ "hex", "libc", "omicron-common 0.1.0", + "omicron-workspace-hack", "reqwest", "serde", "serde_json", @@ -2724,6 +2751,7 @@ version = "0.1.0" dependencies = [ "base64 0.21.4", "chrono", + "omicron-workspace-hack", 
"progenitor", "rand 0.8.5", "reqwest", @@ -2788,6 +2816,7 @@ dependencies = [ "gateway-messages", "omicron-gateway", "omicron-test-utils", + "omicron-workspace-hack", "slog", "sp-sim", "tokio", @@ -3365,6 +3394,7 @@ dependencies = [ "macaddr", "mockall", "omicron-common 0.1.0", + "omicron-workspace-hack", "opte-ioctl", "oxide-vpc", "regress", @@ -3474,6 +3504,7 @@ dependencies = [ "libc", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "once_cell", "partial-io", "progenitor-client", @@ -3503,6 +3534,7 @@ name = "installinator-artifact-client" version = "0.1.0" dependencies = [ "installinator-common", + "omicron-workspace-hack", "progenitor", "regress", "reqwest", @@ -3528,6 +3560,7 @@ dependencies = [ "installinator-common", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "openapi-lint", "openapiv3", "schemars", @@ -3547,6 +3580,7 @@ dependencies = [ "camino", "illumos-utils", "omicron-common 0.1.0", + "omicron-workspace-hack", "schemars", "serde", "serde_json", @@ -3579,6 +3613,7 @@ dependencies = [ "hyper", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "progenitor", "reqwest", "serde", @@ -3621,6 +3656,7 @@ dependencies = [ "dropshot", "internal-dns 0.1.0", "omicron-common 0.1.0", + "omicron-workspace-hack", "slog", "tokio", "trust-dns-resolver", @@ -3644,6 +3680,7 @@ dependencies = [ "ciborium", "libc", "omicron-common 0.1.0", + "omicron-workspace-hack", "proptest", "serde", "test-strategy", @@ -3748,6 +3785,7 @@ dependencies = [ "async-trait", "hkdf", "omicron-common 0.1.0", + "omicron-workspace-hack", "secrecy", "sha3", "slog", @@ -4238,6 +4276,7 @@ dependencies = [ "ipnetwork", "omicron-common 0.1.0", "omicron-passwords 0.1.0", + "omicron-workspace-hack", "progenitor", "regress", "reqwest", @@ -4287,6 +4326,7 @@ dependencies = [ "omicron-common 0.1.0", "omicron-passwords 0.1.0", "omicron-rpaths", + "omicron-workspace-hack", "parse-display", "pq-sys", "rand 0.8.5", @@ -4342,6 +4382,7 @@ dependencies = [ "omicron-rpaths", "omicron-sled-agent", "omicron-test-utils", + "omicron-workspace-hack", "once_cell", "openapiv3", "openssl", @@ -4387,6 +4428,7 @@ dependencies = [ "ipnetwork", "lazy_static", "omicron-common 0.1.0", + "omicron-workspace-hack", "rand 0.8.5", "serde_json", ] @@ -4400,6 +4442,7 @@ dependencies = [ "internal-dns 0.1.0", "nexus-types", "omicron-common 0.1.0", + "omicron-workspace-hack", "slog", "uuid", ] @@ -4428,6 +4471,7 @@ dependencies = [ "omicron-passwords 0.1.0", "omicron-sled-agent", "omicron-test-utils", + "omicron-workspace-hack", "oximeter 0.1.0", "oximeter-client", "oximeter-collector", @@ -4438,7 +4482,6 @@ dependencies = [ "serde_urlencoded", "slog", "tempfile", - "tokio", "trust-dns-proto", "trust-dns-resolver", "uuid", @@ -4448,6 +4491,7 @@ dependencies = [ name = "nexus-test-utils-macros" version = "0.1.0" dependencies = [ + "omicron-workspace-hack", "proc-macro2", "quote", "syn 2.0.32", @@ -4466,6 +4510,7 @@ dependencies = [ "newtype_derive", "omicron-common 0.1.0", "omicron-passwords 0.1.0", + "omicron-workspace-hack", "openssl", "openssl-probe", "openssl-sys", @@ -4772,6 +4817,7 @@ dependencies = [ "foreign-types 0.3.2", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "openssl", "openssl-sys", "rcgen", @@ -4799,6 +4845,7 @@ dependencies = [ "lazy_static", "libc", "macaddr", + "omicron-workspace-hack", "parse-display", "progenitor", "proptest", @@ -4872,6 +4919,7 @@ dependencies = [ "clap 4.4.3", "crossbeam", "omicron-package", + 
"omicron-workspace-hack", "serde", "serde_derive", "thiserror", @@ -4897,6 +4945,7 @@ dependencies = [ "omicron-rpaths", "omicron-sled-agent", "omicron-test-utils", + "omicron-workspace-hack", "openssl", "oxide-client", "pq-sys", @@ -4930,6 +4979,7 @@ dependencies = [ "ipcc-key-value", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "once_cell", "openapi-lint", "openapiv3", @@ -5007,6 +5057,7 @@ dependencies = [ "omicron-rpaths", "omicron-sled-agent", "omicron-test-utils", + "omicron-workspace-hack", "once_cell", "openapi-lint", "openapiv3", @@ -5086,6 +5137,7 @@ dependencies = [ "omicron-nexus", "omicron-rpaths", "omicron-test-utils", + "omicron-workspace-hack", "pq-sys", "regex", "serde", @@ -5112,6 +5164,7 @@ dependencies = [ "illumos-utils", "indicatif", "omicron-common 0.1.0", + "omicron-workspace-hack", "omicron-zone-package", "petgraph", "rayon", @@ -5141,6 +5194,7 @@ version = "0.1.0" dependencies = [ "argon2", "criterion", + "omicron-workspace-hack", "rand 0.8.5", "rust-argon2", "schemars", @@ -5165,6 +5219,9 @@ dependencies = [ [[package]] name = "omicron-rpaths" version = "0.1.0" +dependencies = [ + "omicron-workspace-hack", +] [[package]] name = "omicron-sled-agent" @@ -5210,6 +5267,7 @@ dependencies = [ "nexus-client 0.1.0", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "once_cell", "openapi-lint", "openapiv3", @@ -5257,6 +5315,7 @@ name = "omicron-test-utils" version = "0.1.0" dependencies = [ "anyhow", + "camino", "dropshot", "expectorate", "futures", @@ -5264,6 +5323,7 @@ dependencies = [ "http", "libc", "omicron-common 0.1.0", + "omicron-workspace-hack", "pem", "rcgen", "regex", @@ -5278,6 +5338,110 @@ dependencies = [ "usdt", ] +[[package]] +name = "omicron-workspace-hack" +version = "0.1.0" +dependencies = [ + "anyhow", + "bit-set", + "bit-vec", + "bitflags 1.3.2", + "bitflags 2.4.0", + "bitvec", + "bstr 0.2.17", + "bstr 1.6.0", + "bytes", + "cc", + "chrono", + "cipher", + "clap 4.4.3", + "clap_builder", + "console", + "const-oid", + "crossbeam-epoch", + "crossbeam-utils", + "crypto-common", + "diesel", + "digest", + "either", + "flate2", + "futures", + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", + "gateway-messages", + "generic-array", + "getrandom 0.2.10", + "hashbrown 0.13.2", + "hashbrown 0.14.0", + "hex", + "hyper", + "hyper-rustls", + "indexmap 2.0.0", + "inout", + "ipnetwork", + "itertools 0.10.5", + "lalrpop-util", + "lazy_static", + "libc", + "log", + "managed", + "memchr", + "mio", + "num-bigint", + "num-integer", + "num-iter", + "num-traits", + "once_cell", + "openapiv3", + "petgraph", + "postgres-types", + "ppv-lite86", + "predicates 3.0.3", + "rand 0.8.5", + "rand_chacha 0.3.1", + "regex", + "regex-automata 0.3.8", + "regex-syntax 0.7.5", + "reqwest", + "ring", + "rustix 0.38.9", + "schemars", + "semver 1.0.18", + "serde", + "sha2", + "signature 2.1.0", + "similar", + "slog", + "spin 0.9.8", + "string_cache", + "subtle", + "syn 1.0.109", + "syn 2.0.32", + "textwrap 0.16.0", + "time", + "time-macros", + "tokio", + "tokio-postgres", + "tokio-stream", + "toml 0.7.8", + "toml_datetime", + "toml_edit 0.19.15", + "tracing", + "trust-dns-proto", + "unicode-bidi", + "unicode-normalization", + "unicode-xid", + "usdt", + "uuid", + "yasna", + "zeroize", + "zip", +] + [[package]] name = "omicron-zone-package" version = "0.8.3" @@ -5484,6 +5648,7 @@ dependencies = [ "futures", "http", "hyper", + "omicron-workspace-hack", "progenitor", "rand 0.8.5", 
"regress", @@ -5518,6 +5683,7 @@ dependencies = [ "chrono", "num", "omicron-common 0.1.0", + "omicron-workspace-hack", "oximeter-macro-impl 0.1.0", "rstest", "schemars", @@ -5549,6 +5715,7 @@ version = "0.1.0" dependencies = [ "chrono", "omicron-common 0.1.0", + "omicron-workspace-hack", "progenitor", "reqwest", "serde", @@ -5568,6 +5735,7 @@ dependencies = [ "nexus-client 0.1.0", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "openapi-lint", "openapiv3", "oximeter 0.1.0", @@ -5596,6 +5764,7 @@ dependencies = [ "dropshot", "itertools 0.11.0", "omicron-test-utils", + "omicron-workspace-hack", "oximeter 0.1.0", "regex", "reqwest", @@ -5620,6 +5789,7 @@ dependencies = [ "dropshot", "futures", "http", + "omicron-workspace-hack", "oximeter 0.1.0", "tokio", "uuid", @@ -5629,6 +5799,7 @@ dependencies = [ name = "oximeter-macro-impl" version = "0.1.0" dependencies = [ + "omicron-workspace-hack", "proc-macro2", "quote", "syn 2.0.32", @@ -5652,6 +5823,7 @@ dependencies = [ "dropshot", "nexus-client 0.1.0", "omicron-common 0.1.0", + "omicron-workspace-hack", "oximeter 0.1.0", "reqwest", "schemars", @@ -7906,6 +8078,7 @@ dependencies = [ "chrono", "ipnetwork", "omicron-common 0.1.0", + "omicron-workspace-hack", "progenitor", "regress", "reqwest", @@ -7931,6 +8104,7 @@ dependencies = [ "nexus-client 0.1.0", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "rand 0.8.5", "schemars", "serde", @@ -8157,6 +8331,7 @@ dependencies = [ "hex", "omicron-common 0.1.0", "omicron-gateway", + "omicron-workspace-hack", "serde", "slog", "slog-dtrace", @@ -9154,6 +9329,7 @@ dependencies = [ "humantime", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "predicates 3.0.3", "slog", "slog-async", @@ -9182,6 +9358,7 @@ dependencies = [ "itertools 0.11.0", "omicron-common 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "rand 0.8.5", "ring", "serde", @@ -9408,6 +9585,7 @@ dependencies = [ "indicatif", "linear-map", "omicron-test-utils", + "omicron-workspace-hack", "owo-colors", "petgraph", "schemars", @@ -9785,6 +9963,7 @@ dependencies = [ "itertools 0.11.0", "omicron-common 0.1.0", "omicron-passwords 0.1.0", + "omicron-workspace-hack", "once_cell", "owo-colors", "proptest", @@ -9820,6 +9999,7 @@ dependencies = [ "anyhow", "gateway-client", "omicron-common 0.1.0", + "omicron-workspace-hack", "schemars", "serde", "serde_json", @@ -9837,6 +10017,7 @@ dependencies = [ "ciborium", "clap 4.4.3", "crossterm 0.27.0", + "omicron-workspace-hack", "ratatui", "reedline", "serde", @@ -9882,10 +10063,12 @@ dependencies = [ "installinator-artifact-client", "installinator-artifactd", "installinator-common", + "itertools 0.11.0", "omicron-certificates", "omicron-common 0.1.0", "omicron-passwords 0.1.0", "omicron-test-utils", + "omicron-workspace-hack", "openapi-lint", "openapiv3", "rand 0.8.5", @@ -9922,6 +10105,7 @@ dependencies = [ "chrono", "installinator-common", "ipnetwork", + "omicron-workspace-hack", "progenitor", "regress", "reqwest", diff --git a/Cargo.toml b/Cargo.toml index 174e1052a0..0e194394f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ members = [ "caboose-util", "certificates", "common", + "crdb-seed", "ddm-admin-client", "deploy", "dev-tools/omdb", @@ -63,6 +64,7 @@ members = [ "wicket", "wicketd-client", "wicketd", + "workspace-hack", ] default-members = [ @@ -133,7 +135,7 @@ api_identity = { path = "api_identity" } approx = "0.5.1" assert_matches = "1.5.0" assert_cmd = "2.0.12" -async-bb8-diesel = { git = 
"https://github.com/oxidecomputer/async-bb8-diesel", rev = "be3d9bce50051d8c0e0c06078e8066cc27db3001" } +async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", rev = "da04c087f835a51e0441addb19c5ef4986e1fcf2" } async-trait = "0.1.73" authz-macros = { path = "nexus/authz-macros" } backoff = { version = "0.4.0", features = [ "tokio" ] } diff --git a/README.adoc b/README.adoc index 988c1276fd..93d1fa4fb8 100644 --- a/README.adoc +++ b/README.adoc @@ -152,30 +152,17 @@ Many of these components themselves are made up of other packages (e.g., `nexus- Use Cargo's `-p PACKAGE` to check/build/test only the package you're working on. Since people are usually only working on one or two components at a time, you can usually iterate faster this way. -=== Why is Cargo rebuilding stuff all the time? +=== Workspace management -People are often surprised to find Cargo rebuilding stuff that it seems like it's just built, even when the relevant source files haven't changed. +Omicron uses `cargo-hakari` to ensure that all workspace dependencies enable the same set of features. This dramatically improves compilation time when switching between different subsets of packages (e.g. `-p wicket` or `-p nexus-db-model`), because the sets of enabled features remain consistent. -* Say you're iterating on code, running `cargo build -p nexus-db-model` to build just that package. Great, it works. Let's run tests: `cargo nextest run -p nexus-db-model`. Now it's rebuilding some _dependency_ of `nexus-db-model` again?! -* Say you've just run `cargo nextest run -p nexus-db-model`. Now you go run `cargo nextest run -p omicron-nexus`, which uses `nexus-db-model`. You see Cargo building `nexus-db-model` again?! +`cargo hakari` status is checked in CI; if the CI check fails, then update the configuration locally with -This usually has to do with the way Cargo selects package https://doc.rust-lang.org/cargo/reference/features.html[features]. These are essentially tags that are used at build time to include specific code or dependencies. For example, the https://docs.rs/serde/latest/serde/[serde] crate defines a feature called https://docs.rs/crate/serde/latest/features["derive"] that controls whether the `Serialize`/`Deserialize` derive macros will be included. Let's look at how this affects builds. - -TIP: You can use `cargo tree` to inspect a package's dependencies, including features. This is useful for debugging feature-related build issues. - -==== Feature selection differs when building tests - -When you run `cargo build -p nexus-db-model`, Cargo looks at all the packages in the depencency tree of `nexus-db-model` and figures out what features it needs for each one. Let's take the `uuid` package as an example. Cargo takes https://doc.rust-lang.org/cargo/reference/features.html#feature-unification[union of the features required by any of the packages that depend on `uuid` in the whole dependency tree of `nexus-db-model`]. Let's say that's just the "v4" feature. Simple enough. - -When you then run `cargo nextest run -p nexus-db-model`, it does the same thing. Only this time, it's looking at the `dev-dependencies` tree. `nexus-db-model` 's dev-dependencies might include some other package that depends on `uuid` and requires the "v5" feature. Now, Cargo has to rebuild `uuid` -- and anything else that depends on it. - -This is why when using Cargo's check/build/clippy commands, we suggest using `--all-targets`. When you use `cargo build --all-targets`, it builds the tests as well. 
It's usually not much more time and avoids extra rebuilds when switching back and forth between the default targets and the targets with tests included. - -==== Feature selection differs when building different packages - -People run into a similar problem when switching packages within Omicron. Once you've got `cargo nextest run -p nexus-db-model` working, you may run `cargo nextest run -p omicron-nexus`, which uses `nexus-db-model`. And you may be surprised to see Cargo rebuilding some common dependency like `uuid`. It's the same as above: we're building a different package now. It has a different (larger) dependency tree. That may result in some crate deep in the dependency tree needing some new feature, causing it and all of its dependents to be rebuilt. - -NOTE: https://github.com/rust-lang/cargo/issues/4463[There is interest in changing the way feature selection works in workspaces like Omicron for exactly this reason.] It's been suggested to have an option for Cargo to always look at the features required for all packages in the workspace, rather than just the one you've selected. This could eliminate this particular problem. In the meantime, we mitigate this with heavy use of https://doc.rust-lang.org/cargo/reference/workspaces.html#the-dependencies-table[workspace dependencies], which helps make sure that different packages _within_ Omicron depend on the same set of features for a given dependency. +``` +cargo install cargo-hakari --locked # only needed on the first run +cargo hakari generate +cargo hakari manage-deps +``` === Why am I getting compile errors after I thought I'd already built everything? diff --git a/api_identity/Cargo.toml b/api_identity/Cargo.toml index 761e5e3017..9faf2a1878 100644 --- a/api_identity/Cargo.toml +++ b/api_identity/Cargo.toml @@ -14,3 +14,4 @@ proc-macro = true proc-macro2.workspace = true quote.workspace = true syn.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/bootstore/Cargo.toml b/bootstore/Cargo.toml index d916bf80c7..eefe05c8d6 100644 --- a/bootstore/Cargo.toml +++ b/bootstore/Cargo.toml @@ -36,6 +36,7 @@ zeroize.workspace = true # utils`. Unfortunately, it doesn't appear possible to put the `pq-sys` dep # only in `[dev-dependencies]`. 
pq-sys = "*" +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] assert_matches.workspace = true diff --git a/bootstrap-agent-client/Cargo.toml b/bootstrap-agent-client/Cargo.toml index f7d3ad2db6..17989a5c5f 100644 --- a/bootstrap-agent-client/Cargo.toml +++ b/bootstrap-agent-client/Cargo.toml @@ -17,3 +17,4 @@ serde.workspace = true sled-hardware.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/caboose-util/Cargo.toml b/caboose-util/Cargo.toml index 195bbfd5d7..253d54643d 100644 --- a/caboose-util/Cargo.toml +++ b/caboose-util/Cargo.toml @@ -7,3 +7,4 @@ license = "MPL-2.0" [dependencies] anyhow.workspace = true hubtools.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/certificates/Cargo.toml b/certificates/Cargo.toml index 29c4a8bc2e..d20d257e4c 100644 --- a/certificates/Cargo.toml +++ b/certificates/Cargo.toml @@ -12,6 +12,7 @@ openssl-sys.workspace = true thiserror.workspace = true omicron-common.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] omicron-test-utils.workspace = true diff --git a/common/Cargo.toml b/common/Cargo.toml index 492b6ca860..bda88d0d43 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -40,6 +40,7 @@ toml.workspace = true uuid.workspace = true parse-display.workspace = true progenitor.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] camino-tempfile.workspace = true diff --git a/crdb-seed/Cargo.toml b/crdb-seed/Cargo.toml new file mode 100644 index 0000000000..fa71fe7e8a --- /dev/null +++ b/crdb-seed/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "crdb-seed" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[dependencies] +camino.workspace = true +camino-tempfile.workspace = true +dropshot.workspace = true +hex.workspace = true +omicron-test-utils.workspace = true +ring.workspace = true +slog.workspace = true +tokio.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/crdb-seed/src/main.rs b/crdb-seed/src/main.rs new file mode 100644 index 0000000000..b8572bd886 --- /dev/null +++ b/crdb-seed/src/main.rs @@ -0,0 +1,92 @@ +use camino::Utf8PathBuf; +use dropshot::{test_util::LogContext, ConfigLogging, ConfigLoggingLevel}; +use omicron_test_utils::dev; +use slog::Logger; +use std::io::Write; + +// Creates a string identifier for the current DB schema and version. +// +// The goal here is to allow to create different "seed" directories +// for each revision of the DB. 
+fn digest_unique_to_schema() -> String { + let schema = include_str!("../../schema/crdb/dbinit.sql"); + let crdb_version = include_str!("../../tools/cockroachdb_version"); + let mut ctx = ring::digest::Context::new(&ring::digest::SHA256); + ctx.update(&schema.as_bytes()); + ctx.update(&crdb_version.as_bytes()); + let digest = ctx.finish(); + hex::encode(digest.as_ref()) +} + +enum SeedDirectoryStatus { + Created, + Existing, +} + +async fn ensure_seed_directory_exists( + log: &Logger, +) -> (Utf8PathBuf, SeedDirectoryStatus) { + let base_seed_dir = Utf8PathBuf::from_path_buf(std::env::temp_dir()) + .expect("Not a UTF-8 path") + .join("crdb-base"); + std::fs::create_dir_all(&base_seed_dir).unwrap(); + let desired_seed_dir = base_seed_dir.join(digest_unique_to_schema()); + + if desired_seed_dir.exists() { + return (desired_seed_dir, SeedDirectoryStatus::Existing); + } + + // The directory didn't exist when we started, so try to create it. + // + // Nextest will execute it just once, but it is possible for a user to start + // up multiple nextest processes to be running at the same time. So we + // should consider it possible for another caller to create this seed + // directory before we finish setting it up ourselves. + let tmp_seed_dir = + camino_tempfile::Utf8TempDir::new_in(base_seed_dir).unwrap(); + dev::test_setup_database_seed(log, tmp_seed_dir.path()).await; + + // If we can successfully perform the rename, there was either no + // contention or we won a creation race. + // + // If we couldn't perform the rename, the directory might already exist. + // Check that this is the error we encountered -- otherwise, we're + // struggling. + if let Err(err) = std::fs::rename(tmp_seed_dir.path(), &desired_seed_dir) { + if !desired_seed_dir.exists() { + panic!("Cannot rename seed directory for CockroachDB: {err}"); + } + } + + (desired_seed_dir, SeedDirectoryStatus::Created) +} + +#[tokio::main] +async fn main() { + // TODO: dropshot is v heavyweight for this, we should be able to pull in a + // smaller binary + let logctx = LogContext::new( + "crdb_seeding", + &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, + ); + let (dir, status) = ensure_seed_directory_exists(&logctx.log).await; + match status { + SeedDirectoryStatus::Created => { + slog::info!(logctx.log, "Created seed directory: `{dir}`"); + } + SeedDirectoryStatus::Existing => { + slog::info!(logctx.log, "Using existing seed directory: `{dir}`"); + } + } + if let Ok(env_path) = std::env::var("NEXTEST_ENV") { + let mut file = std::fs::File::create(&env_path) + .expect("failed to open NEXTEST_ENV file"); + writeln!(file, "CRDB_SEED_DIR={dir}") + .expect("failed to write to NEXTEST_ENV file"); + } else { + slog::warn!( + logctx.log, + "NEXTEST_ENV not set (is this script running under nextest?)" + ); + } +} diff --git a/ddm-admin-client/Cargo.toml b/ddm-admin-client/Cargo.toml index 6e9ee930a0..3814446b3e 100644 --- a/ddm-admin-client/Cargo.toml +++ b/ddm-admin-client/Cargo.toml @@ -15,6 +15,7 @@ tokio.workspace = true omicron-common.workspace = true sled-hardware.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [build-dependencies] anyhow.workspace = true diff --git a/deploy/Cargo.toml b/deploy/Cargo.toml index 1b8e6a92d8..17bacd6354 100644 --- a/deploy/Cargo.toml +++ b/deploy/Cargo.toml @@ -14,6 +14,7 @@ serde.workspace = true serde_derive.workspace = true thiserror.workspace = true toml.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" 
} [[bin]] name = "thing-flinger" diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index c9ebbe35ad..5b2adde1b2 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -32,6 +32,7 @@ tabled.workspace = true textwrap.workspace = true tokio = { workspace = true, features = [ "full" ] } uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 268e74a622..93e5ef4301 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -24,13 +24,16 @@ use clap::ValueEnum; use diesel::expression::SelectableHelper; use diesel::query_dsl::QueryDsl; use diesel::ExpressionMethods; +use nexus_db_model::Dataset; use nexus_db_model::Disk; use nexus_db_model::DnsGroup; use nexus_db_model::DnsName; use nexus_db_model::DnsVersion; use nexus_db_model::DnsZone; use nexus_db_model::Instance; +use nexus_db_model::Region; use nexus_db_model::Sled; +use nexus_db_model::Zpool; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; use nexus_db_queries::db::identity::Asset; @@ -45,6 +48,7 @@ use omicron_common::api::external::Generation; use omicron_common::postgres_config::PostgresConfigWithUrl; use std::cmp::Ordering; use std::collections::BTreeMap; +use std::collections::HashSet; use std::fmt::Display; use std::num::NonZeroU32; use std::sync::Arc; @@ -96,6 +100,8 @@ enum DiskCommands { Info(DiskInfoArgs), /// Summarize current disks List, + /// Determine what crucible resources are on the given physical disk. + Physical(DiskPhysicalArgs), } #[derive(Debug, Args)] @@ -104,6 +110,12 @@ struct DiskInfoArgs { uuid: Uuid, } +#[derive(Debug, Args)] +struct DiskPhysicalArgs { + /// The UUID of the physical disk + uuid: Uuid, +} + #[derive(Debug, Args)] struct DnsArgs { #[command(subcommand)] @@ -214,6 +226,12 @@ impl DbArgs { DbCommands::Disks(DiskArgs { command: DiskCommands::List }) => { cmd_db_disk_list(&datastore, self.fetch_limit).await } + DbCommands::Disks(DiskArgs { + command: DiskCommands::Physical(uuid), + }) => { + cmd_db_disk_physical(&opctx, &datastore, self.fetch_limit, uuid) + .await + } DbCommands::Dns(DnsArgs { command: DnsCommands::Show }) => { cmd_db_dns_show(&opctx, &datastore, self.fetch_limit).await } @@ -349,7 +367,7 @@ async fn cmd_db_disk_list( .filter(dsl::time_deleted.is_null()) .limit(i64::from(u32::from(limit))) .select(Disk::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading disks")?; @@ -403,11 +421,13 @@ async fn cmd_db_disk_info( use db::schema::disk::dsl as disk_dsl; + let conn = datastore.pool_connection_for_tests().await?; + let disk = disk_dsl::disk .filter(disk_dsl::id.eq(args.uuid)) .limit(1) .select(Disk::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*conn) .await .context("loading requested disk")?; @@ -427,7 +447,7 @@ async fn cmd_db_disk_info( .filter(instance_dsl::id.eq(instance_uuid)) .limit(1) .select(Instance::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*conn) .await .context("loading requested instance")?; @@ -509,6 +529,151 @@ async fn cmd_db_disk_info( Ok(()) } +/// Run `omdb db disk physical `. 
+async fn cmd_db_disk_physical( + opctx: &OpContext, + datastore: &DataStore, + limit: NonZeroU32, + args: &DiskPhysicalArgs, +) -> Result<(), anyhow::Error> { + // We start by finding any zpools that are using the physical disk. + use db::schema::zpool::dsl as zpool_dsl; + let zpools = zpool_dsl::zpool + .filter(zpool_dsl::time_deleted.is_null()) + .filter(zpool_dsl::physical_disk_id.eq(args.uuid)) + .select(Zpool::as_select()) + .load_async(&*datastore.pool_connection_for_tests().await?) + .await + .context("loading zpool from pysical disk id")?; + + let mut sled_ids = HashSet::new(); + let mut dataset_ids = HashSet::new(); + + // The current plan is a single zpool per physical disk, so we expect that + // this will have a single item. However, If single zpool per disk ever + // changes, this code will still work. + for zp in zpools { + // zpool has the sled id, record that so we can find the serial number. + sled_ids.insert(zp.sled_id); + + // Next, we find all the datasets that are on our zpool. + use db::schema::dataset::dsl as dataset_dsl; + let datasets = dataset_dsl::dataset + .filter(dataset_dsl::time_deleted.is_null()) + .filter(dataset_dsl::pool_id.eq(zp.id())) + .select(Dataset::as_select()) + .load_async(&*datastore.pool_connection_for_tests().await?) + .await + .context("loading dataset")?; + + // Add all the datasets ids that are using this pool. + for ds in datasets { + dataset_ids.insert(ds.id()); + } + } + + // If we do have more than one sled ID, then something is wrong, but + // go ahead and print out whatever we have found. + for sid in sled_ids { + let (_, my_sled) = LookupPath::new(opctx, datastore) + .sled_id(sid) + .fetch() + .await + .context("failed to look up sled")?; + + println!( + "Physical disk: {} found on sled: {}", + args.uuid, + my_sled.serial_number() + ); + } + + let mut volume_ids = HashSet::new(); + // Now, take the list of datasets we found and search all the regions + // to see if any of them are on the dataset. If we find a region that + // is on one of our datasets, then record the volume ID of that region. + for did in dataset_ids.clone().into_iter() { + use db::schema::region::dsl as region_dsl; + let regions = region_dsl::region + .filter(region_dsl::dataset_id.eq(did)) + .select(Region::as_select()) + .load_async(&*datastore.pool_connection_for_tests().await?) + .await + .context("loading region")?; + + for rs in regions { + volume_ids.insert(rs.volume_id()); + } + } + + // At this point, we have a list of volume IDs that contain a region + // that is part of a dataset on a pool on our disk. The final step is + // to find the virtual disks associated with these volume IDs and + // display information about those disks. + use db::schema::disk::dsl; + let disks = dsl::disk + .filter(dsl::time_deleted.is_null()) + .filter(dsl::volume_id.eq_any(volume_ids)) + .limit(i64::from(u32::from(limit))) + .select(Disk::as_select()) + .load_async(&*datastore.pool_connection_for_tests().await?) + .await + .context("loading disks")?; + + check_limit(&disks, limit, || "listing disks".to_string()); + + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct DiskRow { + name: String, + id: String, + state: String, + instance_name: String, + } + + let mut rows = Vec::new(); + + for disk in disks { + // If the disk is attached to an instance, determine the name of the + // instance. 
+ let instance_name = + if let Some(instance_uuid) = disk.runtime().attach_instance_id { + // Get the instance this disk is attached to + use db::schema::instance::dsl as instance_dsl; + let instance = instance_dsl::instance + .filter(instance_dsl::id.eq(instance_uuid)) + .limit(1) + .select(Instance::as_select()) + .load_async(&*datastore.pool_connection_for_tests().await?) + .await + .context("loading requested instance")?; + + if let Some(instance) = instance.into_iter().next() { + instance.name().to_string() + } else { + "???".to_string() + } + } else { + "-".to_string() + }; + + rows.push(DiskRow { + name: disk.name().to_string(), + id: disk.id().to_string(), + state: disk.runtime().disk_state, + instance_name: instance_name, + }); + } + + let table = tabled::Table::new(rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + + println!("{}", table); + Ok(()) +} + // SERVICES #[derive(Tabled)] @@ -714,7 +879,7 @@ async fn cmd_db_instances( let instances = dsl::instance .limit(i64::from(u32::from(limit))) .select(Instance::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading instances")?; @@ -808,7 +973,7 @@ async fn load_zones_version( .filter(dsl::version.eq(nexus_db_model::Generation::from(version))) .limit(1) .select(DnsVersion::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading requested version")?; @@ -850,7 +1015,7 @@ async fn cmd_db_dns_diff( .filter(dsl::version_added.eq(version.version)) .limit(i64::from(u32::from(limit))) .select(DnsName::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading added names")?; check_limit(&added, limit, || "loading added names"); @@ -860,7 +1025,7 @@ async fn cmd_db_dns_diff( .filter(dsl::version_removed.eq(version.version)) .limit(i64::from(u32::from(limit))) .select(DnsName::as_select()) - .load_async(datastore.pool_for_tests().await?) + .load_async(&*datastore.pool_connection_for_tests().await?) .await .context("loading added names")?; check_limit(&added, limit, || "loading removed names"); diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index e7a50da935..eb4cd0d32d 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -7,7 +7,7 @@ sim-b6d65341 [::1]:REDACTED_PORT - REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (4.0.0) +note: database schema version matches expected (5.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "--db-url", "junk", "sleds"] termination: Exited(2) @@ -172,7 +172,7 @@ stderr: note: database URL not specified. Will search DNS. note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (4.0.0) +note: database schema version matches expected (5.0.0) ============================================= EXECUTING COMMAND: omdb ["--dns-server", "[::1]:REDACTED_PORT", "db", "sleds"] termination: Exited(0) @@ -185,5 +185,5 @@ stderr: note: database URL not specified. Will search DNS. 
note: (override with --db-url or OMDB_DB_URL) note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (4.0.0) +note: database schema version matches expected (5.0.0) ============================================= diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 7532e9b61e..b1464cb824 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -8,7 +8,7 @@ external oxide-dev.test 2 create silo: "tes --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (4.0.0) +note: database schema version matches expected (5.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "dns", "diff", "external", "2"] termination: Exited(0) @@ -24,7 +24,7 @@ changes: names added: 1, names removed: 0 --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (4.0.0) +note: database schema version matches expected (5.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "dns", "names", "external", "2"] termination: Exited(0) @@ -36,7 +36,7 @@ External zone: oxide-dev.test --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (4.0.0) +note: database schema version matches expected (5.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "services", "list-instances"] termination: Exited(0) @@ -52,7 +52,7 @@ Nexus REDACTED_UUID_REDACTED_UUID_REDACTED [::ffff:127.0.0.1]:REDACTED_ --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (4.0.0) +note: database schema version matches expected (5.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "services", "list-by-sled"] termination: Exited(0) @@ -71,7 +71,7 @@ sled: sim-b6d65341 (id REDACTED_UUID_REDACTED_UUID_REDACTED) --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (4.0.0) +note: database schema version matches expected (5.0.0) ============================================= EXECUTING COMMAND: omdb ["db", "sleds"] termination: Exited(0) @@ -82,7 +82,7 @@ sim-b6d65341 [::1]:REDACTED_PORT - REDACTED_UUID_REDACTED_UUID_REDACTED --------------------------------------------- stderr: note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=disable -note: database schema version matches expected (4.0.0) +note: database schema version matches expected (5.0.0) ============================================= EXECUTING COMMAND: omdb ["nexus", "background-tasks", "doc"] termination: Exited(0) diff --git a/dev-tools/omicron-dev/Cargo.toml b/dev-tools/omicron-dev/Cargo.toml index 2061489cbb..95da4d42ef 100644 --- a/dev-tools/omicron-dev/Cargo.toml +++ b/dev-tools/omicron-dev/Cargo.toml @@ -28,6 +28,7 @@ signal-hook-tokio.workspace = true tokio = { workspace = true, features = [ "full" ] } tokio-postgres.workspace = true toml.workspace = true 
+omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] camino-tempfile.workspace = true diff --git a/dev-tools/xtask/src/main.rs b/dev-tools/xtask/src/main.rs index a64c87a570..3e52d742f5 100644 --- a/dev-tools/xtask/src/main.rs +++ b/dev-tools/xtask/src/main.rs @@ -78,6 +78,8 @@ fn cmd_clippy() -> Result<()> { Ok(()) } +const WORKSPACE_HACK_PACKAGE_NAME: &str = "omicron-workspace-hack"; + fn cmd_check_workspace_deps() -> Result<()> { // Ignore issues with "pq-sys". See the omicron-rpaths package for details. const EXCLUDED: &[&'static str] = &["pq-sys"]; @@ -97,6 +99,12 @@ fn cmd_check_workspace_deps() -> Result<()> { // Iterate the workspace packages and fill out the maps above. for pkg_info in workspace.workspace_packages() { + if pkg_info.name == WORKSPACE_HACK_PACKAGE_NAME { + // Skip over workspace-hack because hakari doesn't yet support + // workspace deps: https://github.com/guppy-rs/guppy/issues/7 + continue; + } + let manifest_path = &pkg_info.manifest_path; let manifest = read_cargo_toml(manifest_path)?; for tree in [ @@ -125,6 +133,12 @@ fn cmd_check_workspace_deps() -> Result<()> { } } + if name == WORKSPACE_HACK_PACKAGE_NAME { + // Skip over workspace-hack because hakari doesn't yet support + // workspace deps: https://github.com/guppy-rs/guppy/issues/7 + continue; + } + non_workspace_dependencies .entry(name.to_owned()) .or_insert_with(Vec::new) diff --git a/dns-server/Cargo.toml b/dns-server/Cargo.toml index 243876a5a2..d7606dcff5 100644 --- a/dns-server/Cargo.toml +++ b/dns-server/Cargo.toml @@ -30,6 +30,7 @@ trust-dns-proto.workspace = true trust-dns-resolver.workspace = true trust-dns-server.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/dns-service-client/Cargo.toml b/dns-service-client/Cargo.toml index 7f5cf63d6a..e351d90da2 100644 --- a/dns-service-client/Cargo.toml +++ b/dns-service-client/Cargo.toml @@ -14,3 +14,4 @@ serde.workspace = true serde_json.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/dpd-client/Cargo.toml b/dpd-client/Cargo.toml index fdbdcd07af..26807f7d79 100644 --- a/dpd-client/Cargo.toml +++ b/dpd-client/Cargo.toml @@ -17,6 +17,7 @@ ipnetwork.workspace = true http.workspace = true schemars.workspace = true rand.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [build-dependencies] anyhow.workspace = true diff --git a/end-to-end-tests/Cargo.toml b/end-to-end-tests/Cargo.toml index a0e099b756..5ff0f9b377 100644 --- a/end-to-end-tests/Cargo.toml +++ b/end-to-end-tests/Cargo.toml @@ -24,3 +24,4 @@ tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } toml.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/gateway-cli/Cargo.toml b/gateway-cli/Cargo.toml index d5083d1999..0d179750ea 100644 --- a/gateway-cli/Cargo.toml +++ b/gateway-cli/Cargo.toml @@ -24,3 +24,4 @@ uuid.workspace = true gateway-client.workspace = true gateway-messages.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/gateway-client/Cargo.toml b/gateway-client/Cargo.toml index 81d1630a1d..96a1eb221f 100644 --- a/gateway-client/Cargo.toml +++ b/gateway-client/Cargo.toml @@ -15,3 +15,4 @@ serde_json.workspace 
= true schemars.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/gateway-test-utils/Cargo.toml b/gateway-test-utils/Cargo.toml index 8f6e14d68a..9d80e63f05 100644 --- a/gateway-test-utils/Cargo.toml +++ b/gateway-test-utils/Cargo.toml @@ -14,3 +14,4 @@ slog.workspace = true sp-sim.workspace = true tokio.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/gateway-test-utils/src/setup.rs b/gateway-test-utils/src/setup.rs index b947f32a19..e789f8de78 100644 --- a/gateway-test-utils/src/setup.rs +++ b/gateway-test-utils/src/setup.rs @@ -142,13 +142,12 @@ pub async fn test_setup_with_config( } // Start gateway server - let rack_id = Uuid::parse_str(RACK_UUID).unwrap(); + let rack_id = Some(Uuid::parse_str(RACK_UUID).unwrap()); - let args = MgsArguments { id: Uuid::new_v4(), addresses }; + let args = MgsArguments { id: Uuid::new_v4(), addresses, rack_id }; let server = omicron_gateway::Server::start( server_config.clone(), args, - rack_id, log.clone(), ) .await diff --git a/gateway/Cargo.toml b/gateway/Cargo.toml index 307baa3f27..f5abce88e9 100644 --- a/gateway/Cargo.toml +++ b/gateway/Cargo.toml @@ -34,6 +34,7 @@ tokio-tungstenite.workspace = true tokio-util.workspace = true toml.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/gateway/src/bin/mgs.rs b/gateway/src/bin/mgs.rs index fe047b766b..cb9070a9a5 100644 --- a/gateway/src/bin/mgs.rs +++ b/gateway/src/bin/mgs.rs @@ -56,6 +56,7 @@ enum Args { struct ConfigProperties { id: Uuid, addresses: Vec, + rack_id: Option, } #[tokio::main] @@ -91,15 +92,17 @@ async fn do_run() -> Result<(), CmdError> { )) })?; - let (id, addresses) = if id_and_address_from_smf { + let (id, addresses, rack_id) = if id_and_address_from_smf { let config = read_smf_config()?; - (config.id, config.addresses) + (config.id, config.addresses, config.rack_id) } else { - // Clap ensures these are present if `id_and_address_from_smf` - // is false, so we can safely unwrap. - (id.unwrap(), vec![address.unwrap()]) + // Does it matter if `rack_id` is always `None` in this case? + let rack_id = None; + // Clap ensures the first two fields are present if + // `id_and_address_from_smf` is false, so we can safely unwrap. + (id.unwrap(), vec![address.unwrap()], rack_id) }; - let args = MgsArguments { id, addresses }; + let args = MgsArguments { id, addresses, rack_id }; let mut server = start_server(config, args).await.map_err(CmdError::Failure)?; @@ -114,6 +117,7 @@ async fn do_run() -> Result<(), CmdError> { .to_string() )); } + server.set_rack_id(new_config.rack_id); server .adjust_dropshot_addresses(&new_config.addresses) .await @@ -148,6 +152,9 @@ fn read_smf_config() -> Result { // Name of the property within CONFIG_PG for our server addresses. const PROP_ADDR: &str = "address"; + // Name of the property within CONFIG_PG for our rack ID. + const PROP_RACK_ID: &str = "rack_id"; + // This function is pretty boilerplate-y; we can reduce it by using this // error type to help us construct a `CmdError::Failure(_)` string. It // assumes (for the purposes of error messages) any property being fetched @@ -210,6 +217,26 @@ fn read_smf_config() -> Result { )) })?; + let prop_rack_id = config + .get_property(PROP_RACK_ID) + .map_err(|err| Error::GetProperty { prop: PROP_RACK_ID, err })? 
+ .ok_or_else(|| Error::MissingProperty { prop: PROP_RACK_ID })? + .value() + .map_err(|err| Error::GetValue { prop: PROP_RACK_ID, err })? + .ok_or(Error::MissingValue { prop: PROP_RACK_ID })? + .as_string() + .map_err(|err| Error::ValueAsString { prop: PROP_RACK_ID, err })?; + + let rack_id = if prop_rack_id.as_str() == "unknown" { + None + } else { + Some(Uuid::try_parse(&prop_rack_id).map_err(|err| { + CmdError::Failure(format!( + "failed to parse `{CONFIG_PG}/{PROP_RACK_ID}` ({prop_rack_id:?}) as a UUID: {err}" + )) + })?) + }; + let prop_addr = config .get_property(PROP_ADDR) .map_err(|err| Error::GetProperty { prop: PROP_ADDR, err })? @@ -236,7 +263,7 @@ fn read_smf_config() -> Result { "no addresses specified by `{CONFIG_PG}/{PROP_ADDR}`" ))) } else { - Ok(ConfigProperties { id: prop_id, addresses }) + Ok(ConfigProperties { id: prop_id, addresses, rack_id }) } } diff --git a/gateway/src/context.rs b/gateway/src/context.rs index e78e5c7842..939bb9b6b9 100644 --- a/gateway/src/context.rs +++ b/gateway/src/context.rs @@ -6,13 +6,16 @@ use crate::error::StartupError; use crate::management_switch::ManagementSwitch; use crate::management_switch::SwitchConfig; use gateway_sp_comms::InMemoryHostPhase2Provider; -use slog::Logger; +use slog::{info, Logger}; use std::sync::Arc; +use std::sync::OnceLock; +use uuid::Uuid; /// Shared state used by API request handlers pub struct ServerContext { pub mgmt_switch: ManagementSwitch, pub host_phase2_provider: Arc, + pub rack_id: OnceLock, pub log: Logger, } @@ -20,15 +23,24 @@ impl ServerContext { pub async fn new( host_phase2_provider: Arc, switch_config: SwitchConfig, + rack_id_config: Option, log: &Logger, ) -> Result, StartupError> { let mgmt_switch = ManagementSwitch::new(switch_config, &host_phase2_provider, log) .await?; + let rack_id = if let Some(id) = rack_id_config { + info!(log, "Setting rack_id"; "rack_id" => %id); + OnceLock::from(id) + } else { + OnceLock::new() + }; + Ok(Arc::new(ServerContext { mgmt_switch, host_phase2_provider, + rack_id, log: log.clone(), })) } diff --git a/gateway/src/lib.rs b/gateway/src/lib.rs index 871b05719a..10fcf1539c 100644 --- a/gateway/src/lib.rs +++ b/gateway/src/lib.rs @@ -58,6 +58,7 @@ pub fn run_openapi() -> Result<(), String> { pub struct MgsArguments { pub id: Uuid, pub addresses: Vec, + pub rack_id: Option, } type HttpServer = dropshot::HttpServer>; @@ -125,7 +126,6 @@ impl Server { pub async fn start( config: Config, args: MgsArguments, - _rack_id: Uuid, log: Logger, ) -> Result { if args.addresses.is_empty() { @@ -146,12 +146,14 @@ impl Server { Arc::new(InMemoryHostPhase2Provider::with_capacity( config.host_phase2_recovery_image_cache_max_images, )); - let apictx = - ServerContext::new(host_phase2_provider, config.switch, &log) - .await - .map_err(|error| { - format!("initializing server context: {}", error) - })?; + let apictx = ServerContext::new( + host_phase2_provider, + config.switch, + args.rack_id, + &log, + ) + .await + .map_err(|error| format!("initializing server context: {}", error))?; let mut http_servers = HashMap::with_capacity(args.addresses.len()); let all_servers_shutdown = FuturesUnordered::new(); @@ -283,6 +285,25 @@ impl Server { Ok(()) } + /// The rack_id will be set on a refresh of the SMF property when the sled + /// agent starts. 
+ pub fn set_rack_id(&self, rack_id: Option) { + if let Some(rack_id) = rack_id { + let val = self.apictx.rack_id.get_or_init(|| rack_id); + if *val != rack_id { + error!( + self.apictx.log, + "Ignoring attempted change to rack ID"; + "current_rack_id" => %val, + "ignored_new_rack_id" => %rack_id); + } else { + info!(self.apictx.log, "Set rack_id"; "rack_id" => %rack_id); + } + } else { + warn!(self.apictx.log, "SMF refresh called without a rack id"); + } + } + // TODO does MGS register itself with oximeter? // Register the Nexus server as a metric producer with `oximeter. // pub async fn register_as_producer(&self) { @@ -313,8 +334,7 @@ pub async fn start_server( } else { debug!(log, "registered DTrace probes"); } - let rack_id = Uuid::new_v4(); - let server = Server::start(config, args, rack_id, log).await?; + let server = Server::start(config, args, log).await?; // server.register_as_producer().await; Ok(server) } diff --git a/illumos-utils/Cargo.toml b/illumos-utils/Cargo.toml index 3c0c2e7fc9..e292097bc5 100644 --- a/illumos-utils/Cargo.toml +++ b/illumos-utils/Cargo.toml @@ -29,6 +29,7 @@ zone.workspace = true # only enabled via the `testing` feature mockall = { workspace = true, optional = true } +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [target.'cfg(target_os = "illumos")'.dependencies] opte-ioctl.workspace = true diff --git a/installinator-artifact-client/Cargo.toml b/installinator-artifact-client/Cargo.toml index ddbc106ee8..18447b8e83 100644 --- a/installinator-artifact-client/Cargo.toml +++ b/installinator-artifact-client/Cargo.toml @@ -15,3 +15,4 @@ serde_json.workspace = true slog.workspace = true update-engine.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/installinator-artifactd/Cargo.toml b/installinator-artifactd/Cargo.toml index 3ce6112165..9318b725db 100644 --- a/installinator-artifactd/Cargo.toml +++ b/installinator-artifactd/Cargo.toml @@ -20,6 +20,7 @@ uuid.workspace = true installinator-common.workspace = true omicron-common.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/installinator-common/Cargo.toml b/installinator-common/Cargo.toml index ff664f28a3..0f1bf86901 100644 --- a/installinator-common/Cargo.toml +++ b/installinator-common/Cargo.toml @@ -15,3 +15,4 @@ serde_json.workspace = true serde_with.workspace = true thiserror.workspace = true update-engine.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/installinator/Cargo.toml b/installinator/Cargo.toml index c0e7625e6e..3b2f04c38f 100644 --- a/installinator/Cargo.toml +++ b/installinator/Cargo.toml @@ -42,6 +42,7 @@ toml.workspace = true tufaceous-lib.workspace = true update-engine.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] omicron-test-utils.workspace = true diff --git a/internal-dns-cli/Cargo.toml b/internal-dns-cli/Cargo.toml index d922544722..fb5780d22a 100644 --- a/internal-dns-cli/Cargo.toml +++ b/internal-dns-cli/Cargo.toml @@ -13,3 +13,4 @@ omicron-common.workspace = true slog.workspace = true tokio.workspace = true trust-dns-resolver.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/internal-dns/Cargo.toml b/internal-dns/Cargo.toml index 5ead1cc8a4..d680ab3ce1 100644 --- a/internal-dns/Cargo.toml 
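An illustrative sketch (not part of the patch itself) of the std::sync::OnceLock behavior that the new set_rack_id above relies on. The u32 here is only a stand-in for the rack Uuid; the point is that get_or_init stores the first value and keeps returning it on later calls, which is how a conflicting rack ID from a subsequent SMF refresh can be detected and ignored.

use std::sync::OnceLock;

// `u32` stands in for the rack Uuid; the semantics are the same.
fn set_once(slot: &OnceLock<u32>, new: u32) -> bool {
    // Returns a reference to the stored value, initializing it with `new`
    // only if the slot was still empty.
    let current = slot.get_or_init(|| new);
    // If the stored value differs, the attempted change was ignored.
    *current == new
}

fn main() {
    let slot = OnceLock::new();
    assert!(set_once(&slot, 7));      // first write wins
    assert!(!set_once(&slot, 9));     // a later, different value is ignored
    assert_eq!(slot.get(), Some(&7)); // the original value is still in place
}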
+++ b/internal-dns/Cargo.toml @@ -17,6 +17,7 @@ thiserror.workspace = true trust-dns-proto.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] assert_matches.workspace = true diff --git a/ipcc-key-value/Cargo.toml b/ipcc-key-value/Cargo.toml index a3f17cea52..128fde9a01 100644 --- a/ipcc-key-value/Cargo.toml +++ b/ipcc-key-value/Cargo.toml @@ -11,6 +11,7 @@ omicron-common.workspace = true serde.workspace = true thiserror.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] omicron-common = { workspace = true, features = ["testing"] } diff --git a/key-manager/Cargo.toml b/key-manager/Cargo.toml index 7954a977a3..69ae3b25bd 100644 --- a/key-manager/Cargo.toml +++ b/key-manager/Cargo.toml @@ -14,4 +14,5 @@ slog.workspace = true thiserror.workspace = true tokio.workspace = true zeroize.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/nexus-client/Cargo.toml b/nexus-client/Cargo.toml index 589562c930..d59c013992 100644 --- a/nexus-client/Cargo.toml +++ b/nexus-client/Cargo.toml @@ -18,3 +18,4 @@ serde.workspace = true serde_json.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 1a09f07f6c..91872e2c32 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -90,6 +90,7 @@ oximeter.workspace = true oximeter-instruments = { workspace = true, features = ["http-instruments"] } oximeter-producer.workspace = true rustls = { workspace = true } +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] async-bb8-diesel.workspace = true diff --git a/nexus/authz-macros/Cargo.toml b/nexus/authz-macros/Cargo.toml index 40303b2e34..3d55afa477 100644 --- a/nexus/authz-macros/Cargo.toml +++ b/nexus/authz-macros/Cargo.toml @@ -14,3 +14,4 @@ quote.workspace = true serde.workspace = true serde_tokenstream.workspace = true syn.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/nexus/db-macros/Cargo.toml b/nexus/db-macros/Cargo.toml index 3fb228f26c..ce206bb56e 100644 --- a/nexus/db-macros/Cargo.toml +++ b/nexus/db-macros/Cargo.toml @@ -15,6 +15,7 @@ quote.workspace = true serde.workspace = true serde_tokenstream.workspace = true syn = { workspace = true, features = ["extra-traits"] } +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] rustfmt-wrapper.workspace = true diff --git a/nexus/db-macros/src/lookup.rs b/nexus/db-macros/src/lookup.rs index 93c2bd3652..38cab15e30 100644 --- a/nexus/db-macros/src/lookup.rs +++ b/nexus/db-macros/src/lookup.rs @@ -806,11 +806,11 @@ fn generate_database_functions(config: &Config) -> TokenStream { #lookup_filter .select(nexus_db_model::#resource_name::as_select()) .get_result_async( - datastore.pool_authorized(opctx).await? + &*datastore.pool_connection_authorized(opctx).await? 
) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::#resource_name, @@ -891,10 +891,10 @@ fn generate_database_functions(config: &Config) -> TokenStream { #soft_delete_filter #(.filter(dsl::#pkey_column_names.eq(#pkey_names.clone())))* .select(nexus_db_model::#resource_name::as_select()) - .get_result_async(datastore.pool_authorized(opctx).await?) + .get_result_async(&*datastore.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::#resource_name, diff --git a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml index dc83670725..aedbb9168b 100644 --- a/nexus/db-model/Cargo.toml +++ b/nexus/db-model/Cargo.toml @@ -36,6 +36,7 @@ nexus-defaults.workspace = true nexus-types.workspace = true omicron-passwords.workspace = true sled-agent-client.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 3fde9ee715..94a770e2ca 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -1130,7 +1130,7 @@ table! { /// /// This should be updated whenever the schema is changed. For more details, /// refer to: schema/crdb/README.adoc -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(4, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(5, 0, 0); allow_tables_to_appear_in_same_query!( system_update, diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index a8256cb60a..af01c1732b 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -63,6 +63,7 @@ nexus-types.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true oximeter.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] assert_matches.workspace = true diff --git a/nexus/db-queries/src/db/collection_attach.rs b/nexus/db-queries/src/db/collection_attach.rs index c88054795d..40ec659bf9 100644 --- a/nexus/db-queries/src/db/collection_attach.rs +++ b/nexus/db-queries/src/db/collection_attach.rs @@ -17,7 +17,7 @@ use super::cte_utils::{ QueryFromClause, QuerySqlType, TableDefaultWhereClause, }; use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionError}; use diesel::associations::HasTable; use diesel::expression::{AsExpression, Expression}; use diesel::helper_types::*; @@ -299,7 +299,7 @@ where /// Result of [`AttachToCollectionStatement`] when executed asynchronously pub type AsyncAttachToCollectionResult = - Result<(C, ResourceType), AttachError>; + Result<(C, ResourceType), AttachError>; /// Errors returned by [`AttachToCollectionStatement`]. #[derive(Debug)] @@ -332,10 +332,9 @@ where AttachToCollectionStatement: Send, { /// Issues the CTE asynchronously and parses the result. - pub async fn attach_and_get_result_async( + pub async fn attach_and_get_result_async( self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, ) -> AsyncAttachToCollectionResult where // We require this bound to ensure that "Self" is runnable as query. 
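An illustrative sketch (not part of the patch itself), with stand-in types, of the signature simplification being made to these CTE helpers: instead of being generic over any connection type whose error converts into PoolError, the helper now takes the one concrete connection type, so the extra type parameter and the From/PoolError bounds can be dropped and the connection error is returned as-is.

// Stand-in types only; `Connection` and `ConnectionError` here are not the
// async-bb8-diesel types, just placeholders with the same shape.
struct Connection;
#[derive(Debug, PartialEq)]
struct ConnectionError(&'static str);

// Before (shape only):
//   fn run<Conn, ConnErr>(conn: &Conn) -> Result<(), PoolError>
//   where Conn: RunsQueries<Error = ConnErr>, PoolError: From<ConnErr> { ... }
//
// After (shape only): one concrete connection type, one concrete error type.
fn run(_conn: &Connection) -> Result<(), ConnectionError> {
    Err(ConnectionError("database error"))
}

fn main() {
    let conn = Connection;
    // The caller sees the connection error directly, with no PoolError wrapper.
    assert_eq!(run(&conn), Err(ConnectionError("database error")));
}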
@@ -344,13 +343,11 @@ where DbConnection, RawOutput, >, - ConnErr: From + Send + 'static, - PoolError: From, { self.get_result_async::>(conn) .await // If the database returns an error, propagate it right away. - .map_err(|e| AttachError::DatabaseError(PoolError::from(e))) + .map_err(|e| AttachError::DatabaseError(e)) // Otherwise, parse the output to determine if the CTE succeeded. .and_then(Self::parse_result) } @@ -570,6 +567,7 @@ mod test { }; use async_bb8_diesel::{ AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, + ConnectionManager, }; use chrono::Utc; use db_macros::Resource; @@ -605,7 +603,9 @@ mod test { } } - async fn setup_db(pool: &crate::db::Pool) { + async fn setup_db( + pool: &crate::db::Pool, + ) -> bb8::PooledConnection> { let connection = pool.pool().get().await.unwrap(); (*connection) .batch_execute_async( @@ -633,6 +633,7 @@ mod test { ) .await .unwrap(); + connection } /// Describes a resource within the database. @@ -669,7 +670,7 @@ mod test { async fn insert_collection( id: Uuid, name: &str, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) -> Collection { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), @@ -680,18 +681,21 @@ mod test { diesel::insert_into(collection::table) .values(c) - .execute_async(pool.pool()) + .execute_async(conn) .await .unwrap(); - get_collection(id, &pool).await + get_collection(id, &conn).await } - async fn get_collection(id: Uuid, pool: &db::Pool) -> Collection { + async fn get_collection( + id: Uuid, + conn: &async_bb8_diesel::Connection, + ) -> Collection { collection::table .find(id) .select(Collection::as_select()) - .first_async(pool.pool()) + .first_async(conn) .await .unwrap() } @@ -699,7 +703,7 @@ mod test { async fn insert_resource( id: Uuid, name: &str, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) -> Resource { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), @@ -712,18 +716,21 @@ mod test { diesel::insert_into(resource::table) .values(r) - .execute_async(pool.pool()) + .execute_async(conn) .await .unwrap(); - get_resource(id, &pool).await + get_resource(id, conn).await } - async fn get_resource(id: Uuid, pool: &db::Pool) -> Resource { + async fn get_resource( + id: Uuid, + conn: &async_bb8_diesel::Connection, + ) -> Resource { resource::table .find(id) .select(Resource::as_select()) - .first_async(pool.pool()) + .first_async(conn) .await .unwrap() } @@ -856,7 +863,7 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -869,7 +876,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; assert!(matches!(attach, Err(AttachError::CollectionNotFound))); @@ -885,14 +892,14 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection let collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; // Attempt to attach - even though the resource does not exist. 
let attach = Collection::attach_resource( @@ -904,12 +911,12 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; assert!(matches!(attach, Err(AttachError::ResourceNotFound))); // The collection should remain unchanged. - assert_eq!(collection, get_collection(collection_id, &pool).await); + assert_eq!(collection, get_collection(collection_id, &conn).await); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -922,15 +929,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; // Attach the resource to the collection. let attach = Collection::attach_resource( @@ -942,7 +949,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; // "attach_and_get_result_async" should return the "attached" resource. @@ -955,9 +962,9 @@ mod test { // The returned value should be the latest value in the DB. assert_eq!( returned_collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); - assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + assert_eq!(returned_resource, get_resource(resource_id, &conn).await); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -970,15 +977,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; // Attach the resource to the collection. let attach_query = Collection::attach_resource( @@ -991,10 +998,10 @@ mod test { .set(resource::dsl::collection_id.eq(collection_id)), ); - type TxnError = - TransactionError>; - let result = pool - .pool() + type TxnError = TransactionError< + AttachError, + >; + let result = conn .transaction_async(|conn| async move { attach_query.attach_and_get_result_async(&conn).await.map_err( |e| match e { @@ -1015,9 +1022,9 @@ mod test { // The returned values should be the latest value in the DB. 
assert_eq!( returned_collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); - assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + assert_eq!(returned_resource, get_resource(resource_id, &conn).await); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -1030,7 +1037,7 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; const RESOURCE_COUNT: u32 = 5; @@ -1038,12 +1045,12 @@ mod test { // Create the collection. let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; // Create each resource, attaching them to the collection. for i in 0..RESOURCE_COUNT { let resource_id = uuid::Uuid::new_v4(); - insert_resource(resource_id, &format!("resource{}", i), &pool) + insert_resource(resource_id, &format!("resource{}", i), &conn) .await; // Attach the resource to the collection. @@ -1056,7 +1063,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; // "attach_and_get_result_async" should return the "attached" resource. @@ -1071,7 +1078,7 @@ mod test { // The returned resource value should be the latest value in the DB. assert_eq!( returned_resource, - get_resource(resource_id, &pool).await + get_resource(resource_id, &conn).await ); } @@ -1086,15 +1093,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); // Attach a resource to a collection, as usual. let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id1 = uuid::Uuid::new_v4(); - let _resource = insert_resource(resource_id1, "resource1", &pool).await; + let _resource = insert_resource(resource_id1, "resource1", &conn).await; let attach = Collection::attach_resource( collection_id, resource_id1, @@ -1104,7 +1111,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; assert_eq!( attach.expect("Attach should have worked").1.id(), @@ -1113,7 +1120,7 @@ mod test { // Let's try attaching a second resource, now that we're at capacity. 
let resource_id2 = uuid::Uuid::new_v4(); - let _resource = insert_resource(resource_id2, "resource2", &pool).await; + let _resource = insert_resource(resource_id2, "resource2", &conn).await; let attach = Collection::attach_resource( collection_id, resource_id2, @@ -1123,17 +1130,17 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; let err = attach.expect_err("Should have failed to attach"); match err { AttachError::NoUpdate { attached_count, resource, collection } => { assert_eq!(attached_count, 1); - assert_eq!(resource, get_resource(resource_id2, &pool).await); + assert_eq!(resource, get_resource(resource_id2, &conn).await); assert_eq!( collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); } _ => panic!("Unexpected error: {:?}", err), @@ -1150,15 +1157,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); // Attach a resource to a collection, as usual. let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id = uuid::Uuid::new_v4(); - let _resource = insert_resource(resource_id, "resource", &pool).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; let attach = Collection::attach_resource( collection_id, resource_id, @@ -1168,7 +1175,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; assert_eq!( attach.expect("Attach should have worked").1.id(), @@ -1185,7 +1192,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; let err = attach.expect_err("Should have failed to attach"); @@ -1203,10 +1210,10 @@ mod test { .expect("Should already be attached"), collection_id ); - assert_eq!(resource, get_resource(resource_id, &pool).await); + assert_eq!(resource, get_resource(resource_id, &conn).await); assert_eq!( collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); } _ => panic!("Unexpected error: {:?}", err), @@ -1222,7 +1229,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; let err = attach.expect_err("Should have failed to attach"); // Even when at capacity, the same information should be propagated back @@ -1237,10 +1244,10 @@ mod test { .expect("Should already be attached"), collection_id ); - assert_eq!(resource, get_resource(resource_id, &pool).await); + assert_eq!(resource, get_resource(resource_id, &conn).await); assert_eq!( collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); } _ => panic!("Unexpected error: {:?}", err), @@ -1257,15 +1264,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = 
uuid::Uuid::new_v4(); // Create the collection and resource. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; // Attach the resource to the collection. // @@ -1290,7 +1297,7 @@ mod test { resource::dsl::description.eq("new description".to_string()), )), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; let (_, returned_resource) = attach.expect("Attach should have worked"); @@ -1298,7 +1305,7 @@ mod test { returned_resource.collection_id.expect("Expected a collection ID"), collection_id ); - assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + assert_eq!(returned_resource, get_resource(resource_id, &conn).await); assert_eq!(returned_resource.description(), "new description"); db.cleanup().await.unwrap(); @@ -1312,22 +1319,22 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; // Immediately soft-delete the resource. diesel::update( resource::table.filter(resource::dsl::id.eq(resource_id)), ) .set(resource::dsl::time_deleted.eq(Utc::now())) - .execute_async(pool.pool()) + .execute_async(&*conn) .await .unwrap(); @@ -1342,7 +1349,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; assert!(matches!(attach, Err(AttachError::ResourceNotFound))); @@ -1357,19 +1364,19 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); // Create the collection and some resources. let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id1 = uuid::Uuid::new_v4(); let resource_id2 = uuid::Uuid::new_v4(); let _resource1 = - insert_resource(resource_id1, "resource1", &pool).await; + insert_resource(resource_id1, "resource1", &conn).await; let _resource2 = - insert_resource(resource_id2, "resource2", &pool).await; + insert_resource(resource_id2, "resource2", &conn).await; // Attach the resource to the collection. // @@ -1384,7 +1391,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(&conn) .await; let (_, returned_resource) = attach.expect("Attach should have worked"); @@ -1394,10 +1401,10 @@ mod test { // "resource2" should have automatically been filtered away from the // update statement, regardless of user input. 
assert_eq!( - get_resource(resource_id1, &pool).await.collection_id.unwrap(), + get_resource(resource_id1, &conn).await.collection_id.unwrap(), collection_id ); - assert!(get_resource(resource_id2, &pool) + assert!(get_resource(resource_id2, &conn) .await .collection_id .is_none()); diff --git a/nexus/db-queries/src/db/collection_detach.rs b/nexus/db-queries/src/db/collection_detach.rs index 04894ecb21..df157040e6 100644 --- a/nexus/db-queries/src/db/collection_detach.rs +++ b/nexus/db-queries/src/db/collection_detach.rs @@ -16,7 +16,7 @@ use super::cte_utils::{ QueryFromClause, QuerySqlType, }; use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionError}; use diesel::associations::HasTable; use diesel::expression::{AsExpression, Expression}; use diesel::helper_types::*; @@ -230,7 +230,7 @@ where /// Result of [`DetachFromCollectionStatement`] when executed asynchronously pub type AsyncDetachFromCollectionResult = - Result>; + Result>; /// Errors returned by [`DetachFromCollectionStatement`]. #[derive(Debug)] @@ -265,8 +265,7 @@ where /// Issues the CTE asynchronously and parses the result. pub async fn detach_and_get_result_async( self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, ) -> AsyncDetachFromCollectionResult where // We require this bound to ensure that "Self" is runnable as query. @@ -482,7 +481,9 @@ mod test { use super::*; use crate::db::collection_attach::DatastoreAttachTarget; use crate::db::{self, identity::Resource as IdentityResource}; - use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; + use async_bb8_diesel::{ + AsyncRunQueryDsl, AsyncSimpleConnection, ConnectionManager, + }; use chrono::Utc; use db_macros::Resource; use diesel::expression_methods::ExpressionMethods; @@ -517,7 +518,9 @@ mod test { } } - async fn setup_db(pool: &crate::db::Pool) { + async fn setup_db( + pool: &crate::db::Pool, + ) -> bb8::PooledConnection> { let connection = pool.pool().get().await.unwrap(); (*connection) .batch_execute_async( @@ -545,6 +548,7 @@ mod test { ) .await .unwrap(); + connection } /// Describes a resource within the database. 
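An illustrative sketch (not part of the patch itself), with stand-in types, of the test restructuring in the hunks above: setup_db now hands back the checked-out connection, and the helpers and transaction_async all run on that same connection rather than going back to the pool, so a whole test shares one database session.

// Stand-in types only; not the bb8 / async-bb8-diesel API.
struct Conn {
    session_id: u32,
}

impl Conn {
    // A transaction runs its body on this same connection (BEGIN ... COMMIT).
    fn transaction<T>(&self, body: impl FnOnce(&Conn) -> T) -> T {
        body(self)
    }
}

struct Pool;

impl Pool {
    fn get(&self) -> Conn {
        Conn { session_id: 1 }
    }
}

fn main() {
    let pool = Pool;
    let conn = pool.get(); // checked out once, at the top of the test
    // Helpers and transactions all reuse the same session.
    let inside = conn.transaction(|c| c.session_id);
    assert_eq!(inside, conn.session_id);
}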
@@ -581,7 +585,7 @@ mod test { async fn insert_collection( id: Uuid, name: &str, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) -> Collection { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), @@ -592,18 +596,21 @@ mod test { diesel::insert_into(collection::table) .values(c) - .execute_async(pool.pool()) + .execute_async(conn) .await .unwrap(); - get_collection(id, &pool).await + get_collection(id, conn).await } - async fn get_collection(id: Uuid, pool: &db::Pool) -> Collection { + async fn get_collection( + id: Uuid, + conn: &async_bb8_diesel::Connection, + ) -> Collection { collection::table .find(id) .select(Collection::as_select()) - .first_async(pool.pool()) + .first_async(conn) .await .unwrap() } @@ -611,7 +618,7 @@ mod test { async fn insert_resource( id: Uuid, name: &str, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) -> Resource { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), @@ -624,17 +631,17 @@ mod test { diesel::insert_into(resource::table) .values(r) - .execute_async(pool.pool()) + .execute_async(conn) .await .unwrap(); - get_resource(id, &pool).await + get_resource(id, conn).await } async fn attach_resource( collection_id: Uuid, resource_id: Uuid, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) { Collection::attach_resource( collection_id, @@ -645,16 +652,19 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(conn) .await .unwrap(); } - async fn get_resource(id: Uuid, pool: &db::Pool) -> Resource { + async fn get_resource( + id: Uuid, + conn: &async_bb8_diesel::Connection, + ) -> Resource { resource::table .find(id) .select(Resource::as_select()) - .first_async(pool.pool()) + .first_async(conn) .await .unwrap() } @@ -777,7 +787,7 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -789,7 +799,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert!(matches!(detach, Err(DetachError::CollectionNotFound))); @@ -805,14 +815,14 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection let collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; // Attempt to detach - even though the resource does not exist. let detach = Collection::detach_resource( @@ -823,12 +833,12 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert!(matches!(detach, Err(DetachError::ResourceNotFound))); // The collection should remain unchanged. 
- assert_eq!(collection, get_collection(collection_id, &pool).await); + assert_eq!(collection, get_collection(collection_id, &conn).await); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -841,16 +851,16 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. Attach them. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Detach the resource from the collection. let detach = Collection::detach_resource( @@ -861,14 +871,14 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; // "detach_and_get_result_async" should return the "detached" resource. let returned_resource = detach.expect("Detach should have worked"); assert!(returned_resource.collection_id.is_none(),); // The returned value should be the latest value in the DB. - assert_eq!(returned_resource, get_resource(resource_id, &pool).await); + assert_eq!(returned_resource, get_resource(resource_id, &conn).await); db.cleanup().await.unwrap(); logctx.cleanup_successful(); @@ -881,15 +891,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id = uuid::Uuid::new_v4(); - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Detach a resource from a collection, as usual. 
let detach = Collection::detach_resource( @@ -900,7 +910,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert_eq!( detach.expect("Detach should have worked").id(), @@ -916,7 +926,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; let err = detach.expect_err("Should have failed to detach"); @@ -925,10 +935,10 @@ mod test { match err { DetachError::NoUpdate { resource, collection } => { assert!(resource.collection_id.as_ref().is_none()); - assert_eq!(resource, get_resource(resource_id, &pool).await); + assert_eq!(resource, get_resource(resource_id, &conn).await); assert_eq!( collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); } _ => panic!("Unexpected error: {:?}", err), @@ -945,22 +955,22 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; // Immediately soft-delete the resource. diesel::update( resource::table.filter(resource::dsl::id.eq(resource_id)), ) .set(resource::dsl::time_deleted.eq(Utc::now())) - .execute_async(pool.pool()) + .execute_async(&*conn) .await .unwrap(); @@ -974,7 +984,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert!(matches!(detach, Err(DetachError::ResourceNotFound))); @@ -989,21 +999,21 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); // Create the collection and some resources. let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id1 = uuid::Uuid::new_v4(); let resource_id2 = uuid::Uuid::new_v4(); let _resource1 = - insert_resource(resource_id1, "resource1", &pool).await; - attach_resource(collection_id, resource_id1, &pool).await; + insert_resource(resource_id1, "resource1", &conn).await; + attach_resource(collection_id, resource_id1, &conn).await; let _resource2 = - insert_resource(resource_id2, "resource2", &pool).await; - attach_resource(collection_id, resource_id2, &pool).await; + insert_resource(resource_id2, "resource2", &conn).await; + attach_resource(collection_id, resource_id2, &conn).await; // Detach the resource from the collection. 
// @@ -1017,7 +1027,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; let returned_resource = detach.expect("Detach should have worked"); @@ -1026,11 +1036,11 @@ mod test { // Note that only "resource1" should be detached. // "resource2" should have automatically been filtered away from the // update statement, regardless of user input. - assert!(get_resource(resource_id1, &pool) + assert!(get_resource(resource_id1, &conn) .await .collection_id .is_none()); - assert!(get_resource(resource_id2, &pool) + assert!(get_resource(resource_id2, &conn) .await .collection_id .is_some()); diff --git a/nexus/db-queries/src/db/collection_detach_many.rs b/nexus/db-queries/src/db/collection_detach_many.rs index 3418296568..0b65c404c5 100644 --- a/nexus/db-queries/src/db/collection_detach_many.rs +++ b/nexus/db-queries/src/db/collection_detach_many.rs @@ -16,7 +16,7 @@ use super::cte_utils::{ QueryFromClause, QuerySqlType, }; use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::AsyncRunQueryDsl; use diesel::associations::HasTable; use diesel::expression::{AsExpression, Expression}; use diesel::helper_types::*; @@ -241,7 +241,7 @@ where /// Result of [`DetachManyFromCollectionStatement`] when executed asynchronously pub type AsyncDetachManyFromCollectionResult = - Result>; + Result>; /// Errors returned by [`DetachManyFromCollectionStatement`]. #[derive(Debug)] @@ -273,21 +273,18 @@ where DetachManyFromCollectionStatement: Send, { /// Issues the CTE asynchronously and parses the result. - pub async fn detach_and_get_result_async( + pub async fn detach_and_get_result_async( self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, ) -> AsyncDetachManyFromCollectionResult where // We require this bound to ensure that "Self" is runnable as query. Self: query_methods::LoadQuery<'static, DbConnection, RawOutput>, - ConnErr: From + Send + 'static, - PoolError: From, { self.get_result_async::>(conn) .await // If the database returns an error, propagate it right away. - .map_err(|e| DetachManyError::DatabaseError(PoolError::from(e))) + .map_err(|e| DetachManyError::DatabaseError(e)) // Otherwise, parse the output to determine if the CTE succeeded. .and_then(Self::parse_result) } @@ -486,6 +483,7 @@ mod test { }; use async_bb8_diesel::{ AsyncConnection, AsyncRunQueryDsl, AsyncSimpleConnection, + ConnectionManager, }; use chrono::Utc; use db_macros::Resource; @@ -521,7 +519,9 @@ mod test { } } - async fn setup_db(pool: &crate::db::Pool) { + async fn setup_db( + pool: &crate::db::Pool, + ) -> bb8::PooledConnection> { let connection = pool.pool().get().await.unwrap(); (*connection) .batch_execute_async( @@ -549,6 +549,7 @@ mod test { ) .await .unwrap(); + connection } /// Describes a resource within the database. 
@@ -585,7 +586,7 @@ mod test { async fn insert_collection( id: Uuid, name: &str, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) -> Collection { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), @@ -596,18 +597,21 @@ mod test { diesel::insert_into(collection::table) .values(c) - .execute_async(pool.pool()) + .execute_async(conn) .await .unwrap(); - get_collection(id, &pool).await + get_collection(id, conn).await } - async fn get_collection(id: Uuid, pool: &db::Pool) -> Collection { + async fn get_collection( + id: Uuid, + conn: &async_bb8_diesel::Connection, + ) -> Collection { collection::table .find(id) .select(Collection::as_select()) - .first_async(pool.pool()) + .first_async(conn) .await .unwrap() } @@ -615,7 +619,7 @@ mod test { async fn insert_resource( id: Uuid, name: &str, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) -> Resource { let create_params = IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), @@ -628,17 +632,17 @@ mod test { diesel::insert_into(resource::table) .values(r) - .execute_async(pool.pool()) + .execute_async(conn) .await .unwrap(); - get_resource(id, &pool).await + get_resource(id, conn).await } async fn attach_resource( collection_id: Uuid, resource_id: Uuid, - pool: &db::Pool, + conn: &async_bb8_diesel::Connection, ) { Collection::attach_resource( collection_id, @@ -649,16 +653,19 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .attach_and_get_result_async(pool.pool()) + .attach_and_get_result_async(conn) .await .unwrap(); } - async fn get_resource(id: Uuid, pool: &db::Pool) -> Resource { + async fn get_resource( + id: Uuid, + conn: &async_bb8_diesel::Connection, + ) -> Resource { resource::table .find(id) .select(Resource::as_select()) - .first_async(pool.pool()) + .first_async(conn) .await .unwrap() } @@ -775,7 +782,7 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let _resource_id = uuid::Uuid::new_v4(); @@ -788,7 +795,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert!(matches!(detach, Err(DetachManyError::CollectionNotFound))); @@ -805,14 +812,14 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let _resource_id = uuid::Uuid::new_v4(); // Create the collection let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; // Attempt to detach - even though the resource does not exist. let detach = Collection::detach_resources( @@ -824,7 +831,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; let returned_collection = detach.expect("Detach should have worked"); @@ -832,7 +839,7 @@ mod test { // The collection should still be updated. 
assert_eq!( returned_collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); db.cleanup().await.unwrap(); @@ -846,16 +853,16 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. Attach them. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Detach the resource from the collection. let detach = Collection::detach_resources( @@ -867,7 +874,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; // "detach_and_get_result_async" should return the updated collection. @@ -875,7 +882,7 @@ mod test { // The returned value should be the latest value in the DB. assert_eq!( returned_collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); db.cleanup().await.unwrap(); @@ -889,16 +896,16 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Detach the resource from the collection. let detach_query = Collection::detach_resources( @@ -911,10 +918,10 @@ mod test { .set(resource::dsl::collection_id.eq(Option::::None)), ); - type TxnError = - TransactionError>; - let result = pool - .pool() + type TxnError = TransactionError< + DetachManyError, + >; + let result = conn .transaction_async(|conn| async move { detach_query.detach_and_get_result_async(&conn).await.map_err( |e| match e { @@ -930,7 +937,7 @@ mod test { // The returned values should be the latest value in the DB. 
assert_eq!( returned_collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); db.cleanup().await.unwrap(); @@ -944,15 +951,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id = uuid::Uuid::new_v4(); - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Detach a resource from a collection, as usual. let detach = Collection::detach_resources( @@ -964,7 +971,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert_eq!( detach.expect("Detach should have worked").description(), @@ -982,7 +989,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert_eq!( detach.expect("Detach should have worked").description(), @@ -1000,15 +1007,15 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let _collection = - insert_collection(collection_id, "collection", &pool).await; + insert_collection(collection_id, "collection", &conn).await; let resource_id = uuid::Uuid::new_v4(); - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Detach a resource from a collection, but do so with a picky filter // on the collectipon. @@ -1023,7 +1030,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; let err = detach.expect_err("Expected this detach to fail"); @@ -1034,7 +1041,7 @@ mod test { DetachManyError::NoUpdate { collection } => { assert_eq!( collection, - get_collection(collection_id, &pool).await + get_collection(collection_id, &conn).await ); } _ => panic!("Unexpected error: {:?}", err), @@ -1051,23 +1058,23 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); // Create the collection and resource. let _collection = - insert_collection(collection_id, "collection", &pool).await; - let _resource = insert_resource(resource_id, "resource", &pool).await; - attach_resource(collection_id, resource_id, &pool).await; + insert_collection(collection_id, "collection", &conn).await; + let _resource = insert_resource(resource_id, "resource", &conn).await; + attach_resource(collection_id, resource_id, &conn).await; // Immediately soft-delete the resource. 
diesel::update( resource::table.filter(resource::dsl::id.eq(resource_id)), ) .set(resource::dsl::time_deleted.eq(Utc::now())) - .execute_async(pool.pool()) + .execute_async(&*conn) .await .unwrap(); @@ -1082,7 +1089,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(collection_id)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; assert_eq!( @@ -1090,7 +1097,7 @@ mod test { "Updated desc" ); assert_eq!( - get_resource(resource_id, &pool) + get_resource(resource_id, &conn) .await .collection_id .as_ref() @@ -1109,20 +1116,20 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; // Create the collection and some resources. let collection_id1 = uuid::Uuid::new_v4(); let _collection1 = - insert_collection(collection_id1, "collection", &pool).await; + insert_collection(collection_id1, "collection", &conn).await; let resource_id1 = uuid::Uuid::new_v4(); let resource_id2 = uuid::Uuid::new_v4(); let _resource1 = - insert_resource(resource_id1, "resource1", &pool).await; - attach_resource(collection_id1, resource_id1, &pool).await; + insert_resource(resource_id1, "resource1", &conn).await; + attach_resource(collection_id1, resource_id1, &conn).await; let _resource2 = - insert_resource(resource_id2, "resource2", &pool).await; - attach_resource(collection_id1, resource_id2, &pool).await; + insert_resource(resource_id2, "resource2", &conn).await; + attach_resource(collection_id1, resource_id2, &conn).await; // Create a separate collection with a resource. // @@ -1130,11 +1137,11 @@ mod test { // on "collection_id1". let collection_id2 = uuid::Uuid::new_v4(); let _collection2 = - insert_collection(collection_id2, "collection2", &pool).await; + insert_collection(collection_id2, "collection2", &conn).await; let resource_id3 = uuid::Uuid::new_v4(); let _resource3 = - insert_resource(resource_id3, "resource3", &pool).await; - attach_resource(collection_id2, resource_id3, &pool).await; + insert_resource(resource_id3, "resource3", &conn).await; + attach_resource(collection_id2, resource_id3, &conn).await; // Detach the resource from the collection. let detach = Collection::detach_resources( @@ -1146,7 +1153,7 @@ mod test { diesel::update(resource::table) .set(resource::dsl::collection_id.eq(Option::::None)), ) - .detach_and_get_result_async(pool.pool()) + .detach_and_get_result_async(&conn) .await; let returned_resource = detach.expect("Detach should have worked"); @@ -1154,18 +1161,18 @@ mod test { assert_eq!(returned_resource.description(), "Updated desc"); // Note that only "resource1" and "resource2" should be detached. - assert!(get_resource(resource_id1, &pool) + assert!(get_resource(resource_id1, &conn) .await .collection_id .is_none()); - assert!(get_resource(resource_id2, &pool) + assert!(get_resource(resource_id2, &conn) .await .collection_id .is_none()); // "resource3" should have been left alone. assert_eq!( - get_resource(resource_id3, &pool) + get_resource(resource_id3, &conn) .await .collection_id .as_ref() diff --git a/nexus/db-queries/src/db/collection_insert.rs b/nexus/db-queries/src/db/collection_insert.rs index cebb21a96d..993f16e048 100644 --- a/nexus/db-queries/src/db/collection_insert.rs +++ b/nexus/db-queries/src/db/collection_insert.rs @@ -10,7 +10,7 @@ //! 
3) inserts the child resource row use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionError, PoolError}; +use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionError}; use diesel::associations::HasTable; use diesel::helper_types::*; use diesel::pg::Pg; @@ -170,7 +170,7 @@ pub enum AsyncInsertError { /// The collection that the query was inserting into does not exist CollectionNotFound, /// Other database error - DatabaseError(PoolError), + DatabaseError(ConnectionError), } impl InsertIntoCollectionStatement @@ -188,20 +188,17 @@ where /// - Ok(new row) /// - Error(collection not found) /// - Error(other diesel error) - pub async fn insert_and_get_result_async( + pub async fn insert_and_get_result_async( self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, ) -> AsyncInsertIntoCollectionResult where // We require this bound to ensure that "Self" is runnable as query. Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, - ConnErr: From + Send + 'static, - PoolError: From, { self.get_result_async::(conn) .await - .map_err(|e| Self::translate_async_error(PoolError::from(e))) + .map_err(|e| Self::translate_async_error(e)) } /// Issues the CTE asynchronously and parses the result. @@ -210,20 +207,17 @@ where /// - Ok(Vec of new rows) /// - Error(collection not found) /// - Error(other diesel error) - pub async fn insert_and_get_results_async( + pub async fn insert_and_get_results_async( self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, ) -> AsyncInsertIntoCollectionResult> where // We require this bound to ensure that "Self" is runnable as query. Self: query_methods::LoadQuery<'static, DbConnection, ResourceType>, - ConnErr: From + Send + 'static, - PoolError: From, { self.get_results_async::(conn) .await - .map_err(|e| Self::translate_async_error(PoolError::from(e))) + .map_err(|e| Self::translate_async_error(e)) } /// Check for the intentional division by zero error @@ -244,9 +238,9 @@ where /// Translate from diesel errors into AsyncInsertError, handling the /// intentional division-by-zero error in the CTE. - fn translate_async_error(err: PoolError) -> AsyncInsertError { + fn translate_async_error(err: ConnectionError) -> AsyncInsertError { match err { - PoolError::Connection(ConnectionError::Query(err)) + ConnectionError::Query(err) if Self::error_is_division_by_zero(&err) => { AsyncInsertError::CollectionNotFound @@ -393,7 +387,9 @@ where mod test { use super::*; use crate::db::{self, identity::Resource as IdentityResource}; - use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; + use async_bb8_diesel::{ + AsyncRunQueryDsl, AsyncSimpleConnection, ConnectionManager, + }; use chrono::{NaiveDateTime, TimeZone, Utc}; use db_macros::Resource; use diesel::expression_methods::ExpressionMethods; @@ -426,7 +422,9 @@ mod test { } } - async fn setup_db(pool: &crate::db::Pool) { + async fn setup_db( + pool: &crate::db::Pool, + ) -> bb8::PooledConnection> { let connection = pool.pool().get().await.unwrap(); (*connection) .batch_execute_async( @@ -452,6 +450,7 @@ mod test { ) .await .unwrap(); + connection } /// Describes an organization within the database. 
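An illustrative sketch (not part of the patch itself), with stand-in enums, of the error translation above: the insert CTE deliberately divides by zero when the parent collection is missing, and translate_async_error maps that sentinel query error to CollectionNotFound. With ConnectionError now the top-level error type, only one layer of the error has to be matched.

// Stand-in enums; not the diesel / async-bb8-diesel types.
#[derive(Debug)]
enum QueryError {
    DivisionByZero,
    Other(&'static str),
}

#[derive(Debug)]
enum ConnectionError {
    Query(QueryError),
    Io(&'static str),
}

#[derive(Debug)]
enum AsyncInsertError {
    CollectionNotFound,
    DatabaseError(ConnectionError),
}

fn translate(err: ConnectionError) -> AsyncInsertError {
    match err {
        // The CTE's intentional division-by-zero means "collection not found".
        ConnectionError::Query(QueryError::DivisionByZero) => {
            AsyncInsertError::CollectionNotFound
        }
        other => AsyncInsertError::DatabaseError(other),
    }
}

fn main() {
    assert!(matches!(
        translate(ConnectionError::Query(QueryError::DivisionByZero)),
        AsyncInsertError::CollectionNotFound
    ));
    assert!(matches!(
        translate(ConnectionError::Io("broken pipe")),
        AsyncInsertError::DatabaseError(_)
    ));
}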
@@ -548,7 +547,7 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -563,7 +562,7 @@ mod test { resource::dsl::collection_id.eq(collection_id), )), ) - .insert_and_get_result_async(pool.pool()) + .insert_and_get_result_async(&conn) .await; assert!(matches!(insert, Err(AsyncInsertError::CollectionNotFound))); @@ -578,7 +577,7 @@ mod test { let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); - setup_db(&pool).await; + let conn = setup_db(&pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -593,7 +592,7 @@ mod test { collection::dsl::time_modified.eq(Utc::now()), collection::dsl::rcgen.eq(1), )]) - .execute_async(pool.pool()) + .execute_async(&*conn) .await .unwrap(); @@ -614,7 +613,7 @@ mod test { resource::dsl::collection_id.eq(collection_id), )]), ) - .insert_and_get_result_async(pool.pool()) + .insert_and_get_result_async(&conn) .await .unwrap(); assert_eq!(resource.id(), resource_id); @@ -627,7 +626,7 @@ mod test { let collection_rcgen = collection::table .find(collection_id) .select(collection::dsl::rcgen) - .first_async::(pool.pool()) + .first_async::(&*conn) .await .unwrap(); diff --git a/nexus/db-queries/src/db/datastore/address_lot.rs b/nexus/db-queries/src/db/datastore/address_lot.rs index 35b45753e6..9d264dbf6b 100644 --- a/nexus/db-queries/src/db/datastore/address_lot.rs +++ b/nexus/db-queries/src/db/datastore/address_lot.rs @@ -7,14 +7,14 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::datastore::PgConnection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::Name; use crate::db::model::{AddressLot, AddressLotBlock, AddressLotReservedBlock}; use crate::db::pagination::paginated; use async_bb8_diesel::{ - AsyncConnection, AsyncRunQueryDsl, Connection, ConnectionError, PoolError, + AsyncConnection, AsyncRunQueryDsl, Connection, ConnectionError, }; use chrono::Utc; use diesel::result::Error as DieselError; @@ -47,7 +47,7 @@ impl DataStore { use db::schema::address_lot::dsl as lot_dsl; use db::schema::address_lot_block::dsl as block_dsl; - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? 
// TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage @@ -84,16 +84,16 @@ impl DataStore { }) .await .map_err(|e| match e { - PoolError::Connection(ConnectionError::Query( - DieselError::DatabaseError(_, _), - )) => public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::AddressLot, - ¶ms.identity.name.as_str(), - ), - ), - _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + ConnectionError::Query(DieselError::DatabaseError(_, _)) => { + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::AddressLot, + ¶ms.identity.name.as_str(), + ), + ) + } + _ => public_error_from_diesel(e, ErrorHandler::Server), }) } @@ -110,7 +110,7 @@ impl DataStore { let id = authz_address_lot.id(); - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; #[derive(Debug)] enum AddressLotDeleteError { @@ -121,7 +121,7 @@ impl DataStore { // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { let rsvd: Vec = rsvd_block_dsl::address_lot_rsvd_block .filter(rsvd_block_dsl::address_lot_id.eq(id)) @@ -151,8 +151,8 @@ impl DataStore { }) .await .map_err(|e| match e { - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } TxnError::CustomError(AddressLotDeleteError::LotInUse) => { Error::invalid_request("lot is in use") @@ -179,9 +179,9 @@ impl DataStore { } .filter(dsl::time_deleted.is_null()) .select(AddressLot::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn address_lot_block_list( @@ -192,14 +192,14 @@ impl DataStore { ) -> ListResultVec { use db::schema::address_lot_block::dsl; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; paginated(dsl::address_lot_block, dsl::id, &pagparams) .filter(dsl::address_lot_id.eq(authz_address_lot.id())) .select(AddressLotBlock::as_select()) - .load_async(pool) + .load_async(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn address_lot_id_for_block_id( @@ -207,7 +207,7 @@ impl DataStore { opctx: &OpContext, address_lot_block_id: Uuid, ) -> LookupResult { - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; use db::schema::address_lot_block; use db::schema::address_lot_block::dsl as block_dsl; @@ -216,11 +216,9 @@ impl DataStore { .filter(address_lot_block::id.eq(address_lot_block_id)) .select(address_lot_block::address_lot_id) .limit(1) - .first_async::(pool) + .first_async::(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(address_lot_id) } diff --git a/nexus/db-queries/src/db/datastore/certificate.rs b/nexus/db-queries/src/db/datastore/certificate.rs index c37d026251..4b043becd8 100644 --- a/nexus/db-queries/src/db/datastore/certificate.rs +++ b/nexus/db-queries/src/db/datastore/certificate.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::Certificate; use crate::db::model::Name; @@ -49,10 +49,10 @@ impl DataStore { .do_update() .set(dsl::time_modified.eq(dsl::time_modified)) .returning(Certificate::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::Certificate, @@ -117,9 +117,11 @@ impl DataStore { query .select(Certificate::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn certificate_delete( @@ -136,10 +138,10 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_cert.id())) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_cert), ) diff --git a/nexus/db-queries/src/db/datastore/console_session.rs b/nexus/db-queries/src/db/datastore/console_session.rs index 1e02f9b61d..113a316ae4 100644 --- a/nexus/db-queries/src/db/datastore/console_session.rs +++ b/nexus/db-queries/src/db/datastore/console_session.rs @@ -46,7 +46,7 @@ impl DataStore { diesel::insert_into(dsl::console_session) .values(session) .returning(ConsoleSession::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { Error::internal_error(&format!( @@ -68,7 +68,7 @@ impl DataStore { .filter(dsl::token.eq(authz_session.id())) .set((dsl::time_last_used.eq(Utc::now()),)) .returning(ConsoleSession::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { Error::internal_error(&format!( @@ -130,7 +130,7 @@ impl DataStore { diesel::delete(dsl::console_session) .filter(dsl::silo_user_id.eq(silo_user_id)) .filter(dsl::token.eq(authz_session.id())) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map(|_rows_deleted| ()) .map_err(|e| { diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 55259e922f..99972459c8 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Dataset; @@ -44,22 +44,22 @@ impl DataStore { dsl::kind.eq(excluded(dsl::kind)), )), ) - .insert_and_get_result_async(self.pool()) + .insert_and_get_result_async( + &*self.pool_connection_unauthorized().await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { type_name: ResourceType::Zpool, lookup_type: LookupType::ById(zpool_id), }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::Dataset, - &dataset.id().to_string(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::Dataset, + &dataset.id().to_string(), + ), + ), }) } } diff --git a/nexus/db-queries/src/db/datastore/db_metadata.rs b/nexus/db-queries/src/db/datastore/db_metadata.rs index ac43081601..181b3c1798 100644 --- a/nexus/db-queries/src/db/datastore/db_metadata.rs +++ b/nexus/db-queries/src/db/datastore/db_metadata.rs @@ -6,7 +6,7 @@ use super::DataStore; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::TransactionError; use async_bb8_diesel::{ @@ -270,11 +270,9 @@ impl DataStore { let version: String = dsl::db_metadata .filter(dsl::singleton.eq(true)) .select(dsl::version) - .get_result_async(self.pool()) + .get_result_async(&*self.pool_connection_unauthorized().await?) 
.await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; SemverVersion::from_str(&version).map_err(|e| { Error::internal_error(&format!("Invalid schema version: {e}")) @@ -312,9 +310,9 @@ impl DataStore { dsl::time_modified.eq(Utc::now()), dsl::target_version.eq(Some(to_version.to_string())), )) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if rows_updated != 1 { return Err(Error::internal_error( @@ -332,7 +330,7 @@ impl DataStore { target: &SemverVersion, sql: &String, ) -> Result<(), Error> { - let result = self.pool().transaction_async(|conn| async move { + let result = self.pool_connection_unauthorized().await?.transaction_async(|conn| async move { if target.to_string() != EARLIEST_SUPPORTED_VERSION { let validate_version_query = format!("SELECT CAST(\ IF(\ @@ -353,8 +351,8 @@ impl DataStore { match result { Ok(()) => Ok(()), Err(TransactionError::CustomError(())) => panic!("No custom error"), - Err(TransactionError::Pool(e)) => { - Err(public_error_from_diesel_pool(e, ErrorHandler::Server)) + Err(TransactionError::Connection(e)) => { + Err(public_error_from_diesel(e, ErrorHandler::Server)) } } } @@ -378,9 +376,9 @@ impl DataStore { dsl::version.eq(to_version.to_string()), dsl::target_version.eq(None as Option), )) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if rows_updated != 1 { return Err(Error::internal_error( @@ -432,6 +430,7 @@ mod test { let cfg = db::Config { url: crdb.pg_config().clone() }; let pool = Arc::new(db::Pool::new(&logctx.log, &cfg)); + let conn = pool.pool().get().await.unwrap(); // Mimic the layout of "schema/crdb". let config_dir = tempfile::TempDir::new().unwrap(); @@ -457,7 +456,7 @@ mod test { use db::schema::db_metadata::dsl; diesel::update(dsl::db_metadata.filter(dsl::singleton.eq(true))) .set(dsl::version.eq(v0.to_string())) - .execute_async(pool.pool()) + .execute_async(&*conn) .await .expect("Failed to set version back to 0.0.0"); @@ -507,7 +506,7 @@ mod test { "EXISTS (SELECT * FROM pg_tables WHERE tablename = 'widget')" ) ) - .get_result_async::(datastore.pool()) + .get_result_async::(&*datastore.pool_connection_for_tests().await.unwrap()) .await .expect("Failed to query for table"); assert_eq!(result, false, "The 'widget' table should have been deleted, but it exists.\ diff --git a/nexus/db-queries/src/db/datastore/device_auth.rs b/nexus/db-queries/src/db/datastore/device_auth.rs index 62e54f2321..e084834833 100644 --- a/nexus/db-queries/src/db/datastore/device_auth.rs +++ b/nexus/db-queries/src/db/datastore/device_auth.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::DeviceAccessToken; @@ -42,9 +42,9 @@ impl DataStore { diesel::insert_into(dsl::device_auth_request) .values(auth_request) .returning(DeviceAuthRequest::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) 
+ .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Remove the device authorization request and create a new device @@ -77,7 +77,7 @@ impl DataStore { } type TxnError = TransactionError; - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { match delete_request.execute_async(&conn).await? { @@ -103,8 +103,8 @@ impl DataStore { TxnError::CustomError(TokenGrantError::TooManyRequests) => { Error::internal_error("unexpectedly found multiple device auth requests for the same user code") } - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } }) } @@ -127,10 +127,10 @@ impl DataStore { .filter(dsl::client_id.eq(client_id)) .filter(dsl::device_code.eq(device_code)) .select(DeviceAccessToken::as_select()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::DeviceAccessToken, diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index 7ae9967285..80f72c1e18 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -15,7 +15,7 @@ use crate::db::collection_detach::DatastoreDetachTarget; use crate::db::collection_detach::DetachError; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; @@ -71,9 +71,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::attach_instance_id.eq(authz_instance.id())) .select(Disk::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn project_create_disk( @@ -98,16 +98,16 @@ impl DataStore { .do_update() .set(dsl::time_modified.eq(dsl::time_modified)), ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) + .insert_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => authz_project.not_found(), - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict(ResourceType::Disk, name.as_str()), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict(ResourceType::Disk, name.as_str()), + ), })?; let runtime = disk.runtime(); @@ -146,9 +146,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::project_id.eq(authz_project.id())) .select(Disk::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Attaches a disk to an instance, if both objects: @@ -199,7 +199,7 @@ impl DataStore { diesel::update(disk::dsl::disk).set(attach_update), ); - let (instance, disk) = query.attach_and_get_result_async(self.pool_authorized(opctx).await?) + let (instance, disk) = query.attach_and_get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .or_else(|e| { match e { @@ -278,7 +278,7 @@ impl DataStore { } }, AttachError::DatabaseError(e) => { - Err(public_error_from_diesel_pool(e, ErrorHandler::Server)) + Err(public_error_from_diesel(e, ErrorHandler::Server)) }, } })?; @@ -331,7 +331,7 @@ impl DataStore { disk::dsl::slot.eq(Option::::None) )) ) - .detach_and_get_result_async(self.pool_authorized(opctx).await?) + .detach_and_get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .or_else(|e| { match e { @@ -405,7 +405,7 @@ impl DataStore { } }, DetachError::DatabaseError(e) => { - Err(public_error_from_diesel_pool(e, ErrorHandler::Server)) + Err(public_error_from_diesel(e, ErrorHandler::Server)) }, } })?; @@ -438,14 +438,14 @@ impl DataStore { .filter(dsl::state_generation.lt(new_runtime.gen)) .set(new_runtime.clone()) .check_if_exists::(disk_id) - .execute_and_check(self.pool()) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) .await .map(|r| match r.status { UpdateStatus::Updated => true, UpdateStatus::NotUpdatedButExists => false, }) .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_disk), ) @@ -469,14 +469,14 @@ impl DataStore { .filter(dsl::id.eq(disk_id)) .set(dsl::pantry_address.eq(pantry_address.to_string())) .check_if_exists::(disk_id) - .execute_and_check(self.pool_authorized(opctx).await?) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) .await .map(|r| match r.status { UpdateStatus::Updated => true, UpdateStatus::NotUpdatedButExists => false, }) .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_disk), ) @@ -499,14 +499,14 @@ impl DataStore { .filter(dsl::id.eq(disk_id)) .set(&DiskUpdate { pantry_address: None }) .check_if_exists::(disk_id) - .execute_and_check(self.pool_authorized(opctx).await?) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) 
.await .map(|r| match r.status { UpdateStatus::Updated => true, UpdateStatus::NotUpdatedButExists => false, }) .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_disk), ) @@ -571,7 +571,7 @@ impl DataStore { ok_to_delete_states: &[api::external::DiskState], ) -> Result { use db::schema::disk::dsl; - let pool = self.pool(); + let conn = self.pool_connection_unauthorized().await?; let now = Utc::now(); let ok_to_delete_state_labels: Vec<_> = @@ -585,10 +585,10 @@ impl DataStore { .filter(dsl::attach_instance_id.is_null()) .set((dsl::disk_state.eq(destroyed), dsl::time_deleted.eq(now))) .check_if_exists::(*disk_id) - .execute_and_check(pool) + .execute_and_check(&conn) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::Disk, diff --git a/nexus/db-queries/src/db/datastore/dns.rs b/nexus/db-queries/src/db/datastore/dns.rs index ddf2718930..d9704594b1 100644 --- a/nexus/db-queries/src/db/datastore/dns.rs +++ b/nexus/db-queries/src/db/datastore/dns.rs @@ -6,7 +6,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::DnsGroup; use crate::db::model::DnsName; @@ -15,9 +15,10 @@ use crate::db::model::DnsZone; use crate::db::model::Generation; use crate::db::model::InitialDnsGroup; use crate::db::pagination::paginated; +use crate::db::pool::DbConnection; use crate::db::TransactionError; +use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; -use async_bb8_diesel::PoolError; use diesel::prelude::*; use nexus_types::internal_api::params::DnsConfigParams; use nexus_types::internal_api::params::DnsConfigZone; @@ -51,9 +52,9 @@ impl DataStore { paginated(dsl::dns_zone, dsl::zone_name, pagparams) .filter(dsl::dns_group.eq(dns_group)) .select(DnsZone::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// List all DNS zones in a DNS group without pagination @@ -65,25 +66,18 @@ impl DataStore { opctx: &OpContext, dns_group: DnsGroup, ) -> ListResultVec { - let conn = self.pool_authorized(opctx).await?; - self.dns_zones_list_all_on_connection(opctx, conn, dns_group).await + let conn = self.pool_connection_authorized(opctx).await?; + self.dns_zones_list_all_on_connection(opctx, &conn, dns_group).await } /// Variant of [`Self::dns_zones_list_all`] which may be called from a /// transaction context. 
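// Sketch (illustration only) of the signature shape the "_on_connection"
// variants converge on in this patch: a concrete
// `&async_bb8_diesel::Connection<DbConnection>` replaces the generic
// `AsyncConnection`/`ConnErr` bound, so the same method works with a pooled
// connection or from inside `transaction_async`. The `list_zones*` names are
// hypothetical; this assumes it lives inside the real `impl DataStore` with
// the module's usual diesel imports in scope.
impl DataStore {
    async fn list_zones_on_connection(
        &self,
        conn: &async_bb8_diesel::Connection<DbConnection>,
    ) -> ListResultVec<DnsZone> {
        use async_bb8_diesel::AsyncRunQueryDsl;
        use db::schema::dns_zone::dsl;

        dsl::dns_zone
            .select(DnsZone::as_select())
            .load_async(conn)
            .await
            .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))
    }

    async fn list_zones(&self, opctx: &OpContext) -> ListResultVec<DnsZone> {
        // The pool-facing wrapper just checks out a connection and delegates.
        let conn = self.pool_connection_authorized(opctx).await?;
        self.list_zones_on_connection(&conn).await
    }
}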
- pub(crate) async fn dns_zones_list_all_on_connection( + pub(crate) async fn dns_zones_list_all_on_connection( &self, opctx: &OpContext, - conn: &(impl async_bb8_diesel::AsyncConnection< - crate::db::pool::DbConnection, - ConnErr, - > + Sync), + conn: &async_bb8_diesel::Connection, dns_group: DnsGroup, - ) -> ListResultVec - where - ConnErr: From + Send + 'static, - ConnErr: Into, - { + ) -> ListResultVec { use db::schema::dns_zone::dsl; const LIMIT: usize = 5; @@ -95,9 +89,7 @@ impl DataStore { .select(DnsZone::as_select()) .load_async(conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e.into(), ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; bail_unless!( list.len() < LIMIT, @@ -116,25 +108,18 @@ impl DataStore { ) -> LookupResult { self.dns_group_latest_version_conn( opctx, - self.pool_authorized(opctx).await?, + &*self.pool_connection_authorized(opctx).await?, dns_group, ) .await } - pub async fn dns_group_latest_version_conn( + pub async fn dns_group_latest_version_conn( &self, opctx: &OpContext, - conn: &(impl async_bb8_diesel::AsyncConnection< - crate::db::pool::DbConnection, - ConnErr, - > + Sync), + conn: &async_bb8_diesel::Connection, dns_group: DnsGroup, - ) -> LookupResult - where - ConnErr: From + Send + 'static, - ConnErr: Into, - { + ) -> LookupResult { opctx.authorize(authz::Action::Read, &authz::DNS_CONFIG).await?; use db::schema::dns_version::dsl; let versions = dsl::dns_version @@ -144,9 +129,7 @@ impl DataStore { .select(DnsVersion::as_select()) .load_async(conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e.into(), ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; bail_unless!( versions.len() == 1, @@ -178,11 +161,9 @@ impl DataStore { .or(dsl::version_removed.gt(version)), ) .select(DnsName::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })? + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? 
.into_iter() .filter_map(|n: DnsName| match n.records() { Ok(records) => Some((n.name, records)), @@ -326,17 +307,10 @@ impl DataStore { } /// Load initial data for a DNS group into the database - pub async fn load_dns_data( - conn: &(impl async_bb8_diesel::AsyncConnection< - crate::db::pool::DbConnection, - ConnErr, - > + Sync), + pub async fn load_dns_data( + conn: &async_bb8_diesel::Connection, dns: InitialDnsGroup, - ) -> Result<(), Error> - where - ConnErr: From + Send + 'static, - ConnErr: Into, - { + ) -> Result<(), Error> { { use db::schema::dns_zone::dsl; diesel::insert_into(dsl::dns_zone) @@ -346,10 +320,7 @@ impl DataStore { .execute_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) })?; } @@ -362,10 +333,7 @@ impl DataStore { .execute_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) })?; } @@ -378,10 +346,7 @@ impl DataStore { .execute_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) })?; } @@ -407,20 +372,12 @@ impl DataStore { /// **Callers almost certainly want to wake up the corresponding Nexus /// background task to cause these changes to be propagated to the /// corresponding DNS servers.** - pub async fn dns_update( + pub async fn dns_update( &self, opctx: &OpContext, - conn: &(impl async_bb8_diesel::AsyncConnection< - crate::db::pool::DbConnection, - ConnErr, - > + Sync), + conn: &async_bb8_diesel::Connection, update: DnsVersionUpdateBuilder, - ) -> Result<(), Error> - where - ConnErr: From + Send + 'static, - ConnErr: Into, - TransactionError: From, - { + ) -> Result<(), Error> { opctx.authorize(authz::Action::Modify, &authz::DNS_CONFIG).await?; let zones = self @@ -438,28 +395,21 @@ impl DataStore { match result { Ok(()) => Ok(()), Err(TransactionError::CustomError(e)) => Err(e), - Err(TransactionError::Pool(e)) => { - Err(public_error_from_diesel_pool(e, ErrorHandler::Server)) + Err(TransactionError::Connection(e)) => { + Err(public_error_from_diesel(e, ErrorHandler::Server)) } } } // This must only be used inside a transaction. Otherwise, it may make // invalid changes to the database state. Use `dns_update()` instead. - async fn dns_update_internal( + async fn dns_update_internal( &self, opctx: &OpContext, - conn: &(impl async_bb8_diesel::AsyncConnection< - crate::db::pool::DbConnection, - ConnErr, - > + Sync), + conn: &async_bb8_diesel::Connection, update: DnsVersionUpdateBuilder, zones: Vec, - ) -> Result<(), Error> - where - ConnErr: From + Send + 'static, - ConnErr: Into, - { + ) -> Result<(), Error> { // TODO-scalability TODO-performance This would be much better as a CTE // for all the usual reasons described in RFD 192. 
Using an interactive // transaction here means that either we wind up holding database locks @@ -507,10 +457,7 @@ impl DataStore { .execute_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) })?; } @@ -534,9 +481,7 @@ impl DataStore { .set(dsl::version_removed.eq(new_version_num)) .execute_async(conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e.into(), ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; bail_unless!( nremoved == ntoremove, @@ -552,10 +497,7 @@ impl DataStore { .execute_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) })?; bail_unless!( @@ -749,8 +691,8 @@ mod test { comment: "test suite".to_string(), }) .execute_async( - datastore - .pool_for_tests() + &*datastore + .pool_connection_for_tests() .await .expect("failed to get datastore connection"), ) @@ -810,8 +752,8 @@ mod test { HashMap::new(), ); { - let conn = datastore.pool_for_tests().await.unwrap(); - DataStore::load_dns_data(conn, initial) + let conn = datastore.pool_connection_for_tests().await.unwrap(); + DataStore::load_dns_data(&conn, initial) .await .expect("failed to load initial DNS zone"); } @@ -850,8 +792,8 @@ mod test { ]), ); { - let conn = datastore.pool_for_tests().await.unwrap(); - DataStore::load_dns_data(conn, initial) + let conn = datastore.pool_connection_for_tests().await.unwrap(); + DataStore::load_dns_data(&conn, initial) .await .expect("failed to load initial DNS zone"); } @@ -1026,7 +968,9 @@ mod test { zone_name: "z1.foo".to_string(), }, ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -1042,7 +986,9 @@ mod test { vi1.clone(), vi2.clone(), ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -1142,7 +1088,9 @@ mod test { ) .unwrap(), ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -1288,7 +1236,9 @@ mod test { zone_name: "z1.foo".to_string(), }, ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap_err(); assert!(error @@ -1317,7 +1267,9 @@ mod test { comment: "test suite 4".to_string(), }, ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap_err(); assert!(error @@ -1349,7 +1301,9 @@ mod test { ) .unwrap(), ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap_err(); assert!(error @@ -1470,7 +1424,9 @@ mod test { dns_zone2.clone(), dns_zone3.clone(), ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -1494,7 +1450,9 @@ mod test { comment: "test suite 8".to_string(), }, ]) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -1523,8 +1481,8 @@ mod test 
{ update.add_name(String::from("n1"), records1.clone()).unwrap(); update.add_name(String::from("n2"), records2.clone()).unwrap(); - let conn = datastore.pool_for_tests().await.unwrap(); - datastore.dns_update(&opctx, conn, update).await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore.dns_update(&opctx, &conn, update).await.unwrap(); } // Verify the new config. @@ -1556,8 +1514,8 @@ mod test { update.remove_name(String::from("n1")).unwrap(); update.add_name(String::from("n1"), records12.clone()).unwrap(); - let conn = datastore.pool_for_tests().await.unwrap(); - datastore.dns_update(&opctx, conn, update).await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore.dns_update(&opctx, &conn, update).await.unwrap(); } let dns_config = datastore @@ -1586,8 +1544,8 @@ mod test { ); update.remove_name(String::from("n1")).unwrap(); - let conn = datastore.pool_for_tests().await.unwrap(); - datastore.dns_update(&opctx, conn, update).await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore.dns_update(&opctx, &conn, update).await.unwrap(); } let dns_config = datastore @@ -1613,8 +1571,8 @@ mod test { ); update.add_name(String::from("n1"), records2.clone()).unwrap(); - let conn = datastore.pool_for_tests().await.unwrap(); - datastore.dns_update(&opctx, conn, update).await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); + datastore.dns_update(&opctx, &conn, update).await.unwrap(); } let dns_config = datastore @@ -1644,8 +1602,8 @@ mod test { ); update1.remove_name(String::from("n1")).unwrap(); - let conn1 = datastore.pool_for_tests().await.unwrap(); - let conn2 = datastore.pool_for_tests().await.unwrap(); + let conn1 = datastore.pool_connection_for_tests().await.unwrap(); + let conn2 = datastore.pool_connection_for_tests().await.unwrap(); let (wait1_tx, wait1_rx) = tokio::sync::oneshot::channel(); let (wait2_tx, wait2_rx) = tokio::sync::oneshot::channel(); @@ -1680,7 +1638,7 @@ mod test { String::from("the test suite"), ); update2.add_name(String::from("n1"), records1.clone()).unwrap(); - datastore.dns_update(&opctx, conn2, update2).await.unwrap(); + datastore.dns_update(&opctx, &conn2, update2).await.unwrap(); // Now let the first one finish. 
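// Sketch (illustration only) of the updated test-side call to `dns_update`:
// the connection is checked out once via `pool_connection_for_tests()` and
// passed by reference, as in the hunks above. `make_update()` is a
// hypothetical stand-in for constructing a `DnsVersionUpdateBuilder`.
async fn dns_update_sketch(datastore: &DataStore, opctx: &OpContext) {
    let update = make_update(); // hypothetical builder
    let conn = datastore
        .pool_connection_for_tests()
        .await
        .expect("failed to get datastore connection");
    datastore.dns_update(opctx, &conn, update).await.unwrap();
}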
wait2_tx.send(()).unwrap(); @@ -1723,9 +1681,9 @@ mod test { ); update.remove_name(String::from("n4")).unwrap(); - let conn = datastore.pool_for_tests().await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); let error = - datastore.dns_update(&opctx, conn, update).await.unwrap_err(); + datastore.dns_update(&opctx, &conn, update).await.unwrap_err(); assert_eq!( error.to_string(), "Internal Error: updated wrong number of dns_name \ @@ -1748,9 +1706,9 @@ mod test { ); update.add_name(String::from("n2"), records1.clone()).unwrap(); - let conn = datastore.pool_for_tests().await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); let error = - datastore.dns_update(&opctx, conn, update).await.unwrap_err(); + datastore.dns_update(&opctx, &conn, update).await.unwrap_err(); let msg = error.to_string(); assert!(msg.starts_with("Internal Error: ")); assert!(msg.contains("violates unique constraint")); diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 8f5e9ba4c1..268b284a0a 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -9,7 +9,7 @@ use crate::authz; use crate::authz::ApiResource; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::lookup::LookupPath; use crate::db::model::ExternalIp; @@ -20,7 +20,7 @@ use crate::db::pool::DbConnection; use crate::db::queries::external_ip::NextExternalIp; use crate::db::update_and_check::UpdateAndCheck; use crate::db::update_and_check::UpdateStatus; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use nexus_types::identity::Resource; @@ -131,29 +131,22 @@ impl DataStore { opctx: &OpContext, data: IncompleteExternalIp, ) -> CreateResult { - let conn = self.pool_authorized(opctx).await?; - Self::allocate_external_ip_on_connection(conn, data).await + let conn = self.pool_connection_authorized(opctx).await?; + Self::allocate_external_ip_on_connection(&conn, data).await } /// Variant of [Self::allocate_external_ip] which may be called from a /// transaction context. - pub(crate) async fn allocate_external_ip_on_connection( - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + pub(crate) async fn allocate_external_ip_on_connection( + conn: &async_bb8_diesel::Connection, data: IncompleteExternalIp, - ) -> CreateResult - where - ConnErr: From + Send + 'static, - PoolError: From, - { + ) -> CreateResult { let explicit_ip = data.explicit_ip().is_some(); NextExternalIp::new(data).get_result_async(conn).await.map_err(|e| { use async_bb8_diesel::ConnectionError::Query; - use async_bb8_diesel::PoolError::Connection; use diesel::result::Error::NotFound; - let e = PoolError::from(e); match e { - Connection(Query(NotFound)) => { + Query(NotFound) => { if explicit_ip { Error::invalid_request( "Requested external IP address not available", @@ -164,7 +157,7 @@ impl DataStore { ) } } - _ => crate::db::queries::external_ip::from_pool(e), + _ => crate::db::queries::external_ip::from_diesel(e), } }) } @@ -238,13 +231,13 @@ impl DataStore { .filter(dsl::id.eq(ip_id)) .set(dsl::time_deleted.eq(now)) .check_if_exists::(ip_id) - .execute_and_check(self.pool_authorized(opctx).await?) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) 
.await .map(|r| match r.status { UpdateStatus::Updated => true, UpdateStatus::NotUpdatedButExists => false, }) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Delete all external IP addresses associated with the provided instance @@ -268,9 +261,9 @@ impl DataStore { .filter(dsl::parent_id.eq(instance_id)) .filter(dsl::kind.ne(IpKind::Floating)) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Fetch all external IP addresses of any kind for the provided instance @@ -285,8 +278,8 @@ impl DataStore { .filter(dsl::parent_id.eq(instance_id)) .filter(dsl::time_deleted.is_null()) .select(ExternalIp::as_select()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/identity_provider.rs b/nexus/db-queries/src/db/datastore/identity_provider.rs index 4d725d1cf4..fdc9a020e7 100644 --- a/nexus/db-queries/src/db/datastore/identity_provider.rs +++ b/nexus/db-queries/src/db/datastore/identity_provider.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Resource; use crate::db::model::IdentityProvider; @@ -46,9 +46,11 @@ impl DataStore { .filter(dsl::silo_id.eq(authz_idp_list.silo().id())) .filter(dsl::time_deleted.is_null()) .select(IdentityProvider::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn saml_identity_provider_create( @@ -61,7 +63,7 @@ impl DataStore { assert_eq!(provider.silo_id, authz_idp_list.silo().id()); let name = provider.identity().name.to_string(); - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? 
.transaction_async(|conn| async move { // insert silo identity provider record with type Saml @@ -94,7 +96,7 @@ impl DataStore { }) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SamlIdentityProvider, diff --git a/nexus/db-queries/src/db/datastore/image.rs b/nexus/db-queries/src/db/datastore/image.rs index 17bdb6fae0..e44da013cd 100644 --- a/nexus/db-queries/src/db/datastore/image.rs +++ b/nexus/db-queries/src/db/datastore/image.rs @@ -4,7 +4,7 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::Image; use crate::db::model::Project; @@ -52,9 +52,11 @@ impl DataStore { .filter(project_dsl::time_deleted.is_null()) .filter(project_dsl::project_id.eq(authz_project.id())) .select(ProjectImage::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) .map(|v| v.into_iter().map(|v| v.into()).collect()) } @@ -80,9 +82,11 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::silo_id.eq(authz_silo.id())) .select(SiloImage::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) .map(|v| v.into_iter().map(|v| v.into()).collect()) } @@ -107,19 +111,19 @@ impl DataStore { .do_update() .set(dsl::time_modified.eq(dsl::time_modified)), ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) + .insert_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => authz_silo.not_found(), - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::ProjectImage, - name.as_str(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::ProjectImage, + name.as_str(), + ), + ), })?; Ok(image) } @@ -145,19 +149,19 @@ impl DataStore { .do_update() .set(dsl::time_modified.eq(dsl::time_modified)), ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) + .insert_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => authz_project.not_found(), - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::ProjectImage, - name.as_str(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::ProjectImage, + name.as_str(), + ), + ), })?; Ok(image) } @@ -181,10 +185,10 @@ impl DataStore { dsl::time_modified.eq(Utc::now()), )) .returning(Image::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SiloImage, @@ -215,10 +219,10 @@ impl DataStore { dsl::time_modified.eq(Utc::now()), )) .returning(Image::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::ProjectImage, diff --git a/nexus/db-queries/src/db/datastore/instance.rs b/nexus/db-queries/src/db/datastore/instance.rs index 1f347d2378..46ca07a74a 100644 --- a/nexus/db-queries/src/db/datastore/instance.rs +++ b/nexus/db-queries/src/db/datastore/instance.rs @@ -13,7 +13,7 @@ use crate::db::collection_detach_many::DatastoreDetachManyTarget; use crate::db::collection_detach_many::DetachManyError; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; @@ -84,19 +84,16 @@ impl DataStore { .do_update() .set(dsl::time_modified.eq(dsl::time_modified)), ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) + .insert_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => authz_project.not_found(), - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::Instance, - name.as_str(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict(ResourceType::Instance, name.as_str()), + ), })?; bail_unless!( @@ -135,9 +132,9 @@ impl DataStore { .filter(dsl::project_id.eq(authz_project.id())) .filter(dsl::time_deleted.is_null()) .select(Instance::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Fetches information about an Instance that the caller has previously @@ -178,10 +175,10 @@ impl DataStore { .filter(dsl::id.eq(authz_instance.id())) .filter(dsl::time_deleted.is_not_null()) .select(Instance::as_select()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::Instance, @@ -225,14 +222,14 @@ impl DataStore { ) .set(new_runtime.clone()) .check_if_exists::(*instance_id) - .execute_and_check(self.pool()) + .execute_and_check(&*self.pool_connection_unauthorized().await?) .await .map(|r| match r.status { UpdateStatus::Updated => true, UpdateStatus::NotUpdatedButExists => false, }) .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::Instance, @@ -288,7 +285,9 @@ impl DataStore { disk::dsl::slot.eq(Option::::None), )), ) - .detach_and_get_result_async(self.pool_authorized(opctx).await?) 
+ .detach_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| match e { DetachManyError::CollectionNotFound => Error::not_found_by_id( @@ -309,7 +308,7 @@ impl DataStore { } } DetachManyError::DatabaseError(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) } })?; diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 1248edf7a8..bd3148f2f7 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -10,8 +10,8 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::diesel_pool_result_optional; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::diesel_result_optional; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::fixed_data::silo::INTERNAL_SILO_ID; use crate::db::identity::Resource; @@ -22,7 +22,7 @@ use crate::db::model::Name; use crate::db::pagination::paginated; use crate::db::pool::DbConnection; use crate::db::queries::ip_pool::FilterOverlappingIpRanges; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; use ipnetwork::IpNetwork; @@ -65,9 +65,9 @@ impl DataStore { .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) .filter(dsl::time_deleted.is_null()) .select(db::model::IpPool::as_select()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Look up the default IP pool for the current silo. If there is no default @@ -104,9 +104,11 @@ impl DataStore { // then by only taking the first result, we get the most specific one .order(dsl::silo_id.asc().nulls_last()) .select(IpPool::as_select()) - .first_async::(self.pool_authorized(opctx).await?) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Looks up an IP pool intended for internal services. @@ -127,9 +129,9 @@ impl DataStore { .filter(dsl::silo_id.eq(*INTERNAL_SILO_ID)) .filter(dsl::time_deleted.is_null()) .select(IpPool::as_select()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) .map(|ip_pool| { ( authz::IpPool::new( @@ -160,10 +162,10 @@ impl DataStore { diesel::insert_into(dsl::ip_pool) .values(pool) .returning(IpPool::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict(ResourceType::IpPool, &pool_name), ) @@ -181,16 +183,18 @@ impl DataStore { opctx.authorize(authz::Action::Delete, authz_pool).await?; // Verify there are no IP ranges still in this pool - let range = diesel_pool_result_optional( + let range = diesel_result_optional( ip_pool_range::dsl::ip_pool_range .filter(ip_pool_range::dsl::ip_pool_id.eq(authz_pool.id())) .filter(ip_pool_range::dsl::time_deleted.is_null()) .select(ip_pool_range::dsl::id) .limit(1) - .first_async::(self.pool_authorized(opctx).await?) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await, ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if range.is_some() { return Err(Error::InvalidRequest { message: @@ -212,10 +216,10 @@ impl DataStore { .filter(dsl::id.eq(authz_pool.id())) .filter(dsl::rcgen.eq(db_pool.rcgen)) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_pool), ) @@ -247,10 +251,10 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .set(updates) .returning(IpPool::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_pool), ) @@ -269,10 +273,10 @@ impl DataStore { .filter(dsl::ip_pool_id.eq(authz_pool.id())) .filter(dsl::time_deleted.is_null()) .select(IpPoolRange::as_select()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_pool), ) @@ -285,24 +289,19 @@ impl DataStore { authz_pool: &authz::IpPool, range: &IpRange, ) -> CreateResult { - let conn = self.pool_authorized(opctx).await?; - Self::ip_pool_add_range_on_connection(conn, opctx, authz_pool, range) + let conn = self.pool_connection_authorized(opctx).await?; + Self::ip_pool_add_range_on_connection(&conn, opctx, authz_pool, range) .await } /// Variant of [Self::ip_pool_add_range] which may be called from a /// transaction context. 
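// Sketch (illustration only) of the "check out once, reuse" pattern the
// ip_pool changes adopt: a single connection is taken from the pool and
// threaded through every statement in the method, rather than re-borrowing
// the pool per query. The specific filter and update below are hypothetical;
// this assumes the crate-internal `pool_connection_authorized` helper and the
// module's usual diesel imports are in scope.
async fn reuse_connection_sketch(
    datastore: &DataStore,
    opctx: &OpContext,
    pool_id: uuid::Uuid,
) -> Result<(), Error> {
    use db::schema::ip_pool_range::dsl;

    let conn = datastore.pool_connection_authorized(opctx).await?;

    // First query runs on the checked-out connection...
    let _n_ranges: i64 = dsl::ip_pool_range
        .filter(dsl::ip_pool_id.eq(pool_id))
        .filter(dsl::time_deleted.is_null())
        .count()
        .get_result_async(&*conn)
        .await
        .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;

    // ...and so does the follow-up statement.
    diesel::update(dsl::ip_pool_range.filter(dsl::ip_pool_id.eq(pool_id)))
        .set(dsl::time_deleted.eq(chrono::Utc::now()))
        .execute_async(&*conn)
        .await
        .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;

    Ok(())
}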
- pub(crate) async fn ip_pool_add_range_on_connection( - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + pub(crate) async fn ip_pool_add_range_on_connection( + conn: &async_bb8_diesel::Connection, opctx: &OpContext, authz_pool: &authz::IpPool, range: &IpRange, - ) -> CreateResult - where - ConnErr: From + Send + 'static, - PoolError: From, - { + ) -> CreateResult { use db::schema::ip_pool_range::dsl; opctx.authorize(authz::Action::CreateChild, authz_pool).await?; let pool_id = authz_pool.id(); @@ -315,13 +314,16 @@ impl DataStore { .await .map_err(|e| { use async_bb8_diesel::ConnectionError::Query; - use async_bb8_diesel::PoolError::Connection; use diesel::result::Error::NotFound; match e { - AsyncInsertError::DatabaseError(Connection(Query( - NotFound, - ))) => { + AsyncInsertError::CollectionNotFound => { + Error::ObjectNotFound { + type_name: ResourceType::IpPool, + lookup_type: LookupType::ById(pool_id), + } + } + AsyncInsertError::DatabaseError(Query(NotFound)) => { // We've filtered out the IP addresses the client provided, // i.e., there's some overlap with existing addresses. Error::invalid_request( @@ -334,14 +336,8 @@ impl DataStore { .as_str(), ) } - AsyncInsertError::CollectionNotFound => { - Error::ObjectNotFound { - type_name: ResourceType::IpPool, - lookup_type: LookupType::ById(pool_id), - } - } AsyncInsertError::DatabaseError(err) => { - public_error_from_diesel_pool(err, ErrorHandler::Server) + public_error_from_diesel(err, ErrorHandler::Server) } } }) @@ -366,19 +362,18 @@ impl DataStore { // Fetch the range itself, if it exists. We'll need to protect against // concurrent inserts of new external IPs from the target range by // comparing the rcgen. - let range = diesel_pool_result_optional( + let conn = self.pool_connection_authorized(opctx).await?; + let range = diesel_result_optional( dsl::ip_pool_range .filter(dsl::ip_pool_id.eq(pool_id)) .filter(dsl::first_address.eq(first_net)) .filter(dsl::last_address.eq(last_net)) .filter(dsl::time_deleted.is_null()) .select(IpPoolRange::as_select()) - .get_result_async::( - self.pool_authorized(opctx).await?, - ) + .get_result_async::(&*conn) .await, ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))? + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .ok_or_else(|| { Error::invalid_request( format!( @@ -397,9 +392,9 @@ impl DataStore { .filter(external_ip::dsl::ip_pool_range_id.eq(range_id)) .filter(external_ip::dsl::time_deleted.is_null()), )) - .get_result_async::(self.pool_authorized(opctx).await?) + .get_result_async::(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if has_children { return Err(Error::invalid_request( "IP pool ranges cannot be deleted while \ @@ -419,9 +414,9 @@ impl DataStore { .filter(dsl::rcgen.eq(rcgen)), ) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) 
+ .execute_async(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if updated_rows == 1 { Ok(()) } else { diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index f653675728..ff1df710bb 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -24,7 +24,7 @@ use crate::authz; use crate::context::OpContext; use crate::db::{ self, - error::{public_error_from_diesel_pool, ErrorHandler}, + error::{public_error_from_diesel, ErrorHandler}, }; use ::oximeter::types::ProducerRegistry; use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager}; @@ -200,16 +200,7 @@ impl DataStore { .unwrap(); } - // TODO-security This should be deprecated in favor of pool_authorized(), - // which gives us the chance to do a minimal security check before hitting - // the database. Eventually, this function should only be used for doing - // authentication in the first place (since we can't do an authz check in - // that case). - fn pool(&self) -> &bb8::Pool> { - self.pool.pool() - } - - pub(super) async fn pool_authorized( + async fn pool_authorized( &self, opctx: &OpContext, ) -> Result<&bb8::Pool>, Error> { @@ -217,12 +208,41 @@ impl DataStore { Ok(self.pool.pool()) } + /// Returns a connection to a connection from the database connection pool. + pub(super) async fn pool_connection_authorized( + &self, + opctx: &OpContext, + ) -> Result>, Error> + { + let pool = self.pool_authorized(opctx).await?; + let connection = pool.get().await.map_err(|err| { + Error::unavail(&format!("Failed to access DB connection: {err}")) + })?; + Ok(connection) + } + + /// Returns an unauthorized connection to a connection from the database + /// connection pool. + /// + /// TODO-security: This should be deprecated in favor of + /// "pool_connection_authorized". + pub(super) async fn pool_connection_unauthorized( + &self, + ) -> Result>, Error> + { + let connection = self.pool.pool().get().await.map_err(|err| { + Error::unavail(&format!("Failed to access DB connection: {err}")) + })?; + Ok(connection) + } + /// For testing only. This isn't cfg(test) because nexus needs access to it. #[doc(hidden)] - pub async fn pool_for_tests( + pub async fn pool_connection_for_tests( &self, - ) -> Result<&bb8::Pool>, Error> { - Ok(self.pool.pool()) + ) -> Result>, Error> + { + self.pool_connection_unauthorized().await } /// Return the next available IPv6 address for an Oxide service running on @@ -238,10 +258,10 @@ impl DataStore { ) .set(dsl::last_used_address.eq(dsl::last_used_address + 1)) .returning(dsl::last_used_address) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::Sled, @@ -266,19 +286,17 @@ impl DataStore { #[cfg(test)] async fn test_try_table_scan(&self, opctx: &OpContext) -> Error { use db::schema::project::dsl; - let conn = self.pool_authorized(opctx).await; + let conn = self.pool_connection_authorized(opctx).await; if let Err(error) = conn { return error; } let result = dsl::project .select(diesel::dsl::count_star()) - .first_async::(conn.unwrap()) + .first_async::(&*conn.unwrap()) .await; match result { Ok(_) => Error::internal_error("table scan unexpectedly succeeded"), - Err(error) => { - public_error_from_diesel_pool(error, ErrorHandler::Server) - } + Err(error) => public_error_from_diesel(error, ErrorHandler::Server), } } } @@ -1000,9 +1018,9 @@ mod test { let pool = db::Pool::new(&logctx.log, &cfg); let datastore = DataStore::new(&logctx.log, Arc::new(pool), None).await.unwrap(); - + let conn = datastore.pool_connection_for_tests().await.unwrap(); let explanation = DataStore::get_allocated_regions_query(Uuid::nil()) - .explain_async(datastore.pool()) + .explain_async(&conn) .await .unwrap(); assert!( @@ -1027,7 +1045,7 @@ mod test { .values(values) .returning(VpcSubnet::as_returning()); println!("{}", diesel::debug_query(&query)); - let explanation = query.explain_async(datastore.pool()).await.unwrap(); + let explanation = query.explain_async(&conn).await.unwrap(); assert!( !explanation.contains("FULL SCAN"), "Found an unexpected FULL SCAN: {}", @@ -1403,6 +1421,7 @@ mod test { ); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; + let conn = datastore.pool_connection_for_tests().await.unwrap(); // Create a few records. let now = Utc::now(); @@ -1429,7 +1448,7 @@ mod test { .collect::>(); diesel::insert_into(dsl::external_ip) .values(ips.clone()) - .execute_async(datastore.pool()) + .execute_async(&*conn) .await .unwrap(); @@ -1464,6 +1483,7 @@ mod test { dev::test_setup_log("test_deallocate_external_ip_is_idempotent"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = datastore_test(&logctx, &db).await; + let conn = datastore.pool_connection_for_tests().await.unwrap(); // Create a record. 
let now = Utc::now(); @@ -1487,7 +1507,7 @@ mod test { }; diesel::insert_into(dsl::external_ip) .values(ip.clone()) - .execute_async(datastore.pool()) + .execute_async(&*conn) .await .unwrap(); @@ -1522,13 +1542,13 @@ mod test { use crate::db::model::IpKind; use crate::db::schema::external_ip::dsl; use async_bb8_diesel::ConnectionError::Query; - use async_bb8_diesel::PoolError::Connection; use diesel::result::DatabaseErrorKind::CheckViolation; use diesel::result::Error::DatabaseError; let logctx = dev::test_setup_log("test_external_ip_check_constraints"); let mut db = test_setup_database(&logctx.log).await; let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let conn = datastore.pool_connection_for_tests().await.unwrap(); let now = Utc::now(); // Create a mostly-populated record, for a floating IP @@ -1582,7 +1602,7 @@ mod test { }; let res = diesel::insert_into(dsl::external_ip) .values(new_ip) - .execute_async(datastore.pool()) + .execute_async(&*conn) .await; if name.is_some() && description.is_some() { // Name/description must be non-NULL, instance ID can be @@ -1607,10 +1627,10 @@ mod test { assert!( matches!( err, - Connection(Query(DatabaseError( + Query(DatabaseError( CheckViolation, _ - ))) + )) ), "Expected a CHECK violation when inserting a \ Floating IP record with NULL name and/or description", @@ -1639,7 +1659,7 @@ mod test { }; let res = diesel::insert_into(dsl::external_ip) .values(new_ip.clone()) - .execute_async(datastore.pool()) + .execute_async(&*conn) .await; let ip_type = if is_service { "Service" } else { "Instance" }; @@ -1656,10 +1676,10 @@ mod test { assert!( matches!( err, - Connection(Query(DatabaseError( + Query(DatabaseError( CheckViolation, _ - ))) + )) ), "Expected a CHECK violation when inserting an \ Ephemeral Service IP", @@ -1687,10 +1707,10 @@ mod test { assert!( matches!( err, - Connection(Query(DatabaseError( + Query(DatabaseError( CheckViolation, _ - ))) + )) ), "Expected a CHECK violation when inserting a \ {:?} IP record with non-NULL name, description, \ diff --git a/nexus/db-queries/src/db/datastore/network_interface.rs b/nexus/db-queries/src/db/datastore/network_interface.rs index af1068d6bf..3d7b8afa71 100644 --- a/nexus/db-queries/src/db/datastore/network_interface.rs +++ b/nexus/db-queries/src/db/datastore/network_interface.rs @@ -11,7 +11,7 @@ use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::cte_utils::BoxedQuery; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::IncompleteNetworkInterface; @@ -27,7 +27,6 @@ use crate::db::pool::DbConnection; use crate::db::queries::network_interface; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; -use async_bb8_diesel::PoolError; use chrono::Utc; use diesel::prelude::*; use omicron_common::api::external; @@ -156,21 +155,17 @@ impl DataStore { interface: IncompleteNetworkInterface, ) -> Result { let conn = self - .pool_authorized(opctx) + .pool_connection_authorized(opctx) .await .map_err(network_interface::InsertError::External)?; - self.create_network_interface_raw_conn(conn, interface).await + self.create_network_interface_raw_conn(&conn, interface).await } - pub(crate) async fn create_network_interface_raw_conn( + pub(crate) async fn create_network_interface_raw_conn( &self, - conn: &(impl AsyncConnection + Sync), + conn: 
&async_bb8_diesel::Connection, interface: IncompleteNetworkInterface, - ) -> Result - where - ConnErr: From + Send + 'static, - PoolError: From, - { + ) -> Result { use db::schema::network_interface::dsl; let subnet_id = interface.subnet.identity.id; let query = network_interface::InsertQuery::new(interface.clone()); @@ -190,7 +185,7 @@ impl DataStore { ) } AsyncInsertError::DatabaseError(e) => { - network_interface::InsertError::from_pool(e, &interface) + network_interface::InsertError::from_diesel(e, &interface) } }) } @@ -210,10 +205,10 @@ impl DataStore { .filter(dsl::kind.eq(NetworkInterfaceKind::Instance)) .filter(dsl::time_deleted.is_null()) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_instance), ) @@ -243,13 +238,14 @@ impl DataStore { query .clone() .execute_async( - self.pool_authorized(opctx) + &*self + .pool_connection_authorized(opctx) .await .map_err(network_interface::DeleteError::External)?, ) .await .map_err(|e| { - network_interface::DeleteError::from_pool(e, &query) + network_interface::DeleteError::from_diesel(e, &query) })?; Ok(()) } @@ -291,11 +287,11 @@ impl DataStore { network_interface::is_primary, network_interface::slot, )) - .get_results_async::(self.pool_authorized(opctx).await?) + .get_results_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(rows .into_iter() .map(sled_client_types::NetworkInterface::from) @@ -386,10 +382,10 @@ impl DataStore { .filter(dsl::instance_id.eq(authz_instance.id())) .select(InstanceNetworkInterface::as_select()) .load_async::( - self.pool_authorized(opctx).await?, + &*self.pool_connection_authorized(opctx).await?, ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Update a network interface associated with a given instance. @@ -471,9 +467,9 @@ impl DataStore { } type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; if primary { - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { let instance_state = instance_query .get_result_async(&conn) .await? @@ -517,7 +513,7 @@ impl DataStore { // be done there. The other columns always need to be updated, and // we're only hitting a single row. Note that we still need to // verify the instance is stopped. - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { let instance_state = instance_query .get_result_async(&conn) .await? 
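Because query errors now surface as async_bb8_diesel::ConnectionError rather than being wrapped in PoolError, the test assertions above drop one layer of nesting. A minimal sketch of the new match shape, assuming `res` is the Result of an execute_async(&*conn) call as in the hunk:

    use async_bb8_diesel::ConnectionError::Query;
    use diesel::result::DatabaseErrorKind::CheckViolation;
    use diesel::result::Error::DatabaseError;

    let err = res.expect_err("insert should hit the CHECK constraint");
    // Previously matched as Connection(Query(DatabaseError(CheckViolation, _))).
    assert!(matches!(err, Query(DatabaseError(CheckViolation, _))));
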
diff --git a/nexus/db-queries/src/db/datastore/oximeter.rs b/nexus/db-queries/src/db/datastore/oximeter.rs index 178c2466a7..c9b3a59b05 100644 --- a/nexus/db-queries/src/db/datastore/oximeter.rs +++ b/nexus/db-queries/src/db/datastore/oximeter.rs @@ -6,7 +6,7 @@ use super::DataStore; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::OximeterInfo; use crate::db::model::ProducerEndpoint; @@ -41,10 +41,10 @@ impl DataStore { dsl::ip.eq(info.ip), dsl::port.eq(info.port), )) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::Oximeter, @@ -62,9 +62,11 @@ impl DataStore { ) -> ListResultVec { use db::schema::oximeter::dsl; paginated(dsl::oximeter, dsl::id, page_params) - .load_async::(self.pool()) + .load_async::( + &*self.pool_connection_unauthorized().await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } // Create a record for a new producer endpoint @@ -86,10 +88,10 @@ impl DataStore { dsl::interval.eq(producer.interval), dsl::base_route.eq(producer.base_route.clone()), )) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::MetricProducer, @@ -111,10 +113,10 @@ impl DataStore { .filter(dsl::oximeter_id.eq(oximeter_id)) .order_by((dsl::oximeter_id, dsl::id)) .select(ProducerEndpoint::as_select()) - .load_async(self.pool()) + .load_async(&*self.pool_connection_unauthorized().await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::MetricProducer, diff --git a/nexus/db-queries/src/db/datastore/physical_disk.rs b/nexus/db-queries/src/db/datastore/physical_disk.rs index ec9f29d27d..3c83b91d21 100644 --- a/nexus/db-queries/src/db/datastore/physical_disk.rs +++ b/nexus/db-queries/src/db/datastore/physical_disk.rs @@ -10,7 +10,7 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::PhysicalDisk; use crate::db::model::Sled; @@ -60,7 +60,9 @@ impl DataStore { dsl::time_modified.eq(now), )), ) - .insert_and_get_result_async(self.pool()) + .insert_and_get_result_async( + &*self.pool_connection_authorized(&opctx).await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { @@ -68,7 +70,7 @@ impl DataStore { lookup_type: LookupType::ById(sled_id), }, AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) } })?; @@ -85,9 +87,9 @@ impl DataStore { paginated(dsl::physical_disk, dsl::id, pagparams) .filter(dsl::time_deleted.is_null()) .select(PhysicalDisk::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
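Collection inserts follow the same connection-first pattern, and their AsyncInsertError arms now funnel into public_error_from_diesel. A condensed sketch of the arm structure these hunks converge on; the Sled/sled_id lookup mirrors the physical-disk hunk below and is illustrative only:

    .insert_and_get_result_async(
        &*self.pool_connection_authorized(opctx).await?,
    )
    .await
    .map_err(|e| match e {
        // The parent collection (here, the sled) was not found.
        AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
            type_name: ResourceType::Sled,
            lookup_type: LookupType::ById(sled_id),
        },
        // Any other database error is translated for the public API.
        AsyncInsertError::DatabaseError(e) => {
            public_error_from_diesel(e, ErrorHandler::Server)
        }
    })
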
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn sled_list_physical_disks( @@ -102,9 +104,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::sled_id.eq(sled_id)) .select(PhysicalDisk::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Deletes a disk from the database. @@ -125,10 +127,10 @@ impl DataStore { .filter(dsl::model.eq(model)) .filter(dsl::sled_id.eq(sled_id)) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map(|_rows_modified| ()) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/project.rs b/nexus/db-queries/src/db/datastore/project.rs index b3759f9cce..0285679cd5 100644 --- a/nexus/db-queries/src/db/datastore/project.rs +++ b/nexus/db-queries/src/db/datastore/project.rs @@ -11,8 +11,8 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::diesel_pool_result_optional; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::diesel_result_optional; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::fixed_data::project::SERVICES_PROJECT; @@ -25,7 +25,7 @@ use crate::db::model::ProjectUpdate; use crate::db::model::Silo; use crate::db::model::VirtualProvisioningCollection; use crate::db::pagination::paginated; -use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl}; use chrono::Utc; use diesel::prelude::*; use omicron_common::api::external::http_pagination::PaginatedBy; @@ -60,16 +60,16 @@ macro_rules! generate_fn_to_ensure_none_in_project { ) -> DeleteResult { use db::schema::$i; - let maybe_label = diesel_pool_result_optional( + let maybe_label = diesel_result_optional( $i::dsl::$i .filter($i::dsl::project_id.eq(authz_project.id())) .filter($i::dsl::time_deleted.is_null()) .select($i::dsl::$label) .limit(1) - .first_async::<$label_ty>(self.pool_authorized(opctx).await?) + .first_async::<$label_ty>(&*self.pool_connection_authorized(opctx).await?) .await, ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if let Some(label) = maybe_label { let object = stringify!($i).replace('_', " "); @@ -155,7 +155,7 @@ impl DataStore { let name = project.name().as_str().to_string(); let db_project = self - .pool_authorized(opctx) + .pool_connection_authorized(opctx) .await? 
.transaction_async(|conn| async move { let project: Project = Silo::insert_resource( @@ -169,7 +169,7 @@ impl DataStore { authz_silo_inner.not_found() } AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::Project, @@ -193,8 +193,8 @@ impl DataStore { .await .map_err(|e| match e { TransactionError::CustomError(e) => e, - TransactionError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TransactionError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } })?; @@ -233,7 +233,7 @@ impl DataStore { use db::schema::project::dsl; type TxnError = TransactionError; - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { let now = Utc::now(); @@ -246,8 +246,8 @@ impl DataStore { .execute_async(&conn) .await .map_err(|e| { - public_error_from_diesel_pool( - PoolError::from(e), + public_error_from_diesel( + e, ErrorHandler::NotFoundByResource(authz_project), ) })?; @@ -270,8 +270,8 @@ impl DataStore { .await .map_err(|e| match e { TxnError::CustomError(e) => e, - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } })?; Ok(()) @@ -300,9 +300,9 @@ impl DataStore { .filter(dsl::silo_id.eq(authz_silo.id())) .filter(dsl::time_deleted.is_null()) .select(Project::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Updates a project (clobbering update -- no etag) @@ -320,10 +320,10 @@ impl DataStore { .filter(dsl::id.eq(authz_project.id())) .set(updates) .returning(Project::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_project), ) @@ -355,8 +355,8 @@ impl DataStore { .filter(dsl::silo_id.ne(*INTERNAL_SILO_ID).or(dsl::silo_id.is_null())) .filter(dsl::time_deleted.is_null()) .select(db::model::IpPool::as_select()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 54346b31c0..1be3e1ee4c 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -12,7 +12,7 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::fixed_data::silo::INTERNAL_SILO_ID; @@ -28,7 +28,6 @@ use crate::db::pagination::paginated; use crate::db::pool::DbConnection; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; -use async_bb8_diesel::PoolError; use chrono::Utc; use diesel::prelude::*; use diesel::upsert::excluded; @@ -80,7 +79,7 @@ enum RackInitError { AddingNic(Error), ServiceInsert(Error), DatasetInsert { err: AsyncInsertError, zpool_id: Uuid }, - RackUpdate { err: PoolError, rack_id: Uuid }, + RackUpdate { err: async_bb8_diesel::ConnectionError, rack_id: Uuid }, DnsSerialization(Error), Silo(Error), RoleAssignment(Error), @@ -101,7 +100,7 @@ impl From for Error { lookup_type: LookupType::ById(zpool_id), }, AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) } }, TxnError::CustomError(RackInitError::ServiceInsert(err)) => { @@ -113,7 +112,7 @@ impl From for Error { TxnError::CustomError(RackInitError::RackUpdate { err, rack_id, - }) => public_error_from_diesel_pool( + }) => public_error_from_diesel( err, ErrorHandler::NotFoundByLookup( ResourceType::Rack, @@ -138,7 +137,7 @@ impl From for Error { err )) } - TxnError::Pool(e) => { + TxnError::Connection(e) => { Error::internal_error(&format!("Transaction error: {}", e)) } } @@ -155,9 +154,9 @@ impl DataStore { use db::schema::rack::dsl; paginated(dsl::rack, dsl::id, pagparams) .select(Rack::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Stores a new rack in the database. @@ -177,10 +176,10 @@ impl DataStore { // This is a no-op, since we conflicted on the ID. .set(dsl::id.eq(excluded(dsl::id))) .returning(Rack::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::Rack, @@ -194,25 +193,17 @@ impl DataStore { // which comes from the transaction created in `rack_set_initialized`. 
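The transaction error mapping changes in lockstep: the old TransactionError::Pool arm becomes TransactionError::Connection. Sketch of the shape used throughout these files; the closure body is elided and stands in for the real per-file logic:

    self.pool_connection_authorized(opctx)
        .await?
        .transaction_async(|conn| async move {
            // ... statements run against `&conn`, errors become TxnError ...
            Ok(())
        })
        .await
        .map_err(|e| match e {
            TransactionError::CustomError(e) => e,
            TransactionError::Connection(e) => {
                public_error_from_diesel(e, ErrorHandler::Server)
            }
        })
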
#[allow(clippy::too_many_arguments)] - async fn rack_create_recovery_silo( + async fn rack_create_recovery_silo( &self, opctx: &OpContext, - conn: &(impl AsyncConnection + Sync), + conn: &async_bb8_diesel::Connection, log: &slog::Logger, recovery_silo: external_params::SiloCreate, recovery_silo_fq_dns_name: String, recovery_user_id: external_params::UserId, recovery_user_password_hash: omicron_passwords::PasswordHashString, dns_update: DnsVersionUpdateBuilder, - ) -> Result<(), TxnError> - where - ConnError: From + Send + 'static, - PoolError: From, - TransactionError: From, - TxnError: From, - async_bb8_diesel::Connection: - AsyncConnection, - { + ) -> Result<(), TxnError> { let db_silo = self .silo_create_conn( conn, @@ -289,17 +280,13 @@ impl DataStore { Ok(()) } - async fn rack_populate_service_records( + async fn rack_populate_service_records( &self, - conn: &(impl AsyncConnection + Sync), + conn: &async_bb8_diesel::Connection, log: &slog::Logger, service_pool: &db::model::IpPool, service: internal_params::ServicePutRequest, - ) -> Result<(), TxnError> - where - ConnError: From + Send + 'static, - PoolError: From, - { + ) -> Result<(), TxnError> { use internal_params::ServiceKind; let service_db = db::model::Service::new( @@ -431,7 +418,7 @@ impl DataStore { // the low-frequency of calls, this optimization has been deferred. let log = opctx.log.clone(); let rack = self - .pool_authorized(opctx) + .pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { // Early exit if the rack has already been initialized. @@ -443,7 +430,7 @@ impl DataStore { .map_err(|e| { warn!(log, "Initializing Rack: Rack UUID not found"); TxnError::CustomError(RackInitError::RackUpdate { - err: PoolError::from(e), + err: e, rack_id, }) })?; @@ -548,9 +535,9 @@ impl DataStore { .returning(Rack::as_returning()) .get_result_async::(&conn) .await - .map_err(|e| { + .map_err(|err| { TxnError::CustomError(RackInitError::RackUpdate { - err: PoolError::from(e), + err, rack_id, }) })?; @@ -612,7 +599,7 @@ impl DataStore { use crate::db::schema::external_ip::dsl as extip_dsl; use crate::db::schema::service::dsl as service_dsl; type TxnError = TransactionError; - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? 
.transaction_async(|conn| async move { let ips = extip_dsl::external_ip @@ -644,8 +631,8 @@ impl DataStore { .await .map_err(|error: TxnError| match error { TransactionError::CustomError(err) => err, - TransactionError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TransactionError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } }) } @@ -879,7 +866,7 @@ mod test { async fn [](db: &DataStore) -> Vec<$model> { use crate::db::schema::$table::dsl; use nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL; - db.pool_for_tests() + db.pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { diff --git a/nexus/db-queries/src/db/datastore/region.rs b/nexus/db-queries/src/db/datastore/region.rs index 6bfea9085d..5bc79b9481 100644 --- a/nexus/db-queries/src/db/datastore/region.rs +++ b/nexus/db-queries/src/db/datastore/region.rs @@ -9,7 +9,7 @@ use super::RegionAllocationStrategy; use super::RunnableQuery; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::lookup::LookupPath; @@ -51,9 +51,11 @@ impl DataStore { volume_id: Uuid, ) -> Result, Error> { Self::get_allocated_regions_query(volume_id) - .get_results_async::<(Dataset, Region)>(self.pool()) + .get_results_async::<(Dataset, Region)>( + &*self.pool_connection_unauthorized().await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } async fn get_block_size_from_disk_source( @@ -136,9 +138,11 @@ impl DataStore { extent_count, allocation_strategy, ) - .get_results_async(self.pool()) + .get_results_async(&*self.pool_connection_authorized(&opctx).await?) .await - .map_err(|e| crate::db::queries::region_allocation::from_pool(e))?; + .map_err(|e| { + crate::db::queries::region_allocation::from_diesel(e) + })?; Ok(dataset_and_regions) } @@ -168,8 +172,9 @@ impl DataStore { // transaction" error. let transaction = { |region_ids: Vec| async { - self.pool() - .transaction(move |conn| { + self.pool_connection_unauthorized() + .await? + .transaction_async(|conn| async move { use db::schema::dataset::dsl as dataset_dsl; use db::schema::region::dsl as region_dsl; @@ -177,7 +182,7 @@ impl DataStore { let datasets = diesel::delete(region_dsl::region) .filter(region_dsl::id.eq_any(region_ids)) .returning(region_dsl::dataset_id) - .get_results::(conn)?; + .get_results_async::(&conn).await?; // Update datasets to which the regions belonged. for dataset in datasets { @@ -191,7 +196,7 @@ impl DataStore { * region_dsl::extent_count, )) .nullable() - .get_result(conn)?; + .get_result_async(&conn).await?; let dataset_total_occupied_size: i64 = if let Some( dataset_total_occupied_size, @@ -220,7 +225,7 @@ impl DataStore { dataset_dsl::size_used .eq(dataset_total_occupied_size), ) - .execute(conn)?; + .execute_async(&conn).await?; } Ok(()) @@ -269,10 +274,10 @@ impl DataStore { * region_dsl::extent_count, )) .nullable() - .get_result_async(self.pool()) + .get_result_async(&*self.pool_connection_unauthorized().await?) 
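The regions_hard_delete hunk above also converts a synchronous .transaction(...) into transaction_async, so every statement in the closure is awaited. A trimmed sketch keeping only the initial delete; the dataset size bookkeeping from the real change is elided:

    self.pool_connection_unauthorized()
        .await?
        .transaction_async(|conn| async move {
            use db::schema::region::dsl as region_dsl;
            // Each statement now runs async against the transaction connection.
            diesel::delete(region_dsl::region)
                .filter(region_dsl::id.eq_any(region_ids))
                .execute_async(&conn)
                .await?;
            Ok(())
        })
        .await
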
.await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; if let Some(total_occupied_size) = total_occupied_size { diff --git a/nexus/db-queries/src/db/datastore/region_snapshot.rs b/nexus/db-queries/src/db/datastore/region_snapshot.rs index dab3a90bcb..0a707e4504 100644 --- a/nexus/db-queries/src/db/datastore/region_snapshot.rs +++ b/nexus/db-queries/src/db/datastore/region_snapshot.rs @@ -6,7 +6,7 @@ use super::DataStore; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::RegionSnapshot; use async_bb8_diesel::AsyncRunQueryDsl; @@ -25,10 +25,10 @@ impl DataStore { diesel::insert_into(dsl::region_snapshot) .values(region_snapshot.clone()) .on_conflict_do_nothing() - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await .map(|_| ()) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn region_snapshot_remove( @@ -43,9 +43,9 @@ impl DataStore { .filter(dsl::dataset_id.eq(dataset_id)) .filter(dsl::region_id.eq(region_id)) .filter(dsl::snapshot_id.eq(snapshot_id)) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await .map(|_rows_deleted| ()) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/role.rs b/nexus/db-queries/src/db/datastore/role.rs index ba217ff350..f1198c239b 100644 --- a/nexus/db-queries/src/db/datastore/role.rs +++ b/nexus/db-queries/src/db/datastore/role.rs @@ -11,7 +11,7 @@ use crate::context::OpContext; use crate::db; use crate::db::datastore::RunnableQuery; use crate::db::datastore::RunnableQueryNoReturn; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::fixed_data::role_assignment::BUILTIN_ROLE_ASSIGNMENTS; @@ -24,7 +24,6 @@ use crate::db::pagination::paginated_multicolumn; use crate::db::pool::DbConnection; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; -use async_bb8_diesel::PoolError; use diesel::prelude::*; use nexus_types::external_api::shared; use omicron_common::api::external::DataPageParams; @@ -43,15 +42,17 @@ impl DataStore { ) -> ListResultVec { use db::schema::role_builtin::dsl; opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + + let conn = self.pool_connection_authorized(opctx).await?; paginated_multicolumn( dsl::role_builtin, (dsl::resource_type, dsl::role_name), pagparams, ) .select(RoleBuiltin::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Load built-in roles into the database @@ -75,15 +76,14 @@ impl DataStore { .collect::>(); debug!(opctx.log, "attempting to create built-in roles"); + let conn = self.pool_connection_authorized(opctx).await?; let count = diesel::insert_into(dsl::role_builtin) .values(builtin_roles) .on_conflict((dsl::resource_type, dsl::role_name)) .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) 
+ .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; info!(opctx.log, "created {} built-in roles", count); Ok(()) } @@ -99,6 +99,7 @@ impl DataStore { opctx.authorize(authz::Action::Modify, &authz::DATABASE).await?; debug!(opctx.log, "attempting to create built-in role assignments"); + let conn = self.pool_connection_authorized(opctx).await?; let count = diesel::insert_into(dsl::role_assignment) .values(&*BUILTIN_ROLE_ASSIGNMENTS) .on_conflict(( @@ -109,11 +110,9 @@ impl DataStore { dsl::role_name, )) .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; info!(opctx.log, "created {} built-in role assignments", count); Ok(()) } @@ -141,7 +140,7 @@ impl DataStore { // into some hurt by assigning loads of roles to someone and having that // person attempt to access anything. - self.pool_authorized(opctx).await? + self.pool_connection_authorized(opctx).await? .transaction_async(|conn| async move { let mut role_assignments = dsl::role_assignment .filter(dsl::identity_type.eq(identity_type.clone())) @@ -175,7 +174,7 @@ impl DataStore { Ok(role_assignments) }) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Fetches all of the externally-visible role assignments for the specified @@ -196,28 +195,19 @@ impl DataStore { opctx: &OpContext, authz_resource: &T, ) -> ListResultVec { - self.role_assignment_fetch_visible_conn( - opctx, - authz_resource, - self.pool_authorized(opctx).await?, - ) - .await + let conn = self.pool_connection_authorized(opctx).await?; + self.role_assignment_fetch_visible_conn(opctx, authz_resource, &conn) + .await } pub async fn role_assignment_fetch_visible_conn< T: authz::ApiResourceWithRoles + AuthorizedResource + Clone, - ConnErr, >( &self, opctx: &OpContext, authz_resource: &T, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), - ) -> ListResultVec - where - ConnErr: From + Send + 'static, - PoolError: From, - { + conn: &async_bb8_diesel::Connection, + ) -> ListResultVec { opctx.authorize(authz::Action::ReadPolicy, authz_resource).await?; let resource_type = authz_resource.resource_type(); let resource_id = authz_resource.resource_id(); @@ -231,9 +221,7 @@ impl DataStore { .select(RoleAssignment::as_select()) .load_async::(conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e.into(), ErrorHandler::Server) - }) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Removes all existing externally-visble role assignments on @@ -283,7 +271,7 @@ impl DataStore { // We might instead want to first-class the idea of Policies in the // database so that we can build up a whole new Policy in batches and // then flip the resource over to using it. - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? 
.transaction_async(|conn| async move { delete_old_query.execute_async(&conn).await?; @@ -292,8 +280,8 @@ impl DataStore { .await .map_err(|e| match e { TransactionError::CustomError(e) => e, - TransactionError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TransactionError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } }) } diff --git a/nexus/db-queries/src/db/datastore/saga.rs b/nexus/db-queries/src/db/datastore/saga.rs index 91e69e3fe5..2ec0c40799 100644 --- a/nexus/db-queries/src/db/datastore/saga.rs +++ b/nexus/db-queries/src/db/datastore/saga.rs @@ -6,7 +6,7 @@ use super::DataStore; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::Generation; use crate::db::pagination::paginated; @@ -30,11 +30,9 @@ impl DataStore { diesel::insert_into(dsl::saga) .values(saga.clone()) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(()) } @@ -48,10 +46,10 @@ impl DataStore { // owning this saga. diesel::insert_into(dsl::saga_node_event) .values(event.clone()) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict(ResourceType::SagaDbg, "Saga Event"), ) @@ -75,10 +73,10 @@ impl DataStore { .filter(dsl::adopt_generation.eq(current_adopt_generation)) .set(dsl::saga_state.eq(db::saga_types::SagaCachedState(new_state))) .check_if_exists::(saga_id) - .execute_and_check(self.pool()) + .execute_and_check(&*self.pool_connection_unauthorized().await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::SagaDbg, @@ -117,10 +115,10 @@ impl DataStore { steno::SagaCachedState::Done, ))) .filter(dsl::current_sec.eq(*sec_id)) - .load_async(self.pool()) + .load_async(&*self.pool_connection_unauthorized().await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::SagaDbg, @@ -138,10 +136,12 @@ impl DataStore { use db::schema::saga_node_event::dsl; paginated(dsl::saga_node_event, dsl::saga_id, &pagparams) .filter(dsl::saga_id.eq(id)) - .load_async::(self.pool()) + .load_async::( + &*self.pool_connection_unauthorized().await?, + ) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::SagaDbg, diff --git a/nexus/db-queries/src/db/datastore/service.rs b/nexus/db-queries/src/db/datastore/service.rs index b2c8505fea..40bf250abe 100644 --- a/nexus/db-queries/src/db/datastore/service.rs +++ b/nexus/db-queries/src/db/datastore/service.rs @@ -10,16 +10,14 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Service; use crate::db::model::Sled; use crate::db::pagination::paginated; use crate::db::pool::DbConnection; -use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; -use async_bb8_diesel::PoolError; use chrono::Utc; use diesel::prelude::*; use diesel::upsert::excluded; @@ -39,20 +37,16 @@ impl DataStore { opctx: &OpContext, service: Service, ) -> CreateResult { - let conn = self.pool_authorized(opctx).await?; - self.service_upsert_conn(conn, service).await + let conn = self.pool_connection_authorized(opctx).await?; + self.service_upsert_conn(&conn, service).await } /// Stores a new service in the database (using an existing db connection). - pub(crate) async fn service_upsert_conn( + pub(crate) async fn service_upsert_conn( &self, - conn: &(impl AsyncConnection + Sync), + conn: &async_bb8_diesel::Connection, service: Service, - ) -> CreateResult - where - ConnError: From + Send + 'static, - PoolError: From, - { + ) -> CreateResult { use db::schema::service::dsl; let service_id = service.id(); @@ -78,15 +72,13 @@ impl DataStore { type_name: ResourceType::Sled, lookup_type: LookupType::ById(sled_id), }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::Service, - &service_id.to_string(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::Service, + &service_id.to_string(), + ), + ), }) } @@ -102,8 +94,8 @@ impl DataStore { paginated(dsl::service, dsl::id, pagparams) .filter(dsl::kind.eq(kind)) .select(Service::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
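Helpers with a _conn suffix (like service_upsert_conn above) now take the concrete async_bb8_diesel::Connection type instead of being generic over any AsyncConnection, so a caller that already holds a connection passes it straight through. Sketch of the call side, matching the two-line wrapper in the hunk; `service` is assumed to be the db::model::Service being upserted:

    // Check out once, then reuse the same connection for the `_conn` helper
    // (for example, so several setup steps can share one connection).
    let conn = self.pool_connection_authorized(opctx).await?;
    self.service_upsert_conn(&conn, service).await
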
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/silo.rs b/nexus/db-queries/src/db/datastore/silo.rs index ed2b97257e..5e909b84c4 100644 --- a/nexus/db-queries/src/db/datastore/silo.rs +++ b/nexus/db-queries/src/db/datastore/silo.rs @@ -10,8 +10,8 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::datastore::RunnableQuery; -use crate::db::error::diesel_pool_result_optional; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::diesel_result_optional; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::fixed_data::silo::{DEFAULT_SILO, INTERNAL_SILO}; @@ -24,7 +24,6 @@ use crate::db::pagination::paginated; use crate::db::pool::DbConnection; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncRunQueryDsl; -use async_bb8_diesel::PoolError; use chrono::Utc; use diesel::prelude::*; use nexus_db_model::Certificate; @@ -66,11 +65,9 @@ impl DataStore { .values([&*DEFAULT_SILO, &*INTERNAL_SILO]) .on_conflict(dsl::id) .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; info!(opctx.log, "created {} built-in silos", count); self.virtual_provisioning_collection_create( @@ -126,9 +123,9 @@ impl DataStore { new_silo_dns_names: &[String], dns_update: DnsVersionUpdateBuilder, ) -> CreateResult { - let conn = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; self.silo_create_conn( - conn, + &conn, opctx, nexus_opctx, new_silo_params, @@ -138,27 +135,15 @@ impl DataStore { .await } - pub async fn silo_create_conn( + pub async fn silo_create_conn( &self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, opctx: &OpContext, nexus_opctx: &OpContext, new_silo_params: params::SiloCreate, new_silo_dns_names: &[String], dns_update: DnsVersionUpdateBuilder, - ) -> CreateResult - where - ConnErr: From + Send + 'static, - PoolError: From, - TransactionError: From, - - CalleeConnErr: From + Send + 'static, - PoolError: From, - TransactionError: From, - async_bb8_diesel::Connection: - AsyncConnection, - { + ) -> CreateResult { let silo_id = Uuid::new_v4(); let silo_group_id = Uuid::new_v4(); @@ -220,8 +205,8 @@ impl DataStore { .get_result_async(&conn) .await .map_err(|e| { - public_error_from_diesel_pool( - e.into(), + public_error_from_diesel( + e, ErrorHandler::Conflict( ResourceType::Silo, new_silo_params.identity.name.as_str(), @@ -276,8 +261,8 @@ impl DataStore { .await .map_err(|e| match e { TransactionError::CustomError(e) => e, - TransactionError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TransactionError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } }) } @@ -294,9 +279,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::discoverable.eq(true)) .select(Silo::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn silos_list( @@ -325,9 +310,9 @@ impl DataStore { query .select(Silo::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn silo_delete( @@ -348,19 +333,21 @@ impl DataStore { use db::schema::silo_user; use db::schema::silo_user_password_hash; + let conn = self.pool_connection_authorized(opctx).await?; + // Make sure there are no projects present within this silo. let id = authz_silo.id(); let rcgen = db_silo.rcgen; - let project_found = diesel_pool_result_optional( + let project_found = diesel_result_optional( project::dsl::project .filter(project::dsl::silo_id.eq(id)) .filter(project::dsl::time_deleted.is_null()) .select(project::dsl::id) .limit(1) - .first_async::(self.pool_authorized(opctx).await?) + .first_async::(&*conn) .await, ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if project_found.is_some() { return Err(Error::InvalidRequest { @@ -371,48 +358,47 @@ impl DataStore { let now = Utc::now(); type TxnError = TransactionError; - self.pool_authorized(opctx) - .await? - .transaction_async(|conn| async move { - let updated_rows = diesel::update(silo::dsl::silo) - .filter(silo::dsl::time_deleted.is_null()) - .filter(silo::dsl::id.eq(id)) - .filter(silo::dsl::rcgen.eq(rcgen)) - .set(silo::dsl::time_deleted.eq(now)) - .execute_async(&conn) - .await - .map_err(|e| { - public_error_from_diesel_pool( - PoolError::from(e), - ErrorHandler::NotFoundByResource(authz_silo), - ) - })?; - - if updated_rows == 0 { - return Err(TxnError::CustomError(Error::InvalidRequest { - message: "silo deletion failed due to concurrent modification" + conn.transaction_async(|conn| async move { + let updated_rows = diesel::update(silo::dsl::silo) + .filter(silo::dsl::time_deleted.is_null()) + .filter(silo::dsl::id.eq(id)) + .filter(silo::dsl::rcgen.eq(rcgen)) + .set(silo::dsl::time_deleted.eq(now)) + .execute_async(&conn) + .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_silo), + ) + })?; + + if updated_rows == 0 { + return Err(TxnError::CustomError(Error::InvalidRequest { + message: + "silo deletion failed due to concurrent modification" .to_string(), - })); - } + })); + } - self.virtual_provisioning_collection_delete_on_connection( - &conn, - id, - ).await?; + self.virtual_provisioning_collection_delete_on_connection( + &conn, id, + ) + .await?; - self.dns_update(dns_opctx, &conn, dns_update).await?; + self.dns_update(dns_opctx, &conn, dns_update).await?; - info!(opctx.log, "deleted silo {}", id); + info!(opctx.log, "deleted silo {}", id); - Ok(()) - }) - .await - .map_err(|e| match e { - TxnError::CustomError(e) => e, - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) - } - })?; + Ok(()) + }) + .await + .map_err(|e| match e { + TxnError::CustomError(e) => e, + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) + } + })?; // TODO-correctness This needs to happen in a saga or some other // mechanism that ensures it happens even if we crash at this point. 
@@ -429,9 +415,9 @@ impl DataStore { .select(silo_user::dsl::id), ), ) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; debug!( opctx.log, @@ -442,11 +428,9 @@ impl DataStore { .filter(silo_user::dsl::silo_id.eq(id)) .filter(silo_user::dsl::time_deleted.is_null()) .set(silo_user::dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; debug!( opctx.log, @@ -464,10 +448,10 @@ impl DataStore { .select(silo_group::dsl::id), ), ) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; debug!( @@ -480,11 +464,9 @@ impl DataStore { .filter(silo_group::dsl::silo_id.eq(id)) .filter(silo_group::dsl::time_deleted.is_null()) .set(silo_group::dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; debug!( opctx.log, @@ -498,11 +480,9 @@ impl DataStore { .filter(idp_dsl::silo_id.eq(id)) .filter(idp_dsl::time_deleted.is_null()) .set(idp_dsl::time_deleted.eq(Utc::now())) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; debug!(opctx.log, "deleted {} silo IdPs for silo {}", updated_rows, id); @@ -512,11 +492,9 @@ impl DataStore { .filter(saml_idp_dsl::silo_id.eq(id)) .filter(saml_idp_dsl::time_deleted.is_null()) .set(saml_idp_dsl::time_deleted.eq(Utc::now())) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; debug!( opctx.log, @@ -530,11 +508,9 @@ impl DataStore { .filter(cert_dsl::silo_id.eq(id)) .filter(cert_dsl::time_deleted.is_null()) .set(cert_dsl::time_deleted.eq(Utc::now())) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; debug!(opctx.log, "deleted {} silo IdPs for silo {}", updated_rows, id); diff --git a/nexus/db-queries/src/db/datastore/silo_group.rs b/nexus/db-queries/src/db/datastore/silo_group.rs index 0261dc5542..d13986bb2d 100644 --- a/nexus/db-queries/src/db/datastore/silo_group.rs +++ b/nexus/db-queries/src/db/datastore/silo_group.rs @@ -9,7 +9,7 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::datastore::RunnableQuery; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::SiloGroup; @@ -56,11 +56,9 @@ impl DataStore { DataStore::silo_group_ensure_query(opctx, authz_silo, silo_group) .await? 
- .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(self .silo_group_optional_lookup(opctx, authz_silo, external_id) @@ -83,10 +81,10 @@ impl DataStore { .filter(dsl::external_id.eq(external_id)) .filter(dsl::time_deleted.is_null()) .select(db::model::SiloGroup::as_select()) - .first_async(self.pool_authorized(opctx).await?) + .first_async(&*self.pool_connection_authorized(opctx).await?) .await .optional() - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn silo_group_membership_for_user( @@ -101,9 +99,9 @@ impl DataStore { dsl::silo_group_membership .filter(dsl::silo_user_id.eq(silo_user_id)) .select(SiloGroupMembership::as_returning()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn silo_groups_for_self( @@ -125,9 +123,9 @@ impl DataStore { .filter(sgm::silo_user_id.eq(actor.actor_id())) .filter(sg::time_deleted.is_null()) .select(SiloGroup::as_returning()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Update a silo user's group membership: @@ -147,7 +145,7 @@ impl DataStore { ) -> UpdateResult<()> { opctx.authorize(authz::Action::Modify, authz_silo_user).await?; - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { use db::schema::silo_group_membership::dsl; @@ -166,7 +164,7 @@ impl DataStore { .iter() .map(|group_id| db::model::SiloGroupMembership { silo_group_id: *group_id, - silo_user_id: silo_user_id, + silo_user_id, }) .collect(); @@ -178,7 +176,7 @@ impl DataStore { Ok(()) }) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn silo_group_delete( @@ -197,7 +195,7 @@ impl DataStore { let group_id = authz_silo_group.id(); - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { use db::schema::silo_group_membership; @@ -240,10 +238,9 @@ impl DataStore { id )), - TxnError::Pool(pool_error) => public_error_from_diesel_pool( - pool_error, - ErrorHandler::Server, - ), + TxnError::Connection(error) => { + public_error_from_diesel(error, ErrorHandler::Server) + } }) } @@ -260,8 +257,10 @@ impl DataStore { .filter(dsl::silo_id.eq(authz_silo.id())) .filter(dsl::time_deleted.is_null()) .select(SiloGroup::as_select()) - .load_async::(self.pool_authorized(opctx).await?) 
+ .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/silo_user.rs b/nexus/db-queries/src/db/datastore/silo_user.rs index e0fcf6c469..6084f8c2ab 100644 --- a/nexus/db-queries/src/db/datastore/silo_user.rs +++ b/nexus/db-queries/src/db/datastore/silo_user.rs @@ -10,7 +10,7 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::datastore::IdentityMetadataCreateParams; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::Name; use crate::db::model::Silo; @@ -53,13 +53,14 @@ impl DataStore { use db::schema::silo_user::dsl; let silo_user_external_id = silo_user.external_id.clone(); + let conn = self.pool_connection_unauthorized().await?; diesel::insert_into(dsl::silo_user) .values(silo_user) .returning(SiloUser::as_returning()) - .get_result_async(self.pool()) + .get_result_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SiloUser, @@ -91,7 +92,7 @@ impl DataStore { // TODO-robustness We might consider the RFD 192 "rcgen" pattern as well // so that people can't, say, login while we do this. let authz_silo_user_id = authz_silo_user.id(); - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|mut conn| async move { // Delete the user record. @@ -148,7 +149,7 @@ impl DataStore { }) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_silo_user), ) @@ -176,11 +177,11 @@ impl DataStore { .filter(dsl::external_id.eq(external_id.to_string())) .filter(dsl::time_deleted.is_null()) .select(SiloUser::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })? + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .pop() .map(|db_silo_user| { let authz_silo_user = authz::SiloUser::new( @@ -208,9 +209,11 @@ impl DataStore { .filter(silo_id.eq(authz_silo_user_list.silo().id())) .filter(time_deleted.is_null()) .select(SiloUser::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn silo_group_users_list( @@ -237,9 +240,11 @@ impl DataStore { ), )) .select(SiloUser::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Updates or deletes the password hash for a given Silo user @@ -280,18 +285,18 @@ impl DataStore { .on_conflict(dsl::silo_user_id) .do_update() .set(SiloUserPasswordUpdate::new(hash_for_update)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; } else { diesel::delete(dsl::silo_user_password_hash) .filter(dsl::silo_user_id.eq(authz_silo_user.id())) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; } @@ -323,12 +328,10 @@ impl DataStore { .filter(dsl::silo_user_id.eq(authz_silo_user.id())) .select(SiloUserPasswordHash::as_select()) .load_async::( - self.pool_authorized(opctx).await?, + &*self.pool_connection_authorized(opctx).await?, ) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })? + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .pop()) } @@ -341,9 +344,11 @@ impl DataStore { opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; paginated(dsl::user_builtin, dsl::name, pagparams) .select(UserBuiltin::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Load built-in users into the database @@ -383,11 +388,9 @@ impl DataStore { .values(builtin_users) .on_conflict(dsl::id) .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; info!(opctx.log, "created {} built-in users", count); Ok(()) @@ -410,11 +413,9 @@ impl DataStore { .values(users) .on_conflict(dsl::id) .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; info!(opctx.log, "created {} silo users", count); Ok(()) @@ -437,11 +438,9 @@ impl DataStore { dsl::role_name, )) .do_nothing() - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; info!(opctx.log, "created {} silo user role assignments", count); Ok(()) diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs index a70ec26d8c..ec6cca0071 100644 --- a/nexus/db-queries/src/db/datastore/sled.rs +++ b/nexus/db-queries/src/db/datastore/sled.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::identity::Asset; @@ -46,10 +46,10 @@ impl DataStore { dsl::reservoir_size.eq(sled.reservoir_size), )) .returning(Sled::as_returning()) - .get_result_async(self.pool()) + .get_result_async(&*self.pool_connection_unauthorized().await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::Sled, @@ -68,9 +68,9 @@ impl DataStore { use db::schema::sled::dsl; paginated(dsl::sled, dsl::id, pagparams) .select(Sled::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn sled_reservation_create( @@ -87,7 +87,7 @@ impl DataStore { } type TxnError = TransactionError; - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { use db::schema::sled_resource::dsl as resource_dsl; @@ -183,8 +183,8 @@ impl DataStore { "No sleds can fit the requested instance", ) } - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } }) } @@ -197,11 +197,9 @@ impl DataStore { use db::schema::sled_resource::dsl as resource_dsl; diesel::delete(resource_dsl::sled_resource) .filter(resource_dsl::id.eq(resource_id)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; Ok(()) } } diff --git a/nexus/db-queries/src/db/datastore/sled_instance.rs b/nexus/db-queries/src/db/datastore/sled_instance.rs index 9ba6861cec..dbdd696d70 100644 --- a/nexus/db-queries/src/db/datastore/sled_instance.rs +++ b/nexus/db-queries/src/db/datastore/sled_instance.rs @@ -3,7 +3,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::pagination::paginated; use async_bb8_diesel::AsyncRunQueryDsl; @@ -25,8 +25,10 @@ impl DataStore { paginated(dsl::sled_instance, dsl::id, &pagparams) .filter(dsl::active_sled_id.eq(authz_sled.id())) .select(SledInstance::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/snapshot.rs b/nexus/db-queries/src/db/datastore/snapshot.rs index d8db6d72a4..29fbb38e88 100644 --- a/nexus/db-queries/src/db/datastore/snapshot.rs +++ b/nexus/db-queries/src/db/datastore/snapshot.rs @@ -10,7 +10,7 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::Generation; use crate::db::model::Name; @@ -63,7 +63,7 @@ impl DataStore { let project_id = snapshot.project_id; let snapshot: Snapshot = self - .pool_authorized(opctx) + .pool_connection_authorized(opctx) .await? 
.transaction_async(|conn| async move { use db::schema::snapshot::dsl; @@ -157,16 +157,13 @@ impl DataStore { } } AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) } }, }, - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } })?; @@ -203,10 +200,10 @@ impl DataStore { .filter(dsl::gen.eq(old_gen)) .set((dsl::state.eq(new_state), dsl::gen.eq(next_gen))) .returning(Snapshot::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_snapshot), ) @@ -235,9 +232,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::project_id.eq(authz_project.id())) .select(Snapshot::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn project_delete_snapshot( @@ -273,11 +270,9 @@ impl DataStore { dsl::state.eq(SnapshotState::Destroyed), )) .check_if_exists::(snapshot_id) - .execute_async(self.pool_authorized(&opctx).await?) + .execute_async(&*self.pool_connection_authorized(&opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if updated_rows == 0 { // Either: diff --git a/nexus/db-queries/src/db/datastore/ssh_key.rs b/nexus/db-queries/src/db/datastore/ssh_key.rs index 622a54d740..c925903e12 100644 --- a/nexus/db-queries/src/db/datastore/ssh_key.rs +++ b/nexus/db-queries/src/db/datastore/ssh_key.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Resource; use crate::db::model::Name; @@ -48,9 +48,9 @@ impl DataStore { .filter(dsl::silo_user_id.eq(authz_user.id())) .filter(dsl::time_deleted.is_null()) .select(SshKey::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Create a new SSH public key for a user. @@ -68,10 +68,10 @@ impl DataStore { diesel::insert_into(dsl::ssh_key) .values(ssh_key) .returning(SshKey::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict(ResourceType::SshKey, &name), ) @@ -92,10 +92,10 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .set(dsl::time_deleted.eq(Utc::now())) .check_if_exists::(authz_ssh_key.id()) - .execute_and_check(self.pool_authorized(opctx).await?) + .execute_and_check(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_ssh_key), ) diff --git a/nexus/db-queries/src/db/datastore/switch.rs b/nexus/db-queries/src/db/datastore/switch.rs index 56cfb9a96a..148f4577de 100644 --- a/nexus/db-queries/src/db/datastore/switch.rs +++ b/nexus/db-queries/src/db/datastore/switch.rs @@ -2,7 +2,7 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Switch; @@ -20,6 +20,8 @@ impl DataStore { /// Stores a new switch in the database. pub async fn switch_upsert(&self, switch: Switch) -> CreateResult { use db::schema::switch::dsl; + + let conn = self.pool_connection_unauthorized().await?; diesel::insert_into(dsl::switch) .values(switch.clone()) .on_conflict(dsl::id) @@ -29,10 +31,10 @@ impl DataStore { dsl::rack_id.eq(switch.rack_id), )) .returning(Switch::as_returning()) - .get_result_async(self.pool()) + .get_result_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::Switch, @@ -51,8 +53,8 @@ impl DataStore { use db::schema::switch::dsl; paginated(dsl::switch, dsl::id, pagparams) .select(Switch::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/switch_interface.rs b/nexus/db-queries/src/db/datastore/switch_interface.rs index 5c26dc5431..498064ce37 100644 --- a/nexus/db-queries/src/db/datastore/switch_interface.rs +++ b/nexus/db-queries/src/db/datastore/switch_interface.rs @@ -9,14 +9,12 @@ use crate::db; use crate::db::datastore::address_lot::{ ReserveBlockError, ReserveBlockTxnError, }; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::LoopbackAddress; use crate::db::pagination::paginated; -use async_bb8_diesel::{ - AsyncConnection, AsyncRunQueryDsl, ConnectionError, PoolError, -}; +use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl, ConnectionError}; use diesel::result::Error as DieselError; use diesel::{ExpressionMethods, QueryDsl, SelectableHelper}; use ipnetwork::IpNetwork; @@ -44,14 +42,14 @@ impl DataStore { type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; let inet = IpNetwork::new(params.address, params.mask) .map_err(|_| Error::invalid_request("invalid address"))?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { let lot_id = authz_address_lot.id(); let (block, rsvd_block) = crate::db::datastore::address_lot::try_reserve_block( @@ -67,7 +65,9 @@ impl DataStore { LoopbackAddressCreateError::ReserveBlock(err), ) } - ReserveBlockTxnError::Pool(err) => TxnError::Pool(err), + ReserveBlockTxnError::Connection(err) => { + TxnError::Connection(err) + } })?; // Address block reserved, now create the loopback address. 
@@ -103,17 +103,17 @@ impl DataStore { ReserveBlockError::AddressNotInLot, ), ) => Error::invalid_request("address not in lot"), - TxnError::Pool(e) => match e { - PoolError::Connection(ConnectionError::Query( - DieselError::DatabaseError(_, _), - )) => public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::LoopbackAddress, - &format!("lo {}", inet), - ), - ), - _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + TxnError::Connection(e) => match e { + ConnectionError::Query(DieselError::DatabaseError(_, _)) => { + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::LoopbackAddress, + &format!("lo {}", inet), + ), + ) + } + _ => public_error_from_diesel(e, ErrorHandler::Server), }, }) } @@ -128,11 +128,11 @@ impl DataStore { let id = authz_loopback_address.id(); - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { let la = diesel::delete(dsl::loopback_address) .filter(dsl::id.eq(id)) .returning(LoopbackAddress::as_returning()) @@ -147,7 +147,7 @@ impl DataStore { Ok(()) }) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn loopback_address_get( @@ -160,15 +160,15 @@ impl DataStore { let id = authz_loopback_address.id(); - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; loopback_dsl::loopback_address .filter(loopback_address::id.eq(id)) .select(LoopbackAddress::as_select()) .limit(1) - .first_async::(pool) + .first_async::(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn loopback_address_list( @@ -180,8 +180,8 @@ impl DataStore { paginated(dsl::loopback_address, dsl::id, &pagparams) .select(LoopbackAddress::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs index 33dfd56359..940fedb473 100644 --- a/nexus/db-queries/src/db/datastore/switch_port.rs +++ b/nexus/db-queries/src/db/datastore/switch_port.rs @@ -9,7 +9,7 @@ use crate::db::datastore::address_lot::{ ReserveBlockError, ReserveBlockTxnError, }; use crate::db::datastore::UpdatePrecondition; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::model::{ @@ -20,9 +20,7 @@ use crate::db::model::{ SwitchVlanInterfaceConfig, }; use crate::db::pagination::paginated; -use async_bb8_diesel::{ - AsyncConnection, AsyncRunQueryDsl, ConnectionError, PoolError, -}; +use async_bb8_diesel::{AsyncConnection, AsyncRunQueryDsl, ConnectionError}; use diesel::result::Error as DieselError; use diesel::{ ExpressionMethods, JoinOnDsl, NullableExpressionMethods, QueryDsl, @@ -128,11 +126,11 @@ impl DataStore { } type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { // create the top level port settings object let port_settings = SwitchPortSettings::new(¶ms.identity); @@ -371,7 +369,7 @@ impl DataStore { SwitchPortSettingsCreateError::ReserveBlock(err) ) } - ReserveBlockTxnError::Pool(err) => TxnError::Pool(err), + ReserveBlockTxnError::Connection(err) => TxnError::Connection(err), })?; address_config.push(SwitchPortAddressConfig::new( @@ -418,17 +416,17 @@ impl DataStore { ReserveBlockError::AddressNotInLot ) ) => Error::invalid_request("address not in lot"), - TxnError::Pool(e) => match e { - PoolError::Connection(ConnectionError::Query( + TxnError::Connection(e) => match e { + ConnectionError::Query( DieselError::DatabaseError(_, _), - )) => public_error_from_diesel_pool( + ) => public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SwitchPortSettings, params.identity.name.as_str(), ), ), - _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + _ => public_error_from_diesel(e, ErrorHandler::Server), }, }) } @@ -446,7 +444,7 @@ impl DataStore { } type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; let selector = match ¶ms.port_settings { None => return Err(Error::invalid_request("name or id required")), @@ -455,7 +453,7 @@ impl DataStore { // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { use db::schema::switch_port_settings; let id = match selector { @@ -601,15 +599,15 @@ impl DataStore { SwitchPortSettingsDeleteError::SwitchPortSettingsNotFound) => { Error::invalid_request("port settings not found") } - TxnError::Pool(e) => match e { - PoolError::Connection(ConnectionError::Query( + TxnError::Connection(e) => match e { + ConnectionError::Query( DieselError::DatabaseError(_, _), - )) => { + ) => { let name = match ¶ms.port_settings { Some(name_or_id) => 
name_or_id.to_string(), None => String::new(), }; - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SwitchPortSettings, @@ -617,7 +615,7 @@ impl DataStore { ), ) }, - _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + _ => public_error_from_diesel(e, ErrorHandler::Server), }, }) } @@ -641,9 +639,9 @@ impl DataStore { } .filter(dsl::time_deleted.is_null()) .select(SwitchPortSettings::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn switch_port_settings_get( @@ -657,11 +655,11 @@ impl DataStore { } type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { // get the top level port settings object use db::schema::switch_port_settings::dsl as port_settings_dsl; @@ -806,12 +804,12 @@ impl DataStore { SwitchPortSettingsGetError::NotFound(name)) => { Error::not_found_by_name(ResourceType::SwitchPortSettings, &name) } - TxnError::Pool(e) => match e { - PoolError::Connection(ConnectionError::Query( + TxnError::Connection(e) => match e { + ConnectionError::Query( DieselError::DatabaseError(_, _), - )) => { + ) => { let name = name_or_id.to_string(); - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SwitchPortSettings, @@ -819,7 +817,7 @@ impl DataStore { ), ) }, - _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + _ => public_error_from_diesel(e, ErrorHandler::Server), }, }) } @@ -839,7 +837,7 @@ impl DataStore { } type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; let switch_port = SwitchPort::new( rack_id, switch_location.to_string(), @@ -848,7 +846,7 @@ impl DataStore { // TODO https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { use db::schema::rack; use db::schema::rack::dsl as rack_dsl; rack_dsl::rack @@ -880,17 +878,20 @@ impl DataStore { TxnError::CustomError(SwitchPortCreateError::RackNotFound) => { Error::invalid_request("rack not found") } - TxnError::Pool(e) => match e { - PoolError::Connection(ConnectionError::Query( - DieselError::DatabaseError(_, _), - )) => public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::SwitchPort, - &format!("{}/{}/{}", rack_id, &switch_location, &port,), - ), - ), - _ => public_error_from_diesel_pool(e, ErrorHandler::Server), + TxnError::Connection(e) => match e { + ConnectionError::Query(DieselError::DatabaseError(_, _)) => { + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::SwitchPort, + &format!( + "{}/{}/{}", + rack_id, &switch_location, &port, + ), + ), + ) + } + _ => public_error_from_diesel(e, ErrorHandler::Server), }, }) } @@ -908,11 +909,11 @@ impl DataStore { } type TxnError = TransactionError; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; // TODO 
https://github.com/oxidecomputer/omicron/issues/2811 // Audit external networking database transaction usage - pool.transaction_async(|conn| async move { + conn.transaction_async(|conn| async move { use db::schema::switch_port; use db::schema::switch_port::dsl as switch_port_dsl; @@ -957,8 +958,8 @@ impl DataStore { TxnError::CustomError(SwitchPortDeleteError::ActiveSettings) => { Error::invalid_request("must clear port settings first") } - TxnError::Pool(e) => { - public_error_from_diesel_pool(e, ErrorHandler::Server) + TxnError::Connection(e) => { + public_error_from_diesel(e, ErrorHandler::Server) } }) } @@ -972,9 +973,9 @@ impl DataStore { paginated(dsl::switch_port, dsl::id, pagparams) .select(SwitchPort::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn switch_port_get( @@ -985,15 +986,15 @@ impl DataStore { use db::schema::switch_port; use db::schema::switch_port::dsl as switch_port_dsl; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; switch_port_dsl::switch_port .filter(switch_port::id.eq(id)) .select(SwitchPort::as_select()) .limit(1) - .first_async::(pool) + .first_async::(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn switch_port_set_settings_id( @@ -1006,17 +1007,17 @@ impl DataStore { use db::schema::switch_port; use db::schema::switch_port::dsl as switch_port_dsl; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; match current { UpdatePrecondition::DontCare => { diesel::update(switch_port_dsl::switch_port) .filter(switch_port::id.eq(switch_port_id)) .set(switch_port::port_settings_id.eq(port_settings_id)) - .execute_async(pool) + .execute_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; } UpdatePrecondition::Null => { @@ -1024,10 +1025,10 @@ impl DataStore { .filter(switch_port::id.eq(switch_port_id)) .filter(switch_port::port_settings_id.is_null()) .set(switch_port::port_settings_id.eq(port_settings_id)) - .execute_async(pool) + .execute_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; } UpdatePrecondition::Value(current_id) => { @@ -1035,10 +1036,10 @@ impl DataStore { .filter(switch_port::id.eq(switch_port_id)) .filter(switch_port::port_settings_id.eq(current_id)) .set(switch_port::port_settings_id.eq(port_settings_id)) - .execute_async(pool) + .execute_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; } } @@ -1056,7 +1057,7 @@ impl DataStore { use db::schema::switch_port; use db::schema::switch_port::dsl as switch_port_dsl; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; let id: Uuid = switch_port_dsl::switch_port .filter(switch_port::rack_id.eq(rack_id)) .filter( @@ -1065,7 +1066,7 @@ impl DataStore { .filter(switch_port::port_name.eq(port_name.to_string())) .select(switch_port::id) .limit(1) - .first_async::(pool) + .first_async::(&*conn) .await 
.map_err(|_| { Error::not_found_by_name(ResourceType::SwitchPort, &port_name) @@ -1082,7 +1083,7 @@ impl DataStore { use db::schema::switch_port_settings; use db::schema::switch_port_settings::dsl as port_settings_dsl; - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; let db_name = name.to_string(); let id = port_settings_dsl::switch_port_settings @@ -1090,7 +1091,7 @@ impl DataStore { .filter(switch_port_settings::name.eq(db_name)) .select(switch_port_settings::id) .limit(1) - .first_async::(pool) + .first_async::(&*conn) .await .map_err(|_| { Error::not_found_by_name( @@ -1122,8 +1123,10 @@ impl DataStore { // pagination in the future, or maybe a way to constrain the query to // a rack? .limit(64) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/update.rs b/nexus/db-queries/src/db/datastore/update.rs index 851ee66bd9..5a3e3b27e4 100644 --- a/nexus/db-queries/src/db/datastore/update.rs +++ b/nexus/db-queries/src/db/datastore/update.rs @@ -9,7 +9,7 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::error::{ - public_error_from_diesel_pool, ErrorHandler, TransactionError, + public_error_from_diesel, ErrorHandler, TransactionError, }; use crate::db::model::{ ComponentUpdate, SemverVersion, SystemUpdate, UpdateArtifact, @@ -42,9 +42,9 @@ impl DataStore { .do_update() .set(artifact.clone()) .returning(UpdateArtifact::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn update_artifact_hard_delete_outdated( @@ -60,10 +60,10 @@ impl DataStore { use db::schema::update_artifact::dsl; diesel::delete(dsl::update_artifact) .filter(dsl::targets_role_version.lt(current_targets_role_version)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map(|_rows_deleted| ()) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) .internal_context("deleting outdated available artifacts") } @@ -84,10 +84,10 @@ impl DataStore { // to add more metadata to this model .set(time_modified.eq(Utc::now())) .returning(SystemUpdate::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::SystemUpdate, @@ -112,10 +112,10 @@ impl DataStore { system_update .filter(version.eq(target)) .select(SystemUpdate::as_select()) - .first_async(self.pool_authorized(opctx).await?) + .first_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::SystemUpdate, @@ -141,7 +141,7 @@ impl DataStore { let version_string = update.version.to_string(); - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? 
.transaction_async(|conn| async move { let db_update = diesel::insert_into(component_update::table) @@ -164,7 +164,7 @@ impl DataStore { .await .map_err(|e| match e { TransactionError::CustomError(e) => e, - TransactionError::Pool(e) => public_error_from_diesel_pool( + TransactionError::Connection(e) => public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::ComponentUpdate, @@ -186,9 +186,9 @@ impl DataStore { paginated(system_update, id, pagparams) .select(SystemUpdate::as_select()) .order(version.desc()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn system_update_components_list( @@ -205,9 +205,9 @@ impl DataStore { .inner_join(join_table::table) .filter(join_table::columns::system_update_id.eq(system_update_id)) .select(ComponentUpdate::as_select()) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn create_updateable_component( @@ -226,10 +226,10 @@ impl DataStore { diesel::insert_into(updateable_component) .values(component.clone()) .returning(UpdateableComponent::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::UpdateableComponent, @@ -250,9 +250,9 @@ impl DataStore { paginated(updateable_component, id, pagparams) .select(UpdateableComponent::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn lowest_component_system_version( @@ -266,9 +266,9 @@ impl DataStore { updateable_component .select(system_version) .order(system_version.asc()) - .first_async(self.pool_authorized(opctx).await?) + .first_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn highest_component_system_version( @@ -282,9 +282,9 @@ impl DataStore { updateable_component .select(system_version) .order(system_version.desc()) - .first_async(self.pool_authorized(opctx).await?) + .first_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn create_update_deployment( @@ -299,10 +299,10 @@ impl DataStore { diesel::insert_into(update_deployment) .values(deployment.clone()) .returning(UpdateDeployment::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::UpdateDeployment, @@ -330,10 +330,10 @@ impl DataStore { time_modified.eq(diesel::dsl::now), )) .returning(UpdateDeployment::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::UpdateDeployment, @@ -354,9 +354,9 @@ impl DataStore { paginated(update_deployment, id, pagparams) .select(UpdateDeployment::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn latest_update_deployment( @@ -370,8 +370,8 @@ impl DataStore { update_deployment .select(UpdateDeployment::as_returning()) .order(time_created.desc()) - .first_async(self.pool_authorized(opctx).await?) + .first_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } } diff --git a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs index 404b071ad9..18ff58735e 100644 --- a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs +++ b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs @@ -7,13 +7,13 @@ use super::DataStore; use crate::context::OpContext; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::model::ByteCount; use crate::db::model::VirtualProvisioningCollection; use crate::db::pool::DbConnection; use crate::db::queries::virtual_provisioning_collection_update::VirtualProvisioningCollectionUpdate; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::AsyncRunQueryDsl; use diesel::prelude::*; use omicron_common::api::external::{DeleteResult, Error}; use uuid::Uuid; @@ -46,26 +46,19 @@ impl DataStore { opctx: &OpContext, virtual_provisioning_collection: VirtualProvisioningCollection, ) -> Result, Error> { - let pool = self.pool_authorized(opctx).await?; + let conn = self.pool_connection_authorized(opctx).await?; self.virtual_provisioning_collection_create_on_connection( - pool, + &conn, virtual_provisioning_collection, ) .await } - pub(crate) async fn virtual_provisioning_collection_create_on_connection< - ConnErr, - >( + pub(crate) async fn virtual_provisioning_collection_create_on_connection( &self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, virtual_provisioning_collection: VirtualProvisioningCollection, - ) -> Result, Error> - where - ConnErr: From + Send + 'static, - PoolError: From, - { + ) -> Result, Error> { use db::schema::virtual_provisioning_collection::dsl; let provisions: Vec = @@ -75,10 +68,7 @@ impl DataStore { .get_results_async(conn) .await .map_err(|e| { - public_error_from_diesel_pool( - PoolError::from(e), - ErrorHandler::Server, - ) + public_error_from_diesel(e, ErrorHandler::Server) })?; self.virtual_provisioning_collection_producer .append_all_metrics(&provisions)?; @@ -96,10 +86,12 @@ impl 
DataStore { dsl::virtual_provisioning_collection .find(id) .select(VirtualProvisioningCollection::as_select()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) + public_error_from_diesel(e, ErrorHandler::Server) })?; Ok(virtual_provisioning_collection) } @@ -110,24 +102,17 @@ impl DataStore { opctx: &OpContext, id: Uuid, ) -> DeleteResult { - let pool = self.pool_authorized(opctx).await?; - self.virtual_provisioning_collection_delete_on_connection(pool, id) + let conn = self.pool_connection_authorized(opctx).await?; + self.virtual_provisioning_collection_delete_on_connection(&conn, id) .await } /// Delete a [`VirtualProvisioningCollection`] object. - pub(crate) async fn virtual_provisioning_collection_delete_on_connection< - ConnErr, - >( + pub(crate) async fn virtual_provisioning_collection_delete_on_connection( &self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), + conn: &async_bb8_diesel::Connection, id: Uuid, - ) -> DeleteResult - where - ConnErr: From + Send + 'static, - PoolError: From, - { + ) -> DeleteResult { use db::schema::virtual_provisioning_collection::dsl; // NOTE: We don't really need to extract the value we're deleting from @@ -138,12 +123,7 @@ impl DataStore { .returning(VirtualProvisioningCollection::as_select()) .get_result_async(conn) .await - .map_err(|e| { - public_error_from_diesel_pool( - PoolError::from(e), - ErrorHandler::Server, - ) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; assert!( collection.is_empty(), "Collection deleted while non-empty: {collection:?}" @@ -209,11 +189,9 @@ impl DataStore { project_id, storage_type, ) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; self.virtual_provisioning_collection_producer .append_disk_metrics(&provisions)?; Ok(provisions) @@ -265,11 +243,9 @@ impl DataStore { disk_byte_diff, project_id, ) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; self.virtual_provisioning_collection_producer .append_disk_metrics(&provisions)?; Ok(provisions) @@ -288,11 +264,9 @@ impl DataStore { VirtualProvisioningCollectionUpdate::new_insert_instance( id, cpus_diff, ram_diff, project_id, ) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; self.virtual_provisioning_collection_producer .append_cpu_metrics(&provisions)?; Ok(provisions) @@ -311,11 +285,9 @@ impl DataStore { VirtualProvisioningCollectionUpdate::new_delete_instance( id, cpus_diff, ram_diff, project_id, ) - .get_results_async(self.pool_authorized(opctx).await?) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; self.virtual_provisioning_collection_producer .append_cpu_metrics(&provisions)?; Ok(provisions) diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index 901cf16f63..b3e82886de 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -6,7 +6,7 @@ use super::DataStore; use crate::db; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::identity::Asset; @@ -19,7 +19,6 @@ use async_bb8_diesel::AsyncRunQueryDsl; use async_bb8_diesel::OptionalExtension; use chrono::Utc; use diesel::prelude::*; -use diesel::OptionalExtension as DieselOptionalExtension; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; @@ -65,19 +64,18 @@ impl DataStore { crucible_targets }; - self.pool() - .transaction(move |conn| { + self.pool_connection_unauthorized() + .await? + .transaction_async(|conn| async move { let maybe_volume: Option = dsl::volume .filter(dsl::id.eq(volume.id())) .select(Volume::as_select()) - .first(conn) + .first_async(&conn) + .await .optional() .map_err(|e| { TxnError::CustomError(VolumeCreationError::Public( - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ), + public_error_from_diesel(e, ErrorHandler::Server), )) })?; @@ -97,11 +95,12 @@ impl DataStore { .on_conflict(dsl::id) .do_nothing() .returning(Volume::as_returning()) - .get_result(conn) + .get_result_async(&conn) + .await .map_err(|e| { TxnError::CustomError(VolumeCreationError::Public( - public_error_from_diesel_pool( - e.into(), + public_error_from_diesel( + e, ErrorHandler::Conflict( ResourceType::Volume, volume.id().to_string().as_str(), @@ -124,11 +123,12 @@ impl DataStore { rs_dsl::volume_references .eq(rs_dsl::volume_references + 1), ) - .execute(conn) + .execute_async(&conn) + .await .map_err(|e| { TxnError::CustomError(VolumeCreationError::Public( - public_error_from_diesel_pool( - e.into(), + public_error_from_diesel( + e, ErrorHandler::Server, ), )) @@ -156,10 +156,10 @@ impl DataStore { dsl::volume .filter(dsl::id.eq(volume_id)) .select(Volume::as_select()) - .first_async::(self.pool()) + .first_async::(&*self.pool_connection_unauthorized().await?) .await .optional() - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Delete the volume if it exists. If it was already deleted, this is a @@ -169,10 +169,10 @@ impl DataStore { diesel::delete(dsl::volume) .filter(dsl::id.eq(volume_id)) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_unauthorized().await?) .await .map(|_| ()) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Checkout a copy of the Volume from the database. @@ -206,13 +206,15 @@ impl DataStore { // types that require it). The generation number (along with the // rest of the volume data) that was in the database is what is // returned to the caller. - self.pool() - .transaction(move |conn| { + self.pool_connection_unauthorized() + .await? + .transaction_async(|conn| async move { // Grab the volume in question. 
let volume = dsl::volume .filter(dsl::id.eq(volume_id)) .select(Volume::as_select()) - .get_result(conn)?; + .get_result_async(&conn) + .await?; // Turn the volume.data into the VolumeConstructionRequest let vcr: VolumeConstructionRequest = @@ -289,7 +291,8 @@ impl DataStore { diesel::update(volume_dsl::volume) .filter(volume_dsl::id.eq(volume_id)) .set(volume_dsl::data.eq(new_volume_data)) - .execute(conn)?; + .execute_async(&conn) + .await?; // This should update just one row. If it does // not, then something is terribly wrong in the @@ -332,10 +335,7 @@ impl DataStore { .await .map_err(|e| match e { TxnError::CustomError(VolumeGetError::DieselError(e)) => { - public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ) + public_error_from_diesel(e.into(), ErrorHandler::Server) } _ => { @@ -478,9 +478,9 @@ impl DataStore { Region::as_select(), Volume::as_select(), )) - .load_async(self.pool()) + .load_async(&*self.pool_connection_unauthorized().await?) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn read_only_resources_associated_with_volume( @@ -576,8 +576,9 @@ impl DataStore { // // TODO it would be nice to make this transaction_async, but I couldn't // get the async optional extension to work. - self.pool() - .transaction(move |conn| { + self.pool_connection_unauthorized() + .await? + .transaction_async(|conn| async move { // Grab the volume in question. If the volume record was already // hard-deleted, assume clean-up has occurred and return an empty // CrucibleResources. If the volume record was soft-deleted, then @@ -588,7 +589,8 @@ impl DataStore { let volume = volume_dsl::volume .filter(volume_dsl::id.eq(volume_id)) .select(Volume::as_select()) - .get_result(conn) + .get_result_async(&conn) + .await .optional()?; let volume = if let Some(v) = volume { @@ -643,10 +645,11 @@ impl DataStore { diesel::update(dsl::region_snapshot) .filter( dsl::snapshot_addr - .eq_any(&crucible_targets.read_only_targets), + .eq_any(crucible_targets.read_only_targets.clone()), ) .set(dsl::volume_references.eq(dsl::volume_references - 1)) - .execute(conn)?; + .execute_async(&conn) + .await?; // Return what results can be cleaned up let result = CrucibleResources::V1(CrucibleResourcesV1 { @@ -681,7 +684,8 @@ impl DataStore { .or(dsl::volume_references.is_null()), ) .select((Dataset::as_select(), Region::as_select())) - .get_results::<(Dataset, Region)>(conn)? + .get_results_async::<(Dataset, Region)>(&conn) + .await? }, // A volume (for a disk or snapshot) may reference another nested @@ -707,11 +711,9 @@ impl DataStore { // delete a read-only downstairs running for a // snapshot that doesn't exist will return a 404, // causing the saga to error and unwind. - .filter( - dsl::snapshot_addr.eq_any( - &crucible_targets.read_only_targets, - ), - ) + .filter(dsl::snapshot_addr.eq_any( + crucible_targets.read_only_targets.clone(), + )) .filter(dsl::volume_references.eq(0)) .inner_join( dataset_dsl::dataset @@ -721,7 +723,10 @@ impl DataStore { Dataset::as_select(), RegionSnapshot::as_select(), )) - .get_results::<(Dataset, RegionSnapshot)>(conn)? + .get_results_async::<(Dataset, RegionSnapshot)>( + &conn, + ) + .await? 
}, }); @@ -742,7 +747,8 @@ impl DataStore { })?, ), )) - .execute(conn)?; + .execute_async(&conn) + .await?; Ok(result) }) @@ -750,10 +756,7 @@ impl DataStore { .map_err(|e| match e { TxnError::CustomError( DecreaseCrucibleResourcesError::DieselError(e), - ) => public_error_from_diesel_pool( - e.into(), - ErrorHandler::Server, - ), + ) => public_error_from_diesel(e.into(), ErrorHandler::Server), _ => { Error::internal_error(&format!("Transaction error: {}", e)) @@ -799,8 +802,9 @@ impl DataStore { // data from original volume_id. // - Put the new temp VCR into the temp volume.data, update the // temp_volume in the database. - self.pool() - .transaction(move |conn| { + self.pool_connection_unauthorized() + .await? + .transaction_async(|conn| async move { // Grab the volume in question. If the volume record was already // deleted then we can just return. let volume = { @@ -809,7 +813,8 @@ impl DataStore { let volume = dsl::volume .filter(dsl::id.eq(volume_id)) .select(Volume::as_select()) - .get_result(conn) + .get_result_async(&conn) + .await .optional()?; let volume = if let Some(v) = volume { @@ -882,7 +887,8 @@ impl DataStore { let num_updated = diesel::update(volume_dsl::volume) .filter(volume_dsl::id.eq(volume_id)) .set(volume_dsl::data.eq(new_volume_data)) - .execute(conn)?; + .execute_async(&conn) + .await?; // This should update just one row. If it does // not, then something is terribly wrong in the @@ -920,7 +926,8 @@ impl DataStore { .filter(volume_dsl::id.eq(temp_volume_id)) .filter(volume_dsl::time_deleted.is_null()) .set(volume_dsl::data.eq(rop_volume_data)) - .execute(conn)?; + .execute_async(&conn) + .await?; if num_updated != 1 { return Err(TxnError::CustomError( RemoveReadOnlyParentError::UnexpectedDatabaseUpdate(num_updated, 1), @@ -946,7 +953,7 @@ impl DataStore { .map_err(|e| match e { TxnError::CustomError( RemoveReadOnlyParentError::DieselError(e), - ) => public_error_from_diesel_pool( + ) => public_error_from_diesel( e.into(), ErrorHandler::Server, ), diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index f82270a27f..af7ea93456 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++ b/nexus/db-queries/src/db/datastore/vpc.rs @@ -10,8 +10,8 @@ use crate::context::OpContext; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::diesel_pool_result_optional; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::diesel_result_optional; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; use crate::db::fixed_data::vpc::SERVICES_VPC_ID; @@ -279,9 +279,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::project_id.eq(authz_project.id())) .select(Vpc::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
.await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn project_create_vpc( @@ -312,23 +312,22 @@ impl DataStore { let name = vpc_query.vpc.identity.name.clone(); let project_id = vpc_query.vpc.project_id; + let conn = self.pool_connection_authorized(opctx).await?; let vpc: Vpc = Project::insert_resource( project_id, diesel::insert_into(dsl::vpc).values(vpc_query), ) - .insert_and_get_result_async(self.pool()) + .insert_and_get_result_async(&conn) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { type_name: ResourceType::Project, lookup_type: LookupType::ById(project_id), }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict(ResourceType::Vpc, name.as_str()), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict(ResourceType::Vpc, name.as_str()), + ), })?; Ok(( authz::Vpc::new( @@ -354,10 +353,10 @@ impl DataStore { .filter(dsl::id.eq(authz_vpc.id())) .set(updates) .returning(Vpc::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_vpc), ) @@ -390,16 +389,18 @@ impl DataStore { // but we can't have NICs be a child of both tables at this point, and // we need to prevent VPC Subnets from being deleted while they have // NICs in them as well. - if diesel_pool_result_optional( + if diesel_result_optional( vpc_subnet::dsl::vpc_subnet .filter(vpc_subnet::dsl::vpc_id.eq(authz_vpc.id())) .filter(vpc_subnet::dsl::time_deleted.is_null()) .select(vpc_subnet::dsl::id) .limit(1) - .first_async::(self.pool_authorized(opctx).await?) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await, ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))? + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .is_some() { return Err(Error::InvalidRequest { @@ -416,10 +417,10 @@ impl DataStore { .filter(dsl::id.eq(authz_vpc.id())) .filter(dsl::subnet_gen.eq(db_vpc.subnet_gen)) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_vpc), ) @@ -448,14 +449,15 @@ impl DataStore { opctx.authorize(authz::Action::Read, authz_vpc).await?; use db::schema::vpc_firewall_rule::dsl; + let conn = self.pool_connection_authorized(opctx).await?; dsl::vpc_firewall_rule .filter(dsl::time_deleted.is_null()) .filter(dsl::vpc_id.eq(authz_vpc.id())) .order(dsl::name.asc()) .select(VpcFirewallRule::as_select()) - .load_async(self.pool_authorized(opctx).await?) 
+ .load_async(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn vpc_delete_all_firewall_rules( @@ -466,16 +468,17 @@ impl DataStore { opctx.authorize(authz::Action::Modify, authz_vpc).await?; use db::schema::vpc_firewall_rule::dsl; + let conn = self.pool_connection_authorized(opctx).await?; let now = Utc::now(); // TODO-performance: Paginate this update to avoid long queries diesel::update(dsl::vpc_firewall_rule) .filter(dsl::time_deleted.is_null()) .filter(dsl::vpc_id.eq(authz_vpc.id())) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*conn) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_vpc), ) @@ -525,7 +528,7 @@ impl DataStore { // hold a transaction open across multiple roundtrips from the database, // but for now we're using a transaction due to the severely decreased // legibility of CTEs via diesel right now. - self.pool_authorized(opctx) + self.pool_connection_authorized(opctx) .await? .transaction_async(|conn| async move { delete_old_query.execute_async(&conn).await?; @@ -553,7 +556,7 @@ impl DataStore { TxnError::CustomError( FirewallUpdateError::CollectionNotFound, ) => Error::not_found_by_id(ResourceType::Vpc, &authz_vpc.id()), - TxnError::Pool(e) => public_error_from_diesel_pool( + TxnError::Connection(e) => public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_vpc), ), @@ -604,11 +607,12 @@ impl DataStore { sleds = sleds.filter(sled::id.eq_any(sleds_filter.to_vec())); } + let conn = self.pool_connection_unauthorized().await?; sleds .intersect(instance_query.union(service_query)) - .get_results_async(self.pool()) + .get_results_async(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn vpc_subnet_list( @@ -620,6 +624,7 @@ impl DataStore { opctx.authorize(authz::Action::ListChildren, authz_vpc).await?; use db::schema::vpc_subnet::dsl; + let conn = self.pool_connection_authorized(opctx).await?; match pagparams { PaginatedBy::Id(pagparams) => { paginated(dsl::vpc_subnet, dsl::id, &pagparams) @@ -633,9 +638,9 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::vpc_id.eq(authz_vpc.id())) .select(VpcSubnet::as_select()) - .load_async(self.pool_authorized(opctx).await?) + .load_async(&*conn) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Insert a VPC Subnet, checking for unique IP address ranges. 
@@ -668,12 +673,17 @@ impl DataStore { ) -> Result { use db::schema::vpc_subnet::dsl; let values = FilterConflictingVpcSubnetRangesQuery::new(subnet.clone()); + let conn = self + .pool_connection_unauthorized() + .await + .map_err(SubnetError::External)?; + diesel::insert_into(dsl::vpc_subnet) .values(values) .returning(VpcSubnet::as_returning()) - .get_result_async(self.pool()) + .get_result_async(&*conn) .await - .map_err(|e| SubnetError::from_pool(e, &subnet)) + .map_err(|e| SubnetError::from_diesel(e, &subnet)) } pub async fn vpc_delete_subnet( @@ -687,17 +697,19 @@ impl DataStore { use db::schema::network_interface; use db::schema::vpc_subnet::dsl; + let conn = self.pool_connection_authorized(opctx).await?; + // Verify there are no child network interfaces in this VPC Subnet - if diesel_pool_result_optional( + if diesel_result_optional( network_interface::dsl::network_interface .filter(network_interface::dsl::subnet_id.eq(authz_subnet.id())) .filter(network_interface::dsl::time_deleted.is_null()) .select(network_interface::dsl::id) .limit(1) - .first_async::(self.pool_authorized(opctx).await?) + .first_async::(&*conn) .await, ) - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))? + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? .is_some() { return Err(Error::InvalidRequest { @@ -715,10 +727,10 @@ impl DataStore { .filter(dsl::id.eq(authz_subnet.id())) .filter(dsl::rcgen.eq(db_subnet.rcgen)) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_subnet), ) @@ -748,10 +760,10 @@ impl DataStore { .filter(dsl::id.eq(authz_subnet.id())) .set(updates) .returning(VpcSubnet::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_subnet), ) @@ -782,10 +794,10 @@ impl DataStore { .filter(dsl::subnet_id.eq(authz_subnet.id())) .select(InstanceNetworkInterface::as_select()) .load_async::( - self.pool_authorized(opctx).await?, + &*self.pool_connection_authorized(opctx).await?, ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn vpc_router_list( @@ -810,9 +822,11 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::vpc_id.eq(authz_vpc.id())) .select(VpcRouter::as_select()) - .load_async::(self.pool_authorized(opctx).await?) + .load_async::( + &*self.pool_connection_authorized(opctx).await?, + ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn vpc_create_router( @@ -830,10 +844,10 @@ impl DataStore { .on_conflict(dsl::id) .do_nothing() .returning(VpcRouter::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::Conflict( ResourceType::VpcRouter, @@ -864,10 +878,10 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_router.id())) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool()) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_router), ) @@ -889,10 +903,10 @@ impl DataStore { .filter(dsl::id.eq(authz_router.id())) .set(updates) .returning(VpcRouter::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_router), ) @@ -922,10 +936,10 @@ impl DataStore { .filter(dsl::vpc_router_id.eq(authz_router.id())) .select(RouterRoute::as_select()) .load_async::( - self.pool_authorized(opctx).await?, + &*self.pool_connection_authorized(opctx).await?, ) .await - .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } pub async fn router_create_route( @@ -945,22 +959,22 @@ impl DataStore { router_id, diesel::insert_into(dsl::router_route).values(route), ) - .insert_and_get_result_async(self.pool_authorized(opctx).await?) + .insert_and_get_result_async( + &*self.pool_connection_authorized(opctx).await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { type_name: ResourceType::VpcRouter, lookup_type: LookupType::ById(router_id), }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::RouterRoute, - name.as_str(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::RouterRoute, + name.as_str(), + ), + ), }) } @@ -977,10 +991,10 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_route.id())) .set(dsl::time_deleted.eq(now)) - .execute_async(self.pool_authorized(opctx).await?) + .execute_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_route), ) @@ -1002,10 +1016,10 @@ impl DataStore { .filter(dsl::id.eq(authz_route.id())) .set(route_update) .returning(RouterRoute::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) .await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByResource(authz_route), ) @@ -1037,11 +1051,11 @@ impl DataStore { vpc_subnet::ipv4_block, vpc_subnet::ipv6_block, )) - .get_results_async::(self.pool()) + .get_results_async::( + &*self.pool_connection_unauthorized().await?, + ) .await - .map_err(|e| { - public_error_from_diesel_pool(e, ErrorHandler::Server) - })?; + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; let mut result = BTreeMap::new(); for subnet in subnets { @@ -1063,10 +1077,10 @@ impl DataStore { .filter(dsl::vni.eq(vni)) .filter(dsl::time_deleted.is_null()) .select(Vpc::as_select()) - .get_result_async(self.pool_authorized(opctx).await?) + .get_result_async(&*self.pool_connection_authorized(opctx).await?) 
.await .map_err(|e| { - public_error_from_diesel_pool( + public_error_from_diesel( e, ErrorHandler::NotFoundByLookup( ResourceType::Vpc, diff --git a/nexus/db-queries/src/db/datastore/zpool.rs b/nexus/db-queries/src/db/datastore/zpool.rs index b2fb6cdf7a..5d6c0844ef 100644 --- a/nexus/db-queries/src/db/datastore/zpool.rs +++ b/nexus/db-queries/src/db/datastore/zpool.rs @@ -8,7 +8,7 @@ use super::DataStore; use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; -use crate::db::error::public_error_from_diesel_pool; +use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; use crate::db::identity::Asset; use crate::db::model::Sled; @@ -39,22 +39,22 @@ impl DataStore { dsl::total_size.eq(excluded(dsl::total_size)), )), ) - .insert_and_get_result_async(self.pool()) + .insert_and_get_result_async( + &*self.pool_connection_unauthorized().await?, + ) .await .map_err(|e| match e { AsyncInsertError::CollectionNotFound => Error::ObjectNotFound { type_name: ResourceType::Sled, lookup_type: LookupType::ById(sled_id), }, - AsyncInsertError::DatabaseError(e) => { - public_error_from_diesel_pool( - e, - ErrorHandler::Conflict( - ResourceType::Zpool, - &zpool.id().to_string(), - ), - ) - } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::Zpool, + &zpool.id().to_string(), + ), + ), }) } } diff --git a/nexus/db-queries/src/db/error.rs b/nexus/db-queries/src/db/error.rs index 59094d2e0b..f7402bb8c7 100644 --- a/nexus/db-queries/src/db/error.rs +++ b/nexus/db-queries/src/db/error.rs @@ -4,7 +4,7 @@ //! Error handling and conversions. -use async_bb8_diesel::{ConnectionError, PoolError, PoolResult}; +use async_bb8_diesel::ConnectionError; use diesel::result::DatabaseErrorInformation; use diesel::result::DatabaseErrorKind as DieselErrorKind; use diesel::result::Error as DieselError; @@ -25,23 +25,15 @@ pub enum TransactionError { /// /// This error covers failure due to accessing the DB pool or errors /// propagated from the DB itself. - #[error("Pool error: {0}")] - Pool(#[from] async_bb8_diesel::PoolError), + #[error("Connection error: {0}")] + Connection(#[from] async_bb8_diesel::ConnectionError), } // Maps a "diesel error" into a "pool error", which // is already contained within the error type. impl From for TransactionError { fn from(err: DieselError) -> Self { - Self::Pool(PoolError::Connection(ConnectionError::Query(err))) - } -} - -// Maps a "connection error" into a "pool error", which -// is already contained within the error type. 
-impl From for TransactionError { - fn from(err: async_bb8_diesel::ConnectionError) -> Self { - Self::Pool(PoolError::Connection(err)) + Self::Connection(ConnectionError::Query(err)) } } @@ -58,22 +50,16 @@ impl TransactionError { /// [1]: https://www.cockroachlabs.com/docs/v23.1/transaction-retry-error-reference#client-side-retry-handling pub fn retry_transaction(&self) -> bool { match &self { - TransactionError::Pool(e) => match e { - PoolError::Connection(ConnectionError::Query( - DieselError::DatabaseError(kind, boxed_error_information), - )) => match kind { - DieselErrorKind::SerializationFailure => { - return boxed_error_information - .message() - .starts_with("restart transaction"); - } - - _ => false, - }, - + TransactionError::Connection(ConnectionError::Query( + DieselError::DatabaseError(kind, boxed_error_information), + )) => match kind { + DieselErrorKind::SerializationFailure => { + return boxed_error_information + .message() + .starts_with("restart transaction"); + } _ => false, }, - _ => false, } } @@ -110,14 +96,12 @@ fn format_database_error( /// Like [`diesel::result::OptionalExtension::optional`]. This turns Ok(v) /// into Ok(Some(v)), Err("NotFound") into Ok(None), and leave all other values /// unchanged. -pub fn diesel_pool_result_optional( - result: PoolResult, -) -> PoolResult> { +pub fn diesel_result_optional( + result: Result, +) -> Result, ConnectionError> { match result { Ok(v) => Ok(Some(v)), - Err(PoolError::Connection(ConnectionError::Query( - DieselError::NotFound, - ))) => Ok(None), + Err(ConnectionError::Query(DieselError::NotFound)) => Ok(None), Err(e) => Err(e), } } @@ -153,57 +137,46 @@ pub enum ErrorHandler<'a> { Server, } -/// Converts a Diesel pool error to a public-facing error. +/// Converts a Diesel connection error to a public-facing error. /// /// [`ErrorHandler`] may be used to add additional handlers for the error /// being returned. -pub fn public_error_from_diesel_pool( - error: PoolError, +pub fn public_error_from_diesel( + error: ConnectionError, handler: ErrorHandler<'_>, ) -> PublicError { - public_error_from_diesel_pool_helper(error, |error| match handler { - ErrorHandler::NotFoundByResource(resource) => { - public_error_from_diesel_lookup( - error, - resource.resource_type(), - resource.lookup_type(), - ) - } - ErrorHandler::NotFoundByLookup(resource_type, lookup_type) => { - public_error_from_diesel_lookup(error, resource_type, &lookup_type) - } - ErrorHandler::Conflict(resource_type, object_name) => { - public_error_from_diesel_create(error, resource_type, object_name) - } - ErrorHandler::Server => PublicError::internal_error(&format!( - "unexpected database error: {:#}", + match error { + ConnectionError::Connection(error) => PublicError::unavail(&format!( + "Failed to access connection pool: {}", error )), - }) -} - -/// Handles the common cases for all pool errors (particularly around transient -/// errors while delegating the special case of -/// `PoolError::Connection(ConnectionError::Query(diesel_error))` to -/// `make_query_error(diesel_error)`, allowing the caller to decide how to -/// format a message for that case. 
-fn public_error_from_diesel_pool_helper( - error: PoolError, - make_query_error: F, -) -> PublicError -where - F: FnOnce(DieselError) -> PublicError, -{ - match error { - PoolError::Connection(error) => match error { - ConnectionError::Connection(error) => PublicError::unavail( - &format!("Failed to access connection pool: {}", error), - ), - ConnectionError::Query(error) => make_query_error(error), + ConnectionError::Query(error) => match handler { + ErrorHandler::NotFoundByResource(resource) => { + public_error_from_diesel_lookup( + error, + resource.resource_type(), + resource.lookup_type(), + ) + } + ErrorHandler::NotFoundByLookup(resource_type, lookup_type) => { + public_error_from_diesel_lookup( + error, + resource_type, + &lookup_type, + ) + } + ErrorHandler::Conflict(resource_type, object_name) => { + public_error_from_diesel_create( + error, + resource_type, + object_name, + ) + } + ErrorHandler::Server => PublicError::internal_error(&format!( + "unexpected database error: {:#}", + error + )), }, - PoolError::Timeout => { - PublicError::unavail("Timeout accessing connection pool") - } } } diff --git a/nexus/db-queries/src/db/explain.rs b/nexus/db-queries/src/db/explain.rs index de834eb301..fc8098b876 100644 --- a/nexus/db-queries/src/db/explain.rs +++ b/nexus/db-queries/src/db/explain.rs @@ -5,7 +5,7 @@ //! Utility allowing Diesel to EXPLAIN queries. use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionManager, PoolError}; +use async_bb8_diesel::{AsyncRunQueryDsl, ConnectionError}; use async_trait::async_trait; use diesel::pg::Pg; use diesel::prelude::*; @@ -48,8 +48,8 @@ pub trait ExplainableAsync { /// Asynchronously issues an explain statement. async fn explain_async( self, - pool: &bb8::Pool>, - ) -> Result; + conn: &async_bb8_diesel::Connection, + ) -> Result; } #[async_trait] @@ -64,10 +64,10 @@ where { async fn explain_async( self, - pool: &bb8::Pool>, - ) -> Result { + conn: &async_bb8_diesel::Connection, + ) -> Result { Ok(ExplainStatement { query: self } - .get_results_async::(pool) + .get_results_async::(conn) .await? 
.join("\n")) } @@ -167,6 +167,7 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); + let conn = pool.pool().get().await.unwrap(); create_schema(&pool).await; @@ -174,7 +175,7 @@ mod test { let explanation = dsl::test_users .filter(dsl::id.eq(Uuid::nil())) .select(User::as_select()) - .explain_async(pool.pool()) + .explain_async(&conn) .await .unwrap(); @@ -190,6 +191,7 @@ mod test { let mut db = test_setup_database(&logctx.log).await; let cfg = db::Config { url: db.pg_config().clone() }; let pool = db::Pool::new(&logctx.log, &cfg); + let conn = pool.pool().get().await.unwrap(); create_schema(&pool).await; @@ -197,7 +199,7 @@ mod test { let explanation = dsl::test_users .filter(dsl::age.eq(2)) .select(User::as_select()) - .explain_async(pool.pool()) + .explain_async(&conn) .await .unwrap(); diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs index e7e7bb47fc..72a32f562c 100644 --- a/nexus/db-queries/src/db/lookup.rs +++ b/nexus/db-queries/src/db/lookup.rs @@ -11,7 +11,7 @@ use crate::{ authz, context::OpContext, db, - db::error::{public_error_from_diesel_pool, ErrorHandler}, + db::error::{public_error_from_diesel, ErrorHandler}, }; use async_bb8_diesel::AsyncRunQueryDsl; use db_macros::lookup_resource; diff --git a/nexus/db-queries/src/db/pagination.rs b/nexus/db-queries/src/db/pagination.rs index 50da36c156..dd7daab14f 100644 --- a/nexus/db-queries/src/db/pagination.rs +++ b/nexus/db-queries/src/db/pagination.rs @@ -214,14 +214,12 @@ mod test { async fn populate_users(pool: &db::Pool, values: &Vec<(i64, i64)>) { use schema::test_users::dsl; + let conn = pool.pool().get().await.unwrap(); + // The indexes here work around the check that prevents full table // scans. - pool.pool() - .get() - .await - .unwrap() - .batch_execute_async( - "CREATE TABLE test_users ( + conn.batch_execute_async( + "CREATE TABLE test_users ( id UUID PRIMARY KEY, age INT NOT NULL, height INT NOT NULL @@ -229,9 +227,9 @@ mod test { CREATE INDEX ON test_users (age, height); CREATE INDEX ON test_users (height, age);", - ) - .await - .unwrap(); + ) + .await + .unwrap(); let users: Vec = values .iter() @@ -244,7 +242,7 @@ mod test { diesel::insert_into(dsl::test_users) .values(users) - .execute_async(pool.pool()) + .execute_async(&*conn) .await .unwrap(); } @@ -254,7 +252,8 @@ mod test { pool: &db::Pool, query: BoxedQuery, ) -> Vec { - query.select(User::as_select()).load_async(pool.pool()).await.unwrap() + let conn = pool.pool().get().await.unwrap(); + query.select(User::as_select()).load_async(&*conn).await.unwrap() } #[tokio::test] diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index e5f57181fa..18360e1045 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -42,7 +42,7 @@ const REALLOCATION_WITH_DIFFERENT_IP_SENTINEL: &'static str = "Reallocation of IP with different value"; /// Translates a generic pool error to an external error. 
-pub fn from_pool(e: async_bb8_diesel::PoolError) -> external::Error { +pub fn from_diesel(e: async_bb8_diesel::ConnectionError) -> external::Error { use crate::db::error; let sentinels = [REALLOCATION_WITH_DIFFERENT_IP_SENTINEL]; @@ -58,7 +58,7 @@ pub fn from_pool(e: async_bb8_diesel::PoolError) -> external::Error { } } - error::public_error_from_diesel_pool(e, error::ErrorHandler::Server) + error::public_error_from_diesel(e, error::ErrorHandler::Server) } const MAX_PORT: u16 = u16::MAX; @@ -877,15 +877,16 @@ mod tests { is_default, ); + let conn = self + .db_datastore + .pool_connection_authorized(&self.opctx) + .await + .unwrap(); + use crate::db::schema::ip_pool::dsl as ip_pool_dsl; diesel::insert_into(ip_pool_dsl::ip_pool) .values(pool.clone()) - .execute_async( - self.db_datastore - .pool_authorized(&self.opctx) - .await - .unwrap(), - ) + .execute_async(&*conn) .await .expect("Failed to create IP Pool"); @@ -895,16 +896,16 @@ mod tests { async fn initialize_ip_pool(&self, name: &str, range: IpRange) { // Find the target IP pool use crate::db::schema::ip_pool::dsl as ip_pool_dsl; + let conn = self + .db_datastore + .pool_connection_authorized(&self.opctx) + .await + .unwrap(); let pool = ip_pool_dsl::ip_pool .filter(ip_pool_dsl::name.eq(name.to_string())) .filter(ip_pool_dsl::time_deleted.is_null()) .select(IpPool::as_select()) - .get_result_async( - self.db_datastore - .pool_authorized(&self.opctx) - .await - .unwrap(), - ) + .get_result_async(&*conn) .await .expect("Failed to 'SELECT' IP Pool"); @@ -915,7 +916,11 @@ mod tests { ) .values(pool_range) .execute_async( - self.db_datastore.pool_authorized(&self.opctx).await.unwrap(), + &*self + .db_datastore + .pool_connection_authorized(&self.opctx) + .await + .unwrap(), ) .await .expect("Failed to create IP Pool range"); diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs index 5bb9da928e..877daad9e3 100644 --- a/nexus/db-queries/src/db/queries/network_interface.rs +++ b/nexus/db-queries/src/db/queries/network_interface.rs @@ -125,22 +125,21 @@ impl InsertError { /// can generate, especially the intentional errors that indicate either IP /// address exhaustion or an attempt to attach an interface to an instance /// that is already associated with another VPC. - pub fn from_pool( - e: async_bb8_diesel::PoolError, + pub fn from_diesel( + e: async_bb8_diesel::ConnectionError, interface: &IncompleteNetworkInterface, ) -> Self { use crate::db::error; use async_bb8_diesel::ConnectionError; - use async_bb8_diesel::PoolError; use diesel::result::Error; match e { // Catch the specific errors designed to communicate the failures we // want to distinguish - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(_, _), - )) => decode_database_error(e, interface), + ConnectionError::Query(Error::DatabaseError(_, _)) => { + decode_database_error(e, interface) + } // Any other error at all is a bug - _ => InsertError::External(error::public_error_from_diesel_pool( + _ => InsertError::External(error::public_error_from_diesel( e, error::ErrorHandler::Server, )), @@ -224,12 +223,11 @@ impl InsertError { /// As such, it naturally is extremely tightly coupled to the database itself, /// including the software version and our schema. 
fn decode_database_error( - err: async_bb8_diesel::PoolError, + err: async_bb8_diesel::ConnectionError, interface: &IncompleteNetworkInterface, ) -> InsertError { use crate::db::error; use async_bb8_diesel::ConnectionError; - use async_bb8_diesel::PoolError; use diesel::result::DatabaseErrorKind; use diesel::result::Error; @@ -294,8 +292,9 @@ fn decode_database_error( // If the address allocation subquery fails, we'll attempt to insert // NULL for the `ip` column. This checks that the non-NULL constraint on // that colum has been violated. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::NotNullViolation, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::NotNullViolation, + ref info, )) if info.message() == IP_EXHAUSTION_ERROR_MESSAGE => { InsertError::NoAvailableIpAddresses } @@ -304,16 +303,18 @@ fn decode_database_error( // `push_ensure_unique_vpc_expression` subquery, which generates a // UUID parsing error if the resource (e.g. instance) we want to attach // to is already associated with another VPC. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == MULTIPLE_VPC_ERROR_MESSAGE => { InsertError::ResourceSpansMultipleVpcs(interface.parent_id) } // This checks the constraint on the interface slot numbers, used to // limit total number of interfaces per resource to a maximum number. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::CheckViolation, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::CheckViolation, + ref info, )) if info.message() == NO_SLOTS_AVAILABLE_ERROR_MESSAGE => { InsertError::NoSlotsAvailable } @@ -321,8 +322,9 @@ fn decode_database_error( // If the MAC allocation subquery fails, we'll attempt to insert NULL // for the `mac` column. This checks that the non-NULL constraint on // that column has been violated. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::NotNullViolation, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::NotNullViolation, + ref info, )) if info.message() == MAC_EXHAUSTION_ERROR_MESSAGE => { InsertError::NoMacAddrressesAvailable } @@ -331,8 +333,9 @@ fn decode_database_error( // `push_ensure_unique_vpc_subnet_expression` subquery, which generates // a UUID parsing error if the resource has another interface in the VPC // Subnet of the one we're trying to insert. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == NON_UNIQUE_VPC_SUBNET_ERROR_MESSAGE => { InsertError::NonUniqueVpcSubnets } @@ -340,8 +343,9 @@ fn decode_database_error( // This catches the UUID-cast failure intentionally introduced by // `push_instance_state_verification_subquery`, which verifies that // the instance is actually stopped when running this query. 
- PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == INSTANCE_BAD_STATE_ERROR_MESSAGE => { assert_eq!(interface.kind, NetworkInterfaceKind::Instance); InsertError::InstanceMustBeStopped(interface.parent_id) @@ -349,16 +353,18 @@ fn decode_database_error( // This catches the UUID-cast failure intentionally introduced by // `push_instance_state_verification_subquery`, which verifies that // the instance doesn't even exist when running this query. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == NO_INSTANCE_ERROR_MESSAGE => { assert_eq!(interface.kind, NetworkInterfaceKind::Instance); InsertError::InstanceNotFound(interface.parent_id) } // This path looks specifically at constraint names. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::UniqueViolation, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::UniqueViolation, + ref info, )) => match info.constraint_name() { // Constraint violated if a user-requested IP address has // already been assigned within the same VPC Subnet. @@ -385,7 +391,7 @@ fn decode_database_error( external::ResourceType::ServiceNetworkInterface } }; - InsertError::External(error::public_error_from_diesel_pool( + InsertError::External(error::public_error_from_diesel( err, error::ErrorHandler::Conflict( resource_type, @@ -402,14 +408,14 @@ fn decode_database_error( ) } // Any other constraint violation is a bug - _ => InsertError::External(error::public_error_from_diesel_pool( + _ => InsertError::External(error::public_error_from_diesel( err, error::ErrorHandler::Server, )), }, // Any other error at all is a bug - _ => InsertError::External(error::public_error_from_diesel_pool( + _ => InsertError::External(error::public_error_from_diesel( err, error::ErrorHandler::Server, )), @@ -1544,25 +1550,24 @@ impl DeleteError { /// can generate, specifically the intentional errors that indicate that /// either the instance is still running, or that the instance has one or /// more secondary interfaces. - pub fn from_pool( - e: async_bb8_diesel::PoolError, + pub fn from_diesel( + e: async_bb8_diesel::ConnectionError, query: &DeleteQuery, ) -> Self { use crate::db::error; use async_bb8_diesel::ConnectionError; - use async_bb8_diesel::PoolError; use diesel::result::Error; match e { // Catch the specific errors designed to communicate the failures we // want to distinguish - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(_, _), - )) => decode_delete_network_interface_database_error( - e, - query.parent_id, - ), + ConnectionError::Query(Error::DatabaseError(_, _)) => { + decode_delete_network_interface_database_error( + e, + query.parent_id, + ) + } // Any other error at all is a bug - _ => DeleteError::External(error::public_error_from_diesel_pool( + _ => DeleteError::External(error::public_error_from_diesel( e, error::ErrorHandler::Server, )), @@ -1603,12 +1608,11 @@ impl DeleteError { /// As such, it naturally is extremely tightly coupled to the database itself, /// including the software version and our schema. 
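The decode functions above keep the same sentinel-matching logic; only the wrapping changes, since the error handed in is now a ConnectionError rather than a PoolError::Connection(..). A minimal sketch of the flattened match, with a placeholder sentinel string standing in for the real constants:

use async_bb8_diesel::ConnectionError;
use diesel::result::{DatabaseErrorKind, Error as DieselError};

// Returns true when the error carries the intentional "sentinel" message
// produced by a CTE cast failure; any other error is treated as a bug.
fn is_sentinel(e: &ConnectionError, sentinel: &str) -> bool {
    match e {
        ConnectionError::Query(DieselError::DatabaseError(
            DatabaseErrorKind::Unknown,
            info,
        )) => info.message() == sentinel,
        _ => false,
    }
}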
fn decode_delete_network_interface_database_error( - err: async_bb8_diesel::PoolError, + err: async_bb8_diesel::ConnectionError, parent_id: Uuid, ) -> DeleteError { use crate::db::error; use async_bb8_diesel::ConnectionError; - use async_bb8_diesel::PoolError; use diesel::result::DatabaseErrorKind; use diesel::result::Error; @@ -1623,8 +1627,9 @@ fn decode_delete_network_interface_database_error( // first CTE, which generates a UUID parsing error if we're trying to // delete the primary interface, and the instance also has one or more // secondaries. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == HAS_SECONDARIES_ERROR_MESSAGE => { DeleteError::SecondariesExist(parent_id) } @@ -1632,22 +1637,24 @@ fn decode_delete_network_interface_database_error( // This catches the UUID-cast failure intentionally introduced by // `push_instance_state_verification_subquery`, which verifies that // the instance can be worked on when running this query. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == INSTANCE_BAD_STATE_ERROR_MESSAGE => { DeleteError::InstanceBadState(parent_id) } // This catches the UUID-cast failure intentionally introduced by // `push_instance_state_verification_subquery`, which verifies that // the instance doesn't even exist when running this query. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) if info.message() == NO_INSTANCE_ERROR_MESSAGE => { DeleteError::InstanceNotFound(parent_id) } // Any other error at all is a bug - _ => DeleteError::External(error::public_error_from_diesel_pool( + _ => DeleteError::External(error::public_error_from_diesel( err, error::ErrorHandler::Server, )), @@ -1883,16 +1890,18 @@ mod tests { db_datastore.project_create(&opctx, project).await.unwrap(); use crate::db::schema::vpc_subnet::dsl::vpc_subnet; - let p = db_datastore.pool_authorized(&opctx).await.unwrap(); + let conn = + db_datastore.pool_connection_authorized(&opctx).await.unwrap(); let net1 = Network::new(n_subnets); let net2 = Network::new(n_subnets); for subnet in net1.subnets.iter().chain(net2.subnets.iter()) { diesel::insert_into(vpc_subnet) .values(subnet.clone()) - .execute_async(p) + .execute_async(&*conn) .await .unwrap(); } + drop(conn); Self { logctx, opctx, diff --git a/nexus/db-queries/src/db/queries/next_item.rs b/nexus/db-queries/src/db/queries/next_item.rs index 3ba09788a0..007aec943d 100644 --- a/nexus/db-queries/src/db/queries/next_item.rs +++ b/nexus/db-queries/src/db/queries/next_item.rs @@ -593,6 +593,7 @@ mod tests { let mut db = test_setup_database(&log).await; let cfg = crate::db::Config { url: db.pg_config().clone() }; let pool = Arc::new(crate::db::Pool::new(&logctx.log, &cfg)); + let conn = pool.pool().get().await.unwrap(); // We're going to operate on a separate table, for simplicity. 
setup_test_schema(&pool).await; @@ -607,7 +608,7 @@ mod tests { let it = diesel::insert_into(item::dsl::item) .values(query) .returning(Item::as_returning()) - .get_result_async(pool.pool()) + .get_result_async(&*conn) .await .unwrap(); assert_eq!(it.value, 0); @@ -616,7 +617,7 @@ mod tests { let it = diesel::insert_into(item::dsl::item) .values(query) .returning(Item::as_returning()) - .get_result_async(pool.pool()) + .get_result_async(&*conn) .await .unwrap(); assert_eq!(it.value, 1); @@ -628,7 +629,7 @@ mod tests { let it = diesel::insert_into(item::dsl::item) .values(query) .returning(Item::as_returning()) - .get_result_async(pool.pool()) + .get_result_async(&*conn) .await .unwrap(); assert_eq!(it.value, 10); @@ -638,7 +639,7 @@ mod tests { let it = diesel::insert_into(item::dsl::item) .values(query) .returning(Item::as_returning()) - .get_result_async(pool.pool()) + .get_result_async(&*conn) .await .unwrap(); assert_eq!(it.value, 2); diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index 674a525c5c..b071ee3f44 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -36,7 +36,7 @@ const NOT_ENOUGH_UNIQUE_ZPOOLS_SENTINEL: &'static str = /// Translates a generic pool error to an external error based /// on messages which may be emitted during region provisioning. -pub fn from_pool(e: async_bb8_diesel::PoolError) -> external::Error { +pub fn from_diesel(e: async_bb8_diesel::ConnectionError) -> external::Error { use crate::db::error; let sentinels = [ @@ -66,7 +66,7 @@ pub fn from_pool(e: async_bb8_diesel::PoolError) -> external::Error { } } - error::public_error_from_diesel_pool(e, error::ErrorHandler::Server) + error::public_error_from_diesel(e, error::ErrorHandler::Server) } /// A subquery to find all old regions associated with a particular volume. diff --git a/nexus/db-queries/src/db/queries/vpc_subnet.rs b/nexus/db-queries/src/db/queries/vpc_subnet.rs index 78da549620..bbb229da1e 100644 --- a/nexus/db-queries/src/db/queries/vpc_subnet.rs +++ b/nexus/db-queries/src/db/queries/vpc_subnet.rs @@ -28,13 +28,12 @@ pub enum SubnetError { impl SubnetError { /// Construct a `SubnetError` from a Diesel error, catching the desired /// cases and building useful errors. 
- pub fn from_pool( - e: async_bb8_diesel::PoolError, + pub fn from_diesel( + e: async_bb8_diesel::ConnectionError, subnet: &VpcSubnet, ) -> Self { use crate::db::error; use async_bb8_diesel::ConnectionError; - use async_bb8_diesel::PoolError; use diesel::result::DatabaseErrorKind; use diesel::result::Error; const IPV4_OVERLAP_ERROR_MESSAGE: &str = @@ -44,33 +43,27 @@ impl SubnetError { const NAME_CONFLICT_CONSTRAINT: &str = "vpc_subnet_vpc_id_name_key"; match e { // Attempt to insert overlapping IPv4 subnet - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError( - DatabaseErrorKind::NotNullViolation, - ref info, - ), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::NotNullViolation, + ref info, )) if info.message() == IPV4_OVERLAP_ERROR_MESSAGE => { SubnetError::OverlappingIpRange(subnet.ipv4_block.0 .0.into()) } // Attempt to insert overlapping IPv6 subnet - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError( - DatabaseErrorKind::NotNullViolation, - ref info, - ), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::NotNullViolation, + ref info, )) if info.message() == IPV6_OVERLAP_ERROR_MESSAGE => { SubnetError::OverlappingIpRange(subnet.ipv6_block.0 .0.into()) } // Conflicting name for the subnet within a VPC - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError( - DatabaseErrorKind::UniqueViolation, - ref info, - ), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::UniqueViolation, + ref info, )) if info.constraint_name() == Some(NAME_CONFLICT_CONSTRAINT) => { - SubnetError::External(error::public_error_from_diesel_pool( + SubnetError::External(error::public_error_from_diesel( e, error::ErrorHandler::Conflict( external::ResourceType::VpcSubnet, @@ -80,7 +73,7 @@ impl SubnetError { } // Any other error at all is a bug - _ => SubnetError::External(error::public_error_from_diesel_pool( + _ => SubnetError::External(error::public_error_from_diesel( e, error::ErrorHandler::Server, )), diff --git a/nexus/db-queries/src/db/true_or_cast_error.rs b/nexus/db-queries/src/db/true_or_cast_error.rs index 6f14cd4642..e04d865182 100644 --- a/nexus/db-queries/src/db/true_or_cast_error.rs +++ b/nexus/db-queries/src/db/true_or_cast_error.rs @@ -77,11 +77,10 @@ where /// Returns one of the sentinels if it matches the expected value from /// a [`TrueOrCastError`]. pub fn matches_sentinel( - e: &async_bb8_diesel::PoolError, + e: &async_bb8_diesel::ConnectionError, sentinels: &[&'static str], ) -> Option<&'static str> { use async_bb8_diesel::ConnectionError; - use async_bb8_diesel::PoolError; use diesel::result::DatabaseErrorKind; use diesel::result::Error; @@ -94,8 +93,9 @@ pub fn matches_sentinel( match e { // Catch the specific errors designed to communicate the failures we // want to distinguish. - PoolError::Connection(ConnectionError::Query( - Error::DatabaseError(DatabaseErrorKind::Unknown, ref info), + ConnectionError::Query(Error::DatabaseError( + DatabaseErrorKind::Unknown, + ref info, )) => { for sentinel in sentinels { if info.message() == bool_parse_error(sentinel) { diff --git a/nexus/db-queries/src/db/update_and_check.rs b/nexus/db-queries/src/db/update_and_check.rs index 8c7845b61b..96cb3e4c79 100644 --- a/nexus/db-queries/src/db/update_and_check.rs +++ b/nexus/db-queries/src/db/update_and_check.rs @@ -5,7 +5,7 @@ //! CTE implementation for "UPDATE with extended return status". 
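With `matches_sentinel` now taking a `&ConnectionError`, the per-query translators (`from_diesel` in external_ip.rs, region_allocation.rs, and vpc_subnet.rs above) stay thin. A sketch of that translator shape with a hypothetical sentinel constant and message; only `matches_sentinel`, `public_error_from_diesel`, and `ErrorHandler::Server` come from this diff.

pub fn from_diesel(e: async_bb8_diesel::ConnectionError) -> external::Error {
    use crate::db::error;

    // Hypothetical sentinel emitted by a custom query's cast-failure arm.
    const EXAMPLE_SENTINEL: &str = "Example intentional failure";

    if let Some(sentinel) = matches_sentinel(&e, &[EXAMPLE_SENTINEL]) {
        if sentinel == EXAMPLE_SENTINEL {
            return external::Error::invalid_request(
                "the query hit its intentional failure case",
            );
        }
    }
    // Anything else is unexpected and reported as a server error.
    error::public_error_from_diesel(e, error::ErrorHandler::Server)
}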
use super::pool::DbConnection; -use async_bb8_diesel::{AsyncRunQueryDsl, PoolError}; +use async_bb8_diesel::AsyncRunQueryDsl; use diesel::associations::HasTable; use diesel::pg::Pg; use diesel::prelude::*; @@ -153,16 +153,13 @@ where /// - Ok(Row exists and was updated) /// - Ok(Row exists, but was not updated) /// - Error (row doesn't exist, or other diesel error) - pub async fn execute_and_check( + pub async fn execute_and_check( self, - conn: &(impl async_bb8_diesel::AsyncConnection - + Sync), - ) -> Result, PoolError> + conn: &async_bb8_diesel::Connection, + ) -> Result, async_bb8_diesel::ConnectionError> where // We require this bound to ensure that "Self" is runnable as query. Self: LoadQuery<'static, DbConnection, (Option, Option, Q)>, - ConnErr: From + Send + 'static, - PoolError: From, { let (id0, id1, found) = self.get_result_async::<(Option, Option, Q)>(conn).await?; diff --git a/nexus/defaults/Cargo.toml b/nexus/defaults/Cargo.toml index 910ae2afd6..09a95fa839 100644 --- a/nexus/defaults/Cargo.toml +++ b/nexus/defaults/Cargo.toml @@ -11,3 +11,4 @@ rand.workspace = true serde_json.workspace = true omicron-common.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/nexus/src/app/background/dns_config.rs b/nexus/src/app/background/dns_config.rs index c0aaa267a2..654e9c0bf1 100644 --- a/nexus/src/app/background/dns_config.rs +++ b/nexus/src/app/background/dns_config.rs @@ -220,7 +220,9 @@ mod test { { use nexus_db_queries::db::schema::dns_version::dsl; diesel::delete(dsl::dns_version.filter(dsl::version.eq(2))) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -236,7 +238,7 @@ mod test { // Similarly, wipe all of the state and verify that we handle that okay. 
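Tests follow the same convention: `pool_for_tests()` becomes `pool_connection_for_tests()`, and the returned connection is reborrowed with `&*` for ad-hoc statements or handed to `transaction_async`. A small sketch of the direct-statement case, where `dsl::widget` and `some_id` are placeholders:

// Check out a test connection once and reuse it for direct statements.
let conn = datastore.pool_connection_for_tests().await.unwrap();
diesel::delete(dsl::widget.filter(dsl::id.eq(some_id)))
    .execute_async(&*conn)
    .await
    .unwrap();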
datastore - .pool_for_tests() + .pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { diff --git a/nexus/src/app/background/dns_servers.rs b/nexus/src/app/background/dns_servers.rs index 419b94d360..3a75c09302 100644 --- a/nexus/src/app/background/dns_servers.rs +++ b/nexus/src/app/background/dns_servers.rs @@ -237,7 +237,9 @@ mod test { SocketAddrV6::new(Ipv6Addr::LOCALHOST, 1, 0, 0), ServiceKind::InternalDns, )) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -265,7 +267,9 @@ mod test { diesel::insert_into(dsl::service) .values(new_services) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } @@ -281,7 +285,9 @@ mod test { diesel::delete( dsl::service.filter(dsl::kind.eq(ServiceKind::InternalDns)), ) - .execute_async(datastore.pool_for_tests().await.unwrap()) + .execute_async( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap(); } diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index 5d1568bcb5..aa949bbc9f 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -370,7 +370,7 @@ pub mod test { ) { type TxnError = TransactionError<()>; { - let conn = datastore.pool_for_tests().await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); let _: Result<(), TxnError> = conn .transaction_async(|conn| async move { { diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 8e2e1d0a04..cca36cefa7 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -921,7 +921,9 @@ pub(crate) mod test { dsl::disk .filter(dsl::time_deleted.is_null()) .select(Disk::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -935,7 +937,9 @@ pub(crate) mod test { dsl::volume .filter(dsl::time_deleted.is_null()) .select(Volume::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -951,7 +955,7 @@ pub(crate) mod test { dsl::virtual_provisioning_resource .select(VirtualProvisioningResource::as_select()) .first_async::( - datastore.pool_for_tests().await.unwrap(), + &*datastore.pool_connection_for_tests().await.unwrap(), ) .await .optional() @@ -966,7 +970,7 @@ pub(crate) mod test { use nexus_db_queries::db::schema::virtual_provisioning_collection::dsl; datastore - .pool_for_tests() + .pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index d5af080381..6fc93ce8db 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -1467,7 +1467,9 @@ pub mod test { dsl::instance .filter(dsl::time_deleted.is_null()) .select(Instance::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -1484,7 +1486,7 @@ pub mod test { .filter(dsl::kind.eq(NetworkInterfaceKind::Instance)) .select(NetworkInterface::as_select()) .first_async::( - 
datastore.pool_for_tests().await.unwrap(), + &*datastore.pool_connection_for_tests().await.unwrap(), ) .await .optional() @@ -1501,7 +1503,7 @@ pub mod test { .filter(dsl::is_service.eq(false)) .select(ExternalIp::as_select()) .first_async::( - datastore.pool_for_tests().await.unwrap(), + &*datastore.pool_connection_for_tests().await.unwrap(), ) .await .optional() @@ -1516,7 +1518,7 @@ pub mod test { use nexus_db_queries::db::schema::sled_resource::dsl; datastore - .pool_for_tests() + .pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { @@ -1550,7 +1552,7 @@ pub mod test { use nexus_db_queries::db::model::VirtualProvisioningResource; use nexus_db_queries::db::schema::virtual_provisioning_resource::dsl; - datastore.pool_for_tests() + datastore.pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { @@ -1578,7 +1580,7 @@ pub mod test { use nexus_db_queries::db::schema::virtual_provisioning_collection::dsl; datastore - .pool_for_tests() + .pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { @@ -1615,7 +1617,9 @@ pub mod test { .filter(dsl::time_deleted.is_null()) .filter(dsl::name.eq(DISK_NAME)) .select(Disk::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .unwrap() .runtime_state diff --git a/nexus/src/app/sagas/project_create.rs b/nexus/src/app/sagas/project_create.rs index 65efabd8e9..1cbf9070ee 100644 --- a/nexus/src/app/sagas/project_create.rs +++ b/nexus/src/app/sagas/project_create.rs @@ -213,7 +213,9 @@ mod test { // ignore built-in services project .filter(dsl::id.ne(*SERVICES_PROJECT_ID)) .select(Project::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -230,7 +232,7 @@ mod test { use nexus_db_queries::db::model::VirtualProvisioningCollection; use nexus_db_queries::db::schema::virtual_provisioning_collection::dsl; - datastore.pool_for_tests() + datastore.pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index bcebd17021..b27f4a3a9b 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -1885,7 +1885,9 @@ mod test { dsl::snapshot .filter(dsl::time_deleted.is_null()) .select(Snapshot::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -1899,7 +1901,7 @@ mod test { dsl::region_snapshot .select(RegionSnapshot::as_select()) .first_async::( - datastore.pool_for_tests().await.unwrap(), + &*datastore.pool_connection_for_tests().await.unwrap(), ) .await .optional() diff --git a/nexus/src/app/sagas/test_helpers.rs b/nexus/src/app/sagas/test_helpers.rs index 29f743a350..aa9334b682 100644 --- a/nexus/src/app/sagas/test_helpers.rs +++ b/nexus/src/app/sagas/test_helpers.rs @@ -398,7 +398,7 @@ pub(crate) async fn assert_no_failed_undo_steps( use nexus_db_queries::db::model::saga_types::SagaNodeEvent; let saga_node_events: Vec = datastore - .pool_for_tests() + .pool_connection_for_tests() .await .unwrap() .transaction_async(|conn| async move { diff --git a/nexus/src/app/sagas/vpc_create.rs b/nexus/src/app/sagas/vpc_create.rs index 97961a6fa1..85eed6616d 100644 --- 
a/nexus/src/app/sagas/vpc_create.rs +++ b/nexus/src/app/sagas/vpc_create.rs @@ -599,7 +599,9 @@ pub(crate) mod test { // ignore built-in services VPC .filter(dsl::id.ne(*SERVICES_VPC_ID)) .select(Vpc::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -618,7 +620,9 @@ pub(crate) mod test { // ignore built-in services VPC .filter(dsl::vpc_id.ne(*SERVICES_VPC_ID)) .select(VpcRouter::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -646,7 +650,7 @@ pub(crate) mod test { ) .select(RouterRoute::as_select()) .first_async::( - datastore.pool_for_tests().await.unwrap(), + &*datastore.pool_connection_for_tests().await.unwrap(), ) .await .optional() @@ -666,7 +670,9 @@ pub(crate) mod test { // ignore built-in services VPC .filter(dsl::vpc_id.ne(*SERVICES_VPC_ID)) .select(VpcSubnet::as_select()) - .first_async::(datastore.pool_for_tests().await.unwrap()) + .first_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) .await .optional() .unwrap() @@ -686,7 +692,7 @@ pub(crate) mod test { .filter(dsl::vpc_id.ne(*SERVICES_VPC_ID)) .select(VpcFirewallRule::as_select()) .first_async::( - datastore.pool_for_tests().await.unwrap(), + &*datastore.pool_connection_for_tests().await.unwrap(), ) .await .optional() diff --git a/nexus/test-interface/Cargo.toml b/nexus/test-interface/Cargo.toml index 44c894411b..e0743e84bc 100644 --- a/nexus/test-interface/Cargo.toml +++ b/nexus/test-interface/Cargo.toml @@ -12,3 +12,4 @@ nexus-types.workspace = true omicron-common.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/nexus/test-utils-macros/Cargo.toml b/nexus/test-utils-macros/Cargo.toml index 4f9d3eca32..1bfa25017a 100644 --- a/nexus/test-utils-macros/Cargo.toml +++ b/nexus/test-utils-macros/Cargo.toml @@ -11,3 +11,4 @@ proc-macro = true proc-macro2.workspace = true quote.workspace = true syn = { workspace = true, features = [ "fold", "parsing" ] } +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/nexus/test-utils/Cargo.toml b/nexus/test-utils/Cargo.toml index db98a979c1..a2e7600e93 100644 --- a/nexus/test-utils/Cargo.toml +++ b/nexus/test-utils/Cargo.toml @@ -38,8 +38,4 @@ tempfile.workspace = true trust-dns-proto.workspace = true trust-dns-resolver.workspace = true uuid.workspace = true - -[build-dependencies] -dropshot.workspace = true -omicron-test-utils.workspace = true -tokio.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/nexus/test-utils/build.rs b/nexus/test-utils/build.rs deleted file mode 100644 index aada3fe039..0000000000 --- a/nexus/test-utils/build.rs +++ /dev/null @@ -1,37 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -use dropshot::{test_util::LogContext, ConfigLogging, ConfigLoggingLevel}; -use omicron_test_utils::dev::test_setup_database_seed; -use std::env; -use std::path::Path; - -// Creates a "pre-populated" CockroachDB storage directory, which -// subsequent tests can copy instead of creating themselves. 
-// -// Is it critical this happens at build-time? No. However, it -// makes it more convenient for tests to assume this seeded -// directory exists, rather than all attempting to create it -// concurrently. -// -// Refer to the documentation of [`test_setup_database_seed`] for -// more context. -#[tokio::main] -async fn main() { - println!("cargo:rerun-if-changed=build.rs"); - println!("cargo:rerun-if-changed=../../schema/crdb/dbinit.sql"); - println!("cargo:rerun-if-changed=../../tools/cockroachdb_checksums"); - println!("cargo:rerun-if-changed=../../tools/cockroachdb_version"); - - let logctx = LogContext::new( - "crdb_seeding", - &ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }, - ); - - let seed = - Path::new(&env::var("OUT_DIR").expect("Missing output directory")) - .join("crdb-base"); - test_setup_database_seed(&logctx.log, &seed).await; - logctx.cleanup_successful(); -} diff --git a/nexus/test-utils/src/db.rs b/nexus/test-utils/src/db.rs index bf0b7ed52d..37d7128c49 100644 --- a/nexus/test-utils/src/db.rs +++ b/nexus/test-utils/src/db.rs @@ -4,9 +4,9 @@ //! Database testing facilities. +use camino::Utf8PathBuf; use omicron_test_utils::dev; use slog::Logger; -use std::path::PathBuf; /// Path to the "seed" CockroachDB directory. /// @@ -16,8 +16,11 @@ use std::path::PathBuf; /// By creating a "seed" version of the database, we can cut down /// on the time spent performing this operation. Instead, we opt /// to copy the database from this seed location. -fn seed_dir() -> PathBuf { - PathBuf::from(concat!(env!("OUT_DIR"), "/crdb-base")) +fn seed_dir() -> Utf8PathBuf { + // The setup script should set this environment variable. + let seed_dir = std::env::var("CRDB_SEED_DIR") + .expect("CRDB_SEED_DIR missing -- are you running this test with `cargo nextest run`?"); + seed_dir.into() } /// Wrapper around [`dev::test_setup_database`] which uses a a diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index c0f175cf31..f7ffafec52 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -25,3 +25,4 @@ api_identity.workspace = true dns-service-client.workspace = true omicron-common.workspace = true omicron-passwords.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/openapi/wicketd.json b/openapi/wicketd.json index 40d798da00..d67fc79f7a 100644 --- a/openapi/wicketd.json +++ b/openapi/wicketd.json @@ -598,6 +598,33 @@ } } }, + "/update": { + "post": { + "summary": "An endpoint to start updating one or more sleds, switches and PSCs.", + "operationId": "post_start_update", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StartUpdateParams" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, "/update/{type}/{slot}": { "get": { "summary": "An endpoint to get the status of any update being performed or recently", @@ -641,51 +668,6 @@ "$ref": "#/components/responses/Error" } } - }, - "post": { - "summary": "An endpoint to start updating a sled.", - "operationId": "post_start_update", - "parameters": [ - { - "in": "path", - "name": "slot", - "required": true, - "schema": { - "type": "integer", - "format": "uint32", - "minimum": 0 - } - }, - { - "in": "path", - "name": "type", - "required": true, - "schema": { - "$ref": "#/components/schemas/SpType" - } - } - ], - "requestBody": 
{ - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/StartUpdateOptions" - } - } - }, - "required": true - }, - "responses": { - "204": { - "description": "resource updated" - }, - "4XX": { - "$ref": "#/components/responses/Error" - }, - "5XX": { - "$ref": "#/components/responses/Error" - } - } } } }, @@ -2761,6 +2743,31 @@ "skip_sp_version_check" ] }, + "StartUpdateParams": { + "type": "object", + "properties": { + "options": { + "description": "Options for the update.", + "allOf": [ + { + "$ref": "#/components/schemas/StartUpdateOptions" + } + ] + }, + "targets": { + "description": "The SP identifiers to start the update with. Must be non-empty.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SpIdentifier" + }, + "uniqueItems": true + } + }, + "required": [ + "options", + "targets" + ] + }, "StepComponentSummaryForGenericSpec": { "type": "object", "properties": { diff --git a/oxide-client/Cargo.toml b/oxide-client/Cargo.toml index 0602066e6d..df34ab9721 100644 --- a/oxide-client/Cargo.toml +++ b/oxide-client/Cargo.toml @@ -21,3 +21,4 @@ thiserror.workspace = true tokio = { workspace = true, features = [ "net" ] } trust-dns-resolver.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/oximeter-client/Cargo.toml b/oximeter-client/Cargo.toml index e4e68464d7..297dfb6c92 100644 --- a/oximeter-client/Cargo.toml +++ b/oximeter-client/Cargo.toml @@ -12,3 +12,4 @@ reqwest = { workspace = true, features = ["json", "rustls-tls", "stream"] } serde.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/oximeter/collector/Cargo.toml b/oximeter/collector/Cargo.toml index 1137651aa0..c8c4030dba 100644 --- a/oximeter/collector/Cargo.toml +++ b/oximeter/collector/Cargo.toml @@ -22,6 +22,7 @@ thiserror.workspace = true tokio.workspace = true toml.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/oximeter/db/Cargo.toml b/oximeter/db/Cargo.toml index 9ff4ac5c06..77bce09db9 100644 --- a/oximeter/db/Cargo.toml +++ b/oximeter/db/Cargo.toml @@ -25,6 +25,7 @@ thiserror.workspace = true tokio = { workspace = true, features = [ "rt-multi-thread", "macros" ] } usdt.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] itertools.workspace = true diff --git a/oximeter/instruments/Cargo.toml b/oximeter/instruments/Cargo.toml index 98f8f3b5b2..4adff0463a 100644 --- a/oximeter/instruments/Cargo.toml +++ b/oximeter/instruments/Cargo.toml @@ -12,6 +12,7 @@ oximeter.workspace = true tokio.workspace = true http = { workspace = true, optional = true } uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [features] default = ["http-instruments"] diff --git a/oximeter/oximeter-macro-impl/Cargo.toml b/oximeter/oximeter-macro-impl/Cargo.toml index c38d85ed2d..ff116e1c9d 100644 --- a/oximeter/oximeter-macro-impl/Cargo.toml +++ b/oximeter/oximeter-macro-impl/Cargo.toml @@ -12,3 +12,4 @@ proc-macro = true proc-macro2.workspace = true quote.workspace = true syn = { workspace = true, features = [ "full", "extra-traits" ] } +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/oximeter/oximeter/Cargo.toml b/oximeter/oximeter/Cargo.toml 
index f0549548a6..b2aa15f85e 100644 --- a/oximeter/oximeter/Cargo.toml +++ b/oximeter/oximeter/Cargo.toml @@ -15,6 +15,7 @@ schemars = { workspace = true, features = [ "uuid1", "bytes", "chrono" ] } serde.workspace = true thiserror.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } [dev-dependencies] approx.workspace = true diff --git a/oximeter/producer/Cargo.toml b/oximeter/producer/Cargo.toml index e511294e52..f171f57e8a 100644 --- a/oximeter/producer/Cargo.toml +++ b/oximeter/producer/Cargo.toml @@ -19,3 +19,4 @@ slog-dtrace.workspace = true tokio.workspace = true thiserror.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../../workspace-hack" } diff --git a/package-manifest.toml b/package-manifest.toml index b2e1552f3c..4dc0f6b616 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -259,7 +259,10 @@ service_name = "mgs" only_for_targets.image = "standard" only_for_targets.switch = "stub" source.type = "local" -source.paths = [ { from = "smf/mgs-sim", to = "/var/svc/manifest/site/mgs" } ] +source.paths = [ + { from = "smf/mgs/manifest.xml", to = "/var/svc/manifest/site/mgs/manifest.xml" }, + { from = "smf/mgs-sim/config.toml", to = "/var/svc/manifest/site/mgs/config.toml" } +] output.intermediate_only = true output.type = "zone" @@ -268,7 +271,10 @@ service_name = "mgs" only_for_targets.image = "standard" only_for_targets.switch = "softnpu" source.type = "local" -source.paths = [ { from = "smf/mgs-sim", to = "/var/svc/manifest/site/mgs" } ] +source.paths = [ + { from = "smf/mgs/manifest.xml", to = "/var/svc/manifest/site/mgs/manifest.xml" }, + { from = "smf/mgs-sim/config.toml", to = "/var/svc/manifest/site/mgs/config.toml" } +] output.intermediate_only = true output.type = "zone" diff --git a/package/Cargo.toml b/package/Cargo.toml index 7c786b77ef..9fc4610020 100644 --- a/package/Cargo.toml +++ b/package/Cargo.toml @@ -34,6 +34,7 @@ tokio = { workspace = true, features = [ "full" ] } toml.workspace = true topological-sort.workspace = true walkdir.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] expectorate.workspace = true diff --git a/passwords/Cargo.toml b/passwords/Cargo.toml index 1731716101..cbd569ef4c 100644 --- a/passwords/Cargo.toml +++ b/passwords/Cargo.toml @@ -11,6 +11,7 @@ thiserror.workspace = true schemars.workspace = true serde.workspace = true serde_with.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [dev-dependencies] argon2alt = { package = "rust-argon2", version = "1.0" } diff --git a/rpaths/Cargo.toml b/rpaths/Cargo.toml index 829b4ffe28..7671be4968 100644 --- a/rpaths/Cargo.toml +++ b/rpaths/Cargo.toml @@ -5,3 +5,4 @@ edition = "2021" license = "MPL-2.0" [dependencies] +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 77379db470..eacfef491b 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -4,5 +4,5 @@ # # We choose a specific toolchain (rather than "stable") for repeatability. The # intent is to keep this up-to-date with recently-released stable Rust. 
-channel = "1.72.0" +channel = "1.72.1" profile = "default" diff --git a/schema/crdb/5.0.0/up1.sql b/schema/crdb/5.0.0/up1.sql new file mode 100644 index 0000000000..0976070947 --- /dev/null +++ b/schema/crdb/5.0.0/up1.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS lookup_zpool_by_disk ON omicron.public.zpool (physical_disk_id, id) WHERE physical_disk_id IS NOT NULL AND time_deleted IS NULL; diff --git a/schema/crdb/5.0.0/up2.sql b/schema/crdb/5.0.0/up2.sql new file mode 100644 index 0000000000..c209dab96b --- /dev/null +++ b/schema/crdb/5.0.0/up2.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS lookup_dataset_by_zpool ON omicron.public.dataset (pool_id, id) WHERE pool_id IS NOT NULL AND time_deleted IS NULL; diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 4b38b7dfe4..ad09092f8f 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -388,6 +388,12 @@ CREATE TABLE IF NOT EXISTS omicron.public.zpool ( total_size INT NOT NULL ); +/* Create an index on the physical disk id */ +CREATE INDEX IF NOT EXISTS lookup_zpool_by_disk on omicron.public.zpool ( + physical_disk_id, + id +) WHERE physical_disk_id IS NOT NULL AND time_deleted IS NULL; + CREATE TYPE IF NOT EXISTS omicron.public.dataset_kind AS ENUM ( 'crucible', 'cockroach', @@ -437,6 +443,12 @@ CREATE INDEX IF NOT EXISTS lookup_dataset_by_size_used on omicron.public.dataset size_used ) WHERE size_used IS NOT NULL AND time_deleted IS NULL; +/* Create an index on the zpool id */ +CREATE INDEX IF NOT EXISTS lookup_dataset_by_zpool on omicron.public.dataset ( + pool_id, + id +) WHERE pool_id IS NOT NULL AND time_deleted IS NULL; + /* * A region of space allocated to Crucible Downstairs, within a dataset. */ @@ -2562,7 +2574,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '4.0.0', NULL) + ( TRUE, NOW(), NOW(), '5.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/sled-agent-client/Cargo.toml b/sled-agent-client/Cargo.toml index f6c58fbf2b..01c1032a51 100644 --- a/sled-agent-client/Cargo.toml +++ b/sled-agent-client/Cargo.toml @@ -15,3 +15,4 @@ reqwest = { workspace = true, features = [ "json", "rustls-tls", "stream" ] } serde.workspace = true slog.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } diff --git a/sled-agent/Cargo.toml b/sled-agent/Cargo.toml index f172136726..b131698395 100644 --- a/sled-agent/Cargo.toml +++ b/sled-agent/Cargo.toml @@ -76,6 +76,7 @@ uuid.workspace = true zeroize.workspace = true zone.workspace = true static_assertions.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [target.'cfg(target_os = "illumos")'.dependencies] opte-ioctl.workspace = true diff --git a/sled-agent/src/services.rs b/sled-agent/src/services.rs index 9868bb4f0d..96cdf8222b 100644 --- a/sled-agent/src/services.rs +++ b/sled-agent/src/services.rs @@ -1645,6 +1645,10 @@ impl ServiceManager { } } + if let Some(info) = self.inner.sled_info.get() { + smfh.setprop("config/rack_id", info.rack_id)?; + } + smfh.refresh()?; } ServiceType::SpSim => { @@ -2754,6 +2758,20 @@ impl ServiceManager { &format!("[{address}]:{MGS_PORT}"), )?; + // It should be impossible for the `sled_info` not to be set here, + // as the underlay is set at the same time. 
+ if let Some(info) = self.inner.sled_info.get() { + smfh.setprop("config/rack_id", info.rack_id)?; + } else { + error!( + self.inner.log, + concat!( + "rack_id not present,", + " even though underlay address exists" + ) + ); + } + smfh.refresh()?; } ServiceType::Dendrite { .. } => { diff --git a/sled-hardware/Cargo.toml b/sled-hardware/Cargo.toml index c6bc09f41e..880f93441c 100644 --- a/sled-hardware/Cargo.toml +++ b/sled-hardware/Cargo.toml @@ -24,6 +24,7 @@ thiserror.workspace = true tofino.workspace = true tokio.workspace = true uuid.workspace = true +omicron-workspace-hack = { version = "0.1", path = "../workspace-hack" } [target.'cfg(target_os = "illumos")'.dependencies] illumos-devinfo = { git = "https://github.com/oxidecomputer/illumos-devinfo", branch = "main" } diff --git a/smf/mgs-sim/manifest.xml b/smf/mgs-sim/manifest.xml deleted file mode 100644 index 1e9b752773..0000000000 --- a/smf/mgs-sim/manifest.xml +++ /dev/null @@ -1,57 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/smf/mgs/manifest.xml b/smf/mgs/manifest.xml index 1e9b752773..125c32ce2b 100644 --- a/smf/mgs/manifest.xml +++ b/smf/mgs/manifest.xml @@ -31,6 +31,12 @@ "unknown" with a UUID as a part of starting us. --> + +