From d881633e54dbf4d90e56e51aa2d1327dae60c672 Mon Sep 17 00:00:00 2001 From: Rain Date: Fri, 16 Aug 2024 19:38:12 -0700 Subject: [PATCH] [spr] changes to main this commit is based on Created using spr 1.3.6-beta.1 [skip ci] --- Cargo.lock | 139 +- Cargo.toml | 21 +- dev-tools/openapi-manager/Cargo.toml | 1 + dev-tools/openapi-manager/src/check.rs | 128 +- dev-tools/openapi-manager/src/dispatch.rs | 8 +- dev-tools/openapi-manager/src/generate.rs | 26 +- dev-tools/openapi-manager/src/output.rs | 50 +- dev-tools/openapi-manager/src/spec.rs | 270 +- dev-tools/openapi-manager/types/Cargo.toml | 13 + dev-tools/openapi-manager/types/src/lib.rs | 12 + .../openapi-manager/types/src/validation.rs | 47 + nexus/Cargo.toml | 2 + nexus/auth-types/Cargo.toml | 17 + .../src/authn}/cookies.rs | 0 nexus/auth-types/src/authn/mod.rs | 7 + nexus/auth-types/src/lib.rs | 7 + nexus/auth/Cargo.toml | 1 + nexus/auth/src/authn/external/mod.rs | 1 - .../auth/src/authn/external/session_cookie.rs | 2 +- nexus/src/app/metrics.rs | 4 +- nexus/src/external_api/console_api.rs | 10 +- nexus/src/external_api/http_entrypoints.rs | 13673 ++++++++-------- nexus/tests/integration_tests/metrics.rs | 6 +- openapi/nexus.json | 14 +- oximeter/db/Cargo.toml | 2 + oximeter/db/src/client/mod.rs | 31 +- oximeter/db/src/client/oxql.rs | 30 +- oximeter/db/src/lib.rs | 3 +- oximeter/db/src/model.rs | 11 +- oximeter/db/src/oxql/ast/table_ops/align.rs | 35 +- oximeter/db/src/oxql/ast/table_ops/filter.rs | 47 +- .../db/src/oxql/ast/table_ops/group_by.rs | 73 +- oximeter/db/src/oxql/ast/table_ops/join.rs | 292 +- oximeter/db/src/oxql/ast/table_ops/limit.rs | 108 +- oximeter/db/src/oxql/ast/table_ops/mod.rs | 2 +- oximeter/db/src/oxql/mod.rs | 4 - oximeter/db/src/oxql/query/mod.rs | 10 - oximeter/db/src/query.rs | 5 +- oximeter/db/src/shells/oxql.rs | 3 +- oximeter/db/tests/integration_test.rs | 13 +- oximeter/impl/src/test_util.rs | 130 - oximeter/oximeter/Cargo.toml | 3 +- oximeter/oximeter/src/lib.rs | 9 +- oximeter/oxql-types/Cargo.toml | 18 + oximeter/oxql-types/src/lib.rs | 25 + .../{db/src/oxql => oxql-types/src}/point.rs | 525 +- .../{db/src/oxql => oxql-types/src}/table.rs | 87 +- oximeter/schema/Cargo.toml | 24 + .../src/bin/oximeter-schema.rs | 6 +- .../src/schema => schema/src}/codegen.rs | 349 +- .../{impl/src/schema => schema/src}/ir.rs | 20 +- oximeter/schema/src/lib.rs | 12 + oximeter/test-utils/Cargo.toml | 15 + oximeter/test-utils/src/lib.rs | 295 + oximeter/timeseries-macro/Cargo.toml | 3 +- oximeter/timeseries-macro/src/lib.rs | 4 +- oximeter/{impl => types}/Cargo.toml | 12 +- oximeter/{impl => types}/benches/quantile.rs | 2 +- oximeter/{impl => types}/src/histogram.rs | 8 +- oximeter/{impl => types}/src/lib.rs | 7 +- oximeter/{impl => types}/src/quantile.rs | 16 +- .../src/schema/mod.rs => types/src/schema.rs} | 125 - oximeter/{impl => types}/src/traits.rs | 22 +- oximeter/{impl => types}/src/types.rs | 32 +- .../{impl => types}/tests/fail/failures.rs | 0 .../tests/fail/failures.stderr | 0 .../{impl => types}/tests/test_compilation.rs | 0 workspace-hack/Cargo.toml | 2 + 68 files changed, 8868 insertions(+), 8011 deletions(-) create mode 100644 dev-tools/openapi-manager/types/Cargo.toml create mode 100644 dev-tools/openapi-manager/types/src/lib.rs create mode 100644 dev-tools/openapi-manager/types/src/validation.rs create mode 100644 nexus/auth-types/Cargo.toml rename nexus/{auth/src/authn/external => auth-types/src/authn}/cookies.rs (100%) create mode 100644 nexus/auth-types/src/authn/mod.rs create mode 100644 
nexus/auth-types/src/lib.rs delete mode 100644 oximeter/impl/src/test_util.rs create mode 100644 oximeter/oxql-types/Cargo.toml create mode 100644 oximeter/oxql-types/src/lib.rs rename oximeter/{db/src/oxql => oxql-types/src}/point.rs (82%) rename oximeter/{db/src/oxql => oxql-types/src}/table.rs (76%) create mode 100644 oximeter/schema/Cargo.toml rename oximeter/{oximeter => schema}/src/bin/oximeter-schema.rs (93%) rename oximeter/{impl/src/schema => schema/src}/codegen.rs (73%) rename oximeter/{impl/src/schema => schema/src}/ir.rs (99%) create mode 100644 oximeter/schema/src/lib.rs create mode 100644 oximeter/test-utils/Cargo.toml create mode 100644 oximeter/test-utils/src/lib.rs rename oximeter/{impl => types}/Cargo.toml (78%) rename oximeter/{impl => types}/benches/quantile.rs (97%) rename oximeter/{impl => types}/src/histogram.rs (99%) rename oximeter/{impl => types}/src/lib.rs (92%) rename oximeter/{impl => types}/src/quantile.rs (97%) rename oximeter/{impl/src/schema/mod.rs => types/src/schema.rs} (75%) rename oximeter/{impl => types}/src/traits.rs (96%) rename oximeter/{impl => types}/src/types.rs (97%) rename oximeter/{impl => types}/tests/fail/failures.rs (100%) rename oximeter/{impl => types}/tests/fail/failures.stderr (100%) rename oximeter/{impl => types}/tests/test_compilation.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 849c82f1f0..a24b1d4881 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4917,6 +4917,7 @@ dependencies = [ "http 0.2.12", "hyper 0.14.30", "newtype_derive", + "nexus-auth-types", "nexus-db-fixed-data", "nexus-db-model", "nexus-types", @@ -4939,6 +4940,19 @@ dependencies = [ "uuid", ] +[[package]] +name = "nexus-auth-types" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "cookie 0.18.1", + "dropshot", + "http 0.2.12", + "newtype_derive", + "omicron-workspace-hack", +] + [[package]] name = "nexus-client" version = "0.1.0" @@ -6039,6 +6053,7 @@ dependencies = [ "macaddr", "mg-admin-client", "nexus-auth", + "nexus-auth-types", "nexus-client", "nexus-config", "nexus-db-model", @@ -6077,6 +6092,7 @@ dependencies = [ "oximeter-instruments", "oximeter-producer", "oxnet", + "oxql-types", "parse-display", "paste", "pem", @@ -6504,6 +6520,7 @@ dependencies = [ "postgres-types", "predicates", "proc-macro2", + "quote", "regex", "regex-automata 0.4.6", "regex-syntax 0.8.4", @@ -6630,6 +6647,7 @@ dependencies = [ "nexus-internal-api", "omicron-workspace-hack", "openapi-lint", + "openapi-manager-types", "openapiv3", "owo-colors", "oximeter-api", @@ -6640,6 +6658,15 @@ dependencies = [ "wicketd-api", ] +[[package]] +name = "openapi-manager-types" +version = "0.1.0" +dependencies = [ + "anyhow", + "camino", + "omicron-workspace-hack", +] + [[package]] name = "openapiv3" version = "2.0.0" @@ -6821,9 +6848,10 @@ dependencies = [ "chrono", "clap", "omicron-workspace-hack", - "oximeter-impl", "oximeter-macro-impl", + "oximeter-schema", "oximeter-timeseries-macro", + "oximeter-types", "prettyplease", "syn 2.0.74", "toml 0.8.19", @@ -6925,6 +6953,8 @@ dependencies = [ "omicron-test-utils", "omicron-workspace-hack", "oximeter", + "oximeter-test-utils", + "oxql-types", "peg", "reedline", "regex", @@ -6948,39 +6978,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "oximeter-impl" -version = "0.1.0" -dependencies = [ - "approx", - "bytes", - "chrono", - "criterion", - "float-ord", - "heck 0.5.0", - "num", - "omicron-common", - "omicron-workspace-hack", - "oximeter-macro-impl", - "prettyplease", - "proc-macro2", - "quote", - "rand", - "rand_distr", - "regex", - 
"rstest", - "schemars", - "serde", - "serde_json", - "slog-error-chain", - "strum", - "syn 2.0.74", - "thiserror", - "toml 0.8.19", - "trybuild", - "uuid", -] - [[package]] name = "oximeter-instruments" version = "0.1.0" @@ -7041,17 +7038,75 @@ dependencies = [ "uuid", ] +[[package]] +name = "oximeter-schema" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "clap", + "heck 0.5.0", + "omicron-workspace-hack", + "oximeter-types", + "prettyplease", + "proc-macro2", + "quote", + "schemars", + "serde", + "slog-error-chain", + "syn 2.0.74", + "toml 0.8.19", +] + +[[package]] +name = "oximeter-test-utils" +version = "0.1.0" +dependencies = [ + "chrono", + "omicron-workspace-hack", + "oximeter-macro-impl", + "oximeter-types", + "uuid", +] + [[package]] name = "oximeter-timeseries-macro" version = "0.1.0" dependencies = [ "omicron-workspace-hack", - "oximeter-impl", + "oximeter-schema", + "oximeter-types", "proc-macro2", "quote", "syn 2.0.74", ] +[[package]] +name = "oximeter-types" +version = "0.1.0" +dependencies = [ + "approx", + "bytes", + "chrono", + "criterion", + "float-ord", + "num", + "omicron-common", + "omicron-workspace-hack", + "oximeter-macro-impl", + "rand", + "rand_distr", + "regex", + "rstest", + "schemars", + "serde", + "serde_json", + "strum", + "thiserror", + "trybuild", + "uuid", +] + [[package]] name = "oxlog" version = "0.1.0" @@ -7076,6 +7131,20 @@ dependencies = [ "serde_json", ] +[[package]] +name = "oxql-types" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "highway", + "num", + "omicron-workspace-hack", + "oximeter-types", + "schemars", + "serde", +] + [[package]] name = "p256" version = "0.13.2" diff --git a/Cargo.toml b/Cargo.toml index 1b62af959d..1d60af2912 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,6 +31,7 @@ members = [ "dev-tools/omicron-dev", "dev-tools/omicron-dev-lib", "dev-tools/openapi-manager", + "dev-tools/openapi-manager/types", "dev-tools/oxlog", "dev-tools/reconfigurator-cli", "dev-tools/releng", @@ -56,6 +57,7 @@ members = [ "nexus-sled-agent-shared", "nexus/authz-macros", "nexus/auth", + "nexus/auth-types", "nexus/db-fixed-data", "nexus/db-macros", "nexus/db-model", @@ -77,12 +79,15 @@ members = [ "oximeter/api", "oximeter/collector", "oximeter/db", - "oximeter/impl", "oximeter/instruments", "oximeter/oximeter-macro-impl", "oximeter/oximeter", + "oximeter/oxql-types", "oximeter/producer", + "oximeter/schema", + "oximeter/test-utils", "oximeter/timeseries-macro", + "oximeter/types", "package", "passwords", "rpaths", @@ -142,6 +147,7 @@ default-members = [ "dev-tools/omicron-dev", "dev-tools/omicron-dev-lib", "dev-tools/openapi-manager", + "dev-tools/openapi-manager/types", "dev-tools/oxlog", "dev-tools/reconfigurator-cli", "dev-tools/releng", @@ -170,6 +176,7 @@ default-members = [ "nexus-sled-agent-shared", "nexus/authz-macros", "nexus/auth", + "nexus/auth-types", "nexus/db-fixed-data", "nexus/db-macros", "nexus/db-model", @@ -191,12 +198,15 @@ default-members = [ "oximeter/api", "oximeter/collector", "oximeter/db", - "oximeter/impl", "oximeter/instruments", "oximeter/oximeter-macro-impl", "oximeter/oximeter", + "oximeter/oxql-types", "oximeter/producer", + "oximeter/schema", + "oximeter/test-utils", "oximeter/timeseries-macro", + "oximeter/types", "package", "passwords", "rpaths", @@ -404,6 +414,7 @@ mg-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "2 ddm-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "220dd026e83142b83bd93123f465a64dd4600201" } multimap = 
"0.10.0" nexus-auth = { path = "nexus/auth" } +nexus-auth-types = { path = "nexus/auth-types" } nexus-client = { path = "clients/nexus-client" } nexus-config = { path = "nexus-config" } nexus-db-fixed-data = { path = "nexus/db-fixed-data" } @@ -447,6 +458,7 @@ oxlog = { path = "dev-tools/oxlog" } oxnet = { git = "https://github.com/oxidecomputer/oxnet" } once_cell = "1.19.0" openapi-lint = { git = "https://github.com/oxidecomputer/openapi-lint", branch = "main" } +openapi-manager-types = { path = "dev-tools/openapi-manager/types" } openapiv3 = "2.0.0" # must match samael's crate! openssl = "0.10" @@ -459,11 +471,14 @@ oximeter-api = { path = "oximeter/api" } oximeter-client = { path = "clients/oximeter-client" } oximeter-db = { path = "oximeter/db/", default-features = false } oximeter-collector = { path = "oximeter/collector" } -oximeter-impl = { path = "oximeter/impl" } oximeter-instruments = { path = "oximeter/instruments" } oximeter-macro-impl = { path = "oximeter/oximeter-macro-impl" } oximeter-producer = { path = "oximeter/producer" } +oximeter-schema = { path = "oximeter/schema" } +oximeter-test-utils = { path = "oximeter/test-utils" } oximeter-timeseries-macro = { path = "oximeter/timeseries-macro" } +oximeter-types = { path = "oximeter/types" } +oxql-types = { path = "oximeter/oxql-types" } p256 = "0.13" parse-display = "0.10.0" partial-io = { version = "0.5.4", features = ["proptest1", "tokio1"] } diff --git a/dev-tools/openapi-manager/Cargo.toml b/dev-tools/openapi-manager/Cargo.toml index fe90737d9e..2ca1bc3e4d 100644 --- a/dev-tools/openapi-manager/Cargo.toml +++ b/dev-tools/openapi-manager/Cargo.toml @@ -25,6 +25,7 @@ nexus-internal-api.workspace = true omicron-workspace-hack.workspace = true openapiv3.workspace = true openapi-lint.workspace = true +openapi-manager-types.workspace = true owo-colors.workspace = true oximeter-api.workspace = true serde_json.workspace = true diff --git a/dev-tools/openapi-manager/src/check.rs b/dev-tools/openapi-manager/src/check.rs index 182ed9fb19..d7f9b7fc46 100644 --- a/dev-tools/openapi-manager/src/check.rs +++ b/dev-tools/openapi-manager/src/check.rs @@ -5,17 +5,16 @@ use std::{io::Write, process::ExitCode}; use anyhow::Result; -use camino::Utf8Path; use indent_write::io::IndentWriter; use owo_colors::OwoColorize; use similar::TextDiff; use crate::{ output::{ - display_api_spec, display_error, display_summary, headers::*, plural, - write_diff, OutputOpts, Styles, + display_api_spec, display_api_spec_file, display_error, + display_summary, headers::*, plural, write_diff, OutputOpts, Styles, }, - spec::{all_apis, CheckStatus}, + spec::{all_apis, CheckError, Environment}, FAILURE_EXIT_CODE, NEEDS_UPDATE_EXIT_CODE, }; @@ -37,7 +36,7 @@ impl CheckResult { } pub(crate) fn check_impl( - dir: &Utf8Path, + env: &Environment, output: &OutputOpts, ) -> Result { let mut styles = Styles::default(); @@ -48,6 +47,7 @@ pub(crate) fn check_impl( let all_apis = all_apis(); let total = all_apis.len(); let count_width = total.to_string().len(); + let count_section_indent = count_section_indent(count_width); let continued_indent = continued_indent(count_width); eprintln!("{:>HEADER_WIDTH$}", SEPARATOR); @@ -59,56 +59,85 @@ pub(crate) fn check_impl( plural::documents(total), ); let mut num_up_to_date = 0; - let mut num_stale = 0; - let mut num_missing = 0; + let mut num_out_of_date = 0; let mut num_failed = 0; for (ix, spec) in all_apis.iter().enumerate() { let count = ix + 1; - match spec.check(&dir) { - Ok(status) => match status { - CheckStatus::Ok(summary) 
=> { - eprintln!( - "{:>HEADER_WIDTH$} [{count:>count_width$}/{total}] {}: {}", - UP_TO_DATE.style(styles.success_header), - display_api_spec(spec, &styles), - display_summary(&summary, &styles), - ); + match spec.check(env) { + Ok(status) => { + let total_errors = status.total_errors(); + let total_errors_width = total_errors.to_string().len(); + + if total_errors == 0 { + // Success case. + let extra = if status.extra_files_len() > 0 { + format!( + ", {} extra files", + status.extra_files_len().style(styles.bold) + ) + } else { + "".to_string() + }; - num_up_to_date += 1; - } - CheckStatus::Stale { full_path, actual, expected } => { eprintln!( - "{:>HEADER_WIDTH$} [{count:>count_width$}/{total}] {}", - STALE.style(styles.warning_header), + "{:>HEADER_WIDTH$} [{count:>count_width$}/{total}] {}: {}{extra}", + UP_TO_DATE.style(styles.success_header), display_api_spec(spec, &styles), + display_summary(&status.summary, &styles), ); - let diff = TextDiff::from_lines(&actual, &expected); - write_diff( - &diff, - &full_path, - &styles, - // Add an indent to align diff with the status message. - &mut IndentWriter::new( - &continued_indent, - std::io::stderr(), - ), - )?; - - num_stale += 1; + num_up_to_date += 1; + continue; } - CheckStatus::Missing => { - eprintln!( - "{:>HEADER_WIDTH$} [{count:>count_width$}/{total}] {}", - MISSING.style(styles.warning_header), - display_api_spec(spec, &styles), - ); - num_missing += 1; + // Out of date: print errors. + eprintln!( + "{:>HEADER_WIDTH$} [{count:>count_width$}/{total}] {}", + OUT_OF_DATE.style(styles.warning_header), + display_api_spec(spec, &styles), + ); + num_out_of_date += 1; + + for (error_ix, (spec_file, error)) in + status.iter_errors().enumerate() + { + let error_count = error_ix + 1; + + match error { + CheckError::Stale { full_path, actual, expected } => { + eprintln!( + "{:>HEADER_WIDTH$}{count_section_indent}\ + ({error_count:>total_errors_width$}/{total_errors}) {}", + STALE.style(styles.warning_header), + display_api_spec_file(spec, spec_file, &styles), + ); + + let diff = + TextDiff::from_lines(&**actual, &**expected); + write_diff( + &diff, + &full_path, + &styles, + // Add an indent to align diff with the status message. 
+ &mut IndentWriter::new( + &continued_indent, + std::io::stderr(), + ), + )?; + } + CheckError::Missing => { + eprintln!( + "{:>HEADER_WIDTH$}{count_section_indent}\ + ({error_count:>total_errors_width$}/{total_errors}) {}", + MISSING.style(styles.warning_header), + display_api_spec_file(spec, spec_file, &styles), + ); + } + } } - }, + } Err(error) => { eprint!( "{:>HEADER_WIDTH$} [{count:>count_width$}/{total}] {}", @@ -131,20 +160,19 @@ pub(crate) fn check_impl( let status_header = if num_failed > 0 { FAILURE.style(styles.failure_header) - } else if num_stale > 0 { - STALE.style(styles.warning_header) + } else if num_out_of_date > 0 { + OUT_OF_DATE.style(styles.warning_header) } else { SUCCESS.style(styles.success_header) }; eprintln!( - "{:>HEADER_WIDTH$} {} {} checked: {} up-to-date, {} stale, {} missing, {} failed", + "{:>HEADER_WIDTH$} {} {} checked: {} up-to-date, {} out-of-date, {} failed", status_header, total.style(styles.bold), plural::documents(total), num_up_to_date.style(styles.bold), - num_stale.style(styles.bold), - num_missing.style(styles.bold), + num_out_of_date.style(styles.bold), num_failed.style(styles.bold), ); if num_failed > 0 { @@ -154,7 +182,7 @@ pub(crate) fn check_impl( "cargo xtask openapi generate".style(styles.bold) ); Ok(CheckResult::Failures) - } else if num_stale > 0 { + } else if num_out_of_date > 0 { eprintln!( "{:>HEADER_WIDTH$} (run {} to update)", "", @@ -170,14 +198,14 @@ pub(crate) fn check_impl( mod tests { use std::process::ExitCode; - use crate::spec::find_openapi_dir; + use crate::spec::Environment; use super::*; #[test] fn check_apis_up_to_date() -> Result { let output = OutputOpts { color: clap::ColorChoice::Auto }; - let dir = find_openapi_dir()?; + let dir = Environment::new(None)?; let result = check_impl(&dir, &output)?; Ok(result.to_exit_code()) diff --git a/dev-tools/openapi-manager/src/dispatch.rs b/dev-tools/openapi-manager/src/dispatch.rs index 937a8b485f..ca2989396f 100644 --- a/dev-tools/openapi-manager/src/dispatch.rs +++ b/dev-tools/openapi-manager/src/dispatch.rs @@ -10,7 +10,7 @@ use clap::{Args, Parser, Subcommand}; use crate::{ check::check_impl, generate::generate_impl, list::list_impl, - output::OutputOpts, spec::openapi_dir, + output::OutputOpts, spec::Environment, }; /// Manage OpenAPI specifications. 
@@ -73,7 +73,7 @@ pub struct GenerateArgs { impl GenerateArgs { fn exec(self, output: &OutputOpts) -> anyhow::Result { - let dir = openapi_dir(self.dir)?; + let dir = Environment::new(self.dir)?; Ok(generate_impl(&dir, output)?.to_exit_code()) } } @@ -87,8 +87,8 @@ pub struct CheckArgs { impl CheckArgs { fn exec(self, output: &OutputOpts) -> anyhow::Result { - let dir = openapi_dir(self.dir)?; - Ok(check_impl(&dir, output)?.to_exit_code()) + let env = Environment::new(self.dir)?; + Ok(check_impl(&env, output)?.to_exit_code()) } } diff --git a/dev-tools/openapi-manager/src/generate.rs b/dev-tools/openapi-manager/src/generate.rs index f776ff2709..1cf9ebbb61 100644 --- a/dev-tools/openapi-manager/src/generate.rs +++ b/dev-tools/openapi-manager/src/generate.rs @@ -5,7 +5,6 @@ use std::{io::Write, process::ExitCode}; use anyhow::Result; -use camino::Utf8Path; use indent_write::io::IndentWriter; use owo_colors::OwoColorize; @@ -14,7 +13,7 @@ use crate::{ display_api_spec, display_error, display_summary, headers::*, plural, OutputOpts, Styles, }, - spec::{all_apis, OverwriteStatus}, + spec::{all_apis, Environment}, FAILURE_EXIT_CODE, }; @@ -34,7 +33,7 @@ impl GenerateResult { } pub(crate) fn generate_impl( - dir: &Utf8Path, + env: &Environment, output: &OutputOpts, ) -> Result { let mut styles = Styles::default(); @@ -62,27 +61,30 @@ pub(crate) fn generate_impl( for (ix, spec) in all_apis.iter().enumerate() { let count = ix + 1; - match spec.overwrite(&dir) { - Ok((status, summary)) => match status { - OverwriteStatus::Updated => { + match spec.overwrite(env) { + Ok(status) => { + let updated_count = status.updated_count(); + + if updated_count > 0 { eprintln!( - "{:>HEADER_WIDTH$} [{count:>count_width$}/{total}] {}: {}", + "{:>HEADER_WIDTH$} [{count:>count_width$}/{total}] {}: {} ({} {} updated)", UPDATED.style(styles.success_header), display_api_spec(spec, &styles), - display_summary(&summary, &styles), + display_summary(&status.summary, &styles), + updated_count.style(styles.bold), + plural::files(updated_count), ); num_updated += 1; - } - OverwriteStatus::Unchanged => { + } else { eprintln!( "{:>HEADER_WIDTH$} [{count:>count_width$}/{total}] {}: {}", UNCHANGED.style(styles.unchanged_header), display_api_spec(spec, &styles), - display_summary(&summary, &styles), + display_summary(&status.summary, &styles), ); num_unchanged += 1; } - }, + } Err(err) => { eprintln!( "{:>HEADER_WIDTH$} [{count:>count_width$}/{total}] {}", diff --git a/dev-tools/openapi-manager/src/output.rs b/dev-tools/openapi-manager/src/output.rs index 6cd578e778..b7684acff2 100644 --- a/dev-tools/openapi-manager/src/output.rs +++ b/dev-tools/openapi-manager/src/output.rs @@ -10,7 +10,7 @@ use indent_write::fmt::IndentWriter; use owo_colors::{OwoColorize, Style}; use similar::{ChangeTag, DiffableStr, TextDiff}; -use crate::spec::{ApiSpec, DocumentSummary}; +use crate::spec::{ApiSpec, ApiSpecFile, DocumentSummary}; #[derive(Debug, Args)] #[clap(next_help_heading = "Global options")] @@ -123,6 +123,21 @@ pub(crate) fn display_api_spec(spec: &ApiSpec, styles: &Styles) -> String { ) } +pub(crate) fn display_api_spec_file( + spec: &ApiSpec, + spec_file: ApiSpecFile<'_>, + styles: &Styles, +) -> String { + match spec_file { + ApiSpecFile::Openapi => { + format!("OpenAPI document {}", spec.filename.style(styles.filename)) + } + ApiSpecFile::Extra(path) => { + format!("Extra file {}", path.style(styles.filename)) + } + } +} + pub(crate) fn display_summary( summary: &DocumentSummary, styles: &Styles, @@ -202,8 +217,9 @@ pub(crate) mod 
headers { pub(crate) static GENERATING: &str = "Generating"; pub(crate) static UP_TO_DATE: &str = "Up-to-date"; - pub(crate) static STALE: &str = "Stale"; - pub(crate) static MISSING: &str = "Missing"; + pub(crate) static OUT_OF_DATE: &str = "Out-of-date"; + pub(crate) static STALE: &str = "-> Stale"; + pub(crate) static MISSING: &str = "-> Missing"; pub(crate) static UPDATED: &str = "Updated"; pub(crate) static UNCHANGED: &str = "Unchanged"; @@ -211,22 +227,38 @@ pub(crate) mod headers { pub(crate) static SUCCESS: &str = "Success"; pub(crate) static FAILURE: &str = "Failure"; - pub(crate) fn continued_indent(count_width: usize) -> String { + fn count_section_width(count_width: usize) -> usize { // Status strings are of the form: // // Generated [ 1/12] api.json: 1 path, 1 schema + // ^^^^^^^^^ // - // So the continued indent is: - // - // HEADER_WIDTH for the status string - // + (count_width * 2) for current and total counts + // So the width of the count section is: + // (count_width * 2) for current and total counts // + 3 for '[/]' // + 2 for spaces on either side. - " ".repeat(HEADER_WIDTH + count_width * 2 + 3 + 2) + count_width * 2 + 3 + 2 + } + + pub(crate) fn count_section_indent(count_width: usize) -> String { + " ".repeat(count_section_width(count_width)) + } + + pub(crate) fn continued_indent(count_width: usize) -> String { + // HEADER_WIDTH for the status string + count_section_width + " ".repeat(HEADER_WIDTH + count_section_width(count_width)) } } pub(crate) mod plural { + pub(crate) fn files(count: usize) -> &'static str { + if count == 1 { + "file" + } else { + "files" + } + } + pub(crate) fn documents(count: usize) -> &'static str { if count == 1 { "document" diff --git a/dev-tools/openapi-manager/src/spec.rs b/dev-tools/openapi-manager/src/spec.rs index 29601a63d6..dfa6b3eeca 100644 --- a/dev-tools/openapi-manager/src/spec.rs +++ b/dev-tools/openapi-manager/src/spec.rs @@ -9,6 +9,7 @@ use atomicwrites::AtomicFile; use camino::{Utf8Path, Utf8PathBuf}; use dropshot::{ApiDescription, ApiDescriptionBuildErrors, StubContext}; use fs_err as fs; +use openapi_manager_types::{ValidationBackend, ValidationContext}; use openapiv3::OpenAPI; /// All APIs managed by openapi-manager. @@ -143,47 +144,64 @@ pub struct ApiSpec { pub filename: &'static str, /// Extra validation to perform on the OpenAPI spec, if any. 
-    pub extra_validation: Option<fn(&OpenAPI) -> anyhow::Result<()>>,
+    pub extra_validation: Option<fn(&OpenAPI, ValidationContext<'_>)>,
 }
 
 impl ApiSpec {
     pub(crate) fn overwrite(
         &self,
-        dir: &Utf8Path,
-    ) -> Result<(OverwriteStatus, DocumentSummary)> {
+        env: &Environment,
+    ) -> Result<SpecOverwriteStatus> {
         let contents = self.to_json_bytes()?;
-        let summary = self
+        let (summary, validation_result) = self
             .validate_json(&contents)
             .context("OpenAPI document validation failed")?;
 
-        let full_path = dir.join(&self.filename);
-        let status = overwrite_file(&full_path, &contents)?;
-
-        Ok((status, summary))
+        let full_path = env.openapi_dir.join(&self.filename);
+        let openapi_doc_status = overwrite_file(&full_path, &contents)?;
+
+        let extra_files = validation_result
+            .extra_files
+            .into_iter()
+            .map(|(path, contents)| {
+                let full_path = env.workspace_root.join(&path);
+                let status = overwrite_file(&full_path, &contents)?;
+                Ok((path, status))
+            })
+            .collect::<Result<_>>()?;
+
+        Ok(SpecOverwriteStatus {
+            summary,
+            openapi_doc: openapi_doc_status,
+            extra_files,
+        })
     }
 
-    pub(crate) fn check(&self, dir: &Utf8Path) -> Result<CheckStatus> {
+    pub(crate) fn check(&self, env: &Environment) -> Result<SpecCheckStatus> {
         let contents = self.to_json_bytes()?;
-        let summary = self
+        let (summary, validation_result) = self
             .validate_json(&contents)
             .context("OpenAPI document validation failed")?;
 
-        let full_path = dir.join(&self.filename);
-        let existing_contents =
-            read_opt(&full_path).context("failed to read contents on disk")?;
-
-        match existing_contents {
-            Some(existing_contents) if existing_contents == contents => {
-                Ok(CheckStatus::Ok(summary))
-            }
-            Some(existing_contents) => Ok(CheckStatus::Stale {
-                full_path,
-                actual: existing_contents,
-                expected: contents,
-            }),
-            None => Ok(CheckStatus::Missing),
-        }
+        let full_path = env.openapi_dir.join(&self.filename);
+        let openapi_doc_status = check_file(full_path, contents)?;
+
+        let extra_files = validation_result
+            .extra_files
+            .into_iter()
+            .map(|(path, contents)| {
+                let full_path = env.workspace_root.join(&path);
+                let status = check_file(full_path, contents)?;
+                Ok((path, status))
+            })
+            .collect::<Result<_>>()?;
+
+        Ok(SpecCheckStatus {
+            summary,
+            openapi_doc: openapi_doc_status,
+            extra_files,
+        })
     }
 
     pub(crate) fn to_openapi_doc(&self) -> Result<OpenAPI> {
@@ -216,7 +234,10 @@ impl ApiSpec {
         Ok(contents)
     }
 
-    fn validate_json(&self, contents: &[u8]) -> Result<DocumentSummary> {
+    fn validate_json(
+        &self,
+        contents: &[u8],
+    ) -> Result<(DocumentSummary, ValidationResult)> {
         let openapi_doc = contents_to_openapi(contents)
             .context("JSON returned by ApiDescription is not valid OpenAPI")?;
 
@@ -231,11 +252,51 @@ impl ApiSpec {
             return Err(anyhow::anyhow!("{}", errors.join("\n\n")));
         }
 
-        if let Some(extra_validation) = self.extra_validation {
-            extra_validation(&openapi_doc)?;
-        }
+        let extra_files = if let Some(extra_validation) = self.extra_validation
+        {
+            let mut validation_context =
+                ValidationContextImpl { errors: Vec::new(), files: Vec::new() };
+            extra_validation(
+                &openapi_doc,
+                ValidationContext::new(&mut validation_context),
+            );
+
+            if !validation_context.errors.is_empty() {
+                return Err(anyhow::anyhow!(
+                    "OpenAPI document extended validation failed:\n{}",
+                    validation_context
+                        .errors
+                        .iter()
+                        .map(|e| e.to_string())
+                        .collect::<Vec<_>>()
+                        .join("\n")
+                ));
+            }
 
-        Ok(DocumentSummary::new(&openapi_doc))
+            validation_context.files
+        } else {
+            Vec::new()
+        };
+
+        Ok((
+            DocumentSummary::new(&openapi_doc),
+            ValidationResult { extra_files },
+        ))
+    }
+}
+
+struct ValidationContextImpl {
+    errors: Vec<anyhow::Error>,
+    files: Vec<(Utf8PathBuf, Vec<u8>)>,
+}
+
+impl ValidationBackend for ValidationContextImpl {
+    fn report_error(&mut self, error: anyhow::Error) {
+        self.errors.push(error);
+    }
+
+    fn record_file_contents(&mut self, path: Utf8PathBuf, contents: Vec<u8>) {
+        self.files.push((path, contents));
+    }
 }
 
@@ -260,6 +321,32 @@ impl fmt::Display for ApiBoundary {
     }
 }
 
+#[derive(Debug)]
+#[must_use]
+pub(crate) struct SpecOverwriteStatus {
+    pub(crate) summary: DocumentSummary,
+    openapi_doc: OverwriteStatus,
+    extra_files: Vec<(Utf8PathBuf, OverwriteStatus)>,
+}
+
+impl SpecOverwriteStatus {
+    pub(crate) fn updated_count(&self) -> usize {
+        self.iter()
+            .filter(|(_, status)| matches!(status, OverwriteStatus::Updated))
+            .count()
+    }
+
+    fn iter(
+        &self,
+    ) -> impl Iterator<Item = (ApiSpecFile<'_>, &OverwriteStatus)> {
+        std::iter::once((ApiSpecFile::Openapi, &self.openapi_doc)).chain(
+            self.extra_files.iter().map(|(file_name, status)| {
+                (ApiSpecFile::Extra(file_name), status)
+            }),
+        )
+    }
+}
+
 #[derive(Debug)]
 #[must_use]
 pub(crate) enum OverwriteStatus {
@@ -267,10 +354,56 @@ pub(crate) enum OverwriteStatus {
     Unchanged,
 }
 
+#[derive(Debug)]
+#[must_use]
+pub(crate) struct SpecCheckStatus {
+    pub(crate) summary: DocumentSummary,
+    pub(crate) openapi_doc: CheckStatus,
+    pub(crate) extra_files: Vec<(Utf8PathBuf, CheckStatus)>,
+}
+
+impl SpecCheckStatus {
+    pub(crate) fn total_errors(&self) -> usize {
+        self.iter_errors().count()
+    }
+
+    pub(crate) fn extra_files_len(&self) -> usize {
+        self.extra_files.len()
+    }
+
+    pub(crate) fn iter_errors(
+        &self,
+    ) -> impl Iterator<Item = (ApiSpecFile<'_>, &CheckError)> {
+        std::iter::once((ApiSpecFile::Openapi, &self.openapi_doc))
+            .chain(self.extra_files.iter().map(|(file_name, status)| {
+                (ApiSpecFile::Extra(file_name), status)
+            }))
+            .filter_map(|(spec_file, status)| {
+                if let CheckStatus::Error(e) = status {
+                    Some((spec_file, e))
+                } else {
+                    None
+                }
+            })
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+pub(crate) enum ApiSpecFile<'a> {
+    Openapi,
+    Extra(&'a Utf8Path),
+}
+
 #[derive(Debug)]
 #[must_use]
 pub(crate) enum CheckStatus {
-    Ok(DocumentSummary),
+    Ok,
+    Error(CheckError),
+}
+
+#[derive(Debug)]
+#[must_use]
+pub(crate) enum CheckError {
     Stale { full_path: Utf8PathBuf, actual: Vec<u8>, expected: Vec<u8> },
     Missing,
 }
@@ -295,31 +428,45 @@ impl DocumentSummary {
     }
 }
 
-pub(crate) fn openapi_dir(dir: Option<Utf8PathBuf>) -> Result<Utf8PathBuf> {
-    match dir {
-        Some(dir) => Ok(dir.canonicalize_utf8().with_context(|| {
-            format!("failed to canonicalize directory: {}", dir)
-        })?),
-        None => find_openapi_dir().context("failed to find openapi directory"),
-    }
+#[derive(Debug)]
+#[must_use]
+struct ValidationResult {
+    // Extra files recorded by the validation context.
+    extra_files: Vec<(Utf8PathBuf, Vec<u8>)>,
+}
+
+pub(crate) struct Environment {
+    pub(crate) workspace_root: Utf8PathBuf,
+    pub(crate) openapi_dir: Utf8PathBuf,
 }
 
-pub(crate) fn find_openapi_dir() -> Result<Utf8PathBuf> {
-    let mut root = Utf8PathBuf::from(env!("CARGO_MANIFEST_DIR"));
-    // This crate is two levels down from the root of omicron, so go up twice.
-    root.pop();
-    root.pop();
+impl Environment {
+    pub(crate) fn new(openapi_dir: Option<Utf8PathBuf>) -> Result<Self> {
+        let mut root = Utf8PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+        // This crate is two levels down from the root of omicron, so go up twice.
+        root.pop();
+        root.pop();
 
-    root.push("openapi");
-    let root = root.canonicalize_utf8().with_context(|| {
-        format!("failed to canonicalize openapi directory: {}", root)
-    })?;
+        let workspace_root = root.canonicalize_utf8().with_context(|| {
+            format!("failed to canonicalize workspace root: {}", root)
+        })?;
 
-    if !root.is_dir() {
-        anyhow::bail!("openapi root is not a directory: {}", root);
-    }
+        let openapi_dir =
+            openapi_dir.unwrap_or_else(|| workspace_root.join("openapi"));
+        let openapi_dir =
+            openapi_dir.canonicalize_utf8().with_context(|| {
+                format!(
+                    "failed to canonicalize openapi directory: {}",
+                    openapi_dir
+                )
+            })?;
+
+        if !openapi_dir.is_dir() {
+            anyhow::bail!("openapi root is not a directory: {}", openapi_dir);
+        }
 
-    Ok(root)
+        Ok(Self { workspace_root, openapi_dir })
+    }
 }
 
 /// Overwrite a file with new contents, if the contents are different.
@@ -344,6 +491,27 @@ fn overwrite_file(path: &Utf8Path, contents: &[u8]) -> Result<OverwriteStatus> {
     Ok(OverwriteStatus::Updated)
 }
 
+/// Check a file against expected contents.
+fn check_file(
+    full_path: Utf8PathBuf,
+    contents: Vec<u8>,
+) -> Result<CheckStatus> {
+    let existing_contents =
+        read_opt(&full_path).context("failed to read contents on disk")?;
+
+    match existing_contents {
+        Some(existing_contents) if existing_contents == contents => {
+            Ok(CheckStatus::Ok)
+        }
+        Some(existing_contents) => Ok(CheckStatus::Error(CheckError::Stale {
+            full_path,
+            actual: existing_contents,
+            expected: contents,
+        })),
+        None => Ok(CheckStatus::Error(CheckError::Missing)),
+    }
+}
+
 fn read_opt(path: &Utf8Path) -> std::io::Result<Option<Vec<u8>>> {
     match fs::read(path) {
         Ok(contents) => Ok(Some(contents)),
diff --git a/dev-tools/openapi-manager/types/Cargo.toml b/dev-tools/openapi-manager/types/Cargo.toml
new file mode 100644
index 0000000000..262529f1a9
--- /dev/null
+++ b/dev-tools/openapi-manager/types/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "openapi-manager-types"
+version = "0.1.0"
+edition = "2021"
+license = "MPL-2.0"
+
+[lints]
+workspace = true
+
+[dependencies]
+anyhow.workspace = true
+camino.workspace = true
+omicron-workspace-hack.workspace = true
diff --git a/dev-tools/openapi-manager/types/src/lib.rs b/dev-tools/openapi-manager/types/src/lib.rs
new file mode 100644
index 0000000000..b48ea03e74
--- /dev/null
+++ b/dev-tools/openapi-manager/types/src/lib.rs
@@ -0,0 +1,12 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Shared types for the OpenAPI manager.
+//!
+//! API trait crates can depend on this crate to get access to interfaces
+//! exposed by the OpenAPI manager.
+
+mod validation;
+
+pub use validation::*;
diff --git a/dev-tools/openapi-manager/types/src/validation.rs b/dev-tools/openapi-manager/types/src/validation.rs
new file mode 100644
index 0000000000..6f22228f4d
--- /dev/null
+++ b/dev-tools/openapi-manager/types/src/validation.rs
@@ -0,0 +1,47 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use camino::Utf8PathBuf;
+
+/// Context for validation of OpenAPI specifications.
+pub struct ValidationContext<'a> {
+    backend: &'a mut dyn ValidationBackend,
+}
+
+impl<'a> ValidationContext<'a> {
+    /// Not part of the public API -- only called by the OpenAPI manager.
+    #[doc(hidden)]
+    pub fn new(backend: &'a mut dyn ValidationBackend) -> Self {
+        Self { backend }
+    }
+
+    /// Reports a validation error.
+    pub fn report_error(&mut self, error: anyhow::Error) {
+        self.backend.report_error(error);
+    }
+
+    /// Records that the file has the given contents.
+    ///
+    /// In check mode, if the files differ, an error is logged.
+    ///
+    /// In generate mode, the file is overwritten with the given contents.
+    ///
+    /// The path is treated as relative to the root of the repository.
+    pub fn record_file_contents(
+        &mut self,
+        path: impl Into<Utf8PathBuf>,
+        contents: Vec<u8>,
+    ) {
+        self.backend.record_file_contents(path.into(), contents);
+    }
+}
+
+/// The backend for validation.
+///
+/// Not part of the public API -- only implemented by the OpenAPI manager.
+#[doc(hidden)]
+pub trait ValidationBackend {
+    fn report_error(&mut self, error: anyhow::Error);
+    fn record_file_contents(&mut self, path: Utf8PathBuf, contents: Vec<u8>);
+}
diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml
index 8977507505..411aebaced 100644
--- a/nexus/Cargo.toml
+++ b/nexus/Cargo.toml
@@ -57,6 +57,7 @@ openssl.workspace = true
 oximeter-client.workspace = true
 oximeter-db = { workspace = true, default-features = false, features = [ "oxql" ] }
 oxnet.workspace = true
+oxql-types.workspace = true
 parse-display.workspace = true
 paste.workspace = true
 # See omicron-rpaths for more about the "pq-sys" dependency.
@@ -91,6 +92,7 @@ tough.workspace = true
 uuid.workspace = true
 
 nexus-auth.workspace = true
+nexus-auth-types.workspace = true
 nexus-defaults.workspace = true
 nexus-db-model.workspace = true
 nexus-db-queries.workspace = true
diff --git a/nexus/auth-types/Cargo.toml b/nexus/auth-types/Cargo.toml
new file mode 100644
index 0000000000..e375c4954d
--- /dev/null
+++ b/nexus/auth-types/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "nexus-auth-types"
+version = "0.1.0"
+edition = "2021"
+license = "MPL-2.0"
+
+[lints]
+workspace = true
+
+[dependencies]
+anyhow.workspace = true
+async-trait.workspace = true
+cookie.workspace = true
+dropshot.workspace = true
+http.workspace = true
+newtype_derive.workspace = true
+omicron-workspace-hack.workspace = true
diff --git a/nexus/auth/src/authn/external/cookies.rs b/nexus/auth-types/src/authn/cookies.rs
similarity index 100%
rename from nexus/auth/src/authn/external/cookies.rs
rename to nexus/auth-types/src/authn/cookies.rs
diff --git a/nexus/auth-types/src/authn/mod.rs b/nexus/auth-types/src/authn/mod.rs
new file mode 100644
index 0000000000..f87935428e
--- /dev/null
+++ b/nexus/auth-types/src/authn/mod.rs
@@ -0,0 +1,7 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Authentication types for the Nexus API.
+
+pub mod cookies;
diff --git a/nexus/auth-types/src/lib.rs b/nexus/auth-types/src/lib.rs
new file mode 100644
index 0000000000..4ac0f66367
--- /dev/null
+++ b/nexus/auth-types/src/lib.rs
@@ -0,0 +1,7 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Authentication and authorization types for the Nexus API.
+
+pub mod authn;
diff --git a/nexus/auth/Cargo.toml b/nexus/auth/Cargo.toml
index 1a926f1789..0bb2eb2c84 100644
--- a/nexus/auth/Cargo.toml
+++ b/nexus/auth/Cargo.toml
@@ -37,6 +37,7 @@ tokio = { workspace = true, features = ["full"] }
 uuid.workspace = true
 
 authz-macros.workspace = true
+nexus-auth-types.workspace = true
 nexus-db-fixed-data.workspace = true
 nexus-db-model.workspace = true
 nexus-types.workspace = true
diff --git a/nexus/auth/src/authn/external/mod.rs b/nexus/auth/src/authn/external/mod.rs
index ccb7218285..5c7fc7af05 100644
--- a/nexus/auth/src/authn/external/mod.rs
+++ b/nexus/auth/src/authn/external/mod.rs
@@ -13,7 +13,6 @@ use slog::trace;
 use std::borrow::Borrow;
 use uuid::Uuid;
 
-pub mod cookies;
 pub mod session_cookie;
 pub mod spoof;
 pub mod token;
diff --git a/nexus/auth/src/authn/external/session_cookie.rs b/nexus/auth/src/authn/external/session_cookie.rs
index 7811bf2826..d4c7792069 100644
--- a/nexus/auth/src/authn/external/session_cookie.rs
+++ b/nexus/auth/src/authn/external/session_cookie.rs
@@ -4,7 +4,6 @@
 
 //! authn scheme for console that looks up cookie values in a session table
 
-use super::cookies::parse_cookies;
 use super::{HttpAuthnScheme, Reason, SchemeResult};
 use crate::authn;
 use crate::authn::{Actor, Details};
@@ -13,6 +12,7 @@ use async_trait::async_trait;
 use chrono::{DateTime, Duration, Utc};
 use dropshot::HttpError;
 use http::HeaderValue;
+use nexus_auth_types::authn::cookies::parse_cookies;
 use slog::debug;
 use uuid::Uuid;
 
diff --git a/nexus/src/app/metrics.rs b/nexus/src/app/metrics.rs
index 3728a3bdc1..3a6e7e27be 100644
--- a/nexus/src/app/metrics.rs
+++ b/nexus/src/app/metrics.rs
@@ -14,7 +14,7 @@ use nexus_db_queries::{
 };
 use omicron_common::api::external::{Error, InternalContext};
 use oximeter_db::{
-    oxql, Measurement, TimeseriesSchema, TimeseriesSchemaPaginationParams,
+    Measurement, TimeseriesSchema, TimeseriesSchemaPaginationParams,
 };
 use std::num::NonZeroU32;
 
@@ -138,7 +138,7 @@ impl super::Nexus {
         &self,
         opctx: &OpContext,
         query: impl AsRef<str>,
-    ) -> Result<Vec<oxql::Table>, Error> {
+    ) -> Result<Vec<oxql_types::Table>, Error> {
        // Must be a fleet user to list timeseries schema.
// // TODO-security: We need to figure out how to implement proper security diff --git a/nexus/src/external_api/console_api.rs b/nexus/src/external_api/console_api.rs index fb0a47bbea..5cc84db8ff 100644 --- a/nexus/src/external_api/console_api.rs +++ b/nexus/src/external_api/console_api.rs @@ -31,16 +31,14 @@ use dropshot::{ }; use http::{header, HeaderName, HeaderValue, Response, StatusCode, Uri}; use hyper::Body; +use nexus_auth_types::authn::cookies::Cookies; use nexus_db_model::AuthenticationMode; use nexus_db_queries::authn::silos::IdentityProviderType; use nexus_db_queries::context::OpContext; use nexus_db_queries::{ - authn::external::{ - cookies::Cookies, - session_cookie::{ - clear_session_cookie_header_value, session_cookie_header_value, - SessionStore, SESSION_COOKIE_COOKIE_NAME, - }, + authn::external::session_cookie::{ + clear_session_cookie_header_value, session_cookie_header_value, + SessionStore, SESSION_COOKIE_COOKIE_NAME, }, db::identity::Asset, }; diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 8e8b63229b..f5e0657100 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -98,6989 +98,7308 @@ use uuid::Uuid; type NexusApiDescription = ApiDescription; -/// Returns a description of the external nexus API -pub(crate) fn external_api() -> NexusApiDescription { - fn register_endpoints( - api: &mut NexusApiDescription, - ) -> Result<(), ApiDescriptionRegisterError> { - api.register(ping)?; - - api.register(system_policy_view)?; - api.register(system_policy_update)?; - - api.register(policy_view)?; - api.register(policy_update)?; - - api.register(project_list)?; - api.register(project_create)?; - api.register(project_view)?; - api.register(project_delete)?; - api.register(project_update)?; - api.register(project_policy_view)?; - api.register(project_policy_update)?; - api.register(project_ip_pool_list)?; - api.register(project_ip_pool_view)?; - - // Operator-Accessible IP Pools API - api.register(ip_pool_list)?; - api.register(ip_pool_create)?; - api.register(ip_pool_silo_list)?; - api.register(ip_pool_silo_link)?; - api.register(ip_pool_silo_unlink)?; - api.register(ip_pool_silo_update)?; - api.register(ip_pool_view)?; - api.register(ip_pool_delete)?; - api.register(ip_pool_update)?; - // Variants for internal services - api.register(ip_pool_service_view)?; - api.register(ip_pool_utilization_view)?; - - // Operator-Accessible IP Pool Range API - api.register(ip_pool_range_list)?; - api.register(ip_pool_range_add)?; - api.register(ip_pool_range_remove)?; - // Variants for internal services - api.register(ip_pool_service_range_list)?; - api.register(ip_pool_service_range_add)?; - api.register(ip_pool_service_range_remove)?; - - api.register(floating_ip_list)?; - api.register(floating_ip_create)?; - api.register(floating_ip_view)?; - api.register(floating_ip_update)?; - api.register(floating_ip_delete)?; - api.register(floating_ip_attach)?; - api.register(floating_ip_detach)?; - - api.register(disk_list)?; - api.register(disk_create)?; - api.register(disk_view)?; - api.register(disk_delete)?; - api.register(disk_metrics_list)?; - - api.register(disk_bulk_write_import_start)?; - api.register(disk_bulk_write_import)?; - api.register(disk_bulk_write_import_stop)?; - api.register(disk_finalize_import)?; - - api.register(instance_list)?; - api.register(instance_view)?; - api.register(instance_create)?; - api.register(instance_delete)?; - api.register(instance_reboot)?; - 
api.register(instance_start)?; - api.register(instance_stop)?; - api.register(instance_disk_list)?; - api.register(instance_disk_attach)?; - api.register(instance_disk_detach)?; - api.register(instance_serial_console)?; - api.register(instance_serial_console_stream)?; - api.register(instance_ssh_public_key_list)?; - - api.register(image_list)?; - api.register(image_create)?; - api.register(image_view)?; - api.register(image_delete)?; - api.register(image_promote)?; - api.register(image_demote)?; - - api.register(snapshot_list)?; - api.register(snapshot_create)?; - api.register(snapshot_view)?; - api.register(snapshot_delete)?; - - api.register(vpc_list)?; - api.register(vpc_create)?; - api.register(vpc_view)?; - api.register(vpc_update)?; - api.register(vpc_delete)?; - - api.register(vpc_subnet_list)?; - api.register(vpc_subnet_view)?; - api.register(vpc_subnet_create)?; - api.register(vpc_subnet_delete)?; - api.register(vpc_subnet_update)?; - api.register(vpc_subnet_list_network_interfaces)?; - - api.register(instance_network_interface_create)?; - api.register(instance_network_interface_list)?; - api.register(instance_network_interface_view)?; - api.register(instance_network_interface_update)?; - api.register(instance_network_interface_delete)?; - - api.register(instance_external_ip_list)?; - api.register(instance_ephemeral_ip_attach)?; - api.register(instance_ephemeral_ip_detach)?; - - api.register(vpc_router_list)?; - api.register(vpc_router_view)?; - api.register(vpc_router_create)?; - api.register(vpc_router_delete)?; - api.register(vpc_router_update)?; - - api.register(vpc_router_route_list)?; - api.register(vpc_router_route_view)?; - api.register(vpc_router_route_create)?; - api.register(vpc_router_route_delete)?; - api.register(vpc_router_route_update)?; - - api.register(vpc_firewall_rules_view)?; - api.register(vpc_firewall_rules_update)?; - - api.register(rack_list)?; - api.register(rack_view)?; - api.register(sled_list)?; - api.register(sled_view)?; - api.register(sled_set_provision_policy)?; - api.register(sled_instance_list)?; - api.register(sled_physical_disk_list)?; - api.register(physical_disk_list)?; - api.register(physical_disk_view)?; - api.register(switch_list)?; - api.register(switch_view)?; - api.register(sled_list_uninitialized)?; - api.register(sled_add)?; - - api.register(user_builtin_list)?; - api.register(user_builtin_view)?; - - api.register(role_list)?; - api.register(role_view)?; - - api.register(current_user_view)?; - api.register(current_user_groups)?; - api.register(current_user_ssh_key_list)?; - api.register(current_user_ssh_key_view)?; - api.register(current_user_ssh_key_create)?; - api.register(current_user_ssh_key_delete)?; - - // Customer network integration - api.register(networking_address_lot_list)?; - api.register(networking_address_lot_create)?; - api.register(networking_address_lot_delete)?; - api.register(networking_address_lot_block_list)?; - - api.register(networking_loopback_address_create)?; - api.register(networking_loopback_address_delete)?; - api.register(networking_loopback_address_list)?; - - api.register(networking_switch_port_settings_list)?; - api.register(networking_switch_port_settings_view)?; - api.register(networking_switch_port_settings_create)?; - api.register(networking_switch_port_settings_delete)?; - - api.register(networking_switch_port_list)?; - api.register(networking_switch_port_status)?; - api.register(networking_switch_port_apply_settings)?; - api.register(networking_switch_port_clear_settings)?; - - 
api.register(networking_bgp_config_create)?; - api.register(networking_bgp_config_list)?; - api.register(networking_bgp_status)?; - api.register(networking_bgp_imported_routes_ipv4)?; - api.register(networking_bgp_config_delete)?; - api.register(networking_bgp_announce_set_update)?; - api.register(networking_bgp_announce_set_list)?; - api.register(networking_bgp_announce_set_delete)?; - api.register(networking_bgp_message_history)?; - - api.register(networking_bfd_enable)?; - api.register(networking_bfd_disable)?; - api.register(networking_bfd_status)?; - - api.register(networking_allow_list_view)?; - api.register(networking_allow_list_update)?; - - api.register(utilization_view)?; - - // Fleet-wide API operations - api.register(silo_list)?; - api.register(silo_create)?; - api.register(silo_view)?; - api.register(silo_delete)?; - api.register(silo_policy_view)?; - api.register(silo_policy_update)?; - api.register(silo_ip_pool_list)?; - - api.register(silo_utilization_view)?; - api.register(silo_utilization_list)?; - - api.register(system_quotas_list)?; - api.register(silo_quotas_view)?; - api.register(silo_quotas_update)?; - - api.register(silo_identity_provider_list)?; - - api.register(saml_identity_provider_create)?; - api.register(saml_identity_provider_view)?; - - api.register(local_idp_user_create)?; - api.register(local_idp_user_delete)?; - api.register(local_idp_user_set_password)?; - - api.register(certificate_list)?; - api.register(certificate_create)?; - api.register(certificate_view)?; - api.register(certificate_delete)?; - - api.register(system_metric)?; - api.register(silo_metric)?; - api.register(timeseries_schema_list)?; - api.register(timeseries_query)?; - - api.register(system_update_put_repository)?; - api.register(system_update_get_repository)?; - - api.register(user_list)?; - api.register(silo_user_list)?; - api.register(silo_user_view)?; - api.register(group_list)?; - api.register(group_view)?; - - // Console API operations - api.register(console_api::login_begin)?; - api.register(console_api::login_local_begin)?; - api.register(console_api::login_local)?; - api.register(console_api::login_saml_begin)?; - api.register(console_api::login_saml_redirect)?; - api.register(console_api::login_saml)?; - api.register(console_api::logout)?; - - api.register(console_api::console_lookup)?; - api.register(console_api::console_projects)?; - api.register(console_api::console_projects_new)?; - api.register(console_api::console_silo_images)?; - api.register(console_api::console_silo_utilization)?; - api.register(console_api::console_silo_access)?; - api.register(console_api::console_root)?; - api.register(console_api::console_settings_page)?; - api.register(console_api::console_system_page)?; - api.register(console_api::asset)?; - - api.register(device_auth::device_auth_request)?; - api.register(device_auth::device_auth_verify)?; - api.register(device_auth::device_auth_success)?; - api.register(device_auth::device_auth_confirm)?; - api.register(device_auth::device_access_token)?; - - Ok(()) - } - - fn register_experimental( - api: &mut NexusApiDescription, - endpoint: T, - ) -> Result<(), ApiDescriptionRegisterError> - where - T: Into>, +// Temporary module just to add a level of indentation and avoid ruining blame. 
+mod imp { + use super::*; + + /// Returns a description of the external nexus API + pub(crate) fn external_api() -> NexusApiDescription { + fn register_endpoints( + api: &mut NexusApiDescription, + ) -> Result<(), ApiDescriptionRegisterError> { + api.register(ping)?; + + api.register(system_policy_view)?; + api.register(system_policy_update)?; + + api.register(policy_view)?; + api.register(policy_update)?; + + api.register(project_list)?; + api.register(project_create)?; + api.register(project_view)?; + api.register(project_delete)?; + api.register(project_update)?; + api.register(project_policy_view)?; + api.register(project_policy_update)?; + api.register(project_ip_pool_list)?; + api.register(project_ip_pool_view)?; + + // Operator-Accessible IP Pools API + api.register(ip_pool_list)?; + api.register(ip_pool_create)?; + api.register(ip_pool_silo_list)?; + api.register(ip_pool_silo_link)?; + api.register(ip_pool_silo_unlink)?; + api.register(ip_pool_silo_update)?; + api.register(ip_pool_view)?; + api.register(ip_pool_delete)?; + api.register(ip_pool_update)?; + // Variants for internal services + api.register(ip_pool_service_view)?; + api.register(ip_pool_utilization_view)?; + + // Operator-Accessible IP Pool Range API + api.register(ip_pool_range_list)?; + api.register(ip_pool_range_add)?; + api.register(ip_pool_range_remove)?; + // Variants for internal services + api.register(ip_pool_service_range_list)?; + api.register(ip_pool_service_range_add)?; + api.register(ip_pool_service_range_remove)?; + + api.register(floating_ip_list)?; + api.register(floating_ip_create)?; + api.register(floating_ip_view)?; + api.register(floating_ip_update)?; + api.register(floating_ip_delete)?; + api.register(floating_ip_attach)?; + api.register(floating_ip_detach)?; + + api.register(disk_list)?; + api.register(disk_create)?; + api.register(disk_view)?; + api.register(disk_delete)?; + api.register(disk_metrics_list)?; + + api.register(disk_bulk_write_import_start)?; + api.register(disk_bulk_write_import)?; + api.register(disk_bulk_write_import_stop)?; + api.register(disk_finalize_import)?; + + api.register(instance_list)?; + api.register(instance_view)?; + api.register(instance_create)?; + api.register(instance_delete)?; + api.register(instance_reboot)?; + api.register(instance_start)?; + api.register(instance_stop)?; + api.register(instance_disk_list)?; + api.register(instance_disk_attach)?; + api.register(instance_disk_detach)?; + api.register(instance_serial_console)?; + api.register(instance_serial_console_stream)?; + api.register(instance_ssh_public_key_list)?; + + api.register(image_list)?; + api.register(image_create)?; + api.register(image_view)?; + api.register(image_delete)?; + api.register(image_promote)?; + api.register(image_demote)?; + + api.register(snapshot_list)?; + api.register(snapshot_create)?; + api.register(snapshot_view)?; + api.register(snapshot_delete)?; + + api.register(vpc_list)?; + api.register(vpc_create)?; + api.register(vpc_view)?; + api.register(vpc_update)?; + api.register(vpc_delete)?; + + api.register(vpc_subnet_list)?; + api.register(vpc_subnet_view)?; + api.register(vpc_subnet_create)?; + api.register(vpc_subnet_delete)?; + api.register(vpc_subnet_update)?; + api.register(vpc_subnet_list_network_interfaces)?; + + api.register(instance_network_interface_create)?; + api.register(instance_network_interface_list)?; + api.register(instance_network_interface_view)?; + api.register(instance_network_interface_update)?; + api.register(instance_network_interface_delete)?; + + 
api.register(instance_external_ip_list)?; + api.register(instance_ephemeral_ip_attach)?; + api.register(instance_ephemeral_ip_detach)?; + + api.register(vpc_router_list)?; + api.register(vpc_router_view)?; + api.register(vpc_router_create)?; + api.register(vpc_router_delete)?; + api.register(vpc_router_update)?; + + api.register(vpc_router_route_list)?; + api.register(vpc_router_route_view)?; + api.register(vpc_router_route_create)?; + api.register(vpc_router_route_delete)?; + api.register(vpc_router_route_update)?; + + api.register(vpc_firewall_rules_view)?; + api.register(vpc_firewall_rules_update)?; + + api.register(rack_list)?; + api.register(rack_view)?; + api.register(sled_list)?; + api.register(sled_view)?; + api.register(sled_set_provision_policy)?; + api.register(sled_instance_list)?; + api.register(sled_physical_disk_list)?; + api.register(physical_disk_list)?; + api.register(physical_disk_view)?; + api.register(switch_list)?; + api.register(switch_view)?; + api.register(sled_list_uninitialized)?; + api.register(sled_add)?; + + api.register(user_builtin_list)?; + api.register(user_builtin_view)?; + + api.register(role_list)?; + api.register(role_view)?; + + api.register(current_user_view)?; + api.register(current_user_groups)?; + api.register(current_user_ssh_key_list)?; + api.register(current_user_ssh_key_view)?; + api.register(current_user_ssh_key_create)?; + api.register(current_user_ssh_key_delete)?; + + // Customer network integration + api.register(networking_address_lot_list)?; + api.register(networking_address_lot_create)?; + api.register(networking_address_lot_delete)?; + api.register(networking_address_lot_block_list)?; + + api.register(networking_loopback_address_create)?; + api.register(networking_loopback_address_delete)?; + api.register(networking_loopback_address_list)?; + + api.register(networking_switch_port_settings_list)?; + api.register(networking_switch_port_settings_view)?; + api.register(networking_switch_port_settings_create)?; + api.register(networking_switch_port_settings_delete)?; + + api.register(networking_switch_port_list)?; + api.register(networking_switch_port_status)?; + api.register(networking_switch_port_apply_settings)?; + api.register(networking_switch_port_clear_settings)?; + + api.register(networking_bgp_config_create)?; + api.register(networking_bgp_config_list)?; + api.register(networking_bgp_status)?; + api.register(networking_bgp_imported_routes_ipv4)?; + api.register(networking_bgp_config_delete)?; + api.register(networking_bgp_announce_set_update)?; + api.register(networking_bgp_announce_set_list)?; + api.register(networking_bgp_announce_set_delete)?; + api.register(networking_bgp_message_history)?; + + api.register(networking_bfd_enable)?; + api.register(networking_bfd_disable)?; + api.register(networking_bfd_status)?; + + api.register(networking_allow_list_view)?; + api.register(networking_allow_list_update)?; + + api.register(utilization_view)?; + + // Fleet-wide API operations + api.register(silo_list)?; + api.register(silo_create)?; + api.register(silo_view)?; + api.register(silo_delete)?; + api.register(silo_policy_view)?; + api.register(silo_policy_update)?; + api.register(silo_ip_pool_list)?; + + api.register(silo_utilization_view)?; + api.register(silo_utilization_list)?; + + api.register(system_quotas_list)?; + api.register(silo_quotas_view)?; + api.register(silo_quotas_update)?; + + api.register(silo_identity_provider_list)?; + + api.register(saml_identity_provider_create)?; + api.register(saml_identity_provider_view)?; + + 
+            api.register(local_idp_user_create)?;
+            api.register(local_idp_user_delete)?;
+            api.register(local_idp_user_set_password)?;
+
+            api.register(certificate_list)?;
+            api.register(certificate_create)?;
+            api.register(certificate_view)?;
+            api.register(certificate_delete)?;
+
+            api.register(system_metric)?;
+            api.register(silo_metric)?;
+            api.register(timeseries_schema_list)?;
+            api.register(timeseries_query)?;
+
+            api.register(system_update_put_repository)?;
+            api.register(system_update_get_repository)?;
+
+            api.register(user_list)?;
+            api.register(silo_user_list)?;
+            api.register(silo_user_view)?;
+            api.register(group_list)?;
+            api.register(group_view)?;
+
+            // Console API operations
+            api.register(console_api::login_begin)?;
+            api.register(console_api::login_local_begin)?;
+            api.register(console_api::login_local)?;
+            api.register(console_api::login_saml_begin)?;
+            api.register(console_api::login_saml_redirect)?;
+            api.register(console_api::login_saml)?;
+            api.register(console_api::logout)?;
+
+            api.register(console_api::console_lookup)?;
+            api.register(console_api::console_projects)?;
+            api.register(console_api::console_projects_new)?;
+            api.register(console_api::console_silo_images)?;
+            api.register(console_api::console_silo_utilization)?;
+            api.register(console_api::console_silo_access)?;
+            api.register(console_api::console_root)?;
+            api.register(console_api::console_settings_page)?;
+            api.register(console_api::console_system_page)?;
+            api.register(console_api::asset)?;
+
+            api.register(device_auth::device_auth_request)?;
+            api.register(device_auth::device_auth_verify)?;
+            api.register(device_auth::device_auth_success)?;
+            api.register(device_auth::device_auth_confirm)?;
+            api.register(device_auth::device_access_token)?;
+
+            Ok(())
+        }
+
+        fn register_experimental<T>(
+            api: &mut NexusApiDescription,
+            endpoint: T,
+        ) -> Result<(), ApiDescriptionRegisterError>
+        where
+            T: Into<ApiEndpoint<ApiContext>>,
+        {
+            let mut ep: ApiEndpoint<ApiContext> = endpoint.into();
+            // only one tag is allowed
+            ep.tags = vec![String::from("hidden")];
+            ep.path = String::from("/experimental") + &ep.path;
+            api.register(ep)
+        }
+
+        fn register_experimental_endpoints(
+            api: &mut NexusApiDescription,
+        ) -> Result<(), ApiDescriptionRegisterError> {
+            register_experimental(api, probe_list)?;
+            register_experimental(api, probe_view)?;
+            register_experimental(api, probe_create)?;
+            register_experimental(api, probe_delete)?;
+
+            Ok(())
+        }
+
+        let conf =
+            serde_json::from_str(include_str!("./tag-config.json")).unwrap();
+        let mut api = NexusApiDescription::new().tag_config(conf);
+
+        if let Err(err) = register_endpoints(&mut api) {
+            panic!("failed to register entrypoints: {}", err);
+        }
+        if let Err(err) = register_experimental_endpoints(&mut api) {
+            panic!("failed to register experimental entrypoints: {}", err);
+        }
+        api
+    }
+
+    // API ENDPOINT FUNCTION NAMING CONVENTIONS
+    //
+    // Generally, HTTP resources are grouped within some collection. For a
+    // relatively simple example:
+    //
+    //   GET    v1/projects            (list the projects in the collection)
+    //   POST   v1/projects            (create a project in the collection)
+    //   GET    v1/projects/{project}  (look up a project in the collection)
+    //   DELETE v1/projects/{project}  (delete a project in the collection)
+    //   PUT    v1/projects/{project}  (update a project in the collection)
+    //
+    // We pick a name for the function that implements a given API entrypoint
+    // based on how we expect it to appear in the CLI subcommand hierarchy. For
+    // example:
+    //
+    //   GET    v1/projects            -> project_list()
+    //   POST   v1/projects            -> project_create()
+    //   GET    v1/projects/{project}  -> project_view()
+    //   DELETE v1/projects/{project}  -> project_delete()
+    //   PUT    v1/projects/{project}  -> project_update()
+    //
+    // Note that the path typically uses the entity's plural form while the
+    // function name uses its singular.
+    //
+    // Operations beyond list, create, view, delete, and update should use a
+    // descriptive noun or verb, again bearing in mind that this will be
+    // transcribed into the CLI and SDKs:
+    //
+    //   POST -> instance_reboot
+    //   POST -> instance_stop
+    //   GET  -> instance_serial_console
+    //
+    // Note that these function names end up in generated OpenAPI spec as the
+    // operationId for each endpoint, and therefore represent a contract with
+    // clients. Client generators use operationId to name API methods, so changing
+    // a function name is a breaking change from a client perspective.
+
+    /// Ping API
+    ///
+    /// Always responds with Ok if it responds at all.
+    #[endpoint {
+        method = GET,
+        path = "/v1/ping",
+        tags = ["system/status"],
+    }]
+    async fn ping(
+        _rqctx: RequestContext<ApiContext>,
+    ) -> Result<HttpResponseOk<views::Ping>, HttpError> {
+        Ok(HttpResponseOk(views::Ping { status: views::PingStatus::Ok }))
+    }
+
+    /// Fetch top-level IAM policy
+    #[endpoint {
+        method = GET,
+        path = "/v1/system/policy",
+        tags = ["policy"],
+    }]
+    async fn system_policy_view(
+        rqctx: RequestContext<ApiContext>,
+    ) -> Result<HttpResponseOk<shared::Policy<shared::FleetRole>>, HttpError>
+    {
+        let apictx = rqctx.context();
+        let handler = async {
+            let nexus = &apictx.context.nexus;
+            let opctx =
+                crate::context::op_context_for_external_api(&rqctx).await?;
+            let policy = nexus.fleet_fetch_policy(&opctx).await?;
+            Ok(HttpResponseOk(policy))
+        };
+        apictx
+            .context
+            .external_latencies
+            .instrument_dropshot_handler(&rqctx, handler)
+            .await
+    }
+
+    /// Update top-level IAM policy
+    #[endpoint {
+        method = PUT,
+        path = "/v1/system/policy",
+        tags = ["policy"],
+    }]
+    async fn system_policy_update(
+        rqctx: RequestContext<ApiContext>,
+        new_policy: TypedBody<shared::Policy<shared::FleetRole>>,
+    ) -> Result<HttpResponseOk<shared::Policy<shared::FleetRole>>, HttpError> {
-        let mut ep: ApiEndpoint<ApiContext> = endpoint.into();
-        // only one tag is allowed
-        ep.tags = vec![String::from("hidden")];
-        ep.path = String::from("/experimental") + &ep.path;
-        api.register(ep)
+        let apictx = rqctx.context();
+        let handler = async {
+            let nexus = &apictx.context.nexus;
+            let new_policy = new_policy.into_inner();
+            let nasgns = new_policy.role_assignments.len();
+            // This should have been validated during parsing.
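+            // (bail_unless! turns a violated invariant into a 500-level
+            // internal error rather than a client-facing 400, since the
+            // parser is expected to have rejected oversized policies already.)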
+            bail_unless!(nasgns <= shared::MAX_ROLE_ASSIGNMENTS_PER_RESOURCE);
+            let opctx =
+                crate::context::op_context_for_external_api(&rqctx).await?;
+            let policy = nexus.fleet_update_policy(&opctx, &new_policy).await?;
+            Ok(HttpResponseOk(policy))
+        };
+        apictx
+            .context
+            .external_latencies
+            .instrument_dropshot_handler(&rqctx, handler)
+            .await
     }

-    fn register_experimental_endpoints(
-        api: &mut NexusApiDescription,
-    ) -> Result<(), ApiDescriptionRegisterError> {
-        register_experimental(api, probe_list)?;
-        register_experimental(api, probe_view)?;
-        register_experimental(api, probe_create)?;
-        register_experimental(api, probe_delete)?;
+    /// Fetch current silo's IAM policy
+    #[endpoint {
+        method = GET,
+        path = "/v1/policy",
+        tags = ["silos"],
+    }]
+    pub(crate) async fn policy_view(
+        rqctx: RequestContext<ApiContext>,
+    ) -> Result<HttpResponseOk<shared::Policy<shared::SiloRole>>, HttpError>
+    {
+        let apictx = rqctx.context();
+        let handler = async {
+            let nexus = &apictx.context.nexus;
+            let opctx =
+                crate::context::op_context_for_external_api(&rqctx).await?;
+            let silo: NameOrId = opctx
+                .authn
+                .silo_required()
+                .internal_context("loading current silo")?
+                .id()
+                .into();
+
+            let silo_lookup = nexus.silo_lookup(&opctx, silo)?;
+            let policy = nexus.silo_fetch_policy(&opctx, &silo_lookup).await?;
+            Ok(HttpResponseOk(policy))
+        };
+        apictx
+            .context
+            .external_latencies
+            .instrument_dropshot_handler(&rqctx, handler)
+            .await
+    }

-        Ok(())
+    /// Update current silo's IAM policy
+    #[endpoint {
+        method = PUT,
+        path = "/v1/policy",
+        tags = ["silos"],
+    }]
+    async fn policy_update(
+        rqctx: RequestContext<ApiContext>,
+        new_policy: TypedBody<shared::Policy<shared::SiloRole>>,
+    ) -> Result<HttpResponseOk<shared::Policy<shared::SiloRole>>, HttpError>
+    {
+        let apictx = rqctx.context();
+        let handler = async {
+            let nexus = &apictx.context.nexus;
+            let new_policy = new_policy.into_inner();
+            let nasgns = new_policy.role_assignments.len();
+            // This should have been validated during parsing.
+            bail_unless!(nasgns <= shared::MAX_ROLE_ASSIGNMENTS_PER_RESOURCE);
+            let opctx =
+                crate::context::op_context_for_external_api(&rqctx).await?;
+            let silo: NameOrId = opctx
+                .authn
+                .silo_required()
+                .internal_context("loading current silo")?
+                .id()
+                .into();
+            let silo_lookup = nexus.silo_lookup(&opctx, silo)?;
+            let policy = nexus
+                .silo_update_policy(&opctx, &silo_lookup, &new_policy)
+                .await?;
+            Ok(HttpResponseOk(policy))
+        };
+        apictx
+            .context
+            .external_latencies
+            .instrument_dropshot_handler(&rqctx, handler)
+            .await
     }

-    let conf =
-        serde_json::from_str(include_str!("./tag-config.json")).unwrap();
-    let mut api = NexusApiDescription::new().tag_config(conf);
+    /// Fetch resource utilization for user's current silo
+    #[endpoint {
+        method = GET,
+        path = "/v1/utilization",
+        tags = ["silos"],
+    }]
+    async fn utilization_view(
+        rqctx: RequestContext<ApiContext>,
+    ) -> Result<HttpResponseOk<views::Utilization>, HttpError> {
+        let apictx = rqctx.context();
+        let handler = async {
+            let nexus = &apictx.context.nexus;
+            let opctx =
+                crate::context::op_context_for_external_api(&rqctx).await?;
+            let silo_lookup = nexus.current_silo_lookup(&opctx)?;
+            let utilization =
+                nexus.silo_utilization_view(&opctx, &silo_lookup).await?;
+
+            Ok(HttpResponseOk(utilization.into()))
+        };
+        apictx
+            .context
+            .external_latencies
+            .instrument_dropshot_handler(&rqctx, handler)
+            .await
+    }

-    if let Err(err) = register_endpoints(&mut api) {
-        panic!("failed to register entrypoints: {}", err);
+    /// Fetch current utilization for given silo
+    #[endpoint {
+        method = GET,
+        path = "/v1/system/utilization/silos/{silo}",
+        tags = ["system/silos"],
+    }]
+    async fn silo_utilization_view(
+        rqctx: RequestContext<ApiContext>,
+        path_params: Path<params::SiloPath>,
+    ) -> Result<HttpResponseOk<views::SiloUtilization>, HttpError> {
+        let apictx = rqctx.context();
+        let handler = async {
+            let nexus = &apictx.context.nexus;
+
+            let opctx =
+                crate::context::op_context_for_external_api(&rqctx).await?;
+            let silo_lookup =
+                nexus.silo_lookup(&opctx, path_params.into_inner().silo)?;
+            let quotas =
+                nexus.silo_utilization_view(&opctx, &silo_lookup).await?;
+
+            Ok(HttpResponseOk(quotas.into()))
+        };
+        apictx
+            .context
+            .external_latencies
+            .instrument_dropshot_handler(&rqctx, handler)
+            .await
     }

-    if let Err(err) = register_experimental_endpoints(&mut api) {
-        panic!("failed to register experimental entrypoints: {}", err);
+    /// List current utilization state for all silos
+    #[endpoint {
+        method = GET,
+        path = "/v1/system/utilization/silos",
+        tags = ["system/silos"],
+    }]
+    async fn silo_utilization_list(
+        rqctx: RequestContext<ApiContext>,
+        query_params: Query<PaginatedByNameOrId>,
+    ) -> Result<HttpResponseOk<ResultsPage<views::SiloUtilization>>, HttpError> {
+        let apictx = rqctx.context();
+        let handler = async {
+            let nexus = &apictx.context.nexus;
+
+            let query = query_params.into_inner();
+            let pagparams = data_page_params_for(&rqctx, &query)?;
+            let scan_params = ScanByNameOrId::from_query(&query)?;
+            let paginated_by = name_or_id_pagination(&pagparams, scan_params)?;
+
+            let opctx =
+                crate::context::op_context_for_external_api(&rqctx).await?;
+            let utilization = nexus
+                .silo_utilization_list(&opctx, &paginated_by)
+                .await?
+                .into_iter()
+                .map(|p| p.into())
+                .collect();
+
+            Ok(HttpResponseOk(ScanByNameOrId::results_page(
+                &query,
+                utilization,
+                &marker_for_name_or_id,
+            )?))
+        };
+        apictx
+            .context
+            .external_latencies
+            .instrument_dropshot_handler(&rqctx, handler)
+            .await
     }
-    api
-}

-// API ENDPOINT FUNCTION NAMING CONVENTIONS
-//
-// Generally, HTTP resources are grouped within some collection.
For a -// relatively simple example: -// -// GET v1/projects (list the projects in the collection) -// POST v1/projects (create a project in the collection) -// GET v1/projects/{project} (look up a project in the collection) -// DELETE v1/projects/{project} (delete a project in the collection) -// PUT v1/projects/{project} (update a project in the collection) -// -// We pick a name for the function that implements a given API entrypoint -// based on how we expect it to appear in the CLI subcommand hierarchy. For -// example: -// -// GET v1/projects -> project_list() -// POST v1/projects -> project_create() -// GET v1/projects/{project} -> project_view() -// DELETE v1/projects/{project} -> project_delete() -// PUT v1/projects/{project} -> project_update() -// -// Note that the path typically uses the entity's plural form while the -// function name uses its singular. -// -// Operations beyond list, create, view, delete, and update should use a -// descriptive noun or verb, again bearing in mind that this will be -// transcribed into the CLI and SDKs: -// -// POST -> instance_reboot -// POST -> instance_stop -// GET -> instance_serial_console -// -// Note that these function names end up in generated OpenAPI spec as the -// operationId for each endpoint, and therefore represent a contract with -// clients. Client generators use operationId to name API methods, so changing -// a function name is a breaking change from a client perspective. - -/// Ping API -/// -/// Always responds with Ok if it responds at all. -#[endpoint { - method = GET, - path = "/v1/ping", - tags = ["system/status"], -}] -async fn ping( - _rqctx: RequestContext, -) -> Result, HttpError> { - Ok(HttpResponseOk(views::Ping { status: views::PingStatus::Ok })) -} + /// Lists resource quotas for all silos + #[endpoint { + method = GET, + path = "/v1/system/silo-quotas", + tags = ["system/silos"], + }] + async fn system_quotas_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let query = query_params.into_inner(); + let pagparams = data_page_params_for(&rqctx, &query)?; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let quotas = nexus + .fleet_list_quotas(&opctx, &pagparams) + .await? 
+ .into_iter() + .map(|p| p.into()) + .collect(); + + Ok(HttpResponseOk(ScanById::results_page( + &query, + quotas, + &|_, quota: &SiloQuotas| quota.silo_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch top-level IAM policy -#[endpoint { - method = GET, - path = "/v1/system/policy", - tags = ["policy"], -}] -async fn system_policy_view( - rqctx: RequestContext, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let policy = nexus.fleet_fetch_policy(&opctx).await?; - Ok(HttpResponseOk(policy)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch resource quotas for silo + #[endpoint { + method = GET, + path = "/v1/system/silos/{silo}/quotas", + tags = ["system/silos"], + }] + async fn silo_quotas_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let silo_lookup = + nexus.silo_lookup(&opctx, path_params.into_inner().silo)?; + let quota = nexus.silo_quotas_view(&opctx, &silo_lookup).await?; + Ok(HttpResponseOk(quota.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Update top-level IAM policy -#[endpoint { - method = PUT, - path = "/v1/system/policy", - tags = ["policy"], -}] -async fn system_policy_update( - rqctx: RequestContext, - new_policy: TypedBody>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let new_policy = new_policy.into_inner(); - let nasgns = new_policy.role_assignments.len(); - // This should have been validated during parsing. - bail_unless!(nasgns <= shared::MAX_ROLE_ASSIGNMENTS_PER_RESOURCE); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let policy = nexus.fleet_update_policy(&opctx, &new_policy).await?; - Ok(HttpResponseOk(policy)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Update resource quotas for silo + /// + /// If a quota value is not specified, it will remain unchanged. 
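+    // Note: the merge behavior described above falls out of the update body,
+    // whose fields are all optional; a field left as `None` is kept unchanged
+    // rather than cleared.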
+ #[endpoint { + method = PUT, + path = "/v1/system/silos/{silo}/quotas", + tags = ["system/silos"], + }] + async fn silo_quotas_update( + rqctx: RequestContext, + path_params: Path, + new_quota: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let silo_lookup = + nexus.silo_lookup(&opctx, path_params.into_inner().silo)?; + let quota = nexus + .silo_update_quota( + &opctx, + &silo_lookup, + &new_quota.into_inner(), + ) + .await?; + Ok(HttpResponseOk(quota.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch current silo's IAM policy -#[endpoint { - method = GET, - path = "/v1/policy", - tags = ["silos"], - }] -pub(crate) async fn policy_view( - rqctx: RequestContext, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let silo: NameOrId = opctx - .authn - .silo_required() - .internal_context("loading current silo")? - .id() - .into(); - - let silo_lookup = nexus.silo_lookup(&opctx, silo)?; - let policy = nexus.silo_fetch_policy(&opctx, &silo_lookup).await?; - Ok(HttpResponseOk(policy)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List silos + /// + /// Lists silos that are discoverable based on the current permissions. + #[endpoint { + method = GET, + path = "/v1/system/silos", + tags = ["system/silos"], + }] + async fn silo_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let silos = nexus + .silos_list(&opctx, &paginated_by) + .await? + .into_iter() + .map(|p| p.try_into()) + .collect::, Error>>()?; + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + silos, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Update current silo's IAM policy -#[endpoint { - method = PUT, - path = "/v1/policy", - tags = ["silos"], -}] -async fn policy_update( - rqctx: RequestContext, - new_policy: TypedBody>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let new_policy = new_policy.into_inner(); - let nasgns = new_policy.role_assignments.len(); - // This should have been validated during parsing. - bail_unless!(nasgns <= shared::MAX_ROLE_ASSIGNMENTS_PER_RESOURCE); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let silo: NameOrId = opctx - .authn - .silo_required() - .internal_context("loading current silo")? 
- .id() - .into(); - let silo_lookup = nexus.silo_lookup(&opctx, silo)?; - let policy = - nexus.silo_update_policy(&opctx, &silo_lookup, &new_policy).await?; - Ok(HttpResponseOk(policy)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Create a silo + #[endpoint { + method = POST, + path = "/v1/system/silos", + tags = ["system/silos"], + }] + async fn silo_create( + rqctx: RequestContext, + new_silo_params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let silo = + nexus.silo_create(&opctx, new_silo_params.into_inner()).await?; + Ok(HttpResponseCreated(silo.try_into()?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch resource utilization for user's current silo -#[endpoint { - method = GET, - path = "/v1/utilization", - tags = ["silos"], -}] -async fn utilization_view( - rqctx: RequestContext, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let silo_lookup = nexus.current_silo_lookup(&opctx)?; - let utilization = - nexus.silo_utilization_view(&opctx, &silo_lookup).await?; - - Ok(HttpResponseOk(utilization.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch silo + /// + /// Fetch silo by name or ID. + #[endpoint { + method = GET, + path = "/v1/system/silos/{silo}", + tags = ["system/silos"], + }] + async fn silo_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; + let (.., silo) = silo_lookup.fetch().await?; + Ok(HttpResponseOk(silo.try_into()?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch current utilization for given silo -#[endpoint { - method = GET, - path = "/v1/system/utilization/silos/{silo}", - tags = ["system/silos"], -}] -async fn silo_utilization_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; + /// List IP pools linked to silo + /// + /// Linked IP pools are available to users in the specified silo. A silo can + /// have at most one default pool. IPs are allocated from the default pool when + /// users ask for one without specifying a pool. 
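+    // The pagination plumbing below is the standard Dropshot pattern used
+    // throughout this file: `data_page_params_for` extracts the page limit
+    // and token from the query string, `ScanByNameOrId::from_query` picks
+    // the sort order, and `results_page` emits one page of results plus the
+    // marker the client passes back (as `page_token`) to fetch the next page.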
+ #[endpoint { + method = GET, + path = "/v1/system/silos/{silo}/ip-pools", + tags = ["system/silos"], + }] + async fn silo_ip_pool_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + + let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; + let pools = nexus + .silo_ip_pool_list(&opctx, &silo_lookup, &paginated_by) + .await? + .iter() + .map(|(pool, silo_link)| views::SiloIpPool { + identity: pool.identity(), + is_default: silo_link.is_default, + }) + .collect(); + + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + pools, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let silo_lookup = - nexus.silo_lookup(&opctx, path_params.into_inner().silo)?; - let quotas = nexus.silo_utilization_view(&opctx, &silo_lookup).await?; - - Ok(HttpResponseOk(quotas.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} -/// List current utilization state for all silos -#[endpoint { - method = GET, - path = "/v1/system/utilization/silos", - tags = ["system/silos"], -}] -async fn silo_utilization_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; + /// Delete a silo + /// + /// Delete a silo by name or ID. 
+ #[endpoint { + method = DELETE, + path = "/v1/system/silos/{silo}", + tags = ["system/silos"], + }] + async fn silo_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let params = path_params.into_inner(); + let silo_lookup = nexus.silo_lookup(&opctx, params.silo)?; + nexus.silo_delete(&opctx, &silo_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let query = query_params.into_inner(); - let pagparams = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pagparams, scan_params)?; + /// Fetch silo IAM policy + #[endpoint { + method = GET, + path = "/v1/system/silos/{silo}/policy", + tags = ["system/silos"], + }] + async fn silo_policy_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; + let policy = nexus.silo_fetch_policy(&opctx, &silo_lookup).await?; + Ok(HttpResponseOk(policy)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let utilization = nexus - .silo_utilization_list(&opctx, &paginated_by) - .await? - .into_iter() - .map(|p| p.into()) - .collect(); - - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - utilization, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Update silo IAM policy + #[endpoint { + method = PUT, + path = "/v1/system/silos/{silo}/policy", + tags = ["system/silos"], + }] + async fn silo_policy_update( + rqctx: RequestContext, + path_params: Path, + new_policy: TypedBody>, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let new_policy = new_policy.into_inner(); + let nasgns = new_policy.role_assignments.len(); + // This should have been validated during parsing. 
+ bail_unless!(nasgns <= shared::MAX_ROLE_ASSIGNMENTS_PER_RESOURCE); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; + let policy = nexus + .silo_update_policy(&opctx, &silo_lookup, &new_policy) + .await?; + Ok(HttpResponseOk(policy)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Lists resource quotas for all silos -#[endpoint { - method = GET, - path = "/v1/system/silo-quotas", - tags = ["system/silos"], -}] -async fn system_quotas_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; + // Silo-specific user endpoints + + /// List built-in (system) users in silo + #[endpoint { + method = GET, + path = "/v1/system/users", + tags = ["system/silos"], + }] + async fn silo_user_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanById::from_query(&query)?; + let silo_lookup = + nexus.silo_lookup(&opctx, scan_params.selector.silo.clone())?; + let users = nexus + .silo_list_users(&opctx, &silo_lookup, &pag_params) + .await? + .into_iter() + .map(|i| i.into()) + .collect(); + Ok(HttpResponseOk(ScanById::results_page( + &query, + users, + &|_, user: &User| user.id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let query = query_params.into_inner(); - let pagparams = data_page_params_for(&rqctx, &query)?; + /// Path parameters for Silo User requests + #[derive(Deserialize, JsonSchema)] + struct UserParam { + /// The user's internal id + user_id: Uuid, + } - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let quotas = nexus - .fleet_list_quotas(&opctx, &pagparams) - .await? 
-            .into_iter()
-            .map(|p| p.into())
-            .collect();
-
-        Ok(HttpResponseOk(ScanById::results_page(
-            &query,
-            quotas,
-            &|_, quota: &SiloQuotas| quota.silo_id,
-        )?))
-    };
-    apictx
-        .context
-        .external_latencies
-        .instrument_dropshot_handler(&rqctx, handler)
-        .await
-}

+    /// Fetch built-in (system) user
+    #[endpoint {
+        method = GET,
+        path = "/v1/system/users/{user_id}",
+        tags = ["system/silos"],
+    }]
+    async fn silo_user_view(
+        rqctx: RequestContext<ApiContext>,
+        path_params: Path<UserParam>,
+        query_params: Query<params::SiloSelector>,
+    ) -> Result<HttpResponseOk<User>, HttpError> {
+        let apictx = rqctx.context();
+        let handler = async {
+            let opctx =
+                crate::context::op_context_for_external_api(&rqctx).await?;
+            let nexus = &apictx.context.nexus;
+            let path = path_params.into_inner();
+            let query = query_params.into_inner();
+            let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?;
+            let user = nexus
+                .silo_user_fetch(&opctx, &silo_lookup, path.user_id)
+                .await?;
+            Ok(HttpResponseOk(user.into()))
+        };
+        apictx
+            .context
+            .external_latencies
+            .instrument_dropshot_handler(&rqctx, handler)
+            .await
+    }

-/// Fetch resource quotas for silo
-#[endpoint {
-    method = GET,
-    path = "/v1/system/silos/{silo}/quotas",
-    tags = ["system/silos"],
-}]
-async fn silo_quotas_view(
-    rqctx: RequestContext<ApiContext>,
-    path_params: Path<params::SiloPath>,
-) -> Result<HttpResponseOk<views::SiloQuotas>, HttpError> {
-    let apictx = rqctx.context();
-    let handler = async {
-        let nexus = &apictx.context.nexus;
+    // Silo identity providers
+
+    /// List a silo's identity providers
+    #[endpoint {
+        method = GET,
+        path = "/v1/system/identity-providers",
+        tags = ["system/silos"],
+    }]
+    async fn silo_identity_provider_list(
+        rqctx: RequestContext<ApiContext>,
+        query_params: Query<PaginatedByNameOrId<params::SiloSelector>>,
+    ) -> Result<HttpResponseOk<ResultsPage<IdentityProvider>>, HttpError> {
+        let apictx = rqctx.context();
+        let handler = async {
+            let opctx =
+                crate::context::op_context_for_external_api(&rqctx).await?;
+            let nexus = &apictx.context.nexus;
+            let query = query_params.into_inner();
+            let pag_params = data_page_params_for(&rqctx, &query)?;
+            let scan_params = ScanByNameOrId::from_query(&query)?;
+            let paginated_by = name_or_id_pagination(&pag_params, scan_params)?;
+            let silo_lookup =
+                nexus.silo_lookup(&opctx, scan_params.selector.silo.clone())?;
+            let identity_providers = nexus
+                .identity_provider_list(&opctx, &silo_lookup, &paginated_by)
+                .await?
+ .into_iter() + .map(|x| x.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + identity_providers, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let silo_lookup = - nexus.silo_lookup(&opctx, path_params.into_inner().silo)?; - let quota = nexus.silo_quotas_view(&opctx, &silo_lookup).await?; - Ok(HttpResponseOk(quota.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // Silo SAML identity providers + + /// Create SAML IdP + #[endpoint { + method = POST, + path = "/v1/system/identity-providers/saml", + tags = ["system/silos"], + }] + async fn saml_identity_provider_create( + rqctx: RequestContext, + query_params: Query, + new_provider: TypedBody, + ) -> Result, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; + let provider = nexus + .saml_identity_provider_create( + &opctx, + &silo_lookup, + new_provider.into_inner(), + ) + .await?; + Ok(HttpResponseCreated(provider.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Update resource quotas for silo -/// -/// If a quota value is not specified, it will remain unchanged. -#[endpoint { - method = PUT, - path = "/v1/system/silos/{silo}/quotas", - tags = ["system/silos"], -}] -async fn silo_quotas_update( - rqctx: RequestContext, - path_params: Path, - new_quota: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; + /// Fetch SAML IdP + #[endpoint { + method = GET, + path = "/v1/system/identity-providers/saml/{provider}", + tags = ["system/silos"], + }] + async fn saml_identity_provider_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let saml_identity_provider_selector = + params::SamlIdentityProviderSelector { + silo: Some(query.silo), + saml_identity_provider: path.provider, + }; + let (.., provider) = nexus + .saml_identity_provider_lookup( + &opctx, + saml_identity_provider_selector, + )? + .fetch() + .await?; + Ok(HttpResponseOk(provider.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let silo_lookup = - nexus.silo_lookup(&opctx, path_params.into_inner().silo)?; - let quota = nexus - .silo_update_quota(&opctx, &silo_lookup, &new_quota.into_inner()) - .await?; - Ok(HttpResponseOk(quota.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // TODO: no DELETE for identity providers? + + // "Local" Identity Provider + + /// Create user + /// + /// Users can only be created in Silos with `provision_type` == `Fixed`. 
+ /// Otherwise, Silo users are just-in-time (JIT) provisioned when a user first + /// logs in using an external Identity Provider. + #[endpoint { + method = POST, + path = "/v1/system/identity-providers/local/users", + tags = ["system/silos"], + }] + async fn local_idp_user_create( + rqctx: RequestContext, + query_params: Query, + new_user_params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; + let user = nexus + .local_idp_create_user( + &opctx, + &silo_lookup, + new_user_params.into_inner(), + ) + .await?; + Ok(HttpResponseCreated(user.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List silos -/// -/// Lists silos that are discoverable based on the current permissions. -#[endpoint { - method = GET, - path = "/v1/system/silos", - tags = ["system/silos"], -}] -async fn silo_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let silos = nexus - .silos_list(&opctx, &paginated_by) - .await? - .into_iter() - .map(|p| p.try_into()) - .collect::, Error>>()?; - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - silos, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Delete user + #[endpoint { + method = DELETE, + path = "/v1/system/identity-providers/local/users/{user_id}", + tags = ["system/silos"], + }] + async fn local_idp_user_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; + nexus + .local_idp_delete_user(&opctx, &silo_lookup, path.user_id) + .await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create a silo -#[endpoint { - method = POST, - path = "/v1/system/silos", - tags = ["system/silos"], -}] -async fn silo_create( - rqctx: RequestContext, - new_silo_params: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let silo = - nexus.silo_create(&opctx, new_silo_params.into_inner()).await?; - Ok(HttpResponseCreated(silo.try_into()?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Set or invalidate user's password + /// + /// Passwords can only be updated for users in Silos with identity mode + /// `LocalOnly`. 
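+    // "Invalidate" here appears to mean clearing the stored password so that
+    // password login stops working; the user account itself is not removed
+    // or disabled.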
+ #[endpoint { + method = POST, + path = "/v1/system/identity-providers/local/users/{user_id}/set-password", + tags = ["system/silos"], + }] + async fn local_idp_user_set_password( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + update: TypedBody, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; + nexus + .local_idp_user_set_password( + &opctx, + &silo_lookup, + path.user_id, + update.into_inner(), + ) + .await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch silo -/// -/// Fetch silo by name or ID. -#[endpoint { - method = GET, - path = "/v1/system/silos/{silo}", - tags = ["system/silos"], -}] -async fn silo_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// List projects + #[endpoint { + method = GET, + path = "/v1/projects", + tags = ["projects"], + }] + async fn project_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let projects = nexus + .project_list(&opctx, &paginated_by) + .await? 
+ .into_iter() + .map(|p| p.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + projects, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + /// Create project + #[endpoint { + method = POST, + path = "/v1/projects", + tags = ["projects"], + }] + async fn project_create( + rqctx: RequestContext, + new_project: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let nexus = &apictx.context.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let project = + nexus.project_create(&opctx, &new_project.into_inner()).await?; + Ok(HttpResponseCreated(project.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + /// Fetch project + #[endpoint { + method = GET, + path = "/v1/projects/{project}", + tags = ["projects"], + }] + async fn project_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let path = path_params.into_inner(); - let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; - let (.., silo) = silo_lookup.fetch().await?; - Ok(HttpResponseOk(silo.try_into()?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let project_selector = + params::ProjectSelector { project: path.project }; + let (.., project) = + nexus.project_lookup(&opctx, project_selector)?.fetch().await?; + Ok(HttpResponseOk(project.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List IP pools linked to silo -/// -/// Linked IP pools are available to users in the specified silo. A silo can -/// have at most one default pool. IPs are allocated from the default pool when -/// users ask for one without specifying a pool. 
-#[endpoint { - method = GET, - path = "/v1/system/silos/{silo}/ip-pools", - tags = ["system/silos"], -}] -async fn silo_ip_pool_list( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Delete project + #[endpoint { + method = DELETE, + path = "/v1/projects/{project}", + tags = ["projects"], + }] + async fn project_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let path = path_params.into_inner(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let project_selector = + params::ProjectSelector { project: path.project }; + let project_lookup = + nexus.project_lookup(&opctx, project_selector)?; + nexus.project_delete(&opctx, &project_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - - let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; - let pools = nexus - .silo_ip_pool_list(&opctx, &silo_lookup, &paginated_by) - .await? - .iter() - .map(|(pool, silo_link)| views::SiloIpPool { - identity: pool.identity(), - is_default: silo_link.is_default, - }) - .collect(); - - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - pools, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Delete a silo -/// -/// Delete a silo by name or ID. -#[endpoint { - method = DELETE, - path = "/v1/system/silos/{silo}", - tags = ["system/silos"], -}] -async fn silo_delete( - rqctx: RequestContext, - path_params: Path, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + // TODO-correctness: Is it valid for PUT to accept application/json that's a + // subset of what the resource actually represents? If not, is that a problem? + // (HTTP may require that this be idempotent.) If so, can we get around that + // having this be a slightly different content-type (e.g., + // "application/json-patch")? We should see what other APIs do. 
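+    // As implemented, the fields of `params::ProjectUpdate` are optional and
+    // omitted fields are left unchanged, so this PUT behaves closer to a
+    // merge (PATCH) than to a full replacement; that gap is what the TODO
+    // above is asking about.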
+ /// Update a project + #[endpoint { + method = PUT, + path = "/v1/projects/{project}", + tags = ["projects"], + }] + async fn project_update( + rqctx: RequestContext, + path_params: Path, + updated_project: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; - let params = path_params.into_inner(); - let silo_lookup = nexus.silo_lookup(&opctx, params.silo)?; - nexus.silo_delete(&opctx, &silo_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let path = path_params.into_inner(); + let updated_project = updated_project.into_inner(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let project_selector = + params::ProjectSelector { project: path.project }; + let project_lookup = + nexus.project_lookup(&opctx, project_selector)?; + let project = nexus + .project_update(&opctx, &project_lookup, &updated_project) + .await?; + Ok(HttpResponseOk(project.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch silo IAM policy -#[endpoint { - method = GET, - path = "/v1/system/silos/{silo}/policy", - tags = ["system/silos"], -}] -async fn silo_policy_view( - rqctx: RequestContext, - path_params: Path, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Fetch project's IAM policy + #[endpoint { + method = GET, + path = "/v1/projects/{project}/policy", + tags = ["projects"], + }] + async fn project_policy_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let path = path_params.into_inner(); - let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; - let policy = nexus.silo_fetch_policy(&opctx, &silo_lookup).await?; - Ok(HttpResponseOk(policy)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let project_selector = + params::ProjectSelector { project: path.project }; + let project_lookup = + nexus.project_lookup(&opctx, project_selector)?; + let policy = + nexus.project_fetch_policy(&opctx, &project_lookup).await?; + Ok(HttpResponseOk(policy)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Update silo IAM policy -#[endpoint { - method = PUT, - path = "/v1/system/silos/{silo}/policy", - tags = ["system/silos"], -}] -async fn silo_policy_update( - rqctx: RequestContext, - path_params: Path, - new_policy: TypedBody>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let new_policy = new_policy.into_inner(); - let nasgns = new_policy.role_assignments.len(); - // This should have been validated during parsing. 
- bail_unless!(nasgns <= shared::MAX_ROLE_ASSIGNMENTS_PER_RESOURCE); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Update project's IAM policy + #[endpoint { + method = PUT, + path = "/v1/projects/{project}/policy", + tags = ["projects"], + }] + async fn project_policy_update( + rqctx: RequestContext, + path_params: Path, + new_policy: TypedBody>, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let path = path_params.into_inner(); - let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; - let policy = - nexus.silo_update_policy(&opctx, &silo_lookup, &new_policy).await?; - Ok(HttpResponseOk(policy)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -// Silo-specific user endpoints + let new_policy = new_policy.into_inner(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let project_selector = + params::ProjectSelector { project: path.project }; + let project_lookup = + nexus.project_lookup(&opctx, project_selector)?; + nexus + .project_update_policy(&opctx, &project_lookup, &new_policy) + .await?; + Ok(HttpResponseOk(new_policy)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List built-in (system) users in silo -#[endpoint { - method = GET, - path = "/v1/system/users", - tags = ["system/silos"], -}] -async fn silo_user_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanById::from_query(&query)?; - let silo_lookup = - nexus.silo_lookup(&opctx, scan_params.selector.silo.clone())?; - let users = nexus - .silo_list_users(&opctx, &silo_lookup, &pag_params) - .await? - .into_iter() - .map(|i| i.into()) - .collect(); - Ok(HttpResponseOk(ScanById::results_page( - &query, - users, - &|_, user: &User| user.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // IP Pools + + /// List IP pools + #[endpoint { + method = GET, + path = "/v1/ip-pools", + tags = ["projects"], + }] + async fn project_ip_pool_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let pools = nexus + .current_silo_ip_pool_list(&opctx, &paginated_by) + .await? 
+ .into_iter() + .map(|(pool, silo_link)| views::SiloIpPool { + identity: pool.identity(), + is_default: silo_link.is_default, + }) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + pools, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Path parameters for Silo User requests -#[derive(Deserialize, JsonSchema)] -struct UserParam { - /// The user's internal id - user_id: Uuid, -} + /// Fetch IP pool + #[endpoint { + method = GET, + path = "/v1/ip-pools/{pool}", + tags = ["projects"], + }] + async fn project_ip_pool_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let pool_selector = path_params.into_inner().pool; + let (pool, silo_link) = + nexus.silo_ip_pool_fetch(&opctx, &pool_selector).await?; + Ok(HttpResponseOk(views::SiloIpPool { + identity: pool.identity(), + is_default: silo_link.is_default, + })) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch built-in (system) user -#[endpoint { - method = GET, - path = "/v1/system/users/{user_id}", - tags = ["system/silos"], -}] -async fn silo_user_view( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; - let user = - nexus.silo_user_fetch(&opctx, &silo_lookup, path.user_id).await?; - Ok(HttpResponseOk(user.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List IP pools + #[endpoint { + method = GET, + path = "/v1/system/ip-pools", + tags = ["system/networking"], + }] + async fn ip_pool_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let pools = nexus + .ip_pools_list(&opctx, &paginated_by) + .await? 
+ .into_iter() + .map(IpPool::from) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + pools, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Silo identity providers + /// Create IP pool + #[endpoint { + method = POST, + path = "/v1/system/ip-pools", + tags = ["system/networking"], + }] + async fn ip_pool_create( + rqctx: RequestContext, + pool_params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let nexus = &apictx.context.nexus; + let pool_params = pool_params.into_inner(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let pool = nexus.ip_pool_create(&opctx, &pool_params).await?; + Ok(HttpResponseCreated(IpPool::from(pool))) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List a silo's IdP's name -#[endpoint { - method = GET, - path = "/v1/system/identity-providers", - tags = ["system/silos"], -}] -async fn silo_identity_provider_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let silo_lookup = - nexus.silo_lookup(&opctx, scan_params.selector.silo.clone())?; - let identity_providers = nexus - .identity_provider_list(&opctx, &silo_lookup, &paginated_by) - .await? - .into_iter() - .map(|x| x.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - identity_providers, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -// Silo SAML identity providers - -/// Create SAML IdP -#[endpoint { - method = POST, - path = "/v1/system/identity-providers/saml", - tags = ["system/silos"], -}] -async fn saml_identity_provider_create( - rqctx: RequestContext, - query_params: Query, - new_provider: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; - let provider = nexus - .saml_identity_provider_create( - &opctx, - &silo_lookup, - new_provider.into_inner(), - ) - .await?; - Ok(HttpResponseCreated(provider.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch IP pool + #[endpoint { + method = GET, + path = "/v1/system/ip-pools/{pool}", + tags = ["system/networking"], + }] + async fn ip_pool_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let pool_selector = path_params.into_inner().pool; + // We do not prevent the service pool from being fetched by name or ID + // like we do for update, delete, associate. 
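+            // (The "service pool" is the internal IP pool reserved for Oxide
+            // system services; as the comment above notes, only mutating
+            // operations on it are restricted.)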
+ let (.., pool) = + nexus.ip_pool_lookup(&opctx, &pool_selector)?.fetch().await?; + Ok(HttpResponseOk(IpPool::from(pool))) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch SAML IdP -#[endpoint { - method = GET, - path = "/v1/system/identity-providers/saml/{provider}", - tags = ["system/silos"], -}] -async fn saml_identity_provider_view( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let saml_identity_provider_selector = - params::SamlIdentityProviderSelector { - silo: Some(query.silo), - saml_identity_provider: path.provider, - }; - let (.., provider) = nexus - .saml_identity_provider_lookup( - &opctx, - saml_identity_provider_selector, - )? - .fetch() - .await?; - Ok(HttpResponseOk(provider.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Delete IP pool + #[endpoint { + method = DELETE, + path = "/v1/system/ip-pools/{pool}", + tags = ["system/networking"], + }] + async fn ip_pool_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + nexus.ip_pool_delete(&opctx, &pool_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// TODO: no DELETE for identity providers? - -// "Local" Identity Provider - -/// Create user -/// -/// Users can only be created in Silos with `provision_type` == `Fixed`. -/// Otherwise, Silo users are just-in-time (JIT) provisioned when a user first -/// logs in using an external Identity Provider. 
-#[endpoint { - method = POST, - path = "/v1/system/identity-providers/local/users", - tags = ["system/silos"], -}] -async fn local_idp_user_create( - rqctx: RequestContext, - query_params: Query, - new_user_params: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; - let user = nexus - .local_idp_create_user( - &opctx, - &silo_lookup, - new_user_params.into_inner(), - ) - .await?; - Ok(HttpResponseCreated(user.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Update IP pool + #[endpoint { + method = PUT, + path = "/v1/system/ip-pools/{pool}", + tags = ["system/networking"], + }] + async fn ip_pool_update( + rqctx: RequestContext, + path_params: Path, + updates: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let updates = updates.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + let pool = + nexus.ip_pool_update(&opctx, &pool_lookup, &updates).await?; + Ok(HttpResponseOk(pool.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete user -#[endpoint { - method = DELETE, - path = "/v1/system/identity-providers/local/users/{user_id}", - tags = ["system/silos"], -}] -async fn local_idp_user_delete( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; - nexus.local_idp_delete_user(&opctx, &silo_lookup, path.user_id).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch IP pool utilization + #[endpoint { + method = GET, + path = "/v1/system/ip-pools/{pool}/utilization", + tags = ["system/networking"], + }] + async fn ip_pool_utilization_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let pool_selector = path_params.into_inner().pool; + // We do not prevent the service pool from being fetched by name or ID + // like we do for update, delete, associate. + let pool_lookup = nexus.ip_pool_lookup(&opctx, &pool_selector)?; + let utilization = + nexus.ip_pool_utilization_view(&opctx, &pool_lookup).await?; + Ok(HttpResponseOk(utilization.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Set or invalidate user's password -/// -/// Passwords can only be updated for users in Silos with identity mode -/// `LocalOnly`. 
-#[endpoint { - method = POST, - path = "/v1/system/identity-providers/local/users/{user_id}/set-password", - tags = ["system/silos"], -}] -async fn local_idp_user_set_password( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - update: TypedBody, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let silo_lookup = nexus.silo_lookup(&opctx, query.silo)?; - nexus - .local_idp_user_set_password( - &opctx, - &silo_lookup, - path.user_id, - update.into_inner(), - ) - .await?; - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List IP pool's linked silos + #[endpoint { + method = GET, + path = "/v1/system/ip-pools/{pool}/silos", + tags = ["system/networking"], + }] + async fn ip_pool_silo_list( + rqctx: RequestContext, + path_params: Path, + // paginating by resource_id because they're unique per pool. most robust + // option would be to paginate by a composite key representing the (pool, + // resource_type, resource) + query_params: Query, + // TODO: this could just list views::Silo -- it's not like knowing silo_id + // and nothing else is particularly useful -- except we also want to say + // whether the pool is marked default on each silo. So one option would + // be to do the same as we did with SiloIpPool -- include is_default on + // whatever the thing is. Still... all we'd have to do to make this usable + // in both places would be to make it { ...IpPool, silo_id, silo_name, + // is_default } + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; -/// List projects -#[endpoint { - method = GET, - path = "/v1/projects", - tags = ["projects"], -}] -async fn project_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let projects = nexus - .project_list(&opctx, &paginated_by) - .await? 
- .into_iter() - .map(|p| p.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - projects, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; -/// Create project -#[endpoint { - method = POST, - path = "/v1/projects", - tags = ["projects"], -}] -async fn project_create( - rqctx: RequestContext, - new_project: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project = - nexus.project_create(&opctx, &new_project.into_inner()).await?; - Ok(HttpResponseCreated(project.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let path = path_params.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; -/// Fetch project -#[endpoint { - method = GET, - path = "/v1/projects/{project}", - tags = ["projects"], -}] -async fn project_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project_selector = - params::ProjectSelector { project: path.project }; - let (.., project) = - nexus.project_lookup(&opctx, project_selector)?.fetch().await?; - Ok(HttpResponseOk(project.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let assocs = nexus + .ip_pool_silo_list(&opctx, &pool_lookup, &pag_params) + .await? + .into_iter() + .map(|assoc| assoc.into()) + .collect(); + + Ok(HttpResponseOk(ScanById::results_page( + &query, + assocs, + &|_, x: &views::IpPoolSiloLink| x.silo_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete project -#[endpoint { - method = DELETE, - path = "/v1/projects/{project}", - tags = ["projects"], -}] -async fn project_delete( - rqctx: RequestContext, - path_params: Path, -) -> Result { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project_selector = - params::ProjectSelector { project: path.project }; - let project_lookup = nexus.project_lookup(&opctx, project_selector)?; - nexus.project_delete(&opctx, &project_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Link IP pool to silo + /// + /// Users in linked silos can allocate external IPs from this pool for their + /// instances. A silo can have at most one default pool. IPs are allocated from + /// the default pool when users ask for one without specifying a pool. 
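To make the default-pool rule in the doc comment above concrete, here is a small self-contained sketch. `PoolLink` and `pool_for_allocation` are hypothetical stand-ins for illustration, not Nexus types:

/// Hypothetical stand-in for a pool-to-silo link; illustration only.
struct PoolLink {
    pool_name: String,
    is_default: bool,
}

/// An explicitly requested pool wins; otherwise fall back to the silo's
/// default link, if one is configured.
fn pool_for_allocation<'a>(
    requested: Option<&str>,
    links: &'a [PoolLink],
) -> Option<&'a PoolLink> {
    match requested {
        Some(name) => links.iter().find(|l| l.pool_name == name),
        None => links.iter().find(|l| l.is_default),
    }
}

fn main() {
    let links = vec![
        PoolLink { pool_name: "general".into(), is_default: true },
        PoolLink { pool_name: "vpn".into(), is_default: false },
    ];
    // No pool named in the request: allocate from the default link.
    assert_eq!(pool_for_allocation(None, &links).unwrap().pool_name, "general");
    // An explicit pool overrides the default.
    assert_eq!(pool_for_allocation(Some("vpn"), &links).unwrap().pool_name, "vpn");
}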
+ #[endpoint { + method = POST, + path = "/v1/system/ip-pools/{pool}/silos", + tags = ["system/networking"], + }] + async fn ip_pool_silo_link( + rqctx: RequestContext, + path_params: Path, + resource_assoc: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let resource_assoc = resource_assoc.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + let assoc = nexus + .ip_pool_link_silo(&opctx, &pool_lookup, &resource_assoc) + .await?; + Ok(HttpResponseCreated(assoc.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// TODO-correctness: Is it valid for PUT to accept application/json that's a -// subset of what the resource actually represents? If not, is that a problem? -// (HTTP may require that this be idempotent.) If so, can we get around that -// having this be a slightly different content-type (e.g., -// "application/json-patch")? We should see what other APIs do. -/// Update a project -#[endpoint { - method = PUT, - path = "/v1/projects/{project}", - tags = ["projects"], -}] -async fn project_update( - rqctx: RequestContext, - path_params: Path, - updated_project: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let updated_project = updated_project.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project_selector = - params::ProjectSelector { project: path.project }; - let project_lookup = nexus.project_lookup(&opctx, project_selector)?; - let project = nexus - .project_update(&opctx, &project_lookup, &updated_project) - .await?; - Ok(HttpResponseOk(project.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Unlink IP pool from silo + /// + /// Will fail if there are any outstanding IPs allocated in the silo. 
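A sketch of the unlink precondition described above, using a made-up usage-summary type rather than the real datastore query:

/// Made-up summary of a silo's use of a pool; illustration only.
struct SiloPoolUsage {
    allocated_ips: u64,
}

/// Unlinking is refused while any IPs from the pool are still allocated.
fn check_unlink_allowed(usage: &SiloPoolUsage) -> Result<(), String> {
    if usage.allocated_ips > 0 {
        return Err(format!(
            "{} IP(s) from this pool are still allocated in the silo",
            usage.allocated_ips
        ));
    }
    Ok(())
}

fn main() {
    assert!(check_unlink_allowed(&SiloPoolUsage { allocated_ips: 0 }).is_ok());
    assert!(check_unlink_allowed(&SiloPoolUsage { allocated_ips: 3 }).is_err());
}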
+ #[endpoint { + method = DELETE, + path = "/v1/system/ip-pools/{pool}/silos/{silo}", + tags = ["system/networking"], + }] + async fn ip_pool_silo_unlink( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; + nexus + .ip_pool_unlink_silo(&opctx, &pool_lookup, &silo_lookup) + .await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch project's IAM policy -#[endpoint { - method = GET, - path = "/v1/projects/{project}/policy", - tags = ["projects"], -}] -async fn project_policy_view( - rqctx: RequestContext, - path_params: Path, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project_selector = - params::ProjectSelector { project: path.project }; - let project_lookup = nexus.project_lookup(&opctx, project_selector)?; - let policy = - nexus.project_fetch_policy(&opctx, &project_lookup).await?; - Ok(HttpResponseOk(policy)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Make IP pool default for silo + /// + /// When a user asks for an IP (e.g., at instance create time) without + /// specifying a pool, the IP comes from the default pool if a default is + /// configured. When a pool is made the default for a silo, any existing default + /// will remain linked to the silo, but will no longer be the default. 
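The "existing default stays linked but loses the flag" behavior described above can be pictured as below; the types are hypothetical, for illustration only:

/// Hypothetical pool-to-silo link record; illustration only.
struct PoolLink {
    pool_name: String,
    is_default: bool,
}

/// Making one link the default clears the flag on any previous default,
/// but every link remains in place.
fn set_default(links: &mut [PoolLink], pool_name: &str) {
    for link in links.iter_mut() {
        link.is_default = link.pool_name == pool_name;
    }
}

fn main() {
    let mut links = vec![
        PoolLink { pool_name: "general".into(), is_default: true },
        PoolLink { pool_name: "vpn".into(), is_default: false },
    ];
    set_default(&mut links, "vpn");
    // "general" is still linked to the silo, just no longer the default.
    assert!(!links[0].is_default);
    assert!(links[1].is_default);
}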
+ #[endpoint { + method = PUT, + path = "/v1/system/ip-pools/{pool}/silos/{silo}", + tags = ["system/networking"], + }] + async fn ip_pool_silo_update( + rqctx: RequestContext, + path_params: Path, + update: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let update = update.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; + let assoc = nexus + .ip_pool_silo_update( + &opctx, + &pool_lookup, + &silo_lookup, + &update, + ) + .await?; + Ok(HttpResponseOk(assoc.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Update project's IAM policy -#[endpoint { - method = PUT, - path = "/v1/projects/{project}/policy", - tags = ["projects"], -}] -async fn project_policy_update( - rqctx: RequestContext, - path_params: Path, - new_policy: TypedBody>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let new_policy = new_policy.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project_selector = - params::ProjectSelector { project: path.project }; - let project_lookup = nexus.project_lookup(&opctx, project_selector)?; - nexus - .project_update_policy(&opctx, &project_lookup, &new_policy) - .await?; - Ok(HttpResponseOk(new_policy)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch Oxide service IP pool + #[endpoint { + method = GET, + path = "/v1/system/ip-pools-service", + tags = ["system/networking"], + }] + async fn ip_pool_service_view( + rqctx: RequestContext, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let nexus = &apictx.context.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let pool = nexus.ip_pool_service_fetch(&opctx).await?; + Ok(HttpResponseOk(IpPool::from(pool))) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// IP Pools + type IpPoolRangePaginationParams = + PaginationParams; + + /// List ranges for IP pool + /// + /// Ranges are ordered by their first address. + #[endpoint { + method = GET, + path = "/v1/system/ip-pools/{pool}/ranges", + tags = ["system/networking"], + }] + async fn ip_pool_range_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let path = path_params.into_inner(); + let marker = match query.page { + WhichPage::First(_) => None, + WhichPage::Next(ref addr) => Some(addr), + }; + let pag_params = DataPageParams { + limit: rqctx.page_limit(&query)?, + direction: PaginationOrder::Ascending, + marker, + }; + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + let ranges = nexus + .ip_pool_list_ranges(&opctx, &pool_lookup, &pag_params) + .await? 
+ .into_iter() + .map(|range| range.into()) + .collect(); + Ok(HttpResponseOk(ResultsPage::new( + ranges, + &EmptyScanParams {}, + |range: &IpPoolRange, _| { + IpNetwork::from(range.range.first_address()) + }, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List IP pools -#[endpoint { - method = GET, - path = "/v1/ip-pools", - tags = ["projects"], -}] -async fn project_ip_pool_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let pools = nexus - .current_silo_ip_pool_list(&opctx, &paginated_by) - .await? - .into_iter() - .map(|(pool, silo_link)| views::SiloIpPool { - identity: pool.identity(), - is_default: silo_link.is_default, - }) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - pools, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Add range to IP pool + /// + /// IPv6 ranges are not allowed yet. + #[endpoint { + method = POST, + path = "/v1/system/ip-pools/{pool}/ranges/add", + tags = ["system/networking"], + }] + async fn ip_pool_range_add( + rqctx: RequestContext, + path_params: Path, + range_params: TypedBody, + ) -> Result, HttpError> { + let apictx = &rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let range = range_params.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + let out = + nexus.ip_pool_add_range(&opctx, &pool_lookup, &range).await?; + Ok(HttpResponseCreated(out.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch IP pool -#[endpoint { - method = GET, - path = "/v1/ip-pools/{pool}", - tags = ["projects"], -}] -async fn project_ip_pool_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let pool_selector = path_params.into_inner().pool; - let (pool, silo_link) = - nexus.silo_ip_pool_fetch(&opctx, &pool_selector).await?; - Ok(HttpResponseOk(views::SiloIpPool { - identity: pool.identity(), - is_default: silo_link.is_default, - })) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Remove range from IP pool + #[endpoint { + method = POST, + path = "/v1/system/ip-pools/{pool}/ranges/remove", + tags = ["system/networking"], + }] + async fn ip_pool_range_remove( + rqctx: RequestContext, + path_params: Path, + range_params: TypedBody, + ) -> Result { + let apictx = &rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let range = range_params.into_inner(); + let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; + 
nexus.ip_pool_delete_range(&opctx, &pool_lookup, &range).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List IP pools -#[endpoint { - method = GET, - path = "/v1/system/ip-pools", - tags = ["system/networking"], -}] -async fn ip_pool_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let pools = nexus - .ip_pools_list(&opctx, &paginated_by) - .await? - .into_iter() - .map(IpPool::from) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - pools, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List IP ranges for the Oxide service pool + /// + /// Ranges are ordered by their first address. + #[endpoint { + method = GET, + path = "/v1/system/ip-pools-service/ranges", + tags = ["system/networking"], + }] + async fn ip_pool_service_range_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let marker = match query.page { + WhichPage::First(_) => None, + WhichPage::Next(ref addr) => Some(addr), + }; + let pag_params = DataPageParams { + limit: rqctx.page_limit(&query)?, + direction: PaginationOrder::Ascending, + marker, + }; + let ranges = nexus + .ip_pool_service_list_ranges(&opctx, &pag_params) + .await? + .into_iter() + .map(|range| range.into()) + .collect(); + Ok(HttpResponseOk(ResultsPage::new( + ranges, + &EmptyScanParams {}, + |range: &IpPoolRange, _| { + IpNetwork::from(range.range.first_address()) + }, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create IP pool -#[endpoint { - method = POST, - path = "/v1/system/ip-pools", - tags = ["system/networking"], -}] -async fn ip_pool_create( - rqctx: RequestContext, - pool_params: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let pool_params = pool_params.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let pool = nexus.ip_pool_create(&opctx, &pool_params).await?; - Ok(HttpResponseCreated(IpPool::from(pool))) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Add IP range to Oxide service pool + /// + /// IPv6 ranges are not allowed yet. 
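A self-contained sketch of the "IPv6 ranges are not allowed yet" validation; the `IpRange` struct here is a simplified stand-in for the real request body type:

use std::net::IpAddr;

/// Simplified stand-in for an IP range request body; illustration only.
struct IpRange {
    first: IpAddr,
    last: IpAddr,
}

fn validate_range(range: &IpRange) -> Result<(), String> {
    match (range.first, range.last) {
        (IpAddr::V4(first), IpAddr::V4(last)) if first <= last => Ok(()),
        (IpAddr::V4(_), IpAddr::V4(_)) => {
            Err("first address must not be greater than last".to_string())
        }
        _ => Err("IPv6 ranges are not allowed yet".to_string()),
    }
}

fn main() {
    let v4 = IpRange {
        first: "10.0.0.1".parse().unwrap(),
        last: "10.0.0.10".parse().unwrap(),
    };
    assert!(validate_range(&v4).is_ok());

    let v6 = IpRange {
        first: "fd00::1".parse().unwrap(),
        last: "fd00::ff".parse().unwrap(),
    };
    assert!(validate_range(&v6).is_err());
}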
+ #[endpoint { + method = POST, + path = "/v1/system/ip-pools-service/ranges/add", + tags = ["system/networking"], + }] + async fn ip_pool_service_range_add( + rqctx: RequestContext, + range_params: TypedBody, + ) -> Result, HttpError> { + let apictx = &rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let range = range_params.into_inner(); + let out = nexus.ip_pool_service_add_range(&opctx, &range).await?; + Ok(HttpResponseCreated(out.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch IP pool -#[endpoint { - method = GET, - path = "/v1/system/ip-pools/{pool}", - tags = ["system/networking"], -}] -async fn ip_pool_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Remove IP range from Oxide service pool + #[endpoint { + method = POST, + path = "/v1/system/ip-pools-service/ranges/remove", + tags = ["system/networking"], + }] + async fn ip_pool_service_range_remove( + rqctx: RequestContext, + range_params: TypedBody, + ) -> Result { + let apictx = &rqctx.context(); let nexus = &apictx.context.nexus; - let pool_selector = path_params.into_inner().pool; - // We do not prevent the service pool from being fetched by name or ID - // like we do for update, delete, associate. - let (.., pool) = - nexus.ip_pool_lookup(&opctx, &pool_selector)?.fetch().await?; - Ok(HttpResponseOk(IpPool::from(pool))) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let range = range_params.into_inner(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + nexus.ip_pool_service_delete_range(&opctx, &range).await?; + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete IP pool -#[endpoint { - method = DELETE, - path = "/v1/system/ip-pools/{pool}", - tags = ["system/networking"], -}] -async fn ip_pool_delete( - rqctx: RequestContext, - path_params: Path, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; - nexus.ip_pool_delete(&opctx, &pool_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // Floating IP Addresses + + /// List floating IPs + #[endpoint { + method = GET, + path = "/v1/floating-ips", + tags = ["floating-ips"], + }] + async fn floating_ip_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let project_lookup = + nexus.project_lookup(&opctx, scan_params.selector.clone())?; + let ips = nexus + 
.floating_ips_list(&opctx, &project_lookup, &paginated_by) + .await?; + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + ips, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Update IP pool -#[endpoint { - method = PUT, - path = "/v1/system/ip-pools/{pool}", - tags = ["system/networking"], -}] -async fn ip_pool_update( - rqctx: RequestContext, - path_params: Path, - updates: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let updates = updates.into_inner(); - let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; - let pool = nexus.ip_pool_update(&opctx, &pool_lookup, &updates).await?; - Ok(HttpResponseOk(pool.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Create floating IP + #[endpoint { + method = POST, + path = "/v1/floating-ips", + tags = ["floating-ips"], + }] + async fn floating_ip_create( + rqctx: RequestContext, + query_params: Query, + floating_params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let nexus = &apictx.context.nexus; + let floating_params = floating_params.into_inner(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let project_lookup = + nexus.project_lookup(&opctx, query_params.into_inner())?; + let ip = nexus + .floating_ip_create(&opctx, &project_lookup, floating_params) + .await?; + Ok(HttpResponseCreated(ip)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch IP pool utilization -#[endpoint { - method = GET, - path = "/v1/system/ip-pools/{pool}/utilization", - tags = ["system/networking"], -}] -async fn ip_pool_utilization_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let pool_selector = path_params.into_inner().pool; - // We do not prevent the service pool from being fetched by name or ID - // like we do for update, delete, associate. 
- let pool_lookup = nexus.ip_pool_lookup(&opctx, &pool_selector)?; - let utilization = - nexus.ip_pool_utilization_view(&opctx, &pool_lookup).await?; - Ok(HttpResponseOk(utilization.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Update floating IP + #[endpoint { + method = PUT, + path = "/v1/floating-ips/{floating_ip}", + tags = ["floating-ips"], + }] + async fn floating_ip_update( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + updated_floating_ip: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let updated_floating_ip_params = updated_floating_ip.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let floating_ip_selector = params::FloatingIpSelector { + project: query.project, + floating_ip: path.floating_ip, + }; + let floating_ip_lookup = + nexus.floating_ip_lookup(&opctx, floating_ip_selector)?; + let floating_ip = nexus + .floating_ip_update( + &opctx, + floating_ip_lookup, + updated_floating_ip_params, + ) + .await?; + Ok(HttpResponseOk(floating_ip)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List IP pool's linked silos -#[endpoint { - method = GET, - path = "/v1/system/ip-pools/{pool}/silos", - tags = ["system/networking"], -}] -async fn ip_pool_silo_list( - rqctx: RequestContext, - path_params: Path, - // paginating by resource_id because they're unique per pool. most robust - // option would be to paginate by a composite key representing the (pool, - // resource_type, resource) - query_params: Query, - // TODO: this could just list views::Silo -- it's not like knowing silo_id - // and nothing else is particularly useful -- except we also want to say - // whether the pool is marked default on each silo. So one option would - // be to do the same as we did with SiloIpPool -- include is_default on - // whatever the thing is. Still... 
all we'd have to do to make this usable - // in both places would be to make it { ...IpPool, silo_id, silo_name, - // is_default } -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; + /// Delete floating IP + #[endpoint { + method = DELETE, + path = "/v1/floating-ips/{floating_ip}", + tags = ["floating-ips"], + }] + async fn floating_ip_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let floating_ip_selector = params::FloatingIpSelector { + floating_ip: path.floating_ip, + project: query.project, + }; + let fip_lookup = + nexus.floating_ip_lookup(&opctx, floating_ip_selector)?; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; + nexus.floating_ip_delete(&opctx, fip_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let path = path_params.into_inner(); - let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; - - let assocs = nexus - .ip_pool_silo_list(&opctx, &pool_lookup, &pag_params) - .await? - .into_iter() - .map(|assoc| assoc.into()) - .collect(); - - Ok(HttpResponseOk(ScanById::results_page( - &query, - assocs, - &|_, x: &views::IpPoolSiloLink| x.silo_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch floating IP + #[endpoint { + method = GET, + path = "/v1/floating-ips/{floating_ip}", + tags = ["floating-ips"] + }] + async fn floating_ip_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let floating_ip_selector = params::FloatingIpSelector { + floating_ip: path.floating_ip, + project: query.project, + }; + let (.., fip) = nexus + .floating_ip_lookup(&opctx, floating_ip_selector)? + .fetch() + .await?; + Ok(HttpResponseOk(fip.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Link IP pool to silo -/// -/// Users in linked silos can allocate external IPs from this pool for their -/// instances. A silo can have at most one default pool. IPs are allocated from -/// the default pool when users ask for one without specifying a pool. 
-#[endpoint {
- method = POST,
- path = "/v1/system/ip-pools/{pool}/silos",
- tags = ["system/networking"],
-}]
-async fn ip_pool_silo_link(
- rqctx: RequestContext,
- path_params: Path,
- resource_assoc: TypedBody,
-) -> Result, HttpError> {
- let apictx = rqctx.context();
- let handler = async {
- let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
- let nexus = &apictx.context.nexus;
- let path = path_params.into_inner();
- let resource_assoc = resource_assoc.into_inner();
- let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
- let assoc = nexus
- .ip_pool_link_silo(&opctx, &pool_lookup, &resource_assoc)
- .await?;
- Ok(HttpResponseCreated(assoc.into()))
- };
- apictx
- .context
- .external_latencies
- .instrument_dropshot_handler(&rqctx, handler)
- .await
-}
+ /// Attach floating IP
+ ///
+ /// Attach floating IP to an instance or other resource.
+ #[endpoint {
+ method = POST,
+ path = "/v1/floating-ips/{floating_ip}/attach",
+ tags = ["floating-ips"],
+ }]
+ async fn floating_ip_attach(
+ rqctx: RequestContext,
+ path_params: Path,
+ query_params: Query,
+ target: TypedBody,
+ ) -> Result, HttpError> {
+ let apictx = rqctx.context();
+ let handler = async {
+ let opctx =
+ crate::context::op_context_for_external_api(&rqctx).await?;
+ let nexus = &apictx.context.nexus;
+ let path = path_params.into_inner();
+ let query = query_params.into_inner();
+ let floating_ip_selector = params::FloatingIpSelector {
+ floating_ip: path.floating_ip,
+ project: query.project,
+ };
+ let ip = nexus
+ .floating_ip_attach(
+ &opctx,
+ floating_ip_selector,
+ target.into_inner(),
+ )
+ .await?;
+ Ok(HttpResponseAccepted(ip))
+ };
+ apictx
+ .context
+ .external_latencies
+ .instrument_dropshot_handler(&rqctx, handler)
+ .await
+ }
-/// Unlink IP pool from silo
-///
-/// Will fail if there are any outstanding IPs allocated in the silo.
-#[endpoint {
- method = DELETE,
- path = "/v1/system/ip-pools/{pool}/silos/{silo}",
- tags = ["system/networking"],
-}]
-async fn ip_pool_silo_unlink(
- rqctx: RequestContext,
- path_params: Path,
-) -> Result {
- let apictx = rqctx.context();
- let handler = async {
- let opctx = crate::context::op_context_for_external_api(&rqctx).await?;
- let nexus = &apictx.context.nexus;
- let path = path_params.into_inner();
- let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
- let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?;
- nexus.ip_pool_unlink_silo(&opctx, &pool_lookup, &silo_lookup).await?;
- Ok(HttpResponseUpdatedNoContent())
- };
- apictx
- .context
- .external_latencies
- .instrument_dropshot_handler(&rqctx, handler)
- .await
-}
+ /// Detach floating IP
+ ///
+ /// Detach floating IP from instance or other resource.
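These floating-IP handlers all build a selector from the path plus an optional project query parameter; a standalone sketch of that shape, with simplified stand-in types (the real `NameOrId` uses a UUID for the ID case):

/// Simplified stand-in for the API's NameOrId; illustration only.
#[derive(Debug)]
enum NameOrId {
    Name(String),
    Id(u32),
}

/// Simplified stand-in for params::FloatingIpSelector.
#[derive(Debug)]
struct FloatingIpSelector {
    floating_ip: NameOrId,
    project: Option<NameOrId>,
}

fn main() {
    // Roughly what GET /v1/floating-ips/my-ip?project=web produces:
    // the path names the floating IP, the query optionally scopes it.
    let selector = FloatingIpSelector {
        floating_ip: NameOrId::Name("my-ip".to_string()),
        project: Some(NameOrId::Name("web".to_string())),
    };
    println!("{selector:?}");
}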
+ #[endpoint { + method = POST, + path = "/v1/floating-ips/{floating_ip}/detach", + tags = ["floating-ips"], + }] + async fn floating_ip_detach( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let floating_ip_selector = params::FloatingIpSelector { + floating_ip: path.floating_ip, + project: query.project, + }; + let fip_lookup = + nexus.floating_ip_lookup(&opctx, floating_ip_selector)?; + let ip = nexus.floating_ip_detach(&opctx, fip_lookup).await?; + Ok(HttpResponseAccepted(ip)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Make IP pool default for silo -/// -/// When a user asks for an IP (e.g., at instance create time) without -/// specifying a pool, the IP comes from the default pool if a default is -/// configured. When a pool is made the default for a silo, any existing default -/// will remain linked to the silo, but will no longer be the default. -#[endpoint { - method = PUT, - path = "/v1/system/ip-pools/{pool}/silos/{silo}", - tags = ["system/networking"], -}] -async fn ip_pool_silo_update( - rqctx: RequestContext, - path_params: Path, - update: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let update = update.into_inner(); - let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; - let silo_lookup = nexus.silo_lookup(&opctx, path.silo)?; - let assoc = nexus - .ip_pool_silo_update(&opctx, &pool_lookup, &silo_lookup, &update) - .await?; - Ok(HttpResponseOk(assoc.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // Disks + + /// List disks + #[endpoint { + method = GET, + path = "/v1/disks", + tags = ["disks"], + }] + async fn disk_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let project_lookup = + nexus.project_lookup(&opctx, scan_params.selector.clone())?; + let disks = nexus + .disk_list(&opctx, &project_lookup, &paginated_by) + .await? 
+ .into_iter() + .map(|disk| disk.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + disks, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch Oxide service IP pool -#[endpoint { - method = GET, - path = "/v1/system/ip-pools-service", - tags = ["system/networking"], -}] -async fn ip_pool_service_view( - rqctx: RequestContext, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let pool = nexus.ip_pool_service_fetch(&opctx).await?; - Ok(HttpResponseOk(IpPool::from(pool))) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // TODO-correctness See note about instance create. This should be async. + /// Create a disk + #[endpoint { + method = POST, + path = "/v1/disks", + tags = ["disks"] + }] + async fn disk_create( + rqctx: RequestContext, + query_params: Query, + new_disk: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let params = new_disk.into_inner(); + let project_lookup = nexus.project_lookup(&opctx, query)?; + let disk = nexus + .project_create_disk(&opctx, &project_lookup, ¶ms) + .await?; + Ok(HttpResponseCreated(disk.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -type IpPoolRangePaginationParams = PaginationParams; + /// Fetch disk + #[endpoint { + method = GET, + path = "/v1/disks/{disk}", + tags = ["disks"] + }] + async fn disk_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let disk_selector = params::DiskSelector { + disk: path.disk, + project: query.project, + }; + let (.., disk) = + nexus.disk_lookup(&opctx, disk_selector)?.fetch().await?; + Ok(HttpResponseOk(disk.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List ranges for IP pool -/// -/// Ranges are ordered by their first address. -#[endpoint { - method = GET, - path = "/v1/system/ip-pools/{pool}/ranges", - tags = ["system/networking"], -}] -async fn ip_pool_range_list( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let path = path_params.into_inner(); - let marker = match query.page { - WhichPage::First(_) => None, - WhichPage::Next(ref addr) => Some(addr), - }; - let pag_params = DataPageParams { - limit: rqctx.page_limit(&query)?, - direction: PaginationOrder::Ascending, - marker, - }; - let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; - let ranges = nexus - .ip_pool_list_ranges(&opctx, &pool_lookup, &pag_params) - .await? 
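The range-listing endpoints paginate by each range's first address rather than by name or ID; a sketch of that scheme with simplified stand-in types (the real code uses IpNetwork markers and DataPageParams):

use std::net::Ipv4Addr;

/// Simplified range type; illustration only.
#[derive(Clone, Copy, Debug)]
struct Range {
    first: Ipv4Addr,
    last: Ipv4Addr,
}

/// Return one page of ranges ordered by first address, starting strictly
/// after the marker (the previous page's last first-address), if any.
fn page(ranges: &mut Vec<Range>, marker: Option<Ipv4Addr>, limit: usize) -> Vec<Range> {
    ranges.sort_by_key(|r| r.first);
    ranges
        .iter()
        .filter(|r| marker.map_or(true, |m| r.first > m))
        .take(limit)
        .copied()
        .collect()
}

fn main() {
    let mut ranges = vec![
        Range { first: "10.0.2.0".parse().unwrap(), last: "10.0.2.255".parse().unwrap() },
        Range { first: "10.0.0.0".parse().unwrap(), last: "10.0.0.255".parse().unwrap() },
    ];
    let first_page = page(&mut ranges, None, 1);
    assert_eq!(first_page[0].first, "10.0.0.0".parse::<Ipv4Addr>().unwrap());
    // The next request passes the last first-address back as the marker.
    let next = page(&mut ranges, Some(first_page[0].first), 1);
    assert_eq!(next[0].first, "10.0.2.0".parse::<Ipv4Addr>().unwrap());
}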
- .into_iter() - .map(|range| range.into()) - .collect(); - Ok(HttpResponseOk(ResultsPage::new( - ranges, - &EmptyScanParams {}, - |range: &IpPoolRange, _| { - IpNetwork::from(range.range.first_address()) - }, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Delete disk + #[endpoint { + method = DELETE, + path = "/v1/disks/{disk}", + tags = ["disks"], + }] + async fn disk_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let disk_selector = params::DiskSelector { + disk: path.disk, + project: query.project, + }; + let disk_lookup = nexus.disk_lookup(&opctx, disk_selector)?; + nexus.project_delete_disk(&opctx, &disk_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Add range to IP pool -/// -/// IPv6 ranges are not allowed yet. -#[endpoint { - method = POST, - path = "/v1/system/ip-pools/{pool}/ranges/add", - tags = ["system/networking"], -}] -async fn ip_pool_range_add( - rqctx: RequestContext, - path_params: Path, - range_params: TypedBody, -) -> Result, HttpError> { - let apictx = &rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let range = range_params.into_inner(); - let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; - let out = nexus.ip_pool_add_range(&opctx, &pool_lookup, &range).await?; - Ok(HttpResponseCreated(out.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + #[derive(Display, Serialize, Deserialize, JsonSchema)] + #[display(style = "snake_case")] + #[serde(rename_all = "snake_case")] + pub enum DiskMetricName { + Activated, + Flush, + Read, + ReadBytes, + Write, + WriteBytes, + } -/// Remove range from IP pool -#[endpoint { - method = POST, - path = "/v1/system/ip-pools/{pool}/ranges/remove", - tags = ["system/networking"], -}] -async fn ip_pool_range_remove( - rqctx: RequestContext, - path_params: Path, - range_params: TypedBody, -) -> Result { - let apictx = &rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let range = range_params.into_inner(); - let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?; - nexus.ip_pool_delete_range(&opctx, &pool_lookup, &range).await?; - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + #[derive(Serialize, Deserialize, JsonSchema)] + struct DiskMetricsPath { + disk: NameOrId, + metric: DiskMetricName, + } -/// List IP ranges for the Oxide service pool -/// -/// Ranges are ordered by their first address. 
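The DiskMetricName enum added earlier in this hunk renders in snake_case (via parse-display's Display derive) and is interpolated into the crucible_upstairs timeseries name by the metrics handler. A standalone sketch of that mapping, with the Display impl written out by hand for illustration:

use std::fmt;

/// Stand-in for DiskMetricName; the real type derives this snake_case
/// rendering rather than spelling it out.
enum DiskMetricName {
    Activated,
    ReadBytes,
    WriteBytes,
}

impl fmt::Display for DiskMetricName {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s = match self {
            DiskMetricName::Activated => "activated",
            DiskMetricName::ReadBytes => "read_bytes",
            DiskMetricName::WriteBytes => "write_bytes",
        };
        f.write_str(s)
    }
}

fn main() {
    // The handler selects a timeseries named after the metric, e.g.
    // "crucible_upstairs:read_bytes" for GET .../metrics/read_bytes.
    let timeseries = format!("crucible_upstairs:{}", DiskMetricName::ReadBytes);
    assert_eq!(timeseries, "crucible_upstairs:read_bytes");
}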
-#[endpoint { - method = GET, - path = "/v1/system/ip-pools-service/ranges", - tags = ["system/networking"], -}] -async fn ip_pool_service_range_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let marker = match query.page { - WhichPage::First(_) => None, - WhichPage::Next(ref addr) => Some(addr), - }; - let pag_params = DataPageParams { - limit: rqctx.page_limit(&query)?, - direction: PaginationOrder::Ascending, - marker, - }; - let ranges = nexus - .ip_pool_service_list_ranges(&opctx, &pag_params) - .await? - .into_iter() - .map(|range| range.into()) - .collect(); - Ok(HttpResponseOk(ResultsPage::new( - ranges, - &EmptyScanParams {}, - |range: &IpPoolRange, _| { - IpNetwork::from(range.range.first_address()) - }, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch disk metrics + #[endpoint { + method = GET, + path = "/v1/disks/{disk}/metrics/{metric}", + tags = ["disks"], + }] + async fn disk_metrics_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query< + PaginationParams, + >, + selector_params: Query, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + + let selector = selector_params.into_inner(); + let limit = rqctx.page_limit(&query)?; + let disk_selector = params::DiskSelector { + disk: path.disk, + project: selector.project, + }; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let (.., authz_disk) = nexus + .disk_lookup(&opctx, disk_selector)? + .lookup_for(authz::Action::Read) + .await?; -/// Add IP range to Oxide service pool -/// -/// IPv6 ranges are not allowed yet. 
-#[endpoint { - method = POST, - path = "/v1/system/ip-pools-service/ranges/add", - tags = ["system/networking"], -}] -async fn ip_pool_service_range_add( - rqctx: RequestContext, - range_params: TypedBody, -) -> Result, HttpError> { - let apictx = &rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let range = range_params.into_inner(); - let out = nexus.ip_pool_service_add_range(&opctx, &range).await?; - Ok(HttpResponseCreated(out.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Remove IP range from Oxide service pool -#[endpoint { - method = POST, - path = "/v1/system/ip-pools-service/ranges/remove", - tags = ["system/networking"], -}] -async fn ip_pool_service_range_remove( - rqctx: RequestContext, - range_params: TypedBody, -) -> Result { - let apictx = &rqctx.context(); - let nexus = &apictx.context.nexus; - let range = range_params.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - nexus.ip_pool_service_delete_range(&opctx, &range).await?; - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -// Floating IP Addresses + let result = nexus + .select_timeseries( + &format!("crucible_upstairs:{}", path.metric), + &[&format!("upstairs_uuid=={}", authz_disk.id())], + query, + limit, + ) + .await?; -/// List floating IPs -#[endpoint { - method = GET, - path = "/v1/floating-ips", - tags = ["floating-ips"], -}] -async fn floating_ip_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let project_lookup = - nexus.project_lookup(&opctx, scan_params.selector.clone())?; - let ips = nexus - .floating_ips_list(&opctx, &project_lookup, &paginated_by) - .await?; - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - ips, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + Ok(HttpResponseOk(result)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create floating IP -#[endpoint { - method = POST, - path = "/v1/floating-ips", - tags = ["floating-ips"], -}] -async fn floating_ip_create( - rqctx: RequestContext, - query_params: Query, - floating_params: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let floating_params = floating_params.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project_lookup = - nexus.project_lookup(&opctx, query_params.into_inner())?; - let ip = nexus - .floating_ip_create(&opctx, &project_lookup, floating_params) - .await?; - Ok(HttpResponseCreated(ip)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Start importing blocks into disk + /// + /// Start the process of importing blocks 
into a disk + #[endpoint { + method = POST, + path = "/v1/disks/{disk}/bulk-write-start", + tags = ["disks"], + }] + async fn disk_bulk_write_import_start( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + + let disk_selector = params::DiskSelector { + disk: path.disk, + project: query.project, + }; + let disk_lookup = nexus.disk_lookup(&opctx, disk_selector)?; -/// Update floating IP -#[endpoint { - method = PUT, - path = "/v1/floating-ips/{floating_ip}", - tags = ["floating-ips"], -}] -async fn floating_ip_update( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - updated_floating_ip: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let updated_floating_ip_params = updated_floating_ip.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let floating_ip_selector = params::FloatingIpSelector { - project: query.project, - floating_ip: path.floating_ip, - }; - let floating_ip_lookup = - nexus.floating_ip_lookup(&opctx, floating_ip_selector)?; - let floating_ip = nexus - .floating_ip_update( - &opctx, - floating_ip_lookup, - updated_floating_ip_params, - ) - .await?; - Ok(HttpResponseOk(floating_ip)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + nexus.disk_manual_import_start(&opctx, &disk_lookup).await?; -/// Delete floating IP -#[endpoint { - method = DELETE, - path = "/v1/floating-ips/{floating_ip}", - tags = ["floating-ips"], -}] -async fn floating_ip_delete( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let floating_ip_selector = params::FloatingIpSelector { - floating_ip: path.floating_ip, - project: query.project, + Ok(HttpResponseUpdatedNoContent()) }; - let fip_lookup = - nexus.floating_ip_lookup(&opctx, floating_ip_selector)?; - - nexus.floating_ip_delete(&opctx, fip_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch floating IP -#[endpoint { - method = GET, - path = "/v1/floating-ips/{floating_ip}", - tags = ["floating-ips"] -}] -async fn floating_ip_view( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let floating_ip_selector = params::FloatingIpSelector { - floating_ip: path.floating_ip, - project: query.project, - }; - let (.., fip) = nexus - .floating_ip_lookup(&opctx, floating_ip_selector)? 
- .fetch() - .await?; - Ok(HttpResponseOk(fip.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Import blocks into disk + #[endpoint { + method = POST, + path = "/v1/disks/{disk}/bulk-write", + tags = ["disks"], + }] + async fn disk_bulk_write_import( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + import_params: TypedBody, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let params = import_params.into_inner(); + + let disk_selector = params::DiskSelector { + disk: path.disk, + project: query.project, + }; + let disk_lookup = nexus.disk_lookup(&opctx, disk_selector)?; -/// Attach floating IP -/// -/// Attach floating IP to an instance or other resource. -#[endpoint { - method = POST, - path = "/v1/floating-ips/{floating_ip}/attach", - tags = ["floating-ips"], -}] -async fn floating_ip_attach( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - target: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let floating_ip_selector = params::FloatingIpSelector { - floating_ip: path.floating_ip, - project: query.project, - }; - let ip = nexus - .floating_ip_attach( - &opctx, - floating_ip_selector, - target.into_inner(), - ) - .await?; - Ok(HttpResponseAccepted(ip)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + nexus.disk_manual_import(&disk_lookup, params).await?; -/// Detach floating IP -/// -// Detach floating IP from instance or other resource. 
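Every handler in this file, on both sides of the diff, follows the same shape: the endpoint body is built as an async block and then routed through `external_latencies.instrument_dropshot_handler(&rqctx, handler)`, so request latency is recorded whether the body succeeds or fails. Below is a minimal sketch of that wrapper pattern with stand-in types; `LatencyTracker` and its methods are illustrative (the real instrumentation also tags samples by endpoint and status code), and the example assumes the `tokio` crate for the runtime.

```rust
use std::time::{Duration, Instant};

// Stand-in for the real `external_latencies` histogram.
struct LatencyTracker;

impl LatencyTracker {
    // Hypothetical signature: take the handler's future, time it, record
    // the sample, and pass the result through unchanged.
    async fn instrument<T, E, F>(&self, handler: F) -> Result<T, E>
    where
        F: std::future::Future<Output = Result<T, E>>,
    {
        let start = Instant::now();
        let result = handler.await;
        self.record(start.elapsed(), result.is_ok());
        result
    }

    fn record(&self, latency: Duration, success: bool) {
        println!("latency={latency:?} success={success}");
    }
}

#[tokio::main]
async fn main() {
    let tracker = LatencyTracker;
    // Mirrors the `let handler = async { .. };` blocks in the handlers above.
    let handler = async { Ok::<_, String>("created") };
    let out = tracker.instrument(handler).await;
    println!("{out:?}");
}
```

Taking the body as a future rather than inlining the timing keeps every endpoint's error paths inside the measured span.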
-#[endpoint { - method = POST, - path = "/v1/floating-ips/{floating_ip}/detach", - tags = ["floating-ips"], -}] -async fn floating_ip_detach( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let floating_ip_selector = params::FloatingIpSelector { - floating_ip: path.floating_ip, - project: query.project, + Ok(HttpResponseUpdatedNoContent()) }; - let fip_lookup = - nexus.floating_ip_lookup(&opctx, floating_ip_selector)?; - let ip = nexus.floating_ip_detach(&opctx, fip_lookup).await?; - Ok(HttpResponseAccepted(ip)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Disks + /// Stop importing blocks into disk + /// + /// Stop the process of importing blocks into a disk + #[endpoint { + method = POST, + path = "/v1/disks/{disk}/bulk-write-stop", + tags = ["disks"], + }] + async fn disk_bulk_write_import_stop( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + + let disk_selector = params::DiskSelector { + disk: path.disk, + project: query.project, + }; + let disk_lookup = nexus.disk_lookup(&opctx, disk_selector)?; -/// List disks -#[endpoint { - method = GET, - path = "/v1/disks", - tags = ["disks"], -}] -async fn disk_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let project_lookup = - nexus.project_lookup(&opctx, scan_params.selector.clone())?; - let disks = nexus - .disk_list(&opctx, &project_lookup, &paginated_by) - .await? - .into_iter() - .map(|disk| disk.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - disks, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + nexus.disk_manual_import_stop(&opctx, &disk_lookup).await?; -// TODO-correctness See note about instance create. This should be async. 
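The disk endpoints that follow all resolve their target the same way: a `NameOrId` path component plus an optional `project` query parameter are combined into a `params::DiskSelector`, and `nexus.disk_lookup` turns that into a lookup or a client error. A sketch of the selector logic under simplified types; `NameOrId`, `DiskSelector`, and the error strings here are illustrative rather than copies of the real nexus API, and the example assumes the `uuid` crate.

```rust
use uuid::Uuid;

#[derive(Debug)]
enum NameOrId {
    Name(String),
    Id(Uuid),
}

struct DiskSelector {
    disk: NameOrId,
    project: Option<NameOrId>,
}

fn disk_lookup(selector: DiskSelector) -> Result<String, String> {
    match (selector.disk, selector.project) {
        // An ID is globally unique, so no project scope is needed; passing
        // both is treated as a caller error here (an assumption about the
        // selector contract, not the real error message).
        (NameOrId::Id(id), None) => Ok(format!("lookup disk by id {id}")),
        (NameOrId::Id(_), Some(_)) => {
            Err("project should not be specified when disk is an ID".into())
        }
        // A name is only unique within a project, so one must be supplied.
        (NameOrId::Name(name), Some(project)) => {
            Ok(format!("lookup disk {name:?} in project {project:?}"))
        }
        (NameOrId::Name(_), None) => {
            Err("disk names require a `project` query parameter".into())
        }
    }
}

fn main() {
    let sel = DiskSelector {
        disk: NameOrId::Name("data".into()),
        project: Some(NameOrId::Name("web".into())),
    };
    println!("{:?}", disk_lookup(sel));
}
```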
-/// Create a disk -#[endpoint { - method = POST, - path = "/v1/disks", - tags = ["disks"] -}] -async fn disk_create( - rqctx: RequestContext, - query_params: Query, - new_disk: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let params = new_disk.into_inner(); - let project_lookup = nexus.project_lookup(&opctx, query)?; - let disk = - nexus.project_create_disk(&opctx, &project_lookup, ¶ms).await?; - Ok(HttpResponseCreated(disk.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch disk -#[endpoint { - method = GET, - path = "/v1/disks/{disk}", - tags = ["disks"] -}] -async fn disk_view( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let disk_selector = - params::DiskSelector { disk: path.disk, project: query.project }; - let (.., disk) = - nexus.disk_lookup(&opctx, disk_selector)?.fetch().await?; - Ok(HttpResponseOk(disk.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Confirm disk block import completion + #[endpoint { + method = POST, + path = "/v1/disks/{disk}/finalize", + tags = ["disks"], + }] + async fn disk_finalize_import( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + finalize_params: TypedBody, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let params = finalize_params.into_inner(); + let disk_selector = params::DiskSelector { + disk: path.disk, + project: query.project, + }; + let disk_lookup = nexus.disk_lookup(&opctx, disk_selector)?; -/// Delete disk -#[endpoint { - method = DELETE, - path = "/v1/disks/{disk}", - tags = ["disks"], -}] -async fn disk_delete( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let disk_selector = - params::DiskSelector { disk: path.disk, project: query.project }; - let disk_lookup = nexus.disk_lookup(&opctx, disk_selector)?; - nexus.project_delete_disk(&opctx, &disk_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + nexus.disk_finalize_import(&opctx, &disk_lookup, ¶ms).await?; -#[derive(Display, Serialize, Deserialize, JsonSchema)] -#[display(style = "snake_case")] -#[serde(rename_all = "snake_case")] -pub enum DiskMetricName { - Activated, - Flush, - Read, - ReadBytes, - Write, - WriteBytes, -} + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .context 
+ .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -#[derive(Serialize, Deserialize, JsonSchema)] -struct DiskMetricsPath { - disk: NameOrId, - metric: DiskMetricName, -} + // Instances + + /// List instances + #[endpoint { + method = GET, + path = "/v1/instances", + tags = ["instances"], + }] + async fn instance_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let project_lookup = + nexus.project_lookup(&opctx, scan_params.selector.clone())?; + let instances = nexus + .instance_list(&opctx, &project_lookup, &paginated_by) + .await? + .into_iter() + .map(|i| i.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + instances, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch disk metrics -#[endpoint { - method = GET, - path = "/v1/disks/{disk}/metrics/{metric}", - tags = ["disks"], -}] -async fn disk_metrics_list( - rqctx: RequestContext, - path_params: Path, - query_params: Query< - PaginationParams, - >, - selector_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { + /// Create instance + #[endpoint { + method = POST, + path = "/v1/instances", + tags = ["instances"], + }] + async fn instance_create( + rqctx: RequestContext, + query_params: Query, + new_instance: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - - let selector = selector_params.into_inner(); - let limit = rqctx.page_limit(&query)?; - let disk_selector = - params::DiskSelector { disk: path.disk, project: selector.project }; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let (.., authz_disk) = nexus - .disk_lookup(&opctx, disk_selector)? 
- .lookup_for(authz::Action::Read) - .await?; - - let result = nexus - .select_timeseries( - &format!("crucible_upstairs:{}", path.metric), - &[&format!("upstairs_uuid=={}", authz_disk.id())], - query, - limit, - ) - .await?; - - Ok(HttpResponseOk(result)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let project_selector = query_params.into_inner(); + let new_instance_params = &new_instance.into_inner(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let project_lookup = + nexus.project_lookup(&opctx, project_selector)?; + let instance = nexus + .project_create_instance( + &opctx, + &project_lookup, + &new_instance_params, + ) + .await?; + Ok(HttpResponseCreated(instance.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Start importing blocks into disk -/// -/// Start the process of importing blocks into a disk -#[endpoint { - method = POST, - path = "/v1/disks/{disk}/bulk-write-start", - tags = ["disks"], -}] -async fn disk_bulk_write_import_start( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Fetch instance + #[endpoint { + method = GET, + path = "/v1/instances/{instance}", + tags = ["instances"], + }] + async fn instance_view( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_selector = params::InstanceSelector { + project: query.project, + instance: path.instance, + }; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + let (.., authz_instance) = + instance_lookup.lookup_for(authz::Action::Read).await?; + let instance_and_vmm = nexus + .datastore() + .instance_fetch_with_vmm(&opctx, &authz_instance) + .await?; + Ok(HttpResponseOk(instance_and_vmm.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let disk_selector = - params::DiskSelector { disk: path.disk, project: query.project }; - let disk_lookup = nexus.disk_lookup(&opctx, disk_selector)?; - - nexus.disk_manual_import_start(&opctx, &disk_lookup).await?; - - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Import blocks into disk -#[endpoint { - method = POST, - path = "/v1/disks/{disk}/bulk-write", - tags = ["disks"], -}] -async fn disk_bulk_write_import( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - import_params: TypedBody, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Delete instance + #[endpoint { + method = DELETE, + path = "/v1/instances/{instance}", + tags = ["instances"], + }] + async fn instance_delete( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); - let params = 
import_params.into_inner(); - - let disk_selector = - params::DiskSelector { disk: path.disk, project: query.project }; - let disk_lookup = nexus.disk_lookup(&opctx, disk_selector)?; - - nexus.disk_manual_import(&disk_lookup, params).await?; - - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let instance_selector = params::InstanceSelector { + project: query.project, + instance: path.instance, + }; + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + nexus.project_destroy_instance(&opctx, &instance_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Stop importing blocks into disk -/// -/// Stop the process of importing blocks into a disk -#[endpoint { - method = POST, - path = "/v1/disks/{disk}/bulk-write-stop", - tags = ["disks"], -}] -async fn disk_bulk_write_import_stop( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Reboot an instance + #[endpoint { + method = POST, + path = "/v1/instances/{instance}/reboot", + tags = ["instances"], + }] + async fn instance_reboot( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); + let instance_selector = params::InstanceSelector { + project: query.project, + instance: path.instance, + }; + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + let instance = + nexus.instance_reboot(&opctx, &instance_lookup).await?; + Ok(HttpResponseAccepted(instance.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let disk_selector = - params::DiskSelector { disk: path.disk, project: query.project }; - let disk_lookup = nexus.disk_lookup(&opctx, disk_selector)?; - - nexus.disk_manual_import_stop(&opctx, &disk_lookup).await?; - - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Confirm disk block import completion -#[endpoint { - method = POST, - path = "/v1/disks/{disk}/finalize", - tags = ["disks"], -}] -async fn disk_finalize_import( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - finalize_params: TypedBody, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Boot instance + #[endpoint { + method = POST, + path = "/v1/instances/{instance}/start", + tags = ["instances"], + }] + async fn instance_start( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); - let params = finalize_params.into_inner(); - let disk_selector = - params::DiskSelector { disk: path.disk, project: query.project }; - let 
disk_lookup = nexus.disk_lookup(&opctx, disk_selector)?; - - nexus.disk_finalize_import(&opctx, &disk_lookup, ¶ms).await?; - - Ok(HttpResponseUpdatedNoContent()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -// Instances - -/// List instances -#[endpoint { - method = GET, - path = "/v1/instances", - tags = ["instances"], -}] -async fn instance_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project_lookup = - nexus.project_lookup(&opctx, scan_params.selector.clone())?; - let instances = nexus - .instance_list(&opctx, &project_lookup, &paginated_by) - .await? - .into_iter() - .map(|i| i.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - instances, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Create instance -#[endpoint { - method = POST, - path = "/v1/instances", - tags = ["instances"], -}] -async fn instance_create( - rqctx: RequestContext, - query_params: Query, - new_instance: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let project_selector = query_params.into_inner(); - let new_instance_params = &new_instance.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project_lookup = nexus.project_lookup(&opctx, project_selector)?; - let instance = nexus - .project_create_instance( - &opctx, - &project_lookup, - &new_instance_params, - ) - .await?; - Ok(HttpResponseCreated(instance.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Fetch instance -#[endpoint { - method = GET, - path = "/v1/instances/{instance}", - tags = ["instances"], -}] -async fn instance_view( - rqctx: RequestContext, - query_params: Query, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let instance_selector = params::InstanceSelector { project: query.project, instance: path.instance, }; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - let (.., authz_instance) = - instance_lookup.lookup_for(authz::Action::Read).await?; - let instance_and_vmm = nexus - .datastore() - .instance_fetch_with_vmm(&opctx, &authz_instance) - .await?; - Ok(HttpResponseOk(instance_and_vmm.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Delete instance -#[endpoint { - method = DELETE, - path = "/v1/instances/{instance}", - tags = ["instances"], -}] -async fn instance_delete( - rqctx: RequestContext, - query_params: Query, - path_params: Path, -) -> Result { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = 
query_params.into_inner(); - let instance_selector = params::InstanceSelector { - project: query.project, - instance: path.instance, - }; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - nexus.project_destroy_instance(&opctx, &instance_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Reboot an instance -#[endpoint { - method = POST, - path = "/v1/instances/{instance}/reboot", - tags = ["instances"], -}] -async fn instance_reboot( - rqctx: RequestContext, - query_params: Query, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let instance_selector = params::InstanceSelector { - project: query.project, - instance: path.instance, - }; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - let instance = nexus.instance_reboot(&opctx, &instance_lookup).await?; - Ok(HttpResponseAccepted(instance.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Boot instance -#[endpoint { - method = POST, - path = "/v1/instances/{instance}/start", - tags = ["instances"], -}] -async fn instance_start( - rqctx: RequestContext, - query_params: Query, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let instance_selector = params::InstanceSelector { - project: query.project, - instance: path.instance, - }; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - let instance = nexus.instance_start(&opctx, &instance_lookup).await?; - Ok(HttpResponseAccepted(instance.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Stop instance -#[endpoint { - method = POST, - path = "/v1/instances/{instance}/stop", - tags = ["instances"], -}] -async fn instance_stop( - rqctx: RequestContext, - query_params: Query, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let instance_selector = params::InstanceSelector { - project: query.project, - instance: path.instance, - }; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - let instance = nexus.instance_stop(&opctx, &instance_lookup).await?; - Ok(HttpResponseAccepted(instance.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + let instance = + nexus.instance_start(&opctx, &instance_lookup).await?; + Ok(HttpResponseAccepted(instance.into())) + }; + apictx + .context + 
.external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch instance serial console -#[endpoint { - method = GET, - path = "/v1/instances/{instance}/serial-console", - tags = ["instances"], -}] -async fn instance_serial_console( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Stop instance + #[endpoint { + method = POST, + path = "/v1/instances/{instance}/stop", + tags = ["instances"], + }] + async fn instance_stop( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); let instance_selector = params::InstanceSelector { - project: query.project.clone(), + project: query.project, instance: path.instance, }; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - let data = nexus - .instance_serial_console_data(&opctx, &instance_lookup, &query) - .await?; - Ok(HttpResponseOk(data)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + let instance = + nexus.instance_stop(&opctx, &instance_lookup).await?; + Ok(HttpResponseAccepted(instance.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Stream instance serial console -#[channel { - protocol = WEBSOCKETS, - path = "/v1/instances/{instance}/serial-console/stream", - tags = ["instances"], -}] -async fn instance_serial_console_stream( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - conn: WebsocketConnection, -) -> WebsocketChannelResult { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let instance_selector = params::InstanceSelector { - project: query.project.clone(), - instance: path.instance, - }; - let mut client_stream = WebSocketStream::from_raw_socket( - conn.into_inner(), - WebSocketRole::Server, - None, - ) - .await; - match nexus.instance_lookup(&opctx, instance_selector) { - Ok(instance_lookup) => { - nexus - .instance_serial_console_stream( - &opctx, - client_stream, - &instance_lookup, - &query, - ) + /// Fetch instance serial console + #[endpoint { + method = GET, + path = "/v1/instances/{instance}/serial-console", + tags = ["instances"], + }] + async fn instance_serial_console( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let instance_selector = params::InstanceSelector { + project: query.project.clone(), + instance: path.instance, + }; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + let data = nexus + .instance_serial_console_data(&opctx, &instance_lookup, &query) .await?; - Ok(()) - } - 
Err(e) => { - let _ = client_stream - .close(Some(CloseFrame { - code: CloseCode::Error, - reason: e.to_string().into(), - })) - .await - .is_ok(); - Err(e.into()) - } + Ok(HttpResponseOk(data)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await } -} -/// List SSH public keys for instance -/// -/// List SSH public keys injected via cloud-init during instance creation. Note -/// that this list is a snapshot in time and will not reflect updates made after -/// the instance is created. -#[endpoint { - method = GET, - path = "/v1/instances/{instance}/ssh-public-keys", - tags = ["instances"], -}] -async fn instance_ssh_public_key_list( - rqctx: RequestContext, - path_params: Path, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { + /// Stream instance serial console + #[channel { + protocol = WEBSOCKETS, + path = "/v1/instances/{instance}/serial-console/stream", + tags = ["instances"], + }] + async fn instance_serial_console_stream( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + conn: WebsocketConnection, + ) -> WebsocketChannelResult { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let instance_selector = params::InstanceSelector { - project: scan_params.selector.project.clone(), + project: query.project.clone(), instance: path.instance, }; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - let ssh_keys = nexus - .instance_ssh_keys_list(&opctx, &instance_lookup, &paginated_by) - .await? 
- .into_iter() - .map(|k| k.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - ssh_keys, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let mut client_stream = WebSocketStream::from_raw_socket( + conn.into_inner(), + WebSocketRole::Server, + None, + ) + .await; + match nexus.instance_lookup(&opctx, instance_selector) { + Ok(instance_lookup) => { + nexus + .instance_serial_console_stream( + &opctx, + client_stream, + &instance_lookup, + &query, + ) + .await?; + Ok(()) + } + Err(e) => { + let _ = client_stream + .close(Some(CloseFrame { + code: CloseCode::Error, + reason: e.to_string().into(), + })) + .await + .is_ok(); + Err(e.into()) + } + } + } -/// List disks for instance -#[endpoint { - method = GET, - path = "/v1/instances/{instance}/disks", - tags = ["instances"], -}] -async fn instance_disk_list( - rqctx: RequestContext, - query_params: Query>, - path_params: Path, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let instance_selector = params::InstanceSelector { - project: scan_params.selector.project.clone(), - instance: path.instance, + /// List SSH public keys for instance + /// + /// List SSH public keys injected via cloud-init during instance creation. Note + /// that this list is a snapshot in time and will not reflect updates made after + /// the instance is created. + #[endpoint { + method = GET, + path = "/v1/instances/{instance}/ssh-public-keys", + tags = ["instances"], + }] + async fn instance_ssh_public_key_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query< + PaginatedByNameOrId, + >, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_selector = params::InstanceSelector { + project: scan_params.selector.project.clone(), + instance: path.instance, + }; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + let ssh_keys = nexus + .instance_ssh_keys_list(&opctx, &instance_lookup, &paginated_by) + .await? + .into_iter() + .map(|k| k.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + ssh_keys, + &marker_for_name_or_id, + )?)) }; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - let disks = nexus - .instance_list_disks(&opctx, &instance_lookup, &paginated_by) - .await? 
- .into_iter() - .map(|d| d.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - disks, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Attach disk to instance -#[endpoint { - method = POST, - path = "/v1/instances/{instance}/disks/attach", - tags = ["instances"], -}] -async fn instance_disk_attach( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - disk_to_attach: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let disk = disk_to_attach.into_inner().disk; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let instance_selector = params::InstanceSelector { - project: query.project, - instance: path.instance, + /// List disks for instance + #[endpoint { + method = GET, + path = "/v1/instances/{instance}/disks", + tags = ["instances"], + }] + async fn instance_disk_list( + rqctx: RequestContext, + query_params: Query< + PaginatedByNameOrId, + >, + path_params: Path, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_selector = params::InstanceSelector { + project: scan_params.selector.project.clone(), + instance: path.instance, + }; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + let disks = nexus + .instance_list_disks(&opctx, &instance_lookup, &paginated_by) + .await? 
+ .into_iter() + .map(|d| d.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + disks, + &marker_for_name_or_id, + )?)) }; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - let disk = - nexus.instance_attach_disk(&opctx, &instance_lookup, disk).await?; - Ok(HttpResponseAccepted(disk.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Detach disk from instance -#[endpoint { - method = POST, - path = "/v1/instances/{instance}/disks/detach", - tags = ["instances"], -}] -async fn instance_disk_detach( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - disk_to_detach: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Attach disk to instance + #[endpoint { + method = POST, + path = "/v1/instances/{instance}/disks/attach", + tags = ["instances"], + }] + async fn instance_disk_attach( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + disk_to_attach: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let path = path_params.into_inner(); let query = query_params.into_inner(); - let disk = disk_to_detach.into_inner().disk; - let instance_selector = params::InstanceSelector { - project: query.project, - instance: path.instance, + let disk = disk_to_attach.into_inner().disk; + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_selector = params::InstanceSelector { + project: query.project, + instance: path.instance, + }; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + let disk = nexus + .instance_attach_disk(&opctx, &instance_lookup, disk) + .await?; + Ok(HttpResponseAccepted(disk.into())) }; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - let disk = - nexus.instance_detach_disk(&opctx, &instance_lookup, disk).await?; - Ok(HttpResponseAccepted(disk.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -// Certificates + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List certificates for external endpoints -/// -/// Returns a list of TLS certificates used for the external API (for the -/// current Silo). These are sorted by creation date, with the most recent -/// certificates appearing first. -#[endpoint { - method = GET, - path = "/v1/certificates", - tags = ["silos"], -}] -async fn certificate_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let certs = nexus - .certificates_list(&opctx, &paginated_by) - .await? 
- .into_iter() - .map(|d| d.try_into()) - .collect::, Error>>()?; - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - certs, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Detach disk from instance + #[endpoint { + method = POST, + path = "/v1/instances/{instance}/disks/detach", + tags = ["instances"], + }] + async fn instance_disk_detach( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + disk_to_detach: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let disk = disk_to_detach.into_inner().disk; + let instance_selector = params::InstanceSelector { + project: query.project, + instance: path.instance, + }; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + let disk = nexus + .instance_detach_disk(&opctx, &instance_lookup, disk) + .await?; + Ok(HttpResponseAccepted(disk.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create new system-wide x.509 certificate -/// -/// This certificate is automatically used by the Oxide Control plane to serve -/// external connections. -#[endpoint { - method = POST, - path = "/v1/certificates", - tags = ["silos"] -}] -async fn certificate_create( - rqctx: RequestContext, - new_cert: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let new_cert_params = new_cert.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let cert = nexus.certificate_create(&opctx, new_cert_params).await?; - Ok(HttpResponseCreated(cert.try_into()?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // Certificates + + /// List certificates for external endpoints + /// + /// Returns a list of TLS certificates used for the external API (for the + /// current Silo). These are sorted by creation date, with the most recent + /// certificates appearing first. + #[endpoint { + method = GET, + path = "/v1/certificates", + tags = ["silos"], + }] + async fn certificate_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let certs = nexus + .certificates_list(&opctx, &paginated_by) + .await? 
+ .into_iter() + .map(|d| d.try_into()) + .collect::, Error>>()?; + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + certs, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Path parameters for Certificate requests -#[derive(Deserialize, JsonSchema)] -struct CertificatePathParam { - certificate: NameOrId, -} + /// Create new system-wide x.509 certificate + /// + /// This certificate is automatically used by the Oxide Control plane to serve + /// external connections. + #[endpoint { + method = POST, + path = "/v1/certificates", + tags = ["silos"] + }] + async fn certificate_create( + rqctx: RequestContext, + new_cert: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let new_cert_params = new_cert.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let cert = + nexus.certificate_create(&opctx, new_cert_params).await?; + Ok(HttpResponseCreated(cert.try_into()?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch certificate -/// -/// Returns the details of a specific certificate -#[endpoint { - method = GET, - path = "/v1/certificates/{certificate}", - tags = ["silos"], -}] -async fn certificate_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let (.., cert) = - nexus.certificate_lookup(&opctx, &path.certificate).fetch().await?; - Ok(HttpResponseOk(cert.try_into()?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Path parameters for Certificate requests + #[derive(Deserialize, JsonSchema)] + struct CertificatePathParam { + certificate: NameOrId, + } -/// Delete certificate -/// -/// Permanently delete a certificate. This operation cannot be undone. 
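The certificate handlers convert database models into view types with `try_into()` rather than `into()`, because decoding stored certificate data can fail; the list endpoint collects into `Result<Vec<_>, Error>` so a single undecodable row fails the page instead of being silently dropped. A minimal sketch of that fallible-conversion pattern with made-up types:

```rust
struct DbCertificate {
    pem: String,
}

#[derive(Debug)]
struct CertificateView {
    subject: String,
}

impl TryFrom<DbCertificate> for CertificateView {
    type Error = String;
    fn try_from(db: DbCertificate) -> Result<Self, Self::Error> {
        if db.pem.is_empty() {
            // Decoding failures surface as errors instead of panics.
            return Err("stored certificate is empty".into());
        }
        Ok(CertificateView { subject: db.pem })
    }
}

fn main() {
    let rows = vec![
        DbCertificate { pem: "cert-a".into() },
        DbCertificate { pem: String::new() },
    ];
    // Mirrors `.map(|d| d.try_into()).collect::<Result<Vec<_>, Error>>()?`:
    // collecting into Result short-circuits on the first bad row.
    let views: Result<Vec<CertificateView>, String> =
        rows.into_iter().map(CertificateView::try_from).collect();
    println!("{views:?}");
}
```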
-#[endpoint { - method = DELETE, - path = "/v1/certificates/{certificate}", - tags = ["silos"], -}] -async fn certificate_delete( - rqctx: RequestContext, - path_params: Path, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - nexus - .certificate_delete( - &opctx, - nexus.certificate_lookup(&opctx, &path.certificate), - ) - .await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch certificate + /// + /// Returns the details of a specific certificate + #[endpoint { + method = GET, + path = "/v1/certificates/{certificate}", + tags = ["silos"], + }] + async fn certificate_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let (.., cert) = nexus + .certificate_lookup(&opctx, &path.certificate) + .fetch() + .await?; + Ok(HttpResponseOk(cert.try_into()?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create address lot -#[endpoint { - method = POST, - path = "/v1/system/networking/address-lot", - tags = ["system/networking"], -}] -async fn networking_address_lot_create( - rqctx: RequestContext, - new_address_lot: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let params = new_address_lot.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let result = nexus.address_lot_create(&opctx, params).await?; - - let lot: AddressLot = result.lot.into(); - let blocks: Vec = - result.blocks.iter().map(|b| b.clone().into()).collect(); - - Ok(HttpResponseCreated(AddressLotCreateResponse { lot, blocks })) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Delete certificate + /// + /// Permanently delete a certificate. This operation cannot be undone. 
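Note that `certificate_delete` passes the unresolved result of `nexus.certificate_lookup(...)` into the delete operation instead of fetching the record first, so the authorization check and the destructive action happen together inside the operation. A sketch of that deferred-lookup shape, with illustrative stand-ins for the nexus types:

```rust
struct CertificateLookup {
    // The NameOrId from the path, simplified to a string here.
    certificate: String,
}

fn certificate_lookup(certificate: &str) -> CertificateLookup {
    // No I/O happens here; resolution is deferred to the operation.
    CertificateLookup { certificate: certificate.to_string() }
}

fn certificate_delete(lookup: CertificateLookup) -> Result<(), String> {
    // In the real code this is where the authz check and the database
    // fetch/delete happen, inside one operation, so a handler can't
    // accidentally act on a record it never authorized.
    println!("deleting certificate {}", lookup.certificate);
    Ok(())
}

fn main() {
    // Mirrors `nexus.certificate_delete(&opctx, nexus.certificate_lookup(..))`.
    certificate_delete(certificate_lookup("default-cert")).unwrap();
}
```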
+ #[endpoint { + method = DELETE, + path = "/v1/certificates/{certificate}", + tags = ["silos"], + }] + async fn certificate_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + nexus + .certificate_delete( + &opctx, + nexus.certificate_lookup(&opctx, &path.certificate), + ) + .await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete address lot -#[endpoint { - method = DELETE, - path = "/v1/system/networking/address-lot/{address_lot}", - tags = ["system/networking"], -}] -async fn networking_address_lot_delete( - rqctx: RequestContext, - path_params: Path, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let address_lot_lookup = - nexus.address_lot_lookup(&opctx, path.address_lot)?; - nexus.address_lot_delete(&opctx, &address_lot_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Create address lot + #[endpoint { + method = POST, + path = "/v1/system/networking/address-lot", + tags = ["system/networking"], + }] + async fn networking_address_lot_create( + rqctx: RequestContext, + new_address_lot: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let params = new_address_lot.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let result = nexus.address_lot_create(&opctx, params).await?; + + let lot: AddressLot = result.lot.into(); + let blocks: Vec = + result.blocks.iter().map(|b| b.clone().into()).collect(); + + Ok(HttpResponseCreated(AddressLotCreateResponse { lot, blocks })) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List address lots -#[endpoint { - method = GET, - path = "/v1/system/networking/address-lot", - tags = ["system/networking"], -}] -async fn networking_address_lot_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let lots = nexus - .address_lot_list(&opctx, &paginated_by) - .await? 
- .into_iter() - .map(|p| p.into()) - .collect(); - - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - lots, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Delete address lot + #[endpoint { + method = DELETE, + path = "/v1/system/networking/address-lot/{address_lot}", + tags = ["system/networking"], + }] + async fn networking_address_lot_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let address_lot_lookup = + nexus.address_lot_lookup(&opctx, path.address_lot)?; + nexus.address_lot_delete(&opctx, &address_lot_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List blocks in address lot -#[endpoint { - method = GET, - path = "/v1/system/networking/address-lot/{address_lot}/blocks", - tags = ["system/networking"], -}] -async fn networking_address_lot_block_list( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let path = path_params.into_inner(); - let pagparams = data_page_params_for(&rqctx, &query)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let address_lot_lookup = - nexus.address_lot_lookup(&opctx, path.address_lot)?; - let blocks = nexus - .address_lot_block_list(&opctx, &address_lot_lookup, &pagparams) - .await? - .into_iter() - .map(|p| p.into()) - .collect(); - - Ok(HttpResponseOk(ScanById::results_page( - &query, - blocks, - &|_, x: &AddressLotBlock| x.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List address lots + #[endpoint { + method = GET, + path = "/v1/system/networking/address-lot", + tags = ["system/networking"], + }] + async fn networking_address_lot_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let lots = nexus + .address_lot_list(&opctx, &paginated_by) + .await? 
+ .into_iter() + .map(|p| p.into()) + .collect(); + + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + lots, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create loopback address -#[endpoint { - method = POST, - path = "/v1/system/networking/loopback-address", - tags = ["system/networking"], -}] -async fn networking_loopback_address_create( - rqctx: RequestContext, - new_loopback_address: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let params = new_loopback_address.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let result = nexus.loopback_address_create(&opctx, params).await?; + /// List blocks in address lot + #[endpoint { + method = GET, + path = "/v1/system/networking/address-lot/{address_lot}/blocks", + tags = ["system/networking"], + }] + async fn networking_address_lot_block_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let path = path_params.into_inner(); + let pagparams = data_page_params_for(&rqctx, &query)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let address_lot_lookup = + nexus.address_lot_lookup(&opctx, path.address_lot)?; + let blocks = nexus + .address_lot_block_list(&opctx, &address_lot_lookup, &pagparams) + .await? + .into_iter() + .map(|p| p.into()) + .collect(); + + Ok(HttpResponseOk(ScanById::results_page( + &query, + blocks, + &|_, x: &AddressLotBlock| x.id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let addr: LoopbackAddress = result.into(); + /// Create loopback address + #[endpoint { + method = POST, + path = "/v1/system/networking/loopback-address", + tags = ["system/networking"], + }] + async fn networking_loopback_address_create( + rqctx: RequestContext, + new_loopback_address: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let params = new_loopback_address.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let result = nexus.loopback_address_create(&opctx, params).await?; + + let addr: LoopbackAddress = result.into(); + + Ok(HttpResponseCreated(addr)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - Ok(HttpResponseCreated(addr)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + #[derive(Serialize, Deserialize, JsonSchema)] + pub struct LoopbackAddressPath { + /// The rack to use when selecting the loopback address. + pub rack_id: Uuid, -#[derive(Serialize, Deserialize, JsonSchema)] -pub struct LoopbackAddressPath { - /// The rack to use when selecting the loopback address. - pub rack_id: Uuid, + /// The switch location to use when selecting the loopback address. + pub switch_location: Name, - /// The switch location to use when selecting the loopback address. - pub switch_location: Name, + /// The IP address and subnet mask to use when selecting the loopback + /// address. + pub address: IpAddr, - /// The IP address and subnet mask to use when selecting the loopback - /// address. 
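The loopback-address delete endpoint below reassembles the `{address}/{subnet_mask}` path components into a single network value, rejecting impossible prefixes up front with a 400. A sketch using the `ipnetwork` crate's `IpNetwork::new`, which takes an address and a prefix length and fails when they are inconsistent; the string error here stands in for `HttpError::for_bad_request`.

```rust
use std::net::{IpAddr, Ipv4Addr};

use ipnetwork::IpNetwork;

fn parse_loopback(address: IpAddr, subnet_mask: u8) -> Result<IpNetwork, String> {
    // IpNetwork::new fails when the prefix is longer than the address
    // allows (e.g. /33 for IPv4), which is the case the handler maps to
    // "invalid ip address".
    IpNetwork::new(address, subnet_mask).map_err(|_| "invalid ip address".to_string())
}

fn main() {
    let ok = parse_loopback(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 10)), 24);
    let bad = parse_loopback(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 10)), 33);
    println!("{ok:?} {bad:?}");
}
```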
- pub address: IpAddr, + /// The IP address and subnet mask to use when selecting the loopback + /// address. + pub subnet_mask: u8, + } - /// The IP address and subnet mask to use when selecting the loopback - /// address. - pub subnet_mask: u8, -} + /// Delete loopback address + #[endpoint { + method = DELETE, + path = "/v1/system/networking/loopback-address/{rack_id}/{switch_location}/{address}/{subnet_mask}", + tags = ["system/networking"], + }] + async fn networking_loopback_address_delete( + rqctx: RequestContext, + path: Path, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let addr = match IpNetwork::new(path.address, path.subnet_mask) { + Ok(addr) => Ok(addr), + Err(_) => Err(HttpError::for_bad_request( + None, + "invalid ip address".into(), + )), + }?; + nexus + .loopback_address_delete( + &opctx, + path.rack_id, + path.switch_location.clone(), + addr.into(), + ) + .await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete loopback address -#[endpoint { - method = DELETE, - path = "/v1/system/networking/loopback-address/{rack_id}/{switch_location}/{address}/{subnet_mask}", - tags = ["system/networking"], -}] -async fn networking_loopback_address_delete( - rqctx: RequestContext, - path: Path, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let addr = match IpNetwork::new(path.address, path.subnet_mask) { - Ok(addr) => Ok(addr), - Err(_) => Err(HttpError::for_bad_request( - None, - "invalid ip address".into(), - )), - }?; - nexus - .loopback_address_delete( - &opctx, - path.rack_id, - path.switch_location.clone(), - addr.into(), - ) - .await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List loopback addresses + #[endpoint { + method = GET, + path = "/v1/system/networking/loopback-address", + tags = ["system/networking"], + }] + async fn networking_loopback_address_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pagparams = data_page_params_for(&rqctx, &query)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let addrs = nexus + .loopback_address_list(&opctx, &pagparams) + .await? 
+ .into_iter() + .map(|p| p.into()) + .collect(); + + Ok(HttpResponseOk(ScanById::results_page( + &query, + addrs, + &|_, x: &LoopbackAddress| x.id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List loopback addresses -#[endpoint { - method = GET, - path = "/v1/system/networking/loopback-address", - tags = ["system/networking"], -}] -async fn networking_loopback_address_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pagparams = data_page_params_for(&rqctx, &query)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let addrs = nexus - .loopback_address_list(&opctx, &pagparams) - .await? - .into_iter() - .map(|p| p.into()) - .collect(); - - Ok(HttpResponseOk(ScanById::results_page( - &query, - addrs, - &|_, x: &LoopbackAddress| x.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Create switch port settings + #[endpoint { + method = POST, + path = "/v1/system/networking/switch-port-settings", + tags = ["system/networking"], + }] + async fn networking_switch_port_settings_create( + rqctx: RequestContext, + new_settings: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let params = new_settings.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let result = + nexus.switch_port_settings_post(&opctx, params).await?; + + let settings: SwitchPortSettingsView = result.into(); + Ok(HttpResponseCreated(settings)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create switch port settings -#[endpoint { - method = POST, - path = "/v1/system/networking/switch-port-settings", - tags = ["system/networking"], -}] -async fn networking_switch_port_settings_create( - rqctx: RequestContext, - new_settings: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let params = new_settings.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let result = nexus.switch_port_settings_post(&opctx, params).await?; - - let settings: SwitchPortSettingsView = result.into(); - Ok(HttpResponseCreated(settings)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Delete switch port settings + #[endpoint { + method = DELETE, + path = "/v1/system/networking/switch-port-settings", + tags = ["system/networking"], + }] + async fn networking_switch_port_settings_delete( + rqctx: RequestContext, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let selector = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + nexus.switch_port_settings_delete(&opctx, &selector).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete switch port settings -#[endpoint { - method = DELETE, - path = "/v1/system/networking/switch-port-settings", - tags = ["system/networking"], -}] -async fn 
networking_switch_port_settings_delete( - rqctx: RequestContext, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let selector = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - nexus.switch_port_settings_delete(&opctx, &selector).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List switch port settings + #[endpoint { + method = GET, + path = "/v1/system/networking/switch-port-settings", + tags = ["system/networking"], + }] + async fn networking_switch_port_settings_list( + rqctx: RequestContext, + query_params: Query< + PaginatedByNameOrId, + >, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let settings = nexus + .switch_port_settings_list(&opctx, &paginated_by) + .await? + .into_iter() + .map(|p| p.into()) + .collect(); + + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + settings, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List switch port settings -#[endpoint { - method = GET, - path = "/v1/system/networking/switch-port-settings", - tags = ["system/networking"], -}] -async fn networking_switch_port_settings_list( - rqctx: RequestContext, - query_params: Query< - PaginatedByNameOrId, - >, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let settings = nexus - .switch_port_settings_list(&opctx, &paginated_by) - .await? 
- .into_iter() - .map(|p| p.into()) - .collect(); - - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - settings, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Get information about switch port + #[endpoint { + method = GET, + path = "/v1/system/networking/switch-port-settings/{port}", + tags = ["system/networking"], + }] + async fn networking_switch_port_settings_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = path_params.into_inner().port; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let settings = + nexus.switch_port_settings_get(&opctx, &query).await?; + Ok(HttpResponseOk(settings.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Get information about switch port -#[endpoint { - method = GET, - path = "/v1/system/networking/switch-port-settings/{port}", - tags = ["system/networking"], -}] -async fn networking_switch_port_settings_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = path_params.into_inner().port; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let settings = nexus.switch_port_settings_get(&opctx, &query).await?; - Ok(HttpResponseOk(settings.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List switch ports + #[endpoint { + method = GET, + path = "/v1/system/hardware/switch-port", + tags = ["system/hardware"], + }] + async fn networking_switch_port_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pagparams = data_page_params_for(&rqctx, &query)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let addrs = nexus + .switch_port_list(&opctx, &pagparams) + .await? + .into_iter() + .map(|p| p.into()) + .collect(); + + Ok(HttpResponseOk(ScanById::results_page( + &query, + addrs, + &|_, x: &SwitchPort| x.id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List switch ports -#[endpoint { - method = GET, - path = "/v1/system/hardware/switch-port", - tags = ["system/hardware"], -}] -async fn networking_switch_port_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pagparams = data_page_params_for(&rqctx, &query)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let addrs = nexus - .switch_port_list(&opctx, &pagparams) - .await? 
- .into_iter() - .map(|p| p.into()) - .collect(); - - Ok(HttpResponseOk(ScanById::results_page( - &query, - addrs, - &|_, x: &SwitchPort| x.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Get switch port status + #[endpoint { + method = GET, + path = "/v1/system/hardware/switch-port/{port}/status", + tags = ["system/hardware"], + }] + async fn networking_switch_port_status( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let path = path_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + Ok(HttpResponseOk( + nexus + .switch_port_status( + &opctx, + query.switch_location, + path.port, + ) + .await?, + )) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Get switch port status -#[endpoint { - method = GET, - path = "/v1/system/hardware/switch-port/{port}/status", - tags = ["system/hardware"], -}] -async fn networking_switch_port_status( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let path = path_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - Ok(HttpResponseOk( + /// Apply switch port settings + #[endpoint { + method = POST, + path = "/v1/system/hardware/switch-port/{port}/settings", + tags = ["system/hardware"], + }] + async fn networking_switch_port_apply_settings( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + settings_body: TypedBody, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let port = path_params.into_inner().port; + let query = query_params.into_inner(); + let settings = settings_body.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; nexus - .switch_port_status(&opctx, query.switch_location, path.port) - .await?, - )) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Apply switch port settings -#[endpoint { - method = POST, - path = "/v1/system/hardware/switch-port/{port}/settings", - tags = ["system/hardware"], -}] -async fn networking_switch_port_apply_settings( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - settings_body: TypedBody, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let port = path_params.into_inner().port; - let query = query_params.into_inner(); - let settings = settings_body.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - nexus - .switch_port_apply_settings(&opctx, &port, &query, &settings) - .await?; - Ok(HttpResponseUpdatedNoContent {}) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Clear switch port settings -#[endpoint { - method = DELETE, - path = "/v1/system/hardware/switch-port/{port}/settings", - tags = ["system/hardware"], -}] -async fn networking_switch_port_clear_settings( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = 
rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let port = path_params.into_inner().port; - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - nexus.switch_port_clear_settings(&opctx, &port, &query).await?; - Ok(HttpResponseUpdatedNoContent {}) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + .switch_port_apply_settings(&opctx, &port, &query, &settings) + .await?; + Ok(HttpResponseUpdatedNoContent {}) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create new BGP configuration -#[endpoint { - method = POST, - path = "/v1/system/networking/bgp", - tags = ["system/networking"], -}] -async fn networking_bgp_config_create( - rqctx: RequestContext, - config: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let config = config.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let result = nexus.bgp_config_set(&opctx, &config).await?; - Ok(HttpResponseCreated::(result.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Clear switch port settings + #[endpoint { + method = DELETE, + path = "/v1/system/hardware/switch-port/{port}/settings", + tags = ["system/hardware"], + }] + async fn networking_switch_port_clear_settings( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let port = path_params.into_inner().port; + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + nexus.switch_port_clear_settings(&opctx, &port, &query).await?; + Ok(HttpResponseUpdatedNoContent {}) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List BGP configurations -#[endpoint { - method = GET, - path = "/v1/system/networking/bgp", - tags = ["system/networking"], -}] -async fn networking_bgp_config_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let configs = nexus - .bgp_config_list(&opctx, &paginated_by) - .await? 
- .into_iter() - .map(|p| p.into()) - .collect(); - - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - configs, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Create new BGP configuration + #[endpoint { + method = POST, + path = "/v1/system/networking/bgp", + tags = ["system/networking"], + }] + async fn networking_bgp_config_create( + rqctx: RequestContext, + config: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let config = config.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let result = nexus.bgp_config_set(&opctx, &config).await?; + Ok(HttpResponseCreated::(result.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -//TODO pagination? the normal by-name/by-id stuff does not work here -/// Get BGP peer status -#[endpoint { - method = GET, - path = "/v1/system/networking/bgp-status", - tags = ["system/networking"], -}] -async fn networking_bgp_status( - rqctx: RequestContext, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let handler = async { - let nexus = &apictx.context.nexus; - let result = nexus.bgp_peer_status(&opctx).await?; - Ok(HttpResponseOk(result)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List BGP configurations + #[endpoint { + method = GET, + path = "/v1/system/networking/bgp", + tags = ["system/networking"], + }] + async fn networking_bgp_config_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let configs = nexus + .bgp_config_list(&opctx, &paginated_by) + .await? + .into_iter() + .map(|p| p.into()) + .collect(); + + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + configs, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Get BGP router message history -#[endpoint { - method = GET, - path = "/v1/system/networking/bgp-message-history", - tags = ["system/networking"], -}] -async fn networking_bgp_message_history( - rqctx: RequestContext, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let handler = async { - let nexus = &apictx.context.nexus; - let sel = query_params.into_inner(); - let result = nexus.bgp_message_history(&opctx, &sel).await?; - Ok(HttpResponseOk(AggregateBgpMessageHistory::new(result))) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + //TODO pagination? 
the normal by-name/by-id stuff does not work here + /// Get BGP peer status + #[endpoint { + method = GET, + path = "/v1/system/networking/bgp-status", + tags = ["system/networking"], + }] + async fn networking_bgp_status( + rqctx: RequestContext, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let handler = async { + let nexus = &apictx.context.nexus; + let result = nexus.bgp_peer_status(&opctx).await?; + Ok(HttpResponseOk(result)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -//TODO pagination? the normal by-name/by-id stuff does not work here -/// Get imported IPv4 BGP routes -#[endpoint { - method = GET, - path = "/v1/system/networking/bgp-routes-ipv4", - tags = ["system/networking"], -}] -async fn networking_bgp_imported_routes_ipv4( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let handler = async { - let nexus = &apictx.context.nexus; - let sel = query_params.into_inner(); - let result = nexus.bgp_imported_routes_ipv4(&opctx, &sel).await?; - Ok(HttpResponseOk(result)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Get BGP router message history + #[endpoint { + method = GET, + path = "/v1/system/networking/bgp-message-history", + tags = ["system/networking"], + }] + async fn networking_bgp_message_history( + rqctx: RequestContext, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let handler = async { + let nexus = &apictx.context.nexus; + let sel = query_params.into_inner(); + let result = nexus.bgp_message_history(&opctx, &sel).await?; + Ok(HttpResponseOk(AggregateBgpMessageHistory::new(result))) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete BGP configuration -#[endpoint { - method = DELETE, - path = "/v1/system/networking/bgp", - tags = ["system/networking"], -}] -async fn networking_bgp_config_delete( - rqctx: RequestContext, - sel: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let sel = sel.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - nexus.bgp_config_delete(&opctx, &sel).await?; - Ok(HttpResponseUpdatedNoContent {}) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + //TODO pagination? 
the normal by-name/by-id stuff does not work here + /// Get imported IPv4 BGP routes + #[endpoint { + method = GET, + path = "/v1/system/networking/bgp-routes-ipv4", + tags = ["system/networking"], + }] + async fn networking_bgp_imported_routes_ipv4( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + let handler = async { + let nexus = &apictx.context.nexus; + let sel = query_params.into_inner(); + let result = nexus.bgp_imported_routes_ipv4(&opctx, &sel).await?; + Ok(HttpResponseOk(result)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Update BGP announce set -/// -/// If the announce set exists, this endpoint replaces the existing announce -/// set with the one specified. -#[endpoint { - method = PUT, - path = "/v1/system/networking/bgp-announce", - tags = ["system/networking"], -}] -async fn networking_bgp_announce_set_update( - rqctx: RequestContext, - config: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let config = config.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let result = nexus.bgp_update_announce_set(&opctx, &config).await?; - Ok(HttpResponseCreated::(result.0.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Delete BGP configuration + #[endpoint { + method = DELETE, + path = "/v1/system/networking/bgp", + tags = ["system/networking"], + }] + async fn networking_bgp_config_delete( + rqctx: RequestContext, + sel: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let sel = sel.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + nexus.bgp_config_delete(&opctx, &sel).await?; + Ok(HttpResponseUpdatedNoContent {}) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -//TODO pagination? the normal by-name/by-id stuff does not work here -/// Get originated routes for a BGP configuration -#[endpoint { - method = GET, - path = "/v1/system/networking/bgp-announce", - tags = ["system/networking"], -}] -async fn networking_bgp_announce_set_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let sel = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let result = nexus - .bgp_announce_list(&opctx, &sel) - .await? - .into_iter() - .map(|p| p.into()) - .collect(); - Ok(HttpResponseOk(result)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Update BGP announce set + /// + /// If the announce set exists, this endpoint replaces the existing announce + /// set with the one specified. 
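Editorial note on the pattern running through the list endpoints above (address lots, switch port settings, BGP configs): each one decodes the page token with data_page_params_for, asks ScanByNameOrId::from_query whether the scan is keyed by name or by id, fetches one page from Nexus, and hands the items back through results_page together with marker_for_name_or_id. The following is a minimal, self-contained sketch of that marker scheme only; NameOrId, ResultsPage, and results_page here are simplified stand-ins, not the real dropshot/nexus definitions.

// Simplified stand-ins for the name-or-id pagination used throughout this
// file; illustrative only.
#[allow(dead_code)]
#[derive(Debug)]
enum NameOrId {
    Name(String),
    Id(u128),
}

struct ResultsPage<T> {
    items: Vec<T>,
    /// Token identifying where the next page starts, if any.
    next_page: Option<String>,
}

// Build one page: if the page is full, derive the marker for the next
// request from the last item, mirroring what marker_for_name_or_id does.
fn results_page<T>(
    items: Vec<T>,
    limit: usize,
    marker_for: impl Fn(&T) -> NameOrId,
) -> ResultsPage<T> {
    let next_page = if items.len() == limit {
        items.last().map(|last| match marker_for(last) {
            NameOrId::Name(n) => format!("name:{n}"),
            NameOrId::Id(id) => format!("id:{id}"),
        })
    } else {
        None // A short page means the scan is exhausted.
    };
    ResultsPage { items, next_page }
}

fn main() {
    let page = results_page(
        vec!["cfg-a".to_string(), "cfg-b".to_string()],
        2,
        |item| NameOrId::Name(item.clone()),
    );
    assert_eq!(page.items.len(), 2);
    assert_eq!(page.next_page.as_deref(), Some("name:cfg-b"));
}

Deriving the marker from the last returned item is what lets the client resume a scan without the server keeping any cursor state.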
+ #[endpoint { + method = PUT, + path = "/v1/system/networking/bgp-announce", + tags = ["system/networking"], + }] + async fn networking_bgp_announce_set_update( + rqctx: RequestContext, + config: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let config = config.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let result = nexus.bgp_update_announce_set(&opctx, &config).await?; + Ok(HttpResponseCreated::(result.0.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete BGP announce set -#[endpoint { - method = DELETE, - path = "/v1/system/networking/bgp-announce", - tags = ["system/networking"], -}] -async fn networking_bgp_announce_set_delete( - rqctx: RequestContext, - selector: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let sel = selector.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - nexus.bgp_delete_announce_set(&opctx, &sel).await?; - Ok(HttpResponseUpdatedNoContent {}) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + //TODO pagination? the normal by-name/by-id stuff does not work here + /// Get originated routes for a BGP configuration + #[endpoint { + method = GET, + path = "/v1/system/networking/bgp-announce", + tags = ["system/networking"], + }] + async fn networking_bgp_announce_set_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let sel = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let result = nexus + .bgp_announce_list(&opctx, &sel) + .await? 
+ .into_iter() + .map(|p| p.into()) + .collect(); + Ok(HttpResponseOk(result)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Enable a BFD session -#[endpoint { - method = POST, - path = "/v1/system/networking/bfd-enable", - tags = ["system/networking"], -}] -async fn networking_bfd_enable( - rqctx: RequestContext, - session: TypedBody, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - nexus.bfd_enable(&opctx, session.into_inner()).await?; - Ok(HttpResponseUpdatedNoContent {}) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Delete BGP announce set + #[endpoint { + method = DELETE, + path = "/v1/system/networking/bgp-announce", + tags = ["system/networking"], + }] + async fn networking_bgp_announce_set_delete( + rqctx: RequestContext, + selector: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let sel = selector.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + nexus.bgp_delete_announce_set(&opctx, &sel).await?; + Ok(HttpResponseUpdatedNoContent {}) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Disable a BFD session -#[endpoint { - method = POST, - path = "/v1/system/networking/bfd-disable", - tags = ["system/networking"], -}] -async fn networking_bfd_disable( - rqctx: RequestContext, - session: TypedBody, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - nexus.bfd_disable(&opctx, session.into_inner()).await?; - Ok(HttpResponseUpdatedNoContent {}) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Enable a BFD session + #[endpoint { + method = POST, + path = "/v1/system/networking/bfd-enable", + tags = ["system/networking"], + }] + async fn networking_bfd_enable( + rqctx: RequestContext, + session: TypedBody, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + nexus.bfd_enable(&opctx, session.into_inner()).await?; + Ok(HttpResponseUpdatedNoContent {}) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Get BFD status -#[endpoint { - method = GET, - path = "/v1/system/networking/bfd-status", - tags = ["system/networking"], -}] -async fn networking_bfd_status( - rqctx: RequestContext, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; - let status = nexus.bfd_status(&opctx).await?; - Ok(HttpResponseOk(status)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Disable a BFD session + #[endpoint { 
+ method = POST, + path = "/v1/system/networking/bfd-disable", + tags = ["system/networking"], + }] + async fn networking_bfd_disable( + rqctx: RequestContext, + session: TypedBody, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + nexus.bfd_disable(&opctx, session.into_inner()).await?; + Ok(HttpResponseUpdatedNoContent {}) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Get user-facing services IP allowlist -#[endpoint { - method = GET, - path = "/v1/system/networking/allow-list", - tags = ["system/networking"], -}] -async fn networking_allow_list_view( - rqctx: RequestContext, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - nexus - .allow_list_view(&opctx) - .await - .map(HttpResponseOk) - .map_err(HttpError::from) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Get BFD status + #[endpoint { + method = GET, + path = "/v1/system/networking/bfd-status", + tags = ["system/networking"], + }] + async fn networking_bfd_status( + rqctx: RequestContext, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + let status = nexus.bfd_status(&opctx).await?; + Ok(HttpResponseOk(status)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Update user-facing services IP allowlist -#[endpoint { - method = PUT, - path = "/v1/system/networking/allow-list", - tags = ["system/networking"], -}] -async fn networking_allow_list_update( - rqctx: RequestContext, - params: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let server_kind = apictx.kind; - let params = params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let remote_addr = rqctx.request.remote_addr().ip(); - nexus - .allow_list_upsert(&opctx, remote_addr, server_kind, params) - .await - .map(HttpResponseOk) - .map_err(HttpError::from) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Get user-facing services IP allowlist + #[endpoint { + method = GET, + path = "/v1/system/networking/allow-list", + tags = ["system/networking"], + }] + async fn networking_allow_list_view( + rqctx: RequestContext, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + nexus + .allow_list_view(&opctx) + .await + .map(HttpResponseOk) + .map_err(HttpError::from) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Images + /// Update user-facing services IP allowlist + #[endpoint { + method = PUT, + path = "/v1/system/networking/allow-list", + tags = ["system/networking"], + }] + async fn networking_allow_list_update( + rqctx: 
RequestContext, + params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let server_kind = apictx.kind; + let params = params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let remote_addr = rqctx.request.remote_addr().ip(); + nexus + .allow_list_upsert(&opctx, remote_addr, server_kind, params) + .await + .map(HttpResponseOk) + .map_err(HttpError::from) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List images -/// -/// List images which are global or scoped to the specified project. The images -/// are returned sorted by creation date, with the most recent images appearing first. -#[endpoint { - method = GET, - path = "/v1/images", - tags = ["images"], -}] -async fn image_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let parent_lookup = match scan_params.selector.project.clone() { - Some(project) => { - let project_lookup = nexus.project_lookup( - &opctx, - params::ProjectSelector { project }, - )?; - ImageParentLookup::Project(project_lookup) - } - None => { - let silo_lookup = nexus.current_silo_lookup(&opctx)?; - ImageParentLookup::Silo(silo_lookup) - } + // Images + + /// List images + /// + /// List images which are global or scoped to the specified project. The images + /// are returned sorted by creation date, with the most recent images appearing first. + #[endpoint { + method = GET, + path = "/v1/images", + tags = ["images"], + }] + async fn image_list( + rqctx: RequestContext, + query_params: Query< + PaginatedByNameOrId, + >, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let parent_lookup = match scan_params.selector.project.clone() { + Some(project) => { + let project_lookup = nexus.project_lookup( + &opctx, + params::ProjectSelector { project }, + )?; + ImageParentLookup::Project(project_lookup) + } + None => { + let silo_lookup = nexus.current_silo_lookup(&opctx)?; + ImageParentLookup::Silo(silo_lookup) + } + }; + let images = nexus + .image_list(&opctx, &parent_lookup, &paginated_by) + .await? + .into_iter() + .map(|d| d.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + images, + &marker_for_name_or_id, + )?)) }; - let images = nexus - .image_list(&opctx, &parent_lookup, &paginated_by) - .await? 
- .into_iter() - .map(|d| d.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - images, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create image -/// -/// Create a new image in a project. -#[endpoint { - method = POST, - path = "/v1/images", - tags = ["images"] -}] -async fn image_create( - rqctx: RequestContext, - query_params: Query, - new_image: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let params = &new_image.into_inner(); - let parent_lookup = match query.project.clone() { - Some(project) => { - let project_lookup = nexus.project_lookup( + /// Create image + /// + /// Create a new image in a project. + #[endpoint { + method = POST, + path = "/v1/images", + tags = ["images"] + }] + async fn image_create( + rqctx: RequestContext, + query_params: Query, + new_image: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let params = &new_image.into_inner(); + let parent_lookup = match query.project.clone() { + Some(project) => { + let project_lookup = nexus.project_lookup( + &opctx, + params::ProjectSelector { project }, + )?; + ImageParentLookup::Project(project_lookup) + } + None => { + let silo_lookup = nexus.current_silo_lookup(&opctx)?; + ImageParentLookup::Silo(silo_lookup) + } + }; + let image = + nexus.image_create(&opctx, &parent_lookup, &params).await?; + Ok(HttpResponseCreated(image.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + /// Fetch image + /// + /// Fetch the details for a specific image in a project. + #[endpoint { + method = GET, + path = "/v1/images/{image}", + tags = ["images"], + }] + async fn image_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let image: nexus_db_model::Image = match nexus + .image_lookup( + &opctx, + params::ImageSelector { + image: path.image, + project: query.project, + }, + ) + .await?
+ { + ImageLookup::ProjectImage(image) => { + let (.., db_image) = image.fetch().await?; + db_image.into() + } + ImageLookup::SiloImage(image) => { + let (.., db_image) = image.fetch().await?; + db_image.into() + } + }; + Ok(HttpResponseOk(image.into())) }; - let image = nexus.image_create(&opctx, &parent_lookup, &params).await?; - Ok(HttpResponseCreated(image.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch image -/// -/// Fetch the details for a specific image in a project. -#[endpoint { - method = GET, - path = "/v1/images/{image}", - tags = ["images"], -}] -async fn image_view( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let image: nexus_db_model::Image = match nexus - .image_lookup( - &opctx, - params::ImageSelector { - image: path.image, - project: query.project, - }, - ) - .await? - { - ImageLookup::ProjectImage(image) => { - let (.., db_image) = image.fetch().await?; - db_image.into() - } - ImageLookup::SiloImage(image) => { - let (.., db_image) = image.fetch().await?; - db_image.into() - } + /// Delete image + /// + /// Permanently delete an image from a project. This operation cannot be undone. + /// Any instances in the project using the image will continue to run; however, + /// new instances cannot be created with this image. + #[endpoint { + method = DELETE, + path = "/v1/images/{image}", + tags = ["images"], + }] + async fn image_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let image_lookup = nexus + .image_lookup( + &opctx, + params::ImageSelector { + image: path.image, + project: query.project, + }, + ) + .await?; + nexus.image_delete(&opctx, &image_lookup).await?; + Ok(HttpResponseDeleted()) }; - Ok(HttpResponseOk(image.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} -/// Delete image -/// -/// Permanently delete an image from a project. This operation cannot be undone. -/// Any instances in the project using the image will continue to run, however -/// new instances can not be created with this image.
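Worth calling out, since the re-indentation makes it easy to miss: every handler in this file has the same two-part shape. The real work is built as a `handler` future, which is then passed to instrument_dropshot_handler on external_latencies so the request's latency is recorded whether it succeeds or fails. A minimal sketch of that wrapping idea follows; LatencyTracker is a hypothetical stand-in for the real oximeter-backed tracker, and it assumes tokio for the executor.

use std::future::Future;
use std::time::Instant;

// Hypothetical stand-in for the latency tracker held in the Nexus server
// context; the real one feeds oximeter histograms keyed by endpoint.
struct LatencyTracker;

impl LatencyTracker {
    // Await the handler and record how long it took and whether it
    // succeeded, mirroring instrument_dropshot_handler(&rqctx, handler).
    async fn instrument<T, E, F>(&self, handler: F) -> Result<T, E>
    where
        F: Future<Output = Result<T, E>>,
    {
        let start = Instant::now();
        let result = handler.await;
        println!(
            "handled in {:?} ({})",
            start.elapsed(),
            if result.is_ok() { "ok" } else { "error" }
        );
        result
    }
}

#[tokio::main]
async fn main() {
    let external_latencies = LatencyTracker;
    // The async block plays the role of the `handler` futures above.
    let handler = async { Ok::<_, String>("deleted") };
    assert_eq!(external_latencies.instrument(handler).await, Ok("deleted"));
}

Keeping the instrumentation outside the handler future is what guarantees the error path is timed exactly like the success path.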
-#[endpoint { - method = DELETE, - path = "/v1/images/{image}", - tags = ["images"], -}] -async fn image_delete( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let image_lookup = nexus - .image_lookup( - &opctx, - params::ImageSelector { - image: path.image, - project: query.project, - }, - ) - .await?; - nexus.image_delete(&opctx, &image_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Promote project image + /// + /// Promote project image to be visible to all projects in the silo + #[endpoint { + method = POST, + path = "/v1/images/{image}/promote", + tags = ["images"] + }] + async fn image_promote( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let image_lookup = nexus + .image_lookup( + &opctx, + params::ImageSelector { + image: path.image, + project: query.project, + }, + ) + .await?; + let image = nexus.image_promote(&opctx, &image_lookup).await?; + Ok(HttpResponseAccepted(image.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Promote project image -/// -/// Promote project image to be visible to all projects in the silo -#[endpoint { - method = POST, - path = "/v1/images/{image}/promote", - tags = ["images"] -}] -async fn image_promote( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let image_lookup = nexus - .image_lookup( - &opctx, - params::ImageSelector { - image: path.image, - project: query.project, - }, - ) - .await?; - let image = nexus.image_promote(&opctx, &image_lookup).await?; - Ok(HttpResponseAccepted(image.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Demote silo image + /// + /// Demote silo image to be visible only to a specified project + #[endpoint { + method = POST, + path = "/v1/images/{image}/demote", + tags = ["images"] + }] + async fn image_demote( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let image_lookup = nexus + .image_lookup( + &opctx, + params::ImageSelector { image: path.image, project: None }, + ) + .await?; -/// Demote silo image -/// -/// Demote silo image to be visible only to a specified project -#[endpoint { - method = POST, - path = "/v1/images/{image}/demote", - tags = ["images"] -}] -async fn image_demote( - rqctx: 
RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let image_lookup = nexus - .image_lookup( - &opctx, - params::ImageSelector { image: path.image, project: None }, - ) - .await?; - - let project_lookup = nexus.project_lookup(&opctx, query)?; - - let image = - nexus.image_demote(&opctx, &image_lookup, &project_lookup).await?; - Ok(HttpResponseAccepted(image.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let project_lookup = nexus.project_lookup(&opctx, query)?; -/// List network interfaces -#[endpoint { - method = GET, - path = "/v1/network-interfaces", - tags = ["instances"], -}] -async fn instance_network_interface_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let instance_lookup = - nexus.instance_lookup(&opctx, scan_params.selector.clone())?; - let interfaces = nexus - .instance_network_interface_list( - &opctx, - &instance_lookup, - &paginated_by, - ) - .await? - .into_iter() - .map(|d| d.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - interfaces, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Create network interface -#[endpoint { - method = POST, - path = "/v1/network-interfaces", - tags = ["instances"], -}] -async fn instance_network_interface_create( - rqctx: RequestContext, - query_params: Query, - interface_params: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let instance_lookup = nexus.instance_lookup(&opctx, query)?; - let iface = nexus - .network_interface_create( - &opctx, - &instance_lookup, - &interface_params.into_inner(), - ) - .await?; - Ok(HttpResponseCreated(iface.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let image = nexus + .image_demote(&opctx, &image_lookup, &project_lookup) + .await?; + Ok(HttpResponseAccepted(image.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete network interface -/// -/// Note that the primary interface for an instance cannot be deleted if there -/// are any secondary interfaces. A new primary interface must be designated -/// first. The primary interface can be deleted if there are no secondary -/// interfaces. 
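The constraint in the doc comment above reduces to a simple predicate: the primary interface is deletable only once no secondary interfaces remain. The following illustrative check uses a made-up Nic type; the real enforcement happens in the Nexus datastore, which this diff does not touch.

// Illustrative only: `Nic` is a made-up stand-in, and the real check runs
// inside the database transaction, not in the HTTP handler.
struct Nic {
    name: &'static str,
    primary: bool,
}

// Enforce the rule from the doc comment: the primary interface cannot be
// deleted while any secondary interfaces exist.
fn can_delete(target: &Nic, all_for_instance: &[Nic]) -> Result<(), String> {
    let has_secondaries = all_for_instance.iter().any(|nic| !nic.primary);
    if target.primary && has_secondaries {
        return Err(format!(
            "cannot delete primary interface {:?}: secondary interfaces still exist",
            target.name
        ));
    }
    Ok(())
}

fn main() {
    let nics = [
        Nic { name: "net0", primary: true },
        Nic { name: "net1", primary: false },
    ];
    // Deleting the primary fails while net1 exists...
    assert!(can_delete(&nics[0], &nics).is_err());
    // ...but a secondary can always be removed.
    assert!(can_delete(&nics[1], &nics).is_ok());
}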
-#[endpoint { - method = DELETE, - path = "/v1/network-interfaces/{interface}", - tags = ["instances"], -}] -async fn instance_network_interface_delete( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let interface_selector = params::InstanceNetworkInterfaceSelector { - project: query.project, - instance: query.instance, - network_interface: path.interface, - }; - let interface_lookup = nexus - .instance_network_interface_lookup(&opctx, interface_selector)?; - nexus - .instance_network_interface_delete(&opctx, &interface_lookup) - .await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List network interfaces + #[endpoint { + method = GET, + path = "/v1/network-interfaces", + tags = ["instances"], + }] + async fn instance_network_interface_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let instance_lookup = + nexus.instance_lookup(&opctx, scan_params.selector.clone())?; + let interfaces = nexus + .instance_network_interface_list( + &opctx, + &instance_lookup, + &paginated_by, + ) + .await? + .into_iter() + .map(|d| d.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + interfaces, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch network interface -#[endpoint { - method = GET, - path = "/v1/network-interfaces/{interface}", - tags = ["instances"], -}] -async fn instance_network_interface_view( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let interface_selector = params::InstanceNetworkInterfaceSelector { - project: query.project, - instance: query.instance, - network_interface: path.interface, - }; - let (.., interface) = nexus - .instance_network_interface_lookup(&opctx, interface_selector)? 
- .fetch() - .await?; - Ok(HttpResponseOk(interface.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Create network interface + #[endpoint { + method = POST, + path = "/v1/network-interfaces", + tags = ["instances"], + }] + async fn instance_network_interface_create( + rqctx: RequestContext, + query_params: Query, + interface_params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let instance_lookup = nexus.instance_lookup(&opctx, query)?; + let iface = nexus + .network_interface_create( + &opctx, + &instance_lookup, + &interface_params.into_inner(), + ) + .await?; + Ok(HttpResponseCreated(iface.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Update network interface -#[endpoint { - method = PUT, - path = "/v1/network-interfaces/{interface}", - tags = ["instances"], -}] -async fn instance_network_interface_update( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - updated_iface: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let updated_iface = updated_iface.into_inner(); - let network_interface_selector = - params::InstanceNetworkInterfaceSelector { + /// Delete network interface + /// + /// Note that the primary interface for an instance cannot be deleted if there + /// are any secondary interfaces. A new primary interface must be designated + /// first. The primary interface can be deleted if there are no secondary + /// interfaces. 
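All of the interface endpoints here follow the same selector-then-lookup flow: path and query parameters are folded into a params::InstanceNetworkInterfaceSelector, the selector is resolved to an authorization-checked lookup, and only then is the operation performed. Below is a compressed sketch with simplified stand-in types; the validation rule is made up for illustration and merely gestures at the real database-backed resolution in nexus-db-queries.

// Simplified stand-ins: in Nexus the selector is resolved against the
// database with authz checks; here a "lookup" is just a validated selector.
#[derive(Debug)]
struct InterfaceSelector {
    project: Option<String>,
    instance: Option<String>,
    interface: String,
}

struct InterfaceLookup(InterfaceSelector);

// Stand-in for nexus.instance_network_interface_lookup(&opctx, selector).
fn interface_lookup(sel: InterfaceSelector) -> Result<InterfaceLookup, String> {
    if sel.project.is_some() && sel.instance.is_none() {
        // Made-up rule for the sketch: a project scope is meaningless
        // without an instance to scope it to.
        return Err("project provided without an instance".to_string());
    }
    Ok(InterfaceLookup(sel))
}

// The operation itself runs only against a resolved lookup.
fn interface_delete(lookup: &InterfaceLookup) -> Result<(), String> {
    println!("deleting {:?}", lookup.0);
    Ok(())
}

fn main() -> Result<(), String> {
    // Same order as the handlers above: build the selector, resolve the
    // lookup, then perform the operation against it.
    let sel = InterfaceSelector {
        project: Some("prod".into()),
        instance: Some("web0".into()),
        interface: "net0".into(),
    };
    let lookup = interface_lookup(sel)?;
    interface_delete(&lookup)
}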
+ #[endpoint { + method = DELETE, + path = "/v1/network-interfaces/{interface}", + tags = ["instances"], + }] + async fn instance_network_interface_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let interface_selector = params::InstanceNetworkInterfaceSelector { project: query.project, instance: query.instance, network_interface: path.interface, }; - let network_interface_lookup = nexus - .instance_network_interface_lookup( + let interface_lookup = nexus.instance_network_interface_lookup( &opctx, - network_interface_selector, + interface_selector, )?; - let interface = nexus - .instance_network_interface_update( - &opctx, - &network_interface_lookup, - updated_iface, - ) - .await?; - Ok(HttpResponseOk(InstanceNetworkInterface::from(interface))) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -// External IP addresses for instances - -/// List external IP addresses -#[endpoint { - method = GET, - path = "/v1/instances/{instance}/external-ips", - tags = ["instances"], -}] -async fn instance_external_ip_list( - rqctx: RequestContext, - query_params: Query, - path_params: Path, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let instance_selector = params::InstanceSelector { - project: query.project, - instance: path.instance, + nexus + .instance_network_interface_delete(&opctx, &interface_lookup) + .await?; + Ok(HttpResponseDeleted()) }; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - let ips = - nexus.instance_list_external_ips(&opctx, &instance_lookup).await?; - Ok(HttpResponseOk(ResultsPage { items: ips, next_page: None })) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Allocate and attach ephemeral IP to instance -#[endpoint { - method = POST, - path = "/v1/instances/{instance}/external-ips/ephemeral", - tags = ["instances"], -}] -async fn instance_ephemeral_ip_attach( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - ip_to_create: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let instance_selector = params::InstanceSelector { - project: query.project, - instance: path.instance, + /// Fetch network interface + #[endpoint { + method = GET, + path = "/v1/network-interfaces/{interface}", + tags = ["instances"], + }] + async fn instance_network_interface_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let 
query = query_params.into_inner(); + let interface_selector = params::InstanceNetworkInterfaceSelector { + project: query.project, + instance: query.instance, + network_interface: path.interface, + }; + let (.., interface) = nexus + .instance_network_interface_lookup(&opctx, interface_selector)? + .fetch() + .await?; + Ok(HttpResponseOk(interface.into())) }; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - let ip = nexus - .instance_attach_ephemeral_ip( - &opctx, - &instance_lookup, - ip_to_create.into_inner().pool, - ) - .await?; - Ok(HttpResponseAccepted(ip)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Detach and deallocate ephemeral IP from instance -#[endpoint { - method = DELETE, - path = "/v1/instances/{instance}/external-ips/ephemeral", - tags = ["instances"], -}] -async fn instance_ephemeral_ip_detach( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let instance_selector = params::InstanceSelector { - project: query.project, - instance: path.instance, + /// Update network interface + #[endpoint { + method = PUT, + path = "/v1/network-interfaces/{interface}", + tags = ["instances"], + }] + async fn instance_network_interface_update( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + updated_iface: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let updated_iface = updated_iface.into_inner(); + let network_interface_selector = + params::InstanceNetworkInterfaceSelector { + project: query.project, + instance: query.instance, + network_interface: path.interface, + }; + let network_interface_lookup = nexus + .instance_network_interface_lookup( + &opctx, + network_interface_selector, + )?; + let interface = nexus + .instance_network_interface_update( + &opctx, + &network_interface_lookup, + updated_iface, + ) + .await?; + Ok(HttpResponseOk(InstanceNetworkInterface::from(interface))) }; - let instance_lookup = - nexus.instance_lookup(&opctx, instance_selector)?; - nexus - .instance_detach_external_ip( - &opctx, - &instance_lookup, - &params::ExternalIpDetach::Ephemeral, - ) - .await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -// Snapshots - -/// List snapshots -#[endpoint { - method = GET, - path = "/v1/snapshots", - tags = ["snapshots"], -}] -async fn snapshot_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let project_lookup = -
nexus.project_lookup(&opctx, scan_params.selector.clone())?; - let snapshots = nexus - .snapshot_list(&opctx, &project_lookup, &paginated_by) - .await? - .into_iter() - .map(|d| d.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - snapshots, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Create snapshot -/// -/// Creates a point-in-time snapshot from a disk. -#[endpoint { - method = POST, - path = "/v1/snapshots", - tags = ["snapshots"], -}] -async fn snapshot_create( - rqctx: RequestContext, - query_params: Query, - new_snapshot: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let new_snapshot_params = &new_snapshot.into_inner(); - let project_lookup = nexus.project_lookup(&opctx, query)?; - let snapshot = nexus - .snapshot_create(&opctx, project_lookup, &new_snapshot_params) - .await?; - Ok(HttpResponseCreated(snapshot.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Fetch snapshot -#[endpoint { - method = GET, - path = "/v1/snapshots/{snapshot}", - tags = ["snapshots"], -}] -async fn snapshot_view( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let snapshot_selector = params::SnapshotSelector { - project: query.project, - snapshot: path.snapshot, - }; - let (.., snapshot) = - nexus.snapshot_lookup(&opctx, snapshot_selector)?.fetch().await?; - Ok(HttpResponseOk(snapshot.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Delete snapshot -#[endpoint { - method = DELETE, - path = "/v1/snapshots/{snapshot}", - tags = ["snapshots"], -}] -async fn snapshot_delete( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let snapshot_selector = params::SnapshotSelector { - project: query.project, - snapshot: path.snapshot, - }; - let snapshot_lookup = - nexus.snapshot_lookup(&opctx, snapshot_selector)?; - nexus.snapshot_delete(&opctx, &snapshot_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -// VPCs - -/// List VPCs -#[endpoint { - method = GET, - path = "/v1/vpcs", - tags = ["vpcs"], -}] -async fn vpc_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = 
crate::context::op_context_for_external_api(&rqctx).await?; - let project_lookup = - nexus.project_lookup(&opctx, scan_params.selector.clone())?; - let vpcs = nexus - .vpc_list(&opctx, &project_lookup, &paginated_by) - .await? - .into_iter() - .map(|p| p.into()) - .collect(); - - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - vpcs, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Create VPC -#[endpoint { - method = POST, - path = "/v1/vpcs", - tags = ["vpcs"], -}] -async fn vpc_create( - rqctx: RequestContext, - query_params: Query, - body: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let new_vpc_params = body.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project_lookup = nexus.project_lookup(&opctx, query)?; - let vpc = nexus - .project_create_vpc(&opctx, &project_lookup, &new_vpc_params) - .await?; - Ok(HttpResponseCreated(vpc.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Fetch VPC -#[endpoint { - method = GET, - path = "/v1/vpcs/{vpc}", - tags = ["vpcs"], -}] -async fn vpc_view( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let vpc_selector = - params::VpcSelector { project: query.project, vpc: path.vpc }; - let (.., vpc) = nexus.vpc_lookup(&opctx, vpc_selector)?.fetch().await?; - Ok(HttpResponseOk(vpc.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Update a VPC -#[endpoint { - method = PUT, - path = "/v1/vpcs/{vpc}", - tags = ["vpcs"], -}] -async fn vpc_update( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - updated_vpc: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let updated_vpc_params = &updated_vpc.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let vpc_selector = - params::VpcSelector { project: query.project, vpc: path.vpc }; - let vpc_lookup = nexus.vpc_lookup(&opctx, vpc_selector)?; - let vpc = nexus - .project_update_vpc(&opctx, &vpc_lookup, &updated_vpc_params) - .await?; - Ok(HttpResponseOk(vpc.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Delete VPC -#[endpoint { - method = DELETE, - path = "/v1/vpcs/{vpc}", - tags = ["vpcs"], -}] -async fn vpc_delete( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let vpc_selector = - params::VpcSelector { project: query.project, vpc: path.vpc }; - let vpc_lookup = nexus.vpc_lookup(&opctx, vpc_selector)?; - 
nexus.project_delete_vpc(&opctx, &vpc_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// List subnets -#[endpoint { - method = GET, - path = "/v1/vpc-subnets", - tags = ["vpcs"], -}] -async fn vpc_subnet_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let vpc_lookup = - nexus.vpc_lookup(&opctx, scan_params.selector.clone())?; - let subnets = nexus - .vpc_subnet_list(&opctx, &vpc_lookup, &paginated_by) - .await? - .into_iter() - .map(|vpc| vpc.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - subnets, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Create subnet -#[endpoint { - method = POST, - path = "/v1/vpc-subnets", - tags = ["vpcs"], -}] -async fn vpc_subnet_create( - rqctx: RequestContext, - query_params: Query, - create_params: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let create = create_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let vpc_lookup = nexus.vpc_lookup(&opctx, query)?; - let subnet = - nexus.vpc_create_subnet(&opctx, &vpc_lookup, &create).await?; - Ok(HttpResponseCreated(subnet.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Fetch subnet -#[endpoint { - method = GET, - path = "/v1/vpc-subnets/{subnet}", - tags = ["vpcs"], -}] -async fn vpc_subnet_view( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let subnet_selector = params::SubnetSelector { - project: query.project, - vpc: query.vpc, - subnet: path.subnet, - }; - let (.., subnet) = - nexus.vpc_subnet_lookup(&opctx, subnet_selector)?.fetch().await?; - Ok(HttpResponseOk(subnet.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Delete subnet -#[endpoint { - method = DELETE, - path = "/v1/vpc-subnets/{subnet}", - tags = ["vpcs"], -}] -async fn vpc_subnet_delete( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let subnet_selector = params::SubnetSelector { - project: query.project, - vpc: query.vpc, - subnet: path.subnet, - }; - let subnet_lookup = nexus.vpc_subnet_lookup(&opctx, subnet_selector)?; - nexus.vpc_delete_subnet(&opctx, &subnet_lookup).await?; - 
Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -/// Update subnet -#[endpoint { - method = PUT, - path = "/v1/vpc-subnets/{subnet}", - tags = ["vpcs"], -}] -async fn vpc_subnet_update( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - subnet_params: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let subnet_params = subnet_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let subnet_selector = params::SubnetSelector { - project: query.project, - vpc: query.vpc, - subnet: path.subnet, - }; - let subnet_lookup = nexus.vpc_subnet_lookup(&opctx, subnet_selector)?; - let subnet = nexus - .vpc_update_subnet(&opctx, &subnet_lookup, &subnet_params) - .await?; - Ok(HttpResponseOk(subnet.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -// This endpoint is likely temporary. We would rather list all IPs allocated in -// a subnet whether they come from NICs or something else. See -// https://github.com/oxidecomputer/omicron/issues/2476 - -/// List network interfaces -#[endpoint { - method = GET, - path = "/v1/vpc-subnets/{subnet}/network-interfaces", - tags = ["vpcs"], -}] -async fn vpc_subnet_list_network_interfaces( - rqctx: RequestContext, - path_params: Path, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let path = path_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let subnet_selector = params::SubnetSelector { - project: scan_params.selector.project.clone(), - vpc: scan_params.selector.vpc.clone(), - subnet: path.subnet, - }; - let subnet_lookup = nexus.vpc_subnet_lookup(&opctx, subnet_selector)?; - let interfaces = nexus - .subnet_list_instance_network_interfaces( - &opctx, - &subnet_lookup, - &paginated_by, - ) - .await? - .into_iter() - .map(|interfaces| interfaces.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - interfaces, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -// VPC Firewalls - -/// List firewall rules -#[endpoint { - method = GET, - path = "/v1/vpc-firewall-rules", - tags = ["vpcs"], -}] -async fn vpc_firewall_rules_view( - rqctx: RequestContext, - query_params: Query, -) -> Result, HttpError> { - // TODO: Check If-Match and fail if the ETag doesn't match anymore. - // Without this check, if firewall rules change while someone is listing - // the rules, they will see a mix of the old and new rules. 
- let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let vpc_lookup = nexus.vpc_lookup(&opctx, query)?; - let rules = nexus.vpc_list_firewall_rules(&opctx, &vpc_lookup).await?; - Ok(HttpResponseOk(VpcFirewallRules { - rules: rules.into_iter().map(|rule| rule.into()).collect(), - })) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -// Note: the limits in the below comment come from the firewall rules model -// file, nexus/db-model/src/vpc_firewall_rule.rs. - -/// Replace firewall rules -/// -/// The maximum number of rules per VPC is 1024. -/// -/// Targets are used to specify the set of instances to which a firewall rule -/// applies. You can target instances directly by name, or specify a VPC, VPC -/// subnet, IP, or IP subnet, which will apply the rule to traffic going to -/// all matching instances. Targets are additive: the rule applies to instances -/// matching ANY target. The maximum number of targets is 256. -/// -/// Filters reduce the scope of a firewall rule. Without filters, the rule -/// applies to all packets to the targets (or from the targets, if it's an -/// outbound rule). With multiple filters, the rule applies only to packets -/// matching ALL filters. The maximum number of each type of filter is 256. -#[endpoint { - method = PUT, - path = "/v1/vpc-firewall-rules", - tags = ["vpcs"], -}] -async fn vpc_firewall_rules_update( - rqctx: RequestContext, - query_params: Query, - router_params: TypedBody, -) -> Result, HttpError> { - // TODO: Check If-Match and fail if the ETag doesn't match anymore. - // TODO: limit size of the ruleset because the GET endpoint is not paginated - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let router_params = router_params.into_inner(); - let vpc_lookup = nexus.vpc_lookup(&opctx, query)?; - let rules = nexus - .vpc_update_firewall_rules(&opctx, &vpc_lookup, &router_params) - .await?; - Ok(HttpResponseOk(VpcFirewallRules { - rules: rules.into_iter().map(|rule| rule.into()).collect(), - })) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} - -// VPC Routers + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List routers -#[endpoint { - method = GET, - path = "/v1/vpc-routers", - tags = ["vpcs"], -}] -async fn vpc_router_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let vpc_lookup = - nexus.vpc_lookup(&opctx, scan_params.selector.clone())?; - let routers = nexus - .vpc_router_list(&opctx, &vpc_lookup, &paginated_by) - .await? 
- .into_iter() - .map(|s| s.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - routers, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // External IP addresses for instances + + /// List external IP addresses + #[endpoint { + method = GET, + path = "/v1/instances/{instance}/external-ips", + tags = ["instances"], + }] + async fn instance_external_ip_list( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_selector = params::InstanceSelector { + project: query.project, + instance: path.instance, + }; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + let ips = nexus + .instance_list_external_ips(&opctx, &instance_lookup) + .await?; + Ok(HttpResponseOk(ResultsPage { items: ips, next_page: None })) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch router -#[endpoint { - method = GET, - path = "/v1/vpc-routers/{router}", - tags = ["vpcs"], -}] -async fn vpc_router_view( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let router_selector = params::RouterSelector { - project: query.project, - vpc: query.vpc, - router: path.router, - }; - let (.., vpc_router) = - nexus.vpc_router_lookup(&opctx, router_selector)?.fetch().await?; - Ok(HttpResponseOk(vpc_router.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Allocate and attach ephemeral IP to instance + #[endpoint { + method = POST, + path = "/v1/instances/{instance}/external-ips/ephemeral", + tags = ["instances"], + }] + async fn instance_ephemeral_ip_attach( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ip_to_create: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let instance_selector = params::InstanceSelector { + project: query.project, + instance: path.instance, + }; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + let ip = nexus + .instance_attach_ephemeral_ip( + &opctx, + &instance_lookup, + ip_to_create.into_inner().pool, + ) + .await?; + Ok(HttpResponseAccepted(ip)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create VPC router -#[endpoint { - method = POST, - path = "/v1/vpc-routers", - tags = ["vpcs"], -}] -async fn vpc_router_create( - rqctx: RequestContext, - query_params: Query, - create_params: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let create = 
create_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let vpc_lookup = nexus.vpc_lookup(&opctx, query)?; - let router = nexus - .vpc_create_router( - &opctx, - &vpc_lookup, - &db::model::VpcRouterKind::Custom, - &create, - ) - .await?; - Ok(HttpResponseCreated(router.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Detach and deallocate ephemeral IP from instance + #[endpoint { + method = DELETE, + path = "/v1/instances/{instance}/external-ips/ephemeral", + tags = ["instances"], + }] + async fn instance_ephemeral_ip_detach( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let instance_selector = params::InstanceSelector { + project: query.project, + instance: path.instance, + }; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + nexus + .instance_detach_external_ip( + &opctx, + &instance_lookup, + &params::ExternalIpDetach::Ephemeral, + ) + .await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete router -#[endpoint { - method = DELETE, - path = "/v1/vpc-routers/{router}", - tags = ["vpcs"], -}] -async fn vpc_router_delete( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let router_selector = params::RouterSelector { - project: query.project, - vpc: query.vpc, - router: path.router, - }; - let router_lookup = nexus.vpc_router_lookup(&opctx, router_selector)?; - nexus.vpc_delete_router(&opctx, &router_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // Snapshots + + /// List snapshots + #[endpoint { + method = GET, + path = "/v1/snapshots", + tags = ["snapshots"], + }] + async fn snapshot_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let project_lookup = + nexus.project_lookup(&opctx, scan_params.selector.clone())?; + let snapshots = nexus + .snapshot_list(&opctx, &project_lookup, &paginated_by) + .await? 
+ .into_iter() + .map(|d| d.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + snapshots, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Update router -#[endpoint { - method = PUT, - path = "/v1/vpc-routers/{router}", - tags = ["vpcs"], -}] -async fn vpc_router_update( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - router_params: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let router_params = router_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let router_selector = params::RouterSelector { - project: query.project, - vpc: query.vpc, - router: path.router, - }; - let router_lookup = nexus.vpc_router_lookup(&opctx, router_selector)?; - let router = nexus - .vpc_update_router(&opctx, &router_lookup, &router_params) - .await?; - Ok(HttpResponseOk(router.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Create snapshot + /// + /// Creates a point-in-time snapshot from a disk. + #[endpoint { + method = POST, + path = "/v1/snapshots", + tags = ["snapshots"], + }] + async fn snapshot_create( + rqctx: RequestContext, + query_params: Query, + new_snapshot: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let new_snapshot_params = &new_snapshot.into_inner(); + let project_lookup = nexus.project_lookup(&opctx, query)?; + let snapshot = nexus + .snapshot_create(&opctx, project_lookup, &new_snapshot_params) + .await?; + Ok(HttpResponseCreated(snapshot.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List routes -/// -/// List the routes associated with a router in a particular VPC. -#[endpoint { - method = GET, - path = "/v1/vpc-router-routes", - tags = ["vpcs"], -}] -async fn vpc_router_route_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let router_lookup = - nexus.vpc_router_lookup(&opctx, scan_params.selector.clone())?; - let routes = nexus - .vpc_router_route_list(&opctx, &router_lookup, &paginated_by) - .await? 
- .into_iter() - .map(|route| route.into()) - .collect(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - routes, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch snapshot + #[endpoint { + method = GET, + path = "/v1/snapshots/{snapshot}", + tags = ["snapshots"], + }] + async fn snapshot_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let snapshot_selector = params::SnapshotSelector { + project: query.project, + snapshot: path.snapshot, + }; + let (.., snapshot) = nexus + .snapshot_lookup(&opctx, snapshot_selector)? + .fetch() + .await?; + Ok(HttpResponseOk(snapshot.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Vpc Router Routes + /// Delete snapshot + #[endpoint { + method = DELETE, + path = "/v1/snapshots/{snapshot}", + tags = ["snapshots"], + }] + async fn snapshot_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let snapshot_selector = params::SnapshotSelector { + project: query.project, + snapshot: path.snapshot, + }; + let snapshot_lookup = + nexus.snapshot_lookup(&opctx, snapshot_selector)?; + nexus.snapshot_delete(&opctx, &snapshot_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch route -#[endpoint { - method = GET, - path = "/v1/vpc-router-routes/{route}", - tags = ["vpcs"], -}] -async fn vpc_router_route_view( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let route_selector = params::RouteSelector { - project: query.project, - vpc: query.vpc, - router: Some(query.router), - route: path.route, - }; - let (.., route) = nexus - .vpc_router_route_lookup(&opctx, route_selector)? 
- .fetch() - .await?; - Ok(HttpResponseOk(route.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // VPCs + + /// List VPCs + #[endpoint { + method = GET, + path = "/v1/vpcs", + tags = ["vpcs"], + }] + async fn vpc_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let project_lookup = + nexus.project_lookup(&opctx, scan_params.selector.clone())?; + let vpcs = nexus + .vpc_list(&opctx, &project_lookup, &paginated_by) + .await? + .into_iter() + .map(|p| p.into()) + .collect(); + + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + vpcs, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create route -#[endpoint { - method = POST, - path = "/v1/vpc-router-routes", - tags = ["vpcs"], -}] -async fn vpc_router_route_create( - rqctx: RequestContext, - query_params: Query, - create_params: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Create VPC + #[endpoint { + method = POST, + path = "/v1/vpcs", + tags = ["vpcs"], + }] + async fn vpc_create( + rqctx: RequestContext, + query_params: Query, + body: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let query = query_params.into_inner(); - let create = create_params.into_inner(); - let router_lookup = nexus.vpc_router_lookup(&opctx, query)?; - let route = nexus - .router_create_route( - &opctx, - &router_lookup, - &RouterRouteKind::Custom, - &create, - ) - .await?; - Ok(HttpResponseCreated(route.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let new_vpc_params = body.into_inner(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let project_lookup = nexus.project_lookup(&opctx, query)?; + let vpc = nexus + .project_create_vpc(&opctx, &project_lookup, &new_vpc_params) + .await?; + Ok(HttpResponseCreated(vpc.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete route -#[endpoint { - method = DELETE, - path = "/v1/vpc-router-routes/{route}", - tags = ["vpcs"], -}] -async fn vpc_router_route_delete( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let route_selector = params::RouteSelector { - project: query.project, - vpc: query.vpc, - router: query.router, - route: path.route, - }; - let route_lookup = - nexus.vpc_router_route_lookup(&opctx, route_selector)?; - nexus.router_delete_route(&opctx, &route_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - 
.instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch VPC + #[endpoint { + method = GET, + path = "/v1/vpcs/{vpc}", + tags = ["vpcs"], + }] + async fn vpc_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let vpc_selector = + params::VpcSelector { project: query.project, vpc: path.vpc }; + let (.., vpc) = + nexus.vpc_lookup(&opctx, vpc_selector)?.fetch().await?; + Ok(HttpResponseOk(vpc.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Update route -#[endpoint { - method = PUT, - path = "/v1/vpc-router-routes/{route}", - tags = ["vpcs"], -}] -async fn vpc_router_route_update( - rqctx: RequestContext, - path_params: Path, - query_params: Query, - router_params: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let router_params = router_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let route_selector = params::RouteSelector { - project: query.project, - vpc: query.vpc, - router: query.router, - route: path.route, - }; - let route_lookup = - nexus.vpc_router_route_lookup(&opctx, route_selector)?; - let route = nexus - .router_update_route(&opctx, &route_lookup, &router_params) - .await?; - Ok(HttpResponseOk(route.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Update a VPC + #[endpoint { + method = PUT, + path = "/v1/vpcs/{vpc}", + tags = ["vpcs"], + }] + async fn vpc_update( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + updated_vpc: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let updated_vpc_params = &updated_vpc.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let vpc_selector = + params::VpcSelector { project: query.project, vpc: path.vpc }; + let vpc_lookup = nexus.vpc_lookup(&opctx, vpc_selector)?; + let vpc = nexus + .project_update_vpc(&opctx, &vpc_lookup, &updated_vpc_params) + .await?; + Ok(HttpResponseOk(vpc.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Racks + /// Delete VPC + #[endpoint { + method = DELETE, + path = "/v1/vpcs/{vpc}", + tags = ["vpcs"], + }] + async fn vpc_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let vpc_selector = + params::VpcSelector { project: query.project, vpc: path.vpc }; + let vpc_lookup = nexus.vpc_lookup(&opctx, vpc_selector)?; + nexus.project_delete_vpc(&opctx, &vpc_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + 
.instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List racks -#[endpoint { - method = GET, - path = "/v1/system/hardware/racks", - tags = ["system/hardware"], -}] -async fn rack_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let racks = nexus - .racks_list(&opctx, &data_page_params_for(&rqctx, &query)?) - .await? - .into_iter() - .map(|r| r.into()) - .collect(); - Ok(HttpResponseOk(ScanById::results_page( - &query, - racks, - &|_, rack: &Rack| rack.identity.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List subnets + #[endpoint { + method = GET, + path = "/v1/vpc-subnets", + tags = ["vpcs"], + }] + async fn vpc_subnet_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let vpc_lookup = + nexus.vpc_lookup(&opctx, scan_params.selector.clone())?; + let subnets = nexus + .vpc_subnet_list(&opctx, &vpc_lookup, &paginated_by) + .await? + .into_iter() + .map(|vpc| vpc.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + subnets, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Path parameters for Rack requests -#[derive(Deserialize, JsonSchema)] -struct RackPathParam { - /// The rack's unique ID. 
- rack_id: Uuid, -} + /// Create subnet + #[endpoint { + method = POST, + path = "/v1/vpc-subnets", + tags = ["vpcs"], + }] + async fn vpc_subnet_create( + rqctx: RequestContext, + query_params: Query, + create_params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let create = create_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let vpc_lookup = nexus.vpc_lookup(&opctx, query)?; + let subnet = + nexus.vpc_create_subnet(&opctx, &vpc_lookup, &create).await?; + Ok(HttpResponseCreated(subnet.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch rack -#[endpoint { - method = GET, - path = "/v1/system/hardware/racks/{rack_id}", - tags = ["system/hardware"], -}] -async fn rack_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let rack_info = nexus.rack_lookup(&opctx, &path.rack_id).await?; - Ok(HttpResponseOk(rack_info.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch subnet + #[endpoint { + method = GET, + path = "/v1/vpc-subnets/{subnet}", + tags = ["vpcs"], + }] + async fn vpc_subnet_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let subnet_selector = params::SubnetSelector { + project: query.project, + vpc: query.vpc, + subnet: path.subnet, + }; + let (.., subnet) = nexus + .vpc_subnet_lookup(&opctx, subnet_selector)? 
+ .fetch() + .await?; + Ok(HttpResponseOk(subnet.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List uninitialized sleds -#[endpoint { - method = GET, - path = "/v1/system/hardware/sleds-uninitialized", - tags = ["system/hardware"] -}] -async fn sled_list_uninitialized( - rqctx: RequestContext, - query: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - // We don't actually support real pagination - let pag_params = query.into_inner(); - if let dropshot::WhichPage::Next(last_seen) = &pag_params.page { - return Err( - Error::invalid_value(last_seen.clone(), "bad page token").into() - ); - } - let handler = async { - let nexus = &apictx.context.nexus; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let sleds = nexus.sled_list_uninitialized(&opctx).await?; - Ok(HttpResponseOk(ResultsPage { items: sleds, next_page: None })) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Delete subnet + #[endpoint { + method = DELETE, + path = "/v1/vpc-subnets/{subnet}", + tags = ["vpcs"], + }] + async fn vpc_subnet_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let subnet_selector = params::SubnetSelector { + project: query.project, + vpc: query.vpc, + subnet: path.subnet, + }; + let subnet_lookup = + nexus.vpc_subnet_lookup(&opctx, subnet_selector)?; + nexus.vpc_delete_subnet(&opctx, &subnet_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// The unique ID of a sled. -#[derive(Clone, Debug, Serialize, JsonSchema)] -pub struct SledId { - pub id: Uuid, -} + /// Update subnet + #[endpoint { + method = PUT, + path = "/v1/vpc-subnets/{subnet}", + tags = ["vpcs"], + }] + async fn vpc_subnet_update( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + subnet_params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let subnet_params = subnet_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let subnet_selector = params::SubnetSelector { + project: query.project, + vpc: query.vpc, + subnet: path.subnet, + }; + let subnet_lookup = + nexus.vpc_subnet_lookup(&opctx, subnet_selector)?; + let subnet = nexus + .vpc_update_subnet(&opctx, &subnet_lookup, &subnet_params) + .await?; + Ok(HttpResponseOk(subnet.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Add sled to initialized rack -// -// TODO: In the future this should really be a PUT request, once we resolve -// https://github.com/oxidecomputer/omicron/issues/4494. It should also -// explicitly be tied to a rack via a `rack_id` path param. For now we assume -// we are only operating on single rack systems. 
-#[endpoint { - method = POST, - path = "/v1/system/hardware/sleds", - tags = ["system/hardware"] -}] -async fn sled_add( - rqctx: RequestContext, - sled: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let id = nexus - .sled_add(&opctx, sled.into_inner()) - .await? - .into_untyped_uuid(); - Ok(HttpResponseCreated(SledId { id })) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // This endpoint is likely temporary. We would rather list all IPs allocated in + // a subnet whether they come from NICs or something else. See + // https://github.com/oxidecomputer/omicron/issues/2476 + + /// List network interfaces + #[endpoint { + method = GET, + path = "/v1/vpc-subnets/{subnet}/network-interfaces", + tags = ["vpcs"], + }] + async fn vpc_subnet_list_network_interfaces( + rqctx: RequestContext, + path_params: Path, + query_params: Query>, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let path = path_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let subnet_selector = params::SubnetSelector { + project: scan_params.selector.project.clone(), + vpc: scan_params.selector.vpc.clone(), + subnet: path.subnet, + }; + let subnet_lookup = + nexus.vpc_subnet_lookup(&opctx, subnet_selector)?; + let interfaces = nexus + .subnet_list_instance_network_interfaces( + &opctx, + &subnet_lookup, + &paginated_by, + ) + .await? + .into_iter() + .map(|interfaces| interfaces.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + interfaces, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Sleds + // VPC Firewalls + + /// List firewall rules + #[endpoint { + method = GET, + path = "/v1/vpc-firewall-rules", + tags = ["vpcs"], + }] + async fn vpc_firewall_rules_view( + rqctx: RequestContext, + query_params: Query, + ) -> Result, HttpError> { + // TODO: Check If-Match and fail if the ETag doesn't match anymore. + // Without this check, if firewall rules change while someone is listing + // the rules, they will see a mix of the old and new rules. 
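// [Editor's sketch, not part of the patch.] A minimal illustration of the
// If-Match precondition check that the TODO above describes, assuming the
// server can compute a strong ETag for the current ruleset. All names here
// are hypothetical.
fn precondition_holds(if_match: Option<&str>, current_etag: &str) -> bool {
    match if_match {
        // No If-Match header: the request is unconditional.
        None => true,
        // "If-Match: *" succeeds whenever the resource exists at all.
        Some("*") => true,
        // Otherwise the header carries a comma-separated list of ETags; the
        // request proceeds only if one of them matches the current state.
        Some(list) => list.split(',').any(|tag| tag.trim() == current_etag),
    }
}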
+ let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let vpc_lookup = nexus.vpc_lookup(&opctx, query)?; + let rules = + nexus.vpc_list_firewall_rules(&opctx, &vpc_lookup).await?; + Ok(HttpResponseOk(VpcFirewallRules { + rules: rules.into_iter().map(|rule| rule.into()).collect(), + })) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List sleds -#[endpoint { - method = GET, - path = "/v1/system/hardware/sleds", - tags = ["system/hardware"], -}] -async fn sled_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let sleds = nexus - .sled_list(&opctx, &data_page_params_for(&rqctx, &query)?) - .await? - .into_iter() - .map(|s| s.into()) - .collect(); - Ok(HttpResponseOk(ScanById::results_page( - &query, - sleds, - &|_, sled: &Sled| sled.identity.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // Note: the limits in the below comment come from the firewall rules model + // file, nexus/db-model/src/vpc_firewall_rule.rs. + + /// Replace firewall rules + /// + /// The maximum number of rules per VPC is 1024. + /// + /// Targets are used to specify the set of instances to which a firewall rule + /// applies. You can target instances directly by name, or specify a VPC, VPC + /// subnet, IP, or IP subnet, which will apply the rule to traffic going to + /// all matching instances. Targets are additive: the rule applies to instances + /// matching ANY target. The maximum number of targets is 256. + /// + /// Filters reduce the scope of a firewall rule. Without filters, the rule + /// applies to all packets to the targets (or from the targets, if it's an + /// outbound rule). With multiple filters, the rule applies only to packets + /// matching ALL filters. The maximum number of each type of filter is 256. + #[endpoint { + method = PUT, + path = "/v1/vpc-firewall-rules", + tags = ["vpcs"], + }] + async fn vpc_firewall_rules_update( + rqctx: RequestContext, + query_params: Query, + router_params: TypedBody, + ) -> Result, HttpError> { + // TODO: Check If-Match and fail if the ETag doesn't match anymore. 
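// [Editor's sketch, not part of the patch.] The target/filter semantics in
// the doc comment on vpc_firewall_rules_update above boil down to an any/all
// predicate: a rule applies to a packet when ANY target matches and ALL
// filters match. `Target`, `Filter`, and `Packet` are hypothetical stand-ins
// for the real model types in nexus/db-model.
struct Packet { dst_instance: u32, port: u16 }
struct Target { instance: u32 }
struct Filter { port: u16 }

fn rule_applies(targets: &[Target], filters: &[Filter], pkt: &Packet) -> bool {
    // Targets are additive: matching any one of them is sufficient...
    targets.iter().any(|t| t.instance == pkt.dst_instance)
        // ...while filters are restrictive: the packet must satisfy every one.
        && filters.iter().all(|f| f.port == pkt.port)
}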
+ // TODO: limit size of the ruleset because the GET endpoint is not paginated + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let router_params = router_params.into_inner(); + let vpc_lookup = nexus.vpc_lookup(&opctx, query)?; + let rules = nexus + .vpc_update_firewall_rules(&opctx, &vpc_lookup, &router_params) + .await?; + Ok(HttpResponseOk(VpcFirewallRules { + rules: rules.into_iter().map(|rule| rule.into()).collect(), + })) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch sled -#[endpoint { - method = GET, - path = "/v1/system/hardware/sleds/{sled_id}", - tags = ["system/hardware"], -}] -async fn sled_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let (.., sled) = - nexus.sled_lookup(&opctx, &path.sled_id)?.fetch().await?; - Ok(HttpResponseOk(sled.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // VPC Routers + + /// List routers + #[endpoint { + method = GET, + path = "/v1/vpc-routers", + tags = ["vpcs"], + }] + async fn vpc_router_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let vpc_lookup = + nexus.vpc_lookup(&opctx, scan_params.selector.clone())?; + let routers = nexus + .vpc_router_list(&opctx, &vpc_lookup, &paginated_by) + .await? + .into_iter() + .map(|s| s.into()) + .collect(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + routers, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Set sled provision policy -#[endpoint { - method = PUT, - path = "/v1/system/hardware/sleds/{sled_id}/provision-policy", - tags = ["system/hardware"], -}] -async fn sled_set_provision_policy( - rqctx: RequestContext, - path_params: Path, - new_provision_state: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; + /// Fetch router + #[endpoint { + method = GET, + path = "/v1/vpc-routers/{router}", + tags = ["vpcs"], + }] + async fn vpc_router_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let router_selector = params::RouterSelector { + project: query.project, + vpc: query.vpc, + router: path.router, + }; + let (.., vpc_router) = nexus + .vpc_router_lookup(&opctx, router_selector)? 
+ .fetch() + .await?; + Ok(HttpResponseOk(vpc_router.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let path = path_params.into_inner(); - let new_state = new_provision_state.into_inner().state; + /// Create VPC router + #[endpoint { + method = POST, + path = "/v1/vpc-routers", + tags = ["vpcs"], + }] + async fn vpc_router_create( + rqctx: RequestContext, + query_params: Query, + create_params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let create = create_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let vpc_lookup = nexus.vpc_lookup(&opctx, query)?; + let router = nexus + .vpc_create_router( + &opctx, + &vpc_lookup, + &db::model::VpcRouterKind::Custom, + &create, + ) + .await?; + Ok(HttpResponseCreated(router.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Delete router + #[endpoint { + method = DELETE, + path = "/v1/vpc-routers/{router}", + tags = ["vpcs"], + }] + async fn vpc_router_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let router_selector = params::RouterSelector { + project: query.project, + vpc: query.vpc, + router: path.router, + }; + let router_lookup = + nexus.vpc_router_lookup(&opctx, router_selector)?; + nexus.vpc_delete_router(&opctx, &router_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let sled_lookup = nexus.sled_lookup(&opctx, &path.sled_id)?; + /// Update router + #[endpoint { + method = PUT, + path = "/v1/vpc-routers/{router}", + tags = ["vpcs"], + }] + async fn vpc_router_update( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + router_params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let router_params = router_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let router_selector = params::RouterSelector { + project: query.project, + vpc: query.vpc, + router: path.router, + }; + let router_lookup = + nexus.vpc_router_lookup(&opctx, router_selector)?; + let router = nexus + .vpc_update_router(&opctx, &router_lookup, &router_params) + .await?; + Ok(HttpResponseOk(router.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let old_state = nexus - .sled_set_provision_policy(&opctx, &sled_lookup, new_state) - .await?; + /// List routes + /// + /// List the routes associated with a router in a particular VPC. 
+    #[endpoint {
+        method = GET,
+        path = "/v1/vpc-router-routes",
+        tags = ["vpcs"],
+    }]
+    async fn vpc_router_route_list(
+        rqctx: RequestContext,
+        query_params: Query>,
+    ) -> Result>, HttpError> {
+        let apictx = rqctx.context();
+        let handler = async {
+            let opctx =
+                crate::context::op_context_for_external_api(&rqctx).await?;
+            let nexus = &apictx.context.nexus;
+            let query = query_params.into_inner();
+            let pag_params = data_page_params_for(&rqctx, &query)?;
+            let scan_params = ScanByNameOrId::from_query(&query)?;
+            let paginated_by = name_or_id_pagination(&pag_params, scan_params)?;
+            let router_lookup = nexus
+                .vpc_router_lookup(&opctx, scan_params.selector.clone())?;
+            let routes = nexus
+                .vpc_router_route_list(&opctx, &router_lookup, &paginated_by)
+                .await?
+                .into_iter()
+                .map(|route| route.into())
+                .collect();
+            Ok(HttpResponseOk(ScanByNameOrId::results_page(
+                &query,
+                routes,
+                &marker_for_name_or_id,
+            )?))
+        };
+        apictx
+            .context
+            .external_latencies
+            .instrument_dropshot_handler(&rqctx, handler)
+            .await
+    }
-        let response =
-            params::SledProvisionPolicyResponse { old_state, new_state };
+    // VPC Router Routes
+
+    /// Fetch route
+    #[endpoint {
+        method = GET,
+        path = "/v1/vpc-router-routes/{route}",
+        tags = ["vpcs"],
+    }]
+    async fn vpc_router_route_view(
+        rqctx: RequestContext,
+        path_params: Path,
+        query_params: Query,
+    ) -> Result, HttpError> {
+        let apictx = rqctx.context();
+        let handler = async {
+            let opctx =
+                crate::context::op_context_for_external_api(&rqctx).await?;
+            let nexus = &apictx.context.nexus;
+            let path = path_params.into_inner();
+            let query = query_params.into_inner();
+            let route_selector = params::RouteSelector {
+                project: query.project,
+                vpc: query.vpc,
+                router: Some(query.router),
+                route: path.route,
+            };
+            let (.., route) = nexus
+                .vpc_router_route_lookup(&opctx, route_selector)?
+ .fetch() + .await?; + Ok(HttpResponseOk(route.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - Ok(HttpResponseOk(response)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Create route + #[endpoint { + method = POST, + path = "/v1/vpc-router-routes", + tags = ["vpcs"], + }] + async fn vpc_router_route_create( + rqctx: RequestContext, + query_params: Query, + create_params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let create = create_params.into_inner(); + let router_lookup = nexus.vpc_router_lookup(&opctx, query)?; + let route = nexus + .router_create_route( + &opctx, + &router_lookup, + &RouterRouteKind::Custom, + &create, + ) + .await?; + Ok(HttpResponseCreated(route.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List instances running on given sled -#[endpoint { - method = GET, - path = "/v1/system/hardware/sleds/{sled_id}/instances", - tags = ["system/hardware"], -}] -async fn sled_instance_list( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let sled_lookup = nexus.sled_lookup(&opctx, &path.sled_id)?; - let sled_instances = nexus - .sled_instance_list( - &opctx, - &sled_lookup, - &data_page_params_for(&rqctx, &query)?, - ) - .await? 
- .into_iter() - .map(|s| s.into()) - .collect(); - Ok(HttpResponseOk(ScanById::results_page( - &query, - sled_instances, - &|_, sled_instance: &views::SledInstance| sled_instance.identity.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Delete route + #[endpoint { + method = DELETE, + path = "/v1/vpc-router-routes/{route}", + tags = ["vpcs"], + }] + async fn vpc_router_route_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let route_selector = params::RouteSelector { + project: query.project, + vpc: query.vpc, + router: query.router, + route: path.route, + }; + let route_lookup = + nexus.vpc_router_route_lookup(&opctx, route_selector)?; + nexus.router_delete_route(&opctx, &route_lookup).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Physical disks + /// Update route + #[endpoint { + method = PUT, + path = "/v1/vpc-router-routes/{route}", + tags = ["vpcs"], + }] + async fn vpc_router_route_update( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + router_params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let router_params = router_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let route_selector = params::RouteSelector { + project: query.project, + vpc: query.vpc, + router: query.router, + route: path.route, + }; + let route_lookup = + nexus.vpc_router_route_lookup(&opctx, route_selector)?; + let route = nexus + .router_update_route(&opctx, &route_lookup, &router_params) + .await?; + Ok(HttpResponseOk(route.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List physical disks -#[endpoint { - method = GET, - path = "/v1/system/hardware/disks", - tags = ["system/hardware"], -}] -async fn physical_disk_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let disks = nexus - .physical_disk_list(&opctx, &data_page_params_for(&rqctx, &query)?) - .await? 
- .into_iter() - .map(|s| s.into()) - .collect(); - Ok(HttpResponseOk(ScanById::results_page( - &query, - disks, - &|_, disk: &PhysicalDisk| disk.identity.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // Racks + + /// List racks + #[endpoint { + method = GET, + path = "/v1/system/hardware/racks", + tags = ["system/hardware"], + }] + async fn rack_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let racks = nexus + .racks_list(&opctx, &data_page_params_for(&rqctx, &query)?) + .await? + .into_iter() + .map(|r| r.into()) + .collect(); + Ok(HttpResponseOk(ScanById::results_page( + &query, + racks, + &|_, rack: &Rack| rack.identity.id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Get a physical disk -#[endpoint { - method = GET, - path = "/v1/system/hardware/disks/{disk_id}", - tags = ["system/hardware"], -}] -async fn physical_disk_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Path parameters for Rack requests + #[derive(Deserialize, JsonSchema)] + struct RackPathParam { + /// The rack's unique ID. + rack_id: Uuid, + } - let (.., physical_disk) = - nexus.physical_disk_lookup(&opctx, &path)?.fetch().await?; - Ok(HttpResponseOk(physical_disk.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch rack + #[endpoint { + method = GET, + path = "/v1/system/hardware/racks/{rack_id}", + tags = ["system/hardware"], + }] + async fn rack_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let rack_info = nexus.rack_lookup(&opctx, &path.rack_id).await?; + Ok(HttpResponseOk(rack_info.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + /// List uninitialized sleds + #[endpoint { + method = GET, + path = "/v1/system/hardware/sleds-uninitialized", + tags = ["system/hardware"] + }] + async fn sled_list_uninitialized( + rqctx: RequestContext, + query: Query>, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + // We don't actually support real pagination + let pag_params = query.into_inner(); + if let dropshot::WhichPage::Next(last_seen) = &pag_params.page { + return Err(Error::invalid_value( + last_seen.clone(), + "bad page token", + ) + .into()); + } + let handler = async { + let nexus = &apictx.context.nexus; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let sleds = nexus.sled_list_uninitialized(&opctx).await?; + Ok(HttpResponseOk(ResultsPage { items: sleds, next_page: None })) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Switches + /// The unique ID of a sled. 
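+    ///
+    /// This is the response body of `sled_add` below and serializes as a
+    /// single-field JSON object, e.g. `{"id": "..."}` (value elided).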
+ #[derive(Clone, Debug, Serialize, JsonSchema)] + pub struct SledId { + pub id: Uuid, + } -/// List switches -#[endpoint { - method = GET, - path = "/v1/system/hardware/switches", - tags = ["system/hardware"], -}] -async fn switch_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let switches = nexus - .switch_list(&opctx, &data_page_params_for(&rqctx, &query)?) - .await? - .into_iter() - .map(|s| s.into()) - .collect(); - Ok(HttpResponseOk(ScanById::results_page( - &query, - switches, - &|_, switch: &views::Switch| switch.identity.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Add sled to initialized rack + // + // TODO: In the future this should really be a PUT request, once we resolve + // https://github.com/oxidecomputer/omicron/issues/4494. It should also + // explicitly be tied to a rack via a `rack_id` path param. For now we assume + // we are only operating on single rack systems. + #[endpoint { + method = POST, + path = "/v1/system/hardware/sleds", + tags = ["system/hardware"] + }] + async fn sled_add( + rqctx: RequestContext, + sled: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let nexus = &apictx.context.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let id = nexus + .sled_add(&opctx, sled.into_inner()) + .await? + .into_untyped_uuid(); + Ok(HttpResponseCreated(SledId { id })) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch switch -#[endpoint { - method = GET, - path = "/v1/system/hardware/switches/{switch_id}", - tags = ["system/hardware"], - }] -async fn switch_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let (.., switch) = nexus - .switch_lookup( - &opctx, - params::SwitchSelector { switch: path.switch_id }, - )? - .fetch() - .await?; - Ok(HttpResponseOk(switch.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // Sleds + + /// List sleds + #[endpoint { + method = GET, + path = "/v1/system/hardware/sleds", + tags = ["system/hardware"], + }] + async fn sled_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let sleds = nexus + .sled_list(&opctx, &data_page_params_for(&rqctx, &query)?) + .await? 
+ .into_iter() + .map(|s| s.into()) + .collect(); + Ok(HttpResponseOk(ScanById::results_page( + &query, + sleds, + &|_, sled: &Sled| sled.identity.id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List physical disks attached to sleds -#[endpoint { - method = GET, - path = "/v1/system/hardware/sleds/{sled_id}/disks", - tags = ["system/hardware"], -}] -async fn sled_physical_disk_list( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let query = query_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let disks = nexus - .sled_list_physical_disks( - &opctx, - path.sled_id, - &data_page_params_for(&rqctx, &query)?, - ) - .await? - .into_iter() - .map(|s| s.into()) - .collect(); - Ok(HttpResponseOk(ScanById::results_page( - &query, - disks, - &|_, disk: &PhysicalDisk| disk.identity.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch sled + #[endpoint { + method = GET, + path = "/v1/system/hardware/sleds/{sled_id}", + tags = ["system/hardware"], + }] + async fn sled_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let (.., sled) = + nexus.sled_lookup(&opctx, &path.sled_id)?.fetch().await?; + Ok(HttpResponseOk(sled.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Metrics + /// Set sled provision policy + #[endpoint { + method = PUT, + path = "/v1/system/hardware/sleds/{sled_id}/provision-policy", + tags = ["system/hardware"], + }] + async fn sled_set_provision_policy( + rqctx: RequestContext, + path_params: Path, + new_provision_state: TypedBody, + ) -> Result, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; -#[derive(Display, Deserialize, JsonSchema)] -#[display(style = "snake_case")] -#[serde(rename_all = "snake_case")] -pub enum SystemMetricName { - VirtualDiskSpaceProvisioned, - CpusProvisioned, - RamProvisioned, -} + let path = path_params.into_inner(); + let new_state = new_provision_state.into_inner().state; -#[derive(Deserialize, JsonSchema)] -struct SystemMetricsPathParam { - metric_name: SystemMetricName, -} + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; -/// View metrics -/// -/// View CPU, memory, or storage utilization metrics at the fleet or silo level. 
-#[endpoint { - method = GET, - path = "/v1/system/metrics/{metric_name}", - tags = ["system/metrics"], -}] -async fn system_metric( - rqctx: RequestContext, - path_params: Path, - pag_params: Query< - PaginationParams, - >, - other_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let metric_name = path_params.into_inner().metric_name; - let pagination = pag_params.into_inner(); - let limit = rqctx.page_limit(&pagination)?; + let sled_lookup = nexus.sled_lookup(&opctx, &path.sled_id)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let silo_lookup = match other_params.into_inner().silo { - Some(silo) => Some(nexus.silo_lookup(&opctx, silo)?), - _ => None, + let old_state = nexus + .sled_set_provision_policy(&opctx, &sled_lookup, new_state) + .await?; + + let response = + params::SledProvisionPolicyResponse { old_state, new_state }; + + Ok(HttpResponseOk(response)) }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let result = nexus - .system_metric_list( - &opctx, - metric_name, - silo_lookup, - pagination, - limit, - ) - .await?; - - Ok(HttpResponseOk(result)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List instances running on given sled + #[endpoint { + method = GET, + path = "/v1/system/hardware/sleds/{sled_id}/instances", + tags = ["system/hardware"], + }] + async fn sled_instance_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let sled_lookup = nexus.sled_lookup(&opctx, &path.sled_id)?; + let sled_instances = nexus + .sled_instance_list( + &opctx, + &sled_lookup, + &data_page_params_for(&rqctx, &query)?, + ) + .await? + .into_iter() + .map(|s| s.into()) + .collect(); + Ok(HttpResponseOk(ScanById::results_page( + &query, + sled_instances, + &|_, sled_instance: &views::SledInstance| { + sled_instance.identity.id + }, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// View metrics -/// -/// View CPU, memory, or storage utilization metrics at the silo or project level. -#[endpoint { - method = GET, - path = "/v1/metrics/{metric_name}", - tags = ["metrics"], -}] -async fn silo_metric( - rqctx: RequestContext, - path_params: Path, - pag_params: Query< - PaginationParams, - >, - other_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let metric_name = path_params.into_inner().metric_name; + // Physical disks + + /// List physical disks + #[endpoint { + method = GET, + path = "/v1/system/hardware/disks", + tags = ["system/hardware"], + }] + async fn physical_disk_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let disks = nexus + .physical_disk_list( + &opctx, + &data_page_params_for(&rqctx, &query)?, + ) + .await? 
+ .into_iter() + .map(|s| s.into()) + .collect(); + Ok(HttpResponseOk(ScanById::results_page( + &query, + disks, + &|_, disk: &PhysicalDisk| disk.identity.id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let project_lookup = match other_params.into_inner().project { - Some(project) => { - let project_selector = params::ProjectSelector { project }; - Some(nexus.project_lookup(&opctx, project_selector)?) - } - _ => None, + /// Get a physical disk + #[endpoint { + method = GET, + path = "/v1/system/hardware/disks/{disk_id}", + tags = ["system/hardware"], + }] + async fn physical_disk_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + + let (.., physical_disk) = + nexus.physical_disk_lookup(&opctx, &path)?.fetch().await?; + Ok(HttpResponseOk(physical_disk.into())) }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let pagination = pag_params.into_inner(); - let limit = rqctx.page_limit(&pagination)?; + // Switches + + /// List switches + #[endpoint { + method = GET, + path = "/v1/system/hardware/switches", + tags = ["system/hardware"], + }] + async fn switch_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let switches = nexus + .switch_list(&opctx, &data_page_params_for(&rqctx, &query)?) + .await? + .into_iter() + .map(|s| s.into()) + .collect(); + Ok(HttpResponseOk(ScanById::results_page( + &query, + switches, + &|_, switch: &views::Switch| switch.identity.id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let result = nexus - .silo_metric_list( - &opctx, - metric_name, - project_lookup, - pagination, - limit, - ) - .await?; - - Ok(HttpResponseOk(result)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch switch + #[endpoint { + method = GET, + path = "/v1/system/hardware/switches/{switch_id}", + tags = ["system/hardware"], + }] + async fn switch_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let (.., switch) = nexus + .switch_lookup( + &opctx, + params::SwitchSelector { switch: path.switch_id }, + )? 
+ .fetch() + .await?; + Ok(HttpResponseOk(switch.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List timeseries schemas -#[endpoint { - method = GET, - path = "/v1/timeseries/schema", - tags = ["metrics"], -}] -async fn timeseries_schema_list( - rqctx: RequestContext, - pag_params: Query, -) -> Result>, HttpError> -{ - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let pagination = pag_params.into_inner(); - let limit = rqctx.page_limit(&pagination)?; - nexus - .timeseries_schema_list(&opctx, &pagination, limit) - .await - .map(HttpResponseOk) - .map_err(HttpError::from) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// List physical disks attached to sleds + #[endpoint { + method = GET, + path = "/v1/system/hardware/sleds/{sled_id}/disks", + tags = ["system/hardware"], + }] + async fn sled_physical_disk_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let disks = nexus + .sled_list_physical_disks( + &opctx, + path.sled_id, + &data_page_params_for(&rqctx, &query)?, + ) + .await? + .into_iter() + .map(|s| s.into()) + .collect(); + Ok(HttpResponseOk(ScanById::results_page( + &query, + disks, + &|_, disk: &PhysicalDisk| disk.identity.id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// TODO: can we link to an OxQL reference? Do we have one? Can we even do links? - -/// Run timeseries query -/// -/// Queries are written in OxQL. 
-#[endpoint { - method = POST, - path = "/v1/timeseries/query", - tags = ["metrics"], -}] -async fn timeseries_query( - rqctx: RequestContext, - body: TypedBody, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let query = body.into_inner().query; - nexus - .timeseries_query(&opctx, &query) - .await - .map(HttpResponseOk) - .map_err(HttpError::from) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // Metrics -// Updates - -/// Upload TUF repository -#[endpoint { - method = PUT, - path = "/v1/system/update/repository", - tags = ["system/update"], - unpublished = true, -}] -async fn system_update_put_repository( - rqctx: RequestContext, - query: Query, - body: StreamingBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let query = query.into_inner(); - let body = body.into_stream(); - let update = - nexus.updates_put_repository(&opctx, body, query.file_name).await?; - Ok(HttpResponseOk(update)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + #[derive(Display, Deserialize, JsonSchema)] + #[display(style = "snake_case")] + #[serde(rename_all = "snake_case")] + pub enum SystemMetricName { + VirtualDiskSpaceProvisioned, + CpusProvisioned, + RamProvisioned, + } -/// Fetch TUF repository description -/// -/// Fetch description of TUF repository by system version. -#[endpoint { - method = GET, - path = "/v1/system/update/repository/{system_version}", - tags = ["system/update"], - unpublished = true, -}] -async fn system_update_get_repository( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let params = path_params.into_inner(); - let description = - nexus.updates_get_repository(&opctx, params.system_version).await?; - Ok(HttpResponseOk(TufRepoGetResponse { - description: description.into_external(), - })) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + #[derive(Deserialize, JsonSchema)] + struct SystemMetricsPathParam { + metric_name: SystemMetricName, + } -// Silo users + /// View metrics + /// + /// View CPU, memory, or storage utilization metrics at the fleet or silo level. 
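+    // The path takes the snake_case form of the enum above, so a request
+    // might look like this (query parameters assumed from the resource-metrics
+    // pagination params; values elided):
+    //
+    //   GET /v1/system/metrics/cpus_provisioned?start_time=...&end_time=...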
+ #[endpoint { + method = GET, + path = "/v1/system/metrics/{metric_name}", + tags = ["system/metrics"], + }] + async fn system_metric( + rqctx: RequestContext, + path_params: Path, + pag_params: Query< + PaginationParams, + >, + other_params: Query, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let metric_name = path_params.into_inner().metric_name; + let pagination = pag_params.into_inner(); + let limit = rqctx.page_limit(&pagination)?; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let silo_lookup = match other_params.into_inner().silo { + Some(silo) => Some(nexus.silo_lookup(&opctx, silo)?), + _ => None, + }; -/// List users -#[endpoint { - method = GET, - path = "/v1/users", - tags = ["silos"], -}] -async fn user_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pagparams = data_page_params_for(&rqctx, &query)?; - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let scan_params = ScanById::from_query(&query)?; + let result = nexus + .system_metric_list( + &opctx, + metric_name, + silo_lookup, + pagination, + limit, + ) + .await?; - // TODO: a valid UUID gets parsed here and will 404 if it doesn't exist - // (as expected) but a non-UUID string just gets let through as None - // (i.e., ignored) instead of 400ing + Ok(HttpResponseOk(result)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let users = if let Some(group_id) = scan_params.selector.group { - nexus - .current_silo_group_users_list(&opctx, &pagparams, &group_id) - .await? - } else { - nexus.silo_users_list_current(&opctx, &pagparams).await? - }; - - Ok(HttpResponseOk(ScanById::results_page( - &query, - users.into_iter().map(|i| i.into()).collect(), - &|_, user: &User| user.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// View metrics + /// + /// View CPU, memory, or storage utilization metrics at the silo or project level. + #[endpoint { + method = GET, + path = "/v1/metrics/{metric_name}", + tags = ["metrics"], + }] + async fn silo_metric( + rqctx: RequestContext, + path_params: Path, + pag_params: Query< + PaginationParams, + >, + other_params: Query, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let metric_name = path_params.into_inner().metric_name; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let project_lookup = match other_params.into_inner().project { + Some(project) => { + let project_selector = params::ProjectSelector { project }; + Some(nexus.project_lookup(&opctx, project_selector)?) 
+ } + _ => None, + }; -// Silo groups + let pagination = pag_params.into_inner(); + let limit = rqctx.page_limit(&pagination)?; -/// List groups -#[endpoint { - method = GET, - path = "/v1/groups", - tags = ["silos"], -}] -async fn group_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pagparams = data_page_params_for(&rqctx, &query)?; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let groups = nexus - .silo_groups_list(&opctx, &pagparams) - .await? - .into_iter() - .map(|i| i.into()) - .collect(); - Ok(HttpResponseOk(ScanById::results_page( - &query, - groups, - &|_, group: &Group| group.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let result = nexus + .silo_metric_list( + &opctx, + metric_name, + project_lookup, + pagination, + limit, + ) + .await?; -/// Fetch group -#[endpoint { - method = GET, - path = "/v1/groups/{group_id}", - tags = ["silos"], -}] -async fn group_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let (.., group) = - nexus.silo_group_lookup(&opctx, &path.group_id).fetch().await?; - Ok(HttpResponseOk(group.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + Ok(HttpResponseOk(result)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Built-in (system) users + /// List timeseries schemas + #[endpoint { + method = GET, + path = "/v1/timeseries/schema", + tags = ["metrics"], + }] + async fn timeseries_schema_list( + rqctx: RequestContext, + pag_params: Query, + ) -> Result< + HttpResponseOk>, + HttpError, + > { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let pagination = pag_params.into_inner(); + let limit = rqctx.page_limit(&pagination)?; + nexus + .timeseries_schema_list(&opctx, &pagination, limit) + .await + .map(HttpResponseOk) + .map_err(HttpError::from) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List built-in users -#[endpoint { - method = GET, - path = "/v1/system/users-builtin", - tags = ["system/silos"], -}] -async fn user_builtin_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pagparams = - data_page_params_for(&rqctx, &query)?.map_name(|n| Name::ref_cast(n)); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let users = nexus - .users_builtin_list(&opctx, &pagparams) - .await? - .into_iter() - .map(|i| i.into()) - .collect(); - Ok(HttpResponseOk(ScanByName::results_page( - &query, - users, - &marker_for_name, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // TODO: can we link to an OxQL reference? 
Do we have one? Can we even do links? + + /// Run timeseries query + /// + /// Queries are written in OxQL. + #[endpoint { + method = POST, + path = "/v1/timeseries/query", + tags = ["metrics"], + }] + async fn timeseries_query( + rqctx: RequestContext, + body: TypedBody, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let query = body.into_inner().query; + nexus + .timeseries_query(&opctx, &query) + .await + .map(HttpResponseOk) + .map_err(HttpError::from) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch built-in user -#[endpoint { - method = GET, - path = "/v1/system/users-builtin/{user}", - tags = ["system/silos"], -}] -async fn user_builtin_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let nexus = &apictx.context.nexus; - let user_selector = path_params.into_inner(); - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let (.., user) = - nexus.user_builtin_lookup(&opctx, &user_selector)?.fetch().await?; - Ok(HttpResponseOk(user.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // Updates + + /// Upload TUF repository + #[endpoint { + method = PUT, + path = "/v1/system/update/repository", + tags = ["system/update"], + unpublished = true, + }] + async fn system_update_put_repository( + rqctx: RequestContext, + query: Query, + body: StreamingBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let nexus = &apictx.context.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let query = query.into_inner(); + let body = body.into_stream(); + let update = nexus + .updates_put_repository(&opctx, body, query.file_name) + .await?; + Ok(HttpResponseOk(update)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Built-in roles + /// Fetch TUF repository description + /// + /// Fetch description of TUF repository by system version. + #[endpoint { + method = GET, + path = "/v1/system/update/repository/{system_version}", + tags = ["system/update"], + unpublished = true, + }] + async fn system_update_get_repository( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let nexus = &apictx.context.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let params = path_params.into_inner(); + let description = nexus + .updates_get_repository(&opctx, params.system_version) + .await?; + Ok(HttpResponseOk(TufRepoGetResponse { + description: description.into_external(), + })) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Roles have their own pagination scheme because they do not use the usual "id" -// or "name" types. For more, see the comment in dbinit.sql. 
-#[derive(Deserialize, JsonSchema, Serialize)] -struct RolePage { - last_seen: String, -} + // Silo users + + /// List users + #[endpoint { + method = GET, + path = "/v1/users", + tags = ["silos"], + }] + async fn user_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pagparams = data_page_params_for(&rqctx, &query)?; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let scan_params = ScanById::from_query(&query)?; + + // TODO: a valid UUID gets parsed here and will 404 if it doesn't exist + // (as expected) but a non-UUID string just gets let through as None + // (i.e., ignored) instead of 400ing + + let users = if let Some(group_id) = scan_params.selector.group { + nexus + .current_silo_group_users_list( + &opctx, &pagparams, &group_id, + ) + .await? + } else { + nexus.silo_users_list_current(&opctx, &pagparams).await? + }; -/// Path parameters for global (system) role requests -#[derive(Deserialize, JsonSchema)] -struct RolePathParam { - /// The built-in role's unique name. - role_name: String, -} + Ok(HttpResponseOk(ScanById::results_page( + &query, + users.into_iter().map(|i| i.into()).collect(), + &|_, user: &User| user.id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List built-in roles -#[endpoint { - method = GET, - path = "/v1/system/roles", - tags = ["roles"], -}] -async fn role_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let marker = match &query.page { - WhichPage::First(..) => None, - WhichPage::Next(RolePage { last_seen }) => { - Some(last_seen.split_once('.').ok_or_else(|| { - Error::invalid_value(last_seen.clone(), "bad page token") - })?) - .map(|(s1, s2)| (s1.to_string(), s2.to_string())) - } + // Silo groups + + /// List groups + #[endpoint { + method = GET, + path = "/v1/groups", + tags = ["silos"], + }] + async fn group_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pagparams = data_page_params_for(&rqctx, &query)?; + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let groups = nexus + .silo_groups_list(&opctx, &pagparams) + .await? + .into_iter() + .map(|i| i.into()) + .collect(); + Ok(HttpResponseOk(ScanById::results_page( + &query, + groups, + &|_, group: &Group| group.id, + )?)) }; - let pagparams = DataPageParams { - limit: rqctx.page_limit(&query)?, - direction: PaginationOrder::Ascending, - marker: marker.as_ref(), - }; - let roles = nexus - .roles_builtin_list(&opctx, &pagparams) - .await? 
- .into_iter() - .map(|i| i.into()) - .collect(); - Ok(HttpResponseOk(dropshot::ResultsPage::new( - roles, - &EmptyScanParams {}, - |role: &Role, _| RolePage { last_seen: role.name.to_string() }, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Fetch built-in role -#[endpoint { - method = GET, - path = "/v1/system/roles/{role_name}", - tags = ["roles"], -}] -async fn role_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let role_name = &path.role_name; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let role = nexus.role_builtin_fetch(&opctx, &role_name).await?; - Ok(HttpResponseOk(role.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch group + #[endpoint { + method = GET, + path = "/v1/groups/{group_id}", + tags = ["silos"], + }] + async fn group_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let (.., group) = + nexus.silo_group_lookup(&opctx, &path.group_id).fetch().await?; + Ok(HttpResponseOk(group.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Current user - -/// Fetch user for current session -#[endpoint { - method = GET, - path = "/v1/me", - tags = ["session"], -}] -pub(crate) async fn current_user_view( - rqctx: RequestContext, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let nexus = &apictx.context.nexus; - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let user = nexus.silo_user_fetch_self(&opctx).await?; - let (_, silo) = nexus.current_silo_lookup(&opctx)?.fetch().await?; - Ok(HttpResponseOk(views::CurrentUser { - user: user.into(), - silo_name: silo.name().clone(), - })) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + // Built-in (system) users -/// Fetch current user's groups -#[endpoint { - method = GET, - path = "/v1/me/groups", - tags = ["session"], - }] -pub(crate) async fn current_user_groups( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// List built-in users + #[endpoint { + method = GET, + path = "/v1/system/users-builtin", + tags = ["system/silos"], + }] + async fn user_builtin_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let query = query_params.into_inner(); - let groups = nexus - .silo_user_fetch_groups_for_self( - &opctx, - &data_page_params_for(&rqctx, &query)?, - ) - .await? 
- .into_iter() - .map(|d| d.into()) - .collect(); - Ok(HttpResponseOk(ScanById::results_page( - &query, - groups, - &|_, group: &views::Group| group.id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let pagparams = data_page_params_for(&rqctx, &query)? + .map_name(|n| Name::ref_cast(n)); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let users = nexus + .users_builtin_list(&opctx, &pagparams) + .await? + .into_iter() + .map(|i| i.into()) + .collect(); + Ok(HttpResponseOk(ScanByName::results_page( + &query, + users, + &marker_for_name, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + /// Fetch built-in user + #[endpoint { + method = GET, + path = "/v1/system/users-builtin/{user}", + tags = ["system/silos"], + }] + async fn user_builtin_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let user_selector = path_params.into_inner(); + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let (.., user) = nexus + .user_builtin_lookup(&opctx, &user_selector)? + .fetch() + .await?; + Ok(HttpResponseOk(user.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -// Per-user SSH public keys + // Built-in roles -/// List SSH public keys -/// -/// Lists SSH public keys for the currently authenticated user. -#[endpoint { - method = GET, - path = "/v1/me/ssh-keys", - tags = ["session"], -}] -async fn current_user_ssh_key_list( - rqctx: RequestContext, - query_params: Query, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + // Roles have their own pagination scheme because they do not use the usual "id" + // or "name" types. For more, see the comment in dbinit.sql. + #[derive(Deserialize, JsonSchema, Serialize)] + struct RolePage { + last_seen: String, + } + + /// Path parameters for global (system) role requests + #[derive(Deserialize, JsonSchema)] + struct RolePathParam { + /// The built-in role's unique name. + role_name: String, + } + + /// List built-in roles + #[endpoint { + method = GET, + path = "/v1/system/roles", + tags = ["roles"], + }] + async fn role_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let &actor = opctx - .authn - .actor_required() - .internal_context("listing current user's ssh keys")?; - let ssh_keys = nexus - .ssh_keys_list(&opctx, actor.actor_id(), &paginated_by) - .await? - .into_iter() - .map(SshKey::from) - .collect::>(); - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - ssh_keys, - &marker_for_name_or_id, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let marker = match &query.page { + WhichPage::First(..) 
=> None, + WhichPage::Next(RolePage { last_seen }) => { + Some(last_seen.split_once('.').ok_or_else(|| { + Error::invalid_value( + last_seen.clone(), + "bad page token", + ) + })?) + .map(|(s1, s2)| (s1.to_string(), s2.to_string())) + } + }; + let pagparams = DataPageParams { + limit: rqctx.page_limit(&query)?, + direction: PaginationOrder::Ascending, + marker: marker.as_ref(), + }; + let roles = nexus + .roles_builtin_list(&opctx, &pagparams) + .await? + .into_iter() + .map(|i| i.into()) + .collect(); + Ok(HttpResponseOk(dropshot::ResultsPage::new( + roles, + &EmptyScanParams {}, + |role: &Role, _| RolePage { last_seen: role.name.to_string() }, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create SSH public key -/// -/// Create an SSH public key for the currently authenticated user. -#[endpoint { - method = POST, - path = "/v1/me/ssh-keys", - tags = ["session"], -}] -async fn current_user_ssh_key_create( - rqctx: RequestContext, - new_key: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; + /// Fetch built-in role + #[endpoint { + method = GET, + path = "/v1/system/roles/{role_name}", + tags = ["roles"], + }] + async fn role_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); let nexus = &apictx.context.nexus; - let &actor = opctx - .authn - .actor_required() - .internal_context("creating ssh key for current user")?; - let ssh_key = nexus - .ssh_key_create(&opctx, actor.actor_id(), new_key.into_inner()) - .await?; - Ok(HttpResponseCreated(ssh_key.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + let path = path_params.into_inner(); + let role_name = &path.role_name; + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let role = nexus.role_builtin_fetch(&opctx, &role_name).await?; + Ok(HttpResponseOk(role.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + // Current user -/// Fetch SSH public key -/// -/// Fetch SSH public key associated with the currently authenticated user. 
-#[endpoint { + /// Fetch user for current session + #[endpoint { method = GET, - path = "/v1/me/ssh-keys/{ssh_key}", + path = "/v1/me", tags = ["session"], -}] -async fn current_user_ssh_key_view( - rqctx: RequestContext, - path_params: Path, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let &actor = opctx - .authn - .actor_required() - .internal_context("fetching one of current user's ssh keys")?; - let ssh_key_selector = params::SshKeySelector { - silo_user_id: actor.actor_id(), - ssh_key: path.ssh_key, - }; - let ssh_key_lookup = nexus.ssh_key_lookup(&opctx, &ssh_key_selector)?; - let (.., silo_user, _, ssh_key) = ssh_key_lookup.fetch().await?; - // Ensure the SSH key exists in the current silo - assert_eq!(silo_user.id(), actor.actor_id()); - Ok(HttpResponseOk(ssh_key.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + }] + pub(crate) async fn current_user_view( + rqctx: RequestContext, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let nexus = &apictx.context.nexus; + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let user = nexus.silo_user_fetch_self(&opctx).await?; + let (_, silo) = nexus.current_silo_lookup(&opctx)?.fetch().await?; + Ok(HttpResponseOk(views::CurrentUser { + user: user.into(), + silo_name: silo.name().clone(), + })) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete SSH public key -/// -/// Delete an SSH public key associated with the currently authenticated user. -#[endpoint { - method = DELETE, - path = "/v1/me/ssh-keys/{ssh_key}", - tags = ["session"], -}] -async fn current_user_ssh_key_delete( - rqctx: RequestContext, - path_params: Path, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let &actor = opctx - .authn - .actor_required() - .internal_context("deleting one of current user's ssh keys")?; - let ssh_key_selector = params::SshKeySelector { - silo_user_id: actor.actor_id(), - ssh_key: path.ssh_key, - }; - let ssh_key_lookup = nexus.ssh_key_lookup(&opctx, &ssh_key_selector)?; - nexus.ssh_key_delete(&opctx, actor.actor_id(), &ssh_key_lookup).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Fetch current user's groups + #[endpoint { + method = GET, + path = "/v1/me/groups", + tags = ["session"], + }] + pub(crate) async fn current_user_groups( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let groups = nexus + .silo_user_fetch_groups_for_self( + &opctx, + &data_page_params_for(&rqctx, &query)?, + ) + .await? 
+ .into_iter() + .map(|d| d.into()) + .collect(); + Ok(HttpResponseOk(ScanById::results_page( + &query, + groups, + &|_, group: &views::Group| group.id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// List instrumentation probes -#[endpoint { - method = GET, - path = "/v1/probes", - tags = ["system/probes"], -}] -async fn probe_list( - rqctx: RequestContext, - query_params: Query>, -) -> Result>, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + // Per-user SSH public keys + + /// List SSH public keys + /// + /// Lists SSH public keys for the currently authenticated user. + #[endpoint { + method = GET, + path = "/v1/me/ssh-keys", + tags = ["session"], + }] + async fn current_user_ssh_key_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let &actor = opctx + .authn + .actor_required() + .internal_context("listing current user's ssh keys")?; + let ssh_keys = nexus + .ssh_keys_list(&opctx, actor.actor_id(), &paginated_by) + .await? + .into_iter() + .map(SshKey::from) + .collect::>(); + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + ssh_keys, + &marker_for_name_or_id, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let nexus = &apictx.context.nexus; - let query = query_params.into_inner(); - let pag_params = data_page_params_for(&rqctx, &query)?; - let scan_params = ScanByNameOrId::from_query(&query)?; - let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; - let project_lookup = - nexus.project_lookup(&opctx, scan_params.selector.clone())?; - - let probes = - nexus.probe_list(&opctx, &project_lookup, &paginated_by).await?; - - Ok(HttpResponseOk(ScanByNameOrId::results_page( - &query, - probes, - &|_, p: &ProbeInfo| match paginated_by { - PaginatedBy::Id(_) => NameOrId::Id(p.id), - PaginatedBy::Name(_) => NameOrId::Name(p.name.clone()), - }, - )?)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Create SSH public key + /// + /// Create an SSH public key for the currently authenticated user. 
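+    // The request body carries the key's identity plus the key material; a
+    // minimal example might look like this (shape assumed from the external
+    // API create params; key material elided, name hypothetical):
+    //
+    //   { "name": "my-key", "description": "", "public_key": "ssh-ed25519 AAAA..." }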
+ #[endpoint { + method = POST, + path = "/v1/me/ssh-keys", + tags = ["session"], + }] + async fn current_user_ssh_key_create( + rqctx: RequestContext, + new_key: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let &actor = opctx + .authn + .actor_required() + .internal_context("creating ssh key for current user")?; + let ssh_key = nexus + .ssh_key_create(&opctx, actor.actor_id(), new_key.into_inner()) + .await?; + Ok(HttpResponseCreated(ssh_key.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// View instrumentation probe -#[endpoint { - method = GET, - path = "/v1/probes/{probe}", - tags = ["system/probes"], -}] -async fn probe_view( - rqctx: RequestContext, - path_params: Path, - query_params: Query, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + /// Fetch SSH public key + /// + /// Fetch SSH public key associated with the currently authenticated user. + #[endpoint { + method = GET, + path = "/v1/me/ssh-keys/{ssh_key}", + tags = ["session"], + }] + async fn current_user_ssh_key_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let &actor = opctx + .authn + .actor_required() + .internal_context("fetching one of current user's ssh keys")?; + let ssh_key_selector = params::SshKeySelector { + silo_user_id: actor.actor_id(), + ssh_key: path.ssh_key, + }; + let ssh_key_lookup = + nexus.ssh_key_lookup(&opctx, &ssh_key_selector)?; + let (.., silo_user, _, ssh_key) = ssh_key_lookup.fetch().await?; + // Ensure the SSH key exists in the current silo + assert_eq!(silo_user.id(), actor.actor_id()); + Ok(HttpResponseOk(ssh_key.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let project_selector = query_params.into_inner(); - let project_lookup = nexus.project_lookup(&opctx, project_selector)?; - let probe = - nexus.probe_get(&opctx, &project_lookup, &path.probe).await?; - Ok(HttpResponseOk(probe)) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + /// Delete SSH public key + /// + /// Delete an SSH public key associated with the currently authenticated user. 
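+    // Deletion is scoped to the caller: the selector built below pairs the
+    // authenticated actor's id with the `ssh_key` path parameter, so a user
+    // can only delete their own keys.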
+ #[endpoint { + method = DELETE, + path = "/v1/me/ssh-keys/{ssh_key}", + tags = ["session"], + }] + async fn current_user_ssh_key_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let &actor = opctx + .authn + .actor_required() + .internal_context("deleting one of current user's ssh keys")?; + let ssh_key_selector = params::SshKeySelector { + silo_user_id: actor.actor_id(), + ssh_key: path.ssh_key, + }; + let ssh_key_lookup = + nexus.ssh_key_lookup(&opctx, &ssh_key_selector)?; + nexus + .ssh_key_delete(&opctx, actor.actor_id(), &ssh_key_lookup) + .await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Create instrumentation probe -#[endpoint { - method = POST, - path = "/v1/probes", - tags = ["system/probes"], -}] -async fn probe_create( - rqctx: RequestContext, - query_params: Query, - new_probe: TypedBody, -) -> Result, HttpError> { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + /// List instrumentation probes + #[endpoint { + method = GET, + path = "/v1/probes", + tags = ["system/probes"], + }] + async fn probe_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + + let nexus = &apictx.context.nexus; + let query = query_params.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pag_params, scan_params)?; + let project_lookup = + nexus.project_lookup(&opctx, scan_params.selector.clone())?; + + let probes = nexus + .probe_list(&opctx, &project_lookup, &paginated_by) + .await?; - let nexus = &apictx.context.nexus; - let new_probe_params = &new_probe.into_inner(); - let project_selector = query_params.into_inner(); - let project_lookup = nexus.project_lookup(&opctx, project_selector)?; - let probe = nexus - .probe_create(&opctx, &project_lookup, &new_probe_params) - .await?; - Ok(HttpResponseCreated(probe.into())) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await -} + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + probes, + &|_, p: &ProbeInfo| match paginated_by { + PaginatedBy::Id(_) => NameOrId::Id(p.id), + PaginatedBy::Name(_) => NameOrId::Name(p.name.clone()), + }, + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } -/// Delete instrumentation probe -#[endpoint { - method = DELETE, - path = "/v1/probes/{probe}", - tags = ["system/probes"], -}] -async fn probe_delete( - rqctx: RequestContext, - query_params: Query, - path_params: Path, -) -> Result { - let apictx = rqctx.context(); - let handler = async { - let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + /// View instrumentation probe + #[endpoint { + method = GET, + path = "/v1/probes/{probe}", + tags = 
["system/probes"], + }] + async fn probe_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let project_selector = query_params.into_inner(); + let project_lookup = + nexus.project_lookup(&opctx, project_selector)?; + let probe = + nexus.probe_get(&opctx, &project_lookup, &path.probe).await?; + Ok(HttpResponseOk(probe)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } - let nexus = &apictx.context.nexus; - let path = path_params.into_inner(); - let project_selector = query_params.into_inner(); - let project_lookup = nexus.project_lookup(&opctx, project_selector)?; - nexus.probe_delete(&opctx, &project_lookup, path.probe).await?; - Ok(HttpResponseDeleted()) - }; - apictx - .context - .external_latencies - .instrument_dropshot_handler(&rqctx, handler) - .await + /// Create instrumentation probe + #[endpoint { + method = POST, + path = "/v1/probes", + tags = ["system/probes"], + }] + async fn probe_create( + rqctx: RequestContext, + query_params: Query, + new_probe: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + + let nexus = &apictx.context.nexus; + let new_probe_params = &new_probe.into_inner(); + let project_selector = query_params.into_inner(); + let project_lookup = + nexus.project_lookup(&opctx, project_selector)?; + let probe = nexus + .probe_create(&opctx, &project_lookup, &new_probe_params) + .await?; + Ok(HttpResponseCreated(probe.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + /// Delete instrumentation probe + #[endpoint { + method = DELETE, + path = "/v1/probes/{probe}", + tags = ["system/probes"], + }] + async fn probe_delete( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; + + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let project_selector = query_params.into_inner(); + let project_lookup = + nexus.project_lookup(&opctx, project_selector)?; + nexus.probe_delete(&opctx, &project_lookup, path.probe).await?; + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } } +pub(crate) use imp::*; + #[cfg(test)] mod test { use super::external_api; diff --git a/nexus/tests/integration_tests/metrics.rs b/nexus/tests/integration_tests/metrics.rs index 9cfa0350e8..e24de2a3ad 100644 --- a/nexus/tests/integration_tests/metrics.rs +++ b/nexus/tests/integration_tests/metrics.rs @@ -284,7 +284,7 @@ async fn test_timeseries_schema_list( pub async fn timeseries_query( cptestctx: &ControlPlaneTestContext, query: impl ToString, -) -> Vec { +) -> Vec { // first, make sure the latest timeseries have been collected. 
cptestctx.oximeter.force_collect().await; @@ -429,11 +429,11 @@ async fn test_instance_watcher_metrics( #[track_caller] fn count_state( - table: &oximeter_db::oxql::Table, + table: &oxql_types::Table, instance_id: InstanceUuid, state: &'static str, ) -> i64 { - use oximeter_db::oxql::point::ValueArray; + use oxql_types::point::ValueArray; let uuid = FieldValue::Uuid(instance_id.into_untyped_uuid()); let state = FieldValue::String(state.into()); let mut timeserieses = table.timeseries().filter(|ts| { diff --git a/openapi/nexus.json b/openapi/nexus.json index 27e2870b6e..a0cbfa2f63 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -20131,10 +20131,20 @@ "type": "object", "properties": { "metric_type": { - "$ref": "#/components/schemas/MetricType" + "description": "The type of this metric.", + "allOf": [ + { + "$ref": "#/components/schemas/MetricType" + } + ] }, "values": { - "$ref": "#/components/schemas/ValueArray" + "description": "The data values.", + "allOf": [ + { + "$ref": "#/components/schemas/ValueArray" + } + ] } }, "required": [ diff --git a/oximeter/db/Cargo.toml b/oximeter/db/Cargo.toml index e3cf089cb5..2a9c615da2 100644 --- a/oximeter/db/Cargo.toml +++ b/oximeter/db/Cargo.toml @@ -24,6 +24,7 @@ num.workspace = true omicron-common.workspace = true omicron-workspace-hack.workspace = true oximeter.workspace = true +oxql-types.workspace = true regex.workspace = true serde.workspace = true serde_json.workspace = true @@ -89,6 +90,7 @@ expectorate.workspace = true indexmap.workspace = true itertools.workspace = true omicron-test-utils.workspace = true +oximeter-test-utils.workspace = true slog-dtrace.workspace = true sqlformat.workspace = true sqlparser.workspace = true diff --git a/oximeter/db/src/client/mod.rs b/oximeter/db/src/client/mod.rs index 30ae4b68d2..e8561f4d73 100644 --- a/oximeter/db/src/client/mod.rs +++ b/oximeter/db/src/client/mod.rs @@ -22,8 +22,6 @@ use crate::Error; use crate::Metric; use crate::Target; use crate::Timeseries; -use crate::TimeseriesKey; -use crate::TimeseriesName; use crate::TimeseriesPageSelector; use crate::TimeseriesScanParams; use crate::TimeseriesSchema; @@ -32,6 +30,8 @@ use dropshot::PaginationOrder; use dropshot::ResultsPage; use dropshot::WhichPage; use oximeter::types::Sample; +use oximeter::TimeseriesName; +use oxql_types::TimeseriesKey; use regex::Regex; use regex::RegexBuilder; use slog::debug; @@ -1191,7 +1191,6 @@ mod tests { }; use omicron_test_utils::dev::test_setup_log; use oximeter::histogram::Histogram; - use oximeter::test_util; use oximeter::types::MissingDatum; use oximeter::Datum; use oximeter::FieldValue; @@ -1723,7 +1722,7 @@ mod tests { let samples = { let mut s = Vec::with_capacity(8); for _ in 0..s.capacity() { - s.push(test_util::make_hist_sample()) + s.push(oximeter_test_utils::make_hist_sample()) } s }; @@ -1762,7 +1761,7 @@ mod tests { let client = Client::new(address, &log); db_type.init_db(&client).await.unwrap(); - let sample = test_util::make_sample(); + let sample = oximeter_test_utils::make_sample(); client.insert_samples(&[sample]).await.unwrap(); let bad_name = name_mismatch::TestTarget { @@ -1770,7 +1769,7 @@ mod tests { name2: "second_name".into(), num: 2, }; - let metric = test_util::TestMetric { + let metric = oximeter_test_utils::TestMetric { id: uuid::Uuid::new_v4(), good: true, datum: 1, @@ -1792,7 +1791,7 @@ mod tests { let client = Client::new(address, &log); db_type.init_db(&client).await.unwrap(); - let sample = test_util::make_sample(); + let sample = oximeter_test_utils::make_sample(); 
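The `test_util` to `oximeter_test_utils` changes in these client tests are mechanical: the helper signatures are unchanged, only the crate path is new. A minimal call site, assuming the dev-dependency added to `oximeter/db/Cargo.toml` above:

```rust
// The same helpers the tests here rely on, reached via the new crate.
#[test]
fn make_samples_via_new_crate() {
    let _plain = oximeter_test_utils::make_sample();
    let _missing = oximeter_test_utils::make_missing_sample();
    let _histogram = oximeter_test_utils::make_hist_sample();
}
```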
// Verify that this sample is considered new, i.e., we return rows to update the timeseries // schema table. @@ -1867,7 +1866,7 @@ mod tests { let client = Client::new(address, &log); db_type.init_db(&client).await.unwrap(); - let samples = test_util::generate_test_samples(2, 2, 2, 2); + let samples = oximeter_test_utils::generate_test_samples(2, 2, 2, 2); client.insert_samples(&samples).await?; let sample = samples.first().unwrap(); @@ -1956,7 +1955,7 @@ mod tests { // we'd like to exercise the logic of ClickHouse's replacing merge tree engine. let client = Client::new(address, &log); db_type.init_db(&client).await.unwrap(); - let samples = test_util::generate_test_samples(2, 2, 2, 2); + let samples = oximeter_test_utils::generate_test_samples(2, 2, 2, 2); client.insert_samples(&samples).await?; async fn assert_table_count( @@ -2631,7 +2630,7 @@ mod tests { let client = Client::new(address, &log); db_type.init_db(&client).await.unwrap(); - let samples = test_util::generate_test_samples(2, 2, 2, 2); + let samples = oximeter_test_utils::generate_test_samples(2, 2, 2, 2); client.insert_samples(&samples).await?; let original_schema = client.schema.lock().await.clone(); @@ -2656,7 +2655,7 @@ mod tests { let client = Client::new(address, &log); db_type.init_db(&client).await.unwrap(); - let samples = test_util::generate_test_samples(2, 2, 2, 2); + let samples = oximeter_test_utils::generate_test_samples(2, 2, 2, 2); client.insert_samples(&samples).await?; let limit = 100u32.try_into().unwrap(); @@ -2691,7 +2690,7 @@ mod tests { let client = Client::new(address, &log); db_type.init_db(&client).await.unwrap(); - let samples = test_util::generate_test_samples(2, 2, 2, 2); + let samples = oximeter_test_utils::generate_test_samples(2, 2, 2, 2); client.insert_samples(&samples).await?; let limit = 7u32.try_into().unwrap(); @@ -3364,7 +3363,7 @@ mod tests { // The values here don't matter much, we just want to check that // the database data hasn't been dropped. assert_eq!(0, get_schema_count(&client).await); - let sample = test_util::make_sample(); + let sample = oximeter_test_utils::make_sample(); client.insert_samples(&[sample.clone()]).await.unwrap(); assert_eq!(1, get_schema_count(&client).await); @@ -3438,7 +3437,7 @@ mod tests { // The values here don't matter much, we just want to check that // the database data gets dropped later. assert_eq!(0, get_schema_count(&client).await); - let sample = test_util::make_sample(); + let sample = oximeter_test_utils::make_sample(); client.insert_samples(&[sample.clone()]).await.unwrap(); assert_eq!(1, get_schema_count(&client).await); @@ -3464,7 +3463,7 @@ mod tests { let client = Client::new(address, &log); db_type.init_db(&client).await.unwrap(); - let samples = [test_util::make_sample()]; + let samples = [oximeter_test_utils::make_sample()]; client.insert_samples(&samples).await.unwrap(); // Get the count of schema directly from the DB, which should have just @@ -3549,7 +3548,7 @@ mod tests { let client = Client::new(address, &log); db_type.init_db(&client).await.unwrap(); - let samples = [test_util::make_sample()]; + let samples = [oximeter_test_utils::make_sample()]; // We're using the components of the `insert_samples()` method here, // which has been refactored explicitly for this test. 
We need to insert diff --git a/oximeter/db/src/client/oxql.rs b/oximeter/db/src/client/oxql.rs index 29586b8189..ac87d14c45 100644 --- a/oximeter/db/src/client/oxql.rs +++ b/oximeter/db/src/client/oxql.rs @@ -18,8 +18,8 @@ use crate::query::field_table_name; use crate::Error; use crate::Metric; use crate::Target; -use crate::TimeseriesKey; use oximeter::TimeseriesSchema; +use oxql_types::TimeseriesKey; use slog::debug; use slog::trace; use slog::Logger; @@ -68,7 +68,7 @@ pub struct OxqlResult { pub query_summaries: Vec<QuerySummary>, /// The list of OxQL tables returned from the query. - pub tables: Vec<oxql::Table>, + pub tables: Vec<oxql_types::Table>, } /// The maximum number of data values fetched from the database for an OxQL @@ -479,7 +479,9 @@ impl Client { query_id, total_duration: query_start.elapsed(), query_summaries, - tables: vec![oxql::Table::new(schema.timeseries_name.as_str())], + tables: vec![oxql_types::Table::new( + schema.timeseries_name.as_str(), + )], }; return Ok(result); } @@ -503,7 +505,7 @@ impl Client { // At this point, let's construct a set of tables and run the results // through the transformation pipeline. - let mut tables = vec![oxql::Table::from_timeseries( + let mut tables = vec![oxql_types::Table::from_timeseries( schema.timeseries_name.as_str(), timeseries_by_key.into_values(), )?]; @@ -553,7 +555,7 @@ impl Client { limit: Option, total_rows_fetched: &mut u64, ) -> Result< - (Vec<QuerySummary>, BTreeMap<TimeseriesKey, oxql::Timeseries>), + (Vec<QuerySummary>, BTreeMap<TimeseriesKey, oxql_types::Timeseries>), Error, > { // We'll create timeseries for each key on the fly. To enable computing @@ -624,25 +626,25 @@ impl Client { for (key, measurements) in measurements_by_key.into_iter() { // Construct a new timeseries, from the target/metric info. let (target, metric) = info.get(&key).unwrap(); - let mut timeseries = oxql::Timeseries::new( + let mut timeseries = oxql_types::Timeseries::new( target .fields .iter() .chain(metric.fields.iter()) .map(|field| (field.name.clone(), field.value.clone())), - oxql::point::DataType::try_from(schema.datum_type)?, + oxql_types::point::DataType::try_from(schema.datum_type)?, if schema.datum_type.is_cumulative() { - oxql::point::MetricType::Delta + oxql_types::point::MetricType::Delta } else { - oxql::point::MetricType::Gauge + oxql_types::point::MetricType::Gauge }, )?; // Convert its oximeter measurements into OxQL data types. let points = if schema.datum_type.is_cumulative() { - oxql::point::Points::delta_from_cumulative(&measurements)? + oxql_types::point::Points::delta_from_cumulative(&measurements)? } else { - oxql::point::Points::gauge_from_gauge(&measurements)?
}; timeseries.points = points; debug!( @@ -1108,10 +1110,7 @@ fn update_total_rows_and_check( mod tests { use super::ConsistentKeyGroup; use crate::client::oxql::chunk_consistent_key_groups_impl; - use crate::{ - oxql::{point::Points, Table, Timeseries}, - Client, DbWrite, - }; + use crate::{Client, DbWrite}; use crate::{Metric, Target}; use chrono::{DateTime, Utc}; use dropshot::test_util::LogContext; @@ -1119,6 +1118,7 @@ mod tests { use omicron_test_utils::dev::test_setup_log; use oximeter::{types::Cumulative, FieldValue}; use oximeter::{DatumType, Sample}; + use oxql_types::{point::Points, Table, Timeseries}; use std::collections::BTreeMap; use std::time::Duration; diff --git a/oximeter/db/src/lib.rs b/oximeter/db/src/lib.rs index 9ad382c97d..b6a7d2924d 100644 --- a/oximeter/db/src/lib.rs +++ b/oximeter/db/src/lib.rs @@ -21,6 +21,7 @@ pub use oximeter::Field; pub use oximeter::FieldType; pub use oximeter::Measurement; pub use oximeter::Sample; +use oxql_types::TimeseriesKey; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; @@ -267,8 +268,6 @@ pub async fn make_client( Ok(client) } -pub(crate) type TimeseriesKey = u64; - // TODO-cleanup: Add the timeseries version in to the computation of the key. // This will require a full drop of the database, since we're changing the // sorting key and the timeseries key on each past sample. See diff --git a/oximeter/db/src/model.rs b/oximeter/db/src/model.rs index f27df4ed49..7a9f3977b7 100644 --- a/oximeter/db/src/model.rs +++ b/oximeter/db/src/model.rs @@ -11,7 +11,6 @@ use crate::FieldSchema; use crate::FieldSource; use crate::Metric; use crate::Target; -use crate::TimeseriesKey; use crate::TimeseriesSchema; use bytes::Bytes; use chrono::DateTime; @@ -29,6 +28,7 @@ use oximeter::types::Measurement; use oximeter::types::MissingDatum; use oximeter::types::Sample; use oximeter::Quantile; +use oxql_types::TimeseriesKey; use serde::Deserialize; use serde::Serialize; use std::collections::BTreeMap; @@ -1880,7 +1880,6 @@ mod tests { use super::*; use chrono::Timelike; use oximeter::histogram::Record; - use oximeter::test_util; use oximeter::Datum; #[test] @@ -1983,7 +1982,7 @@ mod tests { #[test] fn test_unroll_from_source() { - let sample = test_util::make_sample(); + let sample = oximeter_test_utils::make_sample(); let out = unroll_from_source(&sample); assert_eq!(out["oximeter.fields_string"].len(), 2); assert_eq!(out["oximeter.fields_i64"].len(), 1); @@ -2003,8 +2002,8 @@ mod tests { // datum. 
#[test] fn test_unroll_missing_measurement_row() { - let sample = test_util::make_sample(); - let missing_sample = test_util::make_missing_sample(); + let sample = oximeter_test_utils::make_sample(); + let missing_sample = oximeter_test_utils::make_missing_sample(); let (table_name, row) = unroll_measurement_row(&sample); let (missing_table_name, missing_row) = unroll_measurement_row(&missing_sample); @@ -2022,7 +2021,7 @@ mod tests { #[test] fn test_unroll_measurement_row() { - let sample = test_util::make_hist_sample(); + let sample = oximeter_test_utils::make_hist_sample(); let (table_name, row) = unroll_measurement_row(&sample); assert_eq!(table_name, "oximeter.measurements_histogramf64"); let unpacked: HistogramF64MeasurementRow = diff --git a/oximeter/db/src/oxql/ast/table_ops/align.rs b/oximeter/db/src/oxql/ast/table_ops/align.rs index cf54ebc312..b0cd7d80f1 100644 --- a/oximeter/db/src/oxql/ast/table_ops/align.rs +++ b/oximeter/db/src/oxql/ast/table_ops/align.rs @@ -6,19 +6,19 @@ // Copyright 2024 Oxide Computer Company -use crate::oxql::point::DataType; -use crate::oxql::point::MetricType; -use crate::oxql::point::Points; -use crate::oxql::point::ValueArray; -use crate::oxql::point::Values; -use crate::oxql::query::Alignment; -use crate::oxql::Error; -use crate::oxql::Table; -use crate::oxql::Timeseries; use anyhow::Context; +use anyhow::Error; use chrono::DateTime; use chrono::TimeDelta; use chrono::Utc; +use oxql_types::point::DataType; +use oxql_types::point::MetricType; +use oxql_types::point::Points; +use oxql_types::point::ValueArray; +use oxql_types::point::Values; +use oxql_types::Alignment; +use oxql_types::Table; +use oxql_types::Timeseries; use std::time::Duration; // The maximum factor by which an alignment operation may upsample data. @@ -144,7 +144,7 @@ fn align_mean_within( "Alignment by mean requires a gauge or delta metric, not {}", metric_type, ); - verify_max_upsampling_ratio(&points.timestamps, &period)?; + verify_max_upsampling_ratio(points.timestamps(), &period)?; // Always convert the output to doubles, when computing the mean. The // output is always a gauge, so we do not need the start times of the @@ -179,7 +179,7 @@ fn align_mean_within( // - Compute the mean of those. let period_ = TimeDelta::from_std(*period).context("time delta out of range")?; - let first_timestamp = points.timestamps[0]; + let first_timestamp = points.timestamps()[0]; let mut ix: u32 = 0; loop { // Compute the next output timestamp, by shifting the query end time @@ -220,15 +220,15 @@ fn align_mean_within( // entries. 
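The windowing arithmetic behind `align_mean_within` can be restated compactly. The helper below is illustrative only, not part of the patch: it shows how the i-th output window is derived from the query end time and the alignment period.

```rust
use chrono::{DateTime, TimeDelta, Utc};

// Output timestamps are laid out backward from the query end time in whole
// periods, so the i-th output point averages the half-open window
// [end - (i + 1) * period, end - i * period).
fn window_bounds(
    query_end: DateTime<Utc>,
    period: TimeDelta,
    i: i32,
) -> (DateTime<Utc>, DateTime<Utc>) {
    let window_end = query_end - period * i;
    (window_end - period, window_end)
}
```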
let output_value = if matches!(metric_type, MetricType::Gauge) { mean_gauge_value_in_window( - &points.timestamps, + points.timestamps(), &input_points, window_start, output_time, ) } else { mean_delta_value_in_window( - points.start_times.as_ref().unwrap(), - &points.timestamps, + points.start_times().unwrap(), + points.timestamps(), &input_points, window_start, output_time, @@ -255,10 +255,9 @@ fn align_mean_within( ValueArray::Double(output_values.into_iter().rev().collect()); let timestamps = output_timestamps.into_iter().rev().collect(); let values = Values { values, metric_type: MetricType::Gauge }; - new_timeseries.points = - Points { start_times: None, timestamps, values: vec![values] }; - new_timeseries.alignment = - Some(Alignment { end_time: *query_end, period: *period }); + new_timeseries.points = Points::new(None, timestamps, vec![values]); + new_timeseries + .set_alignment(Alignment { end_time: *query_end, period: *period }); output_table.insert(new_timeseries).unwrap(); } Ok(output_table) diff --git a/oximeter/db/src/oxql/ast/table_ops/filter.rs b/oximeter/db/src/oxql/ast/table_ops/filter.rs index b6fc533e4d..ad398da983 100644 --- a/oximeter/db/src/oxql/ast/table_ops/filter.rs +++ b/oximeter/db/src/oxql/ast/table_ops/filter.rs @@ -12,18 +12,18 @@ use crate::oxql::ast::literal::Literal; use crate::oxql::ast::logical_op::LogicalOp; use crate::oxql::ast::table_ops::limit::Limit; use crate::oxql::ast::table_ops::limit::LimitKind; -use crate::oxql::point::DataType; -use crate::oxql::point::MetricType; -use crate::oxql::point::Points; -use crate::oxql::point::ValueArray; use crate::oxql::Error; -use crate::oxql::Table; -use crate::oxql::Timeseries; use crate::shells::special_idents; use chrono::DateTime; use chrono::Utc; use oximeter::FieldType; use oximeter::FieldValue; +use oxql_types::point::DataType; +use oxql_types::point::MetricType; +use oxql_types::point::Points; +use oxql_types::point::ValueArray; +use oxql_types::Table; +use oxql_types::Timeseries; use regex::Regex; use std::collections::BTreeSet; use std::fmt; @@ -340,16 +340,13 @@ impl Filter { // Apply the filter to the data points as well. let points = self.filter_points(&input.points)?; - // Similar to above, if the filter removes all data points in - // the timeseries, let's remove the timeseries altogether. - if points.is_empty() { - continue; + if let Some(new_timeseries) = input.copy_with_points(points) { + timeseries.push(new_timeseries); + } else { + // None means that the filter removed all data points in + // the timeseries. In that case, we remove the timeseries + // altogether. 
} - timeseries.push(Timeseries { - fields: input.fields.clone(), - points, - alignment: input.alignment, - }) } output_tables.push(Table::from_timeseries( table.name(), @@ -823,7 +820,7 @@ impl SimpleFilter { ) -> Result, Error> { let ident = self.ident.as_str(); if ident == "timestamp" { - self.filter_points_by_timestamp(negated, &points.timestamps) + self.filter_points_by_timestamp(negated, points.timestamps()) } else if ident == "datum" { anyhow::ensure!( points.dimensionality() == 1, @@ -1151,15 +1148,15 @@ impl SimpleFilter { mod tests { use crate::oxql::ast::grammar::query_parser; use crate::oxql::ast::logical_op::LogicalOp; - use crate::oxql::point::DataType; - use crate::oxql::point::MetricType; - use crate::oxql::point::Points; - use crate::oxql::point::ValueArray; - use crate::oxql::point::Values; - use crate::oxql::Table; - use crate::oxql::Timeseries; use chrono::Utc; use oximeter::FieldValue; + use oxql_types::point::DataType; + use oxql_types::point::MetricType; + use oxql_types::point::Points; + use oxql_types::point::ValueArray; + use oxql_types::point::Values; + use oxql_types::Table; + use oxql_types::Timeseries; use std::time::Duration; use uuid::Uuid; @@ -1172,7 +1169,7 @@ mod tests { values: ValueArray::Double(vec![Some(0.0), Some(2.0)]), metric_type: MetricType::Gauge, }]; - let points = Points { start_times, timestamps, values }; + let points = Points::new(start_times, timestamps, values); // This filter should remove the first point based on its timestamp. let t = Utc::now() + Duration::from_secs(10); @@ -1205,7 +1202,7 @@ mod tests { values: ValueArray::Double(vec![Some(0.0), Some(2.0)]), metric_type: MetricType::Gauge, }]; - let points = Points { start_times, timestamps, values }; + let points = Points::new(start_times, timestamps, values); let filter = query_parser::filter("filter datum < \"something\"").unwrap(); diff --git a/oximeter/db/src/oxql/ast/table_ops/group_by.rs b/oximeter/db/src/oxql/ast/table_ops/group_by.rs index f40572d762..3d465ff941 100644 --- a/oximeter/db/src/oxql/ast/table_ops/group_by.rs +++ b/oximeter/db/src/oxql/ast/table_ops/group_by.rs @@ -10,13 +10,13 @@ use chrono::DateTime; use chrono::Utc; use crate::oxql::ast::ident::Ident; -use crate::oxql::point::DataType; -use crate::oxql::point::MetricType; -use crate::oxql::point::ValueArray; -use crate::oxql::Error; -use crate::oxql::Table; -use crate::oxql::Timeseries; -use crate::TimeseriesKey; +use anyhow::Error; +use oxql_types::point::DataType; +use oxql_types::point::MetricType; +use oxql_types::point::ValueArray; +use oxql_types::Table; +use oxql_types::Timeseries; +use oxql_types::TimeseriesKey; use std::collections::btree_map::Entry; use std::collections::BTreeMap; @@ -98,7 +98,7 @@ impl GroupBy { ValueArray::Double(new_values), ValueArray::Double(existing_values), ) => { - let new_timestamps = &dropped.points.timestamps; + let new_timestamps = dropped.points.timestamps(); // We will be merging the new data with the // existing, but borrow-checking limits the degree @@ -106,7 +106,7 @@ impl GroupBy { // entry in the output table. Instead, aggregate // everything into a copy of the expected data. let mut timestamps = - existing.points.timestamps.clone(); + existing.points.timestamps().to_owned(); let mut values = existing_values.clone(); // Merge in the new values, so long as they actually @@ -152,10 +152,7 @@ impl GroupBy { // Replace the existing output timeseries's // timestamps and data arrays. 
- std::mem::swap( - &mut existing.points.timestamps, - &mut timestamps, - ); + existing.points.set_timestamps(timestamps); existing .points .values_mut(0) @@ -166,7 +163,7 @@ impl GroupBy { ValueArray::Integer(new_values), ValueArray::Integer(existing_values), ) => { - let new_timestamps = &dropped.points.timestamps; + let new_timestamps = dropped.points.timestamps(); // We will be merging the new data with the // existing, but borrow-checking limits the degree @@ -174,7 +171,7 @@ impl GroupBy { // entry in the output table. Instead, aggregate // everything into a copy of the expected data. let mut timestamps = - existing.points.timestamps.clone(); + existing.points.timestamps().to_owned(); let mut values = existing_values.clone(); // Merge in the new values, so long as they actually @@ -220,10 +217,7 @@ impl GroupBy { // Replace the existing output timeseries's // timestamps and data arrays. - std::mem::swap( - &mut existing.points.timestamps, - &mut timestamps, - ); + existing.points.set_timestamps(timestamps); existing .points .values_mut(0) @@ -286,14 +280,15 @@ impl GroupBy { else { unreachable!(); }; - let new_timestamps = &new_points.timestamps; + let new_timestamps = new_points.timestamps(); // We will be merging the new data with the // existing, but borrow-checking limits the degree // to which we can easily do this on the `existing` // entry in the output table. Instead, aggregate // everything into a copy of the expected data. - let mut timestamps = existing.points.timestamps.clone(); + let mut timestamps = + existing.points.timestamps().to_owned(); let mut values = existing .points .values(0) @@ -360,10 +355,7 @@ impl GroupBy { // Replace the existing output timeseries's // timestamps and data arrays. - std::mem::swap( - &mut existing.points.timestamps, - &mut timestamps, - ); + existing.points.set_timestamps(timestamps); existing .points .values_mut(0) @@ -388,7 +380,7 @@ impl GroupBy { // _zero_ for any where the values are none. let counts = new_timeseries .points - .timestamps + .timestamps() .iter() .zip(values) .map(|(timestamp, maybe_value)| { @@ -434,16 +426,16 @@ pub enum Reducer { #[cfg(test)] mod tests { use super::{GroupBy, Reducer}; - use crate::oxql::{ - ast::{ - ident::Ident, - table_ops::align::{Align, AlignmentMethod}, - }, - point::{DataType, MetricType, ValueArray}, - Table, Timeseries, + use crate::oxql::ast::{ + ident::Ident, + table_ops::align::{Align, AlignmentMethod}, }; use chrono::{DateTime, Utc}; use oximeter::FieldValue; + use oxql_types::{ + point::{DataType, MetricType, ValueArray}, + Table, Timeseries, + }; use std::{collections::BTreeMap, time::Duration}; // Which timeseries the second data point is missing from. @@ -495,8 +487,8 @@ mod tests { MetricType::Gauge, ) .unwrap(); - ts0.points.start_times = None; - ts0.points.timestamps.clone_from(×tamps); + ts0.points.clear_start_times(); + ts0.points.set_timestamps(timestamps.clone()); *ts0.points.values_mut(0).unwrap() = ValueArray::Double(vec![ Some(1.0), if matches!( @@ -527,7 +519,7 @@ mod tests { MetricType::Gauge, ) .unwrap(); - ts1.points.start_times = None; + ts1.points.clear_start_times(); // Non-overlapping in this test setup means that we just shift one // value from this array backward in time by one additional second. @@ -538,7 +530,7 @@ mod tests { // // When reducing, t0 is never changed, and t1-t2 are always reduced // together, if the values are present. 
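These test updates all stem from `Points` moving to `oxql-types` with private fields, so direct assignments like the one deleted just below become accessor calls. A self-contained analogue of the new surface, simplified to the timestamp axis only:

```rust
use chrono::{DateTime, Utc};

// Simplified stand-in for `oxql_types::point::Points`: the timestamp vector
// is private, and callers go through an explicit getter/setter pair.
pub struct PointsLike {
    timestamps: Vec<DateTime<Utc>>,
}

impl PointsLike {
    pub fn timestamps(&self) -> &[DateTime<Utc>] {
        &self.timestamps
    }

    pub fn set_timestamps(&mut self, timestamps: Vec<DateTime<Utc>>) {
        self.timestamps = timestamps;
    }
}
```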
- ts1.points.timestamps = if cfg.overlapping_times { + let new_timestamps = if cfg.overlapping_times { timestamps.clone() } else { let mut new_timestamps = timestamps.clone(); @@ -546,6 +538,7 @@ mod tests { timestamps.insert(0, new_timestamps[0]); new_timestamps }; + ts1.points.set_timestamps(new_timestamps); *ts1.points.values_mut(0).unwrap() = ValueArray::Double(vec![ Some(2.0), if matches!(cfg.missing_value, MissingValue::Both) { @@ -604,11 +597,13 @@ mod tests { let points = &grouped_timeseries.points; assert_eq!(points.dimensionality(), 1, "Points should still be 1D"); assert_eq!( - points.start_times, None, + points.start_times(), + None, "Points should not have start times" ); assert_eq!( - points.timestamps, test.timestamps, + points.timestamps(), + test.timestamps, "Points do not have correct timestamps" ); diff --git a/oximeter/db/src/oxql/ast/table_ops/join.rs b/oximeter/db/src/oxql/ast/table_ops/join.rs index 3c150a4acf..2893f6cf3e 100644 --- a/oximeter/db/src/oxql/ast/table_ops/join.rs +++ b/oximeter/db/src/oxql/ast/table_ops/join.rs @@ -6,12 +6,10 @@ // Copyright 2024 Oxide Computer Company -use crate::oxql::point::MetricType; -use crate::oxql::point::Points; -use crate::oxql::point::Values; -use crate::oxql::Error; -use crate::oxql::Table; use anyhow::Context; +use anyhow::Error; +use oxql_types::point::MetricType; +use oxql_types::Table; /// An AST node for a natural inner join. #[derive(Clone, Copy, Debug, PartialEq)] @@ -80,10 +78,8 @@ impl Join { // 1. They have the same alignment, and // 2. We merge the timepoints rather than simply creating a // ragged array of points. - timeseries.points = inner_join_point_arrays( - ×eries.points, - &next_timeseries.points, - )?; + timeseries.points = + timeseries.points.inner_join(&next_timeseries.points)?; } // We'll also update the name, to indicate the joined data. out.name.push(','); @@ -93,101 +89,6 @@ impl Join { } } -// Given two arrays of points, stack them together at matching timepoints. -// -// For time points in either which do not have a corresponding point in the -// other, the entire time point is elided. -fn inner_join_point_arrays( - left: &Points, - right: &Points, -) -> Result { - // Create an output array with roughly the right capacity, and double the - // number of dimensions. We're trying to stack output value arrays together - // along the dimension axis. - let data_types = - left.data_types().chain(right.data_types()).collect::>(); - let metric_types = - left.metric_types().chain(right.metric_types()).collect::>(); - let mut out = Points::with_capacity( - left.len().max(right.len()), - data_types.iter().copied(), - metric_types.iter().copied(), - )?; - - // Iterate through each array until one is exhausted. We're only inserting - // values from both arrays where the timestamps actually match, since this - // is an inner join. We may want to insert missing values where timestamps - // do not match on either side, when we support an outer join of some kind. 
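Both the free function deleted here and its replacement, `Points::inner_join` (added to `point.rs` later in this patch), use the same two-cursor merge over sorted timestamps. A standalone sketch of just that control flow, with plain integers standing in for timestamps:

```rust
// Advance a cursor on each side; emit only timestamps present in both inputs
// (an inner join on the time axis), and skip past the smaller value otherwise.
fn matching_timestamps(left: &[i64], right: &[i64]) -> Vec<i64> {
    let (mut i, mut j) = (0, 0);
    let mut out = Vec::with_capacity(left.len().min(right.len()));
    while i < left.len() && j < right.len() {
        if left[i] == right[j] {
            out.push(left[i]);
            i += 1;
            j += 1;
        } else if left[i] < right[j] {
            i += 1;
        } else {
            j += 1;
        }
    }
    out
}
```

For `[1, 2, 4]` against `[1, 3, 4]` this yields `[1, 4]`, the same elision behavior the deleted `test_join_point_arrays` verifies with real `Points`.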
- let n_left_dim = left.values.len(); - let mut left_ix = 0; - let mut right_ix = 0; - while left_ix < left.len() && right_ix < right.len() { - let left_timestamp = left.timestamps[left_ix]; - let right_timestamp = right.timestamps[right_ix]; - if left_timestamp == right_timestamp { - out.timestamps.push(left_timestamp); - push_concrete_values( - &mut out.values[..n_left_dim], - &left.values, - left_ix, - ); - push_concrete_values( - &mut out.values[n_left_dim..], - &right.values, - right_ix, - ); - left_ix += 1; - right_ix += 1; - } else if left_timestamp < right_timestamp { - left_ix += 1; - } else { - right_ix += 1; - } - } - Ok(out) -} - -// Push the `i`th value from each dimension of `from` onto `to`. -fn push_concrete_values(to: &mut [Values], from: &[Values], i: usize) { - assert_eq!(to.len(), from.len()); - for (output, input) in to.iter_mut().zip(from.iter()) { - let input_array = &input.values; - let output_array = &mut output.values; - assert_eq!(input_array.data_type(), output_array.data_type()); - if let Ok(ints) = input_array.as_integer() { - output_array.as_integer_mut().unwrap().push(ints[i]); - continue; - } - if let Ok(doubles) = input_array.as_double() { - output_array.as_double_mut().unwrap().push(doubles[i]); - continue; - } - if let Ok(bools) = input_array.as_boolean() { - output_array.as_boolean_mut().unwrap().push(bools[i]); - continue; - } - if let Ok(strings) = input_array.as_string() { - output_array.as_string_mut().unwrap().push(strings[i].clone()); - continue; - } - if let Ok(dists) = input_array.as_integer_distribution() { - output_array - .as_integer_distribution_mut() - .unwrap() - .push(dists[i].clone()); - continue; - } - if let Ok(dists) = input_array.as_double_distribution() { - output_array - .as_double_distribution_mut() - .unwrap() - .push(dists[i].clone()); - continue; - } - unreachable!(); - } -} - // Return an error if any metric types are not suitable for joining. fn ensure_all_metric_types( mut metric_types: impl ExactSizeIterator, @@ -200,186 +101,3 @@ fn ensure_all_metric_types( ); Ok(()) } - -#[cfg(test)] -mod tests { - use super::*; - use crate::oxql::point::DataType; - use crate::oxql::point::Datum; - use crate::oxql::point::ValueArray; - use chrono::Utc; - use std::time::Duration; - - #[test] - fn test_push_concrete_values() { - let mut points = Points::with_capacity( - 2, - [DataType::Integer, DataType::Double].into_iter(), - [MetricType::Gauge, MetricType::Gauge].into_iter(), - ) - .unwrap(); - - // Push a concrete value for the integer dimension - let from_ints = vec![Values { - values: ValueArray::Integer(vec![Some(1)]), - metric_type: MetricType::Gauge, - }]; - push_concrete_values(&mut points.values[..1], &from_ints, 0); - - // And another for the double dimension. 
- let from_doubles = vec![Values { - values: ValueArray::Double(vec![Some(2.0)]), - metric_type: MetricType::Gauge, - }]; - push_concrete_values(&mut points.values[1..], &from_doubles, 0); - - assert_eq!( - points.dimensionality(), - 2, - "Points should have 2 dimensions", - ); - let ints = points.values[0].values.as_integer().unwrap(); - assert_eq!( - ints.len(), - 1, - "Should have pushed one point in the first dimension" - ); - assert_eq!( - ints[0], - Some(1), - "Should have pushed 1 onto the first dimension" - ); - let doubles = points.values[1].values.as_double().unwrap(); - assert_eq!( - doubles.len(), - 1, - "Should have pushed one point in the second dimension" - ); - assert_eq!( - doubles[0], - Some(2.0), - "Should have pushed 2.0 onto the second dimension" - ); - } - - #[test] - fn test_join_point_arrays() { - let now = Utc::now(); - - // Create a set of integer points to join with. - // - // This will have two timestamps, one of which will match the points - // below that are merged in. - let int_points = Points { - start_times: None, - timestamps: vec![ - now - Duration::from_secs(3), - now - Duration::from_secs(2), - now, - ], - values: vec![Values { - values: ValueArray::Integer(vec![Some(1), Some(2), Some(3)]), - metric_type: MetricType::Gauge, - }], - }; - - // Create an additional set of double points. - // - // This also has two timepoints, one of which matches with the above, - // and one of which does not. - let double_points = Points { - start_times: None, - timestamps: vec![ - now - Duration::from_secs(3), - now - Duration::from_secs(1), - now, - ], - values: vec![Values { - values: ValueArray::Double(vec![ - Some(4.0), - Some(5.0), - Some(6.0), - ]), - metric_type: MetricType::Gauge, - }], - }; - - // Merge the arrays. - let merged = - inner_join_point_arrays(&int_points, &double_points).unwrap(); - - // Basic checks that we merged in the right values and have the right - // types and dimensions. - assert_eq!( - merged.dimensionality(), - 2, - "Should have appended the dimensions from each input array" - ); - assert_eq!(merged.len(), 2, "Should have merged two common points",); - assert_eq!( - merged.data_types().collect::>(), - &[DataType::Integer, DataType::Double], - "Should have combined the data types of the input arrays" - ); - assert_eq!( - merged.metric_types().collect::>(), - &[MetricType::Gauge, MetricType::Gauge], - "Should have combined the metric types of the input arrays" - ); - - // Check the actual values of the array. - let mut points = merged.iter_points(); - - // The first and last timepoint overlapped between the two arrays, so we - // should have both of them as concrete samples. - let pt = points.next().unwrap(); - assert_eq!(pt.start_time, None, "Gauges don't have a start time"); - assert_eq!( - *pt.timestamp, int_points.timestamps[0], - "Should have taken the first input timestamp from both arrays", - ); - assert_eq!( - *pt.timestamp, double_points.timestamps[0], - "Should have taken the first input timestamp from both arrays", - ); - let values = pt.values; - assert_eq!(values.len(), 2, "Should have 2 dimensions"); - assert_eq!( - &values[0], - &(Datum::Integer(Some(&1)), MetricType::Gauge), - "Should have pulled value from first integer array." - ); - assert_eq!( - &values[1], - &(Datum::Double(Some(&4.0)), MetricType::Gauge), - "Should have pulled value from second double array." 
- ); - - // And the next point - let pt = points.next().unwrap(); - assert_eq!(pt.start_time, None, "Gauges don't have a start time"); - assert_eq!( - *pt.timestamp, int_points.timestamps[2], - "Should have taken the input timestamp from both arrays", - ); - assert_eq!( - *pt.timestamp, double_points.timestamps[2], - "Should have taken the input timestamp from both arrays", - ); - let values = pt.values; - assert_eq!(values.len(), 2, "Should have 2 dimensions"); - assert_eq!( - &values[0], - &(Datum::Integer(Some(&3)), MetricType::Gauge), - "Should have pulled value from first integer array." - ); - assert_eq!( - &values[1], - &(Datum::Double(Some(&6.0)), MetricType::Gauge), - "Should have pulled value from second double array." - ); - - // And there should be no other values. - assert!(points.next().is_none(), "There should be no more points"); - } -} diff --git a/oximeter/db/src/oxql/ast/table_ops/limit.rs b/oximeter/db/src/oxql/ast/table_ops/limit.rs index 0205868f5c..89afb31a7c 100644 --- a/oximeter/db/src/oxql/ast/table_ops/limit.rs +++ b/oximeter/db/src/oxql/ast/table_ops/limit.rs @@ -6,12 +6,8 @@ // Copyright 2024 Oxide Computer Company -use crate::oxql::point::Points; -use crate::oxql::point::ValueArray; -use crate::oxql::point::Values; -use crate::oxql::Error; -use crate::oxql::Table; -use crate::oxql::Timeseries; +use anyhow::Error; +use oxql_types::Table; use std::num::NonZeroUsize; /// The kind of limiting operation @@ -65,58 +61,7 @@ impl Limit { } }; - // Slice the various data arrays. - let start_times = input_points - .start_times - .as_ref() - .map(|s| s[start..end].to_vec()); - let timestamps = - input_points.timestamps[start..end].to_vec(); - let values = input_points - .values - .iter() - .map(|vals| { - let values = match &vals.values { - ValueArray::Integer(inner) => { - ValueArray::Integer( - inner[start..end].to_vec(), - ) - } - ValueArray::Double(inner) => { - ValueArray::Double( - inner[start..end].to_vec(), - ) - } - ValueArray::Boolean(inner) => { - ValueArray::Boolean( - inner[start..end].to_vec(), - ) - } - ValueArray::String(inner) => { - ValueArray::String( - inner[start..end].to_vec(), - ) - } - ValueArray::IntegerDistribution(inner) => { - ValueArray::IntegerDistribution( - inner[start..end].to_vec(), - ) - } - ValueArray::DoubleDistribution(inner) => { - ValueArray::DoubleDistribution( - inner[start..end].to_vec(), - ) - } - }; - Values { values, metric_type: vals.metric_type } - }) - .collect(); - let points = Points { start_times, timestamps, values }; - Timeseries { - fields: timeseries.fields.clone(), - points, - alignment: timeseries.alignment, - } + timeseries.limit(start, end) }); Table::from_timeseries(table.name(), timeseries) }) @@ -127,9 +72,12 @@ impl Limit { #[cfg(test)] mod tests { use super::*; - use crate::oxql::point::{DataType, MetricType}; use chrono::Utc; use oximeter::FieldValue; + use oxql_types::{ + point::{DataType, MetricType}, + Timeseries, + }; use std::{collections::BTreeMap, time::Duration}; fn test_tables() -> Vec { @@ -150,12 +98,14 @@ mod tests { MetricType::Gauge, ) .unwrap(); - timeseries.points.timestamps.clone_from(×tamps); - timeseries.points.values[0].values.as_integer_mut().unwrap().extend([ - Some(1), - Some(2), - Some(3), - ]); + timeseries.points.set_timestamps(timestamps.clone()); + timeseries + .points + .values_mut(0) + .unwrap() + .as_integer_mut() + .unwrap() + .extend([Some(1), Some(2), Some(3)]); let table1 = Table::from_timeseries("first", std::iter::once(timeseries)) .unwrap(); @@ -166,12 +116,14 @@ mod 
tests { MetricType::Gauge, ) .unwrap(); - timeseries.points.timestamps.clone_from(×tamps); - timeseries.points.values[0].values.as_integer_mut().unwrap().extend([ - Some(4), - Some(5), - Some(6), - ]); + timeseries.points.set_timestamps(timestamps.clone()); + timeseries + .points + .values_mut(0) + .unwrap() + .as_integer_mut() + .unwrap() + .extend([Some(4), Some(5), Some(6)]); let table2 = Table::from_timeseries("second", std::iter::once(timeseries)) .unwrap(); @@ -223,7 +175,8 @@ mod tests { "Limited table should have the same fields" ); assert_eq!( - timeseries.alignment, limited_timeseries.alignment, + timeseries.alignment(), + limited_timeseries.alignment(), "Limited timeseries should have the same alignment" ); assert_eq!( @@ -237,14 +190,15 @@ mod tests { // These depend on the limit operation. let points = ×eries.points; let limited_points = &limited_timeseries.points; - assert_eq!(points.start_times, limited_points.start_times); + assert_eq!(points.start_times(), limited_points.start_times()); assert_eq!( - points.timestamps[start..end], - limited_points.timestamps + &points.timestamps()[start..end], + limited_points.timestamps() ); assert_eq!( - limited_points.values[0].values.as_integer().unwrap(), - &points.values[0].values.as_integer().unwrap()[start..end], + limited_points.values(0).unwrap().as_integer().unwrap(), + &points.values(0).unwrap().as_integer().unwrap() + [start..end], "Points should be limited to [{start}..{end}]", ); } diff --git a/oximeter/db/src/oxql/ast/table_ops/mod.rs b/oximeter/db/src/oxql/ast/table_ops/mod.rs index 46f5106a08..8b8d4cbe1b 100644 --- a/oximeter/db/src/oxql/ast/table_ops/mod.rs +++ b/oximeter/db/src/oxql/ast/table_ops/mod.rs @@ -20,10 +20,10 @@ use self::join::Join; use self::limit::Limit; use crate::oxql::ast::Query; use crate::oxql::Error; -use crate::oxql::Table; use chrono::DateTime; use chrono::Utc; use oximeter::TimeseriesName; +use oxql_types::Table; /// A basic table operation, the atoms of an OxQL query. #[derive(Clone, Debug, PartialEq)] diff --git a/oximeter/db/src/oxql/mod.rs b/oximeter/db/src/oxql/mod.rs index 3961fae1cc..fcdfb783c5 100644 --- a/oximeter/db/src/oxql/mod.rs +++ b/oximeter/db/src/oxql/mod.rs @@ -10,13 +10,9 @@ use peg::error::ParseError as PegError; use peg::str::LineCol; pub mod ast; -pub mod point; pub mod query; -pub mod table; pub use self::query::Query; -pub use self::table::Table; -pub use self::table::Timeseries; pub use anyhow::Error; /// Format a PEG parsing error into a nice anyhow error. diff --git a/oximeter/db/src/oxql/query/mod.rs b/oximeter/db/src/oxql/query/mod.rs index e1fada9f2a..46c9bbc92c 100644 --- a/oximeter/db/src/oxql/query/mod.rs +++ b/oximeter/db/src/oxql/query/mod.rs @@ -23,7 +23,6 @@ use crate::oxql::Error; use crate::TimeseriesName; use chrono::DateTime; use chrono::Utc; -use std::time::Duration; /// A parsed OxQL query. #[derive(Clone, Debug, PartialEq)] @@ -391,15 +390,6 @@ fn restrict_filter_idents( } } -/// Describes the time alignment for an OxQL query. -#[derive(Clone, Copy, Debug, PartialEq)] -pub struct Alignment { - /// The end time of the query, which the temporal reference point. - pub end_time: DateTime, - /// The alignment period, the interval on which values are produced. 
- pub period: Duration, -} - #[cfg(test)] mod tests { use super::Filter; diff --git a/oximeter/db/src/query.rs b/oximeter/db/src/query.rs index ceabf00888..eee65ef215 100644 --- a/oximeter/db/src/query.rs +++ b/oximeter/db/src/query.rs @@ -6,13 +6,14 @@ // Copyright 2021 Oxide Computer Company use crate::{ - Error, FieldSchema, FieldSource, TimeseriesKey, TimeseriesSchema, - DATABASE_NAME, DATABASE_SELECT_FORMAT, + Error, FieldSchema, FieldSource, TimeseriesSchema, DATABASE_NAME, + DATABASE_SELECT_FORMAT, }; use chrono::{DateTime, Utc}; use dropshot::PaginationOrder; use oximeter::types::{DatumType, FieldType, FieldValue}; use oximeter::{Metric, Target}; +use oxql_types::TimeseriesKey; use regex::Regex; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; diff --git a/oximeter/db/src/shells/oxql.rs b/oximeter/db/src/shells/oxql.rs index 0f23ea7d64..f46d08c0cf 100644 --- a/oximeter/db/src/shells/oxql.rs +++ b/oximeter/db/src/shells/oxql.rs @@ -7,9 +7,10 @@ // Copyright 2024 Oxide Computer use super::{list_timeseries, prepare_columns}; -use crate::{make_client, oxql::Table, Client, OxqlResult}; +use crate::{make_client, Client, OxqlResult}; use clap::Args; use crossterm::style::Stylize; +use oxql_types::Table; use reedline::DefaultPrompt; use reedline::DefaultPromptSegment; use reedline::Reedline; diff --git a/oximeter/db/tests/integration_test.rs b/oximeter/db/tests/integration_test.rs index 732683c414..f5d81d51d1 100644 --- a/oximeter/db/tests/integration_test.rs +++ b/oximeter/db/tests/integration_test.rs @@ -10,7 +10,6 @@ use clickward::{ use dropshot::test_util::log_prefix_for_test; use omicron_test_utils::dev::poll; use omicron_test_utils::dev::test_setup_log; -use oximeter::test_util; use oximeter_db::{Client, DbWrite, OxqlResult, Sample, TestDbWrite}; use slog::{debug, info, Logger}; use std::collections::BTreeSet; @@ -199,7 +198,7 @@ async fn test_cluster() -> anyhow::Result<()> { // Let's write some samples to our first replica and wait for them to show // up on replica 2. 
let start = tokio::time::Instant::now(); - let samples = test_util::generate_test_samples( + let samples = oximeter_test_utils::generate_test_samples( input.n_projects, input.n_instances, input.n_cpus, @@ -261,7 +260,7 @@ async fn test_cluster() -> anyhow::Result<()> { info!(log, "successfully stopped server 1"); // Generate some new samples and insert them at replica3 - let samples = test_util::generate_test_samples( + let samples = oximeter_test_utils::generate_test_samples( input.n_projects, input.n_instances, input.n_cpus, @@ -298,7 +297,7 @@ async fn test_cluster() -> anyhow::Result<()> { .expect("failed to get samples from client1"); // We still have a quorum (2 of 3 keepers), so we should be able to insert - let samples = test_util::generate_test_samples( + let samples = oximeter_test_utils::generate_test_samples( input.n_projects, input.n_instances, input.n_cpus, @@ -321,7 +320,7 @@ async fn test_cluster() -> anyhow::Result<()> { .expect("failed to get samples from client1"); info!(log, "Attempting to insert samples without keeper quorum"); - let samples = test_util::generate_test_samples( + let samples = oximeter_test_utils::generate_test_samples( input.n_projects, input.n_instances, input.n_cpus, @@ -350,7 +349,7 @@ async fn test_cluster() -> anyhow::Result<()> { ) .await .expect("failed to sync keepers"); - let samples = test_util::generate_test_samples( + let samples = oximeter_test_utils::generate_test_samples( input.n_projects, input.n_instances, input.n_cpus, @@ -370,7 +369,7 @@ async fn test_cluster() -> anyhow::Result<()> { ) .await .expect("failed to sync keepers"); - let samples = test_util::generate_test_samples( + let samples = oximeter_test_utils::generate_test_samples( input.n_projects, input.n_instances, input.n_cpus, diff --git a/oximeter/impl/src/test_util.rs b/oximeter/impl/src/test_util.rs deleted file mode 100644 index c2ac7b34bd..0000000000 --- a/oximeter/impl/src/test_util.rs +++ /dev/null @@ -1,130 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Utilities for testing the oximeter crate. 
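The cluster test above leans on an invariant of this generator: one timeseries per (project, instance, vCPU) and `n_samples` points for each, so the total sample count is the product of the four arguments. A sketch against the relocated crate path (the deleted `test_gen_test_samples` below checks the same product):

```rust
#[test]
fn sample_count_is_product_of_knobs() {
    let (projects, instances, cpus, samples) = (2, 3, 4, 5);
    let generated = oximeter_test_utils::generate_test_samples(
        projects, instances, cpus, samples,
    );
    assert_eq!(generated.len(), projects * instances * cpus * samples);
}
```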
-// Copyright 2024 Oxide Computer Company - -use crate::histogram; -use crate::histogram::{Histogram, Record}; -use crate::types::{Cumulative, Sample}; -use uuid::Uuid; - -#[derive(oximeter::Target)] -pub struct TestTarget { - pub name1: String, - pub name2: String, - pub num: i64, -} - -impl Default for TestTarget { - fn default() -> Self { - TestTarget { - name1: "first_name".into(), - name2: "second_name".into(), - num: 0, - } - } -} - -#[derive(oximeter::Metric)] -pub struct TestMetric { - pub id: Uuid, - pub good: bool, - pub datum: i64, -} - -#[derive(oximeter::Metric)] -pub struct TestCumulativeMetric { - pub id: Uuid, - pub good: bool, - pub datum: Cumulative, -} - -#[derive(oximeter::Metric)] -pub struct TestHistogram { - pub id: Uuid, - pub good: bool, - pub datum: Histogram, -} - -const ID: Uuid = uuid::uuid!("e00ced4d-39d1-446a-ae85-a67f05c9750b"); - -pub fn make_sample() -> Sample { - let target = TestTarget::default(); - let metric = TestMetric { id: ID, good: true, datum: 1 }; - Sample::new(&target, &metric).unwrap() -} - -pub fn make_missing_sample() -> Sample { - let target = TestTarget::default(); - let metric = TestMetric { id: ID, good: true, datum: 1 }; - Sample::new_missing(&target, &metric).unwrap() -} - -pub fn make_hist_sample() -> Sample { - let target = TestTarget::default(); - let mut hist = histogram::Histogram::new(&[0.0, 5.0, 10.0]).unwrap(); - hist.sample(1.0).unwrap(); - hist.sample(2.0).unwrap(); - hist.sample(6.0).unwrap(); - let metric = TestHistogram { id: ID, good: true, datum: hist }; - Sample::new(&target, &metric).unwrap() -} - -/// A target identifying a single virtual machine instance -#[derive(Debug, Clone, Copy, oximeter::Target)] -pub struct VirtualMachine { - pub project_id: Uuid, - pub instance_id: Uuid, -} - -/// A metric recording the total time a vCPU is busy, by its ID -#[derive(Debug, Clone, Copy, oximeter::Metric)] -pub struct CpuBusy { - cpu_id: i64, - datum: Cumulative, -} - -pub fn generate_test_samples( - n_projects: usize, - n_instances: usize, - n_cpus: usize, - n_samples: usize, -) -> Vec { - let n_timeseries = n_projects * n_instances * n_cpus; - let mut samples = Vec::with_capacity(n_samples * n_timeseries); - for _ in 0..n_projects { - let project_id = Uuid::new_v4(); - for _ in 0..n_instances { - let vm = VirtualMachine { project_id, instance_id: Uuid::new_v4() }; - for cpu in 0..n_cpus { - for sample in 0..n_samples { - let cpu_busy = CpuBusy { - cpu_id: cpu as _, - datum: Cumulative::new(sample as f64), - }; - let sample = Sample::new(&vm, &cpu_busy).unwrap(); - samples.push(sample); - } - } - } - } - samples -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_gen_test_samples() { - let (n_projects, n_instances, n_cpus, n_samples) = (2, 2, 2, 2); - let samples = - generate_test_samples(n_projects, n_instances, n_cpus, n_samples); - assert_eq!( - samples.len(), - n_projects * n_instances * n_cpus * n_samples - ); - } -} diff --git a/oximeter/oximeter/Cargo.toml b/oximeter/oximeter/Cargo.toml index c04d1bd3ae..63b370bee6 100644 --- a/oximeter/oximeter/Cargo.toml +++ b/oximeter/oximeter/Cargo.toml @@ -13,9 +13,10 @@ anyhow.workspace = true clap.workspace = true chrono.workspace = true omicron-workspace-hack.workspace = true -oximeter-impl.workspace = true oximeter-macro-impl.workspace = true +oximeter-schema.workspace = true oximeter-timeseries-macro.workspace = true +oximeter-types.workspace = true prettyplease.workspace = true syn.workspace = true toml.workspace = true diff --git 
a/oximeter/oximeter/src/lib.rs b/oximeter/oximeter/src/lib.rs index 5ec6a49e5c..913318b8a8 100644 --- a/oximeter/oximeter/src/lib.rs +++ b/oximeter/oximeter/src/lib.rs @@ -185,14 +185,15 @@ //! `Producer`s may be registered with the same `ProducerServer`, each with potentially different //! sampling intervals. -pub use oximeter_impl::*; +pub use oximeter_macro_impl::{Metric, Target}; pub use oximeter_timeseries_macro::use_timeseries; +pub use oximeter_types::*; #[cfg(test)] mod test { - use oximeter_impl::schema::ir::load_schema; - use oximeter_impl::schema::{FieldSource, SCHEMA_DIRECTORY}; - use oximeter_impl::TimeseriesSchema; + use oximeter_schema::ir::load_schema; + use oximeter_types::schema::{FieldSource, SCHEMA_DIRECTORY}; + use oximeter_types::TimeseriesSchema; use std::collections::BTreeMap; use std::fs; diff --git a/oximeter/oxql-types/Cargo.toml b/oximeter/oxql-types/Cargo.toml new file mode 100644 index 0000000000..da7c7bcd1c --- /dev/null +++ b/oximeter/oxql-types/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "oxql-types" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[lints] +workspace = true + +[dependencies] +anyhow.workspace = true +chrono.workspace = true +highway.workspace = true +num.workspace = true +omicron-workspace-hack.workspace = true +oximeter-types.workspace = true +schemars.workspace = true +serde.workspace = true diff --git a/oximeter/oxql-types/src/lib.rs b/oximeter/oxql-types/src/lib.rs new file mode 100644 index 0000000000..a537c62ca1 --- /dev/null +++ b/oximeter/oxql-types/src/lib.rs @@ -0,0 +1,25 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Core types for OxQL. + +use chrono::{DateTime, Utc}; +use std::time::Duration; + +pub mod point; +pub mod table; + +pub use self::table::Table; +pub use self::table::Timeseries; + +pub type TimeseriesKey = u64; + +/// Describes the time alignment for an OxQL query. +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct Alignment { + /// The end time of the query, which is the temporal reference point. + pub end_time: DateTime<Utc>, + /// The alignment period, the interval on which values are produced. + pub period: Duration, +} diff --git a/oximeter/db/src/oxql/point.rs b/oximeter/oxql-types/src/point.rs similarity index 82% rename from oximeter/db/src/oxql/point.rs rename to oximeter/oxql-types/src/point.rs index e04193e8b8..6e3c7143dc 100644 --- a/oximeter/db/src/oxql/point.rs +++ b/oximeter/oxql-types/src/point.rs @@ -6,15 +6,15 @@ // Copyright 2024 Oxide Computer Company -use super::Error; use anyhow::Context; +use anyhow::Error; use chrono::DateTime; use chrono::Utc; use num::ToPrimitive; -use oximeter::traits::HistogramSupport; -use oximeter::DatumType; -use oximeter::Measurement; -use oximeter::Quantile; +use oximeter_types::traits::HistogramSupport; +use oximeter_types::DatumType; +use oximeter_types::Measurement; +use oximeter_types::Quantile; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; @@ -131,32 +131,32 @@ impl CumulativeDatum { // not cumulative.
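`Alignment` now lives in `oxql-types` with both fields public, so downstream crates can construct it directly. A minimal example with illustrative values, assuming `oxql-types` is a dependency:

```rust
use chrono::Utc;
use std::time::Duration;

fn example_alignment() -> oxql_types::Alignment {
    oxql_types::Alignment {
        end_time: Utc::now(),            // the query's temporal reference point
        period: Duration::from_secs(60), // one output value per minute
    }
}
```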
fn from_cumulative(meas: &Measurement) -> Result { let datum = match meas.datum() { - oximeter::Datum::CumulativeI64(val) => { + oximeter_types::Datum::CumulativeI64(val) => { CumulativeDatum::Integer(val.value()) } - oximeter::Datum::CumulativeU64(val) => { + oximeter_types::Datum::CumulativeU64(val) => { let int = val .value() .try_into() .context("Overflow converting u64 to i64")?; CumulativeDatum::Integer(int) } - oximeter::Datum::CumulativeF32(val) => { + oximeter_types::Datum::CumulativeF32(val) => { CumulativeDatum::Double(val.value().into()) } - oximeter::Datum::CumulativeF64(val) => { + oximeter_types::Datum::CumulativeF64(val) => { CumulativeDatum::Double(val.value()) } - oximeter::Datum::HistogramI8(hist) => hist.into(), - oximeter::Datum::HistogramU8(hist) => hist.into(), - oximeter::Datum::HistogramI16(hist) => hist.into(), - oximeter::Datum::HistogramU16(hist) => hist.into(), - oximeter::Datum::HistogramI32(hist) => hist.into(), - oximeter::Datum::HistogramU32(hist) => hist.into(), - oximeter::Datum::HistogramI64(hist) => hist.into(), - oximeter::Datum::HistogramU64(hist) => hist.try_into()?, - oximeter::Datum::HistogramF32(hist) => hist.into(), - oximeter::Datum::HistogramF64(hist) => hist.into(), + oximeter_types::Datum::HistogramI8(hist) => hist.into(), + oximeter_types::Datum::HistogramU8(hist) => hist.into(), + oximeter_types::Datum::HistogramI16(hist) => hist.into(), + oximeter_types::Datum::HistogramU16(hist) => hist.into(), + oximeter_types::Datum::HistogramI32(hist) => hist.into(), + oximeter_types::Datum::HistogramU32(hist) => hist.into(), + oximeter_types::Datum::HistogramI64(hist) => hist.into(), + oximeter_types::Datum::HistogramU64(hist) => hist.try_into()?, + oximeter_types::Datum::HistogramF32(hist) => hist.into(), + oximeter_types::Datum::HistogramF64(hist) => hist.into(), other => anyhow::bail!( "Input datum of type {} is not cumulative", other.datum_type(), @@ -169,10 +169,10 @@ impl CumulativeDatum { /// A single list of values, for one dimension of a timeseries. #[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] pub struct Values { - // The data values. - pub(super) values: ValueArray, - // The type of this metric. - pub(super) metric_type: MetricType, + /// The data values. + pub values: ValueArray, + /// The type of this metric. + pub metric_type: MetricType, } impl Values { @@ -285,14 +285,23 @@ impl<'a> fmt::Display for Datum<'a> { #[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] pub struct Points { // The start time points for cumulative or delta metrics. - pub(super) start_times: Option>>, + pub(crate) start_times: Option>>, // The timestamp of each value. - pub(super) timestamps: Vec>, + pub(crate) timestamps: Vec>, // The array of data values, one for each dimension. - pub(super) values: Vec, + pub(crate) values: Vec, } impl Points { + /// Construct a new `Points` with the provided data. + pub fn new( + start_times: Option>>, + timestamps: Vec>, + values: Vec, + ) -> Self { + Self { start_times, timestamps, values } + } + /// Construct an empty array of points to hold data of the provided type. pub fn empty(data_type: DataType, metric_type: MetricType) -> Self { Self::with_capacity( @@ -303,8 +312,28 @@ impl Points { .unwrap() } - // Return a mutable reference to the value array of the specified dimension, if any. - pub(super) fn values_mut(&mut self, dim: usize) -> Option<&mut ValueArray> { + /// Return the start times of the points, if any. 
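+    ///
+    /// This is `None` for gauge metrics; for cumulative or delta metrics it
+    /// runs parallel to the array returned by `timestamps()`.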
+ pub fn start_times(&self) -> Option<&[DateTime]> { + self.start_times.as_deref() + } + + /// Clear the start times of the points. + pub fn clear_start_times(&mut self) { + self.start_times = None; + } + + /// Return the timestamps of the points. + pub fn timestamps(&self) -> &[DateTime] { + &self.timestamps + } + + pub fn set_timestamps(&mut self, timestamps: Vec>) { + self.timestamps = timestamps; + } + + /// Return a mutable reference to the value array of the specified + /// dimension, if any. + pub fn values_mut(&mut self, dim: usize) -> Option<&mut ValueArray> { self.values.get_mut(dim).map(|val| &mut val.values) } @@ -563,8 +592,8 @@ impl Points { }) } - // Filter points in self to those where `to_keep` is true. - pub(crate) fn filter(&self, to_keep: Vec) -> Result { + /// Filter points in self to those where `to_keep` is true. + pub fn filter(&self, to_keep: Vec) -> Result { anyhow::ensure!( to_keep.len() == self.len(), "Filter array must be the same length as self", @@ -646,8 +675,8 @@ impl Points { Ok(out) } - // Return a new set of points, with the values casted to the provided types. - pub(crate) fn cast(&self, types: &[DataType]) -> Result { + /// Return a new set of points, with the values casted to the provided types. + pub fn cast(&self, types: &[DataType]) -> Result { anyhow::ensure!( types.len() == self.dimensionality(), "Cannot cast to {} types, the data has dimensionality {}", @@ -863,12 +892,104 @@ impl Points { Ok(Self { start_times, timestamps, values: new_values }) } + /// Given two arrays of points, stack them together at matching timepoints. + /// + /// For time points in either which do not have a corresponding point in + /// the other, the entire time point is elided. + pub fn inner_join(&self, right: &Points) -> Result { + // Create an output array with roughly the right capacity, and double the + // number of dimensions. We're trying to stack output value arrays together + // along the dimension axis. + let data_types = + self.data_types().chain(right.data_types()).collect::>(); + let metric_types = + self.metric_types().chain(right.metric_types()).collect::>(); + let mut out = Points::with_capacity( + self.len().max(right.len()), + data_types.iter().copied(), + metric_types.iter().copied(), + )?; + + // Iterate through each array until one is exhausted. We're only inserting + // values from both arrays where the timestamps actually match, since this + // is an inner join. We may want to insert missing values where timestamps + // do not match on either side, when we support an outer join of some kind. + let n_left_dim = self.dimensionality(); + let mut left_ix = 0; + let mut right_ix = 0; + while left_ix < self.len() && right_ix < right.len() { + let left_timestamp = self.timestamps()[left_ix]; + let right_timestamp = right.timestamps()[right_ix]; + if left_timestamp == right_timestamp { + out.timestamps.push(left_timestamp); + push_concrete_values( + &mut out.values[..n_left_dim], + &self.values, + left_ix, + ); + push_concrete_values( + &mut out.values[n_left_dim..], + &right.values, + right_ix, + ); + left_ix += 1; + right_ix += 1; + } else if left_timestamp < right_timestamp { + left_ix += 1; + } else { + right_ix += 1; + } + } + Ok(out) + } + /// Return true if self contains no data points. pub fn is_empty(&self) -> bool { self.len() == 0 } } +// Push the `i`th value from each dimension of `from` onto `to`. 
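+//
+// A minimal usage sketch: the caller must ensure `to` and `from` have the
+// same dimensionality and matching data types, which this function asserts.
+// See `test_push_concrete_values` below for a runnable version.
+//
+//     push_concrete_values(&mut out.values[..n_left_dim], &self.values, left_ix);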
+fn push_concrete_values(to: &mut [Values], from: &[Values], i: usize) { + assert_eq!(to.len(), from.len()); + for (output, input) in to.iter_mut().zip(from.iter()) { + let input_array = &input.values; + let output_array = &mut output.values; + assert_eq!(input_array.data_type(), output_array.data_type()); + if let Ok(ints) = input_array.as_integer() { + output_array.as_integer_mut().unwrap().push(ints[i]); + continue; + } + if let Ok(doubles) = input_array.as_double() { + output_array.as_double_mut().unwrap().push(doubles[i]); + continue; + } + if let Ok(bools) = input_array.as_boolean() { + output_array.as_boolean_mut().unwrap().push(bools[i]); + continue; + } + if let Ok(strings) = input_array.as_string() { + output_array.as_string_mut().unwrap().push(strings[i].clone()); + continue; + } + if let Ok(dists) = input_array.as_integer_distribution() { + output_array + .as_integer_distribution_mut() + .unwrap() + .push(dists[i].clone()); + continue; + } + if let Ok(dists) = input_array.as_double_distribution() { + output_array + .as_double_distribution_mut() + .unwrap() + .push(dists[i].clone()); + continue; + } + unreachable!(); + } +} + /// List of data values for one timeseries. /// /// Each element is an option, where `None` represents a missing sample. @@ -900,8 +1021,8 @@ impl ValueArray { } } - // Return the data type in self. - pub(super) fn data_type(&self) -> DataType { + /// Return the data type in self. + pub fn data_type(&self) -> DataType { match self { ValueArray::Integer(_) => DataType::Integer, ValueArray::Double(_) => DataType::Double, @@ -947,10 +1068,8 @@ impl ValueArray { Ok(inner) } - // Access the inner array of integers, if possible. - pub(super) fn as_integer_mut( - &mut self, - ) -> Result<&mut Vec>, Error> { + /// Access the inner array of integers, if possible. + pub fn as_integer_mut(&mut self) -> Result<&mut Vec>, Error> { let ValueArray::Integer(inner) = self else { anyhow::bail!( "Cannot access value array as integer type, it has type {}", @@ -1107,91 +1226,97 @@ impl ValueArray { // Push a value directly from a datum, without modification. 
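    //
    // Note the widening behavior below: every integer datum up through u32 is
    // widened to i64, u64 values are converted with an explicit overflow
    // check, and f32 values are widened to f64.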
fn push_value_from_datum( &mut self, - datum: &oximeter::Datum, + datum: &oximeter_types::Datum, ) -> Result<(), Error> { match datum { - oximeter::Datum::Bool(b) => self.as_boolean_mut()?.push(Some(*b)), - oximeter::Datum::I8(i) => { + oximeter_types::Datum::Bool(b) => { + self.as_boolean_mut()?.push(Some(*b)) + } + oximeter_types::Datum::I8(i) => { self.as_integer_mut()?.push(Some(i64::from(*i))) } - oximeter::Datum::U8(i) => { + oximeter_types::Datum::U8(i) => { self.as_integer_mut()?.push(Some(i64::from(*i))) } - oximeter::Datum::I16(i) => { + oximeter_types::Datum::I16(i) => { self.as_integer_mut()?.push(Some(i64::from(*i))) } - oximeter::Datum::U16(i) => { + oximeter_types::Datum::U16(i) => { self.as_integer_mut()?.push(Some(i64::from(*i))) } - oximeter::Datum::I32(i) => { + oximeter_types::Datum::I32(i) => { self.as_integer_mut()?.push(Some(i64::from(*i))) } - oximeter::Datum::U32(i) => { + oximeter_types::Datum::U32(i) => { self.as_integer_mut()?.push(Some(i64::from(*i))) } - oximeter::Datum::I64(i) => self.as_integer_mut()?.push(Some(*i)), - oximeter::Datum::U64(i) => { + oximeter_types::Datum::I64(i) => { + self.as_integer_mut()?.push(Some(*i)) + } + oximeter_types::Datum::U64(i) => { let i = i.to_i64().context("Failed to convert u64 datum to i64")?; self.as_integer_mut()?.push(Some(i)); } - oximeter::Datum::F32(f) => { + oximeter_types::Datum::F32(f) => { self.as_double_mut()?.push(Some(f64::from(*f))) } - oximeter::Datum::F64(f) => self.as_double_mut()?.push(Some(*f)), - oximeter::Datum::String(s) => { + oximeter_types::Datum::F64(f) => { + self.as_double_mut()?.push(Some(*f)) + } + oximeter_types::Datum::String(s) => { self.as_string_mut()?.push(Some(s.clone())) } - oximeter::Datum::Bytes(_) => { + oximeter_types::Datum::Bytes(_) => { anyhow::bail!("Bytes data types are not yet supported") } - oximeter::Datum::CumulativeI64(c) => { + oximeter_types::Datum::CumulativeI64(c) => { self.as_integer_mut()?.push(Some(c.value())) } - oximeter::Datum::CumulativeU64(c) => { + oximeter_types::Datum::CumulativeU64(c) => { let c = c .value() .to_i64() .context("Failed to convert u64 datum to i64")?; self.as_integer_mut()?.push(Some(c)); } - oximeter::Datum::CumulativeF32(c) => { + oximeter_types::Datum::CumulativeF32(c) => { self.as_double_mut()?.push(Some(f64::from(c.value()))) } - oximeter::Datum::CumulativeF64(c) => { + oximeter_types::Datum::CumulativeF64(c) => { self.as_double_mut()?.push(Some(c.value())) } - oximeter::Datum::HistogramI8(h) => self + oximeter_types::Datum::HistogramI8(h) => self .as_integer_distribution_mut()? .push(Some(Distribution::from(h))), - oximeter::Datum::HistogramU8(h) => self + oximeter_types::Datum::HistogramU8(h) => self .as_integer_distribution_mut()? .push(Some(Distribution::from(h))), - oximeter::Datum::HistogramI16(h) => self + oximeter_types::Datum::HistogramI16(h) => self .as_integer_distribution_mut()? .push(Some(Distribution::from(h))), - oximeter::Datum::HistogramU16(h) => self + oximeter_types::Datum::HistogramU16(h) => self .as_integer_distribution_mut()? .push(Some(Distribution::from(h))), - oximeter::Datum::HistogramI32(h) => self + oximeter_types::Datum::HistogramI32(h) => self .as_integer_distribution_mut()? .push(Some(Distribution::from(h))), - oximeter::Datum::HistogramU32(h) => self + oximeter_types::Datum::HistogramU32(h) => self .as_integer_distribution_mut()? .push(Some(Distribution::from(h))), - oximeter::Datum::HistogramI64(h) => self + oximeter_types::Datum::HistogramI64(h) => self .as_integer_distribution_mut()? 
.push(Some(Distribution::from(h))), - oximeter::Datum::HistogramU64(h) => self + oximeter_types::Datum::HistogramU64(h) => self .as_integer_distribution_mut()? .push(Some(Distribution::try_from(h)?)), - oximeter::Datum::HistogramF32(h) => self + oximeter_types::Datum::HistogramF32(h) => self .as_double_distribution_mut()? .push(Some(Distribution::from(h))), - oximeter::Datum::HistogramF64(h) => self + oximeter_types::Datum::HistogramF64(h) => self .as_double_distribution_mut()? .push(Some(Distribution::from(h))), - oximeter::Datum::Missing(missing) => { + oximeter_types::Datum::Missing(missing) => { self.push_missing(missing.datum_type())? } } @@ -1216,7 +1341,7 @@ impl ValueArray { fn push_diff_from_last_to_datum( &mut self, last_datum: &Option, - new_datum: &oximeter::Datum, + new_datum: &oximeter_types::Datum, data_type: DataType, ) -> Result<(), Error> { match (last_datum.as_ref(), new_datum.is_missing()) { @@ -1253,49 +1378,49 @@ impl ValueArray { match (last_datum, new_datum) { ( CumulativeDatum::Integer(last), - oximeter::Datum::I8(new), + oximeter_types::Datum::I8(new), ) => { let new = i64::from(*new); self.as_integer_mut()?.push(Some(new - last)); } ( CumulativeDatum::Integer(last), - oximeter::Datum::U8(new), + oximeter_types::Datum::U8(new), ) => { let new = i64::from(*new); self.as_integer_mut()?.push(Some(new - last)); } ( CumulativeDatum::Integer(last), - oximeter::Datum::I16(new), + oximeter_types::Datum::I16(new), ) => { let new = i64::from(*new); self.as_integer_mut()?.push(Some(new - last)); } ( CumulativeDatum::Integer(last), - oximeter::Datum::U16(new), + oximeter_types::Datum::U16(new), ) => { let new = i64::from(*new); self.as_integer_mut()?.push(Some(new - last)); } ( CumulativeDatum::Integer(last), - oximeter::Datum::I32(new), + oximeter_types::Datum::I32(new), ) => { let new = i64::from(*new); self.as_integer_mut()?.push(Some(new - last)); } ( CumulativeDatum::Integer(last), - oximeter::Datum::U32(new), + oximeter_types::Datum::U32(new), ) => { let new = i64::from(*new); self.as_integer_mut()?.push(Some(new - last)); } ( CumulativeDatum::Integer(last), - oximeter::Datum::I64(new), + oximeter_types::Datum::I64(new), ) => { let diff = new .checked_sub(*last) @@ -1304,7 +1429,7 @@ impl ValueArray { } ( CumulativeDatum::Integer(last), - oximeter::Datum::U64(new), + oximeter_types::Datum::U64(new), ) => { let new = new .to_i64() @@ -1316,20 +1441,20 @@ impl ValueArray { } ( CumulativeDatum::Double(last), - oximeter::Datum::F32(new), + oximeter_types::Datum::F32(new), ) => { self.as_double_mut()? .push(Some(f64::from(*new) - last)); } ( CumulativeDatum::Double(last), - oximeter::Datum::F64(new), + oximeter_types::Datum::F64(new), ) => { self.as_double_mut()?.push(Some(new - last)); } ( CumulativeDatum::Integer(last), - oximeter::Datum::CumulativeI64(new), + oximeter_types::Datum::CumulativeI64(new), ) => { let new = new.value(); let diff = new @@ -1339,7 +1464,7 @@ impl ValueArray { } ( CumulativeDatum::Integer(last), - oximeter::Datum::CumulativeU64(new), + oximeter_types::Datum::CumulativeU64(new), ) => { let new = new .value() @@ -1352,20 +1477,20 @@ impl ValueArray { } ( CumulativeDatum::Double(last), - oximeter::Datum::CumulativeF32(new), + oximeter_types::Datum::CumulativeF32(new), ) => { self.as_double_mut()? 
.push(Some(f64::from(new.value()) - last)); } ( CumulativeDatum::Double(last), - oximeter::Datum::CumulativeF64(new), + oximeter_types::Datum::CumulativeF64(new), ) => { self.as_double_mut()?.push(Some(new.value() - last)); } ( CumulativeDatum::IntegerDistribution(last), - oximeter::Datum::HistogramI8(new), + oximeter_types::Datum::HistogramI8(new), ) => { let new = Distribution::from(new); self.as_integer_distribution_mut()? @@ -1373,7 +1498,7 @@ impl ValueArray { } ( CumulativeDatum::IntegerDistribution(last), - oximeter::Datum::HistogramU8(new), + oximeter_types::Datum::HistogramU8(new), ) => { let new = Distribution::from(new); self.as_integer_distribution_mut()? @@ -1381,7 +1506,7 @@ impl ValueArray { } ( CumulativeDatum::IntegerDistribution(last), - oximeter::Datum::HistogramI16(new), + oximeter_types::Datum::HistogramI16(new), ) => { let new = Distribution::from(new); self.as_integer_distribution_mut()? @@ -1389,7 +1514,7 @@ impl ValueArray { } ( CumulativeDatum::IntegerDistribution(last), - oximeter::Datum::HistogramU16(new), + oximeter_types::Datum::HistogramU16(new), ) => { let new = Distribution::from(new); self.as_integer_distribution_mut()? @@ -1397,7 +1522,7 @@ impl ValueArray { } ( CumulativeDatum::IntegerDistribution(last), - oximeter::Datum::HistogramI32(new), + oximeter_types::Datum::HistogramI32(new), ) => { let new = Distribution::from(new); self.as_integer_distribution_mut()? @@ -1405,7 +1530,7 @@ impl ValueArray { } ( CumulativeDatum::IntegerDistribution(last), - oximeter::Datum::HistogramU32(new), + oximeter_types::Datum::HistogramU32(new), ) => { let new = Distribution::from(new); self.as_integer_distribution_mut()? @@ -1413,7 +1538,7 @@ impl ValueArray { } ( CumulativeDatum::IntegerDistribution(last), - oximeter::Datum::HistogramI64(new), + oximeter_types::Datum::HistogramI64(new), ) => { let new = Distribution::from(new); self.as_integer_distribution_mut()? @@ -1421,7 +1546,7 @@ impl ValueArray { } ( CumulativeDatum::IntegerDistribution(last), - oximeter::Datum::HistogramU64(new), + oximeter_types::Datum::HistogramU64(new), ) => { let new = Distribution::try_from(new)?; self.as_integer_distribution_mut()? @@ -1429,7 +1554,7 @@ impl ValueArray { } ( CumulativeDatum::DoubleDistribution(last), - oximeter::Datum::HistogramF32(new), + oximeter_types::Datum::HistogramF32(new), ) => { let new = Distribution::::from(new); self.as_double_distribution_mut()? @@ -1437,7 +1562,7 @@ impl ValueArray { } ( CumulativeDatum::DoubleDistribution(last), - oximeter::Datum::HistogramF64(new), + oximeter_types::Datum::HistogramF64(new), ) => { let new = Distribution::::from(new); self.as_double_distribution_mut()? @@ -1486,8 +1611,8 @@ impl ValueArray { } } - // Swap the value in self with other, asserting they're the same type. - pub(crate) fn swap(&mut self, mut values: ValueArray) { + /// Swap the value in self with other, asserting they're the same type. + pub fn swap(&mut self, mut values: ValueArray) { use std::mem::swap; match (self, &mut values) { (ValueArray::Integer(x), ValueArray::Integer(y)) => swap(x, y), @@ -1733,8 +1858,10 @@ where macro_rules! 
i64_dist_from { ($t:ty) => { - impl From<&oximeter::histogram::Histogram<$t>> for Distribution { - fn from(hist: &oximeter::histogram::Histogram<$t>) -> Self { + impl From<&oximeter_types::histogram::Histogram<$t>> + for Distribution + { + fn from(hist: &oximeter_types::histogram::Histogram<$t>) -> Self { let (bins, counts) = hist.bins_and_counts(); Self { bins: bins.into_iter().map(i64::from).collect(), @@ -1750,8 +1877,10 @@ macro_rules! i64_dist_from { } } - impl From<&oximeter::histogram::Histogram<$t>> for CumulativeDatum { - fn from(hist: &oximeter::histogram::Histogram<$t>) -> Self { + impl From<&oximeter_types::histogram::Histogram<$t>> + for CumulativeDatum + { + fn from(hist: &oximeter_types::histogram::Histogram<$t>) -> Self { CumulativeDatum::IntegerDistribution(hist.into()) } } @@ -1766,10 +1895,10 @@ i64_dist_from!(i32); i64_dist_from!(u32); i64_dist_from!(i64); -impl TryFrom<&oximeter::histogram::Histogram> for Distribution { +impl TryFrom<&oximeter_types::histogram::Histogram> for Distribution { type Error = Error; fn try_from( - hist: &oximeter::histogram::Histogram, + hist: &oximeter_types::histogram::Histogram, ) -> Result { let (bins, counts) = hist.bins_and_counts(); let bins = bins @@ -1791,10 +1920,10 @@ impl TryFrom<&oximeter::histogram::Histogram> for Distribution { } } -impl TryFrom<&oximeter::histogram::Histogram> for CumulativeDatum { +impl TryFrom<&oximeter_types::histogram::Histogram> for CumulativeDatum { type Error = Error; fn try_from( - hist: &oximeter::histogram::Histogram, + hist: &oximeter_types::histogram::Histogram, ) -> Result { hist.try_into().map(CumulativeDatum::IntegerDistribution) } @@ -1802,8 +1931,10 @@ impl TryFrom<&oximeter::histogram::Histogram> for CumulativeDatum { macro_rules! f64_dist_from { ($t:ty) => { - impl From<&oximeter::histogram::Histogram<$t>> for Distribution { - fn from(hist: &oximeter::histogram::Histogram<$t>) -> Self { + impl From<&oximeter_types::histogram::Histogram<$t>> + for Distribution + { + fn from(hist: &oximeter_types::histogram::Histogram<$t>) -> Self { let (bins, counts) = hist.bins_and_counts(); Self { bins: bins.into_iter().map(f64::from).collect(), @@ -1819,8 +1950,10 @@ macro_rules! 
f64_dist_from { } } - impl From<&oximeter::histogram::Histogram<$t>> for CumulativeDatum { - fn from(hist: &oximeter::histogram::Histogram<$t>) -> Self { + impl From<&oximeter_types::histogram::Histogram<$t>> + for CumulativeDatum + { + fn from(hist: &oximeter_types::histogram::Histogram<$t>) -> Self { CumulativeDatum::DoubleDistribution(hist.into()) } } @@ -1833,9 +1966,9 @@ f64_dist_from!(f64); #[cfg(test)] mod tests { use super::{Distribution, MetricType, Points, Values}; - use crate::oxql::point::{DataType, ValueArray}; + use crate::point::{push_concrete_values, DataType, Datum, ValueArray}; use chrono::{DateTime, Utc}; - use oximeter::{ + use oximeter_types::{ histogram::Record, types::Cumulative, Measurement, Quantile, }; use std::time::Duration; @@ -1939,12 +2072,12 @@ mod tests { let now = Utc::now(); let current1 = now + Duration::from_secs(1); let mut hist1 = - oximeter::histogram::Histogram::new(&[0i64, 10, 20]).unwrap(); + oximeter_types::histogram::Histogram::new(&[0i64, 10, 20]).unwrap(); hist1.sample(1).unwrap(); hist1.set_start_time(current1); let current2 = now + Duration::from_secs(2); let mut hist2 = - oximeter::histogram::Histogram::new(&[0i64, 10, 20]).unwrap(); + oximeter_types::histogram::Histogram::new(&[0i64, 10, 20]).unwrap(); hist2.sample(5).unwrap(); hist2.sample(10).unwrap(); hist2.sample(15).unwrap(); @@ -2273,4 +2406,176 @@ mod tests { .cast(&[DataType::DoubleDistribution, DataType::DoubleDistribution]) .is_err()); } + + #[test] + fn test_push_concrete_values() { + let mut points = Points::with_capacity( + 2, + [DataType::Integer, DataType::Double].into_iter(), + [MetricType::Gauge, MetricType::Gauge].into_iter(), + ) + .unwrap(); + + // Push a concrete value for the integer dimension + let from_ints = vec![Values { + values: ValueArray::Integer(vec![Some(1)]), + metric_type: MetricType::Gauge, + }]; + push_concrete_values(&mut points.values[..1], &from_ints, 0); + + // And another for the double dimension. + let from_doubles = vec![Values { + values: ValueArray::Double(vec![Some(2.0)]), + metric_type: MetricType::Gauge, + }]; + push_concrete_values(&mut points.values[1..], &from_doubles, 0); + + assert_eq!( + points.dimensionality(), + 2, + "Points should have 2 dimensions", + ); + let ints = points.values[0].values.as_integer().unwrap(); + assert_eq!( + ints.len(), + 1, + "Should have pushed one point in the first dimension" + ); + assert_eq!( + ints[0], + Some(1), + "Should have pushed 1 onto the first dimension" + ); + let doubles = points.values[1].values.as_double().unwrap(); + assert_eq!( + doubles.len(), + 1, + "Should have pushed one point in the second dimension" + ); + assert_eq!( + doubles[0], + Some(2.0), + "Should have pushed 2.0 onto the second dimension" + ); + } + + #[test] + fn test_join_point_arrays() { + let now = Utc::now(); + + // Create a set of integer points to join with. + // + // This will have two timestamps, one of which will match the points + // below that are merged in. + let int_points = Points { + start_times: None, + timestamps: vec![ + now - Duration::from_secs(3), + now - Duration::from_secs(2), + now, + ], + values: vec![Values { + values: ValueArray::Integer(vec![Some(1), Some(2), Some(3)]), + metric_type: MetricType::Gauge, + }], + }; + + // Create an additional set of double points. + // + // This also has two timepoints, one of which matches with the above, + // and one of which does not. 
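+        // (Concretely: `now - 3s` and `now` appear in both arrays, so the
+        // inner join below should keep exactly those two rows.)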
+ let double_points = Points { + start_times: None, + timestamps: vec![ + now - Duration::from_secs(3), + now - Duration::from_secs(1), + now, + ], + values: vec![Values { + values: ValueArray::Double(vec![ + Some(4.0), + Some(5.0), + Some(6.0), + ]), + metric_type: MetricType::Gauge, + }], + }; + + // Merge the arrays. + let merged = int_points.inner_join(&double_points).unwrap(); + + // Basic checks that we merged in the right values and have the right + // types and dimensions. + assert_eq!( + merged.dimensionality(), + 2, + "Should have appended the dimensions from each input array" + ); + assert_eq!(merged.len(), 2, "Should have merged two common points",); + assert_eq!( + merged.data_types().collect::>(), + &[DataType::Integer, DataType::Double], + "Should have combined the data types of the input arrays" + ); + assert_eq!( + merged.metric_types().collect::>(), + &[MetricType::Gauge, MetricType::Gauge], + "Should have combined the metric types of the input arrays" + ); + + // Check the actual values of the array. + let mut points = merged.iter_points(); + + // The first and last timepoint overlapped between the two arrays, so we + // should have both of them as concrete samples. + let pt = points.next().unwrap(); + assert_eq!(pt.start_time, None, "Gauges don't have a start time"); + assert_eq!( + *pt.timestamp, int_points.timestamps[0], + "Should have taken the first input timestamp from both arrays", + ); + assert_eq!( + *pt.timestamp, double_points.timestamps[0], + "Should have taken the first input timestamp from both arrays", + ); + let values = pt.values; + assert_eq!(values.len(), 2, "Should have 2 dimensions"); + assert_eq!( + &values[0], + &(Datum::Integer(Some(&1)), MetricType::Gauge), + "Should have pulled value from first integer array." + ); + assert_eq!( + &values[1], + &(Datum::Double(Some(&4.0)), MetricType::Gauge), + "Should have pulled value from second double array." + ); + + // And the next point + let pt = points.next().unwrap(); + assert_eq!(pt.start_time, None, "Gauges don't have a start time"); + assert_eq!( + *pt.timestamp, int_points.timestamps[2], + "Should have taken the input timestamp from both arrays", + ); + assert_eq!( + *pt.timestamp, double_points.timestamps[2], + "Should have taken the input timestamp from both arrays", + ); + let values = pt.values; + assert_eq!(values.len(), 2, "Should have 2 dimensions"); + assert_eq!( + &values[0], + &(Datum::Integer(Some(&3)), MetricType::Gauge), + "Should have pulled value from first integer array." + ); + assert_eq!( + &values[1], + &(Datum::Double(Some(&6.0)), MetricType::Gauge), + "Should have pulled value from second double array." + ); + + // And there should be no other values. 
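+        // The middle timestamps (`now - 2s` and `now - 1s`) each appeared in
+        // only one input, so the inner join dropped those rows entirely.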
+        assert!(points.next().is_none(), "There should be no more points");
+    }
 }
diff --git a/oximeter/db/src/oxql/table.rs b/oximeter/oxql-types/src/table.rs
similarity index 76%
rename from oximeter/db/src/oxql/table.rs
rename to oximeter/oxql-types/src/table.rs
index 2cd141d2fa..85c464268c 100644
--- a/oximeter/db/src/oxql/table.rs
+++ b/oximeter/oxql-types/src/table.rs
@@ -6,14 +6,16 @@
 
 // Copyright 2024 Oxide Computer Company
 
-use super::point::DataType;
-use super::point::MetricType;
-use super::point::Points;
-use super::query::Alignment;
-use super::Error;
+use crate::point::DataType;
+use crate::point::MetricType;
+use crate::point::Points;
+use crate::point::ValueArray;
+use crate::point::Values;
+use crate::Alignment;
 use crate::TimeseriesKey;
+use anyhow::Error;
 use highway::HighwayHasher;
-use oximeter::FieldValue;
+use oximeter_types::FieldValue;
 use schemars::JsonSchema;
 use serde::Deserialize;
 use serde::Serialize;
@@ -67,10 +69,20 @@ impl Timeseries {
         hasher.finish()
     }
 
+    /// Return the alignment of this timeseries, if any.
+    pub fn alignment(&self) -> Option<Alignment> {
+        self.alignment
+    }
+
+    /// Set the alignment of this timeseries.
+    pub fn set_alignment(&mut self, alignment: Alignment) {
+        self.alignment = Some(alignment);
+    }
+
     /// Return a copy of the timeseries, keeping only the provided fields.
     ///
     /// An error is returned if the timeseries does not contain those fields.
-    pub(crate) fn copy_with_fields(
+    pub fn copy_with_fields(
        &self,
        kept_fields: &[&str],
    ) -> Result<Self, Error> {
@@ -88,6 +100,20 @@ impl Timeseries {
         })
     }
 
+    /// Return a copy of the timeseries, keeping only the provided points.
+    ///
+    /// Returns `None` if `kept_points` is empty.
+    pub fn copy_with_points(&self, kept_points: Points) -> Option<Self> {
+        if kept_points.is_empty() {
+            return None;
+        }
+        Some(Self {
+            fields: self.fields.clone(),
+            points: kept_points,
+            alignment: self.alignment,
+        })
+    }
+
     // Return `true` if the schema in `other` matches that of `self`.
     fn matches_schema(&self, other: &Timeseries) -> bool {
         if self.fields.len() != other.fields.len() {
@@ -125,7 +151,7 @@ impl Timeseries {
     /// This returns an error if the points cannot be so cast, or the
     /// dimensionality of the types requested differs from the dimensionality of
     /// the points themselves.
-    pub(crate) fn cast(&self, types: &[DataType]) -> Result<Self, Error> {
+    pub fn cast(&self, types: &[DataType]) -> Result<Self, Error> {
         let fields = self.fields.clone();
         Ok(Self {
             fields,
@@ -133,6 +159,49 @@ impl Timeseries {
             alignment: self.alignment,
         })
     }
+
+    /// Return a new timeseries, with the points limited to the provided range.
+    pub fn limit(&self, start: usize, end: usize) -> Self {
+        let input_points = &self.points;
+
+        // Slice the various data arrays.
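+        // Callers are expected to pass `start..end` within the bounds of
+        // `self.points`; the `[start..end]` slicing below panics otherwise.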
+ let start_times = + input_points.start_times().map(|s| s[start..end].to_vec()); + let timestamps = input_points.timestamps()[start..end].to_vec(); + let values = input_points + .values + .iter() + .map(|vals| { + let values = match &vals.values { + ValueArray::Integer(inner) => { + ValueArray::Integer(inner[start..end].to_vec()) + } + ValueArray::Double(inner) => { + ValueArray::Double(inner[start..end].to_vec()) + } + ValueArray::Boolean(inner) => { + ValueArray::Boolean(inner[start..end].to_vec()) + } + ValueArray::String(inner) => { + ValueArray::String(inner[start..end].to_vec()) + } + ValueArray::IntegerDistribution(inner) => { + ValueArray::IntegerDistribution( + inner[start..end].to_vec(), + ) + } + ValueArray::DoubleDistribution(inner) => { + ValueArray::DoubleDistribution( + inner[start..end].to_vec(), + ) + } + }; + Values { values, metric_type: vals.metric_type } + }) + .collect(); + let points = Points::new(start_times, timestamps, values); + Self { fields: self.fields.clone(), points, alignment: self.alignment } + } } /// A table represents one or more timeseries with the same schema. @@ -146,7 +215,7 @@ pub struct Table { // // This starts as the name of the timeseries schema the data is derived // from, but can be modified as operations are done. - pub(super) name: String, + pub name: String, // The set of timeseries in the table, ordered by key. timeseries: BTreeMap, } diff --git a/oximeter/schema/Cargo.toml b/oximeter/schema/Cargo.toml new file mode 100644 index 0000000000..fe2e28705a --- /dev/null +++ b/oximeter/schema/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "oximeter-schema" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[lints] +workspace = true + +[dependencies] +anyhow.workspace = true +chrono.workspace = true +clap.workspace = true +heck.workspace = true +omicron-workspace-hack.workspace = true +oximeter-types.workspace = true +prettyplease.workspace = true +proc-macro2.workspace = true +quote.workspace = true +schemars.workspace = true +serde.workspace = true +slog-error-chain.workspace = true +syn.workspace = true +toml.workspace = true diff --git a/oximeter/oximeter/src/bin/oximeter-schema.rs b/oximeter/schema/src/bin/oximeter-schema.rs similarity index 93% rename from oximeter/oximeter/src/bin/oximeter-schema.rs rename to oximeter/schema/src/bin/oximeter-schema.rs index 14fb31b1e8..5595a28639 100644 --- a/oximeter/oximeter/src/bin/oximeter-schema.rs +++ b/oximeter/schema/src/bin/oximeter-schema.rs @@ -9,7 +9,7 @@ use anyhow::Context as _; use clap::Parser; use clap::Subcommand; -use oximeter::schema::ir::TimeseriesDefinition; +use oximeter_schema::ir::TimeseriesDefinition; use std::num::NonZeroU8; use std::path::PathBuf; @@ -56,7 +56,7 @@ fn main() -> anyhow::Result<()> { println!("{def:#?}"); } Cmd::Schema { timeseries, version } => { - let schema = oximeter_impl::schema::ir::load_schema(&contents)?; + let schema = oximeter_schema::ir::load_schema(&contents)?; match (timeseries, version) { (None, None) => { for each in schema.into_iter() { @@ -87,7 +87,7 @@ fn main() -> anyhow::Result<()> { } } Cmd::Emit => { - let code = oximeter::schema::codegen::use_timeseries(&contents)?; + let code = oximeter_schema::codegen::use_timeseries(&contents)?; let formatted = prettyplease::unparse(&syn::parse_file(&format!("{code}"))?); println!("{formatted}"); diff --git a/oximeter/impl/src/schema/codegen.rs b/oximeter/schema/src/codegen.rs similarity index 73% rename from oximeter/impl/src/schema/codegen.rs rename to oximeter/schema/src/codegen.rs index 
4778cf4970..0429cf0534 100644 --- a/oximeter/impl/src/schema/codegen.rs +++ b/oximeter/schema/src/codegen.rs @@ -6,18 +6,18 @@ //! Generate Rust types and code from oximeter schema definitions. -use crate::schema::ir::find_schema_version; -use crate::schema::ir::load_schema; -use crate::schema::AuthzScope; -use crate::schema::FieldSource; -use crate::schema::Units; -use crate::DatumType; -use crate::FieldSchema; -use crate::FieldType; -use crate::MetricsError; -use crate::TimeseriesSchema; +use crate::ir::find_schema_version; +use crate::ir::load_schema; use chrono::prelude::DateTime; use chrono::prelude::Utc; +use oximeter_types::AuthzScope; +use oximeter_types::DatumType; +use oximeter_types::FieldSchema; +use oximeter_types::FieldSource; +use oximeter_types::FieldType; +use oximeter_types::MetricsError; +use oximeter_types::TimeseriesSchema; +use oximeter_types::Units; use proc_macro2::TokenStream; use quote::quote; @@ -34,7 +34,7 @@ pub fn use_timeseries(contents: &str) -> Result { let latest = find_schema_version(schema.iter().cloned(), None); let mod_name = quote::format_ident!("{}", latest[0].target_name()); let types = emit_schema_types(latest); - let func = emit_schema_function(schema.into_iter()); + let func = emit_schema_function(schema.iter()); Ok(quote! { pub mod #mod_name { #types @@ -43,9 +43,10 @@ pub fn use_timeseries(contents: &str) -> Result { }) } -fn emit_schema_function( - list: impl Iterator, +fn emit_schema_function<'a>( + list: impl Iterator, ) -> TokenStream { + let list = list.map(quote_timeseries_schema); quote! { pub fn timeseries_schema() -> Vec<::oximeter::schema::TimeseriesSchema> { vec![ @@ -310,66 +311,63 @@ fn emit_one(source: FieldSource, schema: &TimeseriesSchema) -> TokenStream { // This is used so that we can emit a function that will return the same data as // we parse from the TOML file with the timeseries definition, as a way to // export the definitions without needing that actual file at runtime. -impl quote::ToTokens for DatumType { - fn to_tokens(&self, tokens: &mut TokenStream) { - let toks = match self { - DatumType::Bool => quote! { ::oximeter::DatumType::Bool }, - DatumType::I8 => quote! { ::oximeter::DatumType::I8 }, - DatumType::U8 => quote! { ::oximeter::DatumType::U8 }, - DatumType::I16 => quote! { ::oximeter::DatumType::I16 }, - DatumType::U16 => quote! { ::oximeter::DatumType::U16 }, - DatumType::I32 => quote! { ::oximeter::DatumType::I32 }, - DatumType::U32 => quote! { ::oximeter::DatumType::U32 }, - DatumType::I64 => quote! { ::oximeter::DatumType::I64 }, - DatumType::U64 => quote! { ::oximeter::DatumType::U64 }, - DatumType::F32 => quote! { ::oximeter::DatumType::F32 }, - DatumType::F64 => quote! { ::oximeter::DatumType::F64 }, - DatumType::String => quote! { ::oximeter::DatumType::String }, - DatumType::Bytes => quote! { ::oximeter::DatumType::Bytes }, - DatumType::CumulativeI64 => { - quote! { ::oximeter::DatumType::CumulativeI64 } - } - DatumType::CumulativeU64 => { - quote! { ::oximeter::DatumType::CumulativeU64 } - } - DatumType::CumulativeF32 => { - quote! { ::oximeter::DatumType::CumulativeF32 } - } - DatumType::CumulativeF64 => { - quote! { ::oximeter::DatumType::CumulativeF64 } - } - DatumType::HistogramI8 => { - quote! { ::oximeter::DatumType::HistogramI8 } - } - DatumType::HistogramU8 => { - quote! { ::oximeter::DatumType::HistogramU8 } - } - DatumType::HistogramI16 => { - quote! { ::oximeter::DatumType::HistogramI16 } - } - DatumType::HistogramU16 => { - quote! 
{ ::oximeter::DatumType::HistogramU16 } - } - DatumType::HistogramI32 => { - quote! { ::oximeter::DatumType::HistogramI32 } - } - DatumType::HistogramU32 => { - quote! { ::oximeter::DatumType::HistogramU32 } - } - DatumType::HistogramI64 => { - quote! { ::oximeter::DatumType::HistogramI64 } - } - DatumType::HistogramU64 => { - quote! { ::oximeter::DatumType::HistogramU64 } - } - DatumType::HistogramF32 => { - quote! { ::oximeter::DatumType::HistogramF32 } - } - DatumType::HistogramF64 => { - quote! { ::oximeter::DatumType::HistogramF64 } - } - }; - toks.to_tokens(tokens); +fn quote_datum_type(datum_type: DatumType) -> TokenStream { + match datum_type { + DatumType::Bool => quote! { ::oximeter::DatumType::Bool }, + DatumType::I8 => quote! { ::oximeter::DatumType::I8 }, + DatumType::U8 => quote! { ::oximeter::DatumType::U8 }, + DatumType::I16 => quote! { ::oximeter::DatumType::I16 }, + DatumType::U16 => quote! { ::oximeter::DatumType::U16 }, + DatumType::I32 => quote! { ::oximeter::DatumType::I32 }, + DatumType::U32 => quote! { ::oximeter::DatumType::U32 }, + DatumType::I64 => quote! { ::oximeter::DatumType::I64 }, + DatumType::U64 => quote! { ::oximeter::DatumType::U64 }, + DatumType::F32 => quote! { ::oximeter::DatumType::F32 }, + DatumType::F64 => quote! { ::oximeter::DatumType::F64 }, + DatumType::String => quote! { ::oximeter::DatumType::String }, + DatumType::Bytes => quote! { ::oximeter::DatumType::Bytes }, + DatumType::CumulativeI64 => { + quote! { ::oximeter::DatumType::CumulativeI64 } + } + DatumType::CumulativeU64 => { + quote! { ::oximeter::DatumType::CumulativeU64 } + } + DatumType::CumulativeF32 => { + quote! { ::oximeter::DatumType::CumulativeF32 } + } + DatumType::CumulativeF64 => { + quote! { ::oximeter::DatumType::CumulativeF64 } + } + DatumType::HistogramI8 => { + quote! { ::oximeter::DatumType::HistogramI8 } + } + DatumType::HistogramU8 => { + quote! { ::oximeter::DatumType::HistogramU8 } + } + DatumType::HistogramI16 => { + quote! { ::oximeter::DatumType::HistogramI16 } + } + DatumType::HistogramU16 => { + quote! { ::oximeter::DatumType::HistogramU16 } + } + DatumType::HistogramI32 => { + quote! { ::oximeter::DatumType::HistogramI32 } + } + DatumType::HistogramU32 => { + quote! { ::oximeter::DatumType::HistogramU32 } + } + DatumType::HistogramI64 => { + quote! { ::oximeter::DatumType::HistogramI64 } + } + DatumType::HistogramU64 => { + quote! { ::oximeter::DatumType::HistogramU64 } + } + DatumType::HistogramF32 => { + quote! { ::oximeter::DatumType::HistogramF32 } + } + DatumType::HistogramF64 => { + quote! { ::oximeter::DatumType::HistogramF64 } + } } } @@ -452,55 +450,46 @@ fn emit_rust_type_for_field(field_type: FieldType) -> TokenStream { } } -impl quote::ToTokens for FieldSource { - fn to_tokens(&self, tokens: &mut TokenStream) { - let toks = match self { - FieldSource::Target => { - quote! { ::oximeter::schema::FieldSource::Target } - } - FieldSource::Metric => { - quote! { ::oximeter::schema::FieldSource::Metric } - } - }; - toks.to_tokens(tokens); +fn quote_field_source(source: FieldSource) -> TokenStream { + match source { + FieldSource::Target => { + quote! { ::oximeter::schema::FieldSource::Target } + } + FieldSource::Metric => { + quote! { ::oximeter::schema::FieldSource::Metric } + } } } -impl quote::ToTokens for FieldType { - fn to_tokens(&self, tokens: &mut TokenStream) { - let toks = match self { - FieldType::String => quote! { ::oximeter::FieldType::String }, - FieldType::I8 => quote! { ::oximeter::FieldType::I8 }, - FieldType::U8 => quote! 
{ ::oximeter::FieldType::U8 }, - FieldType::I16 => quote! { ::oximeter::FieldType::I16 }, - FieldType::U16 => quote! { ::oximeter::FieldType::U16 }, - FieldType::I32 => quote! { ::oximeter::FieldType::I32 }, - FieldType::U32 => quote! { ::oximeter::FieldType::U32 }, - FieldType::I64 => quote! { ::oximeter::FieldType::I64 }, - FieldType::U64 => quote! { ::oximeter::FieldType::U64 }, - FieldType::IpAddr => quote! { ::oximeter::FieldType::IpAddr }, - FieldType::Uuid => quote! { ::oximeter::FieldType::Uuid }, - FieldType::Bool => quote! { ::oximeter::FieldType::Bool }, - }; - toks.to_tokens(tokens); +fn quote_field_type(field_type: FieldType) -> TokenStream { + match field_type { + FieldType::String => quote! { ::oximeter::FieldType::String }, + FieldType::I8 => quote! { ::oximeter::FieldType::I8 }, + FieldType::U8 => quote! { ::oximeter::FieldType::U8 }, + FieldType::I16 => quote! { ::oximeter::FieldType::I16 }, + FieldType::U16 => quote! { ::oximeter::FieldType::U16 }, + FieldType::I32 => quote! { ::oximeter::FieldType::I32 }, + FieldType::U32 => quote! { ::oximeter::FieldType::U32 }, + FieldType::I64 => quote! { ::oximeter::FieldType::I64 }, + FieldType::U64 => quote! { ::oximeter::FieldType::U64 }, + FieldType::IpAddr => quote! { ::oximeter::FieldType::IpAddr }, + FieldType::Uuid => quote! { ::oximeter::FieldType::Uuid }, + FieldType::Bool => quote! { ::oximeter::FieldType::Bool }, } } -impl quote::ToTokens for AuthzScope { - fn to_tokens(&self, tokens: &mut TokenStream) { - let toks = match self { - AuthzScope::Fleet => { - quote! { ::oximeter::schema::AuthzScope::Fleet } - } - AuthzScope::Silo => quote! { ::oximeter::schema::AuthzScope::Silo }, - AuthzScope::Project => { - quote! { ::oximeter::schema::AuthzScope::Project } - } - AuthzScope::ViewableToAll => { - quote! { ::oximeter::schema::AuthzScope::ViewableToAll } - } - }; - toks.to_tokens(tokens); +fn quote_authz_scope(authz_scope: AuthzScope) -> TokenStream { + match authz_scope { + AuthzScope::Fleet => { + quote! { ::oximeter::schema::AuthzScope::Fleet } + } + AuthzScope::Silo => quote! { ::oximeter::schema::AuthzScope::Silo }, + AuthzScope::Project => { + quote! { ::oximeter::schema::AuthzScope::Project } + } + AuthzScope::ViewableToAll => { + quote! { ::oximeter::schema::AuthzScope::ViewableToAll } + } } } @@ -512,85 +501,79 @@ fn quote_creation_time(created: DateTime) -> TokenStream { } } -impl quote::ToTokens for Units { - fn to_tokens(&self, tokens: &mut TokenStream) { - let toks = match self { - Units::None => quote! { ::oximeter::schema::Units::None }, - Units::Count => quote! { ::oximeter::schema::Units::Count }, - Units::Bytes => quote! { ::oximeter::schema::Units::Bytes }, - Units::Seconds => quote! { ::oximeter::schema::Units::Seconds }, - Units::Nanoseconds => { - quote! { ::oximeter::schema::Units::Nanoseconds } - } - Units::Amps => quote! { ::oximeter::schema::Units::Amps }, - Units::Volts => quote! { ::oximeter::schema::Units::Volts }, - Units::DegreesCelcius => { - quote! { ::oximeter::schema::Units::DegreesCelcius } - } - Units::Rpm => quote! { ::oximeter::schema::Units::Rpm }, - }; - toks.to_tokens(tokens); +fn quote_units(units: Units) -> TokenStream { + match units { + Units::None => quote! { ::oximeter::schema::Units::None }, + Units::Count => quote! { ::oximeter::schema::Units::Count }, + Units::Bytes => quote! { ::oximeter::schema::Units::Bytes }, + Units::Seconds => quote! { ::oximeter::schema::Units::Seconds }, + Units::Nanoseconds => { + quote! 
{ ::oximeter::schema::Units::Nanoseconds } + } + Units::Amps => quote! { ::oximeter::schema::Units::Amps }, + Units::Volts => quote! { ::oximeter::schema::Units::Volts }, + Units::DegreesCelcius => { + quote! { ::oximeter::schema::Units::DegreesCelcius } + } + Units::Rpm => quote! { ::oximeter::schema::Units::Rpm }, } } -impl quote::ToTokens for FieldSchema { - fn to_tokens(&self, tokens: &mut TokenStream) { - let name = self.name.as_str(); - let field_type = self.field_type; - let source = self.source; - let description = self.description.as_str(); - let toks = quote! { - ::oximeter::FieldSchema { - name: String::from(#name), - field_type: #field_type, - source: #source, - description: String::from(#description), - } - }; - toks.to_tokens(tokens); +fn quote_field_schema(field_schema: &FieldSchema) -> TokenStream { + let name = field_schema.name.as_str(); + let field_type = quote_field_type(field_schema.field_type); + let source = quote_field_source(field_schema.source); + let description = field_schema.description.as_str(); + quote! { + ::oximeter::FieldSchema { + name: String::from(#name), + field_type: #field_type, + source: #source, + description: String::from(#description), + } } } -impl quote::ToTokens for TimeseriesSchema { - fn to_tokens(&self, tokens: &mut TokenStream) { - let field_schema = &self.field_schema; - let timeseries_name = self.timeseries_name.to_string(); - let target_description = self.description.target.as_str(); - let metric_description = self.description.metric.as_str(); - let authz_scope = self.authz_scope; - let units = self.units; - let datum_type = self.datum_type; - let ver = self.version.get(); - let version = quote! { ::core::num::NonZeroU8::new(#ver).unwrap() }; - let created = quote_creation_time(self.created); - let toks = quote! { - ::oximeter::schema::TimeseriesSchema { - timeseries_name: - <::oximeter::TimeseriesName as ::std::convert::TryFrom<&str>>::try_from( - #timeseries_name - ).unwrap(), - description: ::oximeter::schema::TimeseriesDescription { - target: String::from(#target_description), - metric: String::from(#metric_description), - }, - authz_scope: #authz_scope, - units: #units, - field_schema: ::std::collections::BTreeSet::from([ - #(#field_schema),* - ]), - datum_type: #datum_type, - version: #version, - created: #created, - } - }; - toks.to_tokens(tokens); +fn quote_timeseries_schema( + timeseries_schema: &TimeseriesSchema, +) -> TokenStream { + let field_schema = + timeseries_schema.field_schema.iter().map(quote_field_schema); + let timeseries_name = timeseries_schema.timeseries_name.to_string(); + let target_description = timeseries_schema.description.target.as_str(); + let metric_description = timeseries_schema.description.metric.as_str(); + let authz_scope = quote_authz_scope(timeseries_schema.authz_scope); + let units = quote_units(timeseries_schema.units); + let datum_type = quote_datum_type(timeseries_schema.datum_type); + let ver = timeseries_schema.version.get(); + let version = quote! { ::core::num::NonZeroU8::new(#ver).unwrap() }; + let created = quote_creation_time(timeseries_schema.created); + quote! 
{ + ::oximeter::schema::TimeseriesSchema { + timeseries_name: + <::oximeter::TimeseriesName as ::std::convert::TryFrom<&str>>::try_from( + #timeseries_name + ).unwrap(), + description: ::oximeter::schema::TimeseriesDescription { + target: String::from(#target_description), + metric: String::from(#metric_description), + }, + authz_scope: #authz_scope, + units: #units, + field_schema: ::std::collections::BTreeSet::from([ + #(#field_schema),* + ]), + datum_type: #datum_type, + version: #version, + created: #created, + } } } #[cfg(test)] mod tests { use super::*; - use crate::schema::TimeseriesDescription; + use oximeter_types::TimeseriesDescription; use std::{collections::BTreeSet, num::NonZeroU8}; #[test] diff --git a/oximeter/impl/src/schema/ir.rs b/oximeter/schema/src/ir.rs similarity index 99% rename from oximeter/impl/src/schema/ir.rs rename to oximeter/schema/src/ir.rs index f7a209294f..370236000a 100644 --- a/oximeter/impl/src/schema/ir.rs +++ b/oximeter/schema/src/ir.rs @@ -11,17 +11,17 @@ //! inspected or used to generate code that contains the equivalent Rust types //! and trait implementations. -use crate::schema::AuthzScope; -use crate::schema::DatumType; -use crate::schema::FieldSource; -use crate::schema::FieldType; -use crate::schema::TimeseriesDescription; -use crate::schema::Units; -use crate::FieldSchema; -use crate::MetricsError; -use crate::TimeseriesName; -use crate::TimeseriesSchema; use chrono::Utc; +use oximeter_types::AuthzScope; +use oximeter_types::DatumType; +use oximeter_types::FieldSchema; +use oximeter_types::FieldSource; +use oximeter_types::FieldType; +use oximeter_types::MetricsError; +use oximeter_types::TimeseriesDescription; +use oximeter_types::TimeseriesName; +use oximeter_types::TimeseriesSchema; +use oximeter_types::Units; use serde::Deserialize; use std::collections::btree_map::Entry; use std::collections::BTreeMap; diff --git a/oximeter/schema/src/lib.rs b/oximeter/schema/src/lib.rs new file mode 100644 index 0000000000..b1ce73a940 --- /dev/null +++ b/oximeter/schema/src/lib.rs @@ -0,0 +1,12 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +// Copyright 2024 Oxide Computer Company + +//! Tools for working with schemas for fields and timeseries. +//! +//! The actual schema type definitions are in [`oximeter_types::schema`]. + +pub mod codegen; +pub mod ir; diff --git a/oximeter/test-utils/Cargo.toml b/oximeter/test-utils/Cargo.toml new file mode 100644 index 0000000000..f463e74aca --- /dev/null +++ b/oximeter/test-utils/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "oximeter-test-utils" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[lints] +workspace = true + +[dependencies] +chrono.workspace = true +omicron-workspace-hack.workspace = true +oximeter-macro-impl.workspace = true +oximeter-types.workspace = true +uuid.workspace = true diff --git a/oximeter/test-utils/src/lib.rs b/oximeter/test-utils/src/lib.rs new file mode 100644 index 0000000000..04c49add65 --- /dev/null +++ b/oximeter/test-utils/src/lib.rs @@ -0,0 +1,295 @@ +// Copyright 2024 Oxide Computer Company + +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Utilities for testing the oximeter crate. + +// Export the current crate as `oximeter`. 
The macros defined in `oximeter-macro-impl` generate +// code referring to symbols like `oximeter::traits::Target`. In consumers of this crate, that's +// fine, but internally there _is_ no crate named `oximeter`, it's just `self` or `crate`. +// +// See https://github.com/rust-lang/rust/pull/55275 for the PR introducing this fix, which links to +// lots of related issues and discussion. +extern crate self as oximeter; + +use oximeter_macro_impl::{Metric, Target}; +use oximeter_types::histogram; +use oximeter_types::histogram::{Histogram, Record}; +use oximeter_types::traits; +use oximeter_types::types::{ + Cumulative, Datum, DatumType, FieldType, FieldValue, Measurement, Sample, +}; +use oximeter_types::{Metric, Target}; +use uuid::Uuid; + +#[derive(Target)] +pub struct TestTarget { + pub name1: String, + pub name2: String, + pub num: i64, +} + +impl Default for TestTarget { + fn default() -> Self { + TestTarget { + name1: "first_name".into(), + name2: "second_name".into(), + num: 0, + } + } +} + +#[derive(Metric)] +pub struct TestMetric { + pub id: Uuid, + pub good: bool, + pub datum: i64, +} + +#[derive(Metric)] +pub struct TestCumulativeMetric { + pub id: Uuid, + pub good: bool, + pub datum: Cumulative, +} + +#[derive(Metric)] +pub struct TestHistogram { + pub id: Uuid, + pub good: bool, + pub datum: Histogram, +} + +const ID: Uuid = uuid::uuid!("e00ced4d-39d1-446a-ae85-a67f05c9750b"); + +pub fn make_sample() -> Sample { + let target = TestTarget::default(); + let metric = TestMetric { id: ID, good: true, datum: 1 }; + Sample::new(&target, &metric).unwrap() +} + +pub fn make_missing_sample() -> Sample { + let target = TestTarget::default(); + let metric = TestMetric { id: ID, good: true, datum: 1 }; + Sample::new_missing(&target, &metric).unwrap() +} + +pub fn make_hist_sample() -> Sample { + let target = TestTarget::default(); + let mut hist = histogram::Histogram::new(&[0.0, 5.0, 10.0]).unwrap(); + hist.sample(1.0).unwrap(); + hist.sample(2.0).unwrap(); + hist.sample(6.0).unwrap(); + let metric = TestHistogram { id: ID, good: true, datum: hist }; + Sample::new(&target, &metric).unwrap() +} + +/// A target identifying a single virtual machine instance +#[derive(Debug, Clone, Copy, oximeter::Target)] +pub struct VirtualMachine { + pub project_id: Uuid, + pub instance_id: Uuid, +} + +/// A metric recording the total time a vCPU is busy, by its ID +#[derive(Debug, Clone, Copy, oximeter::Metric)] +pub struct CpuBusy { + cpu_id: i64, + datum: Cumulative, +} + +pub fn generate_test_samples( + n_projects: usize, + n_instances: usize, + n_cpus: usize, + n_samples: usize, +) -> Vec { + let n_timeseries = n_projects * n_instances * n_cpus; + let mut samples = Vec::with_capacity(n_samples * n_timeseries); + for _ in 0..n_projects { + let project_id = Uuid::new_v4(); + for _ in 0..n_instances { + let vm = VirtualMachine { project_id, instance_id: Uuid::new_v4() }; + for cpu in 0..n_cpus { + for sample in 0..n_samples { + let cpu_busy = CpuBusy { + cpu_id: cpu as _, + datum: Cumulative::new(sample as f64), + }; + let sample = Sample::new(&vm, &cpu_busy).unwrap(); + samples.push(sample); + } + } + } + } + samples +} + +#[cfg(test)] +mod tests { + use chrono::Utc; + use oximeter_types::{ + schema::{ + default_schema_version, AuthzScope, FieldSchema, FieldSource, + TimeseriesSchema, Units, + }, + TimeseriesName, + }; + + use super::*; + + #[test] + fn test_gen_test_samples() { + let (n_projects, n_instances, n_cpus, n_samples) = (2, 2, 2, 2); + let samples = + generate_test_samples(n_projects, 
n_instances, n_cpus, n_samples); + assert_eq!( + samples.len(), + n_projects * n_instances * n_cpus * n_samples + ); + } + + #[test] + fn test_sample_struct() { + let t = TestTarget::default(); + let m = TestMetric { id: Uuid::new_v4(), good: true, datum: 1i64 }; + let sample = Sample::new(&t, &m).unwrap(); + assert_eq!( + sample.timeseries_name, + format!("{}:{}", t.name(), m.name()) + ); + assert!(sample.measurement.start_time().is_none()); + assert_eq!(sample.measurement.datum(), &Datum::from(1i64)); + + let m = TestCumulativeMetric { + id: Uuid::new_v4(), + good: true, + datum: 1i64.into(), + }; + let sample = Sample::new(&t, &m).unwrap(); + assert!(sample.measurement.start_time().is_some()); + } + + #[derive(Target)] + struct MyTarget { + id: Uuid, + name: String, + } + + const ID: Uuid = uuid::uuid!("ca565ef4-65dc-4ab0-8622-7be43ed72105"); + + impl Default for MyTarget { + fn default() -> Self { + Self { id: ID, name: String::from("name") } + } + } + + #[derive(Metric)] + struct MyMetric { + happy: bool, + datum: u64, + } + + impl Default for MyMetric { + fn default() -> Self { + Self { happy: true, datum: 0 } + } + } + + #[test] + fn test_timeseries_schema_from_parts() { + let target = MyTarget::default(); + let metric = MyMetric::default(); + let schema = TimeseriesSchema::new(&target, &metric).unwrap(); + + assert_eq!(schema.timeseries_name, "my_target:my_metric"); + let f = schema.schema_for_field("id").unwrap(); + assert_eq!(f.name, "id"); + assert_eq!(f.field_type, FieldType::Uuid); + assert_eq!(f.source, FieldSource::Target); + + let f = schema.schema_for_field("name").unwrap(); + assert_eq!(f.name, "name"); + assert_eq!(f.field_type, FieldType::String); + assert_eq!(f.source, FieldSource::Target); + + let f = schema.schema_for_field("happy").unwrap(); + assert_eq!(f.name, "happy"); + assert_eq!(f.field_type, FieldType::Bool); + assert_eq!(f.source, FieldSource::Metric); + assert_eq!(schema.datum_type, DatumType::U64); + } + + #[test] + fn test_timeseries_schema_from_sample() { + let target = MyTarget::default(); + let metric = MyMetric::default(); + let sample = Sample::new(&target, &metric).unwrap(); + let schema = TimeseriesSchema::new(&target, &metric).unwrap(); + let schema_from_sample = TimeseriesSchema::from(&sample); + assert_eq!(schema, schema_from_sample); + } + + // Test that we correctly order field across a target and metric. + // + // In an earlier commit, we switched from storing fields in an unordered Vec + // to using a BTree{Map,Set} to ensure ordering by name. However, the + // `TimeseriesSchema` type stored all its fields by chaining the sorted + // fields from the target and metric, without then sorting _across_ them. + // + // This was exacerbated by the error reporting, where we did in fact sort + // all fields across the target and metric, making it difficult to tell how + // the derived schema was different, if at all. + // + // This test generates a sample with a schema where the target and metric + // fields are sorted within them, but not across them. We check that the + // derived schema are actually equal, which means we've imposed that + // ordering when deriving the schema. 
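+    //
+    // Concretely: the target contributes a field named "later" and the
+    // metric a field named "earlier"; the derived schema must order them as
+    // ["earlier", "later"], not grouped by source.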
+ #[test] + fn test_schema_field_ordering_across_target_metric() { + let target_field = FieldSchema { + name: String::from("later"), + field_type: FieldType::U64, + source: FieldSource::Target, + description: String::new(), + }; + let metric_field = FieldSchema { + name: String::from("earlier"), + field_type: FieldType::U64, + source: FieldSource::Metric, + description: String::new(), + }; + let timeseries_name: TimeseriesName = "foo:bar".parse().unwrap(); + let datum_type = DatumType::U64; + let field_schema = + [target_field.clone(), metric_field.clone()].into_iter().collect(); + let expected_schema = TimeseriesSchema { + timeseries_name, + description: Default::default(), + field_schema, + datum_type, + version: default_schema_version(), + authz_scope: AuthzScope::Fleet, + units: Units::Count, + created: Utc::now(), + }; + + #[derive(oximeter::Target)] + struct Foo { + later: u64, + } + #[derive(oximeter::Metric)] + struct Bar { + earlier: u64, + datum: u64, + } + + let target = Foo { later: 1 }; + let metric = Bar { earlier: 2, datum: 10 }; + let sample = Sample::new(&target, &metric).unwrap(); + let derived_schema = TimeseriesSchema::from(&sample); + assert_eq!(derived_schema, expected_schema); + } +} diff --git a/oximeter/timeseries-macro/Cargo.toml b/oximeter/timeseries-macro/Cargo.toml index db591aed06..2fb8b8f312 100644 --- a/oximeter/timeseries-macro/Cargo.toml +++ b/oximeter/timeseries-macro/Cargo.toml @@ -8,7 +8,8 @@ proc-macro = true [dependencies] omicron-workspace-hack.workspace = true -oximeter-impl.workspace = true +oximeter-schema.workspace = true +oximeter-types.workspace = true proc-macro2.workspace = true quote.workspace = true syn.workspace = true diff --git a/oximeter/timeseries-macro/src/lib.rs b/oximeter/timeseries-macro/src/lib.rs index 317a8533a4..f3ef164e0e 100644 --- a/oximeter/timeseries-macro/src/lib.rs +++ b/oximeter/timeseries-macro/src/lib.rs @@ -8,7 +8,7 @@ extern crate proc_macro; -use oximeter_impl::schema::SCHEMA_DIRECTORY; +use oximeter_types::schema::SCHEMA_DIRECTORY; /// Generate code to use the timeseries from one target. /// @@ -45,7 +45,7 @@ pub fn use_timeseries( .into(); } }; - match oximeter_impl::schema::codegen::use_timeseries(&contents) { + match oximeter_schema::codegen::use_timeseries(&contents) { Ok(toks) => { let path_ = path.display().to_string(); return quote::quote! 
{ diff --git a/oximeter/impl/Cargo.toml b/oximeter/types/Cargo.toml similarity index 78% rename from oximeter/impl/Cargo.toml rename to oximeter/types/Cargo.toml index 91277d9d47..6d6bbc07e6 100644 --- a/oximeter/impl/Cargo.toml +++ b/oximeter/types/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "oximeter-impl" +name = "oximeter-types" version = "0.1.0" edition = "2021" license = "MPL-2.0" @@ -11,22 +11,13 @@ workspace = true bytes = { workspace = true, features = [ "serde" ] } chrono.workspace = true float-ord.workspace = true -heck.workspace = true num.workspace = true omicron-common.workspace = true omicron-workspace-hack.workspace = true -oximeter-macro-impl.workspace = true -prettyplease.workspace = true -proc-macro2.workspace = true -quote.workspace = true regex.workspace = true schemars = { workspace = true, features = [ "uuid1", "bytes", "chrono" ] } serde.workspace = true -serde_json.workspace = true -slog-error-chain.workspace = true strum.workspace = true -syn.workspace = true -toml.workspace = true thiserror.workspace = true uuid.workspace = true @@ -34,6 +25,7 @@ uuid.workspace = true approx.workspace = true # For benchmark criterion.workspace = true +oximeter-macro-impl.workspace = true rand = { workspace = true, features = ["std_rng"] } rand_distr.workspace = true rstest.workspace = true diff --git a/oximeter/impl/benches/quantile.rs b/oximeter/types/benches/quantile.rs similarity index 97% rename from oximeter/impl/benches/quantile.rs rename to oximeter/types/benches/quantile.rs index 4540ba8f6a..b88cb211e6 100644 --- a/oximeter/impl/benches/quantile.rs +++ b/oximeter/types/benches/quantile.rs @@ -8,7 +8,7 @@ // Copyright 2024 Oxide Computer Company use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; -use oximeter_impl::Quantile; +use oximeter_types::Quantile; use rand_distr::{Distribution, Normal}; /// Emulates baseline code in a Python implementation of the P² diff --git a/oximeter/impl/src/histogram.rs b/oximeter/types/src/histogram.rs similarity index 99% rename from oximeter/impl/src/histogram.rs rename to oximeter/types/src/histogram.rs index 40df0a1b41..0b85727ee0 100644 --- a/oximeter/impl/src/histogram.rs +++ b/oximeter/types/src/histogram.rs @@ -523,9 +523,9 @@ where /// Example /// ------- /// ```rust - /// # // Rename the impl crate so the doctests can refer to the public + /// # // Rename the types crate so the doctests can refer to the public /// # // `oximeter` crate, not the private impl. - /// # use oximeter_impl as oximeter; + /// # use oximeter_types as oximeter; /// use oximeter::histogram::Histogram; /// /// let hist = Histogram::with_bins(&[(0..10).into(), (10..100).into()]).unwrap(); @@ -922,9 +922,9 @@ where /// ------- /// /// ```rust - /// # // Rename the impl crate so the doctests can refer to the public + /// # // Rename the types crate so the doctests can refer to the public /// # // `oximeter` crate, not the private impl. - /// # use oximeter_impl as oximeter; + /// # use oximeter_types as oximeter; /// use oximeter::histogram::{Histogram, BinRange}; /// use std::ops::{RangeBounds, Bound}; /// diff --git a/oximeter/impl/src/lib.rs b/oximeter/types/src/lib.rs similarity index 92% rename from oximeter/impl/src/lib.rs rename to oximeter/types/src/lib.rs index 5acbeb9422..7a1a480f8d 100644 --- a/oximeter/impl/src/lib.rs +++ b/oximeter/types/src/lib.rs @@ -4,8 +4,6 @@ // Copyright 2024 Oxide Computer Company -pub use oximeter_macro_impl::*; - // Export the current crate as `oximeter`. 
The macros defined in `oximeter-macro-impl` generate // code referring to symbols like `oximeter::traits::Target`. In consumers of this crate, that's // fine, but internally there _is_ no crate named `oximeter`, it's just `self` or `crate`. @@ -17,15 +15,18 @@ extern crate self as oximeter; pub mod histogram; pub mod quantile; pub mod schema; -pub mod test_util; pub mod traits; pub mod types; pub use quantile::Quantile; pub use quantile::QuantileError; +pub use schema::AuthzScope; pub use schema::FieldSchema; +pub use schema::FieldSource; +pub use schema::TimeseriesDescription; pub use schema::TimeseriesName; pub use schema::TimeseriesSchema; +pub use schema::Units; pub use traits::Metric; pub use traits::Producer; pub use traits::Target; diff --git a/oximeter/impl/src/quantile.rs b/oximeter/types/src/quantile.rs similarity index 97% rename from oximeter/impl/src/quantile.rs rename to oximeter/types/src/quantile.rs index fafe9c9ece..40777217e5 100644 --- a/oximeter/impl/src/quantile.rs +++ b/oximeter/types/src/quantile.rs @@ -78,9 +78,9 @@ impl Quantile { /// # Examples /// /// ``` - /// # // Rename the impl crate so the doctests can refer to the public + /// # // Rename the types crate so the doctests can refer to the public /// # // `oximeter` crate, not the private impl. - /// # use oximeter_impl as oximeter; + /// # use oximeter_types as oximeter; /// use oximeter::Quantile; /// let q = Quantile::new(0.5).unwrap(); /// @@ -116,9 +116,9 @@ impl Quantile { /// /// # Examples /// ``` - /// # // Rename the impl crate so the doctests can refer to the public + /// # // Rename the types crate so the doctests can refer to the public /// # // `oximeter` crate, not the private impl. - /// # use oximeter_impl as oximeter; + /// # use oximeter_types as oximeter; /// use oximeter::Quantile; /// let q = Quantile::from_parts( /// 0.5, @@ -200,9 +200,9 @@ impl Quantile { /// # Examples /// /// ``` - /// # // Rename the impl crate so the doctests can refer to the public + /// # // Rename the types crate so the doctests can refer to the public /// # // `oximeter` crate, not the private impl. - /// # use oximeter_impl as oximeter; + /// # use oximeter_types as oximeter; /// use oximeter::Quantile; /// let mut q = Quantile::new(0.5).unwrap(); /// for o in 1..=100 { @@ -243,9 +243,9 @@ impl Quantile { /// # Examples /// /// ``` - /// # // Rename the impl crate so the doctests can refer to the public + /// # // Rename the types crate so the doctests can refer to the public /// # // `oximeter` crate, not the private impl. - /// # use oximeter_impl as oximeter; + /// # use oximeter_types as oximeter; /// use oximeter::Quantile; /// let mut q = Quantile::new(0.9).unwrap(); /// q.append(10).unwrap(); diff --git a/oximeter/impl/src/schema/mod.rs b/oximeter/types/src/schema.rs similarity index 75% rename from oximeter/impl/src/schema/mod.rs rename to oximeter/types/src/schema.rs index 250604d7be..2efd5265ff 100644 --- a/oximeter/impl/src/schema/mod.rs +++ b/oximeter/types/src/schema.rs @@ -6,9 +6,6 @@ //! Tools for working with schema for fields and timeseries. 
-pub mod codegen; -pub mod ir; - use crate::types::DatumType; use crate::types::FieldType; use crate::types::MetricsError; @@ -402,7 +399,6 @@ pub enum AuthzScope { mod tests { use super::*; use std::convert::TryFrom; - use uuid::Uuid; #[test] fn test_timeseries_name() { @@ -426,127 +422,6 @@ mod tests { assert!(TimeseriesName::try_from("x.a:b").is_err()); } - #[derive(Target)] - struct MyTarget { - id: Uuid, - name: String, - } - - const ID: Uuid = uuid::uuid!("ca565ef4-65dc-4ab0-8622-7be43ed72105"); - - impl Default for MyTarget { - fn default() -> Self { - Self { id: ID, name: String::from("name") } - } - } - - #[derive(Metric)] - struct MyMetric { - happy: bool, - datum: u64, - } - - impl Default for MyMetric { - fn default() -> Self { - Self { happy: true, datum: 0 } - } - } - - #[test] - fn test_timeseries_schema_from_parts() { - let target = MyTarget::default(); - let metric = MyMetric::default(); - let schema = TimeseriesSchema::new(&target, &metric).unwrap(); - - assert_eq!(schema.timeseries_name, "my_target:my_metric"); - let f = schema.schema_for_field("id").unwrap(); - assert_eq!(f.name, "id"); - assert_eq!(f.field_type, FieldType::Uuid); - assert_eq!(f.source, FieldSource::Target); - - let f = schema.schema_for_field("name").unwrap(); - assert_eq!(f.name, "name"); - assert_eq!(f.field_type, FieldType::String); - assert_eq!(f.source, FieldSource::Target); - - let f = schema.schema_for_field("happy").unwrap(); - assert_eq!(f.name, "happy"); - assert_eq!(f.field_type, FieldType::Bool); - assert_eq!(f.source, FieldSource::Metric); - assert_eq!(schema.datum_type, DatumType::U64); - } - - #[test] - fn test_timeseries_schema_from_sample() { - let target = MyTarget::default(); - let metric = MyMetric::default(); - let sample = Sample::new(&target, &metric).unwrap(); - let schema = TimeseriesSchema::new(&target, &metric).unwrap(); - let schema_from_sample = TimeseriesSchema::from(&sample); - assert_eq!(schema, schema_from_sample); - } - - // Test that we correctly order field across a target and metric. - // - // In an earlier commit, we switched from storing fields in an unordered Vec - // to using a BTree{Map,Set} to ensure ordering by name. However, the - // `TimeseriesSchema` type stored all its fields by chaining the sorted - // fields from the target and metric, without then sorting _across_ them. - // - // This was exacerbated by the error reporting, where we did in fact sort - // all fields across the target and metric, making it difficult to tell how - // the derived schema was different, if at all. - // - // This test generates a sample with a schema where the target and metric - // fields are sorted within them, but not across them. We check that the - // derived schema are actually equal, which means we've imposed that - // ordering when deriving the schema. 
- #[test] - fn test_schema_field_ordering_across_target_metric() { - let target_field = FieldSchema { - name: String::from("later"), - field_type: FieldType::U64, - source: FieldSource::Target, - description: String::new(), - }; - let metric_field = FieldSchema { - name: String::from("earlier"), - field_type: FieldType::U64, - source: FieldSource::Metric, - description: String::new(), - }; - let timeseries_name: TimeseriesName = "foo:bar".parse().unwrap(); - let datum_type = DatumType::U64; - let field_schema = - [target_field.clone(), metric_field.clone()].into_iter().collect(); - let expected_schema = TimeseriesSchema { - timeseries_name, - description: Default::default(), - field_schema, - datum_type, - version: default_schema_version(), - authz_scope: AuthzScope::Fleet, - units: Units::Count, - created: Utc::now(), - }; - - #[derive(oximeter::Target)] - struct Foo { - later: u64, - } - #[derive(oximeter::Metric)] - struct Bar { - earlier: u64, - datum: u64, - } - - let target = Foo { later: 1 }; - let metric = Bar { earlier: 2, datum: 10 }; - let sample = Sample::new(&target, &metric).unwrap(); - let derived_schema = TimeseriesSchema::from(&sample); - assert_eq!(derived_schema, expected_schema); - } - #[test] fn test_field_schema_ordering() { let mut fields = BTreeSet::new(); diff --git a/oximeter/impl/src/traits.rs b/oximeter/types/src/traits.rs similarity index 96% rename from oximeter/impl/src/traits.rs rename to oximeter/types/src/traits.rs index 16baa4f619..91ecca817d 100644 --- a/oximeter/impl/src/traits.rs +++ b/oximeter/types/src/traits.rs @@ -45,9 +45,9 @@ use std::ops::AddAssign; /// -------- /// /// ```rust -/// # // Rename the impl crate so the doctests can refer to the public +/// # // Rename the types crate so the doctests can refer to the public /// # // `oximeter` crate, not the private impl. -/// # extern crate oximeter_impl as oximeter; +/// # extern crate oximeter_types as oximeter; /// # use oximeter_macro_impl::*; /// use oximeter::{traits::Target, types::FieldType}; /// use uuid::Uuid; @@ -75,9 +75,9 @@ use std::ops::AddAssign; /// supported types. /// /// ```compile_fail -/// # // Rename the impl crate so the doctests can refer to the public +/// # // Rename the types crate so the doctests can refer to the public /// # // `oximeter` crate, not the private impl. -/// # extern crate oximeter_impl as oximeter; +/// # extern crate oximeter_types as oximeter; /// # use oximeter_macro_impl::*; /// #[derive(oximeter::Target)] /// struct Bad { @@ -160,9 +160,9 @@ pub trait Target { /// Example /// ------- /// ```rust -/// # // Rename the impl crate so the doctests can refer to the public +/// # // Rename the types crate so the doctests can refer to the public /// # // `oximeter` crate, not the private impl. -/// # extern crate oximeter_impl as oximeter; +/// # extern crate oximeter_types as oximeter; /// # use oximeter_macro_impl::*; /// use chrono::Utc; /// use oximeter::Metric; @@ -185,9 +185,9 @@ pub trait Target { /// an unsupported type. /// /// ```compile_fail -/// # // Rename the impl crate so the doctests can refer to the public +/// # // Rename the types crate so the doctests can refer to the public /// # // `oximeter` crate, not the private impl. 
-/// # extern crate oximeter_impl as oximeter; +/// # extern crate oximeter_types as oximeter; /// # use oximeter_macro_impl::*; /// #[derive(Metric)] /// pub struct BadType { @@ -364,9 +364,9 @@ pub use crate::histogram::HistogramSupport; /// Example /// ------- /// ```rust -/// # // Rename the impl crate so the doctests can refer to the public +/// # // Rename the types crate so the doctests can refer to the public /// # // `oximeter` crate, not the private impl. -/// # extern crate oximeter_impl as oximeter; +/// # extern crate oximeter_types as oximeter; /// # use oximeter_macro_impl::*; /// use oximeter::{Datum, MetricsError, Metric, Producer, Target}; /// use oximeter::types::{Measurement, Sample, Cumulative}; @@ -464,6 +464,8 @@ pub trait Producer: Send + Sync + std::fmt::Debug + 'static { #[cfg(test)] mod tests { + use oximeter_macro_impl::{Metric, Target}; + use crate::types; use crate::{ Datum, DatumType, FieldType, FieldValue, Metric, MetricsError, diff --git a/oximeter/impl/src/types.rs b/oximeter/types/src/types.rs similarity index 97% rename from oximeter/impl/src/types.rs rename to oximeter/types/src/types.rs index 370557f7f7..60260e3649 100644 --- a/oximeter/impl/src/types.rs +++ b/oximeter/types/src/types.rs @@ -850,7 +850,7 @@ pub struct Sample { /// The version of the timeseries this sample belongs to // // TODO-cleanup: This should be removed once schema are tracked in CRDB. - #[serde(default = "::oximeter::schema::default_schema_version")] + #[serde(default = "crate::schema::default_schema_version")] pub timeseries_version: NonZeroU8, // Target name and fields @@ -1104,15 +1104,10 @@ mod tests { use super::Measurement; use super::MetricsError; use super::Sample; - use crate::test_util; - use crate::types; - use crate::Metric; - use crate::Target; use bytes::Bytes; use std::collections::BTreeMap; use std::net::Ipv4Addr; use std::net::Ipv6Addr; - use uuid::Uuid; #[test] fn test_cumulative_i64() { @@ -1176,31 +1171,6 @@ mod tests { assert!(measurement.timestamp() >= measurement.start_time().unwrap()); } - #[test] - fn test_sample_struct() { - let t = test_util::TestTarget::default(); - let m = test_util::TestMetric { - id: Uuid::new_v4(), - good: true, - datum: 1i64, - }; - let sample = types::Sample::new(&t, &m).unwrap(); - assert_eq!( - sample.timeseries_name, - format!("{}:{}", t.name(), m.name()) - ); - assert!(sample.measurement.start_time().is_none()); - assert_eq!(sample.measurement.datum(), &Datum::from(1i64)); - - let m = test_util::TestCumulativeMetric { - id: Uuid::new_v4(), - good: true, - datum: 1i64.into(), - }; - let sample = types::Sample::new(&t, &m).unwrap(); - assert!(sample.measurement.start_time().is_some()); - } - #[rstest::rstest] #[case::as_string("some string", FieldValue::String("some string".into()))] #[case::as_i8("2", FieldValue::I8(2))] diff --git a/oximeter/impl/tests/fail/failures.rs b/oximeter/types/tests/fail/failures.rs similarity index 100% rename from oximeter/impl/tests/fail/failures.rs rename to oximeter/types/tests/fail/failures.rs diff --git a/oximeter/impl/tests/fail/failures.stderr b/oximeter/types/tests/fail/failures.stderr similarity index 100% rename from oximeter/impl/tests/fail/failures.stderr rename to oximeter/types/tests/fail/failures.stderr diff --git a/oximeter/impl/tests/test_compilation.rs b/oximeter/types/tests/test_compilation.rs similarity index 100% rename from oximeter/impl/tests/test_compilation.rs rename to oximeter/types/tests/test_compilation.rs diff --git a/workspace-hack/Cargo.toml 
b/workspace-hack/Cargo.toml index 5dc3bc11e7..138d220bc7 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -86,6 +86,7 @@ pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption" postgres-types = { version = "0.2.7", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.86" } +quote = { version = "1.0.36" } regex = { version = "1.10.6" } regex-automata = { version = "0.4.6", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } regex-syntax = { version = "0.8.4" } @@ -193,6 +194,7 @@ pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption" postgres-types = { version = "0.2.7", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.86" } +quote = { version = "1.0.36" } regex = { version = "1.10.6" } regex-automata = { version = "0.4.6", default-features = false, features = ["dfa", "hybrid", "meta", "nfa", "perf", "unicode"] } regex-syntax = { version = "0.8.4" }