diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..ddfd1b04b7 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# Whitespace-only changes +d01ba56c2127789d85723793380a7378394583f1 diff --git a/Cargo.lock b/Cargo.lock index f1ae026d56..902329a691 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4464,7 +4464,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -5222,6 +5222,24 @@ dependencies = [ "serde_json", ] +[[package]] +name = "nexus-external-api" +version = "0.1.0" +dependencies = [ + "anyhow", + "dropshot 0.10.2-dev", + "http 0.2.12", + "hyper 0.14.30", + "ipnetwork", + "nexus-types", + "omicron-common", + "omicron-workspace-hack", + "openapi-manager-types", + "openapiv3", + "oximeter-types", + "oxql-types", +] + [[package]] name = "nexus-internal-api" version = "0.1.0" @@ -6177,6 +6195,7 @@ dependencies = [ "nexus-db-model", "nexus-db-queries", "nexus-defaults", + "nexus-external-api", "nexus-internal-api", "nexus-inventory", "nexus-metrics-producer-gc", @@ -6762,6 +6781,7 @@ dependencies = [ "gateway-api", "indent_write", "installinator-api", + "nexus-external-api", "nexus-internal-api", "omicron-workspace-hack", "openapi-lint", diff --git a/Cargo.toml b/Cargo.toml index 8c24784132..b424862dd6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,6 +64,7 @@ members = [ "nexus/db-model", "nexus/db-queries", "nexus/defaults", + "nexus/external-api", "nexus/internal-api", "nexus/inventory", "nexus/macros-common", @@ -186,6 +187,7 @@ default-members = [ "nexus/db-model", "nexus/db-queries", "nexus/defaults", + "nexus/external-api", "nexus/internal-api", "nexus/inventory", "nexus/macros-common", @@ -425,6 +427,7 @@ nexus-db-fixed-data = { path = "nexus/db-fixed-data" } nexus-db-model = { path = "nexus/db-model" } 
nexus-db-queries = { path = "nexus/db-queries" } nexus-defaults = { path = "nexus/defaults" } +nexus-external-api = { path = "nexus/external-api" } nexus-inventory = { path = "nexus/inventory" } nexus-internal-api = { path = "nexus/internal-api" } nexus-macros-common = { path = "nexus/macros-common" } diff --git a/dev-tools/openapi-manager/Cargo.toml b/dev-tools/openapi-manager/Cargo.toml index 2ca1bc3e4d..211e134016 100644 --- a/dev-tools/openapi-manager/Cargo.toml +++ b/dev-tools/openapi-manager/Cargo.toml @@ -21,6 +21,7 @@ fs-err.workspace = true gateway-api.workspace = true indent_write.workspace = true installinator-api.workspace = true +nexus-external-api.workspace = true nexus-internal-api.workspace = true omicron-workspace-hack.workspace = true openapiv3.workspace = true diff --git a/dev-tools/openapi-manager/src/spec.rs b/dev-tools/openapi-manager/src/spec.rs index e74cf7ed7a..03511a7945 100644 --- a/dev-tools/openapi-manager/src/spec.rs +++ b/dev-tools/openapi-manager/src/spec.rs @@ -79,6 +79,16 @@ pub fn all_apis() -> Vec { filename: "installinator.json", extra_validation: None, }, + ApiSpec { + title: "Oxide Region API", + version: "20240821.0", + description: "API for interacting with the Oxide control plane", + boundary: ApiBoundary::External, + api_description: + nexus_external_api::nexus_external_api_mod::stub_api_description, + filename: "nexus.json", + extra_validation: Some(nexus_external_api::validate_api), + }, ApiSpec { title: "Nexus internal API", version: "0.0.1", diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 5b181c7fa0..d6f97adc39 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -47,6 +47,7 @@ macaddr.workspace = true # integration tests. 
nexus-client.workspace = true nexus-config.workspace = true +nexus-external-api.workspace = true nexus-internal-api.workspace = true nexus-networking.workspace = true nexus-saga-recovery.workspace = true diff --git a/nexus/external-api/Cargo.toml b/nexus/external-api/Cargo.toml new file mode 100644 index 0000000000..0875e1f574 --- /dev/null +++ b/nexus/external-api/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "nexus-external-api" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[lints] +workspace = true + +[dependencies] +anyhow.workspace = true +dropshot.workspace = true +http.workspace = true +hyper.workspace = true +ipnetwork.workspace = true +nexus-types.workspace = true +omicron-common.workspace = true +omicron-workspace-hack.workspace = true +openapiv3.workspace = true +openapi-manager-types.workspace = true +oximeter-types.workspace = true +oxql-types.workspace = true diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/external-api/output/nexus_tags.txt similarity index 100% rename from nexus/tests/output/nexus_tags.txt rename to nexus/external-api/output/nexus_tags.txt diff --git a/nexus/external-api/src/lib.rs b/nexus/external-api/src/lib.rs new file mode 100644 index 0000000000..669b25145f --- /dev/null +++ b/nexus/external-api/src/lib.rs @@ -0,0 +1,3032 @@ +use std::collections::BTreeMap; + +use anyhow::anyhow; +use dropshot::{ + EmptyScanParams, EndpointTagPolicy, HttpError, HttpResponseAccepted, + HttpResponseCreated, HttpResponseDeleted, HttpResponseFound, + HttpResponseHeaders, HttpResponseOk, HttpResponseSeeOther, + HttpResponseUpdatedNoContent, PaginationParams, Path, Query, + RequestContext, ResultsPage, StreamingBody, TypedBody, + WebsocketChannelResult, WebsocketConnection, +}; +use http::Response; +use hyper::Body; +use ipnetwork::IpNetwork; +use nexus_types::{ + authn::cookies::Cookies, + external_api::{params, shared, views}, +}; +use omicron_common::api::external::{ + http_pagination::{PaginatedById, PaginatedByName, 
PaginatedByNameOrId}, + *, +}; +use openapi_manager_types::ValidationContext; +use openapiv3::OpenAPI; + +pub const API_VERSION: &str = "20240821.0"; + +// API ENDPOINT FUNCTION NAMING CONVENTIONS +// +// Generally, HTTP resources are grouped within some collection. For a +// relatively simple example: +// +// GET v1/projects (list the projects in the collection) +// POST v1/projects (create a project in the collection) +// GET v1/projects/{project} (look up a project in the collection) +// DELETE v1/projects/{project} (delete a project in the collection) +// PUT v1/projects/{project} (update a project in the collection) +// +// We pick a name for the function that implements a given API entrypoint +// based on how we expect it to appear in the CLI subcommand hierarchy. For +// example: +// +// GET v1/projects -> project_list() +// POST v1/projects -> project_create() +// GET v1/projects/{project} -> project_view() +// DELETE v1/projects/{project} -> project_delete() +// PUT v1/projects/{project} -> project_update() +// +// Note that the path typically uses the entity's plural form while the +// function name uses its singular. +// +// Operations beyond list, create, view, delete, and update should use a +// descriptive noun or verb, again bearing in mind that this will be +// transcribed into the CLI and SDKs: +// +// POST -> instance_reboot +// POST -> instance_stop +// GET -> instance_serial_console +// +// Note that these function names end up in generated OpenAPI spec as the +// operationId for each endpoint, and therefore represent a contract with +// clients. Client generators use operationId to name API methods, so changing +// a function name is a breaking change from a client perspective. 
+ +#[dropshot::api_description { + tag_config = { + allow_other_tags = false, + policy = EndpointTagPolicy::ExactlyOne, + tags = { + "disks" = { + description = "Virtual disks are used to store instance-local data which includes the operating system.", + external_docs = { + url = "http://docs.oxide.computer/api/disks" + } + }, + "floating-ips" = { + description = "Floating IPs allow a project to allocate well-known IPs to instances.", + external_docs = { + url = "http://docs.oxide.computer/api/floating-ips" + } + }, + "hidden" = { + description = "TODO operations that will not ship to customers", + external_docs = { + url = "http://docs.oxide.computer/api" + } + }, + "images" = { + description = "Images are read-only virtual disks that may be used to boot virtual machines.", + external_docs = { + url = "http://docs.oxide.computer/api/images" + } + }, + "instances" = { + description = "Virtual machine instances are the basic unit of computation. These operations are used for provisioning, controlling, and destroying instances.", + external_docs = { + url = "http://docs.oxide.computer/api/instances" + } + }, + "login" = { + description = "Authentication endpoints", + external_docs = { + url = "http://docs.oxide.computer/api/login" + } + }, + "metrics" = { + description = "Silo-scoped metrics", + external_docs = { + url = "http://docs.oxide.computer/api/metrics" + } + }, + "policy" = { + description = "System-wide IAM policy", + external_docs = { + url = "http://docs.oxide.computer/api/policy" + } + }, + "projects" = { + description = "Projects are a grouping of associated resources such as instances and disks within a silo for purposes of billing and access control.", + external_docs = { + url = "http://docs.oxide.computer/api/projects" + } + }, + "roles" = { + description = "Roles are a component of Identity and Access Management (IAM) that allow a user or agent account access to additional permissions.", + external_docs = { + url = 
"http://docs.oxide.computer/api/roles" + } + }, + "session" = { + description = "Information pertaining to the current session.", + external_docs = { + url = "http://docs.oxide.computer/api/session" + } + }, + "silos" = { + description = "Silos represent a logical partition of users and resources.", + external_docs = { + url = "http://docs.oxide.computer/api/silos" + } + }, + "snapshots" = { + description = "Snapshots of virtual disks at a particular point in time.", + external_docs = { + url = "http://docs.oxide.computer/api/snapshots" + } + }, + "vpcs" = { + description = "Virtual Private Clouds (VPCs) provide isolated network environments for managing and deploying services.", + external_docs = { + url = "http://docs.oxide.computer/api/vpcs" + } + }, + "system/probes" = { + description = "Probes for testing network connectivity", + external_docs = { + url = "http://docs.oxide.computer/api/probes" + } + }, + "system/status" = { + description = "Endpoints related to system health", + external_docs = { + url = "http://docs.oxide.computer/api/system-status" + } + }, + "system/hardware" = { + description = "These operations pertain to hardware inventory and management. Racks are the unit of expansion of an Oxide deployment. Racks are in turn composed of sleds, switches, power supplies, and a cabled backplane.", + external_docs = { + url = "http://docs.oxide.computer/api/system-hardware" + } + }, + "system/metrics" = { + description = "Metrics provide insight into the operation of the Oxide deployment. 
These include telemetry on hardware and software components that can be used to understand the current state as well as to diagnose issues.", + external_docs = { + url = "http://docs.oxide.computer/api/system-metrics" + } + }, + "system/networking" = { + description = "This provides rack-level network configuration.", + external_docs = { + url = "http://docs.oxide.computer/api/system-networking" + } + }, + "system/silos" = { + description = "Silos represent a logical partition of users and resources.", + external_docs = { + url = "http://docs.oxide.computer/api/system-silos" + } + } + } + } +}] +pub trait NexusExternalApi { + type Context; + + /// Ping API + /// + /// Always responds with Ok if it responds at all. + #[endpoint { + method = GET, + path = "/v1/ping", + tags = ["system/status"], + }] + async fn ping( + _rqctx: RequestContext, + ) -> Result, HttpError> { + Ok(HttpResponseOk(views::Ping { status: views::PingStatus::Ok })) + } + + /// Fetch top-level IAM policy + #[endpoint { + method = GET, + path = "/v1/system/policy", + tags = ["policy"], + }] + async fn system_policy_view( + rqctx: RequestContext, + ) -> Result>, HttpError>; + + /// Update top-level IAM policy + #[endpoint { + method = PUT, + path = "/v1/system/policy", + tags = ["policy"], + }] + async fn system_policy_update( + rqctx: RequestContext, + new_policy: TypedBody>, + ) -> Result>, HttpError>; + + /// Fetch current silo's IAM policy + #[endpoint { + method = GET, + path = "/v1/policy", + tags = ["silos"], + }] + async fn policy_view( + rqctx: RequestContext, + ) -> Result>, HttpError>; + + /// Update current silo's IAM policy + #[endpoint { + method = PUT, + path = "/v1/policy", + tags = ["silos"], + }] + async fn policy_update( + rqctx: RequestContext, + new_policy: TypedBody>, + ) -> Result>, HttpError>; + + /// Fetch resource utilization for user's current silo + #[endpoint { + method = GET, + path = "/v1/utilization", + tags = ["silos"], + }] + async fn utilization_view( + rqctx: 
RequestContext, + ) -> Result, HttpError>; + + /// Fetch current utilization for given silo + #[endpoint { + method = GET, + path = "/v1/system/utilization/silos/{silo}", + tags = ["system/silos"], + }] + async fn silo_utilization_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// List current utilization state for all silos + #[endpoint { + method = GET, + path = "/v1/system/utilization/silos", + tags = ["system/silos"], + }] + async fn silo_utilization_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Lists resource quotas for all silos + #[endpoint { + method = GET, + path = "/v1/system/silo-quotas", + tags = ["system/silos"], + }] + async fn system_quotas_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Fetch resource quotas for silo + #[endpoint { + method = GET, + path = "/v1/system/silos/{silo}/quotas", + tags = ["system/silos"], + }] + async fn silo_quotas_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Update resource quotas for silo + /// + /// If a quota value is not specified, it will remain unchanged. + #[endpoint { + method = PUT, + path = "/v1/system/silos/{silo}/quotas", + tags = ["system/silos"], + }] + async fn silo_quotas_update( + rqctx: RequestContext, + path_params: Path, + new_quota: TypedBody, + ) -> Result, HttpError>; + + /// List silos + /// + /// Lists silos that are discoverable based on the current permissions. + #[endpoint { + method = GET, + path = "/v1/system/silos", + tags = ["system/silos"], + }] + async fn silo_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Create a silo + #[endpoint { + method = POST, + path = "/v1/system/silos", + tags = ["system/silos"], + }] + async fn silo_create( + rqctx: RequestContext, + new_silo_params: TypedBody, + ) -> Result, HttpError>; + + /// Fetch silo + /// + /// Fetch silo by name or ID. 
+ #[endpoint { + method = GET, + path = "/v1/system/silos/{silo}", + tags = ["system/silos"], + }] + async fn silo_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// List IP pools linked to silo + /// + /// Linked IP pools are available to users in the specified silo. A silo + /// can have at most one default pool. IPs are allocated from the default + /// pool when users ask for one without specifying a pool. + #[endpoint { + method = GET, + path = "/v1/system/silos/{silo}/ip-pools", + tags = ["system/silos"], + }] + async fn silo_ip_pool_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result>, HttpError>; + + /// Delete a silo + /// + /// Delete a silo by name or ID. + #[endpoint { + method = DELETE, + path = "/v1/system/silos/{silo}", + tags = ["system/silos"], + }] + async fn silo_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + /// Fetch silo IAM policy + #[endpoint { + method = GET, + path = "/v1/system/silos/{silo}/policy", + tags = ["system/silos"], + }] + async fn silo_policy_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result>, HttpError>; + + /// Update silo IAM policy + #[endpoint { + method = PUT, + path = "/v1/system/silos/{silo}/policy", + tags = ["system/silos"], + }] + async fn silo_policy_update( + rqctx: RequestContext, + path_params: Path, + new_policy: TypedBody>, + ) -> Result>, HttpError>; + + // Silo-specific user endpoints + + /// List built-in (system) users in silo + #[endpoint { + method = GET, + path = "/v1/system/users", + tags = ["system/silos"], + }] + async fn silo_user_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + /// Fetch built-in (system) user + #[endpoint { + method = GET, + path = "/v1/system/users/{user_id}", + tags = ["system/silos"], + }] + async fn silo_user_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + // Silo 
identity providers + + /// List a silo's IdP's name + #[endpoint { + method = GET, + path = "/v1/system/identity-providers", + tags = ["system/silos"], + }] + async fn silo_identity_provider_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + // Silo SAML identity providers + + /// Create SAML IdP + #[endpoint { + method = POST, + path = "/v1/system/identity-providers/saml", + tags = ["system/silos"], + }] + async fn saml_identity_provider_create( + rqctx: RequestContext, + query_params: Query, + new_provider: TypedBody, + ) -> Result, HttpError>; + + /// Fetch SAML IdP + #[endpoint { + method = GET, + path = "/v1/system/identity-providers/saml/{provider}", + tags = ["system/silos"], + }] + async fn saml_identity_provider_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + // TODO: no DELETE for identity providers? + + // "Local" Identity Provider + + /// Create user + /// + /// Users can only be created in Silos with `provision_type` == `Fixed`. + /// Otherwise, Silo users are just-in-time (JIT) provisioned when a user + /// first logs in using an external Identity Provider. + #[endpoint { + method = POST, + path = "/v1/system/identity-providers/local/users", + tags = ["system/silos"], + }] + async fn local_idp_user_create( + rqctx: RequestContext, + query_params: Query, + new_user_params: TypedBody, + ) -> Result, HttpError>; + + /// Delete user + #[endpoint { + method = DELETE, + path = "/v1/system/identity-providers/local/users/{user_id}", + tags = ["system/silos"], + }] + async fn local_idp_user_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Set or invalidate user's password + /// + /// Passwords can only be updated for users in Silos with identity mode + /// `LocalOnly`. 
+ #[endpoint { + method = POST, + path = "/v1/system/identity-providers/local/users/{user_id}/set-password", + tags = ["system/silos"], + }] + async fn local_idp_user_set_password( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + update: TypedBody, + ) -> Result; + + /// List projects + #[endpoint { + method = GET, + path = "/v1/projects", + tags = ["projects"], + }] + async fn project_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Create project + #[endpoint { + method = POST, + path = "/v1/projects", + tags = ["projects"], + }] + async fn project_create( + rqctx: RequestContext, + new_project: TypedBody, + ) -> Result, HttpError>; + + /// Fetch project + #[endpoint { + method = GET, + path = "/v1/projects/{project}", + tags = ["projects"], + }] + async fn project_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Delete project + #[endpoint { + method = DELETE, + path = "/v1/projects/{project}", + tags = ["projects"], + }] + async fn project_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + // TODO-correctness: Is it valid for PUT to accept application/json that's + // a subset of what the resource actually represents? If not, is that a + // problem? (HTTP may require that this be idempotent.) If so, can we get + // around that having this be a slightly different content-type (e.g., + // "application/json-patch")? We should see what other APIs do. 
+ /// Update a project + #[endpoint { + method = PUT, + path = "/v1/projects/{project}", + tags = ["projects"], + }] + async fn project_update( + rqctx: RequestContext, + path_params: Path, + updated_project: TypedBody, + ) -> Result, HttpError>; + + /// Fetch project's IAM policy + #[endpoint { + method = GET, + path = "/v1/projects/{project}/policy", + tags = ["projects"], + }] + async fn project_policy_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result>, HttpError>; + + /// Update project's IAM policy + #[endpoint { + method = PUT, + path = "/v1/projects/{project}/policy", + tags = ["projects"], + }] + async fn project_policy_update( + rqctx: RequestContext, + path_params: Path, + new_policy: TypedBody>, + ) -> Result>, HttpError>; + + // IP Pools + + /// List IP pools + #[endpoint { + method = GET, + path = "/v1/ip-pools", + tags = ["projects"], + }] + async fn project_ip_pool_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Fetch IP pool + #[endpoint { + method = GET, + path = "/v1/ip-pools/{pool}", + tags = ["projects"], + }] + async fn project_ip_pool_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// List IP pools + #[endpoint { + method = GET, + path = "/v1/system/ip-pools", + tags = ["system/networking"], + }] + async fn ip_pool_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Create IP pool + #[endpoint { + method = POST, + path = "/v1/system/ip-pools", + tags = ["system/networking"], + }] + async fn ip_pool_create( + rqctx: RequestContext, + pool_params: TypedBody, + ) -> Result, HttpError>; + + /// Fetch IP pool + #[endpoint { + method = GET, + path = "/v1/system/ip-pools/{pool}", + tags = ["system/networking"], + }] + async fn ip_pool_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Delete IP pool + #[endpoint { + method = DELETE, + path = "/v1/system/ip-pools/{pool}", + tags = 
["system/networking"], + }] + async fn ip_pool_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + /// Update IP pool + #[endpoint { + method = PUT, + path = "/v1/system/ip-pools/{pool}", + tags = ["system/networking"], + }] + async fn ip_pool_update( + rqctx: RequestContext, + path_params: Path, + updates: TypedBody, + ) -> Result, HttpError>; + + /// Fetch IP pool utilization + #[endpoint { + method = GET, + path = "/v1/system/ip-pools/{pool}/utilization", + tags = ["system/networking"], + }] + async fn ip_pool_utilization_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// List IP pool's linked silos + #[endpoint { + method = GET, + path = "/v1/system/ip-pools/{pool}/silos", + tags = ["system/networking"], + }] + async fn ip_pool_silo_list( + rqctx: RequestContext, + path_params: Path, + // paginating by resource_id because they're unique per pool. most robust + // option would be to paginate by a composite key representing the (pool, + // resource_type, resource) + query_params: Query, + // TODO: this could just list views::Silo -- it's not like knowing silo_id + // and nothing else is particularly useful -- except we also want to say + // whether the pool is marked default on each silo. So one option would + // be to do the same as we did with SiloIpPool -- include is_default on + // whatever the thing is. Still... all we'd have to do to make this usable + // in both places would be to make it { ...IpPool, silo_id, silo_name, + // is_default } + ) -> Result>, HttpError>; + + /// Link IP pool to silo + /// + /// Users in linked silos can allocate external IPs from this pool for their + /// instances. A silo can have at most one default pool. IPs are allocated from + /// the default pool when users ask for one without specifying a pool. 
+ #[endpoint { + method = POST, + path = "/v1/system/ip-pools/{pool}/silos", + tags = ["system/networking"], + }] + async fn ip_pool_silo_link( + rqctx: RequestContext, + path_params: Path, + resource_assoc: TypedBody, + ) -> Result, HttpError>; + + /// Unlink IP pool from silo + /// + /// Will fail if there are any outstanding IPs allocated in the silo. + #[endpoint { + method = DELETE, + path = "/v1/system/ip-pools/{pool}/silos/{silo}", + tags = ["system/networking"], + }] + async fn ip_pool_silo_unlink( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + /// Make IP pool default for silo + /// + /// When a user asks for an IP (e.g., at instance create time) without + /// specifying a pool, the IP comes from the default pool if a default is + /// configured. When a pool is made the default for a silo, any existing + /// default will remain linked to the silo, but will no longer be the + /// default. + #[endpoint { + method = PUT, + path = "/v1/system/ip-pools/{pool}/silos/{silo}", + tags = ["system/networking"], + }] + async fn ip_pool_silo_update( + rqctx: RequestContext, + path_params: Path, + update: TypedBody, + ) -> Result, HttpError>; + + /// Fetch Oxide service IP pool + #[endpoint { + method = GET, + path = "/v1/system/ip-pools-service", + tags = ["system/networking"], + }] + async fn ip_pool_service_view( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// List ranges for IP pool + /// + /// Ranges are ordered by their first address. + #[endpoint { + method = GET, + path = "/v1/system/ip-pools/{pool}/ranges", + tags = ["system/networking"], + }] + async fn ip_pool_range_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result>, HttpError>; + + /// Add range to IP pool + /// + /// IPv6 ranges are not allowed yet. 
+ #[endpoint { + method = POST, + path = "/v1/system/ip-pools/{pool}/ranges/add", + tags = ["system/networking"], + }] + async fn ip_pool_range_add( + rqctx: RequestContext, + path_params: Path, + range_params: TypedBody, + ) -> Result, HttpError>; + + /// Remove range from IP pool + #[endpoint { + method = POST, + path = "/v1/system/ip-pools/{pool}/ranges/remove", + tags = ["system/networking"], + }] + async fn ip_pool_range_remove( + rqctx: RequestContext, + path_params: Path, + range_params: TypedBody, + ) -> Result; + + /// List IP ranges for the Oxide service pool + /// + /// Ranges are ordered by their first address. + #[endpoint { + method = GET, + path = "/v1/system/ip-pools-service/ranges", + tags = ["system/networking"], + }] + async fn ip_pool_service_range_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Add IP range to Oxide service pool + /// + /// IPv6 ranges are not allowed yet. + #[endpoint { + method = POST, + path = "/v1/system/ip-pools-service/ranges/add", + tags = ["system/networking"], + }] + async fn ip_pool_service_range_add( + rqctx: RequestContext, + range_params: TypedBody, + ) -> Result, HttpError>; + + /// Remove IP range from Oxide service pool + #[endpoint { + method = POST, + path = "/v1/system/ip-pools-service/ranges/remove", + tags = ["system/networking"], + }] + async fn ip_pool_service_range_remove( + rqctx: RequestContext, + range_params: TypedBody, + ) -> Result; + + // Floating IP Addresses + + /// List floating IPs + #[endpoint { + method = GET, + path = "/v1/floating-ips", + tags = ["floating-ips"], + }] + async fn floating_ip_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + /// Create floating IP + #[endpoint { + method = POST, + path = "/v1/floating-ips", + tags = ["floating-ips"], + }] + async fn floating_ip_create( + rqctx: RequestContext, + query_params: Query, + floating_params: TypedBody, + ) -> Result, HttpError>; + + /// Update floating 
IP + #[endpoint { + method = PUT, + path = "/v1/floating-ips/{floating_ip}", + tags = ["floating-ips"], + }] + async fn floating_ip_update( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + updated_floating_ip: TypedBody, + ) -> Result, HttpError>; + + /// Delete floating IP + #[endpoint { + method = DELETE, + path = "/v1/floating-ips/{floating_ip}", + tags = ["floating-ips"], + }] + async fn floating_ip_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Fetch floating IP + #[endpoint { + method = GET, + path = "/v1/floating-ips/{floating_ip}", + tags = ["floating-ips"] + }] + async fn floating_ip_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Attach floating IP + /// + /// Attach floating IP to an instance or other resource. + #[endpoint { + method = POST, + path = "/v1/floating-ips/{floating_ip}/attach", + tags = ["floating-ips"], + }] + async fn floating_ip_attach( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + target: TypedBody, + ) -> Result, HttpError>; + + /// Detach floating IP + /// + /// Detach floating IP from instance or other resource. + #[endpoint { + method = POST, + path = "/v1/floating-ips/{floating_ip}/detach", + tags = ["floating-ips"], + }] + async fn floating_ip_detach( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + // Disks + + /// List disks + #[endpoint { + method = GET, + path = "/v1/disks", + tags = ["disks"], + }] + async fn disk_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + // TODO-correctness See note about instance create. This should be async. 
+ /// Create a disk + #[endpoint { + method = POST, + path = "/v1/disks", + tags = ["disks"] + }] + async fn disk_create( + rqctx: RequestContext, + query_params: Query, + new_disk: TypedBody, + ) -> Result, HttpError>; + + /// Fetch disk + #[endpoint { + method = GET, + path = "/v1/disks/{disk}", + tags = ["disks"] + }] + async fn disk_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Delete disk + #[endpoint { + method = DELETE, + path = "/v1/disks/{disk}", + tags = ["disks"], + }] + async fn disk_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Fetch disk metrics + #[endpoint { + method = GET, + path = "/v1/disks/{disk}/metrics/{metric}", + tags = ["disks"], + }] + async fn disk_metrics_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query< + PaginationParams, + >, + selector_params: Query, + ) -> Result< + HttpResponseOk>, + HttpError, + >; + + /// Start importing blocks into disk + /// + /// Start the process of importing blocks into a disk + #[endpoint { + method = POST, + path = "/v1/disks/{disk}/bulk-write-start", + tags = ["disks"], + }] + async fn disk_bulk_write_import_start( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Import blocks into disk + #[endpoint { + method = POST, + path = "/v1/disks/{disk}/bulk-write", + tags = ["disks"], + }] + async fn disk_bulk_write_import( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + import_params: TypedBody, + ) -> Result; + + /// Stop importing blocks into disk + /// + /// Stop the process of importing blocks into a disk + #[endpoint { + method = POST, + path = "/v1/disks/{disk}/bulk-write-stop", + tags = ["disks"], + }] + async fn disk_bulk_write_import_stop( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Confirm disk block import completion + #[endpoint { + method = POST, 
+ path = "/v1/disks/{disk}/finalize", + tags = ["disks"], + }] + async fn disk_finalize_import( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + finalize_params: TypedBody, + ) -> Result; + + // Instances + + /// List instances + #[endpoint { + method = GET, + path = "/v1/instances", + tags = ["instances"], + }] + async fn instance_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + /// Create instance + #[endpoint { + method = POST, + path = "/v1/instances", + tags = ["instances"], + }] + async fn instance_create( + rqctx: RequestContext, + query_params: Query, + new_instance: TypedBody, + ) -> Result, HttpError>; + + /// Fetch instance + #[endpoint { + method = GET, + path = "/v1/instances/{instance}", + tags = ["instances"], + }] + async fn instance_view( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result, HttpError>; + + /// Delete instance + #[endpoint { + method = DELETE, + path = "/v1/instances/{instance}", + tags = ["instances"], + }] + async fn instance_delete( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result; + + /// Reboot an instance + #[endpoint { + method = POST, + path = "/v1/instances/{instance}/reboot", + tags = ["instances"], + }] + async fn instance_reboot( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result, HttpError>; + + /// Boot instance + #[endpoint { + method = POST, + path = "/v1/instances/{instance}/start", + tags = ["instances"], + }] + async fn instance_start( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result, HttpError>; + + /// Stop instance + #[endpoint { + method = POST, + path = "/v1/instances/{instance}/stop", + tags = ["instances"], + }] + async fn instance_stop( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result, HttpError>; + + /// Fetch instance serial console + #[endpoint { + method = GET, + path = 
"/v1/instances/{instance}/serial-console", + tags = ["instances"], + }] + async fn instance_serial_console( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Stream instance serial console + #[channel { + protocol = WEBSOCKETS, + path = "/v1/instances/{instance}/serial-console/stream", + tags = ["instances"], + }] + async fn instance_serial_console_stream( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + conn: WebsocketConnection, + ) -> WebsocketChannelResult; + + /// List SSH public keys for instance + /// + /// List SSH public keys injected via cloud-init during instance creation. + /// Note that this list is a snapshot in time and will not reflect updates + /// made after the instance is created. + #[endpoint { + method = GET, + path = "/v1/instances/{instance}/ssh-public-keys", + tags = ["instances"], + }] + async fn instance_ssh_public_key_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query< + PaginatedByNameOrId, + >, + ) -> Result>, HttpError>; + + /// List disks for instance + #[endpoint { + method = GET, + path = "/v1/instances/{instance}/disks", + tags = ["instances"], + }] + async fn instance_disk_list( + rqctx: RequestContext, + query_params: Query< + PaginatedByNameOrId, + >, + path_params: Path, + ) -> Result>, HttpError>; + + /// Attach disk to instance + #[endpoint { + method = POST, + path = "/v1/instances/{instance}/disks/attach", + tags = ["instances"], + }] + async fn instance_disk_attach( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + disk_to_attach: TypedBody, + ) -> Result, HttpError>; + + /// Detach disk from instance + #[endpoint { + method = POST, + path = "/v1/instances/{instance}/disks/detach", + tags = ["instances"], + }] + async fn instance_disk_detach( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + disk_to_detach: TypedBody, + ) -> Result, HttpError>; + + // Certificates + + /// List 
certificates for external endpoints + /// + /// Returns a list of TLS certificates used for the external API (for the + /// current Silo). These are sorted by creation date, with the most recent + /// certificates appearing first. + #[endpoint { + method = GET, + path = "/v1/certificates", + tags = ["silos"], + }] + async fn certificate_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Create new system-wide x.509 certificate + /// + /// This certificate is automatically used by the Oxide Control plane to serve + /// external connections. + #[endpoint { + method = POST, + path = "/v1/certificates", + tags = ["silos"] + }] + async fn certificate_create( + rqctx: RequestContext, + new_cert: TypedBody, + ) -> Result, HttpError>; + + /// Fetch certificate + /// + /// Returns the details of a specific certificate + #[endpoint { + method = GET, + path = "/v1/certificates/{certificate}", + tags = ["silos"], + }] + async fn certificate_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Delete certificate + /// + /// Permanently delete a certificate. This operation cannot be undone. 
+ #[endpoint { + method = DELETE, + path = "/v1/certificates/{certificate}", + tags = ["silos"], + }] + async fn certificate_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + /// Create address lot + #[endpoint { + method = POST, + path = "/v1/system/networking/address-lot", + tags = ["system/networking"], + }] + async fn networking_address_lot_create( + rqctx: RequestContext, + new_address_lot: TypedBody, + ) -> Result, HttpError>; + + /// Delete address lot + #[endpoint { + method = DELETE, + path = "/v1/system/networking/address-lot/{address_lot}", + tags = ["system/networking"], + }] + async fn networking_address_lot_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + /// List address lots + #[endpoint { + method = GET, + path = "/v1/system/networking/address-lot", + tags = ["system/networking"], + }] + async fn networking_address_lot_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// List blocks in address lot + #[endpoint { + method = GET, + path = "/v1/system/networking/address-lot/{address_lot}/blocks", + tags = ["system/networking"], + }] + async fn networking_address_lot_block_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result>, HttpError>; + + /// Create loopback address + #[endpoint { + method = POST, + path = "/v1/system/networking/loopback-address", + tags = ["system/networking"], + }] + async fn networking_loopback_address_create( + rqctx: RequestContext, + new_loopback_address: TypedBody, + ) -> Result, HttpError>; + + /// Delete loopback address + #[endpoint { + method = DELETE, + path = "/v1/system/networking/loopback-address/{rack_id}/{switch_location}/{address}/{subnet_mask}", + tags = ["system/networking"], + }] + async fn networking_loopback_address_delete( + rqctx: RequestContext, + path: Path, + ) -> Result; + + /// List loopback addresses + #[endpoint { + method = GET, + path = "/v1/system/networking/loopback-address", + 
tags = ["system/networking"], + }] + async fn networking_loopback_address_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Create switch port settings + #[endpoint { + method = POST, + path = "/v1/system/networking/switch-port-settings", + tags = ["system/networking"], + }] + async fn networking_switch_port_settings_create( + rqctx: RequestContext, + new_settings: TypedBody, + ) -> Result, HttpError>; + + /// Delete switch port settings + #[endpoint { + method = DELETE, + path = "/v1/system/networking/switch-port-settings", + tags = ["system/networking"], + }] + async fn networking_switch_port_settings_delete( + rqctx: RequestContext, + query_params: Query, + ) -> Result; + + /// List switch port settings + #[endpoint { + method = GET, + path = "/v1/system/networking/switch-port-settings", + tags = ["system/networking"], + }] + async fn networking_switch_port_settings_list( + rqctx: RequestContext, + query_params: Query< + PaginatedByNameOrId, + >, + ) -> Result>, HttpError>; + + /// Get information about switch port + #[endpoint { + method = GET, + path = "/v1/system/networking/switch-port-settings/{port}", + tags = ["system/networking"], + }] + async fn networking_switch_port_settings_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// List switch ports + #[endpoint { + method = GET, + path = "/v1/system/hardware/switch-port", + tags = ["system/hardware"], + }] + async fn networking_switch_port_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + /// Get switch port status + #[endpoint { + method = GET, + path = "/v1/system/hardware/switch-port/{port}/status", + tags = ["system/hardware"], + }] + async fn networking_switch_port_status( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Apply switch port settings + #[endpoint { + method = POST, + path = "/v1/system/hardware/switch-port/{port}/settings", 
+ tags = ["system/hardware"], + }] + async fn networking_switch_port_apply_settings( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + settings_body: TypedBody, + ) -> Result; + + /// Clear switch port settings + #[endpoint { + method = DELETE, + path = "/v1/system/hardware/switch-port/{port}/settings", + tags = ["system/hardware"], + }] + async fn networking_switch_port_clear_settings( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Create new BGP configuration + #[endpoint { + method = POST, + path = "/v1/system/networking/bgp", + tags = ["system/networking"], + }] + async fn networking_bgp_config_create( + rqctx: RequestContext, + config: TypedBody, + ) -> Result, HttpError>; + + /// List BGP configurations + #[endpoint { + method = GET, + path = "/v1/system/networking/bgp", + tags = ["system/networking"], + }] + async fn networking_bgp_config_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + //TODO pagination? the normal by-name/by-id stuff does not work here + /// Get BGP peer status + #[endpoint { + method = GET, + path = "/v1/system/networking/bgp-status", + tags = ["system/networking"], + }] + async fn networking_bgp_status( + rqctx: RequestContext, + ) -> Result>, HttpError>; + + //TODO pagination? the normal by-name/by-id stuff does not work here + /// Get BGP exported routes + #[endpoint { + method = GET, + path = "/v1/system/networking/bgp-exported", + tags = ["system/networking"], + }] + async fn networking_bgp_exported( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Get BGP router message history + #[endpoint { + method = GET, + path = "/v1/system/networking/bgp-message-history", + tags = ["system/networking"], + }] + async fn networking_bgp_message_history( + rqctx: RequestContext, + query_params: Query, + ) -> Result, HttpError>; + + //TODO pagination? 
the normal by-name/by-id stuff does not work here + /// Get imported IPv4 BGP routes + #[endpoint { + method = GET, + path = "/v1/system/networking/bgp-routes-ipv4", + tags = ["system/networking"], + }] + async fn networking_bgp_imported_routes_ipv4( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Delete BGP configuration + #[endpoint { + method = DELETE, + path = "/v1/system/networking/bgp", + tags = ["system/networking"], + }] + async fn networking_bgp_config_delete( + rqctx: RequestContext, + sel: Query, + ) -> Result; + + /// Update BGP announce set + /// + /// If the announce set exists, this endpoint replaces the existing announce + /// set with the one specified. + #[endpoint { + method = PUT, + path = "/v1/system/networking/bgp-announce-set", + tags = ["system/networking"], + }] + async fn networking_bgp_announce_set_update( + rqctx: RequestContext, + config: TypedBody, + ) -> Result, HttpError>; + + /// List BGP announce sets + #[endpoint { + method = GET, + path = "/v1/system/networking/bgp-announce-set", + tags = ["system/networking"], + }] + async fn networking_bgp_announce_set_list( + rqctx: RequestContext, + query_params: Query< + PaginatedByNameOrId, + >, + ) -> Result>, HttpError>; + + /// Delete BGP announce set + #[endpoint { + method = DELETE, + path = "/v1/system/networking/bgp-announce-set/{name_or_id}", + tags = ["system/networking"], + }] + async fn networking_bgp_announce_set_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + // TODO: is pagination necessary here? How large do we expect the list of + // announcements to become in real usage? 
+ /// Get originated routes for a specified BGP announce set + #[endpoint { + method = GET, + path = "/v1/system/networking/bgp-announce-set/{name_or_id}/announcement", + tags = ["system/networking"], + }] + async fn networking_bgp_announcement_list( + rqctx: RequestContext, + path_params: Path, + ) -> Result>, HttpError>; + + /// Enable a BFD session + #[endpoint { + method = POST, + path = "/v1/system/networking/bfd-enable", + tags = ["system/networking"], + }] + async fn networking_bfd_enable( + rqctx: RequestContext, + session: TypedBody, + ) -> Result; + + /// Disable a BFD session + #[endpoint { + method = POST, + path = "/v1/system/networking/bfd-disable", + tags = ["system/networking"], + }] + async fn networking_bfd_disable( + rqctx: RequestContext, + session: TypedBody, + ) -> Result; + + /// Get BFD status + #[endpoint { + method = GET, + path = "/v1/system/networking/bfd-status", + tags = ["system/networking"], + }] + async fn networking_bfd_status( + rqctx: RequestContext, + ) -> Result>, HttpError>; + + /// Get user-facing services IP allowlist + #[endpoint { + method = GET, + path = "/v1/system/networking/allow-list", + tags = ["system/networking"], + }] + async fn networking_allow_list_view( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Update user-facing services IP allowlist + #[endpoint { + method = PUT, + path = "/v1/system/networking/allow-list", + tags = ["system/networking"], + }] + async fn networking_allow_list_update( + rqctx: RequestContext, + params: TypedBody, + ) -> Result, HttpError>; + + // Images + + /// List images + /// + /// List images which are global or scoped to the specified project. The images + /// are returned sorted by creation date, with the most recent images appearing first. 
+ #[endpoint { + method = GET, + path = "/v1/images", + tags = ["images"], + }] + async fn image_list( + rqctx: RequestContext, + query_params: Query< + PaginatedByNameOrId, + >, + ) -> Result>, HttpError>; + + /// Create image + /// + /// Create a new image in a project. + #[endpoint { + method = POST, + path = "/v1/images", + tags = ["images"] + }] + async fn image_create( + rqctx: RequestContext, + query_params: Query, + new_image: TypedBody, + ) -> Result, HttpError>; + + /// Fetch image + /// + /// Fetch the details for a specific image in a project. + #[endpoint { + method = GET, + path = "/v1/images/{image}", + tags = ["images"], + }] + async fn image_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Delete image + /// + /// Permanently delete an image from a project. This operation cannot be undone. + /// Any instances in the project using the image will continue to run, however + /// new instances can not be created with this image. 
+ #[endpoint { + method = DELETE, + path = "/v1/images/{image}", + tags = ["images"], + }] + async fn image_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Promote project image + /// + /// Promote project image to be visible to all projects in the silo + #[endpoint { + method = POST, + path = "/v1/images/{image}/promote", + tags = ["images"] + }] + async fn image_promote( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Demote silo image + /// + /// Demote silo image to be visible only to a specified project + #[endpoint { + method = POST, + path = "/v1/images/{image}/demote", + tags = ["images"] + }] + async fn image_demote( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// List network interfaces + #[endpoint { + method = GET, + path = "/v1/network-interfaces", + tags = ["instances"], + }] + async fn instance_network_interface_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + /// Create network interface + #[endpoint { + method = POST, + path = "/v1/network-interfaces", + tags = ["instances"], + }] + async fn instance_network_interface_create( + rqctx: RequestContext, + query_params: Query, + interface_params: TypedBody, + ) -> Result, HttpError>; + + /// Delete network interface + /// + /// Note that the primary interface for an instance cannot be deleted if there + /// are any secondary interfaces. A new primary interface must be designated + /// first. The primary interface can be deleted if there are no secondary + /// interfaces. 
+ #[endpoint { + method = DELETE, + path = "/v1/network-interfaces/{interface}", + tags = ["instances"], + }] + async fn instance_network_interface_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Fetch network interface + #[endpoint { + method = GET, + path = "/v1/network-interfaces/{interface}", + tags = ["instances"], + }] + async fn instance_network_interface_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Update network interface + #[endpoint { + method = PUT, + path = "/v1/network-interfaces/{interface}", + tags = ["instances"], + }] + async fn instance_network_interface_update( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + updated_iface: TypedBody, + ) -> Result, HttpError>; + + // External IP addresses for instances + + /// List external IP addresses + #[endpoint { + method = GET, + path = "/v1/instances/{instance}/external-ips", + tags = ["instances"], + }] + async fn instance_external_ip_list( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result>, HttpError>; + + /// Allocate and attach ephemeral IP to instance + #[endpoint { + method = POST, + path = "/v1/instances/{instance}/external-ips/ephemeral", + tags = ["instances"], + }] + async fn instance_ephemeral_ip_attach( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ip_to_create: TypedBody, + ) -> Result, HttpError>; + + /// Detach and deallocate ephemeral IP from instance + #[endpoint { + method = DELETE, + path = "/v1/instances/{instance}/external-ips/ephemeral", + tags = ["instances"], + }] + async fn instance_ephemeral_ip_detach( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + // Snapshots + + /// List snapshots + #[endpoint { + method = GET, + path = "/v1/snapshots", + tags = ["snapshots"], + }] + async fn snapshot_list( + rqctx: RequestContext, + query_params: Query>, + ) 
-> Result>, HttpError>; + + /// Create snapshot + /// + /// Creates a point-in-time snapshot from a disk. + #[endpoint { + method = POST, + path = "/v1/snapshots", + tags = ["snapshots"], + }] + async fn snapshot_create( + rqctx: RequestContext, + query_params: Query, + new_snapshot: TypedBody, + ) -> Result, HttpError>; + + /// Fetch snapshot + #[endpoint { + method = GET, + path = "/v1/snapshots/{snapshot}", + tags = ["snapshots"], + }] + async fn snapshot_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Delete snapshot + #[endpoint { + method = DELETE, + path = "/v1/snapshots/{snapshot}", + tags = ["snapshots"], + }] + async fn snapshot_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + // VPCs + + /// List VPCs + #[endpoint { + method = GET, + path = "/v1/vpcs", + tags = ["vpcs"], + }] + async fn vpc_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + /// Create VPC + #[endpoint { + method = POST, + path = "/v1/vpcs", + tags = ["vpcs"], + }] + async fn vpc_create( + rqctx: RequestContext, + query_params: Query, + body: TypedBody, + ) -> Result, HttpError>; + + /// Fetch VPC + #[endpoint { + method = GET, + path = "/v1/vpcs/{vpc}", + tags = ["vpcs"], + }] + async fn vpc_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Update a VPC + #[endpoint { + method = PUT, + path = "/v1/vpcs/{vpc}", + tags = ["vpcs"], + }] + async fn vpc_update( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + updated_vpc: TypedBody, + ) -> Result, HttpError>; + + /// Delete VPC + #[endpoint { + method = DELETE, + path = "/v1/vpcs/{vpc}", + tags = ["vpcs"], + }] + async fn vpc_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// List subnets + #[endpoint { + method = GET, + path = "/v1/vpc-subnets", + tags = ["vpcs"], + }] + 
async fn vpc_subnet_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + /// Create subnet + #[endpoint { + method = POST, + path = "/v1/vpc-subnets", + tags = ["vpcs"], + }] + async fn vpc_subnet_create( + rqctx: RequestContext, + query_params: Query, + create_params: TypedBody, + ) -> Result, HttpError>; + + /// Fetch subnet + #[endpoint { + method = GET, + path = "/v1/vpc-subnets/{subnet}", + tags = ["vpcs"], + }] + async fn vpc_subnet_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Delete subnet + #[endpoint { + method = DELETE, + path = "/v1/vpc-subnets/{subnet}", + tags = ["vpcs"], + }] + async fn vpc_subnet_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Update subnet + #[endpoint { + method = PUT, + path = "/v1/vpc-subnets/{subnet}", + tags = ["vpcs"], + }] + async fn vpc_subnet_update( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + subnet_params: TypedBody, + ) -> Result, HttpError>; + + // This endpoint is likely temporary. We would rather list all IPs allocated in + // a subnet whether they come from NICs or something else. See + // https://github.com/oxidecomputer/omicron/issues/2476 + + /// List network interfaces + #[endpoint { + method = GET, + path = "/v1/vpc-subnets/{subnet}/network-interfaces", + tags = ["vpcs"], + }] + async fn vpc_subnet_list_network_interfaces( + rqctx: RequestContext, + path_params: Path, + query_params: Query>, + ) -> Result>, HttpError>; + + // VPC Firewalls + + /// List firewall rules + #[endpoint { + method = GET, + path = "/v1/vpc-firewall-rules", + tags = ["vpcs"], + }] + async fn vpc_firewall_rules_view( + rqctx: RequestContext, + query_params: Query, + ) -> Result, HttpError>; + + // Note: the limits in the below comment come from the firewall rules model + // file, nexus/db-model/src/vpc_firewall_rule.rs. 
+ + /// Replace firewall rules + /// + /// The maximum number of rules per VPC is 1024. + /// + /// Targets are used to specify the set of instances to which a firewall rule + /// applies. You can target instances directly by name, or specify a VPC, VPC + /// subnet, IP, or IP subnet, which will apply the rule to traffic going to + /// all matching instances. Targets are additive: the rule applies to instances + /// matching ANY target. The maximum number of targets is 256. + /// + /// Filters reduce the scope of a firewall rule. Without filters, the rule + /// applies to all packets to the targets (or from the targets, if it's an + /// outbound rule). With multiple filters, the rule applies only to packets + /// matching ALL filters. The maximum number of each type of filter is 256. + #[endpoint { + method = PUT, + path = "/v1/vpc-firewall-rules", + tags = ["vpcs"], + }] + async fn vpc_firewall_rules_update( + rqctx: RequestContext, + query_params: Query, + router_params: TypedBody, + ) -> Result, HttpError>; + + // VPC Routers + + /// List routers + #[endpoint { + method = GET, + path = "/v1/vpc-routers", + tags = ["vpcs"], + }] + async fn vpc_router_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + /// Fetch router + #[endpoint { + method = GET, + path = "/v1/vpc-routers/{router}", + tags = ["vpcs"], + }] + async fn vpc_router_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Create VPC router + #[endpoint { + method = POST, + path = "/v1/vpc-routers", + tags = ["vpcs"], + }] + async fn vpc_router_create( + rqctx: RequestContext, + query_params: Query, + create_params: TypedBody, + ) -> Result, HttpError>; + + /// Delete router + #[endpoint { + method = DELETE, + path = "/v1/vpc-routers/{router}", + tags = ["vpcs"], + }] + async fn vpc_router_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Update router + #[endpoint 
{ + method = PUT, + path = "/v1/vpc-routers/{router}", + tags = ["vpcs"], + }] + async fn vpc_router_update( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + router_params: TypedBody, + ) -> Result, HttpError>; + + /// List routes + /// + /// List the routes associated with a router in a particular VPC. + #[endpoint { + method = GET, + path = "/v1/vpc-router-routes", + tags = ["vpcs"], + }] + async fn vpc_router_route_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + // Vpc Router Routes + + /// Fetch route + #[endpoint { + method = GET, + path = "/v1/vpc-router-routes/{route}", + tags = ["vpcs"], + }] + async fn vpc_router_route_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Create route + #[endpoint { + method = POST, + path = "/v1/vpc-router-routes", + tags = ["vpcs"], + }] + async fn vpc_router_route_create( + rqctx: RequestContext, + query_params: Query, + create_params: TypedBody, + ) -> Result, HttpError>; + + /// Delete route + #[endpoint { + method = DELETE, + path = "/v1/vpc-router-routes/{route}", + tags = ["vpcs"], + }] + async fn vpc_router_route_delete( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Update route + #[endpoint { + method = PUT, + path = "/v1/vpc-router-routes/{route}", + tags = ["vpcs"], + }] + async fn vpc_router_route_update( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + router_params: TypedBody, + ) -> Result, HttpError>; + + // Racks + + /// List racks + #[endpoint { + method = GET, + path = "/v1/system/hardware/racks", + tags = ["system/hardware"], + }] + async fn rack_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Fetch rack + #[endpoint { + method = GET, + path = "/v1/system/hardware/racks/{rack_id}", + tags = ["system/hardware"], + }] + async fn rack_view( + rqctx: RequestContext, + path_params: 
Path, + ) -> Result, HttpError>; + + /// List uninitialized sleds + #[endpoint { + method = GET, + path = "/v1/system/hardware/sleds-uninitialized", + tags = ["system/hardware"] + }] + async fn sled_list_uninitialized( + rqctx: RequestContext, + query: Query>, + ) -> Result>, HttpError>; + + /// Add sled to initialized rack + // + // TODO: In the future this should really be a PUT request, once we resolve + // https://github.com/oxidecomputer/omicron/issues/4494. It should also + // explicitly be tied to a rack via a `rack_id` path param. For now we assume + // we are only operating on single rack systems. + #[endpoint { + method = POST, + path = "/v1/system/hardware/sleds", + tags = ["system/hardware"] + }] + async fn sled_add( + rqctx: RequestContext, + sled: TypedBody, + ) -> Result, HttpError>; + + // Sleds + + /// List sleds + #[endpoint { + method = GET, + path = "/v1/system/hardware/sleds", + tags = ["system/hardware"], + }] + async fn sled_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Fetch sled + #[endpoint { + method = GET, + path = "/v1/system/hardware/sleds/{sled_id}", + tags = ["system/hardware"], + }] + async fn sled_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Set sled provision policy + #[endpoint { + method = PUT, + path = "/v1/system/hardware/sleds/{sled_id}/provision-policy", + tags = ["system/hardware"], + }] + async fn sled_set_provision_policy( + rqctx: RequestContext, + path_params: Path, + new_provision_state: TypedBody, + ) -> Result, HttpError>; + + /// List instances running on given sled + #[endpoint { + method = GET, + path = "/v1/system/hardware/sleds/{sled_id}/instances", + tags = ["system/hardware"], + }] + async fn sled_instance_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result>, HttpError>; + + // Physical disks + + /// List physical disks + #[endpoint { + method = GET, + path = "/v1/system/hardware/disks", + 
tags = ["system/hardware"], + }] + async fn physical_disk_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Get a physical disk + #[endpoint { + method = GET, + path = "/v1/system/hardware/disks/{disk_id}", + tags = ["system/hardware"], + }] + async fn physical_disk_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + // Switches + + /// List switches + #[endpoint { + method = GET, + path = "/v1/system/hardware/switches", + tags = ["system/hardware"], + }] + async fn switch_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Fetch switch + #[endpoint { + method = GET, + path = "/v1/system/hardware/switches/{switch_id}", + tags = ["system/hardware"], + }] + async fn switch_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// List physical disks attached to sleds + #[endpoint { + method = GET, + path = "/v1/system/hardware/sleds/{sled_id}/disks", + tags = ["system/hardware"], + }] + async fn sled_physical_disk_list( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result>, HttpError>; + + // Metrics + + /// View metrics + /// + /// View CPU, memory, or storage utilization metrics at the fleet or silo level. + #[endpoint { + method = GET, + path = "/v1/system/metrics/{metric_name}", + tags = ["system/metrics"], + }] + async fn system_metric( + rqctx: RequestContext, + path_params: Path, + pag_params: Query< + PaginationParams, + >, + other_params: Query, + ) -> Result< + HttpResponseOk>, + HttpError, + >; + + /// View metrics + /// + /// View CPU, memory, or storage utilization metrics at the silo or project level. 
+ #[endpoint { + method = GET, + path = "/v1/metrics/{metric_name}", + tags = ["metrics"], + }] + async fn silo_metric( + rqctx: RequestContext, + path_params: Path, + pag_params: Query< + PaginationParams, + >, + other_params: Query, + ) -> Result< + HttpResponseOk>, + HttpError, + >; + + /// List timeseries schemas + #[endpoint { + method = GET, + path = "/v1/timeseries/schema", + tags = ["metrics"], + }] + async fn timeseries_schema_list( + rqctx: RequestContext, + pag_params: Query, + ) -> Result< + HttpResponseOk>, + HttpError, + >; + + // TODO: can we link to an OxQL reference? Do we have one? Can we even do links? + + /// Run timeseries query + /// + /// Queries are written in OxQL. + #[endpoint { + method = POST, + path = "/v1/timeseries/query", + tags = ["metrics"], + }] + async fn timeseries_query( + rqctx: RequestContext, + body: TypedBody, + ) -> Result, HttpError>; + + // Updates + + /// Upload TUF repository + #[endpoint { + method = PUT, + path = "/v1/system/update/repository", + tags = ["system/update"], + unpublished = true, + }] + async fn system_update_put_repository( + rqctx: RequestContext, + query: Query, + body: StreamingBody, + ) -> Result, HttpError>; + + /// Fetch TUF repository description + /// + /// Fetch description of TUF repository by system version. 
+ #[endpoint { + method = GET, + path = "/v1/system/update/repository/{system_version}", + tags = ["system/update"], + unpublished = true, + }] + async fn system_update_get_repository( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + // Silo users + + /// List users + #[endpoint { + method = GET, + path = "/v1/users", + tags = ["silos"], + }] + async fn user_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + // Silo groups + + /// List groups + #[endpoint { + method = GET, + path = "/v1/groups", + tags = ["silos"], + }] + async fn group_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Fetch group + #[endpoint { + method = GET, + path = "/v1/groups/{group_id}", + tags = ["silos"], + }] + async fn group_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + // Built-in (system) users + + /// List built-in users + #[endpoint { + method = GET, + path = "/v1/system/users-builtin", + tags = ["system/silos"], + }] + async fn user_builtin_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Fetch built-in user + #[endpoint { + method = GET, + path = "/v1/system/users-builtin/{user}", + tags = ["system/silos"], + }] + async fn user_builtin_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + // Built-in roles + + /// List built-in roles + #[endpoint { + method = GET, + path = "/v1/system/roles", + tags = ["roles"], + }] + async fn role_list( + rqctx: RequestContext, + query_params: Query< + PaginationParams, + >, + ) -> Result>, HttpError>; + + /// Fetch built-in role + #[endpoint { + method = GET, + path = "/v1/system/roles/{role_name}", + tags = ["roles"], + }] + async fn role_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + // Current user + + /// Fetch user for current session + #[endpoint { + method = GET, + path = "/v1/me", + tags = 
["session"], + }] + async fn current_user_view( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Fetch current user's groups + #[endpoint { + method = GET, + path = "/v1/me/groups", + tags = ["session"], + }] + async fn current_user_groups( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + // Per-user SSH public keys + + /// List SSH public keys + /// + /// Lists SSH public keys for the currently authenticated user. + #[endpoint { + method = GET, + path = "/v1/me/ssh-keys", + tags = ["session"], + }] + async fn current_user_ssh_key_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Create SSH public key + /// + /// Create an SSH public key for the currently authenticated user. + #[endpoint { + method = POST, + path = "/v1/me/ssh-keys", + tags = ["session"], + }] + async fn current_user_ssh_key_create( + rqctx: RequestContext, + new_key: TypedBody, + ) -> Result, HttpError>; + + /// Fetch SSH public key + /// + /// Fetch SSH public key associated with the currently authenticated user. + #[endpoint { + method = GET, + path = "/v1/me/ssh-keys/{ssh_key}", + tags = ["session"], + }] + async fn current_user_ssh_key_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Delete SSH public key + /// + /// Delete an SSH public key associated with the currently authenticated user. 
+ #[endpoint { + method = DELETE, + path = "/v1/me/ssh-keys/{ssh_key}", + tags = ["session"], + }] + async fn current_user_ssh_key_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + // Probes (experimental) + + /// List instrumentation probes + #[endpoint { + method = GET, + path = "/experimental/v1/probes", + tags = ["hidden"], // system/probes: only one tag is allowed + }] + async fn probe_list( + rqctx: RequestContext, + query_params: Query>, + ) -> Result>, HttpError>; + + /// View instrumentation probe + #[endpoint { + method = GET, + path = "/experimental/v1/probes/{probe}", + tags = ["hidden"], // system/probes: only one tag is allowed + }] + async fn probe_view( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Create instrumentation probe + #[endpoint { + method = POST, + path = "/experimental/v1/probes", + tags = ["hidden"], // system/probes: only one tag is allowed + }] + async fn probe_create( + rqctx: RequestContext, + query_params: Query, + new_probe: TypedBody, + ) -> Result, HttpError>; + + /// Delete instrumentation probe + #[endpoint { + method = DELETE, + path = "/experimental/v1/probes/{probe}", + tags = ["hidden"], // system/probes: only one tag is allowed + }] + async fn probe_delete( + rqctx: RequestContext, + query_params: Query, + path_params: Path, + ) -> Result; + + // Console API: logins + + /// SAML login console page (just a link to the IdP) + #[endpoint { + method = GET, + path = "/login/{silo_name}/saml/{provider_name}", + tags = ["login"], + unpublished = true, + }] + async fn login_saml_begin( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Get a redirect straight to the IdP + /// + /// Console uses this to avoid having to ask the API anything about the IdP. It + /// already knows the IdP name from the path, so it can just link to this path + /// and rely on Nexus to redirect to the actual IdP. 
+ #[endpoint { + method = GET, + path = "/login/{silo_name}/saml/{provider_name}/redirect", + tags = ["login"], + unpublished = true, + }] + async fn login_saml_redirect( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result; + + /// Authenticate a user via SAML + #[endpoint { + method = POST, + path = "/login/{silo_name}/saml/{provider_name}", + tags = ["login"], + }] + async fn login_saml( + rqctx: RequestContext, + path_params: Path, + body_bytes: dropshot::UntypedBody, + ) -> Result; + + #[endpoint { + method = GET, + path = "/login/{silo_name}/local", + tags = ["login"], + unpublished = true, + }] + async fn login_local_begin( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// Authenticate a user via username and password + #[endpoint { + method = POST, + path = "/v1/login/{silo_name}/local", + tags = ["login"], + }] + async fn login_local( + rqctx: RequestContext, + path_params: Path, + credentials: TypedBody, + ) -> Result, HttpError>; + + /// Log user out of web console by deleting session on client and server + #[endpoint { + // important for security that this be a POST despite the empty req body + method = POST, + path = "/v1/logout", + tags = ["hidden"], + }] + async fn logout( + rqctx: RequestContext, + cookies: Cookies, + ) -> Result, HttpError>; + + /// Redirect to a login page for the current Silo (if that can be determined) + #[endpoint { + method = GET, + path = "/login", + unpublished = true, + }] + async fn login_begin( + rqctx: RequestContext, + query_params: Query, + ) -> Result; + + // Console API: Pages + + #[endpoint { + method = GET, + path = "/projects/{path:.*}", + unpublished = true, + }] + async fn console_projects( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + #[endpoint { + method = GET, + path = "/settings/{path:.*}", + unpublished = true, + }] + async fn console_settings_page( + rqctx: RequestContext, + path_params: 
Path, + ) -> Result, HttpError>; + + #[endpoint { + method = GET, + path = "/system/{path:.*}", + unpublished = true, + }] + async fn console_system_page( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + #[endpoint { + method = GET, + path = "/lookup/{path:.*}", + unpublished = true, + }] + async fn console_lookup( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + #[endpoint { + method = GET, + path = "/", + unpublished = true, + }] + async fn console_root( + rqctx: RequestContext, + ) -> Result, HttpError>; + + #[endpoint { + method = GET, + path = "/projects-new", + unpublished = true, + }] + async fn console_projects_new( + rqctx: RequestContext, + ) -> Result, HttpError>; + + #[endpoint { + method = GET, + path = "/images", + unpublished = true, + }] + async fn console_silo_images( + rqctx: RequestContext, + ) -> Result, HttpError>; + + #[endpoint { + method = GET, + path = "/utilization", + unpublished = true, + }] + async fn console_silo_utilization( + rqctx: RequestContext, + ) -> Result, HttpError>; + + #[endpoint { + method = GET, + path = "/access", + unpublished = true, + }] + async fn console_silo_access( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Serve a static asset + #[endpoint { + method = GET, + path = "/assets/{path:.*}", + unpublished = true, + }] + async fn asset( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Start an OAuth 2.0 Device Authorization Grant + /// + /// This endpoint is designed to be accessed from an *unauthenticated* + /// API client. It generates and records a `device_code` and `user_code` + /// which must be verified and confirmed prior to a token being granted. 
+ #[endpoint { + method = POST, + path = "/device/auth", + content_type = "application/x-www-form-urlencoded", + tags = ["hidden"], // "token" + }] + async fn device_auth_request( + rqctx: RequestContext, + params: TypedBody, + ) -> Result, HttpError>; + + /// Verify an OAuth 2.0 Device Authorization Grant + /// + /// This endpoint should be accessed in a full user agent (e.g., + /// a browser). If the user is not logged in, we redirect them to + /// the login page and use the `state` parameter to get them back + /// here on completion. If they are logged in, serve up the console + /// verification page so they can verify the user code. + #[endpoint { + method = GET, + path = "/device/verify", + unpublished = true, + }] + async fn device_auth_verify( + rqctx: RequestContext, + ) -> Result, HttpError>; + + #[endpoint { + method = GET, + path = "/device/success", + unpublished = true, + }] + async fn device_auth_success( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Confirm an OAuth 2.0 Device Authorization Grant + /// + /// This endpoint is designed to be accessed by the user agent (browser), + /// not the client requesting the token. So we do not actually return the + /// token here; it will be returned in response to the poll on `/device/token`. + #[endpoint { + method = POST, + path = "/device/confirm", + tags = ["hidden"], // "token" + }] + async fn device_auth_confirm( + rqctx: RequestContext, + params: TypedBody, + ) -> Result; + + /// Request a device access token + /// + /// This endpoint should be polled by the client until the user code + /// is verified and the grant is confirmed. + #[endpoint { + method = POST, + path = "/device/token", + content_type = "application/x-www-form-urlencoded", + tags = ["hidden"], // "token" + }] + async fn device_access_token( + rqctx: RequestContext, + params: TypedBody, + ) -> Result, HttpError>; +} + +/// Perform extra validations on the OpenAPI spec. 
+pub fn validate_api(spec: &OpenAPI, mut cx: ValidationContext<'_>) { + if spec.openapi != "3.0.3" { + cx.report_error(anyhow!( + "Expected OpenAPI version to be 3.0.3, found {}", + spec.openapi, + )); + } + if spec.info.title != "Oxide Region API" { + cx.report_error(anyhow!( + "Expected OpenAPI title to be 'Oxide Region API', found '{}'", + spec.info.title, + )); + } + if spec.info.version != API_VERSION { + cx.report_error(anyhow!( + "Expected OpenAPI version to be '{}', found '{}'", + API_VERSION, + spec.info.version, + )); + } + + // Spot check a couple of items. + if spec.paths.paths.is_empty() { + cx.report_error(anyhow!("Expected at least one path in the spec")); + } + if spec.paths.paths.get("/v1/projects").is_none() { + cx.report_error(anyhow!("Expected a path for /v1/projects")); + } + + // Construct a string that helps us identify the organization of tags and + // operations. + let mut ops_by_tag = + BTreeMap::>::new(); + + let mut ops_by_tag_valid = true; + for (path, method, op) in spec.operations() { + // Make sure each operation has exactly one tag. Note, we intentionally + // do this before validating the OpenAPI output as fixing an error here + // would necessitate refreshing the spec file again. + if op.tags.len() != 1 { + cx.report_error(anyhow!( + "operation '{}' has {} tags rather than 1", + op.operation_id.as_ref().unwrap(), + op.tags.len() + )); + ops_by_tag_valid = false; + continue; + } + + // Every non-hidden endpoint must have a summary + if !op.tags.contains(&"hidden".to_string()) && op.summary.is_none() { + cx.report_error(anyhow!( + "operation '{}' is missing a summary doc comment", + op.operation_id.as_ref().unwrap() + )); + // This error does not prevent `ops_by_tag` from being populated + // correctly, so we can continue. 
+ } + + ops_by_tag + .entry(op.tags.first().unwrap().to_string()) + .or_default() + .push(( + op.operation_id.as_ref().unwrap().to_string(), + method.to_string().to_uppercase(), + path.to_string(), + )); + } + + if ops_by_tag_valid { + let mut tags = String::new(); + for (tag, mut ops) in ops_by_tag { + ops.sort(); + tags.push_str(&format!( + r#"API operations found with tag "{}""#, + tag + )); + tags.push_str(&format!( + "\n{:40} {:8} {}\n", + "OPERATION ID", "METHOD", "URL PATH" + )); + for (operation_id, method, path) in ops { + tags.push_str(&format!( + "{:40} {:8} {}\n", + operation_id, method, path + )); + } + tags.push('\n'); + } + + // When this fails, verify that operations on which you're adding, + // renaming, or changing the tags are what you intend. + cx.record_file_contents( + "nexus/external-api/output/nexus_tags.txt", + tags.into_bytes(), + ); + } +} + +pub type IpPoolRangePaginationParams = + PaginationParams; + +/// Type used to paginate request to list timeseries schema. +pub type TimeseriesSchemaPaginationParams = + PaginationParams; diff --git a/nexus/src/app/metrics.rs b/nexus/src/app/metrics.rs index 3a6e7e27be..4dc7309e76 100644 --- a/nexus/src/app/metrics.rs +++ b/nexus/src/app/metrics.rs @@ -4,7 +4,6 @@ //! 
Metrics -use crate::external_api::http_entrypoints::SystemMetricName; use crate::external_api::params::ResourceMetrics; use dropshot::PaginationParams; use nexus_db_queries::authz; @@ -12,10 +11,10 @@ use nexus_db_queries::{ context::OpContext, db::{fixed_data::FLEET_ID, lookup}, }; +use nexus_external_api::TimeseriesSchemaPaginationParams; +use nexus_types::external_api::params::SystemMetricName; use omicron_common::api::external::{Error, InternalContext}; -use oximeter_db::{ - Measurement, TimeseriesSchema, TimeseriesSchemaPaginationParams, -}; +use oximeter_db::{Measurement, TimeseriesSchema}; use std::num::NonZeroU32; impl super::Nexus { diff --git a/nexus/src/bin/nexus.rs b/nexus/src/bin/nexus.rs index 33870b39e3..01e4bfc3af 100644 --- a/nexus/src/bin/nexus.rs +++ b/nexus/src/bin/nexus.rs @@ -16,20 +16,11 @@ use clap::Parser; use nexus_config::NexusConfig; use omicron_common::cmd::fatal; use omicron_common::cmd::CmdError; -use omicron_nexus::run_openapi_external; use omicron_nexus::run_server; #[derive(Debug, Parser)] #[clap(name = "nexus", about = "See README.adoc for more information")] struct Args { - #[clap( - short = 'O', - long = "openapi", - help = "Print the external OpenAPI Spec document and exit", - action - )] - openapi: bool, - #[clap(name = "CONFIG_FILE_PATH", action)] config_file_path: Option, } @@ -44,23 +35,19 @@ async fn main() { async fn do_run() -> Result<(), CmdError> { let args = Args::parse(); - if args.openapi { - run_openapi_external().map_err(|err| CmdError::Failure(anyhow!(err))) - } else { - let config_path = match args.config_file_path { - Some(path) => path, - None => { - use clap::CommandFactory; - - eprintln!("{}", Args::command().render_help()); - return Err(CmdError::Usage( - "CONFIG_FILE_PATH is required".to_string(), - )); - } - }; - let config = NexusConfig::from_file(config_path) - .map_err(|e| CmdError::Failure(anyhow!(e)))?; - - run_server(&config).await.map_err(|err| CmdError::Failure(anyhow!(err))) - } + let config_path 
= match args.config_file_path { + Some(path) => path, + None => { + use clap::CommandFactory; + + eprintln!("{}", Args::command().render_help()); + return Err(CmdError::Usage( + "CONFIG_FILE_PATH is required".to_string(), + )); + } + }; + let config = NexusConfig::from_file(config_path) + .map_err(|e| CmdError::Failure(anyhow!(e)))?; + + run_server(&config).await.map_err(|err| CmdError::Failure(anyhow!(err))) } diff --git a/nexus/src/external_api/console_api.rs b/nexus/src/external_api/console_api.rs index 2169b631a7..4ea8290bf9 100644 --- a/nexus/src/external_api/console_api.rs +++ b/nexus/src/external_api/console_api.rs @@ -25,11 +25,11 @@ use crate::context::ApiContext; use anyhow::Context; use camino::{Utf8Path, Utf8PathBuf}; use dropshot::{ - endpoint, http_response_found, http_response_see_other, HttpError, - HttpResponseFound, HttpResponseHeaders, HttpResponseSeeOther, - HttpResponseUpdatedNoContent, Path, Query, RequestContext, + http_response_found, http_response_see_other, HttpError, HttpResponseFound, + HttpResponseHeaders, HttpResponseSeeOther, HttpResponseUpdatedNoContent, + Path, Query, RequestContext, }; -use http::{header, HeaderName, HeaderValue, Response, StatusCode, Uri}; +use http::{header, HeaderName, HeaderValue, Response, StatusCode}; use hyper::Body; use nexus_db_model::AuthenticationMode; use nexus_db_queries::authn::silos::IdentityProviderType; @@ -42,18 +42,16 @@ use nexus_db_queries::{ db::identity::Asset, }; use nexus_types::authn::cookies::Cookies; -use nexus_types::external_api::params; +use nexus_types::external_api::params::{self, RelativeUri}; use nexus_types::identity::Resource; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::{DataPageParams, Error, NameOrId}; use once_cell::sync::Lazy; -use parse_display::Display; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_urlencoded; use std::collections::HashMap; use std::num::NonZeroU32; -use std::str::FromStr; 
use tokio::fs::File; use tokio_util::codec::{BytesCodec, FramedRead}; @@ -194,12 +192,6 @@ use tokio_util::codec::{BytesCodec, FramedRead}; // // /logout/{silo_name}/{provider_name} -#[derive(Deserialize, JsonSchema)] -pub struct LoginToProviderPathParam { - pub silo_name: nexus_db_queries::db::model::Name, - pub provider_name: nexus_db_queries::db::model::Name, -} - #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct RelayState { pub redirect_uri: Option, @@ -228,36 +220,18 @@ impl RelayState { } } -/// SAML login console page (just a link to the IdP) -#[endpoint { - method = GET, - path = "/login/{silo_name}/saml/{provider_name}", - tags = ["login"], - unpublished = true, -}] pub(crate) async fn login_saml_begin( rqctx: RequestContext, - _path_params: Path, - _query_params: Query, + _path_params: Path, + _query_params: Query, ) -> Result, HttpError> { serve_console_index(rqctx).await } -/// Get a redirect straight to the IdP -/// -/// Console uses this to avoid having to ask the API anything about the IdP. It -/// already knows the IdP name from the path, so it can just link to this path -/// and rely on Nexus to redirect to the actual IdP. 
-#[endpoint { - method = GET, - path = "/login/{silo_name}/saml/{provider_name}/redirect", - tags = ["login"], - unpublished = true, -}] pub(crate) async fn login_saml_redirect( rqctx: RequestContext, - path_params: Path, - query_params: Query, + path_params: Path, + query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { @@ -272,8 +246,8 @@ pub(crate) async fn login_saml_redirect( .datastore() .identity_provider_lookup( &opctx, - &path_params.silo_name, - &path_params.provider_name, + &path_params.silo_name.into(), + &path_params.provider_name.into(), ) .await?; @@ -308,15 +282,9 @@ pub(crate) async fn login_saml_redirect( .await } -/// Authenticate a user via SAML -#[endpoint { - method = POST, - path = "/login/{silo_name}/saml/{provider_name}", - tags = ["login"], -}] pub(crate) async fn login_saml( rqctx: RequestContext, - path_params: Path, + path_params: Path, body_bytes: dropshot::UntypedBody, ) -> Result { let apictx = rqctx.context(); @@ -333,8 +301,8 @@ pub(crate) async fn login_saml( .datastore() .identity_provider_lookup( &opctx, - &path_params.silo_name, - &path_params.provider_name, + &path_params.silo_name.into(), + &path_params.provider_name.into(), ) .await?; @@ -395,21 +363,10 @@ pub(crate) async fn login_saml( .await } -#[derive(Deserialize, JsonSchema)] -pub struct LoginPathParam { - pub silo_name: nexus_db_queries::db::model::Name, -} - -#[endpoint { - method = GET, - path = "/login/{silo_name}/local", - tags = ["login"], - unpublished = true, -}] pub(crate) async fn login_local_begin( rqctx: RequestContext, - _path_params: Path, - _query_params: Query, + _path_params: Path, + _query_params: Query, ) -> Result, HttpError> { // TODO: figure out why instrumenting doesn't work // let apictx = rqctx.context(); @@ -418,15 +375,9 @@ pub(crate) async fn login_local_begin( serve_console_index(rqctx).await } -/// Authenticate a user via username and password -#[endpoint { - method = POST, - path = 
"/v1/login/{silo_name}/local", - tags = ["login"], -}] pub(crate) async fn login_local( rqctx: RequestContext, - path_params: Path, + path_params: Path, credentials: dropshot::TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); @@ -485,13 +436,6 @@ async fn create_session( Ok(session) } -/// Log user out of web console by deleting session on client and server -#[endpoint { - // important for security that this be a POST despite the empty req body - method = POST, - path = "/v1/logout", - tags = ["hidden"], -}] pub(crate) async fn logout( rqctx: RequestContext, cookies: Cookies, @@ -539,53 +483,6 @@ pub(crate) async fn logout( .await } -#[derive(Deserialize, JsonSchema)] -pub struct RestPathParam { - path: Vec, -} - -/// This is meant as a security feature. We want to ensure we never redirect to -/// a URI on a different host. -#[derive(Serialize, Deserialize, Debug, JsonSchema, Clone, Display)] -#[serde(try_from = "String")] -#[display("{0}")] -pub struct RelativeUri(String); - -impl FromStr for RelativeUri { - type Err = String; - - fn from_str(s: &str) -> Result { - Self::try_from(s.to_string()) - } -} - -impl TryFrom for RelativeUri { - type Error = String; - - fn try_from(uri: Uri) -> Result { - if uri.host().is_none() && uri.scheme().is_none() { - Ok(Self(uri.to_string())) - } else { - Err(format!("\"{}\" is not a relative URI", uri)) - } - } -} - -impl TryFrom for RelativeUri { - type Error = String; - - fn try_from(s: String) -> Result { - s.parse::() - .map_err(|_| format!("\"{}\" is not a relative URI", s)) - .and_then(|uri| Self::try_from(uri)) - } -} - -#[derive(Serialize, Deserialize, JsonSchema)] -pub struct LoginUrlQuery { - redirect_uri: Option, -} - /// Generate URI to the appropriate login form for this Silo. 
Optional /// `redirect_uri` represents the URL to send the user back to after successful /// login, and is included in `state` query param if present @@ -642,7 +539,7 @@ async fn get_login_url( // Stick redirect_url into the state param and URL encode it so it can be // used as a query string. We assume it's not already encoded. - let query_data = LoginUrlQuery { redirect_uri }; + let query_data = params::LoginUrlQuery { redirect_uri }; Ok(match serde_urlencoded::to_string(query_data) { // only put the ? in front if there's something there @@ -652,15 +549,9 @@ async fn get_login_url( }) } -/// Redirect to a login page for the current Silo (if that can be determined) -#[endpoint { - method = GET, - path = "/login", - unpublished = true, -}] pub(crate) async fn login_begin( rqctx: RequestContext, - query_params: Query, + query_params: Query, ) -> Result { let apictx = rqctx.context(); let handler = async { @@ -694,7 +585,11 @@ pub(crate) async fn console_index_or_login_redirect( .request .uri() .path_and_query() - .map(|p| RelativeUri(p.to_string())); + .map(|p| p.to_string().parse::()) + .transpose() + .map_err(|e| { + HttpError::for_internal_error(format!("parsing URI: {}", e)) + })?; let login_url = get_login_url(&rqctx, redirect_uri).await?; Ok(Response::builder() @@ -709,8 +604,7 @@ pub(crate) async fn console_index_or_login_redirect( // to manually define more specific routes. macro_rules! console_page { - ($name:ident, $path:literal) => { - #[endpoint { method = GET, path = $path, unpublished = true, }] + ($name:ident) => { pub(crate) async fn $name( rqctx: RequestContext, ) -> Result, HttpError> { @@ -721,26 +615,25 @@ macro_rules! console_page { // only difference is the _path_params arg macro_rules! 
console_page_wildcard { - ($name:ident, $path:literal) => { - #[endpoint { method = GET, path = $path, unpublished = true, }] + ($name:ident) => { pub(crate) async fn $name( rqctx: RequestContext, - _path_params: Path, + _path_params: Path, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await } }; } -console_page_wildcard!(console_projects, "/projects/{path:.*}"); -console_page_wildcard!(console_settings_page, "/settings/{path:.*}"); -console_page_wildcard!(console_system_page, "/system/{path:.*}"); -console_page_wildcard!(console_lookup, "/lookup/{path:.*}"); -console_page!(console_root, "/"); -console_page!(console_projects_new, "/projects-new"); -console_page!(console_silo_images, "/images"); -console_page!(console_silo_utilization, "/utilization"); -console_page!(console_silo_access, "/access"); +console_page_wildcard!(console_projects); +console_page_wildcard!(console_settings_page); +console_page_wildcard!(console_system_page); +console_page_wildcard!(console_lookup); +console_page!(console_root); +console_page!(console_projects_new); +console_page!(console_silo_images); +console_page!(console_silo_utilization); +console_page!(console_silo_access); /// Check if `gzip` is listed in the request's `Accept-Encoding` header. fn accept_gz(header_value: &str) -> bool { @@ -868,15 +761,10 @@ async fn serve_static( /// /// Note that Dropshot protects us from directory traversal attacks (e.g. /// `/assets/../../../etc/passwd`). This is tested in the `console_api` -/// integration tests. 
-#[endpoint { - method = GET, - path = "/assets/{path:.*}", - unpublished = true, -}] +/// integration tests. pub(crate) async fn asset( rqctx: RequestContext, - path_params: Path, + path_params: Path, ) -> Result, HttpError> { // asset URLs contain hashes, so cache for 1 year const CACHE_CONTROL: HeaderValue = diff --git a/nexus/src/external_api/device_auth.rs b/nexus/src/external_api/device_auth.rs index 883dbf4e19..87ccbd9752 100644 --- a/nexus/src/external_api/device_auth.rs +++ b/nexus/src/external_api/device_auth.rs @@ -14,16 +14,14 @@ use super::views::DeviceAccessTokenGrant; use crate::app::external_endpoints::authority_for_request; use crate::ApiContext; use dropshot::{ - endpoint, HttpError, HttpResponseUpdatedNoContent, RequestContext, - TypedBody, + HttpError, HttpResponseUpdatedNoContent, RequestContext, TypedBody, }; use http::{header, Response, StatusCode}; use hyper::Body; use nexus_db_queries::db::model::DeviceAccessToken; +use nexus_types::external_api::params; use omicron_common::api::external::InternalContext; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use uuid::Uuid; +use serde::Serialize; // Token granting à la RFC 8628 (OAuth 2.0 Device Authorization Grant) @@ -46,25 +44,9 @@ where .body(body.into())?) } -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct DeviceAuthRequest { - pub client_id: Uuid, -} - -/// Start an OAuth 2.0 Device Authorization Grant -/// -/// This endpoint is designed to be accessed from an *unauthenticated* -/// API client. It generates and records a `device_code` and `user_code` -/// which must be verified and confirmed prior to a token being granted. 
-#[endpoint { - method = POST, - path = "/device/auth", - content_type = "application/x-www-form-urlencoded", - tags = ["hidden"], // "token" -}] pub(crate) async fn device_auth_request( rqctx: RequestContext, - params: TypedBody, + params: TypedBody, ) -> Result, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.context.nexus; @@ -99,53 +81,21 @@ pub(crate) async fn device_auth_request( .await } -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct DeviceAuthVerify { - pub user_code: String, -} - -/// Verify an OAuth 2.0 Device Authorization Grant -/// -/// This endpoint should be accessed in a full user agent (e.g., -/// a browser). If the user is not logged in, we redirect them to -/// the login page and use the `state` parameter to get them back -/// here on completion. If they are logged in, serve up the console -/// verification page so they can verify the user code. -#[endpoint { - method = GET, - path = "/device/verify", - unpublished = true, -}] pub(crate) async fn device_auth_verify( rqctx: RequestContext, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await } -#[endpoint { - method = GET, - path = "/device/success", - unpublished = true, -}] pub(crate) async fn device_auth_success( rqctx: RequestContext, ) -> Result, HttpError> { console_index_or_login_redirect(rqctx).await } -/// Confirm an OAuth 2.0 Device Authorization Grant -/// -/// This endpoint is designed to be accessed by the user agent (browser), -/// not the client requesting the token. So we do not actually return the -/// token here; it will be returned in response to the poll on `/device/token`. 
-#[endpoint { - method = POST, - path = "/device/confirm", - tags = ["hidden"], // "token" -}] pub(crate) async fn device_auth_confirm( rqctx: RequestContext, - params: TypedBody, + params: TypedBody, ) -> Result { let apictx = rqctx.context(); let nexus = &apictx.context.nexus; @@ -171,13 +121,6 @@ pub(crate) async fn device_auth_confirm( .await } -#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] -pub struct DeviceAccessTokenRequest { - pub grant_type: String, - pub device_code: String, - pub client_id: Uuid, -} - #[derive(Debug)] pub enum DeviceAccessTokenResponse { Granted(DeviceAccessToken), @@ -186,23 +129,12 @@ pub enum DeviceAccessTokenResponse { Denied, } -/// Request a device access token -/// -/// This endpoint should be polled by the client until the user code -/// is verified and the grant is confirmed. -#[endpoint { - method = POST, - path = "/device/token", - content_type = "application/x-www-form-urlencoded", - tags = ["hidden"], // "token" -}] pub(crate) async fn device_access_token( rqctx: RequestContext, - params: TypedBody, + params: params::DeviceAccessTokenRequest, ) -> Result, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.context.nexus; - let params = params.into_inner(); let handler = async { // RFC 8628 §3.4 if params.grant_type != "urn:ietf:params:oauth:grant-type:device_code" { diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 7c89c86f1b..a297eaa533 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -14,8 +14,8 @@ use super::{ }, }; use crate::{context::ApiContext, external_api::shared}; -use dropshot::HttpResponseAccepted; -use dropshot::HttpResponseCreated; +use dropshot::EmptyScanParams; +use dropshot::HttpError; use dropshot::HttpResponseDeleted; use dropshot::HttpResponseOk; use dropshot::HttpResponseUpdatedNoContent; @@ -27,12 +27,12 @@ use dropshot::RequestContext; use dropshot::ResultsPage; 
use dropshot::TypedBody; use dropshot::WhichPage; -use dropshot::{ - channel, endpoint, WebsocketChannelResult, WebsocketConnection, -}; use dropshot::{ApiDescription, StreamingBody}; -use dropshot::{ApiDescriptionRegisterError, HttpError}; -use dropshot::{ApiEndpoint, EmptyScanParams}; +use dropshot::{HttpResponseAccepted, HttpResponseFound, HttpResponseSeeOther}; +use dropshot::{HttpResponseCreated, HttpResponseHeaders}; +use dropshot::{WebsocketChannelResult, WebsocketConnection}; +use http::Response; +use hyper::Body; use ipnetwork::IpNetwork; use nexus_db_queries::authz; use nexus_db_queries::db; @@ -40,7 +40,14 @@ use nexus_db_queries::db::identity::Resource; use nexus_db_queries::db::lookup::ImageLookup; use nexus_db_queries::db::lookup::ImageParentLookup; use nexus_db_queries::db::model::Name; -use nexus_types::external_api::shared::{BfdStatus, ProbeInfo}; +use nexus_external_api::*; +use nexus_types::{ + authn::cookies::Cookies, + external_api::{ + params::SystemMetricsPathParam, + shared::{BfdStatus, ProbeInfo}, + }, +}; use omicron_common::api::external::http_pagination::data_page_params_for; use omicron_common::api::external::http_pagination::marker_for_name; use omicron_common::api::external::http_pagination::marker_for_name_or_id; @@ -83,392 +90,26 @@ use omicron_common::api::external::VpcFirewallRuleUpdateParams; use omicron_common::api::external::VpcFirewallRules; use omicron_common::bail_unless; use omicron_uuid_kinds::GenericUuid; -use parse_display::Display; use propolis_client::support::tungstenite::protocol::frame::coding::CloseCode; use propolis_client::support::tungstenite::protocol::{ CloseFrame, Role as WebSocketRole, }; use propolis_client::support::WebSocketStream; use ref_cast::RefCast; -use schemars::JsonSchema; -use serde::Deserialize; -use serde::Serialize; -use std::net::IpAddr; -use uuid::Uuid; type NexusApiDescription = ApiDescription; -// Temporary module just to add a level of indentation and avoid ruining blame -// for 
https://github.com/oxidecomputer/omicron/pull/6373. -mod imp { - use super::*; - - /// Returns a description of the external nexus API - pub(crate) fn external_api() -> NexusApiDescription { - fn register_endpoints( - api: &mut NexusApiDescription, - ) -> Result<(), ApiDescriptionRegisterError> { - api.register(ping)?; - - api.register(system_policy_view)?; - api.register(system_policy_update)?; - - api.register(policy_view)?; - api.register(policy_update)?; - - api.register(project_list)?; - api.register(project_create)?; - api.register(project_view)?; - api.register(project_delete)?; - api.register(project_update)?; - api.register(project_policy_view)?; - api.register(project_policy_update)?; - api.register(project_ip_pool_list)?; - api.register(project_ip_pool_view)?; - - // Operator-Accessible IP Pools API - api.register(ip_pool_list)?; - api.register(ip_pool_create)?; - api.register(ip_pool_silo_list)?; - api.register(ip_pool_silo_link)?; - api.register(ip_pool_silo_unlink)?; - api.register(ip_pool_silo_update)?; - api.register(ip_pool_view)?; - api.register(ip_pool_delete)?; - api.register(ip_pool_update)?; - // Variants for internal services - api.register(ip_pool_service_view)?; - api.register(ip_pool_utilization_view)?; - - // Operator-Accessible IP Pool Range API - api.register(ip_pool_range_list)?; - api.register(ip_pool_range_add)?; - api.register(ip_pool_range_remove)?; - // Variants for internal services - api.register(ip_pool_service_range_list)?; - api.register(ip_pool_service_range_add)?; - api.register(ip_pool_service_range_remove)?; - - api.register(floating_ip_list)?; - api.register(floating_ip_create)?; - api.register(floating_ip_view)?; - api.register(floating_ip_update)?; - api.register(floating_ip_delete)?; - api.register(floating_ip_attach)?; - api.register(floating_ip_detach)?; - - api.register(disk_list)?; - api.register(disk_create)?; - api.register(disk_view)?; - api.register(disk_delete)?; - api.register(disk_metrics_list)?; - - 
api.register(disk_bulk_write_import_start)?; - api.register(disk_bulk_write_import)?; - api.register(disk_bulk_write_import_stop)?; - api.register(disk_finalize_import)?; - - api.register(instance_list)?; - api.register(instance_view)?; - api.register(instance_create)?; - api.register(instance_delete)?; - api.register(instance_reboot)?; - api.register(instance_start)?; - api.register(instance_stop)?; - api.register(instance_disk_list)?; - api.register(instance_disk_attach)?; - api.register(instance_disk_detach)?; - api.register(instance_serial_console)?; - api.register(instance_serial_console_stream)?; - api.register(instance_ssh_public_key_list)?; - - api.register(image_list)?; - api.register(image_create)?; - api.register(image_view)?; - api.register(image_delete)?; - api.register(image_promote)?; - api.register(image_demote)?; - - api.register(snapshot_list)?; - api.register(snapshot_create)?; - api.register(snapshot_view)?; - api.register(snapshot_delete)?; - - api.register(vpc_list)?; - api.register(vpc_create)?; - api.register(vpc_view)?; - api.register(vpc_update)?; - api.register(vpc_delete)?; - - api.register(vpc_subnet_list)?; - api.register(vpc_subnet_view)?; - api.register(vpc_subnet_create)?; - api.register(vpc_subnet_delete)?; - api.register(vpc_subnet_update)?; - api.register(vpc_subnet_list_network_interfaces)?; - - api.register(instance_network_interface_create)?; - api.register(instance_network_interface_list)?; - api.register(instance_network_interface_view)?; - api.register(instance_network_interface_update)?; - api.register(instance_network_interface_delete)?; - - api.register(instance_external_ip_list)?; - api.register(instance_ephemeral_ip_attach)?; - api.register(instance_ephemeral_ip_detach)?; - - api.register(vpc_router_list)?; - api.register(vpc_router_view)?; - api.register(vpc_router_create)?; - api.register(vpc_router_delete)?; - api.register(vpc_router_update)?; - - api.register(vpc_router_route_list)?; - 
api.register(vpc_router_route_view)?; - api.register(vpc_router_route_create)?; - api.register(vpc_router_route_delete)?; - api.register(vpc_router_route_update)?; - - api.register(vpc_firewall_rules_view)?; - api.register(vpc_firewall_rules_update)?; - - api.register(rack_list)?; - api.register(rack_view)?; - api.register(sled_list)?; - api.register(sled_view)?; - api.register(sled_set_provision_policy)?; - api.register(sled_instance_list)?; - api.register(sled_physical_disk_list)?; - api.register(physical_disk_list)?; - api.register(physical_disk_view)?; - api.register(switch_list)?; - api.register(switch_view)?; - api.register(sled_list_uninitialized)?; - api.register(sled_add)?; - - api.register(user_builtin_list)?; - api.register(user_builtin_view)?; - - api.register(role_list)?; - api.register(role_view)?; - - api.register(current_user_view)?; - api.register(current_user_groups)?; - api.register(current_user_ssh_key_list)?; - api.register(current_user_ssh_key_view)?; - api.register(current_user_ssh_key_create)?; - api.register(current_user_ssh_key_delete)?; - - // Customer network integration - api.register(networking_address_lot_list)?; - api.register(networking_address_lot_create)?; - api.register(networking_address_lot_delete)?; - api.register(networking_address_lot_block_list)?; - - api.register(networking_loopback_address_create)?; - api.register(networking_loopback_address_delete)?; - api.register(networking_loopback_address_list)?; - - api.register(networking_switch_port_settings_list)?; - api.register(networking_switch_port_settings_view)?; - api.register(networking_switch_port_settings_create)?; - api.register(networking_switch_port_settings_delete)?; - - api.register(networking_switch_port_list)?; - api.register(networking_switch_port_status)?; - api.register(networking_switch_port_apply_settings)?; - api.register(networking_switch_port_clear_settings)?; - - api.register(networking_bgp_config_create)?; - api.register(networking_bgp_config_list)?; - 
api.register(networking_bgp_status)?; - api.register(networking_bgp_exported)?; - api.register(networking_bgp_imported_routes_ipv4)?; - api.register(networking_bgp_config_delete)?; - api.register(networking_bgp_announce_set_update)?; - api.register(networking_bgp_announce_set_list)?; - api.register(networking_bgp_announce_set_delete)?; - api.register(networking_bgp_message_history)?; - - api.register(networking_bgp_announcement_list)?; - - api.register(networking_bfd_enable)?; - api.register(networking_bfd_disable)?; - api.register(networking_bfd_status)?; - - api.register(networking_allow_list_view)?; - api.register(networking_allow_list_update)?; - - api.register(utilization_view)?; - - // Fleet-wide API operations - api.register(silo_list)?; - api.register(silo_create)?; - api.register(silo_view)?; - api.register(silo_delete)?; - api.register(silo_policy_view)?; - api.register(silo_policy_update)?; - api.register(silo_ip_pool_list)?; - - api.register(silo_utilization_view)?; - api.register(silo_utilization_list)?; - - api.register(system_quotas_list)?; - api.register(silo_quotas_view)?; - api.register(silo_quotas_update)?; - - api.register(silo_identity_provider_list)?; - - api.register(saml_identity_provider_create)?; - api.register(saml_identity_provider_view)?; - - api.register(local_idp_user_create)?; - api.register(local_idp_user_delete)?; - api.register(local_idp_user_set_password)?; - - api.register(certificate_list)?; - api.register(certificate_create)?; - api.register(certificate_view)?; - api.register(certificate_delete)?; - - api.register(system_metric)?; - api.register(silo_metric)?; - api.register(timeseries_schema_list)?; - api.register(timeseries_query)?; - - api.register(system_update_put_repository)?; - api.register(system_update_get_repository)?; - - api.register(user_list)?; - api.register(silo_user_list)?; - api.register(silo_user_view)?; - api.register(group_list)?; - api.register(group_view)?; - - // Console API operations - 
api.register(console_api::login_begin)?; - api.register(console_api::login_local_begin)?; - api.register(console_api::login_local)?; - api.register(console_api::login_saml_begin)?; - api.register(console_api::login_saml_redirect)?; - api.register(console_api::login_saml)?; - api.register(console_api::logout)?; - - api.register(console_api::console_lookup)?; - api.register(console_api::console_projects)?; - api.register(console_api::console_projects_new)?; - api.register(console_api::console_silo_images)?; - api.register(console_api::console_silo_utilization)?; - api.register(console_api::console_silo_access)?; - api.register(console_api::console_root)?; - api.register(console_api::console_settings_page)?; - api.register(console_api::console_system_page)?; - api.register(console_api::asset)?; - - api.register(device_auth::device_auth_request)?; - api.register(device_auth::device_auth_verify)?; - api.register(device_auth::device_auth_success)?; - api.register(device_auth::device_auth_confirm)?; - api.register(device_auth::device_access_token)?; - - Ok(()) - } - - fn register_experimental( - api: &mut NexusApiDescription, - endpoint: T, - ) -> Result<(), ApiDescriptionRegisterError> - where - T: Into>, - { - let mut ep: ApiEndpoint = endpoint.into(); - // only one tag is allowed - ep.tags = vec![String::from("hidden")]; - ep.path = String::from("/experimental") + &ep.path; - api.register(ep) - } - - fn register_experimental_endpoints( - api: &mut NexusApiDescription, - ) -> Result<(), ApiDescriptionRegisterError> { - register_experimental(api, probe_list)?; - register_experimental(api, probe_view)?; - register_experimental(api, probe_create)?; - register_experimental(api, probe_delete)?; +/// Returns a description of the external nexus API +pub(crate) fn external_api() -> NexusApiDescription { + nexus_external_api_mod::api_description::() + .expect("registered entrypoints") +} - Ok(()) - } +enum NexusExternalApiImpl {} - let conf = - 
serde_json::from_str(include_str!("./tag-config.json")).unwrap(); - let mut api = NexusApiDescription::new().tag_config(conf); +impl NexusExternalApi for NexusExternalApiImpl { + type Context = ApiContext; - if let Err(err) = register_endpoints(&mut api) { - panic!("failed to register entrypoints: {}", err); - } - if let Err(err) = register_experimental_endpoints(&mut api) { - panic!("failed to register experimental entrypoints: {}", err); - } - api - } - - // API ENDPOINT FUNCTION NAMING CONVENTIONS - // - // Generally, HTTP resources are grouped within some collection. For a - // relatively simple example: - // - // GET v1/projects (list the projects in the collection) - // POST v1/projects (create a project in the collection) - // GET v1/projects/{project} (look up a project in the collection) - // DELETE v1/projects/{project} (delete a project in the collection) - // PUT v1/projects/{project} (update a project in the collection) - // - // We pick a name for the function that implements a given API entrypoint - // based on how we expect it to appear in the CLI subcommand hierarchy. For - // example: - // - // GET v1/projects -> project_list() - // POST v1/projects -> project_create() - // GET v1/projects/{project} -> project_view() - // DELETE v1/projects/{project} -> project_delete() - // PUT v1/projects/{project} -> project_update() - // - // Note that the path typically uses the entity's plural form while the - // function name uses its singular. - // - // Operations beyond list, create, view, delete, and update should use a - // descriptive noun or verb, again bearing in mind that this will be - // transcribed into the CLI and SDKs: - // - // POST -> instance_reboot - // POST -> instance_stop - // GET -> instance_serial_console - // - // Note that these function names end up in generated OpenAPI spec as the - // operationId for each endpoint, and therefore represent a contract with - // clients. 
Client generators use operationId to name API methods, so changing - // a function name is a breaking change from a client perspective. - - /// Ping API - /// - /// Always responds with Ok if it responds at all. - #[endpoint { - method = GET, - path = "/v1/ping", - tags = ["system/status"], -}] - async fn ping( - _rqctx: RequestContext, - ) -> Result, HttpError> { - Ok(HttpResponseOk(views::Ping { status: views::PingStatus::Ok })) - } - - /// Fetch top-level IAM policy - #[endpoint { - method = GET, - path = "/v1/system/policy", - tags = ["policy"], -}] async fn system_policy_view( rqctx: RequestContext, ) -> Result>, HttpError> @@ -488,12 +129,6 @@ mod imp { .await } - /// Update top-level IAM policy - #[endpoint { - method = PUT, - path = "/v1/system/policy", - tags = ["policy"], -}] async fn system_policy_update( rqctx: RequestContext, new_policy: TypedBody>, @@ -518,13 +153,7 @@ mod imp { .await } - /// Fetch current silo's IAM policy - #[endpoint { - method = GET, - path = "/v1/policy", - tags = ["silos"], - }] - pub(crate) async fn policy_view( + async fn policy_view( rqctx: RequestContext, ) -> Result>, HttpError> { @@ -551,12 +180,6 @@ mod imp { .await } - /// Update current silo's IAM policy - #[endpoint { - method = PUT, - path = "/v1/policy", - tags = ["silos"], -}] async fn policy_update( rqctx: RequestContext, new_policy: TypedBody>, @@ -590,12 +213,6 @@ mod imp { .await } - /// Fetch resource utilization for user's current silo - #[endpoint { - method = GET, - path = "/v1/utilization", - tags = ["silos"], -}] async fn utilization_view( rqctx: RequestContext, ) -> Result, HttpError> { @@ -617,12 +234,6 @@ mod imp { .await } - /// Fetch current utilization for given silo - #[endpoint { - method = GET, - path = "/v1/system/utilization/silos/{silo}", - tags = ["system/silos"], -}] async fn silo_utilization_view( rqctx: RequestContext, path_params: Path, @@ -646,12 +257,6 @@ mod imp { .instrument_dropshot_handler(&rqctx, handler) .await } - /// List 
current utilization state for all silos - #[endpoint { - method = GET, - path = "/v1/system/utilization/silos", - tags = ["system/silos"], -}] async fn silo_utilization_list( rqctx: RequestContext, query_params: Query, @@ -687,12 +292,6 @@ mod imp { .await } - /// Lists resource quotas for all silos - #[endpoint { - method = GET, - path = "/v1/system/silo-quotas", - tags = ["system/silos"], -}] async fn system_quotas_list( rqctx: RequestContext, query_params: Query, @@ -726,12 +325,6 @@ mod imp { .await } - /// Fetch resource quotas for silo - #[endpoint { - method = GET, - path = "/v1/system/silos/{silo}/quotas", - tags = ["system/silos"], -}] async fn silo_quotas_view( rqctx: RequestContext, path_params: Path, @@ -754,14 +347,6 @@ mod imp { .await } - /// Update resource quotas for silo - /// - /// If a quota value is not specified, it will remain unchanged. - #[endpoint { - method = PUT, - path = "/v1/system/silos/{silo}/quotas", - tags = ["system/silos"], -}] async fn silo_quotas_update( rqctx: RequestContext, path_params: Path, @@ -791,14 +376,6 @@ mod imp { .await } - /// List silos - /// - /// Lists silos that are discoverable based on the current permissions. - #[endpoint { - method = GET, - path = "/v1/system/silos", - tags = ["system/silos"], -}] async fn silo_list( rqctx: RequestContext, query_params: Query, @@ -831,12 +408,6 @@ mod imp { .await } - /// Create a silo - #[endpoint { - method = POST, - path = "/v1/system/silos", - tags = ["system/silos"], -}] async fn silo_create( rqctx: RequestContext, new_silo_params: TypedBody, @@ -857,14 +428,6 @@ mod imp { .await } - /// Fetch silo - /// - /// Fetch silo by name or ID. - #[endpoint { - method = GET, - path = "/v1/system/silos/{silo}", - tags = ["system/silos"], -}] async fn silo_view( rqctx: RequestContext, path_params: Path, @@ -886,16 +449,6 @@ mod imp { .await } - /// List IP pools linked to silo - /// - /// Linked IP pools are available to users in the specified silo. 
A silo can - /// have at most one default pool. IPs are allocated from the default pool when - /// users ask for one without specifying a pool. - #[endpoint { - method = GET, - path = "/v1/system/silos/{silo}/ip-pools", - tags = ["system/silos"], -}] async fn silo_ip_pool_list( rqctx: RequestContext, path_params: Path, @@ -937,14 +490,6 @@ mod imp { .await } - /// Delete a silo - /// - /// Delete a silo by name or ID. - #[endpoint { - method = DELETE, - path = "/v1/system/silos/{silo}", - tags = ["system/silos"], -}] async fn silo_delete( rqctx: RequestContext, path_params: Path, @@ -966,12 +511,6 @@ mod imp { .await } - /// Fetch silo IAM policy - #[endpoint { - method = GET, - path = "/v1/system/silos/{silo}/policy", - tags = ["system/silos"], -}] async fn silo_policy_view( rqctx: RequestContext, path_params: Path, @@ -994,12 +533,6 @@ mod imp { .await } - /// Update silo IAM policy - #[endpoint { - method = PUT, - path = "/v1/system/silos/{silo}/policy", - tags = ["system/silos"], -}] async fn silo_policy_update( rqctx: RequestContext, path_params: Path, @@ -1031,12 +564,6 @@ mod imp { // Silo-specific user endpoints - /// List built-in (system) users in silo - #[endpoint { - method = GET, - path = "/v1/system/users", - tags = ["system/silos"], -}] async fn silo_user_list( rqctx: RequestContext, query_params: Query>, @@ -1070,22 +597,9 @@ mod imp { .await } - /// Path parameters for Silo User requests - #[derive(Deserialize, JsonSchema)] - struct UserParam { - /// The user's internal id - user_id: Uuid, - } - - /// Fetch built-in (system) user - #[endpoint { - method = GET, - path = "/v1/system/users/{user_id}", - tags = ["system/silos"], -}] async fn silo_user_view( rqctx: RequestContext, - path_params: Path, + path_params: Path, query_params: Query, ) -> Result, HttpError> { let apictx = rqctx.context(); @@ -1110,12 +624,6 @@ mod imp { // Silo identity providers - /// List a silo's IdP's name - #[endpoint { - method = GET, - path = 
"/v1/system/identity-providers", - tags = ["system/silos"], -}] async fn silo_identity_provider_list( rqctx: RequestContext, query_params: Query>, @@ -1152,12 +660,6 @@ mod imp { // Silo SAML identity providers - /// Create SAML IdP - #[endpoint { - method = POST, - path = "/v1/system/identity-providers/saml", - tags = ["system/silos"], -}] async fn saml_identity_provider_create( rqctx: RequestContext, query_params: Query, @@ -1187,12 +689,6 @@ mod imp { .await } - /// Fetch SAML IdP - #[endpoint { - method = GET, - path = "/v1/system/identity-providers/saml/{provider}", - tags = ["system/silos"], -}] async fn saml_identity_provider_view( rqctx: RequestContext, path_params: Path, @@ -1230,16 +726,6 @@ mod imp { // "Local" Identity Provider - /// Create user - /// - /// Users can only be created in Silos with `provision_type` == `Fixed`. - /// Otherwise, Silo users are just-in-time (JIT) provisioned when a user first - /// logs in using an external Identity Provider. - #[endpoint { - method = POST, - path = "/v1/system/identity-providers/local/users", - tags = ["system/silos"], -}] async fn local_idp_user_create( rqctx: RequestContext, query_params: Query, @@ -1268,15 +754,9 @@ mod imp { .await } - /// Delete user - #[endpoint { - method = DELETE, - path = "/v1/system/identity-providers/local/users/{user_id}", - tags = ["system/silos"], -}] async fn local_idp_user_delete( rqctx: RequestContext, - path_params: Path, + path_params: Path, query_params: Query, ) -> Result { let apictx = rqctx.context(); @@ -1299,18 +779,9 @@ mod imp { .await } - /// Set or invalidate user's password - /// - /// Passwords can only be updated for users in Silos with identity mode - /// `LocalOnly`. 
- #[endpoint { - method = POST, - path = "/v1/system/identity-providers/local/users/{user_id}/set-password", - tags = ["system/silos"], -}] async fn local_idp_user_set_password( rqctx: RequestContext, - path_params: Path, + path_params: Path, query_params: Query, update: TypedBody, ) -> Result { @@ -1339,12 +810,6 @@ mod imp { .await } - /// List projects - #[endpoint { - method = GET, - path = "/v1/projects", - tags = ["projects"], -}] async fn project_list( rqctx: RequestContext, query_params: Query, @@ -1377,12 +842,6 @@ mod imp { .await } - /// Create project - #[endpoint { - method = POST, - path = "/v1/projects", - tags = ["projects"], -}] async fn project_create( rqctx: RequestContext, new_project: TypedBody, @@ -1403,12 +862,6 @@ mod imp { .await } - /// Fetch project - #[endpoint { - method = GET, - path = "/v1/projects/{project}", - tags = ["projects"], -}] async fn project_view( rqctx: RequestContext, path_params: Path, @@ -1432,12 +885,6 @@ mod imp { .await } - /// Delete project - #[endpoint { - method = DELETE, - path = "/v1/projects/{project}", - tags = ["projects"], -}] async fn project_delete( rqctx: RequestContext, path_params: Path, @@ -1467,12 +914,6 @@ mod imp { // (HTTP may require that this be idempotent.) If so, can we get around that // having this be a slightly different content-type (e.g., // "application/json-patch")? We should see what other APIs do. 
- /// Update a project - #[endpoint { - method = PUT, - path = "/v1/projects/{project}", - tags = ["projects"], -}] async fn project_update( rqctx: RequestContext, path_params: Path, @@ -1501,12 +942,6 @@ mod imp { .await } - /// Fetch project's IAM policy - #[endpoint { - method = GET, - path = "/v1/projects/{project}/policy", - tags = ["projects"], -}] async fn project_policy_view( rqctx: RequestContext, path_params: Path, @@ -1533,12 +968,6 @@ mod imp { .await } - /// Update project's IAM policy - #[endpoint { - method = PUT, - path = "/v1/projects/{project}/policy", - tags = ["projects"], -}] async fn project_policy_update( rqctx: RequestContext, path_params: Path, @@ -1570,12 +999,6 @@ mod imp { // IP Pools - /// List IP pools - #[endpoint { - method = GET, - path = "/v1/ip-pools", - tags = ["projects"], -}] async fn project_ip_pool_list( rqctx: RequestContext, query_params: Query, @@ -1611,12 +1034,6 @@ mod imp { .await } - /// Fetch IP pool - #[endpoint { - method = GET, - path = "/v1/ip-pools/{pool}", - tags = ["projects"], -}] async fn project_ip_pool_view( rqctx: RequestContext, path_params: Path, @@ -1641,12 +1058,6 @@ mod imp { .await } - /// List IP pools - #[endpoint { - method = GET, - path = "/v1/system/ip-pools", - tags = ["system/networking"], -}] async fn ip_pool_list( rqctx: RequestContext, query_params: Query, @@ -1679,12 +1090,6 @@ mod imp { .await } - /// Create IP pool - #[endpoint { - method = POST, - path = "/v1/system/ip-pools", - tags = ["system/networking"], -}] async fn ip_pool_create( rqctx: RequestContext, pool_params: TypedBody, @@ -1705,12 +1110,6 @@ mod imp { .await } - /// Fetch IP pool - #[endpoint { - method = GET, - path = "/v1/system/ip-pools/{pool}", - tags = ["system/networking"], -}] async fn ip_pool_view( rqctx: RequestContext, path_params: Path, @@ -1734,12 +1133,6 @@ mod imp { .await } - /// Delete IP pool - #[endpoint { - method = DELETE, - path = "/v1/system/ip-pools/{pool}", - tags = ["system/networking"], -}] async 
fn ip_pool_delete( rqctx: RequestContext, path_params: Path, @@ -1761,12 +1154,6 @@ mod imp { .await } - /// Update IP pool - #[endpoint { - method = PUT, - path = "/v1/system/ip-pools/{pool}", - tags = ["system/networking"], -}] async fn ip_pool_update( rqctx: RequestContext, path_params: Path, @@ -1791,12 +1178,6 @@ mod imp { .await } - /// Fetch IP pool utilization - #[endpoint { - method = GET, - path = "/v1/system/ip-pools/{pool}/utilization", - tags = ["system/networking"], -}] async fn ip_pool_utilization_view( rqctx: RequestContext, path_params: Path, @@ -1821,12 +1202,6 @@ mod imp { .await } - /// List IP pool's linked silos - #[endpoint { - method = GET, - path = "/v1/system/ip-pools/{pool}/silos", - tags = ["system/networking"], -}] async fn ip_pool_silo_list( rqctx: RequestContext, path_params: Path, @@ -1875,16 +1250,6 @@ mod imp { .await } - /// Link IP pool to silo - /// - /// Users in linked silos can allocate external IPs from this pool for their - /// instances. A silo can have at most one default pool. IPs are allocated from - /// the default pool when users ask for one without specifying a pool. - #[endpoint { - method = POST, - path = "/v1/system/ip-pools/{pool}/silos", - tags = ["system/networking"], -}] async fn ip_pool_silo_link( rqctx: RequestContext, path_params: Path, @@ -1910,14 +1275,6 @@ mod imp { .await } - /// Unlink IP pool from silo - /// - /// Will fail if there are any outstanding IPs allocated in the silo. - #[endpoint { - method = DELETE, - path = "/v1/system/ip-pools/{pool}/silos/{silo}", - tags = ["system/networking"], -}] async fn ip_pool_silo_unlink( rqctx: RequestContext, path_params: Path, @@ -1942,17 +1299,6 @@ mod imp { .await } - /// Make IP pool default for silo - /// - /// When a user asks for an IP (e.g., at instance create time) without - /// specifying a pool, the IP comes from the default pool if a default is - /// configured. 
When a pool is made the default for a silo, any existing default - /// will remain linked to the silo, but will no longer be the default. - #[endpoint { - method = PUT, - path = "/v1/system/ip-pools/{pool}/silos/{silo}", - tags = ["system/networking"], -}] async fn ip_pool_silo_update( rqctx: RequestContext, path_params: Path, @@ -1984,12 +1330,6 @@ mod imp { .await } - /// Fetch Oxide service IP pool - #[endpoint { - method = GET, - path = "/v1/system/ip-pools-service", - tags = ["system/networking"], -}] async fn ip_pool_service_view( rqctx: RequestContext, ) -> Result, HttpError> { @@ -2008,17 +1348,6 @@ mod imp { .await } - type IpPoolRangePaginationParams = - PaginationParams; - - /// List ranges for IP pool - /// - /// Ranges are ordered by their first address. - #[endpoint { - method = GET, - path = "/v1/system/ip-pools/{pool}/ranges", - tags = ["system/networking"], -}] async fn ip_pool_range_list( rqctx: RequestContext, path_params: Path, @@ -2062,14 +1391,6 @@ mod imp { .await } - /// Add range to IP pool - /// - /// IPv6 ranges are not allowed yet. - #[endpoint { - method = POST, - path = "/v1/system/ip-pools/{pool}/ranges/add", - tags = ["system/networking"], -}] async fn ip_pool_range_add( rqctx: RequestContext, path_params: Path, @@ -2094,12 +1415,6 @@ mod imp { .await } - /// Remove range from IP pool - #[endpoint { - method = POST, - path = "/v1/system/ip-pools/{pool}/ranges/remove", - tags = ["system/networking"], -}] async fn ip_pool_range_remove( rqctx: RequestContext, path_params: Path, @@ -2123,14 +1438,6 @@ mod imp { .await } - /// List IP ranges for the Oxide service pool - /// - /// Ranges are ordered by their first address. - #[endpoint { - method = GET, - path = "/v1/system/ip-pools-service/ranges", - tags = ["system/networking"], -}] async fn ip_pool_service_range_list( rqctx: RequestContext, query_params: Query, @@ -2171,14 +1478,6 @@ mod imp { .await } - /// Add IP range to Oxide service pool - /// - /// IPv6 ranges are not allowed yet. 
- #[endpoint { - method = POST, - path = "/v1/system/ip-pools-service/ranges/add", - tags = ["system/networking"], -}] async fn ip_pool_service_range_add( rqctx: RequestContext, range_params: TypedBody, @@ -2199,12 +1498,6 @@ mod imp { .await } - /// Remove IP range from Oxide service pool - #[endpoint { - method = POST, - path = "/v1/system/ip-pools-service/ranges/remove", - tags = ["system/networking"], -}] async fn ip_pool_service_range_remove( rqctx: RequestContext, range_params: TypedBody, @@ -2227,12 +1520,6 @@ mod imp { // Floating IP Addresses - /// List floating IPs - #[endpoint { - method = GET, - path = "/v1/floating-ips", - tags = ["floating-ips"], -}] async fn floating_ip_list( rqctx: RequestContext, query_params: Query>, @@ -2264,12 +1551,6 @@ mod imp { .await } - /// Create floating IP - #[endpoint { - method = POST, - path = "/v1/floating-ips", - tags = ["floating-ips"], -}] async fn floating_ip_create( rqctx: RequestContext, query_params: Query, @@ -2295,12 +1576,6 @@ mod imp { .await } - /// Update floating IP - #[endpoint { - method = PUT, - path = "/v1/floating-ips/{floating_ip}", - tags = ["floating-ips"], -}] async fn floating_ip_update( rqctx: RequestContext, path_params: Path, @@ -2337,12 +1612,6 @@ mod imp { .await } - /// Delete floating IP - #[endpoint { - method = DELETE, - path = "/v1/floating-ips/{floating_ip}", - tags = ["floating-ips"], -}] async fn floating_ip_delete( rqctx: RequestContext, path_params: Path, @@ -2372,12 +1641,6 @@ mod imp { .await } - /// Fetch floating IP - #[endpoint { - method = GET, - path = "/v1/floating-ips/{floating_ip}", - tags = ["floating-ips"] -}] async fn floating_ip_view( rqctx: RequestContext, path_params: Path, @@ -2407,14 +1670,6 @@ mod imp { .await } - /// Attach floating IP - /// - /// Attach floating IP to an instance or other resource. 
- #[endpoint { - method = POST, - path = "/v1/floating-ips/{floating_ip}/attach", - tags = ["floating-ips"], -}] async fn floating_ip_attach( rqctx: RequestContext, path_params: Path, @@ -2448,14 +1703,6 @@ mod imp { .await } - /// Detach floating IP - /// - // Detach floating IP from instance or other resource. - #[endpoint { - method = POST, - path = "/v1/floating-ips/{floating_ip}/detach", - tags = ["floating-ips"], -}] async fn floating_ip_detach( rqctx: RequestContext, path_params: Path, @@ -2486,12 +1733,6 @@ mod imp { // Disks - /// List disks - #[endpoint { - method = GET, - path = "/v1/disks", - tags = ["disks"], -}] async fn disk_list( rqctx: RequestContext, query_params: Query>, @@ -2527,12 +1768,6 @@ mod imp { } // TODO-correctness See note about instance create. This should be async. - /// Create a disk - #[endpoint { - method = POST, - path = "/v1/disks", - tags = ["disks"] -}] async fn disk_create( rqctx: RequestContext, query_params: Query, @@ -2558,12 +1793,6 @@ mod imp { .await } - /// Fetch disk - #[endpoint { - method = GET, - path = "/v1/disks/{disk}", - tags = ["disks"] -}] async fn disk_view( rqctx: RequestContext, path_params: Path, @@ -2591,12 +1820,6 @@ mod imp { .await } - /// Delete disk - #[endpoint { - method = DELETE, - path = "/v1/disks/{disk}", - tags = ["disks"], -}] async fn disk_delete( rqctx: RequestContext, path_params: Path, @@ -2624,33 +1847,9 @@ mod imp { .await } - #[derive(Display, Serialize, Deserialize, JsonSchema)] - #[display(style = "snake_case")] - #[serde(rename_all = "snake_case")] - pub enum DiskMetricName { - Activated, - Flush, - Read, - ReadBytes, - Write, - WriteBytes, - } - - #[derive(Serialize, Deserialize, JsonSchema)] - struct DiskMetricsPath { - disk: NameOrId, - metric: DiskMetricName, - } - - /// Fetch disk metrics - #[endpoint { - method = GET, - path = "/v1/disks/{disk}/metrics/{metric}", - tags = ["disks"], -}] async fn disk_metrics_list( rqctx: RequestContext, - path_params: Path, + path_params: 
Path, query_params: Query< PaginationParams, >, @@ -2694,14 +1893,6 @@ mod imp { .await } - /// Start importing blocks into disk - /// - /// Start the process of importing blocks into a disk - #[endpoint { - method = POST, - path = "/v1/disks/{disk}/bulk-write-start", - tags = ["disks"], -}] async fn disk_bulk_write_import_start( rqctx: RequestContext, path_params: Path, @@ -2732,12 +1923,6 @@ mod imp { .await } - /// Import blocks into disk - #[endpoint { - method = POST, - path = "/v1/disks/{disk}/bulk-write", - tags = ["disks"], -}] async fn disk_bulk_write_import( rqctx: RequestContext, path_params: Path, @@ -2770,14 +1955,6 @@ mod imp { .await } - /// Stop importing blocks into disk - /// - /// Stop the process of importing blocks into a disk - #[endpoint { - method = POST, - path = "/v1/disks/{disk}/bulk-write-stop", - tags = ["disks"], -}] async fn disk_bulk_write_import_stop( rqctx: RequestContext, path_params: Path, @@ -2808,12 +1985,6 @@ mod imp { .await } - /// Confirm disk block import completion - #[endpoint { - method = POST, - path = "/v1/disks/{disk}/finalize", - tags = ["disks"], -}] async fn disk_finalize_import( rqctx: RequestContext, path_params: Path, @@ -2847,12 +2018,6 @@ mod imp { // Instances - /// List instances - #[endpoint { - method = GET, - path = "/v1/instances", - tags = ["instances"], -}] async fn instance_list( rqctx: RequestContext, query_params: Query>, @@ -2887,12 +2052,6 @@ mod imp { .await } - /// Create instance - #[endpoint { - method = POST, - path = "/v1/instances", - tags = ["instances"], -}] async fn instance_create( rqctx: RequestContext, query_params: Query, @@ -2923,12 +2082,6 @@ mod imp { .await } - /// Fetch instance - #[endpoint { - method = GET, - path = "/v1/instances/{instance}", - tags = ["instances"], -}] async fn instance_view( rqctx: RequestContext, query_params: Query, @@ -2962,12 +2115,6 @@ mod imp { .await } - /// Delete instance - #[endpoint { - method = DELETE, - path = "/v1/instances/{instance}", - 
tags = ["instances"], -}] async fn instance_delete( rqctx: RequestContext, query_params: Query, @@ -2996,12 +2143,6 @@ mod imp { .await } - /// Reboot an instance - #[endpoint { - method = POST, - path = "/v1/instances/{instance}/reboot", - tags = ["instances"], -}] async fn instance_reboot( rqctx: RequestContext, query_params: Query, @@ -3031,12 +2172,6 @@ mod imp { .await } - /// Boot instance - #[endpoint { - method = POST, - path = "/v1/instances/{instance}/start", - tags = ["instances"], -}] async fn instance_start( rqctx: RequestContext, query_params: Query, @@ -3066,12 +2201,6 @@ mod imp { .await } - /// Stop instance - #[endpoint { - method = POST, - path = "/v1/instances/{instance}/stop", - tags = ["instances"], -}] async fn instance_stop( rqctx: RequestContext, query_params: Query, @@ -3101,12 +2230,6 @@ mod imp { .await } - /// Fetch instance serial console - #[endpoint { - method = GET, - path = "/v1/instances/{instance}/serial-console", - tags = ["instances"], -}] async fn instance_serial_console( rqctx: RequestContext, path_params: Path, @@ -3138,12 +2261,6 @@ mod imp { .await } - /// Stream instance serial console - #[channel { - protocol = WEBSOCKETS, - path = "/v1/instances/{instance}/serial-console/stream", - tags = ["instances"], -}] async fn instance_serial_console_stream( rqctx: RequestContext, path_params: Path, @@ -3190,16 +2307,6 @@ mod imp { } } - /// List SSH public keys for instance - /// - /// List SSH public keys injected via cloud-init during instance creation. Note - /// that this list is a snapshot in time and will not reflect updates made after - /// the instance is created. 
- #[endpoint { - method = GET, - path = "/v1/instances/{instance}/ssh-public-keys", - tags = ["instances"], -}] async fn instance_ssh_public_key_list( rqctx: RequestContext, path_params: Path, @@ -3242,12 +2349,6 @@ mod imp { .await } - /// List disks for instance - #[endpoint { - method = GET, - path = "/v1/instances/{instance}/disks", - tags = ["instances"], -}] async fn instance_disk_list( rqctx: RequestContext, query_params: Query< @@ -3290,12 +2391,6 @@ mod imp { .await } - /// Attach disk to instance - #[endpoint { - method = POST, - path = "/v1/instances/{instance}/disks/attach", - tags = ["instances"], -}] async fn instance_disk_attach( rqctx: RequestContext, path_params: Path, @@ -3328,12 +2423,6 @@ mod imp { .await } - /// Detach disk from instance - #[endpoint { - method = POST, - path = "/v1/instances/{instance}/disks/detach", - tags = ["instances"], -}] async fn instance_disk_detach( rqctx: RequestContext, path_params: Path, @@ -3368,16 +2457,6 @@ mod imp { // Certificates - /// List certificates for external endpoints - /// - /// Returns a list of TLS certificates used for the external API (for the - /// current Silo). These are sorted by creation date, with the most recent - /// certificates appearing first. - #[endpoint { - method = GET, - path = "/v1/certificates", - tags = ["silos"], -}] async fn certificate_list( rqctx: RequestContext, query_params: Query, @@ -3410,15 +2489,6 @@ mod imp { .await } - /// Create new system-wide x.509 certificate - /// - /// This certificate is automatically used by the Oxide Control plane to serve - /// external connections. 
- #[endpoint { - method = POST, - path = "/v1/certificates", - tags = ["silos"] -}] async fn certificate_create( rqctx: RequestContext, new_cert: TypedBody, @@ -3440,23 +2510,9 @@ mod imp { .await } - /// Path parameters for Certificate requests - #[derive(Deserialize, JsonSchema)] - struct CertificatePathParam { - certificate: NameOrId, - } - - /// Fetch certificate - /// - /// Returns the details of a specific certificate - #[endpoint { - method = GET, - path = "/v1/certificates/{certificate}", - tags = ["silos"], -}] async fn certificate_view( rqctx: RequestContext, - path_params: Path, + path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { @@ -3477,17 +2533,9 @@ mod imp { .await } - /// Delete certificate - /// - /// Permanently delete a certificate. This operation cannot be undone. - #[endpoint { - method = DELETE, - path = "/v1/certificates/{certificate}", - tags = ["silos"], -}] async fn certificate_delete( rqctx: RequestContext, - path_params: Path, + path_params: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { @@ -3510,12 +2558,6 @@ mod imp { .await } - /// Create address lot - #[endpoint { - method = POST, - path = "/v1/system/networking/address-lot", - tags = ["system/networking"], -}] async fn networking_address_lot_create( rqctx: RequestContext, new_address_lot: TypedBody, @@ -3541,12 +2583,6 @@ mod imp { .await } - /// Delete address lot - #[endpoint { - method = DELETE, - path = "/v1/system/networking/address-lot/{address_lot}", - tags = ["system/networking"], -}] async fn networking_address_lot_delete( rqctx: RequestContext, path_params: Path, @@ -3569,12 +2605,6 @@ mod imp { .await } - /// List address lots - #[endpoint { - method = GET, - path = "/v1/system/networking/address-lot", - tags = ["system/networking"], -}] async fn networking_address_lot_list( rqctx: RequestContext, query_params: Query, @@ -3608,12 +2638,6 @@ mod imp { .await } - /// List blocks in address lot - 
#[endpoint { - method = GET, - path = "/v1/system/networking/address-lot/{address_lot}/blocks", - tags = ["system/networking"], -}] async fn networking_address_lot_block_list( rqctx: RequestContext, path_params: Path, @@ -3649,12 +2673,6 @@ mod imp { .await } - /// Create loopback address - #[endpoint { - method = POST, - path = "/v1/system/networking/loopback-address", - tags = ["system/networking"], -}] async fn networking_loopback_address_create( rqctx: RequestContext, new_loopback_address: TypedBody, @@ -3678,32 +2696,9 @@ mod imp { .await } - #[derive(Serialize, Deserialize, JsonSchema)] - pub struct LoopbackAddressPath { - /// The rack to use when selecting the loopback address. - pub rack_id: Uuid, - - /// The switch location to use when selecting the loopback address. - pub switch_location: Name, - - /// The IP address and subnet mask to use when selecting the loopback - /// address. - pub address: IpAddr, - - /// The IP address and subnet mask to use when selecting the loopback - /// address. 
- pub subnet_mask: u8, - } - - /// Delete loopback address - #[endpoint { - method = DELETE, - path = "/v1/system/networking/loopback-address/{rack_id}/{switch_location}/{address}/{subnet_mask}", - tags = ["system/networking"], -}] async fn networking_loopback_address_delete( rqctx: RequestContext, - path: Path, + path: Path, ) -> Result { let apictx = rqctx.context(); let handler = async { @@ -3722,7 +2717,7 @@ mod imp { .loopback_address_delete( &opctx, path.rack_id, - path.switch_location.clone(), + path.switch_location.into(), addr.into(), ) .await?; @@ -3735,12 +2730,6 @@ mod imp { .await } - /// List loopback addresses - #[endpoint { - method = GET, - path = "/v1/system/networking/loopback-address", - tags = ["system/networking"], -}] async fn networking_loopback_address_list( rqctx: RequestContext, query_params: Query, @@ -3772,12 +2761,6 @@ mod imp { .await } - /// Create switch port settings - #[endpoint { - method = POST, - path = "/v1/system/networking/switch-port-settings", - tags = ["system/networking"], -}] async fn networking_switch_port_settings_create( rqctx: RequestContext, new_settings: TypedBody, @@ -3801,12 +2784,6 @@ mod imp { .await } - /// Delete switch port settings - #[endpoint { - method = DELETE, - path = "/v1/system/networking/switch-port-settings", - tags = ["system/networking"], -}] async fn networking_switch_port_settings_delete( rqctx: RequestContext, query_params: Query, @@ -3827,12 +2804,6 @@ mod imp { .await } - /// List switch port settings - #[endpoint { - method = GET, - path = "/v1/system/networking/switch-port-settings", - tags = ["system/networking"], -}] async fn networking_switch_port_settings_list( rqctx: RequestContext, query_params: Query< @@ -3869,12 +2840,6 @@ mod imp { .await } - /// Get information about switch port - #[endpoint { - method = GET, - path = "/v1/system/networking/switch-port-settings/{port}", - tags = ["system/networking"], -}] async fn networking_switch_port_settings_view( rqctx: RequestContext, 
path_params: Path, @@ -3896,12 +2861,6 @@ mod imp { .await } - /// List switch ports - #[endpoint { - method = GET, - path = "/v1/system/hardware/switch-port", - tags = ["system/hardware"], -}] async fn networking_switch_port_list( rqctx: RequestContext, query_params: Query>, @@ -3933,12 +2892,6 @@ mod imp { .await } - /// Get switch port status - #[endpoint { - method = GET, - path = "/v1/system/hardware/switch-port/{port}/status", - tags = ["system/hardware"], -}] async fn networking_switch_port_status( rqctx: RequestContext, path_params: Path, @@ -3968,12 +2921,6 @@ mod imp { .await } - /// Apply switch port settings - #[endpoint { - method = POST, - path = "/v1/system/hardware/switch-port/{port}/settings", - tags = ["system/hardware"], -}] async fn networking_switch_port_apply_settings( rqctx: RequestContext, path_params: Path, @@ -4000,12 +2947,6 @@ mod imp { .await } - /// Clear switch port settings - #[endpoint { - method = DELETE, - path = "/v1/system/hardware/switch-port/{port}/settings", - tags = ["system/hardware"], -}] async fn networking_switch_port_clear_settings( rqctx: RequestContext, path_params: Path, @@ -4028,12 +2969,6 @@ mod imp { .await } - /// Create new BGP configuration - #[endpoint { - method = POST, - path = "/v1/system/networking/bgp", - tags = ["system/networking"], -}] async fn networking_bgp_config_create( rqctx: RequestContext, config: TypedBody, @@ -4054,12 +2989,6 @@ mod imp { .await } - /// List BGP configurations - #[endpoint { - method = GET, - path = "/v1/system/networking/bgp", - tags = ["system/networking"], -}] async fn networking_bgp_config_list( rqctx: RequestContext, query_params: Query>, @@ -4094,12 +3023,6 @@ mod imp { } //TODO pagination? 
the normal by-name/by-id stuff does not work here - /// Get BGP peer status - #[endpoint { - method = GET, - path = "/v1/system/networking/bgp-status", - tags = ["system/networking"], -}] async fn networking_bgp_status( rqctx: RequestContext, ) -> Result>, HttpError> { @@ -4118,12 +3041,6 @@ mod imp { } //TODO pagination? the normal by-name/by-id stuff does not work here - /// Get BGP exported routes - #[endpoint { - method = GET, - path = "/v1/system/networking/bgp-exported", - tags = ["system/networking"], -}] async fn networking_bgp_exported( rqctx: RequestContext, ) -> Result, HttpError> { @@ -4141,12 +3058,6 @@ mod imp { .await } - /// Get BGP router message history - #[endpoint { - method = GET, - path = "/v1/system/networking/bgp-message-history", - tags = ["system/networking"], -}] async fn networking_bgp_message_history( rqctx: RequestContext, query_params: Query, @@ -4167,12 +3078,6 @@ mod imp { } //TODO pagination? the normal by-name/by-id stuff does not work here - /// Get imported IPv4 BGP routes - #[endpoint { - method = GET, - path = "/v1/system/networking/bgp-routes-ipv4", - tags = ["system/networking"], -}] async fn networking_bgp_imported_routes_ipv4( rqctx: RequestContext, query_params: Query, @@ -4192,12 +3097,6 @@ mod imp { .await } - /// Delete BGP configuration - #[endpoint { - method = DELETE, - path = "/v1/system/networking/bgp", - tags = ["system/networking"], -}] async fn networking_bgp_config_delete( rqctx: RequestContext, sel: Query, @@ -4218,15 +3117,6 @@ mod imp { .await } - /// Update BGP announce set - /// - /// If the announce set exists, this endpoint replaces the existing announce - /// set with the one specified. 
- #[endpoint { - method = PUT, - path = "/v1/system/networking/bgp-announce-set", - tags = ["system/networking"], -}] async fn networking_bgp_announce_set_update( rqctx: RequestContext, config: TypedBody, @@ -4247,12 +3137,6 @@ mod imp { .await } - /// List BGP announce sets - #[endpoint { - method = GET, - path = "/v1/system/networking/bgp-announce-set", - tags = ["system/networking"], -}] async fn networking_bgp_announce_set_list( rqctx: RequestContext, query_params: Query< @@ -4283,12 +3167,6 @@ mod imp { .await } - /// Delete BGP announce set - #[endpoint { - method = DELETE, - path = "/v1/system/networking/bgp-announce-set/{name_or_id}", - tags = ["system/networking"], -}] async fn networking_bgp_announce_set_delete( rqctx: RequestContext, path_params: Path, @@ -4309,14 +3187,6 @@ mod imp { .await } - // TODO: is pagination necessary here? How large do we expect the list of - // announcements to become in real usage? - /// Get originated routes for a specified BGP announce set - #[endpoint { - method = GET, - path = "/v1/system/networking/bgp-announce-set/{name_or_id}/announcement", - tags = ["system/networking"], -}] async fn networking_bgp_announcement_list( rqctx: RequestContext, path_params: Path, @@ -4344,12 +3214,6 @@ mod imp { .await } - /// Enable a BFD session - #[endpoint { - method = POST, - path = "/v1/system/networking/bfd-enable", - tags = ["system/networking"], -}] async fn networking_bfd_enable( rqctx: RequestContext, session: TypedBody, @@ -4370,12 +3234,6 @@ mod imp { .await } - /// Disable a BFD session - #[endpoint { - method = POST, - path = "/v1/system/networking/bfd-disable", - tags = ["system/networking"], -}] async fn networking_bfd_disable( rqctx: RequestContext, session: TypedBody, @@ -4396,12 +3254,6 @@ mod imp { .await } - /// Get BFD status - #[endpoint { - method = GET, - path = "/v1/system/networking/bfd-status", - tags = ["system/networking"], -}] async fn networking_bfd_status( rqctx: RequestContext, ) -> Result>, HttpError> { 
@@ -4421,12 +3273,6 @@ mod imp { .await } - /// Get user-facing services IP allowlist - #[endpoint { - method = GET, - path = "/v1/system/networking/allow-list", - tags = ["system/networking"], -}] async fn networking_allow_list_view( rqctx: RequestContext, ) -> Result, HttpError> { @@ -4448,12 +3294,6 @@ mod imp { .await } - /// Update user-facing services IP allowlist - #[endpoint { - method = PUT, - path = "/v1/system/networking/allow-list", - tags = ["system/networking"], -}] async fn networking_allow_list_update( rqctx: RequestContext, params: TypedBody, @@ -4481,15 +3321,6 @@ mod imp { // Images - /// List images - /// - /// List images which are global or scoped to the specified project. The images - /// are returned sorted by creation date, with the most recent images appearing first. - #[endpoint { - method = GET, - path = "/v1/images", - tags = ["images"], -}] async fn image_list( rqctx: RequestContext, query_params: Query< @@ -4537,14 +3368,6 @@ mod imp { .await } - /// Create image - /// - /// Create a new image in a project. - #[endpoint { - method = POST, - path = "/v1/images", - tags = ["images"] -}] async fn image_create( rqctx: RequestContext, query_params: Query, @@ -4581,14 +3404,6 @@ mod imp { .await } - /// Fetch image - /// - /// Fetch the details for a specific image in a project. - #[endpoint { - method = GET, - path = "/v1/images/{image}", - tags = ["images"], -}] async fn image_view( rqctx: RequestContext, path_params: Path, @@ -4629,16 +3444,6 @@ mod imp { .await } - /// Delete image - /// - /// Permanently delete an image from a project. This operation cannot be undone. - /// Any instances in the project using the image will continue to run, however - /// new instances can not be created with this image. 
- #[endpoint { - method = DELETE, - path = "/v1/images/{image}", - tags = ["images"], -}] async fn image_delete( rqctx: RequestContext, path_params: Path, @@ -4670,14 +3475,6 @@ mod imp { .await } - /// Promote project image - /// - /// Promote project image to be visible to all projects in the silo - #[endpoint { - method = POST, - path = "/v1/images/{image}/promote", - tags = ["images"] -}] async fn image_promote( rqctx: RequestContext, path_params: Path, @@ -4709,14 +3506,6 @@ mod imp { .await } - /// Demote silo image - /// - /// Demote silo image to be visible only to a specified project - #[endpoint { - method = POST, - path = "/v1/images/{image}/demote", - tags = ["images"] -}] async fn image_demote( rqctx: RequestContext, path_params: Path, @@ -4750,12 +3539,6 @@ mod imp { .await } - /// List network interfaces - #[endpoint { - method = GET, - path = "/v1/network-interfaces", - tags = ["instances"], -}] async fn instance_network_interface_list( rqctx: RequestContext, query_params: Query>, @@ -4795,12 +3578,6 @@ mod imp { .await } - /// Create network interface - #[endpoint { - method = POST, - path = "/v1/network-interfaces", - tags = ["instances"], -}] async fn instance_network_interface_create( rqctx: RequestContext, query_params: Query, @@ -4829,17 +3606,6 @@ mod imp { .await } - /// Delete network interface - /// - /// Note that the primary interface for an instance cannot be deleted if there - /// are any secondary interfaces. A new primary interface must be designated - /// first. The primary interface can be deleted if there are no secondary - /// interfaces. 
- #[endpoint { - method = DELETE, - path = "/v1/network-interfaces/{interface}", - tags = ["instances"], -}] async fn instance_network_interface_delete( rqctx: RequestContext, path_params: Path, @@ -4873,12 +3639,6 @@ mod imp { .await } - /// Fetch network interface - #[endpoint { - method = GET, - path = "/v1/network-interfaces/{interface}", - tags = ["instances"], -}] async fn instance_network_interface_view( rqctx: RequestContext, path_params: Path, @@ -4909,12 +3669,6 @@ mod imp { .await } - /// Update network interface - #[endpoint { - method = PUT, - path = "/v1/network-interfaces/{interface}", - tags = ["instances"], -}] async fn instance_network_interface_update( rqctx: RequestContext, path_params: Path, @@ -4958,12 +3712,6 @@ mod imp { // External IP addresses for instances - /// List external IP addresses - #[endpoint { - method = GET, - path = "/v1/instances/{instance}/external-ips", - tags = ["instances"], -}] async fn instance_external_ip_list( rqctx: RequestContext, query_params: Query, @@ -4994,12 +3742,6 @@ mod imp { .await } - /// Allocate and attach ephemeral IP to instance - #[endpoint { - method = POST, - path = "/v1/instances/{instance}/external-ips/ephemeral", - tags = ["instances"], -}] async fn instance_ephemeral_ip_attach( rqctx: RequestContext, path_params: Path, @@ -5035,12 +3777,6 @@ mod imp { .await } - /// Detach and deallocate ephemeral IP from instance - #[endpoint { - method = DELETE, - path = "/v1/instances/{instance}/external-ips/ephemeral", - tags = ["instances"], -}] async fn instance_ephemeral_ip_detach( rqctx: RequestContext, path_params: Path, @@ -5077,12 +3813,6 @@ mod imp { // Snapshots - /// List snapshots - #[endpoint { - method = GET, - path = "/v1/snapshots", - tags = ["snapshots"], -}] async fn snapshot_list( rqctx: RequestContext, query_params: Query>, @@ -5117,14 +3847,6 @@ mod imp { .await } - /// Create snapshot - /// - /// Creates a point-in-time snapshot from a disk. 
- #[endpoint { - method = POST, - path = "/v1/snapshots", - tags = ["snapshots"], -}] async fn snapshot_create( rqctx: RequestContext, query_params: Query, @@ -5150,12 +3872,6 @@ mod imp { .await } - /// Fetch snapshot - #[endpoint { - method = GET, - path = "/v1/snapshots/{snapshot}", - tags = ["snapshots"], -}] async fn snapshot_view( rqctx: RequestContext, path_params: Path, @@ -5185,12 +3901,6 @@ mod imp { .await } - /// Delete snapshot - #[endpoint { - method = DELETE, - path = "/v1/snapshots/{snapshot}", - tags = ["snapshots"], -}] async fn snapshot_delete( rqctx: RequestContext, path_params: Path, @@ -5221,12 +3931,6 @@ mod imp { // VPCs - /// List VPCs - #[endpoint { - method = GET, - path = "/v1/vpcs", - tags = ["vpcs"], -}] async fn vpc_list( rqctx: RequestContext, query_params: Query>, @@ -5262,12 +3966,6 @@ mod imp { .await } - /// Create VPC - #[endpoint { - method = POST, - path = "/v1/vpcs", - tags = ["vpcs"], -}] async fn vpc_create( rqctx: RequestContext, query_params: Query, @@ -5293,12 +3991,6 @@ mod imp { .await } - /// Fetch VPC - #[endpoint { - method = GET, - path = "/v1/vpcs/{vpc}", - tags = ["vpcs"], -}] async fn vpc_view( rqctx: RequestContext, path_params: Path, @@ -5324,12 +4016,6 @@ mod imp { .await } - /// Update a VPC - #[endpoint { - method = PUT, - path = "/v1/vpcs/{vpc}", - tags = ["vpcs"], -}] async fn vpc_update( rqctx: RequestContext, path_params: Path, @@ -5359,12 +4045,6 @@ mod imp { .await } - /// Delete VPC - #[endpoint { - method = DELETE, - path = "/v1/vpcs/{vpc}", - tags = ["vpcs"], -}] async fn vpc_delete( rqctx: RequestContext, path_params: Path, @@ -5390,12 +4070,6 @@ mod imp { .await } - /// List subnets - #[endpoint { - method = GET, - path = "/v1/vpc-subnets", - tags = ["vpcs"], -}] async fn vpc_subnet_list( rqctx: RequestContext, query_params: Query>, @@ -5430,12 +4104,6 @@ mod imp { .await } - /// Create subnet - #[endpoint { - method = POST, - path = "/v1/vpc-subnets", - tags = ["vpcs"], -}] async fn 
vpc_subnet_create( rqctx: RequestContext, query_params: Query, @@ -5460,12 +4128,6 @@ mod imp { .await } - /// Fetch subnet - #[endpoint { - method = GET, - path = "/v1/vpc-subnets/{subnet}", - tags = ["vpcs"], -}] async fn vpc_subnet_view( rqctx: RequestContext, path_params: Path, @@ -5496,12 +4158,6 @@ mod imp { .await } - /// Delete subnet - #[endpoint { - method = DELETE, - path = "/v1/vpc-subnets/{subnet}", - tags = ["vpcs"], -}] async fn vpc_subnet_delete( rqctx: RequestContext, path_params: Path, @@ -5531,12 +4187,6 @@ mod imp { .await } - /// Update subnet - #[endpoint { - method = PUT, - path = "/v1/vpc-subnets/{subnet}", - tags = ["vpcs"], -}] async fn vpc_subnet_update( rqctx: RequestContext, path_params: Path, @@ -5574,12 +4224,6 @@ mod imp { // a subnet whether they come from NICs or something else. See // https://github.com/oxidecomputer/omicron/issues/2476 - /// List network interfaces - #[endpoint { - method = GET, - path = "/v1/vpc-subnets/{subnet}/network-interfaces", - tags = ["vpcs"], -}] async fn vpc_subnet_list_network_interfaces( rqctx: RequestContext, path_params: Path, @@ -5628,12 +4272,6 @@ mod imp { // VPC Firewalls - /// List firewall rules - #[endpoint { - method = GET, - path = "/v1/vpc-firewall-rules", - tags = ["vpcs"], -}] async fn vpc_firewall_rules_view( rqctx: RequestContext, query_params: Query, @@ -5664,25 +4302,6 @@ mod imp { // Note: the limits in the below comment come from the firewall rules model // file, nexus/db-model/src/vpc_firewall_rule.rs. - /// Replace firewall rules - /// - /// The maximum number of rules per VPC is 1024. - /// - /// Targets are used to specify the set of instances to which a firewall rule - /// applies. You can target instances directly by name, or specify a VPC, VPC - /// subnet, IP, or IP subnet, which will apply the rule to traffic going to - /// all matching instances. Targets are additive: the rule applies to instances - /// matching ANY target. The maximum number of targets is 256. 
- /// - /// Filters reduce the scope of a firewall rule. Without filters, the rule - /// applies to all packets to the targets (or from the targets, if it's an - /// outbound rule). With multiple filters, the rule applies only to packets - /// matching ALL filters. The maximum number of each type of filter is 256. - #[endpoint { - method = PUT, - path = "/v1/vpc-firewall-rules", - tags = ["vpcs"], -}] async fn vpc_firewall_rules_update( rqctx: RequestContext, query_params: Query, @@ -5714,12 +4333,6 @@ mod imp { // VPC Routers - /// List routers - #[endpoint { - method = GET, - path = "/v1/vpc-routers", - tags = ["vpcs"], -}] async fn vpc_router_list( rqctx: RequestContext, query_params: Query>, @@ -5754,12 +4367,6 @@ mod imp { .await } - /// Fetch router - #[endpoint { - method = GET, - path = "/v1/vpc-routers/{router}", - tags = ["vpcs"], -}] async fn vpc_router_view( rqctx: RequestContext, path_params: Path, @@ -5790,12 +4397,6 @@ mod imp { .await } - /// Create VPC router - #[endpoint { - method = POST, - path = "/v1/vpc-routers", - tags = ["vpcs"], -}] async fn vpc_router_create( rqctx: RequestContext, query_params: Query, @@ -5826,12 +4427,6 @@ mod imp { .await } - /// Delete router - #[endpoint { - method = DELETE, - path = "/v1/vpc-routers/{router}", - tags = ["vpcs"], -}] async fn vpc_router_delete( rqctx: RequestContext, path_params: Path, @@ -5861,12 +4456,6 @@ mod imp { .await } - /// Update router - #[endpoint { - method = PUT, - path = "/v1/vpc-routers/{router}", - tags = ["vpcs"], -}] async fn vpc_router_update( rqctx: RequestContext, path_params: Path, @@ -5900,14 +4489,6 @@ mod imp { .await } - /// List routes - /// - /// List the routes associated with a router in a particular VPC. 
- #[endpoint { - method = GET, - path = "/v1/vpc-router-routes", - tags = ["vpcs"], -}] async fn vpc_router_route_list( rqctx: RequestContext, query_params: Query>, @@ -5944,12 +4525,6 @@ mod imp { // Vpc Router Routes - /// Fetch route - #[endpoint { - method = GET, - path = "/v1/vpc-router-routes/{route}", - tags = ["vpcs"], -}] async fn vpc_router_route_view( rqctx: RequestContext, path_params: Path, @@ -5981,12 +4556,6 @@ mod imp { .await } - /// Create route - #[endpoint { - method = POST, - path = "/v1/vpc-router-routes", - tags = ["vpcs"], -}] async fn vpc_router_route_create( rqctx: RequestContext, query_params: Query, @@ -6017,12 +4586,6 @@ mod imp { .await } - /// Delete route - #[endpoint { - method = DELETE, - path = "/v1/vpc-router-routes/{route}", - tags = ["vpcs"], -}] async fn vpc_router_route_delete( rqctx: RequestContext, path_params: Path, @@ -6053,12 +4616,6 @@ mod imp { .await } - /// Update route - #[endpoint { - method = PUT, - path = "/v1/vpc-router-routes/{route}", - tags = ["vpcs"], -}] async fn vpc_router_route_update( rqctx: RequestContext, path_params: Path, @@ -6095,12 +4652,6 @@ mod imp { // Racks - /// List racks - #[endpoint { - method = GET, - path = "/v1/system/hardware/racks", - tags = ["system/hardware"], -}] async fn rack_list( rqctx: RequestContext, query_params: Query, @@ -6130,22 +4681,9 @@ mod imp { .await } - /// Path parameters for Rack requests - #[derive(Deserialize, JsonSchema)] - struct RackPathParam { - /// The rack's unique ID. 
- rack_id: Uuid, - } - - /// Fetch rack - #[endpoint { - method = GET, - path = "/v1/system/hardware/racks/{rack_id}", - tags = ["system/hardware"], -}] async fn rack_view( rqctx: RequestContext, - path_params: Path, + path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let handler = async { @@ -6163,12 +4701,6 @@ mod imp { .await } - /// List uninitialized sleds - #[endpoint { - method = GET, - path = "/v1/system/hardware/sleds-uninitialized", - tags = ["system/hardware"] -}] async fn sled_list_uninitialized( rqctx: RequestContext, query: Query>, @@ -6198,27 +4730,10 @@ mod imp { .await } - /// The unique ID of a sled. - #[derive(Clone, Debug, Serialize, JsonSchema)] - pub struct SledId { - pub id: Uuid, - } - - /// Add sled to initialized rack - // - // TODO: In the future this should really be a PUT request, once we resolve - // https://github.com/oxidecomputer/omicron/issues/4494. It should also - // explicitly be tied to a rack via a `rack_id` path param. For now we assume - // we are only operating on single rack systems. - #[endpoint { - method = POST, - path = "/v1/system/hardware/sleds", - tags = ["system/hardware"] -}] async fn sled_add( rqctx: RequestContext, sled: TypedBody, - ) -> Result, HttpError> { + ) -> Result, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.context.nexus; let handler = async { @@ -6228,7 +4743,7 @@ mod imp { .sled_add(&opctx, sled.into_inner()) .await? 
.into_untyped_uuid(); - Ok(HttpResponseCreated(SledId { id })) + Ok(HttpResponseCreated(views::SledId { id })) }; apictx .context @@ -6239,12 +4754,6 @@ mod imp { // Sleds - /// List sleds - #[endpoint { - method = GET, - path = "/v1/system/hardware/sleds", - tags = ["system/hardware"], -}] async fn sled_list( rqctx: RequestContext, query_params: Query, @@ -6274,12 +4783,6 @@ mod imp { .await } - /// Fetch sled - #[endpoint { - method = GET, - path = "/v1/system/hardware/sleds/{sled_id}", - tags = ["system/hardware"], -}] async fn sled_view( rqctx: RequestContext, path_params: Path, @@ -6301,12 +4804,6 @@ mod imp { .await } - /// Set sled provision policy - #[endpoint { - method = PUT, - path = "/v1/system/hardware/sleds/{sled_id}/provision-policy", - tags = ["system/hardware"], -}] async fn sled_set_provision_policy( rqctx: RequestContext, path_params: Path, @@ -6341,12 +4838,6 @@ mod imp { .await } - /// List instances running on given sled - #[endpoint { - method = GET, - path = "/v1/system/hardware/sleds/{sled_id}/instances", - tags = ["system/hardware"], -}] async fn sled_instance_list( rqctx: RequestContext, path_params: Path, @@ -6388,12 +4879,6 @@ mod imp { // Physical disks - /// List physical disks - #[endpoint { - method = GET, - path = "/v1/system/hardware/disks", - tags = ["system/hardware"], -}] async fn physical_disk_list( rqctx: RequestContext, query_params: Query, @@ -6426,12 +4911,6 @@ mod imp { .await } - /// Get a physical disk - #[endpoint { - method = GET, - path = "/v1/system/hardware/disks/{disk_id}", - tags = ["system/hardware"], -}] async fn physical_disk_view( rqctx: RequestContext, path_params: Path, @@ -6456,12 +4935,6 @@ mod imp { // Switches - /// List switches - #[endpoint { - method = GET, - path = "/v1/system/hardware/switches", - tags = ["system/hardware"], -}] async fn switch_list( rqctx: RequestContext, query_params: Query, @@ -6491,12 +4964,6 @@ mod imp { .await } - /// Fetch switch - #[endpoint { - method = GET, - path = 
"/v1/system/hardware/switches/{switch_id}", - tags = ["system/hardware"], - }] async fn switch_view( rqctx: RequestContext, path_params: Path, @@ -6523,12 +4990,6 @@ mod imp { .await } - /// List physical disks attached to sleds - #[endpoint { - method = GET, - path = "/v1/system/hardware/sleds/{sled_id}/disks", - tags = ["system/hardware"], -}] async fn sled_physical_disk_list( rqctx: RequestContext, path_params: Path, @@ -6566,28 +5027,6 @@ mod imp { // Metrics - #[derive(Display, Deserialize, JsonSchema)] - #[display(style = "snake_case")] - #[serde(rename_all = "snake_case")] - pub enum SystemMetricName { - VirtualDiskSpaceProvisioned, - CpusProvisioned, - RamProvisioned, - } - - #[derive(Deserialize, JsonSchema)] - struct SystemMetricsPathParam { - metric_name: SystemMetricName, - } - - /// View metrics - /// - /// View CPU, memory, or storage utilization metrics at the fleet or silo level. - #[endpoint { - method = GET, - path = "/v1/system/metrics/{metric_name}", - tags = ["system/metrics"], -}] async fn system_metric( rqctx: RequestContext, path_params: Path, @@ -6630,14 +5069,6 @@ mod imp { .await } - /// View metrics - /// - /// View CPU, memory, or storage utilization metrics at the silo or project level. - #[endpoint { - method = GET, - path = "/v1/metrics/{metric_name}", - tags = ["metrics"], -}] async fn silo_metric( rqctx: RequestContext, path_params: Path, @@ -6686,15 +5117,9 @@ mod imp { .await } - /// List timeseries schemas - #[endpoint { - method = GET, - path = "/v1/timeseries/schema", - tags = ["metrics"], -}] async fn timeseries_schema_list( rqctx: RequestContext, - pag_params: Query, + pag_params: Query, ) -> Result< HttpResponseOk>, HttpError, @@ -6719,16 +5144,6 @@ mod imp { .await } - // TODO: can we link to an OxQL reference? Do we have one? Can we even do links? - - /// Run timeseries query - /// - /// Queries are written in OxQL. 
- #[endpoint { - method = POST, - path = "/v1/timeseries/query", - tags = ["metrics"], -}] async fn timeseries_query( rqctx: RequestContext, body: TypedBody, @@ -6754,13 +5169,6 @@ mod imp { // Updates - /// Upload TUF repository - #[endpoint { - method = PUT, - path = "/v1/system/update/repository", - tags = ["system/update"], - unpublished = true, -}] async fn system_update_put_repository( rqctx: RequestContext, query: Query, @@ -6785,15 +5193,6 @@ mod imp { .await } - /// Fetch TUF repository description - /// - /// Fetch description of TUF repository by system version. - #[endpoint { - method = GET, - path = "/v1/system/update/repository/{system_version}", - tags = ["system/update"], - unpublished = true, -}] async fn system_update_get_repository( rqctx: RequestContext, path_params: Path, @@ -6820,12 +5219,6 @@ mod imp { // Silo users - /// List users - #[endpoint { - method = GET, - path = "/v1/users", - tags = ["silos"], -}] async fn user_list( rqctx: RequestContext, query_params: Query>, @@ -6868,12 +5261,6 @@ mod imp { // Silo groups - /// List groups - #[endpoint { - method = GET, - path = "/v1/groups", - tags = ["silos"], -}] async fn group_list( rqctx: RequestContext, query_params: Query, @@ -6904,12 +5291,6 @@ mod imp { .await } - /// Fetch group - #[endpoint { - method = GET, - path = "/v1/groups/{group_id}", - tags = ["silos"], -}] async fn group_view( rqctx: RequestContext, path_params: Path, @@ -6933,12 +5314,6 @@ mod imp { // Built-in (system) users - /// List built-in users - #[endpoint { - method = GET, - path = "/v1/system/users-builtin", - tags = ["system/silos"], -}] async fn user_builtin_list( rqctx: RequestContext, query_params: Query, @@ -6970,12 +5345,6 @@ mod imp { .await } - /// Fetch built-in user - #[endpoint { - method = GET, - path = "/v1/system/users-builtin/{user}", - tags = ["system/silos"], -}] async fn user_builtin_view( rqctx: RequestContext, path_params: Path, @@ -7001,29 +5370,11 @@ mod imp { // Built-in roles - // Roles have 
their own pagination scheme because they do not use the usual "id" - // or "name" types. For more, see the comment in dbinit.sql. - #[derive(Deserialize, JsonSchema, Serialize)] - struct RolePage { - last_seen: String, - } - - /// Path parameters for global (system) role requests - #[derive(Deserialize, JsonSchema)] - struct RolePathParam { - /// The built-in role's unique name. - role_name: String, - } - - /// List built-in roles - #[endpoint { - method = GET, - path = "/v1/system/roles", - tags = ["roles"], -}] async fn role_list( rqctx: RequestContext, - query_params: Query>, + query_params: Query< + PaginationParams, + >, ) -> Result>, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.context.nexus; @@ -7033,7 +5384,7 @@ mod imp { crate::context::op_context_for_external_api(&rqctx).await?; let marker = match &query.page { WhichPage::First(..) => None, - WhichPage::Next(RolePage { last_seen }) => { + WhichPage::Next(params::RolePage { last_seen }) => { Some(last_seen.split_once('.').ok_or_else(|| { Error::invalid_value( last_seen.clone(), @@ -7057,7 +5408,9 @@ mod imp { Ok(HttpResponseOk(dropshot::ResultsPage::new( roles, &EmptyScanParams {}, - |role: &Role, _| RolePage { last_seen: role.name.to_string() }, + |role: &Role, _| params::RolePage { + last_seen: role.name.to_string(), + }, )?)) }; apictx @@ -7067,15 +5420,9 @@ mod imp { .await } - /// Fetch built-in role - #[endpoint { - method = GET, - path = "/v1/system/roles/{role_name}", - tags = ["roles"], -}] async fn role_view( rqctx: RequestContext, - path_params: Path, + path_params: Path, ) -> Result, HttpError> { let apictx = rqctx.context(); let nexus = &apictx.context.nexus; @@ -7096,13 +5443,7 @@ mod imp { // Current user - /// Fetch user for current session - #[endpoint { - method = GET, - path = "/v1/me", - tags = ["session"], -}] - pub(crate) async fn current_user_view( + async fn current_user_view( rqctx: RequestContext, ) -> Result, HttpError> { let apictx = rqctx.context(); @@ 
-7124,13 +5465,7 @@ mod imp { .await } - /// Fetch current user's groups - #[endpoint { - method = GET, - path = "/v1/me/groups", - tags = ["session"], - }] - pub(crate) async fn current_user_groups( + async fn current_user_groups( rqctx: RequestContext, query_params: Query, ) -> Result>, HttpError> { @@ -7162,16 +5497,6 @@ mod imp { .await } - // Per-user SSH public keys - - /// List SSH public keys - /// - /// Lists SSH public keys for the currently authenticated user. - #[endpoint { - method = GET, - path = "/v1/me/ssh-keys", - tags = ["session"], -}] async fn current_user_ssh_key_list( rqctx: RequestContext, query_params: Query, @@ -7208,14 +5533,6 @@ mod imp { .await } - /// Create SSH public key - /// - /// Create an SSH public key for the currently authenticated user. - #[endpoint { - method = POST, - path = "/v1/me/ssh-keys", - tags = ["session"], -}] async fn current_user_ssh_key_create( rqctx: RequestContext, new_key: TypedBody, @@ -7241,14 +5558,6 @@ mod imp { .await } - /// Fetch SSH public key - /// - /// Fetch SSH public key associated with the currently authenticated user. - #[endpoint { - method = GET, - path = "/v1/me/ssh-keys/{ssh_key}", - tags = ["session"], -}] async fn current_user_ssh_key_view( rqctx: RequestContext, path_params: Path, @@ -7281,14 +5590,6 @@ mod imp { .await } - /// Delete SSH public key - /// - /// Delete an SSH public key associated with the currently authenticated user. 
- #[endpoint { - method = DELETE, - path = "/v1/me/ssh-keys/{ssh_key}", - tags = ["session"], -}] async fn current_user_ssh_key_delete( rqctx: RequestContext, path_params: Path, @@ -7321,12 +5622,6 @@ mod imp { .await } - /// List instrumentation probes - #[endpoint { - method = GET, - path = "/v1/probes", - tags = ["system/probes"], -}] async fn probe_list( rqctx: RequestContext, query_params: Query>, @@ -7365,12 +5660,6 @@ mod imp { .await } - /// View instrumentation probe - #[endpoint { - method = GET, - path = "/v1/probes/{probe}", - tags = ["system/probes"], -}] async fn probe_view( rqctx: RequestContext, path_params: Path, @@ -7398,12 +5687,6 @@ mod imp { .await } - /// Create instrumentation probe - #[endpoint { - method = POST, - path = "/v1/probes", - tags = ["system/probes"], -}] async fn probe_create( rqctx: RequestContext, query_params: Query, @@ -7432,12 +5715,6 @@ mod imp { .await } - /// Delete instrumentation probe - #[endpoint { - method = DELETE, - path = "/v1/probes/{probe}", - tags = ["system/probes"], -}] async fn probe_delete( rqctx: RequestContext, query_params: Query, @@ -7463,18 +5740,158 @@ mod imp { .instrument_dropshot_handler(&rqctx, handler) .await } -} -pub use imp::*; + async fn login_saml_begin( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + console_api::login_saml_begin(rqctx, path_params, query_params).await + } + + async fn login_saml_redirect( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result { + console_api::login_saml_redirect(rqctx, path_params, query_params).await + } + + async fn login_saml( + rqctx: RequestContext, + path_params: Path, + body_bytes: dropshot::UntypedBody, + ) -> Result { + console_api::login_saml(rqctx, path_params, body_bytes).await + } + + async fn login_local_begin( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + console_api::login_local_begin(rqctx, path_params, 
query_params).await + } + + async fn login_local( + rqctx: RequestContext, + path_params: Path, + credentials: TypedBody, + ) -> Result, HttpError> + { + console_api::login_local(rqctx, path_params, credentials).await + } + + async fn logout( + rqctx: RequestContext, + cookies: Cookies, + ) -> Result, HttpError> + { + console_api::logout(rqctx, cookies).await + } + + async fn login_begin( + rqctx: RequestContext, + query_params: Query, + ) -> Result { + console_api::login_begin(rqctx, query_params).await + } + + async fn console_projects( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + console_api::console_projects(rqctx, path_params).await + } -#[cfg(test)] -mod test { - use super::external_api; + async fn console_settings_page( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + console_api::console_settings_page(rqctx, path_params).await + } + + async fn console_system_page( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + console_api::console_system_page(rqctx, path_params).await + } + + async fn console_lookup( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + console_api::console_lookup(rqctx, path_params).await + } + + async fn console_root( + rqctx: RequestContext, + ) -> Result, HttpError> { + console_api::console_root(rqctx).await + } + + async fn console_projects_new( + rqctx: RequestContext, + ) -> Result, HttpError> { + console_api::console_projects_new(rqctx).await + } + + async fn console_silo_images( + rqctx: RequestContext, + ) -> Result, HttpError> { + console_api::console_silo_images(rqctx).await + } + + async fn console_silo_utilization( + rqctx: RequestContext, + ) -> Result, HttpError> { + console_api::console_silo_utilization(rqctx).await + } + + async fn console_silo_access( + rqctx: RequestContext, + ) -> Result, HttpError> { + console_api::console_silo_access(rqctx).await + } + + async fn asset( + rqctx: RequestContext, + path_params: 
Path, + ) -> Result, HttpError> { + console_api::asset(rqctx, path_params).await + } + + async fn device_auth_request( + rqctx: RequestContext, + params: TypedBody, + ) -> Result, HttpError> { + device_auth::device_auth_request(rqctx, params).await + } + + async fn device_auth_verify( + rqctx: RequestContext, + ) -> Result, HttpError> { + device_auth::device_auth_verify(rqctx).await + } + + async fn device_auth_success( + rqctx: RequestContext, + ) -> Result, HttpError> { + device_auth::device_auth_success(rqctx).await + } + + async fn device_auth_confirm( + rqctx: RequestContext, + params: TypedBody, + ) -> Result { + device_auth::device_auth_confirm(rqctx, params).await + } - #[test] - fn test_nexus_tag_policy() { - // This will fail if any of the endpoints don't match the policy in - // ./tag-config.json - let _ = external_api(); + async fn device_access_token( + rqctx: RequestContext, + params: TypedBody, + ) -> Result, HttpError> { + device_auth::device_access_token(rqctx, params.into_inner()).await } } diff --git a/nexus/src/external_api/tag-config.json b/nexus/src/external_api/tag-config.json deleted file mode 100644 index 6974906507..0000000000 --- a/nexus/src/external_api/tag-config.json +++ /dev/null @@ -1,126 +0,0 @@ -{ - "allow_other_tags": false, - "endpoint_tag_policy": "ExactlyOne", - "tag_definitions": { - "disks": { - "description": "Virtual disks are used to store instance-local data which includes the operating system.", - "external_docs": { - "url": "http://docs.oxide.computer/api/disks" - } - }, - "floating-ips": { - "description": "Floating IPs allow a project to allocate well-known IPs to instances.", - "external_docs": { - "url": "http://docs.oxide.computer/api/floating-ips" - } - }, - "hidden": { - "description": "TODO operations that will not ship to customers", - "external_docs": { - "url": "http://docs.oxide.computer/api" - } - }, - "images": { - "description": "Images are read-only virtual disks that may be used to boot virtual 
machines.", - "external_docs": { - "url": "http://docs.oxide.computer/api/images" - } - }, - "instances": { - "description": "Virtual machine instances are the basic unit of computation. These operations are used for provisioning, controlling, and destroying instances.", - "external_docs": { - "url": "http://docs.oxide.computer/api/instances" - } - }, - "login": { - "description": "Authentication endpoints", - "external_docs": { - "url": "http://docs.oxide.computer/api/login" - } - }, - "metrics": { - "description": "Silo-scoped metrics", - "external_docs": { - "url": "http://docs.oxide.computer/api/metrics" - } - }, - "policy": { - "description": "System-wide IAM policy", - "external_docs": { - "url": "http://docs.oxide.computer/api/policy" - } - }, - "projects": { - "description": "Projects are a grouping of associated resources such as instances and disks within a silo for purposes of billing and access control.", - "external_docs": { - "url": "http://docs.oxide.computer/api/projects" - } - }, - "roles": { - "description": "Roles are a component of Identity and Access Management (IAM) that allow a user or agent account access to additional permissions.", - "external_docs": { - "url": "http://docs.oxide.computer/api/roles" - } - }, - "session": { - "description": "Information pertaining to the current session.", - "external_docs": { - "url": "http://docs.oxide.computer/api/session" - } - }, - "silos": { - "description": "Silos represent a logical partition of users and resources.", - "external_docs": { - "url": "http://docs.oxide.computer/api/silos" - } - }, - "snapshots": { - "description": "Snapshots of virtual disks at a particular point in time.", - "external_docs": { - "url": "http://docs.oxide.computer/api/snapshots" - } - }, - "vpcs": { - "description": "Virtual Private Clouds (VPCs) provide isolated network environments for managing and deploying services.", - "external_docs": { - "url": "http://docs.oxide.computer/api/vpcs" - } - }, - "system/probes": { 
- "description": "Probes for testing network connectivity", - "external_docs": { - "url": "http://docs.oxide.computer/api/probes" - } - }, - "system/status": { - "description": "Endpoints related to system health", - "external_docs": { - "url": "http://docs.oxide.computer/api/system-status" - } - }, - "system/hardware": { - "description": "These operations pertain to hardware inventory and management. Racks are the unit of expansion of an Oxide deployment. Racks are in turn composed of sleds, switches, power supplies, and a cabled backplane.", - "external_docs": { - "url": "http://docs.oxide.computer/api/system-hardware" - } - }, - "system/metrics": { - "description": "Metrics provide insight into the operation of the Oxide deployment. These include telemetry on hardware and software components that can be used to understand the current state as well as to diagnose issues.", - "external_docs": { - "url": "http://docs.oxide.computer/api/system-metrics" - } - }, - "system/networking": { - "description": "This provides rack-level network configuration.", - "external_docs": { - "url": "http://docs.oxide.computer/api/system-networking" - } - }, - "system/silos": { - "description": "Silos represent a logical partition of users and resources.", - "external_docs": { - "url": "http://docs.oxide.computer/api/system-silos" - } - } - } -} diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index 284e8de2ea..eaabbd748b 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -53,18 +53,6 @@ use uuid::Uuid; #[macro_use] extern crate slog; -/// Run the OpenAPI generator for the external API, which emits the OpenAPI spec -/// to stdout. 
-pub fn run_openapi_external() -> Result<(), String> { - external_api() - .openapi("Oxide Region API", "20240821.0") - .description("API for interacting with the Oxide control plane") - .contact_url("https://oxide.computer") - .contact_email("api@oxide.computer") - .write(&mut std::io::stdout()) - .map_err(|e| e.to_string()) -} - /// A partially-initialized Nexus server, which exposes an internal interface, /// but is not ready to receive external requests. pub struct InternalServer { diff --git a/nexus/tests/integration_tests/commands.rs b/nexus/tests/integration_tests/commands.rs index c2277ba776..7ae4223556 100644 --- a/nexus/tests/integration_tests/commands.rs +++ b/nexus/tests/integration_tests/commands.rs @@ -15,10 +15,7 @@ use omicron_test_utils::dev::test_cmds::path_to_executable; use omicron_test_utils::dev::test_cmds::run_command; use omicron_test_utils::dev::test_cmds::temp_file_path; use omicron_test_utils::dev::test_cmds::EXIT_FAILURE; -use omicron_test_utils::dev::test_cmds::EXIT_SUCCESS; use omicron_test_utils::dev::test_cmds::EXIT_USAGE; -use openapiv3::OpenAPI; -use std::collections::BTreeMap; use std::fs; use std::path::PathBuf; use subprocess::Exec; @@ -78,105 +75,3 @@ fn test_nexus_invalid_config() { ); assert!(&stderr_text.starts_with(&expected_err)); } - -#[track_caller] -fn run_command_with_arg(arg: &str) -> (String, String) { - // This is a little goofy: we need a config file for the program. - // (Arguably, --openapi shouldn't require a config file, but it's - // conceivable that the API metadata or the exposed endpoints would depend - // on the configuration.) We ship a config file in "examples", and we may - // as well use it here -- it would be a bug if that one didn't work for this - // purpose. However, it's not clear how to reliably locate it at runtime. - // But we do know where it is at compile time, so we load it then. 
- let config = include_str!("../../examples/config.toml"); - let config_path = write_config(config); - let exec = Exec::cmd(path_to_nexus()).arg(&config_path).arg(arg); - let (exit_status, stdout_text, stderr_text) = run_command(exec); - fs::remove_file(&config_path).expect("failed to remove temporary file"); - assert_exit_code(exit_status, EXIT_SUCCESS, &stderr_text); - - (stdout_text, stderr_text) -} - -#[test] -fn test_nexus_openapi() { - let (stdout_text, stderr_text) = run_command_with_arg("--openapi"); - assert_contents("tests/output/cmd-nexus-openapi-stderr", &stderr_text); - - // Make sure the result parses as a valid OpenAPI spec and sanity-check a - // few fields. - let spec: OpenAPI = serde_json::from_str(&stdout_text) - .expect("stdout was not valid OpenAPI"); - assert_eq!(spec.openapi, "3.0.3"); - assert_eq!(spec.info.title, "Oxide Region API"); - assert_eq!(spec.info.version, "20240821.0"); - - // Spot check a couple of items. - assert!(!spec.paths.paths.is_empty()); - assert!(spec.paths.paths.get("/v1/projects").is_some()); - - // Check for lint errors. - let errors = openapi_lint::validate_external(&spec); - assert!(errors.is_empty(), "{}", errors.join("\n\n")); - - // Construct a string that helps us identify the organization of tags and - // operations. - let mut ops_by_tag = - BTreeMap::>::new(); - for (path, method, op) in spec.operations() { - // Make sure each operation has exactly one tag. Note, we intentionally - // do this before validating the OpenAPI output as fixing an error here - // would necessitate refreshing the spec file again. 
- assert_eq!( - op.tags.len(), - 1, - "operation '{}' has {} tags rather than 1", - op.operation_id.as_ref().unwrap(), - op.tags.len() - ); - - // Every non-hidden endpoint must have a summary - if !op.tags.contains(&"hidden".to_string()) { - assert!( - op.summary.is_some(), - "operation '{}' is missing a summary doc comment", - op.operation_id.as_ref().unwrap() - ); - } - - ops_by_tag - .entry(op.tags.first().unwrap().to_string()) - .or_default() - .push(( - op.operation_id.as_ref().unwrap().to_string(), - method.to_string().to_uppercase(), - path.to_string(), - )); - } - - let mut tags = String::new(); - for (tag, mut ops) in ops_by_tag { - ops.sort(); - tags.push_str(&format!(r#"API operations found with tag "{}""#, tag)); - tags.push_str(&format!( - "\n{:40} {:8} {}\n", - "OPERATION ID", "METHOD", "URL PATH" - )); - for (operation_id, method, path) in ops { - tags.push_str(&format!( - "{:40} {:8} {}\n", - operation_id, method, path - )); - } - tags.push('\n'); - } - - // Confirm that the output hasn't changed. It's expected that we'll change - // this file as the API evolves, but pay attention to the diffs to ensure - // that the changes match your expectations. - assert_contents("../openapi/nexus.json", &stdout_text); - - // When this fails, verify that operations on which you're adding, - // renaming, or changing the tags are what you intend. 
- assert_contents("tests/output/nexus_tags.txt", &tags); -} diff --git a/nexus/tests/integration_tests/device_auth.rs b/nexus/tests/integration_tests/device_auth.rs index 5bb34eb19e..65730f6cc8 100644 --- a/nexus/tests/integration_tests/device_auth.rs +++ b/nexus/tests/integration_tests/device_auth.rs @@ -4,11 +4,11 @@ use nexus_test_utils::http_testing::{AuthnMode, NexusRequest, RequestBuilder}; use nexus_test_utils_macros::nexus_test; -use nexus_types::external_api::views::{ - DeviceAccessTokenGrant, DeviceAccessTokenType, DeviceAuthResponse, -}; -use omicron_nexus::external_api::device_auth::{ - DeviceAccessTokenRequest, DeviceAuthRequest, DeviceAuthVerify, +use nexus_types::external_api::{ + params::{DeviceAccessTokenRequest, DeviceAuthRequest, DeviceAuthVerify}, + views::{ + DeviceAccessTokenGrant, DeviceAccessTokenType, DeviceAuthResponse, + }, }; use http::{header, method::Method, StatusCode}; diff --git a/nexus/tests/output/cmd-nexus-noargs-stderr b/nexus/tests/output/cmd-nexus-noargs-stderr index 385248bd0e..5a218b5c94 100644 --- a/nexus/tests/output/cmd-nexus-noargs-stderr +++ b/nexus/tests/output/cmd-nexus-noargs-stderr @@ -1,12 +1,11 @@ See README.adoc for more information -Usage: nexus [OPTIONS] [CONFIG_FILE_PATH] +Usage: nexus [CONFIG_FILE_PATH] Arguments: [CONFIG_FILE_PATH] Options: - -O, --openapi Print the external OpenAPI Spec document and exit - -h, --help Print help + -h, --help Print help nexus: CONFIG_FILE_PATH is required diff --git a/nexus/tests/output/cmd-nexus-openapi-stderr b/nexus/tests/output/cmd-nexus-openapi-stderr deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index 124f0d42c9..6b31013d49 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -10,10 +10,10 @@ workspace = true [dependencies] anyhow.workspace = true async-trait.workspace = true +base64.workspace = true chrono.workspace = true clap.workspace = true cookie.workspace = true -base64.workspace 
= true derive-where.workspace = true derive_more.workspace = true dropshot.workspace = true diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 83897cbd1d..691f36534d 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -8,6 +8,7 @@ use crate::external_api::shared; use base64::Engine; use chrono::{DateTime, Utc}; +use http::Uri; use omicron_common::api::external::{ AddressLotKind, AllowedSourceIps, BfdMode, BgpPeer, ByteCount, Hostname, IdentityMetadataCreateParams, IdentityMetadataUpdateParams, @@ -16,6 +17,7 @@ use omicron_common::api::external::{ }; use omicron_common::disk::DiskVariant; use oxnet::{IpNet, Ipv4Net, Ipv6Net}; +use parse_display::Display; use schemars::JsonSchema; use serde::{ de::{self, Visitor}, @@ -83,11 +85,13 @@ path_param!(IpPoolPath, pool, "IP pool"); path_param!(SshKeyPath, ssh_key, "SSH key"); path_param!(AddressLotPath, address_lot, "address lot"); path_param!(ProbePath, probe, "probe"); +path_param!(CertificatePath, certificate, "certificate"); id_path_param!(GroupPath, group_id, "group"); // TODO: The hardware resources should be represented by its UUID or a hardware // ID that can be used to deterministically generate the UUID. 
+id_path_param!(RackPath, rack_id, "rack"); id_path_param!(SledPath, sled_id, "sled"); id_path_param!(SwitchPath, switch_id, "switch"); id_path_param!(PhysicalDiskPath, disk_id, "physical disk"); @@ -141,6 +145,13 @@ pub struct OptionalSiloSelector { pub silo: Option, } +/// Path parameters for Silo User requests +#[derive(Deserialize, JsonSchema)] +pub struct UserParam { + /// The user's internal ID + pub user_id: Uuid, +} + #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] pub struct SamlIdentityProviderSelector { /// Name or ID of the silo in which the SAML identity provider is associated @@ -1241,6 +1252,24 @@ pub struct RouterRouteUpdate { // DISKS +#[derive(Display, Serialize, Deserialize, JsonSchema)] +#[display(style = "snake_case")] +#[serde(rename_all = "snake_case")] +pub enum DiskMetricName { + Activated, + Flush, + Read, + ReadBytes, + Write, + WriteBytes, +} + +#[derive(Serialize, Deserialize, JsonSchema)] +pub struct DiskMetricsPath { + pub disk: NameOrId, + pub metric: DiskMetricName, +} + #[derive(Copy, Clone, Debug, Deserialize, Serialize)] #[serde(try_from = "u32")] // invoke the try_from validation routine below pub struct BlockSize(pub u32); @@ -1421,6 +1450,23 @@ pub struct LoopbackAddressCreate { pub anycast: bool, } +#[derive(Serialize, Deserialize, JsonSchema)] +pub struct LoopbackAddressPath { + /// The rack to use when selecting the loopback address. + pub rack_id: Uuid, + + /// The switch location to use when selecting the loopback address. + pub switch_location: Name, + + /// The IP address and subnet mask to use when selecting the loopback + /// address. + pub address: IpAddr, + + /// The subnet mask to use when selecting the loopback + /// address. + pub subnet_mask: u8, +} + /// Parameters for creating a port settings group. 
#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct SwtichPortSettingsGroupCreate { @@ -1897,6 +1943,20 @@ pub struct SshKeyCreate { // METRICS +#[derive(Display, Deserialize, JsonSchema)] +#[display(style = "snake_case")] +#[serde(rename_all = "snake_case")] +pub enum SystemMetricName { + VirtualDiskSpaceProvisioned, + CpusProvisioned, + RamProvisioned, +} + +#[derive(Deserialize, JsonSchema)] +pub struct SystemMetricsPathParam { + pub metric_name: SystemMetricName, +} + /// Query parameters common to resource metrics endpoints. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct ResourceMetrics { @@ -1958,3 +2018,98 @@ pub struct AllowListUpdate { /// The new list of allowed source IPs. pub allowed_ips: AllowedSourceIps, } + +// Roles + +// Roles have their own pagination scheme because they do not use the usual "id" +// or "name" types. For more, see the comment in dbinit.sql. +#[derive(Deserialize, JsonSchema, Serialize)] +pub struct RolePage { + pub last_seen: String, +} + +/// Path parameters for global (system) role requests +#[derive(Deserialize, JsonSchema)] +pub struct RolePath { + /// The built-in role's unique name. + pub role_name: String, +} + +// Console API + +#[derive(Deserialize, JsonSchema)] +pub struct RestPathParam { + pub path: Vec, +} + +#[derive(Deserialize, JsonSchema)] +pub struct LoginToProviderPathParam { + pub silo_name: Name, + pub provider_name: Name, +} + +#[derive(Serialize, Deserialize, JsonSchema)] +pub struct LoginUrlQuery { + pub redirect_uri: Option, +} + +#[derive(Deserialize, JsonSchema)] +pub struct LoginPath { + pub silo_name: Name, +} + +/// This is meant as a security feature. We want to ensure we never redirect to +/// a URI on a different host. 
+#[derive(Serialize, Deserialize, Debug, JsonSchema, Clone, Display)] +#[serde(try_from = "String")] +#[display("{0}")] +pub struct RelativeUri(String); + +impl FromStr for RelativeUri { + type Err = String; + + fn from_str(s: &str) -> Result { + Self::try_from(s.to_string()) + } +} + +impl TryFrom for RelativeUri { + type Error = String; + + fn try_from(uri: Uri) -> Result { + if uri.host().is_none() && uri.scheme().is_none() { + Ok(Self(uri.to_string())) + } else { + Err(format!("\"{}\" is not a relative URI", uri)) + } + } +} + +impl TryFrom for RelativeUri { + type Error = String; + + fn try_from(s: String) -> Result { + s.parse::() + .map_err(|_| format!("\"{}\" is not a relative URI", s)) + .and_then(|uri| Self::try_from(uri)) + } +} + +// Device auth + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct DeviceAuthRequest { + pub client_id: Uuid, +} + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct DeviceAuthVerify { + pub user_code: String, +} + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct DeviceAccessTokenRequest { + pub grant_type: String, + pub device_code: String, + pub client_id: Uuid, +} diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index 58c2e560ab..e8d81b05bb 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -498,6 +498,12 @@ pub struct Rack { // SLEDS +/// The unique ID of a sled. +#[derive(Clone, Debug, Serialize, JsonSchema)] +pub struct SledId { + pub id: Uuid, +} + /// An operator's view of a Sled. 
#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct Sled { diff --git a/openapi/nexus.json b/openapi/nexus.json index 47f1f0822b..a855378cd4 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -468,6 +468,7 @@ { "in": "path", "name": "certificate", + "description": "Name or ID of the certificate", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -504,6 +505,7 @@ { "in": "path", "name": "certificate", + "description": "Name or ID of the certificate", "required": true, "schema": { "$ref": "#/components/schemas/NameOrId" @@ -4167,7 +4169,7 @@ { "in": "path", "name": "rack_id", - "description": "The rack's unique ID.", + "description": "ID of the rack", "required": true, "schema": { "type": "string", @@ -5028,7 +5030,7 @@ { "in": "path", "name": "user_id", - "description": "The user's internal id", + "description": "The user's internal ID", "required": true, "schema": { "type": "string", @@ -5070,7 +5072,7 @@ { "in": "path", "name": "user_id", - "description": "The user's internal id", + "description": "The user's internal ID", "required": true, "schema": { "type": "string", @@ -7872,7 +7874,7 @@ { "in": "path", "name": "user_id", - "description": "The user's internal id", + "description": "The user's internal ID", "required": true, "schema": { "type": "string", diff --git a/oximeter/db/src/lib.rs b/oximeter/db/src/lib.rs index 5d56d802c9..2b3c2d6118 100644 --- a/oximeter/db/src/lib.rs +++ b/oximeter/db/src/lib.rs @@ -10,8 +10,6 @@ use crate::query::StringFieldSelector; use anyhow::Context as _; use chrono::DateTime; use chrono::Utc; -use dropshot::EmptyScanParams; -use dropshot::PaginationParams; pub use oximeter::schema::FieldSchema; pub use oximeter::schema::FieldSource; use oximeter::schema::TimeseriesKey; @@ -235,10 +233,6 @@ impl From for DbFieldSource { } } -/// Type used to paginate request to list timeseries schema. 
-pub type TimeseriesSchemaPaginationParams = - PaginationParams; - #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] pub struct TimeseriesScanParams { pub timeseries_name: TimeseriesName,