diff --git a/Cargo.lock b/Cargo.lock index f08c7a483d..c35f22dd86 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1072,6 +1072,19 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" +[[package]] +name = "cockroach-admin-api" +version = "0.1.0" +dependencies = [ + "cockroach-admin-types", + "dropshot", + "omicron-common", + "omicron-uuid-kinds", + "omicron-workspace-hack", + "schemars", + "serde", +] + [[package]] name = "cockroach-admin-client" version = "0.1.0" @@ -1086,6 +1099,20 @@ dependencies = [ "slog", ] +[[package]] +name = "cockroach-admin-types" +version = "0.1.0" +dependencies = [ + "chrono", + "csv", + "omicron-common", + "omicron-workspace-hack", + "proptest", + "schemars", + "serde", + "test-strategy", +] + [[package]] name = "colorchoice" version = "1.0.1" @@ -5307,6 +5334,8 @@ dependencies = [ "camino", "chrono", "clap", + "cockroach-admin-api", + "cockroach-admin-types", "csv", "dropshot", "expectorate", @@ -6107,6 +6136,7 @@ dependencies = [ "atomicwrites", "camino", "clap", + "cockroach-admin-api", "dns-server-api", "dropshot", "fs-err", diff --git a/Cargo.toml b/Cargo.toml index 8e9663bc7b..00db359aa8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,8 @@ members = [ "clients/sled-agent-client", "clients/wicketd-client", "cockroach-admin", + "cockroach-admin-api", + "cockroach-admin-types", "common", "dev-tools/crdb-seed", "dev-tools/omdb", @@ -110,6 +112,8 @@ default-members = [ "clients/sled-agent-client", "clients/wicketd-client", "cockroach-admin", + "cockroach-admin-api", + "cockroach-admin-types", "common", "dev-tools/crdb-seed", "dev-tools/omdb", @@ -263,7 +267,9 @@ ciborium = "0.2.2" cfg-if = "1.0" chrono = { version = "0.4", features = [ "serde" ] } clap = { version = "4.5", features = ["cargo", "derive", "env", "wrap_help"] } +cockroach-admin-api = { path = "cockroach-admin-api" } cockroach-admin-client = { path = 
"clients/cockroach-admin-client" } +cockroach-admin-types = { path = "cockroach-admin-types" } colored = "2.1" const_format = "0.2.32" cookie = "0.18" diff --git a/cockroach-admin-api/Cargo.toml b/cockroach-admin-api/Cargo.toml new file mode 100644 index 0000000000..f0434856d2 --- /dev/null +++ b/cockroach-admin-api/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "cockroach-admin-api" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[lints] +workspace = true + +[dependencies] +cockroach-admin-types.workspace = true +dropshot.workspace = true +omicron-common.workspace = true +omicron-uuid-kinds.workspace = true +omicron-workspace-hack.workspace = true +schemars.workspace = true +serde.workspace = true diff --git a/cockroach-admin-api/src/lib.rs b/cockroach-admin-api/src/lib.rs new file mode 100644 index 0000000000..192ff56f04 --- /dev/null +++ b/cockroach-admin-api/src/lib.rs @@ -0,0 +1,76 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use cockroach_admin_types::{NodeDecommission, NodeStatus}; +use dropshot::{HttpError, HttpResponseOk, RequestContext, TypedBody}; +use omicron_uuid_kinds::OmicronZoneUuid; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[dropshot::api_description { + module = "cockroach_admin_api_mod", +}] +pub trait CockroachAdminApi { + type Context; + + /// Get the status of all nodes in the CRDB cluster. + #[endpoint { + method = GET, + path = "/node/status", + }] + async fn node_status( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Get the CockroachDB node ID of the local cockroach instance. + #[endpoint { + method = GET, + path = "/node/id", + }] + async fn local_node_id( + rqctx: RequestContext, + ) -> Result, HttpError>; + + /// Decommission a node from the CRDB cluster. 
+ #[endpoint { + method = POST, + path = "/node/decommission", + }] + async fn node_decommission( + rqctx: RequestContext, + body: TypedBody, + ) -> Result, HttpError>; +} + +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub struct ClusterNodeStatus { + pub all_nodes: Vec, +} + +/// CockroachDB Node ID +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub struct LocalNodeId { + /// The ID of this Omicron zone. + /// + /// This is included to ensure correctness even if a socket address on a + /// sled is reused for a different zone; if our caller is trying to + /// determine the node ID for a particular Omicron CockroachDB zone, they'll + /// contact us by socket address. We include our zone ID in the response for + /// their confirmation that we are the zone they intended to contact. + pub zone_id: OmicronZoneUuid, + // CockroachDB node IDs are integers, in practice, but our use of them is as + // input and output to the `cockroach` CLI. We use a string which is a bit + // more natural (no need to parse CLI output or stringify an ID to send it + // as input) and leaves open the door for the format to change in the + // future. 
+ pub node_id: String, +} + +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub struct NodeId { + pub node_id: String, +} diff --git a/cockroach-admin-types/Cargo.toml b/cockroach-admin-types/Cargo.toml new file mode 100644 index 0000000000..870d1c55c2 --- /dev/null +++ b/cockroach-admin-types/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "cockroach-admin-types" +version = "0.1.0" +edition = "2021" +license = "MPL-2.0" + +[lints] +workspace = true + +[dependencies] +chrono.workspace = true +csv.workspace = true +omicron-common.workspace = true +omicron-workspace-hack.workspace = true +schemars.workspace = true +serde.workspace = true + +[dev-dependencies] +proptest.workspace = true +test-strategy.workspace = true diff --git a/cockroach-admin-types/src/lib.rs b/cockroach-admin-types/src/lib.rs new file mode 100644 index 0000000000..3653cc616b --- /dev/null +++ b/cockroach-admin-types/src/lib.rs @@ -0,0 +1,477 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use std::{io, net::SocketAddr}; + +use chrono::{DateTime, NaiveDateTime, Utc}; +use schemars::JsonSchema; +use serde::{de, Deserialize, Serialize}; + +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub struct NodeStatus { + pub node_id: String, + pub address: SocketAddr, + pub sql_address: SocketAddr, + pub build: String, + pub started_at: DateTime, + pub updated_at: DateTime, + pub locality: String, + pub is_available: bool, + pub is_live: bool, +} + +impl NodeStatus { + pub fn parse_from_csv(data: &[u8]) -> Result, csv::Error> { + let mut statuses = Vec::new(); + let mut reader = csv::Reader::from_reader(io::Cursor::new(data)); + for result in reader.deserialize() { + let record: CliNodeStatus = result?; + statuses.push(record.into()); + } + Ok(statuses) + } +} + +// Slightly different `NodeStatus` that matches what we get from `cockroach`: +// timestamps are a fixed format with no timezone (but are actually UTC), so we +// have a custom deserializer, and the ID column is `id` instead of `node_id`. 
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] +struct CliNodeStatus { + id: String, + address: SocketAddr, + sql_address: SocketAddr, + build: String, + #[serde(deserialize_with = "parse_cockroach_cli_timestamp")] + started_at: DateTime, + #[serde(deserialize_with = "parse_cockroach_cli_timestamp")] + updated_at: DateTime, + locality: String, + is_available: bool, + is_live: bool, +} + +impl From for NodeStatus { + fn from(cli: CliNodeStatus) -> Self { + Self { + node_id: cli.id, + address: cli.address, + sql_address: cli.sql_address, + build: cli.build, + started_at: cli.started_at, + updated_at: cli.updated_at, + locality: cli.locality, + is_available: cli.is_available, + is_live: cli.is_live, + } + } +} + +fn parse_cockroach_cli_timestamp<'de, D>( + d: D, +) -> Result, D::Error> +where + D: serde::Deserializer<'de>, +{ + struct CockroachTimestampVisitor; + impl<'de> de::Visitor<'de> for CockroachTimestampVisitor { + type Value = DateTime; + + fn expecting( + &self, + formatter: &mut std::fmt::Formatter, + ) -> std::fmt::Result { + formatter.write_str("a Cockroach CLI timestamp") + } + + fn visit_str(self, v: &str) -> Result + where + E: de::Error, + { + let dt = NaiveDateTime::parse_from_str(v, "%Y-%m-%d %H:%M:%S%.f") + .map_err(E::custom)?; + Ok(DateTime::from_naive_utc_and_offset(dt, Utc)) + } + } + + d.deserialize_str(CockroachTimestampVisitor) +} + +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub struct NodeDecommission { + pub node_id: String, + pub is_live: bool, + pub replicas: i64, + pub is_decommissioning: bool, + pub membership: NodeMembership, + pub is_draining: bool, + pub notes: Vec, +} + +impl NodeDecommission { + pub fn parse_from_csv(data: &[u8]) -> Result { + // Reading the node decommission output is awkward because it isn't + // fully CSV. 
We expect a CSV header, then a row for each node being + // decommissioned, then (maybe) a blank line followed by a note that is + // just a string, not related to the initial CSV data. Even though the + // CLI supports decommissioning more than one node in one invocation, we + // only provide an API to decommission a single node, so we expect: + // + // 1. The CSV header line + // 2. The one row of CSV data + // 3. Trailing notes + // + // We'll collect the notes as a separate field and return them to our + // caller. + + // First we'll run the data through a csv::Reader; this will pull out + // the header row and the one row of data. + let mut reader = csv::Reader::from_reader(io::Cursor::new(data)); + let record: CliNodeDecommission = + reader.deserialize().next().ok_or_else(|| { + io::Error::other("fewer than two lines of output") + })??; + + // Get the position where the reader ended after that one row; we'll + // collect any remaining nonempty lines as `notes`. + let extra_data = &data[reader.position().byte() as usize..]; + let mut notes = Vec::new(); + for line in String::from_utf8_lossy(extra_data).lines() { + let line = line.trim(); + if !line.is_empty() { + notes.push(line.to_string()); + } + } + + Ok(Self::from((record, notes))) + } +} + +// Slightly different `NodeDecommission` that matches what we get from +// `cockroach`: this omits `notes`, which isn't really a CSV field at all, but +// is instead where we collect the non-CSV string output from the CLI, uses +// a custom deserializer for `membership` to handle unknown variants, and the ID +// column is `id` instead of `node_id`. 
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] +struct CliNodeDecommission { + pub id: String, + pub is_live: bool, + pub replicas: i64, + pub is_decommissioning: bool, + #[serde(deserialize_with = "parse_node_membership")] + pub membership: NodeMembership, + pub is_draining: bool, +} + +impl From<(CliNodeDecommission, Vec)> for NodeDecommission { + fn from((cli, notes): (CliNodeDecommission, Vec)) -> Self { + Self { + node_id: cli.id, + is_live: cli.is_live, + replicas: cli.replicas, + is_decommissioning: cli.is_decommissioning, + membership: cli.membership, + is_draining: cli.is_draining, + notes, + } + } +} + +fn parse_node_membership<'de, D>(d: D) -> Result +where + D: serde::Deserializer<'de>, +{ + struct CockroachNodeMembershipVisitor; + + impl<'de> de::Visitor<'de> for CockroachNodeMembershipVisitor { + type Value = NodeMembership; + + fn expecting( + &self, + formatter: &mut std::fmt::Formatter, + ) -> std::fmt::Result { + formatter.write_str("a Cockroach node membership string") + } + + fn visit_str(self, v: &str) -> Result + where + E: de::Error, + { + let membership = match v { + "active" => NodeMembership::Active, + "decommissioning" => NodeMembership::Decommissioning, + "decommissioned" => NodeMembership::Decommissioned, + _ => NodeMembership::Unknown { value: v.to_string() }, + }; + Ok(membership) + } + } + + d.deserialize_str(CockroachNodeMembershipVisitor) +} + +// The cockroach CLI and `crdb_internal.gossip_liveness` table use a string for +// node membership, but there are only three meaningful values per +// https://github.com/cockroachdb/cockroach/blob/0c92c710d2baadfdc5475be8d2238cf26cb152ca/pkg/kv/kvserver/liveness/livenesspb/liveness.go#L96, +// so we'll convert into a Rust enum and leave the "unknown" case for future +// changes that expand or reword these values. 
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] +#[serde(tag = "state", rename_all = "lowercase")] +pub enum NodeMembership { + Active, + Decommissioning, + Decommissioned, + Unknown { value: String }, +} + +#[cfg(test)] +mod tests { + use super::*; + use chrono::NaiveDate; + use test_strategy::proptest; + + #[test] + fn test_node_status_parse_single_line_from_csv() { + let input = br#"id,address,sql_address,build,started_at,updated_at,locality,is_available,is_live +1,[::1]:42021,[::1]:42021,v22.1.9,2024-05-21 15:19:50.523796,2024-05-21 16:31:28.050069,,true,true"#; + let expected = NodeStatus { + node_id: "1".to_string(), + address: "[::1]:42021".parse().unwrap(), + sql_address: "[::1]:42021".parse().unwrap(), + build: "v22.1.9".to_string(), + started_at: DateTime::from_naive_utc_and_offset( + NaiveDate::from_ymd_opt(2024, 5, 21) + .unwrap() + .and_hms_micro_opt(15, 19, 50, 523796) + .unwrap(), + Utc, + ), + updated_at: DateTime::from_naive_utc_and_offset( + NaiveDate::from_ymd_opt(2024, 5, 21) + .unwrap() + .and_hms_micro_opt(16, 31, 28, 50069) + .unwrap(), + Utc, + ), + locality: String::new(), + is_available: true, + is_live: true, + }; + + let statuses = NodeStatus::parse_from_csv(input).expect("parsed input"); + assert_eq!(statuses, vec![expected]); + } + + #[test] + fn test_node_status_parse_multiple_lines_from_csv() { + let input = br#"id,address,sql_address,build,started_at,updated_at,locality,is_available,is_live +1,[fd00:1122:3344:109::3]:32221,[fd00:1122:3344:109::3]:32221,v22.1.9-dirty,2024-05-18 19:18:00.597145,2024-05-21 15:22:34.290434,,true,true +2,[fd00:1122:3344:105::3]:32221,[fd00:1122:3344:105::3]:32221,v22.1.9-dirty,2024-05-18 19:17:01.796714,2024-05-21 15:22:34.901268,,true,true +3,[fd00:1122:3344:10b::3]:32221,[fd00:1122:3344:10b::3]:32221,v22.1.9-dirty,2024-05-18 19:18:52.37564,2024-05-21 15:22:36.341146,,true,true +4,[fd00:1122:3344:107::3]:32221,[fd00:1122:3344:107::3]:32221,v22.1.9-dirty,2024-05-18 
19:16:22.788276,2024-05-21 15:22:34.897047,,true,true +5,[fd00:1122:3344:108::3]:32221,[fd00:1122:3344:108::3]:32221,v22.1.9-dirty,2024-05-18 19:18:09.196634,2024-05-21 15:22:35.168738,,true,true"#; + let expected = vec![ + NodeStatus { + node_id: "1".to_string(), + address: "[fd00:1122:3344:109::3]:32221".parse().unwrap(), + sql_address: "[fd00:1122:3344:109::3]:32221".parse().unwrap(), + build: "v22.1.9-dirty".to_string(), + started_at: DateTime::from_naive_utc_and_offset( + NaiveDate::from_ymd_opt(2024, 5, 18) + .unwrap() + .and_hms_micro_opt(19, 18, 0, 597145) + .unwrap(), + Utc, + ), + updated_at: DateTime::from_naive_utc_and_offset( + NaiveDate::from_ymd_opt(2024, 5, 21) + .unwrap() + .and_hms_micro_opt(15, 22, 34, 290434) + .unwrap(), + Utc, + ), + locality: String::new(), + is_available: true, + is_live: true, + }, + NodeStatus { + node_id: "2".to_string(), + address: "[fd00:1122:3344:105::3]:32221".parse().unwrap(), + sql_address: "[fd00:1122:3344:105::3]:32221".parse().unwrap(), + build: "v22.1.9-dirty".to_string(), + started_at: DateTime::from_naive_utc_and_offset( + NaiveDate::from_ymd_opt(2024, 5, 18) + .unwrap() + .and_hms_micro_opt(19, 17, 1, 796714) + .unwrap(), + Utc, + ), + updated_at: DateTime::from_naive_utc_and_offset( + NaiveDate::from_ymd_opt(2024, 5, 21) + .unwrap() + .and_hms_micro_opt(15, 22, 34, 901268) + .unwrap(), + Utc, + ), + locality: String::new(), + is_available: true, + is_live: true, + }, + NodeStatus { + node_id: "3".to_string(), + address: "[fd00:1122:3344:10b::3]:32221".parse().unwrap(), + sql_address: "[fd00:1122:3344:10b::3]:32221".parse().unwrap(), + build: "v22.1.9-dirty".to_string(), + started_at: DateTime::from_naive_utc_and_offset( + NaiveDate::from_ymd_opt(2024, 5, 18) + .unwrap() + .and_hms_micro_opt(19, 18, 52, 375640) + .unwrap(), + Utc, + ), + updated_at: DateTime::from_naive_utc_and_offset( + NaiveDate::from_ymd_opt(2024, 5, 21) + .unwrap() + .and_hms_micro_opt(15, 22, 36, 341146) + .unwrap(), + Utc, + ), + 
locality: String::new(), + is_available: true, + is_live: true, + }, + NodeStatus { + node_id: "4".to_string(), + address: "[fd00:1122:3344:107::3]:32221".parse().unwrap(), + sql_address: "[fd00:1122:3344:107::3]:32221".parse().unwrap(), + build: "v22.1.9-dirty".to_string(), + started_at: DateTime::from_naive_utc_and_offset( + NaiveDate::from_ymd_opt(2024, 5, 18) + .unwrap() + .and_hms_micro_opt(19, 16, 22, 788276) + .unwrap(), + Utc, + ), + updated_at: DateTime::from_naive_utc_and_offset( + NaiveDate::from_ymd_opt(2024, 5, 21) + .unwrap() + .and_hms_micro_opt(15, 22, 34, 897047) + .unwrap(), + Utc, + ), + locality: String::new(), + is_available: true, + is_live: true, + }, + NodeStatus { + node_id: "5".to_string(), + address: "[fd00:1122:3344:108::3]:32221".parse().unwrap(), + sql_address: "[fd00:1122:3344:108::3]:32221".parse().unwrap(), + build: "v22.1.9-dirty".to_string(), + started_at: DateTime::from_naive_utc_and_offset( + NaiveDate::from_ymd_opt(2024, 5, 18) + .unwrap() + .and_hms_micro_opt(19, 18, 9, 196634) + .unwrap(), + Utc, + ), + updated_at: DateTime::from_naive_utc_and_offset( + NaiveDate::from_ymd_opt(2024, 5, 21) + .unwrap() + .and_hms_micro_opt(15, 22, 35, 168738) + .unwrap(), + Utc, + ), + locality: String::new(), + is_available: true, + is_live: true, + }, + ]; + + let statuses = NodeStatus::parse_from_csv(input).expect("parsed input"); + assert_eq!(statuses.len(), expected.len()); + for (status, expected) in statuses.iter().zip(&expected) { + assert_eq!(status, expected); + } + } + + #[test] + fn test_node_decommission_parse_with_no_trailing_notes() { + let input = + br#"id,is_live,replicas,is_decommissioning,membership,is_draining +6,true,24,true,decommissioning,false"#; + let expected = NodeDecommission { + node_id: "6".to_string(), + is_live: true, + replicas: 24, + is_decommissioning: true, + membership: NodeMembership::Decommissioning, + is_draining: false, + notes: vec![], + }; + + let statuses = + 
NodeDecommission::parse_from_csv(input).expect("parsed input"); + assert_eq!(statuses, expected); + } + + #[test] + fn test_node_decommission_parse_with_trailing_notes() { + let input = + br#"id,is_live,replicas,is_decommissioning,membership,is_draining +6,false,0,true,decommissioned,false + +No more data reported on target nodes. Please verify cluster health before removing the nodes. +"#; + let expected = NodeDecommission { + node_id: "6".to_string(), + is_live: false, + replicas: 0, + is_decommissioning: true, + membership: NodeMembership::Decommissioned, + is_draining: false, + notes: vec!["No more data reported on target nodes. \ + Please verify cluster health before removing the nodes." + .to_string()], + }; + + let statuses = + NodeDecommission::parse_from_csv(input).expect("parsed input"); + assert_eq!(statuses, expected); + } + + #[test] + fn test_node_decommission_parse_with_unexpected_membership_value() { + let input = + br#"id,is_live,replicas,is_decommissioning,membership,is_draining +6,false,0,true,foobar,false"#; + let expected = NodeDecommission { + node_id: "6".to_string(), + is_live: false, + replicas: 0, + is_decommissioning: true, + membership: NodeMembership::Unknown { value: "foobar".to_string() }, + is_draining: false, + notes: vec![], + }; + + let statuses = + NodeDecommission::parse_from_csv(input).expect("parsed input"); + assert_eq!(statuses, expected); + } + + // TODO: the proptests below should probably be fuzz targets instead to + // allow for guided fuzzing. 
+ + #[proptest] + fn node_status_parse_doesnt_panic_on_arbitrary_input(input: Vec) { + _ = NodeStatus::parse_from_csv(&input); + } + + #[proptest] + fn node_decommission_parse_doesnt_panic_on_arbitrary_input(input: Vec) { + _ = NodeDecommission::parse_from_csv(&input); + } +} diff --git a/cockroach-admin/Cargo.toml b/cockroach-admin/Cargo.toml index 07f9807463..1738fd98e5 100644 --- a/cockroach-admin/Cargo.toml +++ b/cockroach-admin/Cargo.toml @@ -12,6 +12,8 @@ anyhow.workspace = true camino.workspace = true chrono.workspace = true clap.workspace = true +cockroach-admin-api.workspace = true +cockroach-admin-types.workspace = true csv.workspace = true dropshot.workspace = true http.workspace = true diff --git a/cockroach-admin/src/bin/cockroach-admin.rs b/cockroach-admin/src/bin/cockroach-admin.rs index 0399c8bbb0..ee6d8f4aa9 100644 --- a/cockroach-admin/src/bin/cockroach-admin.rs +++ b/cockroach-admin/src/bin/cockroach-admin.rs @@ -19,9 +19,6 @@ use std::net::SocketAddrV6; #[derive(Debug, Parser)] #[clap(name = "cockroach-admin", about = "Omicron CRDB cluster admin server")] enum Args { - /// Print the OpenAPI Spec document and exit - Openapi, - /// Start the CRDB admin server Run { /// Path to the `cockroach` CLI @@ -57,8 +54,6 @@ async fn main_impl() -> Result<(), CmdError> { let args = Args::parse(); match args { - Args::Openapi => omicron_cockroach_admin::run_openapi() - .map_err(|e| CmdError::Failure(anyhow!(e))), Args::Run { path_to_cockroach_binary, cockroach_address, diff --git a/cockroach-admin/src/cockroach_cli.rs b/cockroach-admin/src/cockroach_cli.rs index 1951866ce7..b812cf9749 100644 --- a/cockroach-admin/src/cockroach_cli.rs +++ b/cockroach-admin/src/cockroach_cli.rs @@ -3,20 +3,14 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
use camino::Utf8PathBuf; -use chrono::DateTime; -use chrono::NaiveDateTime; -use chrono::Utc; +use cockroach_admin_types::NodeDecommission; +use cockroach_admin_types::NodeStatus; use dropshot::HttpError; use illumos_utils::output_to_exec_error; use illumos_utils::ExecutionError; -use schemars::JsonSchema; -use serde::de; -use serde::Deserialize; -use serde::Serialize; use slog_error_chain::InlineErrorChain; use slog_error_chain::SlogInlineError; use std::io; -use std::net::SocketAddr; use std::net::SocketAddrV6; use tokio::process::Command; @@ -139,463 +133,16 @@ impl CockroachCli { } } -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] -#[serde(rename_all = "snake_case")] -pub struct NodeStatus { - pub node_id: String, - pub address: SocketAddr, - pub sql_address: SocketAddr, - pub build: String, - pub started_at: DateTime, - pub updated_at: DateTime, - pub locality: String, - pub is_available: bool, - pub is_live: bool, -} - -// Slightly different `NodeStatus` that matches what we get from `cockroach`: -// timestamps are a fixed format with no timezone (but are actually UTC), so we -// have a custom deserializer, and the ID column is `id` instead of `node_id`. 
-#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -struct CliNodeStatus { - id: String, - address: SocketAddr, - sql_address: SocketAddr, - build: String, - #[serde(deserialize_with = "parse_cockroach_cli_timestamp")] - started_at: DateTime, - #[serde(deserialize_with = "parse_cockroach_cli_timestamp")] - updated_at: DateTime, - locality: String, - is_available: bool, - is_live: bool, -} - -impl From for NodeStatus { - fn from(cli: CliNodeStatus) -> Self { - Self { - node_id: cli.id, - address: cli.address, - sql_address: cli.sql_address, - build: cli.build, - started_at: cli.started_at, - updated_at: cli.updated_at, - locality: cli.locality, - is_available: cli.is_available, - is_live: cli.is_live, - } - } -} - -fn parse_cockroach_cli_timestamp<'de, D>( - d: D, -) -> Result, D::Error> -where - D: serde::Deserializer<'de>, -{ - struct CockroachTimestampVisitor; - impl<'de> de::Visitor<'de> for CockroachTimestampVisitor { - type Value = DateTime; - - fn expecting( - &self, - formatter: &mut std::fmt::Formatter, - ) -> std::fmt::Result { - formatter.write_str("a Cockroach CLI timestamp") - } - - fn visit_str(self, v: &str) -> Result - where - E: de::Error, - { - let dt = NaiveDateTime::parse_from_str(v, "%Y-%m-%d %H:%M:%S%.f") - .map_err(E::custom)?; - Ok(DateTime::from_naive_utc_and_offset(dt, Utc)) - } - } - - d.deserialize_str(CockroachTimestampVisitor) -} - -impl NodeStatus { - pub fn parse_from_csv(data: &[u8]) -> Result, csv::Error> { - let mut statuses = Vec::new(); - let mut reader = csv::Reader::from_reader(io::Cursor::new(data)); - for result in reader.deserialize() { - let record: CliNodeStatus = result?; - statuses.push(record.into()); - } - Ok(statuses) - } -} - -// The cockroach CLI and `crdb_internal.gossip_liveness` table use a string for -// node membership, but there are only three meaningful values per -// https://github.com/cockroachdb/cockroach/blob/0c92c710d2baadfdc5475be8d2238cf26cb152ca/pkg/kv/kvserver/liveness/livenesspb/liveness.go#L96, 
-// so we'll convert into a Rust enum and leave the "unknown" case for future -// changes that expand or reword these values. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] -#[serde(tag = "state", rename_all = "lowercase")] -pub enum NodeMembership { - Active, - Decommissioning, - Decommissioned, - Unknown { value: String }, -} - -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] -#[serde(rename_all = "snake_case")] -pub struct NodeDecommission { - pub node_id: String, - pub is_live: bool, - pub replicas: i64, - pub is_decommissioning: bool, - pub membership: NodeMembership, - pub is_draining: bool, - pub notes: Vec, -} - -// Slightly different `NodeDecommission` that matches what we get from -// `cockroach`: this omites `notes`, which isn't really a CSV field at all, but -// is instead where we collect the non-CSV string output from the CLI, uses -// a custom deserializer for `membership` to handle unknown variants, and the ID -// column is `id` instead of `node_id`. 
-#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -struct CliNodeDecommission { - pub id: String, - pub is_live: bool, - pub replicas: i64, - pub is_decommissioning: bool, - #[serde(deserialize_with = "parse_node_membership")] - pub membership: NodeMembership, - pub is_draining: bool, -} - -impl From<(CliNodeDecommission, Vec)> for NodeDecommission { - fn from((cli, notes): (CliNodeDecommission, Vec)) -> Self { - Self { - node_id: cli.id, - is_live: cli.is_live, - replicas: cli.replicas, - is_decommissioning: cli.is_decommissioning, - membership: cli.membership, - is_draining: cli.is_draining, - notes, - } - } -} - -fn parse_node_membership<'de, D>(d: D) -> Result -where - D: serde::Deserializer<'de>, -{ - struct CockroachNodeMembershipVisitor; - - impl<'de> de::Visitor<'de> for CockroachNodeMembershipVisitor { - type Value = NodeMembership; - - fn expecting( - &self, - formatter: &mut std::fmt::Formatter, - ) -> std::fmt::Result { - formatter.write_str("a Cockroach node membership string") - } - - fn visit_str(self, v: &str) -> Result - where - E: de::Error, - { - let membership = match v { - "active" => NodeMembership::Active, - "decommissioning" => NodeMembership::Decommissioning, - "decommissioned" => NodeMembership::Decommissioned, - _ => NodeMembership::Unknown { value: v.to_string() }, - }; - Ok(membership) - } - } - - d.deserialize_str(CockroachNodeMembershipVisitor) -} - -impl NodeDecommission { - pub fn parse_from_csv(data: &[u8]) -> Result { - // Reading the node decommission output is awkward because it isn't - // fully CSV. We expect a CSV header, then a row for each node being - // decommissioned, then (maybe) a blank line followed by a note that is - // just a string, not related to the initial CSV data. Even though the - // CLI supports decommissioning more than one node in one invocation, we - // only provide an API to decommission a single node, so we expect: - // - // 1. The CSV header line - // 2. The one row of CSV data - // 3. 
Trailing notes - // - // We'll collect the notes as a separate field and return them to our - // caller. - - // First we'll run the data through a csv::Reader; this will pull out - // the header row and the one row of data. - let mut reader = csv::Reader::from_reader(io::Cursor::new(data)); - let record: CliNodeDecommission = - reader.deserialize().next().ok_or_else(|| { - io::Error::other("fewer than two lines of output") - })??; - - // Get the position where the reader ended after that one row; we'll - // collect any remaining nonempty lines as `notes`. - let extra_data = &data[reader.position().byte() as usize..]; - let mut notes = Vec::new(); - for line in String::from_utf8_lossy(extra_data).lines() { - let line = line.trim(); - if !line.is_empty() { - notes.push(line.to_string()); - } - } - - Ok(Self::from((record, notes))) - } -} - #[cfg(test)] mod tests { + use std::net::SocketAddr; + use super::*; - use chrono::NaiveDate; + use cockroach_admin_types::NodeMembership; use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; - use test_strategy::proptest; use url::Url; - #[test] - fn test_node_status_parse_single_line_from_csv() { - let input = br#"id,address,sql_address,build,started_at,updated_at,locality,is_available,is_live -1,[::1]:42021,[::1]:42021,v22.1.9,2024-05-21 15:19:50.523796,2024-05-21 16:31:28.050069,,true,true"#; - let expected = NodeStatus { - node_id: "1".to_string(), - address: "[::1]:42021".parse().unwrap(), - sql_address: "[::1]:42021".parse().unwrap(), - build: "v22.1.9".to_string(), - started_at: DateTime::from_naive_utc_and_offset( - NaiveDate::from_ymd_opt(2024, 5, 21) - .unwrap() - .and_hms_micro_opt(15, 19, 50, 523796) - .unwrap(), - Utc, - ), - updated_at: DateTime::from_naive_utc_and_offset( - NaiveDate::from_ymd_opt(2024, 5, 21) - .unwrap() - .and_hms_micro_opt(16, 31, 28, 50069) - .unwrap(), - Utc, - ), - locality: String::new(), - is_available: true, - is_live: true, - }; - - let statuses = 
NodeStatus::parse_from_csv(input).expect("parsed input"); - assert_eq!(statuses, vec![expected]); - } - - #[test] - fn test_node_status_parse_multiple_lines_from_csv() { - let input = br#"id,address,sql_address,build,started_at,updated_at,locality,is_available,is_live -1,[fd00:1122:3344:109::3]:32221,[fd00:1122:3344:109::3]:32221,v22.1.9-dirty,2024-05-18 19:18:00.597145,2024-05-21 15:22:34.290434,,true,true -2,[fd00:1122:3344:105::3]:32221,[fd00:1122:3344:105::3]:32221,v22.1.9-dirty,2024-05-18 19:17:01.796714,2024-05-21 15:22:34.901268,,true,true -3,[fd00:1122:3344:10b::3]:32221,[fd00:1122:3344:10b::3]:32221,v22.1.9-dirty,2024-05-18 19:18:52.37564,2024-05-21 15:22:36.341146,,true,true -4,[fd00:1122:3344:107::3]:32221,[fd00:1122:3344:107::3]:32221,v22.1.9-dirty,2024-05-18 19:16:22.788276,2024-05-21 15:22:34.897047,,true,true -5,[fd00:1122:3344:108::3]:32221,[fd00:1122:3344:108::3]:32221,v22.1.9-dirty,2024-05-18 19:18:09.196634,2024-05-21 15:22:35.168738,,true,true"#; - let expected = vec![ - NodeStatus { - node_id: "1".to_string(), - address: "[fd00:1122:3344:109::3]:32221".parse().unwrap(), - sql_address: "[fd00:1122:3344:109::3]:32221".parse().unwrap(), - build: "v22.1.9-dirty".to_string(), - started_at: DateTime::from_naive_utc_and_offset( - NaiveDate::from_ymd_opt(2024, 5, 18) - .unwrap() - .and_hms_micro_opt(19, 18, 0, 597145) - .unwrap(), - Utc, - ), - updated_at: DateTime::from_naive_utc_and_offset( - NaiveDate::from_ymd_opt(2024, 5, 21) - .unwrap() - .and_hms_micro_opt(15, 22, 34, 290434) - .unwrap(), - Utc, - ), - locality: String::new(), - is_available: true, - is_live: true, - }, - NodeStatus { - node_id: "2".to_string(), - address: "[fd00:1122:3344:105::3]:32221".parse().unwrap(), - sql_address: "[fd00:1122:3344:105::3]:32221".parse().unwrap(), - build: "v22.1.9-dirty".to_string(), - started_at: DateTime::from_naive_utc_and_offset( - NaiveDate::from_ymd_opt(2024, 5, 18) - .unwrap() - .and_hms_micro_opt(19, 17, 1, 796714) - .unwrap(), - Utc, - ), - 
updated_at: DateTime::from_naive_utc_and_offset( - NaiveDate::from_ymd_opt(2024, 5, 21) - .unwrap() - .and_hms_micro_opt(15, 22, 34, 901268) - .unwrap(), - Utc, - ), - locality: String::new(), - is_available: true, - is_live: true, - }, - NodeStatus { - node_id: "3".to_string(), - address: "[fd00:1122:3344:10b::3]:32221".parse().unwrap(), - sql_address: "[fd00:1122:3344:10b::3]:32221".parse().unwrap(), - build: "v22.1.9-dirty".to_string(), - started_at: DateTime::from_naive_utc_and_offset( - NaiveDate::from_ymd_opt(2024, 5, 18) - .unwrap() - .and_hms_micro_opt(19, 18, 52, 375640) - .unwrap(), - Utc, - ), - updated_at: DateTime::from_naive_utc_and_offset( - NaiveDate::from_ymd_opt(2024, 5, 21) - .unwrap() - .and_hms_micro_opt(15, 22, 36, 341146) - .unwrap(), - Utc, - ), - locality: String::new(), - is_available: true, - is_live: true, - }, - NodeStatus { - node_id: "4".to_string(), - address: "[fd00:1122:3344:107::3]:32221".parse().unwrap(), - sql_address: "[fd00:1122:3344:107::3]:32221".parse().unwrap(), - build: "v22.1.9-dirty".to_string(), - started_at: DateTime::from_naive_utc_and_offset( - NaiveDate::from_ymd_opt(2024, 5, 18) - .unwrap() - .and_hms_micro_opt(19, 16, 22, 788276) - .unwrap(), - Utc, - ), - updated_at: DateTime::from_naive_utc_and_offset( - NaiveDate::from_ymd_opt(2024, 5, 21) - .unwrap() - .and_hms_micro_opt(15, 22, 34, 897047) - .unwrap(), - Utc, - ), - locality: String::new(), - is_available: true, - is_live: true, - }, - NodeStatus { - node_id: "5".to_string(), - address: "[fd00:1122:3344:108::3]:32221".parse().unwrap(), - sql_address: "[fd00:1122:3344:108::3]:32221".parse().unwrap(), - build: "v22.1.9-dirty".to_string(), - started_at: DateTime::from_naive_utc_and_offset( - NaiveDate::from_ymd_opt(2024, 5, 18) - .unwrap() - .and_hms_micro_opt(19, 18, 9, 196634) - .unwrap(), - Utc, - ), - updated_at: DateTime::from_naive_utc_and_offset( - NaiveDate::from_ymd_opt(2024, 5, 21) - .unwrap() - .and_hms_micro_opt(15, 22, 35, 168738) - .unwrap(), - 
Utc, - ), - locality: String::new(), - is_available: true, - is_live: true, - }, - ]; - - let statuses = NodeStatus::parse_from_csv(input).expect("parsed input"); - assert_eq!(statuses.len(), expected.len()); - for (status, expected) in statuses.iter().zip(&expected) { - assert_eq!(status, expected); - } - } - - #[test] - fn test_node_decommission_parse_with_no_trailing_notes() { - let input = - br#"id,is_live,replicas,is_decommissioning,membership,is_draining -6,true,24,true,decommissioning,false"#; - let expected = NodeDecommission { - node_id: "6".to_string(), - is_live: true, - replicas: 24, - is_decommissioning: true, - membership: NodeMembership::Decommissioning, - is_draining: false, - notes: vec![], - }; - - let statuses = - NodeDecommission::parse_from_csv(input).expect("parsed input"); - assert_eq!(statuses, expected); - } - - #[test] - fn test_node_decommission_parse_with_trailing_notes() { - let input = - br#"id,is_live,replicas,is_decommissioning,membership,is_draining -6,false,0,true,decommissioned,false - -No more data reported on target nodes. Please verify cluster health before removing the nodes. -"#; - let expected = NodeDecommission { - node_id: "6".to_string(), - is_live: false, - replicas: 0, - is_decommissioning: true, - membership: NodeMembership::Decommissioned, - is_draining: false, - notes: vec!["No more data reported on target nodes. \ - Please verify cluster health before removing the nodes." 
- .to_string()], - }; - - let statuses = - NodeDecommission::parse_from_csv(input).expect("parsed input"); - assert_eq!(statuses, expected); - } - - #[test] - fn test_node_decommission_parse_with_unexpected_membership_value() { - let input = - br#"id,is_live,replicas,is_decommissioning,membership,is_draining -6,false,0,true,foobar,false"#; - let expected = NodeDecommission { - node_id: "6".to_string(), - is_live: false, - replicas: 0, - is_decommissioning: true, - membership: NodeMembership::Unknown { value: "foobar".to_string() }, - is_draining: false, - notes: vec![], - }; - - let statuses = - NodeDecommission::parse_from_csv(input).expect("parsed input"); - assert_eq!(statuses, expected); - } - // Ensure that if `cockroach node status` changes in a future CRDB version // bump, we have a test that will fail to force us to check whether our // current parsing is still valid. @@ -721,14 +268,4 @@ No more data reported on target nodes. Please verify cluster health before remov db.cleanup().await.unwrap(); logctx.cleanup_successful(); } - - #[proptest] - fn node_status_parse_doesnt_panic_on_arbitrary_input(input: Vec) { - _ = NodeStatus::parse_from_csv(&input); - } - - #[proptest] - fn node_decommission_parse_doesnt_panic_on_arbitrary_input(input: Vec) { - _ = NodeDecommission::parse_from_csv(&input); - } } diff --git a/cockroach-admin/src/http_entrypoints.rs b/cockroach-admin/src/http_entrypoints.rs index 45957df0df..77eaf7e02b 100644 --- a/cockroach-admin/src/http_entrypoints.rs +++ b/cockroach-admin/src/http_entrypoints.rs @@ -2,112 +2,53 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-use crate::cockroach_cli::NodeDecommission; -use crate::cockroach_cli::NodeStatus; use crate::context::ServerContext; -use dropshot::endpoint; -use dropshot::ApiDescriptionRegisterError; +use cockroach_admin_api::*; +use cockroach_admin_types::NodeDecommission; use dropshot::HttpError; use dropshot::HttpResponseOk; use dropshot::RequestContext; use dropshot::TypedBody; -use omicron_uuid_kinds::OmicronZoneUuid; -use schemars::JsonSchema; -use serde::Deserialize; -use serde::Serialize; use std::sync::Arc; type CrdbApiDescription = dropshot::ApiDescription>; pub fn api() -> CrdbApiDescription { - fn register_endpoints( - api: &mut CrdbApiDescription, - ) -> Result<(), ApiDescriptionRegisterError> { - api.register(local_node_id)?; - api.register(node_status)?; - api.register(node_decommission)?; - Ok(()) - } - - let mut api = CrdbApiDescription::new(); - if let Err(err) = register_endpoints(&mut api) { - panic!("failed to register entrypoints: {}", err); - } - api + cockroach_admin_api_mod::api_description::() + .expect("registered entrypoints") } -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] -#[serde(rename_all = "snake_case")] -pub struct ClusterNodeStatus { - pub all_nodes: Vec, -} - -/// Get the status of all nodes in the CRDB cluster -#[endpoint { - method = GET, - path = "/node/status", -}] -async fn node_status( - rqctx: RequestContext>, -) -> Result, HttpError> { - let ctx = rqctx.context(); - let all_nodes = - ctx.cockroach_cli().node_status().await.map_err(HttpError::from)?; - Ok(HttpResponseOk(ClusterNodeStatus { all_nodes })) -} +enum CockroachAdminImpl {} -/// CockroachDB Node ID -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] -#[serde(rename_all = "snake_case")] -pub struct LocalNodeId { - /// The ID of this Omicron zone. 
- /// - /// This is included to ensure correctness even if a socket address on a - /// sled is reused for a different zone; if our caller is trying to - /// determine the node ID for a particular Omicron CockroachDB zone, they'll - /// contact us by socket address. We include our zone ID in the response for - /// their confirmation that we are the zone they intended to contact. - pub zone_id: OmicronZoneUuid, - // CockroachDB node IDs are integers, in practice, but our use of them is as - // input and output to the `cockroach` CLI. We use a string which is a bit - // more natural (no need to parse CLI output or stringify an ID to send it - // as input) and leaves open the door for the format to change in the - // future. - pub node_id: String, -} +impl CockroachAdminApi for CockroachAdminImpl { + type Context = Arc; -/// Get the CockroachDB node ID of the local cockroach instance. -#[endpoint { - method = GET, - path = "/node/id", -}] -async fn local_node_id( - rqctx: RequestContext>, -) -> Result, HttpError> { - let ctx = rqctx.context(); - let node_id = ctx.node_id().await?.to_string(); - let zone_id = ctx.zone_id(); - Ok(HttpResponseOk(LocalNodeId { zone_id, node_id })) -} + async fn node_status( + rqctx: RequestContext, + ) -> Result, HttpError> { + let ctx = rqctx.context(); + let all_nodes = + ctx.cockroach_cli().node_status().await.map_err(HttpError::from)?; + Ok(HttpResponseOk(ClusterNodeStatus { all_nodes })) + } -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] -#[serde(rename_all = "snake_case")] -pub struct NodeId { - pub node_id: String, -} + async fn local_node_id( + rqctx: RequestContext, + ) -> Result, HttpError> { + let ctx = rqctx.context(); + let node_id = ctx.node_id().await?.to_string(); + let zone_id = ctx.zone_id(); + Ok(HttpResponseOk(LocalNodeId { zone_id, node_id })) + } -/// Decommission a node from the CRDB cluster -#[endpoint { - method = POST, - path = "/node/decommission", -}] -async fn node_decommission( - 
rqctx: RequestContext>, - body: TypedBody, -) -> Result, HttpError> { - let ctx = rqctx.context(); - let NodeId { node_id } = body.into_inner(); - let decommission_status = - ctx.cockroach_cli().node_decommission(&node_id).await?; - Ok(HttpResponseOk(decommission_status)) + async fn node_decommission( + rqctx: RequestContext, + body: TypedBody, + ) -> Result, HttpError> { + let ctx = rqctx.context(); + let NodeId { node_id } = body.into_inner(); + let decommission_status = + ctx.cockroach_cli().node_decommission(&node_id).await?; + Ok(HttpResponseOk(decommission_status)) + } } diff --git a/cockroach-admin/src/lib.rs b/cockroach-admin/src/lib.rs index f4a32cb6c0..1057344297 100644 --- a/cockroach-admin/src/lib.rs +++ b/cockroach-admin/src/lib.rs @@ -23,21 +23,6 @@ pub use cockroach_cli::CockroachCli; pub use cockroach_cli::CockroachCliError; pub use config::Config; -/// Run the OpenAPI generator for the API; this emits the OpenAPI spec to -/// stdout. -pub fn run_openapi() -> Result<(), String> { - http_entrypoints::api() - .openapi("Oxide CockroachDb Cluster Admin API", "0.0.1") - .description( - "API for interacting with the Oxide \ - control plane's CockroachDb cluster", - ) - .contact_url("https://oxide.computer") - .contact_email("api@oxide.computer") - .write(&mut std::io::stdout()) - .map_err(|e| e.to_string()) -} - #[derive(Debug, thiserror::Error, SlogInlineError)] pub enum StartError { #[error("failed to initialize logger")] diff --git a/dev-tools/openapi-manager/Cargo.toml b/dev-tools/openapi-manager/Cargo.toml index aa0cfacfd5..b7e74f6515 100644 --- a/dev-tools/openapi-manager/Cargo.toml +++ b/dev-tools/openapi-manager/Cargo.toml @@ -11,6 +11,7 @@ workspace = true anyhow.workspace = true atomicwrites.workspace = true camino.workspace = true +cockroach-admin-api.workspace = true clap.workspace = true dns-server-api.workspace = true dropshot.workspace = true diff --git a/dev-tools/openapi-manager/src/spec.rs b/dev-tools/openapi-manager/src/spec.rs index 
83f0f4dd57..6a3431b1f5 100644 --- a/dev-tools/openapi-manager/src/spec.rs +++ b/dev-tools/openapi-manager/src/spec.rs @@ -14,6 +14,17 @@ use openapiv3::OpenAPI; /// All APIs managed by openapi-manager. pub fn all_apis() -> Vec { vec![ + ApiSpec { + title: "CockroachDB Cluster Admin API", + version: "0.0.1", + description: "API for interacting with the Oxide control plane's \ + CockroachDB cluster", + boundary: ApiBoundary::Internal, + api_description: + cockroach_admin_api::cockroach_admin_api_mod::stub_api_description, + filename: "cockroach-admin.json", + extra_validation: None, + }, ApiSpec { title: "Internal DNS", version: "0.0.1", diff --git a/openapi/cockroach-admin.json b/openapi/cockroach-admin.json index 3b03475ec5..76c0bea09b 100644 --- a/openapi/cockroach-admin.json +++ b/openapi/cockroach-admin.json @@ -1,8 +1,8 @@ { "openapi": "3.0.3", "info": { - "title": "Oxide CockroachDb Cluster Admin API", - "description": "API for interacting with the Oxide control plane's CockroachDb cluster", + "title": "CockroachDB Cluster Admin API", + "description": "API for interacting with the Oxide control plane's CockroachDB cluster", "contact": { "url": "https://oxide.computer", "email": "api@oxide.computer" @@ -12,7 +12,7 @@ "paths": { "/node/decommission": { "post": { - "summary": "Decommission a node from the CRDB cluster", + "summary": "Decommission a node from the CRDB cluster.", "operationId": "node_decommission", "requestBody": { "content": { @@ -70,7 +70,7 @@ }, "/node/status": { "get": { - "summary": "Get the status of all nodes in the CRDB cluster", + "summary": "Get the status of all nodes in the CRDB cluster.", "operationId": "node_status", "responses": { "200": {