diff --git a/.github/workflows/hakari.yml b/.github/workflows/hakari.yml index c0c1799892..7cd3b69c58 100644 --- a/.github/workflows/hakari.yml +++ b/.github/workflows/hakari.yml @@ -24,7 +24,7 @@ jobs: with: toolchain: stable - name: Install cargo-hakari - uses: taiki-e/install-action@2335425120e645291d84cec8194c63983a0c8ee5 # v2 + uses: taiki-e/install-action@ef2fb5af7d19da8885ee368c6bde2ae6d0758e3d # v2 with: tool: cargo-hakari - name: Check workspace-hack Cargo.toml is up-to-date diff --git a/.gitignore b/.gitignore index 39a8f36144..6e4e0eb42a 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,4 @@ tags .falcon/* .img/* connectivity-report.json +*.local diff --git a/Cargo.lock b/Cargo.lock index 485c965c2e..63d1566ca1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -300,9 +300,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", @@ -1614,7 +1614,7 @@ dependencies = [ [[package]] name = "ddm-admin-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/maghemite?rev=6e0a232fd0b443c19f61f94bf02b7695505aa8e3#6e0a232fd0b443c19f61f94bf02b7695505aa8e3" +source = "git+https://github.com/oxidecomputer/maghemite?rev=d1686c86f92ead77e07ddc6024837dee4a401d6d#d1686c86f92ead77e07ddc6024837dee4a401d6d" dependencies = [ "oxnet", "percent-encoding", @@ -2028,7 +2028,7 @@ dependencies = [ [[package]] name = "dropshot" version = "0.10.2-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#6a3f84ca5fd8d0c5c010cfe837efbe6b5d117d9d" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#9fef3961c0b89aa8ab8e186dc0c89f8f4f811eea" dependencies = [ "async-stream", "async-trait", @@ -2074,7 +2074,7 @@ dependencies = [ [[package]] name = "dropshot_endpoint" version = "0.10.2-dev" -source = "git+https://github.com/oxidecomputer/dropshot?branch=main#6a3f84ca5fd8d0c5c010cfe837efbe6b5d117d9d" +source = "git+https://github.com/oxidecomputer/dropshot?branch=main#9fef3961c0b89aa8ab8e186dc0c89f8f4f811eea" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -4319,7 +4319,7 @@ dependencies = [ [[package]] name = "mg-admin-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/maghemite?rev=6e0a232fd0b443c19f61f94bf02b7695505aa8e3#6e0a232fd0b443c19f61f94bf02b7695505aa8e3" +source = "git+https://github.com/oxidecomputer/maghemite?rev=d1686c86f92ead77e07ddc6024837dee4a401d6d#d1686c86f92ead77e07ddc6024837dee4a401d6d" dependencies = [ "anyhow", "chrono", @@ -5662,6 +5662,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "oximeter-client", + "oximeter-db", "pq-sys", "ratatui", "reedline", @@ -5676,6 +5677,7 @@ dependencies = [ "textwrap", "tokio", "unicode-width", + "url", "uuid", ] @@ -8533,9 +8535,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] @@ -8580,9 +8582,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index a0bea3327c..31d874f4de 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -235,7 +235,7 @@ approx = "0.5.1" assert_matches = "1.5.0" assert_cmd = "2.0.14" async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", rev = "ed7ab5ef0513ba303d33efd41d3e9e381169d59b" } -async-trait = "0.1.80" +async-trait = "0.1.81" atomicwrites = "0.4.3" authz-macros = { path = "nexus/authz-macros" } backoff = { version = "0.4.0", features = [ "tokio" ] } @@ -338,8 +338,8 @@ macaddr = { version = "1.0.1", features = ["serde_std"] } maplit = "1.0.2" mockall = "0.12" newtype_derive = "0.1.6" -mg-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "6e0a232fd0b443c19f61f94bf02b7695505aa8e3" } -ddm-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "6e0a232fd0b443c19f61f94bf02b7695505aa8e3" } +mg-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "d1686c86f92ead77e07ddc6024837dee4a401d6d" } +ddm-admin-client = { git = "https://github.com/oxidecomputer/maghemite", rev = "d1686c86f92ead77e07ddc6024837dee4a401d6d" } multimap = "0.10.0" nexus-auth = { path = "nexus/auth" } nexus-client = { path = "clients/nexus-client" } @@ -390,7 +390,7 @@ oso = "0.27" owo-colors = "4.0.0" oximeter = { path = "oximeter/oximeter" } oximeter-client = { path = "clients/oximeter-client" } -oximeter-db = { path = "oximeter/db/" } +oximeter-db = { path = "oximeter/db/", default-features = false } oximeter-collector = { path = "oximeter/collector" } oximeter-impl = { path = "oximeter/impl" } oximeter-instruments = { path = "oximeter/instruments" } @@ -439,7 +439,7 @@ rustls = "0.22.2" rustls-pemfile = "2.1.2" rustyline = "14.0.0" samael = { version = "0.0.15", features = ["xmlsec"] } -schemars = "0.8.16" +schemars = "0.8.21" secrecy = "0.8.0" semver = { version = "1.0.23", features = ["std", "serde"] } serde = { version = "1.0", default-features = false, features = [ "derive", "rc" ] } diff --git a/dev-tools/omdb/Cargo.toml b/dev-tools/omdb/Cargo.toml index 9cdf03093c..e5d898509c 100644 --- a/dev-tools/omdb/Cargo.toml +++ b/dev-tools/omdb/Cargo.toml @@ -37,6 +37,7 @@ nexus-types.workspace = true omicron-common.workspace = true omicron-uuid-kinds.workspace = true oximeter-client.workspace = true +oximeter-db = { workspace = true, default-features = false, features = [ "oxql" ] } # See omicron-rpaths for more about the "pq-sys" dependency. 
pq-sys = "*" ratatui.workspace = true @@ -51,6 +52,7 @@ tabled.workspace = true textwrap.workspace = true tokio = { workspace = true, features = [ "full" ] } unicode-width.workspace = true +url.workspace = true uuid.workspace = true ipnetwork.workspace = true omicron-workspace-hack.workspace = true diff --git a/dev-tools/omdb/src/bin/omdb/main.rs b/dev-tools/omdb/src/bin/omdb/main.rs index 7469e2ba54..8fc48f5028 100644 --- a/dev-tools/omdb/src/bin/omdb/main.rs +++ b/dev-tools/omdb/src/bin/omdb/main.rs @@ -50,6 +50,7 @@ mod helpers; mod mgs; mod nexus; mod oximeter; +mod oxql; mod sled_agent; #[tokio::main] @@ -66,7 +67,8 @@ async fn main() -> Result<(), anyhow::Error> { OmdbCommands::Db(db) => db.run_cmd(&args, &log).await, OmdbCommands::Mgs(mgs) => mgs.run_cmd(&args, &log).await, OmdbCommands::Nexus(nexus) => nexus.run_cmd(&args, &log).await, - OmdbCommands::Oximeter(oximeter) => oximeter.run_cmd(&log).await, + OmdbCommands::Oximeter(oximeter) => oximeter.run_cmd(&args, &log).await, + OmdbCommands::Oxql(oxql) => oxql.run_cmd(&args, &log).await, OmdbCommands::SledAgent(sled) => sled.run_cmd(&args, &log).await, OmdbCommands::CrucibleAgent(crucible) => crucible.run_cmd(&args).await, } @@ -269,6 +271,8 @@ enum OmdbCommands { Nexus(nexus::NexusArgs), /// Query oximeter collector state Oximeter(oximeter::OximeterArgs), + /// Enter the Oximeter Query Language shell for interactive querying. + Oxql(oxql::OxqlArgs), /// Debug a specific Sled SledAgent(sled_agent::SledAgentArgs), } diff --git a/dev-tools/omdb/src/bin/omdb/oximeter.rs b/dev-tools/omdb/src/bin/omdb/oximeter.rs index a6dc2ce011..c068110b4c 100644 --- a/dev-tools/omdb/src/bin/omdb/oximeter.rs +++ b/dev-tools/omdb/src/bin/omdb/oximeter.rs @@ -5,6 +5,7 @@ //! omdb commands that query oximeter use crate::helpers::CONNECTION_OPTIONS_HEADING; +use crate::Omdb; use anyhow::Context; use clap::Args; use clap::Subcommand; @@ -18,18 +19,17 @@ use tabled::Table; use tabled::Tabled; use uuid::Uuid; +/// Arguments for the oximeter subcommand. #[derive(Debug, Args)] pub struct OximeterArgs { /// URL of the oximeter collector to query #[arg( long, env = "OMDB_OXIMETER_URL", - // This can't be global = true (i.e. passed in later in the - // command-line) because global options can't be required. If this - // changes to being optional, we should set global = true. + global = true, help_heading = CONNECTION_OPTIONS_HEADING, )] - oximeter_url: String, + oximeter_url: Option<String>, #[command(subcommand)] command: OximeterCommands, @@ -38,20 +38,47 @@ pub struct OximeterArgs { /// Subcommands that query oximeter collector state #[derive(Debug, Subcommand)] enum OximeterCommands { - /// List the producers the collector is assigned to poll + /// List the producers the collector is assigned to poll. ListProducers, } impl OximeterArgs { - fn client(&self, log: &Logger) -> Client { - Client::new( - &self.oximeter_url, + async fn client( + &self, + omdb: &Omdb, + log: &Logger, + ) -> Result<Client, anyhow::Error> { + let oximeter_url = match &self.oximeter_url { + Some(cli_or_env_url) => cli_or_env_url.clone(), + None => { + eprintln!( + "note: Oximeter URL not specified. Will pick one from DNS."
+ ); + let addr = omdb + .dns_lookup_one( + log.clone(), + internal_dns::ServiceName::Oximeter, + ) + .await?; + format!("http://{}", addr) + } + }; + eprintln!("note: using Oximeter URL {}", &oximeter_url); + + let client = Client::new( + &oximeter_url, log.new(slog::o!("component" => "oximeter-client")), - ) + ); + Ok(client) } - pub async fn run_cmd(&self, log: &Logger) -> anyhow::Result<()> { - let client = self.client(log); + /// Run the command. + pub async fn run_cmd( + &self, + omdb: &Omdb, + log: &Logger, + ) -> anyhow::Result<()> { + let client = self.client(omdb, log).await?; match self.command { OximeterCommands::ListProducers => { self.list_producers(client).await diff --git a/dev-tools/omdb/src/bin/omdb/oxql.rs b/dev-tools/omdb/src/bin/omdb/oxql.rs new file mode 100644 index 0000000000..89ddae9cf2 --- /dev/null +++ b/dev-tools/omdb/src/bin/omdb/oxql.rs @@ -0,0 +1,97 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! omdb OxQL shell for interactive queries on metrics/timeseries. + +// Copyright 2024 Oxide Computer + +use crate::helpers::CONNECTION_OPTIONS_HEADING; +use crate::Omdb; +use anyhow::Context; +use clap::Args; +use oximeter_db::{ + self, + shells::oxql::{self, ShellOptions}, +}; +use slog::Logger; +use std::net::SocketAddr; +use url::Url; + +/// Command-line arguments for the OxQL shell. +#[derive(Debug, Args)] +pub struct OxqlArgs { + /// URL of the ClickHouse server to connect to. + #[arg( + long, + env = "OMDB_CLICKHOUSE_URL", + global = true, + help_heading = CONNECTION_OPTIONS_HEADING, + )] + clickhouse_url: Option<String>, + + /// Print summaries of each SQL query run against the database. + #[clap(long = "summaries")] + print_summaries: bool, + + /// Print the total elapsed query duration. + #[clap(long = "elapsed")] + print_elapsed: bool, +} + +impl OxqlArgs { + /// Run the OxQL shell via the `omdb oxql` subcommand. + pub async fn run_cmd( + &self, + omdb: &Omdb, + log: &Logger, + ) -> anyhow::Result<()> { + let addr = self.addr(omdb, log).await?; + + let opts = ShellOptions { + print_summaries: self.print_summaries, + print_elapsed: self.print_elapsed, + }; + + oxql::shell( + addr.ip(), + addr.port(), + log.new(slog::o!("component" => "clickhouse-client")), + opts, + ) + .await + } + + /// Resolve the ClickHouse URL to a socket address. + async fn addr( + &self, + omdb: &Omdb, + log: &Logger, + ) -> anyhow::Result<SocketAddr> { + match &self.clickhouse_url { + Some(cli_or_env_url) => Url::parse(&cli_or_env_url) + .context( + "failed parsing URL from command-line or environment variable", + )? + .socket_addrs(|| None) + .context("failed resolving socket addresses")? + .into_iter() + .next() + .context("failed resolving socket addresses"), None => { + eprintln!( + "note: ClickHouse URL not specified. Will pick one from DNS." + ); + + Ok(SocketAddr::V6( + omdb.dns_lookup_one( + log.clone(), + internal_dns::ServiceName::Clickhouse, + ) + .await + .context("failed looking up ClickHouse internal DNS entry")?, + )) + } + } + } +} diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index 348ff5e9ac..66a48ab394 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -433,3 +433,28 @@ note: using database URL postgresql://root@[::1]:REDACTED_PORT/omicron?sslmode=d note: database schema version matches expected () note: listing all commissioned sleds (use -F to filter, e.g.
-F in-service) ============================================= +EXECUTING COMMAND: omdb ["oximeter", "--oximeter-url", "junk", "list-producers"] +termination: Exited(1) +--------------------------------------------- +stdout: +--------------------------------------------- +stderr: +note: using Oximeter URL junk +Error: failed to fetch collector info + +Caused by: + 0: Communication Error: builder error: relative URL without a base + 1: builder error: relative URL without a base + 2: relative URL without a base +============================================= +EXECUTING COMMAND: omdb ["oxql", "--clickhouse-url", "junk"] +termination: Exited(1) +--------------------------------------------- +stdout: +--------------------------------------------- +stderr: +Error: failed parsing URL from command-line or environment variable + +Caused by: + relative URL without a base +============================================= diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index b5147b66f9..a65098d7aa 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -405,14 +405,14 @@ task: "dns_propagation_external" task: "nat_v4_garbage_collector" - configured period: every 30s + configured period: every s currently executing: no last completed activation: , triggered by a periodic timer firing started at (s ago) and ran for ms last completion reported error: failed to resolve addresses for Dendrite services: no record found for Query { name: Name("_dendrite._tcp.control-plane.oxide.internal."), query_type: SRV, query_class: IN } task: "blueprint_loader" - configured period: every 1m 40s + configured period: every 1m s currently executing: no last completed activation: , triggered by a periodic timer firing started at (s ago) and ran for ms @@ -436,7 +436,7 @@ task: "abandoned_vmm_reaper" sled resource reservations deleted: 0 task: "bfd_manager" - configured period: every 30s + configured period: every s currently executing: no last completed activation: , triggered by a periodic timer firing started at (s ago) and ran for ms @@ -467,7 +467,7 @@ task: "external_endpoints" TLS certificates: 0 task: "instance_watcher" - configured period: every 30s + configured period: every s currently executing: no last completed activation: , triggered by a periodic timer firing started at (s ago) and ran for ms @@ -503,7 +503,7 @@ task: "metrics_producer_gc" warning: unknown background task: "metrics_producer_gc" (don't know how to interpret details: Object {"expiration": String(""), "pruned": Array []}) task: "phantom_disks" - configured period: every 30s + configured period: every s currently executing: no last completed activation: , triggered by a periodic timer firing started at (s ago) and ran for ms @@ -511,14 +511,14 @@ task: "phantom_disks" number of phantom disk delete errors: 0 task: "physical_disk_adoption" - configured period: every 30s + configured period: every s currently executing: no last completed activation: , triggered by a dependent task completing started at (s ago) and ran for ms last completion reported error: task disabled task: "region_replacement" - configured period: every 30s + configured period: every s currently executing: no last completed activation: , triggered by a periodic timer firing started at (s ago) and ran for ms @@ -526,7 +526,7 @@ task: "region_replacement" number of region replacement start errors: 0 task: "region_replacement_driver" - configured period: every 30s + configured period: every s currently executing: no 
last completed activation: , triggered by a periodic timer firing started at (s ago) and ran for ms @@ -541,28 +541,28 @@ task: "service_firewall_rule_propagation" started at (s ago) and ran for ms task: "service_zone_nat_tracker" - configured period: every 30s + configured period: every s currently executing: no last completed activation: , triggered by a periodic timer firing started at (s ago) and ran for ms last completion reported error: inventory collection is None task: "switch_port_config_manager" - configured period: every 30s + configured period: every s currently executing: no last completed activation: , triggered by a periodic timer firing started at (s ago) and ran for ms warning: unknown background task: "switch_port_config_manager" (don't know how to interpret details: Object {}) task: "v2p_manager" - configured period: every 30s + configured period: every s currently executing: no last completed activation: , triggered by a periodic timer firing started at (s ago) and ran for ms warning: unknown background task: "v2p_manager" (don't know how to interpret details: Object {}) task: "vpc_route_manager" - configured period: every 30s + configured period: every s currently executing: no last completed activation: , triggered by a periodic timer firing started at (s ago) and ran for ms diff --git a/dev-tools/omdb/tests/test_all_output.rs b/dev-tools/omdb/tests/test_all_output.rs index 19be33631d..6a959d726a 100644 --- a/dev-tools/omdb/tests/test_all_output.rs +++ b/dev-tools/omdb/tests/test_all_output.rs @@ -8,6 +8,7 @@ //! sure you're only breaking what you intend. use expectorate::assert_contents; +use nexus_test_utils::{OXIMETER_UUID, PRODUCER_UUID}; use nexus_test_utils_macros::nexus_test; use nexus_types::deployment::SledFilter; use nexus_types::deployment::UnstableReconfiguratorState; @@ -17,6 +18,7 @@ use omicron_test_utils::dev::test_cmds::run_command; use omicron_test_utils::dev::test_cmds::ExtraRedactions; use slog_error_chain::InlineErrorChain; use std::fmt::Write; +use std::net::IpAddr; use std::path::Path; use subprocess::Exec; @@ -26,6 +28,32 @@ const CMD_OMDB: &str = env!("CARGO_BIN_EXE_omdb"); type ControlPlaneTestContext = nexus_test_utils::ControlPlaneTestContext<omicron_nexus::Server>; +/// The `oximeter` list-producers command output is not easy to compare as a +/// string directly, because of the timing of registrations by both our test +/// producer and the one Nexus registers. Instead, look for our test producer +/// in the list.
+fn assert_oximeter_list_producers_output( + output: &str, + ox_url: &str, + test_producer: IpAddr, +) { + assert!( + output.contains(format!("Collector ID: {}", OXIMETER_UUID).as_str()) + ); + assert!(output.contains(ox_url)); + + let found = output.lines().any(|line| { + line.contains(PRODUCER_UUID) + && line.contains(&test_producer.to_string()) + }); + + assert!( + found, + "test producer {} and producer UUID {} not found on line together", + test_producer, PRODUCER_UUID + ); +} + #[tokio::test] async fn test_omdb_usage_errors() { let cmd_path = path_to_executable(CMD_OMDB); @@ -57,6 +85,10 @@ async fn test_omdb_usage_errors() { &["sled-agent"], &["sled-agent", "zones"], &["sled-agent", "zpools"], + &["oximeter", "--help"], + &["oxql", "--help"], + // Misspelled argument + &["oxql", "--summarizes"], ]; for args in invocations { @@ -74,10 +106,15 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { ) .await; let cmd_path = path_to_executable(CMD_OMDB); + let postgres_url = cptestctx.database.listen_url(); let nexus_internal_url = format!("http://{}/", cptestctx.internal_client.bind_address); let mgs_url = format!("http://{}/", gwtestctx.client.bind_address); + let ox_url = format!("http://{}/", cptestctx.oximeter.server_address()); + let ox_test_producer = cptestctx.producer.address().ip(); + let ch_url = format!("http://{}/", cptestctx.clickhouse.address); + let tmpdir = camino_tempfile::tempdir() .expect("failed to create temporary directory"); let tmppath = tmpdir.path().join("reconfigurator-save.out"); @@ -124,18 +161,24 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { let p = postgres_url.to_string(); let u = nexus_internal_url.clone(); let g = mgs_url.clone(); + let ox = ox_url.clone(); + let ch = ch_url.clone(); do_run_extra( &mut output, move |exec| { exec.env("OMDB_DB_URL", &p) .env("OMDB_NEXUS_URL", &u) .env("OMDB_MGS_URL", &g) + .env("OMDB_OXIMETER_URL", &ox) + .env("OMDB_CLICKHOUSE_URL", &ch) }, &cmd_path, args, - ExtraRedactions::new() - .variable_length("tmp_path", tmppath.as_str()) - .fixed_length("blueprint_id", &initial_blueprint_id), + Some( + ExtraRedactions::new() + .variable_length("tmp_path", tmppath.as_str()) + .fixed_length("blueprint_id", &initial_blueprint_id), + ), ) .await; } @@ -170,6 +213,23 @@ async fn test_omdb_success_cases(cptestctx: &ControlPlaneTestContext) { .is_some()); assert!(!parsed.collections.is_empty()); + let ox_invocation = &["oximeter", "list-producers"]; + let mut ox_output = String::new(); + let ox = ox_url.clone(); + + do_run_no_redactions( + &mut ox_output, + move |exec| exec.env("OMDB_OXIMETER_URL", &ox), + &cmd_path, + ox_invocation, + ) + .await; + assert_oximeter_list_producers_output( + &ox_output, + &ox_url, + ox_test_producer, + ); + gwtestctx.teardown().await; } @@ -188,6 +248,9 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { let postgres_url = cptestctx.database.listen_url().to_string(); let nexus_internal_url = format!("http://{}", cptestctx.internal_client.bind_address); + let ox_url = format!("http://{}/", cptestctx.oximeter.server_address()); + let ox_test_producer = cptestctx.producer.address().ip(); + let ch_url = format!("http://{}/", cptestctx.clickhouse.address); let dns_sockaddr = cptestctx.internal_dns.dns_server.local_address(); let mut output = String::new(); @@ -263,7 +326,46 @@ async fn test_omdb_env_settings(cptestctx: &ControlPlaneTestContext) { let args = &["--dns-server", &dns_sockaddr.to_string(), "db", "sleds"]; do_run(&mut output, move
|exec| exec, &cmd_path, args).await; + // Case: specified in multiple places (command-line argument wins) + let args = &["oximeter", "--oximeter-url", "junk", "list-producers"]; + let ox = ox_url.clone(); + do_run( + &mut output, + move |exec| exec.env("OMDB_OXIMETER_URL", &ox), + &cmd_path, + args, + ) + .await; + + // Case: specified in multiple places (command-line argument wins) + let args = &["oxql", "--clickhouse-url", "junk"]; + do_run( + &mut output, + move |exec| exec.env("OMDB_CLICKHOUSE_URL", &ch_url), + &cmd_path, + args, + ) + .await; + assert_contents("tests/env.out", &output); + + // Oximeter URL + // Case 1: specified on the command line. + // Case 2: is covered by the success tests above. + let ox_args1 = &["oximeter", "--oximeter-url", &ox_url, "list-producers"]; + let mut ox_output1 = String::new(); + do_run_no_redactions( + &mut ox_output1, + move |exec| exec, + &cmd_path, + ox_args1, + ) + .await; + assert_oximeter_list_producers_output( + &ox_output1, + &ox_url, + ox_test_producer, + ); } async fn do_run<F>( output: &mut String, modexec: F, cmd_path: &Path, args: &[&str], ) where F: FnOnce(Exec) -> Exec + Send + 'static, { - do_run_extra(output, modexec, cmd_path, args, &ExtraRedactions::new()) - .await; + do_run_extra( + output, + modexec, + cmd_path, + args, + Some(&ExtraRedactions::new()), + ) + .await; +} + +async fn do_run_no_redactions<F>( + output: &mut String, + modexec: F, + cmd_path: &Path, + args: &[&str], +) where + F: FnOnce(Exec) -> Exec + Send + 'static, +{ + do_run_extra(output, modexec, cmd_path, args, None).await; } async fn do_run_extra<F>( output: &mut String, modexec: F, cmd_path: &Path, args: &[&str], - extra_redactions: &ExtraRedactions<'_>, + extra_redactions: Option<&ExtraRedactions<'_>>, ) where F: FnOnce(Exec) -> Exec + Send + 'static, { - println!("running command with args: {:?}", args); write!( output, "EXECUTING COMMAND: {} {:?}\n", cmd_path.file_name().expect("missing command").to_string_lossy(), args.iter() - .map(|r| redact_extra(r, extra_redactions)) - .collect::<Vec<_>>(), + .map(|r| { + extra_redactions.map_or_else( + || r.to_string(), + |redactions| redact_extra(r, redactions), + ) + }) + .collect::<Vec<_>>() ) .unwrap(); @@ -326,9 +449,21 @@ async fn do_run_extra<F>( write!(output, "termination: {:?}\n", exit_status).unwrap(); write!(output, "---------------------------------------------\n").unwrap(); write!(output, "stdout:\n").unwrap(); - output.push_str(&redact_extra(&stdout_text, extra_redactions)); + + if let Some(extra_redactions) = extra_redactions { + output.push_str(&redact_extra(&stdout_text, extra_redactions)); + } else { + output.push_str(&stdout_text); + } + write!(output, "---------------------------------------------\n").unwrap(); write!(output, "stderr:\n").unwrap(); - output.push_str(&redact_extra(&stderr_text, extra_redactions)); + + if let Some(extra_redactions) = extra_redactions { + output.push_str(&redact_extra(&stderr_text, extra_redactions)); + } else { + output.push_str(&stderr_text); + } + write!(output, "=============================================\n").unwrap(); } diff --git a/dev-tools/omdb/tests/usage_errors.out b/dev-tools/omdb/tests/usage_errors.out index b4679001fa..8762907e81 100644 --- a/dev-tools/omdb/tests/usage_errors.out +++ b/dev-tools/omdb/tests/usage_errors.out @@ -14,6 +14,7 @@ Commands: mgs Debug a specific Management Gateway Service instance nexus Debug a specific Nexus instance oximeter Query oximeter collector state + oxql Enter the Oximeter Query Language shell for interactive querying sled-agent Debug a
specific Sled help Print this message or the help of the given subcommand(s) @@ -44,6 +45,7 @@ Commands: mgs Debug a specific Management Gateway Service instance nexus Debug a specific Nexus instance oximeter Query oximeter collector state + oxql Enter the Oximeter Query Language shell for interactive querying sled-agent Debug a specific Sled help Print this message or the help of the given subcommand(s) @@ -615,3 +617,68 @@ Connection Options: Safety Options: -w, --destructive Allow potentially-destructive subcommands ============================================= +EXECUTING COMMAND: omdb ["oximeter", "--help"] +termination: Exited(0) +--------------------------------------------- +stdout: +Query oximeter collector state + +Usage: omdb oximeter [OPTIONS] <COMMAND> + +Commands: + list-producers List the producers the collector is assigned to poll + help Print this message or the help of the given subcommand(s) + +Options: + --log-level <LOG_LEVEL> log level filter [env: LOG_LEVEL=] [default: warn] + -h, --help Print help + +Connection Options: + --oximeter-url <OXIMETER_URL> URL of the oximeter collector to query [env: + OMDB_OXIMETER_URL=] + --dns-server <DNS_SERVER> [env: OMDB_DNS_SERVER=] + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands +--------------------------------------------- +stderr: +============================================= +EXECUTING COMMAND: omdb ["oxql", "--help"] +termination: Exited(0) +--------------------------------------------- +stdout: +Enter the Oximeter Query Language shell for interactive querying + +Usage: omdb oxql [OPTIONS] + +Options: + --log-level <LOG_LEVEL> log level filter [env: LOG_LEVEL=] [default: warn] + --summaries Print summaries of each SQL query run against the database + --elapsed Print the total elapsed query duration + -h, --help Print help + +Connection Options: + --clickhouse-url <CLICKHOUSE_URL> + URL of the ClickHouse server to connect to [env: OMDB_CLICKHOUSE_URL=] + --dns-server <DNS_SERVER> + [env: OMDB_DNS_SERVER=] + +Safety Options: + -w, --destructive Allow potentially-destructive subcommands +--------------------------------------------- +stderr: +============================================= +EXECUTING COMMAND: omdb ["oxql", "--summarizes"] +termination: Exited(2) +--------------------------------------------- +stdout: +--------------------------------------------- +stderr: +error: unexpected argument '--summarizes' found + + tip: a similar argument exists: '--summaries' + +Usage: omdb oxql <--clickhouse-url <CLICKHOUSE_URL>|--summaries|--elapsed> + +For more information, try '--help'. +============================================= diff --git a/dev-tools/openapi-manager/README.adoc b/dev-tools/openapi-manager/README.adoc index 0a6e80515c..82aa8d1742 100644 --- a/dev-tools/openapi-manager/README.adoc +++ b/dev-tools/openapi-manager/README.adoc @@ -65,7 +65,10 @@ In the implementation crate: Next, perform the steps in <<add_to_manager>> below. -Finally, remove the test which used to manage the document. +Finally, remove: + +. The test which used to manage the document. The OpenAPI manager includes a test that will automatically run in CI. +. The binary subcommand (typically called `openapi`) that generated the OpenAPI document. The test was the only practical use of this subcommand. === Adding the API crate to the manager [[add_to_manager]] diff --git a/nexus-config/src/nexus_config.rs b/nexus-config/src/nexus_config.rs index 5b7069d06f..5ca1d2d6ed 100644 --- a/nexus-config/src/nexus_config.rs +++ b/nexus-config/src/nexus_config.rs @@ -6,16 +6,14 @@ //! at deployment time.
use crate::PostgresConfigWithUrl; - -use omicron_common::address::Ipv6Subnet; -use omicron_common::address::NEXUS_TECHPORT_EXTERNAL_PORT; -use omicron_common::address::RACK_PREFIX; -use omicron_common::api::internal::shared::SwitchLocation; - use anyhow::anyhow; use camino::{Utf8Path, Utf8PathBuf}; use dropshot::ConfigDropshot; use dropshot::ConfigLogging; +use omicron_common::address::Ipv6Subnet; +use omicron_common::address::NEXUS_TECHPORT_EXTERNAL_PORT; +use omicron_common::address::RACK_PREFIX; +use omicron_common::api::internal::shared::SwitchLocation; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_with::serde_as; diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 130e01d3bc..359ea616d4 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -52,7 +52,7 @@ num-integer.workspace = true once_cell.workspace = true openssl.workspace = true oximeter-client.workspace = true -oximeter-db.workspace = true +oximeter-db = { workspace = true, default-features = false, features = [ "oxql" ] } oxnet.workspace = true parse-display.workspace = true paste.workspace = true diff --git a/nexus/db-model/src/bgp.rs b/nexus/db-model/src/bgp.rs index 8aaa08ebb7..bd3c6709ee 100644 --- a/nexus/db-model/src/bgp.rs +++ b/nexus/db-model/src/bgp.rs @@ -5,7 +5,7 @@ use crate::schema::{ bgp_announce_set, bgp_announcement, bgp_config, bgp_peer_view, }; -use crate::SqlU32; +use crate::{SqlU16, SqlU32}; use db_macros::Resource; use ipnetwork::IpNetwork; use nexus_types::external_api::params; @@ -141,5 +141,5 @@ pub struct BgpPeerView { pub multi_exit_discriminator: Option<SqlU32>, pub local_pref: Option<SqlU32>, pub enforce_first_as: bool, - pub vlan_id: Option<SqlU32>, + pub vlan_id: Option<SqlU16>, } diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index d47c21cd1d..89ae6c18c5 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -281,7 +281,7 @@ table! { multi_exit_discriminator -> Nullable<Int8>, local_pref -> Nullable<Int8>, enforce_first_as -> Bool, - vlan_id -> Nullable<Int8>, + vlan_id -> Nullable<Int4>, } } diff --git a/nexus/db-queries/src/db/datastore/bgp.rs b/nexus/db-queries/src/db/datastore/bgp.rs index feb41443b2..d73e7ff327 100644 --- a/nexus/db-queries/src/db/datastore/bgp.rs +++ b/nexus/db-queries/src/db/datastore/bgp.rs @@ -314,6 +314,78 @@ impl DataStore { }) } + pub async fn bgp_update_announce_set( + &self, + opctx: &OpContext, + announce: &params::BgpAnnounceSetCreate, + ) -> CreateResult<(BgpAnnounceSet, Vec<BgpAnnouncement>)> { + use db::schema::bgp_announce_set::dsl as announce_set_dsl; + use db::schema::bgp_announcement::dsl as bgp_announcement_dsl; + + let conn = self.pool_connection_authorized(opctx).await?; + + self.transaction_retry_wrapper("bgp_update_announce_set") + .transaction(&conn, |conn| async move { + let bas: BgpAnnounceSet = announce.clone().into(); + + // ensure the announce set exists + let found_as: Option<BgpAnnounceSet> = + announce_set_dsl::bgp_announce_set + .filter( + announce_set_dsl::name + .eq(Name::from(bas.name().clone())), + ) + .filter(announce_set_dsl::time_deleted.is_null()) + .select(BgpAnnounceSet::as_select()) + .limit(1) + .first_async(&conn) + .await + .ok(); + + let db_as = match found_as { + Some(v) => v, + None => { + diesel::insert_into(announce_set_dsl::bgp_announce_set) + .values(bas.clone()) + .returning(BgpAnnounceSet::as_returning()) + .get_result_async::<BgpAnnounceSet>(&conn) + .await?
+ } + }; + + // clear existing announcements + diesel::delete(bgp_announcement_dsl::bgp_announcement) + .filter( + bgp_announcement_dsl::announce_set_id.eq(db_as.id()), + ) + .execute_async(&conn) + .await?; + + // repopulate announcements + let mut db_announcements = Vec::new(); + for a in &announce.announcement { + let an = BgpAnnouncement { + announce_set_id: db_as.id(), + address_lot_block_id: bas.identity.id, + network: a.network.into(), + }; + let db_an = diesel::insert_into( + bgp_announcement_dsl::bgp_announcement, + ) + .values(an.clone()) + .returning(BgpAnnouncement::as_returning()) + .get_result_async::<BgpAnnouncement>(&conn) + .await?; + + db_announcements.push(db_an); + } + + Ok((db_as, db_announcements)) + }) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + pub async fn bgp_create_announce_set( &self, opctx: &OpContext, diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs index d373ae2f5b..159933dce0 100644 --- a/nexus/db-queries/src/db/datastore/switch_port.rs +++ b/nexus/db-queries/src/db/datastore/switch_port.rs @@ -336,13 +336,7 @@ impl DataStore { } } else { - public_error_from_diesel( - e, - ErrorHandler::Conflict( - ResourceType::SwitchPortSettings, - params.identity.name.as_str(), - ), - ) + public_error_from_diesel(e, ErrorHandler::Server) } }) } @@ -1451,6 +1445,36 @@ async fn do_switch_port_settings_delete( .execute_async(conn) .await?; + // delete allowed exports + use db::schema::switch_port_settings_bgp_peer_config_allow_export as allow_export; + use db::schema::switch_port_settings_bgp_peer_config_allow_export::dsl as allow_export_dsl; + diesel::delete( + allow_export_dsl::switch_port_settings_bgp_peer_config_allow_export, + ) + .filter(allow_export::port_settings_id.eq(id)) + .execute_async(conn) + .await?; + + // delete allowed imports + use db::schema::switch_port_settings_bgp_peer_config_allow_import as allow_import; + use db::schema::switch_port_settings_bgp_peer_config_allow_import::dsl as allow_import_dsl; + diesel::delete( + allow_import_dsl::switch_port_settings_bgp_peer_config_allow_import, + ) + .filter(allow_import::port_settings_id.eq(id)) + .execute_async(conn) + .await?; + + // delete communities + use db::schema::switch_port_settings_bgp_peer_config_communities as bgp_communities; + use db::schema::switch_port_settings_bgp_peer_config_communities::dsl as bgp_communities_dsl; + diesel::delete( + bgp_communities_dsl::switch_port_settings_bgp_peer_config_communities, + ) + .filter(bgp_communities::port_settings_id.eq(id)) + .execute_async(conn) + .await?; + // delete address configs use db::schema::switch_port_settings_address_config::{ self as address_config, dsl as address_config_dsl, diff --git a/nexus/db-queries/src/transaction_retry.rs b/nexus/db-queries/src/transaction_retry.rs index 558bb574c9..03e0574f52 100644 --- a/nexus/db-queries/src/transaction_retry.rs +++ b/nexus/db-queries/src/transaction_retry.rs @@ -7,27 +7,15 @@ use async_bb8_diesel::AsyncConnection; use chrono::Utc; use diesel::result::Error as DieselError; -use oximeter::{types::Sample, Metric, MetricsError, Target}; +use oximeter::{types::Sample, MetricsError}; use rand::{thread_rng, Rng}; use slog::{info, warn, Logger}; use std::sync::{Arc, Mutex}; use std::time::Duration; -// Identifies "which" transaction is retrying -#[derive(Debug, Clone, Target)] -struct DatabaseTransaction { - name: String, -} - -// Identifies that a retry has occurred, and track how long -// the transaction took (either since
starting, or since the last -// retry failure was recorded). -#[derive(Debug, Clone, Metric)] -struct RetryData { - #[datum] - latency: f64, - attempt: u32, -} +oximeter::use_timeseries!("database-transaction.toml"); +use database_transaction::DatabaseTransaction; +use database_transaction::RetryData; // Collects all transaction retry samples #[derive(Debug, Default, Clone)] @@ -156,7 +144,7 @@ impl RetryHelper { let _ = self.producer.append( &DatabaseTransaction { name: self.name.into() }, - &RetryData { latency, attempt }, + &RetryData { datum: latency, attempt }, ); // This backoff is not exponential, but I'm not sure we actually want diff --git a/nexus/src/app/background/tasks/instance_watcher.rs b/nexus/src/app/background/tasks/instance_watcher.rs index ce202a2a08..8a41e2d062 100644 --- a/nexus/src/app/background/tasks/instance_watcher.rs +++ b/nexus/src/app/background/tasks/instance_watcher.rs @@ -26,12 +26,14 @@ use sled_agent_client::Client as SledAgentClient; use std::borrow::Cow; use std::collections::BTreeMap; use std::future::Future; -use std::net::IpAddr; use std::num::NonZeroU32; use std::sync::Arc; use std::sync::Mutex; use uuid::Uuid; +oximeter::use_timeseries!("vm-health-check.toml"); +use virtual_machine::VirtualMachine; + /// Background task that periodically checks instance states. pub(crate) struct InstanceWatcher { datastore: Arc, @@ -211,30 +213,6 @@ pub struct WatcherIdentity { pub rack_id: Uuid, } -#[derive( - Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, oximeter::Target, -)] -struct VirtualMachine { - /// The rack ID of the Nexus process which performed the health check. - rack_id: Uuid, - /// The ID of the Nexus process which performed the health check. - nexus_id: Uuid, - /// The instance's ID. - instance_id: Uuid, - /// The silo ID of the instance's silo. - silo_id: Uuid, - /// The project ID of the instance. - project_id: Uuid, - /// The VMM ID of the instance's virtual machine manager. - vmm_id: Uuid, - /// The sled-agent's ID. - sled_agent_id: Uuid, - /// The sled agent's IP address. - sled_agent_ip: IpAddr, - /// The sled agent's port. - sled_agent_port: u16, -} - impl VirtualMachine { fn new( WatcherIdentity { rack_id, nexus_id }: WatcherIdentity, @@ -497,12 +475,12 @@ impl BackgroundTask for InstanceWatcher { } mod metrics { + use super::virtual_machine::Check; + use super::virtual_machine::IncompleteCheck; use super::{CheckOutcome, Incomplete, VirtualMachine}; use oximeter::types::Cumulative; - use oximeter::Metric; use oximeter::MetricsError; use oximeter::Sample; - use std::borrow::Cow; use std::collections::BTreeMap; use std::sync::Arc; use std::sync::Mutex; @@ -539,7 +517,7 @@ mod metrics { .check_errors .entry(error) .or_insert_with(|| IncompleteCheck { - reason: error.as_str(), + failure_reason: error.as_str(), datum: Cumulative::default(), }) .datum += 1; @@ -592,37 +570,4 @@ mod metrics { Ok(()) } } - - /// The number of successful checks for a single instance, VMM, and sled agent. - #[derive(Clone, Debug, Metric)] - struct Check { - /// The string representation of the instance's state as understood by - /// the VMM. If the check failed, this will generally be "failed". - state: Cow<'static, str>, - /// `Why the instance was marked as being in this state. - /// - /// If an instance was marked as "failed" due to a check failure, this - /// will be a string representation of the failure reason. Otherwise, if - /// the check was successful, this will be "success". 
Note that this may - /// be "success" even if the instance's state is "failed", which - /// indicates that we successfully queried the instance's state from the - /// sled-agent, and the *sled-agent* reported that the instance has - /// failed --- which is distinct from the instance watcher marking an - /// instance as failed due to a failed check. - reason: Cow<'static, str>, - /// The number of checks for this instance and sled agent which recorded - /// this state for this reason. - datum: Cumulative<u64>, - } - - /// The number of unsuccessful checks for an instance and sled agent pair. - #[derive(Clone, Debug, Metric)] - struct IncompleteCheck { - /// The reason why the check was unsuccessful. - /// - /// This is generated from the [`Incomplete`] enum's `Display` implementation. - reason: Cow<'static, str>, - /// The number of failed checks for this instance and sled agent. - datum: Cumulative<u64>, - } } diff --git a/nexus/src/app/background/tasks/sync_switch_configuration.rs b/nexus/src/app/background/tasks/sync_switch_configuration.rs index 0351c9542a..e8f07726a5 100644 --- a/nexus/src/app/background/tasks/sync_switch_configuration.rs +++ b/nexus/src/app/background/tasks/sync_switch_configuration.rs @@ -965,7 +965,7 @@ impl BackgroundTask for SwitchPortSettingsManager { communities: Vec::new(), allowed_export: ImportExportPolicy::NoFiltering, allowed_import: ImportExportPolicy::NoFiltering, - vlan_id: c.vlan_id.map(|x| x.0 as u16), + vlan_id: c.vlan_id.map(|x| x.0), } }).collect(), port: port.port_name.clone(), diff --git a/nexus/src/app/bgp.rs b/nexus/src/app/bgp.rs index b6e3f25263..118011500a 100644 --- a/nexus/src/app/bgp.rs +++ b/nexus/src/app/bgp.rs @@ -53,14 +53,18 @@ impl super::Nexus { Ok(result) } - pub async fn bgp_create_announce_set( + pub async fn bgp_update_announce_set( &self, opctx: &OpContext, announce: &params::BgpAnnounceSetCreate, ) -> CreateResult<(BgpAnnounceSet, Vec<BgpAnnouncement>)> { opctx.authorize(authz::Action::Modify, &authz::FLEET).await?; let result = - self.db_datastore.bgp_create_announce_set(opctx, announce).await?; + self.db_datastore.bgp_update_announce_set(opctx, announce).await?; + + // eagerly propagate changes via rpw + self.background_tasks + .activate(&self.background_tasks.task_switch_port_settings_manager); Ok(result) } diff --git a/nexus/src/app/switch_port.rs b/nexus/src/app/switch_port.rs index 57ebe5eee7..9726a59d33 100644 --- a/nexus/src/app/switch_port.rs +++ b/nexus/src/app/switch_port.rs @@ -43,8 +43,12 @@ impl super::Nexus { ) .await { - Ok(id) => self.switch_port_settings_update(opctx, id, params).await, + Ok(id) => { + info!(self.log, "updating port settings {id}"); + self.switch_port_settings_update(opctx, id, params).await + } Err(_) => { + info!(self.log, "creating new switch port settings"); self.switch_port_settings_create(opctx, params, None).await } } diff --git a/nexus/src/bin/nexus.rs b/nexus/src/bin/nexus.rs index 452e033ce6..9d13409cef 100644 --- a/nexus/src/bin/nexus.rs +++ b/nexus/src/bin/nexus.rs @@ -32,14 +32,6 @@ struct Args { )] openapi: bool, - #[clap( - short = 'I', - long = "openapi-internal", - help = "Print the internal OpenAPI Spec document and exit", - action - )] - openapi_internal: bool, - #[clap(name = "CONFIG_FILE_PATH", action)] config_file_path: Option, } @@ -56,8 +48,6 @@ async fn do_run() -> Result<(), CmdError> { if args.openapi { run_openapi_external().map_err(|err| CmdError::Failure(anyhow!(err))) - } else if args.openapi_internal { - run_openapi_internal().map_err(|err| CmdError::Failure(anyhow!(err))) } else { let
config_path = match args.config_file_path { Some(path) => path, diff --git a/nexus/src/context.rs b/nexus/src/context.rs index 1512671056..95d69e0c88 100644 --- a/nexus/src/context.rs +++ b/nexus/src/context.rs @@ -146,7 +146,7 @@ impl ServerContext { let authz = Arc::new(authz::Authz::new(&log)); let create_tracker = |name: &str| { let target = HttpService { - name: name.to_string(), + name: name.to_string().into(), id: config.deployment.id, }; const START_LATENCY_DECADE: i16 = -6; diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 92778701a4..1f11b4f939 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -278,7 +278,7 @@ pub(crate) fn external_api() -> NexusApiDescription { api.register(networking_bgp_status)?; api.register(networking_bgp_imported_routes_ipv4)?; api.register(networking_bgp_config_delete)?; - api.register(networking_bgp_announce_set_create)?; + api.register(networking_bgp_announce_set_update)?; api.register(networking_bgp_announce_set_list)?; api.register(networking_bgp_announce_set_delete)?; api.register(networking_bgp_message_history)?; @@ -4059,13 +4059,16 @@ async fn networking_bgp_config_delete( .await } -/// Create new BGP announce set +/// Update BGP announce set +/// +/// If the announce set exists, this endpoint replaces the existing announce +/// set with the one specified. #[endpoint { - method = POST, + method = PUT, path = "/v1/system/networking/bgp-announce", tags = ["system/networking"], }] -async fn networking_bgp_announce_set_create( +async fn networking_bgp_announce_set_update( rqctx: RequestContext<ApiContext>, config: TypedBody<params::BgpAnnounceSetCreate>, ) -> Result<HttpResponseCreated<BgpAnnounceSet>, HttpError> { @@ -4074,7 +4077,7 @@ let nexus = &apictx.context.nexus; let config = config.into_inner(); let opctx = crate::context::op_context_for_external_api(&rqctx).await?; - let result = nexus.bgp_create_announce_set(&opctx, &config).await?; + let result = nexus.bgp_update_announce_set(&opctx, &config).await?; Ok(HttpResponseCreated::<BgpAnnounceSet>(result.0.into())) }; apictx diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index e48ec83d98..a359ead038 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -65,16 +65,6 @@ pub fn run_openapi_external() -> Result<(), String> { .map_err(|e| e.to_string()) } -pub fn run_openapi_internal() -> Result<(), String> { - internal_api() - .openapi("Nexus internal API", "0.0.1") - .description("Nexus internal API") - .contact_url("https://oxide.computer") - .contact_email("api@oxide.computer") - .write(&mut std::io::stdout()) - .map_err(|e| e.to_string()) -} - /// A partially-initialized Nexus server, which exposes an internal interface, /// but is not ready to receive external requests.
pub struct InternalServer { diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index a29b45c4ce..52d9e14e35 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -2307,7 +2307,7 @@ pub static VERIFY_ENDPOINTS: Lazy<Vec<VerifyEndpoint>> = Lazy::new(|| { visibility: Visibility::Public, unprivileged_access: UnprivilegedAccess::None, allowed_methods: vec![ - AllowedMethod::Post( + AllowedMethod::Put( serde_json::to_value(&*DEMO_BGP_ANNOUNCE).unwrap(), ), AllowedMethod::GetNonexistent, diff --git a/nexus/tests/output/nexus_tags.txt b/nexus/tests/output/nexus_tags.txt index 35d8c32561..4af018c5af 100644 --- a/nexus/tests/output/nexus_tags.txt +++ b/nexus/tests/output/nexus_tags.txt @@ -179,9 +179,9 @@ networking_allow_list_view GET /v1/system/networking/allow-li networking_bfd_disable POST /v1/system/networking/bfd-disable networking_bfd_enable POST /v1/system/networking/bfd-enable networking_bfd_status GET /v1/system/networking/bfd-status -networking_bgp_announce_set_create POST /v1/system/networking/bgp-announce networking_bgp_announce_set_delete DELETE /v1/system/networking/bgp-announce networking_bgp_announce_set_list GET /v1/system/networking/bgp-announce +networking_bgp_announce_set_update PUT /v1/system/networking/bgp-announce networking_bgp_config_create POST /v1/system/networking/bgp networking_bgp_config_delete DELETE /v1/system/networking/bgp networking_bgp_config_list GET /v1/system/networking/bgp diff --git a/openapi/nexus.json b/openapi/nexus.json index 759a0658e1..c9d85a8ee3 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -6630,12 +6630,13 @@ } } }, - "post": { + "put": { "tags": [ "system/networking" ], - "summary": "Create new BGP announce set", - "operationId": "networking_bgp_announce_set_create", + "summary": "Update BGP announce set", + "description": "If the announce set exists, this endpoint replaces the existing announce set with the one specified.", + "operationId": "networking_bgp_announce_set_update", "requestBody": { "content": { "application/json": { @@ -19798,7 +19799,9 @@ "type": "string", "enum": [ "count", - "bytes" + "bytes", + "seconds", + "nanoseconds" ] }, "User": { diff --git a/oximeter/collector/src/lib.rs b/oximeter/collector/src/lib.rs index 367a2066a1..02bf9152f4 100644 --- a/oximeter/collector/src/lib.rs +++ b/oximeter/collector/src/lib.rs @@ -409,4 +409,9 @@ impl Oximeter { pub fn collector_id(&self) -> &Uuid { &self.agent.id } + + /// Return the address of the server.
+ pub fn server_address(&self) -> SocketAddr { + self.server.local_addr() + } } diff --git a/oximeter/collector/src/self_stats.rs b/oximeter/collector/src/self_stats.rs index 66be514523..b2272117da 100644 --- a/oximeter/collector/src/self_stats.rs +++ b/oximeter/collector/src/self_stats.rs @@ -9,51 +9,21 @@ use crate::ProducerEndpoint; use oximeter::types::Cumulative; use oximeter::types::ProducerResultsItem; -use oximeter::Metric; use oximeter::MetricsError; use oximeter::Sample; -use oximeter::Target; use reqwest::StatusCode; use std::borrow::Cow; use std::collections::BTreeMap; -use std::net::IpAddr; use std::time::Duration; -use uuid::Uuid; + +oximeter::use_timeseries!("oximeter-collector.toml"); +pub use self::oximeter_collector::Collections; +pub use self::oximeter_collector::FailedCollections; +pub use self::oximeter_collector::OximeterCollector; /// The interval on which we report self statistics pub const COLLECTION_INTERVAL: Duration = Duration::from_secs(60); -/// A target representing a single oximeter collector. -#[derive(Clone, Copy, Debug, Target)] -pub struct OximeterCollector { - /// The collector's ID. - pub collector_id: Uuid, - /// The collector server's IP address. - pub collector_ip: IpAddr, - /// The collector server's port. - pub collector_port: u16, -} - -/// The number of successful collections from a single producer. -#[derive(Clone, Debug, Metric)] -pub struct Collections { - /// The producer's ID. - pub producer_id: Uuid, - /// The producer's IP address. - pub producer_ip: IpAddr, - /// The producer's port. - pub producer_port: u16, - /// The base route in the producer server used to collect metrics. - /// - /// The full route is `{base_route}/{producer_id}`. - /// - // TODO-cleanup: This is no longer relevant, but removing it entirely - // relies on nonexistent functionality for updating timeseries schema. When - // that lands, we should remove this. - pub base_route: String, - pub datum: Cumulative<u64>, -} - /// Small enum to help understand why oximeter failed to collect from a /// producer. #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] @@ -90,30 +60,6 @@ impl FailureReason { } } -/// The number of failed collections from a single producer. -#[derive(Clone, Debug, Metric)] -pub struct FailedCollections { - /// The producer's ID. - pub producer_id: Uuid, - /// The producer's IP address. - pub producer_ip: IpAddr, - /// The producer's port. - pub producer_port: u16, - /// The base route in the producer server used to collect metrics. - /// - /// The full route is `{base_route}/{producer_id}`. - /// - // TODO-cleanup: This is no longer relevant, but removing it entirely - // relies on nonexistent functionality for updating timeseries schema. When - // that lands, we should remove this. - pub base_route: String, - /// The reason we could not collect. - // - // NOTE: This should always be generated through a `FailureReason`. - pub reason: Cow<'static, str>, - pub datum: Cumulative<u64>, -} - /// Oximeter collection statistics maintained by each collection task.
#[derive(Clone, Debug)] pub struct CollectionTaskStats { @@ -133,7 +79,7 @@ impl CollectionTaskStats { producer_id: producer.id, producer_ip: producer.address.ip(), producer_port: producer.address.port(), - base_route: String::new(), + base_route: "".into(), datum: Cumulative::new(0), }, failed_collections: BTreeMap::new(), @@ -176,15 +122,8 @@ impl CollectionTaskStats { #[cfg(test)] mod tests { - use super::Collections; - use super::Cumulative; - use super::FailedCollections; use super::FailureReason; - use super::OximeterCollector; use super::StatusCode; - use oximeter::schema::SchemaSet; - use std::net::IpAddr; - use std::net::Ipv6Addr; #[test] fn test_failure_reason_serialization() { @@ -197,53 +136,4 @@ mod tests { assert_eq!(variant.to_string(), *as_str); } } - - const fn collector() -> OximeterCollector { - OximeterCollector { - collector_id: uuid::uuid!("cfebaa5f-3ba9-4bb5-9145-648d287df78a"), - collector_ip: IpAddr::V6(Ipv6Addr::LOCALHOST), - collector_port: 12345, - } - } - - fn collections() -> Collections { - Collections { - producer_id: uuid::uuid!("718452ab-7cca-42f6-b8b1-1aaaa1b09104"), - producer_ip: IpAddr::V6(Ipv6Addr::LOCALHOST), - producer_port: 12345, - base_route: String::new(), - datum: Cumulative::new(0), - } - } - - fn failed_collections() -> FailedCollections { - FailedCollections { - producer_id: uuid::uuid!("718452ab-7cca-42f6-b8b1-1aaaa1b09104"), - producer_ip: IpAddr::V6(Ipv6Addr::LOCALHOST), - producer_port: 12345, - base_route: String::new(), - reason: FailureReason::Unreachable.as_string(), - datum: Cumulative::new(0), - } - } - - // Check that the self-stat timeseries schema have not changed. - #[test] - fn test_no_schema_changes() { - let collector = collector(); - let collections = collections(); - let failed = failed_collections(); - let mut set = SchemaSet::default(); - assert!(set - .insert_checked(&collector, &collections) - .unwrap() - .is_none()); - assert!(set.insert_checked(&collector, &failed).unwrap().is_none()); - - const PATH: &'static str = concat!( - env!("CARGO_MANIFEST_DIR"), - "/tests/output/self-stat-schema.json" - ); - set.assert_contents(PATH); - } } diff --git a/oximeter/collector/tests/output/self-stat-schema.json b/oximeter/collector/tests/output/self-stat-schema.json deleted file mode 100644 index 00363cb73c..0000000000 --- a/oximeter/collector/tests/output/self-stat-schema.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "oximeter_collector:collections": { - "timeseries_name": "oximeter_collector:collections", - "description": { - "target": "", - "metric": "" - }, - "field_schema": [ - { - "name": "base_route", - "field_type": "string", - "source": "metric", - "description": "" - }, - { - "name": "collector_id", - "field_type": "uuid", - "source": "target", - "description": "" - }, - { - "name": "collector_ip", - "field_type": "ip_addr", - "source": "target", - "description": "" - }, - { - "name": "collector_port", - "field_type": "u16", - "source": "target", - "description": "" - }, - { - "name": "producer_id", - "field_type": "uuid", - "source": "metric", - "description": "" - }, - { - "name": "producer_ip", - "field_type": "ip_addr", - "source": "metric", - "description": "" - }, - { - "name": "producer_port", - "field_type": "u16", - "source": "metric", - "description": "" - } - ], - "datum_type": "cumulative_u64", - "version": 1, - "authz_scope": "fleet", - "units": "count", - "created": "2024-06-19T18:42:39.343957406Z" - }, - "oximeter_collector:failed_collections": { - "timeseries_name": "oximeter_collector:failed_collections", - 
"description": { - "target": "", - "metric": "" - }, - "field_schema": [ - { - "name": "base_route", - "field_type": "string", - "source": "metric", - "description": "" - }, - { - "name": "collector_id", - "field_type": "uuid", - "source": "target", - "description": "" - }, - { - "name": "collector_ip", - "field_type": "ip_addr", - "source": "target", - "description": "" - }, - { - "name": "collector_port", - "field_type": "u16", - "source": "target", - "description": "" - }, - { - "name": "producer_id", - "field_type": "uuid", - "source": "metric", - "description": "" - }, - { - "name": "producer_ip", - "field_type": "ip_addr", - "source": "metric", - "description": "" - }, - { - "name": "producer_port", - "field_type": "u16", - "source": "metric", - "description": "" - }, - { - "name": "reason", - "field_type": "string", - "source": "metric", - "description": "" - } - ], - "datum_type": "cumulative_u64", - "version": 1, - "authz_scope": "fleet", - "units": "count", - "created": "2024-06-19T18:42:39.344007227Z" - } -} diff --git a/oximeter/db/Cargo.toml b/oximeter/db/Cargo.toml index c446bc7822..253015287b 100644 --- a/oximeter/db/Cargo.toml +++ b/oximeter/db/Cargo.toml @@ -19,6 +19,7 @@ clap.workspace = true dropshot.workspace = true futures.workspace = true highway.workspace = true +num.workspace = true omicron-common.workspace = true omicron-workspace-hack.workspace = true oximeter.workspace = true @@ -45,10 +46,6 @@ optional = true workspace = true optional = true -[dependencies.num] -workspace = true -optional = true - [dependencies.peg] workspace = true optional = true @@ -77,20 +74,21 @@ optional = true workspace = true optional = true -[dependencies.tokio] -workspace = true -features = [ "rt-multi-thread", "macros" ] - [dependencies.tabled] workspace = true optional = true +[dependencies.tokio] +workspace = true +features = [ "rt-multi-thread", "macros" ] + [dev-dependencies] expectorate.workspace = true indexmap.workspace = true itertools.workspace = true omicron-test-utils.workspace = true slog-dtrace.workspace = true +sqlformat.workspace = true sqlparser.workspace = true strum.workspace = true tempfile.workspace = true @@ -107,7 +105,6 @@ sql = [ ] oxql = [ "dep:crossterm", - "dep:num", "dep:peg", "dep:reedline", "dep:tabled", diff --git a/oximeter/db/src/bin/oxdb/main.rs b/oximeter/db/src/bin/oxdb/main.rs index ca11dd18a3..32ca2acb3c 100644 --- a/oximeter/db/src/bin/oxdb/main.rs +++ b/oximeter/db/src/bin/oxdb/main.rs @@ -2,7 +2,8 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! Tool for developing against the Oximeter timeseries database, populating data and querying. +//! CLI-Tool for developing against the Oximeter timeseries database, populating +//! data and querying. // Copyright 2024 Oxide Computer Company @@ -13,20 +14,13 @@ use oximeter::{ types::{Cumulative, Sample}, Metric, Target, }; -use oximeter_db::{query, Client, DbWrite}; +use oximeter_db::{make_client, query, Client, DbWrite}; use slog::{debug, info, o, Drain, Level, Logger}; use std::net::IpAddr; -use std::net::SocketAddr; use uuid::Uuid; -#[cfg(feature = "sql")] -mod sql; - -#[cfg(feature = "oxql")] -mod oxql; - -// Samples are inserted in chunks of this size, to avoid large allocations when inserting huge -// numbers of timeseries. +/// Samples are inserted in chunks of this size, to avoid large allocations when inserting huge +/// numbers of timeseries. 
const INSERT_CHUNK_SIZE: usize = 100_000; /// A target identifying a single virtual machine instance
@@ -150,31 +144,17 @@ enum Subcommand { #[cfg(feature = "sql")] Sql { #[clap(flatten)] - opts: crate::sql::ShellOptions, + opts: oximeter_db::shells::sql::ShellOptions, }, /// Enter the Oximeter Query Language shell for interactive querying. #[cfg(feature = "oxql")] Oxql { #[clap(flatten)] - opts: crate::oxql::ShellOptions, + opts: oximeter_db::shells::oxql::ShellOptions, }, } -async fn make_client( - address: IpAddr, - port: u16, - log: &Logger, -) -> Result<Client, anyhow::Error> { - let address = SocketAddr::new(address, port); - let client = Client::new(address, &log); - client - .init_single_node_db() - .await - .context("Failed to initialize timeseries database")?; - Ok(client) -} - fn describe_data() { let vm = VirtualMachine::new(); print!("Target:\n\n Name: {target_name:?}\n", target_name = vm.name());
@@ -368,11 +348,13 @@ async fn main() -> anyhow::Result<()> { } #[cfg(feature = "sql")] Subcommand::Sql { opts } => { - crate::sql::sql_shell(args.address, args.port, log, opts).await? + oximeter_db::shells::sql::shell(args.address, args.port, log, opts) + .await? } #[cfg(feature = "oxql")] Subcommand::Oxql { opts } => { - crate::oxql::oxql_shell(args.address, args.port, log, opts).await? + oximeter_db::shells::oxql::shell(args.address, args.port, log, opts) + .await? } } Ok(())
diff --git a/oximeter/db/src/lib.rs b/oximeter/db/src/lib.rs index c471a837ea..c3d2014ad1 100644 --- a/oximeter/db/src/lib.rs +++ b/oximeter/db/src/lib.rs
@@ -7,6 +7,7 @@ // Copyright 2024 Oxide Computer Company use crate::query::StringFieldSelector; +use anyhow::Context as _; use chrono::DateTime; use chrono::Utc; use dropshot::EmptyScanParams;
@@ -23,22 +24,26 @@ pub use oximeter::Sample; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; +use slog::Logger; use std::collections::BTreeMap; use std::convert::TryFrom; use std::io; +use std::net::{IpAddr, SocketAddr}; use std::num::NonZeroU32; use std::path::PathBuf; use thiserror::Error; mod client; pub mod model; -#[cfg(feature = "oxql")] +#[cfg(any(feature = "oxql", test))] pub mod oxql; pub mod query; +#[cfg(any(feature = "oxql", feature = "sql", test))] +pub mod shells; #[cfg(any(feature = "sql", test))] pub mod sql; -#[cfg(feature = "oxql")] +#[cfg(any(feature = "oxql", test))] pub use client::oxql::OxqlResult; pub use client::query_summary::QuerySummary; pub use client::Client;
@@ -240,6 +245,21 @@ pub struct TimeseriesPageSelector { pub offset: NonZeroU32, } +/// Create a client to the timeseries database, and ensure the database exists. +pub async fn make_client( + address: IpAddr, + port: u16, + log: &Logger, +) -> Result<Client, anyhow::Error> { + let address = SocketAddr::new(address, port); + let client = Client::new(address, &log); + client + .init_single_node_db() + .await + .context("Failed to initialize timeseries database")?; + Ok(client) +} + pub(crate) type TimeseriesKey = u64; // TODO-cleanup: Add the timeseries version in to the computation of the key.
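Since `make_client` is now exported from the `oximeter_db` crate root rather than being private to the `oxdb` binary, other tools can construct an initialized client directly. A minimal sketch of the intended call pattern, assuming a single-node ClickHouse reachable on localhost (the port value, the tokio runtime attribute, and the discard logger below are illustrative assumptions, not part of this change):

    use slog::{o, Logger};
    use std::net::{IpAddr, Ipv6Addr};

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // Any slog::Logger works here; a Discard drain keeps the sketch quiet.
        let log = Logger::root(slog::Discard, o!());
        // make_client connects and runs init_single_node_db(), so the
        // timeseries database exists by the time the client is returned.
        let client =
            oximeter_db::make_client(IpAddr::V6(Ipv6Addr::LOCALHOST), 8123, &log)
                .await?;
        let _ = client; // ready for schema listing or queries
        Ok(())
    }

As the name suggests, `init_single_node_db` runs unconditionally, so this helper is only appropriate against single-node (non-replicated) ClickHouse installations.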
diff --git a/oximeter/db/src/oxql/ast/table_ops/filter.rs b/oximeter/db/src/oxql/ast/table_ops/filter.rs index 9e796bc730..b6fc533e4d 100644 --- a/oximeter/db/src/oxql/ast/table_ops/filter.rs +++ b/oximeter/db/src/oxql/ast/table_ops/filter.rs @@ -16,10 +16,10 @@ use crate::oxql::point::DataType; use crate::oxql::point::MetricType; use crate::oxql::point::Points; use crate::oxql::point::ValueArray; -use crate::oxql::query::special_idents; use crate::oxql::Error; use crate::oxql::Table; use crate::oxql::Timeseries; +use crate::shells::special_idents; use chrono::DateTime; use chrono::Utc; use oximeter::FieldType; @@ -61,7 +61,7 @@ impl core::str::FromStr for Filter { const EXPR_COMPLEXITY_ITERATIVE_LIMIT: usize = 32; // A crude limit on expression complexity, governing how many times we -// recurisvely apply a DNF simplification before bailing out. +// recursively apply a DNF simplification before bailing out. const EXPR_COMPLEXITY_RECURSIVE_LIMIT: usize = 32; impl Filter { diff --git a/oximeter/db/src/oxql/mod.rs b/oximeter/db/src/oxql/mod.rs index b93d75b859..3961fae1cc 100644 --- a/oximeter/db/src/oxql/mod.rs +++ b/oximeter/db/src/oxql/mod.rs @@ -19,7 +19,7 @@ pub use self::table::Table; pub use self::table::Timeseries; pub use anyhow::Error; -// Format a PEG parsing error into a nice anyhow error. +/// Format a PEG parsing error into a nice anyhow error. fn fmt_parse_error(source: &str, err: PegError) -> Error { use std::fmt::Write; let mut out = diff --git a/oximeter/db/src/oxql/query/mod.rs b/oximeter/db/src/oxql/query/mod.rs index 40a6c82f93..e1fada9f2a 100644 --- a/oximeter/db/src/oxql/query/mod.rs +++ b/oximeter/db/src/oxql/query/mod.rs @@ -25,63 +25,6 @@ use chrono::DateTime; use chrono::Utc; use std::time::Duration; -/// Special identifiers for column names or other widely-used values. -pub mod special_idents { - use oximeter::DatumType; - - macro_rules! gen_marker { - ($p:expr, $field:expr) => { - concat!("p", $p, "_", $field) - }; - } - - pub const TIMESTAMP: &str = "timestamp"; - pub const START_TIME: &str = "start_time"; - pub const DATUM: &str = "datum"; - pub const BINS: &str = "bins"; - pub const COUNTS: &str = "counts"; - pub const MIN: &str = "min"; - pub const MAX: &str = "max"; - pub const SUM_OF_SAMPLES: &str = "sum_of_samples"; - pub const SQUARED_MEAN: &str = "squared_mean"; - pub const DATETIME64: &str = "DateTime64"; - pub const ARRAYU64: &str = "Array[u64]"; - pub const ARRAYFLOAT64: &str = "Array[f64]"; - pub const ARRAYINT64: &str = "Array[i64]"; - pub const FLOAT64: &str = "f64"; - pub const UINT64: &str = "u64"; - - pub const DISTRIBUTION_IDENTS: [&str; 15] = [ - "bins", - "counts", - "min", - "max", - "sum_of_samples", - "squared_mean", - gen_marker!("50", "marker_heights"), - gen_marker!("50", "marker_positions"), - gen_marker!("50", "desired_marker_positions"), - gen_marker!("90", "marker_heights"), - gen_marker!("90", "marker_positions"), - gen_marker!("90", "desired_marker_positions"), - gen_marker!("99", "marker_heights"), - gen_marker!("99", "marker_positions"), - gen_marker!("99", "desired_marker_positions"), - ]; - - pub fn array_type_name_from_histogram_type( - type_: DatumType, - ) -> Option { - if !type_.is_histogram() { - return None; - } - Some(format!( - "Array[{}]", - type_.to_string().strip_prefix("Histogram").unwrap().to_lowercase(), - )) - } -} - /// A parsed OxQL query. 
#[derive(Clone, Debug, PartialEq)] pub struct Query {
diff --git a/oximeter/db/src/query.rs b/oximeter/db/src/query.rs index 7b622920ff..ceabf00888 100644 --- a/oximeter/db/src/query.rs +++ b/oximeter/db/src/query.rs
@@ -371,10 +371,12 @@ impl FieldSelector { } } -/// A stringly-typed selector for finding fields by name and comparsion with a given value. +/// A stringly-typed selector for finding fields by name and comparison with a +/// given value. /// -/// This is used internally to parse comparisons written as strings, such as from the `oxdb` -/// command-line tool or from another external source (Nexus API, for example). +/// This is used internally to parse comparisons written as strings, such as +/// from the `oxdb` command-line tool or from another external +/// source (Nexus API, for example). #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, JsonSchema)] pub struct StringFieldSelector { name: String,
diff --git a/oximeter/db/src/shells/mod.rs b/oximeter/db/src/shells/mod.rs new file mode 100644 index 0000000000..8f653ffcd5 --- /dev/null +++ b/oximeter/db/src/shells/mod.rs
@@ -0,0 +1,189 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Helpers for the OxQL and SQL shell implementations. + +// Copyright 2024 Oxide Computer Company + +use crate::Client; +use dropshot::EmptyScanParams; +use dropshot::WhichPage; +use oximeter::TimeseriesSchema; + +#[cfg(any(feature = "oxql", test))] +pub mod oxql; +#[cfg(any(feature = "sql", test))] +pub mod sql; + +/// Special identifiers for column names or other widely-used values. +pub mod special_idents { + use oximeter::DatumType; + + macro_rules! gen_marker { + ($p:expr, $field:expr) => { + concat!("p", $p, "_", $field) + }; + } + + pub const TIMESTAMP: &str = "timestamp"; + pub const START_TIME: &str = "start_time"; + pub const DATUM: &str = "datum"; + pub const BINS: &str = "bins"; + pub const COUNTS: &str = "counts"; + pub const MIN: &str = "min"; + pub const MAX: &str = "max"; + pub const SUM_OF_SAMPLES: &str = "sum_of_samples"; + pub const SQUARED_MEAN: &str = "squared_mean"; + pub const DATETIME64: &str = "DateTime64"; + pub const ARRAYU64: &str = "Array[u64]"; + pub const ARRAYFLOAT64: &str = "Array[f64]"; + pub const ARRAYINT64: &str = "Array[i64]"; + pub const FLOAT64: &str = "f64"; + pub const UINT64: &str = "u64"; + + /// Distribution identifiers. + pub const DISTRIBUTION_IDENTS: [&str; 15] = [ + "bins", + "counts", + "min", + "max", + "sum_of_samples", + "squared_mean", + gen_marker!("50", "marker_heights"), + gen_marker!("50", "marker_positions"), + gen_marker!("50", "desired_marker_positions"), + gen_marker!("90", "marker_heights"), + gen_marker!("90", "marker_positions"), + gen_marker!("90", "desired_marker_positions"), + gen_marker!("99", "marker_heights"), + gen_marker!("99", "marker_positions"), + gen_marker!("99", "desired_marker_positions"), + ]; + + /// Get the array type name for a histogram type. + pub fn array_type_name_from_histogram_type( + type_: DatumType, + ) -> Option<String> { + if !type_.is_histogram() { + return None; + } + Some(format!( + "Array[{}]", + type_.to_string().strip_prefix("Histogram").unwrap().to_lowercase(), + )) + } +} + +/// List the known timeseries.
+pub async fn list_timeseries(client: &Client) -> anyhow::Result<()> { + let mut page = WhichPage::First(EmptyScanParams {}); + let limit = 100.try_into().unwrap(); + loop { + let results = client.timeseries_schema_list(&page, limit).await?; + for schema in results.items.iter() { + println!("{}", schema.timeseries_name); + } + if results.next_page.is_some() { + if let Some(last) = results.items.last() { + page = WhichPage::Next(last.timeseries_name.clone()); + } else { + return Ok(()); + } + } else { + return Ok(()); + } + } +} + +/// Describe a single timeseries. +pub async fn describe_timeseries( + client: &Client, + timeseries: &str, +) -> anyhow::Result<()> { + match timeseries.parse() { + Err(_) => eprintln!( + "Invalid timeseries name '{timeseries}', \ + use \\l to list available timeseries by name + " + ), + Ok(name) => { + if let Some(schema) = client.schema_for_timeseries(&name).await? { + let (cols, types) = prepare_columns(&schema); + let mut builder = tabled::builder::Builder::default(); + builder.push_record(cols); // first record is the header + builder.push_record(types); + println!( + "{}", + builder.build().with(tabled::settings::Style::psql()) + ); + } else { + eprintln!("No such timeseries: {timeseries}"); + } + } + } + Ok(()) +} + +/// Prepare the columns for a timeseries or virtual table. +pub(crate) fn prepare_columns( + schema: &TimeseriesSchema, +) -> (Vec<String>, Vec<String>) { + let mut cols = Vec::with_capacity(schema.field_schema.len() + 2); + let mut types = cols.clone(); + + for field in schema.field_schema.iter() { + cols.push(field.name.clone()); + types.push(field.field_type.to_string()); + } + + cols.push(special_idents::TIMESTAMP.into()); + types.push(special_idents::DATETIME64.into()); + + if schema.datum_type.is_histogram() { + cols.push(special_idents::START_TIME.into()); + types.push(special_idents::DATETIME64.into()); + + cols.push(special_idents::BINS.into()); + types.push( + special_idents::array_type_name_from_histogram_type( + schema.datum_type, + ) + .unwrap(), + ); + + cols.push(special_idents::COUNTS.into()); + types.push(special_idents::ARRAYU64.into()); + + cols.push(special_idents::MIN.into()); + types.push(special_idents::FLOAT64.into()); + + cols.push(special_idents::MAX.into()); + types.push(special_idents::FLOAT64.into()); + + cols.push(special_idents::SUM_OF_SAMPLES.into()); + types.push(special_idents::UINT64.into()); + + cols.push(special_idents::SQUARED_MEAN.into()); + types.push(special_idents::UINT64.into()); + + for quantile in ["P50", "P90", "P99"].iter() { + cols.push(format!("{}_MARKER_HEIGHTS", quantile)); + types.push(special_idents::ARRAYFLOAT64.into()); + cols.push(format!("{}_MARKER_POSITIONS", quantile)); + types.push(special_idents::ARRAYINT64.into()); + cols.push(format!("{}_DESIRED_MARKER_POSITIONS", quantile)); + types.push(special_idents::ARRAYFLOAT64.into()); + } + } else if schema.datum_type.is_cumulative() { + cols.push(special_idents::START_TIME.into()); + types.push(special_idents::DATETIME64.into()); + cols.push(special_idents::DATUM.into()); + types.push(schema.datum_type.to_string()); + } else { + cols.push(special_idents::DATUM.into()); + types.push(schema.datum_type.to_string()); + } + + (cols, types) +}
diff --git a/oximeter/db/src/bin/oxdb/oxql.rs b/oximeter/db/src/shells/oxql.rs similarity index 72% rename from oximeter/db/src/bin/oxdb/oxql.rs rename to oximeter/db/src/shells/oxql.rs index ebe55dc7a7..0f23ea7d64 100644 --- a/oximeter/db/src/bin/oxdb/oxql.rs +++ b/oximeter/db/src/shells/oxql.rs
@@ -2,20 +2,14 @@ //
License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! OxQL shell. +//! OxQL shell implementation. // Copyright 2024 Oxide Computer -use crate::make_client; +use super::{list_timeseries, prepare_columns}; +use crate::{make_client, oxql::Table, Client, OxqlResult}; use clap::Args; use crossterm::style::Stylize; -use dropshot::EmptyScanParams; -use dropshot::WhichPage; -use oximeter::TimeseriesSchema; -use oximeter_db::oxql::query::special_idents; -use oximeter_db::oxql::Table; -use oximeter_db::Client; -use oximeter_db::OxqlResult; use reedline::DefaultPrompt; use reedline::DefaultPromptSegment; use reedline::Reedline; @@ -23,228 +17,25 @@ use reedline::Signal; use slog::Logger; use std::net::IpAddr; -#[derive(Clone, Debug, Args)] +/// Options for the OxQL shell. +#[derive(Clone, Debug, Default, Args)] pub struct ShellOptions { /// Print summaries of each SQL query run against the database. #[clap(long = "summaries")] - print_summaries: bool, + pub print_summaries: bool, /// Print the total elapsed query duration. #[clap(long = "elapsed")] - print_elapsed: bool, -} - -// Print help for the basic OxQL commands. -fn print_basic_commands() { - println!("Basic commands:"); - println!(" \\?, \\h, help - Print this help"); - println!(" \\q, quit, exit, ^D - Exit the shell"); - println!(" \\l - List timeseries"); - println!(" \\d - Describe a timeseries"); - println!(" \\ql [] - Get OxQL help about an operation"); - println!(); - println!("Or try entering an OxQL `get` query"); -} - -// Print high-level information about OxQL. -fn print_general_oxql_help() { - const HELP: &str = r#"Oximeter Query Language - -The Oximeter Query Language (OxQL) implements queries as -as sequence of operations. Each of these takes zero or more -timeseries as inputs, and produces zero or more timeseries -as outputs. Operations are chained together with the pipe -operator, "|". - -All queries start with a `get` operation, which selects a -timeseries from the database, by name. For example: - -`get physical_data_link:bytes_received` - -The supported timeseries operations are: - -- get: Select a timeseries by name -- filter: Filter timeseries by field or sample values -- group_by: Group timeseries by fields, applying a reducer. -- join: Join two or more timeseries together - -Run `\ql ` to get specific help about that operation. - "#; - println!("{HELP}"); -} - -// Print help for a specific OxQL operation. -fn print_oxql_operation_help(op: &str) { - match op { - "get" => { - const HELP: &str = r#"get "); - -Get instances of a timeseries by name"#; - println!("{HELP}"); - } - "filter" => { - const HELP: &str = r#"filter "); - -Filter timeseries based on their attributes. - can be a logical combination of filtering -\"atoms\", such as `field_foo > 0`. Expressions -may use any of the usual comparison operators, and -can be nested and combined with && or ||. - -Expressions must refer to the name of a field -for a timeseries at this time, and must compare -against literals. For example, `some_field > 0` -is supported, but `some_field > other_field` is not."#; - println!("{HELP}"); - } - "group_by" => { - const HELP: &str = r#"group_by [, ... ] -group_by [, ... ], - -Group timeseries by the named fields, optionally -specifying a reducer to use when aggregating the -timeseries within each group. If no reducer is -specified, `mean` is used, averaging the values -within each group. 
- -Current supported reducers: - - mean - - sum"#; - println!("{HELP}"); - } - "join" => { - const HELP: &str = r#"join - -Combine 2 or more tables by peforming a natural -inner join, matching up those with fields of the -same value. Currently, joining does not take into -account the timestamps, and does not align the outputs -directly."#; - println!("{HELP}"); - } - _ => eprintln!("unrecognized OxQL operation: '{op}'"), - } -} - -// List the known timeseries. -async fn list_timeseries(client: &Client) -> anyhow::Result<()> { - let mut page = WhichPage::First(EmptyScanParams {}); - let limit = 100.try_into().unwrap(); - loop { - let results = client.timeseries_schema_list(&page, limit).await?; - for schema in results.items.iter() { - println!("{}", schema.timeseries_name); - } - if results.next_page.is_some() { - if let Some(last) = results.items.last() { - page = WhichPage::Next(last.timeseries_name.clone()); - } else { - return Ok(()); - } - } else { - return Ok(()); - } - } -} - -/// Prepare the columns for a timeseries or virtual table. -pub(crate) fn prepare_columns( - schema: &TimeseriesSchema, -) -> (Vec, Vec) { - let mut cols = Vec::with_capacity(schema.field_schema.len() + 2); - let mut types = cols.clone(); - - for field in schema.field_schema.iter() { - cols.push(field.name.clone()); - types.push(field.field_type.to_string()); - } - - cols.push(special_idents::TIMESTAMP.into()); - types.push(special_idents::DATETIME64.into()); - - if schema.datum_type.is_histogram() { - cols.push(special_idents::START_TIME.into()); - types.push(special_idents::DATETIME64.into()); - - cols.push(special_idents::BINS.into()); - types.push( - special_idents::array_type_name_from_histogram_type( - schema.datum_type, - ) - .unwrap(), - ); - - cols.push(special_idents::COUNTS.into()); - types.push(special_idents::ARRAYU64.into()); - - cols.push(special_idents::MIN.into()); - types.push(special_idents::FLOAT64.into()); - - cols.push(special_idents::MAX.into()); - types.push(special_idents::FLOAT64.into()); - - cols.push(special_idents::SUM_OF_SAMPLES.into()); - types.push(special_idents::UINT64.into()); - - cols.push(special_idents::SQUARED_MEAN.into()); - types.push(special_idents::UINT64.into()); - - for quantile in ["P50", "P90", "P99"].iter() { - cols.push(format!("{}_MARKER_HEIGHTS", quantile)); - types.push(special_idents::ARRAYFLOAT64.into()); - cols.push(format!("{}_MARKER_POSITIONS", quantile)); - types.push(special_idents::ARRAYINT64.into()); - cols.push(format!("{}_DESIRED_MARKER_POSITIONS", quantile)); - types.push(special_idents::ARRAYFLOAT64.into()); - } - } else if schema.datum_type.is_cumulative() { - cols.push(special_idents::START_TIME.into()); - types.push(special_idents::DATETIME64.into()); - cols.push(special_idents::DATUM.into()); - types.push(schema.datum_type.to_string()); - } else { - cols.push(special_idents::DATUM.into()); - types.push(schema.datum_type.to_string()); - } - - (cols, types) -} - -/// Describe a single timeseries. -async fn describe_timeseries( - client: &Client, - timeseries: &str, -) -> anyhow::Result<()> { - match timeseries.parse() { - Err(_) => eprintln!( - "Invalid timeseries name '{timeseries}, \ - use \\l to list available timeseries by name - " - ), - Ok(name) => { - if let Some(schema) = client.schema_for_timeseries(&name).await? 
{ - let (cols, types) = prepare_columns(&schema); - let mut builder = tabled::builder::Builder::default(); - builder.push_record(cols); // first record is the header - builder.push_record(types); - println!( - "{}", - builder.build().with(tabled::settings::Style::psql()) - ); - } else { - eprintln!("No such timeseries: {timeseries}"); - } - } - } - Ok(()) + pub print_elapsed: bool, }
-/// Run the OxQL shell. -pub async fn oxql_shell( +/// Run/execute the OxQL shell. +pub async fn shell( address: IpAddr, port: u16, log: Logger, opts: ShellOptions, ) -> anyhow::Result<()> { + // Create the client. let client = make_client(address, port, &log).await?; // A workaround to ensure the client has all available timeseries when the
@@ -320,6 +111,127 @@ + } } +/// Describe a single timeseries. +async fn describe_timeseries( + client: &Client, + timeseries: &str, +) -> anyhow::Result<()> { + match timeseries.parse() { + Err(_) => eprintln!( + "Invalid timeseries name '{timeseries}', \ + use \\l to list available timeseries by name + " + ), + Ok(name) => { + if let Some(schema) = client.schema_for_timeseries(&name).await? { + let (cols, types) = prepare_columns(&schema); + let mut builder = tabled::builder::Builder::default(); + builder.push_record(cols); // first record is the header + builder.push_record(types); + println!( + "{}", + builder.build().with(tabled::settings::Style::psql()) + ); + } else { + eprintln!("No such timeseries: {timeseries}"); + } + } + } + Ok(()) +} + +/// Print help for a specific OxQL operation. +fn print_oxql_operation_help(op: &str) { + match op { + "get" => { + const HELP: &str = r#"get <timeseries name> + +Get instances of a timeseries by name"#; + println!("{HELP}"); + } + "filter" => { + const HELP: &str = r#"filter <expr> + +Filter timeseries based on their attributes. +<expr> can be a logical combination of filtering +\"atoms\", such as `field_foo > 0`. Expressions +may use any of the usual comparison operators, and +can be nested and combined with && or ||. + +Expressions must refer to the name of a field +for a timeseries at this time, and must compare +against literals. For example, `some_field > 0` +is supported, but `some_field > other_field` is not."#; + println!("{HELP}"); + } + "group_by" => { + const HELP: &str = r#"group_by [<field name>, ... ] +group_by [<field name>, ... ], <reducer> + +Group timeseries by the named fields, optionally +specifying a reducer to use when aggregating the +timeseries within each group. If no reducer is +specified, `mean` is used, averaging the values +within each group. + +Current supported reducers: + - mean + - sum"#; + println!("{HELP}"); + } + "join" => { + const HELP: &str = r#"join + +Combine 2 or more tables by performing a natural +inner join, matching up those with fields of the +same value. Currently, joining does not take into +account the timestamps, and does not align the outputs +directly."#; + println!("{HELP}"); + } + _ => eprintln!("unrecognized OxQL operation: '{op}'"), + } +} + +/// Print help for the basic OxQL commands. +fn print_basic_commands() { + println!("Basic commands:"); + println!(" \\?, \\h, help - Print this help"); + println!(" \\q, quit, exit, ^D - Exit the shell"); + println!(" \\l - List timeseries"); + println!(" \\d <timeseries> - Describe a timeseries"); + println!(" \\ql [<operation>] - Get OxQL help about an operation"); + println!(); + println!("Or try entering an OxQL `get` query"); +} + +/// Print high-level information about OxQL.
+fn print_general_oxql_help() { + const HELP: &str = r#"Oximeter Query Language + +The Oximeter Query Language (OxQL) implements queries as +a sequence of operations. Each of these takes zero or more +timeseries as inputs, and produces zero or more timeseries +as outputs. Operations are chained together with the pipe +operator, "|". + +All queries start with a `get` operation, which selects a +timeseries from the database, by name. For example: + +`get physical_data_link:bytes_received` + +The supported timeseries operations are: + +- get: Select a timeseries by name +- filter: Filter timeseries by field or sample values +- group_by: Group timeseries by fields, applying a reducer. +- join: Join two or more timeseries together + +Run `\ql <operation>` to get specific help about that operation. + "#; + println!("{HELP}"); +} + fn print_query_summary( result: &OxqlResult, print_elapsed: bool,
diff --git a/oximeter/db/src/bin/oxdb/sql.rs b/oximeter/db/src/shells/sql.rs similarity index 96% rename from oximeter/db/src/bin/oxdb/sql.rs rename to oximeter/db/src/shells/sql.rs index 44780592fc..f75713da3b 100644 --- a/oximeter/db/src/bin/oxdb/sql.rs +++ b/oximeter/db/src/shells/sql.rs
@@ -2,20 +2,16 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -//! SQL shell subcommand for `oxdb`. +//! SQL shell implementation. // Copyright 2024 Oxide Computer Company -use super::oxql; -use crate::make_client; +use super::prepare_columns; +use crate::sql::{function_allow_list, QueryResult, Table}; +use crate::{make_client, Client, QuerySummary}; use clap::Args; use dropshot::EmptyScanParams; use dropshot::WhichPage; -use oximeter_db::sql::function_allow_list; -use oximeter_db::sql::QueryResult; -use oximeter_db::sql::Table; -use oximeter_db::Client; -use oximeter_db::QuerySummary; use reedline::DefaultPrompt; use reedline::DefaultPromptSegment; use reedline::Reedline;
@@ -23,63 +19,7 @@ use reedline::Signal; use slog::Logger; use std::net::IpAddr; -fn print_basic_commands() { - println!("Basic commands:"); - println!(" \\?, \\h, help - Print this help"); - println!(" \\q, quit, exit, ^D - Exit the shell"); - println!(" \\l - List tables"); - println!(" \\d <table> - Describe a table"); - println!( - " \\f <function> - List or describe ClickHouse SQL functions" - ); - println!(); - println!("Or try entering a SQL `SELECT` statement"); -} - -async fn list_virtual_tables(client: &Client) -> anyhow::Result<()> { - let mut page = WhichPage::First(EmptyScanParams {}); - let limit = 100.try_into().unwrap(); - loop { - let results = client.timeseries_schema_list(&page, limit).await?; - for schema in results.items.iter() { - println!("{}", schema.timeseries_name); - } - if results.next_page.is_some() { - if let Some(last) = results.items.last() { - page = WhichPage::Next(last.timeseries_name.clone()); - } else { - return Ok(()); - } - } else { - return Ok(()); - } - } -} - -async fn describe_virtual_table( - client: &Client, - table: &str, -) -> anyhow::Result<()> { - match table.parse() { - Err(_) => println!("Invalid timeseries name: {table}"), - Ok(name) => { - if let Some(schema) = client.schema_for_timeseries(&name).await?
{ - let (cols, types) = oxql::prepare_columns(&schema); - let mut builder = tabled::builder::Builder::default(); - builder.push_record(cols); // first record is the header - builder.push_record(types); - println!( - "{}", - builder.build().with(tabled::settings::Style::psql()) - ); - } else { - println!("No such timeseries: {table}"); - } - } - } - Ok(()) -} - +/// Options for the SQL shell. #[derive(Clone, Debug, Args)] pub struct ShellOptions { /// Print query metadata. @@ -107,48 +47,8 @@ impl Default for ShellOptions { } } -fn list_supported_functions() { - println!("Subset of ClickHouse SQL functions currently supported"); - println!( - "See https://clickhouse.com/docs/en/sql-reference/functions for more" - ); - println!(); - for func in function_allow_list().iter() { - println!(" {func}"); - } -} - -fn show_supported_function(name: &str) { - if let Some(func) = function_allow_list().iter().find(|f| f.name == name) { - println!("{}", func.name); - println!(" {}", func.usage); - println!(" {}", func.description); - } else { - println!("No supported function '{name}'"); - } -} - -fn print_sql_query(query: &str) { - println!( - "{}", - sqlformat::format( - &query, - &sqlformat::QueryParams::None, - sqlformat::FormatOptions { uppercase: true, ..Default::default() } - ) - ); - println!(); -} - -fn print_query_summary(table: &Table, summary: &QuerySummary) { - println!("Summary"); - println!(" Query ID: {}", summary.id); - println!(" Result rows: {}", table.rows.len()); - println!(" Time: {:?}", summary.elapsed); - println!(" Read: {}\n", summary.io_summary.read); -} - -pub async fn sql_shell( +/// Run/execute the SQL shell. +pub async fn shell( address: IpAddr, port: u16, log: Logger, @@ -261,3 +161,101 @@ pub async fn sql_shell( } } } + +fn print_basic_commands() { + println!("Basic commands:"); + println!(" \\?, \\h, help - Print this help"); + println!(" \\q, quit, exit, ^D - Exit the shell"); + println!(" \\l - List tables"); + println!(" \\d
<table> - Describe a table"); + println!( + " \\f <function> - List or describe ClickHouse SQL functions" + ); + println!(); + println!("Or try entering a SQL `SELECT` statement"); +} + +async fn list_virtual_tables(client: &Client) -> anyhow::Result<()> { + let mut page = WhichPage::First(EmptyScanParams {}); + let limit = 100.try_into().unwrap(); + loop { + let results = client.timeseries_schema_list(&page, limit).await?; + for schema in results.items.iter() { + println!("{}", schema.timeseries_name); + } + if results.next_page.is_some() { + if let Some(last) = results.items.last() { + page = WhichPage::Next(last.timeseries_name.clone()); + } else { + return Ok(()); + } + } else { + return Ok(()); + } + } +} + +async fn describe_virtual_table( + client: &Client, + table: &str, +) -> anyhow::Result<()> { + match table.parse() { + Err(_) => println!("Invalid timeseries name: {table}"), + Ok(name) => { + if let Some(schema) = client.schema_for_timeseries(&name).await? { + let (cols, types) = prepare_columns(&schema); + let mut builder = tabled::builder::Builder::default(); + builder.push_record(cols); // first record is the header + builder.push_record(types); + println!( + "{}", + builder.build().with(tabled::settings::Style::psql()) + ); + } else { + println!("No such timeseries: {table}"); + } + } + } + Ok(()) +} + +fn list_supported_functions() { + println!("Subset of ClickHouse SQL functions currently supported"); + println!( + "See https://clickhouse.com/docs/en/sql-reference/functions for more" + ); + println!(); + for func in function_allow_list().iter() { + println!(" {func}"); + } +} + +fn show_supported_function(name: &str) { + if let Some(func) = function_allow_list().iter().find(|f| f.name == name) { + println!("{}", func.name); + println!(" {}", func.usage); + println!(" {}", func.description); + } else { + println!("No supported function '{name}'"); + } +} + +fn print_sql_query(query: &str) { + println!( + "{}", + sqlformat::format( + &query, + &sqlformat::QueryParams::None, + sqlformat::FormatOptions { uppercase: true, ..Default::default() } + ) + ); + println!(); +} + +fn print_query_summary(table: &Table, summary: &QuerySummary) { + println!("Summary"); + println!(" Query ID: {}", summary.id); + println!(" Result rows: {}", table.rows.len()); + println!(" Time: {:?}", summary.elapsed); + println!(" Read: {}\n", summary.io_summary.read); +}
diff --git a/oximeter/impl/src/schema/codegen.rs b/oximeter/impl/src/schema/codegen.rs index 4aa09cf136..cde67439de 100644 --- a/oximeter/impl/src/schema/codegen.rs +++ b/oximeter/impl/src/schema/codegen.rs
@@ -74,6 +74,186 @@ fn emit_target(schema: &TimeseriesSchema) -> TokenStream { emit_one(FieldSource::Target, schema) } +/// Return true if all the fields in the schema are `Copy`. +fn fields_are_copyable<'a>( + mut fields: impl Iterator<Item = &'a FieldSchema>, +) -> bool { + // Do a positive match, to ensure new variants don't actually derive copy + // inappropriately. Better we clone, in that case. + fields.all(FieldSchema::is_copyable) +} + +/// Return true if the datum type is copyable.
+fn datum_type_is_copyable(datum_type: DatumType) -> bool { + match datum_type { + DatumType::Bool + | DatumType::I8 + | DatumType::U8 + | DatumType::I16 + | DatumType::U16 + | DatumType::I32 + | DatumType::U32 + | DatumType::I64 + | DatumType::U64 + | DatumType::CumulativeI64 + | DatumType::CumulativeU64 + | DatumType::CumulativeF32 + | DatumType::CumulativeF64 + | DatumType::F32 + | DatumType::F64 => true, + DatumType::String + | DatumType::Bytes + | DatumType::HistogramI8 + | DatumType::HistogramU8 + | DatumType::HistogramI16 + | DatumType::HistogramU16 + | DatumType::HistogramI32 + | DatumType::HistogramU32 + | DatumType::HistogramI64 + | DatumType::HistogramU64 + | DatumType::HistogramF32 + | DatumType::HistogramF64 => false, + } +} + +/// Return `true` if values of this datum are partially ordered (can derive +/// `PartialOrd`.) +fn datum_type_is_partially_ordered(datum_type: DatumType) -> bool { + match datum_type { + DatumType::Bool + | DatumType::I8 + | DatumType::U8 + | DatumType::I16 + | DatumType::U16 + | DatumType::I32 + | DatumType::U32 + | DatumType::I64 + | DatumType::U64 + | DatumType::String + | DatumType::Bytes + | DatumType::CumulativeI64 + | DatumType::CumulativeU64 + | DatumType::CumulativeF32 + | DatumType::CumulativeF64 + | DatumType::F32 + | DatumType::F64 => true, + DatumType::HistogramI8 + | DatumType::HistogramU8 + | DatumType::HistogramI16 + | DatumType::HistogramU16 + | DatumType::HistogramI32 + | DatumType::HistogramU32 + | DatumType::HistogramI64 + | DatumType::HistogramU64 + | DatumType::HistogramF32 + | DatumType::HistogramF64 => false, + } +} + +/// Return `true` if values of this datum are totally ordered (can derive +/// `Ord`.) +fn datum_type_is_totally_ordered(datum_type: DatumType) -> bool { + match datum_type { + DatumType::Bool + | DatumType::I8 + | DatumType::U8 + | DatumType::I16 + | DatumType::U16 + | DatumType::I32 + | DatumType::U32 + | DatumType::I64 + | DatumType::U64 + | DatumType::String + | DatumType::Bytes + | DatumType::CumulativeI64 + | DatumType::CumulativeU64 + | DatumType::CumulativeF32 + | DatumType::CumulativeF64 => true, + DatumType::F32 + | DatumType::F64 + | DatumType::HistogramI8 + | DatumType::HistogramU8 + | DatumType::HistogramI16 + | DatumType::HistogramU16 + | DatumType::HistogramI32 + | DatumType::HistogramU32 + | DatumType::HistogramI64 + | DatumType::HistogramU64 + | DatumType::HistogramF32 + | DatumType::HistogramF64 => false, + } +} + +/// Return `true` if values of this datum are hashable (can derive `Hash`). +fn datum_type_is_hashable(datum_type: DatumType) -> bool { + match datum_type { + DatumType::Bool + | DatumType::I8 + | DatumType::U8 + | DatumType::I16 + | DatumType::U16 + | DatumType::I32 + | DatumType::U32 + | DatumType::I64 + | DatumType::U64 + | DatumType::String + | DatumType::Bytes + | DatumType::CumulativeI64 + | DatumType::CumulativeU64 + | DatumType::CumulativeF32 + | DatumType::CumulativeF64 => true, + DatumType::F32 + | DatumType::F64 + | DatumType::HistogramI8 + | DatumType::HistogramU8 + | DatumType::HistogramI16 + | DatumType::HistogramU16 + | DatumType::HistogramI32 + | DatumType::HistogramU32 + | DatumType::HistogramI64 + | DatumType::HistogramU64 + | DatumType::HistogramF32 + | DatumType::HistogramF64 => false, + } +} + +fn compute_extra_derives( + source: FieldSource, + schema: &TimeseriesSchema, +) -> TokenStream { + match source { + FieldSource::Target => { + if fields_are_copyable(schema.target_fields()) { + quote! { #[derive(Copy, Eq, Hash, Ord, PartialOrd)] } + } else { + quote! 
{ #[derive(Eq, Hash, Ord, PartialOrd)] } + } + } + FieldSource::Metric => { + let mut derives = Vec::new(); + if fields_are_copyable(schema.metric_fields()) + && datum_type_is_copyable(schema.datum_type) + { + derives.push(quote! { Copy }); + } + if datum_type_is_partially_ordered(schema.datum_type) { + derives.push(quote! { PartialOrd }); + if datum_type_is_totally_ordered(schema.datum_type) { + derives.push(quote! { Eq, Ord }); + } + } + if datum_type_is_hashable(schema.datum_type) { + derives.push(quote! { Hash }) + } + if derives.is_empty() { + quote! {} + } else { + quote! { #[derive(#(#derives),*)] } + } + } + } +} + fn emit_one(source: FieldSource, schema: &TimeseriesSchema) -> TokenStream { let name = match source { FieldSource::Target => schema.target_name(), @@ -98,6 +278,7 @@ fn emit_one(source: FieldSource, schema: &TimeseriesSchema) -> TokenStream { } }) .collect(); + let extra_derives = compute_extra_derives(source, schema); let (oximeter_trait, maybe_datum, type_docstring) = match source { FieldSource::Target => ( quote! {::oximeter::Target }, @@ -116,6 +297,7 @@ fn emit_one(source: FieldSource, schema: &TimeseriesSchema) -> TokenStream { quote! { #[doc = #type_docstring] #[derive(Clone, Debug, PartialEq, #oximeter_trait)] + #extra_derives pub struct #struct_name { #( #field_defs, )* #maybe_datum @@ -335,6 +517,10 @@ impl quote::ToTokens for Units { let toks = match self { Units::Count => quote! { ::oximeter::schema::Units::Count }, Units::Bytes => quote! { ::oximeter::schema::Units::Bytes }, + Units::Seconds => quote! { ::oximeter::schema::Units::Seconds }, + Units::Nanoseconds => { + quote! { ::oximeter::schema::Units::Nanoseconds } + } }; toks.to_tokens(tokens); } @@ -431,6 +617,7 @@ mod tests { let expected = quote! { #[doc = "a target"] #[derive(Clone, Debug, PartialEq, ::oximeter::Target)] + #[derive(Eq, Hash, Ord, PartialOrd)] pub struct Foo { #[doc = "target field"] pub f0: ::std::borrow::Cow<'static, str>, @@ -438,6 +625,7 @@ mod tests { #[doc = "a metric"] #[derive(Clone, Debug, PartialEq, ::oximeter::Metric)] + #[derive(Copy, PartialOrd, Eq, Ord, Hash)] pub struct Bar { #[doc = "metric field"] pub f1: ::uuid::Uuid, @@ -474,6 +662,7 @@ mod tests { let expected = quote! { #[doc = "a target"] #[derive(Clone, Debug, PartialEq, ::oximeter::Target)] + #[derive(Eq, Hash, Ord, PartialOrd)] pub struct Foo { #[doc = "target field"] pub f0: ::std::borrow::Cow<'static, str>, @@ -481,6 +670,7 @@ mod tests { #[doc = "a metric"] #[derive(Clone, Debug, PartialEq, ::oximeter::Metric)] + #[derive(Copy, PartialOrd, Eq, Ord, Hash)] pub struct Bar { pub datum: ::oximeter::types::Cumulative, } @@ -488,4 +678,152 @@ mod tests { assert_eq!(tokens.to_string(), expected.to_string()); } + + #[test] + fn compute_extra_derives_respects_non_copy_fields() { + // Metric fields are not copy, even though datum is. + let schema = TimeseriesSchema { + timeseries_name: "foo:bar".parse().unwrap(), + description: TimeseriesDescription { + target: "a target".into(), + metric: "a metric".into(), + }, + field_schema: BTreeSet::from([FieldSchema { + name: "f0".into(), + field_type: FieldType::String, + source: FieldSource::Metric, + description: "metric field".into(), + }]), + datum_type: DatumType::CumulativeU64, + version: NonZeroU8::new(1).unwrap(), + authz_scope: AuthzScope::Fleet, + units: Units::Bytes, + created: Utc::now(), + }; + let tokens = compute_extra_derives(FieldSource::Metric, &schema); + assert_eq!( + tokens.to_string(), + quote! 
{ #[derive(PartialOrd, Eq, Ord, Hash)] }.to_string(), + "Copy should not be derived for a datum type that is copy, \ + when the fields themselves are not copy." + ); + } + + #[test] + fn compute_extra_derives_respects_non_copy_datum_types() { + // Fields are copy, but datum is not. + let schema = TimeseriesSchema { + timeseries_name: "foo:bar".parse().unwrap(), + description: TimeseriesDescription { + target: "a target".into(), + metric: "a metric".into(), + }, + field_schema: BTreeSet::from([FieldSchema { + name: "f0".into(), + field_type: FieldType::Uuid, + source: FieldSource::Metric, + description: "metric field".into(), + }]), + datum_type: DatumType::String, + version: NonZeroU8::new(1).unwrap(), + authz_scope: AuthzScope::Fleet, + units: Units::Bytes, + created: Utc::now(), + }; + let tokens = compute_extra_derives(FieldSource::Metric, &schema); + assert_eq!( + tokens.to_string(), + quote! { #[derive(PartialOrd, Eq, Ord, Hash)] }.to_string(), + "Copy should not be derived for a datum type that is not copy, \ + when the fields themselves are copy." + ); + } + + #[test] + fn compute_extra_derives_respects_partially_ordered_datum_types() { + // No fields, datum is partially- but not totally-ordered. + let schema = TimeseriesSchema { + timeseries_name: "foo:bar".parse().unwrap(), + description: TimeseriesDescription { + target: "a target".into(), + metric: "a metric".into(), + }, + field_schema: BTreeSet::from([FieldSchema { + name: "f0".into(), + field_type: FieldType::Uuid, + source: FieldSource::Target, + description: "target field".into(), + }]), + datum_type: DatumType::F64, + version: NonZeroU8::new(1).unwrap(), + authz_scope: AuthzScope::Fleet, + units: Units::Bytes, + created: Utc::now(), + }; + let tokens = compute_extra_derives(FieldSource::Metric, &schema); + assert_eq!( + tokens.to_string(), + quote! { #[derive(Copy, PartialOrd)] }.to_string(), + "Should derive only PartialOrd for a metric type that is \ + not totally-ordered." + ); + } + + #[test] + fn compute_extra_derives_respects_totally_ordered_datum_types() { + // No fields, datum is also totally-ordered + let schema = TimeseriesSchema { + timeseries_name: "foo:bar".parse().unwrap(), + description: TimeseriesDescription { + target: "a target".into(), + metric: "a metric".into(), + }, + field_schema: BTreeSet::from([FieldSchema { + name: "f0".into(), + field_type: FieldType::Uuid, + source: FieldSource::Target, + description: "target field".into(), + }]), + datum_type: DatumType::U64, + version: NonZeroU8::new(1).unwrap(), + authz_scope: AuthzScope::Fleet, + units: Units::Bytes, + created: Utc::now(), + }; + let tokens = compute_extra_derives(FieldSource::Metric, &schema); + assert_eq!( + tokens.to_string(), + quote! { #[derive(Copy, PartialOrd, Eq, Ord, Hash)] }.to_string(), + "Should derive Ord for a metric type that is totally-ordered." + ); + } + + #[test] + fn compute_extra_derives_respects_datum_type_with_no_extra_derives() { + // No metric fields, and histograms don't admit any other derives. 
+ let schema = TimeseriesSchema { + timeseries_name: "foo:bar".parse().unwrap(), + description: TimeseriesDescription { + target: "a target".into(), + metric: "a metric".into(), + }, + field_schema: BTreeSet::from([FieldSchema { + name: "f0".into(), + field_type: FieldType::String, + source: FieldSource::Target, + description: "target field".into(), + }]), + datum_type: DatumType::HistogramF64, + version: NonZeroU8::new(1).unwrap(), + authz_scope: AuthzScope::Fleet, + units: Units::Bytes, + created: Utc::now(), + }; + let tokens = compute_extra_derives(FieldSource::Metric, &schema); + assert!( + tokens.is_empty(), + "A histogram has no extra derives, so a timeseries schema \ + with no metric fields should also have no extra derives." + ); + } } diff --git a/oximeter/impl/src/schema/ir.rs b/oximeter/impl/src/schema/ir.rs index 573af9c2b0..f7a209294f 100644 --- a/oximeter/impl/src/schema/ir.rs +++ b/oximeter/impl/src/schema/ir.rs @@ -28,6 +28,12 @@ use std::collections::BTreeMap; use std::collections::BTreeSet; use std::num::NonZeroU8; +mod limits { + pub const MAX_FIELD_NAME_LENGTH: usize = 64; + pub const MAX_DESCRIPTION_LENGTH: usize = 1024; + pub const MAX_TIMESERIES_NAME_LENGTH: usize = 128; +} + #[derive(Debug, Deserialize)] pub struct FieldMetadata { #[serde(rename = "type")] @@ -107,6 +113,44 @@ impl TimeseriesDefinition { let mut timeseries = BTreeMap::new(); let target_name = &self.target.name; + // Validate text length limits on field names and descriptions. + if self.target.name.is_empty() { + return Err(MetricsError::SchemaDefinition(String::from( + "Target name cannot be empty", + ))); + } + for (name, metadata) in self.fields.iter() { + if name.is_empty() { + return Err(MetricsError::SchemaDefinition(String::from( + "Field names cannot be empty", + ))); + } + if name.len() > limits::MAX_FIELD_NAME_LENGTH { + return Err(MetricsError::SchemaDefinition(format!( + "Field name '{}' is {} characters, which exceeds the \ + maximum field name length of {}", + name, + name.len(), + limits::MAX_FIELD_NAME_LENGTH, + ))); + } + if metadata.description.is_empty() { + return Err(MetricsError::SchemaDefinition(format!( + "Description of field '{}' cannot be empty", + name, + ))); + } + if metadata.description.len() > limits::MAX_DESCRIPTION_LENGTH { + return Err(MetricsError::SchemaDefinition(format!( + "Description of field '{}' is {} characters, which \ + exceeds the maximum description length of {}", + name, + metadata.description.len(), + limits::MAX_DESCRIPTION_LENGTH, + ))); + } + } + // At this point, we do not support actually _modifying_ schema. // Instead, we're putting in place infrastructure to support multiple // versions, while still requiring all schema to define the first and @@ -177,6 +221,26 @@ impl TimeseriesDefinition { // version, along with running some basic lints and checks. for metric in self.metrics.iter() { let metric_name = &metric.name; + if metric_name.is_empty() { + return Err(MetricsError::SchemaDefinition(String::from( + "Metric name cannot be empty", + ))); + } + let timeseries_name = TimeseriesName::try_from(format!( + "{}:{}", + target_name, metric_name + ))?; + if timeseries_name.as_str().len() + > limits::MAX_TIMESERIES_NAME_LENGTH + { + return Err(MetricsError::SchemaDefinition(format!( + "Timeseries name '{}' is {} characters, which \ + exceeds the maximum length of {}", + timeseries_name, + timeseries_name.len(), + limits::MAX_TIMESERIES_NAME_LENGTH, + ))); + } // Store the current version of the metric. 
This doesn't need to be // sequential, but they do need to be monotonic and have a matching @@ -231,9 +295,6 @@ impl TimeseriesDefinition { self.target.authz_scope, &field_schema, )?; - let timeseries_name = TimeseriesName::try_from( - format!("{}:{}", target_name, metric_name), - )?; let version = NonZeroU8::new(last_target_version).unwrap(); let description = TimeseriesDescription { @@ -251,7 +312,7 @@ impl TimeseriesDefinition { created: Utc::now(), }; if let Some(old) = timeseries - .insert((timeseries_name, version), schema) + .insert((timeseries_name.clone(), version), schema) { return Err(MetricsError::SchemaDefinition( format!( @@ -1413,4 +1474,198 @@ mod tests { have at least one field, but found {msg:?}", ); } + + #[test] + fn fail_on_very_long_timeseries_name() { + let contents = r#" + format_version = 1 + + [target] + name = "veeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeery_long_target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "veeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeery_long_metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [ ] }, + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let res = load_schema(contents); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!( + "Expected to fail when timeseries name is too long, found {res:#?}" + ); + }; + assert!( + msg.contains("exceeds the maximum"), + "Message should complain about a long timeseries name, but found {msg:?}", + ); + } + + #[test] + fn fail_on_empty_metric_name() { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [ ] }, + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let res = load_schema(contents); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!( + "Expected to fail when metric name is empty, found {res:#?}" + ); + }; + assert_eq!( + msg, "Metric name cannot be empty", + "Message should complain about an empty metric name \ + but found {msg:?}", + ); + } + + #[test] + fn fail_on_empty_target_name() { + let contents = r#" + format_version = 1 + + [target] + name = "" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [ ] }, + ] + + [fields.foo] + type = "string" + description = "a field" + "#; + let res = load_schema(contents); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!( + "Expected to fail when target name is empty, found {res:#?}" + ); + }; + assert_eq!( + msg, "Target name cannot be empty", + "Message should complain about an empty target name \ + but found {msg:?}", + ); + } + + #[test] + fn fail_on_very_long_field_names() { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 
1, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [ ] }, + ] + + [fields.this_is_a_reeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeealy_long_field_name] + type = "string" + description = "a field" + "#; + let res = load_schema(contents); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!( + "Expected to fail when field name is too long, found {res:#?}" + ); + }; + assert!( + msg.contains("which exceeds the maximum field name length"), + "Message should complain about a field name being \ + too long, but found {msg:?}", + ); + } + + #[test] + fn fail_on_empty_descriptions() { + let contents = r#" + format_version = 1 + + [target] + name = "target" + description = "some target" + authz_scope = "fleet" + versions = [ + { version = 1, fields = [ "foo" ] }, + ] + + [[metrics]] + name = "metric" + description = "some metric" + datum_type = "u8" + units = "count" + versions = [ + { added_in = 1, fields = [ ] }, + ] + + [fields.foo] + type = "string" + description = "" + "#; + let res = load_schema(contents); + let Err(MetricsError::SchemaDefinition(msg)) = &res else { + panic!( + "Expected to fail when field description is empty, found {res:#?}" + ); + }; + assert_eq!( + msg, "Description of field 'foo' cannot be empty", + "Message should complain about a field description being \ + empty, but found {msg:?}", + ); + } } diff --git a/oximeter/impl/src/schema/mod.rs b/oximeter/impl/src/schema/mod.rs index 28dbf38ab8..83a83e95b2 100644 --- a/oximeter/impl/src/schema/mod.rs +++ b/oximeter/impl/src/schema/mod.rs @@ -20,12 +20,8 @@ use chrono::Utc; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; -use std::collections::btree_map::Entry; -use std::collections::BTreeMap; use std::collections::BTreeSet; -use std::fmt::Write; use std::num::NonZeroU8; -use std::path::Path; /// Full path to the directory containing all schema. /// @@ -54,6 +50,13 @@ pub struct FieldSchema { pub description: String, } +impl FieldSchema { + /// Return `true` if this field is copyable. + pub const fn is_copyable(&self) -> bool { + self.field_type.is_copyable() + } +} + /// The source from which a field is derived, the target or metric. #[derive( Clone, @@ -182,6 +185,8 @@ pub struct TimeseriesDescription { pub enum Units { Count, Bytes, + Seconds, + Nanoseconds, } /// The schema for a timeseries. @@ -297,6 +302,24 @@ impl TimeseriesSchema { self.field_schema.iter().find(|field| field.name == name.as_ref()) } + /// Return an iterator over the target fields. + pub fn target_fields(&self) -> impl Iterator { + self.field_iter(FieldSource::Target) + } + + /// Return an iterator over the metric fields. + pub fn metric_fields(&self) -> impl Iterator { + self.field_iter(FieldSource::Metric) + } + + /// Return an iterator over fields from the given source. + fn field_iter( + &self, + source: FieldSource, + ) -> impl Iterator { + self.field_schema.iter().filter(move |field| field.source == source) + } + /// Return the target and metric component names for this timeseries pub fn component_names(&self) -> (&str, &str) { self.timeseries_name @@ -369,220 +392,6 @@ pub enum AuthzScope { ViewableToAll, } -/// A set of timeseries schema, useful for testing changes to targets or -/// metrics. 
-#[derive(Debug, Default, Deserialize, PartialEq, Serialize)] -pub struct SchemaSet { - #[serde(flatten)] - inner: BTreeMap, -} - -impl SchemaSet { - /// Insert a timeseries schema, checking for conflicts. - /// - /// This inserts the schema derived from `target` and `metric`. If one - /// does _not_ already exist in `self` or a _matching_ one exists, `None` - /// is returned. - /// - /// If the derived schema _conflicts_ with one in `self`, the existing - /// schema is returned. - pub fn insert_checked( - &mut self, - target: &T, - metric: &M, - ) -> Result, MetricsError> - where - T: Target, - M: Metric, - { - let new = TimeseriesSchema::new(target, metric)?; - let name = new.timeseries_name.clone(); - match self.inner.entry(name) { - Entry::Vacant(entry) => { - entry.insert(new); - Ok(None) - } - Entry::Occupied(entry) => { - let existing = entry.get(); - if existing == &new { - Ok(None) - } else { - Ok(Some(existing.clone())) - } - } - } - } - - /// Compare the set of schema against the contents of a file. - /// - /// This function loads a `SchemaSet` from the provided JSON file, and - /// asserts that the contained schema matches those in `self`. Note that - /// equality of `TimeseriesSchema` ignores creation timestamps, so this - /// compares the "identity" data: timeseries name, field names, field types, - /// and field sources. - /// - /// This is intentionally similar to `expectorate::assert_contents()`. If - /// the provided file doesn't exist, it's treated as empty. If it does, a - /// `SchemaSet` is deserialized from it and a comparison between that and - /// `self` is done. - /// - /// You can use `EXPECTORATE=overwrite` to overwrite the existing file, - /// rather than panicking. - pub fn assert_contents(&self, path: impl AsRef) { - let path = path.as_ref(); - let v = std::env::var_os("EXPECTORATE"); - let overwrite = - v.as_deref().and_then(std::ffi::OsStr::to_str) == Some("overwrite"); - let expected_contents = serde_json::to_string_pretty(self).unwrap(); - if overwrite { - if let Err(e) = std::fs::write(path, &expected_contents) { - panic!( - "Failed to write contents to '{}': {}", - path.display(), - e - ); - } - } else { - // If the file doesn't exist, it's just empty and we'll create an - // empty set of schema. - let contents = if !path.exists() { - String::from("{}") - } else { - match std::fs::read_to_string(path) { - Err(e) => { - panic!("Failed to read '{}': {}", path.display(), e) - } - Ok(c) => c, - } - }; - let other: Self = serde_json::from_str(&contents).unwrap(); - if self == &other { - return; - } - - let mut diffs = String::new(); - writeln!( - &mut diffs, - "Timeseries schema in \"{}\" do not match\n", - path.display() - ) - .unwrap(); - - // Print schema in self that are not in the file, or mismatched - // schema. - for (name, schema) in self.inner.iter() { - let Some(other_schema) = other.inner.get(name) else { - writeln!( - &mut diffs, - "File is missing timeseries \"{}\"", - name - ) - .unwrap(); - continue; - }; - if schema == other_schema { - continue; - } - writeln!(&mut diffs, "Timeseries \"{name}\" differs").unwrap(); - - // Print out any differences in the datum type. 
- if schema.datum_type != other_schema.datum_type { - writeln!( - &mut diffs, - " Expected datum type: {}", - schema.datum_type - ) - .unwrap(); - writeln!( - &mut diffs, - " Actual datum type: {}", - other_schema.datum_type - ) - .unwrap(); - } - - // Print fields in self that are not in other, or are mismatched - for field in schema.field_schema.iter() { - let Some(other_field) = - other_schema.field_schema.get(field) - else { - writeln!( - &mut diffs, - " File is missing {:?} field \"{}\"", - field.source, field.name, - ) - .unwrap(); - continue; - }; - if field == other_field { - continue; - } - - writeln!( - &mut diffs, - " File has mismatched field \"{}\"", - field.name - ) - .unwrap(); - writeln!( - &mut diffs, - " Expected type: {}", - field.field_type - ) - .unwrap(); - writeln!( - &mut diffs, - " Actual type: {}", - other_field.field_type - ) - .unwrap(); - writeln!( - &mut diffs, - " Expected source: {:?}", - field.source - ) - .unwrap(); - writeln!( - &mut diffs, - " Actual source: {:?}", - other_field.source - ) - .unwrap(); - } - - // Print fields in other that are not in self, fields that are - // in both but don't match are taken care of in the above loop. - for other_field in other_schema.field_schema.iter() { - if schema.field_schema.contains(other_field) { - continue; - } - - writeln!( - &mut diffs, - " Current set is missing {:?} field \"{}\"", - other_field.source, other_field.name, - ) - .unwrap(); - } - } - - // Print schema that are in the file, but not self. Those that don't - // match are handled in the above block. - for key in other.inner.keys() { - if !self.inner.contains_key(key) { - writeln!( - &mut diffs, - " Current set is missing timeseries \"{}\"", - key - ) - .unwrap(); - } - } - panic!("{}", diffs); - } - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/oximeter/impl/src/types.rs b/oximeter/impl/src/types.rs index a6518e4ad5..370557f7f7 100644 --- a/oximeter/impl/src/types.rs +++ b/oximeter/impl/src/types.rs @@ -63,6 +63,29 @@ pub enum FieldType { Bool, } +impl FieldType { + /// Return `true` if a field of this type is copyable. + /// + /// NOTE: This doesn't mean `self` is copyable, instead refering to a field + /// with this type. + pub const fn is_copyable(&self) -> bool { + match self { + FieldType::String => false, + FieldType::I8 + | FieldType::U8 + | FieldType::I16 + | FieldType::U16 + | FieldType::I32 + | FieldType::U32 + | FieldType::I64 + | FieldType::U64 + | FieldType::IpAddr + | FieldType::Uuid + | FieldType::Bool => true, + } + } +} + impl std::fmt::Display for FieldType { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{:?}", self) @@ -693,7 +716,19 @@ impl From for omicron_common::api::external::Error { } /// A cumulative or counter data type. 
@@ -693,7 +716,19 @@ impl From<MetricsError> for omicron_common::api::external::Error {
 }
 
 /// A cumulative or counter data type.
-#[derive(Debug, Clone, Copy, PartialEq, JsonSchema, Deserialize, Serialize)]
+#[derive(
+    Debug,
+    Deserialize,
+    Clone,
+    Copy,
+    Eq,
+    Hash,
+    JsonSchema,
+    Ord,
+    PartialEq,
+    PartialOrd,
+    Serialize,
+)]
 #[schemars(rename = "Cumulative{T}")]
 pub struct Cumulative<T> {
     start_time: DateTime<Utc>,
diff --git a/oximeter/instruments/Cargo.toml b/oximeter/instruments/Cargo.toml
index d5dcac411a..831f102c66 100644
--- a/oximeter/instruments/Cargo.toml
+++ b/oximeter/instruments/Cargo.toml
@@ -36,6 +36,7 @@ http-instruments = [
     "dep:oximeter",
     "dep:schemars",
     "dep:serde",
+    "dep:slog",
     "dep:uuid"
 ]
 kstat = [
diff --git a/oximeter/instruments/src/http.rs b/oximeter/instruments/src/http.rs
index 7db8bc27c3..6a0a35ce63 100644
--- a/oximeter/instruments/src/http.rs
+++ b/oximeter/instruments/src/http.rs
@@ -13,36 +13,24 @@ use futures::Future;
 use http::StatusCode;
 use http::Uri;
 use oximeter::{
-    histogram::Histogram, histogram::Record, Metric, MetricsError, Producer,
-    Sample, Target,
+    histogram::Histogram, histogram::Record, MetricsError, Producer, Sample,
 };
+use std::borrow::Cow;
 use std::collections::BTreeMap;
 use std::sync::{Arc, Mutex};
 use std::time::{Duration, Instant};
-use uuid::Uuid;
 
-/// The [`HttpService`] is an [`oximeter::Target`] for monitoring HTTP servers.
-#[derive(Debug, Clone, Target)]
-pub struct HttpService {
-    pub name: String,
-    pub id: Uuid,
-}
-
-/// An [`oximeter::Metric`] that tracks a histogram of the latency of requests to a specified HTTP
-/// endpoint.
-#[derive(Debug, Clone, Metric)]
-pub struct RequestLatencyHistogram {
-    pub route: String,
-    pub method: String,
-    pub status_code: i64,
-    #[datum]
-    pub latency: Histogram<f64>,
-}
+oximeter::use_timeseries!("http-service.toml");
+pub use http_service::HttpService;
+pub use http_service::RequestLatencyHistogram;
 
 // Return the route portion of the request, normalized to include a single
 // leading slash and no trailing slashes.
-fn normalized_uri_path(uri: &Uri) -> String {
-    format!("/{}", uri.path().trim_end_matches('/').trim_start_matches('/'))
+fn normalized_uri_path(uri: &Uri) -> Cow<'static, str> {
+    Cow::Owned(format!(
+        "/{}",
+        uri.path().trim_end_matches('/').trim_start_matches('/')
+    ))
 }
 
 impl RequestLatencyHistogram {
@@ -56,9 +44,9 @@ impl RequestLatencyHistogram {
     ) -> Self {
         Self {
             route: normalized_uri_path(request.uri()),
-            method: request.method().to_string(),
+            method: request.method().to_string().into(),
             status_code: status_code.as_u16().into(),
-            latency: histogram,
+            datum: histogram,
         }
     }
 
@@ -154,7 +142,7 @@ impl LatencyTracker {
                 self.histogram.clone(),
             )
         });
-        entry.latency.sample(latency.as_secs_f64()).map_err(MetricsError::from)
+        entry.datum.sample(latency.as_secs_f64()).map_err(MetricsError::from)
     }
 
     /// Instrument the given Dropshot endpoint handler function.
@@ -228,10 +216,8 @@ mod tests {
 
     #[test]
     fn test_latency_tracker() {
-        let service = HttpService {
-            name: String::from("my-service"),
-            id: ID.parse().unwrap(),
-        };
+        let service =
+            HttpService { name: "my-service".into(), id: ID.parse().unwrap() };
         let hist = Histogram::new(&[0.0, 1.0]).unwrap();
         let tracker = LatencyTracker::new(service, hist);
         let request = http::request::Builder::new()
@@ -249,8 +235,7 @@ mod tests {
             .unwrap();
 
         let key = "/some/uri:GET:200";
-        let actual_hist =
-            tracker.latencies.lock().unwrap()[key].latency.clone();
+        let actual_hist = tracker.latencies.lock().unwrap()[key].datum.clone();
         assert_eq!(actual_hist.n_samples(), 1);
         let bins = actual_hist.iter().collect::<Vec<_>>();
         assert_eq!(bins[1].count, 1);
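The instrumented modules now pull their target and metric types from TOML schema files via `oximeter::use_timeseries!`, rather than hand-written `#[derive(Target)]`/`#[derive(Metric)]` structs. A minimal sketch of how the generated types are consumed, following the field and `datum` naming visible in the diff above (the bucket edges and UUID string are placeholders):

    use oximeter::{histogram::Histogram, MetricsError, Sample};

    oximeter::use_timeseries!("http-service.toml");
    use http_service::{HttpService, RequestLatencyHistogram};

    fn one_sample() -> Result<Sample, MetricsError> {
        // String fields are Cow<'static, str>, so `.into()` accepts a &'static str.
        let target = HttpService {
            name: "example-server".into(),
            id: "1d6bd4b4-7a0e-4e3a-9b2a-6f2fdadc0405".parse().unwrap(),
        };
        let mut metric = RequestLatencyHistogram {
            route: "/v1/widgets".into(),
            method: "GET".into(),
            status_code: 200,
            // The datum field of a generated metric is always named `datum`.
            datum: Histogram::new(&[0.0, 0.1, 1.0]).map_err(MetricsError::from)?,
        };
        // Record one request latency, in seconds, then build a sample.
        metric.datum.sample(0.25).map_err(MetricsError::from)?;
        Sample::new(&target, &metric)
    }
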
diff --git a/oximeter/instruments/src/kstat/sampler.rs b/oximeter/instruments/src/kstat/sampler.rs
index 74770a6225..92466758c1 100644
--- a/oximeter/instruments/src/kstat/sampler.rs
+++ b/oximeter/instruments/src/kstat/sampler.rs
@@ -46,32 +46,13 @@ use tokio::time::Sleep;
 
 // The `KstatSampler` generates some statistics about its own operation, mostly
 // for surfacing failures to collect and dropped samples.
 mod self_stats {
+    oximeter::use_timeseries!("kstat-sampler.toml");
     use super::BTreeMap;
     use super::Cumulative;
     use super::TargetId;
-
-    /// Information identifying this kstat sampler.
-    #[derive(Debug, oximeter::Target)]
-    pub struct KstatSampler {
-        /// The hostname (or zonename) of the host machine.
-        pub hostname: String,
-    }
-
-    /// The total number of samples dropped for a single target.
-    #[derive(Debug, oximeter::Metric)]
-    pub struct SamplesDropped {
-        /// The ID of the target being tracked.
-        pub target_id: u64,
-        /// The name of the target being tracked.
-        pub target_name: String,
-        pub datum: Cumulative<u64>,
-    }
-
-    /// The cumulative number of expired targets.
-    #[derive(Debug, oximeter::Metric)]
-    pub struct ExpiredTargets {
-        pub datum: Cumulative<u64>,
-    }
+    pub use kstat_sampler::ExpiredTargets;
+    pub use kstat_sampler::KstatSampler;
+    pub use kstat_sampler::SamplesDropped;
 
     #[derive(Debug)]
     pub struct SelfStats {
@@ -85,7 +66,7 @@ mod self_stats {
     impl SelfStats {
         pub fn new(hostname: String) -> Self {
             Self {
-                target: KstatSampler { hostname },
+                target: KstatSampler { hostname: hostname.into() },
                 drops: BTreeMap::new(),
                 expired: ExpiredTargets { datum: Cumulative::new(0) },
             }
@@ -797,7 +778,7 @@ impl KstatSamplerWorker {
                 *drops += n_overflow_samples as u64;
                 let metric = self_stats::SamplesDropped {
                     target_id: target_id.0,
-                    target_name,
+                    target_name: target_name.into(),
                     datum: *drops,
                 };
                 let sample = match Sample::new(&stats.target, &metric) {
diff --git a/oximeter/oximeter/schema/bfd-session.toml b/oximeter/oximeter/schema/bfd-session.toml
new file mode 100644
index 0000000000..a7bdcf52e9
--- /dev/null
+++ b/oximeter/oximeter/schema/bfd-session.toml
@@ -0,0 +1,101 @@
+format_version = 1
+
+[target]
+name = "bfd_session"
+description = "A Bidirectional Forwarding Protocol (BFD) session"
+authz_scope = "fleet"
+versions = [
+    { version = 1, fields = [ "hostname", "rack_id", "sled_id", "peer" ] },
+]
+
+[[metrics]]
+name = "control_packet_send_failures"
+description = "Total number of failures to send a control packet to a peer"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "control_packets_sent"
+description = "Total number of control packets sent to a peer"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "control_packets_received"
+description = "Total number of control packets received from a peer"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "message_receive_error"
+description = "Total number of failures to receive a BFD packet from the internal dispatcher"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "timeout_expired"
+description = "Total number of expired timeouts waiting for a BFD packet from the internal dispatcher"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "transition_to_down"
+description = """\
+Total number of times the BFD session has entered the down state, \
+whether because the peer could not be reached, or because \
+the session was marked administratively down.\
+"""
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "transition_to_init"
+description = "Total number of times the BFD session has entered the init state"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "transition_to_up"
+description = "Total number of times the BFD session has entered the up state"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[fields.hostname]
+type = "string"
+description = "The hostname of the machine running the BFD session"
+
+[fields.rack_id]
+type = "uuid"
+description = "ID of the rack running the BFD session"
+
+[fields.sled_id]
+type = "uuid"
+description = "ID of the sled running the BFD session"
+
+[fields.peer]
+type = "ip_addr"
+description = "Address of the BFD session peer"
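Each of these TOML files is compiled by `use_timeseries!` into a module named after the target, with one struct per target and metric whose fields mirror the `[fields.*]` declarations. A rough sketch of the shape this `bfd_session` definition would generate, under the naming pattern visible elsewhere in this diff (the exact derives and the `Cumulative` import path are assumptions):

    // Roughly what `oximeter::use_timeseries!("bfd-session.toml")` provides;
    // a sketch only, with derives and paths assumed.
    pub mod bfd_session {
        use std::borrow::Cow;
        use std::net::IpAddr;
        use uuid::Uuid;

        // The target struct, one field per entry in the target's version-1 list.
        #[derive(Clone, Debug)]
        pub struct BfdSession {
            pub hostname: Cow<'static, str>, // string fields become Cow
            pub rack_id: Uuid,
            pub sled_id: Uuid,
            pub peer: IpAddr,
        }

        // One struct per metric; cumulative_u64 becomes Cumulative<u64>,
        // always stored in a field named `datum`.
        #[derive(Clone, Debug)]
        pub struct ControlPacketsSent {
            pub datum: oximeter::types::Cumulative<u64>,
        }
    }
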
session peer" diff --git a/oximeter/oximeter/schema/bgp-session.toml b/oximeter/oximeter/schema/bgp-session.toml new file mode 100644 index 0000000000..093560beea --- /dev/null +++ b/oximeter/oximeter/schema/bgp-session.toml @@ -0,0 +1,293 @@ +format_version = 1 + +[target] +name = "bgp_session" +description = "A peer-to-peer session of the Border Gateway Protocol" +authz_scope = "fleet" +versions = [ + { version = 1, fields = [ "hostname", "local_asn", "peer", "rack_id", "sled_id" ] } +] + +[[metrics]] +name = "active_connections_accepted" +description = "Number of active connections initiated by us accepted by the other peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "connection_retries" +description = "Number of times our connection retry timer has expired" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "hold_timer_expirations" +description = "Number of times our hold timer has expired" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "idle_hold_timer_expirations" +description = "Number of times our idle hold timer has expired" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "keepalive_send_failures" +description = "Number of times we failed to send a keep-alive message to the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "keepalives_received" +description = "Number of keep-alive messages received from the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "keepalives_sent" +description = "Number keep-alive messages to the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "notification_send_failures" +description = "Number of times we failed to send a notification message to the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "open_handle_failures" +description = "Number of times we failed to handle an open message from the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "open_send_failures" +description = "Number of times we failed to send an open message to the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "opens_received" +description = "Number of open messages received from the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "opens_sent" +description = "Number of open messages sent to the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "passive_connections_accepted" +description = "Number of TCP connections accepted by our BGP peer socket" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "prefixes_advertised" +description = "Number of prefixes advertised by our BGP peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = 
"prefixes_imported" +description = "Number of prefixes imported by our BGP peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "transition_to_active" +description = "Number of times our BGP peer has entered the active state" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "transition_to_connect" +description = "Number of times our BGP peer has entered the connect state" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "transition_to_established" +description = "Number of times our BGP peer has entered the established state" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "transition_to_idle" +description = "Number of times our BGP peer has entered the idle state" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "transition_to_open_confirm" +description = "Number of times our BGP peer has entered the open-confirm state" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "transition_to_open_sent" +description = "Number of times our BGP peer has entered the open-sent state" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "transition_to_session_setup" +description = "Number of times our BGP peer has entered the session-setup state" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "unexpected_keepalive_messages" +description = "Number of unexpected keep-alive messages from the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "unexpected_open_messages" +description = "Number of unexpected open messages from the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "unexpected_update_messages" +description = "Number of unexpected update messages from the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "update_nexthop_missing" +description = """\ +Number of update messages received with reachability entries, \ +but no next hop path attribute\ +""" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "update_send_failures" +description = "Number of times we failed to send an update message to the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "updates_received" +description = "Number of update messages received from the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "updates_sent" +description = "Number of update messages sent to the peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[fields.hostname] +type = "string" +description = "Hostname of the server running our BGP peer" + +[fields.local_asn] +type = "u32" +description = "The Autonomous System Number (ASN) advertised by our BGP peer" + +[fields.peer] +type = "ip_addr" 
+description = "IP address of the other peer in our BGP session" + +[fields.rack_id] +type = "uuid" +description = "ID of the rack on which our BGP peer is running" + +[fields.sled_id] +type = "uuid" +description = "ID of the sled on which our BGP peer is running" diff --git a/oximeter/oximeter/schema/database-transaction.toml b/oximeter/oximeter/schema/database-transaction.toml new file mode 100644 index 0000000000..bfea3fd81d --- /dev/null +++ b/oximeter/oximeter/schema/database-transaction.toml @@ -0,0 +1,26 @@ +format_version = 1 + +[target] +name = "database_transaction" +description = "A named transaction run in the control plane database" +authz_scope = "fleet" +versions = [ + { version = 1, fields = [ "name" ] } +] + +[[metrics]] +name = "retry_data" +description = "Information about a retried transaction" +units = "seconds" +datum_type = "f64" +versions = [ + { added_in = 1, fields = [ "attempt" ] } +] + +[fields.name] +type = "string" +description = "The name of the transaction" + +[fields.attempt] +type = "u32" +description = "The attempt at running the transaction" diff --git a/oximeter/oximeter/schema/ddm-router.toml b/oximeter/oximeter/schema/ddm-router.toml new file mode 100644 index 0000000000..2cb0851852 --- /dev/null +++ b/oximeter/oximeter/schema/ddm-router.toml @@ -0,0 +1,39 @@ +format_version = 1 + +[target] +name = "ddm_router" +description = "A Delay-Driven Multipath (DDM) router" +authz_scope = "fleet" +versions = [ + { version = 1, fields = [ "hostname", "rack_id", "sled_id" ] }, +] + +[[metrics]] +name = "originated_tunnel_endpoints" +description = "Current number of tunnel endpoints this router advertises" +units = "count" +datum_type = "u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "originated_underlay_prefixes" +description = "Current number of prefixes on the underlay network this router advertises" +units = "count" +datum_type = "u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[fields.hostname] +type = "string" +description = "The hostname of the machine running the DDM router" + +[fields.rack_id] +type = "uuid" +description = "ID of the rack running the DDM router" + +[fields.sled_id] +type = "uuid" +description = "ID of the sled running the DDM router" diff --git a/oximeter/oximeter/schema/ddm-session.toml b/oximeter/oximeter/schema/ddm-session.toml new file mode 100644 index 0000000000..89f6751524 --- /dev/null +++ b/oximeter/oximeter/schema/ddm-session.toml @@ -0,0 +1,136 @@ +format_version = 1 + +[target] +name = "ddm_session" +description = "A session in a Delay-Driven Multipath (DDM) router" +authz_scope = "fleet" +versions = [ + { version = 1, fields = [ "hostname", "interface", "rack_id", "sled_id" ] }, +] + +[[metrics]] +name = "advertisements_received" +description = "Total number of advertisements received from a peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "advertisements_sent" +description = "Total number of advertisements sent to a peer" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "imported_tunnel_endpoints" +description = "Current count of tunnel endpoints imported from a peer" +units = "count" +datum_type = "u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "imported_underlay_prefixes" +description = "Current count of underlay prefixes imported from a peer" +units = "count" +datum_type = "u64" +versions = [ + { 
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "peer_address_changes"
+description = "Total number of times a peer changed its underlay address"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "peer_expirations"
+description = """\
+Total number of times a peer was expired because we received no \
+messages from it within the expiration threshold\
+"""
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "peer_sessions_established"
+description = "Total number of times a session was established with a peer"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "solicitations_received"
+description = "Total number of solicitation messages received from a peer"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "solicitations_sent"
+description = "Total number of solicitation messages sent to a peer"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "update_send_fail"
+description = "Total number of failures to send an update message to a peer"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "updates_received"
+description = "Total number of update messages received from a peer"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[[metrics]]
+name = "updates_sent"
+description = "Total number of update messages sent to a peer"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[fields.hostname]
+type = "string"
+description = "The hostname of the machine running the DDM router"
+
+[fields.interface]
+type = "string"
+description = "The host interface on which the DDM session is running"
+
+[fields.rack_id]
+type = "uuid"
+description = "ID of the rack running the DDM router"
+
+[fields.sled_id]
+type = "uuid"
+description = "ID of the sled running the DDM router"
diff --git a/oximeter/oximeter/schema/http-service.toml b/oximeter/oximeter/schema/http-service.toml
new file mode 100644
index 0000000000..9098110656
--- /dev/null
+++ b/oximeter/oximeter/schema/http-service.toml
@@ -0,0 +1,38 @@
+format_version = 1
+
+[target]
+name = "http_service"
+description = "An Oxide HTTP server"
+authz_scope = "fleet"
+versions = [
+    { version = 1, fields = [ "name", "id" ] },
+]
+
+[[metrics]]
+name = "request_latency_histogram"
+description = "Duration for the server to handle a request"
+units = "seconds"
+datum_type = "histogram_f64"
+versions = [
+    { added_in = 1, fields = [ "route", "method", "status_code" ] }
+]
+
+[fields.name]
+type = "string"
+description = "The name of the HTTP server, or program running it"
+
+[fields.id]
+type = "uuid"
+description = "UUID of the HTTP server"
+
+[fields.route]
+type = "string"
+description = "HTTP route in the request"
+
+[fields.method]
+type = "string"
+description = "HTTP method in the request"
+
+[fields.status_code]
+type = "i64"
+description = "HTTP status code in the server's response"
diff --git a/oximeter/oximeter/schema/kstat-sampler.toml b/oximeter/oximeter/schema/kstat-sampler.toml
new file mode 100644
index 0000000000..bb111d57e3
--- /dev/null
+++ b/oximeter/oximeter/schema/kstat-sampler.toml
@@ -0,0 +1,42 @@
+format_version = 1
+
+[target]
+name = "kstat_sampler"
+description = "A software object sampling kernel statistics"
+authz_scope = "fleet"
+versions = [
+    { version = 1, fields = [ "hostname" ] },
+]
+
+[[metrics]]
+name = "samples_dropped"
+description = "Total number of samples dropped for a single tracked target"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ "target_id", "target_name" ] }
+]
+
+[[metrics]]
+name = "expired_targets"
+description = """\
+Total number of targets that have expired. Targets may expire \
+after either a limited number of unsuccessful sampling attempts, \
+or after a duration of unsuccessful sampling."""
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[fields.hostname]
+type = "string"
+description = "The hostname (or zonename) of the machine hosting the sampler"
+
+[fields.target_id]
+type = "u64"
+description = "The unique ID of the target being tracked"
+
+[fields.target_name]
+type = "string"
+description = "The name of the target being tracked"
diff --git a/oximeter/oximeter/schema/mg-lower.toml b/oximeter/oximeter/schema/mg-lower.toml
new file mode 100644
index 0000000000..bcdbbc0d02
--- /dev/null
+++ b/oximeter/oximeter/schema/mg-lower.toml
@@ -0,0 +1,40 @@
+format_version = 1
+
+[target]
+name = "mg_lower"
+description = """\
+The lower-half of the Oxide Maghemite routing daemon, \
+which syncs routing information to an underlying routing platform, \
+such as a switch ASIC. The routing information may be programmed \
+directly from the control plane, or learned through routing protocols \
+exchanging information with peers.\
+"""
+authz_scope = "fleet"
+versions = [
+    { version = 1, fields = [ "hostname", "rack_id", "sled_id" ] },
+]
+
+[[metrics]]
+name = "routes_blocked_by_link_state"
+description = """\
+The current number of routes that cannot be used \
+because the link through which they should be available \
+is not in a usable state\
+"""
+units = "count"
+datum_type = "u64"
+versions = [
+    { added_in = 1, fields = [ ] }
+]
+
+[fields.hostname]
+type = "string"
+description = "The hostname of the machine running the router"
+
+[fields.rack_id]
+type = "uuid"
+description = "ID of the rack running the router"
+
+[fields.sled_id]
+type = "uuid"
+description = "ID of the sled running the router"
diff --git a/oximeter/oximeter/schema/oximeter-collector.toml b/oximeter/oximeter/schema/oximeter-collector.toml
new file mode 100644
index 0000000000..cfea28a658
--- /dev/null
+++ b/oximeter/oximeter/schema/oximeter-collector.toml
@@ -0,0 +1,59 @@
+format_version = 1
+
+[target]
+name = "oximeter_collector"
+description = "An instance of the oximeter metric collector service"
+authz_scope = "fleet"
+versions = [
+    { version = 1, fields = [ "collector_id", "collector_ip", "collector_port" ] },
+]
+
+[[metrics]]
+name = "collections"
+description = "Total number of successful collections from a producer"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ "base_route", "producer_id", "producer_ip", "producer_port" ] }
+]
+
+[[metrics]]
+name = "failed_collections"
+description = "Total number of failed collections from a producer"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ "base_route", "producer_id", "producer_ip", "producer_port", "reason" ] }
+]
+
+[fields.base_route]
+type = "string"
+description = "Base HTTP route used to request data from the producer"
+
+[fields.collector_id]
"uuid" +description = "UUID of the oximeter collector instance" + +[fields.collector_ip] +type = "ip_addr" +description = "IP address of the oximeter collector instance" + +[fields.collector_port] +type = "u16" +description = "Port of the oximeter collector instance" + +[fields.producer_id] +type = "uuid" +description = "UUID of the metric producer instance" + +[fields.producer_ip] +type = "ip_addr" +description = "IP address of the metric producer instance" + +[fields.producer_port] +type = "u16" +description = "Port of the metric producer instance" + +[fields.reason] +type = "string" +description = "Reason the collection failed" diff --git a/oximeter/oximeter/schema/static-routing-config.toml b/oximeter/oximeter/schema/static-routing-config.toml new file mode 100644 index 0000000000..ad60da2680 --- /dev/null +++ b/oximeter/oximeter/schema/static-routing-config.toml @@ -0,0 +1,42 @@ +format_version = 1 + +[target] +name = "static_routing_config" +description = "Static routing configuration used by the Oxide routing daemons" +authz_scope = "fleet" +versions = [ + { version = 1, fields = [ "hostname", "rack_id", "sled_id" ] }, +] + +[[metrics]] +name = "static_routes" +description = "Current number of static routes in the router" +units = "seconds" +# TODO: This should not be cumulative, it's the number of _current_ static +# routes +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "static_nexthops" +description = "Current number of static next-hops in the router" +units = "seconds" +# TODO: This should not be cumulative, it's the number of _current_ next hops +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[fields.hostname] +type = "string" +description = "The hostname of the machine running the router" + +[fields.rack_id] +type = "uuid" +description = "ID of the rack running the router" + +[fields.sled_id] +type = "uuid" +description = "ID of the sled running the router" diff --git a/oximeter/oximeter/schema/virtual-machine.toml b/oximeter/oximeter/schema/virtual-machine.toml new file mode 100644 index 0000000000..3ef0da4615 --- /dev/null +++ b/oximeter/oximeter/schema/virtual-machine.toml @@ -0,0 +1,65 @@ +format_version = 1 + +[target] +name = "virtual_machine" +description = "A guest virtual machine instance" +authz_scope = "project" +versions = [ + { version = 1, fields = [ "instance_id", "project_id", "silo_id" ] }, +] + +[[metrics]] +name = "vcpu_usage" +description = "Cumulative time each vCPU has spent in a state" +units = "nanoseconds" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ "state", "vcpu_id" ] } +] + +[[metrics]] +name = "reset" +description = "Cumulative number of times the virtual machine has been reset" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "pv_panic_guest_handled" +description = "Cumulative number of times a PVPANIC event was handled by the guest" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[[metrics]] +name = "pv_panic_host_handled" +description = "Cumulative number of times a PVPANIC event was handled by the host" +units = "count" +datum_type = "cumulative_u64" +versions = [ + { added_in = 1, fields = [ ] } +] + +[fields.instance_id] +type = "uuid" +description = "ID of the virtual machine instance" + +[fields.project_id] +type = "uuid" +description = "ID of the virtual machine instance's project" + 
+[fields.silo_id]
+type = "uuid"
+description = "ID of the virtual machine instance's silo"
+
+[fields.state]
+type = "string"
+description = "The state of the vCPU"
+
+[fields.vcpu_id]
+type = "u32"
+description = "The ID of the vCPU"
diff --git a/oximeter/oximeter/schema/vm-health-check.toml b/oximeter/oximeter/schema/vm-health-check.toml
new file mode 100644
index 0000000000..62a5e68ca0
--- /dev/null
+++ b/oximeter/oximeter/schema/vm-health-check.toml
@@ -0,0 +1,87 @@
+format_version = 1
+
+[target]
+name = "virtual_machine"
+description = "A virtual machine instance"
+authz_scope = "fleet"
+versions = [
+    { version = 1, fields = [ "rack_id", "nexus_id", "instance_id", "silo_id", "project_id", "vmm_id", "sled_agent_id", "sled_agent_ip", "sled_agent_port" ] },
+]
+
+[[metrics]]
+name = "check"
+description = "The number of successful checks of an instance's health"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ "state", "reason" ] }
+]
+
+[[metrics]]
+name = "incomplete_check"
+description = "The number of unsuccessful checks of an instance's health"
+units = "count"
+datum_type = "cumulative_u64"
+versions = [
+    { added_in = 1, fields = [ "failure_reason" ] }
+]
+
+[fields.rack_id]
+type = "uuid"
+description = "The rack ID of the Nexus process which performed the health check"
+
+[fields.nexus_id]
+type = "uuid"
+description = "The ID of the Nexus process which performed the health check"
+
+[fields.instance_id]
+type = "uuid"
+description = "The instance's ID"
+
+[fields.silo_id]
+type = "uuid"
+description = "The ID of the instance's silo"
+
+[fields.project_id]
+type = "uuid"
+description = "The ID of the instance's project"
+
+[fields.vmm_id]
+type = "uuid"
+description = "The VMM ID of the instance's virtual machine manager"
+
+[fields.sled_agent_id]
+type = "uuid"
+description = "The ID of the sled-agent managing the instance"
+
+[fields.sled_agent_ip]
+type = "ip_addr"
+description = "The IP address of the sled-agent managing the instance"
+
+[fields.sled_agent_port]
+type = "u16"
+description = "The port of the sled-agent managing the instance"
+
+[fields.state]
+type = "string"
+description = """
+The string representation of the instance's state as understood by \
+the VMM. If the check failed, this will generally be "failed"."""
+
+[fields.reason]
+type = "string"
+description = """
+Why the instance was marked as being in this state.
+
+If an instance was marked as "failed" due to a check failure, this \
+will be a string representation of the failure reason. Otherwise, if \
+the check was successful, this will be "success". Note that this may \
+be "success" even if the instance's state is "failed", which \
+indicates that we successfully queried the instance's state from the \
+sled-agent, and the *sled-agent* reported that the instance has \
+failed -- which is distinct from the instance watcher marking an \
+instance as failed due to a failed check."""
+
+[fields.failure_reason]
+type = "string"
+description = "The reason why the instance health check failed"
diff --git a/oximeter/oximeter/src/lib.rs b/oximeter/oximeter/src/lib.rs
index 9dd8fab47a..5ec6a49e5c 100644
--- a/oximeter/oximeter/src/lib.rs
+++ b/oximeter/oximeter/src/lib.rs
@@ -212,15 +212,18 @@ mod test {
                 entry.path().canonicalize().unwrap().display()
             );
             let contents = fs::read_to_string(entry.path()).unwrap();
-            let list = load_schema(&contents).unwrap_or_else(|_| {
+            let list = load_schema(&contents).unwrap_or_else(|e| {
                 panic!(
-                    "Expected a valid timeseries definition in {}",
-                    entry.path().canonicalize().unwrap().display()
+                    "Expected a valid timeseries definition in {}, \
+                    but found error: {}",
+                    entry.path().canonicalize().unwrap().display(),
+                    e,
                 )
             });
             println!("found {} schema", list.len());
             for schema in list.into_iter() {
                 let key = (schema.timeseries_name.clone(), schema.version);
+                println!("  {} v{}", key.0, key.1);
                 if let Some(dup) = all_schema.insert(key, schema.clone()) {
                     panic!(
                         "Timeseries '{}' version {} is duplicated.\
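The test above walks every TOML file in the schema directory and rejects duplicate (timeseries name, version) pairs. A condensed sketch of the same check, assuming `load_schema` (the helper the test calls) is in scope and that names and versions are printable and orderable, as the test's own output suggests:

    use std::collections::BTreeMap;

    // Condensed duplicate check; `load_schema` parses one TOML definition into
    // a list of versioned timeseries schema.
    fn check_no_duplicates(definitions: &[String]) {
        let mut all = BTreeMap::new();
        for text in definitions {
            for schema in load_schema(text).expect("valid timeseries definition") {
                // Key each schema by (name, version), stringified for ordering.
                let key = (schema.timeseries_name.to_string(), schema.version);
                assert!(
                    all.insert(key.clone(), schema).is_none(),
                    "Timeseries '{}' version {} is duplicated",
                    key.0,
                    key.1,
                );
            }
        }
    }
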
-source.commit = "6e0a232fd0b443c19f61f94bf02b7695505aa8e3" +source.commit = "d1686c86f92ead77e07ddc6024837dee4a401d6d" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/maghemite/image//mgd.sha256.txt -source.sha256 = "49f5224ea5f5078c11d71098fea29db9aafe9575876825f4e0216d9c64361f55" +source.sha256 = "b0223e0aad4c22bf980da17084caf6704d0428bad1b3e5daf54e7d415ce82d3e" output.type = "zone" output.intermediate_only = true diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index 39235b91eb..9493361d19 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -1133,6 +1133,8 @@ impl ServicePortBuilder { self.next_snat_port += NUM_SOURCE_NAT_PORTS; if self.next_snat_port.0 == 0 { self.next_snat_ip = None; + } else { + self.next_snat_ip = Some(snat_ip); } let snat_cfg = diff --git a/sled-storage/src/manager_test_harness.rs b/sled-storage/src/manager_test_harness.rs index 74c2967a84..4409275017 100644 --- a/sled-storage/src/manager_test_harness.rs +++ b/sled-storage/src/manager_test_harness.rs @@ -123,6 +123,7 @@ impl Drop for StorageManagerTestHarness { impl StorageManagerTestHarness { /// Creates a new StorageManagerTestHarness with no associated disks. pub async fn new(log: &Logger) -> Self { + #[cfg(all(test, feature = "testing"))] illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst); let tmp = camino_tempfile::tempdir_in("/var/tmp") .expect("Failed to make temporary directory"); diff --git a/sled-storage/src/pool.rs b/sled-storage/src/pool.rs index cc71aeb19d..13ffe48e45 100644 --- a/sled-storage/src/pool.rs +++ b/sled-storage/src/pool.rs @@ -27,7 +27,7 @@ impl Pool { } /// Return a Pool consisting of fake info - #[cfg(feature = "testing")] + #[cfg(all(test, feature = "testing"))] pub fn new_with_fake_info(name: ZpoolName, parent: DiskIdentity) -> Pool { let info = ZpoolInfo::new_hardcoded(name.to_string()); Pool { name, info, parent } diff --git a/test-utils/src/dev/test_cmds.rs b/test-utils/src/dev/test_cmds.rs index 3c675ddfd9..5d6b9a152e 100644 --- a/test-utils/src/dev/test_cmds.rs +++ b/test-utils/src/dev/test_cmds.rs @@ -126,8 +126,8 @@ pub fn error_for_enoent() -> String { /// /// This allows use to use expectorate to verify the shape of the CLI output. pub fn redact_variable(input: &str) -> String { - // Replace TCP port numbers. We include the localhost characters to avoid - // catching any random sequence of numbers. + // Replace TCP port numbers. We include the localhost + // characters to avoid catching any random sequence of numbers. let s = regex::Regex::new(r"\[::1\]:\d{4,5}") .unwrap() .replace_all(&input, "[::1]:REDACTED_PORT") @@ -189,6 +189,13 @@ pub fn redact_variable(input: &str) -> String { .replace_all(&s, "s ago") .to_string(); + // Replace interval (s). + let s = regex::Regex::new(r"\d+s") + .unwrap() + .replace_all(&s, "s") + .to_string(); + + // Replace interval (ms). 
diff --git a/tools/console_version b/tools/console_version
index 1afb6af170..626464c23d 100644
--- a/tools/console_version
+++ b/tools/console_version
@@ -1,2 +1,2 @@
-COMMIT="e8bf056cc1856f96793518a0b619026e91a883fa"
-SHA2="e448a22739d14943c6e090a1d3344bae2a88879570c179bffa358e0c55e22dd3"
+COMMIT="4377d01585ef87981ed51a4cd1f07376e8502d39"
+SHA2="3e0707dcd6a350ecc3bd62e8e7485a773eebf52f5ffd0db4e8cfb01251e28374"
diff --git a/tools/maghemite_ddm_openapi_version b/tools/maghemite_ddm_openapi_version
index 001b707f25..980081379e 100644
--- a/tools/maghemite_ddm_openapi_version
+++ b/tools/maghemite_ddm_openapi_version
@@ -1,2 +1,2 @@
-COMMIT="6e0a232fd0b443c19f61f94bf02b7695505aa8e3"
+COMMIT="d1686c86f92ead77e07ddc6024837dee4a401d6d"
 SHA2="007bfb717ccbc077c0250dee3121aeb0c5bb0d1c16795429a514fa4f8635a5ef"
diff --git a/tools/maghemite_mg_openapi_version b/tools/maghemite_mg_openapi_version
index 61901b1562..a0f7ac10ec 100644
--- a/tools/maghemite_mg_openapi_version
+++ b/tools/maghemite_mg_openapi_version
@@ -1,2 +1,2 @@
-COMMIT="6e0a232fd0b443c19f61f94bf02b7695505aa8e3"
+COMMIT="d1686c86f92ead77e07ddc6024837dee4a401d6d"
 SHA2="e4b42ab9daad90f0c561a830b62a9d17e294b4d0da0a6d44b4030929b0c37b7e"
diff --git a/tools/maghemite_mgd_checksums b/tools/maghemite_mgd_checksums
index 7c62e15a73..2e7d8a863c 100644
--- a/tools/maghemite_mgd_checksums
+++ b/tools/maghemite_mgd_checksums
@@ -1,2 +1,2 @@
-CIDL_SHA256="49f5224ea5f5078c11d71098fea29db9aafe9575876825f4e0216d9c64361f55"
-MGD_LINUX_SHA256="48aa39bb2d68a81be5ad49986d9ebd3cbad64eee0bd6b4556c91b089760265ec"
+CIDL_SHA256="b0223e0aad4c22bf980da17084caf6704d0428bad1b3e5daf54e7d415ce82d3e"
+MGD_LINUX_SHA256="776f18e9e7fc905d5a2f33d1a1bdd8863ed988bb2965a222217ec06790a3f452"
\ No newline at end of file
diff --git a/tools/update_maghemite.sh b/tools/update_maghemite.sh
index b2b31ca58a..77f5d80a0c 100755
--- a/tools/update_maghemite.sh
+++ b/tools/update_maghemite.sh
@@ -23,6 +23,45 @@ PACKAGES=(
 REPO="oxidecomputer/maghemite"
 
"$SOURCE_DIR/update_helpers.sh" +function update_openapi { + TARGET_COMMIT="$1" + DRY_RUN="$2" + DAEMON="$3" + SHA=$(get_sha "$REPO" "$TARGET_COMMIT" "${DAEMON}-admin.json" "openapi") + OUTPUT=$(printf "COMMIT=\"%s\"\nSHA2=\"%s\"\n" "$TARGET_COMMIT" "$SHA") + + if [ -n "$DRY_RUN" ]; then + OPENAPI_PATH="/dev/null" + else + OPENAPI_PATH="$SOURCE_DIR/maghemite_${DAEMON}_openapi_version" + fi + echo "Updating Maghemite OpenAPI from: $TARGET_COMMIT" + set -x + echo "$OUTPUT" > "$OPENAPI_PATH" + set +x +} + +function update_mgd { + TARGET_COMMIT="$1" + DRY_RUN="$2" + DAEMON="$3" + SHA=$(get_sha "$REPO" "$TARGET_COMMIT" "mgd" "image") + OUTPUT=$(printf "CIDL_SHA256=\"%s\"\n" "$SHA") + + SHA_LINUX=$(get_sha "$REPO" "$TARGET_COMMIT" "mgd" "linux") + OUTPUT_LINUX=$(printf "MGD_LINUX_SHA256=\"%s\"\n" "$SHA_LINUX") + + if [ -n "$DRY_RUN" ]; then + MGD_PATH="/dev/null" + else + MGD_PATH="$SOURCE_DIR/maghemite_mgd_checksums" + fi + echo "Updating Maghemite mgd from: $TARGET_COMMIT" + set -x + printf "$OUTPUT\n$OUTPUT_LINUX" > $MGD_PATH + set +x +} + function main { TARGET_COMMIT="" DRY_RUN="" @@ -47,6 +86,9 @@ function main { TARGET_COMMIT=$(get_latest_commit_from_gh "$REPO" "$TARGET_BRANCH") fi install_toml2json + update_mgd "$TARGET_COMMIT" "$DRY_RUN" + update_openapi "$TARGET_COMMIT" "$DRY_RUN" ddm + update_openapi "$TARGET_COMMIT" "$DRY_RUN" mg do_update_packages "$TARGET_COMMIT" "$DRY_RUN" "$REPO" "${PACKAGES[@]}" } diff --git a/uuid-kinds/Cargo.toml b/uuid-kinds/Cargo.toml index e39017c2bf..9ea2f8223c 100644 --- a/uuid-kinds/Cargo.toml +++ b/uuid-kinds/Cargo.toml @@ -19,6 +19,6 @@ paste.workspace = true [features] default = ["std"] serde = ["newtype-uuid/serde"] -schemars08 = ["newtype-uuid/schemars08", "schemars"] +schemars08 = ["newtype-uuid/schemars08", "schemars", "std"] std = ["newtype-uuid/std"] uuid-v4 = ["newtype-uuid/v4"] diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs index 430b3f7f9f..53acc9c1ed 100644 --- a/uuid-kinds/src/lib.rs +++ b/uuid-kinds/src/lib.rs @@ -2,12 +2,12 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -#![cfg_attr(not(feature = "std"), no_std)] - //! A registry for UUID kinds used in Omicron and related projects. //! //! See this crate's `README.adoc` for more information. +#![cfg_attr(not(feature = "std"), no_std)] + // Export these types so that other users don't have to pull in newtype-uuid. 
 #[doc(no_inline)]
 pub use newtype_uuid::{
diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml
index 7dd7a88330..796cf0bf63 100644
--- a/workspace-hack/Cargo.toml
+++ b/workspace-hack/Cargo.toml
@@ -93,7 +93,7 @@ ring = { version = "0.17.8", features = ["std"] }
 schemars = { version = "0.8.21", features = ["bytes", "chrono", "uuid1"] }
 scopeguard = { version = "1.2.0" }
 semver = { version = "1.0.23", features = ["serde"] }
-serde = { version = "1.0.203", features = ["alloc", "derive", "rc"] }
+serde = { version = "1.0.204", features = ["alloc", "derive", "rc"] }
 serde_json = { version = "1.0.120", features = ["raw_value", "unbounded_depth"] }
 sha2 = { version = "0.10.8", features = ["oid"] }
 similar = { version = "2.5.0", features = ["bytes", "inline", "unicode"] }
@@ -198,7 +198,7 @@ ring = { version = "0.17.8", features = ["std"] }
 schemars = { version = "0.8.21", features = ["bytes", "chrono", "uuid1"] }
 scopeguard = { version = "1.2.0" }
 semver = { version = "1.0.23", features = ["serde"] }
-serde = { version = "1.0.203", features = ["alloc", "derive", "rc"] }
+serde = { version = "1.0.204", features = ["alloc", "derive", "rc"] }
 serde_json = { version = "1.0.120", features = ["raw_value", "unbounded_depth"] }
 sha2 = { version = "0.10.8", features = ["oid"] }
 similar = { version = "2.5.0", features = ["bytes", "inline", "unicode"] }
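A final note on the expanded `Cumulative` derives earlier in this diff (`Eq`, `Hash`, `Ord`, `PartialOrd`): counters can now be compared and used directly as collection keys. A minimal sketch, with the import path assumed and relying only on the `Cumulative::new` and `+=` behavior this diff already uses:

    use oximeter::types::Cumulative; // import path assumed

    fn main() {
        // Counters start at a value and record their start time internally.
        let mut drops: Cumulative<u64> = Cumulative::new(0);
        drops += 5;
        assert_eq!(drops.value(), 5);

        // The new Ord/Eq/Hash derives allow counters to serve as set or map keys.
        let mut seen = std::collections::BTreeSet::new();
        seen.insert(drops);
        assert_eq!(seen.len(), 1);
    }
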