diff --git a/Cargo.lock b/Cargo.lock index 58681b5684..035ffa6464 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -199,6 +199,15 @@ dependencies = [ "syn 2.0.32", ] +[[package]] +name = "approx" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" +dependencies = [ + "num-traits", +] + [[package]] name = "arc-swap" version = "1.6.0" @@ -2651,6 +2660,12 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +[[package]] +name = "futures-timer" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" + [[package]] name = "futures-util" version = "0.3.28" @@ -4511,6 +4526,19 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "num" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" +dependencies = [ + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + [[package]] name = "num-bigint" version = "0.4.4" @@ -4541,6 +4569,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "num-complex" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" +dependencies = [ + "num-traits", +] + [[package]] name = "num-derive" version = "0.4.0" @@ -4573,6 +4610,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-rational" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version 
= "0.2.16" @@ -5437,11 +5485,13 @@ dependencies = [ name = "oximeter" version = "0.1.0" dependencies = [ + "approx", "bytes", "chrono", - "num-traits", + "num", "omicron-common 0.1.0", "oximeter-macro-impl 0.1.0", + "rstest", "schemars", "serde", "thiserror", @@ -5530,6 +5580,7 @@ dependencies = [ "slog-term", "thiserror", "tokio", + "usdt", "uuid", ] @@ -6790,6 +6841,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "relative-path" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c707298afce11da2efef2f600116fa93ffa7a032b5d7b628aa17711ec81383ca" + [[package]] name = "remove_dir_all" version = "0.5.3" @@ -6947,6 +7004,35 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rstest" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version 0.4.0", +] + +[[package]] +name = "rstest_macros" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" +dependencies = [ + "cfg-if 1.0.0", + "glob", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version 0.4.0", + "syn 2.0.32", + "unicode-ident", +] + [[package]] name = "rtoolbox" version = "0.0.1" diff --git a/Cargo.toml b/Cargo.toml index 7f4c4767d7..57e13fae09 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -130,6 +130,7 @@ resolver = "2" [workspace.dependencies] anyhow = "1.0" api_identity = { path = "api_identity" } +approx = "0.5.1" assert_matches = "1.5.0" assert_cmd = "2.0.12" async-bb8-diesel = { git = "https://github.com/oxidecomputer/async-bb8-diesel", rev = "be3d9bce50051d8c0e0c06078e8066cc27db3001" } @@ -231,7 +232,7 @@ nexus-test-utils-macros = { path = "nexus/test-utils-macros" } nexus-test-utils = { path = "nexus/test-utils" } nexus-types = { 
path = "nexus/types" } num-integer = "0.1.45" -num-traits = "0.2.16" +num = { version = "0.4.1", default-features = false, features = [ "libm" ] } omicron-common = { path = "common" } omicron-dev-tools = { path = "dev-tools" } omicron-gateway = { path = "gateway" } @@ -290,6 +291,7 @@ regress = "0.7.1" reqwest = { version = "0.11", default-features = false } ring = "0.16" rpassword = "7.2.0" +rstest = "0.18.2" rustfmt-wrapper = "0.2" rustls = "0.21.7" samael = { git = "https://github.com/njaremko/samael", features = ["xmlsec"], branch = "master" } diff --git a/nexus/src/context.rs b/nexus/src/context.rs index 9c3ede009b..73125cc617 100644 --- a/nexus/src/context.rs +++ b/nexus/src/context.rs @@ -94,8 +94,8 @@ impl ServerContext { name: name.to_string(), id: config.deployment.id, }; - const START_LATENCY_DECADE: i8 = -6; - const END_LATENCY_DECADE: i8 = 3; + const START_LATENCY_DECADE: i16 = -6; + const END_LATENCY_DECADE: i16 = 3; LatencyTracker::with_latency_decades( target, START_LATENCY_DECADE, diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index a494e61a67..1ec8c1a5eb 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -837,7 +837,7 @@ } ] }, - "BinRangeint64": { + "BinRangefloat": { "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", "oneOf": [ { @@ -845,8 +845,8 @@ "type": "object", "properties": { "end": { - "type": "integer", - "format": "int64" + "type": "number", + "format": "float" }, "type": { "type": "string", @@ -865,12 +865,12 @@ "type": "object", "properties": { "end": { - "type": "integer", - "format": "int64" + "type": "number", + "format": "float" }, "start": { - "type": "integer", - "format": "int64" + "type": "number", + "format": "float" }, "type": { "type": "string", @@ -890,8 +890,8 @@ "type": "object", "properties": { "start": { - "type": "integer", - "format": "int64" + "type": "number", + "format": "float" }, "type": { "type": "string", @@ -907,704 +907,1845 @@ } ] }, - "Bindouble": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangedouble" + "BinRangeint16": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] } + }, + "required": [ + "end", + "type" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binint64": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeint64" + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int16" + }, + "start": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] } + }, + "required": [ + "end", + "start", + "type" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "ByteCount": { - "description": "Byte count to express memory or storage capacity.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "Certificate": { - "type": "object", - "properties": { - "cert": { - "type": "string" - }, - "key": { - "type": "string" - } - }, - "required": [ - "cert", - "key" - ] - }, - "Cumulativedouble": { - "description": "A cumulative or counter data type.", - "type": "object", - "properties": { - "start_time": { - "type": "string", - "format": "date-time" - }, - "value": { - "type": "number", - "format": "double" - } - }, - "required": [ - "start_time", - "value" - ] - }, - "Cumulativeint64": { - "description": "A cumulative or counter data type.", - "type": "object", - "properties": { - "start_time": { 
- "type": "string", - "format": "date-time" }, - "value": { - "type": "integer", - "format": "int64" + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] } - }, - "required": [ - "start_time", - "value" ] }, - "CurrentStatus": { - "description": "Describes the current status of a background task", + "BinRangeint32": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", "oneOf": [ { - "description": "The background task is not running\n\nTypically, the task would be waiting for its next activation, which would happen after a timeout or some other event that triggers activation", + "description": "A range unbounded below and exclusively above, `..end`.", "type": "object", "properties": { - "current_status": { + "end": { + "type": "integer", + "format": "int32" + }, + "type": { "type": "string", "enum": [ - "idle" + "range_to" ] } }, "required": [ - "current_status" + "end", + "type" ] }, { - "description": "The background task is currently running\n\nMore precisely, the task has been activated and has not yet finished this activation", + "description": "A range bounded inclusively below and exclusively above, `start..end`.", "type": "object", "properties": { - "current_status": { + "end": { + "type": "integer", + "format": "int32" + }, + "start": { + "type": "integer", + "format": "int32" + }, + "type": { "type": "string", "enum": [ - "running" + "range" ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + 
"start": { + "type": "integer", + "format": "int32" }, - "details": { - "$ref": "#/components/schemas/CurrentStatusRunning" + "type": { + "type": "string", + "enum": [ + "range_from" + ] } }, "required": [ - "current_status", - "details" + "start", + "type" ] } ] }, - "CurrentStatusRunning": { - "type": "object", - "properties": { - "iteration": { - "description": "which iteration this was (counter)", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "reason": { - "description": "what kind of event triggered this activation", - "allOf": [ - { - "$ref": "#/components/schemas/ActivationReason" + "BinRangeint64": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] } + }, + "required": [ + "end", + "type" ] }, - "start_time": { - "description": "wall-clock time when the current activation started", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "iteration", - "reason", - "start_time" - ] - }, - "DatasetCreateRequest": { - "type": "object", - "properties": { - "dataset_id": { - "type": "string", - "format": "uuid" - }, - "request": { - "$ref": "#/components/schemas/DatasetPutRequest" - }, - "zpool_id": { - "type": "string", - "format": "uuid" - } - }, - "required": [ - "dataset_id", - "request", - "zpool_id" - ] - }, - "DatasetKind": { - "description": "Describes the purpose of the dataset.", - "type": "string", - "enum": [ - "crucible", - "cockroach", - "clickhouse", - "clickhouse_keeper", - "external_dns", - "internal_dns" - ] - }, - "DatasetPutRequest": { - "description": "Describes a dataset within a 
pool.", - "type": "object", - "properties": { - "address": { - "description": "Address on which a service is responding to requests for the dataset.", - "type": "string" - }, - "kind": { - "description": "Type of dataset being inserted.", - "allOf": [ - { - "$ref": "#/components/schemas/DatasetKind" - } - ] - } - }, - "required": [ - "address", - "kind" - ] - }, - "Datum": { - "description": "A `Datum` is a single sampled data point from a metric.", - "oneOf": [ { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", "type": "object", "properties": { - "datum": { - "type": "boolean" + "end": { + "type": "integer", + "format": "int64" + }, + "start": { + "type": "integer", + "format": "int64" }, "type": { "type": "string", "enum": [ - "bool" + "range" ] } }, "required": [ - "datum", + "end", + "start", "type" ] }, { + "description": "A range bounded inclusively below and unbounded above, `start..`.", "type": "object", "properties": { - "datum": { + "start": { "type": "integer", "format": "int64" }, "type": { "type": "string", "enum": [ - "i64" + "range_from" ] } }, "required": [ - "datum", + "start", "type" ] - }, + } + ] + }, + "BinRangeint8": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ { + "description": "A range unbounded below and exclusively above, `..end`.", "type": "object", "properties": { - "datum": { - "type": "number", - "format": "double" + "end": { + "type": "integer", + "format": "int8" }, "type": { "type": "string", "enum": [ - "f64" + "range_to" ] } }, "required": [ - "datum", + "end", "type" ] }, { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", "type": "object", "properties": { - "datum": { - "type": "string" + "end": { + "type": "integer", + "format": "int8" + }, + "start": { + "type": "integer", + "format": "int8" }, "type": { "type": "string", "enum": [ - "string" + "range" ] } }, "required": [ - "datum", + "end", + "start", "type" ] }, { + "description": "A range bounded inclusively below and unbounded above, `start..`.", "type": "object", "properties": { - "datum": { - "type": "array", - "items": { - "type": "integer", - "format": "uint8", - "minimum": 0 - } + "start": { + "type": "integer", + "format": "int8" }, "type": { "type": "string", "enum": [ - "bytes" + "range_from" ] } }, "required": [ - "datum", + "start", "type" ] - }, + } + ] + }, + "BinRangeuint16": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ { + "description": "A range unbounded below and exclusively above, `..end`.", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativeint64" + "end": { + "type": "integer", + "format": "uint16", + "minimum": 0 }, "type": { "type": "string", "enum": [ - "cumulative_i64" + "range_to" ] } }, "required": [ - "datum", + "end", "type" ] }, { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Cumulativedouble" + "end": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint16", + "minimum": 0 }, "type": { "type": "string", "enum": [ - "cumulative_f64" + "range" ] } }, "required": [ - "datum", + "end", + "start", "type" ] }, { + "description": "A range bounded inclusively below and unbounded above, `start..`.", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramint64" + "start": { + "type": "integer", + "format": "uint16", + "minimum": 0 }, "type": { "type": "string", "enum": [ - "histogram_i64" + "range_from" ] } }, "required": [ - "datum", + "start", "type" ] - }, + } + ] + }, + "BinRangeuint32": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ { + "description": "A range unbounded below and exclusively above, `..end`.", "type": "object", "properties": { - "datum": { - "$ref": "#/components/schemas/Histogramdouble" + "end": { + "type": "integer", + "format": "uint32", + "minimum": 0 }, "type": { "type": "string", "enum": [ - "histogram_f64" + "range_to" ] } }, "required": [ - "datum", + "end", "type" ] - } - ] - }, - "DiskRuntimeState": { - "description": "Runtime state of the Disk, which includes its attach state and some minimal metadata", - "type": "object", - "properties": { - "disk_state": { - "description": "runtime state of the Disk", - "allOf": [ - { - "$ref": "#/components/schemas/DiskState" - } - ] - }, - "gen": { - "description": "generation number for this state", - "allOf": [ - { - "$ref": "#/components/schemas/Generation" - } - ] }, - "time_updated": { - "description": "timestamp for this information", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "disk_state", - "gen", - "time_updated" - ] - }, - "DiskState": { - "description": "State of a Disk", - "oneOf": [ { - "description": "Disk is being initialized", + "description": "A range bounded inclusively below and exclusively above, `start..end`.", "type": "object", "properties": { - "state": { + "end": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { "type": "string", "enum": [ - "creating" + "range" ] } }, "required": [ - "state" + "end", + "start", + "type" ] }, { - "description": "Disk is ready but detached from any Instance", + "description": "A range bounded inclusively below and unbounded above, `start..`.", "type": "object", "properties": { - "state": { + "start": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { "type": "string", "enum": [ - "detached" + "range_from" ] } }, "required": [ - "state" + 
"start", + "type" ] - }, + } + ] + }, + "BinRangeuint64": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ { - "description": "Disk is ready to receive blocks from an external source", + "description": "A range unbounded below and exclusively above, `..end`.", "type": "object", "properties": { - "state": { - "type": "string", - "enum": [ - "import_ready" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is importing blocks from a URL", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "importing_from_url" - ] - } - }, - "required": [ - "state" - ] - }, - { - "description": "Disk is importing blocks from bulk writes", - "type": "object", - "properties": { - "state": { + "end": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { "type": "string", "enum": [ - "importing_from_bulk_writes" + "range_to" ] } }, "required": [ - "state" + "end", + "type" ] }, { - "description": "Disk is being finalized to state Detached", + "description": "A range bounded inclusively below and exclusively above, `start..end`.", "type": "object", "properties": { - "state": { + "end": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { "type": "string", "enum": [ - "finalizing" + "range" ] } }, "required": [ - "state" + "end", + "start", + "type" ] }, { - "description": "Disk is undergoing maintenance", + "description": "A range bounded inclusively below and unbounded above, `start..`.", "type": "object", "properties": { - "state": { + "start": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { "type": "string", "enum": [ - "maintenance" + "range_from" ] } }, "required": [ - "state" + 
"start", + "type" ] - }, + } + ] + }, + "BinRangeuint8": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ { - "description": "Disk is being attached to the given Instance", + "description": "A range unbounded below and exclusively above, `..end`.", "type": "object", "properties": { - "instance": { - "type": "string", - "format": "uuid" + "end": { + "type": "integer", + "format": "uint8", + "minimum": 0 }, - "state": { + "type": { "type": "string", "enum": [ - "attaching" + "range_to" ] } }, "required": [ - "instance", - "state" + "end", + "type" ] }, { - "description": "Disk is attached to the given Instance", + "description": "A range bounded inclusively below and exclusively above, `start..end`.", "type": "object", "properties": { - "instance": { - "type": "string", - "format": "uuid" + "end": { + "type": "integer", + "format": "uint8", + "minimum": 0 }, - "state": { + "start": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { "type": "string", "enum": [ - "attached" + "range" ] } }, "required": [ - "instance", - "state" + "end", + "start", + "type" ] }, { - "description": "Disk is being detached from the given Instance", + "description": "A range bounded inclusively below and unbounded above, `start..`.", "type": "object", "properties": { - "instance": { - "type": "string", - "format": "uuid" + "start": { + "type": "integer", + "format": "uint8", + "minimum": 0 }, - "state": { + "type": { "type": "string", "enum": [ - "detaching" + "range_from" ] } }, "required": [ - "instance", - "state" + "start", + "type" ] + } + ] + }, + "Bindouble": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": 
"integer", + "format": "uint64", + "minimum": 0 }, - { - "description": "Disk has been destroyed", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "destroyed" - ] + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangedouble" } - }, - "required": [ - "state" ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binfloat": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 }, - { - "description": "Disk is unavailable", - "type": "object", - "properties": { - "state": { - "type": "string", - "enum": [ - "faulted" - ] + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangefloat" } - }, - "required": [ - "state" ] } + }, + "required": [ + "count", + "range" ] }, - "DnsConfigParams": { + "Binint16": { + "description": "Type storing bin edges and a count of samples within it.", "type": "object", "properties": { - "generation": { + "count": { + "description": "The total count of samples in this bin.", "type": "integer", "format": "uint64", "minimum": 0 }, - "time_created": { - "type": "string", - "format": "date-time" - }, - "zones": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DnsConfigZone" - } + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint16" + } + ] } }, "required": [ - "generation", - "time_created", - "zones" + "count", + "range" ] }, - "DnsConfigZone": { + "Binint32": { + "description": "Type storing bin edges and a count of samples within it.", "type": "object", "properties": { - "records": { - "type": "object", - "additionalProperties": { - "type": "array", 
- "items": { - "$ref": "#/components/schemas/DnsRecord" - } - } + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 }, - "zone_name": { - "type": "string" + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint32" + } + ] } }, "required": [ - "records", - "zone_name" + "count", + "range" ] }, - "DnsRecord": { - "oneOf": [ - { - "type": "object", - "properties": { - "data": { - "type": "string", - "format": "ipv4" - }, - "type": { - "type": "string", - "enum": [ - "A" - ] - } - }, - "required": [ - "data", + "Binint64": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint64" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binint8": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint8" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint16": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": 
"#/components/schemas/BinRangeuint16" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint32": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint32" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint64": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint64" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint8": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint8" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "ByteCount": { + "description": "Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "Certificate": { + "type": "object", + "properties": { + "cert": { + "type": "string" + }, + "key": { + "type": "string" + } + }, + "required": [ + "cert", + "key" + ] + }, + "Cumulativedouble": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": 
"string", + "format": "date-time" + }, + "value": { + "type": "number", + "format": "double" + } + }, + "required": [ + "start_time", + "value" + ] + }, + "Cumulativefloat": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "number", + "format": "float" + } + }, + "required": [ + "start_time", + "value" + ] + }, + "Cumulativeint64": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "integer", + "format": "int64" + } + }, + "required": [ + "start_time", + "value" + ] + }, + "Cumulativeuint64": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "start_time", + "value" + ] + }, + "CurrentStatus": { + "description": "Describes the current status of a background task", + "oneOf": [ + { + "description": "The background task is not running\n\nTypically, the task would be waiting for its next activation, which would happen after a timeout or some other event that triggers activation", + "type": "object", + "properties": { + "current_status": { + "type": "string", + "enum": [ + "idle" + ] + } + }, + "required": [ + "current_status" + ] + }, + { + "description": "The background task is currently running\n\nMore precisely, the task has been activated and has not yet finished this activation", + "type": "object", + "properties": { + "current_status": { + "type": "string", + "enum": [ + "running" + ] + }, + "details": { + "$ref": "#/components/schemas/CurrentStatusRunning" + } + }, + "required": [ + "current_status", + "details" + ] + } + ] + }, + "CurrentStatusRunning": { + "type": "object", + "properties": 
{ + "iteration": { + "description": "which iteration this was (counter)", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "reason": { + "description": "what kind of event triggered this activation", + "allOf": [ + { + "$ref": "#/components/schemas/ActivationReason" + } + ] + }, + "start_time": { + "description": "wall-clock time when the current activation started", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "iteration", + "reason", + "start_time" + ] + }, + "DatasetCreateRequest": { + "type": "object", + "properties": { + "dataset_id": { + "type": "string", + "format": "uuid" + }, + "request": { + "$ref": "#/components/schemas/DatasetPutRequest" + }, + "zpool_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "dataset_id", + "request", + "zpool_id" + ] + }, + "DatasetKind": { + "description": "Describes the purpose of the dataset.", + "type": "string", + "enum": [ + "crucible", + "cockroach", + "clickhouse", + "clickhouse_keeper", + "external_dns", + "internal_dns" + ] + }, + "DatasetPutRequest": { + "description": "Describes a dataset within a pool.", + "type": "object", + "properties": { + "address": { + "description": "Address on which a service is responding to requests for the dataset.", + "type": "string" + }, + "kind": { + "description": "Type of dataset being inserted.", + "allOf": [ + { + "$ref": "#/components/schemas/DatasetKind" + } + ] + } + }, + "required": [ + "address", + "kind" + ] + }, + "Datum": { + "description": "A `Datum` is a single sampled data point from a metric.", + "oneOf": [ + { + "type": "object", + "properties": { + "datum": { + "type": "boolean" + }, + "type": { + "type": "string", + "enum": [ + "bool" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "i8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { 
+ "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "i16" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u16" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string", + "enum": [ + "i32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "i64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "number", + "format": "float" + }, + "type": { + "type": "string", + "enum": [ + "f32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "number", + "format": "double" + }, + "type": { + "type": "string", + "enum": [ + "f64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": 
{ + "datum": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "string" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "type": { + "type": "string", + "enum": [ + "bytes" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativeint64" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_i64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativeuint64" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_u64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativefloat" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_f32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativedouble" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_f64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramint8" + }, + "type": { + "type": "string", + "enum": [ + "histogram_i8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramuint8" + }, + "type": { + "type": "string", + "enum": [ + "histogram_u8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramint16" + }, + "type": { + "type": "string", + "enum": [ + "histogram_i16" + ] + } + }, + "required": [ + 
"datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramuint16" + }, + "type": { + "type": "string", + "enum": [ + "histogram_u16" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramint32" + }, + "type": { + "type": "string", + "enum": [ + "histogram_i32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramuint32" + }, + "type": { + "type": "string", + "enum": [ + "histogram_u32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramint64" + }, + "type": { + "type": "string", + "enum": [ + "histogram_i64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramuint64" + }, + "type": { + "type": "string", + "enum": [ + "histogram_u64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramfloat" + }, + "type": { + "type": "string", + "enum": [ + "histogram_f32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramdouble" + }, + "type": { + "type": "string", + "enum": [ + "histogram_f64" + ] + } + }, + "required": [ + "datum", + "type" + ] + } + ] + }, + "DiskRuntimeState": { + "description": "Runtime state of the Disk, which includes its attach state and some minimal metadata", + "type": "object", + "properties": { + "disk_state": { + "description": "runtime state of the Disk", + "allOf": [ + { + "$ref": "#/components/schemas/DiskState" + } + ] + }, + "gen": { + "description": "generation number for this 
state", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "time_updated": { + "description": "timestamp for this information", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "disk_state", + "gen", + "time_updated" + ] + }, + "DiskState": { + "description": "State of a Disk", + "oneOf": [ + { + "description": "Disk is being initialized", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "creating" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is ready but detached from any Instance", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "detached" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is ready to receive blocks from an external source", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "import_ready" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is importing blocks from a URL", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "importing_from_url" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is importing blocks from bulk writes", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "importing_from_bulk_writes" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is being finalized to state Detached", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "finalizing" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is undergoing maintenance", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "maintenance" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is being attached to the given Instance", + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + 
"state": { + "type": "string", + "enum": [ + "attaching" + ] + } + }, + "required": [ + "instance", + "state" + ] + }, + { + "description": "Disk is attached to the given Instance", + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { + "type": "string", + "enum": [ + "attached" + ] + } + }, + "required": [ + "instance", + "state" + ] + }, + { + "description": "Disk is being detached from the given Instance", + "type": "object", + "properties": { + "instance": { + "type": "string", + "format": "uuid" + }, + "state": { + "type": "string", + "enum": [ + "detaching" + ] + } + }, + "required": [ + "instance", + "state" + ] + }, + { + "description": "Disk has been destroyed", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "destroyed" + ] + } + }, + "required": [ + "state" + ] + }, + { + "description": "Disk is unavailable", + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "faulted" + ] + } + }, + "required": [ + "state" + ] + } + ] + }, + "DnsConfigParams": { + "type": "object", + "properties": { + "generation": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "time_created": { + "type": "string", + "format": "date-time" + }, + "zones": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DnsConfigZone" + } + } + }, + "required": [ + "generation", + "time_created", + "zones" + ] + }, + "DnsConfigZone": { + "type": "object", + "properties": { + "records": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DnsRecord" + } + } + }, + "zone_name": { + "type": "string" + } + }, + "required": [ + "records", + "zone_name" + ] + }, + "DnsRecord": { + "oneOf": [ + { + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "ipv4" + }, + "type": { + "type": "string", + "enum": [ + "A" + ] + } + }, + "required": [ + "data", "type" ] }, 
@@ -1766,229 +2907,592 @@ "type": { "type": "string", "enum": [ - "string" + "string" + ] + }, + "value": { + "type": "string" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i8" + ] + }, + "value": { + "type": "integer", + "format": "int8" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "u8" + ] + }, + "value": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i16" + ] + }, + "value": { + "type": "integer", + "format": "int16" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "u16" + ] + }, + "value": { + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i32" + ] + }, + "value": { + "type": "integer", + "format": "int32" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "u32" + ] + }, + "value": { + "type": "integer", + "format": "uint32", + "minimum": 0 + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "i64" + ] + }, + "value": { + "type": "integer", + "format": "int64" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "u64" + ] + }, + "value": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": 
"string", + "enum": [ + "ip_addr" + ] + }, + "value": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "uuid" + ] + }, + "value": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "bool" ] }, "value": { - "type": "string" + "type": "boolean" } }, "required": [ "type", "value" ] - }, + } + ] + }, + "Generation": { + "description": "Generation numbers stored in the database, used for optimistic concurrency control", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "HistogramError": { + "description": "Errors related to constructing histograms or adding samples into them.", + "oneOf": [ { + "description": "An attempt to construct a histogram with an empty set of bins.", "type": "object", "properties": { "type": { "type": "string", "enum": [ - "i64" + "empty_bins" ] - }, - "value": { - "type": "integer", - "format": "int64" } }, "required": [ - "type", - "value" + "type" ] }, { + "description": "An attempt to construct a histogram with non-monotonic bins.", "type": "object", "properties": { "type": { "type": "string", "enum": [ - "ip_addr" + "nonmonotonic_bins" ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "A non-finite was encountered, either as a bin edge or a sample.", + "type": "object", + "properties": { + "content": { + "type": "string" }, - "value": { + "type": { "type": "string", - "format": "ip" + "enum": [ + "non_finite_value" + ] } }, "required": [ - "type", - "value" + "content", + "type" ] }, { + "description": "Error returned when two neighboring bins are not adjoining (there's space between them)", "type": "object", "properties": { + "content": { + "type": "object", + "properties": { + "left": { + "type": "string" + }, + "right": { + "type": "string" + } + }, 
+ "required": [ + "left", + "right" + ] + }, "type": { "type": "string", "enum": [ - "uuid" + "non_adjoining_bins" + ] + } + }, + "required": [ + "content", + "type" + ] + }, + { + "description": "Bin and count arrays are of different sizes.", + "type": "object", + "properties": { + "content": { + "type": "object", + "properties": { + "n_bins": { + "type": "integer", + "format": "uint", + "minimum": 0 + }, + "n_counts": { + "type": "integer", + "format": "uint", + "minimum": 0 + } + }, + "required": [ + "n_bins", + "n_counts" ] }, - "value": { + "type": { "type": "string", - "format": "uuid" + "enum": [ + "array_size_mismatch" + ] } }, "required": [ - "type", - "value" + "content", + "type" ] }, { "type": "object", "properties": { + "content": { + "$ref": "#/components/schemas/QuantizationError" + }, "type": { "type": "string", "enum": [ - "bool" + "quantization" ] - }, - "value": { - "type": "boolean" } }, "required": [ - "type", - "value" + "content", + "type" ] } ] }, - "Generation": { - "description": "Generation numbers stored in the database, used for optimistic concurrency control", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "HistogramError": { - "description": "Errors related to constructing histograms or adding samples into them.", - "oneOf": [ - { - "description": "An attempt to construct a histogram with an empty set of bins.", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "empty_bins" - ] - } - }, - "required": [ - "type" - ] + "Histogramdouble": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Bindouble" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramfloat": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binfloat" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramint16": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint16" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramint32": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint32" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramint64": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint64" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramint8": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint8" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramuint16": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint16" + } }, - { - "description": "An attempt to construct a histogram with non-monotonic bins.", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "nonmonotonic_bins" - ] - } - }, - "required": [ - "type" - ] + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 }, - { - "description": "A non-finite was encountered, either as a bin edge or a sample.", - "type": "object", - "properties": { - "content": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "non_finite_value" - ] - } - }, - "required": [ - "content", - "type" - ] + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramuint32": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint32" + } }, - { - "description": "Error returned when two neighboring bins are not adjoining (there's space between them)", - "type": "object", - "properties": { - "content": { - "type": "object", - "properties": { - "left": { - "type": "string" - }, - "right": { - "type": "string" - } - }, - "required": [ - "left", - "right" - ] - }, - "type": { - "type": "string", - "enum": [ - "non_adjoining_bins" - ] - } - }, - "required": [ - "content", - "type" - ] + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 }, - { - "description": "Bin and count arrays are of different sizes.", - "type": "object", - "properties": { - "content": { - "type": "object", - "properties": { - "n_bins": { - "type": "integer", - "format": "uint", - "minimum": 0 - }, - "n_counts": { - "type": "integer", - "format": "uint", - "minimum": 0 - } - }, - "required": [ - "n_bins", - "n_counts" - ] - }, - "type": { - "type": "string", - "enum": [ - "array_size_mismatch" - ] - } - }, - "required": [ - "content", - "type" - ] + "start_time": { + "type": "string", + "format": "date-time" } + }, + "required": [ + "bins", + "n_samples", + "start_time" ] }, - "Histogramdouble": { + "Histogramuint64": { "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", "type": "object", "properties": { "bins": { "type": "array", "items": { - "$ref": "#/components/schemas/Bindouble" + "$ref": "#/components/schemas/Binuint64" } }, "n_samples": { @@ -2007,14 +3511,14 @@ "start_time" ] }, - "Histogramint64": { + "Histogramuint8": { "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", "type": "object", "properties": { "bins": { "type": "array", "items": { - "$ref": "#/components/schemas/Binint64" + "$ref": "#/components/schemas/Binuint8" } }, "n_samples": { @@ -2684,6 +4188,95 @@ } ] }, + "QuantizationError": { + "description": "Errors occurring during quantizated bin generation.", + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "overflow" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "precision" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "invalid_base" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "invalid_steps" + ] + } + }, + "required": [ + "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "uneven_steps_for_base" + ] + } + }, + "required": [ 
+ "type" + ] + }, + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "powers_out_of_order" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "RackInitializationRequest": { "type": "object", "properties": { diff --git a/openapi/nexus.json b/openapi/nexus.json index 705362da5a..779b1f556c 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -7895,7 +7895,7 @@ } ] }, - "BinRangeint64": { + "BinRangefloat": { "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", "oneOf": [ { @@ -7903,8 +7903,8 @@ "type": "object", "properties": { "end": { - "type": "integer", - "format": "int64" + "type": "number", + "format": "float" }, "type": { "type": "string", @@ -7923,12 +7923,12 @@ "type": "object", "properties": { "end": { - "type": "integer", - "format": "int64" + "type": "number", + "format": "float" }, "start": { - "type": "integer", - "format": "int64" + "type": "number", + "format": "float" }, "type": { "type": "string", @@ -7948,8 +7948,8 @@ "type": "object", "properties": { "start": { - "type": "integer", - "format": "int64" + "type": "number", + "format": "float" }, "type": { "type": "string", @@ -7965,251 +7965,1399 @@ } ] }, - "Bindouble": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangedouble" + "BinRangeint16": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] } + }, + "required": [ + "end", + "type" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "Binint64": { - "description": "Type storing bin edges and a count of samples within it.", - "type": "object", - "properties": { - "count": { - "description": "The total count of samples in this bin.", - "type": "integer", - "format": "uint64", - "minimum": 0 }, - "range": { - "description": "The range of the support covered by this bin.", - "allOf": [ - { - "$ref": "#/components/schemas/BinRangeint64" + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int16" + }, + "start": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] } + }, + "required": [ + "end", + "start", + "type" ] - } - }, - "required": [ - "count", - "range" - ] - }, - "BlockSize": { - "title": "disk block size in bytes", - "type": "integer", - "enum": [ - 512, - 2048, - 4096 - ] - }, - "ByteCount": { - "description": "Byte count to express memory or storage capacity.", - "type": "integer", - "format": "uint64", - "minimum": 0 - }, - "Certificate": { - "description": "View of a Certificate", - "type": "object", - "properties": { - "description": { - "description": "human-readable free-form text about a resource", - "type": "string" - }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", - "allOf": [ - { - "$ref": 
"#/components/schemas/Name" + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] } + }, + "required": [ + "start", + "type" ] - }, - "service": { - "$ref": "#/components/schemas/ServiceUsingCertificate" - }, - "time_created": { - "description": "timestamp when this resource was created", - "type": "string", - "format": "date-time" - }, - "time_modified": { - "description": "timestamp when this resource was last modified", - "type": "string", - "format": "date-time" } - }, - "required": [ - "description", - "id", - "name", - "service", - "time_created", - "time_modified" ] }, - "CertificateCreate": { - "description": "Create-time parameters for a `Certificate`", - "type": "object", - "properties": { - "cert": { - "description": "PEM-formatted string containing public certificate chain", - "type": "string" - }, - "description": { - "type": "string" - }, - "key": { - "description": "PEM-formatted string containing private key", - "type": "string" + "BinRangeint32": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] }, - "name": { - "$ref": "#/components/schemas/Name" + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int32" + }, + "start": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] }, - "service": { - "description": "The service using this certificate", - "allOf": [ - { - "$ref": "#/components/schemas/ServiceUsingCertificate" + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] } + }, + "required": [ + "start", + "type" ] } - }, - "required": [ - "cert", - "description", - "key", - "name", - "service" ] }, - "CertificateResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/Certificate" - } + "BinRangeint64": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int64" + }, + "start": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] } - }, - "required": [ - "items" ] }, - "Cumulativedouble": { - "description": "A cumulative or counter data type.", - "type": "object", - "properties": { - "start_time": { - "type": "string", - "format": "date-time" + "BinRangeint8": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] }, - "value": { - "type": "number", - "format": "double" + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "int8" + }, + "start": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] } - }, - "required": [ - "start_time", - "value" ] }, - "Cumulativeint64": { - "description": "A cumulative or counter data type.", - "type": "object", - "properties": { - "start_time": { - "type": "string", - "format": "date-time" + "BinRangeuint16": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] }, - "value": { - "type": "integer", - "format": "int64" + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] } - }, - "required": [ - "start_time", - "value" ] }, - "CurrentUser": { - "description": "Info about the current user", - "type": "object", - "properties": { - "display_name": { - "description": "Human-readable name that can identify the user", - "type": "string" - }, - "id": { - "type": "string", - "format": "uuid" - }, - "silo_id": { - "description": "Uuid of the silo to which this user belongs", - "type": "string", - "format": "uuid" + "BinRangeuint32": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "BinRangeuint64": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "BinRangeuint8": { + "description": "A type storing a range over `T`.\n\nThis type supports ranges similar to the `RangeTo`, `Range` and `RangeFrom` types in the standard library. 
Those cover `(..end)`, `(start..end)`, and `(start..)` respectively.", + "oneOf": [ + { + "description": "A range unbounded below and exclusively above, `..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_to" + ] + } + }, + "required": [ + "end", + "type" + ] + }, + { + "description": "A range bounded inclusively below and exclusively above, `start..end`.", + "type": "object", + "properties": { + "end": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "start": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range" + ] + } + }, + "required": [ + "end", + "start", + "type" + ] + }, + { + "description": "A range bounded inclusively below and unbounded above, `start..`.", + "type": "object", + "properties": { + "start": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "range_from" + ] + } + }, + "required": [ + "start", + "type" + ] + } + ] + }, + "Bindouble": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangedouble" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binfloat": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": 
"#/components/schemas/BinRangefloat" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binint16": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint16" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binint32": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint32" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binint64": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint64" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binint8": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeint8" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + 
"Binuint16": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint16" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint32": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint32" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint64": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint64" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "Binuint8": { + "description": "Type storing bin edges and a count of samples within it.", + "type": "object", + "properties": { + "count": { + "description": "The total count of samples in this bin.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "range": { + "description": "The range of the support covered by this bin.", + "allOf": [ + { + "$ref": "#/components/schemas/BinRangeuint8" + } + ] + } + }, + "required": [ + "count", + "range" + ] + }, + "BlockSize": { + "title": "disk block size in bytes", + "type": "integer", + "enum": [ + 512, + 2048, 
+ 4096 + ] + }, + "ByteCount": { + "description": "Byte count to express memory or storage capacity.", + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "Certificate": { + "description": "View of a Certificate", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "service": { + "$ref": "#/components/schemas/ServiceUsingCertificate" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "name", + "service", + "time_created", + "time_modified" + ] + }, + "CertificateCreate": { + "description": "Create-time parameters for a `Certificate`", + "type": "object", + "properties": { + "cert": { + "description": "PEM-formatted string containing public certificate chain", + "type": "string" + }, + "description": { + "type": "string" + }, + "key": { + "description": "PEM-formatted string containing private key", + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "service": { + "description": "The service using this certificate", + "allOf": [ + { + "$ref": "#/components/schemas/ServiceUsingCertificate" + } + ] + } + }, + "required": [ + "cert", + "description", + "key", + "name", + "service" + ] + }, + "CertificateResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + 
"type": "array", + "items": { + "$ref": "#/components/schemas/Certificate" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Cumulativedouble": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "number", + "format": "double" + } + }, + "required": [ + "start_time", + "value" + ] + }, + "Cumulativefloat": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "number", + "format": "float" + } + }, + "required": [ + "start_time", + "value" + ] + }, + "Cumulativeint64": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "integer", + "format": "int64" + } + }, + "required": [ + "start_time", + "value" + ] + }, + "Cumulativeuint64": { + "description": "A cumulative or counter data type.", + "type": "object", + "properties": { + "start_time": { + "type": "string", + "format": "date-time" + }, + "value": { + "type": "integer", + "format": "uint64", + "minimum": 0 + } + }, + "required": [ + "start_time", + "value" + ] + }, + "CurrentUser": { + "description": "Info about the current user", + "type": "object", + "properties": { + "display_name": { + "description": "Human-readable name that can identify the user", + "type": "string" + }, + "id": { + "type": "string", + "format": "uuid" + }, + "silo_id": { + "description": "Uuid of the silo to which this user belongs", + "type": "string", + "format": "uuid" + }, + "silo_name": { + "description": "Name of the silo to which this user belongs.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + 
] + } + }, + "required": [ + "display_name", + "id", + "silo_id", + "silo_name" + ] + }, + "Datum": { + "description": "A `Datum` is a single sampled data point from a metric.", + "oneOf": [ + { + "type": "object", + "properties": { + "datum": { + "type": "boolean" + }, + "type": { + "type": "string", + "enum": [ + "bool" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int8" + }, + "type": { + "type": "string", + "enum": [ + "i8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u8" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int16" + }, + "type": { + "type": "string", + "enum": [ + "i16" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u16" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string", + "enum": [ + "i32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": "int64" + }, + "type": { + "type": "string", + "enum": [ + "i64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "integer", + "format": 
"uint64", + "minimum": 0 + }, + "type": { + "type": "string", + "enum": [ + "u64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "number", + "format": "float" + }, + "type": { + "type": "string", + "enum": [ + "f32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "number", + "format": "double" + }, + "type": { + "type": "string", + "enum": [ + "f64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "string" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0 + } + }, + "type": { + "type": "string", + "enum": [ + "bytes" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativeint64" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_i64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativeuint64" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_u64" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativefloat" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_f32" + ] + } + }, + "required": [ + "datum", + "type" + ] + }, + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Cumulativedouble" + }, + "type": { + "type": "string", + "enum": [ + "cumulative_f64" + ] + } + }, + "required": [ + "datum", + "type" + ] }, - "silo_name": { - "description": "Name of the silo to which 
this user belongs.", - "allOf": [ - { - "$ref": "#/components/schemas/Name" + { + "type": "object", + "properties": { + "datum": { + "$ref": "#/components/schemas/Histogramint8" + }, + "type": { + "type": "string", + "enum": [ + "histogram_i8" + ] } + }, + "required": [ + "datum", + "type" ] - } - }, - "required": [ - "display_name", - "id", - "silo_id", - "silo_name" - ] - }, - "Datum": { - "description": "A `Datum` is a single sampled data point from a metric.", - "oneOf": [ + }, { "type": "object", "properties": { "datum": { - "type": "boolean" + "$ref": "#/components/schemas/Histogramuint8" }, "type": { "type": "string", "enum": [ - "bool" + "histogram_u8" ] } }, @@ -8222,13 +9370,12 @@ "type": "object", "properties": { "datum": { - "type": "integer", - "format": "int64" + "$ref": "#/components/schemas/Histogramint16" }, "type": { "type": "string", "enum": [ - "i64" + "histogram_i16" ] } }, @@ -8241,13 +9388,12 @@ "type": "object", "properties": { "datum": { - "type": "number", - "format": "double" + "$ref": "#/components/schemas/Histogramuint16" }, "type": { "type": "string", "enum": [ - "f64" + "histogram_u16" ] } }, @@ -8260,12 +9406,12 @@ "type": "object", "properties": { "datum": { - "type": "string" + "$ref": "#/components/schemas/Histogramint32" }, "type": { "type": "string", "enum": [ - "string" + "histogram_i32" ] } }, @@ -8278,17 +9424,12 @@ "type": "object", "properties": { "datum": { - "type": "array", - "items": { - "type": "integer", - "format": "uint8", - "minimum": 0 - } + "$ref": "#/components/schemas/Histogramuint32" }, "type": { "type": "string", "enum": [ - "bytes" + "histogram_u32" ] } }, @@ -8301,12 +9442,12 @@ "type": "object", "properties": { "datum": { - "$ref": "#/components/schemas/Cumulativeint64" + "$ref": "#/components/schemas/Histogramint64" }, "type": { "type": "string", "enum": [ - "cumulative_i64" + "histogram_i64" ] } }, @@ -8319,12 +9460,12 @@ "type": "object", "properties": { "datum": { - "$ref": 
"#/components/schemas/Cumulativedouble" + "$ref": "#/components/schemas/Histogramuint64" }, "type": { "type": "string", "enum": [ - "cumulative_f64" + "histogram_u64" ] } }, @@ -8337,12 +9478,12 @@ "type": "object", "properties": { "datum": { - "$ref": "#/components/schemas/Histogramint64" + "$ref": "#/components/schemas/Histogramfloat" }, "type": { "type": "string", "enum": [ - "histogram_i64" + "histogram_f32" ] } }, @@ -9114,6 +10255,84 @@ "start_time" ] }, + "Histogramfloat": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binfloat" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramint16": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint16" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramint32": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint32" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, "Histogramint64": { "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", "type": "object", @@ -9140,6 +10359,136 @@ "start_time" ] }, + "Histogramint8": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binint8" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramuint16": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint16" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramuint32": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint32" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramuint64": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. 
There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint64" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, + "Histogramuint8": { + "description": "Histogram metric\n\nA histogram maintains the count of any number of samples, over a set of bins. Bins are specified on construction via their _left_ edges, inclusive. There can't be any \"gaps\" in the bins, and an additional bin may be added to the left, right, or both so that the bins extend to the entire range of the support.\n\nNote that any gaps, unsorted bins, or non-finite values will result in an error.", + "type": "object", + "properties": { + "bins": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Binuint8" + } + }, + "n_samples": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "start_time": { + "type": "string", + "format": "date-time" + } + }, + "required": [ + "bins", + "n_samples", + "start_time" + ] + }, "IdentityProvider": { "description": "View of an Identity Provider", "type": "object", diff --git a/oximeter/db/Cargo.toml b/oximeter/db/Cargo.toml index 1187458acf..9ff4ac5c06 100644 --- a/oximeter/db/Cargo.toml +++ b/oximeter/db/Cargo.toml @@ -23,6 +23,7 @@ slog-async.workspace = true slog-term.workspace = true thiserror.workspace = true tokio = { workspace = true, features = [ "rt-multi-thread", "macros" ] } +usdt.workspace = true uuid.workspace = true [dev-dependencies] diff --git a/oximeter/db/src/client.rs b/oximeter/db/src/client.rs index 49a4553017..8629e4b8ef 100644 --- 
a/oximeter/db/src/client.rs +++ b/oximeter/db/src/client.rs @@ -3,24 +3,45 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. //! Rust client to ClickHouse database -// Copyright 2021 Oxide Computer Company -use crate::{ - model, query, Error, Metric, Target, Timeseries, TimeseriesPageSelector, - TimeseriesScanParams, TimeseriesSchema, -}; -use crate::{TimeseriesKey, TimeseriesName}; +// Copyright 2023 Oxide Computer Company + +use crate::model; +use crate::query; +use crate::Error; +use crate::Metric; +use crate::Target; +use crate::Timeseries; +use crate::TimeseriesKey; +use crate::TimeseriesName; +use crate::TimeseriesPageSelector; +use crate::TimeseriesScanParams; +use crate::TimeseriesSchema; use async_trait::async_trait; -use dropshot::{EmptyScanParams, PaginationOrder, ResultsPage, WhichPage}; +use dropshot::EmptyScanParams; +use dropshot::PaginationOrder; +use dropshot::ResultsPage; +use dropshot::WhichPage; use oximeter::types::Sample; -use slog::{debug, error, trace, Logger}; -use std::collections::{btree_map::Entry, BTreeMap, BTreeSet}; +use slog::debug; +use slog::error; +use slog::trace; +use slog::Logger; +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; +use std::collections::BTreeSet; use std::convert::TryFrom; use std::net::SocketAddr; use std::num::NonZeroU32; use std::sync::Mutex; use uuid::Uuid; +#[usdt::provider(provider = "clickhouse__client")] +mod probes { + fn query__start(_: &usdt::UniqueId, sql: &str) {} + fn query__done(_: &usdt::UniqueId) {} +} + /// A `Client` to the ClickHouse metrics database. #[derive(Debug)] pub struct Client { @@ -364,7 +385,9 @@ impl Client { { let sql = sql.as_ref().to_string(); trace!(self.log, "executing SQL query: {}", sql); - handle_db_response( + let id = usdt::UniqueId::new(); + probes::query__start!(|| (&id, &sql)); + let response = handle_db_response( self.client .post(&self.url) // See regression test `test_unquoted_64bit_integers` for details. 
@@ -377,7 +400,9 @@ impl Client { .await? .text() .await - .map_err(|err| Error::Database(err.to_string())) + .map_err(|err| Error::Database(err.to_string())); + probes::query__done!(|| (&id)); + response } async fn get_schema(&self) -> Result<(), Error> { @@ -633,12 +658,23 @@ fn error_for_schema_mismatch( mod tests { use super::*; use crate::query; + use crate::query::field_table_name; + use crate::query::measurement_table_name; + use chrono::Utc; use omicron_test_utils::dev::clickhouse::ClickHouseInstance; + use omicron_test_utils::dev::test_setup_log; + use oximeter::histogram::Histogram; use oximeter::test_util; - use oximeter::{Metric, Target}; + use oximeter::Datum; + use oximeter::FieldValue; + use oximeter::Metric; + use oximeter::Target; use slog::o; + use std::net::Ipv4Addr; + use std::net::Ipv6Addr; use std::time::Duration; use tokio::time::sleep; + use uuid::Uuid; // NOTE: It's important that each test run the ClickHouse server with different ports. // The tests each require a clean slate. 
Previously, we ran the tests in a different thread, @@ -652,7 +688,8 @@ mod tests { #[tokio::test] async fn test_build_client() { - let log = slog::Logger::root(slog::Discard, o!()); + let logctx = test_setup_log("test_build_client"); + let log = &logctx.log; // Let the OS assign a port and discover it after ClickHouse starts let mut db = ClickHouseInstance::new_single_node(0) @@ -665,6 +702,7 @@ mod tests { client.wipe_single_node_db().await.unwrap(); db.cleanup().await.expect("Failed to cleanup ClickHouse server"); + logctx.cleanup_successful(); } #[tokio::test] @@ -829,6 +867,444 @@ mod tests { } } + #[tokio::test] + async fn test_recall_field_value_bool() { + let field = FieldValue::Bool(true); + let as_json = serde_json::Value::from(1_u64); + test_recall_field_value_impl(field, as_json).await; + } + + #[tokio::test] + async fn test_recall_field_value_u8() { + let field = FieldValue::U8(1); + let as_json = serde_json::Value::from(1_u8); + test_recall_field_value_impl(field, as_json).await; + } + + #[tokio::test] + async fn test_recall_field_value_i8() { + let field = FieldValue::I8(1); + let as_json = serde_json::Value::from(1_i8); + test_recall_field_value_impl(field, as_json).await; + } + + #[tokio::test] + async fn test_recall_field_value_u16() { + let field = FieldValue::U16(1); + let as_json = serde_json::Value::from(1_u16); + test_recall_field_value_impl(field, as_json).await; + } + + #[tokio::test] + async fn test_recall_field_value_i16() { + let field = FieldValue::I16(1); + let as_json = serde_json::Value::from(1_i16); + test_recall_field_value_impl(field, as_json).await; + } + + #[tokio::test] + async fn test_recall_field_value_u32() { + let field = FieldValue::U32(1); + let as_json = serde_json::Value::from(1_u32); + test_recall_field_value_impl(field, as_json).await; + } + + #[tokio::test] + async fn test_recall_field_value_i32() { + let field = FieldValue::I32(1); + let as_json = serde_json::Value::from(1_i32); + test_recall_field_value_impl(field, 
as_json).await; + } + + #[tokio::test] + async fn test_recall_field_value_u64() { + let field = FieldValue::U64(1); + let as_json = serde_json::Value::from(1_u64); + test_recall_field_value_impl(field, as_json).await; + } + + #[tokio::test] + async fn test_recall_field_value_i64() { + let field = FieldValue::I64(1); + let as_json = serde_json::Value::from(1_i64); + test_recall_field_value_impl(field, as_json).await; + } + + #[tokio::test] + async fn test_recall_field_value_string() { + let field = FieldValue::String("foo".into()); + let as_json = serde_json::Value::from("foo"); + test_recall_field_value_impl(field, as_json).await; + } + + #[tokio::test] + async fn test_recall_field_value_ipv4addr() { + let field = FieldValue::from(Ipv4Addr::LOCALHOST); + let as_json = serde_json::Value::from( + Ipv4Addr::LOCALHOST.to_ipv6_mapped().to_string(), + ); + test_recall_field_value_impl(field, as_json).await; + } + + #[tokio::test] + async fn test_recall_field_value_ipv6addr() { + let field = FieldValue::from(Ipv6Addr::LOCALHOST); + let as_json = serde_json::Value::from(Ipv6Addr::LOCALHOST.to_string()); + test_recall_field_value_impl(field, as_json).await; + } + + #[tokio::test] + async fn test_recall_field_value_uuid() { + let id = Uuid::new_v4(); + let field = FieldValue::from(id); + let as_json = serde_json::Value::from(id.to_string()); + test_recall_field_value_impl(field, as_json).await; + } + + async fn test_recall_field_value_impl( + field_value: FieldValue, + as_json: serde_json::Value, + ) { + let logctx = test_setup_log( + format!("test_recall_field_value_{}", field_value.field_type()) + .as_str(), + ); + let log = &logctx.log; + + // Let the OS assign a port and discover it after ClickHouse starts + let mut db = ClickHouseInstance::new_single_node(0) + .await + .expect("Failed to start ClickHouse"); + let address = SocketAddr::new("::1".parse().unwrap(), db.port()); + + let client = Client::new(address, log); + client + .init_single_node_db() + .await + 
.expect("Failed to initialize timeseries database"); + + // Insert a record from this field. + const TIMESERIES_NAME: &str = "foo:bar"; + const TIMESERIES_KEY: u64 = 101; + const FIELD_NAME: &str = "baz"; + + let mut inserted_row = serde_json::Map::new(); + inserted_row + .insert("timeseries_name".to_string(), TIMESERIES_NAME.into()); + inserted_row + .insert("timeseries_key".to_string(), TIMESERIES_KEY.into()); + inserted_row.insert("field_name".to_string(), FIELD_NAME.into()); + inserted_row.insert("field_value".to_string(), as_json); + let inserted_row = serde_json::Value::from(inserted_row); + + let row = serde_json::to_string(&inserted_row).unwrap(); + let field_table = field_table_name(field_value.field_type()); + let insert_sql = format!( + "INSERT INTO oximeter.{field_table} FORMAT JSONEachRow {row}" + ); + client.execute(insert_sql).await.expect("Failed to insert field row"); + + // Select it exactly back out. + let select_sql = format!( + "SELECT * FROM oximeter.{} LIMIT 1 FORMAT {};", + field_table_name(field_value.field_type()), + crate::DATABASE_SELECT_FORMAT, + ); + let body = client + .execute_with_body(select_sql) + .await + .expect("Failed to select field row"); + let actual_row: serde_json::Value = serde_json::from_str(&body) + .expect("Failed to parse field row JSON"); + println!("{actual_row:?}"); + println!("{inserted_row:?}"); + assert_eq!( + actual_row, inserted_row, + "Actual and expected field rows do not match" + ); + db.cleanup().await.expect("Failed to cleanup ClickHouse server"); + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_recall_measurement_bool() { + let datum = Datum::Bool(true); + let as_json = serde_json::Value::from(1_u64); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_i8() { + let datum = Datum::I8(1); + let as_json = serde_json::Value::from(1_i8); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] 
+ async fn test_recall_measurement_u8() { + let datum = Datum::U8(1); + let as_json = serde_json::Value::from(1_u8); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_i16() { + let datum = Datum::I16(1); + let as_json = serde_json::Value::from(1_i16); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_u16() { + let datum = Datum::U16(1); + let as_json = serde_json::Value::from(1_u16); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_i32() { + let datum = Datum::I32(1); + let as_json = serde_json::Value::from(1_i32); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_u32() { + let datum = Datum::U32(1); + let as_json = serde_json::Value::from(1_u32); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_i64() { + let datum = Datum::I64(1); + let as_json = serde_json::Value::from(1_i64); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_u64() { + let datum = Datum::U64(1); + let as_json = serde_json::Value::from(1_u64); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_f32() { + const VALUE: f32 = 1.1; + let datum = Datum::F32(VALUE); + // NOTE: This is intentionally an f64. 
+ let as_json = serde_json::Value::from(1.1_f64); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_f64() { + const VALUE: f64 = 1.1; + let datum = Datum::F64(VALUE); + let as_json = serde_json::Value::from(VALUE); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_cumulative_i64() { + let datum = Datum::CumulativeI64(1.into()); + let as_json = serde_json::Value::from(1_i64); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_cumulative_u64() { + let datum = Datum::CumulativeU64(1.into()); + let as_json = serde_json::Value::from(1_u64); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_cumulative_f64() { + let datum = Datum::CumulativeF64(1.1.into()); + let as_json = serde_json::Value::from(1.1_f64); + test_recall_measurement_impl::(datum, None, as_json).await; + } + + async fn histogram_test_impl(hist: Histogram) + where + T: oximeter::histogram::HistogramSupport, + Datum: From>, + serde_json::Value: From, + { + let (bins, counts) = hist.to_arrays(); + let datum = Datum::from(hist); + let as_json = serde_json::Value::Array( + counts.into_iter().map(Into::into).collect(), + ); + test_recall_measurement_impl(datum, Some(bins), as_json).await; + } + + #[tokio::test] + async fn test_recall_measurement_histogram_i8() { + let hist = Histogram::new(&[0i8, 1, 2]).unwrap(); + histogram_test_impl(hist).await; + } + + #[tokio::test] + async fn test_recall_measurement_histogram_u8() { + let hist = Histogram::new(&[0u8, 1, 2]).unwrap(); + histogram_test_impl(hist).await; + } + + #[tokio::test] + async fn test_recall_measurement_histogram_i16() { + let hist = Histogram::new(&[0i16, 1, 2]).unwrap(); + histogram_test_impl(hist).await; + } + + #[tokio::test] + async fn test_recall_measurement_histogram_u16() 
{ + let hist = Histogram::new(&[0u16, 1, 2]).unwrap(); + histogram_test_impl(hist).await; + } + + #[tokio::test] + async fn test_recall_measurement_histogram_i32() { + let hist = Histogram::new(&[0i32, 1, 2]).unwrap(); + histogram_test_impl(hist).await; + } + + #[tokio::test] + async fn test_recall_measurement_histogram_u32() { + let hist = Histogram::new(&[0u32, 1, 2]).unwrap(); + histogram_test_impl(hist).await; + } + + #[tokio::test] + async fn test_recall_measurement_histogram_i64() { + let hist = Histogram::new(&[0i64, 1, 2]).unwrap(); + histogram_test_impl(hist).await; + } + + #[tokio::test] + async fn test_recall_measurement_histogram_u64() { + let hist = Histogram::new(&[0u64, 1, 2]).unwrap(); + histogram_test_impl(hist).await; + } + + // NOTE: This test is ignored intentionally. + // + // We're using the JSONEachRow format to return data, which loses precision + // for floating point values. This means we return the _double_ 0.1 from + // the database as a `Value::Number`, which fails to compare equal to the + // `Value::Number(0.1f32 as f64)` we sent in. That's because 0.1 is not + // exactly representable in an `f32`, but it's close enough that ClickHouse + // prints `0.1` in the result, which converts to a slightly different `f64` + // than `0.1_f32 as f64` does. + // + // See https://github.com/oxidecomputer/omicron/issues/4059 for related + // discussion. 
+ #[tokio::test] + #[ignore] + async fn test_recall_measurement_histogram_f32() { + let hist = Histogram::new(&[0.1f32, 0.2, 0.3]).unwrap(); + histogram_test_impl(hist).await; + } + + #[tokio::test] + async fn test_recall_measurement_histogram_f64() { + let hist = Histogram::new(&[0.1f64, 0.2, 0.3]).unwrap(); + histogram_test_impl(hist).await; + } + + async fn test_recall_measurement_impl + Copy>( + datum: Datum, + maybe_bins: Option>, + json_datum: serde_json::Value, + ) { + let logctx = test_setup_log( + format!("test_recall_measurement_{}", datum.datum_type()).as_str(), + ); + let log = &logctx.log; + + // Let the OS assign a port and discover it after ClickHouse starts + let mut db = ClickHouseInstance::new_single_node(0) + .await + .expect("Failed to start ClickHouse"); + let address = SocketAddr::new("::1".parse().unwrap(), db.port()); + + let client = Client::new(address, log); + client + .init_single_node_db() + .await + .expect("Failed to initialize timeseries database"); + + // Insert a record from this datum. + const TIMESERIES_NAME: &str = "foo:bar"; + const TIMESERIES_KEY: u64 = 101; + let mut inserted_row = serde_json::Map::new(); + inserted_row + .insert("timeseries_name".to_string(), TIMESERIES_NAME.into()); + inserted_row + .insert("timeseries_key".to_string(), TIMESERIES_KEY.into()); + inserted_row.insert( + "timestamp".to_string(), + Utc::now() + .format(crate::DATABASE_TIMESTAMP_FORMAT) + .to_string() + .into(), + ); + + // Insert the start time and possibly bins. 
+ if let Some(start_time) = datum.start_time() { + inserted_row.insert( + "start_time".to_string(), + start_time + .format(crate::DATABASE_TIMESTAMP_FORMAT) + .to_string() + .into(), + ); + } + if let Some(bins) = &maybe_bins { + let bins = serde_json::Value::Array( + bins.iter().copied().map(Into::into).collect(), + ); + inserted_row.insert("bins".to_string(), bins); + inserted_row.insert("counts".to_string(), json_datum); + } else { + inserted_row.insert("datum".to_string(), json_datum); + } + let inserted_row = serde_json::Value::from(inserted_row); + + let measurement_table = measurement_table_name(datum.datum_type()); + let row = serde_json::to_string(&inserted_row).unwrap(); + let insert_sql = format!( + "INSERT INTO oximeter.{measurement_table} FORMAT JSONEachRow {row}", + ); + client + .execute(insert_sql) + .await + .expect("Failed to insert measurement row"); + + // Select it exactly back out. + let select_sql = format!( + "SELECT * FROM oximeter.{} LIMIT 1 FORMAT {};", + measurement_table, + crate::DATABASE_SELECT_FORMAT, + ); + let body = client + .execute_with_body(select_sql) + .await + .expect("Failed to select measurement row"); + let actual_row: serde_json::Value = serde_json::from_str(&body) + .expect("Failed to parse measurement row JSON"); + println!("{actual_row:?}"); + println!("{inserted_row:?}"); + assert_eq!( + actual_row, inserted_row, + "Actual and expected measurement rows do not match" + ); + db.cleanup().await.expect("Failed to cleanup ClickHouse server"); + logctx.cleanup_successful(); + } + #[tokio::test] async fn test_schema_mismatch() { let log = slog::Logger::root(slog::Discard, o!()); diff --git a/oximeter/db/src/db-replicated-init.sql b/oximeter/db/src/db-replicated-init.sql index ef3a1f6f77..7b92d967af 100644 --- a/oximeter/db/src/db-replicated-init.sql +++ b/oximeter/db/src/db-replicated-init.sql @@ -20,6 +20,126 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_bool ON CLUSTER oximeter_cluste ) ENGINE = 
Distributed('oximeter_cluster', 'oximeter', 'measurements_bool_local', xxHash64(splitByChar(':', timeseries_name)[1])); -- +CREATE TABLE IF NOT EXISTS oximeter.measurements_i8_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum Int8 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_i8_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_i8 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum Int8 +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_i8_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_u8_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum UInt8 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_u8_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_u8 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum UInt8 +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_u8_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_i16_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum Int16 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_i16_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_i16 ON CLUSTER 
oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum Int16 +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_i16_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_u16_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum UInt16 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_u16_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_u16 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum UInt16 +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_u16_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_i32_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum Int32 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_i32_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_i32 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum Int32 +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_i32_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_u32_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum UInt32 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_u32_local', '{replica}') +ORDER BY 
(timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_u32 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum UInt32 +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_u32_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- CREATE TABLE IF NOT EXISTS oximeter.measurements_i64_local ON CLUSTER oximeter_cluster ( timeseries_name String, @@ -40,6 +160,26 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_i64 ON CLUSTER oximeter_cluster ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_i64_local', xxHash64(splitByChar(':', timeseries_name)[1])); -- +CREATE TABLE IF NOT EXISTS oximeter.measurements_u64_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum UInt64 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_u64_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_u64 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum UInt64 +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_u64_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- CREATE TABLE IF NOT EXISTS oximeter.measurements_f64_local ON CLUSTER oximeter_cluster ( timeseries_name String, @@ -122,6 +262,50 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativei64 ON CLUSTER oximet ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_cumulativei64_local', xxHash64(splitByChar(':', timeseries_name)[1])); -- +CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativeu64_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + 
start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + datum UInt64 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_cumulativeu64_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativeu64 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + datum UInt64 +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_cumulativeu64_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativef32_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + datum Float32 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_cumulativef32_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativef32 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + datum Float32 +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_cumulativef32_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativef64_local ON CLUSTER oximeter_cluster ( timeseries_name String, @@ -144,6 +328,150 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativef64 ON CLUSTER oximet ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_cumulativef64_local', xxHash64(splitByChar(':', timeseries_name)[1])); -- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami8_local ON CLUSTER oximeter_cluster +( + 
timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(Int8), + counts Array(UInt64) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_histogrami8_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami8 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(Int8), + counts Array(UInt64) +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_histogrami8_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramu8_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(UInt8), + counts Array(UInt64) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_histogramu8_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramu8 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(UInt8), + counts Array(UInt64) +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_histogramu8_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami16_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(Int16), + counts Array(UInt64) +) +ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_histogrami16_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami16 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(Int16), + counts Array(UInt64) +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_histogrami16_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramu16_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(UInt16), + counts Array(UInt64) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_histogramu16_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramu16 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(UInt16), + counts Array(UInt64) +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_histogramu16_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami32_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(Int32), + counts Array(UInt64) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_histogrami32_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- 
+CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami32 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(Int32), + counts Array(UInt64) +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_histogrami32_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramu32_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(UInt32), + counts Array(UInt64) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_histogramu32_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramu32 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(UInt32), + counts Array(UInt64) +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_histogramu32_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami64_local ON CLUSTER oximeter_cluster ( timeseries_name String, @@ -168,6 +496,54 @@ CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami64 ON CLUSTER oximete ) ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_histogrami64_local', xxHash64(splitByChar(':', timeseries_name)[1])); -- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramu64_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(UInt64), + counts Array(UInt64) +) +ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_histogramu64_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramu64 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(UInt64), + counts Array(UInt64) +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_histogramu64_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramf32_local ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(Float32), + counts Array(UInt64) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/measurements_histogramf32_local', '{replica}') +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramf32 ON CLUSTER oximeter_cluster +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(Float32), + counts Array(UInt64) +) +ENGINE = Distributed('oximeter_cluster', 'oximeter', 'measurements_histogramf32_local', xxHash64(splitByChar(':', timeseries_name)[1])); +-- CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramf64_local ON CLUSTER oximeter_cluster ( timeseries_name String, diff --git a/oximeter/db/src/db-single-node-init.sql b/oximeter/db/src/db-single-node-init.sql index 6872b3ddee..5f805f5725 100644 --- a/oximeter/db/src/db-single-node-init.sql +++ b/oximeter/db/src/db-single-node-init.sql @@ -11,6 +11,72 @@ ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) TTL toDateTime(timestamp) + INTERVAL 30 DAY; -- 
+CREATE TABLE IF NOT EXISTS oximeter.measurements_i8 +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum Int8 +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_u8 +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum UInt8 +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_i16 +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum Int16 +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_u16 +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum UInt16 +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_i32 +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum Int32 +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_u32 +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum UInt32 +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- CREATE TABLE IF NOT EXISTS oximeter.measurements_i64 ( timeseries_name String, @@ -22,6 +88,28 @@ ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, timestamp) TTL toDateTime(timestamp) + INTERVAL 30 DAY; -- +CREATE TABLE IF NOT EXISTS oximeter.measurements_u64 +( + 
timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum UInt64 +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_f32 +( + timeseries_name String, + timeseries_key UInt64, + timestamp DateTime64(9, 'UTC'), + datum Float32 +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- CREATE TABLE IF NOT EXISTS oximeter.measurements_f64 ( timeseries_name String, @@ -67,6 +155,30 @@ ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) TTL toDateTime(timestamp) + INTERVAL 30 DAY; -- +CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativeu64 +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + datum UInt64 +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativef32 +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + datum Float32 +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- CREATE TABLE IF NOT EXISTS oximeter.measurements_cumulativef64 ( timeseries_name String, @@ -79,6 +192,84 @@ ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) TTL toDateTime(timestamp) + INTERVAL 30 DAY; -- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami8 +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(Int8), + counts Array(UInt64) +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, start_time,
timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramu8 +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(UInt8), + counts Array(UInt64) +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami16 +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(Int16), + counts Array(UInt64) +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramu16 +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(UInt16), + counts Array(UInt64) +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami32 +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(Int32), + counts Array(UInt64) +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramu32 +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(UInt32), + counts Array(UInt64) +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- CREATE TABLE IF NOT EXISTS oximeter.measurements_histogrami64 ( 
timeseries_name String, @@ -92,6 +283,32 @@ ENGINE = MergeTree() ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) TTL toDateTime(timestamp) + INTERVAL 30 DAY; -- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramu64 +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(UInt64), + counts Array(UInt64) +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- +CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramf32 +( + timeseries_name String, + timeseries_key UInt64, + start_time DateTime64(9, 'UTC'), + timestamp DateTime64(9, 'UTC'), + bins Array(Float32), + counts Array(UInt64) +) +ENGINE = MergeTree() +ORDER BY (timeseries_name, timeseries_key, start_time, timestamp) +TTL toDateTime(timestamp) + INTERVAL 30 DAY; +-- CREATE TABLE IF NOT EXISTS oximeter.measurements_histogramf64 ( timeseries_name String, @@ -115,6 +332,66 @@ CREATE TABLE IF NOT EXISTS oximeter.fields_bool ENGINE = ReplacingMergeTree() ORDER BY (timeseries_name, field_name, field_value, timeseries_key); -- +CREATE TABLE IF NOT EXISTS oximeter.fields_i8 +( + timeseries_name String, + timeseries_key UInt64, + field_name String, + field_value Int8 +) +ENGINE = ReplacingMergeTree() +ORDER BY (timeseries_name, field_name, field_value, timeseries_key); +-- +CREATE TABLE IF NOT EXISTS oximeter.fields_u8 +( + timeseries_name String, + timeseries_key UInt64, + field_name String, + field_value UInt8 +) +ENGINE = ReplacingMergeTree() +ORDER BY (timeseries_name, field_name, field_value, timeseries_key); +-- +CREATE TABLE IF NOT EXISTS oximeter.fields_i16 +( + timeseries_name String, + timeseries_key UInt64, + field_name String, + field_value Int16 +) +ENGINE = ReplacingMergeTree() +ORDER BY (timeseries_name, field_name, field_value, timeseries_key); +-- +CREATE TABLE IF NOT EXISTS oximeter.fields_u16 +( + 
timeseries_name String, + timeseries_key UInt64, + field_name String, + field_value UInt16 +) +ENGINE = ReplacingMergeTree() +ORDER BY (timeseries_name, field_name, field_value, timeseries_key); +-- +CREATE TABLE IF NOT EXISTS oximeter.fields_i32 +( + timeseries_name String, + timeseries_key UInt64, + field_name String, + field_value Int32 +) +ENGINE = ReplacingMergeTree() +ORDER BY (timeseries_name, field_name, field_value, timeseries_key); +-- +CREATE TABLE IF NOT EXISTS oximeter.fields_u32 +( + timeseries_name String, + timeseries_key UInt64, + field_name String, + field_value UInt32 +) +ENGINE = ReplacingMergeTree() +ORDER BY (timeseries_name, field_name, field_value, timeseries_key); +-- CREATE TABLE IF NOT EXISTS oximeter.fields_i64 ( timeseries_name String, @@ -125,6 +402,16 @@ CREATE TABLE IF NOT EXISTS oximeter.fields_i64 ENGINE = ReplacingMergeTree() ORDER BY (timeseries_name, field_name, field_value, timeseries_key); -- +CREATE TABLE IF NOT EXISTS oximeter.fields_u64 +( + timeseries_name String, + timeseries_key UInt64, + field_name String, + field_value UInt64 +) +ENGINE = ReplacingMergeTree() +ORDER BY (timeseries_name, field_name, field_value, timeseries_key); +-- CREATE TABLE IF NOT EXISTS oximeter.fields_ipaddr ( timeseries_name String, diff --git a/oximeter/db/src/model.rs b/oximeter/db/src/model.rs index f915917b27..1b3b75320f 100644 --- a/oximeter/db/src/model.rs +++ b/oximeter/db/src/model.rs @@ -3,24 +3,36 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. //! 
Models for timeseries data in ClickHouse -// Copyright 2022 Oxide Computer Company -use crate::{ - DbFieldSource, FieldSchema, FieldSource, Metric, Target, TimeseriesKey, - TimeseriesName, TimeseriesSchema, -}; +// Copyright 2023 Oxide Computer Company + +use crate::DbFieldSource; +use crate::FieldSchema; +use crate::FieldSource; +use crate::Metric; +use crate::Target; +use crate::TimeseriesKey; +use crate::TimeseriesName; +use crate::TimeseriesSchema; use bytes::Bytes; -use chrono::{DateTime, Utc}; +use chrono::DateTime; +use chrono::Utc; use oximeter::histogram::Histogram; use oximeter::traits; -use oximeter::types::{ - Cumulative, Datum, DatumType, Field, FieldType, FieldValue, Measurement, - Sample, -}; -use serde::{Deserialize, Serialize}; +use oximeter::types::Cumulative; +use oximeter::types::Datum; +use oximeter::types::DatumType; +use oximeter::types::Field; +use oximeter::types::FieldType; +use oximeter::types::FieldValue; +use oximeter::types::Measurement; +use oximeter::types::Sample; +use serde::Deserialize; +use serde::Serialize; use std::collections::BTreeMap; use std::convert::TryFrom; -use std::net::{IpAddr, Ipv6Addr}; +use std::net::IpAddr; +use std::net::Ipv6Addr; use uuid::Uuid; // Wrapper type to represent a boolean in the database. 
@@ -141,7 +153,14 @@ impl From for DbTimeseriesSchema { #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)] pub enum DbFieldType { String, + I8, + U8, + I16, + U16, + I32, + U32, I64, + U64, IpAddr, Uuid, Bool, @@ -151,7 +170,14 @@ impl From for FieldType { fn from(src: DbFieldType) -> Self { match src { DbFieldType::String => FieldType::String, + DbFieldType::I8 => FieldType::I8, + DbFieldType::U8 => FieldType::U8, + DbFieldType::I16 => FieldType::I16, + DbFieldType::U16 => FieldType::U16, + DbFieldType::I32 => FieldType::I32, + DbFieldType::U32 => FieldType::U32, DbFieldType::I64 => FieldType::I64, + DbFieldType::U64 => FieldType::U64, DbFieldType::IpAddr => FieldType::IpAddr, DbFieldType::Uuid => FieldType::Uuid, DbFieldType::Bool => FieldType::Bool, @@ -162,7 +188,14 @@ impl From for DbFieldType { fn from(src: FieldType) -> Self { match src { FieldType::String => DbFieldType::String, + FieldType::I8 => DbFieldType::I8, + FieldType::U8 => DbFieldType::U8, + FieldType::I16 => DbFieldType::I16, + FieldType::U16 => DbFieldType::U16, + FieldType::I32 => DbFieldType::I32, + FieldType::U32 => DbFieldType::U32, FieldType::I64 => DbFieldType::I64, + FieldType::U64 => DbFieldType::U64, FieldType::IpAddr => DbFieldType::IpAddr, FieldType::Uuid => DbFieldType::Uuid, FieldType::Bool => DbFieldType::Bool, @@ -172,13 +205,31 @@ impl From for DbFieldType { #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)] pub enum DbDatumType { Bool, + I8, + U8, + I16, + U16, + I32, + U32, I64, + U64, + F32, F64, String, Bytes, CumulativeI64, + CumulativeU64, + CumulativeF32, CumulativeF64, + HistogramI8, + HistogramU8, + HistogramI16, + HistogramU16, + HistogramI32, + HistogramU32, HistogramI64, + HistogramU64, + HistogramF32, HistogramF64, } @@ -186,13 +237,31 @@ impl From for DbDatumType { fn from(src: DatumType) -> Self { match src { DatumType::Bool => DbDatumType::Bool, + DatumType::I8 => DbDatumType::I8, + DatumType::U8 => DbDatumType::U8, + DatumType::I16 => 
DbDatumType::I16, + DatumType::U16 => DbDatumType::U16, + DatumType::I32 => DbDatumType::I32, + DatumType::U32 => DbDatumType::U32, DatumType::I64 => DbDatumType::I64, + DatumType::U64 => DbDatumType::U64, + DatumType::F32 => DbDatumType::F32, DatumType::F64 => DbDatumType::F64, DatumType::String => DbDatumType::String, DatumType::Bytes => DbDatumType::Bytes, DatumType::CumulativeI64 => DbDatumType::CumulativeI64, + DatumType::CumulativeU64 => DbDatumType::CumulativeU64, + DatumType::CumulativeF32 => DbDatumType::CumulativeF32, DatumType::CumulativeF64 => DbDatumType::CumulativeF64, + DatumType::HistogramI8 => DbDatumType::HistogramI8, + DatumType::HistogramU8 => DbDatumType::HistogramU8, + DatumType::HistogramI16 => DbDatumType::HistogramI16, + DatumType::HistogramU16 => DbDatumType::HistogramU16, + DatumType::HistogramI32 => DbDatumType::HistogramI32, + DatumType::HistogramU32 => DbDatumType::HistogramU32, DatumType::HistogramI64 => DbDatumType::HistogramI64, + DatumType::HistogramU64 => DbDatumType::HistogramU64, + DatumType::HistogramF32 => DbDatumType::HistogramF32, DatumType::HistogramF64 => DbDatumType::HistogramF64, } } @@ -202,13 +271,31 @@ impl From for DatumType { fn from(src: DbDatumType) -> Self { match src { DbDatumType::Bool => DatumType::Bool, + DbDatumType::I8 => DatumType::I8, + DbDatumType::U8 => DatumType::U8, + DbDatumType::I16 => DatumType::I16, + DbDatumType::U16 => DatumType::U16, + DbDatumType::I32 => DatumType::I32, + DbDatumType::U32 => DatumType::U32, DbDatumType::I64 => DatumType::I64, + DbDatumType::U64 => DatumType::U64, + DbDatumType::F32 => DatumType::F32, DbDatumType::F64 => DatumType::F64, DbDatumType::String => DatumType::String, DbDatumType::Bytes => DatumType::Bytes, DbDatumType::CumulativeI64 => DatumType::CumulativeI64, + DbDatumType::CumulativeU64 => DatumType::CumulativeU64, + DbDatumType::CumulativeF32 => DatumType::CumulativeF32, DbDatumType::CumulativeF64 => DatumType::CumulativeF64, + DbDatumType::HistogramI8 => 
DatumType::HistogramI8, + DbDatumType::HistogramU8 => DatumType::HistogramU8, + DbDatumType::HistogramI16 => DatumType::HistogramI16, + DbDatumType::HistogramU16 => DatumType::HistogramU16, + DbDatumType::HistogramI32 => DatumType::HistogramI32, + DbDatumType::HistogramU32 => DatumType::HistogramU32, DbDatumType::HistogramI64 => DatumType::HistogramI64, + DbDatumType::HistogramU64 => DatumType::HistogramU64, + DbDatumType::HistogramF32 => DatumType::HistogramF32, DbDatumType::HistogramF64 => DatumType::HistogramF64, } } @@ -285,7 +372,14 @@ macro_rules! declare_field_row { } declare_field_row! {BoolFieldRow, DbBool, "bool"} +declare_field_row! {I8FieldRow, i8, "i8"} +declare_field_row! {U8FieldRow, u8, "u8"} +declare_field_row! {I16FieldRow, i16, "i16"} +declare_field_row! {U16FieldRow, u16, "u16"} +declare_field_row! {I32FieldRow, i32, "i32"} +declare_field_row! {U32FieldRow, u32, "u32"} declare_field_row! {I64FieldRow, i64, "i64"} +declare_field_row! {U64FieldRow, u64, "u64"} declare_field_row! {StringFieldRow, String, "string"} declare_field_row! {IpAddrFieldRow, Ipv6Addr, "ipaddr"} declare_field_row! {UuidFieldRow, Uuid, "uuid"} @@ -306,7 +400,15 @@ macro_rules! declare_measurement_row { } declare_measurement_row! { BoolMeasurementRow, DbBool, "bool" } +declare_measurement_row! { I8MeasurementRow, i8, "i8" } +declare_measurement_row! { U8MeasurementRow, u8, "u8" } +declare_measurement_row! { I16MeasurementRow, i16, "i16" } +declare_measurement_row! { U16MeasurementRow, u16, "u16" } +declare_measurement_row! { I32MeasurementRow, i32, "i32" } +declare_measurement_row! { U32MeasurementRow, u32, "u32" } declare_measurement_row! { I64MeasurementRow, i64, "i64" } +declare_measurement_row! { U64MeasurementRow, u64, "u64" } +declare_measurement_row! { F32MeasurementRow, f32, "f32" } declare_measurement_row! { F64MeasurementRow, f64, "f64" } declare_measurement_row! { StringMeasurementRow, String, "string" } declare_measurement_row! 
{ BytesMeasurementRow, Bytes, "bytes" } @@ -329,6 +431,8 @@ macro_rules! declare_cumulative_measurement_row { } declare_cumulative_measurement_row! { CumulativeI64MeasurementRow, i64, "cumulativei64" } +declare_cumulative_measurement_row! { CumulativeU64MeasurementRow, u64, "cumulativeu64" } +declare_cumulative_measurement_row! { CumulativeF32MeasurementRow, f32, "cumulativef32" } declare_cumulative_measurement_row! { CumulativeF64MeasurementRow, f64, "cumulativef64" } // Representation of a histogram in ClickHouse. @@ -370,7 +474,15 @@ macro_rules! declare_histogram_measurement_row { }; } +declare_histogram_measurement_row! { HistogramI8MeasurementRow, DbHistogram, "histogrami8" } +declare_histogram_measurement_row! { HistogramU8MeasurementRow, DbHistogram, "histogramu8" } +declare_histogram_measurement_row! { HistogramI16MeasurementRow, DbHistogram, "histogrami16" } +declare_histogram_measurement_row! { HistogramU16MeasurementRow, DbHistogram, "histogramu16" } +declare_histogram_measurement_row! { HistogramI32MeasurementRow, DbHistogram, "histogrami32" } +declare_histogram_measurement_row! { HistogramU32MeasurementRow, DbHistogram, "histogramu32" } declare_histogram_measurement_row! { HistogramI64MeasurementRow, DbHistogram, "histogrami64" } +declare_histogram_measurement_row! { HistogramU64MeasurementRow, DbHistogram, "histogramu64" } +declare_histogram_measurement_row! { HistogramF32MeasurementRow, DbHistogram, "histogramf32" } declare_histogram_measurement_row! 
{ HistogramF64MeasurementRow, DbHistogram, "histogramf64" } // Helper to collect the field rows from a sample @@ -390,6 +502,60 @@ fn unroll_from_source(sample: &Sample) -> BTreeMap> { }; (row.table_name(), serde_json::to_string(&row).unwrap()) } + FieldValue::I8(inner) => { + let row = I8FieldRow { + timeseries_name, + timeseries_key, + field_name, + field_value: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + FieldValue::U8(inner) => { + let row = U8FieldRow { + timeseries_name, + timeseries_key, + field_name, + field_value: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + FieldValue::I16(inner) => { + let row = I16FieldRow { + timeseries_name, + timeseries_key, + field_name, + field_value: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + FieldValue::U16(inner) => { + let row = U16FieldRow { + timeseries_name, + timeseries_key, + field_name, + field_value: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + FieldValue::I32(inner) => { + let row = I32FieldRow { + timeseries_name, + timeseries_key, + field_name, + field_value: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + FieldValue::U32(inner) => { + let row = U32FieldRow { + timeseries_name, + timeseries_key, + field_name, + field_value: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } FieldValue::I64(inner) => { let row = I64FieldRow { timeseries_name, @@ -399,6 +565,15 @@ fn unroll_from_source(sample: &Sample) -> BTreeMap> { }; (row.table_name(), serde_json::to_string(&row).unwrap()) } + FieldValue::U64(inner) => { + let row = U64FieldRow { + timeseries_name, + timeseries_key, + field_name, + field_value: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } FieldValue::String(inner) => { let row = StringFieldRow { timeseries_name, @@ -409,14 +584,10 @@ fn unroll_from_source(sample: &Sample) -> BTreeMap> { 
(row.table_name(), serde_json::to_string(&row).unwrap()) } FieldValue::IpAddr(inner) => { - // TODO-completeness Be sure to map IPV6 back to IPV4 if possible when reading. - // - // We're using the IPV6 type in ClickHouse to store all addresses. This code maps - // IPV4 into IPV6 in with an invertible mapping. The inversion method - // `to_ipv4_mapped` is currently unstable, so when we get to implementing _reading_ - // of these types from the database, we can just copy that implementation. See - // https://github.com/rust-lang/rust/issues/27709 for the tracking issue for - // stabilizing that function, which looks like it'll happen in the near future. + // We're using the IPv6 type in ClickHouse to store all + // addresses. This code maps any IPv4 address to IPv6 using an + // invertible mapping. We map things back, if possible, on the + // way out of the database. let field_value = match inner { IpAddr::V4(addr) => addr.to_ipv6_mapped(), IpAddr::V6(addr) => *addr, @@ -482,6 +653,60 @@ pub(crate) fn unroll_measurement_row(sample: &Sample) -> (String, String) { }; (row.table_name(), serde_json::to_string(&row).unwrap()) } + Datum::I8(inner) => { + let row = I8MeasurementRow { + timeseries_name, + timeseries_key, + timestamp, + datum: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::U8(inner) => { + let row = U8MeasurementRow { + timeseries_name, + timeseries_key, + timestamp, + datum: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::I16(inner) => { + let row = I16MeasurementRow { + timeseries_name, + timeseries_key, + timestamp, + datum: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::U16(inner) => { + let row = U16MeasurementRow { + timeseries_name, + timeseries_key, + timestamp, + datum: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::I32(inner) => { + let row = I32MeasurementRow { + timeseries_name, + timeseries_key, + 
timestamp, + datum: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::U32(inner) => { + let row = U32MeasurementRow { + timeseries_name, + timeseries_key, + timestamp, + datum: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } Datum::I64(inner) => { let row = I64MeasurementRow { timeseries_name, @@ -491,6 +716,24 @@ pub(crate) fn unroll_measurement_row(sample: &Sample) -> (String, String) { }; (row.table_name(), serde_json::to_string(&row).unwrap()) } + Datum::U64(inner) => { + let row = U64MeasurementRow { + timeseries_name, + timeseries_key, + timestamp, + datum: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::F32(inner) => { + let row = F32MeasurementRow { + timeseries_name, + timeseries_key, + timestamp, + datum: *inner, + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } Datum::F64(inner) => { let row = F64MeasurementRow { timeseries_name, @@ -528,6 +771,26 @@ pub(crate) fn unroll_measurement_row(sample: &Sample) -> (String, String) { }; (row.table_name(), serde_json::to_string(&row).unwrap()) } + Datum::CumulativeU64(inner) => { + let row = CumulativeU64MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: inner.value(), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::CumulativeF32(inner) => { + let row = CumulativeF32MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: inner.value(), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } Datum::CumulativeF64(inner) => { let row = CumulativeF64MeasurementRow { timeseries_name, @@ -538,6 +801,66 @@ pub(crate) fn unroll_measurement_row(sample: &Sample) -> (String, String) { }; (row.table_name(), serde_json::to_string(&row).unwrap()) } + Datum::HistogramI8(ref inner) => { + let row = HistogramI8MeasurementRow { + 
timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::from(inner), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::HistogramU8(ref inner) => { + let row = HistogramU8MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::from(inner), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::HistogramI16(ref inner) => { + let row = HistogramI16MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::from(inner), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::HistogramU16(ref inner) => { + let row = HistogramU16MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::from(inner), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::HistogramI32(ref inner) => { + let row = HistogramI32MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::from(inner), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::HistogramU32(ref inner) => { + let row = HistogramU32MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::from(inner), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } Datum::HistogramI64(ref inner) => { let row = HistogramI64MeasurementRow { timeseries_name, @@ -548,6 +871,26 @@ pub(crate) fn unroll_measurement_row(sample: &Sample) -> (String, String) { }; (row.table_name(), serde_json::to_string(&row).unwrap()) } + Datum::HistogramU64(ref inner) => { + let row = HistogramU64MeasurementRow { + timeseries_name, + timeseries_key, + start_time: 
extract_start_time(measurement), + timestamp, + datum: DbHistogram::from(inner), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } + Datum::HistogramF32(ref inner) => { + let row = HistogramF32MeasurementRow { + timeseries_name, + timeseries_key, + start_time: extract_start_time(measurement), + timestamp, + datum: DbHistogram::from(inner), + }; + (row.table_name(), serde_json::to_string(&row).unwrap()) + } Datum::HistogramF64(ref inner) => { let row = HistogramF64MeasurementRow { timeseries_name, @@ -743,9 +1086,29 @@ pub(crate) fn parse_measurement_from_row( DatumType::Bool => { parse_timeseries_scalar_gauge_measurement::(line) } + DatumType::I8 => parse_timeseries_scalar_gauge_measurement::(line), + DatumType::U8 => parse_timeseries_scalar_gauge_measurement::(line), + DatumType::I16 => { + parse_timeseries_scalar_gauge_measurement::(line) + } + DatumType::U16 => { + parse_timeseries_scalar_gauge_measurement::(line) + } + DatumType::I32 => { + parse_timeseries_scalar_gauge_measurement::(line) + } + DatumType::U32 => { + parse_timeseries_scalar_gauge_measurement::(line) + } DatumType::I64 => { parse_timeseries_scalar_gauge_measurement::(line) } + DatumType::U64 => { + parse_timeseries_scalar_gauge_measurement::(line) + } + DatumType::F32 => { + parse_timeseries_scalar_gauge_measurement::(line) + } DatumType::F64 => { parse_timeseries_scalar_gauge_measurement::(line) } @@ -758,12 +1121,42 @@ pub(crate) fn parse_measurement_from_row( DatumType::CumulativeI64 => { parse_timeseries_scalar_cumulative_measurement::(line) } + DatumType::CumulativeU64 => { + parse_timeseries_scalar_cumulative_measurement::(line) + } + DatumType::CumulativeF32 => { + parse_timeseries_scalar_cumulative_measurement::(line) + } DatumType::CumulativeF64 => { parse_timeseries_scalar_cumulative_measurement::(line) } + DatumType::HistogramI8 => { + parse_timeseries_histogram_measurement::(line) + } + DatumType::HistogramU8 => { + parse_timeseries_histogram_measurement::(line) + } 
+ DatumType::HistogramI16 => { + parse_timeseries_histogram_measurement::(line) + } + DatumType::HistogramU16 => { + parse_timeseries_histogram_measurement::(line) + } + DatumType::HistogramI32 => { + parse_timeseries_histogram_measurement::(line) + } + DatumType::HistogramU32 => { + parse_timeseries_histogram_measurement::(line) + } DatumType::HistogramI64 => { parse_timeseries_histogram_measurement::(line) } + DatumType::HistogramU64 => { + parse_timeseries_histogram_measurement::(line) + } + DatumType::HistogramF32 => { + parse_timeseries_histogram_measurement::(line) + } DatumType::HistogramF64 => { parse_timeseries_histogram_measurement::(line) } @@ -823,19 +1216,87 @@ pub(crate) fn parse_field_select_row( .expect("Missing a field value from a field select query"); let value = match expected_field.ty { FieldType::Bool => { - FieldValue::Bool(bool::from(DbBool::from(actual_field_value.as_u64().expect("Expected a u64 for a boolean field from the database")))) + FieldValue::Bool(bool::from(DbBool::from( + actual_field_value + .as_u64() + .expect("Expected a u64 for a boolean field from the database") + ))) + } + FieldType::I8 => { + let wide = actual_field_value + .as_i64() + .expect("Expected an i64 from the database for an I8 field"); + let narrow = i8::try_from(wide) + .expect("Expected a valid i8 for an I8 field from the database"); + FieldValue::from(narrow) + } + FieldType::U8 => { + let wide = actual_field_value + .as_u64() + .expect("Expected a u64 from the database for a U8 field"); + let narrow = u8::try_from(wide) + .expect("Expected a valid u8 for a U8 field from the database"); + FieldValue::from(narrow) + } + FieldType::I16 => { + let wide = actual_field_value + .as_i64() + .expect("Expected an i64 from the database for an I16 field"); + let narrow = i16::try_from(wide) + .expect("Expected a valid i16 for an I16 field from the database"); + FieldValue::from(narrow) + } + FieldType::U16 => { + let wide = actual_field_value + .as_u64() + 
.expect("Expected a u64 from the database for a U16 field"); + let narrow = u16::try_from(wide) + .expect("Expected a valid u16 for a U16 field from the database"); + FieldValue::from(narrow) + } + FieldType::I32 => { + let wide = actual_field_value + .as_i64() + .expect("Expected an i64 from the database for an I32 field"); + let narrow = i32::try_from(wide) + .expect("Expected a valid i32 for an I32 field from the database"); + FieldValue::from(narrow) + } + FieldType::U32 => { + let wide = actual_field_value + .as_u64() + .expect("Expected a u64 from the database for a U32 field"); + let narrow = u32::try_from(wide) + .expect("Expected a valid u32 for a U32 field from the database"); + FieldValue::from(narrow) + } + FieldType::I64 => { - FieldValue::from(actual_field_value.as_i64().expect("Expected an i64 for an I64 field from the database")) + FieldValue::from( + actual_field_value + .as_i64() + .expect("Expected an i64 for an I64 field from the database") + ) + } - FieldType::IpAddr => { - FieldValue::IpAddr( + FieldType::U64 => { + FieldValue::from( + actual_field_value - .as_str() - .expect("Expected an IP address string for an IpAddr field from the database") - .parse() - .expect("Invalid IP address from the database") - ) + .as_u64() + .expect("Expected a u64 for a U64 field from the database") + ) + } + FieldType::IpAddr => { + // We store values in the database as IPv6, by mapping IPv4 into + // that space. This tries to invert the mapping. If that + // succeeds, we know we stored an IPv4 address in the table.
+ let always_v6: Ipv6Addr = actual_field_value + .as_str() + .expect("Expected an IP address string for an IpAddr field from the database") + .parse() + .expect("Invalid IP address from the database"); + match always_v6.to_ipv4_mapped() { + Some(v4) => FieldValue::IpAddr(IpAddr::V4(v4)), + None => FieldValue::IpAddr(IpAddr::V6(always_v6)), + } } FieldType::Uuid => { FieldValue::Uuid( @@ -1068,7 +1529,7 @@ mod tests { } let line = r#"{"timeseries_key": 12, "start_time": "2021-01-01 00:00:00.123456789", "timestamp": "2021-01-01 01:00:00.123456789", "datum": 2 }"#; - let cumulative = Cumulative::with_start_time(start_time, 2); + let cumulative = Cumulative::with_start_time(start_time, 2u64); let datum = Datum::from(cumulative); run_test(line, &datum, start_time, timestamp); diff --git a/oximeter/db/src/query.rs b/oximeter/db/src/query.rs index 847765951e..e9e1600739 100644 --- a/oximeter/db/src/query.rs +++ b/oximeter/db/src/query.rs @@ -166,10 +166,38 @@ impl SelectQueryBuilder { } let field_value = match field_schema.ty { FieldType::String => FieldValue::from(&selector.value), + FieldType::I8 => parse_selector_field_value::( + &field_schema, + &selector.value, + )?, + FieldType::U8 => parse_selector_field_value::( + &field_schema, + &selector.value, + )?, + FieldType::I16 => parse_selector_field_value::( + &field_schema, + &selector.value, + )?, + FieldType::U16 => parse_selector_field_value::( + &field_schema, + &selector.value, + )?, + FieldType::I32 => parse_selector_field_value::( + &field_schema, + &selector.value, + )?, + FieldType::U32 => parse_selector_field_value::( + &field_schema, + &selector.value, + )?, FieldType::I64 => parse_selector_field_value::( &field_schema, &selector.value, )?, + FieldType::U64 => parse_selector_field_value::( + &field_schema, + &selector.value, + )?, FieldType::IpAddr => parse_selector_field_value::( &field_schema, &selector.value, @@ -267,7 +295,7 @@ impl SelectQueryBuilder { } } -fn measurement_table_name(ty: DatumType) -> 
String { +pub(crate) fn measurement_table_name(ty: DatumType) -> String { format!("measurements_{}", ty.to_string().to_lowercase()) } @@ -306,7 +334,7 @@ pub struct FieldSelector { comparison: Option, } -fn field_table_name(ty: FieldType) -> String { +pub(crate) fn field_table_name(ty: FieldType) -> String { format!("fields_{}", ty.to_string().to_lowercase()) } @@ -666,7 +694,14 @@ fn field_as_db_str(value: &FieldValue) -> String { FieldValue::Bool(ref inner) => { format!("{}", if *inner { 1 } else { 0 }) } + FieldValue::I8(ref inner) => format!("{}", inner), + FieldValue::U8(ref inner) => format!("{}", inner), + FieldValue::I16(ref inner) => format!("{}", inner), + FieldValue::U16(ref inner) => format!("{}", inner), + FieldValue::I32(ref inner) => format!("{}", inner), + FieldValue::U32(ref inner) => format!("{}", inner), FieldValue::I64(ref inner) => format!("{}", inner), + FieldValue::U64(ref inner) => format!("{}", inner), FieldValue::IpAddr(ref inner) => { let addr = match inner { IpAddr::V4(ref v4) => v4.to_ipv6_mapped(), @@ -950,7 +985,7 @@ mod tests { ty: FieldType::I64, comparison: Some(FieldComparison { op: FieldCmp::Eq, - value: FieldValue::from(0), + value: FieldValue::from(0i64), }), }, "Expected an exact comparison when building a query from parts", diff --git a/oximeter/instruments/src/http.rs b/oximeter/instruments/src/http.rs index 0aa958942a..dcbaf65c06 100644 --- a/oximeter/instruments/src/http.rs +++ b/oximeter/instruments/src/http.rs @@ -71,8 +71,8 @@ impl RequestLatencyHistogram { pub fn with_latency_decades( request: &RequestInfo, status_code: StatusCode, - start_decade: i8, - end_decade: i8, + start_decade: i16, + end_decade: i16, ) -> Result { Ok(Self::new( request, @@ -124,8 +124,8 @@ impl LatencyTracker { /// arguments. 
pub fn with_latency_decades( service: HttpService, - start_decade: i8, - end_decade: i8, + start_decade: i16, + end_decade: i16, ) -> Result { Ok(Self::new( service, diff --git a/oximeter/oximeter/Cargo.toml b/oximeter/oximeter/Cargo.toml index 97d2fd366d..f0549548a6 100644 --- a/oximeter/oximeter/Cargo.toml +++ b/oximeter/oximeter/Cargo.toml @@ -8,7 +8,7 @@ license = "MPL-2.0" [dependencies] bytes = { workspace = true, features = [ "serde" ] } chrono.workspace = true -num-traits.workspace = true +num.workspace = true omicron-common.workspace = true oximeter-macro-impl.workspace = true schemars = { workspace = true, features = [ "uuid1", "bytes", "chrono" ] } @@ -17,4 +17,6 @@ thiserror.workspace = true uuid.workspace = true [dev-dependencies] +approx.workspace = true +rstest.workspace = true trybuild.workspace = true diff --git a/oximeter/oximeter/src/histogram.rs b/oximeter/oximeter/src/histogram.rs index 1414296ec7..c399384ffa 100644 --- a/oximeter/oximeter/src/histogram.rs +++ b/oximeter/oximeter/src/histogram.rs @@ -3,14 +3,29 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. //! Types for managing metrics that are histograms. 
-// Copyright 2021 Oxide Computer Company -use chrono::{DateTime, Utc}; -use num_traits::Bounded; +// Copyright 2023 Oxide Computer Company + +use chrono::DateTime; +use chrono::Utc; +use num::traits::Bounded; +use num::traits::FromPrimitive; +use num::traits::Num; +use num::traits::ToPrimitive; +use num::Float; +use num::Integer; +use num::NumCast; use schemars::JsonSchema; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use serde::de::DeserializeOwned; +use serde::Deserialize; +use serde::Serialize; use std::cmp::Ordering; -use std::ops::{Bound, Range, RangeBounds, RangeFrom, RangeTo}; +use std::num::NonZeroUsize; +use std::ops::Bound; +use std::ops::Range; +use std::ops::RangeBounds; +use std::ops::RangeFrom; +use std::ops::RangeTo; use thiserror::Error; /// A trait used to identify the data types that can be used as the support of a histogram. @@ -24,25 +39,47 @@ pub trait HistogramSupport: + Serialize + DeserializeOwned + Clone - + num_traits::Zero - + num_traits::One + + Num + + FromPrimitive + + ToPrimitive + + NumCast + 'static { + type Power; + /// Return true if `self` is a finite number, not NAN or infinite. fn is_finite(&self) -> bool; } -impl HistogramSupport for i64 { - fn is_finite(&self) -> bool { - true +macro_rules! impl_int_histogram_support { + ($($type:ty),+) => { + $( + impl HistogramSupport for $type { + type Power = u16; + fn is_finite(&self) -> bool { + true + } + } + )+ } } -impl HistogramSupport for f64 { - fn is_finite(&self) -> bool { - f64::is_finite(*self) +impl_int_histogram_support! { i8, u8, i16, u16, i32, u32, i64, u64 } + +macro_rules! impl_float_histogram_support { + ($($type:ty),+) => { + $( + impl HistogramSupport for $type { + type Power = i16; + fn is_finite(&self) -> bool { + <$type>::is_finite(*self) + } + } + )+ } } +impl_float_histogram_support! { f32, f64 } + /// Errors related to constructing histograms or adding samples into them. 
#[derive(Debug, Clone, Error, JsonSchema, Serialize, Deserialize)] #[serde(tag = "type", content = "content", rename_all = "snake_case")] @@ -66,6 +103,37 @@ pub enum HistogramError { /// Bin and count arrays are of different sizes. #[error("Bin and count arrays must have the same size, found {n_bins} and {n_counts}")] ArraySizeMismatch { n_bins: usize, n_counts: usize }, + + #[error("Quantization error")] + Quantization(#[from] QuantizationError), +} + +/// Errors occurring during quantizated bin generation. +#[derive( + Clone, Debug, Deserialize, JsonSchema, Serialize, thiserror::Error, +)] +#[serde(tag = "type", content = "content", rename_all = "snake_case")] +pub enum QuantizationError { + #[error("Overflow during bin generation")] + Overflow, + + #[error("Precision error during bin generation")] + Precision, + + #[error("Base must in the range [1, 32]")] + InvalidBase, + + #[error("Number of steps must be > 1 and fit in the output type")] + InvalidSteps, + + #[error( + "Number of steps must be multiple of base and \ + evenly divide a power of the base" + )] + UnevenStepsForBase, + + #[error("Low power must be strictly less than high power")] + PowersOutOfOrder, } /// A type storing a range over `T`. @@ -86,6 +154,19 @@ pub enum BinRange { RangeFrom { start: T }, } +impl std::fmt::Display for BinRange +where + T: std::fmt::Display, +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + BinRange::RangeTo { end } => write!(f, "< {end}"), + BinRange::Range { start, end } => write!(f, "[{start}, {end})"), + BinRange::RangeFrom { start } => write!(f, ">= {start}"), + } + } +} + impl BinRange where T: HistogramSupport, @@ -236,6 +317,20 @@ pub struct Bin { // `BinRange::RangeTo` into a `BinRange::Range`. In other words, the first bin of a histogram is // _always_ a `Bin::Range` or a `Bin::RangeFrom` after construction. 
In fact, every bin is one of // those variants, the `BinRange::RangeTo` is only provided as a convenience during construction. +// +// Floating point support +// ---------------------- +// +// This type allows both integer and floating-point types as the support of the +// distribution. However, developers should be very aware of the difficulties +// around floating point comparisons. It's notoriously hard to understand, +// predict, and control floating point comparisons. Resolution changes with the +// magnitude of the values; cancellation can creep in in unexpected ways; and +// arithmetic operations often lead to rounding errors. In general, one should +// strongly prefer using an integer type for the histogram support, along with a +// well-understood unit / resolution. Developers are also encouraged to +// carefully check that the bins generated from methods like +// `Histogram::with_log_linear_bins()` are exactly the ones expected. #[derive(Debug, Clone, PartialEq, Deserialize, Serialize, JsonSchema)] #[schemars(rename = "Histogram{T}")] pub struct Histogram { @@ -468,96 +563,298 @@ where pub fn start_time(&self) -> DateTime { self.start_time } +} - /// Generate a histogram with bins linearly spaced within each decade in the range - /// `[start_decade, stop_decade)`. +impl Histogram +where + T: HistogramSupport, + u16: LogLinearBins, +{ + /// Generate a histogram with 9 linearly-spaced bins, per power of 10. + /// + /// This generates a "log-linear" histogram. Within each power of 10, the + /// bins of the histogram are linearly spaced. Note that additional bins on + /// the left will be added, as described in [`Histogram::new()`]. + /// + /// Notes + /// ----- /// - /// This generates a "log-linear" histogram. Within each power of 10, the bins of the histogram - /// are linearly spaced. Note that any additional bins on the left will be added, as described - /// in `[Histogram::new]`. 
Also, an extra bin will be added from `[0, x)`, where `x == 10 ** - /// start_decade` -- in other words, this will add the first bin from zero to the start of the - /// decades specified. + /// Why 9 bins? Most users intuitively want the bins in each power of ten to + /// have a specific width: the power of 10 itself. For example, consider the + /// bins between 10 and 100. It is often desirable to have bins placed at 10, + /// 20, 30, ..., 90, 100. Since 100 itself does _not_ fall within the 1st + /// decade of 10, i.e., the range `[10, 100)`, this means there are exactly + /// 9 bins within the range. + /// + /// This is much more easily understood compared to the actual edges we get + /// with 10 bins, which is the sequence `10, 19, 28, ...`. If one wants + /// exactly the requested number of bins (assuming it's possible), use + /// [`Histogram::with_log_linear_bins()`]. /// /// Example /// ------- + /// /// ```rust /// use oximeter::histogram::{Histogram, BinRange}; /// use std::ops::{RangeBounds, Bound}; /// - /// let hist = Histogram::span_decades(-1, 1).unwrap(); + /// let hist: Histogram = Histogram::span_decades(-1, 1).unwrap(); /// let bins = hist.iter().collect::>(); /// - /// // First bin is from the left support edge to zero - /// assert_eq!(bins[0].range.end_bound(), Bound::Excluded(&0.0)); + /// // There are 9 bins per power of 10, plus 1 additional for everything + /// // below and above the power of 10. + /// assert_eq!(bins.len(), 2 * 9 + 2); /// - /// // First decade of bins is `[0.0, 0.1, 0.2, ...)`. - /// assert_eq!(bins[1].range, BinRange::range(0.0, 0.1)); - /// assert_eq!(bins[2].range, BinRange::range(0.1, 0.2)); + /// // First bin is from the left support edge to the first bin + /// assert_eq!(bins[0].range.end_bound(), Bound::Excluded(&0.1)); + /// + /// // First decade of bins is `[0.1, 0.2, ...)`. 
+ /// assert_eq!(bins[1].range, BinRange::range(0.1, 0.2)); + /// + /// // Note that these are floats, which are notoriously difficult to + /// // compare. The bin edges are not _exact_, but quite close. + /// let BinRange::Range { start, end } = bins[2].range else { unreachable!() }; + /// let BinRange::Range { + /// start: expected_start, + /// end: expected_end, + /// } = BinRange::range(0.2, 0.3) else { unreachable!() }; + /// assert_eq!(start, expected_start); + /// approx::assert_ulps_eq!(end, expected_end); /// /// // Second decade is `[1.0, 2.0, 3.0, ...]` - /// assert_eq!(bins[10].range, BinRange::range(0.9, 1.0)); - /// assert_eq!(bins[11].range, BinRange::range(1.0, 2.0)); + /// assert_eq!(bins[9].range, BinRange::range(0.9, 1.0)); + /// assert_eq!(bins[10].range, BinRange::range(1.0, 2.0)); + /// assert_eq!(bins[11].range, BinRange::range(2.0, 3.0)); /// /// // Ends at the third decade, so the last bin is the remainder of the support - /// assert_eq!(bins[19].range, BinRange::from(9.0)); + /// assert_eq!(bins[19].range, BinRange::from(10.0)); /// ``` - pub fn span_decades( - start_decade: D, - end_decade: D, - ) -> Result - where - D: SpanDecade, - std::ops::Range: Iterator, - { - let edges = [ - vec![::zero()], - (start_decade..end_decade).flat_map(|x| x.span_decade()).collect(), - ] - .concat(); - Histogram::new(&edges) + pub fn span_decades( + start_decade: T::Power, + stop_decade: T::Power, + ) -> Result { + Self::with_log_linear_bins( + 10, + start_decade, + stop_decade, + 9.try_into().unwrap(), + ) + } + + /// Generate a histogram with evenly-spaced bin in each power of a base. + /// + /// This results in a histogram with `n_bins` bins in each decade over the + /// range `[base ** start_decade, base ** stop_decade)`. There are two + /// additional bins, for the values entirely below `base ** start_decade` + /// and >= `base ** stop_decade`. 
+ pub fn with_log_linear_bins( + base: u16, + start_decade: T::Power, + stop_decade: T::Power, + n_bins: NonZeroUsize, + ) -> Result { + let bins = base.bins(start_decade, stop_decade, n_bins)?; + Histogram::new(&bins) } } -/// A trait to support generating linearly-spaced bin edges that span the given decade. -/// -/// This trait is used to generate what's sometimes called a "log-linear" histogram support. This -/// support has linearly spaced bins over a range, and many ranges, each of which is -/// logarithmically-spaced. Trait accepts a decade, a power of 10, and linearly spaces bins over -/// that decade, from `10 ** decade` to `10 ** (decade + 1)`, not inclusive of the right edges. -/// Note that the left bin is `1.0`, `10.0`, etc, not zero. -/// -/// Example -/// ------- -/// ```rust -/// use oximeter::histogram::SpanDecade; -/// let x = 0i8.span_decade(); -/// assert_eq!(x, &[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]); -/// ``` -/// -/// Note that the `SpanDecade` trait is parametrized by _two_ types, `D` is the input type, and `T` -/// is the output type, the type of elements generated in the return vector. This crate only -/// defines this trait for type pairs `(i8, f64)` and `(u8, i64)`. That is, calling -/// `i8::span_decade()` generates a `Vec` and `u8::span_decade()` generates a `Vec`. -pub trait SpanDecade -where - T: HistogramSupport, +/// A trait for generating linearly-spaced bins over a set of powers. +pub trait LogLinearBins: + ToPrimitive + FromPrimitive + Num { - /// Return a set of bin edges linearly-spaced across the decade defined by `self`, i.e, `10 ** - /// self`. - fn span_decade(&self) -> Vec; + /// Compute the left bin edges for a histogram with `count` bins over each + /// power of the base. 
+ fn bins( + &self, + lo: Base, + hi: Base, + count: NonZeroUsize, + ) -> Result, QuantizationError>; } -impl SpanDecade for i8 { - fn span_decade(&self) -> Vec { - let mul = 10.0f64.powi((*self).into()); - (1..10).map(|x| (x as f64) * mul).collect() +impl LogLinearBins for u16 +where + T: HistogramSupport + Integer, +{ + fn bins( + &self, + lo: u16, + hi: u16, + count: NonZeroUsize, + ) -> Result, QuantizationError> { + // Basic sanity checks + if *self == 0 || *self > 32 { + return Err(QuantizationError::InvalidBase); + } + if count.get() < 2 { + return Err(QuantizationError::InvalidSteps); + } + if lo >= hi { + return Err(QuantizationError::PowersOutOfOrder); + } + + // The base must be <= the number of steps + 1. The one is because we're + // computing left bin edges. + if >::into(*self) > count.get() + 1 { + return Err(QuantizationError::InvalidSteps); + } + + // The highest power must be representable in the target type. + if self.checked_pow(hi.into()).is_none() { + return Err(QuantizationError::Overflow); + } + + // Convert everything into wide integers for easy computations that + // won't overflow during interim processing. + // + // Note that we unwrap in a few places below, where we're sure the + // narrowing conversion cannot fail, such as to a u32. + let base = >::from(*self); + let lo = >::from(lo); + let hi = >::from(hi); + let count = ::from(count.get()) + .ok_or(QuantizationError::Overflow)?; + + fn bin_count_divides_spacing( + base: u64, + lo: u64, + hi: u64, + count: u64, + ) -> bool { + let powers = lo..hi; + let next_powers = lo + 1..hi + 1; + powers.zip(next_powers).all(|(lo, hi)| { + let lo = base.pow(lo as _); + let hi = base.pow(hi as _); + let distance = hi - lo; + dbg!(distance, count); + distance.is_multiple_of(&count) + }) + } + + if !bin_count_divides_spacing(base, lo, hi, count) { + return Err(QuantizationError::UnevenStepsForBase); + } + + // Compute the next step size. 
+ fn next_step(next: u64, count: u64) -> Result { + if next > count { + next.checked_div(count).ok_or(QuantizationError::Precision) + } else { + Ok(1) + } + } + + let mut out = Vec::with_capacity( + count + .checked_mul(hi - lo) + .ok_or(QuantizationError::Overflow)? + .try_into() + .unwrap(), + ); + let powers = lo..hi; + let mut power = lo; + let mut value = base + .checked_pow(lo.try_into().unwrap()) + .ok_or(QuantizationError::Overflow)?; + let mut next_start = base + .checked_pow((lo + 1).try_into().unwrap()) + .ok_or(QuantizationError::Overflow)?; + let mut step = next_step(next_start - value, count)?; + while powers.contains(&power) { + out.push( + ::from(value) + .ok_or(QuantizationError::Overflow)?, + ); + if value < next_start { + value = value + .checked_add(step) + .ok_or(QuantizationError::Overflow)?; + continue; + } + next_start = next_start + .checked_mul(base) + .ok_or(QuantizationError::Overflow)?; + power = power.checked_add(1).ok_or(QuantizationError::Overflow)?; + step = next_step(next_start - value, count)?; + value = + value.checked_add(step).ok_or(QuantizationError::Overflow)?; + } + Ok(out) } } -impl SpanDecade for u8 { - fn span_decade(&self) -> Vec { - let mul = 10i64.pow((*self).into()); - (1..10).map(|x| x * mul).collect() +impl LogLinearBins for u16 +where + T: HistogramSupport + Float, +{ + fn bins( + &self, + lo: i16, + hi: i16, + count: NonZeroUsize, + ) -> Result, QuantizationError> { + // Basic sanity checks. + // + // Note that for floating point, we are significantly less constrained + // in terms of the relationship between the base and the count. For + // integers, we ensure that they're relatively co-divisible, so that we + // are not losing precision by computing the steps. Floats are more + // permissive. 
+ if *self == 0 || *self > 32 { + return Err(QuantizationError::InvalidBase); + } + if count.get() < 2 { + return Err(QuantizationError::InvalidSteps); + } + if lo >= hi { + return Err(QuantizationError::PowersOutOfOrder); + } + + // Compute the next step size. + fn next_step(next: f64, count: u64) -> Result { + let count_ = ::from(count) + .ok_or(QuantizationError::Precision)?; + Ok(next / count_) + } + + let count = ::from(count.get()) + .ok_or(QuantizationError::Overflow)?; + let base = ::from(*self).unwrap(); + let n_elems = count + .checked_mul( + ::from(hi - lo) + .ok_or(QuantizationError::Overflow)?, + ) + .ok_or(QuantizationError::Overflow)? + .try_into() + .unwrap(); + let mut out = Vec::with_capacity(n_elems); + let powers = lo..hi; + + let mut power = lo; + let mut start = base.powi(lo.into()); + let mut stop = base.powi((lo + 1).into()); + let mut step = next_step(stop - start, count)?; + while powers.contains(&power) { + for i in 0..count { + let value = start + step * ::from(i).unwrap(); + out.push( + ::from(value) + .ok_or(QuantizationError::Precision)?, + ); + } + + // Move to next power of the base. 
+ start = stop; + stop *= base; + step = next_step(stop - start, count)?; + power += 1; + } + out.push( + ::from(start).ok_or(QuantizationError::Overflow)?, + ); + Ok(out) } } @@ -576,38 +873,7 @@ where #[cfg(test)] mod tests { use super::*; - - fn assert_approx_eq(x: f64, y: f64) { - assert!((x - y).abs() < f64::EPSILON); - } - - #[test] - fn test_span_decade_f64() { - fn run_test(decade: i8) { - let diff = 10.0f64.powi(decade.into()); - let x = decade.span_decade(); - for (x, y) in x.iter().zip(x.iter().skip(1)) { - assert_approx_eq(y - x, diff); - } - } - run_test(0); - run_test(-1); - run_test(1); - } - - #[test] - fn test_span_decade_i64() { - fn run_test(decade: u8) { - let diff = 10i64.pow(decade.into()); - let x = decade.span_decade(); - for (x, y) in x.iter().zip(x.iter().skip(1)) { - assert_eq!(y - x, diff); - } - } - run_test(0); - run_test(1); - run_test(2); - } + use std::convert::TryInto; #[test] fn test_ensure_finite() { @@ -615,6 +881,15 @@ mod tests { assert!(ensure_finite(i64::MIN).is_ok()); assert!(ensure_finite(i64::MAX).is_ok()); + assert!(ensure_finite(0u64).is_ok()); + assert!(ensure_finite(u64::MIN).is_ok()); + assert!(ensure_finite(u64::MAX).is_ok()); + + assert!(ensure_finite(0.0).is_ok()); + assert!(ensure_finite(f32::NEG_INFINITY).is_err()); + assert!(ensure_finite(f32::INFINITY).is_err()); + assert!(ensure_finite(f32::NAN).is_err()); + assert!(ensure_finite(0.0).is_ok()); assert!(ensure_finite(f64::NEG_INFINITY).is_err()); assert!(ensure_finite(f64::INFINITY).is_err()); @@ -623,7 +898,7 @@ mod tests { #[test] fn test_bin_range_to() { - let range = BinRange::to(10); + let range = BinRange::to(10_u64); assert!(!range.contains(&100)); assert!(range.contains(&0)); assert_eq!(range.cmp(&0), Ordering::Equal); @@ -632,7 +907,7 @@ mod tests { #[test] fn test_bin_range_from() { - let range = BinRange::from(10); + let range = BinRange::from(10_u64); assert!(range.contains(&100)); assert!(!range.contains(&0)); assert_eq!(range.cmp(&0), 
Ordering::Less); @@ -641,7 +916,7 @@ mod tests { #[test] fn test_bin_range() { - let range = BinRange::range(0, 10); + let range = BinRange::range(0_u64, 10); assert!(!range.contains(&100)); assert!(range.contains(&0)); assert!(!range.contains(&10)); @@ -694,7 +969,7 @@ mod tests { #[test] fn test_histogram_with_overlapping_bins() { - let bins = &[(..1).into(), (0..10).into()]; + let bins = &[(..1_u64).into(), (0..10).into()]; assert!(Histogram::with_bins(bins).is_err()); } @@ -831,8 +1106,260 @@ mod tests { #[test] fn test_span_decades() { - let hist = Histogram::span_decades(0i8, 3i8).unwrap(); + let hist = Histogram::::span_decades(0, 3).unwrap(); + println!("{:#?}", hist.bins); + // Total number of bins is: + // + // 1 -- for bin from (MIN, 1) + // 9 * 3 -- for each power of 10 in [10 ** 0, 10 ** 3) + // 1 -- for [10 ** 3, MAX) + // + // = 29; + assert_eq!(hist.n_bins(), 29); + } + + #[test] + fn test_span_decades_other_counts_f64() { + const N_BINS: usize = 20; + let hist = Histogram::::with_log_linear_bins( + 10, + 0, + 1, + N_BINS.try_into().unwrap(), + ) + .unwrap(); + // Total number of bins is: + // + // 1 -- for [MIN, 0) + // N_BINS -- for [10 ** 0, 10 ** 1) + // 1 -- for [10**1, MAX) println!("{:#?}", hist.bins); - assert_eq!(hist.n_bins(), 9 * 3 + 2); // 1 for bin from (-infty, 1), 1 for (0, 0.1) + assert_eq!(hist.n_bins(), N_BINS + 2); + } + + #[test] + fn test_span_decades_other_counts_u64_resolution_too_low() { + let err = Histogram::::with_log_linear_bins( + 10, + 0, + 1, + 20.try_into().unwrap(), + ) + .unwrap_err(); + assert!(matches!( + err, + HistogramError::Quantization(QuantizationError::UnevenStepsForBase) + )); + } + + #[test] + fn test_span_decades_other_counts_u64_resolution_ok() { + const N_BINS: usize = 30; + let hist = Histogram::::with_log_linear_bins( + 10, + 1, + 2, + N_BINS.try_into().unwrap(), + ) + .unwrap(); + // Total number of bins is: + // 1 -- for [0, 1) + // N_BINS -- for each power of ten in [1, 2) + // 1 -- for the last 
left edge + println!("{:#?}", hist.bins); + assert_eq!(hist.n_bins(), N_BINS + 2); + } + + // Sanity check that we compute exactly the expected bins for an easy case, + // where any output type can represent the exact set of bins. + #[test] + fn test_log_linear_bins_all_representable() { + const EXPECTED: &[u8] = &[ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, + ]; + let base = 10_u16; + let lo = 0; + let hi = 2; + let n_bins = NonZeroUsize::new(9).unwrap(); + + let bins: Vec = base.bins(lo, hi, n_bins).unwrap(); + assert_eq!(bins, EXPECTED); + + fn cmp(bins: Vec, expected: &[u8]) + where + T: TryFrom + std::fmt::Debug + std::cmp::PartialEq, + >::Error: std::fmt::Debug, + { + assert_eq!( + bins, + expected + .iter() + .copied() + .map(|x| T::try_from(x).unwrap()) + .collect::>() + ); + } + + let bins: Vec = base.bins(lo, hi, n_bins).unwrap(); + cmp(bins, EXPECTED); + let bins: Vec = base.bins(lo, hi, n_bins).unwrap(); + cmp(bins, EXPECTED); + let bins: Vec = base.bins(lo, hi, n_bins).unwrap(); + cmp(bins, EXPECTED); + let bins: Vec = base.bins(lo, hi, n_bins).unwrap(); + cmp(bins, EXPECTED); + let bins: Vec = base.bins(lo, hi, n_bins).unwrap(); + cmp(bins, EXPECTED); + let bins: Vec = base.bins(lo, hi, n_bins).unwrap(); + cmp(bins, EXPECTED); + let bins: Vec = base.bins(lo, hi, n_bins).unwrap(); + cmp(bins, EXPECTED); + + let base = 10_u16; + let lo = 0_i16; + let hi = 2; + let n_bins = NonZeroUsize::new(9).unwrap(); + let bins: Vec = base.bins(lo, hi, n_bins).unwrap(); + cmp(bins, EXPECTED); + let bins: Vec = base.bins(lo, hi, n_bins).unwrap(); + cmp(bins, EXPECTED); + } + + #[test] + fn test_log_linear_bins_integer_size_checks() { + // Number of steps must be >= 2. + let base = 10_u16; + let res: Result, _> = + base.bins(0_u16, 2, 1.try_into().unwrap()); + assert!(matches!(res.unwrap_err(), QuantizationError::InvalidSteps)); + + // 10 ** 100 overflows a u64. 
+ let res: Result, _> = + base.bins(2_u16, 100, 10.try_into().unwrap()); + assert!(matches!(res.unwrap_err(), QuantizationError::Overflow)); + + // 100 bins can't evenly divide the provided base. + let res: Result, _> = base.bins(0, 1, 100.try_into().unwrap()); + assert!(matches!( + res.unwrap_err(), + QuantizationError::UnevenStepsForBase + )); + + // Base is larger than the number of steps + let res: Result, _> = base.bins(0, 1, 5.try_into().unwrap()); + assert!(matches!(res.unwrap_err(), QuantizationError::InvalidSteps)); + } + + #[test] + fn test_log_linear_bins_small_bin_count() { + let base = 10_u16; + let _: Vec = base + .bins(3, 4, 20.try_into().unwrap()) + .expect("Should be able to compute widely spaced bins"); + } + + // These are explicit tests against NumPy's linspace implementation, which + // we're trying to emulate. Specifically, assuming NumPy is installed, the + // following code will generate these values: + // + // ```python + // def space(base: int, lo: int, hi: int, count: int) -> np.ndarray: + // parts = np.concatenate([ + // np.linspace(base ** b, base ** (b + 1), count, endpoint=False) + // for b in range(lo, hi) + // ], axis=0) + // return np.append(parts, np.atleast_1d(base ** hi)) + // ``` + #[rstest::rstest] + #[case( + 2, + -3, + 0, + 7, + &[ + 0.125 , 0.14285714, 0.16071429, 0.17857143, 0.19642857, + 0.21428571, 0.23214286, 0.25 , 0.28571429, 0.32142857, + 0.35714286, 0.39285714, 0.42857143, 0.46428571, 0.5 , + 0.57142857, 0.64285714, 0.71428571, 0.78571429, 0.85714286, + 0.92857143, 1. 
+ ] + )] + #[case( + 10, + -1, + 3, + 15, + &[ + 1.0e-01, 1.6e-01, 2.2e-01, 2.8e-01, 3.4e-01, 4.0e-01, 4.6e-01, + 5.2e-01, 5.8e-01, 6.4e-01, 7.0e-01, 7.6e-01, 8.2e-01, 8.8e-01, + 9.4e-01, 1.0e+00, 1.6e+00, 2.2e+00, 2.8e+00, 3.4e+00, 4.0e+00, + 4.6e+00, 5.2e+00, 5.8e+00, 6.4e+00, 7.0e+00, 7.6e+00, 8.2e+00, + 8.8e+00, 9.4e+00, 1.0e+01, 1.6e+01, 2.2e+01, 2.8e+01, 3.4e+01, + 4.0e+01, 4.6e+01, 5.2e+01, 5.8e+01, 6.4e+01, 7.0e+01, 7.6e+01, + 8.2e+01, 8.8e+01, 9.4e+01, 1.0e+02, 1.6e+02, 2.2e+02, 2.8e+02, + 3.4e+02, 4.0e+02, 4.6e+02, 5.2e+02, 5.8e+02, 6.4e+02, 7.0e+02, + 7.6e+02, 8.2e+02, 8.8e+02, 9.4e+02, 1.0e+03 + ] + )] + #[case( + 10, + -12, + -10, + 10, + &[ + 1.0e-12, 1.9e-12, 2.8e-12, 3.7e-12, 4.6e-12, 5.5e-12, 6.4e-12, + 7.3e-12, 8.2e-12, 9.1e-12, 1.0e-11, 1.9e-11, 2.8e-11, 3.7e-11, + 4.6e-11, 5.5e-11, 6.4e-11, 7.3e-11, 8.2e-11, 9.1e-11, 1.0e-10 + ], + )] + #[case( + 10, + 10, + 12, + 10, + &[ + 1.0e+10, 1.9e+10, 2.8e+10, 3.7e+10, 4.6e+10, 5.5e+10, 6.4e+10, + 7.3e+10, 8.2e+10, 9.1e+10, 1.0e+11, 1.9e+11, 2.8e+11, 3.7e+11, + 4.6e+11, 5.5e+11, 6.4e+11, 7.3e+11, 8.2e+11, 9.1e+11, 1.0e+12 + ] + )] + fn test_log_linear_bins_f64_matches_reference_implementation( + #[case] base: u16, + #[case] lo: i16, + #[case] hi: i16, + #[case] count: usize, + #[case] expected: &[f64], + ) { + let bins: Vec = + base.bins(lo, hi, count.try_into().unwrap()).unwrap(); + println!("{bins:#?}"); + println!("{expected:#?}"); + assert!( + all_close(&bins, expected, 1e-8, 1e-5), + "Linspaced bins don't match reference implementation" + ); + } + + fn all_close(a: &[T], b: &[T], atol: T, rtol: T) -> bool + where + T: Float, + { + if a.len() != b.len() { + return false; + } + a.iter() + .zip(b.iter()) + .all(|(a, b)| (*a - *b).abs() <= (atol + rtol * b.abs())) + } + + #[test] + fn test_foo() { + let bins: Vec = 10u16.bins(1, 3, 30.try_into().unwrap()).unwrap(); + println!("{bins:?}"); + dbg!(bins.len()); + let hist = Histogram::new(&bins).unwrap(); + for bin in hist.iter() { + println!("{}", 
bin.range); + } } } diff --git a/oximeter/oximeter/src/lib.rs b/oximeter/oximeter/src/lib.rs index 78c8802563..2ced404eae 100644 --- a/oximeter/oximeter/src/lib.rs +++ b/oximeter/oximeter/src/lib.rs @@ -95,7 +95,7 @@ //! `Producer`s may be registered with the same `ProducerServer`, each with potentially different //! sampling intervals. -// Copyright 2021 Oxide Computer Company +// Copyright 2023 Oxide Computer Company pub use oximeter_macro_impl::*; @@ -111,11 +111,18 @@ pub mod histogram; pub mod test_util; pub mod traits; pub mod types; -pub use traits::{Metric, Producer, Target}; -pub use types::{ - Datum, DatumType, Field, FieldType, FieldValue, Measurement, MetricsError, - Sample, -}; + +pub use traits::Metric; +pub use traits::Producer; +pub use traits::Target; +pub use types::Datum; +pub use types::DatumType; +pub use types::Field; +pub use types::FieldType; +pub use types::FieldValue; +pub use types::Measurement; +pub use types::MetricsError; +pub use types::Sample; /// Construct the timeseries name for a Target and Metric. pub fn timeseries_name(target: &T, metric: &M) -> String diff --git a/oximeter/oximeter/src/traits.rs b/oximeter/oximeter/src/traits.rs index 8cf0c5d350..096abb8023 100644 --- a/oximeter/oximeter/src/traits.rs +++ b/oximeter/oximeter/src/traits.rs @@ -3,16 +3,24 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. //! Traits used to describe metric data and its sources. 
+ // Copyright 2021 Oxide Computer Company use crate::histogram::Histogram; use crate::types; use crate::types::{Measurement, Sample}; -use crate::{DatumType, Field, FieldType, FieldValue, MetricsError}; +use crate::DatumType; +use crate::Field; +use crate::FieldType; +use crate::FieldValue; +use crate::MetricsError; use bytes::Bytes; -use chrono::{DateTime, Utc}; -use num_traits::{One, Zero}; -use std::ops::{Add, AddAssign}; +use chrono::DateTime; +use chrono::Utc; +use num::traits::One; +use num::traits::Zero; +use std::ops::Add; +use std::ops::AddAssign; /// The `Target` trait identifies a source of metric data by a sequence of fields. /// @@ -199,85 +207,87 @@ pub trait Datum: Clone { fn datum_type(&self) -> DatumType; } -impl Datum for bool { - fn datum_type(&self) -> DatumType { - DatumType::Bool - } -} - -impl Datum for i64 { - fn datum_type(&self) -> DatumType { - DatumType::I64 - } -} - -impl Datum for f64 { - fn datum_type(&self) -> DatumType { - DatumType::F64 - } -} - -impl Datum for String { - fn datum_type(&self) -> DatumType { - DatumType::String - } +macro_rules! impl_datum { + ($type:ty, $variant:path) => { + impl Datum for $type { + fn datum_type(&self) -> DatumType { + $variant + } + } + }; } -impl Datum for Bytes { - fn datum_type(&self) -> DatumType { - DatumType::Bytes - } -} +macro_rules! impl_datum_with_start_time { + ($type:ty, $variant:path) => { + impl Datum for $type { + fn start_time(&self) -> Option> { + Some(types::Cumulative::start_time(&self)) + } -impl Datum for types::Cumulative { - fn start_time(&self) -> Option> { - Some(types::Cumulative::start_time(&self)) - } - fn datum_type(&self) -> DatumType { - DatumType::CumulativeI64 - } + fn datum_type(&self) -> DatumType { + $variant + } + } + }; } -impl Datum for types::Cumulative { - fn start_time(&self) -> Option> { - Some(types::Cumulative::start_time(&self)) - } - fn datum_type(&self) -> DatumType { - DatumType::CumulativeF64 - } -} +macro_rules! 
impl_datum_for_histogram { + ($type:ty, $variant:path) => { + impl Datum for $type { + fn start_time(&self) -> Option> { + Some(Histogram::start_time(&self)) + } -impl Datum for Histogram { - fn start_time(&self) -> Option> { - Some(self.start_time()) - } - fn datum_type(&self) -> DatumType { - DatumType::HistogramI64 - } + fn datum_type(&self) -> DatumType { + $variant + } + } + }; } -impl Datum for Histogram { - fn start_time(&self) -> Option> { - Some(self.start_time()) - } - fn datum_type(&self) -> DatumType { - DatumType::HistogramF64 - } -} +impl_datum! { bool, DatumType::Bool } +impl_datum! { i8, DatumType::I8 } +impl_datum! { u8, DatumType::U8 } +impl_datum! { i16, DatumType::I16 } +impl_datum! { u16, DatumType::U16 } +impl_datum! { i32, DatumType::I32 } +impl_datum! { u32, DatumType::U32 } +impl_datum! { i64, DatumType::I64 } +impl_datum! { u64, DatumType::U64 } +impl_datum! { f32, DatumType::F32 } +impl_datum! { f64, DatumType::F64 } +impl_datum! { String, DatumType::String } +impl_datum! { Bytes, DatumType::Bytes } + +impl_datum_with_start_time! { types::Cumulative, DatumType::CumulativeI64 } +impl_datum_with_start_time! { types::Cumulative, DatumType::CumulativeU64 } +impl_datum_with_start_time! { types::Cumulative, DatumType::CumulativeF32 } +impl_datum_with_start_time! { types::Cumulative, DatumType::CumulativeF64 } + +impl_datum_for_histogram! { Histogram, DatumType::HistogramI64 } +impl_datum_for_histogram! { Histogram, DatumType::HistogramU64 } +impl_datum_for_histogram! { Histogram, DatumType::HistogramF32 } +impl_datum_for_histogram! { Histogram, DatumType::HistogramF64 } /// A trait identifying types used in [`types::Cumulative`] data. 
pub trait Cumulative: Datum + Add + AddAssign + Copy + One + Zero {} impl Cumulative for i64 {} +impl Cumulative for u64 {} +impl Cumulative for f32 {} impl Cumulative for f64 {} /// A trait identifying types used as gauges pub trait Gauge: Datum {} -impl Gauge for String {} -impl Gauge for bool {} -impl Gauge for i64 {} -impl Gauge for f64 {} +macro_rules! impl_gauge { + ($($type:ty,)+) => { + $( + impl Gauge for $type {} + )+ + }; +} +impl_gauge! { bool, i8, u8, i16, u16, i32, u32, i64, u64, f32, f64, String, Bytes, } pub use crate::histogram::HistogramSupport; diff --git a/oximeter/oximeter/src/types.rs b/oximeter/oximeter/src/types.rs index 351b53c803..aa61a426e3 100644 --- a/oximeter/oximeter/src/types.rs +++ b/oximeter/oximeter/src/types.rs @@ -3,22 +3,30 @@ // file, You can obtain one at https://mozilla.org/MPL/2.0/. //! Types used to describe targets, metrics, and measurements. -// Copyright 2021 Oxide Computer Company + +// Copyright 2023 Oxide Computer Company use crate::histogram; use crate::traits; use crate::Producer; use bytes::Bytes; -use chrono::{DateTime, Utc}; -use num_traits::{One, Zero}; +use chrono::DateTime; +use chrono::Utc; +use num::traits::One; +use num::traits::Zero; use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; +use serde::Deserialize; +use serde::Serialize; use std::boxed::Box; use std::collections::BTreeMap; use std::fmt; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; -use std::ops::{Add, AddAssign}; -use std::sync::{Arc, Mutex}; +use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::Ipv6Addr; +use std::ops::Add; +use std::ops::AddAssign; +use std::sync::Arc; +use std::sync::Mutex; use thiserror::Error; use uuid::Uuid; @@ -38,7 +46,14 @@ use uuid::Uuid; #[serde(rename_all = "snake_case")] pub enum FieldType { String, + I8, + U8, + I16, + U16, + I32, + U32, I64, + U64, IpAddr, Uuid, Bool, @@ -61,7 +76,14 @@ macro_rules! impl_field_type_from { } impl_field_type_from! 
{ String, FieldType::String } +impl_field_type_from! { i8, FieldType::I8 } +impl_field_type_from! { u8, FieldType::U8 } +impl_field_type_from! { i16, FieldType::I16 } +impl_field_type_from! { u16, FieldType::U16 } +impl_field_type_from! { i32, FieldType::I32 } +impl_field_type_from! { u32, FieldType::U32 } impl_field_type_from! { i64, FieldType::I64 } +impl_field_type_from! { u64, FieldType::U64 } impl_field_type_from! { IpAddr, FieldType::IpAddr } impl_field_type_from! { Uuid, FieldType::Uuid } impl_field_type_from! { bool, FieldType::Bool } @@ -73,7 +95,14 @@ impl_field_type_from! { bool, FieldType::Bool } #[serde(tag = "type", content = "value", rename_all = "snake_case")] pub enum FieldValue { String(String), + I8(i8), + U8(u8), + I16(i16), + U16(u16), + I32(i32), + U32(u32), I64(i64), + U64(u64), IpAddr(IpAddr), Uuid(Uuid), Bool(bool), @@ -84,7 +113,14 @@ impl FieldValue { pub fn field_type(&self) -> FieldType { match self { FieldValue::String(_) => FieldType::String, + FieldValue::I8(_) => FieldType::I8, + FieldValue::U8(_) => FieldType::U8, + FieldValue::I16(_) => FieldType::I16, + FieldValue::U16(_) => FieldType::U16, + FieldValue::I32(_) => FieldType::I32, + FieldValue::U32(_) => FieldType::U32, FieldValue::I64(_) => FieldType::I64, + FieldValue::U64(_) => FieldType::U64, FieldValue::IpAddr(_) => FieldType::IpAddr, FieldValue::Uuid(_) => FieldType::Uuid, FieldValue::Bool(_) => FieldType::Bool, @@ -103,9 +139,30 @@ impl FieldValue { }; match field_type { FieldType::String => Ok(FieldValue::String(s.to_string())), + FieldType::I8 => { + Ok(FieldValue::I8(s.parse().map_err(|_| make_err())?)) + } + FieldType::U8 => { + Ok(FieldValue::U8(s.parse().map_err(|_| make_err())?)) + } + FieldType::I16 => { + Ok(FieldValue::I16(s.parse().map_err(|_| make_err())?)) + } + FieldType::U16 => { + Ok(FieldValue::U16(s.parse().map_err(|_| make_err())?)) + } + FieldType::I32 => { + Ok(FieldValue::I32(s.parse().map_err(|_| make_err())?)) + } + FieldType::U32 => { + 
Ok(FieldValue::U32(s.parse().map_err(|_| make_err())?)) + } FieldType::I64 => { Ok(FieldValue::I64(s.parse().map_err(|_| make_err())?)) } + FieldType::U64 => { + Ok(FieldValue::U64(s.parse().map_err(|_| make_err())?)) + } FieldType::IpAddr => { Ok(FieldValue::IpAddr(s.parse().map_err(|_| make_err())?)) } @@ -123,7 +180,14 @@ impl fmt::Display for FieldValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { FieldValue::String(ref inner) => write!(f, "{}", inner), + FieldValue::I8(ref inner) => write!(f, "{}", inner), + FieldValue::U8(ref inner) => write!(f, "{}", inner), + FieldValue::I16(ref inner) => write!(f, "{}", inner), + FieldValue::U16(ref inner) => write!(f, "{}", inner), + FieldValue::I32(ref inner) => write!(f, "{}", inner), + FieldValue::U32(ref inner) => write!(f, "{}", inner), FieldValue::I64(ref inner) => write!(f, "{}", inner), + FieldValue::U64(ref inner) => write!(f, "{}", inner), FieldValue::IpAddr(ref inner) => write!(f, "{}", inner), FieldValue::Uuid(ref inner) => write!(f, "{}", inner), FieldValue::Bool(ref inner) => write!(f, "{}", inner), @@ -131,17 +195,28 @@ impl fmt::Display for FieldValue { } } -impl From for FieldValue { - fn from(value: i64) -> Self { - FieldValue::I64(value) - } +macro_rules! impl_field_value_from { + ($int:ty, $variant:path) => { + impl From<$int> for FieldValue { + fn from(value: $int) -> Self { + $variant(value) + } + } + }; } -impl From for FieldValue { - fn from(value: String) -> Self { - FieldValue::String(value) - } -} +impl_field_value_from! { i8, FieldValue::I8 } +impl_field_value_from! { u8, FieldValue::U8 } +impl_field_value_from! { i16, FieldValue::I16 } +impl_field_value_from! { u16, FieldValue::U16 } +impl_field_value_from! { i32, FieldValue::I32 } +impl_field_value_from! { u32, FieldValue::U32 } +impl_field_value_from! { i64, FieldValue::I64 } +impl_field_value_from! { u64, FieldValue::U64 } +impl_field_value_from! { String, FieldValue::String } +impl_field_value_from! 
{ IpAddr, FieldValue::IpAddr } +impl_field_value_from! { Uuid, FieldValue::Uuid } +impl_field_value_from! { bool, FieldValue::Bool } impl From<&str> for FieldValue { fn from(value: &str) -> Self { @@ -149,12 +224,6 @@ impl From<&str> for FieldValue { } } -impl From for FieldValue { - fn from(value: IpAddr) -> Self { - FieldValue::IpAddr(value) - } -} - impl From for FieldValue { fn from(value: Ipv4Addr) -> Self { FieldValue::IpAddr(IpAddr::V4(value)) @@ -167,18 +236,6 @@ impl From for FieldValue { } } -impl From for FieldValue { - fn from(value: Uuid) -> Self { - FieldValue::Uuid(value) - } -} - -impl From for FieldValue { - fn from(value: bool) -> Self { - FieldValue::Bool(value) - } -} - impl From<&T> for FieldValue where T: Clone + Into, @@ -214,13 +271,31 @@ pub struct Field { #[serde(rename_all = "snake_case")] pub enum DatumType { Bool, + I8, + U8, + I16, + U16, + I32, + U32, I64, + U64, + F32, F64, String, Bytes, CumulativeI64, + CumulativeU64, + CumulativeF32, CumulativeF64, + HistogramI8, + HistogramU8, + HistogramI16, + HistogramU16, + HistogramI32, + HistogramU32, HistogramI64, + HistogramU64, + HistogramF32, HistogramF64, } @@ -230,8 +305,18 @@ impl DatumType { matches!( self, DatumType::CumulativeI64 + | DatumType::CumulativeU64 + | DatumType::CumulativeF32 | DatumType::CumulativeF64 + | DatumType::HistogramI8 + | DatumType::HistogramU8 + | DatumType::HistogramI16 + | DatumType::HistogramU16 + | DatumType::HistogramI32 + | DatumType::HistogramU32 | DatumType::HistogramI64 + | DatumType::HistogramU64 + | DatumType::HistogramF32 | DatumType::HistogramF64 ) } @@ -248,13 +333,31 @@ impl std::fmt::Display for DatumType { #[serde(tag = "type", content = "datum", rename_all = "snake_case")] pub enum Datum { Bool(bool), + I8(i8), + U8(u8), + I16(i16), + U16(u16), + I32(i32), + U32(u32), I64(i64), + U64(u64), + F32(f32), F64(f64), String(String), Bytes(Bytes), CumulativeI64(Cumulative), + CumulativeU64(Cumulative), + CumulativeF32(Cumulative), 
CumulativeF64(Cumulative), + HistogramI8(histogram::Histogram), + HistogramU8(histogram::Histogram), + HistogramI16(histogram::Histogram), + HistogramU16(histogram::Histogram), + HistogramI32(histogram::Histogram), + HistogramU32(histogram::Histogram), HistogramI64(histogram::Histogram), + HistogramU64(histogram::Histogram), + HistogramF32(histogram::Histogram), HistogramF64(histogram::Histogram), } @@ -263,13 +366,31 @@ impl Datum { pub fn datum_type(&self) -> DatumType { match self { Datum::Bool(_) => DatumType::Bool, + Datum::I8(_) => DatumType::I8, + Datum::U8(_) => DatumType::U8, + Datum::I16(_) => DatumType::I16, + Datum::U16(_) => DatumType::U16, + Datum::I32(_) => DatumType::I32, + Datum::U32(_) => DatumType::U32, Datum::I64(_) => DatumType::I64, + Datum::U64(_) => DatumType::U64, + Datum::F32(_) => DatumType::F32, Datum::F64(_) => DatumType::F64, Datum::String(_) => DatumType::String, Datum::Bytes(_) => DatumType::Bytes, Datum::CumulativeI64(_) => DatumType::CumulativeI64, + Datum::CumulativeU64(_) => DatumType::CumulativeU64, + Datum::CumulativeF32(_) => DatumType::CumulativeF32, Datum::CumulativeF64(_) => DatumType::CumulativeF64, + Datum::HistogramI8(_) => DatumType::HistogramI8, + Datum::HistogramU8(_) => DatumType::HistogramU8, + Datum::HistogramI16(_) => DatumType::HistogramI16, + Datum::HistogramU16(_) => DatumType::HistogramU16, + Datum::HistogramI32(_) => DatumType::HistogramI32, + Datum::HistogramU32(_) => DatumType::HistogramU32, Datum::HistogramI64(_) => DatumType::HistogramI64, + Datum::HistogramU64(_) => DatumType::HistogramU64, + Datum::HistogramF32(_) => DatumType::HistogramF32, Datum::HistogramF64(_) => DatumType::HistogramF64, } } @@ -282,14 +403,32 @@ impl Datum { /// Return the start time of the underlying data, if this is cumulative, or `None` pub fn start_time(&self) -> Option> { match self { - Datum::Bool(_) => None, - Datum::I64(_) => None, - Datum::F64(_) => None, - Datum::String(_) => None, - Datum::Bytes(_) => None, + 
Datum::Bool(_) + | Datum::I8(_) + | Datum::U8(_) + | Datum::I16(_) + | Datum::U16(_) + | Datum::I32(_) + | Datum::U32(_) + | Datum::I64(_) + | Datum::U64(_) + | Datum::F32(_) + | Datum::F64(_) + | Datum::String(_) + | Datum::Bytes(_) => None, Datum::CumulativeI64(ref inner) => Some(inner.start_time()), + Datum::CumulativeU64(ref inner) => Some(inner.start_time()), + Datum::CumulativeF32(ref inner) => Some(inner.start_time()), Datum::CumulativeF64(ref inner) => Some(inner.start_time()), + Datum::HistogramI8(ref inner) => Some(inner.start_time()), + Datum::HistogramU8(ref inner) => Some(inner.start_time()), + Datum::HistogramI16(ref inner) => Some(inner.start_time()), + Datum::HistogramU16(ref inner) => Some(inner.start_time()), + Datum::HistogramI32(ref inner) => Some(inner.start_time()), + Datum::HistogramU32(ref inner) => Some(inner.start_time()), Datum::HistogramI64(ref inner) => Some(inner.start_time()), + Datum::HistogramU64(ref inner) => Some(inner.start_time()), + Datum::HistogramF32(ref inner) => Some(inner.start_time()), Datum::HistogramF64(ref inner) => Some(inner.start_time()), } } @@ -313,13 +452,31 @@ macro_rules! impl_from { } impl_from! { bool, Bool } +impl_from! { i8, I8 } +impl_from! { u8, U8 } +impl_from! { i16, I16 } +impl_from! { u16, U16 } +impl_from! { i32, I32 } +impl_from! { u32, U32 } impl_from! { i64, I64 } +impl_from! { u64, U64 } +impl_from! { f32, F32 } impl_from! { f64, F64 } impl_from! { String, String } impl_from! { Bytes, Bytes } impl_from! { Cumulative, CumulativeI64 } +impl_from! { Cumulative, CumulativeU64 } +impl_from! { Cumulative, CumulativeF32 } impl_from! { Cumulative, CumulativeF64 } +impl_from! { histogram::Histogram, HistogramI8 } +impl_from! { histogram::Histogram, HistogramU8 } +impl_from! { histogram::Histogram, HistogramI16 } +impl_from! { histogram::Histogram, HistogramU16 } +impl_from! { histogram::Histogram, HistogramI32 } +impl_from! { histogram::Histogram, HistogramU32 } impl_from! 
{ histogram::Histogram, HistogramI64 } +impl_from! { histogram::Histogram, HistogramU64 } +impl_from! { histogram::Histogram, HistogramF32 } impl_from! { histogram::Histogram, HistogramF64 } impl From<&str> for Datum { @@ -701,19 +858,24 @@ impl ProducerRegistry { #[cfg(test)] mod tests { use super::histogram::Histogram; + use super::Cumulative; + use super::Datum; + use super::DatumType; use super::Field; use super::FieldSet; + use super::FieldType; + use super::FieldValue; + use super::Measurement; use super::MetricsError; use super::Sample; - use super::{ - Cumulative, Datum, DatumType, FieldType, FieldValue, Measurement, - }; use crate::test_util; use crate::types; - use crate::{Metric, Target}; + use crate::Metric; + use crate::Target; use bytes::Bytes; use std::collections::BTreeMap; - use std::net::IpAddr; + use std::net::Ipv4Addr; + use std::net::Ipv6Addr; use uuid::Uuid; #[test] @@ -803,36 +965,39 @@ mod tests { assert!(sample.measurement.start_time().is_some()); } - #[test] - fn test_field_value_parse_as_type() { - let as_string = "some string"; - let as_i64 = "2"; - let as_ipaddr = "::1"; - let as_uuid = "3c937cd9-348f-42c2-bd44-d0a4dfffabd9"; - let as_bool = "false"; - - assert_eq!( - FieldValue::parse_as_type(&as_string, FieldType::String).unwrap(), - FieldValue::from(&as_string), - ); - assert_eq!( - FieldValue::parse_as_type(&as_i64, FieldType::I64).unwrap(), - FieldValue::from(2_i64), - ); - assert_eq!( - FieldValue::parse_as_type(&as_ipaddr, FieldType::IpAddr).unwrap(), - FieldValue::from(as_ipaddr.parse::().unwrap()), - ); - assert_eq!( - FieldValue::parse_as_type(&as_uuid, FieldType::Uuid).unwrap(), - FieldValue::from(as_uuid.parse::().unwrap()), - ); - assert_eq!( - FieldValue::parse_as_type(&as_bool, FieldType::Bool).unwrap(), - FieldValue::from(false), - ); - - assert!(FieldValue::parse_as_type(&as_string, FieldType::Uuid).is_err()); + #[rstest::rstest] + #[case::as_string("some string", FieldValue::String("some string".into()))] + 
#[case::as_i8("2", FieldValue::I8(2))] + #[case::as_u8("2", FieldValue::U8(2))] + #[case::as_i16("2", FieldValue::I16(2))] + #[case::as_u16("2", FieldValue::U16(2))] + #[case::as_i32("2", FieldValue::I32(2))] + #[case::as_u32("2", FieldValue::U32(2))] + #[case::as_i64("2", FieldValue::I64(2))] + #[case::as_u64("2", FieldValue::U64(2))] + #[case::as_uuid( + "684f42af-0500-4fc9-be93-fdf1a7ac36ad", + FieldValue::Uuid(uuid::uuid!("684f42af-0500-4fc9-be93-fdf1a7ac36ad")) + )] + #[case::as_ipv4addr("127.0.0.1", FieldValue::from(Ipv4Addr::LOCALHOST))] + #[case::as_ipv6addr("::1", FieldValue::from(Ipv6Addr::LOCALHOST))] + fn test_field_value_parse_as_type( + #[case] unparsed: &str, + #[case] expected: FieldValue, + ) { + let parsed = + FieldValue::parse_as_type(unparsed, expected.field_type()).unwrap(); + assert_eq!(parsed, expected); + } + + #[rstest::rstest] + #[case::as_u64(FieldType::U64)] + #[case::as_uuid(FieldType::Uuid)] + #[case::as_bool(FieldType::Bool)] + #[case::as_ipaddr(FieldType::IpAddr)] + #[case::as_uuid(FieldType::Uuid)] + fn test_field_value_parse_as_wrong_type(#[case] ty: FieldType) { + assert!(FieldValue::parse_as_type("baddcafe", ty).is_err()); } #[test]