diff --git a/Cargo.lock b/Cargo.lock index 397427d6f1..a6025cff8e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1749,7 +1749,7 @@ dependencies = [ [[package]] name = "derror-macro" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=a4c956e44fc9b75b58b83ad2eec22f6bd9005262#a4c956e44fc9b75b58b83ad2eec22f6bd9005262" +source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" dependencies = [ "darling 0.20.3", "proc-macro2", @@ -3436,7 +3436,7 @@ dependencies = [ [[package]] name = "illumos-sys-hdrs" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=a4c956e44fc9b75b58b83ad2eec22f6bd9005262#a4c956e44fc9b75b58b83ad2eec22f6bd9005262" +source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" [[package]] name = "illumos-utils" @@ -3841,7 +3841,7 @@ dependencies = [ [[package]] name = "kstat-macro" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=a4c956e44fc9b75b58b83ad2eec22f6bd9005262#a4c956e44fc9b75b58b83ad2eec22f6bd9005262" +source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" dependencies = [ "quote", "syn 2.0.52", @@ -4426,6 +4426,8 @@ name = "nexus-db-model" version = "0.1.0" dependencies = [ "anyhow", + "camino", + "camino-tempfile", "chrono", "db-macros", "derive-where", @@ -4444,6 +4446,7 @@ dependencies = [ "omicron-rpaths", "omicron-uuid-kinds", "omicron-workspace-hack", + "once_cell", "parse-display", "pq-sys", "rand 0.8.5", @@ -4456,6 +4459,7 @@ dependencies = [ "steno", "strum 0.26.1", "thiserror", + "tokio", "uuid 1.7.0", ] @@ -5833,7 +5837,7 @@ dependencies = [ [[package]] name = "opte" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=a4c956e44fc9b75b58b83ad2eec22f6bd9005262#a4c956e44fc9b75b58b83ad2eec22f6bd9005262" +source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" dependencies = [ "cfg-if", "derror-macro", @@ -5851,7 +5855,7 @@ dependencies = [ [[package]] name = "opte-api" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=a4c956e44fc9b75b58b83ad2eec22f6bd9005262#a4c956e44fc9b75b58b83ad2eec22f6bd9005262" +source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" dependencies = [ "illumos-sys-hdrs", "ipnetwork", @@ -5863,7 +5867,7 @@ dependencies = [ [[package]] name = "opte-ioctl" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=a4c956e44fc9b75b58b83ad2eec22f6bd9005262#a4c956e44fc9b75b58b83ad2eec22f6bd9005262" +source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" dependencies = [ "libc", "libnet", @@ -5937,7 +5941,7 @@ dependencies = [ [[package]] name = "oxide-vpc" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/opte?rev=a4c956e44fc9b75b58b83ad2eec22f6bd9005262#a4c956e44fc9b75b58b83ad2eec22f6bd9005262" +source = "git+https://github.com/oxidecomputer/opte?rev=7ee353a470ea59529ee1b34729681da887aa88ce#7ee353a470ea59529ee1b34729681da887aa88ce" dependencies = [ "cfg-if", "illumos-sys-hdrs", diff --git a/Cargo.toml b/Cargo.toml index 8c493e3699..829d7c5330 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -298,7 +298,7 @@ 
omicron-sled-agent = { path = "sled-agent" } omicron-test-utils = { path = "test-utils" } omicron-zone-package = "0.11.0" oxide-client = { path = "clients/oxide-client" } -oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "a4c956e44fc9b75b58b83ad2eec22f6bd9005262", features = [ "api", "std" ] } +oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "7ee353a470ea59529ee1b34729681da887aa88ce", features = [ "api", "std" ] } once_cell = "1.19.0" openapi-lint = { git = "https://github.com/oxidecomputer/openapi-lint", branch = "main" } openapiv3 = "2.0.0" @@ -306,7 +306,7 @@ openapiv3 = "2.0.0" openssl = "0.10" openssl-sys = "0.9" openssl-probe = "0.1.5" -opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "a4c956e44fc9b75b58b83ad2eec22f6bd9005262" } +opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "7ee353a470ea59529ee1b34729681da887aa88ce" } oso = "0.27" owo-colors = "4.0.0" oximeter = { path = "oximeter/oximeter" } diff --git a/clients/dpd-client/src/lib.rs b/clients/dpd-client/src/lib.rs index 1186e1722f..a898c31781 100644 --- a/clients/dpd-client/src/lib.rs +++ b/clients/dpd-client/src/lib.rs @@ -17,6 +17,10 @@ use slog::info; use slog::Logger; +use types::LinkCreate; +use types::LinkId; +use types::LinkSettings; +use types::PortSettings; include!(concat!(env!("OUT_DIR"), "/dpd-client.rs")); @@ -787,3 +791,39 @@ impl From for MacAddr { } } } + +impl Eq for PortSettings {} + +impl PartialEq for PortSettings { + fn eq(&self, other: &Self) -> bool { + self.links == other.links + } +} + +impl Eq for LinkSettings {} + +impl PartialEq for LinkSettings { + fn eq(&self, other: &Self) -> bool { + self.addrs == other.addrs && self.params == other.params + } +} + +impl Eq for LinkCreate {} + +impl PartialEq for LinkCreate { + fn eq(&self, other: &Self) -> bool { + self.autoneg == other.autoneg + && self.fec == other.fec + && self.kr == other.kr + && self.lane == other.lane + && self.speed == other.speed + } +} + +impl Eq for LinkId {} + +impl PartialEq for LinkId { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} diff --git a/clients/sled-agent-client/src/lib.rs b/clients/sled-agent-client/src/lib.rs index de5b3fba9a..4a66d5a567 100644 --- a/clients/sled-agent-client/src/lib.rs +++ b/clients/sled-agent-client/src/lib.rs @@ -8,8 +8,10 @@ use anyhow::Context; use async_trait::async_trait; use omicron_common::api::internal::shared::NetworkInterface; use std::convert::TryFrom; +use std::hash::Hash; use std::net::IpAddr; use std::net::SocketAddr; +use types::{BgpConfig, BgpPeerConfig, PortConfigV1, RouteConfig}; use uuid::Uuid; progenitor::generate_api!( @@ -596,3 +598,47 @@ impl TestInterfaces for Client { .expect("disk_finish_transition() failed unexpectedly"); } } + +impl Eq for BgpConfig {} + +impl Hash for BgpConfig { + fn hash(&self, state: &mut H) { + self.asn.hash(state); + self.originate.hash(state); + } +} + +impl Hash for BgpPeerConfig { + fn hash(&self, state: &mut H) { + self.addr.hash(state); + self.asn.hash(state); + self.port.hash(state); + self.hold_time.hash(state); + self.connect_retry.hash(state); + self.delay_open.hash(state); + self.idle_hold_time.hash(state); + self.keepalive.hash(state); + } +} + +impl Hash for RouteConfig { + fn hash(&self, state: &mut H) { + self.destination.hash(state); + self.nexthop.hash(state); + } +} + +impl Eq for PortConfigV1 {} + +impl Hash for PortConfigV1 { + fn hash(&self, state: &mut H) { + self.addresses.hash(state); + self.autoneg.hash(state); + 
self.bgp_peers.hash(state); + self.port.hash(state); + self.routes.hash(state); + self.switch.hash(state); + self.uplink_port_fec.hash(state); + self.uplink_port_speed.hash(state); + } +} diff --git a/common/src/api/internal/shared.rs b/common/src/api/internal/shared.rs index bf825fd2e7..3fc1eb9879 100644 --- a/common/src/api/internal/shared.rs +++ b/common/src/api/internal/shared.rs @@ -266,7 +266,9 @@ pub enum ExternalPortDiscovery { } /// Switchport Speed options -#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] +#[derive( + Copy, Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema, Hash, +)] #[serde(rename_all = "snake_case")] pub enum PortSpeed { #[serde(alias = "0G")] @@ -290,7 +292,9 @@ pub enum PortSpeed { } /// Switchport FEC options -#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema)] +#[derive( + Copy, Clone, Debug, Deserialize, Serialize, PartialEq, JsonSchema, Hash, +)] #[serde(rename_all = "snake_case")] pub enum PortFec { Firecode, diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index cc498d0ad1..9e0632d722 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -583,7 +583,7 @@ impl DbArgs { /// incompatible because in practice it may well not matter and it's very /// valuable for this tool to work if it possibly can. async fn check_schema_version(datastore: &DataStore) { - let expected_version = nexus_db_model::schema::SCHEMA_VERSION; + let expected_version = nexus_db_model::SCHEMA_VERSION; let version_check = datastore.database_schema_version().await; match version_check { diff --git a/nexus/db-model/Cargo.toml b/nexus/db-model/Cargo.toml index b1bb68b8f0..45a086a5b3 100644 --- a/nexus/db-model/Cargo.toml +++ b/nexus/db-model/Cargo.toml @@ -9,6 +9,7 @@ omicron-rpaths.workspace = true [dependencies] anyhow.workspace = true +camino.workspace = true chrono.workspace = true derive-where.workspace = true diesel = { workspace = true, features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] } @@ -17,6 +18,7 @@ ipnetwork.workspace = true macaddr.workspace = true newtype_derive.workspace = true omicron-uuid-kinds.workspace = true +once_cell.workspace = true parse-display.workspace = true # See omicron-rpaths for more about the "pq-sys" dependency. pq-sys = "*" @@ -29,6 +31,7 @@ serde_json.workspace = true steno.workspace = true strum.workspace = true thiserror.workspace = true +tokio.workspace = true uuid.workspace = true db-macros.workspace = true @@ -42,4 +45,5 @@ sled-agent-client.workspace = true omicron-workspace-hack.workspace = true [dev-dependencies] +camino-tempfile.workspace = true expectorate.workspace = true diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index d2b676a3da..7124103b30 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -67,6 +67,7 @@ mod role_assignment; mod role_builtin; pub mod saga_types; pub mod schema; +mod schema_versions; mod service; mod service_kind; mod silo; @@ -158,6 +159,7 @@ pub use region::*; pub use region_snapshot::*; pub use role_assignment::*; pub use role_builtin::*; +pub use schema_versions::*; pub use semver_version::*; pub use service::*; pub use service_kind::*; diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 771d6836dc..6becea4c2a 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -6,15 +6,6 @@ //! //! NOTE: Should be kept up-to-date with dbinit.sql. 
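As an aside, not part of this patch: the Eq/PartialEq impls added to the dpd-client types and the Hash impls added to the sled-agent-client types above exist so that these progenitor-generated config types can be compared and kept in hash sets when Nexus reconciles desired switch state against actual state. A minimal sketch of the same pattern, using a simplified stand-in struct (the real generated types have richer field types than String):

    use std::collections::HashSet;
    use std::hash::{Hash, Hasher};

    // Simplified stand-in for a progenitor-generated config type that does
    // not derive Eq/Hash itself.
    #[derive(Debug, Clone, PartialEq)]
    struct RouteConfig {
        destination: String,
        nexthop: String,
    }

    impl Eq for RouteConfig {}

    // Field-by-field hashing, kept consistent with PartialEq so the
    // "a == b implies hash(a) == hash(b)" contract holds.
    impl Hash for RouteConfig {
        fn hash<H: Hasher>(&self, state: &mut H) {
            self.destination.hash(state);
            self.nexthop.hash(state);
        }
    }

    fn main() {
        let desired: HashSet<RouteConfig> = HashSet::from([RouteConfig {
            destination: "0.0.0.0/0".to_string(),
            nexthop: "192.0.2.1".to_string(),
        }]);
        let current: HashSet<RouteConfig> = HashSet::new();
        // Set difference is the payoff: it yields exactly the routes that
        // still need to be pushed to the switch.
        let to_add: Vec<&RouteConfig> = desired.difference(&current).collect();
        println!("{to_add:?}");
    }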
-use omicron_common::api::external::SemverVersion;
-
-/// The version of the database schema this particular version of Nexus was
-/// built against.
-///
-/// This should be updated whenever the schema is changed. For more details,
-/// refer to: schema/crdb/README.adoc
-pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(45, 0, 0);
-
 table! {
     disk (id) {
         id -> Uuid,
diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs
new file mode 100644
index 0000000000..c267ce65d8
--- /dev/null
+++ b/nexus/db-model/src/schema_versions.rs
@@ -0,0 +1,800 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Database schema versions and upgrades
+//!
+//! For details, see schema/crdb/README.adoc in the root of this repository.
+
+use anyhow::{bail, ensure, Context};
+use camino::Utf8Path;
+use omicron_common::api::external::SemverVersion;
+use once_cell::sync::Lazy;
+use std::collections::BTreeMap;
+
+/// The version of the database schema this particular version of Nexus was
+/// built against
+///
+/// This must be updated when you change the database schema. Refer to
+/// schema/crdb/README.adoc in the root of this repository for details.
+pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(46, 0, 0);
+
+/// List of all past database schema versions, in *reverse* order
+///
+/// If you want to change the Omicron database schema, you must update this.
+static KNOWN_VERSIONS: Lazy<Vec<KnownVersion>> = Lazy::new(|| {
+    vec![
+        // +- The next version goes here! Duplicate this line, uncomment
+        // |  the *second* copy, then update that copy for your version,
+        // |  leaving the first copy as an example for the next person.
+        // v
+        // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"),
+        KnownVersion::new(46, "first-named-migration"),
+        // The first many schema versions only vary by major or patch number and
+        // their path is predictable based on the version number. (This was
+        // historically a problem because two pull requests both adding a new
+        // schema version might merge cleanly but produce an invalid result.)
+ KnownVersion::legacy(45, 0), + KnownVersion::legacy(44, 0), + KnownVersion::legacy(43, 0), + KnownVersion::legacy(42, 0), + KnownVersion::legacy(41, 0), + KnownVersion::legacy(40, 0), + KnownVersion::legacy(39, 0), + KnownVersion::legacy(38, 0), + KnownVersion::legacy(37, 1), + KnownVersion::legacy(37, 0), + KnownVersion::legacy(36, 0), + KnownVersion::legacy(35, 0), + KnownVersion::legacy(34, 0), + KnownVersion::legacy(33, 1), + KnownVersion::legacy(33, 0), + KnownVersion::legacy(32, 0), + KnownVersion::legacy(31, 0), + KnownVersion::legacy(30, 0), + KnownVersion::legacy(29, 0), + KnownVersion::legacy(28, 0), + KnownVersion::legacy(27, 0), + KnownVersion::legacy(26, 0), + KnownVersion::legacy(25, 0), + KnownVersion::legacy(24, 0), + KnownVersion::legacy(23, 1), + KnownVersion::legacy(23, 0), + KnownVersion::legacy(22, 0), + KnownVersion::legacy(21, 0), + KnownVersion::legacy(20, 0), + KnownVersion::legacy(19, 0), + KnownVersion::legacy(18, 0), + KnownVersion::legacy(17, 0), + KnownVersion::legacy(16, 0), + KnownVersion::legacy(15, 0), + KnownVersion::legacy(14, 0), + KnownVersion::legacy(13, 0), + KnownVersion::legacy(12, 0), + KnownVersion::legacy(11, 0), + KnownVersion::legacy(10, 0), + KnownVersion::legacy(9, 0), + KnownVersion::legacy(8, 0), + KnownVersion::legacy(7, 0), + KnownVersion::legacy(6, 0), + KnownVersion::legacy(5, 0), + KnownVersion::legacy(4, 0), + KnownVersion::legacy(3, 3), + KnownVersion::legacy(3, 2), + KnownVersion::legacy(3, 1), + KnownVersion::legacy(3, 0), + KnownVersion::legacy(2, 0), + KnownVersion::legacy(1, 0), + ] +}); + +/// The earliest supported schema version. +pub const EARLIEST_SUPPORTED_VERSION: SemverVersion = + SemverVersion::new(1, 0, 0); + +/// Describes one version of the database schema +#[derive(Debug, Clone)] +struct KnownVersion { + /// All versions have an associated SemVer. We only use the major number in + /// terms of determining compatibility. + semver: SemverVersion, + + /// Path relative to the root of the schema ("schema/crdb" in the root of + /// this repo) where this version's update SQL files are stored + relative_path: String, +} + +impl KnownVersion { + /// Generate a `KnownVersion` for a new schema version + /// + /// `major` should be the next available integer (one more than the previous + /// version's major number). + /// + /// `relative_path` is the path relative to "schema/crdb" (from the root of + /// this repository) where the SQL files live that will update the schema + /// from the previous version to this version. + fn new(major: u64, relative_path: &str) -> KnownVersion { + let semver = SemverVersion::new(major, 0, 0); + KnownVersion { semver, relative_path: relative_path.to_owned() } + } + + /// Generate a `KnownVersion` for a version that predates the current + /// directory naming scheme + /// + /// These versions varied in both major and patch numbers and the path to + /// their SQL files was predictable based solely on the version. 
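As an aside, not part of this patch: under the new scheme, a migration is a two-line Rust change plus a new SQL directory. A hedged sketch of what a hypothetical next version could look like (the number 47 and the directory name "add-widget-table" are invented for illustration):

    use omicron_common::api::external::SemverVersion;

    // Step 1: bump the version this binary expects.
    pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(47, 0, 0);

    // Step 2: add a matching entry at the *top* of KNOWN_VERSIONS (the list
    // is kept in reverse order), leaving the commented example line alone:
    //
    //     KnownVersion::new(47, "add-widget-table"),
    //     KnownVersion::new(46, "first-named-migration"),
    //     ...
    //
    // Step 3: put the SQL itself in schema/crdb/add-widget-table/up1.sql
    // (or up01.sql, up02.sql, ... for a multi-step migration), per
    // schema/crdb/README.adoc.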
+ /// + /// **This should not be used for new schema versions.** + fn legacy(major: u64, patch: u64) -> KnownVersion { + let semver = SemverVersion::new(major, 0, patch); + let relative_path = semver.to_string(); + KnownVersion { semver, relative_path } + } +} + +impl std::fmt::Display for KnownVersion { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.semver.fmt(f) + } +} + +/// Load and inspect the set of all known schema versions +#[derive(Debug, Clone)] +pub struct AllSchemaVersions { + versions: BTreeMap, +} + +impl AllSchemaVersions { + /// Load the set of all known schema versions from the given directory tree + /// + /// The directory should contain exactly one directory for each version. + /// Each version's directory should contain the SQL files that carry out + /// schema migration from the previous version. See schema/crdb/README.adoc + /// for details. + pub fn load( + schema_directory: &Utf8Path, + ) -> Result { + Self::load_known_versions(schema_directory, KNOWN_VERSIONS.iter()) + } + + /// Load a specific set of known schema versions using the legacy + /// conventions from the given directory tree + /// + /// This is only provided for certain integration tests. + #[doc(hidden)] + pub fn load_specific_legacy_versions<'a>( + schema_directory: &Utf8Path, + versions: impl Iterator, + ) -> Result { + let known_versions: Vec<_> = versions + .map(|v| { + assert_eq!(v.0.minor, 0); + KnownVersion::legacy(v.0.major, v.0.patch) + }) + .collect(); + + Self::load_known_versions(schema_directory, known_versions.iter()) + } + + fn load_known_versions<'a>( + schema_directory: &Utf8Path, + known_versions: impl Iterator, + ) -> Result { + let mut versions = BTreeMap::new(); + for known_version in known_versions { + let version_path = + schema_directory.join(&known_version.relative_path); + let schema_version = SchemaVersion::load_from_directory( + known_version.semver.clone(), + &version_path, + ) + .with_context(|| { + format!( + "loading schema version {} from {:?}", + known_version.semver, schema_directory, + ) + })?; + + versions.insert(known_version.semver.clone(), schema_version); + } + + Ok(AllSchemaVersions { versions }) + } + + /// Iterate over the set of all known schema versions in order starting with + /// the earliest supported version + pub fn iter_versions(&self) -> impl Iterator { + self.versions.values() + } + + /// Return whether `version` is a known schema version + pub fn contains_version(&self, version: &SemverVersion) -> bool { + self.versions.contains_key(version) + } + + /// Iterate over the known schema versions within `bounds` + /// + /// This is generally used to iterate over all the schema versions between + /// two specific versions. + pub fn versions_range( + &self, + bounds: R, + ) -> impl Iterator + where + R: std::ops::RangeBounds, + { + self.versions.range(bounds).map(|(_, v)| v) + } +} + +/// Describes a single version of the schema, including the SQL steps to get +/// from the previous version to the current one +#[derive(Debug, Clone)] +pub struct SchemaVersion { + semver: SemverVersion, + upgrade_from_previous: Vec, +} + +impl SchemaVersion { + /// Reads a "version directory" and reads all SQL changes into a result Vec. + /// + /// Files that do not begin with "up" and end with ".sql" are ignored. 
The + /// collection of `up*.sql` files must fall into one of these two + /// conventions: + /// + /// * "up.sql" with no other files + /// * "up1.sql", "up2.sql", ..., beginning from 1, optionally with leading + /// zeroes (e.g., "up01.sql", "up02.sql", ...). There is no maximum value, + /// but there may not be any gaps (e.g., if "up2.sql" and "up4.sql" exist, + /// so must "up3.sql") and there must not be any repeats (e.g., if + /// "up1.sql" exists, "up01.sql" must not exist). + /// + /// Any violation of these two rules will result in an error. Collections of + /// the second form (`up1.sql`, ...) will be sorted numerically. + fn load_from_directory( + semver: SemverVersion, + directory: &Utf8Path, + ) -> Result { + let mut up_sqls = vec![]; + let entries = directory + .read_dir_utf8() + .with_context(|| format!("Failed to readdir {directory}"))?; + for entry in entries { + let entry = entry.with_context(|| { + format!("Reading {directory:?}: invalid entry") + })?; + let pathbuf = entry.into_path(); + + // Ensure filename ends with ".sql" + if pathbuf.extension() != Some("sql") { + continue; + } + + // Ensure filename begins with "up", and extract anything in between + // "up" and ".sql". + let Some(remaining_filename) = pathbuf + .file_stem() + .and_then(|file_stem| file_stem.strip_prefix("up")) + else { + continue; + }; + + // Ensure the remaining filename is either empty (i.e., the filename + // is exactly "up.sql") or parseable as an unsigned integer. We give + // "up.sql" the "up_number" 0 (checked in the loop below), and + // require any other number to be nonzero. + if remaining_filename.is_empty() { + up_sqls.push((0, pathbuf)); + } else { + let Ok(up_number) = remaining_filename.parse::() else { + bail!( + "invalid filename (non-numeric `up*.sql`): {pathbuf}", + ); + }; + ensure!( + up_number != 0, + "invalid filename (`up*.sql` numbering must start at 1): \ + {pathbuf}", + ); + up_sqls.push((up_number, pathbuf)); + } + } + up_sqls.sort(); + + // Validate that we have a reasonable sequence of `up*.sql` numbers. + match up_sqls.as_slice() { + [] => bail!("no `up*.sql` files found"), + [(up_number, path)] => { + // For a single file, we allow either `up.sql` (keyed as + // up_number=0) or `up1.sql`; reject any higher number. + ensure!( + *up_number <= 1, + "`up*.sql` numbering must start at 1: found first file \ + {path}" + ); + } + _ => { + for (i, (up_number, path)) in up_sqls.iter().enumerate() { + // We have 2 or more `up*.sql`; they should be numbered + // exactly 1..=up_sqls.len(). + if i as u64 + 1 != *up_number { + // We know we have at least two elements, so report an + // error referencing either the next item (if we're + // first) or the previous item (if we're not first). + let (path_a, path_b) = if i == 0 { + let (_, next_path) = &up_sqls[1]; + (path, next_path) + } else { + let (_, prev_path) = &up_sqls[i - 1]; + (prev_path, path) + }; + bail!("invalid `up*.sql` sequence: {path_a}, {path_b}"); + } + } + } + } + + // This collection of `up*.sql` files is valid. Read them all, in + // order. + let mut steps = vec![]; + for (_, path) in up_sqls.into_iter() { + let sql = std::fs::read_to_string(&path) + .with_context(|| format!("Cannot read {path}"))?; + // unwrap: `file_name()` is documented to return `None` only when + // the path is `..`. But we got this path from reading the + // directory, and that process explicitly documents that it skips + // `..`. 
+ steps.push(SchemaUpgradeStep { + label: path.file_name().unwrap().to_string(), + sql, + }); + } + + Ok(SchemaVersion { semver, upgrade_from_previous: steps }) + } + + /// Returns the semver for this schema version + pub fn semver(&self) -> &SemverVersion { + &self.semver + } + + /// Returns true if this schema version is the one that the current program + /// thinks is the latest (current) one + pub fn is_current_software_version(&self) -> bool { + self.semver == SCHEMA_VERSION + } + + /// Iterate over the SQL steps required to update the database schema from + /// the previous version to this one + pub fn upgrade_steps(&self) -> impl Iterator { + self.upgrade_from_previous.iter() + } +} + +impl std::fmt::Display for SchemaVersion { + fn fmt( + &self, + f: &mut std::fmt::Formatter<'_>, + ) -> Result<(), std::fmt::Error> { + self.semver.fmt(f) + } +} + +/// Describes a single file containing a schema change, as SQL. +#[derive(Debug, Clone)] +pub struct SchemaUpgradeStep { + label: String, + sql: String, +} + +impl SchemaUpgradeStep { + /// Returns a human-readable name for this step (the name of the file it + /// came from) + pub fn label(&self) -> &str { + self.label.as_ref() + } + + /// Returns the actual SQL to execute for this step + pub fn sql(&self) -> &str { + self.sql.as_ref() + } +} + +#[cfg(test)] +mod test { + use super::*; + use camino_tempfile::Utf8TempDir; + + #[test] + fn test_known_versions() { + if let Err(error) = verify_known_versions( + // The real list is defined in reverse order for developer + // convenience so we reverse it before processing. + KNOWN_VERSIONS.iter().rev(), + &EARLIEST_SUPPORTED_VERSION, + &SCHEMA_VERSION, + // Versions after 45 obey our modern, stricter rules. + 45, + ) { + panic!("problem with static configuration: {:#}", error); + } + } + + // (Test the test function) + #[test] + fn test_verify() { + // EARLIEST_SUPPORTED_VERSION is somehow wrong + let error = verify_known_versions( + [&KnownVersion::legacy(2, 0), &KnownVersion::legacy(3, 0)], + &SemverVersion::new(1, 0, 0), + &SemverVersion::new(3, 0, 0), + 100, + ) + .unwrap_err(); + assert_eq!( + format!("{error:#}"), + "EARLIEST_SUPPORTED_VERSION is not the earliest in KNOWN_VERSIONS" + ); + + // SCHEMA_VERSION was not updated + let error = verify_known_versions( + [&KnownVersion::legacy(1, 0), &KnownVersion::legacy(2, 0)], + &SemverVersion::new(1, 0, 0), + &SemverVersion::new(1, 0, 0), + 100, + ) + .unwrap_err(); + assert_eq!( + format!("{error:#}"), + "latest KNOWN_VERSION is 2.0.0, but SCHEMA_VERSION is 1.0.0" + ); + + // Latest version was duplicated instead of bumped (legacy) + let error = verify_known_versions( + [ + &KnownVersion::legacy(1, 0), + &KnownVersion::legacy(2, 0), + &KnownVersion::legacy(2, 0), + ], + &EARLIEST_SUPPORTED_VERSION, + &SemverVersion::new(2, 0, 0), + 100, + ) + .unwrap_err(); + assert_eq!( + format!("{error:#}"), + "KNOWN_VERSION 2.0.0 appears directly after 2.0.0, but is not later" + ); + + // Latest version was duplicated instead of bumped (modern case) + let error = verify_known_versions( + [ + &KnownVersion::legacy(1, 0), + &KnownVersion::new(2, "dir1"), + &KnownVersion::new(2, "dir2"), + ], + &EARLIEST_SUPPORTED_VERSION, + &SemverVersion::new(2, 0, 0), + 100, + ) + .unwrap_err(); + assert_eq!( + format!("{error:#}"), + "KNOWN_VERSION 2.0.0 appears directly after 2.0.0, but is not later" + ); + + // Version added out of order + let error = verify_known_versions( + [ + &KnownVersion::legacy(1, 0), + &KnownVersion::legacy(2, 0), + &KnownVersion::legacy(1, 3), 
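As an aside, not part of this patch: a sketch of how a caller might consume this API, loading every known version from the schema directory and applying each upgrade newer than the version found in the database. The execute_sql stub is invented; the real driver is DataStore::ensure_schema, rewritten later in this diff:

    use anyhow::Context;
    use camino::Utf8Path;
    use nexus_db_model::{AllSchemaVersions, SCHEMA_VERSION};
    use omicron_common::api::external::SemverVersion;

    // Stand-in for executing a batch of SQL against the database.
    fn execute_sql(sql: &str) -> anyhow::Result<()> {
        println!("would execute {} bytes of SQL", sql.len());
        Ok(())
    }

    fn upgrade_from(
        schema_dir: &Utf8Path,
        found: &SemverVersion,
    ) -> anyhow::Result<()> {
        let all_versions = AllSchemaVersions::load(schema_dir)?;
        // Apply, in order, every known version newer than the one currently
        // recorded in the database, stopping at the version this binary was
        // built against.
        for version in all_versions
            .iter_versions()
            .filter(|v| v.semver() > found && v.semver() <= &SCHEMA_VERSION)
        {
            for step in version.upgrade_steps() {
                execute_sql(step.sql()).with_context(|| {
                    format!("version {}, step {}", version, step.label())
                })?;
            }
        }
        Ok(())
    }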
+ ], + &EARLIEST_SUPPORTED_VERSION, + &SemverVersion::new(3, 0, 0), + 100, + ) + .unwrap_err(); + assert_eq!( + format!("{error:#}"), + "KNOWN_VERSION 1.0.3 appears directly after 2.0.0, but is not later" + ); + + // Gaps are not allowed. + let error = verify_known_versions( + [ + &KnownVersion::legacy(1, 0), + &KnownVersion::legacy(2, 0), + &KnownVersion::legacy(4, 0), + ], + &EARLIEST_SUPPORTED_VERSION, + &SemverVersion::new(4, 0, 0), + 100, + ) + .unwrap_err(); + assert_eq!( + format!("{error:#}"), + "KNOWN_VERSION 4.0.0 appears directly after 2.0.0, but its major \ + number is neither the same nor one greater" + ); + + // For the strict case, the patch level can't be non-zero. You can only + // make this mistake by using `KnownVersion::legacy()` for a new + // version. + let error = verify_known_versions( + [ + &KnownVersion::legacy(1, 0), + &KnownVersion::legacy(2, 0), + &KnownVersion::legacy(3, 2), + ], + &EARLIEST_SUPPORTED_VERSION, + &SemverVersion::new(3, 0, 2), + 2, + ) + .unwrap_err(); + assert_eq!(format!("{error:#}"), "new patch versions must be zero"); + + // For the strict case, the directory name cannot contain the version at + // all. You can only make this mistake by using + // `KnownVersion::legacy()` for a new version. + let error = verify_known_versions( + [ + &KnownVersion::legacy(1, 0), + &KnownVersion::legacy(2, 0), + &KnownVersion::legacy(3, 0), + ], + &EARLIEST_SUPPORTED_VERSION, + &SemverVersion::new(3, 0, 0), + 2, + ) + .unwrap_err(); + assert_eq!( + format!("{error:#}"), + "the relative path for a version should not contain the \ + version itself" + ); + } + + fn verify_known_versions<'a, I>( + // list of known versions in order from earliest to latest + known_versions: I, + earliest: &SemverVersion, + latest: &SemverVersion, + min_strict_major: u64, + ) -> Result<(), anyhow::Error> + where + I: IntoIterator, + { + let mut known_versions = known_versions.into_iter(); + + // All known versions should be unique and increasing. + let first = + known_versions.next().expect("expected at least one KNOWN_VERSION"); + ensure!( + first.semver == *earliest, + "EARLIEST_SUPPORTED_VERSION is not the earliest in KNOWN_VERSIONS" + ); + + let mut prev = first; + for v in known_versions { + println!("checking known version: {} -> {}", prev, v); + ensure!( + v.semver > prev.semver, + "KNOWN_VERSION {} appears directly after {}, but is not later", + v, + prev + ); + + // We currently make sure there are no gaps in the major number. + // This is not strictly necessary but if this isn't true then it was + // probably a mistake. + // + // It's allowed for the major numbers to be the same because a few + // past schema versions only bumped the patch number for whatever + // reason. + ensure!( + v.semver.0.major == prev.semver.0.major + || v.semver.0.major == prev.semver.0.major + 1, + "KNOWN_VERSION {} appears directly after {}, but its major \ + number is neither the same nor one greater", + v, + prev + ); + + // We never allowed minor versions to be zero and it is not + // currently possible to even construct one that had a non-zero + // minor number. + ensure!(v.semver.0.minor == 0, "new minor versions must be zero"); + + // We changed things after version 45 to require that: + // + // (1) the major always be bumped (the minor and patch must be zero) + // (2) users choose a unique directory name for the SQL files. It + // would defeat the point if people used the semver for + // + // After version 45, we do not allow non-zero minor or patch + // numbers. 
+ if v.semver.0.major > min_strict_major { + ensure!( + v.semver.0.patch == 0, + "new patch versions must be zero" + ); + ensure!( + !v.relative_path.contains(&v.semver.to_string()), + "the relative path for a version should not contain the \ + version itself" + ); + } + + prev = v; + } + + ensure!( + prev.semver == *latest, + "latest KNOWN_VERSION is {}, but SCHEMA_VERSION is {}", + prev, + latest + ); + + Ok(()) + } + + // Confirm that `SchemaVersion::load_from_directory()` rejects `up*.sql` + // files where the `*` doesn't contain a positive integer. + #[tokio::test] + async fn test_reject_invalid_up_sql_names() { + for (invalid_filename, error_prefix) in [ + ("upA.sql", "invalid filename (non-numeric `up*.sql`)"), + ("up1a.sql", "invalid filename (non-numeric `up*.sql`)"), + ("upaaa1.sql", "invalid filename (non-numeric `up*.sql`)"), + ("up-3.sql", "invalid filename (non-numeric `up*.sql`)"), + ( + "up0.sql", + "invalid filename (`up*.sql` numbering must start at 1)", + ), + ( + "up00.sql", + "invalid filename (`up*.sql` numbering must start at 1)", + ), + ( + "up000.sql", + "invalid filename (`up*.sql` numbering must start at 1)", + ), + ] { + let tempdir = Utf8TempDir::new().unwrap(); + let filename = tempdir.path().join(invalid_filename); + _ = tokio::fs::File::create(&filename).await.unwrap(); + let maybe_schema = SchemaVersion::load_from_directory( + SemverVersion::new(12, 0, 0), + tempdir.path(), + ); + match maybe_schema { + Ok(upgrade) => { + panic!( + "unexpected success on {invalid_filename} \ + (produced {upgrade:?})" + ); + } + Err(error) => { + assert_eq!( + format!("{error:#}"), + format!("{error_prefix}: {filename}") + ); + } + } + } + } + + // Confirm that `SchemaVersion::load_from_directory()` rejects a directory + // with no appropriately-named files. + #[tokio::test] + async fn test_reject_no_up_sql_files() { + for filenames in [ + &[] as &[&str], + &["README.md"], + &["foo.sql", "bar.sql"], + &["up1sql", "up2sql"], + ] { + let tempdir = Utf8TempDir::new().unwrap(); + for filename in filenames { + _ = tokio::fs::File::create(tempdir.path().join(filename)) + .await + .unwrap(); + } + + let maybe_schema = SchemaVersion::load_from_directory( + SemverVersion::new(12, 0, 0), + tempdir.path(), + ); + match maybe_schema { + Ok(upgrade) => { + panic!( + "unexpected success on {filenames:?} \ + (produced {upgrade:?})" + ); + } + Err(error) => { + assert_eq!( + format!("{error:#}"), + "no `up*.sql` files found" + ); + } + } + } + } + + // Confirm that `SchemaVersion::load_from_directory()` rejects collections + // of `up*.sql` files with individually-valid names but that do not pass the + // rules of the entire collection. 
+ #[tokio::test] + async fn test_reject_invalid_up_sql_collections() { + for invalid_filenames in [ + &["up.sql", "up1.sql"] as &[&str], + &["up1.sql", "up01.sql"], + &["up1.sql", "up3.sql"], + &["up1.sql", "up2.sql", "up3.sql", "up02.sql"], + ] { + let tempdir = Utf8TempDir::new().unwrap(); + for filename in invalid_filenames { + _ = tokio::fs::File::create(tempdir.path().join(filename)) + .await + .unwrap(); + } + + let maybe_schema = SchemaVersion::load_from_directory( + SemverVersion::new(12, 0, 0), + tempdir.path(), + ); + match maybe_schema { + Ok(upgrade) => { + panic!( + "unexpected success on {invalid_filenames:?} \ + (produced {upgrade:?})" + ); + } + Err(error) => { + let message = format!("{error:#}"); + assert!( + message.starts_with("invalid `up*.sql` sequence: "), + "message did not start with expected prefix: \ + {message:?}" + ); + } + } + } + } + + // Confirm that `SchemaVersion::load_from_directory()` accepts legal + // collections of `up*.sql` filenames. + #[tokio::test] + async fn test_allows_valid_up_sql_collections() { + for filenames in [ + &["up.sql"] as &[&str], + &["up1.sql", "up2.sql"], + &[ + "up01.sql", "up02.sql", "up03.sql", "up04.sql", "up05.sql", + "up06.sql", "up07.sql", "up08.sql", "up09.sql", "up10.sql", + "up11.sql", + ], + &["up00001.sql", "up00002.sql", "up00003.sql"], + ] { + let tempdir = Utf8TempDir::new().unwrap(); + for filename in filenames { + _ = tokio::fs::File::create(tempdir.path().join(filename)) + .await + .unwrap(); + } + + let maybe_schema = SchemaVersion::load_from_directory( + SemverVersion::new(12, 0, 0), + tempdir.path(), + ); + match maybe_schema { + Ok(_) => (), + Err(message) => { + panic!("unexpected failure on {filenames:?}: {message:?}"); + } + } + } + } +} diff --git a/nexus/db-queries/src/db/datastore/db_metadata.rs b/nexus/db-queries/src/db/datastore/db_metadata.rs index cbbeaf2aa4..e6b57c1c72 100644 --- a/nexus/db-queries/src/db/datastore/db_metadata.rs +++ b/nexus/db-queries/src/db/datastore/db_metadata.rs @@ -8,144 +8,18 @@ use super::DataStore; use crate::db; use crate::db::error::public_error_from_diesel; use crate::db::error::ErrorHandler; +use anyhow::{bail, ensure, Context}; use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; -use camino::{Utf8Path, Utf8PathBuf}; use chrono::Utc; use diesel::prelude::*; -use nexus_config::SchemaConfig; +use nexus_db_model::AllSchemaVersions; +use nexus_db_model::SchemaVersion; +use nexus_db_model::EARLIEST_SUPPORTED_VERSION; use omicron_common::api::external::Error; use omicron_common::api::external::SemverVersion; -use slog::Logger; -use std::collections::BTreeSet; -use std::ops::Bound; +use slog::{error, info, o, Logger}; use std::str::FromStr; -pub const EARLIEST_SUPPORTED_VERSION: &'static str = "1.0.0"; - -/// Describes a single file containing a schema change, as SQL. -#[derive(Debug)] -pub struct SchemaUpgradeStep { - pub path: Utf8PathBuf, - pub sql: String, -} - -/// Describes a sequence of files containing schema changes. -#[derive(Debug)] -pub struct SchemaUpgrade { - pub steps: Vec, -} - -/// Reads a "version directory" and reads all SQL changes into -/// a result Vec. -/// -/// Files that do not begin with "up" and end with ".sql" are ignored. The -/// collection of `up*.sql` files must fall into one of these two conventions: -/// -/// * "up.sql" with no other files -/// * "up1.sql", "up2.sql", ..., beginning from 1, optionally with leading -/// zeroes (e.g., "up01.sql", "up02.sql", ...). 
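As an aside, not part of this patch: the tests above pin down the `up*.sql` naming contract. The same rule restated as a tiny standalone check over the parsed numbers, where `up.sql` alone is keyed as 0 exactly as in load_from_directory (a simplification, not the real validation code):

    // Valid layouts: `up.sql` alone, or `up1.sql`..`upN.sql` (leading zeroes
    // allowed) with no gaps and no duplicates.
    fn is_valid_sequence(numbers: &mut Vec<u64>) -> bool {
        numbers.sort_unstable();
        if numbers.is_empty() {
            return false;
        }
        *numbers == [0]
            || numbers.iter().enumerate().all(|(i, n)| *n == i as u64 + 1)
    }

    fn main() {
        assert!(is_valid_sequence(&mut vec![0]));       // up.sql
        assert!(is_valid_sequence(&mut vec![1, 2, 3])); // up1.sql..up3.sql
        assert!(!is_valid_sequence(&mut vec![0, 1]));   // up.sql plus up1.sql
        assert!(!is_valid_sequence(&mut vec![1, 3]));   // gap: no up2.sql
        assert!(!is_valid_sequence(&mut vec![1, 1]));   // up1.sql and up01.sql
    }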
There is no maximum value, but -/// there may not be any gaps (e.g., if "up2.sql" and "up4.sql" exist, so must -/// "up3.sql") and there must not be any repeats (e.g., if "up1.sql" exists, -/// "up01.sql" must not exist). -/// -/// Any violation of these two rules will result in an error. Collections of the -/// second form (`up1.sql`, ...) will be sorted numerically. -pub async fn all_sql_for_version_migration>( - path: P, -) -> Result { - let target_dir = path.as_ref(); - let mut up_sqls = vec![]; - let entries = target_dir - .read_dir_utf8() - .map_err(|e| format!("Failed to readdir {target_dir}: {e}"))?; - for entry in entries { - let entry = entry.map_err(|err| format!("Invalid entry: {err}"))?; - let pathbuf = entry.into_path(); - - // Ensure filename ends with ".sql" - if pathbuf.extension() != Some("sql") { - continue; - } - - // Ensure filename begins with "up", and extract anything in between - // "up" and ".sql". - let Some(remaining_filename) = pathbuf - .file_stem() - .and_then(|file_stem| file_stem.strip_prefix("up")) - else { - continue; - }; - - // Ensure the remaining filename is either empty (i.e., the filename is - // exactly "up.sql") or parseable as an unsigned integer. We give - // "up.sql" the "up_number" 0 (checked in the loop below), and require - // any other number to be nonzero. - if remaining_filename.is_empty() { - up_sqls.push((0, pathbuf)); - } else { - let Ok(up_number) = remaining_filename.parse::() else { - return Err(format!( - "invalid filename (non-numeric `up*.sql`): {pathbuf}", - )); - }; - if up_number == 0 { - return Err(format!( - "invalid filename (`up*.sql` numbering must start at 1): \ - {pathbuf}", - )); - } - up_sqls.push((up_number, pathbuf)); - } - } - up_sqls.sort(); - - // Validate that we have a reasonable sequence of `up*.sql` numbers. - match up_sqls.as_slice() { - [] => return Err("no `up*.sql` files found".to_string()), - [(up_number, path)] => { - // For a single file, we allow either `up.sql` (keyed as - // up_number=0) or `up1.sql`; reject any higher number. - if *up_number > 1 { - return Err(format!( - "`up*.sql` numbering must start at 1: found first file \ - {path}" - )); - } - } - _ => { - for (i, (up_number, path)) in up_sqls.iter().enumerate() { - // We have 2 or more `up*.sql`; they should be numbered exactly - // 1..=up_sqls.len(). - if i as u64 + 1 != *up_number { - // We know we have at least two elements, so report an error - // referencing either the next item (if we're first) or the - // previous item (if we're not first). - let (path_a, path_b) = if i == 0 { - let (_, next_path) = &up_sqls[1]; - (path, next_path) - } else { - let (_, prev_path) = &up_sqls[i - 1]; - (prev_path, path) - }; - return Err(format!( - "invalid `up*.sql` combination: {path_a}, {path_b}" - )); - } - } - } - } - - // This collection of `up*.sql` files is valid; read them all, in order. - let mut result = SchemaUpgrade { steps: vec![] }; - for (_, path) in up_sqls.into_iter() { - let sql = tokio::fs::read_to_string(&path) - .await - .map_err(|e| format!("Cannot read {path}: {e}"))?; - result.steps.push(SchemaUpgradeStep { path: path.to_owned(), sql }); - } - Ok(result) -} - impl DataStore { // Ensures that the database schema matches "desired_version". // @@ -160,147 +34,118 @@ impl DataStore { // instances on the rack are operating on the same version of software. // If that assumption is broken, nothing would stop a "new deployment" // from making a change that invalidates the queries used by an "old - // deployment". 
This is fixable, but it requires slightly more knowledge - // about the deployment and liveness of Nexus services within the rack. + // deployment". pub async fn ensure_schema( &self, log: &Logger, desired_version: SemverVersion, - config: Option<&SchemaConfig>, - ) -> Result<(), String> { - let mut current_version = match self.database_schema_version().await { - Ok(current_version) => { - // NOTE: We could run with a less tight restriction. - // - // If we respect the meaning of the semver version, it should be possible - // to use subsequent versions, as long as they do not introduce breaking changes. - // - // However, at the moment, we opt for conservatism: if the database does not - // exactly match the schema version, we refuse to continue without modification. - if current_version == desired_version { - info!(log, "Compatible database schema: {current_version}"); - return Ok(()); - } - let observed = ¤t_version.0; - warn!(log, "Database schema {observed} does not match expected {desired_version}"); - current_version - } - Err(e) => { - return Err(format!("Cannot read schema version: {e}")); - } - }; + all_versions: Option<&AllSchemaVersions>, + ) -> Result<(), anyhow::Error> { + let found_version = self + .database_schema_version() + .await + .context("Cannot read database schema version")?; + let log = log.new(o!( + "found_version" => found_version.to_string(), + "desired_version" => desired_version.to_string(), + )); - let Some(config) = config else { - return Err( - "Not configured to automatically update schema".to_string() - ); - }; + // NOTE: We could run with a less tight restriction. + // + // If we respect the meaning of the semver version, it should be + // possible to use subsequent versions, as long as they do not introduce + // breaking changes. + // + // However, at the moment, we opt for conservatism: if the database does + // not exactly match the schema version, we refuse to continue without + // modification. + if found_version == desired_version { + info!(log, "Database schema version is up to date"); + return Ok(()); + } - if current_version > desired_version { - return Err("Nexus older than DB version: automatic downgrades are unsupported".to_string()); + if found_version > desired_version { + error!( + log, + "Found schema version is newer than desired schema version"; + ); + bail!( + "Found schema version ({}) is newer than desired schema \ + version ({})", + found_version, + desired_version, + ) } + let Some(all_versions) = all_versions else { + error!( + log, + "Database schema version is out of date, but automatic update \ + is disabled", + ); + bail!("Schema is out of date but automatic update is disabled"); + }; + // If we're here, we know the following: // // - The schema does not match our expected version (or at least, it - // didn't when we read it moments ago). + // didn't when we read it moments ago). // - We should attempt to automatically upgrade the schema. - // - // We do the following: - // - Look in the schema directory for all the changes, in-order, to - // migrate from our current version to the desired version. - - info!(log, "Reading schemas from {}", config.schema_dir); - let mut dir = tokio::fs::read_dir(&config.schema_dir) - .await - .map_err(|e| format!("Failed to read schema config dir: {e}"))?; - let mut all_versions = BTreeSet::new(); - while let Some(entry) = dir - .next_entry() - .await - .map_err(|e| format!("Failed to read schema dir: {e}"))? 
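As an aside, not part of this patch: the early returns above reduce to three comparisons on SemverVersion. A condensed restatement (the real method also builds a scoped logger and then drives the upgrade loop):

    use omicron_common::api::external::SemverVersion;

    enum Plan {
        UpToDate,
        RefuseDowngrade,
        RefuseNoAutoUpdate,
        Upgrade,
    }

    fn plan(
        found: &SemverVersion,
        desired: &SemverVersion,
        have_all_versions: bool,
    ) -> Plan {
        if found == desired {
            Plan::UpToDate
        } else if found > desired {
            // The database is ahead of this binary; automatic downgrades are
            // not supported.
            Plan::RefuseDowngrade
        } else if !have_all_versions {
            // Out of date, but no set of schema versions was supplied, so
            // automatic update is disabled.
            Plan::RefuseNoAutoUpdate
        } else {
            Plan::Upgrade
        }
    }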
- { - if entry.file_type().await.map_err(|e| e.to_string())?.is_dir() { - let name = entry - .file_name() - .into_string() - .map_err(|_| "Non-unicode schema dir".to_string())?; - if let Ok(observed_version) = name.parse::() { - all_versions.insert(observed_version); - } else { - let err_msg = - format!("Failed to parse {name} as a semver version"); - warn!(log, "{err_msg}"); - return Err(err_msg); - } - } - } + info!(log, "Database schema is out of date. Attempting upgrade."); + ensure!( + all_versions.contains_version(&found_version), + "Found schema version {found_version} was not found", + ); - if !all_versions.contains(¤t_version) { - return Err(format!( - "Current DB version {current_version} was not found in {}", - config.schema_dir - )); - } // TODO: Test this? - if !all_versions.contains(&desired_version) { - return Err(format!( - "Target DB version {desired_version} was not found in {}", - config.schema_dir - )); - } + ensure!( + all_versions.contains_version(&desired_version), + "Desired version {desired_version} was not found", + ); - let target_versions = all_versions.range(( - Bound::Excluded(¤t_version), - Bound::Included(&desired_version), - )); + let target_versions: Vec<&SchemaVersion> = all_versions + .versions_range(&found_version..=&desired_version) + .collect(); + let mut current_version = found_version; for target_version in target_versions.into_iter() { - info!( - log, - "Attempting to upgrade schema"; + let log = log.new(o!( "current_version" => current_version.to_string(), - "target_version" => target_version.to_string(), - ); - - let target_dir = config.schema_dir.join(target_version.to_string()); - - let schema_change = - all_sql_for_version_migration(&target_dir).await?; + "target_version" => target_version.semver().to_string(), + )); + info!(log, "Attempting to upgrade schema"); // Confirm the current version, set the "target_version" // column to indicate that a schema update is in-progress. // // Sets the following: // - db_metadata.target_version = new version - self.prepare_schema_update(¤t_version, &target_version) - .await - .map_err(|e| e.to_string())?; - - info!( - log, - "Marked schema upgrade as prepared"; - "current_version" => current_version.to_string(), - "target_version" => target_version.to_string(), - ); + self.prepare_schema_update( + ¤t_version, + &target_version.semver(), + ) + .await + .context("preparing schema update")?; + info!(log, "Marked schema upgrade as prepared"); - for SchemaUpgradeStep { path: _, sql } in &schema_change.steps { - // Perform the schema change. + for step in target_version.upgrade_steps() { + // Perform the schema change step. 
self.apply_schema_update( ¤t_version, - &target_version, - &sql, + &target_version.semver(), + step.sql(), ) .await - .map_err(|e| e.to_string())?; + .with_context(|| { + format!( + "update to {}, applying step {:?}", + target_version.semver(), + step.label() + ) + })?; } - info!( - log, - "Applied schema upgrade"; - "current_version" => current_version.to_string(), - "target_version" => target_version.to_string(), - ); + info!(log, "Applied schema upgrade"); // NOTE: We could execute the schema change in a background task, // and let it propagate, while observing it with the following @@ -324,20 +169,23 @@ impl DataStore { // Now that the schema change has completed, set the following: // - db_metadata.version = new version // - db_metadata.target_version = NULL - self.finalize_schema_update(¤t_version, &target_version) - .await - .map_err(|e| e.to_string())?; + self.finalize_schema_update( + ¤t_version, + target_version.semver(), + ) + .await + .context("finalizing schema update")?; info!( log, "Finalized schema upgrade"; - "current_version" => current_version.to_string(), - "target_version" => target_version.to_string(), ); - current_version = target_version.clone(); + current_version = target_version.semver().clone(); } + info!(log, "Schema update complete"); + Ok(()) } @@ -407,13 +255,13 @@ impl DataStore { &self, current: &SemverVersion, target: &SemverVersion, - sql: &String, + sql: &str, ) -> Result<(), Error> { let conn = self.pool_connection_unauthorized().await?; let result = self.transaction_retry_wrapper("apply_schema_update") .transaction(&conn, |conn| async move { - if target.to_string() != EARLIEST_SUPPORTED_VERSION { + if *target != EARLIEST_SUPPORTED_VERSION { let validate_version_query = format!("SELECT CAST(\ IF(\ (\ @@ -460,9 +308,10 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; if rows_updated != 1 { - return Err(Error::internal_error( - &format!("Failed to finalize schema update from version {from_version} to {to_version}"), - )); + return Err(Error::internal_error(&format!( + "Failed to finalize schema update from version \ + {from_version} to {to_version}" + ))); } Ok(()) } @@ -472,149 +321,11 @@ impl DataStore { mod test { use super::*; use camino_tempfile::Utf8TempDir; - use nexus_db_model::schema::SCHEMA_VERSION; + use nexus_db_model::SCHEMA_VERSION; use nexus_test_utils::db as test_db; use omicron_test_utils::dev; use std::sync::Arc; - // Confirm that `all_sql_for_version_migration` rejects `up*.sql` files - // where the `*` doesn't contain a positive integer. 
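As an aside, not part of this patch: the loop above drives a small three-phase protocol against the db_metadata table. A sketch of its shape, with invented stub functions standing in for the real DataStore methods:

    // Invented stubs; the real versions are DataStore::prepare_schema_update,
    // apply_schema_update, and finalize_schema_update, each a guarded SQL
    // UPDATE against the singleton db_metadata row.
    fn prepare_schema_update(current: &str, target: &str) {
        // Record intent: set db_metadata.target_version = target, checked
        // against the current version.
        println!("prepare {current} -> {target}");
    }

    fn apply_schema_update(sql: &str) {
        // Run one up*.sql step; the real method also re-validates the version
        // unless this is the earliest supported version.
        println!("apply {} bytes", sql.len());
    }

    fn finalize_schema_update(current: &str, target: &str) {
        // Commit: set db_metadata.version = target and clear target_version.
        println!("finalize {current} -> {target}");
    }

    fn upgrade_one_version(current: &str, target: &str, steps: &[&str]) {
        prepare_schema_update(current, target);
        for sql in steps {
            apply_schema_update(sql);
        }
        finalize_schema_update(current, target);
    }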
- #[tokio::test] - async fn all_sql_for_version_migration_rejects_invalid_up_sql_names() { - for (invalid_filename, error_prefix) in [ - ("upA.sql", "invalid filename (non-numeric `up*.sql`)"), - ("up1a.sql", "invalid filename (non-numeric `up*.sql`)"), - ("upaaa1.sql", "invalid filename (non-numeric `up*.sql`)"), - ("up-3.sql", "invalid filename (non-numeric `up*.sql`)"), - ( - "up0.sql", - "invalid filename (`up*.sql` numbering must start at 1)", - ), - ( - "up00.sql", - "invalid filename (`up*.sql` numbering must start at 1)", - ), - ( - "up000.sql", - "invalid filename (`up*.sql` numbering must start at 1)", - ), - ] { - let tempdir = Utf8TempDir::new().unwrap(); - let filename = tempdir.path().join(invalid_filename); - _ = tokio::fs::File::create(&filename).await.unwrap(); - - match all_sql_for_version_migration(tempdir.path()).await { - Ok(upgrade) => { - panic!( - "unexpected success on {invalid_filename} \ - (produced {upgrade:?})" - ); - } - Err(message) => { - assert_eq!(message, format!("{error_prefix}: {filename}")); - } - } - } - } - - // Confirm that `all_sql_for_version_migration` rejects a directory with no - // appriopriately-named files. - #[tokio::test] - async fn all_sql_for_version_migration_rejects_no_up_sql_files() { - for filenames in [ - &[] as &[&str], - &["README.md"], - &["foo.sql", "bar.sql"], - &["up1sql", "up2sql"], - ] { - let tempdir = Utf8TempDir::new().unwrap(); - for filename in filenames { - _ = tokio::fs::File::create(tempdir.path().join(filename)) - .await - .unwrap(); - } - - match all_sql_for_version_migration(tempdir.path()).await { - Ok(upgrade) => { - panic!( - "unexpected success on {filenames:?} \ - (produced {upgrade:?})" - ); - } - Err(message) => { - assert_eq!(message, "no `up*.sql` files found"); - } - } - } - } - - // Confirm that `all_sql_for_version_migration` rejects collections of - // `up*.sql` files with individually-valid names but that do not pass the - // rules of the entire collection. - #[tokio::test] - async fn all_sql_for_version_migration_rejects_invalid_up_sql_collections() - { - for invalid_filenames in [ - &["up.sql", "up1.sql"] as &[&str], - &["up1.sql", "up01.sql"], - &["up1.sql", "up3.sql"], - &["up1.sql", "up2.sql", "up3.sql", "up02.sql"], - ] { - let tempdir = Utf8TempDir::new().unwrap(); - for filename in invalid_filenames { - _ = tokio::fs::File::create(tempdir.path().join(filename)) - .await - .unwrap(); - } - - match all_sql_for_version_migration(tempdir.path()).await { - Ok(upgrade) => { - panic!( - "unexpected success on {invalid_filenames:?} \ - (produced {upgrade:?})" - ); - } - Err(message) => { - assert!( - message.starts_with("invalid `up*.sql` combination: "), - "message did not start with expected prefix: \ - {message:?}" - ); - } - } - } - } - - // Confirm that `all_sql_for_version_migration` accepts legal collections of - // `up*.sql` filenames. 
- #[tokio::test] - async fn all_sql_for_version_migration_allows_valid_up_sql_collections() { - for filenames in [ - &["up.sql"] as &[&str], - &["up1.sql", "up2.sql"], - &[ - "up01.sql", "up02.sql", "up03.sql", "up04.sql", "up05.sql", - "up06.sql", "up07.sql", "up08.sql", "up09.sql", "up10.sql", - "up11.sql", - ], - &["up00001.sql", "up00002.sql", "up00003.sql"], - ] { - let tempdir = Utf8TempDir::new().unwrap(); - for filename in filenames { - _ = tokio::fs::File::create(tempdir.path().join(filename)) - .await - .unwrap(); - } - - match all_sql_for_version_migration(tempdir.path()).await { - Ok(_) => (), - Err(message) => { - panic!("unexpected failure on {filenames:?}: {message:?}"); - } - } - } - } - // Confirms that calling the internal "ensure_schema" function can succeed // when the database is already at that version. #[tokio::test] @@ -664,12 +375,14 @@ mod test { } }; - // Create the old version directory, and also update the on-disk "current version" to - // this value. + // Create the old version directory, and also update the on-disk + // "current version" to this value. // - // Nexus will decide to upgrade to, at most, the version that its own binary understands. + // Nexus will decide to upgrade to, at most, the version that its own + // binary understands. // - // To trigger this action within a test, we manually set the "known to DB" version. + // To trigger this action within a test, we manually set the "known to + // DB" version. let v0 = SemverVersion::new(0, 0, 0); use db::schema::db_metadata::dsl; diesel::update(dsl::db_metadata.filter(dsl::singleton.eq(true))) @@ -706,29 +419,40 @@ mod test { .await; // Show that the datastores can be created concurrently. - let config = - SchemaConfig { schema_dir: config_dir.path().to_path_buf() }; + let all_versions = AllSchemaVersions::load_specific_legacy_versions( + config_dir.path(), + [&v0, &v1, &v2].into_iter(), + ) + .expect("failed to load schema"); let _ = futures::future::join_all((0..10).map(|_| { + let all_versions = all_versions.clone(); let log = log.clone(); let pool = pool.clone(); - let config = config.clone(); tokio::task::spawn(async move { - let datastore = DataStore::new(&log, pool, Some(&config)).await?; + let datastore = + DataStore::new(&log, pool, Some(&all_versions)).await?; // This is the crux of this test: confirm that, as each // migration completes, it's not possible to see any artifacts // of the "v1" migration (namely: the 'Widget' table should not // exist). - let result = diesel::select( - diesel::dsl::sql::( - "EXISTS (SELECT * FROM pg_tables WHERE tablename = 'widget')" - ) - ) - .get_result_async::(&*datastore.pool_connection_for_tests().await.unwrap()) - .await - .expect("Failed to query for table"); - assert_eq!(result, false, "The 'widget' table should have been deleted, but it exists.\ - This failure means an old update was re-applied after a newer update started."); + let result = diesel::select(diesel::dsl::sql::< + diesel::sql_types::Bool, + >( + "EXISTS (SELECT * FROM pg_tables WHERE \ + tablename = 'widget')", + )) + .get_result_async::( + &*datastore.pool_connection_for_tests().await.unwrap(), + ) + .await + .expect("Failed to query for table"); + assert_eq!( + result, false, + "The 'widget' table should have been deleted, but it \ + exists. This failure means an old update was re-applied \ + after a newer update started." 
+ ); Ok::<_, String>(datastore) }) diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index f967f8c5c0..c2f965c1d4 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -33,7 +33,6 @@ use diesel::prelude::*; use diesel::query_builder::{QueryFragment, QueryId}; use diesel::query_dsl::methods::LoadQuery; use diesel::{ExpressionMethods, QueryDsl}; -use nexus_config::SchemaConfig; use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::LookupType; @@ -100,13 +99,10 @@ mod vpc; mod zpool; pub use address_lot::AddressLotCreateResult; -pub use db_metadata::{ - all_sql_for_version_migration, SchemaUpgrade, SchemaUpgradeStep, - EARLIEST_SUPPORTED_VERSION, -}; pub use dns::DnsVersionUpdateBuilder; pub use instance::InstanceAndActiveVmm; pub use inventory::DataStoreInventoryTest; +use nexus_db_model::AllSchemaVersions; pub use probe::ProbeInfo; pub use rack::RackInit; pub use silo::Discoverability; @@ -197,14 +193,13 @@ impl DataStore { pub async fn new( log: &Logger, pool: Arc, - config: Option<&SchemaConfig>, + config: Option<&AllSchemaVersions>, ) -> Result { let datastore = Self::new_unchecked(log.new(o!("component" => "datastore")), pool)?; // Keep looping until we find that the schema matches our expectation. - const EXPECTED_VERSION: SemverVersion = - nexus_db_model::schema::SCHEMA_VERSION; + const EXPECTED_VERSION: SemverVersion = nexus_db_model::SCHEMA_VERSION; retry_notify( retry_policy_internal_service(), || async { diff --git a/nexus/src/app/background/nat_cleanup.rs b/nexus/src/app/background/nat_cleanup.rs index 16b1b7e357..844dbffefe 100644 --- a/nexus/src/app/background/nat_cleanup.rs +++ b/nexus/src/app/background/nat_cleanup.rs @@ -112,11 +112,21 @@ impl BackgroundTask for Ipv4NatGarbageCollector { let retention_threshold = Utc::now() - Duration::weeks(2); - let result = self + let result = match self .datastore .ipv4_nat_cleanup(opctx, min_gen, retention_threshold) - .await - .unwrap(); + .await { + Ok(v) => v, + Err(e) => { + return json!({ + "error": + format!( + "failed to perform cleanup operation: {:#}", + e + ) + }); + }, + }; let rv = serde_json::to_value(&result).unwrap_or_else(|error| { json!({ diff --git a/nexus/src/app/background/sync_switch_configuration.rs b/nexus/src/app/background/sync_switch_configuration.rs index e21f82b007..67d1d9fc1b 100644 --- a/nexus/src/app/background/sync_switch_configuration.rs +++ b/nexus/src/app/background/sync_switch_configuration.rs @@ -11,6 +11,7 @@ use crate::app::{ }, map_switch_zone_addrs, }; +use slog::o; use internal_dns::resolver::Resolver; use internal_dns::ServiceName; @@ -35,6 +36,7 @@ use nexus_db_queries::{ context::OpContext, db::{datastore::SwitchPortSettingsCombinedResult, DataStore}, }; +use nexus_types::identity::Asset; use nexus_types::{external_api::params, identity::Resource}; use omicron_common::OMICRON_DPD_TAG; use omicron_common::{ @@ -49,6 +51,7 @@ use sled_agent_client::types::{ }; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, + hash::Hash, net::{IpAddr, Ipv4Addr}, str::FromStr, sync::Arc, @@ -211,6 +214,7 @@ impl SwitchPortSettingsManager { } } +#[derive(Debug)] enum PortSettingsChange { Apply(Box), Clear, @@ -222,7 +226,7 @@ impl BackgroundTask for SwitchPortSettingsManager { opctx: &'a OpContext, ) -> BoxFuture<'a, serde_json::Value> { async move { - let log = &opctx.log; + let log = opctx.log.clone(); let racks = 
match self.datastore.rack_list_initialized(opctx, &DataPageParams::max_page()).await { Ok(racks) => racks, @@ -244,6 +248,8 @@ impl BackgroundTask for SwitchPortSettingsManager { // but our logic for pulling switch ports and their related configurations // *isn't* per-rack, so that's something we'll need to revisit in the future. for rack in &racks { + let rack_id = rack.id().to_string(); + let log = log.new(o!("rack_id" => rack_id)); // lookup switch zones via DNS // TODO https://github.com/oxidecomputer/omicron/issues/5201 @@ -261,21 +267,21 @@ impl BackgroundTask for SwitchPortSettingsManager { // TODO https://github.com/oxidecomputer/omicron/issues/5201 let mappings = - map_switch_zone_addrs(log, switch_zone_addresses).await; + map_switch_zone_addrs(&log, switch_zone_addresses).await; // TODO https://github.com/oxidecomputer/omicron/issues/5201 // build sled agent clients - let sled_agent_clients = build_sled_agent_clients(&mappings, log); + let sled_agent_clients = build_sled_agent_clients(&mappings, &log); // TODO https://github.com/oxidecomputer/omicron/issues/5201 // build dpd clients - let dpd_clients = build_dpd_clients(&mappings, log); + let dpd_clients = build_dpd_clients(&mappings, &log); // TODO https://github.com/oxidecomputer/omicron/issues/5201 // build mgd clients - let mgd_clients = build_mgd_clients(mappings, log); + let mgd_clients = build_mgd_clients(mappings, &log); - let port_list = match self.switch_ports(opctx, log).await { + let port_list = match self.switch_ports(opctx, &log).await { Ok(value) => value, Err(e) => { error!(log, "failed to generate switchports for rack"; "error" => %e); @@ -287,7 +293,7 @@ impl BackgroundTask for SwitchPortSettingsManager { // calculate and apply switch port changes // - let changes = match self.changes(port_list, opctx, log).await { + let changes = match self.changes(port_list, opctx, &log).await { Ok(value) => value, Err(e) => { error!(log, "failed to generate changeset for switchport settings"; "error" => %e); @@ -295,7 +301,8 @@ impl BackgroundTask for SwitchPortSettingsManager { }, }; - apply_switch_port_changes(&dpd_clients, &changes, log).await; + info!(&log, "applying switch port config changes"; "changes" => ?changes); + apply_switch_port_changes(&dpd_clients, &changes, &log).await; // // calculate and apply routing changes @@ -303,7 +310,7 @@ impl BackgroundTask for SwitchPortSettingsManager { // get the static routes on each switch let current_static_routes = - static_routes_on_switch(&mgd_clients, log).await; + static_routes_on_switch(&mgd_clients, &log).await; info!(&log, "retrieved existing routes"; "routes" => ?current_static_routes); // generate the complete set of static routes that should be on a given switch @@ -315,7 +322,7 @@ impl BackgroundTask for SwitchPortSettingsManager { let routes_to_add = static_routes_to_add( &desired_static_routes, ¤t_static_routes, - log, + &log, ); info!(&log, "calculated static routes to add"; "routes" => ?routes_to_add); @@ -327,21 +334,26 @@ impl BackgroundTask for SwitchPortSettingsManager { // delete the unneeded routes first, just in case there is a conflicting route for // one we need to add - info!(&log, "deleting static routes"; "routes" => ?routes_to_del); - delete_static_routes(&mgd_clients, routes_to_del, log).await; + if !routes_to_del.is_empty() { + info!(&log, "deleting static routes"; "routes" => ?routes_to_del); + delete_static_routes(&mgd_clients, routes_to_del, &log).await; + } // add the new routes - info!(&log, "adding static routes"; "routes" => 
?routes_to_add); - add_static_routes(&mgd_clients, routes_to_add, log).await; + if !routes_to_add.is_empty() { + info!(&log, "adding static routes"; "routes" => ?routes_to_add); + add_static_routes(&mgd_clients, routes_to_add, &log).await; + } // // calculate and apply loopback address changes // - match self.db_loopback_addresses(opctx, log).await { + info!(&log, "checking for changes to loopback addresses"); + match self.db_loopback_addresses(opctx, &log).await { Ok(desired_loopback_addresses) => { - let current_loopback_addresses = switch_loopback_addresses(&dpd_clients, log).await; + let current_loopback_addresses = switch_loopback_addresses(&dpd_clients, &log).await; let loopbacks_to_add: Vec<(SwitchLocation, IpAddr)> = desired_loopback_addresses .difference(¤t_loopback_addresses) @@ -352,8 +364,15 @@ impl BackgroundTask for SwitchPortSettingsManager { .map(|i| (i.0, i.1)) .collect(); - delete_loopback_addresses_from_switch(&loopbacks_to_del, &dpd_clients, log).await; - add_loopback_addresses_to_switch(&loopbacks_to_add, dpd_clients, log).await; + if !loopbacks_to_del.is_empty() { + info!(&log, "deleting loopback addresses"; "addresses" => ?loopbacks_to_del); + delete_loopback_addresses_from_switch(&loopbacks_to_del, &dpd_clients, &log).await; + } + + if !loopbacks_to_add.is_empty() { + info!(&log, "adding loopback addresses"; "addresses" => ?loopbacks_to_add); + add_loopback_addresses_to_switch(&loopbacks_to_add, dpd_clients, &log).await; + } }, Err(e) => { error!( @@ -769,7 +788,9 @@ impl BackgroundTask for SwitchPortSettingsManager { error!(log, "no blocks assigned to infra lot"); continue; }, - }; + } + ; + let mut desired_config = EarlyNetworkConfig { generation: 0, @@ -786,7 +807,7 @@ impl BackgroundTask for SwitchPortSettingsManager { }, }; - // should_update is a boolean value that determines whether or not we need to + // bootstore_needs_update is a boolean value that determines whether or not we need to // increment the bootstore version and push a new config to the sled agents. // // * If the config we've built from the switchport configuration information is @@ -804,15 +825,32 @@ impl BackgroundTask for SwitchPortSettingsManager { Ok(Some(BootstoreConfig { data, .. 
})) => { match serde_json::from_value::(data.clone()) { Ok(config) => { - if config.body.ntp_servers != desired_config.body.ntp_servers { + let current_ntp_servers: HashSet = config.body.ntp_servers.clone().into_iter().collect(); + let desired_ntp_servers: HashSet = desired_config.body.ntp_servers.clone().into_iter().collect(); + + let rnc_differs = match (config.body.rack_network_config.clone(), desired_config.body.rack_network_config.clone()) { + (Some(current_rnc), Some(desired_rnc)) => { + !hashset_eq(current_rnc.bgp.clone(), desired_rnc.bgp.clone()) || + !hashset_eq(current_rnc.ports.clone(), desired_rnc.ports.clone()) || + current_rnc.rack_subnet != desired_rnc.rack_subnet || + current_rnc.infra_ip_first != desired_rnc.infra_ip_first || + current_rnc.infra_ip_last != desired_rnc.infra_ip_last + }, + (None, Some(_)) => true, + _ => { + todo!("error") + } + }; + + if current_ntp_servers != desired_ntp_servers { info!( log, "ntp servers have changed"; - "old" => ?config.body.ntp_servers, - "new" => ?desired_config.body.ntp_servers, + "old" => ?current_ntp_servers, + "new" => ?desired_ntp_servers, ); true - } else if config.body.rack_network_config != desired_config.body.rack_network_config { + } else if rnc_differs { info!( log, "rack network config has changed"; @@ -855,6 +893,28 @@ impl BackgroundTask for SwitchPortSettingsManager { }, }; + // The following code is designed to give us the following + // properties + // * We only push updates to the bootstore (sled-agents) if + // configuration on our side (nexus) has relevant changes. + // * If the RPW encounters a critical error or crashes at any + // point of the operation, it will retry the configuration + // again during the next run + // * We are able to accomplish the above without inspecting + // the bootstore on the sled-agents + // + // For example, in the event that we crash after pushing to + // the sled-agents successfully, but before writing the + // results to the db + // 1. RPW will restart + // 2. RPW will build a new network config + // 3. RPW will compare against the last version stored in the db + // 4. RPW will decide to apply the config (again) + // 5. RPW will bump the version (again) + // 6. RPW will send a new bootstore update to the agents (with + // the same info as last time, but with a new version) + // 7. RPW will record the update in the db + // 8. 
We are now back on the happy path if bootstore_needs_update { let generation = match self.datastore .bump_bootstore_generation(opctx, NETWORK_KEY.into()) @@ -878,7 +938,7 @@ impl BackgroundTask for SwitchPortSettingsManager { "config" => ?desired_config, ); - // spush the updates to both scrimlets + // push the updates to both scrimlets // if both scrimlets are down, bootstore updates aren't happening anyway let mut one_succeeded = false; for (location, client) in &sled_agent_clients { @@ -923,6 +983,15 @@ impl BackgroundTask for SwitchPortSettingsManager { } } +fn hashset_eq(left: Vec, right: Vec) -> bool +where + T: Hash + Eq, +{ + let left = left.into_iter().collect::>(); + let right = right.into_iter().collect::>(); + left == right +} + async fn add_loopback_addresses_to_switch( loopbacks_to_add: &[(SwitchLocation, IpAddr)], dpd_clients: HashMap, @@ -1112,6 +1181,13 @@ fn static_routes_to_del( continue; }; } + + // filter out switches with no routes to remove + let routes_to_del = routes_to_del + .into_iter() + .filter(|(_location, request)| !request.routes.list.is_empty()) + .collect(); + routes_to_del } @@ -1158,6 +1234,13 @@ fn static_routes_to_add( }, ); } + + // filter out switches with no routes to add + let routes_to_add = routes_to_add + .into_iter() + .filter(|(_location, request)| !request.routes.list.is_empty()) + .collect(); + routes_to_add } @@ -1246,6 +1329,28 @@ async fn apply_switch_port_changes( } }; + let config_on_switch = + match client.port_settings_get(&dpd_port_id, DPD_TAG).await { + Ok(v) => v, + Err(e) => { + error!( + log, + "failed to retrieve port settings from switch"; + "switch_port_id" => ?port_name, + "switch_location" => ?location, + "error" => format!("{:#}", e) + ); + continue; + } + }; + + info!( + log, + "retrieved port settings from switch"; + "switch_port_id" => ?port_name, + "settings" => ?config_on_switch, + ); + match change { PortSettingsChange::Apply(settings) => { let dpd_port_settings = match api_to_dpd_port_settings( @@ -1265,6 +1370,17 @@ } }; + if config_on_switch.into_inner() == dpd_port_settings { + info!( + &log, + "port settings up to date, skipping"; + "switch_port_id" => ?port_name, + "switch_location" => ?location, + "switch_port_settings_id" => ?settings.settings.id(), + ); + continue; + } + // apply settings via dpd client info!( + &log, @@ -1301,6 +1417,17 @@ "switch_location" => ?location, "port_id" => ?dpd_port_id, ); + + if config_on_switch.into_inner().links.is_empty() { + info!( + &log, + "port settings up to date, skipping"; + "switch_port_id" => ?port_name, + "switch_location" => ?location, + ); + continue; + } + match client.port_settings_clear(&dpd_port_id, DPD_TAG).await { Ok(_) => {} Err(e) => { diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 87a7d98c91..fd73a18355 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -27,6 +27,7 @@ use omicron_common::api::external::NameOrId; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use ref_cast::RefCast; +use std::matches; use uuid::Uuid; /// Helper to make it easier to 404 on attempts to manipulate internal pools @@ -291,6 +292,19 @@ impl super::Nexus { return Err(not_found_from_lookup(pool_lookup)); } + // Disallow V6 ranges until IPv6 is fully supported by the networking + // subsystem.
Instead of changing the API to reflect that (making this + // endpoint inconsistent with the rest) and changing it back when we + // add support, we accept them at the API layer and error here. It + // would be nice if we could do it in the datastore layer, but we'd + // have no way of creating IPv6 ranges for the purpose of testing IP + // pool utilization. + if matches!(range, IpRange::V6(_)) { + return Err(Error::invalid_request( + "IPv6 ranges are not allowed yet", + )); + } + self.db_datastore.ip_pool_add_range(opctx, &authz_pool, range).await } @@ -347,6 +361,18 @@ impl super::Nexus { let (authz_pool, ..) = self.db_datastore.ip_pools_service_lookup(opctx).await?; opctx.authorize(authz::Action::Modify, &authz_pool).await?; + // Disallow V6 ranges until IPv6 is fully supported by the networking + // subsystem. Instead of changing the API to reflect that (making this + // endpoint inconsistent with the rest) and changing it back when we + // add support, we accept them at the API layer and error here. It + // would be nice if we could do it in the datastore layer, but we'd + // have no way of creating IPv6 ranges for the purpose of testing IP + // pool utilization. + if matches!(range, IpRange::V6(_)) { + return Err(Error::invalid_request( + "IPv6 ranges are not allowed yet", + )); + } self.db_datastore.ip_pool_add_range(opctx, &authz_pool, range).await } diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index fdae303489..9e0b12d83d 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -87,6 +87,7 @@ pub(crate) mod sagas; pub(crate) use nexus_db_queries::db::queries::disk::MAX_DISKS_PER_INSTANCE; +use nexus_db_model::AllSchemaVersions; pub(crate) use nexus_db_model::MAX_NICS_PER_INSTANCE; // XXX: Might want to recast as max *floating* IPs, we have at most one @@ -202,13 +203,16 @@ impl Nexus { authz: Arc, ) -> Result, String> { let pool = Arc::new(pool); + let all_versions = config + .pkg + .schema + .as_ref() + .map(|s| AllSchemaVersions::load(&s.schema_dir)) + .transpose() + .map_err(|error| format!("{error:#}"))?; let db_datastore = Arc::new( - db::DataStore::new( - &log, - Arc::clone(&pool), - config.pkg.schema.as_ref(), - ) - .await?, + db::DataStore::new(&log, Arc::clone(&pool), all_versions.as_ref()) + .await?, ); db_datastore.register_producers(&producer_registry); diff --git a/nexus/src/bin/schema-updater.rs b/nexus/src/bin/schema-updater.rs index 6c7fe5adf3..2932a819d8 100644 --- a/nexus/src/bin/schema-updater.rs +++ b/nexus/src/bin/schema-updater.rs @@ -4,13 +4,14 @@ //! 
Upgrades CRDB schema -use anyhow::{anyhow, bail}; +use anyhow::anyhow; use camino::Utf8PathBuf; use clap::Parser; use clap::Subcommand; use nexus_config::PostgresConfigWithUrl; use nexus_config::SchemaConfig; -use nexus_db_model::schema::SCHEMA_VERSION; +use nexus_db_model::AllSchemaVersions; +use nexus_db_model::SCHEMA_VERSION; use nexus_db_queries::db; use nexus_db_queries::db::DataStore; use omicron_common::api::external::SemverVersion; @@ -20,7 +21,6 @@ use slog::LevelFilter; use slog::Logger; use slog_term::FullFormat; use slog_term::TermDecorator; -use std::collections::BTreeSet; use std::sync::Arc; fn parse_log_level(s: &str) -> anyhow::Result { @@ -73,6 +73,7 @@ async fn main() -> anyhow::Result<()> { let crdb_cfg = db::Config { url: args.url }; let pool = Arc::new(db::Pool::new(&log, &crdb_cfg)); let schema_config = SchemaConfig { schema_dir: args.schema_directory }; + let all_versions = AllSchemaVersions::load(&schema_config.schema_dir)?; // We use the unchecked constructor of the datastore because we // don't want to block on someone else applying an upgrade. @@ -87,50 +88,24 @@ async fn main() -> anyhow::Result<()> { .map(|v| v.to_string()) .unwrap_or_else(|_| "Unknown".to_string()); - println!("Current Version: {current_version}"); - - let mut dir = - tokio::fs::read_dir(&schema_config.schema_dir).await.map_err( - |e| anyhow!("Failed to read from schema directory: {e}"), - )?; - - let mut all_versions = BTreeSet::new(); - while let Some(entry) = dir - .next_entry() - .await - .map_err(|e| anyhow!("Failed to read schema dir: {e}"))? - { - if entry.file_type().await.map_err(|e| anyhow!(e))?.is_dir() { - let name = entry - .file_name() - .into_string() - .map_err(|_| anyhow!("Non-unicode schema dir"))?; - if let Ok(observed_version) = name.parse::() - { - all_versions.insert(observed_version); - } else { - bail!("Failed to parse {name} as a semver version"); - } - } - } - + println!("Current Version in database: {current_version}"); println!("Known Versions:"); - for version in &all_versions { + for version in all_versions.iter_versions() { let mut extra = String::new(); - if version.to_string() == current_version { + if version.semver().to_string() == current_version { extra.push_str(" (reported by database)"); }; - if version == &SCHEMA_VERSION { + if version.is_current_software_version() { extra.push_str(" (expected by Nexus)"); }; - println!(" {version}{extra}") + println!(" {}{extra}", version.semver()) } } Cmd::Upgrade { version } => { println!("Upgrading to {version}"); datastore - .ensure_schema(&log, version.clone(), Some(&schema_config)) + .ensure_schema(&log, version.clone(), Some(&all_versions)) .await .map_err(|e| anyhow!(e))?; println!("Upgrade to {version} complete"); diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 94cd35519b..41e5bce08a 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -1789,6 +1789,8 @@ async fn ip_pool_range_list( } /// Add range to IP pool +/// +/// IPv6 ranges are not allowed yet. #[endpoint { method = POST, path = "/v1/system/ip-pools/{pool}/ranges/add", @@ -1880,6 +1882,8 @@ async fn ip_pool_service_range_list( } /// Add IP range to Oxide service pool +/// +/// IPv6 ranges are not allowed yet. 
#[endpoint { method = POST, path = "/v1/system/ip-pools-service/ranges/add", diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index a8ab7e803a..bf97aa8ec1 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -52,7 +52,7 @@ extern crate slog; /// to stdout. pub fn run_openapi_external() -> Result<(), String> { external_api() - .openapi("Oxide Region API", "0.0.6") + .openapi("Oxide Region API", "20240327.0") .description("API for interacting with the Oxide control plane") .contact_url("https://oxide.computer") .contact_email("api@oxide.computer") diff --git a/nexus/tests/integration_tests/commands.rs b/nexus/tests/integration_tests/commands.rs index bc79a7d5a2..fd7a6c60c0 100644 --- a/nexus/tests/integration_tests/commands.rs +++ b/nexus/tests/integration_tests/commands.rs @@ -109,7 +109,7 @@ fn test_nexus_openapi() { .expect("stdout was not valid OpenAPI"); assert_eq!(spec.openapi, "3.0.3"); assert_eq!(spec.info.title, "Oxide Region API"); - assert_eq!(spec.info.version, "0.0.6"); + assert_eq!(spec.info.version, "20240327.0"); // Spot check a couple of items. assert!(!spec.paths.paths.is_empty()); diff --git a/nexus/tests/integration_tests/initialization.rs b/nexus/tests/integration_tests/initialization.rs index a76aef832e..a305a4178e 100644 --- a/nexus/tests/integration_tests/initialization.rs +++ b/nexus/tests/integration_tests/initialization.rs @@ -191,7 +191,7 @@ async fn test_nexus_boots_with_valid_schema() { #[tokio::test] async fn test_nexus_does_not_boot_without_valid_schema() { - let s = nexus_db_model::schema::SCHEMA_VERSION; + let s = nexus_db_model::SCHEMA_VERSION; let schemas_to_test = vec![ semver::Version::new(s.0.major + 1, s.0.minor, s.0.patch), @@ -240,7 +240,7 @@ async fn test_nexus_does_not_boot_without_valid_schema() { #[tokio::test] async fn test_nexus_does_not_boot_until_schema_updated() { - let good_schema = nexus_db_model::schema::SCHEMA_VERSION; + let good_schema = nexus_db_model::SCHEMA_VERSION; let bad_schema = semver::Version::new( good_schema.0.major + 1, good_schema.0.minor, diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index c8390e8ce0..cb5eade735 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -9,6 +9,8 @@ use dropshot::HttpErrorResponseBody; use dropshot::ResultsPage; use http::method::Method; use http::StatusCode; +use nexus_db_queries::authz; +use nexus_db_queries::context::OpContext; use nexus_db_queries::db::datastore::SERVICE_IP_POOL_NAME; use nexus_db_queries::db::fixed_data::silo::DEFAULT_SILO; use nexus_db_queries::db::fixed_data::silo::INTERNAL_SILO_ID; @@ -38,7 +40,6 @@ use nexus_types::external_api::params::IpPoolSiloUpdate; use nexus_types::external_api::params::IpPoolUpdate; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::Ipv4Range; -use nexus_types::external_api::shared::Ipv6Range; use nexus_types::external_api::shared::SiloIdentityMode; use nexus_types::external_api::views::IpPool; use nexus_types::external_api::views::IpPoolRange; @@ -46,7 +47,9 @@ use nexus_types::external_api::views::IpPoolSiloLink; use nexus_types::external_api::views::Silo; use nexus_types::external_api::views::SiloIpPool; use nexus_types::identity::Resource; +use omicron_common::address::Ipv6Range; use omicron_common::api::external::IdentityMetadataUpdateParams; +use omicron_common::api::external::LookupType; use omicron_common::api::external::NameOrId; use omicron_common::api::external::SimpleIdentity; use 
omicron_common::api::external::{IdentityMetadataCreateParams, Name}; @@ -765,7 +768,7 @@ async fn create_pool(client: &ClientTestContext, name: &str) -> IpPool { async fn test_ip_pool_utilization_total(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; - create_pool(client, "p0").await; + let pool = create_pool(client, "p0").await; assert_ip_pool_utilization(client, "p0", 0, 0, 0, 0).await; @@ -783,20 +786,36 @@ async fn test_ip_pool_utilization_total(cptestctx: &ControlPlaneTestContext) { assert_ip_pool_utilization(client, "p0", 0, 5, 0, 0).await; - // now let's add a gigantic range just for fun + // Now let's add a gigantic range. This requires direct datastore + // shenanigans because adding IPv6 ranges through the API is currently not + // allowed. It's worth doing because we want this code to correctly handle + // IPv6 ranges when they are allowed again. + + let nexus = &cptestctx.server.apictx().nexus; + let datastore = nexus.datastore(); + let log = cptestctx.logctx.log.new(o!()); + let opctx = OpContext::for_tests(log, datastore.clone()); + let authz_pool = authz::IpPool::new( + authz::FLEET, + pool.identity.id, + LookupType::ByName("p0".to_string()), + ); + let big_range = IpRange::V6( Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 1), + std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 0), std::net::Ipv6Addr::new( - 0xfd00, 0, 0, 0, 0xffff, 0xfff, 0xffff, 0xffff, + 0xfd00, 0, 0, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, ), ) .unwrap(), ); - object_create::(client, &add_url, &big_range).await; + datastore + .ip_pool_add_range(&opctx, &authz_pool, &big_range) + .await + .expect("could not add range"); - assert_ip_pool_utilization(client, "p0", 0, 5, 0, 18446480190918885375) - .await; + assert_ip_pool_utilization(client, "p0", 0, 5, 0, 2u128.pow(80)).await; } // Data for testing overlapping IP ranges @@ -895,59 +914,9 @@ async fn test_ip_pool_range_overlapping_ranges_fails( }; test_bad_ip_ranges(client, &ip_pool_add_range_url, &ipv4_range).await; - // Test data for IPv6 ranges that should fail due to overlap - let ipv6_range = TestRange { - base_range: IpRange::V6( - Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 10), - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 20), - ) - .unwrap(), - ), - bad_ranges: vec![ - // The exact same range - IpRange::V6( - Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 10), - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 20), - ) - .unwrap(), - ), - // Overlaps below - IpRange::V6( - Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 5), - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 15), - ) - .unwrap(), - ), - // Overlaps above - IpRange::V6( - Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 15), - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 25), - ) - .unwrap(), - ), - // Contains the base range - IpRange::V6( - Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 0), - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 100), - ) - .unwrap(), - ), - // Contained by the base range - IpRange::V6( - Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 12), - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 13), - ) - .unwrap(), - ), - ], - }; - test_bad_ip_ranges(client, &ip_pool_add_range_url, &ipv6_range).await; + // IPv6 tests removed along with support for IPv6 ranges in + // https://github.com/oxidecomputer/omicron/pull/5107 + // Put them back when IPv6 ranges are supported 
again. } async fn test_bad_ip_ranges( @@ -994,6 +963,38 @@ async fn test_bad_ip_ranges( } } +// Support for IPv6 ranges removed in +// https://github.com/oxidecomputer/omicron/pull/5107 +// Delete this test when we support IPv6 again. +#[nexus_test] +async fn test_ip_pool_range_rejects_v6(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + create_ip_pool(client, "p0", None).await; + + let range = IpRange::V6( + Ipv6Range::new( + std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 10), + std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 20), + ) + .unwrap(), + ); + + let add_url = "/v1/system/ip-pools/p0/ranges/add"; + let error = + object_create_error(client, add_url, &range, StatusCode::BAD_REQUEST) + .await; + + assert_eq!(error.message, "IPv6 ranges are not allowed yet"); + + // same deal with service pool + let add_url = "/v1/system/ip-pools-service/ranges/add"; + let error = + object_create_error(client, add_url, &range, StatusCode::BAD_REQUEST) + .await; + assert_eq!(error.message, "IPv6 ranges are not allowed yet"); +} + #[nexus_test] async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; @@ -1026,17 +1027,17 @@ async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { // address, which sorts all IPv4 before IPv6, then within protocol versions // by their first address. let ranges = [ - IpRange::V6( - Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 11), - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 20), + IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(10, 0, 0, 3), + std::net::Ipv4Addr::new(10, 0, 0, 4), ) .unwrap(), ), - IpRange::V6( - Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 0), - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 10), + IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(10, 0, 0, 5), + std::net::Ipv4Addr::new(10, 0, 0, 6), ) .unwrap(), ), @@ -1304,15 +1305,15 @@ async fn test_ip_pool_service(cptestctx: &ControlPlaneTestContext) { let ranges = [ IpRange::V4( Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 1), - std::net::Ipv4Addr::new(10, 0, 0, 2), + std::net::Ipv4Addr::new(10, 0, 0, 3), + std::net::Ipv4Addr::new(10, 0, 0, 4), ) .unwrap(), ), - IpRange::V6( - Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 0), - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 10), + IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(10, 0, 0, 1), + std::net::Ipv4Addr::new(10, 0, 0, 2), ) .unwrap(), ), diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index f9bc4e1da7..dbe436a8f2 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -8,10 +8,9 @@ use dropshot::test_util::LogContext; use futures::future::BoxFuture; use nexus_config::NexusConfig; use nexus_config::SchemaConfig; -use nexus_db_model::schema::SCHEMA_VERSION as LATEST_SCHEMA_VERSION; -use nexus_db_queries::db::datastore::{ - all_sql_for_version_migration, EARLIEST_SUPPORTED_VERSION, -}; +use nexus_db_model::EARLIEST_SUPPORTED_VERSION; +use nexus_db_model::SCHEMA_VERSION as LATEST_SCHEMA_VERSION; +use nexus_db_model::{AllSchemaVersions, SchemaVersion}; use nexus_db_queries::db::DISALLOW_FULL_TABLE_SCAN_SQL; use nexus_test_utils::{db, load_test_config, ControlPlaneTestContextBuilder}; use omicron_common::api::external::SemverVersion; @@ -20,7 +19,7 @@ use omicron_test_utils::dev::db::{Client, CockroachInstance}; use 
pretty_assertions::{assert_eq, assert_ne}; use similar_asserts; use slog::Logger; -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::BTreeMap; use std::net::IpAddr; use tokio::time::timeout; use tokio::time::Duration; @@ -111,10 +110,10 @@ async fn apply_update_as_transaction( async fn apply_update( log: &Logger, crdb: &CockroachInstance, - version: &str, + version: &SchemaVersion, times_to_apply: usize, ) { - let log = log.new(o!("target version" => version.to_string())); + let log = log.new(o!("target version" => version.semver().to_string())); info!(log, "Performing upgrade"); let client = crdb.connect().await.expect("failed to connect"); @@ -126,25 +125,27 @@ async fn apply_update( // We skip this for the earliest supported version because these tables // might not exist yet. - if version != EARLIEST_SUPPORTED_VERSION { + if *version.semver() != EARLIEST_SUPPORTED_VERSION { info!(log, "Updating schema version in db_metadata (setting target)"); - let sql = format!("UPDATE omicron.public.db_metadata SET target_version = '{}' WHERE singleton = true;", version); + let sql = format!( + "UPDATE omicron.public.db_metadata SET target_version = '{}' \ + WHERE singleton = true;", + version + ); client .batch_execute(&sql) .await .expect("Failed to bump version number"); } - let target_dir = Utf8PathBuf::from(SCHEMA_DIR).join(version); - let schema_change = - all_sql_for_version_migration(&target_dir).await.unwrap(); - for _ in 0..times_to_apply { - for nexus_db_queries::db::datastore::SchemaUpgradeStep { path, sql } in - &schema_change.steps - { - info!(log, "Applying sql schema upgrade step"; "path" => path.to_string()); - apply_update_as_transaction(&log, &client, sql).await; + for step in version.upgrade_steps() { + info!( + log, + "Applying sql schema upgrade step"; + "file" => step.label() + ); + apply_update_as_transaction(&log, &client, step.sql()).await; } } @@ -152,7 +153,11 @@ async fn apply_update( // // We do so explicitly here. info!(log, "Updating schema version in db_metadata (removing target)"); - let sql = format!("UPDATE omicron.public.db_metadata SET version = '{}', target_version = NULL WHERE singleton = true;", version); + let sql = format!( + "UPDATE omicron.public.db_metadata SET version = '{}', \ + target_version = NULL WHERE singleton = true;", + version + ); client.batch_execute(&sql).await.expect("Failed to bump version number"); client.cleanup().await.expect("cleaning up after wipe"); @@ -186,7 +191,8 @@ impl From<(&str, &str)> for SqlEnum { // interpret SQL types. // // Note that for the purposes of schema comparisons, we don't care about parsing -// the contents of the database, merely the schema and equality of contained data. +// the contents of the database, merely the schema and equality of contained +// data. 
#[derive(PartialEq, Clone, Debug)] enum AnySqlType { Bool(bool), @@ -443,20 +449,8 @@ async fn crdb_list_enums(crdb: &CockroachInstance) -> Vec { process_rows(&rows) } -async fn read_all_schema_versions() -> BTreeSet { - let mut all_versions = BTreeSet::new(); - - let mut dir = - tokio::fs::read_dir(SCHEMA_DIR).await.expect("Access schema dir"); - while let Some(entry) = dir.next_entry().await.expect("Read dirent") { - if entry.file_type().await.unwrap().is_dir() { - let name = entry.file_name().into_string().unwrap(); - if let Ok(observed_version) = name.parse::() { - all_versions.insert(observed_version); - } - } - } - all_versions +fn read_all_schema_versions() -> AllSchemaVersions { + AllSchemaVersions::load(camino::Utf8Path::new(SCHEMA_DIR)).unwrap() } // This test confirms the following behavior: @@ -474,10 +468,16 @@ async fn nexus_applies_update_on_boot() { let crdb = builder.database.as_ref().expect("Should have started CRDB"); // We started with an empty database -- apply an update here to bring - // us forward to our oldest supported schema version before trying to boot nexus. - apply_update(log, &crdb, EARLIEST_SUPPORTED_VERSION, 1).await; + // us forward to our oldest supported schema version before trying to boot + // nexus. + let all_versions = read_all_schema_versions(); + let earliest = all_versions + .iter_versions() + .next() + .expect("missing earliest schema version"); + apply_update(log, &crdb, earliest, 1).await; assert_eq!( - EARLIEST_SUPPORTED_VERSION, + EARLIEST_SUPPORTED_VERSION.to_string(), query_crdb_schema_version(&crdb).await ); @@ -542,16 +542,26 @@ async fn nexus_cannot_apply_update_from_unknown_version() { let log = &builder.logctx.log; let crdb = builder.database.as_ref().expect("Should have started CRDB"); - apply_update(log, &crdb, EARLIEST_SUPPORTED_VERSION, 1).await; + let all_versions = read_all_schema_versions(); + let earliest = all_versions + .iter_versions() + .next() + .expect("missing earliest schema version"); + apply_update(log, &crdb, earliest, 1).await; assert_eq!( - EARLIEST_SUPPORTED_VERSION, + EARLIEST_SUPPORTED_VERSION.to_string(), query_crdb_schema_version(&crdb).await ); // This version is not valid; it does not exist. 
let version = "0.0.0"; - crdb.connect().await.expect("Failed to connect") - .batch_execute(&format!("UPDATE omicron.public.db_metadata SET version = '{version}' WHERE singleton = true")) + crdb.connect() + .await + .expect("Failed to connect") + .batch_execute(&format!( + "UPDATE omicron.public.db_metadata SET version = '{version}' \ + WHERE singleton = true" + )) .await .expect("Failed to update schema"); @@ -583,11 +593,13 @@ async fn versions_have_idempotent_up() { let populate = false; let mut crdb = test_setup_just_crdb(&logctx.log, populate).await; - let all_versions = read_all_schema_versions().await; - - for version in &all_versions { - apply_update(log, &crdb, &version.to_string(), 2).await; - assert_eq!(version.to_string(), query_crdb_schema_version(&crdb).await); + let all_versions = read_all_schema_versions(); + for version in all_versions.iter_versions() { + apply_update(log, &crdb, &version, 2).await; + assert_eq!( + version.semver().to_string(), + query_crdb_schema_version(&crdb).await + ); } assert_eq!( LATEST_SCHEMA_VERSION.to_string(), @@ -910,12 +922,15 @@ async fn dbinit_equals_sum_of_all_up() { let populate = false; let mut crdb = test_setup_just_crdb(&logctx.log, populate).await; - let all_versions = read_all_schema_versions().await; + let all_versions = read_all_schema_versions(); // Go from the first version to the latest version. - for version in &all_versions { - apply_update(log, &crdb, &version.to_string(), 1).await; - assert_eq!(version.to_string(), query_crdb_schema_version(&crdb).await); + for version in all_versions.iter_versions() { + apply_update(log, &crdb, version, 1).await; + assert_eq!( + version.semver().to_string(), + query_crdb_schema_version(&crdb).await + ); } assert_eq!( LATEST_SCHEMA_VERSION.to_string(), @@ -1190,19 +1205,22 @@ async fn validate_data_migration() { let mut crdb = test_setup_just_crdb(&logctx.log, populate).await; let client = crdb.connect().await.expect("Failed to access CRDB client"); - let all_versions = read_all_schema_versions().await; + let all_versions = read_all_schema_versions(); let all_checks = get_migration_checks(); // Go from the first version to the latest version. - for version in &all_versions { + for version in all_versions.iter_versions() { // If this check has preconditions (or setup), run them. - let checks = all_checks.get(version); + let checks = all_checks.get(version.semver()); if let Some(before) = checks.and_then(|check| check.before) { before(&client).await; } - apply_update(log, &crdb, &version.to_string(), 1).await; - assert_eq!(version.to_string(), query_crdb_schema_version(&crdb).await); + apply_update(log, &crdb, version, 1).await; + assert_eq!( + version.semver().to_string(), + query_crdb_schema_version(&crdb).await + ); // If this check has postconditions (or cleanup), run them. 
if let Some(after) = checks.map(|check| check.after) { diff --git a/openapi/nexus.json b/openapi/nexus.json index 7d8e1909d2..5ea1a48d8b 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -7,7 +7,7 @@ "url": "https://oxide.computer", "email": "api@oxide.computer" }, - "version": "0.0.6" + "version": "20240327.0" }, "paths": { "/device/auth": { @@ -5432,6 +5432,7 @@ "system/networking" ], "summary": "Add range to IP pool", + "description": "IPv6 ranges are not allowed yet.", "operationId": "ip_pool_range_add", "parameters": [ { @@ -5847,6 +5848,7 @@ "system/networking" ], "summary": "Add IP range to Oxide service pool", + "description": "IPv6 ranges are not allowed yet.", "operationId": "ip_pool_service_range_add", "requestBody": { "content": { diff --git a/package-manifest.toml b/package-manifest.toml index e88f135985..0cc2e4939c 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -595,8 +595,8 @@ only_for_targets.image = "standard" # 2. Copy dendrite.tar.gz from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "41ddeab9d43d90a51e6fc1c236dc9982fc76f922" -source.sha256 = "8ebb889a555ce59cb0373a1ec9595536e015c951f6fc4d89308b4e3f09c83b20" +source.commit = "b128500231b916802a9436dd438742424aa5c6ce" +source.sha256 = "47e0b279cf5babb199ea4d7d1654ccef6f463eeae7cef24e8138658c57affb4a" output.type = "zone" output.intermediate_only = true @@ -620,8 +620,8 @@ only_for_targets.image = "standard" # 2. Copy the output zone image from dendrite/out to omicron/out source.type = "prebuilt" source.repo = "dendrite" -source.commit = "41ddeab9d43d90a51e6fc1c236dc9982fc76f922" -source.sha256 = "3e8aa5483d22316e1fd629c77277190dafa875938a9ab3900e92a210c5e91e91" +source.commit = "b128500231b916802a9436dd438742424aa5c6ce" +source.sha256 = "3e6e785a3cfbf12dd9752c3bb026e4e3e75aa9d4433ba65045c7396d31e970a9" output.type = "zone" output.intermediate_only = true @@ -638,8 +638,8 @@ only_for_targets.image = "standard" # 2. Copy dendrite.tar.gz from dendrite/out to omicron/out/dendrite-softnpu.tar.gz source.type = "prebuilt" source.repo = "dendrite" -source.commit = "41ddeab9d43d90a51e6fc1c236dc9982fc76f922" -source.sha256 = "5e5f2831f3c46253828ea237f701f1fa174061ab0bf73c200d31d09e94890ae7" +source.commit = "b128500231b916802a9436dd438742424aa5c6ce" +source.sha256 = "64cb9c62d3516c613ea70c36dff241cba2e1138e25a82a6367c40da262ca55ee" output.type = "zone" output.intermediate_only = true diff --git a/schema/crdb/README.adoc b/schema/crdb/README.adoc index 5b9c2f6a10..e017c01316 100644 --- a/schema/crdb/README.adoc +++ b/schema/crdb/README.adoc @@ -56,41 +56,75 @@ See RFD 319 for more discussion of the online upgrade plans. Assumptions: -* The (previously) latest schema version is referred to as `OLD_VERSION` -* Your new changes will bring the schema to a new version, `NEW_VERSION` +* We'll call the (previously) latest schema version `OLD_VERSION`. +* We'll call your new schema version `NEW_VERSION`. This will always be a major + version bump from `OLD_VERSION`. So if `OLD_VERSION` is 43.0.0, `NEW_VERSION` + should be `44.0.0`. +* You can write a sequence of SQL statements to update a database that's + currently running `OLD_VERSION` to one running `NEW_VERSION`. Process: -* Choose a `NEW_VERSION` number. This should almost certainly be a major - version bump over `OLD_VERSION`. -* Create directory `schema/crdb/NEW_VERSION`. 
-* If only one SQL statement is necessary to get from `OLD_VERSION` to - `NEW_VERSION`, put that statement into `schema/crdb/NEW_VERSION/up.sql`. If - multiple statements are required, put each one into a separate file, naming - these `schema/crdb/NEW_VERSION/upN.sql` for as many `N` as you need, staring - with `N=1`. -** Each file should contain _either_ one schema-modifying statement _or_ some - number of data-modifying statements. You can combine multiple data-modifying - statements. But you should not mix schema-modifying statements and - data-modifying statements in one file. And you should not include multiple - schema-modifying statements in one file. -** Beware that the entire file will be run in one transaction. Expensive data- - modifying operations leading to long-running transactions are generally - to-be-avoided; however, there's no better way to do this today if you really - do need to update thousands of rows as part of the update. -* Update `schema/crdb/dbinit.sql` to match what the database should look like - after your update is applied. Don't forget to update the version field of - `db_metadata` at the bottom of the file! -** If necessary, do the same thing for `schema/crdb/dbwipe.sql`. -* Update Nexus's idea of the latest schema, by updating its `SCHEMA_VERSION` to - `NEW_VERSION` within `nexus/db-model/src/schema.rs`. - -SQL Validation, via Automated Tests: - -* The `SCHEMA_VERSION` matches the version used in `dbinit.sql` -* The combination of all `up.sql` files results in the same schema as - `dbinit.sql` -* All `up.sql` files can be applied twice without error +* Create a directory of SQL files describing how to update a database running + version `OLD_VERSION` to one running version `NEW_VERSION`: +** Choose a unique, descriptive name for the directory. It's suggested that + you stick to lowercase letters, numbers, and hyphen. For example, if you're + adding a table called `widgets`, you might create a directory called + `create-table-widgets`. +** Create the directory: `schema/crdb/NAME`. +*** If only one SQL statement is necessary to get from `OLD_VERSION` to + `NEW_VERSION`, put that statement into `schema/crdb/NAME/up.sql`. If + multiple statements are required, put each one into a separate file, naming + these `schema/crdb/NAME/upN.sql` for as many `N` as you need, starting with + `N=1`. +*** Each file should contain _either_ one schema-modifying statement _or_ some + number of data-modifying statements. You can combine multiple data-modifying + statements. But you should not mix schema-modifying statements and + data-modifying statements in one file. And you should not include multiple + schema-modifying statements in one file. +*** Beware that the entire file will be run in one transaction. Expensive data- + modifying operations leading to long-running transactions are generally + to-be-avoided; however, there's no better way to do this today if you really + do need to update thousands of rows as part of the update. +* Update `schema/crdb/dbinit.sql`: +** Update the SQL statements to match what the database should look like + after your up*.sql files are applied. +** Update the version field of `db_metadata` at the bottom of the file. +* Update `schema/crdb/dbwipe.sql` if needed. (This is rare.) +* Update `nexus/db-model/src/schema_versions.rs`: +** Update the major number of `SCHEMA_VERSION` so that it matches `NEW_VERSION`. +** Add a new entry to the *top* of `KNOWN_VERSIONS`. 
It should be just one + line: `KnownVersion::new(NEW_VERSION.major, NAME)` +* Optional: check some of this work by running `cargo nextest run -p nexus-db-model -- schema_versions`. This is recommended because if you get + one of these steps wrong, these tests should be able to tell you, whereas + other tests might fail in much worse ways (e.g., they can hang if you've + updated `SCHEMA_VERSION` but not the database schema version). + +There are automated tests to validate many of these steps: + +* The `SCHEMA_VERSION` matches the version used in `dbinit.sql`. (This catches + the case where you forget to update either one of these). +* The `KNOWN_VERSIONS` are all strictly increasing semvers. New known versions + must be sequential major numbers with minor and micro both being `0`. (This + catches various mismerge errors: accidentally duplicating a version, putting + versions in the wrong order, etc.) +* The combination of all `up*.sql` files results in the same schema as + `dbinit.sql`. (This catches forgetting to update dbinit.sql, forgetting to + implement a schema update altogether, or a mismatch between dbinit.sql and + the update.) +* All `up*.sql` files can be applied twice without error. (This catches + non-idempotent update steps.) + +**If you've finished all this and another PR lands on "main" that chose the +same `NEW_VERSION`:**, then your `OLD_VERSION` has changed and so _your_ +`NEW_VERSION` needs to change, too. You'll need to: + +* In `nexus/db-model/src/schema_versions.rs`. +** Make sure `SCHEMA_VERSION` reflects your new `NEW_VERSION`. +** Make sure the `KNOWN_VERSIONS` entry that you added reflects your new + `NEW_VERSION` and still appears at the top of the list (logically after the + new version that came in from "main"). +* Update the version in `dbinit.sql` to match the new `NEW_VERSION`. === General notes diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index b56bec1ba9..82ddf6cd14 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3707,7 +3707,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - ( TRUE, NOW(), NOW(), '45.0.0', NULL) + ( TRUE, NOW(), NOW(), '46.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/first-named-migration/up.sql b/schema/crdb/first-named-migration/up.sql new file mode 100644 index 0000000000..3df411b9fc --- /dev/null +++ b/schema/crdb/first-named-migration/up.sql @@ -0,0 +1,2 @@ +-- This is a demo migration showing how to use the new KNOWN_VERSIONS mechanism. 
+SELECT TRUE; diff --git a/tools/dendrite_openapi_version b/tools/dendrite_openapi_version index bbc2110a0a..3d5f21ffe0 100644 --- a/tools/dendrite_openapi_version +++ b/tools/dendrite_openapi_version @@ -1,2 +1,2 @@ -COMMIT="41ddeab9d43d90a51e6fc1c236dc9982fc76f922" +COMMIT="b128500231b916802a9436dd438742424aa5c6ce" SHA2="50eff6d9f986b7b1af5970d11d8d01b812de37269731c6c691a244b3fdae82ae" diff --git a/tools/dendrite_stub_checksums b/tools/dendrite_stub_checksums index 86cf1a56ec..3bca6cd174 100644 --- a/tools/dendrite_stub_checksums +++ b/tools/dendrite_stub_checksums @@ -1,3 +1,3 @@ -CIDL_SHA256_ILLUMOS="8ebb889a555ce59cb0373a1ec9595536e015c951f6fc4d89308b4e3f09c83b20" -CIDL_SHA256_LINUX_DPD="f753444cae478cdedcde743a20a9df5965ed28cddab0f9632f3c263c66cd6397" +CIDL_SHA256_ILLUMOS="47e0b279cf5babb199ea4d7d1654ccef6f463eeae7cef24e8138658c57affb4a" +CIDL_SHA256_LINUX_DPD="11e464a38fa0858c3d896e82d7ee3123aee5f0cf4e3c2a029a0dd7cfd54d3adf" CIDL_SHA256_LINUX_SWADM="66eab497b955751d0704c3cd97ac5c1ed373aa656fc37ccba86ae9900b5ae96d" diff --git a/tools/opte_version b/tools/opte_version index 6a9619566a..e1b3e11499 100644 --- a/tools/opte_version +++ b/tools/opte_version @@ -1 +1 @@ -0.28.232 +0.28.233
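
The README.adoc changes above reference `nexus/db-model/src/schema_versions.rs`, but that file is not part of this diff. As a rough, self-contained sketch of the shape those instructions imply, the two edits might look like the following. The `KnownVersion` type here is a simplified stand-in rather than the real definition, and the new top entry uses the README's own example name `create-table-widgets` as a hypothetical follow-on to this PR's `first-named-migration` (46.0.0).

    // Simplified stand-in for the real KnownVersion in schema_versions.rs;
    // it only captures the pieces the README asks you to edit.
    #[derive(Debug, PartialEq, Eq)]
    struct KnownVersion {
        /// Major version; new schema versions are always MAJOR.0.0.
        major: u64,
        /// Directory name under schema/crdb/ holding this version's up*.sql files.
        relative_path: &'static str,
    }

    impl KnownVersion {
        const fn new(major: u64, relative_path: &'static str) -> Self {
            Self { major, relative_path }
        }
    }

    /// The schema version this build of Nexus expects (NEW_VERSION). Its major
    /// must match the top KNOWN_VERSIONS entry and the version written at the
    /// bottom of dbinit.sql.
    const SCHEMA_VERSION: KnownVersion =
        KnownVersion::new(47, "create-table-widgets");

    /// Newest first: a new migration is added as one line at the *top*.
    static KNOWN_VERSIONS: &[KnownVersion] = &[
        KnownVersion::new(47, "create-table-widgets"), // hypothetical follow-on
        KnownVersion::new(46, "first-named-migration"), // added by this PR
        // ... every older version, back to EARLIEST_SUPPORTED_VERSION ...
    ];

    fn main() {
        // Mirrors the check the README describes for the schema_versions tests:
        // known versions are sequential major numbers, newest first here.
        for pair in KNOWN_VERSIONS.windows(2) {
            assert_eq!(pair[0].major, pair[1].major + 1);
        }
        assert_eq!(SCHEMA_VERSION, KNOWN_VERSIONS[0]);
        println!("schema version expected by Nexus: {SCHEMA_VERSION:?}");
    }

If another PR landing on "main" claimed 47.0.0 first, both `SCHEMA_VERSION` and the new entry above would move to 48.0.0, with the entry still at the top of the list, exactly as the updated README describes.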