
Commit

Merge branch 'main' into felixmcfelix/floating-ip-live
FelixMcFelix committed Dec 15, 2023
2 parents b2d08f7 + 2195b56 commit 7d6118f
Showing 41 changed files with 1,108 additions and 106 deletions.
20 changes: 10 additions & 10 deletions Cargo.lock

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions Cargo.toml
@@ -261,15 +261,15 @@ omicron-sled-agent = { path = "sled-agent" }
omicron-test-utils = { path = "test-utils" }
omicron-zone-package = "0.9.1"
oxide-client = { path = "clients/oxide-client" }
-oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "01356ee8c5d876ce6614ea550e12114c10bcfb34", features = [ "api", "std" ] }
+oxide-vpc = { git = "https://github.com/oxidecomputer/opte", rev = "4e6e6ab6379fa4bc40f5d0c7340b9f35c45ad6e4", features = [ "api", "std" ] }
once_cell = "1.19.0"
openapi-lint = { git = "https://github.com/oxidecomputer/openapi-lint", branch = "main" }
openapiv3 = "2.0.0"
# must match samael's crate!
openssl = "0.10"
openssl-sys = "0.9"
openssl-probe = "0.1.5"
-opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "01356ee8c5d876ce6614ea550e12114c10bcfb34" }
+opte-ioctl = { git = "https://github.com/oxidecomputer/opte", rev = "4e6e6ab6379fa4bc40f5d0c7340b9f35c45ad6e4" }
oso = "0.27"
owo-colors = "3.5.0"
oximeter = { path = "oximeter/oximeter" }
11 changes: 6 additions & 5 deletions common/src/api/external/http_pagination.rs
@@ -58,6 +58,8 @@ use std::fmt::Debug;
use std::num::NonZeroU32;
use uuid::Uuid;

+use super::SimpleIdentity;
+
// General pagination infrastructure

/// Specifies which page of results we're on
Expand Down Expand Up @@ -147,15 +149,14 @@ pub fn marker_for_id<S, T: ObjectIdentity>(_: &S, t: &T) -> Uuid {
///
/// This is intended for use with [`ScanByNameOrId::results_page`] with objects
/// that impl [`ObjectIdentity`].
-pub fn marker_for_name_or_id<T: ObjectIdentity, Selector>(
+pub fn marker_for_name_or_id<T: SimpleIdentity, Selector>(
    scan: &ScanByNameOrId<Selector>,
    item: &T,
) -> NameOrId {
-    let identity = item.identity();
    match scan.sort_by {
-        NameOrIdSortMode::NameAscending => identity.name.clone().into(),
-        NameOrIdSortMode::NameDescending => identity.name.clone().into(),
-        NameOrIdSortMode::IdAscending => identity.id.into(),
+        NameOrIdSortMode::NameAscending => item.name().clone().into(),
+        NameOrIdSortMode::NameDescending => item.name().clone().into(),
+        NameOrIdSortMode::IdAscending => item.id().into(),
    }
}

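For readers unfamiliar with the pagination helpers, here is a minimal, self-contained sketch of the marker-selection logic in the hunk above. The types are simplified stand-ins rather than omicron's real `ScanByNameOrId` machinery; only the dispatch on the sort mode mirrors the actual change.

// Illustrative stand-ins only -- not the real omicron types.
use uuid::Uuid;

#[derive(Clone, Debug)]
pub enum NameOrId {
    Name(String),
    Id(Uuid),
}

pub enum NameOrIdSortMode {
    NameAscending,
    NameDescending,
    IdAscending,
}

pub trait SimpleIdentity {
    fn id(&self) -> Uuid;
    fn name(&self) -> &str;
}

/// Pick the pagination marker for `item` based on the scan's sort order:
/// name-sorted scans use the item's name, id-sorted scans use its id.
pub fn marker_for_name_or_id<T: SimpleIdentity>(
    sort_by: &NameOrIdSortMode,
    item: &T,
) -> NameOrId {
    match sort_by {
        NameOrIdSortMode::NameAscending | NameOrIdSortMode::NameDescending => {
            NameOrId::Name(item.name().to_string())
        }
        NameOrIdSortMode::IdAscending => NameOrId::Id(item.id()),
    }
}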
17 changes: 17 additions & 0 deletions common/src/api/external/mod.rs
@@ -71,6 +71,23 @@ pub trait ObjectIdentity {
fn identity(&self) -> &IdentityMetadata;
}

+/// Exists for types that don't properly implement `ObjectIdentity` but
+/// still need to be paginated by name or id.
+pub trait SimpleIdentity {
+    fn id(&self) -> Uuid;
+    fn name(&self) -> &Name;
+}
+
+impl<T: ObjectIdentity> SimpleIdentity for T {
+    fn id(&self) -> Uuid {
+        self.identity().id
+    }
+
+    fn name(&self) -> &Name {
+        &self.identity().name
+    }
+}

/// Parameters used to request a specific page of results when listing a
/// collection of objects
///
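The blanket impl above covers anything that already implements `ObjectIdentity`; the point of `SimpleIdentity` is that a type without a full `IdentityMetadata` can opt in by hand. Below is a rough sketch under assumed crate paths; the `FloatingIpShim` type and its fields are hypothetical and not part of this commit.

use omicron_common::api::external::{Name, SimpleIdentity};
use uuid::Uuid;

/// Hypothetical view type that carries an id and a name but no
/// embedded `IdentityMetadata`.
pub struct FloatingIpShim {
    pub id: Uuid,
    pub name: Name,
}

// Implementing `SimpleIdentity` directly is enough for helpers like
// `marker_for_name_or_id` to paginate this type by name or id.
impl SimpleIdentity for FloatingIpShim {
    fn id(&self) -> Uuid {
        self.id
    }

    fn name(&self) -> &Name {
        &self.name
    }
}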
4 changes: 2 additions & 2 deletions end-to-end-tests/Cargo.toml
@@ -16,8 +16,8 @@ omicron-test-utils.workspace = true
oxide-client.workspace = true
rand.workspace = true
reqwest.workspace = true
russh = "0.40.0"
russh-keys = "0.40.0"
russh = "0.40.1"
russh-keys = "0.40.1"
serde.workspace = true
serde_json.workspace = true
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
4 changes: 2 additions & 2 deletions internal-dns/src/config.rs
@@ -111,8 +111,8 @@ impl Host {
///
/// `DnsConfigBuilder` provides a much simpler interface for constructing DNS
/// zone data than using `DnsConfig` directly. That's because it makes a number
-/// of assumptions that are true of the control plane DNS zone (all described in
-/// RFD 248), but not true in general about DNS zones:
+/// of assumptions that are true of the control plane DNS zones (all described
+/// in RFD 248), but not true in general about DNS zones:
///
/// - We assume that there are only two kinds of hosts: a "sled" (an illumos
/// global zone) or a "zone" (an illumos non-global zone). (Both of these are
2 changes: 2 additions & 0 deletions nexus/db-model/src/lib.rs
@@ -81,6 +81,7 @@ mod switch;
mod unsigned;
mod update_artifact;
mod user_builtin;
+mod utilization;
mod virtual_provisioning_collection;
mod virtual_provisioning_resource;
mod vmm;
@@ -167,6 +168,7 @@ pub use switch_port::*;
pub use system_update::*;
pub use update_artifact::*;
pub use user_builtin::*;
+pub use utilization::*;
pub use virtual_provisioning_collection::*;
pub use virtual_provisioning_resource::*;
pub use vmm::*;
21 changes: 5 additions & 16 deletions nexus/db-model/src/quota.rs
@@ -65,22 +65,11 @@ impl From<SiloQuotas> for views::SiloQuotas {
    fn from(silo_quotas: SiloQuotas) -> Self {
        Self {
            silo_id: silo_quotas.silo_id,
-            cpus: silo_quotas.cpus,
-            memory: silo_quotas.memory.into(),
-            storage: silo_quotas.storage.into(),
-        }
-    }
-}
-
-impl From<views::SiloQuotas> for SiloQuotas {
-    fn from(silo_quotas: views::SiloQuotas) -> Self {
-        Self {
-            silo_id: silo_quotas.silo_id,
-            time_created: Utc::now(),
-            time_modified: Utc::now(),
-            cpus: silo_quotas.cpus,
-            memory: silo_quotas.memory.into(),
-            storage: silo_quotas.storage.into(),
+            limits: views::VirtualResourceCounts {
+                cpus: silo_quotas.cpus,
+                memory: silo_quotas.memory.into(),
+                storage: silo_quotas.storage.into(),
+            },
        }
    }
}
15 changes: 14 additions & 1 deletion nexus/db-model/src/schema.rs
@@ -420,6 +420,19 @@ table! {
}
}

+table! {
+    silo_utilization(silo_id) {
+        silo_id -> Uuid,
+        silo_name -> Text,
+        cpus_provisioned -> Int8,
+        memory_provisioned -> Int8,
+        storage_provisioned -> Int8,
+        cpus_allocated -> Int8,
+        memory_allocated -> Int8,
+        storage_allocated -> Int8,
+    }
+}

table! {
network_interface (id) {
id -> Uuid,
@@ -1333,7 +1346,7 @@ table! {
///
/// This should be updated whenever the schema is changed. For more details,
/// refer to: schema/crdb/README.adoc
-pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(20, 0, 0);
+pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(21, 0, 0);

allow_tables_to_appear_in_same_query!(
system_update,
56 changes: 56 additions & 0 deletions nexus/db-model/src/utilization.rs
@@ -0,0 +1,56 @@
use crate::ByteCount;
use crate::{schema::silo_utilization, Name};
use nexus_types::external_api::views;
use serde::{Deserialize, Serialize};
use uuid::Uuid;

#[derive(Queryable, Debug, Clone, Selectable, Serialize, Deserialize)]
#[diesel(table_name = silo_utilization)]
pub struct SiloUtilization {
    pub silo_id: Uuid,
    pub silo_name: Name,

    pub cpus_allocated: i64,
    pub memory_allocated: ByteCount,
    pub storage_allocated: ByteCount,

    pub cpus_provisioned: i64,
    pub memory_provisioned: ByteCount,
    pub storage_provisioned: ByteCount,
}

impl From<SiloUtilization> for views::SiloUtilization {
    fn from(silo_utilization: SiloUtilization) -> Self {
        Self {
            silo_id: silo_utilization.silo_id,
            silo_name: silo_utilization.silo_name.into(),
            provisioned: views::VirtualResourceCounts {
                cpus: silo_utilization.cpus_provisioned,
                memory: silo_utilization.memory_provisioned.into(),
                storage: silo_utilization.storage_provisioned.into(),
            },
            allocated: views::VirtualResourceCounts {
                cpus: silo_utilization.cpus_allocated,
                memory: silo_utilization.memory_allocated.into(),
                storage: silo_utilization.storage_allocated.into(),
            },
        }
    }
}

impl From<SiloUtilization> for views::Utilization {
    fn from(silo_utilization: SiloUtilization) -> Self {
        Self {
            provisioned: views::VirtualResourceCounts {
                cpus: silo_utilization.cpus_provisioned,
                memory: silo_utilization.memory_provisioned.into(),
                storage: silo_utilization.storage_provisioned.into(),
            },
            capacity: views::VirtualResourceCounts {
                cpus: silo_utilization.cpus_allocated,
                memory: silo_utilization.memory_allocated.into(),
                storage: silo_utilization.storage_allocated.into(),
            },
        }
    }
}
22 changes: 21 additions & 1 deletion nexus/db-queries/src/db/datastore/disk.rs
@@ -657,7 +657,20 @@ impl DataStore {
    /// If the disk delete saga unwinds, then the disk should _not_ remain
    /// deleted: disk delete saga should be triggered again in order to fully
    /// complete, and the only way to do that is to un-delete the disk. Set it
-    /// to faulted to ensure that it won't be used.
+    /// to faulted to ensure that it won't be used. Use the disk's UUID as part
+    /// of its new name to ensure that even if a user created another disk that
+    /// shadows this "phantom" disk the original can still be un-deleted and
+    /// faulted.
+    ///
+    /// It's worth pointing out that it's possible that the user created a disk,
+    /// then used that disk's ID to make a new disk with the same name as this
+    /// function would have picked when undeleting the original disk. In the
+    /// event that the original disk's delete saga unwound, this would cause
+    /// that unwind to fail at this step, and would cause a stuck saga that
+    /// requires manual intervention. The fixes as part of addressing issue 3866
+    /// should greatly reduce the number of disk delete sagas that unwind, but
+    /// this possibility does exist. To any customer reading this: please don't
+    /// name your disks `deleted-{another disk's id}` :)
    pub async fn project_undelete_disk_set_faulted_no_auth(
        &self,
        disk_id: &Uuid,
@@ -667,12 +680,19 @@

        let faulted = api::external::DiskState::Faulted.label();

+        // If only the UUID is used, you will hit "name cannot be a UUID to
+        // avoid ambiguity with IDs". Add a small prefix to avoid this, and use
+        // "deleted" to be unambiguous to the user about what they should do
+        // with this disk.
+        let new_name = format!("deleted-{disk_id}");

        let result = diesel::update(dsl::disk)
            .filter(dsl::time_deleted.is_not_null())
            .filter(dsl::id.eq(*disk_id))
            .set((
                dsl::time_deleted.eq(None::<DateTime<Utc>>),
                dsl::disk_state.eq(faulted),
+                dsl::name.eq(new_name),
            ))
            .check_if_exists::<Disk>(*disk_id)
            .execute_and_check(&conn)
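As a quick worked example of the naming scheme described in the doc comment above (the UUID below is made up purely for illustration), the un-deleted disk gets a `deleted-` prefix so its new name is recognizable and is not itself a bare UUID:

use uuid::Uuid;

fn main() {
    // Made-up disk ID, only to show the resulting name format.
    let disk_id: Uuid =
        "f1b5d9e2-3c4a-4b6d-8e1f-2a3b4c5d6e7f".parse().unwrap();

    // A bare UUID is rejected as a disk name ("name cannot be a UUID to
    // avoid ambiguity with IDs"), so a "deleted-" prefix is prepended.
    let new_name = format!("deleted-{disk_id}");
    assert_eq!(new_name, "deleted-f1b5d9e2-3c4a-4b6d-8e1f-2a3b4c5d6e7f");
    println!("{new_name}");
}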
42 changes: 27 additions & 15 deletions nexus/db-queries/src/db/datastore/external_ip.rs
@@ -147,22 +147,34 @@
    ) -> CreateResult<ExternalIp> {
        let ip_id = Uuid::new_v4();

-        let pool_id = match params.pool {
-            Some(NameOrId::Name(name)) => {
-                LookupPath::new(opctx, self)
-                    .ip_pool_name(&Name(name))
-                    .fetch_for(authz::Action::Read)
-                    .await?
-                    .1
-            }
-            Some(NameOrId::Id(id)) => {
-                LookupPath::new(opctx, self)
-                    .ip_pool_id(id)
-                    .fetch_for(authz::Action::Read)
-                    .await?
-                    .1
+        // See `allocate_instance_ephemeral_ip`: we're replicating
+        // its structure to prevent cross-silo pool access.
+        let pool_id = if let Some(name_or_id) = params.pool {
+            let (.., authz_pool, pool) = match name_or_id {
+                NameOrId::Name(name) => {
+                    LookupPath::new(opctx, self)
+                        .ip_pool_name(&Name(name))
+                        .fetch_for(authz::Action::CreateChild)
+                        .await?
+                }
+                NameOrId::Id(id) => {
+                    LookupPath::new(opctx, self)
+                        .ip_pool_id(id)
+                        .fetch_for(authz::Action::CreateChild)
+                        .await?
+                }
+            };
+
+            let authz_silo_id = opctx.authn.silo_required()?.id();
+            if let Some(pool_silo_id) = pool.silo_id {
+                if pool_silo_id != authz_silo_id {
+                    return Err(authz_pool.not_found());
+                }
            }
-            None => self.ip_pools_fetch_default(opctx).await?,
+
+            pool
+        } else {
+            self.ip_pools_fetch_default(opctx).await?
        }
        .id();

