Skip to content

Commit

Permalink
review feedback
Browse files Browse the repository at this point in the history
  • Loading branch information
davepacheco committed Jan 17, 2024
1 parent b49794a commit c58c7ed
Show file tree
Hide file tree
Showing 6 changed files with 112 additions and 76 deletions.
18 changes: 18 additions & 0 deletions clients/sled-agent-client/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,24 @@ impl types::OmicronZoneType {
types::OmicronZoneType::Oximeter { .. } => "oximeter",
}
}

/// Identifies whether this is an NTP zone
pub fn is_ntp(&self) -> bool {
match self {
types::OmicronZoneType::BoundaryNtp { .. }
| types::OmicronZoneType::InternalNtp { .. } => true,

types::OmicronZoneType::Clickhouse { .. }
| types::OmicronZoneType::ClickhouseKeeper { .. }
| types::OmicronZoneType::CockroachDb { .. }
| types::OmicronZoneType::Crucible { .. }
| types::OmicronZoneType::CruciblePantry { .. }
| types::OmicronZoneType::ExternalDns { .. }
| types::OmicronZoneType::InternalDns { .. }
| types::OmicronZoneType::Nexus { .. }
| types::OmicronZoneType::Oximeter { .. } => false,
}
}
}

impl omicron_common::api::external::ClientError for types::Error {
Expand Down
26 changes: 18 additions & 8 deletions internal-dns/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ pub enum ZoneVariant {

/// Used to construct the DNS name for a control plane host
#[derive(Clone, Debug, PartialEq, PartialOrd)]
enum Host {
pub enum Host {
/// Used to construct an AAAA record for a sled.
Sled(Uuid),

Expand All @@ -92,6 +92,10 @@ enum Host {
}

impl Host {
pub fn for_zone(id: Uuid, variant: ZoneVariant) -> Host {
Host::Zone { id, variant }
}

/// Returns the DNS name for this host, ignoring the zone part of the DNS
/// name
pub(crate) fn dns_name(&self) -> String {
Expand All @@ -105,6 +109,12 @@ impl Host {
}
}
}

/// Returns the fully-qualified DNS name, including the name of the
/// control plane's internal DNS zone
pub fn fqdn(&self) -> String {
format!("{}.{}", self.dns_name(), DNS_ZONE)
}
}

/// Builder for assembling DNS data for the control plane's DNS zone
Expand Down Expand Up @@ -168,8 +178,12 @@ pub struct Zone {
}

impl Zone {
pub(crate) fn to_host(&self) -> Host {
Host::Zone { id: self.id, variant: self.variant }
}

pub(crate) fn dns_name(&self) -> String {
Host::Zone { id: self.id, variant: self.variant }.dns_name()
self.to_host().dns_name()
}
}

Expand Down Expand Up @@ -393,7 +407,7 @@ impl DnsConfigBuilder {
prio: 0,
weight: 0,
port,
target: format!("{}.{}", zone.dns_name(), DNS_ZONE),
target: zone.to_host().fqdn(),
})
})
.collect();
Expand All @@ -412,11 +426,7 @@ impl DnsConfigBuilder {
prio: 0,
weight: 0,
port,
target: format!(
"{}.{}",
Host::Sled(sled.0).dns_name(),
DNS_ZONE
),
target: Host::Sled(sled.0).fqdn(),
})
})
.collect();
Expand Down
114 changes: 61 additions & 53 deletions nexus/deployment/src/blueprint_builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,8 @@
use crate::ip_allocator::IpAllocator;
use anyhow::anyhow;
use internal_dns::DNS_ZONE;
use internal_dns::config::Host;
use internal_dns::config::ZoneVariant;
use ipnet::IpAdd;
use nexus_types::deployment::Blueprint;
use nexus_types::deployment::OmicronZoneConfig;
Expand Down Expand Up @@ -40,6 +41,16 @@ pub enum Error {
Planner(#[from] anyhow::Error),
}

/// Describes whether an idempotent "ensure" operation resulted in action
/// being taken or whether no action was necessary
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum Ensure {
/// action was taken
Added,
/// no action was necessary
NotNeeded,
}

/// Helper for assembling a blueprint
///
/// There are two basic ways to assemble a new blueprint:
Expand Down Expand Up @@ -187,31 +198,28 @@ impl<'a> BlueprintBuilder<'a> {
///
/// This is a short human-readable string summarizing the changes reflected
/// in the blueprint. This is only intended for debugging.
pub fn comment(&mut self, comment: &str) {
self.comments.push(comment.to_owned());
pub fn comment<S>(&mut self, comment: S)
where
String: From<S>,
{
self.comments.push(String::from(comment));
}

pub fn sled_ensure_zone_internal_ntp(
pub fn sled_ensure_zone_ntp(
&mut self,
sled_id: Uuid,
) -> Result<bool, Error> {
) -> Result<Ensure, Error> {
// If there's already an NTP zone on this sled, do nothing.
let has_ntp = self
.parent_blueprint
.omicron_zones
.get(&sled_id)
.map(|found_zones| {
found_zones.zones.iter().any(|z| {
matches!(
z.zone_type,
OmicronZoneType::BoundaryNtp { .. }
| OmicronZoneType::InternalNtp { .. }
)
})
found_zones.zones.iter().any(|z| z.zone_type.is_ntp())
})
.unwrap_or(false);
if has_ntp {
return Ok(false);
return Ok(Ensure::NotNeeded);
}

let sled_info = self.sled_resources(sled_id)?;
Expand Down Expand Up @@ -239,7 +247,7 @@ impl<'a> BlueprintBuilder<'a> {
.all_omicron_zones()
.filter_map(|(_, z)| {
if matches!(z.zone_type, OmicronZoneType::BoundaryNtp { .. }) {
Some(format!("{}.host.{}", z.id, DNS_ZONE))
Some(Host::for_zone(z.id, ZoneVariant::Other).fqdn())
} else {
None
}
Expand All @@ -258,14 +266,14 @@ impl<'a> BlueprintBuilder<'a> {
};

self.sled_add_zone(sled_id, zone)?;
Ok(true)
Ok(Ensure::Added)
}

pub fn sled_ensure_zone_crucible(
&mut self,
sled_id: Uuid,
pool_name: ZpoolName,
) -> Result<bool, Error> {
) -> Result<Ensure, Error> {
// If this sled already has a Crucible zone on this pool, do nothing.
let has_crucible_on_this_pool = self
.parent_blueprint
Expand All @@ -282,7 +290,7 @@ impl<'a> BlueprintBuilder<'a> {
})
.unwrap_or(false);
if has_crucible_on_this_pool {
return Ok(false);
return Ok(Ensure::NotNeeded);
}

let sled_info = self.sled_resources(sled_id)?;
Expand All @@ -307,14 +315,15 @@ impl<'a> BlueprintBuilder<'a> {
},
};
self.sled_add_zone(sled_id, zone)?;
Ok(true)
Ok(Ensure::Added)
}

fn sled_add_zone(
&mut self,
sled_id: Uuid,
zone: OmicronZoneConfig,
) -> Result<(), Error> {
// Check the sled id and return an appropriate error if it's invalid.
let _ = self.sled_resources(sled_id)?;

if !self.zones_in_service.insert(zone.id) {
Expand Down Expand Up @@ -352,49 +361,48 @@ impl<'a> BlueprintBuilder<'a> {
/// Returns a newly-allocated underlay address suitable for use by Omicron
/// zones
fn sled_alloc_ip(&mut self, sled_id: Uuid) -> Result<Ipv6Addr, Error> {
let sled_info = self.sled_resources(sled_id)?;
let sled_subnet = self.sled_resources(sled_id)?.subnet;

// Work around rust-lang/rust-clippy#11935 and related issues.
// Clippy doesn't grok that we can't use
// `entry(&sled_id).or_insert_with(closure)` because `closure` would
// borrow `self`, which we can't do because we already have an immutable
// borrow of `self` by calling `entry()`. Using `match` on the result
// of `entry` has a similar problem.
#[allow(clippy::map_entry)]
if !self.sled_ip_allocators.contains_key(&sled_id) {
let sled_subnet = sled_info.subnet;
let sled_subnet_addr = sled_subnet.net().network();
let minimum = sled_subnet_addr
.saturating_add(u128::from(SLED_RESERVED_ADDRESSES));
let maximum = sled_subnet_addr
.saturating_add(u128::from(CP_SERVICES_RESERVED_ADDRESSES));
assert!(sled_subnet.net().contains(minimum));
assert!(sled_subnet.net().contains(maximum));
let mut allocator = IpAllocator::new(minimum, maximum);

// We shouldn't need to explicitly reserve the sled's global zone
// and switch addresses because they should be out of our range, but
// we do so just to be sure.
let sled_gz_addr = *get_sled_address(sled_subnet).ip();
assert!(sled_subnet.net().contains(sled_gz_addr));
assert!(minimum > sled_gz_addr);
assert!(maximum > sled_gz_addr);
let switch_zone_addr = get_switch_zone_address(sled_subnet);
assert!(sled_subnet.net().contains(switch_zone_addr));
assert!(minimum > switch_zone_addr);
assert!(maximum > switch_zone_addr);

// Record each of the sled's zones' underlay addresses as allocated.
if let Some(sled_zones) = self.omicron_zones.get(&sled_id) {
for z in &sled_zones.zones {
allocator.reserve(z.underlay_address);
let allocator =
self.sled_ip_allocators.entry(sled_id).or_insert_with(|| {
let sled_subnet_addr = sled_subnet.net().network();
let minimum = sled_subnet_addr
.saturating_add(u128::from(SLED_RESERVED_ADDRESSES));
let maximum = sled_subnet_addr
.saturating_add(u128::from(CP_SERVICES_RESERVED_ADDRESSES));
assert!(sled_subnet.net().contains(minimum));
assert!(sled_subnet.net().contains(maximum));
let mut allocator = IpAllocator::new(minimum, maximum);

// We shouldn't need to explicitly reserve the sled's global
// zone and switch addresses because they should be out of our
// range, but we do so just to be sure.
let sled_gz_addr = *get_sled_address(sled_subnet).ip();
assert!(sled_subnet.net().contains(sled_gz_addr));
assert!(minimum > sled_gz_addr);
assert!(maximum > sled_gz_addr);
let switch_zone_addr = get_switch_zone_address(sled_subnet);
assert!(sled_subnet.net().contains(switch_zone_addr));
assert!(minimum > switch_zone_addr);
assert!(maximum > switch_zone_addr);

// Record each of the sled's zones' underlay addresses as
// allocated.
if let Some(sled_zones) = self.omicron_zones.get(&sled_id) {
for z in &sled_zones.zones {
allocator.reserve(z.underlay_address);
}
}
}

self.sled_ip_allocators.insert(sled_id, allocator);
}
allocator
});

let allocator = self.sled_ip_allocators.get_mut(&sled_id).unwrap();
allocator.alloc().ok_or_else(|| Error::OutOfAddresses { sled_id })
}

Expand Down Expand Up @@ -592,7 +600,7 @@ pub mod test {
// existing sleds, plus Crucible zones on all pools. So if we ensure
// all these zones exist, we should see no change.
for (sled_id, sled_resources) in &policy.sleds {
builder.sled_ensure_zone_internal_ntp(*sled_id).unwrap();
builder.sled_ensure_zone_ntp(*sled_id).unwrap();
for pool_name in &sled_resources.zpools {
builder
.sled_ensure_zone_crucible(*sled_id, pool_name.clone())
Expand All @@ -615,7 +623,7 @@ pub mod test {
let _ = policy_add_sled(&mut policy, new_sled_id);
let mut builder =
BlueprintBuilder::new_based_on(&blueprint2, &policy, "test_basic");
builder.sled_ensure_zone_internal_ntp(new_sled_id).unwrap();
builder.sled_ensure_zone_ntp(new_sled_id).unwrap();
let new_sled_resources = policy.sleds.get(&new_sled_id).unwrap();
for pool_name in &new_sled_resources.zpools {
builder
Expand Down
7 changes: 5 additions & 2 deletions nexus/deployment/src/planner.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
//! See crate-level documentation for details.
use crate::blueprint_builder::BlueprintBuilder;
use crate::blueprint_builder::Ensure;
use crate::blueprint_builder::Error;
use nexus_types::deployment::Blueprint;
use nexus_types::deployment::Policy;
Expand Down Expand Up @@ -51,7 +52,7 @@ impl<'a> Planner<'a> {
// there, all we can do is provision that one zone. We have to wait
// for that to succeed and synchronize the clock before we can
// provision anything else.
if self.blueprint.sled_ensure_zone_internal_ntp(*sled_id)? {
if self.blueprint.sled_ensure_zone_ntp(*sled_id)? == Ensure::Added {
info!(
&self.log,
"found sled missing NTP zone (will add one)";
Expand All @@ -71,6 +72,7 @@ impl<'a> Planner<'a> {
if self
.blueprint
.sled_ensure_zone_crucible(*sled_id, zpool_name.clone())?
== Ensure::Added
{
info!(
&self.log,
Expand All @@ -90,6 +92,7 @@ impl<'a> Planner<'a> {
// explicit here means we won't forget to do this when more code
// is added below.)
self.blueprint.comment(&format!("sled {}: add zones", sled_id));
continue;
}
}

Expand Down Expand Up @@ -158,7 +161,7 @@ mod test {
.expect("failed to plan");

let diff = blueprint2.diff(&blueprint3);
println!("2 -> 3 (expect new NTP zone on new sled):\n{}", diff,);
println!("2 -> 3 (expect new NTP zone on new sled):\n{}", diff);
let sleds = diff.sleds_added().collect::<Vec<_>>();
let (sled_id, sled_zones) = sleds[0];
// We have defined elsewhere that the first generation contains no
Expand Down
17 changes: 6 additions & 11 deletions nexus/types/src/deployment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -353,7 +353,7 @@ impl<'a> OmicronZonesDiff<'a> {
)
})
.collect();
let b2zones: BTreeMap<Uuid, ZoneInfo> = b2sledzones
let mut b2zones: BTreeMap<Uuid, ZoneInfo> = b2sledzones
.zones
.iter()
.map(|zone| {
Expand All @@ -368,13 +368,12 @@ impl<'a> OmicronZonesDiff<'a> {
)
})
.collect();
let mut zones_added = vec![];
let mut zones_removed = vec![];
let mut zones_changed = vec![];

// Now go through each zone and compare them.
for (zone_id, b1z_info) in &b1zones {
if let Some(b2z_info) = b2zones.get(zone_id) {
if let Some(b2z_info) = b2zones.remove(zone_id) {
let changed_how = if b1z_info.zone != b2z_info.zone {
DiffZoneChangedHow::DetailsChanged
} else if b1z_info.in_service && !b2z_info.in_service {
Expand All @@ -394,14 +393,10 @@ impl<'a> OmicronZonesDiff<'a> {
}
}

for (zone_id, b2z_info) in &b2zones {
if b1zones.contains_key(&zone_id) {
// We handled this zone above.
continue;
}

zones_added.push(b2z_info.zone)
}
// Since we removed common zones above, anything else exists only in
// b2 and was therefore added.
let zones_added =
b2zones.into_values().map(|b2z_info| b2z_info.zone).collect();

(
sled_id,
Expand Down
Loading

0 comments on commit c58c7ed

Please sign in to comment.