
Commit

wip
andrewjstone committed Aug 28, 2024
1 parent 8430330 commit 1686176
Showing 12 changed files with 68 additions and 25 deletions.
3 changes: 2 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -5,6 +5,7 @@ members = [
"certificates",
"clickhouse-admin",
"clickhouse-admin/api",
"clickhouse-admin/types",
"clients/bootstrap-agent-client",
"clients/cockroach-admin-client",
"clients/ddm-admin-client",
31 changes: 29 additions & 2 deletions nexus/db-model/src/deployment.rs
@@ -259,6 +259,9 @@ pub struct BpOmicronZone {

pub external_ip_id: Option<DbTypedUuid<ExternalIpKind>>,
pub filesystem_pool: Option<DbTypedUuid<ZpoolKind>>,

clickhouse_keeper_id: Option<i64>,
clickhouse_server_id: Option<i64>,
}

impl BpOmicronZone {
@@ -308,6 +311,8 @@ impl BpOmicronZone {
snat_ip: None,
snat_first_port: None,
snat_last_port: None,
clickhouse_keeper_id: None,
clickhouse_server_id: None,
};

match &blueprint_zone.zone_type {
@@ -350,18 +355,40 @@
bp_omicron_zone.set_zpool_name(dataset);
}
BlueprintZoneType::ClickhouseKeeper(
blueprint_zone_type::ClickhouseKeeper { address, dataset },
blueprint_zone_type::ClickhouseKeeper {
keeper_id,
address,
dataset,
},
) => {
// Set the common fields
bp_omicron_zone.set_primary_service_ip_and_port(address);
bp_omicron_zone.set_zpool_name(dataset);

// Set the zone specific fields
bp_omicron_zone.clickhouse_keeper_id = Some(
keeper_id
.0
.try_into()
.expect("no more than 2^63 keeper IDs please"),
);
}
BlueprintZoneType::ClickhouseServer(
blueprint_zone_type::ClickhouseServer { address, dataset },
blueprint_zone_type::ClickhouseServer {
server_id,
address,
dataset,
},
) => {
// Set the common fields
bp_omicron_zone.set_primary_service_ip_and_port(address);
bp_omicron_zone.set_zpool_name(dataset);

// Set the zone specific fields
bp_omicron_zone.clickhouse_server_id =
Some(server_id.0.try_into().expect(
"no more than 2^63 clickhouse server IDs please",
));
}
BlueprintZoneType::CockroachDb(
blueprint_zone_type::CockroachDb { address, dataset },
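
The new clickhouse_keeper_id and clickhouse_server_id columns are nullable signed 64-bit integers (Int8), while the in-memory IDs are unsigned. Below is a minimal standalone sketch of the conversion pattern used above, with a stand-in KeeperId newtype assumed to mirror clickhouse_admin_types::KeeperId; the read-side conversion is not part of this diff and is shown only for illustration.

// Stand-in for clickhouse_admin_types::KeeperId (assumed to wrap a u64).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct KeeperId(u64);

// Write side: CockroachDB's Int8 is signed, so IDs >= 2^63 cannot be stored;
// the blueprint code treats that case as unreachable and panics.
fn keeper_id_to_column(id: KeeperId) -> Option<i64> {
    Some(i64::try_from(id.0).expect("no more than 2^63 keeper IDs please"))
}

// Read side (hypothetical): values written above are always non-negative.
fn keeper_id_from_column(col: i64) -> KeeperId {
    KeeperId(u64::try_from(col).expect("keeper ID column is non-negative"))
}
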
4 changes: 4 additions & 0 deletions nexus/db-model/src/schema.rs
@@ -1595,6 +1595,10 @@ table! {
disposition -> crate::DbBpZoneDispositionEnum,
external_ip_id -> Nullable<Uuid>,
filesystem_pool -> Nullable<Uuid>,

clickhouse_keeper_id -> Nullable<Int8>,
clickhouse_server_id -> Nullable<Int8>

}
}

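
In diesel's table! DSL a Nullable<Int8> column maps to Option<i64> on the Rust side, which is exactly the type of the new fields added to BpOmicronZone in the previous file. A minimal toy sketch of that mapping, with table and column names invented for illustration:

use diesel::prelude::*;

diesel::table! {
    bp_omicron_zone_example (primary_key) {
        primary_key -> Int8,
        clickhouse_keeper_id -> Nullable<Int8>,
    }
}

#[derive(Queryable)]
struct BpOmicronZoneExample {
    primary_key: i64,
    // Nullable<Int8> deserializes to Option<i64>; NULL becomes None.
    clickhouse_keeper_id: Option<i64>,
}
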
2 changes: 1 addition & 1 deletion nexus/types/Cargo.toml
@@ -14,7 +14,7 @@ base64.workspace = true
camino.workspace = true
chrono.workspace = true
clap.workspace = true
clickward.workspace = true
clickhouse-admin-types.workspace = true
cookie.workspace = true
derive-where.workspace = true
derive_more.workspace = true
12 changes: 5 additions & 7 deletions nexus/types/src/deployment/clickhouse.rs
@@ -8,15 +8,13 @@ use crate::deployment::{
BlueprintZoneConfig, BlueprintZoneDisposition, BlueprintZonesConfig,
};
use camino::Utf8PathBuf;
use clickward::config::{
KeeperConfig, KeeperConfigsForReplica, KeeperCoordinationSettings,
LogConfig, LogLevel, Macros, RaftServerConfig, RaftServers, RemoteServers,
ReplicaConfig, ServerConfig,
use clickhouse_admin_types::{
ClickhouseKeeperConfig, ClickhouseServerConfig, KeeperId, ServerId,
};
use clickward::{KeeperId, ServerId};
use omicron_common::address::{
CLICKHOUSE_HTTP_PORT, CLICKHOUSE_INTERSERVER_HTTP_PORT,
CLICKHOUSE_KEEPER_PORT, CLICKHOUSE_KEEPER_RAFT_PORT, CLICKHOUSE_TCP_PORT,
CLICKHOUSE_HTTP_PORT, CLICKHOUSE_INTERSERVER_PORT,
CLICKHOUSE_KEEPER_RAFT_PORT, CLICKHOUSE_KEEPER_TCP_PORT,
CLICKHOUSE_TCP_PORT,
};
use omicron_common::api::external::Generation;
use omicron_uuid_kinds::SledUuid;
4 changes: 2 additions & 2 deletions nexus/types/src/deployment/zone_type.rs
@@ -341,7 +341,7 @@ pub mod blueprint_zone_type {
Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize,
)]
pub struct ClickhouseKeeper {
pub keeper_id: clickward::KeeperId,
pub keeper_id: clickhouse_admin_types::KeeperId,
pub address: SocketAddrV6,
pub dataset: OmicronZoneDataset,
}
@@ -351,7 +351,7 @@
Debug, Clone, Eq, PartialEq, JsonSchema, Deserialize, Serialize,
)]
pub struct ClickhouseServer {
pub server_id: clickward::ServerId,
pub server_id: clickhouse_admin_types::ServerId,
pub address: SocketAddrV6,
pub dataset: OmicronZoneDataset,
}
2 changes: 1 addition & 1 deletion nexus/types/src/inventory.rs
@@ -13,7 +13,7 @@ use crate::external_api::params::PhysicalDiskKind;
use crate::external_api::params::UninitializedSledId;
use chrono::DateTime;
use chrono::Utc;
use clickward::KeeperId;
use clickhouse_admin_types::KeeperId;
pub use gateway_client::types::PowerState;
pub use gateway_client::types::RotImageError;
pub use gateway_client::types::RotSlot;
1 change: 1 addition & 0 deletions sled-agent/Cargo.toml
@@ -22,6 +22,7 @@ cancel-safe-futures.workspace = true
cfg-if.workspace = true
chrono.workspace = true
clap.workspace = true
clickhouse-admin-types.workspace = true
# Only used by the simulated sled agent.
crucible-agent-client.workspace = true
derive_more.workspace = true
18 changes: 10 additions & 8 deletions sled-agent/src/rack_setup/plan/service.rs
@@ -739,21 +739,21 @@ impl Plan {
// TODO(https://github.com/oxidecomputer/omicron/issues/732): Remove
// Temporary linter rule until replicated Clickhouse is enabled
#[allow(clippy::reversed_empty_ranges)]
for _ in 0..CLICKHOUSE_SERVER_COUNT {
for id in 0..CLICKHOUSE_SERVER_COUNT as u64 {
let sled = {
let which_sled =
sled_allocator.next().ok_or(PlanError::NotEnoughSleds)?;
&mut sled_info[which_sled]
};
let id = OmicronZoneUuid::new_v4();
let zone_id = OmicronZoneUuid::new_v4();
let ip = sled.addr_alloc.next().expect("Not enough addrs");
// TODO: This may need to be a different port if/when we have single-node
// and replicated ClickHouse running side by side, as per stage 1 of RFD 468.
let port = omicron_common::address::CLICKHOUSE_HTTP_PORT;
let address = SocketAddrV6::new(ip, port, 0, 0);
dns_builder
.host_zone_with_one_backend(
id,
zone_id,
ip,
ServiceName::ClickhouseServer,
port,
@@ -764,10 +764,11 @@
let filesystem_pool = Some(dataset_name.pool().clone());
sled.request.zones.push(BlueprintZoneConfig {
disposition: BlueprintZoneDisposition::InService,
id,
id: zone_id,
underlay_address: ip,
zone_type: BlueprintZoneType::ClickhouseServer(
blueprint_zone_type::ClickhouseServer {
server_id: id.into(),
address,
dataset: OmicronZoneDataset {
pool_name: dataset_name.pool().clone(),
@@ -782,19 +783,19 @@
// TODO(https://github.com/oxidecomputer/omicron/issues/732): Remove
// Temporary linter rule until replicated Clickhouse is enabled
#[allow(clippy::reversed_empty_ranges)]
for _ in 0..CLICKHOUSE_KEEPER_COUNT {
for id in 0..CLICKHOUSE_KEEPER_COUNT as u64 {
let sled = {
let which_sled =
sled_allocator.next().ok_or(PlanError::NotEnoughSleds)?;
&mut sled_info[which_sled]
};
let id = OmicronZoneUuid::new_v4();
let zone_id = OmicronZoneUuid::new_v4();
let ip = sled.addr_alloc.next().expect("Not enough addrs");
let port = omicron_common::address::CLICKHOUSE_KEEPER_TCP_PORT;
let address = SocketAddrV6::new(ip, port, 0, 0);
dns_builder
.host_zone_with_one_backend(
id,
zone_id,
ip,
ServiceName::ClickhouseKeeper,
port,
@@ -805,10 +806,11 @@
let filesystem_pool = Some(dataset_name.pool().clone());
sled.request.zones.push(BlueprintZoneConfig {
disposition: BlueprintZoneDisposition::InService,
id,
id: zone_id,
underlay_address: ip,
zone_type: BlueprintZoneType::ClickhouseKeeper(
blueprint_zone_type::ClickhouseKeeper {
keeper_id: id.into(),
address,
dataset: OmicronZoneDataset {
pool_name: dataset_name.pool().clone(),
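
The RSS planner now numbers the ClickHouse zones with the loop counter and hands it to the zone type via id.into(), while each zone keeps its own freshly generated OmicronZoneUuid. For the .into() call to type-check, the ID newtypes need a From<u64> conversion; here is a minimal sketch of the assumed shape (the real definitions live in clickhouse-admin-types and may carry extra derives for serde and JSON schema):

// Stand-in for clickhouse_admin_types::ServerId; KeeperId follows the same pattern.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ServerId(u64);

impl From<u64> for ServerId {
    fn from(id: u64) -> Self {
        ServerId(id)
    }
}

fn assign_server_ids(count: u64) -> Vec<ServerId> {
    // Mirrors `for id in 0..CLICKHOUSE_SERVER_COUNT as u64 { ... server_id: id.into() ... }`.
    (0..count).map(ServerId::from).collect()
}

With that impl in place, server_id: id.into() in the struct literal resolves to ServerId::from(id).
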
9 changes: 8 additions & 1 deletion sled-agent/src/rack_setup/service.rs
@@ -91,7 +91,7 @@ use nexus_sled_agent_shared::inventory::{
};
use nexus_types::deployment::{
blueprint_zone_type, Blueprint, BlueprintZoneType, BlueprintZonesConfig,
CockroachDbPreserveDowngrade,
ClickhouseClusterConfig, CockroachDbPreserveDowngrade,
};
use nexus_types::external_api::views::SledState;
use omicron_common::address::get_sled_address;
@@ -692,6 +692,7 @@ impl ServiceInner {
.map_err(SetupServiceError::ConvertPlanToBlueprint)?;
// ... and use that to derive the initial blueprint from our plan.
let blueprint = build_initial_blueprint_from_plan(
&sled_plan.rack_id,
&sled_configs_by_id,
service_plan,
)
@@ -1388,6 +1389,7 @@ fn build_sled_configs_by_id(

// Build an initial blueprint
fn build_initial_blueprint_from_plan(
rack_id: &Uuid,
sled_configs_by_id: &BTreeMap<SledUuid, SledConfig>,
service_plan: &ServicePlan,
) -> anyhow::Result<Blueprint> {
@@ -1396,6 +1398,7 @@ fn build_initial_blueprint_from_plan(
.context("invalid internal dns version")?;

let blueprint = build_initial_blueprint_from_sled_configs(
rack_id,
sled_configs_by_id,
internal_dns_version,
);
@@ -1404,6 +1407,7 @@
}

pub(crate) fn build_initial_blueprint_from_sled_configs(
rack_id: &Uuid,
sled_configs_by_id: &BTreeMap<SledUuid, SledConfig>,
internal_dns_version: Generation,
) -> Blueprint {
@@ -1449,6 +1453,9 @@ pub(crate) fn build_initial_blueprint_from_sled_configs(
cockroachdb_fingerprint: String::new(),
cockroachdb_setting_preserve_downgrade:
CockroachDbPreserveDowngrade::DoNotModify,
clickhouse_cluster_config: ClickhouseClusterConfig::new(
rack_id.to_string(),
),
time_created: Utc::now(),
creator: "RSS".to_string(),
comment: "initial blueprint from rack setup".to_string(),
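
build_initial_blueprint_from_sled_configs now seeds the initial blueprint with a ClickhouseClusterConfig derived from the rack ID. The diff only shows the call site, so the following is just a sketch of the minimal constructor that call implies; the real type in nexus-types almost certainly carries more state (generation, allocated keeper and server IDs, and so on).

// Hypothetical minimal shape implied by `ClickhouseClusterConfig::new(rack_id.to_string())`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ClickhouseClusterConfig {
    /// Logical cluster name; here derived from the rack UUID.
    pub cluster_name: String,
}

impl ClickhouseClusterConfig {
    pub fn new(cluster_name: String) -> Self {
        ClickhouseClusterConfig { cluster_name }
    }
}
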
6 changes: 4 additions & 2 deletions sled-agent/src/sim/server.rs
@@ -255,12 +255,12 @@ async fn handoff_to_nexus(
log: &Logger,
config: &Config,
request: &NexusTypes::RackInitializationRequest,
rack_id: &Uuid,
) -> Result<(), anyhow::Error> {
let nexus_client = NexusClient::new(
&format!("http://{}", config.nexus_address),
log.new(o!("component" => "NexusClient")),
);
let rack_id = uuid::uuid!("c19a698f-c6f9-4a17-ae30-20d711b8f7dc");

let notify_nexus = || async {
nexus_client
@@ -548,8 +548,10 @@ pub async fn run_standalone_server(
SledConfig { disks, zones },
);

let rack_id = uuid::uuid!("c19a698f-c6f9-4a17-ae30-20d711b8f7dc");
let rack_init_request = NexusTypes::RackInitializationRequest {
blueprint: build_initial_blueprint_from_sled_configs(
&rack_id,
&sled_configs,
internal_dns_version,
),
@@ -576,7 +578,7 @@
allowed_source_ips: NexusTypes::AllowedSourceIps::Any,
};

handoff_to_nexus(&log, &config, &rack_init_request).await?;
handoff_to_nexus(&log, &config, &rack_init_request, &rack_id).await?;
info!(log, "Handoff to Nexus is complete");

server.wait_for_finish().await
