Add DB tables & models for blueprint clickhouse clusters (oxidecomput…
andrewjstone authored Sep 18, 2024
1 parent 40fc383 commit 1b43a0a
Showing 10 changed files with 214 additions and 7 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions nexus/db-model/Cargo.toml
@@ -14,6 +14,7 @@ omicron-rpaths.workspace = true
anyhow.workspace = true
camino.workspace = true
chrono.workspace = true
clickhouse-admin-types.workspace = true
derive-where.workspace = true
diesel = { workspace = true, features = ["postgres", "r2d2", "chrono", "serde_json", "network-address", "uuid"] }
hex.workspace = true
103 changes: 98 additions & 5 deletions nexus/db-model/src/deployment.rs
@@ -8,9 +8,11 @@
use crate::inventory::ZoneType;
use crate::omicron_zone_config::{self, OmicronZoneNic};
use crate::schema::{
    blueprint, bp_clickhouse_cluster_config,
    bp_clickhouse_keeper_zone_id_to_node_id,
    bp_clickhouse_server_zone_id_to_node_id, bp_omicron_physical_disk,
    bp_omicron_zone, bp_omicron_zone_nic, bp_sled_omicron_physical_disks,
    bp_sled_omicron_zones, bp_sled_state, bp_target,
};
use crate::typed_uuid::DbTypedUuid;
use crate::{
@@ -19,6 +21,7 @@ use crate::{
};
use anyhow::{anyhow, bail, Context, Result};
use chrono::{DateTime, Utc};
use clickhouse_admin_types::{KeeperId, ServerId};
use ipnetwork::IpNetwork;
use nexus_sled_agent_shared::inventory::OmicronZoneDataset;
use nexus_types::deployment::BlueprintTarget;
@@ -27,7 +30,7 @@ use nexus_types::deployment::BlueprintZoneDisposition;
use nexus_types::deployment::BlueprintZonesConfig;
use nexus_types::deployment::CockroachDbPreserveDowngrade;
use nexus_types::deployment::{
    blueprint_zone_type, BlueprintPhysicalDisksConfig, ClickhouseClusterConfig,
};
use nexus_types::deployment::{BlueprintPhysicalDiskConfig, BlueprintZoneType};
use nexus_types::deployment::{
@@ -37,10 +40,10 @@ use nexus_types::deployment::{
use omicron_common::api::internal::shared::NetworkInterface;
use omicron_common::disk::DiskIdentity;
use omicron_common::zpool_name::ZpoolName;
use omicron_uuid_kinds::SledUuid;
use omicron_uuid_kinds::ZpoolUuid;
use omicron_uuid_kinds::{ExternalIpKind, SledKind, ZpoolKind};
use omicron_uuid_kinds::{ExternalIpUuid, GenericUuid, OmicronZoneUuid};
use omicron_uuid_kinds::{OmicronZoneKind, SledUuid};
use std::net::{IpAddr, SocketAddrV6};
use uuid::Uuid;

@@ -803,6 +806,96 @@ impl From<BpOmicronZoneNic> for OmicronZoneNic {
    }
}

#[derive(Queryable, Clone, Debug, Selectable, Insertable)]
#[diesel(table_name = bp_clickhouse_cluster_config)]
pub struct BpClickhouseClusterConfig {
    pub blueprint_id: Uuid,
    pub generation: Generation,
    pub max_used_server_id: i64,
    pub max_used_keeper_id: i64,
    pub cluster_name: String,
    pub cluster_secret: String,
    pub highest_seen_keeper_leader_committed_log_index: i64,
}

impl BpClickhouseClusterConfig {
    pub fn new(
        blueprint_id: Uuid,
        config: &ClickhouseClusterConfig,
    ) -> anyhow::Result<BpClickhouseClusterConfig> {
        Ok(BpClickhouseClusterConfig {
            blueprint_id,
            generation: Generation(config.generation),
            max_used_server_id: config
                .max_used_server_id
                .0
                .try_into()
                .context("more than 2^63 IDs in use")?,
            max_used_keeper_id: config
                .max_used_keeper_id
                .0
                .try_into()
                .context("more than 2^63 IDs in use")?,
            cluster_name: config.cluster_name.clone(),
            cluster_secret: config.cluster_secret.clone(),
            highest_seen_keeper_leader_committed_log_index: config
                .highest_seen_keeper_leader_committed_log_index
                .try_into()
                .context("log index exceeds 2^63")?,
        })
    }
}
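
A usage sketch (hypothetical: `example_config()` stands in for a real planner-produced value, and the exact `ClickhouseClusterConfig` field types are inferred from the conversion above, not confirmed by this diff):

// Hypothetical usage: build the DB row for a blueprint's cluster config.
let blueprint_id = Uuid::new_v4();
let config: ClickhouseClusterConfig = example_config();

// Fallible because each u64 counter must fit in an INT8 column.
let row = BpClickhouseClusterConfig::new(blueprint_id, &config)
    .expect("ID counters fit in i64");
assert_eq!(row.blueprint_id, blueprint_id);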

#[derive(Queryable, Clone, Debug, Selectable, Insertable)]
#[diesel(table_name = bp_clickhouse_keeper_zone_id_to_node_id)]
pub struct BpClickhouseKeeperZoneIdToNodeId {
    pub blueprint_id: Uuid,
    pub omicron_zone_id: DbTypedUuid<OmicronZoneKind>,
    pub keeper_id: i64,
}

impl BpClickhouseKeeperZoneIdToNodeId {
    pub fn new(
        blueprint_id: Uuid,
        omicron_zone_id: OmicronZoneUuid,
        keeper_id: KeeperId,
    ) -> anyhow::Result<BpClickhouseKeeperZoneIdToNodeId> {
        Ok(BpClickhouseKeeperZoneIdToNodeId {
            blueprint_id,
            omicron_zone_id: omicron_zone_id.into(),
            keeper_id: keeper_id
                .0
                .try_into()
                .context("more than 2^63 IDs in use")?,
        })
    }
}

#[derive(Queryable, Clone, Debug, Selectable, Insertable)]
#[diesel(table_name = bp_clickhouse_server_zone_id_to_node_id)]
pub struct BpClickhouseServerZoneIdToNodeId {
    pub blueprint_id: Uuid,
    pub omicron_zone_id: DbTypedUuid<OmicronZoneKind>,
    pub server_id: i64,
}

impl BpClickhouseServerZoneIdToNodeId {
    pub fn new(
        blueprint_id: Uuid,
        omicron_zone_id: OmicronZoneUuid,
        server_id: ServerId,
    ) -> anyhow::Result<BpClickhouseServerZoneIdToNodeId> {
        Ok(BpClickhouseServerZoneIdToNodeId {
            blueprint_id,
            omicron_zone_id: omicron_zone_id.into(),
            server_id: server_id
                .0
                .try_into()
                .context("more than 2^63 IDs in use")?,
        })
    }
}
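
The same guard protects the mapping rows. A hypothetical failure-mode check (assuming `KeeperId` wraps a `u64`, as the `.0` access above suggests, and that the typed UUIDs offer a `new_v4` constructor):

// A keeper ID above i64::MAX cannot be stored in an INT8 column, so the
// constructor returns an error rather than silently truncating.
let result = BpClickhouseKeeperZoneIdToNodeId::new(
    Uuid::new_v4(),
    OmicronZoneUuid::new_v4(),
    KeeperId(u64::MAX),
);
assert!(result.is_err());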

mod diesel_util {
    use crate::{
        schema::bp_omicron_zone::disposition, to_db_bp_zone_disposition,
28 changes: 28 additions & 0 deletions nexus/db-model/src/schema.rs
@@ -1628,6 +1628,34 @@ table! {
    }
}

table! {
    bp_clickhouse_cluster_config (blueprint_id) {
        blueprint_id -> Uuid,
        generation -> Int8,
        max_used_server_id -> Int8,
        max_used_keeper_id -> Int8,
        cluster_name -> Text,
        cluster_secret -> Text,
        highest_seen_keeper_leader_committed_log_index -> Int8,
    }
}

table! {
    bp_clickhouse_keeper_zone_id_to_node_id (blueprint_id, omicron_zone_id, keeper_id) {
        blueprint_id -> Uuid,
        omicron_zone_id -> Uuid,
        keeper_id -> Int8,
    }
}

table! {
    bp_clickhouse_server_zone_id_to_node_id (blueprint_id, omicron_zone_id, server_id) {
        blueprint_id -> Uuid,
        omicron_zone_id -> Uuid,
        server_id -> Int8,
    }
}

table! {
    cockroachdb_zone_id_to_node_id (omicron_zone_id, crdb_node_id) {
        omicron_zone_id -> Uuid,
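
With these `table!` definitions in place, the models above work in ordinary Diesel queries. A hypothetical, synchronous sketch of loading one blueprint's keeper mappings (omicron's datastore layer is async, so this is shape only; the import paths are assumptions):

use diesel::prelude::*;
use nexus_db_model::schema::bp_clickhouse_keeper_zone_id_to_node_id::dsl;
use nexus_db_model::BpClickhouseKeeperZoneIdToNodeId;
use uuid::Uuid;

// Load every keeper-node mapping recorded for one blueprint.
fn keeper_mappings(
    conn: &mut PgConnection,
    blueprint_id: Uuid,
) -> QueryResult<Vec<BpClickhouseKeeperZoneIdToNodeId>> {
    dsl::bp_clickhouse_keeper_zone_id_to_node_id
        .filter(dsl::blueprint_id.eq(blueprint_id))
        .select(BpClickhouseKeeperZoneIdToNodeId::as_select())
        .load(conn)
}
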
3 changes: 2 additions & 1 deletion nexus/db-model/src/schema_versions.rs
@@ -17,7 +17,7 @@ use std::collections::BTreeMap;
///
/// This must be updated when you change the database schema. Refer to
/// schema/crdb/README.adoc in the root of this repository for details.
pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(98, 0, 0);
pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(99, 0, 0);

/// List of all past database schema versions, in *reverse* order
///
@@ -29,6 +29,7 @@ static KNOWN_VERSIONS: Lazy<Vec<KnownVersion>> = Lazy::new(|| {
    // | leaving the first copy as an example for the next person.
    // v
    // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"),
    KnownVersion::new(99, "blueprint-add-clickhouse-tables"),
    KnownVersion::new(98, "oximeter-add-time-expunged"),
    KnownVersion::new(97, "lookup-region-snapshot-by-region-id"),
    KnownVersion::new(96, "inv-dataset"),
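
The pairing between the constant bump and the new `KnownVersion` entry maintains an invariant along these lines (a sketch; the exact enforcement mechanism is an assumption):

// The newest entry in KNOWN_VERSIONS must agree with SCHEMA_VERSION, and
// its directory name must exist under schema/crdb/ (here, the three
// up*.sql files shown below).
assert_eq!(SCHEMA_VERSION, SemverVersion::new(99, 0, 0));
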
6 changes: 6 additions & 0 deletions nexus/types/src/deployment/planning_input.rs
@@ -737,6 +737,12 @@ pub struct Policy {
/// Policy for replicated clickhouse setups
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClickhousePolicy {
    /// Should we run the single-node cluster alongside the replicated
    /// cluster? This is stage 1 of our deployment plan, as laid out in
    /// RFD 468.
    ///
    /// If this is set to false, then we will only deploy the replicated
    /// cluster.
    pub deploy_with_standalone: bool,

    /// Desired number of clickhouse servers
    pub target_servers: usize,

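
An illustrative construction (hypothetical: the diff truncates the struct's remaining fields, so `target_keepers` is an assumed name):

// Stage 1 per RFD 468: run the single-node deployment alongside the
// replicated cluster while the latter is validated.
let policy = ClickhousePolicy {
    deploy_with_standalone: true,
    target_servers: 3,
    target_keepers: 5, // assumed field, truncated from this diff
};
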
9 changes: 9 additions & 0 deletions schema/crdb/blueprint-add-clickhouse-tables/up1.sql
@@ -0,0 +1,9 @@
CREATE TABLE IF NOT EXISTS omicron.public.bp_clickhouse_cluster_config (
    blueprint_id UUID PRIMARY KEY,
    generation INT8 NOT NULL,
    max_used_server_id INT8 NOT NULL,
    max_used_keeper_id INT8 NOT NULL,
    cluster_name TEXT NOT NULL,
    cluster_secret TEXT NOT NULL,
    highest_seen_keeper_leader_committed_log_index INT8 NOT NULL
);
6 changes: 6 additions & 0 deletions schema/crdb/blueprint-add-clickhouse-tables/up2.sql
@@ -0,0 +1,6 @@
CREATE TABLE IF NOT EXISTS omicron.public.bp_clickhouse_keeper_zone_id_to_node_id (
    blueprint_id UUID NOT NULL,
    omicron_zone_id UUID NOT NULL,
    keeper_id INT8 NOT NULL,
    PRIMARY KEY (blueprint_id, omicron_zone_id, keeper_id)
);
6 changes: 6 additions & 0 deletions schema/crdb/blueprint-add-clickhouse-tables/up3.sql
@@ -0,0 +1,6 @@
CREATE TABLE IF NOT EXISTS omicron.public.bp_clickhouse_server_zone_id_to_node_id (
    blueprint_id UUID NOT NULL,
    omicron_zone_id UUID NOT NULL,
    server_id INT8 NOT NULL,
    PRIMARY KEY (blueprint_id, omicron_zone_id, server_id)
);
58 changes: 57 additions & 1 deletion schema/crdb/dbinit.sql
@@ -3640,6 +3640,62 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_zone_nic (
    PRIMARY KEY (blueprint_id, id)
);

-- Blueprint information related to clickhouse cluster management
--
-- Rows for this table will only exist for deployments with an existing
-- `ClickhousePolicy` as part of the fleet `Policy`. In the limit, this will be
-- all deployments.
CREATE TABLE IF NOT EXISTS omicron.public.bp_clickhouse_cluster_config (
    -- Foreign key into the `blueprint` table
    blueprint_id UUID PRIMARY KEY,
    -- Generation number to track changes to the cluster state.
    -- Used for optimistic concurrency control.
    generation INT8 NOT NULL,

    -- Clickhouse server and keeper IDs can never be reused. We hand them out
    -- monotonically and keep track of the last one used here.
    max_used_server_id INT8 NOT NULL,
    max_used_keeper_id INT8 NOT NULL,

    -- Each clickhouse cluster has a unique name and secret value. These are set
    -- once and shared among all nodes for the lifetime of the fleet.
    cluster_name TEXT NOT NULL,
    cluster_secret TEXT NOT NULL,

    -- An inventory value recorded here as a marker that tells the
    -- reconfigurator whether a collected keeper raft configuration is recent.
    highest_seen_keeper_leader_committed_log_index INT8 NOT NULL
);

-- Mapping of an Omicron zone ID to a Clickhouse Keeper node ID in a specific
-- blueprint.
--
-- This can logically be considered a subtable of `bp_clickhouse_cluster_config`.
CREATE TABLE IF NOT EXISTS omicron.public.bp_clickhouse_keeper_zone_id_to_node_id (
    -- Foreign key into the `blueprint` table
    blueprint_id UUID NOT NULL,

    omicron_zone_id UUID NOT NULL,
    keeper_id INT8 NOT NULL,

    PRIMARY KEY (blueprint_id, omicron_zone_id, keeper_id)
);

-- Mapping of an Omicron zone ID to a Clickhouse Server node ID in a specific
-- blueprint.
--
-- This can logically be considered a subtable of `bp_clickhouse_cluster_config`.
CREATE TABLE IF NOT EXISTS omicron.public.bp_clickhouse_server_zone_id_to_node_id (
    -- Foreign key into the `blueprint` table
    blueprint_id UUID NOT NULL,

    omicron_zone_id UUID NOT NULL,
    server_id INT8 NOT NULL,

    PRIMARY KEY (blueprint_id, omicron_zone_id, server_id)
);


-- Mapping of Omicron zone ID to CockroachDB node ID. This isn't directly used
-- by the blueprint tables above, but is used by the more general Reconfigurator
-- system along with them (e.g., to decommission expunged CRDB nodes).
@@ -4299,7 +4355,7 @@ INSERT INTO omicron.public.db_metadata (
    version,
    target_version
) VALUES
    (TRUE, NOW(), NOW(), '98.0.0', NULL)
    (TRUE, NOW(), NOW(), '99.0.0', NULL)
ON CONFLICT DO NOTHING;

COMMIT;
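
To tie the schema back to the Diesel model above, a hypothetical insert helper (datastore plumbing for these tables is not part of this commit; import paths and signatures are assumptions):

use diesel::prelude::*;
use nexus_db_model::schema::bp_clickhouse_cluster_config::dsl;
use nexus_db_model::BpClickhouseClusterConfig;

// Write the cluster config row for a blueprint. The real datastore would
// do this in one transaction with the rest of the blueprint's bp_* rows.
fn insert_cluster_config(
    conn: &mut PgConnection,
    row: &BpClickhouseClusterConfig,
) -> QueryResult<usize> {
    diesel::insert_into(dsl::bp_clickhouse_cluster_config)
        .values(row)
        .execute(conn)
}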
