Add clickhouse-cluster-config to omdb blueprint output #6968

Merged (10 commits) on Nov 12, 2024
5 changes: 4 additions & 1 deletion nexus/reconfigurator/execution/src/clickhouse.rs
@@ -294,7 +294,10 @@ mod test {
         let num_servers = 2u64;

         let mut zones = BTreeMap::new();
-        let mut config = ClickhouseClusterConfig::new("test".to_string());
+        let mut config = ClickhouseClusterConfig::new(
+            "test".to_string(),
+            "test".to_string(),
+        );

         for keeper_id in 1..=num_keepers {
             let sled_id = SledUuid::new_v4();
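For context: `ClickhouseClusterConfig::new` now takes a second argument. Judging from the builder change below, where the production call passes `self.rng.next_clickhouse().to_string()`, it is presumably the cluster secret, now supplied by the caller rather than generated inside the constructor. A minimal sketch of the assumed shape (the field names are assumptions from the call sites, not the real nexus-types definition):

```rust
// Hypothetical sketch of the two-argument constructor; `cluster_name`
// and `cluster_secret` are assumed field names, and the real struct
// carries more state (generation counters, keeper/server maps).
#[derive(Debug, Clone)]
pub struct ClickhouseClusterConfig {
    pub cluster_name: String,
    pub cluster_secret: String,
}

impl ClickhouseClusterConfig {
    pub fn new(cluster_name: String, cluster_secret: String) -> Self {
        ClickhouseClusterConfig { cluster_name, cluster_secret }
    }
}

fn main() {
    // Mirrors the test above, where both arguments are "test".
    let config =
        ClickhouseClusterConfig::new("test".to_string(), "test".to_string());
    println!("{config:?}");
}
```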
82 changes: 42 additions & 40 deletions nexus/reconfigurator/planning/src/blueprint_builder/builder.rs
@@ -276,7 +276,6 @@ pub struct BlueprintBuilder<'a> {
     sled_ip_allocators: BTreeMap<SledUuid, IpAllocator>,
     external_networking: OnceCell<BuilderExternalNetworking<'a>>,
     internal_dns_subnets: OnceCell<DnsSubnetAllocator>,
-    clickhouse_allocator: Option<ClickhouseAllocator>,

     // These fields will become part of the final blueprint. See the
     // corresponding fields in `Blueprint`.
@@ -395,43 +394,6 @@ impl<'a> BlueprintBuilder<'a> {
                 || commissioned_sled_ids.contains(sled_id)
         });

-        // If we have the clickhouse cluster setup enabled via policy and we
-        // don't yet have a `ClickhouseClusterConfig`, then we must create
-        // one and feed it to our `ClickhouseAllocator`.
-        let clickhouse_allocator = if input.clickhouse_cluster_enabled() {
-            let parent_config = parent_blueprint
-                .clickhouse_cluster_config
-                .clone()
-                .unwrap_or_else(|| {
-                    info!(
-                        log,
-                        concat!(
-                            "Clickhouse cluster enabled by policy: ",
-                            "generating initial 'ClickhouseClusterConfig' ",
-                            "and 'ClickhouseAllocator'"
-                        )
-                    );
-                    ClickhouseClusterConfig::new(OXIMETER_CLUSTER.to_string())
-                });
-            Some(ClickhouseAllocator::new(
-                log.clone(),
-                parent_config,
-                inventory.latest_clickhouse_keeper_membership(),
-            ))
-        } else {
-            if parent_blueprint.clickhouse_cluster_config.is_some() {
-                info!(
-                    log,
-                    concat!(
-                        "clickhouse cluster disabled via policy ",
-                        "discarding existing 'ClickhouseAllocator' and ",
-                        "the resulting generated 'ClickhouseClusterConfig'"
-                    )
-                );
-            }
-            None
-        };
-
         Ok(BlueprintBuilder {
             log,
             parent_blueprint,
@@ -446,7 +408,6 @@ impl<'a> BlueprintBuilder<'a> {
             sled_state,
             cockroachdb_setting_preserve_downgrade: parent_blueprint
                 .cockroachdb_setting_preserve_downgrade,
-            clickhouse_allocator,
             creator: creator.to_owned(),
             operations: Vec::new(),
             comments: Vec::new(),
@@ -531,9 +492,50 @@ impl<'a> BlueprintBuilder<'a> {
             .datasets
             .into_datasets_map(self.input.all_sled_ids(SledFilter::InService));

+        // If we have the clickhouse cluster setup enabled via policy and we
+        // don't yet have a `ClickhouseClusterConfig`, then we must create
+        // one and feed it to our `ClickhouseAllocator`.
+        let clickhouse_allocator = if self.input.clickhouse_cluster_enabled() {
+            let parent_config = self
+                .parent_blueprint
+                .clickhouse_cluster_config
+                .clone()
+                .unwrap_or_else(|| {
+                    info!(
+                        self.log,
+                        concat!(
+                            "Clickhouse cluster enabled by policy: ",
+                            "generating initial 'ClickhouseClusterConfig' ",
+                            "and 'ClickhouseAllocator'"
+                        )
+                    );
+                    ClickhouseClusterConfig::new(
+                        OXIMETER_CLUSTER.to_string(),
+                        self.rng.next_clickhouse().to_string(),
+                    )
+                });
+            Some(ClickhouseAllocator::new(
+                self.log.clone(),
+                parent_config,
+                self.collection.latest_clickhouse_keeper_membership(),
+            ))
+        } else {
+            if self.parent_blueprint.clickhouse_cluster_config.is_some() {
+                info!(
+                    self.log,
+                    concat!(
+                        "clickhouse cluster disabled via policy ",
+                        "discarding existing 'ClickhouseAllocator' and ",
+                        "the resulting generated 'ClickhouseClusterConfig'"
+                    )
+                );
+            }
+            None
+        };
+
         // If we have an allocator, use it to generate a new config. If an error
         // is returned then log it and carry over the parent_config.
-        let clickhouse_cluster_config = self.clickhouse_allocator.map(|a| {
+        let clickhouse_cluster_config = clickhouse_allocator.map(|a| {
             match a.plan(&(&blueprint_zones).into()) {
                 Ok(config) => config,
                 Err(e) => {
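The net effect of the builder change: `ClickhouseAllocator` construction moves out of `BlueprintBuilder::new_based_on` and into `build`, where the deterministic planner RNG is available to mint the new config's secret and the allocator plans against the final zone layout. The error handling follows the comment above: a failed plan logs the error and carries the parent config forward. A self-contained sketch of that fallback pattern, using simplified stand-in types rather than the real omicron API:

```rust
// Stand-in types; the real ones are `ClickhouseAllocator` and
// `ClickhouseClusterConfig` in the omicron planner.
#[derive(Clone, Debug, PartialEq)]
struct ClusterConfig {
    generation: u64,
}

struct Allocator {
    parent_config: ClusterConfig,
}

impl Allocator {
    // Stand-in for `ClickhouseAllocator::plan`, which can fail (e.g. when
    // keeper membership is missing from inventory).
    fn plan(&self, have_inventory: bool) -> Result<ClusterConfig, String> {
        if have_inventory {
            Ok(ClusterConfig { generation: self.parent_config.generation + 1 })
        } else {
            Err("no clickhouse keeper membership in inventory".to_string())
        }
    }
}

// Mirrors the `build` logic: no allocator (cluster disabled by policy)
// means no config at all; a planning error logs and keeps the parent
// config instead of aborting blueprint generation.
fn plan_or_carry_over(
    allocator: Option<&Allocator>,
    have_inventory: bool,
) -> Option<ClusterConfig> {
    allocator.map(|a| match a.plan(have_inventory) {
        Ok(config) => config,
        Err(e) => {
            eprintln!("clickhouse allocator error: {e}; keeping parent config");
            a.parent_config.clone()
        }
    })
}

fn main() {
    let a = Allocator { parent_config: ClusterConfig { generation: 7 } };
    assert_eq!(
        plan_or_carry_over(Some(&a), true),
        Some(ClusterConfig { generation: 8 })
    );
    assert_eq!(
        plan_or_carry_over(Some(&a), false),
        Some(ClusterConfig { generation: 7 })
    );
    assert_eq!(plan_or_carry_over(None, true), None);
}
```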
46 changes: 44 additions & 2 deletions nexus/reconfigurator/planning/src/planner.rs
@@ -2823,6 +2823,12 @@ mod test {
             .plan()
             .expect("plan");

+        let diff = blueprint2.diff_since_blueprint(&blueprint1);
+        assert_contents(
+            "tests/output/planner_deploy_all_keeper_nodes_1_2.txt",
+            &diff.display().to_string(),
+        );
+
         // We should see zones for 3 clickhouse keepers, and 2 servers created
         let active_zones: Vec<_> = blueprint2
             .all_omicron_zones(BlueprintZoneFilter::ShouldBeRunning)
@@ -2932,6 +2938,12 @@ mod test {
             .plan()
             .expect("plan");

+        let diff = blueprint4.diff_since_blueprint(&blueprint3);
+        assert_contents(
+            "tests/output/planner_deploy_all_keeper_nodes_3_4.txt",
+            &diff.display().to_string(),
+        );
+
         let bp3_config = blueprint3.clickhouse_cluster_config.as_ref().unwrap();
         let bp4_config = blueprint4.clickhouse_cluster_config.as_ref().unwrap();
         assert_eq!(bp4_config.generation, bp3_config.generation);
@@ -2974,6 +2986,12 @@ mod test {
             .plan()
             .expect("plan");

+        let diff = blueprint5.diff_since_blueprint(&blueprint4);
+        assert_contents(
+            "tests/output/planner_deploy_all_keeper_nodes_4_5.txt",
+            &diff.display().to_string(),
+        );
+
         let active_zones: Vec<_> = blueprint5
             .all_omicron_zones(BlueprintZoneFilter::ShouldBeRunning)
             .map(|(_, z)| z.clone())
@@ -3015,6 +3033,12 @@ mod test {
             .plan()
             .expect("plan");

+        let diff = blueprint6.diff_since_blueprint(&blueprint5);
+        assert_contents(
+            "tests/output/planner_deploy_all_keeper_nodes_5_6.txt",
+            &diff.display().to_string(),
+        );
+
         let bp6_config = blueprint6.clickhouse_cluster_config.as_ref().unwrap();
         assert_eq!(bp5_config, bp6_config);

@@ -3238,6 +3262,12 @@ mod test {
             .plan()
             .expect("plan");

+        let diff = blueprint4.diff_since_blueprint(&blueprint3);
+        assert_contents(
+            "tests/output/planner_expunge_clickhouse_clusters_3_4.txt",
+            &diff.display().to_string(),
+        );
+
         // The planner should expunge a zone based on the sled being expunged. Since this
         // is a clickhouse keeper zone, the clickhouse keeper configuration should change
         // to reflect this.
@@ -3296,6 +3326,12 @@ mod test {
             .plan()
             .expect("plan");

+        let diff = blueprint6.diff_since_blueprint(&blueprint5);
+        assert_contents(
+            "tests/output/planner_expunge_clickhouse_clusters_5_6.txt",
+            &diff.display().to_string(),
+        );
+
         let old_config = blueprint5.clickhouse_cluster_config.as_ref().unwrap();
         let config = blueprint6.clickhouse_cluster_config.as_ref().unwrap();

@@ -3318,7 +3354,7 @@ mod test {
     #[test]
     fn test_expunge_clickhouse_zones_after_policy_is_changed() {
         static TEST_NAME: &str =
-            "planner_expunge_clickhouse__zones_after_policy_is_changed";
+            "planner_expunge_clickhouse_zones_after_policy_is_changed";
         let logctx = test_setup_log(TEST_NAME);
         let log = logctx.log.clone();

@@ -3425,6 +3461,12 @@ mod test {
             .plan()
             .expect("plan");

+        let diff = blueprint4.diff_since_blueprint(&blueprint3);
+        assert_contents(
+            "tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt",
+            &diff.display().to_string(),
+        );
+
         // All our clickhouse keeper and server zones that we created when we
         // enabled our clickhouse policy should be expunged when we disable it.
         let expunged_zones: Vec<_> = blueprint4
@@ -3446,7 +3488,7 @@ mod test {
         assert_eq!(keeper_zone_ids, expunged_keeper_zone_ids);
         assert_eq!(server_zone_ids, expunged_server_zone_ids);

-        // We should have a new single-node clickhouze zone
+        // We should have a new single-node clickhouse zone
         assert_eq!(
             1,
             blueprint4
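Each of the new test assertions snapshots a blueprint-to-blueprint diff into a golden file under `tests/output/`. Assuming `assert_contents` here is expectorate's golden-file helper (which omicron uses elsewhere), the pattern is: compute the diff, render it, and compare against the checked-in file. A minimal sketch of the same pattern:

```rust
// Minimal golden-file test sketch, assuming the expectorate crate's
// `assert_contents(path, actual)` helper. Rerunning the test with
// EXPECTORATE=overwrite set rewrites the expected file instead of failing.
use expectorate::assert_contents;

#[test]
fn golden_diff_example() {
    // Hypothetical rendered diff; the planner tests produce theirs via
    // `blueprint_n.diff_since_blueprint(&blueprint_m).display()`.
    let rendered = "zone added: clickhouse_keeper\n";
    assert_contents("tests/output/example_diff.txt", rendered);
}
```

This keeps the assertions cheap to maintain: when the planner's output legitimately changes, the goldens are regenerated and the diff shows up in code review.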
8 changes: 8 additions & 0 deletions nexus/reconfigurator/planning/src/planner/rng.rs
@@ -31,6 +31,7 @@ pub(crate) struct PlannerRng {
     dataset_rng: TypedUuidRng<DatasetKind>,
     network_interface_rng: UuidRng,
     external_ip_rng: TypedUuidRng<ExternalIpKind>,
+    clickhouse_rng: UuidRng,
 }

 impl PlannerRng {
@@ -46,13 +47,16 @@ impl PlannerRng {
             UuidRng::from_parent_rng(&mut parent, "network_interface");
         let external_ip_rng =
             TypedUuidRng::from_parent_rng(&mut parent, "external_ip");
+        let clickhouse_rng =
+            UuidRng::from_parent_rng(&mut parent, "clickhouse");

         Self {
             blueprint_rng,
             zone_rng,
             dataset_rng,
             network_interface_rng,
             external_ip_rng,
+            clickhouse_rng,
         }
     }

@@ -82,4 +86,8 @@ impl PlannerRng {
     pub fn next_external_ip(&mut self) -> ExternalIpUuid {
         self.external_ip_rng.next()
     }
+
+    pub fn next_clickhouse(&mut self) -> Uuid {
+        self.clickhouse_rng.next()
+    }
 }
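Every child RNG in `PlannerRng` is forked from one seeded parent and keyed by a name, so blueprint generation in tests is fully reproducible; `clickhouse_rng` is simply one more named stream, and since it is forked after the existing ones, the earlier streams presumably keep their values under a fixed seed. A self-contained sketch of the forking idea (assumed mechanics; omicron's `UuidRng` internals may differ):

```rust
// Sketch of name-keyed child RNG forking (assumed mechanics). Each child
// draws its own seed from the parent, so its stream is independent and
// reproducible given the parent's seed.
use rand::{RngCore, SeedableRng};
use rand_chacha::ChaCha8Rng;

struct ChildRng(ChaCha8Rng);

impl ChildRng {
    // The real `UuidRng::from_parent_rng` also mixes the name into the
    // seed material; here the name only documents which stream this is.
    fn from_parent_rng(parent: &mut ChaCha8Rng, _name: &str) -> Self {
        let mut seed = [0u8; 32];
        parent.fill_bytes(&mut seed);
        ChildRng(ChaCha8Rng::from_seed(seed))
    }

    fn next_u64(&mut self) -> u64 {
        self.0.next_u64()
    }
}

fn main() {
    let mut parent = ChaCha8Rng::seed_from_u64(42);
    let mut blueprint = ChildRng::from_parent_rng(&mut parent, "blueprint");
    // Appending a new stream does not change the seeds already drawn for
    // earlier children, which keeps existing test expectations stable.
    let mut clickhouse = ChildRng::from_parent_rng(&mut parent, "clickhouse");
    println!("{} {}", blueprint.next_u64(), clickhouse.next_u64());
}
```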